Repository: snowflakedb/snowflake-jdbc Branch: master Commit: 342336349d2a Files: 1042 Total size: 87.1 MB Directory structure: gitextract_690hkf8e/ ├── .cursor/ │ └── skills/ │ └── graphite-pr-workflow/ │ └── SKILL.md ├── .github/ │ ├── CODEOWNERS │ ├── ISSUE_TEMPLATE/ │ │ ├── BUG_REPORT.md │ │ └── FEATURE_REQUEST.md │ ├── pull_request_template.md │ ├── repo_meta.yaml │ └── workflows/ │ ├── build-test.yml │ ├── changelog.yml │ ├── check-style.yml │ ├── cla_bot.yml │ ├── jira_close.yml │ ├── jira_comment.yml │ ├── jira_issue.yml │ ├── parameters_aws.json.gpg │ ├── parameters_aws_auth_tests.json.gpg │ ├── parameters_azure.json.gpg │ ├── parameters_gcp.json.gpg │ ├── rsa_keys/ │ │ ├── rsa_key_jdbc_aws.p8.gpg │ │ ├── rsa_key_jdbc_azure.p8.gpg │ │ └── rsa_key_jdbc_gcp.p8.gpg │ ├── semgrep.yml │ ├── snyk-issue.yml │ ├── snyk-pr.yml │ └── snyk-scan.yml ├── .gitignore ├── .mvn/ │ └── wrapper/ │ ├── maven-wrapper.jar │ └── maven-wrapper.properties ├── .pre-commit-config.yaml ├── CHANGELOG.md ├── FIPS/ │ ├── .gitignore │ ├── pom.xml │ ├── public_pom.xml │ ├── scripts/ │ │ └── check_content.sh │ └── src/ │ └── test/ │ └── java/ │ └── net/ │ └── snowflake/ │ └── client/ │ ├── AbstractDriverIT.java │ ├── DontRunOnGCP.java │ ├── DontRunOnGithubActions.java │ ├── TestUtil.java │ ├── category/ │ │ └── FipsTestSuite.java │ └── jdbc/ │ └── ConnectionFipsIT.java ├── Jenkinsfile ├── LICENSE.txt ├── README.rst ├── SECURITY.md ├── TestOnly/ │ ├── .gitignore │ ├── README.rst │ └── pom.xml ├── ci/ │ ├── _init.sh │ ├── build.sh │ ├── container/ │ │ ├── build_component.sh │ │ ├── change_snowflake_test_pwd.py │ │ ├── create_schema.py │ │ ├── download_artifact.sh │ │ ├── drop_schema.py │ │ ├── hang_webserver.py │ │ ├── sf_test_utils.py │ │ ├── test_authentication.sh │ │ ├── test_component.sh │ │ └── upload_artifact.sh │ ├── image/ │ │ ├── .gitignore │ │ ├── Dockerfile.jdbc-rockylinux-openjdk-test │ │ ├── Dockerfile.jdbc-rockylinux8-openjdk-test │ │ ├── build.sh │ │ ├── scripts/ │ │ │ ├── aws.sh │ │ │ ├── entrypoint.sh │ │ │ ├── git.sh │ │ │ ├── npmrc │ │ │ ├── pip.sh │ │ │ └── python3.6.sh │ │ └── update.sh │ ├── log_analyze_setup.sh │ ├── scripts/ │ │ ├── check_content.sh │ │ ├── check_no_raw_system_calls.sh │ │ ├── login_docker.sh │ │ ├── set_git_info.sh │ │ └── setup_gpg.sh │ ├── test.sh │ ├── test_authentication.sh │ ├── test_mac.sh │ ├── test_revocation.sh │ ├── test_wif.sh │ ├── test_windows.bat │ └── wif/ │ ├── aws-lambda/ │ │ ├── README.md │ │ ├── pom.xml │ │ ├── src/ │ │ │ └── main/ │ │ │ └── java/ │ │ │ └── com/ │ │ │ └── snowflake/ │ │ │ └── wif/ │ │ │ └── aws/ │ │ │ └── WifLambdaFunctionE2e.java │ │ └── test.sh │ ├── azure-function/ │ │ ├── Dockerfile │ │ ├── README.md │ │ ├── host.json │ │ ├── pom.xml │ │ ├── src/ │ │ │ └── main/ │ │ │ └── java/ │ │ │ └── com/ │ │ │ └── snowflake/ │ │ │ └── wif/ │ │ │ └── azure/ │ │ │ └── WifAzureFunctionE2e.java │ │ └── test.sh │ ├── gcp-function/ │ │ ├── README.md │ │ ├── pom.xml │ │ ├── src/ │ │ │ └── main/ │ │ │ └── java/ │ │ │ └── com/ │ │ │ └── snowflake/ │ │ │ └── wif/ │ │ │ └── gcp/ │ │ │ └── WifGcpFunctionE2e.java │ │ └── test.sh │ ├── parameters/ │ │ ├── parameters_wif.json.gpg │ │ ├── parameters_wif_function.json.gpg │ │ ├── rsa_gcp_function.gpg │ │ ├── rsa_wif_aws_azure.gpg │ │ └── rsa_wif_gcp.gpg │ ├── shared/ │ │ └── com/ │ │ └── snowflake/ │ │ └── wif/ │ │ └── common/ │ │ └── WifTestHelper.java │ └── test_wif.sh ├── codecov/ │ ├── codecov │ ├── codecov.SHA256SUM │ ├── codecov.SHA256SUM.sig │ └── codecov_aarch64 ├── codecov.yml ├── dependencies/ │ ├── 
Readme.md │ ├── arrow-format-17.0.0.jar │ ├── arrow-memory-17.0.0.pom │ ├── arrow-memory-core-17.0.0.jar │ ├── arrow-memory-netty-buffer-patch-17.0.0.jar │ ├── arrow-memory-unsafe-17.0.0.jar │ ├── arrow-vector-17.0.0.jar │ └── tika-core-2.4.1.jar ├── fat-jar-test-app/ │ ├── pom.xml │ ├── run.sh │ └── src/ │ └── main/ │ ├── fips-java/ │ │ └── net/ │ │ └── snowflake/ │ │ └── FipsInitializer.java │ ├── java/ │ │ └── net/ │ │ └── snowflake/ │ │ └── FatJarTestApp.java │ └── resources/ │ ├── logback.xml │ ├── logging.properties │ └── test.csv ├── linkage-checker-exclusion-rules.xml ├── mvnw ├── mvnw.cmd ├── output.json ├── parent-pom.xml ├── pom.xml ├── prepareNewVersion.sh ├── prober/ │ ├── Dockerfile │ ├── Jenkinsfile.groovy │ ├── entrypoint.sh │ └── src/ │ └── main/ │ └── java/ │ └── com/ │ └── snowflake/ │ └── client/ │ └── jdbc/ │ └── prober/ │ └── Prober.java ├── public_pom.xml ├── settings.json ├── src/ │ ├── main/ │ │ ├── java/ │ │ │ └── net/ │ │ │ └── snowflake/ │ │ │ └── client/ │ │ │ ├── api/ │ │ │ │ ├── auth/ │ │ │ │ │ └── AuthenticatorType.java │ │ │ │ ├── connection/ │ │ │ │ │ ├── DownloadStreamConfig.java │ │ │ │ │ ├── SnowflakeConnection.java │ │ │ │ │ ├── SnowflakeDatabaseMetaData.java │ │ │ │ │ └── UploadStreamConfig.java │ │ │ │ ├── datasource/ │ │ │ │ │ ├── SnowflakeDataSource.java │ │ │ │ │ └── SnowflakeDataSourceFactory.java │ │ │ │ ├── driver/ │ │ │ │ │ └── SnowflakeDriver.java │ │ │ │ ├── exception/ │ │ │ │ │ ├── ErrorCode.java │ │ │ │ │ └── SnowflakeSQLException.java │ │ │ │ ├── http/ │ │ │ │ │ └── HttpHeadersCustomizer.java │ │ │ │ ├── loader/ │ │ │ │ │ ├── LoadResultListener.java │ │ │ │ │ ├── Loader.java │ │ │ │ │ ├── LoaderFactory.java │ │ │ │ │ ├── LoaderProperty.java │ │ │ │ │ ├── LoadingError.java │ │ │ │ │ └── Operation.java │ │ │ │ ├── pooling/ │ │ │ │ │ ├── SnowflakeConnectionPoolDataSource.java │ │ │ │ │ └── SnowflakeConnectionPoolDataSourceFactory.java │ │ │ │ ├── resultset/ │ │ │ │ │ ├── FieldMetadata.java │ │ │ │ │ ├── QueryStatus.java │ │ │ │ │ ├── SnowflakeAsyncResultSet.java │ │ │ │ │ ├── SnowflakeResultSet.java │ │ │ │ │ ├── SnowflakeResultSetMetaData.java │ │ │ │ │ ├── SnowflakeResultSetSerializable.java │ │ │ │ │ └── SnowflakeType.java │ │ │ │ └── statement/ │ │ │ │ ├── SnowflakePreparedStatement.java │ │ │ │ └── SnowflakeStatement.java │ │ │ ├── internal/ │ │ │ │ ├── api/ │ │ │ │ │ └── implementation/ │ │ │ │ │ ├── connection/ │ │ │ │ │ │ └── SnowflakeConnectionImpl.java │ │ │ │ │ ├── datasource/ │ │ │ │ │ │ └── SnowflakeBasicDataSource.java │ │ │ │ │ ├── metadata/ │ │ │ │ │ │ └── SnowflakeDatabaseMetaDataImpl.java │ │ │ │ │ ├── pooling/ │ │ │ │ │ │ ├── LogicalConnection.java │ │ │ │ │ │ ├── SnowflakeConnectionPoolDataSourceImpl.java │ │ │ │ │ │ └── SnowflakePooledConnection.java │ │ │ │ │ ├── resultset/ │ │ │ │ │ │ ├── FieldMetadataImpl.java │ │ │ │ │ │ └── SnowflakeBaseResultSet.java │ │ │ │ │ └── statement/ │ │ │ │ │ ├── SnowflakeCallableStatementImpl.java │ │ │ │ │ ├── SnowflakePreparedStatementImpl.java │ │ │ │ │ └── SnowflakeStatementImpl.java │ │ │ │ ├── common/ │ │ │ │ │ └── core/ │ │ │ │ │ ├── SFBinary.java │ │ │ │ │ └── SFBinaryFormat.java │ │ │ │ ├── config/ │ │ │ │ │ ├── ConnectionParameters.java │ │ │ │ │ ├── SFClientConfig.java │ │ │ │ │ ├── SFClientConfigParser.java │ │ │ │ │ └── SFConnectionConfigParser.java │ │ │ │ ├── core/ │ │ │ │ │ ├── ArrowSqlInput.java │ │ │ │ │ ├── AssertUtil.java │ │ │ │ │ ├── AttributeEnhancingHttpRequestRetryHandler.java │ │ │ │ │ ├── BaseSqlInput.java │ │ │ │ │ ├── BasicEvent.java │ │ │ │ │ ├── 
CachedCredentialType.java │ │ │ │ │ ├── CancellationReason.java │ │ │ │ │ ├── ChunkDownloader.java │ │ │ │ │ ├── ColumnTypeHelper.java │ │ │ │ │ ├── Constants.java │ │ │ │ │ ├── CredentialManager.java │ │ │ │ │ ├── DataConversionContext.java │ │ │ │ │ ├── DefaultFileCacheManager.java │ │ │ │ │ ├── DownloaderMetrics.java │ │ │ │ │ ├── Event.java │ │ │ │ │ ├── EventHandler.java │ │ │ │ │ ├── EventUtil.java │ │ │ │ │ ├── FieldSchemaCreator.java │ │ │ │ │ ├── FileCacheManager.java │ │ │ │ │ ├── FileCacheManagerBuilder.java │ │ │ │ │ ├── FileCacheUtil.java │ │ │ │ │ ├── FileTypeDetector.java │ │ │ │ │ ├── FileUtil.java │ │ │ │ │ ├── HeaderCustomizerHttpRequestInterceptor.java │ │ │ │ │ ├── HeartbeatIntervalSelector.java │ │ │ │ │ ├── HeartbeatRegistry.java │ │ │ │ │ ├── HeartbeatThread.java │ │ │ │ │ ├── HttpClientSettingsKey.java │ │ │ │ │ ├── HttpExecutingContext.java │ │ │ │ │ ├── HttpExecutingContextBuilder.java │ │ │ │ │ ├── HttpProtocol.java │ │ │ │ │ ├── HttpResponseContextDto.java │ │ │ │ │ ├── HttpResponseWithHeaders.java │ │ │ │ │ ├── HttpUtil.java │ │ │ │ │ ├── JsonSqlInput.java │ │ │ │ │ ├── JsonSqlOutput.java │ │ │ │ │ ├── MetaDataOfBinds.java │ │ │ │ │ ├── NoOpFileCacheManager.java │ │ │ │ │ ├── OCSPMode.java │ │ │ │ │ ├── OCSPTelemetryData.java │ │ │ │ │ ├── ObjectMapperFactory.java │ │ │ │ │ ├── OpaqueContextDTO.java │ │ │ │ │ ├── ParameterBindingDTO.java │ │ │ │ │ ├── PrivateLinkDetector.java │ │ │ │ │ ├── QueryContextCache.java │ │ │ │ │ ├── QueryContextDTO.java │ │ │ │ │ ├── QueryContextEntryDTO.java │ │ │ │ │ ├── QueryExecDTO.java │ │ │ │ │ ├── QueryResultFormat.java │ │ │ │ │ ├── ResultUtil.java │ │ │ │ │ ├── SFArrowResultSet.java │ │ │ │ │ ├── SFBaseResultSet.java │ │ │ │ │ ├── SFBaseSession.java │ │ │ │ │ ├── SFBaseStatement.java │ │ │ │ │ ├── SFBasicCrlTrustManager.java │ │ │ │ │ ├── SFChildResult.java │ │ │ │ │ ├── SFCrlTrustManagerFactory.java │ │ │ │ │ ├── SFException.java │ │ │ │ │ ├── SFExtendedCrlTrustManager.java │ │ │ │ │ ├── SFFixedViewResultSet.java │ │ │ │ │ ├── SFJsonResultSet.java │ │ │ │ │ ├── SFLoginInput.java │ │ │ │ │ ├── SFLoginOutput.java │ │ │ │ │ ├── SFOCSPException.java │ │ │ │ │ ├── SFOauthLoginInput.java │ │ │ │ │ ├── SFPreparedStatementMetaData.java │ │ │ │ │ ├── SFPubKeysInternal.java │ │ │ │ │ ├── SFResultSet.java │ │ │ │ │ ├── SFResultSetFactory.java │ │ │ │ │ ├── SFResultSetMetaData.java │ │ │ │ │ ├── SFSSLConnectionSocketFactory.java │ │ │ │ │ ├── SFSession.java │ │ │ │ │ ├── SFSessionProperty.java │ │ │ │ │ ├── SFSqlInput.java │ │ │ │ │ ├── SFStatement.java │ │ │ │ │ ├── SFStatementType.java │ │ │ │ │ ├── SFTrustManager.java │ │ │ │ │ ├── SdkProxyRoutePlanner.java │ │ │ │ │ ├── SecureStorageAppleManager.java │ │ │ │ │ ├── SecureStorageLinuxManager.java │ │ │ │ │ ├── SecureStorageManager.java │ │ │ │ │ ├── SecureStorageWindowsManager.java │ │ │ │ │ ├── SecurityUtil.java │ │ │ │ │ ├── SessionUtil.java │ │ │ │ │ ├── SessionUtilExternalBrowser.java │ │ │ │ │ ├── SessionUtilKeyPair.java │ │ │ │ │ ├── SfSqlArray.java │ │ │ │ │ ├── SfTimestampUtil.java │ │ │ │ │ ├── SnowflakeMutableProxyRoutePlanner.java │ │ │ │ │ ├── SpcsTokenReader.java │ │ │ │ │ ├── StmtUtil.java │ │ │ │ │ ├── SystemUtil.java │ │ │ │ │ ├── URLUtil.java │ │ │ │ │ ├── UUIDUtils.java │ │ │ │ │ ├── arrow/ │ │ │ │ │ │ ├── AbstractArrowVectorConverter.java │ │ │ │ │ │ ├── ArrayConverter.java │ │ │ │ │ │ ├── ArrowResultChunkIndexSorter.java │ │ │ │ │ │ ├── ArrowResultUtil.java │ │ │ │ │ │ ├── ArrowVectorConverter.java │ │ │ │ │ │ ├── ArrowVectorConverterUtil.java │ │ │ │ │ │ ├── 
BigIntToFixedConverter.java │ │ │ │ │ │ ├── BigIntToScaledFixedConverter.java │ │ │ │ │ │ ├── BigIntToTimeConverter.java │ │ │ │ │ │ ├── BigIntToTimestampLTZConverter.java │ │ │ │ │ │ ├── BigIntToTimestampNTZConverter.java │ │ │ │ │ │ ├── BitToBooleanConverter.java │ │ │ │ │ │ ├── DateConverter.java │ │ │ │ │ │ ├── DecfloatToDecimalConverter.java │ │ │ │ │ │ ├── DecimalToScaledFixedConverter.java │ │ │ │ │ │ ├── DoubleToRealConverter.java │ │ │ │ │ │ ├── IntToFixedConverter.java │ │ │ │ │ │ ├── IntToScaledFixedConverter.java │ │ │ │ │ │ ├── IntToTimeConverter.java │ │ │ │ │ │ ├── IntervalDayTimeToDurationConverter.java │ │ │ │ │ │ ├── IntervalYearMonthToPeriodConverter.java │ │ │ │ │ │ ├── MapConverter.java │ │ │ │ │ │ ├── SmallIntToFixedConverter.java │ │ │ │ │ │ ├── SmallIntToScaledFixedConverter.java │ │ │ │ │ │ ├── StructConverter.java │ │ │ │ │ │ ├── StructObjectWrapper.java │ │ │ │ │ │ ├── StructuredTypeDateTimeConverter.java │ │ │ │ │ │ ├── ThreeFieldStructToTimestampTZConverter.java │ │ │ │ │ │ ├── TinyIntToFixedConverter.java │ │ │ │ │ │ ├── TinyIntToScaledFixedConverter.java │ │ │ │ │ │ ├── TwoFieldStructToTimestampLTZConverter.java │ │ │ │ │ │ ├── TwoFieldStructToTimestampNTZConverter.java │ │ │ │ │ │ ├── TwoFieldStructToTimestampTZConverter.java │ │ │ │ │ │ ├── VarBinaryToBinaryConverter.java │ │ │ │ │ │ ├── VarCharConverter.java │ │ │ │ │ │ ├── VectorTypeConverter.java │ │ │ │ │ │ └── tostringhelpers/ │ │ │ │ │ │ ├── ArrowArrayStringRepresentationBuilder.java │ │ │ │ │ │ ├── ArrowObjectStringRepresentationBuilder.java │ │ │ │ │ │ └── ArrowStringRepresentationBuilderBase.java │ │ │ │ │ ├── auth/ │ │ │ │ │ │ ├── ClientAuthnDTO.java │ │ │ │ │ │ ├── ClientAuthnParameter.java │ │ │ │ │ │ ├── oauth/ │ │ │ │ │ │ │ ├── AccessTokenProvider.java │ │ │ │ │ │ │ ├── AuthorizationCodeRedirectRequestHandler.java │ │ │ │ │ │ │ ├── DPoPUtil.java │ │ │ │ │ │ │ ├── OAuthAccessTokenForRefreshTokenProvider.java │ │ │ │ │ │ │ ├── OAuthAccessTokenProviderFactory.java │ │ │ │ │ │ │ ├── OAuthAuthorizationCodeAccessTokenProvider.java │ │ │ │ │ │ │ ├── OAuthClientCredentialsAccessTokenProvider.java │ │ │ │ │ │ │ ├── OAuthUtil.java │ │ │ │ │ │ │ ├── RandomStateProvider.java │ │ │ │ │ │ │ ├── StateProvider.java │ │ │ │ │ │ │ └── TokenResponseDTO.java │ │ │ │ │ │ └── wif/ │ │ │ │ │ │ ├── AwsAttestationService.java │ │ │ │ │ │ ├── AwsIdentityAttestationCreator.java │ │ │ │ │ │ ├── AzureAttestationService.java │ │ │ │ │ │ ├── AzureIdentityAttestationCreator.java │ │ │ │ │ │ ├── GcpIdentityAttestationCreator.java │ │ │ │ │ │ ├── OidcIdentityAttestationCreator.java │ │ │ │ │ │ ├── PlatformDetectionUtil.java │ │ │ │ │ │ ├── WorkloadIdentityAttestation.java │ │ │ │ │ │ ├── WorkloadIdentityAttestationCreator.java │ │ │ │ │ │ ├── WorkloadIdentityAttestationProvider.java │ │ │ │ │ │ ├── WorkloadIdentityProviderType.java │ │ │ │ │ │ └── WorkloadIdentityUtil.java │ │ │ │ │ ├── bind/ │ │ │ │ │ │ ├── BindException.java │ │ │ │ │ │ └── BindUploader.java │ │ │ │ │ ├── crl/ │ │ │ │ │ │ ├── CRLCache.java │ │ │ │ │ │ ├── CRLCacheConfig.java │ │ │ │ │ │ ├── CRLCacheEntry.java │ │ │ │ │ │ ├── CRLCacheManager.java │ │ │ │ │ │ ├── CRLFileCache.java │ │ │ │ │ │ ├── CRLInMemoryCache.java │ │ │ │ │ │ ├── CRLValidationResult.java │ │ │ │ │ │ ├── CRLValidationUtils.java │ │ │ │ │ │ ├── CRLValidator.java │ │ │ │ │ │ ├── CertRevocationCheckMode.java │ │ │ │ │ │ ├── CertificateValidationResult.java │ │ │ │ │ │ ├── CrlRevocationManager.java │ │ │ │ │ │ ├── NoopCRLCache.java │ │ │ │ │ │ └── VerifiedCertPathBuilder.java │ │ │ │ │ ├── 
json/ │ │ │ │ │ │ ├── BooleanConverter.java │ │ │ │ │ │ ├── BytesConverter.java │ │ │ │ │ │ ├── Converters.java │ │ │ │ │ │ ├── DateTimeConverter.java │ │ │ │ │ │ ├── NumberConverter.java │ │ │ │ │ │ └── StringConverter.java │ │ │ │ │ ├── minicore/ │ │ │ │ │ │ ├── Minicore.java │ │ │ │ │ │ ├── MinicoreLibrary.java │ │ │ │ │ │ ├── MinicoreLoadError.java │ │ │ │ │ │ ├── MinicoreLoadLogger.java │ │ │ │ │ │ ├── MinicoreLoadResult.java │ │ │ │ │ │ ├── MinicoreLoader.java │ │ │ │ │ │ ├── MinicorePlatform.java │ │ │ │ │ │ └── MinicoreTelemetry.java │ │ │ │ │ └── structs/ │ │ │ │ │ ├── SQLDataCreationHelper.java │ │ │ │ │ └── SnowflakeObjectTypeFactories.java │ │ │ │ ├── driver/ │ │ │ │ │ ├── AutoConfigurationHelper.java │ │ │ │ │ ├── ConnectionFactory.java │ │ │ │ │ ├── DriverInitializer.java │ │ │ │ │ ├── DriverVersion.java │ │ │ │ │ └── DriverVersionProperties.java │ │ │ │ ├── exception/ │ │ │ │ │ └── SnowflakeSQLLoggedException.java │ │ │ │ ├── jdbc/ │ │ │ │ │ ├── ArrowResultChunk.java │ │ │ │ │ ├── BindingParameterMetadata.java │ │ │ │ │ ├── ChunkDownloadContext.java │ │ │ │ │ ├── ColumnTypeInfo.java │ │ │ │ │ ├── CompressedStreamFactory.java │ │ │ │ │ ├── DBMetadataResultSetMetadata.java │ │ │ │ │ ├── DefaultResultStreamProvider.java │ │ │ │ │ ├── DefaultSFConnectionHandler.java │ │ │ │ │ ├── EnvironmentVariables.java │ │ │ │ │ ├── FileBackedOutputStream.java │ │ │ │ │ ├── JsonResultChunk.java │ │ │ │ │ ├── MatDesc.java │ │ │ │ │ ├── OCSPErrorCode.java │ │ │ │ │ ├── QueryIdValidator.java │ │ │ │ │ ├── RestRequest.java │ │ │ │ │ ├── ResultJsonParserV2.java │ │ │ │ │ ├── ResultStreamProvider.java │ │ │ │ │ ├── RetryContext.java │ │ │ │ │ ├── RetryContextManager.java │ │ │ │ │ ├── SFAsyncResultSet.java │ │ │ │ │ ├── SFBaseFileTransferAgent.java │ │ │ │ │ ├── SFConnectionHandler.java │ │ │ │ │ ├── SnowflakeChunkDownloader.java │ │ │ │ │ ├── SnowflakeClob.java │ │ │ │ │ ├── SnowflakeColumn.java │ │ │ │ │ ├── SnowflakeColumnMetadata.java │ │ │ │ │ ├── SnowflakeConnectString.java │ │ │ │ │ ├── SnowflakeDatabaseMetaDataQueryResultSet.java │ │ │ │ │ ├── SnowflakeDatabaseMetaDataResultSet.java │ │ │ │ │ ├── SnowflakeDateWithTimezone.java │ │ │ │ │ ├── SnowflakeFileTransferAgent.java │ │ │ │ │ ├── SnowflakeFileTransferConfig.java │ │ │ │ │ ├── SnowflakeFileTransferMetadata.java │ │ │ │ │ ├── SnowflakeFileTransferMetadataV1.java │ │ │ │ │ ├── SnowflakeFixedView.java │ │ │ │ │ ├── SnowflakeLoggedFeatureNotSupportedException.java │ │ │ │ │ ├── SnowflakeParameterMetadata.java │ │ │ │ │ ├── SnowflakeReauthenticationRequest.java │ │ │ │ │ ├── SnowflakeResultChunk.java │ │ │ │ │ ├── SnowflakeResultSetMetaDataV1.java │ │ │ │ │ ├── SnowflakeResultSetSerializableV1.java │ │ │ │ │ ├── SnowflakeResultSetV1.java │ │ │ │ │ ├── SnowflakeRichResultSetSerializableV1.java │ │ │ │ │ ├── SnowflakeSQLExceptionWithRetryContext.java │ │ │ │ │ ├── SnowflakeSimulatedUploadFailure.java │ │ │ │ │ ├── SnowflakeTimeWithTimezone.java │ │ │ │ │ ├── SnowflakeTimestampWithTimezone.java │ │ │ │ │ ├── SnowflakeUseDPoPNonceException.java │ │ │ │ │ ├── SnowflakeUtil.java │ │ │ │ │ ├── cloud/ │ │ │ │ │ │ └── storage/ │ │ │ │ │ │ ├── AwsSdkGCPSigner.java │ │ │ │ │ │ ├── AzureObjectSummariesIterator.java │ │ │ │ │ │ ├── CloudStorageProxyFactory.java │ │ │ │ │ │ ├── CommonObjectMetadata.java │ │ │ │ │ │ ├── EncryptionProvider.java │ │ │ │ │ │ ├── GCSAccessStrategy.java │ │ │ │ │ │ ├── GCSAccessStrategyAwsSdk.java │ │ │ │ │ │ ├── GCSDefaultAccessStrategy.java │ │ │ │ │ │ ├── GcmEncryptionProvider.java │ │ │ │ │ │ ├── 
GcsObjectSummariesIterator.java │ │ │ │ │ │ ├── ProxySettings.java │ │ │ │ │ │ ├── QueryIdHelper.java │ │ │ │ │ │ ├── S3ErrorHandler.java │ │ │ │ │ │ ├── S3ObjectMetadata.java │ │ │ │ │ │ ├── S3ObjectSummariesIterator.java │ │ │ │ │ │ ├── SnowflakeAzureClient.java │ │ │ │ │ │ ├── SnowflakeGCSClient.java │ │ │ │ │ │ ├── SnowflakeS3Client.java │ │ │ │ │ │ ├── SnowflakeStorageClient.java │ │ │ │ │ │ ├── StageInfo.java │ │ │ │ │ │ ├── StorageClientFactory.java │ │ │ │ │ │ ├── StorageHelper.java │ │ │ │ │ │ ├── StorageObjectMetadata.java │ │ │ │ │ │ ├── StorageObjectSummary.java │ │ │ │ │ │ ├── StorageObjectSummaryCollection.java │ │ │ │ │ │ └── StorageProviderException.java │ │ │ │ │ ├── diagnostic/ │ │ │ │ │ │ ├── CertificateDiagnosticCheck.java │ │ │ │ │ │ ├── DiagnosticCheck.java │ │ │ │ │ │ ├── DiagnosticContext.java │ │ │ │ │ │ ├── DiagnosticTrustManager.java │ │ │ │ │ │ ├── DnsDiagnosticCheck.java │ │ │ │ │ │ ├── HttpAndHttpsDiagnosticCheck.java │ │ │ │ │ │ ├── ProxyConfig.java │ │ │ │ │ │ ├── SnowflakeEndpoint.java │ │ │ │ │ │ └── TcpDiagnosticCheck.java │ │ │ │ │ ├── telemetry/ │ │ │ │ │ │ ├── CSVMetricsExporter.java │ │ │ │ │ │ ├── ExecTimeTelemetryData.java │ │ │ │ │ │ ├── InternalApiTelemetryTracker.java │ │ │ │ │ │ ├── NoOpTelemetryClient.java │ │ │ │ │ │ ├── PreSessionTelemetryClient.java │ │ │ │ │ │ ├── RevocationCheckTelemetryData.java │ │ │ │ │ │ ├── SqlExceptionTelemetryHandler.java │ │ │ │ │ │ ├── Telemetry.java │ │ │ │ │ │ ├── TelemetryClient.java │ │ │ │ │ │ ├── TelemetryData.java │ │ │ │ │ │ ├── TelemetryField.java │ │ │ │ │ │ └── TelemetryUtil.java │ │ │ │ │ ├── telemetryOOB/ │ │ │ │ │ │ ├── TelemetryEvent.java │ │ │ │ │ │ ├── TelemetryService.java │ │ │ │ │ │ └── TelemetryThreadPool.java │ │ │ │ │ └── util/ │ │ │ │ │ ├── DriverUtil.java │ │ │ │ │ ├── SnowflakeTypeHelper.java │ │ │ │ │ └── SnowflakeTypeUtil.java │ │ │ │ ├── loader/ │ │ │ │ │ ├── BufferStage.java │ │ │ │ │ ├── FileUploader.java │ │ │ │ │ ├── OnError.java │ │ │ │ │ ├── ProcessQueue.java │ │ │ │ │ ├── PutQueue.java │ │ │ │ │ ├── StreamLoader.java │ │ │ │ │ └── Utils.java │ │ │ │ ├── log/ │ │ │ │ │ ├── ArgSupplier.java │ │ │ │ │ ├── CommonsLoggingWrapper.java │ │ │ │ │ ├── CommonsLoggingWrapperMode.java │ │ │ │ │ ├── JDK14JCLWrapper.java │ │ │ │ │ ├── JDK14Logger.java │ │ │ │ │ ├── SFFormatter.java │ │ │ │ │ ├── SFLogLevel.java │ │ │ │ │ ├── SFLogger.java │ │ │ │ │ ├── SFLoggerFactory.java │ │ │ │ │ ├── SFLoggerUtil.java │ │ │ │ │ ├── SFToJavaLogMapper.java │ │ │ │ │ ├── SLF4JJCLWrapper.java │ │ │ │ │ ├── SLF4JLogger.java │ │ │ │ │ ├── StdErrOutThresholdAwareConsoleHandler.java │ │ │ │ │ ├── StdOutConsoleHandler.java │ │ │ │ │ └── UnknownJavaUtilLoggingLevelException.java │ │ │ │ └── util/ │ │ │ │ ├── Converter.java │ │ │ │ ├── DecorrelatedJitterBackoff.java │ │ │ │ ├── EnvironmentProvider.java │ │ │ │ ├── LibcDetails.java │ │ │ │ ├── LibcInfo.java │ │ │ │ ├── MaskedException.java │ │ │ │ ├── OsReleaseDetails.java │ │ │ │ ├── Platform.java │ │ │ │ ├── PlatformDetector.java │ │ │ │ ├── SFPair.java │ │ │ │ ├── SFTimestamp.java │ │ │ │ ├── SecretDetector.java │ │ │ │ ├── SnowflakeEnvironmentProvider.java │ │ │ │ ├── Stopwatch.java │ │ │ │ ├── ThrowingBiCallable.java │ │ │ │ ├── ThrowingBiFunction.java │ │ │ │ ├── ThrowingCallable.java │ │ │ │ ├── ThrowingFunction.java │ │ │ │ ├── ThrowingTriCallable.java │ │ │ │ ├── ThrowingTriFunction.java │ │ │ │ ├── TimeMeasurement.java │ │ │ │ └── VariableTypeArray.java │ │ │ └── jdbc/ │ │ │ └── SnowflakeDriver.java │ │ ├── java-fat-jar/ │ │ │ └── net/ │ │ │ └── snowflake/ │ 
│ │ └── client/ │ │ │ └── internal/ │ │ │ └── log/ │ │ │ ├── SFBridgeLogger.java │ │ │ ├── SFBridgeLoggerFactory.java │ │ │ └── SFBridgeServiceProvider.java │ │ ├── javadoc/ │ │ │ ├── licenses.html │ │ │ └── overview.html │ │ ├── resources/ │ │ │ ├── META-INF/ │ │ │ │ ├── com.boomi.Dependencies │ │ │ │ └── services/ │ │ │ │ ├── java.nio.file.spi.FileTypeDetector │ │ │ │ └── java.sql.Driver │ │ │ └── net/ │ │ │ └── snowflake/ │ │ │ └── client/ │ │ │ └── jdbc/ │ │ │ ├── jdbc_error_messages.properties │ │ │ ├── jdbc_error_messages_fr.properties │ │ │ └── version.properties │ │ └── resources-fat-jar/ │ │ └── META-INF/ │ │ └── services/ │ │ └── org.slf4j.spi.SLF4JServiceProvider │ └── test/ │ ├── java/ │ │ ├── com/ │ │ │ └── snowflake/ │ │ │ └── client/ │ │ │ └── jdbc/ │ │ │ └── SnowflakeDriverIT.java │ │ └── net/ │ │ └── snowflake/ │ │ └── client/ │ │ ├── .gitignore │ │ ├── AbstractDriverIT.java │ │ ├── AssumptionUtils.java │ │ ├── SystemPropertyOverrider.java │ │ ├── TestUtil.java │ │ ├── ThrowingConsumer.java │ │ ├── ThrowingRunnable.java │ │ ├── annotations/ │ │ │ ├── DontRunOnGithubActions.java │ │ │ ├── DontRunOnJava21.java │ │ │ ├── DontRunOnJava8.java │ │ │ ├── DontRunOnJenkins.java │ │ │ ├── DontRunOnTestaccount.java │ │ │ ├── DontRunOnThinJar.java │ │ │ ├── DontRunOnWindows.java │ │ │ ├── RunOnAWS.java │ │ │ ├── RunOnAzure.java │ │ │ ├── RunOnGCP.java │ │ │ ├── RunOnGithubActionsNotMac.java │ │ │ ├── RunOnLinux.java │ │ │ ├── RunOnLinuxOrMac.java │ │ │ ├── RunOnMac.java │ │ │ ├── RunOnTestaccountNotOnGithubActions.java │ │ │ ├── RunOnWindows.java │ │ │ └── RunOnWindowsOrMac.java │ │ ├── api/ │ │ │ ├── driver/ │ │ │ │ └── SnowflakeDriverTest.java │ │ │ ├── exception/ │ │ │ │ └── SqlFeatureNotSupportedTelemetryTest.java │ │ │ └── pooling/ │ │ │ ├── LogicalConnectionAlreadyClosedLatestIT.java │ │ │ └── LogicalConnectionFeatureNotSupportedLatestIT.java │ │ ├── authentication/ │ │ │ ├── AuthConnectionParameters.java │ │ │ ├── AuthTestHelper.java │ │ │ ├── ExternalBrowserLatestIT.java │ │ │ ├── IdTokenLatestIT.java │ │ │ ├── MFALatestIT.java │ │ │ ├── OauthLatestIT.java │ │ │ ├── OauthOktaAuthorizationCodeLatestIT.java │ │ │ ├── OauthOktaClientCredentialsLatestIT.java │ │ │ ├── OauthSnowflakeAuthorizationCodeLatestIT.java │ │ │ ├── OauthSnowflakeAuthorizationCodeWildcardsLatestIT.java │ │ │ ├── OktaAuthLatestIT.java │ │ │ └── PATLatestIT.java │ │ ├── category/ │ │ │ └── TestTags.java │ │ ├── internal/ │ │ │ ├── api/ │ │ │ │ └── implementation/ │ │ │ │ ├── metadata/ │ │ │ │ │ └── SnowflakeDatabaseMetaDataImplColumnSizeTest.java │ │ │ │ ├── pooling/ │ │ │ │ │ ├── ConnectionPoolingDataSourceIT.java │ │ │ │ │ └── LogicalConnectionLatestIT.java │ │ │ │ └── statement/ │ │ │ │ └── SnowflakeStatementImplCopyResultSetTest.java │ │ │ ├── config/ │ │ │ │ ├── ConnectionAutoUrlParserTest.java │ │ │ │ ├── SFClientConfigParserTest.java │ │ │ │ ├── SFConnectionConfigParserPermissionTest.java │ │ │ │ ├── SFConnectionConfigParserTest.java │ │ │ │ └── SFPermissionsTest.java │ │ │ ├── core/ │ │ │ │ ├── AttributeEnhancingHttpRequestRetryHandlerTest.java │ │ │ │ ├── CertificateChainTrustValidationTestLatestIT.java │ │ │ │ ├── CoreUtilsMiscellaneousTest.java │ │ │ │ ├── CredentialManagerTest.java │ │ │ │ ├── EventHandlerTest.java │ │ │ │ ├── EventTest.java │ │ │ │ ├── FileCacheManagerDefaultDirTest.java │ │ │ │ ├── FileCacheManagerTest.java │ │ │ │ ├── HeaderCustomizerHttpRequestInterceptorTest.java │ │ │ │ ├── HeartbeatIntervalSelectorTest.java │ │ │ │ ├── HeartbeatRegistryTest.java │ │ │ │ ├── 
HeartbeatThreadTest.java │ │ │ │ ├── HttpUtilLatestIT.java │ │ │ │ ├── HttpUtilTest.java │ │ │ │ ├── HttpUtilWiremockLatestIT.java │ │ │ │ ├── MinicoreTelemetryWiremockIT.java │ │ │ │ ├── OAuthAuthorizationCodeFlowLatestIT.java │ │ │ │ ├── OAuthClientCredentialsFlowLatestIT.java │ │ │ │ ├── OAuthLegacyFlowLatestIT.java │ │ │ │ ├── OAuthTokenCacheLatestIT.java │ │ │ │ ├── OCSPCacheServerTest.java │ │ │ │ ├── ObjectMapperTest.java │ │ │ │ ├── PrivateLinkDetectorTest.java │ │ │ │ ├── ProgrammaticAccessTokenAuthFlowLatestIT.java │ │ │ │ ├── QueryContextCacheTest.java │ │ │ │ ├── QueryContextEntryDTOTest.java │ │ │ │ ├── SFArrowResultSetIT.java │ │ │ │ ├── SFCrlTrustManagerDelegationTest.java │ │ │ │ ├── SFCrlTrustManagerFactoryTest.java │ │ │ │ ├── SFCrlTrustManagerLatestIT.java │ │ │ │ ├── SFExtendedCrlTrustManagerDelegationTest.java │ │ │ │ ├── SFLoginInputTest.java │ │ │ │ ├── SFSSLConnectionSocketFactoryTest.java │ │ │ │ ├── SFSessionPropertyTest.java │ │ │ │ ├── SFStatementTest.java │ │ │ │ ├── SFTrustManagerIT.java │ │ │ │ ├── SFTrustManagerMockitoMockLatestIT.java │ │ │ │ ├── SFTrustManagerOcspCachePoisoningTest.java │ │ │ │ ├── SFTrustManagerProxyWiremockIT.java │ │ │ │ ├── SFTrustManagerTest.java │ │ │ │ ├── SQLInputOutputTest.java │ │ │ │ ├── SecureStorageManagerTest.java │ │ │ │ ├── SessionUtilExternalBrowserTest.java │ │ │ │ ├── SessionUtilKeyPairTest.java │ │ │ │ ├── SessionUtilLatestIT.java │ │ │ │ ├── SessionUtilTest.java │ │ │ │ ├── SessionUtilWiremockIT.java │ │ │ │ ├── SnowflakeMFACacheTest.java │ │ │ │ ├── SpcsTokenReaderTest.java │ │ │ │ ├── SqlInputTimestampUtilTest.java │ │ │ │ ├── StmtUtilTest.java │ │ │ │ ├── URLUtilTest.java │ │ │ │ ├── arrow/ │ │ │ │ │ ├── ArrowResultUtilTest.java │ │ │ │ │ ├── BaseConverterTest.java │ │ │ │ │ ├── BigIntToFixedConverterTest.java │ │ │ │ │ ├── BigIntToTimeConverterTest.java │ │ │ │ │ ├── BigIntToTimestampLTZConverterTest.java │ │ │ │ │ ├── BigIntToTimestampNTZConverterTest.java │ │ │ │ │ ├── BitToBooleanConverterTest.java │ │ │ │ │ ├── DateConverterTest.java │ │ │ │ │ ├── DoubleToRealConverterTest.java │ │ │ │ │ ├── IntToFixedConverterTest.java │ │ │ │ │ ├── IntToTimeConverterTest.java │ │ │ │ │ ├── SmallIntToFixedConverterTest.java │ │ │ │ │ ├── ThreeFieldStructToTimestampTZConverterTest.java │ │ │ │ │ ├── TinyIntToFixedConverterTest.java │ │ │ │ │ ├── TwoFieldStructToTimestampLTZConverterTest.java │ │ │ │ │ ├── TwoFieldStructToTimestampNTZConverterTest.java │ │ │ │ │ ├── TwoFieldStructToTimestampTZConverterTest.java │ │ │ │ │ ├── VarBinaryToBinaryConverterTest.java │ │ │ │ │ └── VarCharConverterTest.java │ │ │ │ ├── auth/ │ │ │ │ │ ├── oauth/ │ │ │ │ │ │ ├── AuthorizationCodeRedirectRequestHandlerTest.java │ │ │ │ │ │ ├── OAuthAccessTokenProviderFactoryTest.java │ │ │ │ │ │ ├── OAuthUtilTest.java │ │ │ │ │ │ ├── RandomStateProviderTest.java │ │ │ │ │ │ └── TokenResponseDTOTest.java │ │ │ │ │ └── wif/ │ │ │ │ │ ├── AwsIdentityAttestationCreatorTest.java │ │ │ │ │ ├── AzureIdentityAttestationCreatorLatestIT.java │ │ │ │ │ ├── GcpIdentityAttestationCreatorLatestIT.java │ │ │ │ │ ├── OidcIdentityAttestationCreatorTest.java │ │ │ │ │ ├── PlatformDetectionUtilTest.java │ │ │ │ │ └── WorkloadIdentityAttestationProviderTest.java │ │ │ │ ├── bind/ │ │ │ │ │ └── BindExceptionTest.java │ │ │ │ ├── crl/ │ │ │ │ │ ├── CRLCacheManagerLatestIT.java │ │ │ │ │ ├── CRLCacheManagerTest.java │ │ │ │ │ ├── CRLFileCacheTest.java │ │ │ │ │ ├── CRLInMemoryCacheTest.java │ │ │ │ │ ├── CRLValidationUtilsTest.java │ │ │ │ │ ├── CRLValidatorTest.java │ │ │ │ │ 
├── CRLValidatorWiremockIT.java │ │ │ │ │ ├── CertificateGeneratorUtil.java │ │ │ │ │ └── VerifiedCertPathBuilderTest.java │ │ │ │ ├── json/ │ │ │ │ │ ├── BooleanConverterTest.java │ │ │ │ │ ├── BytesConverterTest.java │ │ │ │ │ ├── DateTimeConverterTest.java │ │ │ │ │ ├── NumberConverterTest.java │ │ │ │ │ └── StringConverterTest.java │ │ │ │ └── minicore/ │ │ │ │ ├── MinicoreLoaderTest.java │ │ │ │ ├── MinicorePlatformTest.java │ │ │ │ ├── MinicoreTelemetryTest.java │ │ │ │ ├── MinicoreTest.java │ │ │ │ └── MinicoreTestLatestIT.java │ │ │ ├── driver/ │ │ │ │ └── DriverInitializerTest.java │ │ │ ├── jdbc/ │ │ │ │ ├── ArrowResultChunkTest.java │ │ │ │ ├── AuthenticatedProxyLatestIT.java │ │ │ │ ├── AutoConnectionConfigurationLatestIT.java │ │ │ │ ├── BaseJDBCTest.java │ │ │ │ ├── BaseJDBCWithSharedConnectionIT.java │ │ │ │ ├── BaseWiremockTest.java │ │ │ │ ├── BatchExecutionIT.java │ │ │ │ ├── BatchExecutionLatestIT.java │ │ │ │ ├── BindUploaderIT.java │ │ │ │ ├── BindUploaderLatestIT.java │ │ │ │ ├── BindingAndInsertingStructuredTypesLatestIT.java │ │ │ │ ├── BindingDataIT.java │ │ │ │ ├── BindingDataLatestIT.java │ │ │ │ ├── CallableStatementIT.java │ │ │ │ ├── CallableStatementITBase.java │ │ │ │ ├── CallableStatementLatestIT.java │ │ │ │ ├── ChunkDownloaderS3RetryUrlLatestIT.java │ │ │ │ ├── ClientMemoryLimitParallelIT.java │ │ │ │ ├── CompressedStreamFactoryTest.java │ │ │ │ ├── ConnectStringParseTest.java │ │ │ │ ├── ConnectionAlreadyClosedIT.java │ │ │ │ ├── ConnectionFeatureNotSupportedIT.java │ │ │ │ ├── ConnectionIT.java │ │ │ │ ├── ConnectionLatestIT.java │ │ │ │ ├── ConnectionManual.java │ │ │ │ ├── ConnectionPoolingIT.java │ │ │ │ ├── ConnectionWithDisableOCSPModeLatestIT.java │ │ │ │ ├── ConnectionWithOCSPModeIT.java │ │ │ │ ├── CustomProxyLatestIT.java │ │ │ │ ├── DatabaseMetaDataIT.java │ │ │ │ ├── DatabaseMetaDataInternalIT.java │ │ │ │ ├── DatabaseMetaDataInternalLatestIT.java │ │ │ │ ├── DatabaseMetaDataLatestIT.java │ │ │ │ ├── DatabaseMetaDataResultSetLatestIT.java │ │ │ │ ├── DatabaseMetaDataResultsetIT.java │ │ │ │ ├── DecfloatTypeLatestIT.java │ │ │ │ ├── DefaultSFConnectionHandlerTest.java │ │ │ │ ├── DellBoomiCloudIT.java │ │ │ │ ├── FileConnectionConfigurationLatestIT.java │ │ │ │ ├── FileUploaderExpandFileNamesTest.java │ │ │ │ ├── FileUploaderLatestIT.java │ │ │ │ ├── FileUploaderMimeTypeToCompressionTypeTest.java │ │ │ │ ├── FileUploaderPrep.java │ │ │ │ ├── FileUploaderSessionlessTest.java │ │ │ │ ├── GCPLargeResult.java │ │ │ │ ├── GitRepositoryDownloadLatestIT.java │ │ │ │ ├── HeartbeatAsyncLatestIT.java │ │ │ │ ├── HeartbeatIT.java │ │ │ │ ├── HeartbeatMultiSessionIT.java │ │ │ │ ├── IntervalDayTimeTypeLatestIT.java │ │ │ │ ├── IntervalYearMonthTypeLatestIT.java │ │ │ │ ├── LobSizeLatestIT.java │ │ │ │ ├── MaxLobSizeLatestIT.java │ │ │ │ ├── MockConnectionTest.java │ │ │ │ ├── MultiStatementArrowIT.java │ │ │ │ ├── MultiStatementIT.java │ │ │ │ ├── MultiStatementLatestIT.java │ │ │ │ ├── OpenGroupCLIFuncIT.java │ │ │ │ ├── OpenGroupCLIFuncLatestIT.java │ │ │ │ ├── PreparedMultiStmtIT.java │ │ │ │ ├── PreparedStatement0IT.java │ │ │ │ ├── PreparedStatement1IT.java │ │ │ │ ├── PreparedStatement1LatestIT.java │ │ │ │ ├── PreparedStatement2IT.java │ │ │ │ ├── PreparedStatement2LatestIT.java │ │ │ │ ├── PreparedStatementFeatureNotSupportedIT.java │ │ │ │ ├── PreparedStatementLargeUpdateLatestIT.java │ │ │ │ ├── PrivateKeyAuthenticationExceptionHandlingLatestIT.java │ │ │ │ ├── ProxyLatestIT.java │ │ │ │ ├── PutFileWithSpaceIncludedIT.java │ │ │ │ ├── 
PutUnescapeBackslashIT.java │ │ │ │ ├── QueryContextWiremockIT.java │ │ │ │ ├── RestRequestTest.java │ │ │ │ ├── RestRequestTestRetriesWiremockIT.java │ │ │ │ ├── RestRequestWiremockLatestIT.java │ │ │ │ ├── ResultJsonParserV2Test.java │ │ │ │ ├── ResultSet0IT.java │ │ │ │ ├── ResultSetAlreadyClosedIT.java │ │ │ │ ├── ResultSetArrowForce0MultiTimeZone.java │ │ │ │ ├── ResultSetArrowForceLTZMultiTimeZoneIT.java │ │ │ │ ├── ResultSetArrowForceTZMultiTimeZoneIT.java │ │ │ │ ├── ResultSetAsyncIT.java │ │ │ │ ├── ResultSetAsyncLatestIT.java │ │ │ │ ├── ResultSetFeatureNotSupportedIT.java │ │ │ │ ├── ResultSetFormatType.java │ │ │ │ ├── ResultSetIT.java │ │ │ │ ├── ResultSetJsonVsArrowIT.java │ │ │ │ ├── ResultSetJsonVsArrowMultiTZIT.java │ │ │ │ ├── ResultSetLatestIT.java │ │ │ │ ├── ResultSetMultiTimeZoneIT.java │ │ │ │ ├── ResultSetMultiTimeZoneLatestIT.java │ │ │ │ ├── ResultSetVectorLatestIT.java │ │ │ │ ├── SSOConnectionTest.java │ │ │ │ ├── ServiceNameTest.java │ │ │ │ ├── SessionContextWiremockLatestIT.java │ │ │ │ ├── SessionVariablesIT.java │ │ │ │ ├── SnowflakeAzureClientHandleExceptionLatestIT.java │ │ │ │ ├── SnowflakeBasicDataSourceTest.java │ │ │ │ ├── SnowflakeChunkDownloaderLatestIT.java │ │ │ │ ├── SnowflakeClobTest.java │ │ │ │ ├── SnowflakeConnectionImplTest.java │ │ │ │ ├── SnowflakeDriverConnectionStressTest.java │ │ │ │ ├── SnowflakeDriverIT.java │ │ │ │ ├── SnowflakeDriverLatestIT.java │ │ │ │ ├── SnowflakeDriverTest.java │ │ │ │ ├── SnowflakeFileTransferAgentExtractSafeDestFileNameTest.java │ │ │ │ ├── SnowflakeFileTransferConfigTest.java │ │ │ │ ├── SnowflakeGcsClientHandleExceptionLatestIT.java │ │ │ │ ├── SnowflakeResultSetSerializableIT.java │ │ │ │ ├── SnowflakeS3ClientHandleExceptionLatestIT.java │ │ │ │ ├── SnowflakeSerializableTest.java │ │ │ │ ├── SnowflakeTimestampWithTimezoneTest.java │ │ │ │ ├── SnowflakeTypeTest.java │ │ │ │ ├── SnowflakeUtilTest.java │ │ │ │ ├── StatementAlreadyClosedIT.java │ │ │ │ ├── StatementArrowIT.java │ │ │ │ ├── StatementFeatureNotSupportedIT.java │ │ │ │ ├── StatementIT.java │ │ │ │ ├── StatementLargeUpdateIT.java │ │ │ │ ├── StatementLatestIT.java │ │ │ │ ├── StatementNoOpLatestIT.java │ │ │ │ ├── StreamIT.java │ │ │ │ ├── StreamLatestIT.java │ │ │ │ ├── cloud/ │ │ │ │ │ └── storage/ │ │ │ │ │ ├── AwsSdkGCPSignerTest.java │ │ │ │ │ ├── CloudStorageClientLatestIT.java │ │ │ │ │ ├── CloudStorageProxyFactoryTest.java │ │ │ │ │ ├── EncryptionProviderTest.java │ │ │ │ │ ├── GcmEncryptionProviderTest.java │ │ │ │ │ ├── SnowflakeAzureClientLatestIT.java │ │ │ │ │ ├── SnowflakeAzureClientTest.java │ │ │ │ │ ├── SnowflakeS3ClientLatestIT.java │ │ │ │ │ ├── SnowflakeS3ClientTest.java │ │ │ │ │ ├── SnowflakeStorageClientTest.java │ │ │ │ │ ├── StageInfoGcsCustomEndpointTest.java │ │ │ │ │ └── StorageProviderExceptionTest.java │ │ │ │ ├── diagnostic/ │ │ │ │ │ ├── DiagnosticContextLatestIT.java │ │ │ │ │ └── SnowflakeEndpointTest.java │ │ │ │ ├── structuredtypes/ │ │ │ │ │ ├── ResultSetStructuredTypesLatestIT.java │ │ │ │ │ ├── StructuredTypesArrowJsonCompatibilityLatestIT.java │ │ │ │ │ ├── StructuredTypesGetStringBaseIT.java │ │ │ │ │ └── sqldata/ │ │ │ │ │ ├── AllTypesClass.java │ │ │ │ │ ├── NestedStructSqlData.java │ │ │ │ │ ├── NullableFieldsSqlData.java │ │ │ │ │ ├── SimpleClass.java │ │ │ │ │ └── StringClass.java │ │ │ │ ├── telemetry/ │ │ │ │ │ ├── CSVMetricsExporterTest.java │ │ │ │ │ ├── ExecTimeTelemetryDataTest.java │ │ │ │ │ ├── InternalApiMarkerUsageArchTest.java │ │ │ │ │ ├── InternalApiTelemetryTrackerTest.java │ │ │ │ │ ├── 
NoOpTelemetryClientTest.java │ │ │ │ │ ├── PreSessionTelemetryClientTest.java │ │ │ │ │ ├── TelemetryIT.java │ │ │ │ │ ├── TelemetryTest.java │ │ │ │ │ └── TelemetryUtilTest.java │ │ │ │ └── telemetryOOB/ │ │ │ │ ├── TelemetryServiceIT.java │ │ │ │ ├── TelemetryServiceTest.java │ │ │ │ └── TelemetryThreadPoolTest.java │ │ │ ├── loader/ │ │ │ │ ├── FlatfileReadMultithreadIT.java │ │ │ │ ├── LoaderBase.java │ │ │ │ ├── LoaderIT.java │ │ │ │ ├── LoaderLatestIT.java │ │ │ │ ├── LoaderMultipleBatchIT.java │ │ │ │ ├── LoaderTimestampIT.java │ │ │ │ ├── OnErrorTest.java │ │ │ │ └── TestDataConfigBuilder.java │ │ │ ├── log/ │ │ │ │ ├── AbstractLoggerIT.java │ │ │ │ ├── CommonsLoggingWrapperModeTest.java │ │ │ │ ├── CommonsLoggingWrapperTest.java │ │ │ │ ├── JDK14JCLWrapperLatestIT.java │ │ │ │ ├── JDK14LoggerConsoleHandlerOverrideLatestIT.java │ │ │ │ ├── JDK14LoggerLatestIT.java │ │ │ │ ├── JDK14LoggerTest.java │ │ │ │ ├── JDK14LoggerWithClientLatestIT.java │ │ │ │ ├── MaskedExceptionLoggerIntegrationTest.java │ │ │ │ ├── SFFormatterTest.java │ │ │ │ ├── SFLogLevelTest.java │ │ │ │ ├── SFLoggerFactoryTest.java │ │ │ │ ├── SFToJavaLogMapperTest.java │ │ │ │ ├── SLF4JJJCLWrapperLatestIT.java │ │ │ │ └── SLF4JLoggerLatestIT.java │ │ │ └── util/ │ │ │ ├── DecorrelatedJitterBackoffTest.java │ │ │ ├── LibcDetailsTest.java │ │ │ ├── MaskedExceptionTest.java │ │ │ ├── OsReleaseDetailsTest.java │ │ │ ├── PlatformDetectorLatestIT.java │ │ │ ├── SFPairTest.java │ │ │ ├── SecretDetectorTest.java │ │ │ └── StopwatchTest.java │ │ ├── providers/ │ │ │ ├── BooleanProvider.java │ │ │ ├── ProvidersUtil.java │ │ │ ├── ResultFormatProvider.java │ │ │ ├── ScaleProvider.java │ │ │ ├── SimpleResultFormatProvider.java │ │ │ ├── SnowflakeArgumentsProvider.java │ │ │ └── TimezoneProvider.java │ │ ├── suites/ │ │ │ ├── ArrowTestSuite.java │ │ │ ├── AuthenticationTestSuite.java │ │ │ ├── BaseTestSuite.java │ │ │ ├── ConnectionOldDriverTestSuite.java │ │ │ ├── ConnectionTestSuite.java │ │ │ ├── CoreOldDriverTestSuite.java │ │ │ ├── CoreTestSuite.java │ │ │ ├── DatabaseMetaDataTestSuite.java │ │ │ ├── DiagnosticOldDriverTestSuite.java │ │ │ ├── DiagnosticTestSuite.java │ │ │ ├── LoaderOldDriverTestSuite.java │ │ │ ├── LoaderTestSuite.java │ │ │ ├── OldDriverTestSuite.java │ │ │ ├── OnlyTestingTestSuite.java │ │ │ ├── OthersOldDriverTestSuite.java │ │ │ ├── OthersTestSuite.java │ │ │ ├── ResultSetOldDriverTestSuite.java │ │ │ ├── ResultSetTestSuite.java │ │ │ ├── StatementOldDriverTestSuite.java │ │ │ ├── StatementTestSuite.java │ │ │ ├── UnitOldDriverTestSuite.java │ │ │ ├── UnitTestSuite.java │ │ │ └── WIFTestSuite.java │ │ └── wif/ │ │ └── WIFLatestIT.java │ └── resources/ │ ├── .gitignore │ ├── FileUploaderPrep/ │ │ ├── exampleAzure.json │ │ ├── exampleGCS.json │ │ ├── exampleGCSWithEndpoint.json │ │ ├── exampleGCSWithUseRegionalUrl.json │ │ ├── exampleS3.json │ │ └── exampleS3WithStageEndpoint.json │ ├── allowlist.json │ ├── encrypted_rsa_key.p8 │ ├── encrypted_rsa_key.pub │ ├── invalid_private_key.pem │ ├── logback-test.xml │ ├── logging.properties │ ├── net/ │ │ └── snowflake/ │ │ └── client/ │ │ ├── .gitignore │ │ ├── internal/ │ │ │ └── jdbc/ │ │ │ ├── boomi.policy │ │ │ └── test_copy.csv │ │ ├── jenkins_it_logging.properties │ │ └── travis_it_logging.properties │ ├── orders_100.csv │ ├── orders_101.csv │ ├── os-release-test │ ├── revoked_certs.pem │ ├── rsa_key.p8 │ ├── rsa_key.pem │ ├── rsa_key.pub │ ├── ssl-tests/ │ │ ├── certs/ │ │ │ ├── amz_root_ca1_chain.crt │ │ │ ├── amz_root_ca1_chain.csr │ │ │ ├── 
amz_root_ca1_chain.srl │ │ │ ├── amz_root_ca1_common.key │ │ │ ├── amz_root_ca1_trust_store.crt │ │ │ ├── amz_root_ca1_trust_store.csr │ │ │ ├── amz_rsa_m02_intermediate.crt │ │ │ ├── amz_rsa_m02_intermediate.csr │ │ │ ├── amz_rsa_m02_intermediate.key │ │ │ ├── amz_rsa_m02_intermediate.srl │ │ │ ├── client_keystore.p12 │ │ │ ├── leaf.crt │ │ │ ├── leaf.csr │ │ │ ├── leaf.key │ │ │ ├── st_class2_root.crt │ │ │ ├── st_class2_root.csr │ │ │ ├── st_class2_root.key │ │ │ ├── st_class2_root.srl │ │ │ ├── st_g2_root.crt │ │ │ ├── st_g2_root.csr │ │ │ ├── st_g2_root.key │ │ │ ├── st_g2_root.srl │ │ │ └── truststore.jks │ │ └── generate_certs.sh │ ├── test_encryption_256.csv │ ├── test_file │ ├── test_file.csv │ └── wiremock/ │ ├── ca-cert.jks │ └── mappings/ │ ├── connection/ │ │ └── session_context_switches.json │ ├── minicore/ │ │ └── minicore_telemetry.json │ ├── oauth/ │ │ ├── authorization_code/ │ │ │ ├── browser_timeout_authorization_error.json │ │ │ ├── dpop_nonce_error_flow.json │ │ │ ├── external_idp_custom_urls.json │ │ │ ├── invalid_scope_error.json │ │ │ ├── invalid_state_error.json │ │ │ ├── successful_dpop_flow.json │ │ │ ├── successful_flow.json │ │ │ ├── successful_flow_with_single_use_refresh_tokens.json │ │ │ └── token_request_error.json │ │ ├── client_credentials/ │ │ │ ├── dpop_nonce_error_flow.json │ │ │ ├── successful_dpop_flow.json │ │ │ ├── successful_flow.json │ │ │ └── token_request_error.json │ │ ├── legacy_oauth/ │ │ │ └── token_expired.json │ │ └── token_caching/ │ │ ├── caching_refreshed_access_token_and_new_refresh_token.json │ │ ├── caching_tokens_after_connecting.json │ │ ├── caching_tokens_and_dpop_key_after_connecting.json │ │ ├── not_caching_after_client_credentials_flow.json │ │ ├── refreshing_expired_access_token.json │ │ ├── refreshing_expired_access_token_dpop.json │ │ ├── refreshing_expired_access_token_dpop_nonce_error.json │ │ ├── restarting_full_flow_on_expiration_and_no_refresh_token.json │ │ ├── restarting_full_flow_on_refresh_token_error.json │ │ └── reusing_cached_access_token_to_authenticate.json │ ├── pat/ │ │ ├── invalid_pat_token.json │ │ └── successful_flow.json │ ├── platform-detection/ │ │ ├── aws_ec2_instance_successful.json │ │ ├── aws_ec2_token_successful.json │ │ ├── azure_managed_identity_successful.json │ │ ├── azure_metadata_unavailable.json │ │ ├── azure_vm_successful.json │ │ ├── ec2_successful_imdsv1.json │ │ ├── ec2_successful_imdsv2.json │ │ ├── gcp_identity_successful.json │ │ ├── gcp_metadata_unavailable.json │ │ ├── gcp_vm_slow_response.json │ │ ├── gcp_vm_successful.json │ │ ├── slow_response_timeout.json │ │ └── timeout_scenarios.json │ ├── querycontext/ │ │ └── qcc-merge-on-failed-query.json │ ├── restrequest/ │ │ ├── certificate_revoked.json │ │ ├── correct_response.json │ │ ├── http_307_retry.json │ │ ├── http_308_retry.json │ │ ├── response503.json │ │ ├── six_malformed_and_correct.json │ │ ├── sticky_header_from_first_query.json │ │ ├── sticky_header_from_login.json │ │ └── sticky_header_updated.json │ ├── session/ │ │ ├── session-util-wiremock-it-always-429-in-federated-step-3.json │ │ ├── session-util-wiremock-it-always-429-in-federated-step-4.json │ │ ├── session-util-wiremock-it-libc-details-in-login-request.json │ │ ├── session-util-wiremock-it-multiple-429-from-okta-in-login-request-to-sf.json │ │ ├── session-util-wiremock-it-multiple-429-in-federated-step-3.json │ │ ├── session-util-wiremock-it-multiple-429-in-federated-step-4.json │ │ ├── session-util-wiremock-it-os-details-in-login-request.json │ │ ├── 
session-util-wiremock-it-spcs-token-in-login-request.json │ │ └── session-util-wiremock-it-unsupported-mfa-in-federated-step-3.json └── wif/ ├── azure/ │ ├── http_error.json │ ├── invalid_issuer_flow.json │ ├── missing_issuer_claim.json │ ├── missing_sub_claim.json │ ├── non_json_response.json │ ├── successful_flow_azure_functions.json │ ├── successful_flow_azure_functions_custom_entra_resource.json │ ├── successful_flow_azure_functions_no_client_id.json │ ├── successful_flow_azure_functions_v2_issuer.json │ ├── successful_flow_basic.json │ ├── successful_flow_v2_issuer.json │ └── unparsable_token.json │ └── gcp/ ├── http_error.json │ ├── invalid_issuer_claim.json │ ├── missing_issuer_claim.json │ ├── missing_sub_claim.json │ ├── successful_flow.json │ ├── successful_impersonation_flow.json │ └── unparsable_token.json └── thin_public_pom.xml

================================================
FILE CONTENTS
================================================

================================================
FILE: .cursor/skills/graphite-pr-workflow/SKILL.md
================================================
---
name: graphite-pr-workflow
description: Create branches, commits, and pull requests using Graphite CLI (gt) for the snowflake-jdbc repository. Use when the user asks to create a PR, submit a PR, commit changes, or push a branch. Covers Graphite commands, SNOW-ticket commit message conventions, PR description templates, and snowflake-jdbc-specific pre-commit formatting/checkstyle requirements.
---

# Graphite PR Workflow (snowflake-jdbc)

## CRITICAL: Do Not Commit or Submit Without Explicit Instruction

**NEVER run `gt commit create`, `git commit`, `gt submit`, or open a PR unless the user has explicitly asked you to.**

Completing a feature is NOT permission to commit. Passing tests is NOT permission to commit. Pre-commit checks succeeding is NOT permission to commit.

When you have finished implementing a feature or change, you MAY ask the user whether they want you to proceed with creating a commit and PR. Phrase it as a question and wait for explicit confirmation before doing anything that mutates git history or the remote.

Examples of acceptable prompts after finishing work:

- "The change is implemented and tests pass. Would you like me to format, run checkstyle, and create a commit + PR?"
- "Should I proceed with `gt commit create` and `gt submit`?"

Do NOT interpret ambiguous responses ("looks good", "great", "thanks") as approval to commit. Wait for an explicit "yes, commit" / "create the PR" / "go ahead and submit".

## Required Pre-Commit Checks (snowflake-jdbc)

Before running ANY commit command (`gt commit create`, `git commit`, `gt commit amend`), you MUST run the following two Maven commands in order from the repo root and confirm they succeed:

1. **Format the code** with the Spotify fmt plugin:

   ```bash
   mvn com.spotify.fmt:fmt-maven-plugin:format
   ```

2. **Validate checkstyle** passes cleanly:

   ```bash
   mvn clean validate --batch-mode --show-version -P check-style
   ```

If either command fails, fix the reported issues (re-running the formatter for any new files, or addressing checkstyle violations manually) and re-run both commands until both succeed. Only then may you proceed with committing — and only if the user has explicitly asked you to commit (see section above).
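If convenient, the two required commands can be chained so checkstyle only runs once formatting has succeeded. A minimal sketch (the `&&` chaining is just a convenience, not a required invocation):

```bash
# Run the required formatter first; run checkstyle only if formatting succeeded.
# Re-run the whole chain after fixing any reported violations, until both pass.
mvn com.spotify.fmt:fmt-maven-plugin:format \
  && mvn clean validate --batch-mode --show-version -P check-style
```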
## Commit Message Convention

Prefix with Jira ticket: `SNOW-{ticket}: {description}`

```
SNOW-3254915: Add QueryStatus enum and is_still_running/is_an_error static methods
```

Use `NO-SNOW:` for changes without a ticket.

## Creating a PR (only after explicit user approval)

```bash
# 1. Create a tracked branch
gt branch create {user}/{branch-name}

# 2. Format and validate (REQUIRED before staging/committing — see Pre-Commit Checks above)
mvn com.spotify.fmt:fmt-maven-plugin:format
mvn clean validate --batch-mode --show-version -P check-style

# 3. Stage files
git add

# 4. Commit (runs pre-commit hooks)
gt commit create -m "SNOW-{ticket}: {description}"

# 5. Push and create PR (draft mode in non-interactive)
gt submit --no-edit
```

Then update title/description via `gh pr edit` or the Graphite web UI.

## PR Description Template

```markdown
## Summary
- Bullet points describing what changed and why

## Context
Brief explanation of how this fits into the larger effort (e.g., "first of two PRs for...")

## Test plan
Explain how the change was tested: what commands were run, what was verified, and the results. Be specific to this PR.
```

## Key Commands

| Command | Purpose |
|---------|---------|
| `mvn com.spotify.fmt:fmt-maven-plugin:format` | Auto-format Java sources (REQUIRED before commit) |
| `mvn clean validate --batch-mode --show-version -P check-style` | Run checkstyle validation (REQUIRED before commit) |
| `gt branch create {user}/{branch-name}` | Create branch tracked by Graphite |
| `gt commit create -m "..."` | Commit with message |
| `gt commit amend` | Amend current commit |
| `gt submit --no-edit` | Push and create/update PR |
| `gt submit --stack` | Submit entire stack of PRs |
| `gt restack` | Rebase stack after upstream changes |

## Before Creating a PR: Required Checks

Before proceeding with branch creation or committing, you MUST:

1. **Confirm the user has explicitly requested a commit/PR.** See the top of this file. Do not commit proactively.
2. **Run the formatter and checkstyle commands** described in the Required Pre-Commit Checks section above and confirm both pass.
3. **Ask for SNOW ticket number** if the user hasn't provided one. Use AskQuestion:
   - Prompt: "What is the SNOW Jira ticket number for this change?"
   - Options: a free-text response, or "NO-SNOW (no ticket)"

   Do NOT proceed with a commit until the ticket number is confirmed.
4. **Detect stacked PR situation.** Run `git log --oneline main..HEAD` or check `git branch --show-current` and its parent. If the current branch is NOT based directly off `main` (i.e., there are intermediate branches), ask:
   - "This branch is based off `{parent_branch}`, not `main`. Should this be a stacked PR?"
   - If yes: use `gt submit --stack` to submit the full stack
   - If no: use `gt submit --no-edit` for just this branch

## Notes

- `gt submit --no-edit` creates PRs in **draft mode** when non-interactive
- Pre-commit hooks run automatically on `gt commit create`, but they are NOT a substitute for running the formatter + checkstyle commands manually beforehand

================================================
FILE: .github/CODEOWNERS
================================================
* @snowflakedb/Client
/src/main/java/net/snowflake/client/core/crl/** @snowflakedb/pki-oversight @snowflakedb/Client
/src/main/java/net/snowflake/client/core/*TrustManager* @snowflakedb/pki-oversight @snowflakedb/Client

================================================
FILE: .github/ISSUE_TEMPLATE/BUG_REPORT.md
================================================
---
name: Bug Report 🐞
about: Something isn't working as expected? Here is the right place to report.
labels: bug
---

:exclamation: If you need **urgent assistance** then [file a case with Snowflake Support](https://community.snowflake.com/s/article/How-To-Submit-a-Support-Case-in-Snowflake-Lodge). Otherwise continue here.

Please answer these questions before submitting your issue. In order to accurately debug the issue, this information is required. Thanks!

1. What version of JDBC driver are you using?
2. What operating system and processor architecture are you using?
3. What version of Java are you using?
4. What did you do? If possible, provide a recipe for reproducing the error. A complete runnable program is good.
5. What did you expect to see? What should have happened and what happened instead?
6. Can you set logging to DEBUG and collect the logs?
   https://community.snowflake.com/s/article/How-to-generate-log-file-on-Snowflake-connectors
   Before sharing any information, please be sure to review the log and remove any sensitive information.

================================================
FILE: .github/ISSUE_TEMPLATE/FEATURE_REQUEST.md
================================================
---
name: Feature Request 💡
about: Suggest a new idea for the project.
labels: feature
---

## What is the current behavior?

## What is the desired behavior?

## How would this improve `snowflake-jdbc`?

## References, Other Background

================================================
FILE: .github/pull_request_template.md
================================================
# Overview

SNOW-XXXXX

## Pre-review self checklist

- [ ] PR branch is updated with all the changes from `master` branch
- [ ] The code is correctly formatted (run `mvn -P check-style validate`)
- [ ] New public API is not unnecessarily exposed (run `mvn verify` and inspect `target/japicmp/japicmp.html`)
- [ ] The pull request name is prefixed with `SNOW-XXXX: `
- [ ] Code is in compliance with internal logging requirements

## External contributors - please answer these questions before submitting a pull request. Thanks!

1. What GitHub issue is this PR addressing? Make sure that there is an accompanying issue to your PR.

   Issue: #NNNN

2.
Fill out the following pre-review checklist: - [ ] I am adding a new automated test(s) to verify correctness of my new code - [ ] I am adding new logging messages - [ ] I am modifying authorization mechanisms - [ ] I am adding new credentials - [ ] I am modifying OCSP code - [ ] I am adding a new dependency or upgrading an existing one - [ ] I am adding new public/protected component not marked with `@SnowflakeJdbcInternalApi` (note that public/protected methods/fields in classes marked with this annotation are already internal) 3. Please describe how your code solves the related issue. Please write a short description of how your code change solves the related issue. ================================================ FILE: .github/repo_meta.yaml ================================================ # point_of_contact: the owner of this repository, can be a GitHub user or GitHub team point_of_contact: @snowflakedb/client # production: whether this repository meets the criteria for being "production", see https://snowflakecomputing.atlassian.net/wiki/spaces/CLO/pages/2239988967/Production+Repository+Criteria for criteria production: true # distributed: whether any source code in this repository is distributed directly to customers (e.g. driver and frontend software) distributed: true # modified: whether any open source dependencies in this repository have been modified modified: true # release_branches: list of release branch patterns, exact matches or regex is acceptable release_branches: - master - release.* # code_owners_file_present: whether there is a CODEOWNERS file in this repository code_owners_file_present: true # jira_project_issue_type: the jira issuetype used to raise issues related to this repository in the SNOW Jira project jira_project_issue_type: Bug # jira_area: the jira area that raised issues should use jira_area: Developer Platform ================================================ FILE: .github/workflows/build-test.yml ================================================ name: Build and Test on: push: branches: - master tags: - v* pull_request: branches: - master - SNOW-** - NO-SNOW-** types: [opened, synchronize, reopened, labeled, unlabeled] workflow_dispatch: inputs: logLevel: default: warning description: "Log level" required: true tags: description: "Test scenario tags" permissions: contents: read concurrency: # older builds for the same pull request numer or branch should be cancelled cancel-in-progress: true group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} jobs: check-no-raw-system-calls: name: Check No Raw System.getProperty/getenv/setProperty runs-on: ubuntu-latest steps: - uses: actions/checkout@v5 - name: Check for raw System.getProperty/getenv/setProperty calls shell: bash run: ./ci/scripts/check_no_raw_system_calls.sh build: name: Build runs-on: ubuntu-latest steps: - uses: actions/checkout@v5 - name: Build shell: bash env: WHITESOURCE_API_KEY: ${{ secrets.WHITESOURCE_API_KEY }} run: ./ci/build.sh unit-test-linux: name: Unit Tests Linux java 8 runs-on: ubuntu-latest steps: - uses: actions/checkout@v5 - uses: actions/setup-java@v5 with: java-version: '8' distribution: 'temurin' cache: maven - name: Unit Tests shell: bash env: JAVA_TOOL_OPTIONS: "-Xms1g -Xmx2g" run: ./mvnw -B -DjenkinsIT -Dskip.unitTests=false -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn test --batch-mode --show-version unit-test-windows: name: Unit Tests Windows java 8 runs-on: windows-latest steps: - uses: 
actions/checkout@v5 - uses: actions/setup-java@v5 with: java-version: '8' distribution: 'temurin' cache: maven - name: Unit Tests shell: cmd env: JAVA_TOOL_OPTIONS: "-Xms1g -Xmx2g" run: .\mvnw.cmd -B -DjenkinsIT -Dskip.unitTests=false -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn test --batch-mode --show-version unit-test-mac: name: Unit Tests Mac java 8 runs-on: macos-latest steps: - uses: actions/checkout@v5 - uses: actions/setup-java@v5 with: java-version: '8' distribution: 'zulu' cache: maven - name: Unit Tests shell: bash env: JAVA_TOOL_OPTIONS: "-Xms1g -Xmx2g" run: ./mvnw -B -DjenkinsIT -Dskip.unitTests=false -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn test --batch-mode --show-version test-windows: needs: [build, unit-test-linux, unit-test-windows, unit-test-mac] name: ${{ matrix.runConfig.cloud }} Windows java ${{ matrix.runConfig.javaVersion }} JDBC${{ matrix.additionalMavenProfile }} ${{ matrix.category.name }} runs-on: windows-latest strategy: fail-fast: false matrix: runConfig: >- ${{ fromJSON( ( github.event_name == 'pull_request' && !contains(github.event.pull_request.labels.*.name, 'FULL-TEST-MATRIX') && '[ {"cloud":"AWS","javaVersion":"8"}, {"cloud":"GCP","javaVersion":"17"}, {"cloud":"AZURE","javaVersion":"21"} ]' ) || '[ {"cloud":"AWS","javaVersion":"8"}, {"cloud":"GCP","javaVersion":"11"}, {"cloud":"AZURE","javaVersion":"17"}, {"cloud":"AWS","javaVersion":"21"} ]' ) }} category: [{suites: 'ResultSetTestSuite', name: 'TestCategoryResultSet'}, {suites: 'StatementTestSuite,LoaderTestSuite', name: 'TestCategoryStatement,TestCategoryLoader'}, {suites: 'OthersTestSuite', name: 'TestCategoryOthers'}, {suites: 'DatabaseMetaDataTestSuite', name: 'TestCategoryDatabaseMetaData'}, {suites: 'ArrowTestSuite,ConnectionTestSuite,CoreTestSuite,DiagnosticTestSuite', name: 'TestCategoryArrow,TestCategoryConnection,TestCategoryCore,TestCategoryDiagnostic'}, {suites: 'FipsTestSuite', name: "TestCategoryFips"}] additionalMavenProfile: [''] steps: - uses: actions/checkout@v5 - uses: actions/setup-java@v5 with: java-version: ${{ matrix.runConfig.javaVersion }} distribution: 'temurin' cache: maven - uses: actions/setup-python@v5 with: python-version: '3.12' architecture: 'x64' - name: Tests shell: cmd env: PARAMETERS_SECRET: ${{ secrets.PARAMETERS_SECRET }} JDBC_PRIVATE_KEY_SECRET: ${{ secrets.JDBC_PRIVATE_KEY_SECRET }} CLOUD_PROVIDER: ${{ matrix.runConfig.cloud }} JDBC_TEST_SUITES: ${{ matrix.category.suites }} ADDITIONAL_MAVEN_PROFILE: ${{ matrix.additionalMavenProfile }} JAVA_TOOL_OPTIONS: "-Xms1g -Xmx4g" run: ci\\test_windows.bat test-mac: needs: [build, unit-test-linux, unit-test-windows, unit-test-mac] name: ${{ matrix.runConfig.cloud }} Mac java ${{ matrix.runConfig.javaVersion }} JDBC${{ matrix.additionalMavenProfile }} ${{ matrix.category.name }} runs-on: macos-latest strategy: fail-fast: false matrix: runConfig: >- ${{ fromJSON( ( github.event_name == 'pull_request' && !contains(github.event.pull_request.labels.*.name, 'FULL-TEST-MATRIX') && '[ {"cloud":"AWS","javaVersion":"8"}, {"cloud":"GCP","javaVersion":"17"}, {"cloud":"AZURE","javaVersion":"21"} ]' ) || '[ {"cloud":"AWS","javaVersion":"8"}, {"cloud":"AWS","javaVersion":"21"}, {"cloud":"GCP","javaVersion":"11"}, {"cloud":"AZURE","javaVersion":"17"}, {"cloud":"AWS","javaVersion":"21"} ]' ) }} category: [{suites: 'ResultSetTestSuite', name: 'TestCategoryResultSet'}, {suites: 'StatementTestSuite,LoaderTestSuite', name: 
'TestCategoryStatement,TestCategoryLoader'}, {suites: 'OthersTestSuite', name: 'TestCategoryOthers'}, {suites: 'DatabaseMetaDataTestSuite', name: 'TestCategoryDatabaseMetaData'}, {suites: 'ArrowTestSuite,ConnectionTestSuite,CoreTestSuite,DiagnosticTestSuite', name: 'TestCategoryArrow,TestCategoryConnection,TestCategoryCore,TestCategoryDiagnostic'}, {suites: 'FipsTestSuite', name: "TestCategoryFips"}] additionalMavenProfile: [''] steps: - uses: actions/checkout@v5 - uses: actions/setup-java@v5 with: java-version: ${{ matrix.runConfig.javaVersion }} distribution: 'zulu' cache: maven - uses: actions/setup-python@v5 with: python-version: '3.12' - name: Install Homebrew Bash shell: bash run: brew install bash - name: Tests shell: bash env: PARAMETERS_SECRET: ${{ secrets.PARAMETERS_SECRET }} JDBC_PRIVATE_KEY_SECRET: ${{ secrets.JDBC_PRIVATE_KEY_SECRET }} CLOUD_PROVIDER: ${{ matrix.runConfig.cloud }} JDBC_TEST_SUITES: ${{ matrix.category.suites }} ADDITIONAL_MAVEN_PROFILE: ${{ matrix.additionalMavenProfile }} JAVA_TOOL_OPTIONS: "-Xms1g -Xmx4g" # Increase surefire memory because arm64 macOS machines do not have enough memory by default run: /opt/homebrew/bin/bash ./ci/test_mac.sh test-rocky: needs: [build, unit-test-linux, unit-test-windows, unit-test-mac] name: ${{ matrix.runConfig.cloud }} Rocky9 java ${{ matrix.runConfig.javaVersion }} JDBC${{ matrix.additionalMavenProfile }} ${{ matrix.category.name }} runs-on: ubuntu-latest strategy: fail-fast: false matrix: runConfig: >- ${{ fromJSON( ( github.event_name == 'pull_request' && !contains(github.event.pull_request.labels.*.name, 'FULL-TEST-MATRIX') && '[ {"cloud":"AWS","javaVersion":"17","image":"jdbc-rockylinux9-openjdk17"} ]' ) || '[ {"cloud":"AWS","javaVersion":"8","image":"jdbc-rockylinux9-openjdk8"}, {"cloud":"GCP","javaVersion":"11","image":"jdbc-rockylinux9-openjdk11"}, {"cloud":"AZURE","javaVersion":"17","image":"jdbc-rockylinux9-openjdk17"}, {"cloud":"AWS","javaVersion":"21","image":"jdbc-rockylinux9-openjdk21"} ]' ) }} category: [{suites: 'ResultSetTestSuite', name: 'TestCategoryResultSet'}, {suites: 'StatementTestSuite,LoaderTestSuite', name: 'TestCategoryStatement,TestCategoryLoader'}, {suites: 'OthersTestSuite', name: 'TestCategoryOthers'}, {suites: 'DatabaseMetaDataTestSuite', name: 'TestCategoryDatabaseMetaData'}, {suites: 'ArrowTestSuite,ConnectionTestSuite,CoreTestSuite,DiagnosticTestSuite', name: 'TestCategoryArrow,TestCategoryConnection,TestCategoryCore,TestCategoryDiagnostic'}, {suites: 'FipsTestSuite', name: "TestCategoryFips"}] additionalMavenProfile: [''] steps: - uses: actions/checkout@v5 - name: Tests shell: bash env: PARAMETERS_SECRET: ${{ secrets.PARAMETERS_SECRET }} JDBC_PRIVATE_KEY_SECRET: ${{ secrets.JDBC_PRIVATE_KEY_SECRET }} CLOUD_PROVIDER: ${{ matrix.runConfig.cloud }} TARGET_DOCKER_TEST_IMAGE: ${{ matrix.runConfig.image }} JDBC_TEST_SUITES: ${{ matrix.category.suites }} ADDITIONAL_MAVEN_PROFILE: ${{ matrix.additionalMavenProfile }} JAVA_TOOL_OPTIONS: "-Xms1g -Xmx4g" run: ./ci/test.sh test-linux: needs: [build, unit-test-linux, unit-test-windows, unit-test-mac] name: ${{ matrix.runConfig.cloud }} Linux java on ${{ matrix.runConfig.image }} JDBC${{ matrix.runConfig.additionalMavenProfile }} ${{ matrix.category.name }} runs-on: ubuntu-latest strategy: fail-fast: false matrix: runConfig: >- ${{ fromJSON( ( github.event_name == 'pull_request' && !contains(github.event.pull_request.labels.*.name, 'FULL-TEST-MATRIX') && '[ {"image":"jdbc-rockylinux8-openjdk8","cloud":"AWS","additionalMavenProfile":""},
{"image":"jdbc-rockylinux8-openjdk11","cloud":"AWS","additionalMavenProfile":""}, {"image":"jdbc-rockylinux8-openjdk17","cloud":"AWS","additionalMavenProfile":""}, {"image":"jdbc-rockylinux8-openjdk21","cloud":"AWS","additionalMavenProfile":""}, {"image":"jdbc-rockylinux8-openjdk17","cloud":"AZURE","additionalMavenProfile":""}, {"image":"jdbc-rockylinux8-openjdk17","cloud":"GCP","additionalMavenProfile":""}, {"image":"jdbc-rockylinux8-openjdk8","cloud":"AWS","additionalMavenProfile":"-Dthin-jar"}, {"image":"jdbc-rockylinux8-openjdk21","cloud":"AWS","additionalMavenProfile":"-Dthin-jar"} ]' ) || '[ {"image":"jdbc-rockylinux8-openjdk8","cloud":"AWS","additionalMavenProfile":""}, {"image":"jdbc-rockylinux8-openjdk11","cloud":"AWS","additionalMavenProfile":""}, {"image":"jdbc-rockylinux8-openjdk17","cloud":"AWS","additionalMavenProfile":""}, {"image":"jdbc-rockylinux8-openjdk21","cloud":"AWS","additionalMavenProfile":""}, {"image":"jdbc-rockylinux8-openjdk8","cloud":"AZURE","additionalMavenProfile":""}, {"image":"jdbc-rockylinux8-openjdk11","cloud":"AZURE","additionalMavenProfile":""}, {"image":"jdbc-rockylinux8-openjdk17","cloud":"AZURE","additionalMavenProfile":""}, {"image":"jdbc-rockylinux8-openjdk21","cloud":"AZURE","additionalMavenProfile":""}, {"image":"jdbc-rockylinux8-openjdk8","cloud":"GCP","additionalMavenProfile":""}, {"image":"jdbc-rockylinux8-openjdk11","cloud":"GCP","additionalMavenProfile":""}, {"image":"jdbc-rockylinux8-openjdk17","cloud":"GCP","additionalMavenProfile":""}, {"image":"jdbc-rockylinux8-openjdk21","cloud":"GCP","additionalMavenProfile":""}, {"image":"jdbc-rockylinux8-openjdk8","cloud":"AWS","additionalMavenProfile":"-Dthin-jar"}, {"image":"jdbc-rockylinux8-openjdk21","cloud":"AWS","additionalMavenProfile":"-Dthin-jar"}, {"image":"jdbc-rockylinux8-openjdk17","cloud":"AZURE","additionalMavenProfile":"-Dthin-jar"}, {"image":"jdbc-rockylinux8-openjdk17","cloud":"GCP","additionalMavenProfile":"-Dthin-jar"}, ]' ) }} category: [{suites: 'ResultSetTestSuite', name: 'TestCategoryResultSet'}, {suites: 'StatementTestSuite,LoaderTestSuite', name: 'TestCategoryStatement,TestCategoryLoader'}, {suites: 'OthersTestSuite', name: 'TestCategoryOthers'}, {suites: 'DatabaseMetaDataTestSuite', name: 'TestCategoryDatabaseMetaData'}, {suites: 'ArrowTestSuite,ConnectionTestSuite,CoreTestSuite,DiagnosticTestSuite', name: 'TestCategoryArrow,TestCategoryConnection,TestCategoryCore,TestCategoryDiagnostic'}, {suites: 'FipsTestSuite', name: "TestCategoryFips"}] steps: - uses: actions/checkout@v5 - name: Tests shell: bash env: PARAMETERS_SECRET: ${{ secrets.PARAMETERS_SECRET }} JDBC_PRIVATE_KEY_SECRET: ${{ secrets.JDBC_PRIVATE_KEY_SECRET }} CLOUD_PROVIDER: ${{ matrix.runConfig.cloud }} TARGET_DOCKER_TEST_IMAGE: ${{ matrix.runConfig.image }} JDBC_TEST_SUITES: ${{ matrix.category.suites }} ADDITIONAL_MAVEN_PROFILE: ${{ matrix.runConfig.additionalMavenProfile }} JAVA_TOOL_OPTIONS: "-Xms1g -Xmx4g" run: ./ci/test.sh test-fat-jar-slf4j: name: ${{ matrix.cloud }} Fat Jar SLF4J (${{ matrix.variant }}) Test App runs-on: ubuntu-latest strategy: fail-fast: false matrix: cloud: [ 'AWS', 'GCP', 'AZURE' ] variant: [ 'default', 'fips'] steps: - uses: actions/checkout@v5 - uses: actions/setup-java@v5 with: java-version: '11' distribution: 'temurin' cache: maven - name: Test Fat Jar App (SLF4J) shell: bash env: PARAMETERS_SECRET: ${{ secrets.PARAMETERS_SECRET }} JDBC_PRIVATE_KEY_SECRET: ${{ secrets.JDBC_PRIVATE_KEY_SECRET }} CLOUD_PROVIDER: ${{ matrix.cloud }} run: ./fat-jar-test-app/run.sh ${{ matrix.variant 
}} slf4j test-fat-jar-jul: name: AWS Fat Jar JUL (${{ matrix.variant }}) Test App runs-on: ubuntu-latest strategy: fail-fast: false matrix: variant: ['default', 'fips'] steps: - uses: actions/checkout@v5 - uses: actions/setup-java@v5 with: java-version: '11' distribution: 'temurin' cache: maven - name: Test Fat Jar App (JUL) shell: bash env: PARAMETERS_SECRET: ${{ secrets.PARAMETERS_SECRET }} JDBC_PRIVATE_KEY_SECRET: ${{ secrets.JDBC_PRIVATE_KEY_SECRET }} CLOUD_PROVIDER: AWS run: ./fat-jar-test-app/run.sh ${{ matrix.variant }} jul ================================================ FILE: .github/workflows/changelog.yml ================================================ name: Changelog Check permissions: contents: read on: pull_request: types: [opened, synchronize, labeled, unlabeled] branches: - master jobs: check_change_log: runs-on: ubuntu-latest if: ${{!contains(github.event.pull_request.labels.*.name, 'NO-CHANGELOG-UPDATES')}} steps: - name: Checkout uses: actions/checkout@v3 with: fetch-depth: 0 - name: Ensure CHANGELOG.md is updated run: git diff --name-only --diff-filter=ACMRT ${{ github.event.pull_request.base.sha }} ${{ github.sha }} | grep -wq "CHANGELOG.md" ================================================ FILE: .github/workflows/check-style.yml ================================================ name: Check Style on: pull_request: branches: - master jobs: check-style: name: Check Style runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Check Style shell: bash run: mvn clean validate --batch-mode --show-version -P check-style ================================================ FILE: .github/workflows/cla_bot.yml ================================================ name: "CLA Assistant" on: issue_comment: types: [created] pull_request_target: types: [opened,closed,synchronize] jobs: CLAAssistant: runs-on: ubuntu-latest permissions: actions: write contents: write pull-requests: write statuses: write steps: - name: "CLA Assistant" if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I hereby sign the CLA') || github.event_name == 'pull_request_target' uses: contributor-assistant/github-action/@master env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} PERSONAL_ACCESS_TOKEN : ${{ secrets.CLA_BOT_TOKEN }} with: path-to-signatures: 'signatures/version1.json' path-to-document: 'https://github.com/snowflakedb/CLA/blob/main/README.md' branch: 'main' allowlist: 'dependabot[bot],github-actions,Jenkins User,_jenkins,sfc-gh-snyk-sca-sa,snyk-bot' remote-organization-name: 'snowflake-eng' remote-repository-name: 'cla-db' ================================================ FILE: .github/workflows/jira_close.yml ================================================ name: Jira closure on: issues: types: [closed, deleted] jobs: close-issue: runs-on: ubuntu-latest steps: - name: Extract issue from title id: extract env: TITLE: '${{ github.event.issue.title }}' run: | jira=$(echo -n $TITLE | awk '{print $1}' | sed -e 's/://') echo ::set-output name=jira::$jira - name: Close Jira Issue if: startsWith(steps.extract.outputs.jira, 'SNOW-') env: ISSUE_KEY: ${{ steps.extract.outputs.jira }} JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }} JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }} JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }} run: | JIRA_API_URL="${JIRA_BASE_URL}/rest/api/2/issue/${ISSUE_KEY}/transitions" curl -X POST \ --url "$JIRA_API_URL" \ --user "${JIRA_USER_EMAIL}:${JIRA_API_TOKEN}" \ --header "Content-Type: application/json" \ --data "{ \"update\": { 
\"comment\": [ { \"add\": { \"body\": \"Closed on GitHub\" } } ] }, \"fields\": { \"customfield_12860\": { \"id\": \"11506\" }, \"customfield_10800\": { \"id\": \"-1\" }, \"customfield_12500\": { \"id\": \"11302\" }, \"customfield_12400\": { \"id\": \"-1\" }, \"resolution\": { \"name\": \"Done\" } }, \"transition\": { \"id\": \"71\" } }" ================================================ FILE: .github/workflows/jira_comment.yml ================================================ name: Jira comment on: issue_comment: types: [created] jobs: comment-issue: runs-on: ubuntu-latest steps: - name: Jira login uses: atlassian/gajira-login@master env: JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }} JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }} JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }} - name: Extract issue from title id: extract env: TITLE: "${{ github.event.issue.title }}" run: | jira=$(echo -n $TITLE | awk '{print $1}' | sed -e 's/://') echo ::set-output name=jira::$jira - name: Comment on issue uses: atlassian/gajira-comment@master if: startsWith(steps.extract.outputs.jira, 'SNOW-') with: issue: "${{ steps.extract.outputs.jira }}" comment: "${{ github.event.comment.user.login }} commented:\n\n${{ github.event.comment.body }}\n\n${{ github.event.comment.html_url }}" ================================================ FILE: .github/workflows/jira_issue.yml ================================================ name: Jira creation on: issues: types: [opened] issue_comment: types: [created] jobs: create-issue: runs-on: ubuntu-latest permissions: issues: write if: ((github.event_name == 'issue_comment' && github.event.comment.body == 'recreate jira' && github.event.comment.user.login == 'sfc-gh-mkeller') || (github.event_name == 'issues' && github.event.pull_request.user.login != 'whitesource-for-github-com[bot]')) steps: - name: Create JIRA Ticket id: create env: JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }} JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }} JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }} ISSUE_TITLE: ${{ github.event.issue.title }} ISSUE_BODY: ${{ github.event.issue.body }} ISSUE_URL: ${{ github.event.issue.html_url }} run: | # debug #set -x TMP_BODY=$(mktemp) trap "rm -f $TMP_BODY" EXIT # Escape special characters in title and body TITLE=$(echo "${ISSUE_TITLE//`/\\`}" | sed 's/"/\\"/g' | sed "s/'/\\\'/g") echo "${ISSUE_BODY//`/\\`}" | sed 's/"/\\"/g' | sed "s/'/\\\'/g" > $TMP_BODY echo -e "\n\n_Created from GitHub Action_ for $ISSUE_URL" >> $TMP_BODY BODY=$(cat "$TMP_BODY") PAYLOAD=$(jq -n \ --arg issuetitle "$TITLE" \ --arg issuebody "$BODY" \ '{ fields: { project: { key: "SNOW" }, issuetype: { name: "Bug" }, summary: $issuetitle, description: $issuebody, customfield_11401: { id: "14723" }, assignee: { id: "712020:e527ae71-55cc-4e02-9217-1ca4ca8028a2" }, components: [{ id: "19281" }], labels: ["oss"], priority: { id: "10001" } } }') # Create JIRA issue using REST API RESPONSE=$(curl -s -X POST \ -H "Content-Type: application/json" \ -H "Accept: application/json" \ -u "$JIRA_USER_EMAIL:$JIRA_API_TOKEN" \ "$JIRA_BASE_URL/rest/api/2/issue" \ -d "$PAYLOAD") # Extract JIRA issue key from response JIRA_KEY=$(echo "$RESPONSE" | jq -r '.key') if [ "$JIRA_KEY" = "null" ] || [ -z "$JIRA_KEY" ]; then echo "Failed to create JIRA issue" echo "Response: $RESPONSE" echo "Request payload: $PAYLOAD" exit 1 fi echo "Created JIRA issue: $JIRA_KEY" echo "jira_key=$JIRA_KEY" >> $GITHUB_OUTPUT - name: Update GitHub Issue env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} REPOSITORY: ${{ github.repository }} ISSUE_NUMBER: ${{ 
github.event.issue.number }} JIRA_KEY: ${{ steps.create.outputs.jira_key }} ISSUE_TITLE: ${{ github.event.issue.title }} run: | TITLE=$(echo "${ISSUE_TITLE//`/\\`}" | sed 's/"/\\"/g' | sed "s/'/\\\'/g") PAYLOAD=$(jq -n \ --arg issuetitle "$TITLE" \ --arg jirakey "$JIRA_KEY" \ '{ title: ($jirakey + ": " + $issuetitle) }') # Update Github issue title with jira id curl -s \ -X PATCH \ -H "Authorization: Bearer $GITHUB_TOKEN" \ -H "Accept: application/vnd.github+json" \ -H "X-GitHub-Api-Version: 2022-11-28" \ "https://api.github.com/repos/$REPOSITORY/issues/$ISSUE_NUMBER" \ -d "$PAYLOAD" if [ "$?" != 0 ]; then echo "Failed to update GH issue. Payload was:" echo "$PAYLOAD" exit 1 fi ================================================ FILE: .github/workflows/semgrep.yml ================================================ --- name: Run semgrep checks on: pull_request: branches: [master] permissions: contents: read jobs: run-semgrep-reusable-workflow: uses: snowflakedb/reusable-workflows/.github/workflows/semgrep-v2.yml@main secrets: token: ${{ secrets.SEMGREP_APP_TOKEN }} ================================================ FILE: .github/workflows/snyk-issue.yml ================================================ name: Snyk Issue on: schedule: - cron: '* */12 * * *' workflow_dispatch: permissions: contents: read issues: write pull-requests: write concurrency: snyk-issue jobs: snyk: runs-on: ubuntu-latest steps: - name: checkout action uses: actions/checkout@v4 with: repository: snowflakedb/whitesource-actions token: ${{ secrets.WHITESOURCE_ACTION_TOKEN }} path: whitesource-actions - name: set-env run: echo "REPO=$(basename $GITHUB_REPOSITORY)" >> $GITHUB_ENV - name: Jira Creation uses: ./whitesource-actions/snyk-issue with: snyk_org: ${{ secrets.SNYK_ORG_ID_PUBLIC_REPO }} snyk_token: ${{ secrets.SNYK_GITHUB_INTEGRATION_TOKEN_PUBLIC_REPO }} jira_token: ${{ secrets.JIRA_TOKEN_PUBLIC_REPO }} env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} ================================================ FILE: .github/workflows/snyk-pr.yml ================================================ name: snyk-pr on: pull_request: branches: - master permissions: contents: read issues: write pull-requests: write jobs: snyk: runs-on: ubuntu-latest if: ${{ github.event.pull_request.user.login == 'sfc-gh-snyk-sca-sa' }} steps: - name: checkout uses: actions/checkout@v4 with: ref: ${{ github.event.pull_request.head.ref }} fetch-depth: 0 - name: checkout action uses: actions/checkout@v4 with: repository: snowflakedb/whitesource-actions token: ${{ secrets.WHITESOURCE_ACTION_TOKEN }} path: whitesource-actions - name: PR uses: ./whitesource-actions/snyk-pr env: PR_TITLE: ${{ github.event.pull_request.title }} with: jira_token: ${{ secrets.JIRA_TOKEN_PUBLIC_REPO }} gh_token: ${{ secrets.GITHUB_TOKEN }} amend: false # true if you want the commit to be amended with the JIRA number ================================================ FILE: .github/workflows/snyk-scan.yml ================================================ name: Snyk Scan on: pull_request: branches: - master schedule: - cron: '0 6 * * 1' workflow_dispatch: permissions: contents: read pull-requests: read jobs: detect-changes: name: Detect Changed Files if: github.event_name == 'pull_request' runs-on: ubuntu-latest outputs: any-pom: ${{ steps.changes.outputs.any-pom }} parent-pom: ${{ steps.changes.outputs.parent-pom }} public-pom: ${{ steps.changes.outputs.public-pom }} thin-public-pom: ${{ steps.changes.outputs.thin-public-pom }} fips-public-pom: ${{ steps.changes.outputs.fips-public-pom }} steps: - 
uses: dorny/paths-filter@v3 id: changes with: filters: | any-pom: - '**/pom.xml' parent-pom: - 'parent-pom.xml' public-pom: - 'public_pom.xml' thin-public-pom: - 'thin_public_pom.xml' fips-public-pom: - 'FIPS/public_pom.xml' snyk-aggregate-test: name: Snyk Aggregate Project Test needs: detect-changes if: needs.detect-changes.outputs.any-pom == 'true' runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Run Snyk aggregate project test uses: snyk/actions/maven@master env: SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} SNYK_CFG_ORG: ${{ secrets.SNYK_ORG_ID }} MAVEN_OPTS: -DskipTests -Dmaven.test.skip=true -Dmaven.main.skip=true with: args: --maven-aggregate-project --all-projects --policy-path=.snyk command: test snyk-parent-pom-test: name: Snyk Parent POM Test needs: detect-changes if: needs.detect-changes.outputs.parent-pom == 'true' runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Run Snyk parent-pom test uses: snyk/actions/maven@master env: SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} SNYK_CFG_ORG: ${{ secrets.SNYK_ORG_ID }} MAVEN_OPTS: -DskipTests -Dmaven.test.skip=true -Dmaven.main.skip=true with: args: --file=parent-pom.xml --package-manager=maven command: test snyk-public-pom-test: name: Snyk Public POM Test needs: detect-changes if: needs.detect-changes.outputs.public-pom == 'true' runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Run Snyk public-pom test uses: snyk/actions/maven@master env: SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} SNYK_CFG_ORG: ${{ secrets.SNYK_ORG_ID }} MAVEN_OPTS: -DskipTests -Dmaven.test.skip=true -Dmaven.main.skip=true with: args: --file=public_pom.xml --package-manager=maven command: test snyk-thin-public-pom-test: name: Snyk Thin Public POM Test needs: detect-changes if: needs.detect-changes.outputs.thin-public-pom == 'true' runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Run Snyk thin-public-pom test uses: snyk/actions/maven@master env: SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} SNYK_CFG_ORG: ${{ secrets.SNYK_ORG_ID }} MAVEN_OPTS: -DskipTests -Dmaven.test.skip=true -Dmaven.main.skip=true with: args: --file=thin_public_pom.xml --package-manager=maven command: test snyk-fips-public-pom-test: name: Snyk FIPS Public POM Test needs: detect-changes if: needs.detect-changes.outputs.fips-public-pom == 'true' runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Run Snyk FIPS public-pom test uses: snyk/actions/maven@master env: SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} SNYK_CFG_ORG: ${{ secrets.SNYK_ORG_ID }} MAVEN_OPTS: -DskipTests -Dmaven.test.skip=true -Dmaven.main.skip=true with: args: --file=FIPS/public_pom.xml --package-manager=maven command: test snyk-monitor: name: Snyk Monitor (${{ matrix.pom }}) if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' runs-on: ubuntu-latest strategy: fail-fast: false matrix: pom: - pom.xml - parent-pom.xml - public_pom.xml - thin_public_pom.xml - FIPS/pom.xml - FIPS/public_pom.xml - fat-jar-test-app/pom.xml - ci/wif/aws-lambda/pom.xml - ci/wif/azure-function/pom.xml - ci/wif/gcp-function/pom.xml steps: - uses: actions/checkout@v4 - name: Snyk monitor ${{ matrix.pom }} uses: snyk/actions/maven@master env: SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} SNYK_CFG_ORG: ${{ secrets.SNYK_ORG_ID }} MAVEN_OPTS: -DskipTests -Dmaven.test.skip=true -Dmaven.main.skip=true with: args: --file=${{ matrix.pom }} --package-manager=maven --policy-path=.snyk --project-name=${{ matrix.pom }} --remote-repo-url=https://github.com/snowflakedb/snowflake-jdbc command: monitor 
================================================ FILE: .gitignore ================================================ # Compiled source # ################### *.com *.class *.dll *.exe *.o *.so # Exception: Keep minicore native libraries !src/main/resources/minicore/**/*.dll !src/main/resources/minicore/**/*.so !src/main/resources/minicore/**/*.dylib !src/main/resources/minicore/**/*.a # Packages # ############ # it's better to unpack these files and commit the raw source # git has its own built-in compression methods *.7z *.dmg *.gz *.iso *.rar *.tar *.zip # Logs and databases # ###################### *.log *.sql *.sqlite # OS generated files # ###################### .DS_Store .DS_Store? ._* .Spotlight-V100 .Trashes ehthumbs.db Thumbs.db target/* # IDE Specific files # ###################### .idea/* *.iml nb-configuration.xml nbactions.xml # For snowflake internal use # ############################## dependency-reduced-pom.xml henplus/* login.defaults rt_build.sh sfsql sfsql.cmd snowflake.jceks thin/* .svnignore mvn_settings.xml deploy.sh *-maven-metadata.xml Dockerfile.build parameters*.json snowflake-whitelist Golang ClientTelemetryFramework lib/* .wiremock/** # WhiteSource Scan wss*.config wss-unified-agent.jar whitesource/ *.swp #created in some tests placeholder #vs code .vscode/ # SSH private key for WIF tests ci/wif/parameters/rsa_wif_aws_azure ci/wif/parameters/rsa_wif_gcp ci/wif/parameters/rsa_gcp_function # WIF function builds ci/wif/*/target/ ================================================ FILE: .mvn/wrapper/maven-wrapper.properties ================================================ # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # NOTE: Maven 3.9 is incompatible with the linkage checker rules distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.8.8/apache-maven-3.8.8-bin.zip wrapperUrl=https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.2.0/maven-wrapper-3.2.0.jar ================================================ FILE: .pre-commit-config.yaml ================================================ repos: - repo: git@github.com:snowflakedb/casec_precommit.git rev: v1.35.5 hooks: - id: secret-scanner ================================================ FILE: CHANGELOG.md ================================================ #### For all official JDBC Release Notes please refer to https://docs.snowflake.com/en/release-notes/clients-drivers/jdbc # Changelog - v4.2.1-SNAPSHOT - Fixed path traversal via server-controlled filenames in `SnowflakeFileTransferAgent` GET destination filename derivation; backslash separators are now stripped and traversal/absolute basenames are rejected (snowflakedb/snowflake-jdbc#2622).
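The hardening described above boils down to deriving a safe basename from a server-controlled filename. The helper below is a minimal illustrative sketch of that idea, not the driver's actual implementation; the class and method names are hypothetical:

```java
/**
 * Hypothetical sketch of the GET destination-filename hardening described in the
 * entry above; names and structure are illustrative, not the driver's real code.
 */
final class GetFilenameSanitizer {
  static String safeBasename(String serverProvidedName) {
    // Backslash separators are treated like forward slashes and stripped.
    String normalized = serverProvidedName.replace('\\', '/');
    // Keep only the final path segment, discarding any directory components.
    String basename = normalized.substring(normalized.lastIndexOf('/') + 1);
    // Reject traversal segments and names that collapse to nothing.
    if (basename.isEmpty() || ".".equals(basename) || "..".equals(basename)) {
      throw new IllegalArgumentException(
          "Unsafe server-provided filename: " + serverProvidedName);
    }
    return basename;
  }
}
```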
- v4.2.0 - Extended the `SKIP_TOKEN_FILE_PERMISSIONS_VERIFICATION` environment variable to also bypass permission verification on the `connections.toml` config file and on the credential cache file (`credential_cache_v1.json`), unblocking driver use in SPCS environments where strict 0600/0700 ownership cannot be guaranteed (snowflakedb/snowflake-jdbc#2614) - Fixed NPE in `RestRequest.sendIBHttpErrorEvent` when `SFSession.getTelemetryClient()` returns null because the session URL is not yet set; a `NoOpTelemetryClient` is now returned instead, allowing the original HTTP error to be surfaced to the caller (snowflakedb/snowflake-jdbc#2610) - Added support for attaching the SPCS service-identifier token (`SPCS_TOKEN`) to login requests when the driver is running inside an SPCS container (gated on the `SNOWFLAKE_RUNNING_INSIDE_SPCS` environment variable; token read from `/snowflake/session/spcs_token`) (snowflakedb/snowflake-jdbc#2603) - Added libc family and version detection (`LIBC_FAMILY`, `LIBC_VERSION`) to the `CLIENT_ENVIRONMENT` section of the login request on Linux (snowflakedb/snowflake-jdbc#2596) - Fixed NPE in `SFTrustManager.validateRevocationStatusMain` when the OCSP cache contains a non-SUCCESSFUL response (e.g. `unauthorized(6)`); the response is now surfaced as an `SFOCSPException` so cache eviction and fail-open run normally (snowflakedb/snowflake-jdbc#2597) - Added IPv6 support for cloud metadata services so Workload Identity Federation and platform detection work on IPv6-only instances (snowflakedb/snowflake-jdbc#2586): - GCP WIF attestation now uses hostname `metadata.google.internal` instead of the IPv4 link-local address. - EC2 instance detection probes the IPv4 and IPv6 IMDS endpoints (`[fd00:ec2::254]`) in parallel so detection succeeds on IPv6-only instances without doubling the detection budget on dual-stack hosts. - Added `enableCopyResultSet` connection property (default `false`): when `true`, `Statement.execute()` exposes the COPY INTO per-file metadata result set via `getResultSet()` instead of consuming it internally (snowflakedb/snowflake-jdbc#2592); see the usage sketch after this version's entries - Migrated CI test images from CentOS 7 (EOL) to Rocky Linux 8 (snowflakedb/snowflake-jdbc#2578) - Fixed NPE "The URI scheme of endpointOverride must not be null" happening during file transfer (e.g. PUT) in some use cases (snowflakedb/snowflake-jdbc#2572) - Fixed connections.toml auto-configuration behaviour (snowflakedb/snowflake-jdbc#2591): - now defaulting to port 443 instead of 80 when neither port nor protocol is specified - configuration coming from the JDBC connection string is no longer ignored when auto-configuration-sourced items are also present (when both are present, the direct connection configuration takes precedence) - Fixed protocol field in connections.toml being ignored, causing connections to always use HTTPS (snowflakedb/snowflake-jdbc#2585) - Fixed SecurityException on credential cache file ownership check in containers where JVM returns '?' for user.name (snowflakedb/snowflake-jdbc#2600). - Fixed credential cache delete operations ignoring clientStoreTemporaryCredential=false setting (snowflakedb/snowflake-jdbc#2600). - Fixed S3 transfer thread pool leak during repeated PUT/GET operations causing possible OOM (snowflakedb/snowflake-jdbc#2602). - Bumped BouncyCastle to 1.84 to address CVE-2026-0636, CVE-2026-5588, and CVE-2026-5598 (snowflakedb/snowflake-jdbc#2593).
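A minimal usage sketch for the `enableCopyResultSet` property described above. The account URL, credentials, stage name, and the `file`/`status` result-set column names are placeholders and assumptions for illustration, not taken from the driver's documentation:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
import java.util.Properties;

public class CopyResultSetExample {
  public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.put("user", "USER");         // placeholder credentials
    props.put("password", "PASSWORD");
    // Opt in to exposing the COPY INTO per-file metadata result set.
    props.put("enableCopyResultSet", "true");
    try (Connection con = DriverManager.getConnection(
            "jdbc:snowflake://ACCOUNT.snowflakecomputing.com", props);
        Statement stmt = con.createStatement()) {
      stmt.execute("COPY INTO my_table FROM @my_stage");
      // With the property enabled, the per-file COPY metadata is available here
      // instead of being consumed internally by the driver.
      try (ResultSet rs = stmt.getResultSet()) {
        while (rs.next()) {
          // Column names assumed from the usual COPY INTO output shape.
          System.out.println(rs.getString("file") + " -> " + rs.getString("status"));
        }
      }
    }
  }
}
```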
- Added `workloadIdentityAwsExternalId` connection property to support AWS STS external ID in Workload Identity Federation role-chaining flows (snowflakedb/snowflake-jdbc#2565). - Bumped grpc-java to 1.81.1 now that it has also upgraded to netty 4.1.132.Final (completing the second part of PR 2561), and bumped netty itself to 4.1.133.Final to address several CVEs (snowflakedb/snowflake-jdbc#2611). - v4.1.0 - Added warning about using plain HTTP OAuth endpoints (snowflakedb/snowflake-jdbc#2556). - Fix ObjectMapper initialization when DATE_OUTPUT_FORMAT is specified (snowflakedb/snowflake-jdbc#2545). - Fix Netty native library conflict in thin JAR (snowflakedb/snowflake-jdbc#2559) - Bumped netty to 4.1.132.Final to address CVE-2026-33870 (High) and CVE-2026-33871 (High) (snowflakedb/snowflake-jdbc#2561) - Added getRole, getWarehouse and getDatabase API extension methods (snowflakedb/snowflake-jdbc#2564); see the sketch after this version's entries - Fix driver failure when a security manager prohibits access to system properties or environment variables, or prohibits modifying security providers (snowflakedb/snowflake-jdbc#2563) - Removed the io.netty.tryReflectionSetAccessible system property setting as it's no longer needed with modern Arrow/Netty versions (snowflakedb/snowflake-jdbc#2563) - Fixed crash in getColumns operation when table contained unrecognised column type (snowflakedb/snowflake-jdbc#2568). - Fixed session expiration when multiple sessions have different heartbeat intervals (snowflakedb/snowflake-jdbc#2566). - Merge QueryContext from failed query responses (snowflakedb/snowflake-jdbc#2570)
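As a quick illustration of the v4.1.0 session-context getters mentioned above, the sketch below assumes the methods are exposed on the public `net.snowflake.client.api.connection.SnowflakeConnection` interface and reached via the standard JDBC `unwrap` mechanism; treat the exact location of the methods as an assumption, and the URL and credentials as placeholders:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import net.snowflake.client.api.connection.SnowflakeConnection;

public class SessionContextExample {
  public static void main(String[] args) throws Exception {
    // Placeholder account URL and credentials.
    try (Connection con = DriverManager.getConnection(
        "jdbc:snowflake://ACCOUNT.snowflakecomputing.com", "USER", "PASSWORD")) {
      // Unwrap the vendor interface to reach the extension methods
      // (assumed here to live on SnowflakeConnection).
      SnowflakeConnection sfCon = con.unwrap(SnowflakeConnection.class);
      System.out.println("role:      " + sfCon.getRole());
      System.out.println("warehouse: " + sfCon.getWarehouse());
      System.out.println("database:  " + sfCon.getDatabase());
    }
  }
}
```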
- Add logging implementation to CLIENT_ENVIRONMENT telemetry (snowflakedb/snowflake-jdbc#2527) - Fix NPE when HOME directory cache is not available (snowflakedb/snowflake-jdbc#2534) - Bumped `commons-compress` dependency to latest (1.28.0) to address CVE-2024-25710 and CVE-2024-26308 (snowflakedb/snowflake-jdbc#2538) - Add SLF4J bridge from shaded dependencies to `SFLogger` (snowflakedb/snowflake-jdbc#2543) - Fixed proxy authentication when connecting to GCP (snowflakedb/snowflake-jdbc#2540) - Fixed bug where caller-provided schema was ignored in getStreams() (snowflakedb/snowflake-jdbc#2546) - Fixed S3 error handling that manifested as a `NullPointerException` (snowflakedb/snowflake-jdbc#2550) - v4.0.1 - Add /etc/os-release data to Minicore telemetry (snowflakedb/snowflake-jdbc#2470) - Fix incorrect encryption algorithm chosen when a file was put to S3 with the client_encryption_key_size account parameter set to 256 (snowflakedb/snowflake-jdbc#2472) - Fixed fat jar with S3 iteration, the problem of not finding class `software.amazon.awssdk.transfer.s3.internal.ApplyUserAgentInterceptor` (snowflakedb/snowflake-jdbc#2474). - Removed Conscrypt from shading to prevent `failed to find class org/conscrypt/CryptoUpcalls` native error (snowflakedb/snowflake-jdbc#2474). - Update BouncyCastle dependencies to fix CVE-2025-8916 and CVE-2025-8885 (snowflakedb/snowflake-jdbc#2479) - Fix external browser authentication after changing enum name, which manifested as an `Invalid connection URL: Invalid SSOUrl found` error (snowflakedb/snowflake-jdbc#2475). - Rolled back external browser authenticator name to `externalbrowser` (snowflakedb/snowflake-jdbc#2475). __Due to some underlying issues, Snowflake recommends that AWS and Azure customers do not upgrade to this version if they use PUT or GET queries. Instead, Snowflake recommends that you upgrade directly to version 4.0.1. If you have already upgraded to this version, please upgrade to version 4.0.1 as soon as possible.__
- Deprecate net.snowflake.client.jdbc.SnowflakeDriver in favor of new net.snowflake.client.api.driver.SnowflakeDriver. - Move internal classes to net.snowflake.client.internal.* package hierarchy. - Removed deprecated com.snowflake.client.jdbc.SnowflakeDriver class. - Removed deprecated QueryStatus enum from net.snowflake.client.core package. - Removed deprecated QueryStatusV2 class from net.snowflake.client.jdbc package. - Removed deprecated SnowflakeType enum from net.snowflake.client.jdbc package. - v3.28.0 - Ability to choose a connection configuration in the auto-configuration file via a parameter in the JDBC URL. (snowflakedb/snowflake-jdbc#2369) - Bumped grpc-java to 1.77.0 to address CVE-2025-58057 from a transitive dependency (snowflakedb/snowflake-jdbc#2415) - Fix: connection and socket timeouts are now propagated to the HTTP client (snowflakedb/snowflake-jdbc#2394). - Fix Azure 503 retries and configure them with the putGetMaxRetries parameter (snowflakedb/snowflake-jdbc#2422). - Improved retries for SSLHandshakeException errors caused by transient EOFException (snowflakedb/snowflake-jdbc#2423) - Introduced a shared library ([source code](https://github.com/snowflakedb/universal-driver/tree/main/sf_mini_core)) for extended telemetry to identify and prepare testing platform for native rust extensions (snowflakedb/snowflake-jdbc#2430) - Bumped netty to 4.1.128.Final to address CVE-2025-59419 (snowflakedb/snowflake-jdbc#2389) - v3.27.1 - Added platform detection on login to set PLATFORM metric in CLIENT_ENVIRONMENT (snowflakedb/snowflake-jdbc#2351) - Disable DatabaseMetaDataLatestIT::testUseConnectionCtx test (snowflakedb/snowflake-jdbc#2367) - Fix IT tests to construct OAuth scopes correctly (snowflakedb/snowflake-jdbc#2366) - Fix exponential backoff retry time for non-auth requests (snowflakedb/snowflake-jdbc#2370) - Upgrade aws-sdk to 1.12.792 and add STS dependency (snowflakedb/snowflake-jdbc#2361) - Add rockylinux9 CI tests as part of RHEL 9 support (snowflakedb/snowflake-jdbc#2368) - Bumped grpc-java to 1.76.0 to address CVE-2025-58056 from a transitive dependency (snowflakedb/snowflake-jdbc#2371) - Added `workloadIdentityImpersonationPath` config option for `authenticator=WORKLOAD_IDENTITY` allowing workloads to authenticate as a different identity through transitive service account impersonation (snowflakedb/snowflake-jdbc#2348) - Added support for authentication as a different identity through transitive IAM role impersonation for AWS (snowflakedb/snowflake-jdbc#2364) - Add AWS identity detection with ARN validation (snowflakedb/snowflake-jdbc#2379) - v3.27.0 - Added the `changelog.yml` GitHub workflow to ensure changelog is updated on release PRs (snowflakedb/snowflake-jdbc#2340). - Added HTTP 307 & 308 retries in case of internal IP redirects (snowflakedb/snowflake-jdbc#2344) - Make PAT creation return `ResultSet` when using the `execute` method (snowflakedb/snowflake-jdbc#2343) - Renamed CRL_REVOCATION_CHECK_MODE to CERT_REVOCATION_CHECK_MODE in CLIENT_ENVIRONMENT metrics (snowflakedb/snowflake-jdbc#2349) - Added test coverage for multistatement JDBC (snowflakedb/snowflake-jdbc#2318). - Fixed permission check for .toml config file (snowflakedb/snowflake-jdbc#2270). - Bumped netty to 4.1.127.Final to address CVE-2025-58056 and CVE-2025-58057 (snowflakedb/snowflake-jdbc#2354) - Add support for x-snowflake-session sticky HTTP session header returned by Snowflake (snowflakedb/snowflake-jdbc#2357) - Added support for Interval Year-Month and Day-Time types in JDBC (snowflakedb/snowflake-jdbc#2345).
- Added support for Decfloat types in JDBC (snowflakedb/snowflake-jdbc#2329, snowflakedb/snowflake-jdbc#2332). - Fixed pattern search for file when QUOTED_IDENTIFIERS_IGNORE_CASE enabled (snowflakedb/snowflake-jdbc#2333) - Added support for CRL (certificate revocation list) (snowflakedb/snowflake-jdbc#2287). ================================================ FILE: FIPS/.gitignore ================================================ .idea/ lib/ target/ dependency-reduced-pom.xml generated_public_pom.xml ================================================ FILE: FIPS/pom.xml ================================================ 4.0.0 net.snowflake snowflake-jdbc-parent 4.2.1-SNAPSHOT ../parent-pom.xml snowflake-jdbc-fips 4.2.1-SNAPSHOT jar snowflake-jdbc-fips http://maven.apache.org 3.3.9 scm:svn:http://127.0.0.1/svn/dummy scm:svn:https://127.0.0.1/svn/dummy HEAD http://127.0.0.1/websvn/dummy org.bouncycastle bc-fips org.bouncycastle bcpkix-fips ${project.artifactId} ${basedir}/../src/main/java ${basedir}/src/test/java ${basedir}/../src/main/resources true **/*.dylib **/*.so **/*.dll **/*.a ${basedir}/../src/main/resources false **/*.dylib **/*.so **/*.dll **/*.a ${basedir}/../src/test/resources org.apache.maven.plugins maven-install-plugin ${version.plugin.install} install-tika-core validate install-file ${basedir}/../dependencies/tika-core-${tika.version}.jar org.apache.tika tika-core ${tika.version} jar true install-arrow-memory-pom validate install-file ${basedir}/../dependencies/arrow-memory-${arrow.version}.pom org.apache.arrow arrow-memory ${arrow.version} pom true install-arrow-memory-core validate install-file ${basedir}/../dependencies/arrow-memory-core-${arrow.version}.jar org.apache.arrow arrow-memory-core ${arrow.version} jar true install-arrow-memory-unsafe validate install-file ${basedir}/../dependencies/arrow-memory-unsafe-${arrow.version}.jar org.apache.arrow arrow-memory-unsafe ${arrow.version} jar true install-arrow-memory-netty-buffer-patch validate install-file ${basedir}/../dependencies/arrow-memory-netty-buffer-patch-${arrow.version}.jar org.apache.arrow arrow-memory-netty-buffer-patch ${arrow.version} jar true install-arrow-format validate install-file ${basedir}/../dependencies/arrow-format-${arrow.version}.jar org.apache.arrow arrow-format ${arrow.version} jar true install-arrow-vector validate install-file ${basedir}/../dependencies/arrow-vector-${arrow.version}.jar org.apache.arrow arrow-vector ${arrow.version} jar true org.apache.maven.plugins maven-clean-plugin ${version.plugin.clean} lib *.jar org.apache.maven.plugins maven-compiler-plugin ${version.plugin.compiler} true true javac true 8 8 -Xlint:all,-path org.apache.maven.plugins maven-surefire-plugin ${version.plugin.surefire} **/*SFTrustManagerTest.java org.jacoco jacoco-maven-plugin ${version.plugin.jacoco} pre-unit-test prepare-agent target/jacoco-ut.exec post-unit-test test report target/jacoco-ut.exec target/jacoco-ut ${jacoco.skip.instrument} org.apache.maven.plugins maven-jar-plugin ${version.plugin.jar} net.snowflake.client.api.driver.SnowflakeDriver true test-jar maven-dependency-plugin ${version.plugin.dependency} install-jar install copy ${project.groupId} ${project.artifactId} ${project.version} lib org.apache.maven.plugins maven-source-plugin ${version.plugin.source} attach-sources jar org.apache.maven.plugins maven-javadoc-plugin ${version.plugin.javadoc} 8 attach-javadocs jar org.apache.maven.plugins maven-project-info-reports-plugin ${version.plugin.projectinforeports} self-contained-jar 
!not-self-contained-jar ${basedir}/../src/main/resources-fat-jar org.codehaus.mojo build-helper-maven-plugin add-fat-jar-sources generate-sources add-source ${basedir}/../src/main/java-fat-jar org.codehaus.mojo buildnumber-maven-plugin ${version.plugin.buildnumber} package create-timestamp yyyyMMddHHmmss buildNumber.timestamp false false org.apache.maven.plugins maven-shade-plugin ${version.plugin.shade} package shade net.snowflake.common ${shadeBase}.snowflake.common org.apache ${shadeBase}.apache org.slf4j ${shadeBase}.org.slf4j software.amazon.awssdk ${shadeBase}.software.amazon.awssdk software.amazon.encryption.s3 ${shadeBase}.software.amazon.encryption.s3 software.amazon.eventstream ${shadeBase}.software.amazon.eventstream software.amazon.ion ${shadeBase}.software.amazon.ion org.reactivestreams ${shadeBase}.reactivestreams org.jvnet.staxex ${shadeBase}.jvnet.staxex jakarta.xml.soap ${shadeBase}.jakarta.xml.soap jakarta.activation ${shadeBase}.jakarta.activation com.azure ${shadeBase}.azure com.fasterxml ${shadeBase}.fasterxml com.google ${shadeBase}.google google.api ${shadeBase}.google.api google.apps ${shadeBase}.google.apps google.cloud ${shadeBase}.google.cloud google.geo ${shadeBase}.google.geo google.iam ${shadeBase}.google.iam google.logging ${shadeBase}.google.logging google.longrunning ${shadeBase}.google.longrunning google.monitoring ${shadeBase}.google.monitoring google.protobuf ${shadeBase}.google.protobuf google.rpc ${shadeBase}.google.rpc google.shopping ${shadeBase}.google.shopping google.storage ${shadeBase}.google.storage google.type ${shadeBase}.google.type org.joda ${shadeBase}.joda javax.servlet ${shadeBase}.javax.servlet org.jsoup ${shadeBase}.org.jsoup com.nimbusds ${shadeBase}.com.nimbusds javax.annotation ${shadeBase}.javax.annotation net.jcip ${shadeBase}.net.jcip net.minidev ${shadeBase}.net.minidev org.objectweb ${shadeBase}.org.objectweb io.netty ${shadeBase}.io.netty com.carrotsearch ${shadeBase}.com.carrotsearch io.opencensus ${shadeBase}.opencensus io.opentelemetry ${shadeBase}.opentelemetry org.threeten ${shadeBase}.threeten io.grpc ${shadeBase}.grpc META-INF.native.io_grpc_netty_shaded_netty_tcnative META-INF.native.${shadeNativeBase}_grpc_netty_shaded_netty_tcnative META-INF.native.libio_grpc_netty_shaded_netty_tcnative META-INF.native.lib${shadeNativeBase}_grpc_netty_shaded_netty_tcnative META-INF.native.io_grpc_netty_shaded_netty_transport_native_epoll META-INF.native.${shadeNativeBase}_grpc_netty_shaded_netty_transport_native_epoll META-INF.native.libio_grpc_netty_shaded_netty_transport_native_epoll META-INF.native.lib${shadeNativeBase}_grpc_netty_shaded_netty_transport_native_epoll org.checkerframework ${shadeBase}.org.checkerframework org.codehaus ${shadeBase}.org.codehaus io.perfmark ${shadeBase}.io.perfmark opencensus ${shadeBase}.opencensus grpc ${shadeBase}.grpc android.annotation ${shadeBase}.android.annotation reactor ${shadeBase}.reactor org.reactivestreams ${shadeBase}.org.reactivestreams *:* META-INF/LICENSE* META-INF/NOTICE* META-INF/DEPENDENCIES META-INF/maven/** META-INF/services/com.fasterxml.* META-INF/versions/9/module-info.* META-INF/versions/11/module-info.* META-INF/*.xml META-INF/*.SF META-INF/*.DSA META-INF/*.RSA .netbeans_automatic_build git.properties arrow-git.properties google-http-client.properties pipes-fork-server-default-log4j2.xml dependencies.properties azure-*.properties VersionInfo.java project.properties org.apache.arrow:arrow-vector codegen/** com.google.guava:guava com/google/common/io/** 
com/google/common/base/** com/google/common/hash/** com/google/common/collect/** com/google/common/graph/** com/google/common/math/** com/google/common/util/concurrent/** commons-logging:commons-logging org/apache/commons/logging/impl/AvalonLogger.class META-INF/io.netty.versions.properties org.apache.maven.plugins maven-antrun-plugin ${version.plugin.antrun} repack run package check-style com.coveo fmt-maven-plugin ${version.plugin.fmt} check check-content !windows org.codehaus.mojo exec-maven-plugin ${version.plugin.exec} check-shaded-content verify exec ${basedir}/scripts/check_content.sh java-9 (9,) maven-failsafe-plugin org.apache.maven.surefire surefire-junit-platform ${version.plugin.surefire} ${version.plugin.failsafe} --add-opens=java.base/java.io=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-opens=java.base/java.lang=ALL-UNNAMED --add-opens=java.base/java.lang.reflect=ALL-UNNAMED --add-opens=java.base/java.util=ALL-UNNAMED --add-exports=java.base/sun.nio.ch=ALL-UNNAMED --add-exports=java.base/sun.security.internal.spec=ALL-UNNAMED --add-exports=jdk.unsupported/sun.misc=ALL-UNNAMED maven-surefire-plugin --add-opens=java.base/java.io=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-opens=java.base/java.lang=ALL-UNNAMED --add-opens=java.base/java.lang.reflect=ALL-UNNAMED --add-opens=java.base/java.util=ALL-UNNAMED --add-exports=java.base/sun.nio.ch=ALL-UNNAMED --add-exports=java.base/sun.security.internal.spec=ALL-UNNAMED --add-exports=jdk.unsupported/sun.misc=ALL-UNNAMED jenkinsIT jenkinsIT org.apache.maven.plugins maven-failsafe-plugin org.apache.maven.surefire surefire-junit-platform ${version.plugin.surefire} ${version.plugin.failsafe} DefaultIT integration-test **/ConnectionFipsIT.java net.snowflake.client.internal.log.JDK14Logger ${basedir}/../src/test/resources/logging.properties verify ossrh-deploy ossrhDeploy org.apache.maven.plugins maven-gpg-plugin ${version.plugin.gpg} deploy sign-and-deploy-file target/${project.artifactId}.jar ossrh https://oss.sonatype.org/service/local/staging/deploy/maven2 generated_public_pom.xml target/${project.artifactId}-javadoc.jar target/${project.artifactId}-sources.jar ${env.GPG_KEY_ID} ${env.GPG_KEY_PASSPHRASE} maven-deploy-plugin true central-deploy central-deploy org.codehaus.mojo build-helper-maven-plugin ${version.plugin.buildhelper} attach-public-pom attach-artifact package generated_public_pom.xml pom org.apache.maven.plugins maven-gpg-plugin ${env.GPG_KEY_ID} ${env.GPG_KEY_PASSPHRASE} sign-artifacts sign verify org.sonatype.central central-publishing-maven-plugin ${version.plugin.publishing} true ossrh true published ================================================ FILE: FIPS/public_pom.xml ================================================ 4.0.0 net.snowflake snowflake-jdbc-fips 1.0-SNAPSHOT jar Snowflake JDBC Driver Snowflake JDBC Driver https://www.snowflake.net/ The Apache Software License, Version 2.0 http://www.apache.org/licenses/LICENSE-2.0.txt Snowflake Support Team snowflake-java@snowflake.net Snowflake Computing https://www.snowflake.net scm:git:git://github.com/snowflakedb/snowflake-jdbc http://github.com/snowflakedb/snowflake-jdbc/tree/master 1.0.2.6 1.0.8 5.13.0 org.bouncycastle bc-fips ${bouncycastle.bcfips.version} runtime org.bouncycastle bcpkix-fips ${bouncycastle.bcpkixfips.version} runtime net.java.dev.jna jna ${jna.version} true net.java.dev.jna jna-platform ${jna.version} true ================================================ FILE: FIPS/scripts/check_content.sh 
================================================ #!/bin/bash -e # script used to check if all dependencies are shaded into snowflake internal path set -o pipefail DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )" if jar tvf $DIR/../target/snowflake-jdbc-fips.jar | awk '{print $8}' | grep -v -E "/$" | grep -v -E "^(net|com)/snowflake" | grep -v -E "(com|net)/\$" | grep -v -E "^META-INF" | grep -v -E "^iso3166_" | grep -v -E "^mozilla" | grep -v -E "^com/sun/jna" | grep -v com/sun/ | grep -v mime.types | grep -v -E "^com/github/luben/zstd/" | grep -v -E "^aix/" | grep -v -E "^darwin/" | grep -v -E "^freebsd/" | grep -v -E "^linux/" | grep -v -E "^win/" | grep -v -E "^minicore/" | grep -v -E "^org/conscrypt/"; then echo "[ERROR] JDBC jar includes class not under the snowflake namespace" exit 1 fi ================================================ FILE: FIPS/src/test/java/net/snowflake/client/AbstractDriverIT.java ================================================ package net.snowflake.client; import com.google.common.base.Strings; import java.net.URL; import java.sql.Connection; import java.sql.Date; import java.sql.DriverManager; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.sql.Timestamp; import java.util.Calendar; import java.util.HashMap; import java.util.Map; import java.util.Properties; import java.util.TimeZone; import java.util.logging.Level; import java.util.logging.Logger; import static org.hamcrest.MatcherAssert.assertThat; /** Base test class with common constants, data structures and methods */ public class AbstractDriverIT { // This is required to use ConditionalIgnore annotation. public static final String DRIVER_CLASS = "net.snowflake.client.api.driver.SnowflakeDriver"; public static final String DRIVER_CLASS_COM = "com.snowflake.client.jdbc.SnowflakeDriver"; public static final int DONT_INJECT_SOCKET_TIMEOUT = 0; // data files protected static final String TEST_DATA_FILE = "orders_100.csv"; protected static final String TEST_DATA_FILE_2 = "orders_101.csv"; protected static final String[] fileNames = {TEST_DATA_FILE, TEST_DATA_FILE_2}; private static Logger logger = Logger.getLogger(AbstractDriverIT.class.getName()); protected final int ERROR_CODE_BIND_VARIABLE_NOT_ALLOWED_IN_VIEW_OR_UDF_DEF = 2210; protected final int ERROR_CODE_DOMAIN_OBJECT_DOES_NOT_EXIST = 2003; public static Map getConnectionParameters(String accountName) { Map params = new HashMap<>(); String account; String host; if (accountName == null) { account = TestUtil.systemGetEnv("SNOWFLAKE_TEST_ACCOUNT"); host = TestUtil.systemGetEnv("SNOWFLAKE_TEST_HOST"); } else { account = accountName; // By default, the test will run against reg deployment.
// If developer needs to run in IntelliJ, you can set this env as ".dev.local" String deployment = TestUtil.systemGetEnv("SNOWFLAKE_TEST_DEPLOYMENT"); if (Strings.isNullOrEmpty(deployment)) { deployment = ".reg.local"; } host = accountName.trim() + deployment; } assertThat( "set SNOWFLAKE_TEST_ACCOUNT environment variable to the account name.", !Strings.isNullOrEmpty(account)); params.put("account", account); if (Strings.isNullOrEmpty(host)) { host = account + ".snowflakecomputing.com"; } assertThat( "set SNOWFLAKE_TEST_HOST environment variable to the host name.", !Strings.isNullOrEmpty(host)); params.put("host", host); String protocol = TestUtil.systemGetEnv("SNOWFLAKE_TEST_PROTOCOL"); String ssl; if ("http".equals(protocol)) { ssl = "off"; } else { ssl = "on"; } params.put("ssl", ssl); String user = TestUtil.systemGetEnv("SNOWFLAKE_TEST_USER"); assertThat("set SNOWFLAKE_TEST_USER environment variable.", !Strings.isNullOrEmpty(user)); params.put("user", user); String privateKeyFile = TestUtil.systemGetEnv("SNOWFLAKE_TEST_PRIVATE_KEY_FILE"); if (!Strings.isNullOrEmpty(privateKeyFile)) { String workspace = System.getenv("WORKSPACE"); if (workspace != null) { params.put("private_key_file", java.nio.file.Paths.get(workspace, privateKeyFile).toString()); } else { params.put("private_key_file", privateKeyFile); } params.put("authenticator", "SNOWFLAKE_JWT"); String privateKeyPwd = TestUtil.systemGetEnv("SNOWFLAKE_TEST_PRIVATE_KEY_PWD"); if (!Strings.isNullOrEmpty(privateKeyPwd)) { params.put("private_key_pwd", privateKeyPwd); } } else { String password = TestUtil.systemGetEnv("SNOWFLAKE_TEST_PASSWORD"); if (!Strings.isNullOrEmpty(password)) { params.put("password", password); } else { throw new IllegalStateException("Neither SNOWFLAKE_TEST_PRIVATE_KEY_FILE nor SNOWFLAKE_TEST_PASSWORD environment variable is set. 
Please configure one of them for authentication."); } } String port = TestUtil.systemGetEnv("SNOWFLAKE_TEST_PORT"); if (Strings.isNullOrEmpty(port)) { if ("on".equals(ssl)) { port = "443"; } else { port = "80"; } } assertThat("set SNOWFLAKE_TEST_PORT environment variable.", !Strings.isNullOrEmpty(port)); params.put("port", port); String database = TestUtil.systemGetEnv("SNOWFLAKE_TEST_DATABASE"); assertThat( "set SNOWFLAKE_TEST_DATABASE environment variable.", !Strings.isNullOrEmpty(database)); params.put("database", database); String schema = TestUtil.systemGetEnv("SNOWFLAKE_TEST_SCHEMA"); assertThat("set SNOWFLAKE_TEST_SCHEMA environment variable.", !Strings.isNullOrEmpty(schema)); params.put("schema", schema); String role = TestUtil.systemGetEnv("SNOWFLAKE_TEST_ROLE"); assertThat("set SNOWFLAKE_TEST_ROLE environment variable.", !Strings.isNullOrEmpty(role)); params.put("role", role); String warehouse = TestUtil.systemGetEnv("SNOWFLAKE_TEST_WAREHOUSE"); assertThat( "set SNOWFLAKE_TEST_WAREHOUSE environment variable.", !Strings.isNullOrEmpty(warehouse)); params.put("warehouse", warehouse); params.put("uri", String.format("jdbc:snowflake://%s:%s", host, port)); String adminUser = TestUtil.systemGetEnv("SNOWFLAKE_TEST_ADMIN_USER"); params.put("adminUser", adminUser); String adminPassword = TestUtil.systemGetEnv("SNOWFLAKE_TEST_ADMIN_PASSWORD"); params.put("adminPassword", adminPassword); String ssoUser = TestUtil.systemGetEnv("SNOWFLAKE_TEST_SSO_USER"); params.put("ssoUser", ssoUser); String ssoPassword = TestUtil.systemGetEnv("SNOWFLAKE_TEST_SSO_PASSWORD"); params.put("ssoPassword", ssoPassword); return params; } public static Map getConnectionParameters() { return getConnectionParameters(null); } /** * Gets a connection with default session parameter settings, but tunable query api version and * socket timeout setting * * @param paramProperties connection properties * @return Connection a database connection * @throws SQLException raised if any error occurs */ public static Connection getConnection(Properties paramProperties) throws SQLException { return getConnection(DONT_INJECT_SOCKET_TIMEOUT, paramProperties, false, false); } /** * Gets a connection with custom account name, but otherwise default settings * * @return Connection a database connection * @throws SQLException raised if any error occurs */ public static Connection getConnection(String accountName) throws SQLException { return getConnection(DONT_INJECT_SOCKET_TIMEOUT, null, false, false, accountName); } /** * Gets a connection with default settings * * @return Connection a database connection * @throws SQLException raised if any error occurs */ public static Connection getConnection() throws SQLException { return getConnection(DONT_INJECT_SOCKET_TIMEOUT, null, false, false); } /** * Gets a connection with default session parameter settings, but tunable query api version and * socket timeout setting * * @param injectSocketTimeout number of seconds to inject in connection * @return Connection a database connection * @throws SQLException raised if any error occurs */ public static Connection getConnection(int injectSocketTimeout) throws SQLException { return getConnection(injectSocketTimeout, null, false, false); } /** * Gets a connection with Snowflake admin * * @return Connection a database connection * @throws SQLException raised if any error occurs */ protected static Connection getSnowflakeAdminConnection() throws SQLException { return getConnection(DONT_INJECT_SOCKET_TIMEOUT, null, true, false); } /** * Gets a connection 
with Snowflake admin * * @param paramProperties connection properties * @return Connection a database connection * @throws SQLException raised if any error occurs */ protected static Connection getSnowflakeAdminConnection(Properties paramProperties) throws SQLException { return getConnection(DONT_INJECT_SOCKET_TIMEOUT, paramProperties, true, false); } /** * Gets a connection in same way as function below but with default account (gotten from * environment variables) * * @param injectSocketTimeout * @param paramProperties * @param isAdmin * @param usesCom * @return * @throws SQLException */ public static Connection getConnection( int injectSocketTimeout, Properties paramProperties, boolean isAdmin, boolean usesCom) throws SQLException { return getConnection(injectSocketTimeout, paramProperties, isAdmin, usesCom, null); } /** * Gets a connection for the custom session parameter settings and tunable query api version and * socket timeout setting * * @param injectSocketTimeout number of seconds to inject in connection * @param paramProperties connection properties * @param isAdmin is Snowflake admin user? * @param usesCom uses com.snowflake instead of net.snowflake? * @return Connection database connection * @throws SQLException raised if any error occurs */ public static Connection getConnection( int injectSocketTimeout, Properties paramProperties, boolean isAdmin, boolean usesCom, String accountName) throws SQLException { // Load Snowflake JDBC class String driverClass = DRIVER_CLASS; if (usesCom) { driverClass = DRIVER_CLASS_COM; } try { Class.forName(driverClass); } catch (Exception e) { logger.log(Level.SEVERE, "Cannot find Driver", e); throw new RuntimeException(e.getCause()); } Map params = getConnectionParameters(accountName); // build connection properties Properties properties = new Properties(); if (isAdmin) { assertThat( "set SNOWFLAKE_TEST_ADMIN_USER environment variable.", !Strings.isNullOrEmpty(params.get("adminUser"))); assertThat( "set SNOWFLAKE_TEST_ADMIN_PASSWORD environment variable.", !Strings.isNullOrEmpty(params.get("adminPassword"))); properties.put("user", params.get("adminUser")); properties.put("password", params.get("adminPassword")); properties.put("role", "accountadmin"); properties.put("account", "snowflake"); } else { properties.put("user", params.get("user")); properties.put("role", params.get("role")); properties.put("account", params.get("account")); if (!Strings.isNullOrEmpty(params.get("private_key_file"))) { properties.put("private_key_file", params.get("private_key_file")); if (params.get("authenticator") != null) { properties.put("authenticator", params.get("authenticator")); } if (!Strings.isNullOrEmpty(params.get("private_key_pwd"))) { properties.put("private_key_pwd", params.get("private_key_pwd")); } } else if (!Strings.isNullOrEmpty(params.get("password"))) { properties.put("password", params.get("password")); } } properties.put("db", params.get("database")); properties.put("schema", params.get("schema")); properties.put("warehouse", params.get("warehouse")); properties.put("ssl", params.get("ssl")); properties.put("internal", Boolean.TRUE.toString()); // TODO: do we need this? properties.put("insecureMode", false); // use OCSP for all tests. 
if (injectSocketTimeout > 0) { properties.put("injectSocketTimeout", String.valueOf(injectSocketTimeout)); } // Set the session parameter properties if (paramProperties != null) { for (Map.Entry entry : paramProperties.entrySet()) { properties.put(entry.getKey(), entry.getValue()); } } return DriverManager.getConnection(params.get("uri"), properties); } /** * Close SQL Objects * * @param resultSet a result set object * @param statement a statement object * @param connection a connection * @throws SQLException raised if any error occurs */ public void closeSQLObjects(ResultSet resultSet, Statement statement, Connection connection) throws SQLException { if (resultSet != null) { resultSet.close(); } if (statement != null) { statement.close(); } if (connection != null) { connection.close(); } } /** * Close SQL Objects * * @param statement a statement object * @param connection a connection * @throws SQLException raised if any error occurs */ public void closeSQLObjects(Statement statement, Connection connection) throws SQLException { if (statement != null) { statement.close(); } if (connection != null) { connection.close(); } } /** * Get a full path of the file in Resource * * @param fileName a file name * @return a full path name of the file */ public static String getFullPathFileInResource(String fileName) { ClassLoader classLoader = AbstractDriverIT.class.getClassLoader(); URL url = classLoader.getResource(fileName); if (url != null) { return url.getFile(); } else { throw new RuntimeException("No file is found: " + fileName); } } protected static Timestamp buildTimestamp( int year, int month, int day, int hour, int minute, int second, int fractionInNanoseconds) { Calendar cal = Calendar.getInstance(); cal.set(year, month, day, hour, minute, second); Timestamp ts = new Timestamp(cal.getTime().getTime()); ts.setNanos(fractionInNanoseconds); return ts; } protected static Date buildDate(int year, int month, int day) { Calendar cal = Calendar.getInstance(); cal.set(year, month, day, 0, 0, 0); cal.set(Calendar.MILLISECOND, 0); return new Date(cal.getTime().getTime()); } protected static Date buildDateWithTZ(int year, int month, int day, TimeZone tz) { Calendar cal = Calendar.getInstance(); cal.setTimeZone(tz); cal.set(year, month, day, 0, 0, 0); cal.set(Calendar.MILLISECOND, 0); return new Date(cal.getTime().getTime()); } } ================================================ FILE: FIPS/src/test/java/net/snowflake/client/DontRunOnGCP.java ================================================ package net.snowflake.client; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; import org.junit.jupiter.api.condition.DisabledIfEnvironmentVariable; @Target(ElementType.METHOD) @Retention(RetentionPolicy.RUNTIME) @DisabledIfEnvironmentVariable(named = "CLOUD_PROVIDER", matches = "(?i)GCP(?-i)") public @interface DontRunOnGCP {} ================================================ FILE: FIPS/src/test/java/net/snowflake/client/DontRunOnGithubActions.java ================================================ package net.snowflake.client; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; import org.junit.jupiter.api.condition.DisabledIfEnvironmentVariable; @Target(ElementType.METHOD) @Retention(RetentionPolicy.RUNTIME) @DisabledIfEnvironmentVariable(named = "GITHUB_ACTIONS", matches = ".*") public @interface 
DontRunOnGithubActions {} ================================================ FILE: FIPS/src/test/java/net/snowflake/client/TestUtil.java ================================================ package net.snowflake.client; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.MatcherAssert.assertThat; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import org.junit.jupiter.api.Assertions; public class TestUtil { private static final SFLogger logger = SFLoggerFactory.getLogger(TestUtil.class); /** * Util function to assert that a piece of code throws an SFException and to assert on the * error code * * @param errorCode expected error code * @param testCode the code that will run and is expected to throw an SFException */ public static void assertSFException(int errorCode, TestRunInterface testCode) { try { testCode.run(); Assertions.fail(); } catch (SFException e) { assertThat(e.getVendorCode(), is(errorCode)); } } /** Functional interface used to run a piece of code which throws SFException */ @FunctionalInterface public interface TestRunInterface { void run() throws SFException; } /** * System.getenv wrapper. If System.getenv raises a SecurityException, it is ignored and returns * null. * @deprecated This method should be replaced by SnowflakeUtil.systemGetEnv. *

This is replicated from SnowflakeUtil.systemGetEnv, because the old driver does not provide that * function for the tests to use. Replace this function call with SnowflakeUtil.systemGetEnv * when it is available. * * @param env the environment variable name. * @return the environment variable value if set, otherwise null. */ @Deprecated public static String systemGetEnv(String env) { try { return System.getenv(env); } catch (SecurityException ex) { logger.debug( "Failed to get environment variable {}. Security exception raised: {}", env, ex.getMessage()); } return null; } } ================================================ FILE: FIPS/src/test/java/net/snowflake/client/category/FipsTestSuite.java ================================================ package net.snowflake.client.category; import org.junit.platform.suite.api.ExcludePackages; import org.junit.platform.suite.api.IncludeClassNamePatterns; import org.junit.platform.suite.api.SelectPackages; import org.junit.platform.suite.api.Suite; @Suite @SelectPackages("net.snowflake.client") @ExcludePackages("net.snowflake.client.suites") @IncludeClassNamePatterns(".+") public class FipsTestSuite {} ================================================ FILE: FIPS/src/test/java/net/snowflake/client/jdbc/ConnectionFipsIT.java ================================================ package net.snowflake.client.jdbc; import static org.junit.jupiter.api.Assertions.*; import java.net.URL; import java.nio.file.Files; import java.nio.file.Paths; import java.security.*; import java.sql.Connection; import java.sql.DriverManager; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Properties; import javax.net.ssl.HttpsURLConnection; import net.snowflake.client.AbstractDriverIT; import net.snowflake.client.DontRunOnGCP; import net.snowflake.client.DontRunOnGithubActions; import net.snowflake.client.internal.core.SecurityUtil; import org.apache.commons.codec.binary.Base64; import org.bouncycastle.crypto.CryptoServicesRegistrar; import org.bouncycastle.crypto.fips.FipsStatus; import org.bouncycastle.jcajce.provider.BouncyCastleFipsProvider; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; @Tag("fips") public class ConnectionFipsIT extends AbstractDriverIT { private static final String JCE_PROVIDER_BOUNCY_CASTLE_FIPS = "BCFIPS"; private static final String JCE_PROVIDER_SUN_JCE = "SunJCE"; private static final String JCE_PROVIDER_SUN_RSA_SIGN = "SunRsaSign"; private static final String JCE_KEYSTORE_BOUNCY_CASTLE = "BCFKS"; private static final String JCE_KEYSTORE_JKS = "JKS"; private static final String BOUNCY_CASTLE_RNG_HYBRID_MODE = "C:HYBRID;ENABLE{All};"; private static final String SSL_ENABLED_PROTOCOLS = "TLSv1.2,TLSv1.1,TLSv1";
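// Note: the protocol list above pins TLS 1.2 and below (disabling TLS 1.3), and the
// explicit cipher-suite list below omits the default CHACHA20_POLY1305 suites; see the
// accounts.google.com workaround comment in setup() for the reason.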
"TLS_DHE_RSA_WITH_AES_256_GCM_SHA384," + "TLS_DHE_DSS_WITH_AES_256_GCM_SHA384(," + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256," + "TLS_RSA_WITH_AES_128_GCM_SHA256," + "TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256," + "TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256," + "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256," + "TLS_DHE_DSS_WITH_AES_128_GCM_SHA256," + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384," + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384," + "TLS_RSA_WITH_AES_256_CBC_SHA256," + "TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384," + "TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384," + "TLS_DHE_RSA_WITH_AES_256_CBC_SHA256," + "TLS_DHE_DSS_WITH_AES_256_CBC_SHA256," + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA," + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA," + "TLS_RSA_WITH_AES_256_CBC_SHA," + "TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA," + "TLS_ECDH_RSA_WITH_AES_256_CBC_SHA," + "TLS_DHE_RSA_WITH_AES_256_CBC_SHA," + "TLS_DHE_DSS_WITH_AES_256_CBC_SHA," + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256," + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256," + "TLS_RSA_WITH_AES_128_CBC_SHA256," + "TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256," + "TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256," + "TLS_DHE_RSA_WITH_AES_128_CBC_SHA256," + "TLS_DHE_DSS_WITH_AES_128_CBC_SHA256," + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA," + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA," + "TLS_RSA_WITH_AES_128_CBC_SHA," + "TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA," + "TLS_ECDH_RSA_WITH_AES_128_CBC_SHA," + "TLS_DHE_RSA_WITH_AES_128_CBC_SHA," + "TLS_DHE_DSS_WITH_AES_128_CBC_SHA"; private static final String JAVA_SYSTEM_PROPERTY_SSL_KEYSTORE_TYPE = "javax.net.ssl.keyStoreType"; private static final String JAVA_SYSTEM_PROPERTY_SSL_TRUSTSTORE_TYPE = "javax.net.ssl.trustStoreType"; private static final String JAVA_SYSTEM_PROPERTY_SSL_PROTOCOLS = "jdk.tls.client.protocols"; private static final String JAVA_SYSTEM_PROPERTY_SSL_CIPHERSUITES = "jdk.tls.client.cipherSuites"; private static final String JAVA_SYSTEM_PROPERTY_SSL_NAMEDGROUPS = "jdk.tls.namedGroups"; private static String JAVA_SYSTEM_PROPERTY_SSL_KEYSTORE_TYPE_ORIGINAL_VALUE; private static String JAVA_SYSTEM_PROPERTY_SSL_TRUSTSTORE_TYPE_ORIGINAL_VALUE; private static String JAVA_SYSTEM_PROPERTY_SSL_PROTOCOLS_ORIGINAL_VALUE; private static String JAVA_SYSTEM_PROPERTY_SSL_CIPHERSUITES_ORIGINAL_VALUE; private static Provider JCE_PROVIDER_SUN_JCE_PROVIDER_VALUE; private static Provider JCE_PROVIDER_SUN_RSA_SIGN_PROVIDER_VALUE; private static int JCE_PROVIDER_SUN_JCE_PROVIDER_POSITION; private static int JCE_PROVIDER_SUN_RSA_SIGN_PROVIDER_POSITION; @BeforeAll public static void setup() throws Exception { System.setProperty("javax.net.debug", "ssl"); // Setting up the named group to avoid test failure on GCP environment. 
System.setProperty(JAVA_SYSTEM_PROPERTY_SSL_NAMEDGROUPS, "secp256r1, secp384r1, ffdhe2048, ffdhe3072"); // get keystore types for BouncyCastle libraries JAVA_SYSTEM_PROPERTY_SSL_KEYSTORE_TYPE_ORIGINAL_VALUE = System.getProperty(JAVA_SYSTEM_PROPERTY_SSL_KEYSTORE_TYPE); JAVA_SYSTEM_PROPERTY_SSL_TRUSTSTORE_TYPE_ORIGINAL_VALUE = System.getProperty(JAVA_SYSTEM_PROPERTY_SSL_TRUSTSTORE_TYPE); // set keystore types for BouncyCastle libraries System.setProperty(JAVA_SYSTEM_PROPERTY_SSL_KEYSTORE_TYPE, JCE_KEYSTORE_BOUNCY_CASTLE); System.setProperty(JAVA_SYSTEM_PROPERTY_SSL_TRUSTSTORE_TYPE, JCE_KEYSTORE_JKS); // remove Java's standard encryption and SSL providers List providers = Arrays.asList(Security.getProviders()); JCE_PROVIDER_SUN_JCE_PROVIDER_VALUE = Security.getProvider(JCE_PROVIDER_SUN_JCE); JCE_PROVIDER_SUN_JCE_PROVIDER_POSITION = providers.indexOf(JCE_PROVIDER_SUN_JCE_PROVIDER_VALUE); JCE_PROVIDER_SUN_RSA_SIGN_PROVIDER_VALUE = Security.getProvider(JCE_PROVIDER_SUN_RSA_SIGN); JCE_PROVIDER_SUN_RSA_SIGN_PROVIDER_POSITION = providers.indexOf(JCE_PROVIDER_SUN_RSA_SIGN_PROVIDER_VALUE); Security.removeProvider(JCE_PROVIDER_SUN_JCE); Security.removeProvider(JCE_PROVIDER_SUN_RSA_SIGN); // workaround to connect to accounts.google.com over HTTPS, which consists // of disabling TLS 1.3 and disabling default SSL cipher suites that are // using CHACHA20_POLY1305 algorithms JAVA_SYSTEM_PROPERTY_SSL_PROTOCOLS_ORIGINAL_VALUE = System.getProperty(JAVA_SYSTEM_PROPERTY_SSL_PROTOCOLS); JAVA_SYSTEM_PROPERTY_SSL_CIPHERSUITES_ORIGINAL_VALUE = System.getProperty(JAVA_SYSTEM_PROPERTY_SSL_CIPHERSUITES); System.setProperty(JAVA_SYSTEM_PROPERTY_SSL_PROTOCOLS, SSL_ENABLED_PROTOCOLS); System.setProperty(JAVA_SYSTEM_PROPERTY_SSL_CIPHERSUITES, SSL_ENABLED_CIPHERSUITES); /* * Insert BouncyCastle's FIPS-compliant encryption and SSL providers. */ BouncyCastleFipsProvider bcFipsProvider = new BouncyCastleFipsProvider(BOUNCY_CASTLE_RNG_HYBRID_MODE); /* * We remove BCFIPS provider pessimistically. This is a no-op if provider * does not exist. This is necessary to always add it to the first * position when calling insertProviderAt. * * JavaDoc for insertProviderAt states: * "A provider cannot be added if it is already installed." 
*/ Security.removeProvider(JCE_PROVIDER_BOUNCY_CASTLE_FIPS); Security.insertProviderAt(bcFipsProvider, 1); if (!CryptoServicesRegistrar.isInApprovedOnlyMode()) { if (FipsStatus.isReady()) { CryptoServicesRegistrar.setApprovedOnlyMode(true); } else { throw new RuntimeException( "FIPS is not ready to be enabled and FIPS " + "mode is required for this test to run"); } } // attempts an SSL connection to Google // connectToGoogle(); } @AfterAll public static void teardown() throws Exception { // Remove BouncyCastle FIPS Provider Security.removeProvider(JCE_PROVIDER_BOUNCY_CASTLE_FIPS); // Restore protocols and ciphers changed to connect to accounts.google.com if (JAVA_SYSTEM_PROPERTY_SSL_PROTOCOLS_ORIGINAL_VALUE == null) { System.clearProperty(JAVA_SYSTEM_PROPERTY_SSL_PROTOCOLS); } else { System.setProperty( JAVA_SYSTEM_PROPERTY_SSL_PROTOCOLS, JAVA_SYSTEM_PROPERTY_SSL_PROTOCOLS_ORIGINAL_VALUE); } if (JAVA_SYSTEM_PROPERTY_SSL_CIPHERSUITES_ORIGINAL_VALUE == null) { System.clearProperty(JAVA_SYSTEM_PROPERTY_SSL_CIPHERSUITES); } else { System.setProperty( JAVA_SYSTEM_PROPERTY_SSL_CIPHERSUITES, JAVA_SYSTEM_PROPERTY_SSL_CIPHERSUITES_ORIGINAL_VALUE); } // restore Java's standard encryption and SSL providers Security.insertProviderAt( JCE_PROVIDER_SUN_JCE_PROVIDER_VALUE, JCE_PROVIDER_SUN_JCE_PROVIDER_POSITION); Security.insertProviderAt( JCE_PROVIDER_SUN_RSA_SIGN_PROVIDER_VALUE, JCE_PROVIDER_SUN_RSA_SIGN_PROVIDER_POSITION); // Restore previous keystore values if (JAVA_SYSTEM_PROPERTY_SSL_KEYSTORE_TYPE_ORIGINAL_VALUE == null) { System.clearProperty(JAVA_SYSTEM_PROPERTY_SSL_KEYSTORE_TYPE); } else { System.setProperty( JAVA_SYSTEM_PROPERTY_SSL_KEYSTORE_TYPE, JAVA_SYSTEM_PROPERTY_SSL_KEYSTORE_TYPE_ORIGINAL_VALUE); } if (JAVA_SYSTEM_PROPERTY_SSL_TRUSTSTORE_TYPE_ORIGINAL_VALUE == null) { System.clearProperty(JAVA_SYSTEM_PROPERTY_SSL_TRUSTSTORE_TYPE); } else { System.setProperty( JAVA_SYSTEM_PROPERTY_SSL_TRUSTSTORE_TYPE, JAVA_SYSTEM_PROPERTY_SSL_TRUSTSTORE_TYPE_ORIGINAL_VALUE); } System.clearProperty(SecurityUtil.USE_BUNDLED_BOUNCY_CASTLE_FOR_PRIVATE_KEY_DECRYPTION_JVM); // clear the named group. System.clearProperty(JAVA_SYSTEM_PROPERTY_SSL_NAMEDGROUPS); // attempts an SSL connection to Google // connectToGoogle(); } @Test public void connectWithFips() throws SQLException { Connection con = getConnection(); Statement statement = con.createStatement(); ResultSet resultSet = statement.executeQuery("show parameters"); assertTrue(resultSet.next()); assertFalse(con.isClosed()); statement.close(); con.close(); assertTrue(con.isClosed()); con.close(); // ensure no exception } @Test @DontRunOnGithubActions public void connectWithFipsKeyPair() throws Exception { Map<String, String> parameters = getConnectionParameters(); String testUser = parameters.get("user"); Connection connection = getConnection(); Statement statement = connection.createStatement(); statement.execute("use role accountadmin"); String pathfile = getFullPathFileInResource("rsa_key.pub"); String pubKey = new String(Files.readAllBytes(Paths.get(pathfile))); pubKey = pubKey.replace("-----BEGIN PUBLIC KEY-----", ""); pubKey = pubKey.replace("-----END PUBLIC KEY-----", ""); statement.execute(String.format("alter user %s set rsa_public_key='%s'", testUser, pubKey)); connection.close(); // PKCS8 private key file. No PKCS1 is supported.
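// The two key-pair tests below exercise different ways of supplying the key: this test
// points the driver at a key file via the private_key_file URI parameter, while
// testConnectUsingKeyPair hands a java.security.PrivateKey object to the driver through
// the "privateKey" connection property.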
String privateKeyLocation = getFullPathFileInResource("rsa_key.p8"); String uri = parameters.get("uri") + "/?private_key_file=" + privateKeyLocation; // Create Properties with simple null checks to avoid NullPointerException Properties properties = new Properties(); properties.put("user", testUser); if (parameters.get("account") != null) { properties.put("account", parameters.get("account")); } if (parameters.get("ssl") != null) { properties.put("ssl", parameters.get("ssl")); } if (parameters.get("port") != null) { properties.put("port", parameters.get("port")); } if (parameters.get("database") != null) { properties.put("db", parameters.get("database")); } if (parameters.get("schema") != null) { properties.put("schema", parameters.get("schema")); } connection = DriverManager.getConnection(uri, properties); assertNotNull(connection); connection.close(); } @Test @DontRunOnGithubActions public void testConnectUsingKeyPair() throws Exception { Map parameters = getConnectionParameters(); String testUser = parameters.get("user"); KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("RSA", "BCFIPS"); SecureRandom random = SecureRandom.getInstance("DEFAULT", "BCFIPS"); keyPairGenerator.initialize(2048, random); KeyPair keyPair = keyPairGenerator.generateKeyPair(); PublicKey publicKey = keyPair.getPublic(); PrivateKey privateKey = keyPair.getPrivate(); try (Connection connection = getConnection()) { Statement statement = connection.createStatement(); statement.execute("use role accountadmin"); String encodePublicKey = Base64.encodeBase64String(publicKey.getEncoded()); statement.execute( String.format("alter user %s set rsa_public_key='%s'", testUser, encodePublicKey)); } String uri = parameters.get("uri"); Properties properties = new Properties(); properties.put("user", testUser); if (parameters.get("account") != null) { properties.put("account", parameters.get("account")); } if (parameters.get("ssl") != null) { properties.put("ssl", parameters.get("ssl")); } if (parameters.get("port") != null) { properties.put("port", parameters.get("port")); } if (parameters.get("database") != null) { properties.put("db", parameters.get("database")); } if (parameters.get("schema") != null) { properties.put("schema", parameters.get("schema")); } // test correct private key one properties.put("privateKey", privateKey); DriverManager.getConnection(uri, properties).close(); } /** * Test case for connecting with FIPS and executing a query. 
*/ @Test public void connectWithFipsAndQuery() throws SQLException { try (Connection con = getConnection()) { Statement statement = con.createStatement(); ResultSet resultSet = statement.executeQuery( "select seq8(), randstr(100, random()) from table(generator(rowcount=>10000))"); int cnt = 0; while (resultSet.next()) { assertNotNull(resultSet.getInt(1)); assertNotNull(resultSet.getString(2)); cnt++; } assertEquals(10000, cnt); } } @Test public void connectWithFipsAndPut() throws Exception { try (Connection con = getConnection()) { // put files ResultSet resultSet = con.createStatement() .executeQuery("PUT file://" + getFullPathFileInResource(TEST_DATA_FILE) + " @~"); int cnt = 0; while (resultSet.next()) { cnt++; } assertEquals(1, cnt); } } /** Added in > 3.15.1 */ @Test @DontRunOnGithubActions public void connectWithFipsKeyPairWithBouncyCastle() throws Exception { System.setProperty( SecurityUtil.USE_BUNDLED_BOUNCY_CASTLE_FOR_PRIVATE_KEY_DECRYPTION_JVM, "true"); connectWithFipsKeyPair(); } /** Added in > 3.15.1 */ @Test @DontRunOnGithubActions public void testConnectUsingKeyPairWithBouncyCastle() throws Exception { System.setProperty( SecurityUtil.USE_BUNDLED_BOUNCY_CASTLE_FOR_PRIVATE_KEY_DECRYPTION_JVM, "true"); testConnectUsingKeyPair(); } private static void connectToGoogle() throws Exception { URL url = new URL("https://www.google.com/"); HttpsURLConnection con = (HttpsURLConnection) url.openConnection(); int code = con.getResponseCode(); if (code != 200) { throw new Exception("Got " + code + " instead of HTTP_OK"); } System.out.println("Connected to Google successfully"); } } ================================================ FILE: Jenkinsfile ================================================ @Library('pipeline-utils') import com.snowflake.DevEnvUtils import groovy.json.JsonOutput class JdbcJobDefinition { String jdk List params String jobToRun String runName } pipeline { agent { label 'regular-memory-node-snowos' } options { timestamps() } environment { COMMIT_SHA_LONG = sh(returnStdout: true, script: "echo \$(git rev-parse " + "HEAD)").trim() // environment variables for semgrep_agent (for findings / analytics page) // remove .git at the end // remove SCM URL + .git at the end BASELINE_BRANCH = "${env.CHANGE_TARGET}" } stages { stage('Checkout') { steps { checkout scm } } } } timestamps { node('regular-memory-node-snowos') { stage('checkout') { scmInfo = checkout scm println("${scmInfo}") env.GIT_BRANCH = scmInfo.GIT_BRANCH } stage('Authenticate Artifactory') { script { new DevEnvUtils().withSfCli { sh "sf artifact oci auth" } } } stage('Build') { sh '''\ |export JAVA_HOME=/usr/java/latest |export PATH=$JAVA_HOME/bin:$PATH |export GIT_BRANCH=${GIT_BRANCH} |$WORKSPACE/ci/build.sh '''.stripMargin() } jdkToParams = ['openjdk8': 'jdbc-rockylinux8-openjdk8', 'openjdk11': 'jdbc-rockylinux8-openjdk11', 'openjdk17': 'jdbc-rockylinux8-openjdk17', 'openjdk21': 'jdbc-rockylinux8-openjdk21'].collectEntries { jdk, image -> return [(jdk): [ string(name: 'client_git_branch', value: scmInfo.GIT_BRANCH), string(name: 'client_git_commit', value: scmInfo.GIT_COMMIT), string(name: 'branch', value: 'main'), string(name: 'TARGET_DOCKER_TEST_IMAGE', value: image), string(name: 'parent_job', value: env.JOB_NAME), string(name: 'parent_build_number', value: env.BUILD_NUMBER), string(name: 'timeout_value', value: '420'), string(name: 'PR_Key', value: scmInfo.GIT_BRANCH.substring(3)), string(name: 'svn_revision', value: 'sut-stable') ]] } jobDefinitions = jdkToParams.collectMany { jdk, params -> return [
'RT-LanguageJDBC1-PC' : "Test JDBC 1 - $jdk", 'RT-LanguageJDBC2-PC' : "Test JDBC 2 - $jdk", 'RT-LanguageJDBC3-PC' : "Test JDBC 3 - $jdk", 'RT-LanguageJDBC4-PC' : "Test JDBC 4 - $jdk", ].collect { jobToRun, runName -> return new JdbcJobDefinition( jdk: jdk, params: params, jobToRun: jobToRun, runName: runName ) } }.collectEntries { jobDefinition -> return [(jobDefinition.runName): { build job: jobDefinition.jobToRun, parameters: jobDefinition.params }] } jobDefinitions.put('JDBC-AIX-Unit', { build job: 'JDBC-AIX-UnitTests', parameters: [ string(name: 'BRANCH', value: scmInfo.GIT_BRANCH ) ] } ) jobDefinitions.put('Test Authentication', { withCredentials([ string(credentialsId: 'sfctest0-parameters-secret', variable: 'PARAMETERS_SECRET'), ]) { sh '''\ |#!/bin/bash |set -e |ci/test_authentication.sh '''.stripMargin() } }) jobDefinitions.put('Test WIF', { withCredentials([ string(credentialsId: 'sfctest0-parameters-secret', variable: 'PARAMETERS_SECRET'), ]) { sh '''\ |#!/bin/bash |set -e |ci/test_wif.sh '''.stripMargin() } }) jobDefinitions.put('Test Revocation Validation', { withCredentials([ usernamePassword(credentialsId: 'jenkins-snowflakedb-github-app', usernameVariable: 'GITHUB_USER', passwordVariable: 'GITHUB_TOKEN') ]) { try { sh '''\ |#!/bin/bash -e |chmod +x $WORKSPACE/ci/test_revocation.sh |$WORKSPACE/ci/test_revocation.sh '''.stripMargin() } finally { archiveArtifacts artifacts: 'revocation-results.json,revocation-report.html', allowEmptyArchive: true publishHTML(target: [ allowMissing: true, alwaysLinkToLastBuild: true, keepAll: true, reportDir: '.', reportFiles: 'revocation-report.html', reportName: 'Revocation Validation Report' ]) } } }) stage('Test') { parallel (jobDefinitions) } } } def wgetUpdateGithub(String state, String folder, String targetUrl, String seconds) { def ghURL = "https://api.github.com/repos/snowflakedb/snowflake-jdbc/statuses/$COMMIT_SHA_LONG" def data = JsonOutput.toJson([state: "${state}", context: "jenkins/${folder}",target_url: "${targetUrl}"]) sh "wget ${ghURL} --spider -q --header='Authorization: token $GIT_PASSWORD' --post-data='${data}'" } ================================================ FILE: LICENSE.txt ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright (c) 2013-2018 Snowflake Computing, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: README.rst ================================================ Snowflake JDBC Driver ********************* .. image:: https://github.com/snowflakedb/snowflake-jdbc/workflows/Build%20and%20Test/badge.svg?branch=master :target: https://github.com/snowflakedb/snowflake-jdbc/actions?query=workflow%3A%22Build+and+Test%22+branch%3Amaster .. image:: https://codecov.io/gh/snowflakedb/snowflake-jdbc/branch/master/graph/badge.svg?token=Mj6uPxk0pV :target: https://codecov.io/gh/snowflakedb/snowflake-jdbc .. image:: http://img.shields.io/:license-Apache%202-brightgreen.svg :target: http://www.apache.org/licenses/LICENSE-2.0.txt Snowflake provides a JDBC type 4 driver that supports core functionality, allowing Java programs to connect to Snowflake. .. |maven-snowflake-jdbc| image:: https://maven-badges.herokuapp.com/maven-central/net.snowflake/snowflake-jdbc/badge.svg?style=plastic :target: https://repo1.maven.org/maven2/net/snowflake/snowflake-jdbc/ ..
|maven-snowflake-jdbc-fips| image:: https://maven-badges.herokuapp.com/maven-central/net.snowflake/snowflake-jdbc-fips/badge.svg?style=plastic :target: https://repo1.maven.org/maven2/net/snowflake/snowflake-jdbc-fips/ .. |maven-snowflake-jdbc-thin| image:: https://maven-badges.herokuapp.com/maven-central/net.snowflake/snowflake-jdbc-thin/badge.svg?style=plastic :target: https://repo1.maven.org/maven2/net/snowflake/snowflake-jdbc-thin/ - snowflake-jdbc (fat-jar): |maven-snowflake-jdbc| - snowflake-jdbc-fips (FIPS compliant fat-jar): |maven-snowflake-jdbc-fips| - snowflake-jdbc-thin (thin-jar): |maven-snowflake-jdbc-thin| Prerequisites ============= The Snowflake JDBC driver requires Java 1.8 or higher. If the minimum required version of Java is not installed on the client machines where the JDBC driver is installed, you must install either Oracle Java or OpenJDK. Installation ============ Maven ----- Add the following dependency for the fat-jar:

.. code-block:: xml

    <dependency>
      <groupId>net.snowflake</groupId>
      <artifactId>snowflake-jdbc</artifactId>
      <version>{version}</version>
    </dependency>

or for the FIPS compliant fat-jar:

.. code-block:: xml

    <dependency>
      <groupId>net.snowflake</groupId>
      <artifactId>snowflake-jdbc-fips</artifactId>
      <version>{version}</version>
    </dependency>

or for the thin-jar:

.. code-block:: xml

    <dependency>
      <groupId>net.snowflake</groupId>
      <artifactId>snowflake-jdbc-thin</artifactId>
      <version>{version}</version>
    </dependency>

Build from Source Code ---------------------- 1. Check out the source code from GitHub by running: .. code-block:: bash git clone https://github.com/snowflakedb/snowflake-jdbc.git 2. Build the fat-jar and install it in the local maven repository by running: .. code-block:: bash ./mvnw clean verify ./mvnw org.apache.maven.plugins:maven-install-plugin:3.1.1:install-file -Dfile=target/snowflake-jdbc.jar -DpomFile=./public_pom.xml 3. Build the FIPS compliant fat-jar and install it in the local maven repository by running: .. code-block:: bash cd FIPS ../mvnw clean verify ../mvnw org.apache.maven.plugins:maven-install-plugin:3.1.1:install-file -Dfile=target/snowflake-jdbc-fips.jar -DpomFile=./public_pom.xml cd - 4. Build the thin-jar and install it in the local maven repository by running: .. code-block:: bash ./mvnw clean verify -Dnot-self-contained-jar -Dthin-jar ./mvnw org.apache.maven.plugins:maven-install-plugin:3.1.1:install-file -Dfile=target/snowflake-jdbc-thin.jar -DpomFile=./thin_public_pom.xml -Dnot-self-contained-jar -Dthin-jar - ``thin-jar`` enables the thin jar profile - ``not-self-contained-jar`` turns off the fat jar profile (enabled by default) 5. **Note that the built dependencies are installed with version 1.0-SNAPSHOT** Usage ===== Load Driver Class ----------------- .. code-block:: java Class.forName("net.snowflake.client.api.driver.SnowflakeDriver") Note: The legacy driver class ``net.snowflake.client.jdbc.SnowflakeDriver`` is still available for backward compatibility but is deprecated. Datasource ---------- Use ``SnowflakeDataSourceFactory`` to create DataSource instances: .. code-block:: java import net.snowflake.client.api.datasource.SnowflakeDataSource; import net.snowflake.client.api.datasource.SnowflakeDataSourceFactory; SnowflakeDataSource ds = SnowflakeDataSourceFactory.createDataSource(); ds.setAccount("myaccount"); ds.setUser("myuser"); ds.setPassword("mypassword"); Connection String ----------------- US(West) Region: .. code-block:: bash jdbc:snowflake://<account_name>.snowflakecomputing.com/?<connection_params> EU(Frankfurt) Region: .. code-block:: bash jdbc:snowflake://<account_name>.eu-central-1.snowflakecomputing.com/?<connection_params>
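For illustration, a minimal connection and query might look like the following sketch; ``<account_name>`` and the credential and object values are placeholders to substitute, and the property keys shown are the standard Snowflake JDBC connection properties:

.. code-block:: java

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;
    import java.util.Properties;

    public class SnowflakeConnectExample {
      public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("user", "<user>");
        props.put("password", "<password>");
        props.put("db", "<database>");
        props.put("schema", "<schema>");
        props.put("warehouse", "<warehouse>");
        // Open a connection, run a trivial query, and print the result.
        try (Connection con =
                DriverManager.getConnection(
                    "jdbc:snowflake://<account_name>.snowflakecomputing.com/", props);
            Statement stmt = con.createStatement();
            ResultSet rs = stmt.executeQuery("select current_version()")) {
          while (rs.next()) {
            System.out.println(rs.getString(1));
          }
        }
      }
    }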
Documentation ============= For detailed documentation, please refer to https://docs.snowflake.net/manuals/user-guide/jdbc.html Development ============= Run the maven command to check the coding style. .. code-block:: bash mvn -P check-style validate Follow the instructions if any errors occur, or run this command to fix the formatting: .. code-block:: bash mvn com.spotify.fmt:fmt-maven-plugin:format You may import the coding style into IntelliJ so that the coding style can be applied in the IDE: - In **File** -> **Settings** -> **Plugins**, install the `google-java-format` plugin. - Enable `google-java-format` for the JDBC project. - In the source code window, select **Code** -> **Reformat** to apply the coding style. - Additionally configure the IDE in **File** -> **Settings** -> **Editor** -> **Code Style** -> **Java** to - not use wildcard imports (tab **Imports**): set **Use single class import**, set **Class count to use import with '*'** to 1000 and **Names count to use static import with '*'** to 1000 - always use braces in ``if/while/for/do..while`` (tab **Wrapping and Braces**) Tests ===== Run Tests --------- Set the environment variables to specify the target database. .. code-block:: bash export SNOWFLAKE_TEST_HOST=<host> export SNOWFLAKE_TEST_ACCOUNT=<account> export SNOWFLAKE_TEST_USER=<user> export SNOWFLAKE_TEST_PASSWORD=<password> export SNOWFLAKE_TEST_DATABASE=<database> export SNOWFLAKE_TEST_SCHEMA=<schema> export SNOWFLAKE_TEST_WAREHOUSE=<warehouse> export SNOWFLAKE_TEST_ROLE=<role> Run the maven ``verify`` goal. .. code-block:: bash mvn -DjenkinsIT -DtestCategory=net.snowflake.client.category.<category> verify where ``<category>`` is the class name under the package ``net.snowflake.client.category``. Prepare new version ------------------- Run the script passing the desired version: .. code-block:: bash ./prepareNewVersion.sh 3.100.42 Add the SNAPSHOT suffix when necessary: .. code-block:: bash ./prepareNewVersion.sh 3.100.42-SNAPSHOT Test Class Naming Convention ---------------------------- The test cases fall into a few categories: - The unit test class names end with ``Test``. They run as part of the JDBC build jobs. - The integration test class names end with ``IT``. They run as part of the ``verify`` maven goal along with the test category specified by the parameter ``testCategory`` having ``net.snowflake.client.category`` classes. - The manual test class names end with ``Manual``. They don't run in the CI, but you can run them manually. Aside from the general test criteria, the test case class names ending with ``LatestIT`` run only with the latest JDBC driver. The main motivation behind this is to skip those tests for the old JDBC driver. See the ``./TestOnly`` directory for further information. Support ============= Feel free to file an issue or submit a PR here for general cases. For official support, contact Snowflake support at: https://community.snowflake.com/s/article/How-To-Submit-a-Support-Case-in-Snowflake-Lodge Notes ---------- This driver supports GCP regional endpoints starting from version 3.21.0. Please ensure that any workloads using this driver below version 3.21.0 do not require support for regional endpoints on GCP. If you have questions about this, please contact Snowflake Support. The driver uses a Rust library called sf_mini_core; you can find its source code at https://github.com/snowflakedb/universal-driver/tree/main/sf_mini_core ================================================ FILE: SECURITY.md ================================================ # Security Policy Please refer to the Snowflake [HackerOne program](https://hackerone.com/snowflake?type=team) for our security policies and for reporting any security vulnerabilities.
For other security related questions and concerns, please contact the Snowflake security team at security@snowflake.com. ================================================ FILE: TestOnly/.gitignore ================================================ target/ .idea/ ================================================ FILE: TestOnly/README.rst ================================================ Test Only Maven Project *********************** This directory includes a maven project to run the Snowflake JDBC tests with the specified JDBC version. The primary goal is to run the tests against the oldest supported JDBC driver to ensure no regression. Run Tests ========== .. code-block:: bash mvn -DjenkinsIT -DtestCategory=net.snowflake.client.category.<category> verify where ``<category>`` is the class name under the package ``net.snowflake.client.category``. Update the JDBC version ======================= Here are the steps to update the target JDBC driver version: - Change the project version in ``pom.xml`` to the JDBC version that you want to run the tests against. - Locate the ``maven-compiler-plugin`` plugin in ``pom.xml``. - Delete the test case class files that should not run along with the JDBC version. - Check ``*LatestIT.java`` and move the test cases that should run along with the JDBC to the base classes. ================================================ FILE: TestOnly/pom.xml ================================================ 4.0.0 net.snowflake snowflake-jdbc-test 3.13.21 snowflake-jdbc-test http://maven.apache.org UTF-8 UTF-8 17.0.0 4.2.0 2.17.2 0.8.4 true 5.13.0 5.11.1 3.5.1 3.5.6 4.1.133.Final 4.5.14 1.84 net.snowflake.client.jdbc.internal io.netty netty-common ${netty.version} org.apache.httpcomponents httpclient ${apache.httpclient.version} io.netty netty-buffer ${netty.version} org.apache.maven.plugins maven-failsafe-plugin 3.0.0-M1 test org.junit.jupiter junit-jupiter-api ${junit.version} test org.junit.jupiter junit-jupiter-params ${junit.version} test org.junit.jupiter junit-jupiter-engine ${junit.version} test org.junit.platform junit-platform-suite 1.11.1 test org.junit.platform junit-platform-engine 1.11.1 test org.junit.platform junit-platform-runner 1.11.1 test org.junit.platform junit-platform-suite-api 1.11.1 test org.junit.platform junit-platform-suite-engine 1.11.1 test org.junit.platform junit-platform-launcher 1.11.1 test org.hamcrest hamcrest 2.1 jar test org.mockito mockito-inline ${mockito.version} test org.awaitility awaitility ${awaitility.version} test commons-dbcp commons-dbcp 1.4 test com.mchange c3p0 0.9.5.4 jar test net.snowflake snowflake-jdbc ${project.version} jar net.snowflake snowflake-common 4.0.2-SNAPSHOT jar compile commons-cli commons-cli 1.2 jar test org.apache.commons commons-lang3 3.10 jar test org.apache.commons commons-text 1.10.0 jar test commons-io commons-io 2.2 jar test org.codehaus.mojo exec-maven-plugin 1.2.1 test org.apache.commons commons-compress 1.28.0 test net.minidev json-smart-mini 1.0.6.3 com.fasterxml.jackson.core jackson-databind ${jacksondatabind.version} jar org.apache.arrow arrow-vector ${arrow.version} io.netty netty-common io.netty netty-buffer org.apache.arrow arrow-memory-unsafe ${arrow.version} org.apache.arrow arrow-memory-netty-buffer-patch ${arrow.version} io.netty netty-common io.netty netty-buffer com.zaxxer HikariCP 2.4.3 test com.google.guava guava 32.0.0-jre net.java.dev.jna jna ${jna.version} net.java.dev.jna jna-platform ${jna.version} org.bouncycastle bcpkix-jdk18on ${bouncycastle.version} ${basedir}/../src/main/java ${basedir}/../src/test/java
${basedir}/../src/main/resources true ${basedir}/../src/test/resources org.apache.maven.plugins maven-clean-plugin 3.0.0 lib *.jar org.apache.maven.plugins maven-install-plugin 3.0.0-M1 install-arrow-memory validate install-file ${project.basedir}/../dependencies/arrow-memory-${arrow.version}.pom org.apache.arrow arrow-memory ${arrow.version} pom true install-arrow-memory-core validate install-file ${project.basedir}/../dependencies/arrow-memory-core-${arrow.version}.jar org.apache.arrow arrow-memory-core ${arrow.version} jar true install-arrow-memory-unsafe validate install-file ${project.basedir}/../dependencies/arrow-memory-unsafe-${arrow.version}.jar org.apache.arrow arrow-memory-unsafe ${arrow.version} jar true install-arrow-memory-netty-buffer-patch validate install-file ${project.basedir}/../dependencies/arrow-memory-netty-buffer-patch-${arrow.version}.jar org.apache.arrow arrow-memory-netty ${arrow.version} jar true install-arrow-format validate install-file ${project.basedir}/../dependencies/arrow-format-${arrow.version}.jar org.apache.arrow arrow-format ${arrow.version} jar true install-arrow-vector validate install-file ${project.basedir}/../dependencies/arrow-vector-${arrow.version}.jar org.apache.arrow arrow-vector ${arrow.version} jar true org.apache.maven.plugins maven-compiler-plugin 3.8.0 true 8 8 **/net/snowflake/client/core/arrow/*.java **/ResultSetArrow*.java **/PreparedStatementArrow*IT.java **/SFArrowResultSetIT.java **/structuredtypes/sqldata/*.java **/*LatestIT.java **/*WiremockIT.java **/*Test.java **/*Manual.java **/TelemetryServiceIT.java **/TelemetryIT.java **/SFTrustManagerIT.java **/SLF4JLoggerIT.java **/ResultSetAsyncIT.java **/SnowflakeResultSetSerializable*IT.java **/AuthTestHelper.java false org.apache.maven.plugins maven-surefire-plugin org.apache.maven.surefire surefire-junit-platform ${surefire.version} ${surefire.version} org.apache.maven.plugins maven-failsafe-plugin org.apache.maven.surefire surefire-junit-platform ${surefire.version} ${surefire.version} jenkinsIT jenkinsIT org.apache.maven.plugins maven-surefire-plugin false test org.apache.maven.plugins maven-failsafe-plugin verify DefaultIT integration-test net.snowflake.client.internal.log.JDK14Logger ${basedir}/src/test/resources/logging.properties ${integrationTestSuites} org.jacoco jacoco-maven-plugin ${jacoco.version} pre-integration-test pre-integration-test prepare-agent target/jacoco-it.exec post-integration-test post-integration-test report target/jacoco-it.exec target/jacoco-it ${jacoco.skip.instrument} ================================================ FILE: ci/_init.sh ================================================ #!/usr/local/bin/env bash set -e export PLATFORM=$(echo $(uname) | tr '[:upper:]' '[:lower:]') export INTERNAL_REPO=artifactory.ci1.us-west-2.aws-dev.app.snowflake.com/internal-development-docker-drivers-local if [[ -z "$GITHUB_ACTIONS" ]]; then # Use the internal Docker Registry export DOCKER_REGISTRY_NAME=$INTERNAL_REPO/snowflakedb export WORKSPACE=${WORKSPACE:-/tmp} else # Use Docker Hub export DOCKER_REGISTRY_NAME=snowflakedb export WORKSPACE=$GITHUB_WORKSPACE fi mkdir -p $WORKSPACE export DRIVER_NAME=jdbc # Test Images TEST_IMAGE_VERSION=1 declare -A TEST_IMAGE_NAMES=( [$DRIVER_NAME-rockylinux8-openjdk8]=$DOCKER_REGISTRY_NAME/client-$DRIVER_NAME-rockylinux8-openjdk8-test:$TEST_IMAGE_VERSION [$DRIVER_NAME-rockylinux8-openjdk11]=$DOCKER_REGISTRY_NAME/client-$DRIVER_NAME-rockylinux8-openjdk11-test:$TEST_IMAGE_VERSION 
[$DRIVER_NAME-rockylinux8-openjdk17]=$DOCKER_REGISTRY_NAME/client-$DRIVER_NAME-rockylinux8-openjdk17-test:$TEST_IMAGE_VERSION [$DRIVER_NAME-rockylinux8-openjdk21]=$DOCKER_REGISTRY_NAME/client-$DRIVER_NAME-rockylinux8-openjdk21-test:$TEST_IMAGE_VERSION [$DRIVER_NAME-rockylinux9-openjdk8]=$DOCKER_REGISTRY_NAME/client-$DRIVER_NAME-rockylinux9-openjdk8-test:$TEST_IMAGE_VERSION [$DRIVER_NAME-rockylinux9-openjdk11]=$DOCKER_REGISTRY_NAME/client-$DRIVER_NAME-rockylinux9-openjdk11-test:$TEST_IMAGE_VERSION [$DRIVER_NAME-rockylinux9-openjdk17]=$DOCKER_REGISTRY_NAME/client-$DRIVER_NAME-rockylinux9-openjdk17-test:$TEST_IMAGE_VERSION [$DRIVER_NAME-rockylinux9-openjdk21]=$DOCKER_REGISTRY_NAME/client-$DRIVER_NAME-rockylinux9-openjdk21-test:$TEST_IMAGE_VERSION ) export TEST_IMAGE_NAMES declare -A TEST_IMAGE_DOCKERFILES=( [$DRIVER_NAME-rockylinux8-openjdk8]=jdbc-rockylinux8-openjdk-test [$DRIVER_NAME-rockylinux8-openjdk11]=jdbc-rockylinux8-openjdk-test [$DRIVER_NAME-rockylinux8-openjdk17]=jdbc-rockylinux8-openjdk-test [$DRIVER_NAME-rockylinux8-openjdk21]=jdbc-rockylinux8-openjdk-test [$DRIVER_NAME-rockylinux9-openjdk8]=jdbc-rockylinux-openjdk-test [$DRIVER_NAME-rockylinux9-openjdk11]=jdbc-rockylinux-openjdk-test [$DRIVER_NAME-rockylinux9-openjdk17]=jdbc-rockylinux-openjdk-test [$DRIVER_NAME-rockylinux9-openjdk21]=jdbc-rockylinux-openjdk-test ) declare -A TEST_IMAGE_BUILD_ARGS=( [$DRIVER_NAME-rockylinux8-openjdk8]="--target jdbc-rockylinux8-openjdk8" [$DRIVER_NAME-rockylinux8-openjdk11]="--target jdbc-rockylinux8-openjdk11" [$DRIVER_NAME-rockylinux8-openjdk17]="--target jdbc-rockylinux8-openjdk17" [$DRIVER_NAME-rockylinux8-openjdk21]="--target jdbc-rockylinux8-openjdk21" [$DRIVER_NAME-rockylinux9-openjdk8]="--target jdbc-rockylinux-openjdk8" [$DRIVER_NAME-rockylinux9-openjdk11]="--target jdbc-rockylinux-openjdk11" [$DRIVER_NAME-rockylinux9-openjdk17]="--target jdbc-rockylinux-openjdk17" [$DRIVER_NAME-rockylinux9-openjdk21]="--target jdbc-rockylinux-openjdk21" ) ================================================ FILE: ci/build.sh ================================================ #!/usr/bin/env bash set -e # # Build JDBC driver # set -o pipefail THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" export WORKSPACE=${WORKSPACE:=/tmp} $THIS_DIR/container/build_component.sh ================================================ FILE: ci/container/build_component.sh ================================================ #!/bin/bash -e # # Build JDBC driver # set -o pipefail THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" JDBC_ROOT=$(cd "${THIS_DIR}/../../" && pwd) cd $JDBC_ROOT rm -f lib/*.jar mvn clean install -DskipTests --batch-mode --show-version cd FIPS rm -f lib/*.jar mvn clean install -DskipTests -Dsurefire.argLine="-Djavax.net.debug=ssl:handshake" -Dfailsafe.argLine="-Djavax.net.debug=ssl:handshake" --batch-mode --show-version $THIS_DIR/upload_artifact.sh ================================================ FILE: ci/container/change_snowflake_test_pwd.py ================================================ #!/usr/bin/env python # # Set a complex password for test user snowman # import os import sys import snowflake.connector params = { 'account': '', 'user': os.getenv("SNOWFLAKE_TEST_USER"), 'password': os.getenv("SNOWFLAKE_TEST_PASSWORD"), 'database': os.getenv("SNOWFLAKE_TEST_DATABASE"), 'role': os.getenv("SNOWFLAKE_TEST_ROLE"), 'host': os.getenv("SNOWFLAKE_TEST_HOST"), 'port': os.getenv("SNOWFLAKE_TEST_PORT"), 'protocol': os.getenv("SNOWFLAKE_TEST_PROTOCOL"), } for account in ["testaccount", 
"s3testaccount", "azureaccount", "gcpaccount"]: params['account'] = account conn = snowflake.connector.connect(**params) conn.cursor().execute("use role accountadmin") cmd = "alter user set password = '{}'".format(os.getenv("SNOWFLAKE_TEST_PASSWORD_NEW")) print(cmd) conn.cursor().execute(cmd) conn.close() ================================================ FILE: ci/container/create_schema.py ================================================ #!/usr/bin/env python # # Create test schema # import os import sys import snowflake.connector import sf_test_utils test_schema = sf_test_utils.get_test_schema() if not test_schema: sys.exit(2) params = sf_test_utils.init_connection_params() con = snowflake.connector.connect(**params) con.cursor().execute("create or replace schema {0}".format(test_schema)) sys.exit(0) ================================================ FILE: ci/container/download_artifact.sh ================================================ #!/bin/bash -e # # Download Artifact # set -o pipefail THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" JDBC_ROOT=$(cd "${THIS_DIR}/../../" && pwd) if [[ -z "$GITHUB_ACTIONS" ]] ;then export GIT_BRANCH=${GIT_BRANCH:-origin/$(git rev-parse --abbrev-ref HEAD)} BRANCH=$(basename ${GIT_BRANCH}) # Place to hold downloaded library export LIB_DIR=$WORKSPACE/lib if [[ "$is_old_driver" != "true" ]]; then # Not Old Driver test mkdir -p $LIB_DIR pushd $LIB_DIR >& /dev/null base_stage=s3://sfc-eng-jenkins/repository/jdbc/${BRANCH} export GIT_COMMIT=${GIT_COMMIT:-$(aws s3 cp $base_stage/latest_commit -)} source_stage=$base_stage/${GIT_COMMIT} echo "[INFO] downloading ${source_stage}/" aws s3 cp --only-show-errors $source_stage/ . --recursive popd >& /dev/null mkdir -p /mnt/host/lib cp -p $LIB_DIR/*.jar /mnt/host/lib fi else export GIT_BRANCH=origin/$(basename ${GITHUB_REF}) export GIT_COMMIT=${GITHUB_SHA} fi ================================================ FILE: ci/container/drop_schema.py ================================================ #!/usr/bin/env python # # Create test schema # import os import sys import snowflake.connector import sf_test_utils test_schema = sf_test_utils.get_test_schema() if not test_schema: sys.exit(0) params = sf_test_utils.init_connection_params() con = snowflake.connector.connect(**params) con.cursor().execute("drop schema if exists {0}".format(test_schema)) sys.exit(0) ================================================ FILE: ci/container/hang_webserver.py ================================================ #!/usr/bin/env python3 import sys from http.server import BaseHTTPRequestHandler,HTTPServer from socketserver import ThreadingMixIn import threading import time class HTTPRequestHandler(BaseHTTPRequestHandler): def do_POST(self): if self.path.startswith('/403'): self.send_response(403) self.send_header('Content-Type', 'text/plain') self.end_headers() elif self.path.startswith('/404'): self.send_response(404) self.send_header('Content-Type', 'text/plain') self.end_headers() elif self.path.startswith('/hang'): time.sleep(300) self.send_response(200, 'OK') self.send_header('Content-Type', 'text/plain') self.end_headers() else: self.send_response(200, 'OK') self.send_header('Content-Type', 'text/plain') self.end_headers() do_GET = do_POST class ThreadedHTTPServer(ThreadingMixIn, HTTPServer): allow_reuse_address = True def shutdown(self): self.socket.close() HTTPServer.shutdown(self) class SimpleHttpServer(): def __init__(self, ip, port): self.server = ThreadedHTTPServer((ip,port), HTTPRequestHandler) def start(self): self.server_thread 
= threading.Thread(target=self.server.serve_forever) self.server_thread.daemon = True self.server_thread.start() def waitForThread(self): self.server_thread.join() def stop(self): self.server.shutdown() self.waitForThread() if __name__=='__main__': if len(sys.argv) != 2: print("Usage: python3 {} PORT".format(sys.argv[0])) sys.exit(2) PORT = int(sys.argv[1]) server = SimpleHttpServer('localhost', PORT) print('HTTP Server Running on PORT {}..........'.format(PORT)) server.start() server.waitForThread() ================================================ FILE: ci/container/sf_test_utils.py ================================================ #!/usr/bin/env python # # Snowflake test utils # import os import sys def get_test_schema(): return os.getenv("TARGET_SCHEMA_NAME", "LOCAL_reg_1") def init_connection_params(): params = { 'account': os.getenv("SNOWFLAKE_TEST_ACCOUNT"), 'user': os.getenv("SNOWFLAKE_TEST_USER"), 'database': os.getenv("SNOWFLAKE_TEST_DATABASE"), 'role': os.getenv("SNOWFLAKE_TEST_ROLE"), } private_key_file = os.getenv("SNOWFLAKE_TEST_PRIVATE_KEY_FILE") if private_key_file: workspace = os.getenv("WORKSPACE") if workspace: key_path = os.path.join(workspace, private_key_file) else: key_path = private_key_file try: from cryptography.hazmat.primitives import serialization from cryptography.hazmat.primitives.serialization import load_pem_private_key from cryptography.hazmat.backends import default_backend with open(key_path, 'rb') as key_file: pem_data = key_file.read() private_key_pwd = os.getenv("SNOWFLAKE_TEST_PRIVATE_KEY_PWD") private_key_pwd_bytes = None if private_key_pwd: private_key_pwd_bytes = private_key_pwd.encode('utf-8') private_key_obj = load_pem_private_key(pem_data, password=private_key_pwd_bytes, backend=default_backend()) der_data = private_key_obj.private_bytes( encoding=serialization.Encoding.DER, format=serialization.PrivateFormat.PKCS8, encryption_algorithm=serialization.NoEncryption() ) params['private_key'] = der_data params['authenticator'] = 'SNOWFLAKE_JWT' if private_key_pwd: params['private_key_pwd'] = private_key_pwd except Exception as e: print(f"ERROR: Failed to read private key file {key_path}: {e}") sys.exit(1) else: params['password'] = os.getenv("SNOWFLAKE_TEST_PASSWORD") host = os.getenv("SNOWFLAKE_TEST_HOST") if host: params['host'] = host port = os.getenv("SNOWFLAKE_TEST_PORT") if port: params['port'] = port protocol = os.getenv("SNOWFLAKE_TEST_PROTOCOL") if protocol: params['protocol'] = protocol warehouse = os.getenv("SNOWFLAKE_TEST_WAREHOUSE") if warehouse: params['warehouse'] = warehouse return params ================================================ FILE: ci/container/test_authentication.sh ================================================ #!/bin/bash -e set -o pipefail export WORKSPACE=${WORKSPACE:-/mnt/workspace} export SOURCE_ROOT=${SOURCE_ROOT:-/mnt/host} MVNW_EXE=$SOURCE_ROOT/mvnw AUTH_PARAMETER_FILE=./.github/workflows/parameters_aws_auth_tests.json eval $(jq -r '.authtestparams | to_entries | map("export \(.key)=\(.value|tostring)")|.[]' $AUTH_PARAMETER_FILE) $MVNW_EXE -DjenkinsIT \ -Dnet.snowflake.jdbc.temporaryCredentialCacheDir=/mnt/workspace/abc \ -Dnet.snowflake.jdbc.ocspResponseCacheDir=/mnt/workspace/abc \ -Djava.io.tmpdir=$WORKSPACE \ -Djacoco.skip.instrument=true \ -Dskip.unitTests=true \ -DintegrationTestSuites=AuthenticationTestSuite \ -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn \ -Dnot-self-contained-jar \ -Denforcer.skip=true \ clean verify \ --batch-mode --show-version 
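Note on the parameter-expansion idiom above: test_authentication.sh (and test_component.sh below) turn a JSON object from the decrypted parameter file into exported environment variables with a single jq pipeline. A minimal sketch of the pattern follows; the file name and key below are illustrative, not the real encrypted parameter files:

    # sample.json (illustrative): {"authtestparams": {"SNOWFLAKE_AUTH_TEST_HOST": "example.snowflakecomputing.com"}}
    eval $(jq -r '.authtestparams | to_entries | map("export \(.key)=\(.value|tostring)")|.[]' sample.json)
    echo "$SNOWFLAKE_AUTH_TEST_HOST"   # -> example.snowflakecomputing.com

to_entries converts the object into key/value pairs, map renders each pair as an `export K=V` line, and eval executes those lines in the current shell. Because values are substituted unquoted, this only works for simple scalar values without spaces or shell metacharacters, which is what these parameter files contain.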
================================================ FILE: ci/container/test_component.sh ================================================ #!/bin/bash -e # # Test JDBC for Linux/MAC # set -o pipefail THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" export WORKSPACE=${WORKSPACE:-/mnt/workspace} export SOURCE_ROOT=${SOURCE_ROOT:-/mnt/host} MVNW_EXE=$SOURCE_ROOT/mvnw echo "[INFO] Download JDBC Integration test cases and libraries" source $THIS_DIR/download_artifact.sh if [[ -f "$WORKSPACE/parameters.json" ]]; then echo "[INFO] Found parameter file in $WORKSPACE" PARAMETER_FILE=$WORKSPACE/parameters.json else echo "[INFO] Use the default test parameters.json" PARAMETER_FILE=$SOURCE_ROOT/src/test/resources/parameters.json fi eval $(jq -r '.testconnection | to_entries | map("export \(.key)=\(.value|tostring)")|.[]' $PARAMETER_FILE) if [[ -n "$GITHUB_SHA" ]]; then # Github Action export TARGET_SCHEMA_NAME=${RUNNER_TRACKING_ID//-/_}_${GITHUB_SHA} function finish() { pushd $SOURCE_ROOT/ci/container >& /dev/null echo "[INFO] Drop schema $TARGET_SCHEMA_NAME" python3 drop_schema.py popd >& /dev/null } trap finish EXIT pushd $SOURCE_ROOT/ci/container >& /dev/null echo "[INFO] Create schema $TARGET_SCHEMA_NAME" if python3 create_schema.py; then export SNOWFLAKE_TEST_SCHEMA=$TARGET_SCHEMA_NAME else echo "[WARN] SNOWFLAKE_TEST_SCHEMA: $SNOWFLAKE_TEST_SCHEMA" fi popd >& /dev/null fi # we change password, create SSM_KNOWN_FILE source $THIS_DIR/../log_analyze_setup.sh if [[ "${ENABLE_CLIENT_LOG_ANALYZE}" == "true" ]]; then echo "[INFO] Log Analyze is enabled." setup_log_env if [[ "$SNOWFLAKE_TEST_HOST" == *"snowflake.reg"*".local"* && "$SNOWFLAKE_TEST_ACCOUNT" == "s3testaccount" && "$SNOWFLAKE_TEST_USER" == "snowman" && "$SNOWFLAKE_TEST_PASSWORD" == "test" ]]; then echo "[INFO] Run test with local instance. Will set a more complex password" python3 $THIS_DIR/change_snowflake_test_pwd.py export SNOWFLAKE_TEST_PASSWORD=$SNOWFLAKE_TEST_PASSWORD_NEW echo $SNOWFLAKE_TEST_PASSWORD >> $CLIENT_KNOWN_SSM_FILE_PATH else echo "[INFO] Not running test with local instance. Won't set a new password" fi fi env | grep SNOWFLAKE_ | grep -v -E "(PASS|KEY|SECRET|TOKEN)" | sort echo "[INFO] Running Hang Web Server" kill -9 $(ps -ewf | grep hang_webserver | grep -v grep | awk '{print $2}') || true python3 $THIS_DIR/hang_webserver.py 12345& # Avoid connection timeouts export MAVEN_OPTS="$MAVEN_OPTS -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 -Dmaven.wagon.httpconnectionManager.ttlSeconds=120" echo $MAVEN_OPTS cd $SOURCE_ROOT # Avoid connection timeout on plugin dependency fetch or fail-fast when dependency cannot be fetched after 3 retries # Retry dependency:go-offline up to 3 times if it fails for attempt in 1 2 3; do echo "[INFO] maven dependency:go-offline attempt $attempt/3" if "$MVNW_EXE" --batch-mode --show-version dependency:go-offline; then break fi if [ $attempt -eq 3 ]; then exit 1 fi echo "[WARN] Retrying in 5 seconds..." 
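    # A failed go-offline attempt is typically a transient repository or
    # network error (hence the connection-related MAVEN_OPTS above), so a
    # short pause before the next attempt is usually sufficient.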
    sleep 5
done

if [[ "$is_old_driver" == "true" ]]; then
    pushd TestOnly >& /dev/null
    JDBC_VERSION=$($MVNW_EXE org.apache.maven.plugins:maven-help-plugin:2.1.1:evaluate -Dexpression=project.version --batch-mode | grep -v "[INFO]")
    echo "[INFO] Run JDBC $JDBC_VERSION tests"
    $MVNW_EXE -DjenkinsIT \
        -Dskip.unitTests=true \
        -Djava.io.tmpdir=$WORKSPACE \
        -Djacoco.skip.instrument=false \
        -DintegrationTestSuites="$JDBC_TEST_SUITES" \
        -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn \
        verify \
        --batch-mode --show-version
    popd >& /dev/null
elif [[ "$JDBC_TEST_SUITES" == "FipsTestSuite" ]]; then
    pushd FIPS >& /dev/null
    echo "[INFO] Run Fips tests"
    $MVNW_EXE -DjenkinsIT \
        -Dskip.unitTests=true \
        -Djava.io.tmpdir=$WORKSPACE \
        -Djacoco.skip.instrument=false \
        -DintegrationTestSuites=FipsTestSuite \
        -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn \
        -Dnot-self-contained-jar \
        verify \
        --batch-mode --show-version
    popd >& /dev/null
else
    echo "[INFO] Run $JDBC_TEST_SUITES tests"
    $MVNW_EXE -DjenkinsIT \
        -Dskip.unitTests=true \
        -Djava.io.tmpdir=$WORKSPACE \
        -Djacoco.skip.instrument=false \
        -DintegrationTestSuites="$JDBC_TEST_SUITES" \
        -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn \
        -Dnot-self-contained-jar $ADDITIONAL_MAVEN_PROFILE \
        verify \
        --batch-mode --show-version
fi

IFS=' '

================================================
FILE: ci/container/upload_artifact.sh
================================================
#!/bin/bash -e
#
# Upload jar files to S3
#
set -o pipefail
THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
JDBC_ROOT=$(cd "${THIS_DIR}/../../" && pwd)

if [[ -z "$GITHUB_ACTIONS" ]]; then
    export GIT_BRANCH=${GIT_BRANCH:-origin/$(git rev-parse --abbrev-ref HEAD)}
    export GIT_COMMIT=${GIT_COMMIT:-$(git rev-parse HEAD)}
    export WORKSPACE=${WORKSPACE:-/tmp}

    echo "[INFO] Git Branch is $GIT_BRANCH"
    if [[ "$GIT_BRANCH" == PR-* ]]; then
        BRANCH=$GIT_BRANCH
    else
        BRANCH=$(basename ${GIT_BRANCH})
    fi
    target_stage=s3://sfc-eng-jenkins/repository/jdbc/$BRANCH/${GIT_COMMIT}
    echo "[INFO] Uploading jar to $target_stage/"
    aws s3 cp --only-show-errors $JDBC_ROOT/lib/ $target_stage/ --recursive --exclude "*" --include "*.jar"
    aws s3 cp --only-show-errors $JDBC_ROOT/FIPS/lib $target_stage/ --recursive --exclude "*" --include "*.jar"
    COMMIT_FILE=$(mktemp)
    cat > $COMMIT_FILE <

================================================
FILE: ci/scripts/check_content.sh
================================================
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"

if jar tvf $DIR/../../target/snowflake-jdbc${package_modifier}.jar | awk '{print $8}' |
        grep -v -E "/$" |
        grep -v -E "^(net|com)/snowflake" |
        grep -v -E "(com|net)/\$" |
        grep -v -E "^META-INF" |
        grep -v -E "^iso3166_" |
        grep -v -E "^mozilla" |
        grep -v -E "^com/sun/jna" |
        grep -v com/sun/ |
        grep -v mime.types |
        grep -v -E "^com/github/luben/zstd/" |
        grep -v -E "^aix/" |
        grep -v -E "^darwin/" |
        grep -v -E "^freebsd/" |
        grep -v -E "^linux/" |
        grep -v -E "^win/" |
        grep -v -E "^minicore/" |
        grep -v -E "^org/conscrypt/"; then
    echo "[ERROR] JDBC jar includes class not under the snowflake namespace"
    exit 1
fi

if jar tvf $DIR/../../target/snowflake-jdbc${package_modifier}.jar | awk '{print $8}' |
        grep -E "^META-INF/versions/.*.class" |
        grep -v -E "^META-INF/versions/.*/(net|com)/snowflake"; then
    echo "[ERROR] JDBC jar includes multi-release classes not under the snowflake namespace"
    exit 1
fi

================================================
FILE: ci/scripts/check_no_raw_system_calls.sh
================================================
#!/usr/bin/env bash
#
# Architectural guard: ensures no raw
System.getProperty(), System.getenv(), # or System.setProperty() calls exist in production code outside SnowflakeUtil.java. # # Use SnowflakeUtil.systemGetProperty() / systemGetEnv() / systemSetProperty() # instead of raw calls to ensure proper SecurityException handling. # set -euo pipefail SRC_DIR="${1:-src/main/java}" WRAPPER_FILE="SnowflakeUtil.java" violations=$( grep -rn --include='*.java' -E 'System\.(getProperty|getenv|setProperty)\s*\(' "$SRC_DIR" \ | grep -v "/${WRAPPER_FILE}:" \ || true ) if [[ -n "$violations" ]]; then echo "ERROR: Found raw System.getProperty/getenv/setProperty calls outside ${WRAPPER_FILE}:" echo "" echo "$violations" echo "" echo "Replace with SnowflakeUtil.systemGetProperty() / systemGetEnv() / systemSetProperty()." exit 1 fi echo "OK: No raw System.getProperty/getenv/setProperty calls found outside ${WRAPPER_FILE}." ================================================ FILE: ci/scripts/login_docker.sh ================================================ #!/bin/bash -e # # Login the Docker Hub # echo "[INFO] Login the Docker Hub" if [[ -z "$DOCKER_HUB_USER" ]] || [[ -z "$DOCKER_HUB_TOKEN" ]]; then echo "[ERROR] Set DOCKER_HUB_USER and DOCKER_HUB_TOKEN to push the images to the Docker Hub" exit 1 fi docker login --username "$DOCKER_HUB_USER" --password "$DOCKER_HUB_TOKEN" ================================================ FILE: ci/scripts/set_git_info.sh ================================================ #!/bin/bash -e # # Set GIT info # if [[ -z "$GITHUB_ACTIONS" ]]; then # # set Jenkins GIT parameters propagated from Build job. # export client_git_url=${client_git_url:-https://github.com/snowflakedb/snowflake-jdbc.git} export client_git_branch=${client_git_branch:-origin/$(git rev-parse --abbrev-ref HEAD)} export client_git_commit=${client_git_commit:-$(git log --pretty=oneline | head -1 | awk '{print $1}')} else # # GITHUB Actions if [[ "$CLOUD_PROVIDER" == "AZURE" ]]; then SOURCE_PARAMETER_FILE=parameters_azure.json.gpg RSA_KEY_FILE=rsa_key_jdbc_azure.p8.gpg elif [[ "$CLOUD_PROVIDER" == "GCP" ]]; then SOURCE_PARAMETER_FILE=parameters_gcp.json.gpg RSA_KEY_FILE=rsa_key_jdbc_gcp.p8.gpg else SOURCE_PARAMETER_FILE=parameters_aws.json.gpg RSA_KEY_FILE=rsa_key_jdbc_aws.p8.gpg fi gpg --quiet --batch --yes --decrypt --passphrase="$PARAMETERS_SECRET" --output $WORKSPACE/parameters.json $THIS_DIR/../.github/workflows/$SOURCE_PARAMETER_FILE gpg --quiet --batch --yes --decrypt --passphrase="$JDBC_PRIVATE_KEY_SECRET" --output $WORKSPACE/rsa_key_jdbc.p8 $THIS_DIR/../.github/workflows/rsa_keys/$RSA_KEY_FILE export client_git_url=https://github.com/${GITHUB_REPOSITORY}.git export client_git_branch=origin/$(basename ${GITHUB_REF}) export client_git_commit=${GITHUB_SHA} fi # # set GIT parameters used in the following scripts # export GIT_URL=$client_git_url export GIT_BRANCH=$client_git_branch export GIT_COMMIT=$client_git_commit echo "GIT_URL: $GIT_URL, GIT_BRANCH: $GIT_BRANCH, GIT_COMMIT: $GIT_COMMIT" ================================================ FILE: ci/scripts/setup_gpg.sh ================================================ #!/bin/bash # GPG setup script for creating unique GPG home directory setup_gpg_home() { # Create unique GPG home directory export GNUPGHOME="${THIS_DIR}/.gnupg_$$_$(date +%s%N)_${BUILD_NUMBER:-}" mkdir -p "$GNUPGHOME" chmod 700 "$GNUPGHOME" cleanup_gpg() { if [[ -n "$GNUPGHOME" && -d "$GNUPGHOME" ]]; then rm -rf "$GNUPGHOME" fi } trap cleanup_gpg EXIT } setup_gpg_home ================================================ FILE: ci/test.sh 
================================================ #!/bin/bash -e # # Test JDBC # set -o pipefail THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" JDBC_ROOT="$(cd "${THIS_DIR}/.." && pwd)" source $THIS_DIR/_init.sh source $THIS_DIR/scripts/set_git_info.sh declare -A TARGET_TEST_IMAGES if [[ -n "$TARGET_DOCKER_TEST_IMAGE" ]]; then echo "[INFO] TARGET_DOCKER_TEST_IMAGE: $TARGET_DOCKER_TEST_IMAGE" IMAGE_NAME=${TEST_IMAGE_NAMES[$TARGET_DOCKER_TEST_IMAGE]} if [[ -z "$IMAGE_NAME" ]]; then echo "[ERROR] The target platform $TARGET_DOCKER_TEST_IMAGE doesn't exist. Check $THIS_DIR/_init.sh" exit 1 fi TARGET_TEST_IMAGES=([$TARGET_DOCKER_TEST_IMAGE]=$IMAGE_NAME) else echo "[ERROR] Set TARGET_DOCKER_TEST_IMAGE to the docker image name to run the test" for name in "${!TEST_IMAGE_NAMES[@]}"; do echo " " $name done exit 2 fi if [[ -z "$JDBC_TEST_SUITES" ]]; then echo "[ERROR] Set JDBC_TEST_SUITES to the JDBC test category." find $THIS_DIR/../src/test/java -type f -exec grep -E "^import net.snowflake.client.category" {} \; | sort | uniq | awk -F. '{print $NF}' | awk -F\; '{print $1}' exit 2 fi for name in "${!TARGET_TEST_IMAGES[@]}"; do echo "[INFO] Testing $DRIVER_NAME on $name" # docker pull "${TEST_IMAGE_NAMES[$name]}" docker container run \ --rm \ --network=host \ -v $JDBC_ROOT:/mnt/host \ -v $WORKSPACE:/mnt/workspace \ -e LOCAL_USER_ID=$(id -u ${USER}) \ -e TERM=xterm \ -e GIT_COMMIT \ -e GIT_BRANCH \ -e GIT_URL \ -e AWS_ACCESS_KEY_ID \ -e AWS_SECRET_ACCESS_KEY \ -e GITHUB_ACTIONS \ -e GITHUB_SHA \ -e GITHUB_REF \ -e RUNNER_TRACKING_ID \ -e JOB_NAME \ -e BUILD_NUMBER \ -e JDBC_TEST_SUITES \ -e ADDITIONAL_MAVEN_PROFILE \ -e CLOUD_PROVIDER \ -e is_old_driver \ ${TEST_IMAGE_NAMES[$name]} \ /mnt/host/ci/container/test_component.sh done ================================================ FILE: ci/test_authentication.sh ================================================ #!/bin/bash -e set -o pipefail export THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" source "$THIS_DIR/scripts/setup_gpg.sh" export WORKSPACE=${WORKSPACE:-/tmp} export INTERNAL_REPO=artifactory.ci1.us-west-2.aws-dev.app.snowflake.com/internal-production-docker-snowflake-virtual CI_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" if [[ -n "$JENKINS_HOME" ]]; then ROOT_DIR="$(cd "${CI_DIR}/.." && pwd)" export WORKSPACE=${WORKSPACE:-/tmp} source $CI_DIR/_init.sh fi gpg --quiet --batch --yes --decrypt --passphrase="$PARAMETERS_SECRET" --output $THIS_DIR/../.github/workflows/parameters_aws_auth_tests.json "$THIS_DIR/../.github/workflows/parameters_aws_auth_tests.json.gpg" docker run \ -v $(cd $THIS_DIR/.. 
&& pwd):/mnt/host \
    -v $WORKSPACE:/mnt/workspace \
    --rm \
    artifactory.ci1.us-west-2.aws-dev.app.snowflake.com/internal-production-docker-snowflake-virtual/docker/snowdrivers-test-external-browser-jdbc:4 \
    "/mnt/host/ci/container/test_authentication.sh"

================================================
FILE: ci/test_mac.sh
================================================
#!/bin/bash -e
#
# Test JDBC for Mac
#
echo "DOWNLOADED"
set -o pipefail
THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source $THIS_DIR/_init.sh
source $THIS_DIR/scripts/set_git_info.sh
export WORKSPACE=$GITHUB_WORKSPACE
export SOURCE_ROOT=$GITHUB_WORKSPACE

python3 --version
python3 -m venv venv
source venv/bin/activate
pip3 install -U pip
pip3 install -U snowflake-connector-python

$THIS_DIR/container/test_component.sh

================================================
FILE: ci/test_revocation.sh
================================================
#!/bin/bash -e
#
# Test certificate revocation validation using the revocation-validation framework.
#
set -o pipefail
THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
JDBC_ROOT="$( dirname "${THIS_DIR}")"
WORKSPACE=${WORKSPACE:-${JDBC_ROOT}}

echo "[Info] Starting revocation validation tests"

# Detect JDBC version using Maven Wrapper (reliable, handles property interpolation)
JDBC_VERSION=$(cd "$JDBC_ROOT" && ./mvnw help:evaluate -Dexpression=project.version -q -DforceStdout 2>/dev/null)
if [ -z "$JDBC_VERSION" ]; then
    echo "[Error] Failed to determine JDBC version from pom.xml"
    exit 1
fi
echo "[Info] JDBC driver version: $JDBC_VERSION"

# Ensure parent POM is also in ~/.m2 (needed for Maven dependency resolution)
if [ ! -f "$HOME/.m2/repository/net/snowflake/snowflake-jdbc-parent/$JDBC_VERSION/"*.pom ]; then
    echo "[Info] Installing parent POM to local Maven repo..."
    if ! (cd "$JDBC_ROOT" && ./mvnw install -N -f parent-pom.xml -Dmaven.test.skip=true -q --batch-mode); then
        echo "[Error] Failed to install parent POM"
        exit 1
    fi
fi

# Clone revocation-validation framework
REVOCATION_DIR="/tmp/revocation-validation"
REVOCATION_BRANCH="${REVOCATION_BRANCH:-main}"
rm -rf "$REVOCATION_DIR"
if [ -n "$GITHUB_USER" ] && [ -n "$GITHUB_TOKEN" ]; then
    git clone --depth 1 --branch "$REVOCATION_BRANCH" "https://${GITHUB_USER}:${GITHUB_TOKEN}@github.com/snowflake-eng/revocation-validation.git" "$REVOCATION_DIR"
else
    git clone --depth 1 --branch "$REVOCATION_BRANCH" "https://github.com/snowflake-eng/revocation-validation.git" "$REVOCATION_DIR"
fi
cd "$REVOCATION_DIR"

# Point the wrapper's pom.xml at the locally-built JDBC version (already in ~/.m2 from Build stage)
WRAPPER_POM="$REVOCATION_DIR/validation/clients/snowflake-jdbc/java/pom.xml"
awk -v ver="$JDBC_VERSION" '
    /<groupId>net\.snowflake<\/groupId>/ { in_sf=1 }
    in_sf && /<artifactId>snowflake-jdbc<\/artifactId>/ { found=1 }
    found && /<version>/ { sub(/<version>[^<]*<\/version>/, "<version>" ver "</version>"); found=0; in_sf=0 }
    { print }
' "$WRAPPER_POM" > "${WRAPPER_POM}.tmp" && mv "${WRAPPER_POM}.tmp" "$WRAPPER_POM"

# Verify the version was correctly set
if ! grep -q "<version>${JDBC_VERSION}</version>" "$WRAPPER_POM"; then
    echo "[Error] Failed to update JDBC version in wrapper pom.xml"
    exit 1
fi
echo "[Info] Updated wrapper to use JDBC $JDBC_VERSION"

echo "[Info] Running tests with Go $(go version | grep -oE 'go[0-9]+\.[0-9]+')..."
set +e
go run . \
    --client snowflake-jdbc \
    --output "${WORKSPACE}/revocation-results.json" \
    --output-html "${WORKSPACE}/revocation-report.html" \
    --log-level debug
EXIT_CODE=$?
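# The exit status is captured under `set +e` so the report files can still be
# checked and reported below even when the Go runner fails; the original
# status is propagated at the end via `exit $EXIT_CODE`.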
set -e if [ -f "${WORKSPACE}/revocation-results.json" ] && [ -f "${WORKSPACE}/revocation-report.html" ]; then echo "[Info] Results: ${WORKSPACE}/revocation-results.json" echo "[Info] Report: ${WORKSPACE}/revocation-report.html" else echo "[Warn] Expected output files were not generated" fi exit $EXIT_CODE ================================================ FILE: ci/test_wif.sh ================================================ #!/bin/bash -e set -o pipefail export THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" export RSA_KEY_PATH_AWS_AZURE="$THIS_DIR/wif/parameters/rsa_wif_aws_azure" export RSA_KEY_PATH_GCP="$THIS_DIR/wif/parameters/rsa_wif_gcp" export RSA_GCP_FUNCTION_KEY="$THIS_DIR/wif/parameters/rsa_gcp_function" export PARAMETERS_FILE_PATH="$THIS_DIR/wif/parameters/parameters_wif.json" export PARAMETERS_FUNCTIONS_FILE_PATH="$THIS_DIR/wif/parameters/parameters_wif_function.json" run_tests_and_set_result() { local provider="$1" local host="$2" local snowflake_host="$3" local rsa_key_path="$4" local snowflake_user="$5" local impersonation_path="$6" local snowflake_user_for_impersonation="$7" local impersonation_external_id="${8:-}" ssh -i "$rsa_key_path" -o IdentitiesOnly=yes -p 443 "$host" env BRANCH="$BRANCH" SNOWFLAKE_TEST_WIF_HOST="$snowflake_host" SNOWFLAKE_TEST_WIF_PROVIDER="$provider" SNOWFLAKE_TEST_WIF_ACCOUNT="$SNOWFLAKE_TEST_WIF_ACCOUNT" SNOWFLAKE_TEST_WIF_USERNAME="$snowflake_user" SNOWFLAKE_TEST_WIF_IMPERSONATION_PATH="$impersonation_path" SNOWFLAKE_TEST_WIF_USERNAME_IMPERSONATION="$snowflake_user_for_impersonation" SNOWFLAKE_TEST_WIF_AWS_EXTERNAL_ID="$impersonation_external_id" SNOWFLAKE_TEST_WIF_IMPERSONATION_ROLE_ARN_WITH_EXTERNAL_ID="$SNOWFLAKE_TEST_WIF_IMPERSONATION_ROLE_ARN_WITH_EXTERNAL_ID" SNOWFLAKE_TEST_WIF_IMPERSONATION_USER_WITH_EXTERNAL_ID="$SNOWFLAKE_TEST_WIF_IMPERSONATION_USER_WITH_EXTERNAL_ID" bash << EOF set -e set -o pipefail docker run \ --rm \ --cpus=1 \ -m 2g \ -e BRANCH \ -e SNOWFLAKE_TEST_WIF_PROVIDER \ -e SNOWFLAKE_TEST_WIF_HOST \ -e SNOWFLAKE_TEST_WIF_ACCOUNT \ -e SNOWFLAKE_TEST_WIF_USERNAME \ -e SNOWFLAKE_TEST_WIF_IMPERSONATION_PATH \ -e SNOWFLAKE_TEST_WIF_USERNAME_IMPERSONATION \ -e SNOWFLAKE_TEST_WIF_AWS_EXTERNAL_ID \ -e SNOWFLAKE_TEST_WIF_IMPERSONATION_ROLE_ARN_WITH_EXTERNAL_ID \ -e SNOWFLAKE_TEST_WIF_IMPERSONATION_USER_WITH_EXTERNAL_ID \ -e SF_ENABLE_WIF_AWS_EXTERNAL_ID=true \ snowflakedb/client-jdbc-rockylinux8-openjdk17-test:1 \ bash -c " echo 'Running tests on branch: \$BRANCH' mkdir -p /tmp/maven-repo /tmp/workspace chmod 755 /tmp/maven-repo /tmp/workspace if [[ \"\$BRANCH\" =~ ^PR-[0-9]+\$ ]]; then curl -L https://github.com/snowflakedb/snowflake-jdbc/archive/refs/pull/\$(echo \$BRANCH | cut -d- -f2)/head.tar.gz | tar -xz mv snowflake-jdbc-* snowflake-jdbc else curl -L https://github.com/snowflakedb/snowflake-jdbc/archive/refs/heads/\$BRANCH.tar.gz | tar -xz mv snowflake-jdbc-\$BRANCH snowflake-jdbc fi cd snowflake-jdbc bash ci/wif/test_wif.sh " EOF local status=$? if [[ $status -ne 0 ]]; then echo "$provider tests failed with exit status: $status" EXIT_STATUS=1 else echo "$provider tests passed" fi } get_branch() { local branch if [[ -n "${GIT_BRANCH}" ]]; then # Jenkins branch="${GIT_BRANCH}" else branch=$(git rev-parse --abbrev-ref HEAD) if [[ "$branch" == "HEAD" ]]; then branch=$(git name-rev --name-only HEAD | sed 's#^remotes/origin/##;s#^origin/##') fi fi echo "$branch" } run_azure_function() { if ! bash "$THIS_DIR/wif/azure-function/test.sh"; then EXIT_STATUS=1 fi } run_aws_function() { if ! 
bash "$THIS_DIR/wif/aws-lambda/test.sh"; then EXIT_STATUS=1 fi } run_gcp_function() { if ! bash "$THIS_DIR/wif/gcp-function/test.sh"; then EXIT_STATUS=1 fi } setup_parameters() { source "$THIS_DIR/scripts/setup_gpg.sh" gpg --quiet --batch --yes --decrypt --passphrase="$PARAMETERS_SECRET" --output "$RSA_KEY_PATH_AWS_AZURE" "${RSA_KEY_PATH_AWS_AZURE}.gpg" gpg --quiet --batch --yes --decrypt --passphrase="$PARAMETERS_SECRET" --output "$RSA_KEY_PATH_GCP" "${RSA_KEY_PATH_GCP}.gpg" gpg --quiet --batch --yes --decrypt --passphrase="$PARAMETERS_SECRET" --output "$RSA_GCP_FUNCTION_KEY" "${RSA_GCP_FUNCTION_KEY}.gpg" chmod 600 "$RSA_KEY_PATH_AWS_AZURE" chmod 600 "$RSA_KEY_PATH_GCP" chmod 600 "$RSA_GCP_FUNCTION_KEY" gpg --quiet --batch --yes --decrypt --passphrase="$PARAMETERS_SECRET" --output "$PARAMETERS_FILE_PATH" "${PARAMETERS_FILE_PATH}.gpg" eval $(jq -r '.wif | to_entries | map("export \(.key)=\(.value|tostring)")|.[]' $PARAMETERS_FILE_PATH) gpg --quiet --batch --yes --decrypt --passphrase="$PARAMETERS_SECRET" --output "$PARAMETERS_FUNCTIONS_FILE_PATH" "${PARAMETERS_FUNCTIONS_FILE_PATH}.gpg" eval $(jq -r '.wif | to_entries | map("export \(.key)=\(.value|tostring)")|.[]' $PARAMETERS_FUNCTIONS_FILE_PATH) } BRANCH=$(get_branch) export BRANCH setup_parameters # Run tests for all cloud providers EXIT_STATUS=0 set +e # Don't exit on first failure # WIF E2E tests on functions run_aws_function run_azure_function run_gcp_function # WIF E2E tests on VMs run_tests_and_set_result "AZURE" "$HOST_AZURE" "$SNOWFLAKE_TEST_WIF_HOST_AZURE" "$RSA_KEY_PATH_AWS_AZURE" "$SNOWFLAKE_TEST_WIF_USERNAME_AZURE" "$SNOWFLAKE_TEST_WIF_IMPERSONATION_PATH_AZURE" "$SNOWFLAKE_TEST_WIF_USERNAME_AZURE_IMPERSONATION" run_tests_and_set_result "AWS" "$HOST_AWS" "$SNOWFLAKE_TEST_WIF_HOST_AWS" "$RSA_KEY_PATH_AWS_AZURE" "$SNOWFLAKE_TEST_WIF_USERNAME_AWS" "$SNOWFLAKE_TEST_WIF_IMPERSONATION_PATH_AWS" "$SNOWFLAKE_TEST_WIF_USERNAME_AWS_IMPERSONATION" "$SNOWFLAKE_TEST_WIF_AWS_EXTERNAL_ID" run_tests_and_set_result "GCP" "$HOST_GCP" "$SNOWFLAKE_TEST_WIF_HOST_GCP" "$RSA_KEY_PATH_GCP" "$SNOWFLAKE_TEST_WIF_USERNAME_GCP" "$SNOWFLAKE_TEST_WIF_IMPERSONATION_PATH_GCP" "$SNOWFLAKE_TEST_WIF_USERNAME_GCP_IMPERSONATION" set -e # Re-enable exit on error echo "Exit status: $EXIT_STATUS" exit $EXIT_STATUS ================================================ FILE: ci/test_windows.bat ================================================ REM REM Tests JDBC Driver on Windows REM setlocal setlocal EnableDelayedExpansion python -m venv venv call venv\scripts\activate pip install -U snowflake-connector-python cd %GITHUB_WORKSPACE% if "%CLOUD_PROVIDER%"=="AZURE" ( set ENCODED_PARAMETERS_FILE=.github/workflows/parameters_azure.json.gpg set ENCODED_RSA_KEY_FILE=.github/workflows/rsa_keys/rsa_key_jdbc_azure.p8.gpg ) else if "%CLOUD_PROVIDER%"=="GCP" ( set ENCODED_PARAMETERS_FILE=.github/workflows/parameters_gcp.json.gpg set ENCODED_RSA_KEY_FILE=.github/workflows/rsa_keys/rsa_key_jdbc_gcp.p8.gpg ) else if "%CLOUD_PROVIDER%"=="AWS" ( set ENCODED_PARAMETERS_FILE=.github/workflows/parameters_aws.json.gpg set ENCODED_RSA_KEY_FILE=.github/workflows/rsa_keys/rsa_key_jdbc_aws.p8.gpg ) else ( echo === unknown cloud provider exit /b 1 ) gpg --quiet --batch --yes --decrypt --passphrase=%PARAMETERS_SECRET% --output parameters.json %ENCODED_PARAMETERS_FILE% gpg --quiet --batch --yes --decrypt --passphrase=%JDBC_PRIVATE_KEY_SECRET% --output rsa_key_jdbc.p8 %ENCODED_RSA_KEY_FILE% REM DON'T FORGET TO include @echo off here or the password may be leaked! 
echo @echo off>parameters.bat jq -r ".testconnection | to_entries | map(\"set \(.key)=\(.value)\") | .[]" parameters.json >> parameters.bat call parameters.bat REM Set the private key file path directly to avoid encoding issues set "SNOWFLAKE_TEST_PRIVATE_KEY_FILE=%GITHUB_WORKSPACE%\rsa_key_jdbc.p8" set "SNOWFLAKE_TEST_AUTHENTICATOR=SNOWFLAKE_JWT" if %ERRORLEVEL% NEQ 0 ( echo === failed to set the test parameters exit /b 1 ) set SNOWFLAKE_TEST_SCHEMA=%RUNNER_TRACKING_ID:-=_%_%GITHUB_SHA% set TARGET_SCHEMA_NAME=%SNOWFLAKE_TEST_SCHEMA% echo [INFO] Account: %SNOWFLAKE_TEST_ACCOUNT% echo [INFO] User : %SNOWFLAKE_TEST_USER% echo [INFO] Database: %SNOWFLAKE_TEST_DATABASE% echo [INFO] Schema: %SNOWFLAKE_TEST_SCHEMA% echo [INFO] Warehouse: %SNOWFLAKE_TEST_WAREHOUSE% echo [INFO] Role: %SNOWFLAKE_TEST_ROLE% echo [INFO] PROVIDER: %CLOUD_PROVIDER% echo [INFO] Creating schema %SNOWFLAKE_TEST_SCHEMA% pushd %GITHUB_WORKSPACE%\ci\container python create_schema.py popd REM setup log set CLIENT_LOG_DIR_PATH=%GITHUB_WORKSPACE%\jenkins_rt_logs echo "[INFO] CLIENT_LOG_DIR_PATH=%CLIENT_LOG_DIR_PATH%" set CLIENT_LOG_FILE_PATH=%CLIENT_LOG_DIR_PATH%\ssnowflake_ssm_rt.log echo "[INFO] CLIENT_LOG_FILE_PATH=%CLIENT_LOG_FILE_PATH%" set CLIENT_KNOWN_SSM_FILE_PATH=%CLIENT_LOG_DIR_PATH%\rt_jenkins_log_known_ssm.txt echo "[INFO] CLIENT_KNOWN_SSM_FILE_PATH=%CLIENT_KNOWN_SSM_FILE_PATH%" REM To close log analyze, just set ENABLE_CLIENT_LOG_ANALYZE to not "true", e.g. "false". set ENABLE_CLIENT_LOG_ANALYZE=true REM The new complex password we use for jenkins test set SNOWFLAKE_TEST_PASSWORD_NEW="ThisIsRandomPassword123!" set LOG_PROPERTY_FILE=%GITHUB_WORKSPACE%\src\test\resources\logging.properties echo "[INFO] LOG_PROPERTY_FILE=%LOG_PROPERTY_FILE%" set CLIENT_DRIVER_NAME=JDBC powershell -Command "(Get-Content %LOG_PROPERTY_FILE%) | Foreach-Object { $_ -replace '^java.util.logging.FileHandler.pattern.*', 'java.util.logging.FileHandler.pattern = %CLIENT_LOG_FILE_PATH%' } | Set-Content %LOG_PROPERTY_FILE%" echo "[INFO] Create log directory" IF NOT EXIST %CLIENT_LOG_DIR_PATH% MD %CLIENT_LOG_DIR_PATH% 2>nul echo "[INFO] Delete ssm file" IF EXIST "%CLIENT_KNOWN_SSM_FILE_PATH%" DEL /F /Q "%CLIENT_KNOWN_SSM_FILE_PATH%" echo "[INFO] Create ssm file" echo.>"%CLIENT_KNOWN_SSM_FILE_PATH%" echo "[INFO] Finish log setup" REM end setup log for /F "tokens=1,* delims==" %%i in ('set ^| findstr /I /R "^SNOWFLAKE_[^=]*$" ^| findstr /I /V /R "^SNOWFLAKE_(PASS|.*KEY|.*SECRET|.*TOKEN)_[^=]*$" ^| sort') do ( echo %%i=%%j ) echo [INFO] Starting hang_webserver.py 12345 pushd %GITHUB_WORKSPACE%\ci\container start /b python hang_webserver.py 12345 > hang_webserver.out 2>&1 popd echo [INFO] Testing set MVNW_EXE=%GITHUB_WORKSPACE%\mvnw.cmd REM Avoid connection timeouts set MAVEN_OPTS="-Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 -Dmaven.wagon.httpconnectionManager.ttlSeconds=120" echo "MAVEN OPTIONS %MAVEN_OPTS%" REM Avoid connection timeout on plugin dependency fetch or fail-fast when dependency cannot be fetched after 3 retries REM Retry dependency:go-offline up to 3 times if it fails for /l %%i in (1,1,3) do ( echo [INFO] maven dependency:go-offline attempt %%i/3 cmd /c "%MVNW_EXE%" --batch-mode --show-version dependency:go-offline if !errorlevel! equ 0 goto :success if %%i equ 3 exit /b 1 echo [WARN] Retrying in 5 seconds... 
    timeout /t 5 /nobreak >nul
)
:success

if "%JDBC_TEST_SUITES%"=="FipsTestSuite" (
    pushd FIPS
    echo "[INFO] Run Fips tests"
    cmd /c %MVNW_EXE% -B -DjenkinsIT ^
        -Dskip.unitTests=true ^
        -Djava.io.tmpdir=%GITHUB_WORKSPACE% ^
        -Djacoco.skip.instrument=false ^
        -DintegrationTestSuites=FipsTestSuite ^
        -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn ^
        -Dnot-self-contained-jar ^
        verify ^
        --batch-mode --show-version > log.txt & type log.txt
    echo "[INFO] Check for test execution status"
    find /i /c "BUILD FAILURE" log.txt > NUL
    set isfound=!errorlevel!
    if !isfound! equ 0 (
        echo [ERROR] Failed run %JDBC_TEST_SUITES% test
        exit /b 1
    ) else (
        echo [INFO] Success run %JDBC_TEST_SUITES% test
    )
    popd
) else (
    echo "[INFO] Run %JDBC_TEST_SUITES% tests"
    cmd /c %MVNW_EXE% -B -DjenkinsIT ^
        -Dskip.unitTests=true ^
        -Djava.io.tmpdir=%GITHUB_WORKSPACE% ^
        -Djacoco.skip.instrument=false ^
        -DintegrationTestSuites="%JDBC_TEST_SUITES%" ^
        -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn ^
        -Dnot-self-contained-jar %ADDITIONAL_MAVEN_PROFILE% ^
        verify ^
        --batch-mode --show-version > log.txt & type log.txt
    echo "[INFO] Check for test execution status"
    find /i /c "BUILD FAILURE" log.txt > NUL
    set isfound=!errorlevel!
    if !isfound! equ 0 (
        echo [ERROR] Failed run %JDBC_TEST_SUITES% test
        exit /b 1
    ) else (
        echo [INFO] Success run %JDBC_TEST_SUITES% test
    )
)

echo [INFO] Dropping schema %SNOWFLAKE_TEST_SCHEMA%
pushd %GITHUB_WORKSPACE%\ci\container
python drop_schema.py
popd

================================================
FILE: ci/wif/aws-lambda/README.md
================================================
# AWS Lambda Function for WIF E2E Testing

## Deployment Steps

1. Install AWS CLI
2. Configure AWS credentials: `aws configure`. Use credentials: WIF E2E AWS LAMBDA DEPLOY
3. It may be necessary to unset AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY if they point to a different AWS account.
4.
Run deployment script: `./deploy.sh` ================================================ FILE: ci/wif/aws-lambda/pom.xml ================================================ 4.0.0 com.snowflake.wif aws-lambda 1.0-SNAPSHOT jar AWS Lambda Functions UTF-8 17 1.2.3 3.11.3 3.4.1 drivers-wif-e2e us-west-2 java17 480 3008 com.snowflake.wif.aws.WifLambdaFunctionE2e::handleRequest com.amazonaws aws-lambda-java-core ${aws.lambda.java.core.version} com.amazonaws aws-lambda-java-events ${aws.lambda.java.events.version} com.fasterxml.jackson.core jackson-databind 2.15.2 org.apache.commons commons-compress 1.28.0 org.junit.jupiter junit-jupiter 5.9.2 test org.mockito mockito-core 5.3.1 test src/main/java org.codehaus.mojo build-helper-maven-plugin 3.4.0 generate-sources add-source ../shared org.apache.maven.plugins maven-compiler-plugin 3.11.0 ${java.version} ${java.version} ${project.build.sourceEncoding} org.apache.maven.plugins maven-shade-plugin ${maven.shade.plugin.version} false package shade maven-clean-plugin 3.2.0 target ================================================ FILE: ci/wif/aws-lambda/src/main/java/com/snowflake/wif/aws/WifLambdaFunctionE2e.java ================================================ package com.snowflake.wif.aws; import com.amazonaws.services.lambda.runtime.Context; import com.amazonaws.services.lambda.runtime.RequestHandler; import com.amazonaws.services.lambda.runtime.events.APIGatewayProxyResponseEvent; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.JsonNode; import com.snowflake.wif.common.WifTestHelper; import java.util.HashMap; import java.util.Map; public class WifLambdaFunctionE2e implements RequestHandler { private static final ObjectMapper objectMapper = new ObjectMapper(); private static class LambdaLogger implements WifTestHelper.WifLogger { private final Context context; public LambdaLogger(Context context) { this.context = context; } @Override public void log(String message) { context.getLogger().log(message); } } @Override public APIGatewayProxyResponseEvent handleRequest(Object input, Context context) { WifTestHelper.WifLogger logger = new LambdaLogger(context); String workingDirectory = null; try { logger.log("=== WIF AWS Lambda Function E2E started ==="); logger.log("Input received: " + objectMapper.writeValueAsString(input)); Map queryParameters = extractQueryParameters(input, logger); WifTestHelper.validateQueryParameters(queryParameters); String branch = queryParameters.get("BRANCH"); String tarballUrl = WifTestHelper.buildTarballUrl(branch); workingDirectory = WifTestHelper.downloadAndExtractRepository(tarballUrl, logger); String repoFolderPath = WifTestHelper.findRepositoryFolder(workingDirectory); WifTestHelper.makeExecutable(repoFolderPath, logger); int mavenExitCode = WifTestHelper.executeMavenBuild(repoFolderPath, System.getProperty("java.io.tmpdir"), logger, queryParameters); return createResponse(mavenExitCode); } catch (Exception e) { logger.log("Error: " + e.getMessage()); return createErrorResponse(500, "Error: " + e.getMessage()); } finally { WifTestHelper.cleanupWorkingDirectory(workingDirectory, logger); } } private Map extractQueryParameters(Object input, WifTestHelper.WifLogger logger) throws Exception { JsonNode inputNode = objectMapper.valueToTree(input); // Handle Lambda Function URL format (query parameters in queryStringParameters) if (inputNode.has("queryStringParameters")) { JsonNode queryParamsNode = inputNode.get("queryStringParameters"); if (queryParamsNode != null && !queryParamsNode.isNull()) 
{ logger.log("Processing Lambda Function URL with queryStringParameters"); return objectMapper.convertValue(queryParamsNode, Map.class); } } // Handle direct invocation format (parameters at root level) if (inputNode.has("SNOWFLAKE_TEST_WIF_HOST")) { logger.log("Processing direct invocation with parameters at root level"); return objectMapper.convertValue(inputNode, Map.class); } throw new IllegalArgumentException("Invalid input format: expected Lambda Function URL event or direct invocation with required parameters"); } private APIGatewayProxyResponseEvent createResponse(int mavenExitCode) { APIGatewayProxyResponseEvent response = new APIGatewayProxyResponseEvent(); Map headers = new HashMap<>(); headers.put("Content-Type", "text/plain"); response.setHeaders(headers); String responseBody = WifTestHelper.createMavenResultMessage(mavenExitCode); if (mavenExitCode == 0) { response.setStatusCode(200); } else { response.setStatusCode(500); } response.setBody(responseBody); return response; } private APIGatewayProxyResponseEvent createErrorResponse(int statusCode, String message) { APIGatewayProxyResponseEvent response = new APIGatewayProxyResponseEvent(); Map headers = new HashMap<>(); headers.put("Content-Type", "text/plain"); response.setHeaders(headers); response.setStatusCode(statusCode); response.setBody(message); return response; } } ================================================ FILE: ci/wif/aws-lambda/test.sh ================================================ #!/bin/bash -e set -o pipefail THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" run_aws_function() { if [[ -z "$WIF_E2E_AWS_ACCESS_KEY" ]] || [[ -z "$WIF_E2E_AWS_SECRET_ACCESS_KEY" ]]; then echo "Error: WIF_E2E_AWS_ACCESS_KEY and WIF_E2E_AWS_SECRET_ACCESS_KEY environment variables must be set" return 1 fi # Clear potentially conflicting AWS environment variables unset AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_SESSION_TOKEN unset AWS_PROFILE AWS_CONFIG_FILE AWS_SHARED_CREDENTIALS_FILE # Set AWS credentials for CLI export AWS_ACCESS_KEY_ID="$WIF_E2E_AWS_ACCESS_KEY" export AWS_SECRET_ACCESS_KEY="$WIF_E2E_AWS_SECRET_ACCESS_KEY" export AWS_DEFAULT_REGION="us-west-2" export AWS_REGION="us-west-2" # Check AWS CLI version to determine the correct command format local aws_version aws_version=$(aws --version 2>&1 | head -n1) local payload_json="{\"queryStringParameters\":{\"SNOWFLAKE_TEST_WIF_HOST\":\"${SNOWFLAKE_TEST_WIF_HOST_AWS}\",\"SNOWFLAKE_TEST_WIF_ACCOUNT\":\"${SNOWFLAKE_TEST_WIF_ACCOUNT}\",\"SNOWFLAKE_TEST_WIF_PROVIDER\":\"AWS\",\"BRANCH\":\"${BRANCH}\"}}" local function_name="drivers-wif-automated-tests" local cli_response_file="/tmp/aws_cli_response_$$.json" local cli_error_file="/tmp/aws_cli_error_$$.txt" # Use different command based on AWS CLI version if [[ "$aws_version" =~ aws-cli/2\. ]]; then # AWS CLI v2 - needs --cli-binary-format flag echo "Using AWS CLI v2 format" aws lambda invoke \ --function-name "$function_name" \ --region "us-west-2" \ --cli-binary-format raw-in-base64-out \ --payload "$payload_json" \ --cli-read-timeout 1000 \ --cli-connect-timeout 60 \ "$cli_response_file" >/dev/null 2>"$cli_error_file" else # AWS CLI v1 - no --cli-binary-format flag needed, but include timeouts echo "Using AWS CLI v1 format" aws lambda invoke \ --function-name "$function_name" \ --region "us-west-2" \ --payload "$payload_json" \ --cli-read-timeout 1000 \ --cli-connect-timeout 60 \ "$cli_response_file" >/dev/null 2>"$cli_error_file" fi local invoke_result=$? 
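  # Note: a zero exit status from `aws lambda invoke` only means the CLI call
  # itself succeeded; the function's own outcome is the statusCode embedded in
  # the response payload, which is checked separately below.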
if [[ $invoke_result -eq 0 ]]; then if [[ -f "$cli_response_file" ]]; then # Check if the response indicates success (HTTP 200) if grep -q '"statusCode":200' "$cli_response_file" 2>/dev/null; then echo "AWS Lambda Function test passed (HTTP 200)" rm -f "$cli_response_file" "$cli_error_file" return 0 else echo "AWS Lambda Function test failed (non-200 status)" rm -f "$cli_response_file" "$cli_error_file" return 1 fi else echo "No response file found" rm -f "$cli_response_file" "$cli_error_file" return 1 fi else echo "AWS CLI invocation failed" if [[ -f "$cli_error_file" ]]; then echo "CLI Error:" cat "$cli_error_file" fi rm -f "$cli_response_file" "$cli_error_file" return 1 fi } if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then run_aws_function exit $? fi ================================================ FILE: ci/wif/azure-function/Dockerfile ================================================ # Build stage FROM --platform=linux/amd64 mcr.microsoft.com/azure-functions/java:4-java17-build AS build COPY azure-function /src/azure-function COPY shared /src/shared # Build the function RUN cd /src/azure-function && \ MAVEN_OPTS="-Xmx4g -Xms1g" mvn clean package -DskipTests && \ mkdir -p /home/site/wwwroot && \ cp -r target/azure-functions/drivers-wif-e2e/* /home/site/wwwroot/ # Runtime stage FROM --platform=linux/amd64 mcr.microsoft.com/azure-functions/java:4-java17 RUN apt-get update && \ apt-get install -y openjdk-17-jdk && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* ENV JAVA_HOME=/usr/lib/jvm/java-17-openjdk-amd64 \ PATH="/usr/lib/jvm/java-17-openjdk-amd64/bin:$PATH" \ AzureWebJobsScriptRoot=/home/site/wwwroot \ AzureFunctionsJobHost__Logging__Console__IsEnabled=true \ MAVEN_OPTS="-Xmx4g -Xms1g" COPY --from=build /home/site/wwwroot /home/site/wwwroot ================================================ FILE: ci/wif/azure-function/README.md ================================================ # Azure Function for WIF E2E Testing ## Deployment Steps 1. Install Azure CLI 2. Authenticate: `az login`, login in browser. 3. 
Run deployment script: `./deploy.sh` ================================================ FILE: ci/wif/azure-function/host.json ================================================ { "version": "2.0", "extensionBundle": { "id": "Microsoft.Azure.Functions.ExtensionBundle", "version": "[4.0.0, 5.0.0)" }, "functionTimeout": "00:08:00", "concurrency": { "dynamicConcurrencyEnabled": true, "snapshotPersistenceEnabled": true }, "logging": { "logLevel": { "default": "Information" } } } ================================================ FILE: ci/wif/azure-function/pom.xml ================================================ 4.0.0 com.snowflake.wif azure-function 1.0-SNAPSHOT jar Azure Java Functions UTF-8 17 1.35.0 2.2.0 drivers-wif-e2e com.microsoft.azure.functions azure-functions-java-library ${azure.functions.java.library.version} org.junit.jupiter junit-jupiter 5.4.2 test org.mockito mockito-core 2.23.4 test com.fasterxml.jackson.core jackson-databind 2.15.2 org.apache.commons commons-compress 1.28.0 org.codehaus.mojo build-helper-maven-plugin 3.4.0 generate-sources add-source ../shared org.apache.maven.plugins maven-compiler-plugin 3.11.0 ${java.version} ${java.version} ${project.build.sourceEncoding} com.microsoft.azure azure-functions-maven-plugin ${azure.functions.maven.plugin.version} ${functionAppName} package-functions package maven-clean-plugin 3.2.0 obj ================================================ FILE: ci/wif/azure-function/src/main/java/com/snowflake/wif/azure/WifAzureFunctionE2e.java ================================================ package com.snowflake.wif.azure; import com.microsoft.azure.functions.*; import com.microsoft.azure.functions.annotation.*; import com.snowflake.wif.common.WifTestHelper; import java.io.File; import java.util.Optional; public class WifAzureFunctionE2e { private static class AzureLogger implements WifTestHelper.WifLogger { private final ExecutionContext context; public AzureLogger(ExecutionContext context) { this.context = context; } @Override public void log(String message) { context.getLogger().info(message); } } @FunctionName("WifAzureFunctionE2e") public HttpResponseMessage run( @HttpTrigger( name = "req", methods = {HttpMethod.GET, HttpMethod.POST}, authLevel = AuthorizationLevel.FUNCTION) HttpRequestMessage> request, final ExecutionContext context) { WifTestHelper.WifLogger logger = new AzureLogger(context); String workingDirectory = null; try { logger.log("=== WIF Azure Function E2E started ==="); WifTestHelper.validateQueryParameters(request.getQueryParameters()); String branch = request.getQueryParameters().get("BRANCH"); String tarballUrl = WifTestHelper.buildTarballUrl(branch); // Download and extract to timestamp-based directory workingDirectory = WifTestHelper.downloadAndExtractRepository(tarballUrl, logger); String repoFolderPath = WifTestHelper.findRepositoryFolder(workingDirectory); WifTestHelper.makeExecutable(repoFolderPath, logger); int mavenExitCode = WifTestHelper.executeMavenBuild(repoFolderPath, System.getProperty("java.io.tmpdir"), logger, request.getQueryParameters()); return createResponse(request, mavenExitCode); } catch (Exception e) { logger.log("Error: " + e.getMessage()); return request.createResponseBuilder(HttpStatus.INTERNAL_SERVER_ERROR).body("Error: " + e.getMessage()).build(); } finally { WifTestHelper.cleanupWorkingDirectory(workingDirectory, logger); } } private HttpResponseMessage createResponse(HttpRequestMessage> request, int mavenExitCode) { String responseBody = WifTestHelper.createMavenResultMessage(mavenExitCode); if 
(mavenExitCode == 0) { return request.createResponseBuilder(HttpStatus.OK).body(responseBody).build(); } else { return request.createResponseBuilder(HttpStatus.INTERNAL_SERVER_ERROR).body(responseBody).build(); } } } ================================================ FILE: ci/wif/azure-function/test.sh ================================================ #!/bin/bash -e set -o pipefail run_azure_function() { echo "Running Azure Function E2E test..." local url="${AZURE_FUNCTION_BASE_URL}?code=${AZURE_FUNCTION_CODE}" url="${url}&BRANCH=${BRANCH}" url="${url}&SNOWFLAKE_TEST_WIF_HOST=${SNOWFLAKE_TEST_WIF_HOST_AZURE}" url="${url}&SNOWFLAKE_TEST_WIF_ACCOUNT=${SNOWFLAKE_TEST_WIF_ACCOUNT}" url="${url}&SNOWFLAKE_TEST_WIF_PROVIDER=AZURE" local http_code http_code=$(curl -s -o /dev/null -w "%{http_code}" --max-time 1200 "$url") if [[ "$http_code" == "200" ]]; then echo "Azure Function test passed" return 0 else echo "Azure Function test failed with HTTP $http_code" return 1 fi } if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then run_azure_function exit $? fi ================================================ FILE: ci/wif/gcp-function/README.md ================================================ # GCP Cloud Function for WIF E2E Testing ## Deployment Steps 1. Install Google Cloud SDK. 2. Authenticate: `gcloud auth login`, login in browser. 3. Run deployment script: `./deploy.sh `. Use credentials: WIF E2E GCP FUNCTION DEPLOY ================================================ FILE: ci/wif/gcp-function/pom.xml ================================================ 4.0.0 com.snowflake.wif gcp-function 1.0-SNAPSHOT jar GCP Cloud Functions UTF-8 17 1.1.0 0.11.0 com.snowflake.wif.gcp.WifGcpFunctionE2e drivers-wif-e2e-gcp us-central1 1024MB 480 com.google.cloud.functions functions-framework-api ${functions.framework.version} provided org.junit.jupiter junit-jupiter 5.4.2 test org.mockito mockito-core 2.23.4 test com.fasterxml.jackson.core jackson-databind 2.15.2 org.apache.commons commons-compress 1.28.0 org.codehaus.mojo build-helper-maven-plugin 3.4.0 generate-sources add-source ../shared org.apache.maven.plugins maven-compiler-plugin 3.8.1 ${java.version} ${java.version} com.google.cloud.functions function-maven-plugin ${function.maven.plugin.version} ${function.target} 8080 org.apache.maven.plugins maven-shade-plugin 3.4.1 package shade false target/deploy ${function.name} ${function.target} ================================================ FILE: ci/wif/gcp-function/src/main/java/com/snowflake/wif/gcp/WifGcpFunctionE2e.java ================================================ package com.snowflake.wif.gcp; import com.google.cloud.functions.HttpFunction; import com.google.cloud.functions.HttpRequest; import com.google.cloud.functions.HttpResponse; import com.snowflake.wif.common.WifTestHelper; import java.io.BufferedWriter; import java.io.IOException; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.logging.Logger; import static java.net.URLDecoder.*; import static java.util.stream.Collectors.*; public class WifGcpFunctionE2e implements HttpFunction { private static final Logger logger = Logger.getLogger(WifGcpFunctionE2e.class.getName()); private static class GcpLogger implements WifTestHelper.WifLogger { private final Logger logger; public GcpLogger(Logger logger) { this.logger = logger; } @Override public void log(String message) { logger.info(message); } } private static String createGcpResponseMessage(int mavenExitCode, String additionalInfo) { StringBuilder response = new StringBuilder(); 
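    // Compose a plain-text report: a GCP-specific headline, the shared Maven
    // result message from WifTestHelper, optional caller-supplied details,
    // and a footer identifying the runtime environment.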
response.append("=== WIF GCP Function E2E Test Results ===\n"); response.append(WifTestHelper.createMavenResultMessage(mavenExitCode)); if (additionalInfo != null && !additionalInfo.trim().isEmpty()) { response.append("\n\nAdditional Info:\n"); response.append(additionalInfo); } response.append("\n\nFunction Runtime: Google Cloud Functions (Java 17)"); response.append("\nExecution Environment: GCP"); return response.toString(); } private static void performGcpCleanup(String workingDirectory, WifTestHelper.WifLogger logger) { logger.log("Performing GCP-specific cleanup..."); try { WifTestHelper.cleanupWorkingDirectory(workingDirectory, logger); logger.log("GCP cleanup completed successfully"); } catch (Exception e) { logger.log("Warning: GCP cleanup encountered issues: " + e.getMessage()); } } @Override public void service(HttpRequest request, HttpResponse response) throws IOException { WifTestHelper.WifLogger wifLogger = new GcpLogger(logger); String workingDirectory = null; try { wifLogger.log("=== WIF GCP Function E2E started ==="); Map queryParameters = extractQueryParameters(request); String branch = queryParameters.get("BRANCH"); String tarballUrl = WifTestHelper.buildTarballUrl(branch); // Download and extract to timestamp-based directory workingDirectory = WifTestHelper.downloadAndExtractRepository(tarballUrl, wifLogger); String repoFolderPath = WifTestHelper.findRepositoryFolder(workingDirectory); WifTestHelper.makeExecutable(repoFolderPath, wifLogger); int mavenExitCode = WifTestHelper.executeMavenBuild(repoFolderPath, System.getProperty("java.io.tmpdir"), wifLogger, queryParameters); createResponse(response, mavenExitCode, wifLogger); } catch (Exception e) { wifLogger.log("Error occurred: " + e.getMessage()); if (e.getCause() != null) { wifLogger.log("Caused by: " + e.getCause().getMessage()); } createErrorResponse(response, 500, "Error: " + e.getMessage()); } finally { performGcpCleanup(workingDirectory, wifLogger); } } private Map extractQueryParameters(HttpRequest request) throws IOException { Map params = new HashMap<>(); // Handle GET request query parameters Map> queryParams = request.getQueryParameters(); for (Map.Entry> entry : queryParams.entrySet()) { if (!entry.getValue().isEmpty()) { params.put(entry.getKey(), entry.getValue().get(0)); } } // Handle POST request form data if ("POST".equalsIgnoreCase(request.getMethod())) { String contentType = request.getContentType().orElse(""); if (contentType.contains("application/x-www-form-urlencoded")) { String body = request.getReader().lines().collect(joining("\n")); if (body != null && !body.trim().isEmpty()) { String[] pairs = body.split("&"); for (String pair : pairs) { String[] keyValue = pair.split("=", 2); if (keyValue.length == 2) { try { String key = decode(keyValue[0], "UTF-8"); String value = decode(keyValue[1], "UTF-8"); params.put(key, value); } catch (Exception e) { // Log but continue with other parameters logger.warning("Failed to decode parameter: " + pair); } } } } } } return params; } private void createResponse(HttpResponse response, int mavenExitCode, WifTestHelper.WifLogger logger) throws IOException { String responseBody = createGcpResponseMessage(mavenExitCode, "GCP Function execution completed"); logger.log("Maven build completed with exit code: " + mavenExitCode); if (mavenExitCode == 0) { response.setStatusCode(200); logger.log("Returning success response"); } else { response.setStatusCode(500); logger.log("Returning error response due to Maven build failure"); } response.setContentType("text/plain; 
charset=utf-8"); try (BufferedWriter writer = response.getWriter()) { writer.write(responseBody); } } private void createErrorResponse(HttpResponse response, int statusCode, String message) throws IOException { response.setStatusCode(statusCode); response.setContentType("text/plain; charset=utf-8"); try (BufferedWriter writer = response.getWriter()) { writer.write(message); } } } ================================================ FILE: ci/wif/gcp-function/test.sh ================================================ #!/bin/bash -e set -o pipefail generate_gcp_jwt() { # For ID token request, aud should be Google's token endpoint, target_audience is the service local target_audience="${GCP_FUNCTION_BASE_URL}" local iat=$(date +%s) local exp=$((iat + 3600)) # 1 hour expiration # Create JWT header local header='{"alg":"RS256","typ":"JWT"}' local header_b64=$(echo -n "$header" | base64 -w 0 | tr -d '=' | tr '/+' '_-') # Create JWT payload for ID token reques local payload="{\"iss\":\"$GCP_SERVICE_ACCOUNT\",\"aud\":\"https://oauth2.googleapis.com/token\",\"target_audience\":\"$target_audience\",\"exp\":$exp,\"iat\":$iat}" local payload_b64=$(echo -n "$payload" | base64 -w 0 | tr -d '=' | tr '/+' '_-') # Create signature using private key from file local to_sign="$header_b64.$payload_b64" local signature=$(echo -n "$to_sign" | openssl dgst -sha256 -sign "$RSA_GCP_FUNCTION_KEY" | base64 -w 0 | tr -d '=' | tr '/+' '_-') echo "$header_b64.$payload_b64.$signature" } get_gcp_id_token() { local jwt="$1" local response=$(curl -s -X POST https://oauth2.googleapis.com/token \ -H "Content-Type: application/x-www-form-urlencoded" \ -d "grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer&assertion=$jwt") echo "$response" | grep -o '"id_token":"[^"]*"' | cut -d'"' -f4 } run_gcp_function() { echo "Running GCP Cloud Function E2E test..." local jwt=$(generate_gcp_jwt) if [[ -z "$jwt" ]]; then echo "Error: Failed to generate JWT" return 1 fi local id_token=$(get_gcp_id_token "$jwt") if [[ -z "$id_token" ]]; then echo "Error: Failed to get GCP ID token" return 1 fi local url="${GCP_FUNCTION_BASE_URL}?SNOWFLAKE_TEST_WIF_HOST=${SNOWFLAKE_TEST_WIF_HOST_GCP}" url="${url}&SNOWFLAKE_TEST_WIF_ACCOUNT=${SNOWFLAKE_TEST_WIF_ACCOUNT}" url="${url}&SNOWFLAKE_TEST_WIF_PROVIDER=GCP" url="${url}&BRANCH=${BRANCH}" url="${url}&IS_GCP_FUNCTION=true" local http_code http_code=$(curl -s -o /dev/null -w "%{http_code}" --max-time 1200 \ -H "Authorization: Bearer $id_token" \ "$url") if [[ "$http_code" == "200" ]]; then echo "GCP Cloud Function test passed" return 0 else echo "GCP Cloud Function test failed with HTTP $http_code" return 1 fi } if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then run_gcp_function exit $? 
fi

================================================
FILE: ci/wif/parameters/rsa_wif_aws_azure.gpg
================================================
[Binary file not shown]

================================================
FILE: ci/wif/shared/com/snowflake/wif/common/WifTestHelper.java
================================================
package com.snowflake.wif.common;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.time.Duration;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;
import java.util.zip.GZIPInputStream;
import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;

public class WifTestHelper {

  private static String generateUniqueSessionId() {
    // Timestamp plus a random suffix so parallel executions never collide.
    return System.currentTimeMillis() + "-" + UUID.randomUUID().toString().substring(0, 8);
  }

  public static void validateQueryParams(Map<String, String> queryParams) {
    if (queryParams == null) {
      throw new IllegalArgumentException("Missing query parameters");
    }
    String wifHost = queryParams.get("SNOWFLAKE_TEST_WIF_HOST");
    String wifAccount = queryParams.get("SNOWFLAKE_TEST_WIF_ACCOUNT");
    String wifProvider = queryParams.get("SNOWFLAKE_TEST_WIF_PROVIDER");
    String branch = queryParams.get("BRANCH");
    if (wifHost == null) {
      throw new IllegalArgumentException("Missing required query parameter: SNOWFLAKE_TEST_WIF_HOST");
    }
    if (wifAccount == null) {
      throw new IllegalArgumentException("Missing required query parameter: SNOWFLAKE_TEST_WIF_ACCOUNT");
    }
    if (wifProvider == null) {
      throw new IllegalArgumentException("Missing required query parameter: SNOWFLAKE_TEST_WIF_PROVIDER");
    }
    if (branch == null) {
      throw new IllegalArgumentException("Missing required query parameter: BRANCH");
    }
  }

  public static String buildTarballUrl(String branch) {
    if (Pattern.matches("^PR-\\d+$", branch)) {
      String prNumber = branch.substring(3);
      return "https://github.com/snowflakedb/snowflake-jdbc/archive/refs/pull/" + prNumber + "/head.tar.gz";
    } else {
      return "https://github.com/snowflakedb/snowflake-jdbc/archive/refs/heads/" + branch + ".tar.gz";
    }
  }

  public static String findRepositoryFolder(String workingDirectory) {
    if (workingDirectory == null) {
      throw new RuntimeException("Working directory must be specified");
    }
    File workDir = new File(workingDirectory);
    File[] files = workDir.listFiles();
    if (files != null) {
      for (File file : files) {
        if (file.isDirectory() && file.getName().startsWith("snowflake-jdbc-")) {
          return file.getAbsolutePath();
        }
      }
    }
    throw new RuntimeException("Driver repository folder not found in: " + workingDirectory);
  }

  public static int executeMavenBuild(
      String repoFolderPath, String tempDirectory, WifLogger logger, Map<String, String> queryParams) {
    Process process = null;
    File mavenRepo = null;
    File workspace = null;
    File mavenHomeDir = null;
    try {
      // Use unique session ID to avoid conflicts with parallel test runs
      String sessionId = generateUniqueSessionId();
      mavenRepo = new File(tempDirectory, "maven-repo-" + sessionId);
      workspace = new File(tempDirectory, "workspace-" + sessionId);
      mavenRepo.mkdirs();
      workspace.mkdirs();
      ProcessBuilder pb =
          new ProcessBuilder(
              "bash",
              "-c",
              "cd " + repoFolderPath + " && "
                  + "./mvnw -Dmaven.repo.local=" + mavenRepo.getAbsolutePath() + " "
                  + "-DjenkinsIT "
                  + "-Dnet.snowflake.jdbc.temporaryCredentialCacheDir=" + workspace.getAbsolutePath() + " "
                  + "-Dnet.snowflake.jdbc.ocspResponseCacheDir=" + workspace.getAbsolutePath() + " "
                  + "-Djava.io.tmpdir=" + workspace.getAbsolutePath() + " "
                  + "-Djacoco.skip.instrument=true "
                  + "-Dskip.unitTests=true "
                  + "-DintegrationTestSuites=WIFTestSuite "
                  + "-Dmaven.artifact.threads=4 "
                  + "-Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn "
                  + "-Dnot-self-contained-jar "
                  + "-Dmaven.test.failure.ignore=false "
                  + "-Dmaven.compile.fork=false "
                  + "-Dmaven.javadoc.skip=true "
                  + "-Dmaven.source.skip=true "
                  + "-Dcheckstyle.skip=true "
                  + "-Dspotbugs.skip=true "
                  + "-Denforcer.skip=true "
                  + "-T 2C "
                  + "test-compile verify --batch-mode --show-version --fail-fast --no-transfer-progress");
      pb.redirectErrorStream(true);
      // Redirect all Maven directories to temp directory to avoid read-only filesystem issues
      String mavenHome = tempDirectory + "/maven-home-" + sessionId;
      mavenHomeDir = new File(mavenHome);
      mavenHomeDir.mkdirs();
      logger.log("Setting Maven home to: " + mavenHome);
      pb.environment().put("MAVEN_USER_HOME", mavenHome);
      pb.environment().put("HOME", tempDirectory);
      pb.environment().put("USER_HOME", tempDirectory);
      pb.environment().put(
          "MAVEN_OPTS",
          "-Xmx2g -Xms512m -Duser.home=" + tempDirectory + " -Djava.io.tmpdir=" + tempDirectory);
      pb.environment().put("SNOWFLAKE_TEST_WIF_HOST", queryParams.get("SNOWFLAKE_TEST_WIF_HOST"));
      pb.environment().put("SNOWFLAKE_TEST_WIF_ACCOUNT", queryParams.get("SNOWFLAKE_TEST_WIF_ACCOUNT"));
      pb.environment().put("SNOWFLAKE_TEST_WIF_PROVIDER", queryParams.get("SNOWFLAKE_TEST_WIF_PROVIDER"));
      pb.environment().put("SF_ENABLE_EXPERIMENTAL_AUTHENTICATION", "true");
      pb.environment().put("SF_ENABLE_WIF_AWS_EXTERNAL_ID", "true");
      pb.environment().put("IS_GCP_FUNCTION", queryParams.getOrDefault("IS_GCP_FUNCTION", "false"));
      String externalId = queryParams.get("SNOWFLAKE_TEST_WIF_AWS_EXTERNAL_ID");
      if (externalId != null) {
        pb.environment().put("SNOWFLAKE_TEST_WIF_AWS_EXTERNAL_ID", externalId);
      }
      process = pb.start();
      final InputStream processInputStream = process.getInputStream();
      Thread outputThread =
          new Thread(
              () -> {
                try (BufferedReader reader =
                    new BufferedReader(new InputStreamReader(processInputStream))) {
                  String line;
                  while ((line = reader.readLine()) != null) {
                    logger.log(line);
                  }
                } catch (IOException e) {
                  logger.log("Error reading Maven output: " + e.getMessage());
                }
              });
      outputThread.start();
      if (!process.waitFor(6, TimeUnit.MINUTES)) {
        process.destroyForcibly();
        return -2; // timeout
      }
      return process.exitValue();
    } catch (Exception e) {
      logger.log("Maven build error: " + e.getMessage());
      if (process != null && process.isAlive()) {
        process.destroyForcibly();
      }
      return -1;
    } finally {
      if (process != null && process.isAlive()) {
        process.destroyForcibly();
        try {
          process.waitFor(10, TimeUnit.SECONDS);
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
        }
      }
      // Clean up Maven temporary directories
      cleanupMavenTempDirectories(mavenRepo, workspace, mavenHomeDir, logger);
    }
  }

  private static void cleanupMavenTempDirectories(
      File mavenRepo, File workspace, File mavenHomeDir, WifLogger logger) {
    logger.log("Cleaning up Maven temporary directories...");
    cleanupDirectory(mavenRepo, "Maven repository", logger);
    cleanupDirectory(workspace, "workspace", logger);
    cleanupDirectory(mavenHomeDir, "Maven home", logger);
    logger.log("Maven temporary directories cleanup completed");
  }

  private static void cleanupDirectory(File directory, String description, WifLogger logger) {
    if (directory != null && directory.exists()) {
      try {
        ProcessBuilder pb = new ProcessBuilder("rm", "-rf", directory.getAbsolutePath());
        Process process = pb.start();
        int exitCode = process.waitFor();
        if (exitCode == 0) {
          logger.log("Successfully cleaned up " + description + ": " + directory.getAbsolutePath());
        } else {
          logger.log("Warning: Failed to clean up " + description + " (exit code " + exitCode + "): "
              + directory.getAbsolutePath());
        }
      } catch (Exception e) {
        logger.log("Warning: Exception during " + description + " cleanup: " + e.getMessage());
      }
    } else if (directory != null) {
      logger.log("Directory already cleaned or doesn't exist: " + directory.getAbsolutePath());
    }
  }

  /**
   * Makes mvnw and check_content.sh executable after downloading repository. This ensures the
   * Maven wrapper and CI scripts can be executed in cloud environments.
   *
   * @param repoFolderPath Path to the repository folder
   * @param logger Logger for output
   */
  public static void makeExecutable(String repoFolderPath, WifLogger logger) {
    try {
      File mvnwFile = new File(repoFolderPath, "mvnw");
      if (mvnwFile.exists()) {
        boolean wasExecutable = mvnwFile.canExecute();
        boolean success = mvnwFile.setExecutable(true, false);
        logger.log(String.format(
            "Found mvnw: %s (was executable: %s, set executable: %s, now executable: %s)",
            mvnwFile.getAbsolutePath(), wasExecutable, success, mvnwFile.canExecute()));
        mvnwFile.setReadable(true, false);
      } else {
        logger.log("Warning: mvnw file not found at " + mvnwFile.getAbsolutePath());
      }
      File checkContentFile = new File(repoFolderPath, "ci/scripts/check_content.sh");
      if (checkContentFile.exists()) {
        boolean wasExecutable = checkContentFile.canExecute();
        boolean success = checkContentFile.setExecutable(true, false);
        logger.log(String.format(
            "Found check_content.sh: %s (was executable: %s, set executable: %s, now executable: %s)",
            checkContentFile.getAbsolutePath(), wasExecutable, success, checkContentFile.canExecute()));
        checkContentFile.setReadable(true, false);
      } else {
        logger.log("Warning: check_content.sh file not found at " + checkContentFile.getAbsolutePath());
      }
    } catch (Exception e) {
      logger.log("Warning: Failed to fix file permissions: " + e.getMessage());
    }
  }

  public static void cleanupWorkingDirectory(String workingDirectory, WifLogger logger) {
    try {
      if (workingDirectory == null) {
        logger.log("No working directory to clean up");
        return;
      }
      File workDir = new File(workingDirectory);
      if (!workDir.exists()) {
        logger.log("Working directory does not exist: " + workingDirectory);
        return;
      }
      logger.log("Cleaning up working directory: " + workingDirectory);
      try {
        ProcessBuilder pb = new ProcessBuilder("rm", "-rf", workingDirectory);
        Process process = pb.start();
        int exitCode = process.waitFor();
        if (exitCode == 0) {
          logger.log("Successfully cleaned up working directory");
        } else {
          logger.log("rm -rf command failed with exit code: " + exitCode);
        }
      } catch (Exception e) {
        logger.log("rm -rf command failed: " + e.getMessage());
      }
      // Also clean up any truly old orphaned Maven directories (older than 1 hour)
      cleanupOrphanedMavenDirectories(logger);
    } catch (Exception e) {
      logger.log("Working directory cleanup warning: " + e.getMessage());
    }
  }

  /**
   * Clean up any orphaned Maven temporary directories that might have been left behind from
   * previous failed executions. This is a defensive cleanup measure that only removes directories
   * older than 1 hour to avoid conflicts with parallel test runs.
   */
  private static void cleanupOrphanedMavenDirectories(WifLogger logger) {
    try {
      String tempDir = System.getProperty("java.io.tmpdir");
      File tempDirectory = new File(tempDir);
      if (!tempDirectory.exists() || !tempDirectory.isDirectory()) {
        return;
      }
      logger.log("Scanning for old orphaned Maven directories in: " + tempDir);
      File[] files = tempDirectory.listFiles();
      if (files != null) {
        int cleanedCount = 0;
        long oneHourAgo = System.currentTimeMillis() - (60 * 60 * 1000); // 1 hour ago
        for (File file : files) {
          if (file.isDirectory()
              && (file.getName().startsWith("maven-repo-")
                  || file.getName().startsWith("workspace-")
                  || file.getName().startsWith("maven-home-")
                  || file.getName().startsWith("wif-function-"))) {
            // Only clean up directories older than 1 hour to avoid interfering with parallel runs
            if (file.lastModified() < oneHourAgo) {
              cleanupDirectory(file, "old orphaned directory", logger);
              cleanedCount++;
            } else {
              logger.log("Skipping recent directory (likely from parallel run): " + file.getName());
            }
          }
        }
        if (cleanedCount > 0) {
          logger.log("Cleaned up " + cleanedCount + " old orphaned Maven/WIF directories");
        } else {
          logger.log("No old orphaned Maven/WIF directories found");
        }
      }
    } catch (Exception e) {
      logger.log("Warning: Failed to clean up orphaned directories: " + e.getMessage());
    }
  }

  public static String downloadAndExtractRepository(String tarballUrl, WifLogger logger)
      throws IOException, InterruptedException {
    // Create unique session-based directory for complete isolation
    String sessionId = generateUniqueSessionId();
    String workDir = "/tmp/wif-function-" + sessionId;
    File workingDirectory = new File(workDir);
    if (!workingDirectory.exists()) {
      workingDirectory.mkdirs();
    }
    logger.log("Extracting to timestamp-based directory: " + workingDirectory.getAbsolutePath());
    logger.log("Downloading using Java HTTP client: " + tarballUrl);
    try {
      downloadAndExtractWithJava(tarballUrl, workingDirectory, logger);
      logger.log("Download and extraction completed successfully");
      return workingDirectory.getAbsolutePath();
    } catch (Exception e) {
      throw new RuntimeException("Failed to download and extract: " + e.getMessage(), e);
    }
  }

  public static void downloadAndExtractWithJava(
      String tarballUrl, File workingDirectory, WifLogger logger)
      throws IOException, InterruptedException {
    logger.log("Creating HTTP client...");
    HttpClient client =
        HttpClient.newBuilder()
            .connectTimeout(Duration.ofSeconds(30))
            .followRedirects(HttpClient.Redirect.ALWAYS)
            .build();
    HttpRequest request =
        HttpRequest.newBuilder()
            .uri(URI.create(tarballUrl))
            .timeout(Duration.ofMinutes(5))
            .header("User-Agent", "WIF-Function")
            .GET()
            .build();
    logger.log("Sending HTTP request...");
    try {
      HttpResponse<InputStream> response =
          client.send(request, HttpResponse.BodyHandlers.ofInputStream());
      if (response.statusCode() != 200) {
        throw new IOException("HTTP " + response.statusCode() + " when downloading " + tarballUrl);
      }
      logger.log("HTTP response received, extracting tar.gz...");
      try (InputStream responseStream = response.body();
          GZIPInputStream gzipStream = new GZIPInputStream(responseStream);
          TarArchiveInputStream tarStream = new TarArchiveInputStream(gzipStream)) {
        TarArchiveEntry entry;
        while ((entry = tarStream.getNextTarEntry()) != null) {
          File outputFile = createOutputFile(workingDirectory, entry.getName());
          if (entry.isDirectory()) {
            outputFile.mkdirs();
          } else {
            outputFile.getParentFile().mkdirs();
            try (FileOutputStream fos = new FileOutputStream(outputFile)) {
              tarStream.transferTo(fos);
            }
          }
        }
      }
      logger.log("Tar extraction completed");
    } catch (Exception e) {
      logger.log("Download/extract error: " + e.getMessage());
      throw new IOException("Failed to download and extract: " + e.getMessage(), e);
    }
  }

  private static File createOutputFile(File workingDirectory, String entryName) throws IOException {
    File outputFile = new File(workingDirectory, entryName);
    String workingDirCanonical = workingDirectory.getCanonicalPath();
    String outputFileCanonical = outputFile.getCanonicalPath();
    if (!outputFileCanonical.startsWith(workingDirCanonical + File.separator)
        && !outputFileCanonical.equals(workingDirCanonical)) {
      throw new IOException("Archive entry '" + entryName
          + "' would extract outside the target directory: " + outputFileCanonical);
    }
    return outputFile;
  }

  public static String createMavenResultMessage(int mavenExitCode) {
    if (mavenExitCode == 0) {
      return "WIF tests completed successfully";
    } else {
      String mavenResult;
      String testResults;
      if (mavenExitCode == -2) {
        mavenResult = "TIMEOUT (exit code: " + mavenExitCode + ")";
        testResults = "Build timed out after 6 minutes";
      } else {
        mavenResult = "FAILED (exit code: " + mavenExitCode + ")";
        testResults = "Build or tests failed";
      }
      return String.format("MAVEN_RESULT=%s\nTEST_STATUS=%s", mavenResult, testResults);
    }
  }

  public interface WifLogger {
    void log(String message);
  }
}
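The helper above is driven by the cloud-specific WIF entry points under ci/wif/. A minimal sketch of how the pieces are meant to chain together, assuming a hypothetical driver class in the same package (the class name, parameter map, and "/tmp" temp directory below are illustrative, not part of the repository):

import java.util.Map;

// Hypothetical driver; only the WifTestHelper calls themselves come from the file above.
public class WifE2eSketch {
  public static String runOnce(Map<String, String> queryParams) throws Exception {
    WifTestHelper.WifLogger logger = System.out::println; // WifLogger is a single-method interface
    // 1. Fail fast on missing parameters before any download work happens.
    WifTestHelper.validateQueryParams(queryParams);
    // 2. Resolve the tarball URL for either a PR ref ("PR-123") or a branch name.
    String url = WifTestHelper.buildTarballUrl(queryParams.get("BRANCH"));
    String workDir = WifTestHelper.downloadAndExtractRepository(url, logger);
    try {
      // 3. GitHub tarballs unpack into a snowflake-jdbc-* folder; locate it.
      String repo = WifTestHelper.findRepositoryFolder(workDir);
      // 4. Extraction does not preserve the execute bit, so restore it on mvnw.
      WifTestHelper.makeExecutable(repo, logger);
      // 5. Run the WIF integration suite; 0 = pass, -2 = the 6-minute timeout.
      int exitCode = WifTestHelper.executeMavenBuild(repo, "/tmp", logger, queryParams);
      return WifTestHelper.createMavenResultMessage(exitCode);
    } finally {
      WifTestHelper.cleanupWorkingDirectory(workDir, logger);
    }
  }
}

The real handlers pass whatever query parameters the test driver supplies; the -2 timeout convention comes from executeMavenBuild above.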
================================================
FILE: ci/wif/test_wif.sh
================================================
#!/bin/bash -e
set -o pipefail
export SF_ENABLE_EXPERIMENTAL_AUTHENTICATION=true
./mvnw -Dmaven.repo.local=/tmp/maven-repo \
  -DjenkinsIT \
  -Dnet.snowflake.jdbc.temporaryCredentialCacheDir=/tmp/workspace \
  -Dnet.snowflake.jdbc.ocspResponseCacheDir=/tmp/workspace \
  -Djava.io.tmpdir=/tmp/workspace \
  -Djacoco.skip.instrument=true \
  -Dskip.unitTests=true \
  -DintegrationTestSuites=WIFTestSuite \
  -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn \
  -Dnot-self-contained-jar \
  -Denforcer.skip=true \
  -Dmaven.javadoc.skip=true \
  verify \
  --batch-mode --show-version
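Both test_wif.sh and executeMavenBuild select tests with -DintegrationTestSuites=WIFTestSuite. For orientation, a minimal sketch of what a JUnit 5 suite class of that shape can look like, using the junit-platform-suite API that the parent POM declares as a test dependency; the package name and tag below are assumptions, not the repository's actual suite definition:

import org.junit.platform.suite.api.IncludeTags;
import org.junit.platform.suite.api.SelectPackages;
import org.junit.platform.suite.api.Suite;

// Illustrative only: the real WIFTestSuite lives in the driver's test sources.
@Suite
@SelectPackages("net.snowflake.client") // assumed root package for the integration tests
@IncludeTags("wif")                     // assumed tag; the real suite may select differently
public class WIFTestSuite {}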
================================================
FILE: codecov/codecov
================================================
[File too large to display: 41.9 MB]

================================================
FILE: codecov/codecov.SHA256SUM
================================================
0c9b79119b0d8dbe7aaf460dc3bd7c3094ceda06e5ae32b0d11a8ff56e2cc5c5 codecov

================================================
FILE: codecov/codecov.SHA256SUM.sig
================================================
-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEJwNOf9uFDgu8LGL/gGuyiu13mGkFAmSRyEgACgkQgGuyiu13
mGnHdQ//WMx0TS7fJlgo04LCD9tQj3quUFm7oPMuDs8CzSx22W0V9FmPiN7s1uYI
iKnRd32LW0a9eIymhyz1Tv3HXV4k61pruVVNq5ghx24i5jtDqfnGDXNhDWxo/Dk+
62v4/17dcFM+QPBsXvXQkaUqXvkmenn7PXSqCtztcOa54MFEZo5RRSm+3JbKoLYc
pB7yTG6gC+ZMYNoEoQG0Ax+lfYVUBdesbst0nXdrMzOs8134KPMazccO2EmEIPUA
r3b2KQzz89kt7tvTEthycjOu12ZIUYOftdU3XEv1VjdfsVJs4BlIhPTzRJ0tYYoO
dcNBwvNbreDITpikM3m6zzxkzbPsLM+bk2MURzQR6wWRPtG+2tMS9a7bV0O4Qufk
9kzk6NG3i+z/vwwUYzcRTrDWQurrr4hDNGKfHWVHkgoVXCZBQc8X3wCiYwjLovP5
HDChwPK0liQgHJx2FG+/lVtFUbGQ0I9OvWb02VizRInFxr4hWt14Nd3DJNBaOzuT
T/u6IecHq6xSqV+Cj/qcD9Gxclj1Ge7DrLSRlv2hwrvph4PA4ehTY0lQTMSvgwFf
m5ZVwwW8QwPefCdbvcQoLFQQN1U6RxX1bjxaZKtmSOwxdkpgQEiWrdFirwZhNA9u
mdOlQA0SnvAjopcJYQqMSkgLaMt4cJDIxvxO5ihlNSsgcRx32zY=
=oOVB
-----END PGP SIGNATURE-----

================================================
FILE: codecov/codecov_aarch64
================================================
[File too large to display: 38.6 MB]
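The codecov binaries ship with a SHA-256 checksum file and a detached PGP signature over it. On the CI side this kind of check is normally a one-liner (sha256sum -c codecov.SHA256SUM); purely for illustration, a self-contained Java sketch of the checksum half of the verification (paths are assumptions, and PGP signature verification is not covered here):

import java.nio.file.Files;
import java.nio.file.Paths;
import java.security.MessageDigest;

// Recomputes SHA-256 of the codecov binary and compares it with the published digest.
public class ChecksumSketch {
  public static void main(String[] args) throws Exception {
    byte[] binary = Files.readAllBytes(Paths.get("codecov/codecov"));
    byte[] digest = MessageDigest.getInstance("SHA-256").digest(binary);
    StringBuilder hex = new StringBuilder();
    for (byte b : digest) {
      hex.append(String.format("%02x", b));
    }
    // The first token of codecov.SHA256SUM is the expected digest, the second the file name.
    String expected =
        new String(Files.readAllBytes(Paths.get("codecov/codecov.SHA256SUM"))).trim().split("\\s+")[0];
    System.out.println(hex.toString().equals(expected) ? "checksum OK" : "checksum MISMATCH");
  }
}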
================================================
FILE: codecov.yml
================================================
parsers:
  jacoco:
    partials_as_hits: true

================================================
FILE: dependencies/Readme.md
================================================
Arrow dependencies are built from the internal branch `upgradeTo17.0.0-v3`. This build has the AIX fix applied and uses the custom logger instead of the slf4j logger.

================================================
FILE: dependencies/arrow-memory-17.0.0.pom
================================================
<project>
  <modelVersion>4.0.0</modelVersion>
  <parent>
    <groupId>org.apache.arrow</groupId>
    <artifactId>arrow-java-root</artifactId>
    <version>17.0.0</version>
  </parent>
  <artifactId>arrow-memory</artifactId>
  <name>Arrow Memory</name>
  <packaging>pom</packaging>
  <modules>
    <module>memory-core</module>
    <module>memory-unsafe</module>
    <module>memory-netty-buffer-patch</module>
  </modules>
</project>

================================================
FILE: fat-jar-test-app/pom.xml
================================================
4.0.0 net.snowflake fat-jar-test-app 1.0-SNAPSHOT 8 8 UTF-8 net.snowflake.FatJarTestApp org.codehaus.mojo exec-maven-plugin 3.1.0 java -classpath ${exec.mainClass} compile default true net.snowflake snowflake-jdbc ignored system ${project.basedir}/../target/snowflake-jdbc.jar fips org.codehaus.mojo build-helper-maven-plugin 3.4.0 add-fips-sources generate-sources add-source ${project.basedir}/src/main/fips-java net.snowflake snowflake-jdbc-fips ignored system ${project.basedir}/../FIPS/target/snowflake-jdbc-fips.jar org.bouncycastle bc-fips 1.0.2.6 org.bouncycastle bcpkix-fips 1.0.8 slf4j org.codehaus.mojo exec-maven-plugin -Dnet.snowflake.jdbc.loggerImpl=net.snowflake.client.log.SLF4JLogger -classpath ${exec.mainClass} org.slf4j slf4j-api 2.0.13 ch.qos.logback logback-classic 1.3.14 ch.qos.logback logback-core 1.3.14

================================================
FILE: fat-jar-test-app/run.sh
================================================
#!/bin/bash -ex
#
# Test fat jar by running a sample app that connects to Snowflake
#
set -o pipefail
THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
JDBC_ROOT="$(cd "${THIS_DIR}/.."
&& pwd)" JDBC_JAR=$1 LOGGING_MODE=${2:-} mkdir -p $THIS_DIR/target # Setup GPG home directory if running in GitHub Actions if [[ -n "$GITHUB_ACTIONS" ]]; then source $JDBC_ROOT/ci/scripts/setup_gpg.sh # Select the encrypted files based on CLOUD_PROVIDER if [[ "$CLOUD_PROVIDER" == "AZURE" ]]; then ENCODED_PARAMETERS_FILE=.github/workflows/parameters_azure.json.gpg ENCODED_RSA_KEY_FILE=.github/workflows/rsa_keys/rsa_key_jdbc_azure.p8.gpg elif [[ "$CLOUD_PROVIDER" == "GCP" ]]; then ENCODED_PARAMETERS_FILE=.github/workflows/parameters_gcp.json.gpg ENCODED_RSA_KEY_FILE=.github/workflows/rsa_keys/rsa_key_jdbc_gcp.p8.gpg elif [[ "$CLOUD_PROVIDER" == "AWS" ]]; then ENCODED_PARAMETERS_FILE=.github/workflows/parameters_aws.json.gpg ENCODED_RSA_KEY_FILE=.github/workflows/rsa_keys/rsa_key_jdbc_aws.p8.gpg else echo "[ERROR] Unknown cloud provider: $CLOUD_PROVIDER" exit 1 fi # Decrypt parameters file echo "[INFO] Decrypting parameters file for $CLOUD_PROVIDER" gpg --quiet --batch --yes --decrypt --passphrase="$PARAMETERS_SECRET" \ --output "$JDBC_ROOT/parameters.json" "$JDBC_ROOT/$ENCODED_PARAMETERS_FILE" # Decrypt RSA key file echo "[INFO] Decrypting RSA key file for $CLOUD_PROVIDER" gpg --quiet --batch --yes --decrypt --passphrase="$JDBC_PRIVATE_KEY_SECRET" \ --output "$THIS_DIR/target/rsa_key_jdbc.p8" "$JDBC_ROOT/$ENCODED_RSA_KEY_FILE" # Parse parameters.json and export as SNOWFLAKE_TEST_* environment variables echo "[INFO] Setting up environment variables from parameters.json" eval $(jq -r '.testconnection | to_entries | map("export \(.key)=\(.value|tostring)")|.[]' "$JDBC_ROOT/parameters.json") # Set RSA key authentication export SNOWFLAKE_TEST_PRIVATE_KEY_FILE="$THIS_DIR/target/rsa_key_jdbc.p8" export SNOWFLAKE_TEST_AUTHENTICATOR="SNOWFLAKE_JWT" # Print env vars (excluding sensitive ones) env | grep SNOWFLAKE_ | grep -v -E "(PASS|KEY|SECRET|TOKEN)" | sort fi echo "[INFO] Building fat jar" cd "$JDBC_ROOT" if [ "$JDBC_JAR" == "fips" ] ; then MAVEN_PROFILE="fips" ./mvnw -f FIPS/pom.xml package -DskipTests -Dmaven.test.skip=true --quiet else MAVEN_PROFILE="default" ./mvnw package -DskipTests -Dmaven.test.skip=true --quiet fi # Build Maven profiles list PROFILES="$MAVEN_PROFILE" if [[ "$LOGGING_MODE" == "slf4j" ]]; then PROFILES="$PROFILES,slf4j" fi echo "[INFO] Running fat jar test app with Maven profiles: $PROFILES" cd "$THIS_DIR" ../mvnw compile exec:exec -P "$PROFILES" ================================================ FILE: fat-jar-test-app/src/main/fips-java/net/snowflake/FipsInitializer.java ================================================ package net.snowflake; import java.security.Provider; import java.security.Security; import java.util.Arrays; import java.util.List; import org.bouncycastle.crypto.CryptoServicesRegistrar; import org.bouncycastle.crypto.fips.FipsStatus; import org.bouncycastle.jcajce.provider.BouncyCastleFipsProvider; /** * FIPS initialization class that registers BouncyCastle FIPS provider * before the main application runs. This class is only compiled when * the 'fips' Maven profile is active. 
*/ public class FipsInitializer { private static final String JCE_PROVIDER_BOUNCY_CASTLE_FIPS = "BCFIPS"; private static final String JCE_PROVIDER_SUN_JCE = "SunJCE"; private static final String JCE_PROVIDER_SUN_RSA_SIGN = "SunRsaSign"; private static final String JCE_KEYSTORE_BOUNCY_CASTLE = "BCFKS"; private static final String JCE_KEYSTORE_JKS = "JKS"; private static final String BOUNCY_CASTLE_RNG_HYBRID_MODE = "C:HYBRID;ENABLE{All};"; private static final String SSL_ENABLED_PROTOCOLS = "TLSv1.2,TLSv1.1,TLSv1"; private static final String JAVA_SYSTEM_PROPERTY_SSL_NAMEDGROUPS = "jdk.tls.namedGroups"; private static final String JAVA_SYSTEM_PROPERTY_SSL_KEYSTORE_TYPE = "javax.net.ssl.keyStoreType"; private static final String JAVA_SYSTEM_PROPERTY_SSL_TRUSTSTORE_TYPE = "javax.net.ssl.trustStoreType"; private static final String JAVA_SYSTEM_PROPERTY_SSL_PROTOCOLS = "jdk.tls.client.protocols"; // Static initializer block executes when class is loaded static { try { initializeFipsMode(); System.out.println("[FIPS] Initialization completed successfully"); } catch (Exception e) { System.err.println("[FIPS] Initialization failed: " + e.getMessage()); e.printStackTrace(); throw new RuntimeException("Failed to initialize FIPS mode", e); } } private static void initializeFipsMode() { // Set named groups to avoid test failure on GCP environment System.setProperty(JAVA_SYSTEM_PROPERTY_SSL_NAMEDGROUPS, "secp256r1, secp384r1, ffdhe2048, ffdhe3072"); // Set keystore types for BouncyCastle libraries System.setProperty(JAVA_SYSTEM_PROPERTY_SSL_KEYSTORE_TYPE, JCE_KEYSTORE_BOUNCY_CASTLE); System.setProperty(JAVA_SYSTEM_PROPERTY_SSL_TRUSTSTORE_TYPE, JCE_KEYSTORE_JKS); // Set SSL protocols System.setProperty(JAVA_SYSTEM_PROPERTY_SSL_PROTOCOLS, SSL_ENABLED_PROTOCOLS); // Remove Java's standard encryption and SSL providers List providers = Arrays.asList(Security.getProviders()); Provider sunJceProvider = Security.getProvider(JCE_PROVIDER_SUN_JCE); Provider sunRsaSignProvider = Security.getProvider(JCE_PROVIDER_SUN_RSA_SIGN); if (sunJceProvider != null) { Security.removeProvider(JCE_PROVIDER_SUN_JCE); } if (sunRsaSignProvider != null) { Security.removeProvider(JCE_PROVIDER_SUN_RSA_SIGN); } /* * Insert BouncyCastle's FIPS-compliant encryption and SSL providers. */ BouncyCastleFipsProvider bcFipsProvider = new BouncyCastleFipsProvider(BOUNCY_CASTLE_RNG_HYBRID_MODE); /* * We remove BCFIPS provider pessimistically. This is a no-op if provider * does not exist. This is necessary to always add it to the first * position when calling insertProviderAt. * * JavaDoc for insertProviderAt states: * "A provider cannot be added if it is already installed." */ Security.removeProvider(JCE_PROVIDER_BOUNCY_CASTLE_FIPS); Security.insertProviderAt(bcFipsProvider, 1); // Enable approved-only mode if (!CryptoServicesRegistrar.isInApprovedOnlyMode()) { if (FipsStatus.isReady()) { CryptoServicesRegistrar.setApprovedOnlyMode(true); } else { throw new RuntimeException( "FIPS is not ready to be enabled and FIPS mode is required"); } } } /** * Force class loading - this method will be called from FatJarTestApp. * The method body is empty; just loading the class is enough to trigger * the static initializer block. 
*/ public static void ensureInitialized() { // Method body is empty; just loading the class is enough } } ================================================ FILE: fat-jar-test-app/src/main/java/net/snowflake/FatJarTestApp.java ================================================ package net.snowflake; import java.io.File; import java.io.InputStream; import java.net.URISyntaxException; import java.nio.file.Files; import java.util.logging.LogManager; import java.sql.Connection; import java.sql.DriverManager; import java.sql.ResultSet; import java.sql.Statement; import java.util.Arrays; import java.util.Properties; public class FatJarTestApp { private static final String LOGGER_IMPL_PROPERTY = "net.snowflake.jdbc.loggerImpl"; private static final String SLF4J_LOGGER_CLASS = "net.snowflake.client.log.SLF4JLogger"; private static final String LOGGER_IMPL = System.getProperty(LOGGER_IMPL_PROPERTY); private static final String[] CLOUD_SDK_LOGGER_PATTERNS = { "net.snowflake.client.jdbc.internal.software.amazon", "net.snowflake.client.jdbc.internal.google", "net.snowflake.client.jdbc.internal.azure" }; private static final File logFile = new File(System.getProperty("java.io.tmpdir"), "fat-jar-test.log"); // Static initializer to load FIPS configuration if available static { try { // This will only succeed when FipsInitializer.class is on the classpath // (i.e., when compiled with the fips profile) Class fipsInitializer = Class.forName("net.snowflake.FipsInitializer"); fipsInitializer.getMethod("ensureInitialized").invoke(null); System.out.println("[INFO] Running in FIPS mode"); } catch (ClassNotFoundException e) { // FIPS not available - normal mode System.out.println("[INFO] Running in normal (non-FIPS) mode"); } catch (Exception e) { throw new RuntimeException("Failed to initialize FIPS mode", e); } } public static void main(String[] args) throws Exception { setupLogging(); runQueries(args); verifyLogs(); } private static void runQueries(String[] args) throws Exception { try (Connection connection = getConnection(args); Statement stmt = connection.createStatement()) { System.out.println("RUNNING SELECT 1"); ResultSet resultSet = stmt.executeQuery("SELECT 1"); if (!resultSet.next()) { throw new RuntimeException("No data found"); } if (resultSet.getInt(1) != 1) { throw new RuntimeException("Wrong data found: " + resultSet.getInt(1)); } System.out.println("CREATING A STAGE"); stmt.execute("CREATE OR REPLACE TEMPORARY STAGE fat_jar_stage"); System.out.println("PUTTING A FILE"); stmt.execute("PUT file://" + getTestFilePath() + " @fat_jar_stage"); } } private static void setupLogging() throws Exception { if (SLF4J_LOGGER_CLASS.equals(LOGGER_IMPL)) { System.setProperty("fatjar.logfile", logFile.getAbsolutePath()); System.out.println("[INFO] SLF4J logging to: " + logFile.getAbsolutePath()); } else { try (InputStream is = FatJarTestApp.class.getResourceAsStream("/logging.properties")) { LogManager.getLogManager().readConfiguration(is); } System.out.println("[INFO] JUL logging to: " + logFile.getAbsolutePath()); } } private static void verifyLogs() throws Exception { String logOutput = new String(Files.readAllBytes(logFile.toPath())); String mode = SLF4J_LOGGER_CLASS.equals(LOGGER_IMPL) ? 
"SLF4J" : "JUL"; System.out.println("[INFO] Verifying " + mode + " log output (" + logOutput.length() + " chars)"); String logsPrelude = logOutput.substring(0, Math.min(2000, logOutput.length())); boolean hasOpeningSession = logOutput.contains("Opening session"); if (!hasOpeningSession) { System.err.println("[FAIL] Log output does not contain 'Opening session' (expected from SFSession)"); System.err.println("[DEBUG] First 2000 chars of log output:"); System.err.println(logsPrelude); System.exit(1); } System.out.println("[PASS] Found 'Opening session' in " + mode + " log output"); boolean hasCloudSdkLog = false; for (String pattern : CLOUD_SDK_LOGGER_PATTERNS) { if (logOutput.contains(pattern)) { hasCloudSdkLog = true; System.out.println("[PASS] Found cloud SDK logger: " + pattern); break; } } if (!hasCloudSdkLog) { System.err.println("[FAIL] Log output does not contain any cloud SDK logger name"); System.err.println("[FAIL] Expected one of: " + Arrays.toString(CLOUD_SDK_LOGGER_PATTERNS)); System.err.println("[DEBUG] First 2000 chars of log output:"); System.err.println(logsPrelude); System.exit(1); } System.out.println("[PASS] All " + mode + " logging verifications passed"); } private static String getTestFilePath() throws URISyntaxException { if (new File("/tmp/test.csv").exists()) { return "/tmp/test.csv"; } else { return new File(FatJarTestApp.class.getClassLoader().getResource("test.csv").toURI()).getAbsolutePath(); } } private static Connection getConnection(String[] args) throws Exception { Properties properties = new Properties(); properties.put("user", getSfEnv("USER")); String authenticator = getSfEnv("AUTHENTICATOR"); if ("SNOWFLAKE_JWT".equals(authenticator)) { String privateKeyFile = getSfEnv("PRIVATE_KEY_FILE"); properties.put("private_key_file", privateKeyFile); properties.put("authenticator", "SNOWFLAKE_JWT"); } else { String password = getSfEnv("PASSWORD"); properties.put("password", password); } properties.put("role", getSfEnv("ROLE")); properties.put("account", getSfEnv("ACCOUNT")); properties.put("db", getSfEnv("DATABASE")); properties.put("schema", getSfEnv("SCHEMA")); properties.put("warehouse", getSfEnv("WAREHOUSE")); properties.put("ssl", true); String uri = "jdbc:snowflake://" + getSfEnv("ACCOUNT") + ".snowflakecomputing.com"; if (Arrays.asList(args).contains("enableDiagnostics")) { uri += "?ENABLE_DIAGNOSTICS=true&DIAGNOSTICS_ALLOWLIST_FILE=/allowlist.json"; } if (Arrays.asList(args).contains("useProxy")) { properties.put("useProxy", "true"); properties.put("proxyHost", "localhost"); properties.put("proxyPort", "8080"); } Class.forName("net.snowflake.client.api.driver.SnowflakeDriver"); return DriverManager.getConnection(uri, properties); } private static String getSfEnv(String param) { return System.getenv("SNOWFLAKE_TEST_" + param); } } ================================================ FILE: fat-jar-test-app/src/main/resources/logback.xml ================================================ %d{HH:mm:ss.SSS} [%thread] %-5level %logger - %msg%n ${fatjar.logfile} %d{HH:mm:ss.SSS} [%thread] %-5level %logger - %msg%n ================================================ FILE: fat-jar-test-app/src/main/resources/logging.properties ================================================ handlers = java.util.logging.FileHandler .level = FINE java.util.logging.FileHandler.pattern = %t/fat-jar-test.log java.util.logging.FileHandler.formatter = java.util.logging.SimpleFormatter java.util.logging.FileHandler.level = ALL ================================================ FILE: 
fat-jar-test-app/src/main/resources/test.csv ================================================ abc,1 def,2 ================================================ FILE: linkage-checker-exclusion-rules.xml ================================================ ? Optional Optional Optional provided appengine ? ? ? ? ? ? ? ? ? Optional com.amazonaws.util is not used in runtime CRT dependency excluded, not used in runtime CRT dependency excluded, not used in runtime CRT dependency excluded, not used in runtime CRT dependency excluded, not used in runtime False positive from linkage checker - field exists in actual JAR False positive from linkage checker - field exists in actual JAR False positive from linkage checker - field exists in actual JAR False positive from linkage checker - field exists in actual JAR False positive from linkage checker - field exists in actual JAR False positive from linkage checker - field exists in actual JAR False positive from linkage checker - field exists in actual JAR False positive from linkage checker - field exists in actual JAR Optional Optional ================================================ FILE: mvnw ================================================ #!/bin/sh # ---------------------------------------------------------------------------- # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # ---------------------------------------------------------------------------- # ---------------------------------------------------------------------------- # Apache Maven Wrapper startup batch script, version 3.2.0 # # Required ENV vars: # ------------------ # JAVA_HOME - location of a JDK home dir # # Optional ENV vars # ----------------- # MAVEN_OPTS - parameters passed to the Java VM when running Maven # e.g. to debug Maven itself, use # set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 # MAVEN_SKIP_RC - flag to disable loading of mavenrc files # ---------------------------------------------------------------------------- if [ -z "$MAVEN_SKIP_RC" ] ; then if [ -f /usr/local/etc/mavenrc ] ; then . /usr/local/etc/mavenrc fi if [ -f /etc/mavenrc ] ; then . /etc/mavenrc fi if [ -f "$HOME/.mavenrc" ] ; then . "$HOME/.mavenrc" fi fi # OS specific support. $var _must_ be set to either true or false. 
cygwin=false; darwin=false; mingw=false case "$(uname)" in CYGWIN*) cygwin=true ;; MINGW*) mingw=true;; Darwin*) darwin=true # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home # See https://developer.apple.com/library/mac/qa/qa1170/_index.html if [ -z "$JAVA_HOME" ]; then if [ -x "/usr/libexec/java_home" ]; then JAVA_HOME="$(/usr/libexec/java_home)"; export JAVA_HOME else JAVA_HOME="/Library/Java/Home"; export JAVA_HOME fi fi ;; esac if [ -z "$JAVA_HOME" ] ; then if [ -r /etc/gentoo-release ] ; then JAVA_HOME=$(java-config --jre-home) fi fi # For Cygwin, ensure paths are in UNIX format before anything is touched if $cygwin ; then [ -n "$JAVA_HOME" ] && JAVA_HOME=$(cygpath --unix "$JAVA_HOME") [ -n "$CLASSPATH" ] && CLASSPATH=$(cygpath --path --unix "$CLASSPATH") fi # For Mingw, ensure paths are in UNIX format before anything is touched if $mingw ; then [ -n "$JAVA_HOME" ] && [ -d "$JAVA_HOME" ] && JAVA_HOME="$(cd "$JAVA_HOME" || (echo "cannot cd into $JAVA_HOME."; exit 1); pwd)" fi if [ -z "$JAVA_HOME" ]; then javaExecutable="$(which javac)" if [ -n "$javaExecutable" ] && ! [ "$(expr "\"$javaExecutable\"" : '\([^ ]*\)')" = "no" ]; then # readlink(1) is not available as standard on Solaris 10. readLink=$(which readlink) if [ ! "$(expr "$readLink" : '\([^ ]*\)')" = "no" ]; then if $darwin ; then javaHome="$(dirname "\"$javaExecutable\"")" javaExecutable="$(cd "\"$javaHome\"" && pwd -P)/javac" else javaExecutable="$(readlink -f "\"$javaExecutable\"")" fi javaHome="$(dirname "\"$javaExecutable\"")" javaHome=$(expr "$javaHome" : '\(.*\)/bin') JAVA_HOME="$javaHome" export JAVA_HOME fi fi fi if [ -z "$JAVACMD" ] ; then if [ -n "$JAVA_HOME" ] ; then if [ -x "$JAVA_HOME/jre/sh/java" ] ; then # IBM's JDK on AIX uses strange locations for the executables JAVACMD="$JAVA_HOME/jre/sh/java" else JAVACMD="$JAVA_HOME/bin/java" fi else JAVACMD="$(\unset -f command 2>/dev/null; \command -v java)" fi fi if [ ! -x "$JAVACMD" ] ; then echo "Error: JAVA_HOME is not defined correctly." >&2 echo " We cannot execute $JAVACMD" >&2 exit 1 fi if [ -z "$JAVA_HOME" ] ; then echo "Warning: JAVA_HOME environment variable is not set." fi # traverses directory structure from process work directory to filesystem root # first directory with .mvn subdirectory is considered project base directory find_maven_basedir() { if [ -z "$1" ] then echo "Path not specified to find_maven_basedir" return 1 fi basedir="$1" wdir="$1" while [ "$wdir" != '/' ] ; do if [ -d "$wdir"/.mvn ] ; then basedir=$wdir break fi # workaround for JBEAP-8937 (on Solaris 10/Sparc) if [ -d "${wdir}" ]; then wdir=$(cd "$wdir/.." || exit 1; pwd) fi # end of workaround done printf '%s' "$(cd "$basedir" || exit 1; pwd)" } # concatenates all lines of a file concat_lines() { if [ -f "$1" ]; then # Remove \r in case we run on Windows within Git Bash # and check out the repository with auto CRLF management # enabled. Otherwise, we may read lines that are delimited with # \r\n and produce $'-Xarg\r' rather than -Xarg due to word # splitting rules. 
tr -s '\r\n' ' ' < "$1" fi } log() { if [ "$MVNW_VERBOSE" = true ]; then printf '%s\n' "$1" fi } BASE_DIR=$(find_maven_basedir "$(dirname "$0")") if [ -z "$BASE_DIR" ]; then exit 1; fi MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"}; export MAVEN_PROJECTBASEDIR log "$MAVEN_PROJECTBASEDIR" ########################################################################################## # Extension to allow automatically downloading the maven-wrapper.jar from Maven-central # This allows using the maven wrapper in projects that prohibit checking in binary data. ########################################################################################## wrapperJarPath="$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" if [ -r "$wrapperJarPath" ]; then log "Found $wrapperJarPath" else log "Couldn't find $wrapperJarPath, downloading it ..." if [ -n "$MVNW_REPOURL" ]; then wrapperUrl="$MVNW_REPOURL/org/apache/maven/wrapper/maven-wrapper/3.2.0/maven-wrapper-3.2.0.jar" else wrapperUrl="https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.2.0/maven-wrapper-3.2.0.jar" fi while IFS="=" read -r key value; do # Remove '\r' from value to allow usage on windows as IFS does not consider '\r' as a separator ( considers space, tab, new line ('\n'), and custom '=' ) safeValue=$(echo "$value" | tr -d '\r') case "$key" in (wrapperUrl) wrapperUrl="$safeValue"; break ;; esac done < "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.properties" log "Downloading from: $wrapperUrl" if $cygwin; then wrapperJarPath=$(cygpath --path --windows "$wrapperJarPath") fi if command -v wget > /dev/null; then log "Found wget ... using wget" [ "$MVNW_VERBOSE" = true ] && QUIET="" || QUIET="--quiet" if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then wget $QUIET "$wrapperUrl" -O "$wrapperJarPath" || rm -f "$wrapperJarPath" else wget $QUIET --http-user="$MVNW_USERNAME" --http-password="$MVNW_PASSWORD" "$wrapperUrl" -O "$wrapperJarPath" || rm -f "$wrapperJarPath" fi elif command -v curl > /dev/null; then log "Found curl ... using curl" [ "$MVNW_VERBOSE" = true ] && QUIET="" || QUIET="--silent" if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then curl $QUIET -o "$wrapperJarPath" "$wrapperUrl" -f -L || rm -f "$wrapperJarPath" else curl $QUIET --user "$MVNW_USERNAME:$MVNW_PASSWORD" -o "$wrapperJarPath" "$wrapperUrl" -f -L || rm -f "$wrapperJarPath" fi else log "Falling back to using Java to download" javaSource="$MAVEN_PROJECTBASEDIR/.mvn/wrapper/MavenWrapperDownloader.java" javaClass="$MAVEN_PROJECTBASEDIR/.mvn/wrapper/MavenWrapperDownloader.class" # For Cygwin, switch paths to Windows format before running javac if $cygwin; then javaSource=$(cygpath --path --windows "$javaSource") javaClass=$(cygpath --path --windows "$javaClass") fi if [ -e "$javaSource" ]; then if [ ! -e "$javaClass" ]; then log " - Compiling MavenWrapperDownloader.java ..." ("$JAVA_HOME/bin/javac" "$javaSource") fi if [ -e "$javaClass" ]; then log " - Running MavenWrapperDownloader.java ..." 
("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$wrapperUrl" "$wrapperJarPath") || rm -f "$wrapperJarPath" fi fi fi fi ########################################################################################## # End of extension ########################################################################################## # If specified, validate the SHA-256 sum of the Maven wrapper jar file wrapperSha256Sum="" while IFS="=" read -r key value; do case "$key" in (wrapperSha256Sum) wrapperSha256Sum=$value; break ;; esac done < "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.properties" if [ -n "$wrapperSha256Sum" ]; then wrapperSha256Result=false if command -v sha256sum > /dev/null; then if echo "$wrapperSha256Sum $wrapperJarPath" | sha256sum -c > /dev/null 2>&1; then wrapperSha256Result=true fi elif command -v shasum > /dev/null; then if echo "$wrapperSha256Sum $wrapperJarPath" | shasum -a 256 -c > /dev/null 2>&1; then wrapperSha256Result=true fi else echo "Checksum validation was requested but neither 'sha256sum' or 'shasum' are available." echo "Please install either command, or disable validation by removing 'wrapperSha256Sum' from your maven-wrapper.properties." exit 1 fi if [ $wrapperSha256Result = false ]; then echo "Error: Failed to validate Maven wrapper SHA-256, your Maven wrapper might be compromised." >&2 echo "Investigate or delete $wrapperJarPath to attempt a clean download." >&2 echo "If you updated your Maven version, you need to update the specified wrapperSha256Sum property." >&2 exit 1 fi fi MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" # For Cygwin, switch paths to Windows format before running java if $cygwin; then [ -n "$JAVA_HOME" ] && JAVA_HOME=$(cygpath --path --windows "$JAVA_HOME") [ -n "$CLASSPATH" ] && CLASSPATH=$(cygpath --path --windows "$CLASSPATH") [ -n "$MAVEN_PROJECTBASEDIR" ] && MAVEN_PROJECTBASEDIR=$(cygpath --path --windows "$MAVEN_PROJECTBASEDIR") fi # Provide a "standardized" way to retrieve the CLI args that will # work with both Windows and non-Windows executions. MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $*" export MAVEN_CMD_LINE_ARGS WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain # shellcheck disable=SC2086 # safe args exec "$JAVACMD" \ $MAVEN_OPTS \ $MAVEN_DEBUG_OPTS \ -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" ================================================ FILE: mvnw.cmd ================================================ @REM ---------------------------------------------------------------------------- @REM Licensed to the Apache Software Foundation (ASF) under one @REM or more contributor license agreements. See the NOTICE file @REM distributed with this work for additional information @REM regarding copyright ownership. The ASF licenses this file @REM to you under the Apache License, Version 2.0 (the @REM "License"); you may not use this file except in compliance @REM with the License. You may obtain a copy of the License at @REM @REM http://www.apache.org/licenses/LICENSE-2.0 @REM @REM Unless required by applicable law or agreed to in writing, @REM software distributed under the License is distributed on an @REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @REM KIND, either express or implied. See the License for the @REM specific language governing permissions and limitations @REM under the License. 
@REM ---------------------------------------------------------------------------- @REM ---------------------------------------------------------------------------- @REM Apache Maven Wrapper startup batch script, version 3.2.0 @REM @REM Required ENV vars: @REM JAVA_HOME - location of a JDK home dir @REM @REM Optional ENV vars @REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands @REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a keystroke before ending @REM MAVEN_OPTS - parameters passed to the Java VM when running Maven @REM e.g. to debug Maven itself, use @REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 @REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files @REM ---------------------------------------------------------------------------- @REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' @echo off @REM set title of command window title %0 @REM enable echoing by setting MAVEN_BATCH_ECHO to 'on' @if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% @REM set %HOME% to equivalent of $HOME if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") @REM Execute a user defined script before this one if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre @REM check for pre script, once with legacy .bat ending and once with .cmd ending if exist "%USERPROFILE%\mavenrc_pre.bat" call "%USERPROFILE%\mavenrc_pre.bat" %* if exist "%USERPROFILE%\mavenrc_pre.cmd" call "%USERPROFILE%\mavenrc_pre.cmd" %* :skipRcPre @setlocal set ERROR_CODE=0 @REM To isolate internal variables from possible post scripts, we use another setlocal @setlocal @REM ==== START VALIDATION ==== if not "%JAVA_HOME%" == "" goto OkJHome echo. echo Error: JAVA_HOME not found in your environment. >&2 echo Please set the JAVA_HOME variable in your environment to match the >&2 echo location of your Java installation. >&2 echo. goto error :OkJHome if exist "%JAVA_HOME%\bin\java.exe" goto init echo. echo Error: JAVA_HOME is set to an invalid directory. >&2 echo JAVA_HOME = "%JAVA_HOME%" >&2 echo Please set the JAVA_HOME variable in your environment to match the >&2 echo location of your Java installation. >&2 echo. goto error @REM ==== END VALIDATION ==== :init @REM Find the project base dir, i.e. the directory that contains the folder ".mvn". @REM Fallback to current working directory if not found. set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir set EXEC_DIR=%CD% set WDIR=%EXEC_DIR% :findBaseDir IF EXIST "%WDIR%"\.mvn goto baseDirFound cd .. IF "%WDIR%"=="%CD%" goto baseDirNotFound set WDIR=%CD% goto findBaseDir :baseDirFound set MAVEN_PROJECTBASEDIR=%WDIR% cd "%EXEC_DIR%" goto endDetectBaseDir :baseDirNotFound set MAVEN_PROJECTBASEDIR=%EXEC_DIR% cd "%EXEC_DIR%" :endDetectBaseDir IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig @setlocal EnableExtensions EnableDelayedExpansion for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! 
%%a @endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% :endReadAdditionalConfig SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain set WRAPPER_URL="https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.2.0/maven-wrapper-3.2.0.jar" FOR /F "usebackq tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( IF "%%A"=="wrapperUrl" SET WRAPPER_URL=%%B ) @REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central @REM This allows using the maven wrapper in projects that prohibit checking in binary data. if exist %WRAPPER_JAR% ( if "%MVNW_VERBOSE%" == "true" ( echo Found %WRAPPER_JAR% ) ) else ( if not "%MVNW_REPOURL%" == "" ( SET WRAPPER_URL="%MVNW_REPOURL%/org/apache/maven/wrapper/maven-wrapper/3.2.0/maven-wrapper-3.2.0.jar" ) if "%MVNW_VERBOSE%" == "true" ( echo Couldn't find %WRAPPER_JAR%, downloading it ... echo Downloading from: %WRAPPER_URL% ) powershell -Command "&{"^ "$webclient = new-object System.Net.WebClient;"^ "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^ "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^ "}"^ "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%WRAPPER_URL%', '%WRAPPER_JAR%')"^ "}" if "%MVNW_VERBOSE%" == "true" ( echo Finished downloading %WRAPPER_JAR% ) ) @REM End of extension @REM If specified, validate the SHA-256 sum of the Maven wrapper jar file SET WRAPPER_SHA_256_SUM="" FOR /F "usebackq tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( IF "%%A"=="wrapperSha256Sum" SET WRAPPER_SHA_256_SUM=%%B ) IF NOT %WRAPPER_SHA_256_SUM%=="" ( powershell -Command "&{"^ "$hash = (Get-FileHash \"%WRAPPER_JAR%\" -Algorithm SHA256).Hash.ToLower();"^ "If('%WRAPPER_SHA_256_SUM%' -ne $hash){"^ " Write-Output 'Error: Failed to validate Maven wrapper SHA-256, your Maven wrapper might be compromised.';"^ " Write-Output 'Investigate or delete %WRAPPER_JAR% to attempt a clean download.';"^ " Write-Output 'If you updated your Maven version, you need to update the specified wrapperSha256Sum property.';"^ " exit 1;"^ "}"^ "}" if ERRORLEVEL 1 goto error ) @REM Provide a "standardized" way to retrieve the CLI args that will @REM work with both Windows and non-Windows executions. 
set MAVEN_CMD_LINE_ARGS=%* %MAVEN_JAVA_EXE% ^ %JVM_CONFIG_MAVEN_PROPS% ^ %MAVEN_OPTS% ^ %MAVEN_DEBUG_OPTS% ^ -classpath %WRAPPER_JAR% ^ "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" ^ %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* if ERRORLEVEL 1 goto error goto end :error set ERROR_CODE=1 :end @endlocal & set ERROR_CODE=%ERROR_CODE% if not "%MAVEN_SKIP_RC%"=="" goto skipRcPost @REM check for post script, once with legacy .bat ending and once with .cmd ending if exist "%USERPROFILE%\mavenrc_post.bat" call "%USERPROFILE%\mavenrc_post.bat" if exist "%USERPROFILE%\mavenrc_post.cmd" call "%USERPROFILE%\mavenrc_post.cmd" :skipRcPost @REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' if "%MAVEN_BATCH_PAUSE%"=="on" pause if "%MAVEN_TERMINATE_CMD%"=="on" exit %ERROR_CODE% cmd /C exit /B %ERROR_CODE% ================================================ FILE: output.json ================================================ ================================================ FILE: parent-pom.xml ================================================ 4.0.0 net.snowflake snowflake-jdbc-parent 4.2.1-SNAPSHOT pom . FIPS 1.27 1.28.0 3.18.0 1.10.0 4.5.14 4.4.16 1.5.6-5 17.0.0 9.7.1 1.8.1 4.2.0 2.37.5 1.57.0 12.32.0 12.31.0 3.4.0 1.84 1.0.2.6 1.0.8 1.14.17 1.1 3.48.2 1.2 1.19.0 1.4 2.20.0 1.2 1.5.4 0.9.5.4 2.48.0 1.29.0 2.47.0 2.44.1 1.45.0 2.13.1 2.35.1 24.3.25 2.57.0 33.3.1-jre 3.0.0 3.0.2 4.28.2 1.81.0 2.2 2.4.3 2.18.4.1 true 3.1.0 5.13.0 2.5.2 4.13.2 5.11.1 1.11.1 1.15.3 1.3.6 4.11.0 11.30.1 4.1.133.Final 10.6 0.31.1 1.0-alpha-9-stable-1 3.4.2 UTF-8 UTF-8 net/snowflake/client/jdbc/internal net.snowflake.client.jdbc.internal net_snowflake_client_jdbc_internal false 2.0.13 5.1.4 UnitTestSuite 2.4.1 1.9 3.6.3 3.1.0 3.0.0 3.3.1 3.2.0 3.11.0 3.5.0 3.1.1 3.0.0-M3 3.1.0 3.5.1 2.19 3.0.1 3.1.1 0.8.8 0.23.0 3.3.0 3.5.0 3.4.2 3.6.2 3.0.1 3.2.1 3.5.1 3.8.0 3.6.1 0.8.0 software.amazon.awssdk bom ${awssdk.version} pom import com.fasterxml.jackson jackson-bom ${jackson.version} pom import com.google.protobuf protobuf-bom ${google.protobuf.java.version} pom import io.grpc grpc-bom ${grpc.version} pom import io.netty netty-bom ${netty.version} pom import org.codehaus.mojo animal-sniffer-annotations ${animal.sniffer.annotations.version} classworlds classworlds ${classworlds.version} com.google.api gax ${google.gax.version} com.google.api.grpc proto-google-common-protos ${google.api.grpc.version} com.google.auth google-auth-library-oauth2-http ${google.auth.library.oauth2.http.version} com.google.cloud google-cloud-core ${google.cloud.core.version} com.google.cloud google-cloud-core-http ${google.cloud.core.version} com.google.api-client google-api-client com.google.cloud google-cloud-storage ${google.cloud.storage.version} com.google.http-client google-http-client ${google.http.client.version} com.google.http-client google-http-client-apache-v2 ${google.http.client.version} com.google.code.findbugs jsr305 ${google.jsr305.version} com.google.code.gson gson ${google.code.gson.version} com.google.errorprone error_prone_annotations ${google.errorprone.version} com.google.guava guava ${google.guava.version} com.google.j2objc j2objc-annotations ${google.j2objc-annotations.version} com.azure azure-core ${azure.core.version} com.azure azure-storage-common ${azure.storage.common.version} io.netty netty-tcnative-boringssl-static com.azure azure-storage-blob ${azure.storage.blob.version} io.netty netty-tcnative-boringssl-static com.nimbusds nimbus-jose-jwt ${nimbusds.version} com.nimbusds oauth2-oidc-sdk ${nimbusds.oauth2.version} 
commons-cli commons-cli ${commons.cli.version} test commons-codec commons-codec ${commons.codec.version} commons-io commons-io ${commons.io.version} commons-logging commons-logging ${commons.logging.version} commons-dbcp commons-dbcp ${commons.dbcp.version} test commons-pool commons-pool ${commons.pool.version} test io.opencensus opencensus-api ${opencensus.version} junit junit ${junit4.version} test org.junit.jupiter junit-jupiter ${junit.version} test org.junit.jupiter junit-jupiter-api ${junit.version} test org.junit.jupiter junit-jupiter-engine ${junit.version} test org.junit.jupiter junit-jupiter-params ${junit.version} test org.junit.platform junit-platform-suite ${junit.platform.version} test org.junit.platform junit-platform-engine ${junit.platform.version} test org.junit.platform junit-platform-runner ${junit.platform.version} test org.junit.platform junit-platform-suite-api ${junit.platform.version} test org.junit.platform junit-platform-suite-engine ${junit.platform.version} test org.junit.platform junit-platform-launcher ${junit.platform.version} test org.apache.avro avro ${avro.version} test org.apache.commons commons-compress ${apache.commons.compress.version} test org.apache.commons commons-lang3 ${apache.commons.lang3.version} test org.apache.commons commons-text ${apache.commons.text.version} test javax.servlet javax.servlet-api ${javax.servlet.version} net.minidev json-smart ${json.smart.version} net.snowflake snowflake-common ${snowflake.common.version} org.apache.arrow arrow-memory-core ${arrow.version} org.apache.arrow arrow-vector ${arrow.version} org.apache.httpcomponents httpclient ${apache.httpclient.version} org.apache.httpcomponents httpcore ${apache.httpcore.version} com.github.luben zstd-jni ${zstd-jni.version} org.apache.tika tika-core ${tika.version} org.jsoup jsoup ${jsoup.version} net.java.dev.jna jna ${jna.version} provided net.java.dev.jna jna-platform ${jna.version} provided org.checkerframework checker-qual ${checkerframework.version} org.codehaus.plexus plexus-container-default ${plexus.container.version} org.codehaus.plexus plexus-utils ${plexus.utils.version} org.hamcrest hamcrest ${hamcrest.version} test org.hamcrest hamcrest-core ${hamcrest.version} test org.ow2.asm asm ${asm.version} org.slf4j slf4j-api ${slf4j.version} com.google.flatbuffers flatbuffers-java ${google.flatbuffers.version} runtime org.apache.arrow arrow-format ${arrow.version} runtime org.apache.arrow arrow-memory-netty-buffer-patch ${arrow.version} runtime io.netty netty-common io.netty netty-buffer org.apache.arrow arrow-memory-unsafe ${arrow.version} runtime ch.qos.logback logback-classic ${logback.version} test ch.qos.logback logback-core ${logback.version} test com.mchange c3p0 ${c3p0.version} test com.zaxxer HikariCP ${hikaricp.version} test org.bouncycastle bcpkix-jdk18on ${bouncycastle.version} org.bouncycastle bcprov-jdk18on ${bouncycastle.version} org.bouncycastle bcutil-jdk18on ${bouncycastle.version} org.bouncycastle bc-fips ${bouncycastle.bcfips.version} provided org.bouncycastle bcpkix-fips ${bouncycastle.bcpkixfips.version} provided org.tukaani xz ${tukaani.version} org.mockito mockito-core ${mockito.version} test org.mockito mockito-inline ${mockito.version} test net.bytebuddy byte-buddy ${bytebuddy.version} test org.awaitility awaitility ${awaitility.version} test org.wiremock wiremock-standalone ${version.plugin.wiremock} test software.amazon.awssdk s3 software.amazon.awssdk.crt aws-crt software.amazon.awssdk s3-transfer-manager software.amazon.awssdk.crt aws-crt 
software.amazon.awssdk sdk-core software.amazon.awssdk netty-nio-client software.amazon.awssdk http-client-spi software.amazon.awssdk identity-spi software.amazon.awssdk aws-core software.amazon.awssdk auth software.amazon.awssdk http-auth-aws software.amazon.awssdk.crt aws-crt software.amazon.awssdk http-auth-spi software.amazon.awssdk regions software.amazon.awssdk sts com.azure azure-core ${azure.core.version} com.azure azure-storage-common ${azure.storage.common.version} com.azure azure-storage-blob ${azure.storage.blob.version} com.fasterxml.jackson.core jackson-annotations com.fasterxml.jackson.core jackson-core com.fasterxml.jackson.core jackson-databind com.fasterxml.jackson.dataformat jackson-dataformat-toml com.google.api gax com.google.auth google-auth-library-oauth2-http com.google.cloud google-cloud-core com.google.cloud google-cloud-core-http com.google.cloud google-cloud-storage com.google.http-client google-http-client com.google.http-client google-http-client-apache-v2 com.google.code.findbugs jsr305 com.google.guava guava com.nimbusds nimbus-jose-jwt com.nimbusds oauth2-oidc-sdk commons-codec commons-codec commons-io commons-io commons-logging commons-logging javax.servlet javax.servlet-api net.minidev json-smart io.netty netty-common runtime io.netty netty-buffer runtime net.snowflake snowflake-common com.amazonaws aws-java-sdk-core org.apache.arrow arrow-memory-core org.apache.arrow arrow-vector org.apache.httpcomponents httpclient org.apache.httpcomponents httpcore com.github.luben zstd-jni org.apache.tika tika-core org.jsoup jsoup net.java.dev.jna jna net.java.dev.jna jna-platform org.slf4j slf4j-api com.fasterxml.jackson.datatype jackson-datatype-jsr310 runtime com.google.flatbuffers flatbuffers-java org.apache.arrow arrow-format org.apache.arrow arrow-memory-netty-buffer-patch org.apache.arrow arrow-memory-unsafe ch.qos.logback logback-classic ch.qos.logback logback-core com.mchange c3p0 com.zaxxer HikariCP commons-cli commons-cli commons-dbcp commons-dbcp commons-pool commons-pool junit junit org.junit.jupiter junit-jupiter org.junit.jupiter junit-jupiter-api org.junit.jupiter junit-jupiter-engine org.junit.jupiter junit-jupiter-params org.junit.platform junit-platform-suite org.junit.platform junit-platform-engine org.junit.platform junit-platform-runner org.junit.platform junit-platform-suite-api org.junit.platform junit-platform-suite-engine org.junit.platform junit-platform-launcher org.apache.avro avro org.apache.commons commons-compress org.apache.commons commons-lang3 org.apache.commons commons-text org.hamcrest hamcrest org.mockito mockito-core org.mockito mockito-inline org.awaitility awaitility org.wiremock wiremock-standalone ================================================ FILE: pom.xml ================================================ 4.0.0 net.snowflake snowflake-jdbc-parent 4.2.1-SNAPSHOT ./parent-pom.xml ${artifactId} 4.2.1-SNAPSHOT jar ${artifactId} https://github.com/snowflakedb/snowflake-jdbc scm:git:https://github.com/snowflakedb/snowflake-jdbc.git https://github.com/snowflakedb/snowflake-jdbc snowflake-jdbc org.bouncycastle bcpkix-jdk18on org.bouncycastle bcprov-jdk18on org.bouncycastle bcutil-jdk18on com.tngtech.archunit archunit 1.4.1 test ${project.artifactId} true src/main/resources **/*.dylib **/*.so **/*.dll **/*.a false src/main/resources **/*.dylib **/*.so **/*.dll **/*.a com.github.ekryd.sortpom sortpom-maven-plugin ${version.plugin.sortpom} com.github.siom79.japicmp japicmp-maven-plugin ${version.plugin.japicmp} com.spotify.fmt 
fmt-maven-plugin ${version.plugin.fmt} org.apache.maven.plugins maven-antrun-plugin ${version.plugin.antrun} org.apache.maven.plugins maven-checkstyle-plugin ${version.plugin.checkstyle} org.apache.maven.plugins maven-clean-plugin ${version.plugin.clean} org.apache.maven.plugins maven-compiler-plugin ${version.plugin.compiler} org.apache.maven.plugins maven-dependency-plugin ${version.plugin.dependency} org.apache.maven.plugins maven-deploy-plugin ${version.plugin.deploy} org.apache.maven.plugins maven-enforcer-plugin ${version.plugin.enforcer} org.apache.maven.plugins maven-failsafe-plugin ${version.plugin.failsafe} org.apache.maven.surefire surefire-junit-platform ${version.plugin.surefire} org.apache.maven.plugins maven-gpg-plugin ${version.plugin.gpg} org.apache.maven.plugins maven-install-plugin ${version.plugin.install} org.apache.maven.plugins maven-jar-plugin ${version.plugin.jar} org.apache.maven.plugins maven-javadoc-plugin ${version.plugin.javadoc} org.apache.maven.plugins maven-project-info-reports-plugin ${version.plugin.projectinforeports} org.apache.maven.plugins maven-shade-plugin ${version.plugin.shade} org.apache.maven.plugins maven-source-plugin ${version.plugin.source} org.apache.maven.plugins maven-surefire-plugin ${version.plugin.surefire} org.apache.maven.surefire surefire-junit-platform ${version.plugin.surefire} org.codehaus.mojo buildnumber-maven-plugin ${version.plugin.buildnumber} org.codehaus.mojo exec-maven-plugin ${version.plugin.exec} org.jacoco jacoco-maven-plugin ${version.plugin.jacoco} com.github.ekryd.sortpom sortpom-maven-plugin false false true scope,groupId,artifactId groupId,artifactId true true true verify validate org.apache.maven.plugins maven-clean-plugin lib *.jar org.apache.maven.plugins maven-compiler-plugin true true javac true 8 8 -Xlint:all,-path default-testCompile testCompile test-compile org.apache.maven.plugins maven-dependency-plugin analyze analyze-only true true javax.servlet:javax.servlet-api install-jar copy install ${project.groupId} ${project.artifactId} ${project.version} lib org.apache.maven.plugins maven-enforcer-plugin com.google.cloud.tools linkage-checker-enforcer-rules 1.5.13 org.codehaus.mojo extra-enforcer-rules 1.3 org.eclipse.aether aether-util enforce-best-practices enforce true true arrow-memory-unsafe org.apache.arrow.memory.DefaultAllocationManagerFactory enforce-maven enforce ${version.maven} enforce-linkage-checker enforce verify true linkage-checker-exclusion-rules.xml org.apache.maven.plugins maven-install-plugin install-arrow-format install-file validate ${project.basedir}/dependencies/arrow-format-${arrow.version}.jar org.apache.arrow arrow-format ${arrow.version} jar true install-arrow-memory-core install-file validate ${project.basedir}/dependencies/arrow-memory-core-${arrow.version}.jar org.apache.arrow arrow-memory-core ${arrow.version} jar true install-arrow-memory-netty-buffer-patch install-file validate ${project.basedir}/dependencies/arrow-memory-netty-buffer-patch-${arrow.version}.jar org.apache.arrow arrow-memory-netty-buffer-patch ${arrow.version} jar true install-arrow-memory-pom install-file validate ${project.basedir}/dependencies/arrow-memory-${arrow.version}.pom org.apache.arrow arrow-memory ${arrow.version} pom true install-arrow-memory-unsafe install-file validate ${project.basedir}/dependencies/arrow-memory-unsafe-${arrow.version}.jar org.apache.arrow arrow-memory-unsafe ${arrow.version} jar true install-arrow-vector install-file validate 
${project.basedir}/dependencies/arrow-vector-${arrow.version}.jar org.apache.arrow arrow-vector ${arrow.version} jar true install-tika-core install-file validate ${project.basedir}/dependencies/tika-core-${tika.version}.jar org.apache.tika tika-core ${tika.version} jar true org.apache.maven.plugins maven-jar-plugin net.snowflake.client.api.driver.SnowflakeDriver true test-jar org.apache.maven.plugins maven-javadoc-plugin 8 ${project.basedir}/src/main/javadoc/overview.html java ${project.basedir}/src/main/javadoc/licenses.html net.snowflake.client.internal.*:net.snowflake.client.jdbc.internal.* attach-javadocs jar org.apache.maven.plugins maven-source-plugin attach-sources jar org.jacoco jacoco-maven-plugin ${jacoco.skip.instrument} pre-unit-test prepare-agent target/jacoco-ut.exec post-unit-test report test target/jacoco-ut.exec target/jacoco-ut org.apache.maven.plugins maven-project-info-reports-plugin check-style com.spotify.fmt fmt-maven-plugin fmt check validate org.apache.maven.plugins maven-checkstyle-plugin true true true warning checkstyle check validate thin-jar thin-jar snowflake-jdbc-thin org.apache.maven.plugins maven-enforcer-plugin enforce-linkage-checker enforce none org.apache.maven.plugins maven-shade-plugin shade package net.snowflake:snowflake-common org.apache.arrow:* org.apache.tika:tika-core io.netty:netty-common io.netty:netty-buffer net.snowflake.common ${shadeBase}.snowflake.common mozilla ${shadeBase}.mozilla org.apache.arrow ${shadeBase}.apache.arrow org.apache.tika ${shadeBase}.apache.tika io.netty ${shadeBase}.io.netty *:* META-INF/LICENSE* META-INF/NOTICE* META-INF/DEPENDENCIES META-INF/maven/** META-INF/*.xml META-INF/*.SF META-INF/*.DSA META-INF/*.RSA .netbeans_automatic_build git.properties arrow-git.properties google-http-client.properties storage.v1.json pipes-fork-server-default-log4j2.xml dependencies.properties pipes-fork-server-default-log4j2.xml org.apache.arrow:arrow-vector codegen/** META-INF/io.netty.versions.properties org.codehaus.mojo buildnumber-maven-plugin yyyyMMddHHmmss buildNumber.timestamp false false create-timestamp package self-contained-jar !not-self-contained-jar ${basedir}/src/main/resources-fat-jar org.codehaus.mojo build-helper-maven-plugin add-fat-jar-sources add-source generate-sources ${basedir}/src/main/java-fat-jar org.apache.maven.plugins maven-enforcer-plugin enforce-linkage-checker enforce none org.apache.maven.plugins maven-shade-plugin shade package mozilla ${shadeBase}.mozilla net.snowflake.common ${shadeBase}.snowflake.common org.apache ${shadeBase}.apache org.apache.log4j.* org.slf4j ${shadeBase}.org.slf4j software.amazon.awssdk ${shadeBase}.software.amazon.awssdk software.amazon.encryption.s3 ${shadeBase}.software.amazon.encryption.s3 software.amazon.eventstream ${shadeBase}.software.amazon.eventstream software.amazon.ion ${shadeBase}.software.amazon.ion org.reactivestreams ${shadeBase}.reactivestreams org.jvnet.staxex ${shadeBase}.jvnet.staxex jakarta.xml.soap ${shadeBase}.jakarta.xml.soap jakarta.activation ${shadeBase}.jakarta.activation com.azure ${shadeBase}.azure com.fasterxml ${shadeBase}.fasterxml com.google ${shadeBase}.google google.api ${shadeBase}.google.api google.apps ${shadeBase}.google.apps google.cloud ${shadeBase}.google.cloud google.geo ${shadeBase}.google.geo google.iam ${shadeBase}.google.iam google.logging ${shadeBase}.google.logging google.longrunning ${shadeBase}.google.longrunning google.monitoring ${shadeBase}.google.monitoring google.protobuf ${shadeBase}.google.protobuf google.rpc 
${shadeBase}.google.rpc google.shopping ${shadeBase}.google.shopping google.storage ${shadeBase}.google.storage google.type ${shadeBase}.google.type org.joda ${shadeBase}.joda javax.servlet ${shadeBase}.javax.servlet org.jsoup ${shadeBase}.org.jsoup org.bouncycastle ${shadeBase}.org.bouncycastle com.nimbusds ${shadeBase}.com.nimbusds javax.annotation ${shadeBase}.javax.annotation net.jcip ${shadeBase}.net.jcip net.minidev ${shadeBase}.net.minidev org.objectweb ${shadeBase}.org.objectweb io.netty ${shadeBase}.io.netty com.carrotsearch ${shadeBase}.com.carrotsearch io.opencensus ${shadeBase}.opencensus io.opentelemetry ${shadeBase}.opentelemetry org.threeten ${shadeBase}.threeten io.grpc ${shadeBase}.grpc META-INF.native.io_grpc_netty_shaded_netty_tcnative META-INF.native.${shadeNativeBase}_grpc_netty_shaded_netty_tcnative META-INF.native.libio_grpc_netty_shaded_netty_tcnative META-INF.native.lib${shadeNativeBase}_grpc_netty_shaded_netty_tcnative META-INF.native.io_grpc_netty_shaded_netty_transport_native_epoll META-INF.native.${shadeNativeBase}_grpc_netty_shaded_netty_transport_native_epoll META-INF.native.libio_grpc_netty_shaded_netty_transport_native_epoll META-INF.native.lib${shadeNativeBase}_grpc_netty_shaded_netty_transport_native_epoll org.checkerframework ${shadeBase}.org.checkerframework org.codehaus ${shadeBase}.org.codehaus io.perfmark ${shadeBase}.io.perfmark opencensus ${shadeBase}.opencensus grpc ${shadeBase}.grpc android.annotation ${shadeBase}.android.annotation reactor ${shadeBase}.reactor org.reactivestreams ${shadeBase}.org.reactivestreams *:* META-INF/LICENSE* META-INF/NOTICE* META-INF/DEPENDENCIES META-INF/maven/** META-INF/services/com.fasterxml.* META-INF/versions/9/module-info.* META-INF/versions/11/module-info.* META-INF/*.xml META-INF/*.SF META-INF/*.DSA META-INF/*.RSA .netbeans_automatic_build git.properties arrow-git.properties google-http-client.properties storage.v1.json pipes-fork-server-default-log4j2.xml dependencies.properties pipes-fork-server-default-log4j2.xml azure-*.properties VersionInfo.java project.properties org.apache.arrow:arrow-vector codegen/** com.google.guava:guava com/google/common/io/** com/google/common/base/** com/google/common/hash/** com/google/common/collect/** com/google/common/graph/** com/google/common/math/** com/google/common/util/concurrent/** commons-logging:commons-logging org/apache/commons/logging/impl/AvalonLogger.class META-INF/io.netty.versions.properties org.apache.maven.plugins maven-antrun-plugin ${version.plugin.antrun} repack run package org.codehaus.mojo buildnumber-maven-plugin yyyyMMddHHmmss buildNumber.timestamp false false create-timestamp package java-9 (9,) maven-failsafe-plugin --add-opens=java.base/java.io=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-opens=java.base/java.lang=ALL-UNNAMED --add-opens=java.base/java.lang.reflect=ALL-UNNAMED --add-opens=java.base/java.util=ALL-UNNAMED --add-opens=java.base/sun.util.calendar=ALL-UNNAMED --add-exports=java.base/sun.nio.ch=ALL-UNNAMED --add-exports=jdk.unsupported/sun.misc=ALL-UNNAMED maven-surefire-plugin ${skip.unitTests} --add-opens=java.base/java.io=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-opens=java.base/java.lang=ALL-UNNAMED --add-opens=java.base/java.lang.reflect=ALL-UNNAMED --add-opens=java.base/java.util=ALL-UNNAMED --add-opens=java.base/sun.util.calendar=ALL-UNNAMED --add-exports=java.base/sun.nio.ch=ALL-UNNAMED --add-exports=jdk.unsupported/sun.misc=ALL-UNNAMED jenkinsIT jenkinsIT com.github.siom79.japicmp 
japicmp-maven-plugin japicmp cmp none org.apache.maven.plugins maven-dependency-plugin analyze analyze-only none org.apache.maven.plugins maven-surefire-plugin UnitTestSuite ${skip.unitTests} org.apache.maven.surefire surefire-junit-platform ${version.plugin.surefire} test org.apache.maven.plugins maven-failsafe-plugin verify DefaultIT integration-test net.snowflake.client.internal.log.JDK14Logger ${basedir}/src/test/resources/logging.properties ${integrationTestSuites} org.jacoco jacoco-maven-plugin ${jacoco.skip.instrument} pre-integration-test prepare-agent pre-integration-test target/jacoco-it.exec post-integration-test report post-integration-test target/jacoco-it.exec target/jacoco-it check-content !windows !thin-jar org.codehaus.mojo exec-maven-plugin ${version.plugin.exec} check-shaded-content exec verify ${basedir}/ci/scripts/check_content.sh check-content-thin !windows thin-jar org.codehaus.mojo exec-maven-plugin ${version.plugin.exec} check-shaded-content exec verify ${basedir}/ci/scripts/check_content.sh -thin qa1IT qa1IT org.apache.maven.plugins maven-failsafe-plugin **/*IT.java org.apache.maven.surefire surefire-junit-platform ${version.plugin.surefire} verify DellBoomi dellBoomiIT org.apache.maven.plugins maven-failsafe-plugin **/*IT.java org.apache.maven.surefire surefire-junit-platform ${version.plugin.surefire} verify preprod3IT preprod3IT org.apache.maven.plugins maven-failsafe-plugin **/*IT.java org.apache.maven.surefire surefire-junit-platform ${version.plugin.surefire} verify ossrh-deploy ossrhDeploy maven-deploy-plugin true org.apache.maven.plugins maven-gpg-plugin sign-and-deploy-file deploy target/${project.artifactId}.jar ossrh https://ossrh-staging-api.central.sonatype.com/service/local/staging/deploy/maven2 generated_public_pom.xml target/${project.artifactId}-javadoc.jar target/${project.artifactId}-sources.jar ${env.GPG_KEY_ID} ${env.GPG_KEY_PASSPHRASE} central-deploy central-deploy org.codehaus.mojo build-helper-maven-plugin ${version.plugin.buildhelper} attach-public-pom attach-artifact package generated_public_pom.xml pom org.apache.maven.plugins maven-gpg-plugin ${env.GPG_KEY_ID} ${env.GPG_KEY_PASSPHRASE} sign-artifacts sign verify org.sonatype.central central-publishing-maven-plugin ${version.plugin.publishing} true ossrh true published ================================================ FILE: prepareNewVersion.sh ================================================ #!/bin/bash -e if [[ -z "$1" ]]; then echo First argument must be new version to set exit 1 fi version=$1 version_without_snapshot=${version%-*} # prepare release with maven (version.properties is populated by Maven resource filtering) ./mvnw -f parent-pom.xml versions:set -DnewVersion=$version -DgenerateBackupPoms=false if [[ "$version" == *-SNAPSHOT ]]; then sed -i '' '3a\ - v'"$version " CHANGELOG.md fi # add changelog entry but only when releasing version without snapshot if [[ "$version" == "$version_without_snapshot" ]]; then sed -i '' "4s/.*/- v$version/" CHANGELOG.md fi ================================================ FILE: prober/Dockerfile ================================================ FROM ubuntu:25.10 # boilerplate labels required by validation when pushing to ACR, ECR & GCR LABEL org.opencontainers.image.source="https://github.com/snowflakedb/snowflake-jdbc" LABEL com.snowflake.owners.email="triage-snow-drivers-warsaw-dl@snowflake.com" LABEL com.snowflake.owners.slack="triage-snow-drivers-warsaw-dl" LABEL com.snowflake.owners.team="Snow Drivers" LABEL 
com.snowflake.owners.jira_area="Developer Platform" LABEL com.snowflake.owners.jira_component="JDBC Driver" # fake layers label to pass the validation LABEL com.snowflake.ugcbi.layers="sha256:850959b749c07b254308a4d1a84686fd7c09fcb94aeae33cc5748aa07e5cb232,sha256:b79d3c4628a989cbb8bc6f0bf0940ff33a68da2dca9c1ffbf8cfb2a27ac8d133,sha256:1cbcc0411a84fbce85e7ee2956c8c1e67b8e0edc81746a33d9da48c852037c3e,sha256:07e89b796f91d37255c6eec926b066d6818f3f2edc344a584d1b9566f77e1c27,sha256:84ff92691f909a05b224e1c56abb4864f01b4f8e3c854e4bb4c7baf1d3f6d652,sha256:3ab72684daee4eea64c3ae78a43ea332b86358446b6f2904dca4b634712e1537" ENV DEBIAN_FRONTEND=noninteractive RUN apt-get update && \ apt-get install -y --no-install-recommends \ curl \ zip \ unzip \ jq \ ca-certificates \ && rm -rf /var/lib/apt/lists/* ENV SDKMAN_DIR="/app/.sdkman" RUN curl -s "https://get.sdkman.io?rcupdate=false&ci=false" | bash ARG MATRIX_VERSION='{"11.0.27-tem": ["3.24.2", "3.20.0", "3.22.0", "3.13.6", "3.15.0", "3.18.0", "3.16.1", "3.23.2", "3.14.1", "3.14.4", "3.13.30"], "21.0.7-tem": ["3.24.2", "3.20.0", "3.22.0", "3.13.6", "3.15.0", "3.18.0", "3.16.1", "3.23.2", "3.14.1", "3.14.4", "3.13.30"]}' ENV DRIVERS_DIR="/opt/jdbc_drivers" RUN \ set -ex && \ mkdir -p ${DRIVERS_DIR} && \ \ for java_version_full in $(echo "${MATRIX_VERSION}" | jq -r 'keys_unsorted[]'); do \ bash -c "source ${SDKMAN_DIR}/bin/sdkman-init.sh && sdk install java ${java_version_full}"; \ done && \ \ for jdbc_version in $(echo "${MATRIX_VERSION}" | jq -r '.[][]' | sort -u); do \ DRIVER_URL="https://repo1.maven.org/maven2/net/snowflake/snowflake-jdbc/${jdbc_version}/snowflake-jdbc-${jdbc_version}.jar" && \ curl -fSL -o "${DRIVERS_DIR}/snowflake-jdbc-${jdbc_version}.jar.tmp" "${DRIVER_URL}" && \ mv "${DRIVERS_DIR}/snowflake-jdbc-${jdbc_version}.jar.tmp" "${DRIVERS_DIR}/snowflake-jdbc-${jdbc_version}.jar"; \ done WORKDIR /app # Copy the Prober.java source code into the correct package structure COPY /src/main/java/com/snowflake/client/jdbc/prober/Prober.java com/snowflake/client/jdbc/prober/Prober.java RUN \ set -ex && \ # Find the first available Java version to use as the compiler COMPILE_JAVA_VERSION=$(echo "${MATRIX_VERSION}" | jq -r 'keys_unsorted[0]') && \ \ # Select the first JDBC version from the first Java version's array COMPILE_JDBC_VERSION=$(echo "${MATRIX_VERSION}" | jq -r --arg jv "${COMPILE_JAVA_VERSION}" '.[$jv][0]') && \ COMPILE_JDBC_JAR_PATH="${DRIVERS_DIR}/snowflake-jdbc-${COMPILE_JDBC_VERSION}.jar" && \ \ COMPILE_COMMAND="source \"${SDKMAN_DIR}/bin/sdkman-init.sh\" >/dev/null 2>&1 && \ sdk use java \"${COMPILE_JAVA_VERSION}\" >/dev/null 2>&1 && \ cd \"${PROBER_APP_DIR}\" && \ javac -cp \".:${COMPILE_JDBC_JAR_PATH}\" com/snowflake/client/jdbc/prober/Prober.java" && \ \ bash -c "${COMPILE_COMMAND}" # Copy the entrypoint script into the image and make it executable COPY entrypoint.sh entrypoint.sh RUN chmod +x entrypoint.sh RUN chmod +x /app/.sdkman/bin/sdkman-init.sh ================================================ FILE: prober/Jenkinsfile.groovy ================================================ pipeline { agent { label 'regular-memory-node' } options { ansiColor('xterm') timestamps() } environment { VAULT_CREDENTIALS = credentials('vault-jenkins') COMMIT_SHA_SHORT = sh(script: 'cd JDBC/prober && git rev-parse --short HEAD', returnStdout: true).trim() IMAGE_NAME = 'snowdrivers/jdbc-driver-prober' TEAM_NAME = 'Snow Drivers' TEAM_JIRA_DL = 'triage-snow-drivers-warsaw-dl' TEAM_JIRA_AREA = 'Developer Platform' TEAM_JIRA_COMPONENT = 'JDBC Driver' } stages { 
stage('Build Image') { steps { dir('./JDBC/prober') { sh """ ls -l docker build \ -t ${IMAGE_NAME}:${COMMIT_SHA_SHORT} \ --label "org.opencontainers.image.revision=${COMMIT_SHA_SHORT}" \ -f ./Dockerfile . """ } } } stage('Checkout Jenkins Push Scripts') { steps { dir('k8sc-jenkins_scripts') { git branch: 'master', credentialsId: 'jenkins-snowflake-github-app-3', url: 'https://github.com/snowflakedb/k8sc-jenkins_scripts.git' } } } stage('Push Image') { steps { sh """ ./k8sc-jenkins_scripts/jenkins_push.sh \ -r "${VAULT_CREDENTIALS_USR}" \ -s "${VAULT_CREDENTIALS_PSW}" \ -i "${IMAGE_NAME}" \ -v "${COMMIT_SHA_SHORT}" \ -n "${TEAM_JIRA_DL}" \ -a "${TEAM_JIRA_AREA}" \ -C "${TEAM_JIRA_COMPONENT}" """ } } } post { always { cleanWs() } } } ================================================ FILE: prober/entrypoint.sh ================================================ #!/bin/bash # Exit immediately if a command exits with a non-zero status set -e JAVA_VERSION_ID="" JDBC_VERSION="" if [[ "$1" == "--java_version" && "$3" == "--driver_version" ]]; then JAVA_VERSION_ID="$2" JDBC_VERSION="$4" else echo "Error: Missing required arguments." echo "Usage: $0 --java_version --driver_version [prober_arguments...]" echo "Example: $0 --java_version 17.0.10-tem --driver_version 3.24.2 --host localhost --port 8080 --user admin --password secret" exit 1 fi PROBER_ARGS=("${@}") # Define key directories SDKMAN_DIR="/app/.sdkman" DRIVERS_DIR="/opt/jdbc_drivers" PROBER_APP_DIR="/app" source "${SDKMAN_DIR}/bin/sdkman-init.sh" >/dev/null 2>&1 sdk use java "${JAVA_VERSION_ID}" >/dev/null 2>&1 JDBC_DRIVER_JAR_PATH="${DRIVERS_DIR}/snowflake-jdbc-${JDBC_VERSION}.jar" CURRENT_CLASSPATH="${PROBER_APP_DIR}:${JDBC_DRIVER_JAR_PATH}" exec java -cp "${CURRENT_CLASSPATH}" \ --add-opens=java.base/java.nio=ALL-UNNAMED \ com.snowflake.client.jdbc.prober.Prober \ "${PROBER_ARGS[@]}" # This line will not be reached because 'exec' replaces the process. # It would only be reached if 'exec java' itself failed to start the Java process. echo "Error: Java application failed to start." 
exit 1 # Exit with an error if Java failed to start
================================================ FILE: prober/src/main/java/com/snowflake/client/jdbc/prober/Prober.java ================================================
package com.snowflake.client.jdbc.prober;

import net.snowflake.client.api.connection.SnowflakeConnection;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.security.KeyFactory;
import java.security.NoSuchAlgorithmException;
import java.security.interfaces.RSAPrivateCrtKey;
import java.security.spec.InvalidKeySpecException;
import java.security.spec.PKCS8EncodedKeySpec;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Base64;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Random;
import java.util.StringJoiner;
import java.util.logging.LogManager;
import java.util.stream.Collectors;

public class Prober {
  private static final String CHARACTERS = "abcdefghijklmnopqrstuvwxyz";
  private static final Random random = new Random();
  private static final String stageName = "test_stage_" + generateRandomString(10);
  private static final String stageFilePath = "test_file_" + generateRandomString(10) + ".txt";
  private static final String tableName = "test_table" + generateRandomString(10);
  private static String javaVersion;
  private static String driverVersion;

  enum Status {
    SUCCESS(0),
    FAILURE(1);

    private final int code;

    Status(int code) {
      this.code = code;
    }

    public int getCode() {
      return code;
    }
  }

  enum Scope {
    LOGIN,
    PUT_FETCH_GET,
    PUT_FETCH_GET_FAIL_CLOSED
  }

  public static void main(String[] args) throws Exception {
    Map<String, String> arguments = parseArguments(args);
    String url = "jdbc:snowflake://" + arguments.get("host");
    Properties props = new Properties();
    for (Map.Entry<String, String> entry : arguments.entrySet()) {
      props.setProperty(entry.getKey(), entry.getValue());
    }
    setPrivateKey(props);
    setupLogging(props);
    javaVersion = props.getProperty("java_version");
    driverVersion = props.getProperty("driver_version");
    if (Scope.LOGIN.name().toLowerCase().equals(props.getProperty("scope"))) {
      testLogin(url, props);
    }
    if (Scope.PUT_FETCH_GET.name().toLowerCase().equals(props.getProperty("scope"))) {
      testPutFetchGet(url, props);
    }
    if (Scope.PUT_FETCH_GET_FAIL_CLOSED.name().toLowerCase().equals(props.getProperty("scope"))) {
      testPutFetchGetFailClosed(url, props);
    }
  }

  private static void testLogin(String url, Properties properties) {
    boolean success;
    try (Connection connection = DriverManager.getConnection(url, properties);
        Statement statement = connection.createStatement();
        ResultSet resultSet = statement.executeQuery("select 1")) {
      resultSet.next();
      int result = resultSet.getInt(1);
      success = result == 1;
    } catch (SQLException e) {
      success = false;
      System.err.println(e.getMessage());
      logMetric("cloudprober_driver_java_perform_login", Status.FAILURE);
      System.exit(1);
    }
    logMetric("cloudprober_driver_java_perform_login", success ? Status.SUCCESS : Status.FAILURE);
  }

  private static void testPutFetchGet(String url, Properties properties) {
    try (Connection connection = DriverManager.getConnection(url, properties);
        Statement statement = connection.createStatement()) {
      SnowflakeConnection sfConnection = connection.unwrap(SnowflakeConnection.class);
      List<String> csv = generateCsv(100);
      String csvFile = csv.stream().collect(Collectors.joining(System.lineSeparator()));
      createWarehouse(statement, properties, "cloudprober_driver_java_create_warehouse");
      createDatabase(statement, properties, "cloudprober_driver_java_create_database");
      createSchema(statement, properties, "cloudprober_driver_java_create_schema");
      createDataTable(statement, "cloudprober_driver_java_create_table");
      createDataStage(statement, "cloudprober_driver_java_create_stage");
      uploadFile(sfConnection, csvFile, "cloudprober_driver_java_perform_put");
      loadFileIntoTable(statement, "cloudprober_driver_java_copy_data_from_stage_into_table");
      fetchAndVerifyRows(statement, "cloudprober_driver_java_data_transferred_completely");
      downloadFile(sfConnection, "cloudprober_driver_java_perform_get");
      compareFetchedDataAndFile(statement, csv, "cloudprober_driver_java_data_integrity");
      cleanupResources(statement, "cloudprober_driver_java_cleanup_resources");
      csv.clear();
      csvFile = null;
    } catch (SQLException e) {
      System.err.println(e.getMessage());
      System.exit(1);
    } finally {
      System.gc();
    }
  }

  private static void testPutFetchGetFailClosed(String url, Properties properties) {
    Properties failClosedProperties = new Properties();
    failClosedProperties.putAll(properties);
    failClosedProperties.put("ocspFailOpen", "false");
    try (Connection connection = DriverManager.getConnection(url, failClosedProperties);
        Statement statement = connection.createStatement()) {
      SnowflakeConnection sfConnection = connection.unwrap(SnowflakeConnection.class);
      List<String> csv = generateCsv(100);
      String csvFile = csv.stream().collect(Collectors.joining(System.lineSeparator()));
      createWarehouse(statement, failClosedProperties, "cloudprober_driver_java_create_warehouse_fail_closed");
      createDatabase(statement, failClosedProperties, "cloudprober_driver_java_create_database_fail_closed");
      createSchema(statement, failClosedProperties, "cloudprober_driver_java_create_schema_fail_closed");
      createDataTable(statement, "cloudprober_driver_java_create_table_fail_closed");
      createDataStage(statement, "cloudprober_driver_java_create_stage_fail_closed");
      uploadFile(sfConnection, csvFile, "cloudprober_driver_java_perform_put_fail_closed");
      loadFileIntoTable(statement, "cloudprober_driver_java_copy_data_from_stage_into_table_fail_closed");
      fetchAndVerifyRows(statement, "cloudprober_driver_java_data_transferred_completely_fail_closed");
      downloadFile(sfConnection, "cloudprober_driver_java_perform_get_fail_closed");
      compareFetchedDataAndFile(statement, csv, "cloudprober_driver_java_data_integrity_fail_closed");
      cleanupResources(statement, "cloudprober_driver_java_cleanup_resources_fail_closed");
      csv.clear();
      csvFile = null;
    } catch (SQLException e) {
      System.err.println(e.getMessage());
      System.exit(1);
    } finally {
      System.gc();
    }
  }

  private static void createDatabase(Statement statement, Properties properties, String metricName)
      throws SQLException {
    try {
      String databaseName = properties.getProperty("database", "test_db");
      try (ResultSet rs1 = statement.executeQuery("CREATE DATABASE IF NOT EXISTS " + databaseName);
          ResultSet rs2 = statement.executeQuery("USE database " + databaseName)) {}
      logMetric(metricName, Status.SUCCESS);
    } catch (SQLException e) {
      System.err.println("Error creating database: " + e.getMessage());
      logMetric(metricName, Status.FAILURE);
      System.exit(1);
    }
  }

  private static void createSchema(Statement statement, Properties properties, String metricName)
      throws SQLException {
    try {
      String schemaName = properties.getProperty("schema", "test_schema");
      try (ResultSet rs1 = statement.executeQuery("CREATE SCHEMA IF NOT EXISTS " + schemaName);
          ResultSet rs2 = statement.executeQuery("USE SCHEMA " + schemaName)) {}
      logMetric(metricName, Status.SUCCESS);
    } catch (SQLException e) {
      System.err.println("Error creating schema: " + e.getMessage());
      logMetric(metricName, Status.FAILURE);
      System.exit(1);
    }
  }

  private static void createWarehouse(Statement statement, Properties properties, String metricName)
      throws SQLException {
    try {
      String warehouseName = properties.getProperty("warehouse", "test_wh");
      try (ResultSet rs1 =
              statement.executeQuery(
                  "CREATE WAREHOUSE IF NOT EXISTS " + warehouseName + " WAREHOUSE_SIZE='X-SMALL';");
          ResultSet rs2 = statement.executeQuery("USE WAREHOUSE " + warehouseName)) {}
      logMetric(metricName, Status.SUCCESS);
    } catch (SQLException e) {
      System.err.println("Error creating warehouse: " + e.getMessage());
      logMetric(metricName, Status.FAILURE);
      System.exit(1);
    }
  }

  private static void cleanupResources(Statement statement, String metricName) {
    try {
      try (ResultSet rs1 = statement.executeQuery("REMOVE @" + stageName);
          ResultSet rs2 = statement.executeQuery("DROP TABLE IF EXISTS " + tableName)) {}
      logMetric(metricName, Status.SUCCESS);
    } catch (SQLException e) {
      System.err.println("Error during cleanup: " + e.getMessage());
      logMetric(metricName, Status.FAILURE);
      System.exit(1);
    }
  }

  private static void compareFetchedDataAndFile(Statement statement, List<String> csv, String metricName)
      throws SQLException {
    try (ResultSet resultSet =
        statement.executeQuery("select id,name,email from " + tableName + " order by id")) {
      for (int i = 1; i < csv.size(); i++) {
        String csvRow = csv.get(i);
        String[] csvValues = csvRow.split(",", 3);
        int listId = Integer.parseInt(csvValues[0]);
        String listName = csvValues[1];
        String listEmail = csvValues[2];
        if (!resultSet.next()) {
          logMetric(metricName, Status.FAILURE);
          return;
        }
        int dbId = resultSet.getInt(1);
        String dbName = resultSet.getString(2);
        String dbEmail = resultSet.getString(3);
        boolean idMatch = (dbId == listId);
        boolean nameMatch = dbName.equals(listName);
        boolean emailMatch = dbEmail.equals(listEmail);
        if (!(idMatch && nameMatch && emailMatch)) {
          logMetric(metricName, Status.FAILURE);
          return;
        }
      }
      logMetric(metricName, Status.SUCCESS);
    }
  }

  private static String downloadFile(SnowflakeConnection sfConnection, String metricName)
      throws SQLException {
    try (InputStream downloadStream = sfConnection.downloadStream("@" + stageName, stageFilePath, false);
        BufferedReader reader =
            new BufferedReader(new InputStreamReader(downloadStream, StandardCharsets.UTF_8))) {
      List<String> lines = reader.lines().collect(Collectors.toList());
      if (lines.size() == 101) {
        logMetric(metricName, Status.SUCCESS);
      } else {
        logMetric(metricName, Status.FAILURE);
      }
      return lines.stream().collect(Collectors.joining(System.lineSeparator()));
    } catch (IOException e) {
      logMetric(metricName, Status.FAILURE);
      throw new SQLException("Error downloading file", e);
    }
  }

  private static void fetchAndVerifyRows(Statement statement, String metricName) throws SQLException {
    try (ResultSet resultSet = statement.executeQuery("select count(*) from " + tableName)) {
      if (resultSet.next()) {
        int rowCount = resultSet.getInt(1);
        boolean success = rowCount == 100;
        logMetric(metricName, success ? Status.SUCCESS : Status.FAILURE);
      } else {
        logMetric(metricName, Status.FAILURE);
      }
    }
  }

  private static void loadFileIntoTable(Statement statement, String metricName) throws SQLException {
    try {
      try (ResultSet rs =
          statement.executeQuery(
              "copy into " + tableName + " from @" + stageName + "/" + stageFilePath
                  + " FILE_FORMAT = (TYPE = CSV FIELD_OPTIONALLY_ENCLOSED_BY = '\"' SKIP_HEADER = 1);")) {
        // ResultSet automatically closed by try-with-resources
      }
      logMetric(metricName, Status.SUCCESS);
    } catch (SQLException e) {
      System.err.println("Error during copy into table: " + e.getMessage());
      logMetric(metricName, Status.FAILURE);
      System.exit(1);
    }
  }

  private static void uploadFile(SnowflakeConnection sfConnection, String fileContent, String metricName)
      throws SQLException {
    try {
      sfConnection.uploadStream(
          "@" + stageName, "", new ByteArrayInputStream(fileContent.getBytes()), stageFilePath, false);
      logMetric(metricName, Status.SUCCESS);
    } catch (SQLException e) {
      System.err.println("Error during file upload: " + e.getMessage());
      logMetric(metricName, Status.FAILURE);
      System.exit(1);
    }
  }

  private static void createDataTable(Statement statement, String metricName) throws SQLException {
    try {
      try (ResultSet resultSet =
          statement.executeQuery(
              "CREATE OR REPLACE TABLE " + tableName + " (id int, name text, email text)")) {
        if (resultSet.next()) {
          boolean result =
              resultSet
                  .getString(1)
                  .equals("Table " + tableName.toUpperCase() + " successfully created.");
          logMetric(metricName, result ? Status.SUCCESS : Status.FAILURE);
        } else {
          logMetric(metricName, Status.FAILURE);
        }
      }
    } catch (SQLException e) {
      System.err.println(e.getMessage());
      logMetric(metricName, Status.FAILURE);
      System.exit(1);
    }
  }

  private static void createDataStage(Statement statement, String metricName) throws SQLException {
    try {
      try (ResultSet createStageResult =
          statement.executeQuery("CREATE OR REPLACE STAGE " + stageName)) {
        if (createStageResult.next()) {
          boolean result =
              createStageResult
                  .getString(1)
                  .equals("Stage area " + stageName.toUpperCase() + " successfully created.");
          logMetric(metricName, result ? Status.SUCCESS : Status.FAILURE);
        } else {
          logMetric(metricName, Status.FAILURE);
        }
      }
    } catch (SQLException e) {
      System.err.println(e.getMessage());
      logMetric(metricName, Status.FAILURE);
      System.exit(1);
    }
  }

  private static void setupLogging(Properties properties) throws IOException {
    String loggingPropertiesString =
        "handlers=java.util.logging.ConsoleHandler\n.level=" + properties.getProperty("log_level");
    properties.put("JAVA_LOGGING_CONSOLE_STD_OUT", "false");
    try (InputStream propertiesStream =
        new ByteArrayInputStream(loggingPropertiesString.getBytes(StandardCharsets.UTF_8))) {
      LogManager.getLogManager().readConfiguration(propertiesStream);
    }
  }

  private static void logMetric(String metricName, Status status) {
    System.out.println(
        metricName
            + "{java_version=\""
            + javaVersion
            + "\", driver_version=\""
            + driverVersion
            + "\"} "
            + status.getCode());
  }

  private static Map<String, String> parseArguments(String[] args) {
    Map<String, String> parsedArgs = new HashMap<>();
    for (int i = 0; i < args.length; i++) {
      String currentArg = args[i];
      if (currentArg.startsWith("--")) {
        String key = currentArg.substring(2); // Remove "--"
        // Check if there is a next argument to be the value
        if (i + 1 < args.length) {
          String nextArg = args[i + 1];
          // Ensure the next argument is not another key
          if (!nextArg.startsWith("--")) {
            parsedArgs.put(key, nextArg);
            i++; // Increment i to skip the value argument in the next iteration
          }
        }
      }
    }
    return parsedArgs;
  }

  private static List<String> generateCsv(int numRows) {
    String[] headers = {"ID", "Name", "Email"};
    List<String> csvRows = new ArrayList<>();
    csvRows.add(String.join(",", headers));
    for (int i = 1; i <= numRows; i++) {
      String firstName = generateRandomString(4 + random.nextInt(5));
      String lastName = generateRandomString(5 + random.nextInt(6));
      String fullName = firstName + " " + lastName;
      String email = (firstName + "." + lastName + "@example.com").toLowerCase();
      StringJoiner rowJoiner = new StringJoiner(",");
      rowJoiner.add(String.valueOf(i));
      rowJoiner.add(fullName);
      rowJoiner.add(email);
      csvRows.add(rowJoiner.toString());
    }
    return csvRows;
  }

  private static String generateRandomString(int length) {
    if (length <= 0) {
      return "";
    }
    StringBuilder builder = new StringBuilder(length);
    for (int i = 0; i < length; i++) {
      builder.append(CHARACTERS.charAt(random.nextInt(CHARACTERS.length())));
    }
    builder.setCharAt(0, Character.toUpperCase(builder.charAt(0)));
    return builder.toString();
  }

  private static void setPrivateKey(Properties props)
      throws IOException, NoSuchAlgorithmException, InvalidKeySpecException {
    String keyStr =
        new String(
                Files.readAllBytes(Paths.get(props.getProperty("private_key_file"))),
                StandardCharsets.UTF_8)
            .trim();
    byte[] keyBytes = Base64.getUrlDecoder().decode(keyStr);
    // Convert the DER bytes to a private key object
    PKCS8EncodedKeySpec keySpec = new PKCS8EncodedKeySpec(keyBytes);
    KeyFactory keyFactory = KeyFactory.getInstance("RSA");
    RSAPrivateCrtKey privateKey = (RSAPrivateCrtKey) keyFactory.generatePrivate(keySpec);
    props.put("privateKey", privateKey);
    // Remove the path from properties so the driver does not try to read it
    props.remove("private_key_file");
  }
}
================================================ FILE: public_pom.xml ================================================
4.0.0 net.snowflake snowflake-jdbc 1.0-SNAPSHOT jar Snowflake JDBC Driver Snowflake JDBC Driver https://www.snowflake.net/ The Apache Software License, Version 2.0 http://www.apache.org/licenses/LICENSE-2.0.txt Snowflake Support Team snowflake-java@snowflake.net Snowflake Computing https://www.snowflake.net scm:git:git://github.com/snowflakedb/snowflake-jdbc http://github.com/snowflakedb/snowflake-jdbc/tree/master 5.13.0 net.java.dev.jna jna ${jna.version} true net.java.dev.jna jna-platform ${jna.version} true
================================================ FILE: settings.json ================================================
================================================ FILE: src/main/java/net/snowflake/client/api/auth/AuthenticatorType.java ================================================
package net.snowflake.client.api.auth;

/**
 * Enumeration of authentication methods supported by the Snowflake JDBC driver.
 *

 * <p>This enum defines the various authentication mechanisms that can be used to establish a
 * connection to Snowflake. The authenticator type is specified via the connection property {@code
 * authenticator}.
 *
 * <h2>Usage Example</h2>
 *
 * <pre>{@code
 * Properties props = new Properties();
 * props.put("user", "myuser");
 * props.put("authenticator", "EXTERNALBROWSER");
 * Connection conn = DriverManager.getConnection(url, props);
 * }</pre>
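 *
 * <p>A key-pair sketch, assuming a {@code java.security.PrivateKey} loaded by the caller (the
 * {@code privateKey} property name follows the Prober example elsewhere in this repository):
 *
 * <pre>{@code
 * Properties props = new Properties();
 * props.put("user", "myuser");
 * props.put("authenticator", "SNOWFLAKE_JWT");
 * props.put("privateKey", privateKey); // pre-loaded java.security.PrivateKey (assumed)
 * Connection conn = DriverManager.getConnection(url, props);
 * }</pre>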
*/ public enum AuthenticatorType { /** Regular login with username and password via Snowflake, may or may not have MFA */ SNOWFLAKE, /** Federated authentication with OKTA as identity provider */ OKTA, /** Web-browser-based authenticator for SAML 2.0 compliant service/application */ EXTERNALBROWSER, /** OAuth 2.0 authentication flow */ OAUTH, /** Snowflake JWT token authentication using a private key */ SNOWFLAKE_JWT, /** Internal authenticator to enable id_token for web browser based authentication */ ID_TOKEN, /** Authenticator to enable token for regular login with MFA */ USERNAME_PASSWORD_MFA, /** OAuth authorization code flow with browser popup */ OAUTH_AUTHORIZATION_CODE, /** OAuth client credentials flow with clientId and clientSecret */ OAUTH_CLIENT_CREDENTIALS, /** Programmatic Access Token (PAT) authentication created in Snowflake */ PROGRAMMATIC_ACCESS_TOKEN, /** * Workload identity authentication using existing AWS/GCP/Azure/OIDC workload identity * credentials */ WORKLOAD_IDENTITY } ================================================ FILE: src/main/java/net/snowflake/client/api/connection/DownloadStreamConfig.java ================================================ package net.snowflake.client.api.connection; /** * Optional configuration for downloading files from a Snowflake stage as a stream. * *

 * <p>This class provides optional configuration for the {@link
 * SnowflakeConnection#downloadStream(String, String, DownloadStreamConfig)} method. Required
 * parameters (stageName, sourceFileName) are passed as method arguments, while optional settings
 * are configured here.
 *
 * <p>Example usage:
 *
 * <pre>{@code
 * DownloadStreamConfig config = DownloadStreamConfig.builder()
 *     .setDecompress(true)
 *     .build();
 *
 * try (InputStream stream = connection.downloadStream("@my_stage", "data/file.csv.gz", config)) {
 *   // Process the stream
 * }
 * }</pre>
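 *
 * <p>A follow-on sketch for consuming the stream, mirroring what the Prober elsewhere in this
 * repository does (charset and line handling are illustrative):
 *
 * <pre>{@code
 * try (InputStream stream = connection.downloadStream("@my_stage", "data/file.csv.gz", config);
 *     BufferedReader reader =
 *         new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8))) {
 *   reader.lines().forEach(System.out::println);
 * }
 * }</pre>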
* * @see SnowflakeConnection#downloadStream(String, String, DownloadStreamConfig) */ public class DownloadStreamConfig { private final boolean decompress; /** * Private constructor. Use {@link Builder} to create instances. * * @param builder the builder instance */ private DownloadStreamConfig(Builder builder) { this.decompress = builder.decompress; } /** * Whether to decompress the file during download. * * @return true if the file should be decompressed, false otherwise */ public boolean isDecompress() { return decompress; } /** * Creates a new builder instance. * * @return a new {@link Builder} */ public static Builder builder() { return new Builder(); } /** * Builder for creating {@link DownloadStreamConfig} instances. * *

   * <p>This builder provides a fluent API for configuring optional download stream settings. All
   * setter methods return the builder instance for method chaining.
   *
   * <p>Example:
   *
   * <pre>{@code
   * DownloadStreamConfig config = DownloadStreamConfig.builder()
   *     .setDecompress(true)
   *     .build();
   * }</pre>
*/ public static class Builder { private boolean decompress = false; /** Private constructor. Use {@link DownloadStreamConfig#builder()} instead. */ private Builder() {} /** * Sets whether to automatically decompress the file during download. * *

   * <p>If set to {@code true}, the driver will automatically decompress files with recognized
   * compression extensions (e.g., .gz, .bz2, .zip) during download. The returned stream will
   * contain the decompressed data.

If set to {@code false}, the file is downloaded as-is without decompression. * * @param decompress true to decompress the file, false to download as-is * @return this builder instance */ public Builder setDecompress(boolean decompress) { this.decompress = decompress; return this; } /** * Builds the {@link DownloadStreamConfig} instance. * * @return a new {@link DownloadStreamConfig} instance */ public DownloadStreamConfig build() { return new DownloadStreamConfig(this); } } @Override public String toString() { return "DownloadStreamConfig{" + "decompress=" + decompress + '}'; } } ================================================ FILE: src/main/java/net/snowflake/client/api/connection/SnowflakeConnection.java ================================================ package net.snowflake.client.api.connection; import java.io.InputStream; import java.sql.ResultSet; import java.sql.SQLException; import net.snowflake.client.api.resultset.QueryStatus; /** This interface defines Snowflake specific APIs for Connection */ public interface SnowflakeConnection { /** * Upload data from a stream to a Snowflake stage with required parameters only. * *

 * <p>This is a convenience method that uses default options (compress data, no destination
 * prefix). For advanced configuration, use {@link #uploadStream(String, String, InputStream,
 * UploadStreamConfig)}.
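 *
 * <p>A minimal sketch, assuming an open JDBC connection that can be unwrapped to this interface
 * (the stage and file names are illustrative):
 *
 * <pre>{@code
 * SnowflakeConnection sf = conn.unwrap(SnowflakeConnection.class);
 * byte[] data = "id,name\n1,alice\n".getBytes(StandardCharsets.UTF_8);
 * sf.uploadStream("@my_stage", "rows.csv", new ByteArrayInputStream(data));
 * }</pre>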

The caller is responsible for closing the input stream after upload completes. * * @param stageName the name of the stage (e.g., "@my_stage") * @param destFileName the destination file name on the stage * @param inputStream the input stream containing data to upload * @throws SQLException if upload fails */ void uploadStream(String stageName, String destFileName, InputStream inputStream) throws SQLException; /** * Upload data from a stream to a Snowflake stage with optional configuration. * *

 * <p>This method allows customization of upload behavior via {@link UploadStreamConfig}, such as
 * setting a destination prefix or controlling compression.

The caller is responsible for closing the input stream after upload completes. * * @param stageName the name of the stage (e.g., "@my_stage") * @param destFileName the destination file name on the stage * @param inputStream the input stream containing data to upload * @param config optional configuration for upload behavior * @throws SQLException if upload fails */ void uploadStream( String stageName, String destFileName, InputStream inputStream, UploadStreamConfig config) throws SQLException; /** * Download a file from a Snowflake stage as a stream with required parameters only. * *

 * <p>This is a convenience method that uses default options (no decompression). For advanced
 * configuration, use {@link #downloadStream(String, String, DownloadStreamConfig)}.

The caller is responsible for closing the returned input stream. * * @param stageName the name of the stage (e.g., "@my_stage") * @param sourceFileName the path to the file within the stage * @return an input stream containing the file data * @throws SQLException if download fails */ InputStream downloadStream(String stageName, String sourceFileName) throws SQLException; /** * Download a file from a Snowflake stage as a stream with optional configuration. * *

 * <p>This method allows customization of download behavior via {@link DownloadStreamConfig}, such
 * as automatic decompression.
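 *
 * <p>A minimal sketch, assuming {@code sf} is a connection unwrapped to this interface and a
 * gzip-compressed file already exists on the stage:
 *
 * <pre>{@code
 * DownloadStreamConfig config = DownloadStreamConfig.builder().setDecompress(true).build();
 * try (InputStream in = sf.downloadStream("@my_stage", "rows.csv.gz", config)) {
 *   // the stream yields the decompressed bytes
 * }
 * }</pre>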

The caller is responsible for closing the returned input stream. * * @param stageName the name of the stage (e.g., "@my_stage") * @param sourceFileName the path to the file within the stage * @param config optional configuration for download behavior * @return an input stream containing the file data * @throws SQLException if download fails */ InputStream downloadStream(String stageName, String sourceFileName, DownloadStreamConfig config) throws SQLException; /** * Return unique session ID from current session generated by making connection * * @return a unique alphanumeric value representing current session ID * @throws SQLException if an error occurs */ String getSessionID() throws SQLException; /** * Return the status of a query. * * @param queryID the query ID. * @return the status of the query. * @throws SQLException if an error occurs. */ QueryStatus getQueryStatus(String queryID) throws SQLException; /** * Create a new instance of a ResultSet object based off query ID. ResultSet will contain results * of corresponding query. Used when original ResultSet object is no longer available, such as * when original connection has been closed. * * @param queryID the query ID * @return ResultSet based off the query ID * @throws SQLException if an error occurs */ ResultSet createResultSet(String queryID) throws SQLException; /** * Return an array of child query IDs for the given query ID. * *

If the given query ID is for a multiple statements query, it returns an array of its child * statements, otherwise, it returns an array to include the given query ID. * * @param queryID The given query ID * @return An array of child query IDs * @throws SQLException If the query is running or the corresponding query is FAILED. */ String[] getChildQueryIds(String queryID) throws SQLException; /** * Get the major version of the Snowflake database. * * @return database major version * @throws SQLException if an error occurs */ int getDatabaseMajorVersion() throws SQLException; /** * Get the minor version of the Snowflake database. * * @return database minor version * @throws SQLException if an error occurs */ int getDatabaseMinorVersion() throws SQLException; /** * Get the full version string of the Snowflake database. * * @return database version string * @throws SQLException if an error occurs */ String getDatabaseVersion() throws SQLException; /** * Get the current role for the session. * * @return the current role name, or null if no role is set * @throws SQLException if the connection is closed or an error occurs */ String getRole() throws SQLException; /** * Get the current warehouse for the session. * * @return the current warehouse name, or null if no warehouse is set * @throws SQLException if the connection is closed or an error occurs */ String getWarehouse() throws SQLException; /** * Get the current database for the session. * *

This is equivalent to {@link java.sql.Connection#getCatalog()} but uses Snowflake-specific * terminology for discoverability. * * @return the current database name, or null if no database is set * @throws SQLException if the connection is closed or an error occurs */ String getDatabase() throws SQLException; } ================================================ FILE: src/main/java/net/snowflake/client/api/connection/SnowflakeDatabaseMetaData.java ================================================ package net.snowflake.client.api.connection; import java.sql.DatabaseMetaData; import java.sql.ResultSet; import java.sql.SQLException; /** * Snowflake-specific extension of {@link DatabaseMetaData}. * *

This interface extends the standard JDBC DatabaseMetaData interface with Snowflake-specific * metadata operations. */ public interface SnowflakeDatabaseMetaData extends DatabaseMetaData { /** * Retrieves a description of the streams available in the given catalog. * *
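 * <p>A hypothetical lookup, assuming the driver's metadata object can be unwrapped to this
 * interface and that the result set exposes a {@code name} column:
 *
 * <pre>{@code
 * SnowflakeDatabaseMetaData meta =
 *     connection.getMetaData().unwrap(SnowflakeDatabaseMetaData.class);
 * try (ResultSet streams = meta.getStreams("MY_DB", "PUBLIC", "%")) {
 *   while (streams.next()) {
 *     System.out.println(streams.getString("name")); // column name is an assumption
 *   }
 * }
 * }</pre>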

This is a Snowflake-specific extension for retrieving information about Snowflake streams. * * @param catalog a catalog name; must match the catalog name as it is stored in the database; "" * retrieves those without a catalog; null means that the catalog name should not be used to * narrow the search * @param schemaPattern a schema name pattern; must match the schema name as it is stored in the * database; "" retrieves those without a schema; null means that the schema name should not * be used to narrow the search * @param streamName a stream name pattern; must match the stream name as it is stored in the * database * @return a ResultSet object in which each row is a stream description * @throws SQLException if a database access error occurs */ ResultSet getStreams(String catalog, String schemaPattern, String streamName) throws SQLException; /** * Retrieves a description of the table columns available in the specified catalog with extended * metadata. * *

This is a Snowflake-specific overload of {@link DatabaseMetaData#getColumns} that allows * retrieving extended column metadata. * * @param catalog a catalog name; must match the catalog name as it is stored in the database; "" * retrieves those without a catalog; null means that the catalog name should not be used to * narrow the search * @param schemaPattern a schema name pattern; must match the schema name as it is stored in the * database; "" retrieves those without a schema; null means that the schema name should not * be used to narrow the search * @param tableNamePattern a table name pattern; must match the table name as it is stored in the * database * @param columnNamePattern a column name pattern; must match the column name as it is stored in * the database * @param extendedSet if true, returns extended metadata including base type information * @return ResultSet - each row is a column description * @throws SQLException if a database access error occurs */ ResultSet getColumns( String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern, boolean extendedSet) throws SQLException; } ================================================ FILE: src/main/java/net/snowflake/client/api/connection/UploadStreamConfig.java ================================================ package net.snowflake.client.api.connection; /** * Optional configuration for uploading data to a Snowflake stage from a stream. * *

 * <p>This class provides optional configuration for the {@link
 * SnowflakeConnection#uploadStream(String, String, java.io.InputStream, UploadStreamConfig)}
 * method. Required parameters (stageName, destFileName, inputStream) are passed as method
 * arguments, while optional settings are configured here.
 *
 * <p>Example usage:
 *
 * <pre>{@code
 * try (InputStream dataStream = new FileInputStream("data.csv")) {
 *   UploadStreamConfig config = UploadStreamConfig.builder()
 *       .setDestPrefix("data/2024")
 *       .setCompressData(true)
 *       .build();
 *
 *   connection.uploadStream("@my_stage", "uploaded_data.csv", dataStream, config);
 * }
 * }</pre>
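 *
 * <p>A variant sketch for content that is already compressed, so driver-side gzip is disabled
 * (the file name is illustrative):
 *
 * <pre>{@code
 * UploadStreamConfig config = UploadStreamConfig.builder()
 *     .setCompressData(false)
 *     .build();
 * connection.uploadStream("@my_stage", "archive.csv.gz", dataStream, config);
 * }</pre>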
* * @see SnowflakeConnection#uploadStream(String, String, java.io.InputStream, UploadStreamConfig) */ public class UploadStreamConfig { private final String destPrefix; private final boolean compressData; /** * Private constructor. Use {@link Builder} to create instances. * * @param builder the builder instance */ private UploadStreamConfig(Builder builder) { this.destPrefix = builder.destPrefix; this.compressData = builder.compressData; } /** * Gets the destination prefix (directory path within the stage). * * @return the destination prefix, or null if files should be uploaded to stage root */ public String getDestPrefix() { return destPrefix; } /** * Whether to compress the data during upload. * * @return true if data should be compressed, false otherwise */ public boolean isCompressData() { return compressData; } /** * Creates a new builder instance. * * @return a new {@link Builder} */ public static Builder builder() { return new Builder(); } /** * Builder for creating {@link UploadStreamConfig} instances. * *

   * <p>This builder provides a fluent API for configuring optional upload stream settings. All
   * setter methods return the builder instance for method chaining.
   *
   * <p>Example:
   *
   * <pre>{@code
   * UploadStreamConfig config = UploadStreamConfig.builder()
   *     .setDestPrefix("data/2024")
   *     .setCompressData(true)
   *     .build();
   * }</pre>
*/ public static class Builder { private String destPrefix; private boolean compressData = true; /** Private constructor. Use {@link UploadStreamConfig#builder()} instead. */ private Builder() {} /** * Sets the destination prefix (directory path) within the stage. * *

   * <p>This is optional. If not set, files will be uploaded to the root of the stage. Use forward
   * slashes to separate directory levels.

   * <p>Examples:
   *
   * <ul>
   *   <li>{@code "data"} - upload to data directory
   *   <li>{@code "data/2024/01"} - upload to nested directories
   *   <li>{@code null} or empty - upload to stage root (default)
   * </ul>
* * @param destPrefix the destination prefix/directory path (can be null or empty for stage root) * @return this builder instance */ public Builder setDestPrefix(String destPrefix) { this.destPrefix = (destPrefix == null || destPrefix.trim().isEmpty()) ? null : destPrefix.trim(); return this; } /** * Sets whether to automatically compress the data during upload. * *

   * <p>If set to {@code true} (default), the driver will compress the data using gzip compression
   * before uploading. This reduces upload time and storage costs. The file will be stored with a
   * .gz extension appended to the destination file name.

If set to {@code false}, the data is uploaded as-is without compression. * * @param compressData true to compress the data (default), false to upload uncompressed * @return this builder instance */ public Builder setCompressData(boolean compressData) { this.compressData = compressData; return this; } /** * Builds the {@link UploadStreamConfig} instance. * * @return a new {@link UploadStreamConfig} instance */ public UploadStreamConfig build() { return new UploadStreamConfig(this); } } @Override public String toString() { return "UploadStreamConfig{" + "destPrefix='" + destPrefix + '\'' + ", compressData=" + compressData + '}'; } } ================================================ FILE: src/main/java/net/snowflake/client/api/datasource/SnowflakeDataSource.java ================================================ package net.snowflake.client.api.datasource; import java.security.PrivateKey; import java.util.List; import java.util.Properties; import javax.sql.DataSource; import net.snowflake.client.api.http.HttpHeadersCustomizer; /** * Snowflake-specific extension of {@link javax.sql.DataSource} that provides configuration methods * for Snowflake JDBC connections. * *

 * <p>Use {@link SnowflakeDataSourceFactory} to create instances of this interface.
 *
 * <p>Example usage:
 *
 * <pre>{@code
 * SnowflakeDataSource ds = SnowflakeDataSourceFactory.createDataSource();
 * ds.setAccount("myaccount");
 * ds.setUser("myuser");
 * ds.setPassword("mypassword");
 * ds.setDatabaseName("mydb");
 * ds.setSchema("myschema");
 * ds.setWarehouse("mywh");
 *
 * try (Connection conn = ds.getConnection()) {
 *   // use connection
 * }
 * }</pre>
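 *
 * <p>A key-pair variant sketch, assuming an unencrypted PKCS#8 key file and that the driver
 * accepts the SNOWFLAKE_JWT authenticator name here:
 *
 * <pre>{@code
 * SnowflakeDataSource ds = SnowflakeDataSourceFactory.createDataSource();
 * ds.setAccount("myaccount");
 * ds.setUser("myuser");
 * ds.setAuthenticator("SNOWFLAKE_JWT");
 * ds.setPrivateKeyFile("/path/to/rsa_key.p8", null); // null passphrase assumed for unencrypted keys
 * }</pre>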
* * @see SnowflakeDataSourceFactory */ public interface SnowflakeDataSource extends DataSource { /** Sets the JDBC URL for the connection. */ void setUrl(String url); /** Sets the database name. */ void setDatabaseName(String databaseName); /** Sets the schema name. */ void setSchema(String schema); /** Sets the warehouse name. */ void setWarehouse(String warehouse); /** Sets the role name. */ void setRole(String role); /** Sets the user name. */ void setUser(String user); /** Sets the server name (hostname). */ void setServerName(String serverName); /** Sets the password. */ void setPassword(String password); /** Sets the port number. */ void setPortNumber(int portNumber); /** Sets the account identifier. */ void setAccount(String account); /** Sets whether to use SSL (default: true). */ void setSsl(boolean ssl); /** Sets the authenticator type (e.g., "snowflake", "externalbrowser", "oauth"). */ void setAuthenticator(String authenticator); /** Sets the token for OAuth/PAT authentication. */ void setToken(String token); /** Gets the JDBC URL. */ String getUrl(); /** Sets the private key for key-pair authentication. */ void setPrivateKey(PrivateKey privateKey); /** Sets the private key file location and optional password for key-pair authentication. */ void setPrivateKeyFile(String location, String password); /** Sets the Base64-encoded private key and optional password for key-pair authentication. */ void setPrivateKeyBase64(String privateKeyBase64, String password); /** Sets the tracing level. */ void setTracing(String tracing); /** Gets the connection properties. */ Properties getProperties(); /** Sets whether to allow underscores in hostnames. */ void setAllowUnderscoresInHost(boolean allowUnderscoresInHost); /** Sets whether to disable GCS default credentials. */ void setDisableGcsDefaultCredentials(boolean isGcsDefaultCredentialsDisabled); /** Sets whether to disable SAML URL validation. */ void setDisableSamlURLCheck(boolean disableSamlURLCheck); /** Sets the passcode for MFA authentication. */ void setPasscode(String passcode); /** Sets whether the passcode is included in the password for MFA authentication. */ void setPasscodeInPassword(boolean isPasscodeInPassword); /** Sets whether to disable SOCKS proxy. */ void setDisableSocksProxy(boolean ignoreJvmSocksProxy); /** Sets non-proxy hosts pattern. */ void setNonProxyHosts(String nonProxyHosts); /** Sets the proxy host. */ void setProxyHost(String proxyHost); /** Sets the proxy password. */ void setProxyPassword(String proxyPassword); /** Sets the proxy port. */ void setProxyPort(int proxyPort); /** Sets the proxy protocol (e.g., "http", "https"). */ void setProxyProtocol(String proxyProtocol); /** Sets the proxy user. */ void setProxyUser(String proxyUser); /** Sets whether to use a proxy. */ void setUseProxy(boolean useProxy); /** Sets the network timeout in seconds. */ void setNetworkTimeout(int networkTimeoutSeconds); /** Sets the query timeout in seconds. */ void setQueryTimeout(int queryTimeoutSeconds); /** Sets the application name. */ void setApplication(String application); /** Sets the client configuration file path. */ void setClientConfigFile(String clientConfigFile); /** Sets whether to enable pattern search in metadata queries. */ void setEnablePatternSearch(boolean enablePatternSearch); /** Sets whether to enable PUT/GET commands. */ void setEnablePutGet(boolean enablePutGet); /** Sets whether to treat Arrow DECIMAL columns as INT. 
*/ void setArrowTreatDecimalAsInt(boolean treatDecimalAsInt); /** Sets the maximum number of HTTP retries. */ void setMaxHttpRetries(int maxHttpRetries); /** Sets whether OCSP checking should fail open. */ void setOcspFailOpen(boolean ocspFailOpen); /** Sets the maximum number of PUT/GET retries. */ void setPutGetMaxRetries(int putGetMaxRetries); /** Sets whether strings are quoted in column definitions. */ void setStringsQuotedForColumnDef(boolean stringsQuotedForColumnDef); /** Sets whether to enable diagnostics. */ void setEnableDiagnostics(boolean enableDiagnostics); /** Sets the diagnostics allowlist file path. */ void setDiagnosticsAllowlistFile(String diagnosticsAllowlistFile); /** Sets the default date format with timezone for JDBC. */ void setJDBCDefaultFormatDateWithTimezone(Boolean jdbcDefaultFormatDateWithTimezone); /** Sets whether getDate should use null timezone. */ void setGetDateUseNullTimezone(Boolean getDateUseNullTimezone); /** Sets whether to enable client-side MFA token request. */ void setEnableClientRequestMfaToken(boolean enableClientRequestMfaToken); /** Sets whether to enable client-side storage of temporary credentials. */ void setEnableClientStoreTemporaryCredential(boolean enableClientStoreTemporaryCredential); /** Sets the browser response timeout in seconds for external browser authentication. */ void setBrowserResponseTimeout(int seconds); /** Sets custom HTTP header customizers. */ void setHttpHeadersCustomizers(List httpHeadersCustomizers); } ================================================ FILE: src/main/java/net/snowflake/client/api/datasource/SnowflakeDataSourceFactory.java ================================================ package net.snowflake.client.api.datasource; import net.snowflake.client.internal.api.implementation.datasource.SnowflakeBasicDataSource; /** * Factory for creating {@link SnowflakeDataSource} instances. * *

This factory provides methods to create different types of Snowflake DataSource implementations. Use this factory instead of directly instantiating DataSource classes.

Example usage:

{@code
 * SnowflakeDataSource ds = SnowflakeDataSourceFactory.createDataSource();
 * ds.setAccount("myaccount");
 * ds.setUser("myuser");
 * ds.setPassword("mypassword");
 * ds.setDatabaseName("mydb");
 * ds.setSchema("myschema");
 * ds.setWarehouse("mywh");
 *
 * try (Connection conn = ds.getConnection()) {
 *   // use connection
 * }
 * }
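Note that createDataSource() returns a non-pooled DataSource: each getConnection() call opens a new physical session, and the method's Javadoc below suggests pairing it with an external pool manager. A sketch using HikariCP, assuming the HikariCP dependency is on the classpath:

import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;

SnowflakeDataSource snowflakeDs = SnowflakeDataSourceFactory.createDataSource();
snowflakeDs.setAccount("myaccount");
snowflakeDs.setUser("myuser");
snowflakeDs.setPassword("mypassword");

HikariConfig config = new HikariConfig();
config.setDataSource(snowflakeDs); // HikariCP delegates physical connections to the Snowflake DataSource
config.setMaximumPoolSize(10);
try (HikariDataSource pool = new HikariDataSource(config)) {
  try (Connection conn = pool.getConnection()) {
    // pooled connection
  }
}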
* * @see SnowflakeDataSource */ public final class SnowflakeDataSourceFactory { private SnowflakeDataSourceFactory() { throw new AssertionError("SnowflakeDataSourceFactory cannot be instantiated"); } /** * Creates a new non-pooled Snowflake DataSource. * *

This DataSource creates a new physical connection for each {@link * javax.sql.DataSource#getConnection()} call. For applications that require connection pooling, * consider using an external connection pool manager (e.g., HikariCP, Apache DBCP) with this * DataSource. * * @return a new {@link SnowflakeDataSource} instance */ public static SnowflakeDataSource createDataSource() { return new SnowflakeBasicDataSource(); } } ================================================ FILE: src/main/java/net/snowflake/client/api/driver/SnowflakeDriver.java ================================================ package net.snowflake.client.api.driver; import java.sql.Connection; import java.sql.Driver; import java.sql.DriverManager; import java.sql.DriverPropertyInfo; import java.sql.SQLException; import java.util.List; import java.util.Properties; import net.snowflake.client.internal.api.implementation.connection.SnowflakeConnectionImpl; import net.snowflake.client.internal.driver.AutoConfigurationHelper; import net.snowflake.client.internal.driver.ConnectionFactory; import net.snowflake.client.internal.driver.DriverInitializer; import net.snowflake.client.internal.driver.DriverVersion; import net.snowflake.client.internal.jdbc.SnowflakeConnectString; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.common.core.ResourceBundleManager; /** * JDBC Driver implementation for Snowflake. * *

To use this driver, specify one of the following URL formats:

  • {@code jdbc:snowflake://host:port} - Standard connection
  • {@code jdbc:snowflake:auto} - Auto-configuration from connections.toml
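A minimal sketch of both connection forms (host and credential values are placeholders; the auto form assumes a populated connections.toml):

Properties props = new Properties();
props.put("user", "myuser");
props.put("password", "mypassword");

// Standard form; as noted below, the driver registers itself with DriverManager,
// so no Class.forName call is needed.
try (Connection conn = DriverManager.getConnection(
    "jdbc:snowflake://myaccount.snowflakecomputing.com:443", props)) {
  // use connection
}

// Auto-configuration form; parameters are resolved from connections.toml.
try (Connection conn = DriverManager.getConnection("jdbc:snowflake:auto")) {
  // use connection
}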

The driver is automatically registered with {@link DriverManager} when loaded. * * @see java.sql.Driver */ public class SnowflakeDriver implements Driver { private static final SFLogger logger = SFLoggerFactory.getLogger(SnowflakeDriver.class); public static final SnowflakeDriver INSTANCE; public static final Properties EMPTY_PROPERTIES = new Properties(); private static final DriverVersion VERSION = DriverVersion.getInstance(); static final ResourceBundleManager versionResourceBundleManager = ResourceBundleManager.getSingleton("net.snowflake.client.jdbc.version"); static { try { DriverManager.registerDriver(INSTANCE = new SnowflakeDriver()); logger.debug("Snowflake JDBC Driver {} registered successfully", VERSION.getFullVersion()); } catch (SQLException ex) { throw new IllegalStateException("Unable to register " + SnowflakeDriver.class.getName(), ex); } // Perform all driver initialization (Arrow, security, telemetry, etc.) DriverInitializer.initialize(); } /** * Checks whether a given URL is in a valid Snowflake JDBC format. * *

Valid formats:

  • {@code jdbc:snowflake://host[:port]} - Standard connection
  • {@code jdbc:snowflake:auto} - Auto-configuration
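For illustration, how acceptsURL() classifies a few URLs (a sketch; the last URL is simply any non-Snowflake JDBC URL):

SnowflakeDriver driver = SnowflakeDriver.INSTANCE;
driver.acceptsURL("jdbc:snowflake://myaccount.snowflakecomputing.com"); // true
driver.acceptsURL("jdbc:snowflake:auto");                               // true
driver.acceptsURL("jdbc:postgresql://localhost/db");                    // false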
* * @param url the database URL * @return true if the URL is valid and accepted by this driver */ @Override public boolean acceptsURL(String url) { if (AutoConfigurationHelper.isAutoConfigurationUrl(url)) { return true; } return SnowflakeConnectString.parse(url, EMPTY_PROPERTIES).isValid(); } /** * Establishes a connection to the Snowflake database. * * @param url the database URL * @param info additional connection properties * @return a Connection object, or null if the URL is not accepted * @throws SQLException if a database access error occurs */ @Override public Connection connect(String url, Properties info) throws SQLException { return ConnectionFactory.createConnection(url, info); } /** * Establishes a connection using auto-configuration. * * @return a Connection object * @throws SQLException if a database access error occurs */ public Connection connect() throws SQLException { return ConnectionFactory.createConnectionWithAutoConfig(); } @Override public int getMajorVersion() { return VERSION.getMajor(); } @Override public int getMinorVersion() { return VERSION.getMinor(); } /** * Gets the driver patch version number. This is not part of the standard JDBC Driver interface, * but is needed for full version information. * * @return driver patch version number */ public long getPatchVersion() { return VERSION.getPatch(); } /** * Gets driver version information for telemetry and logging. * * @return full version string (e.g., "4.0.0") */ public static String getImplementationVersion() { return VERSION.getFullVersion(); } @Override public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException { if (url == null || url.isEmpty()) { DriverPropertyInfo[] result = new DriverPropertyInfo[1]; result[0] = new DriverPropertyInfo("serverURL", null); result[0].description = "server URL in form of ://:/"; return result; } try (SnowflakeConnectionImpl con = new SnowflakeConnectionImpl(url, info, true)) { List missingProperties = con.returnMissingProperties(); return missingProperties.toArray(new DriverPropertyInfo[0]); } } @Override public boolean jdbcCompliant() { return false; } @Override public java.util.logging.Logger getParentLogger() { return null; } } ================================================ FILE: src/main/java/net/snowflake/client/api/exception/ErrorCode.java ================================================ package net.snowflake.client.api.exception; import net.snowflake.common.core.SqlState; /** Internal JDBC driver error codes */ public enum ErrorCode { /** * Error codes partitioning: * *

  • 0NNNNN: GS SQL error codes
  • 1NNNNN: XP error codes
  • 2NNNNN: JDBC driver error codes
  • 3NNNNN: GS generic error codes
  • 4NNNNN: Node.js driver error codes

N can be any digits from 0 to 9. */ INTERNAL_ERROR(200001, SqlState.INTERNAL_ERROR), CONNECTION_ERROR(200002, SqlState.SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION), INTERRUPTED(200003, SqlState.QUERY_CANCELED), COMPRESSION_TYPE_NOT_SUPPORTED(200004, SqlState.FEATURE_NOT_SUPPORTED), QUERY_CANCELED(200005, SqlState.QUERY_CANCELED), COMPRESSION_TYPE_NOT_KNOWN(200006, SqlState.FEATURE_NOT_SUPPORTED), FAIL_LIST_FILES(200007, SqlState.DATA_EXCEPTION), FILE_NOT_FOUND(200008, SqlState.DATA_EXCEPTION), FILE_IS_DIRECTORY(200009, SqlState.DATA_EXCEPTION), DUPLICATE_CONNECTION_PROPERTY_SPECIFIED(200010, SqlState.DATA_EXCEPTION), MISSING_USERNAME(200011, SqlState.INVALID_AUTHORIZATION_SPECIFICATION), MISSING_PASSWORD(200012, SqlState.INVALID_AUTHORIZATION_SPECIFICATION), S3_OPERATION_ERROR(200013, SqlState.SYSTEM_ERROR), MAX_RESULT_LIMIT_EXCEEDED(200014, SqlState.PROGRAM_LIMIT_EXCEEDED), NETWORK_ERROR(200015, SqlState.IO_ERROR), IO_ERROR(200016, SqlState.IO_ERROR), PATH_NOT_DIRECTORY(200017, SqlState.DATA_EXCEPTION), DATA_TYPE_NOT_SUPPORTED(200018, SqlState.FEATURE_NOT_SUPPORTED), CLIENT_SIDE_SORTING_NOT_SUPPORTED(200019, SqlState.FEATURE_NOT_SUPPORTED), AWS_CLIENT_ERROR(200020, SqlState.SYSTEM_ERROR), INVALID_SQL(200021, SqlState.SQL_STATEMENT_NOT_YET_COMPLETE), BAD_RESPONSE(200022, SqlState.INTERNAL_ERROR), ARRAY_BIND_MIXED_TYPES_NOT_SUPPORTED(200023, SqlState.FEATURE_NOT_SUPPORTED), STATEMENT_CLOSED(200024, SqlState.FEATURE_NOT_SUPPORTED), STATEMENT_ALREADY_RUNNING_QUERY(200025, SqlState.FEATURE_NOT_SUPPORTED), MISSING_SERVER_URL(200026, SqlState.INVALID_AUTHORIZATION_SPECIFICATION), TOO_MANY_SESSION_PARAMETERS(200027, SqlState.FEATURE_NOT_SUPPORTED), MISSING_CONNECTION_PROPERTY(200028, SqlState.INVALID_AUTHORIZATION_SPECIFICATION), INVALID_CONNECTION_URL(200029, SqlState.INVALID_AUTHORIZATION_SPECIFICATION), DUPLICATE_STATEMENT_PARAMETER_SPECIFIED(200030, SqlState.DATA_EXCEPTION), TOO_MANY_STATEMENT_PARAMETERS(200031, SqlState.FEATURE_NOT_SUPPORTED), COLUMN_DOES_NOT_EXIST(200032, SqlState.DATA_EXCEPTION), INVALID_PARAMETER_TYPE(200033, SqlState.INVALID_PARAMETER_VALUE), ROW_DOES_NOT_EXIST(200034, SqlState.DATA_EXCEPTION), FEATURE_UNSUPPORTED(200035, SqlState.FEATURE_NOT_SUPPORTED), INVALID_STATE(200036, SqlState.FEATURE_NOT_SUPPORTED), RESULTSET_ALREADY_CLOSED(200037, SqlState.FEATURE_NOT_SUPPORTED), INVALID_VALUE_CONVERT(200038, SqlState.FEATURE_NOT_SUPPORTED), IDP_CONNECTION_ERROR(200039, SqlState.SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION), IDP_INCORRECT_DESTINATION(200040, SqlState.SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION), CONNECTION_ESTABLISHED_WITH_DIFFERENT_PROP(200041, SqlState.WARNING), UNSUPPORTED_STATEMENT_TYPE_IN_EXECUTION_API(200042, SqlState.FEATURE_NOT_SUPPORTED), STATEMENT_PREPARE_FAILURE(200043, SqlState.FEATURE_NOT_SUPPORTED), AZURE_SERVICE_ERROR(200044, SqlState.SYSTEM_ERROR), INVALID_OR_UNSUPPORTED_PRIVATE_KEY(200045, SqlState.SYNTAX_ERROR), FAILED_TO_GENERATE_JWT(200046, SqlState.SYNTAX_ERROR), INVALID_PARAMETER_VALUE(200047, SqlState.INVALID_PARAMETER_VALUE), QUERY_FIRST_RESULT_NOT_RESULT_SET(200048, SqlState.WARNING), UPDATE_FIRST_RESULT_NOT_UPDATE_COUNT(200049, SqlState.WARNING), CHILD_RESULT_IDS_AND_TYPES_DIFFERENT_SIZES(200050, SqlState.INTERNAL_ERROR), INVALID_PROXY_PROPERTIES(200051, SqlState.CONNECTION_EXCEPTION), CONNECTION_CLOSED(200052, SqlState.CONNECTION_DOES_NOT_EXIST), NON_FATAL_ERROR(200053, SqlState.WARNING), NUMERIC_VALUE_OUT_OF_RANGE(200054, SqlState.NUMERIC_VALUE_OUT_OF_RANGE), NO_VALID_DATA(200055, SqlState.NO_DATA), INVALID_APP_NAME(200056, 
SqlState.INVALID_PARAMETER_VALUE), EXECUTE_BATCH_INTEGER_OVERFLOW(200058, SqlState.NUMERIC_VALUE_OUT_OF_RANGE), INVALID_CONNECT_STRING(200059, SqlState.CONNECTION_EXCEPTION), INVALID_OKTA_USERNAME(200060, SqlState.CONNECTION_EXCEPTION), GCP_SERVICE_ERROR(200061, SqlState.SYSTEM_ERROR), AUTHENTICATOR_REQUEST_TIMEOUT(200062, SqlState.CONNECTION_EXCEPTION), INVALID_STRUCT_DATA(200063, SqlState.DATA_EXCEPTION), DISABLEOCSP_INSECUREMODE_VALUE_MISMATCH(200064, SqlState.INVALID_PARAMETER_VALUE), TOO_MANY_FILES_TO_DOWNLOAD_AS_STREAM(200065, SqlState.DATA_EXCEPTION), FILE_OPERATION_UPLOAD_ERROR(200066, SqlState.INTERNAL_ERROR), FILE_OPERATION_DOWNLOAD_ERROR(200067, SqlState.INTERNAL_ERROR), OAUTH_AUTHORIZATION_CODE_FLOW_ERROR(200068, SqlState.CONNECTION_EXCEPTION), OAUTH_CLIENT_CREDENTIALS_FLOW_ERROR(200069, SqlState.CONNECTION_EXCEPTION), OAUTH_REFRESH_TOKEN_FLOW_ERROR(200070, SqlState.CONNECTION_EXCEPTION), WORKLOAD_IDENTITY_FLOW_ERROR(200071, SqlState.CONNECTION_EXCEPTION), OKTA_MFA_NOT_SUPPORTED(200072, SqlState.FEATURE_NOT_SUPPORTED), UNKNOWN_CERT_REVOCATION_CHECK_MODE(200073, SqlState.INVALID_PARAMETER_VALUE), BOTH_OCSP_AND_CERT_REVOCATION_CHECK(200074, SqlState.FEATURE_NOT_SUPPORTED), FILE_TRANSFER_ERROR(253000, SqlState.SYSTEM_ERROR), DOWNLOAD_ERROR(253002, SqlState.SYSTEM_ERROR), UPLOAD_ERROR(253003, SqlState.SYSTEM_ERROR), OCSP_GENERAL_ERROR(254000, SqlState.INTERNAL_ERROR), HTTP_GENERAL_ERROR(290000, SqlState.INTERNAL_ERROR); public static final String errorMessageResource = "net.snowflake.client.jdbc.jdbc_error_messages"; /** Snowflake internal message associated to the error. */ private final Integer messageCode; private final String sqlState; /** * Construct a new error code specification given Snowflake internal error code and SQL state * error code. * *

* * @param messageCode Snowflake internal error code * @param sqlState SQL state error code */ ErrorCode(Integer messageCode, String sqlState) { this.messageCode = messageCode; this.sqlState = sqlState; } public Integer getMessageCode() { return messageCode; } public String getSqlState() { return sqlState; } @Override public String toString() { return "ErrorCode{" + "messageCode=" + messageCode + ", sqlState=" + sqlState + '}'; } } ================================================ FILE: src/main/java/net/snowflake/client/api/exception/SnowflakeSQLException.java ================================================ package net.snowflake.client.api.exception; import java.sql.SQLException; import net.snowflake.client.internal.core.SFException; import net.snowflake.common.core.ResourceBundleManager; public class SnowflakeSQLException extends SQLException { private static final long serialVersionUID = 1L; static final ResourceBundleManager errorResourceBundleManager = ResourceBundleManager.getSingleton(ErrorCode.errorMessageResource); private String queryId = "unknown"; /** * This constructor should only be used for error from Global service. Since Global service has * already built the error message, we use it as is. For any errors local to JDBC driver, we * should use one of the constructors below to build the error message. * * @param queryId query id * @param reason reason for which exception is created * @param sqlState SQL state * @param vendorCode vendor code */ public SnowflakeSQLException(String queryId, String reason, String sqlState, int vendorCode) { super(reason, sqlState, vendorCode); this.queryId = queryId; } /** * @param queryId the queryID * @param reason exception reason * @param sqlState the SQL state */ public SnowflakeSQLException(String queryId, String reason, String sqlState) { super(reason, sqlState); this.queryId = queryId; } /** * @param queryId query ID * @param sqlState SQL state * @param vendorCode vendor code */ public SnowflakeSQLException(String queryId, String sqlState, int vendorCode) { super( errorResourceBundleManager.getLocalizedMessage(String.valueOf(vendorCode)), sqlState, vendorCode); this.queryId = queryId; } /** * @param sqlState the SQL state * @param vendorCode the vendor code * @param params additional parameters */ public SnowflakeSQLException(String sqlState, int vendorCode, Object... params) { this((String) null, sqlState, vendorCode, params); } /** * @param queryId query ID * @param sqlState the SQL state * @param vendorCode the vendor code * @param params additional parameters */ public SnowflakeSQLException(String queryId, String sqlState, int vendorCode, Object... params) { super( errorResourceBundleManager.getLocalizedMessage(String.valueOf(vendorCode), params), sqlState, vendorCode); this.queryId = queryId; } /** * @param ex Throwable exception * @param sqlState the SQL state * @param vendorCode the vendor code */ public SnowflakeSQLException(Throwable ex, String sqlState, int vendorCode) { super( errorResourceBundleManager.getLocalizedMessage(String.valueOf(vendorCode)), sqlState, vendorCode, ex); } /** * @param ex Throwable exception * @param errorCode the error code * @param params additional parameters */ public SnowflakeSQLException(Throwable ex, ErrorCode errorCode, Object... 
params) { this(ex, errorCode.getSqlState(), errorCode.getMessageCode(), params); } /** * @param ex Throwable exception * @param sqlState the SQL state * @param vendorCode the vendor code * @param params additional parameters */ public SnowflakeSQLException(Throwable ex, String sqlState, int vendorCode, Object... params) { this(null, ex, sqlState, vendorCode, params); } /** * @param queryId query ID * @param ex Throwable exception * @param sqlState the SQL state * @param vendorCode the vendor code * @param params additional parameters */ public SnowflakeSQLException( String queryId, Throwable ex, String sqlState, int vendorCode, Object... params) { super( errorResourceBundleManager.getLocalizedMessage(String.valueOf(vendorCode), params), sqlState, vendorCode, ex); this.queryId = queryId; } /** * @param errorCode the error code * @param params additional parameters */ public SnowflakeSQLException(ErrorCode errorCode, Object... params) { super( errorResourceBundleManager.getLocalizedMessage( String.valueOf(errorCode.getMessageCode()), params), errorCode.getSqlState(), errorCode.getMessageCode()); } /** * @param queryId query ID * @param errorCode error code * @param params additional parameters */ public SnowflakeSQLException(String queryId, ErrorCode errorCode, Object... params) { super( errorResourceBundleManager.getLocalizedMessage( String.valueOf(errorCode.getMessageCode()), params), errorCode.getSqlState(), errorCode.getMessageCode()); this.queryId = queryId; } /** * @param e the SFException */ public SnowflakeSQLException(SFException e) { this(e.getQueryId(), e.getMessage(), e.getSqlState(), e.getVendorCode()); } public SnowflakeSQLException(String reason) { super(reason); } public SnowflakeSQLException(Throwable ex, String message) { super(message, ex); } public String getQueryId() { return queryId; } } ================================================ FILE: src/main/java/net/snowflake/client/api/http/HttpHeadersCustomizer.java ================================================ package net.snowflake.client.api.http; import java.util.List; import java.util.Map; /** * Allows programmatic customization of HTTP headers for requests sent by the Snowflake JDBC driver. * *

Implementations can be registered with the driver (e.g., via {@link * net.snowflake.client.api.datasource.SnowflakeDataSource}) to dynamically add headers. They define * which requests to apply headers to, provide the headers, and specify if headers should regenerate * on retries (e.g., for dynamic tokens). */ public interface HttpHeadersCustomizer { String HTTP_HEADER_CUSTOMIZERS_PROPERTY_KEY = "net.snowflake.client.jdbc.HttpHeadersCustomizer"; /** * Determines if this customizer should be applied to the given request context. * * @param method The HTTP method (e.g., "GET", "POST"). * @param uri The target URI for the request. * @param currentHeaders A read-only view of headers already present before this customizer runs. * @return true if newHeaders() should be called for this request, false otherwise. */ boolean applies(String method, String uri, Map> currentHeaders); /** * Generates the custom headers to be added to the request. * * @return A Map where keys are header names and values are Lists of header values. */ Map> newHeaders(); /** * Indicates if newHeaders() should be called only once for the initial request attempt (true), or * if it should be called again before each retry attempt (false). * * @return true for static headers, false for dynamic headers needing regeneration. */ boolean invokeOnce(); } ================================================ FILE: src/main/java/net/snowflake/client/api/loader/LoadResultListener.java ================================================ package net.snowflake.client.api.loader; /** Callback API for processing errors and statistics of upload operation */ public interface LoadResultListener { /** * @return this result listener needs to listen to the provided records */ boolean needSuccessRecords(); /** * @param op Operation requested * @param record Data submitted for processing */ void recordProvided(Operation op, Object[] record); /** * @param op Operation requested * @param i number of rows that had been processed */ void addProcessedRecordCount(Operation op, int i); /** * @param op Operation requested * @param i number of rows that had been affected by a given operation */ void addOperationRecordCount(Operation op, int i); /** * @return whether this result listener needs to listen to error records */ boolean needErrors(); /** * @param error information about error that was encountered */ void addError(LoadingError error); /** * @return Whether to throw an exception upon encountering error */ boolean throwOnError(); /** * Method to add to the error count for a listener * * @param number the number of errors */ void addErrorCount(int number); /** Method to reset the error count back to zero */ void resetErrorCount(); /** * method to get the total number of errors * * @return the number of errors */ int getErrorCount(); /** * Method to add to the error record count for a listener * * @param number the number of error records */ void addErrorRecordCount(int number); /** Method to reset the errorRecordCount back to zero */ void resetErrorRecordCount(); /** * method to get the total number of error records * * @return the number of rows in errors */ int getErrorRecordCount(); /** Resets submitted row count */ void resetSubmittedRowCount(); /** * Adds the number of submitted rows * * @param number the number of submitted row */ void addSubmittedRowCount(int number); /** * Gets the number of submitted row * * @return the number of submitted row */ int getSubmittedRowCount(); } ================================================ FILE: 
src/main/java/net/snowflake/client/api/loader/Loader.java ================================================ package net.snowflake.client.api.loader; /** * Bulk loader for Snowflake. * *

This interface extends {@link AutoCloseable}, enabling the try-with-resources pattern for automatic resource management:

{@code
 * try (Loader loader = LoaderFactory.createLoader(...)) {
 *     loader.start();
 *     loader.submitRow(data);
 *     loader.finish();
 * } // Connections closed automatically
 * }
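A fuller configuration sketch, assuming two already-open JDBC connections (uploadConn, processingConn) and a hypothetical target table; the property keys come from LoaderProperty below, and the Map generics are assumed (the extraction above stripped generic parameters):

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

Map<LoaderProperty, Object> props = new HashMap<>();
props.put(LoaderProperty.tableName, "T1");             // hypothetical target table
props.put(LoaderProperty.schemaName, "PUBLIC");
props.put(LoaderProperty.databaseName, "MYDB");
props.put(LoaderProperty.operation, Operation.INSERT);
props.put(LoaderProperty.columns, Arrays.asList("ID", "NAME"));

try (Loader loader = LoaderFactory.createLoader(props, uploadConn, processingConn)) {
  loader.start();
  loader.submitRow(new Object[] {1, "one"});
  loader.submitRow(new Object[] {2, "two"});
  loader.finish(); // commits, or rethrows the cause of an abort
}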
*/ public interface Loader extends AutoCloseable { // Configuration, see LoaderProperty void setProperty(LoaderProperty property, Object value); // Callback API void setListener(LoadResultListener listener); /** Initiates loading threads. Executes "before" statement */ void start(); /** * Pass row data * * @param data, must match shape of the table (requested columns, in the order provided) */ void submitRow(Object[] data); /** * If operation is changed, previous data is committed * * @param op operation will be reset */ void resetOperation(Operation op); /** * Rollback uncommitted changes. If no transaction was initialized, indeterminate fraction of rows * could have been committed. */ void rollback(); /** * Finishes processing and commits or rolls back. Will throw the exception that was the cause of * an abort * * @throws Exception if that was the cause of an abort */ void finish() throws Exception; /** * Close connections that have been provided upon initialization. * *

This method is called automatically when using try-with-resources. It is safe to call this * method multiple times. * * @throws Exception if an error occurs while closing resources */ @Override void close() throws Exception; // Raised for data conversion errors, if requested class DataError extends RuntimeException { private static final long serialVersionUID = 1L; public DataError(String msg) { super(msg); } public DataError(String msg, Throwable ex) { super(msg, ex); } public DataError(Throwable ex) { super(ex); } } // Raised for connection and other system errors. class ConnectionError extends RuntimeException { private static final long serialVersionUID = 1L; public ConnectionError(String msg) { super(msg); } // SNOW-22336: pass cause to connector public ConnectionError(String msg, Throwable ex) { super(msg, ex); } public ConnectionError(Throwable ex) { super(ex); } } // get the listener instance used by this loader instance LoadResultListener getListener(); } ================================================ FILE: src/main/java/net/snowflake/client/api/loader/LoaderFactory.java ================================================ package net.snowflake.client.api.loader; import java.sql.Connection; import java.util.Map; import net.snowflake.client.internal.loader.StreamLoader; public class LoaderFactory { public static Loader createLoader( Map properties, Connection uploadConnection, Connection processingConnection) { return new StreamLoader(properties, uploadConnection, processingConnection); } } ================================================ FILE: src/main/java/net/snowflake/client/api/loader/LoaderProperty.java ================================================ package net.snowflake.client.api.loader; /** Configuration parameters for Loader */ public enum LoaderProperty { tableName, // Target table name String schemaName, // Target table schema String databaseName, // Target table database String remoteStage, // Stage to use - "~" is default String columns, // List of columns that will be uploaded List keys, // List of columns used as keys for updating List operation, // UPDATE, DELETE, MODIFY, UPSERT Enum Operation startTransaction, // start transaction for the operation Boolean oneBatch, // process all data in one batch Boolean truncateTable, // delete all data from the table prior to run Boolean executeBefore, // SQL statement to execute before run String executeAfter, // SQL statement to execute after run String isFirstStartCall, // skip deleting data. Used in multiple calls of loader.start Boolean isLastFinishCall, // skip commit. Used in multiple calls of loader.finish Boolean batchRowSize, // Batch row size. Flush queues when it reaches this Long onError, // on_error option String csvFileBucketSize, // File bucket size. 64 by default. Long csvFileSize, // File size. 50MB by default. Long preserveStageFile, // Preserve stage files if error occurs Boolean useLocalTimezone, // Use local timezone in converting TIMESTAMP Boolean compressFileByPut, // Compress file by PUT. false by default Boolean compressDataBeforePut, // Compress data before PUT. true by default Boolean compressLevel, // Compress level: 1 (Speed) to 9 (Compression) for // compressDataBeforePut option. No impact to // compressFileByPut. 1 by default. Long // compatibility parameters mapTimeToTimestamp, // map TIME data type to TIMESTAMP. Informatica v1 // connector behavior. Boolean // deprecated. to be removed. 
copyEmptyFieldAsEmpty, // EMPTY_FIELD_AS_NULL = true by default Boolean // test parameters testRemoteBadCSV // TEST: Inject bad CSV in the remote stage Boolean } ================================================ FILE: src/main/java/net/snowflake/client/api/loader/LoadingError.java ================================================ package net.snowflake.client.api.loader; import java.sql.ResultSet; import java.sql.SQLException; import java.util.HashMap; import java.util.Map; import net.snowflake.client.internal.loader.BufferStage; import net.snowflake.client.internal.loader.StreamLoader; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; /** * Wrapper for data format errors returned by the COPY/validate command. * *

This class encapsulates error information from failed data loading operations. It provides details about what went wrong, where in the file the error occurred, and the rejected record data.

Usage Example:
{@code
 * LoadResultListener listener = new LoadResultListener() {
 *   public void addError(LoadingError error) {
 *     System.err.println("Error in file: " + error.getFile());
 *     System.err.println("Line: " + error.getProperty(ErrorProperty.LINE));
 *     System.err.println("Error: " + error.getProperty(ErrorProperty.ERROR));
 *   }
 *   // ... implement other methods
 * };
 * }
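For completeness, a minimal counting implementation of LoadResultListener; the class name and counter behavior are illustrative, not part of the driver:

import java.util.concurrent.atomic.AtomicInteger;

class CountingListener implements LoadResultListener {
  private final AtomicInteger errorCount = new AtomicInteger();
  private final AtomicInteger errorRecordCount = new AtomicInteger();
  private final AtomicInteger submittedRowCount = new AtomicInteger();

  @Override public boolean needSuccessRecords() { return false; } // skip per-record callbacks
  @Override public void recordProvided(Operation op, Object[] record) {}
  @Override public void addProcessedRecordCount(Operation op, int i) {}
  @Override public void addOperationRecordCount(Operation op, int i) {}
  @Override public boolean needErrors() { return true; }          // receive LoadingError callbacks
  @Override public void addError(LoadingError error) { System.err.println(error); }
  @Override public boolean throwOnError() { return false; }       // keep loading after an error
  @Override public void addErrorCount(int number) { errorCount.addAndGet(number); }
  @Override public void resetErrorCount() { errorCount.set(0); }
  @Override public int getErrorCount() { return errorCount.get(); }
  @Override public void addErrorRecordCount(int number) { errorRecordCount.addAndGet(number); }
  @Override public void resetErrorRecordCount() { errorRecordCount.set(0); }
  @Override public int getErrorRecordCount() { return errorRecordCount.get(); }
  @Override public void resetSubmittedRowCount() { submittedRowCount.set(0); }
  @Override public void addSubmittedRowCount(int number) { submittedRowCount.addAndGet(number); }
  @Override public int getSubmittedRowCount() { return submittedRowCount.get(); }
}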
* * @see LoadResultListener * @see Loader */ public class LoadingError { private static final SFLogger logger = SFLoggerFactory.getLogger(LoadingError.class); /** * Properties that can be associated with a loading error. * *

These correspond to columns in the Snowflake COPY command validation results and provide * detailed information about what went wrong during data loading. */ public enum ErrorProperty { /** The error message describing what went wrong */ ERROR, /** The line number in the source file where the error occurred */ LINE, /** The character position where the error occurred */ CHARACTER, /** The byte offset in the file where the error occurred */ BYTE_OFFSET, /** The error category (e.g., "parsing", "data type mismatch") */ CATEGORY, /** The numeric error code */ CODE, /** The SQL state code */ SQL_STATE, /** The name of the column where the error occurred */ COLUMN_NAME, /** The row number in the result set */ ROW_NUMBER, /** The starting line number of the row */ ROW_START_LINE, /** The rejected record data */ REJECTED_RECORD } private String _stage; private String _prefix; private String _file; private final String _target; private final Map _properties = new HashMap(); public static String UNKNOWN = "unknown"; /** * Construct error from validation output * * @param rs result set * @param bs buffer stage * @param loader stream loader */ public LoadingError(ResultSet rs, BufferStage bs, StreamLoader loader) { _stage = bs.getRemoteLocation(); try { String ffile = rs.getString("FILE"); _file = ffile.substring(ffile.lastIndexOf("/")); _prefix = ffile.substring(0, ffile.lastIndexOf("/")); } catch (SQLException ex) { _file = UNKNOWN; } _target = loader.getTable(); for (ErrorProperty p : ErrorProperty.values()) { try { _properties.put(p, rs.getString(p.name())); } catch (SQLException ex) { logger.error("Exception", ex); } } } /** * Gets the stage name where the error occurred. * * @return the stage name */ public String getStage() { return _stage; } /** * Gets the file prefix within the stage. * * @return the file prefix path */ public String getPrefix() { return _prefix; } /** * Gets the file name where the error occurred. * * @return the file name */ public String getFile() { return _file; } /** * Gets the target table name. * * @return the target table name */ public String getTarget() { return _target; } /** * Gets the value of a specific error property. * * @param p the error property to retrieve * @return the value of the error property, or null if not set */ public String getProperty(ErrorProperty p) { return this._properties.get(p); } /** * Sets the value of a specific error property. * * @param p the error property to set * @param value the value to assign to the property */ public void setProperty(ErrorProperty p, String value) { this._properties.put(p, value); } public String toString() { StringBuilder sb = new StringBuilder(); sb.append("{"); String prefix = ""; for (ErrorProperty p : ErrorProperty.values()) { sb.append(prefix); sb.append("\"").append(p.name()).append("\": "); sb.append("\""); String value = String.valueOf(_properties.get(p)); sb.append(value.replaceAll("[\\s]+", " ").replace("\"", "\\\"")); sb.append("\""); prefix = ","; } sb.append("}"); return sb.toString(); } /** * Converts this loading error into a DataError exception. * * @return a DataError exception containing this error's details */ public Loader.DataError getException() { return new Loader.DataError(this.toString()); } } ================================================ FILE: src/main/java/net/snowflake/client/api/loader/Operation.java ================================================ package net.snowflake.client.api.loader; /** * Operations supported by the Snowflake Loader API. * *

These operations define how data is loaded into the target table. The operation type is * specified via the {@link LoaderProperty#operation} property. * * @see Loader * @see LoaderFactory * @see LoaderProperty#operation */ public enum Operation { /** Insert new rows into the target table */ INSERT, /** Delete rows from the target table based on keys */ DELETE, /** Modify existing rows in the target table */ MODIFY, /** Insert new rows or update existing rows (update-or-insert) */ UPSERT } ================================================ FILE: src/main/java/net/snowflake/client/api/pooling/SnowflakeConnectionPoolDataSource.java ================================================ package net.snowflake.client.api.pooling; import javax.sql.ConnectionPoolDataSource; import net.snowflake.client.api.datasource.SnowflakeDataSource; /** * SnowflakeConnectionPoolDataSource is the interface for a connection pool data source. Its * implementation is instantiated by {@link SnowflakeConnectionPoolDataSourceFactory}. */ public interface SnowflakeConnectionPoolDataSource extends ConnectionPoolDataSource, SnowflakeDataSource {} ================================================ FILE: src/main/java/net/snowflake/client/api/pooling/SnowflakeConnectionPoolDataSourceFactory.java ================================================ package net.snowflake.client.api.pooling; import net.snowflake.client.internal.api.implementation.pooling.SnowflakeConnectionPoolDataSourceImpl; /** * Factory for creating {@link SnowflakeConnectionPoolDataSourceImpl} instances. * *

This factory provides methods to create different types of Snowflake Connection Pool DataSource implementations. Use this factory instead of directly instantiating Connection Pool DataSource classes.

Example usage:

{@code
 * SnowflakeConnectionPoolDataSource ds = SnowflakeConnectionPoolDataSourceFactory.createConnectionPoolDataSource();
 * ds.setAccount("myaccount");
 * ds.setUser("myuser");
 * ds.setPassword("mypassword");
 * ds.setDatabaseName("mydb");
 * ds.setSchema("myschema");
 * ds.setWarehouse("mywh");
 *
 * try (Connection conn = ds.getConnection()) {
 *   // use connection
 * }
 * }
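Because the interface extends javax.sql.ConnectionPoolDataSource, a pool manager obtains physical connections through getPooledConnection(); a sketch of that lower-level handshake (normally driven by the pool manager, not application code):

javax.sql.PooledConnection physical = ds.getPooledConnection();
try (Connection logical = physical.getConnection()) {
  // a logical handle; closing it notifies ConnectionEventListeners instead of closing the session
}
physical.close(); // closes the underlying physical session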
* * @see SnowflakeConnectionPoolDataSourceImpl */ public class SnowflakeConnectionPoolDataSourceFactory { private SnowflakeConnectionPoolDataSourceFactory() { throw new AssertionError("SnowflakeConnectionPoolDataSourceFactory cannot be instantiated"); } /** * Creates a new SnowflakeConnectionPoolDataSource instance. * * @return a new SnowflakeConnectionPoolDataSource instance */ public static SnowflakeConnectionPoolDataSource createConnectionPoolDataSource() { return new SnowflakeConnectionPoolDataSourceImpl(); } } ================================================ FILE: src/main/java/net/snowflake/client/api/resultset/FieldMetadata.java ================================================ package net.snowflake.client.api.resultset; import java.util.List; /** * Metadata describing a field in a structured type (OBJECT, ARRAY, MAP). This interface provides * read-only access to field information including name, type, precision, scale, and nested fields. */ public interface FieldMetadata { /** * Gets the name of the field. * * @return the field name */ String getName(); /** * Gets the type name of the field. * * @return the type name */ String getTypeName(); /** * Gets the SQL type code of the field. * * @return the SQL type code as defined in {@link java.sql.Types} */ int getType(); /** * Checks if the field is nullable. * * @return true if the field can contain null values, false otherwise */ boolean isNullable(); /** * Gets the byte length of the field. * * @return the byte length */ int getByteLength(); /** * Gets the precision of the field. * * @return the precision */ int getPrecision(); /** * Gets the scale of the field. * * @return the scale */ int getScale(); /** * Checks if the field has a fixed size. * * @return true if the field has a fixed size, false otherwise */ boolean isFixed(); /** * Gets the base Snowflake type of the field. * * @return the base {@link SnowflakeType} */ SnowflakeType getBase(); /** * Gets the nested field metadata for structured types (OBJECT, ARRAY, MAP). * * @return list of nested field metadata, or empty list if no nested fields */ List getFields(); } ================================================ FILE: src/main/java/net/snowflake/client/api/resultset/QueryStatus.java ================================================ package net.snowflake.client.api.resultset; import java.util.Arrays; /** * Represents detailed status information for a query execution. * *

This class provides comprehensive metadata about a query's execution, including timing information, warehouse details, and error information if applicable. Use this class to monitor query progress and diagnose execution issues.

Usage Example:
{@code
 * String queryId = statement.unwrap(SnowflakeStatement.class).getQueryID();
 * QueryStatus status = connection.unwrap(SnowflakeConnection.class)
 *     .getQueryStatus(queryId);
 *
 * System.out.println("Query Status: " + status.getStatus());
 * System.out.println("Duration: " + status.getTotalDuration() + "ms");
 * if (status.getErrorCode() != 0) {
 *     System.err.println("Error: " + status.getErrorMessage());
 * }
 * }
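A polling sketch built on the helper methods defined below (isStillRunning(), isSuccess(), isAnError()); the one-second interval is arbitrary, and the enclosing method is assumed to declare SQLException and InterruptedException:

QueryStatus status = connection.unwrap(SnowflakeConnection.class).getQueryStatus(queryId);
while (status.isStillRunning()) {
  Thread.sleep(1000); // arbitrary poll interval
  status = connection.unwrap(SnowflakeConnection.class).getQueryStatus(queryId);
}
if (status.isSuccess()) {
  System.out.println("Finished in " + status.getTotalDuration() + "ms");
} else if (status.isAnError()) {
  System.err.println("Failed: " + status.getErrorMessage());
}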
*/ public final class QueryStatus { public enum Status { RUNNING(0, "RUNNING"), ABORTING(1, "ABORTING"), SUCCESS(2, "SUCCESS"), FAILED_WITH_ERROR(3, "FAILED_WITH_ERROR"), ABORTED(4, "ABORTED"), QUEUED(5, "QUEUED"), FAILED_WITH_INCIDENT(6, "FAILED_WITH_INCIDENT"), DISCONNECTED(7, "DISCONNECTED"), RESUMING_WAREHOUSE(8, "RESUMING_WAREHOUSE"), QUEUED_REPAIRING_WAREHOUSE(9, "QUEUED_REPARING_WAREHOUSE"), RESTARTED(10, "RESTARTED"), BLOCKED(11, "BLOCKED"), NO_DATA(12, "NO_DATA"); private final int value; private final String description; Status(int value, String description) { this.value = value; this.description = description; } public int getValue() { return this.value; } public String getDescription() { return this.description; } private static boolean isStillRunning(Status status) { switch (status) { case RUNNING: case QUEUED: case RESUMING_WAREHOUSE: case QUEUED_REPAIRING_WAREHOUSE: case BLOCKED: case NO_DATA: return true; default: return false; } } private static boolean isAnError(Status status) { switch (status) { case ABORTING: case FAILED_WITH_ERROR: case ABORTED: case FAILED_WITH_INCIDENT: case DISCONNECTED: case BLOCKED: return true; default: return false; } } private static Status getStatusFromString(String description) { if (description != null) { return Arrays.stream(Status.values()) .filter(st -> description.equalsIgnoreCase(st.getDescription())) .findFirst() .orElse(Status.NO_DATA); } // Is it correct? I think we should never reach this point, but maybe we should return NO_DATA // instead? return null; } } private final long endTime; private final int errorCode; private final String errorMessage; private final String id; private final String name; private final long sessionId; private final String sqlText; private final long startTime; private final String state; private final Status status; private final int totalDuration; private final String warehouseExternalSize; private final int warehouseId; private final String warehouseName; private final String warehouseServerType; /** * Constructs a QueryStatus object with detailed query execution information. 
* * @param endTime the end time of the query in milliseconds since epoch * @param errorCode the error code if query failed, 0 otherwise * @param errorMessage the error message if query failed, empty otherwise * @param id the unique query ID * @param name the query status name * @param sessionId the session ID that executed the query * @param sqlText the SQL text of the query * @param startTime the start time of the query in milliseconds since epoch * @param state the internal state of the query * @param totalDuration the total duration of query execution in milliseconds * @param warehouseExternalSize the external size of the warehouse (e.g., "X-Small") * @param warehouseId the warehouse ID * @param warehouseName the warehouse name * @param warehouseServerType the warehouse server type */ public QueryStatus( long endTime, int errorCode, String errorMessage, String id, String name, long sessionId, String sqlText, long startTime, String state, int totalDuration, String warehouseExternalSize, int warehouseId, String warehouseName, String warehouseServerType) { this.endTime = endTime; this.errorCode = errorCode; this.errorMessage = errorMessage; this.id = id; this.name = name; this.sessionId = sessionId; this.sqlText = sqlText; this.startTime = startTime; this.state = state; this.status = Status.getStatusFromString(name); this.totalDuration = totalDuration; this.warehouseExternalSize = warehouseExternalSize; this.warehouseId = warehouseId; this.warehouseName = warehouseName; this.warehouseServerType = warehouseServerType; } /** * Creates an empty QueryStatus instance with default values. * * @return an empty QueryStatus object */ public static QueryStatus empty() { return new QueryStatus(0, 0, "", "", "", 0, "", 0, "", 0, "", 0, "", ""); } /** * Checks if this query status is empty (no data). * * @return true if the status name is empty */ public boolean isEmpty() { return name.isEmpty(); } /** * Checks if the query is still running. * * @return true if the query is in a running state */ public boolean isStillRunning() { return Status.isStillRunning(status); } /** * Checks if the query completed successfully. * * @return true if the query status is SUCCESS */ public boolean isSuccess() { return status == Status.SUCCESS; } /** * Checks if the query encountered an error. * * @return true if the query is in an error state */ public boolean isAnError() { return Status.isAnError(status); } /** * Gets the end time of query execution. * * @return the end time in milliseconds since epoch */ public long getEndTime() { return endTime; } /** * Gets the error code if the query failed. * * @return the error code, or 0 if no error occurred */ public int getErrorCode() { return errorCode; } /** * Gets the error message if the query failed. * * @return the error message, or empty string if no error occurred */ public String getErrorMessage() { return errorMessage; } /** * Gets the unique query ID. * * @return the query ID */ public String getId() { return id; } /** * Gets the query status name. * * @return the status name (e.g., "RUNNING", "SUCCESS") */ public String getName() { return name; } /** * Gets the session ID that executed the query. * * @return the session ID */ public long getSessionId() { return sessionId; } /** * Gets the SQL text of the query. * * @return the SQL query text */ public String getSqlText() { return sqlText; } /** * Gets the start time of query execution. 
* * @return the start time in milliseconds since epoch */ public long getStartTime() { return startTime; } /** * Gets the internal state of the query. * * @return the internal query state */ public String getState() { return state; } /** * Gets the total duration of query execution. * * @return the total duration in milliseconds */ public int getTotalDuration() { return totalDuration; } /** * Gets the external size of the warehouse that executed the query. * * @return the warehouse size (e.g., "X-Small", "Small", "Medium") */ public String getWarehouseExternalSize() { return warehouseExternalSize; } /** * Gets the warehouse ID that executed the query. * * @return the warehouse ID */ public int getWarehouseId() { return warehouseId; } /** * Gets the warehouse name that executed the query. * * @return the warehouse name */ public String getWarehouseName() { return warehouseName; } /** * Gets the warehouse server type that executed the query. * * @return the warehouse server type */ public String getWarehouseServerType() { return warehouseServerType; } /** * Gets the status description. * * @return the status name */ public String getDescription() { return name; } /** * Gets the query status enum value. * * @return the Status enum representing the current state */ public Status getStatus() { return status; } } ================================================ FILE: src/main/java/net/snowflake/client/api/resultset/SnowflakeAsyncResultSet.java ================================================ package net.snowflake.client.api.resultset; import java.sql.SQLException; /** This interface defines Snowflake specific APIs for asynchronous ResultSet */ public interface SnowflakeAsyncResultSet extends SnowflakeResultSet { /** * This function retrieves the status of an asynchronous query. An empty ResultSet object has * already been returned, but the query may still be running. This function can be used to query * whether it is possible to retrieve results from the ResultSet already. * *

status.isSuccess() means that results can be retrieved. * * @return an instance containing query metadata * @throws SQLException if an error is encountered */ QueryStatus getStatus() throws SQLException; } ================================================ FILE: src/main/java/net/snowflake/client/api/resultset/SnowflakeResultSet.java ================================================ package net.snowflake.client.api.resultset; import java.sql.SQLException; import java.util.List; import java.util.Map; /** This interface defines Snowflake specific APIs for ResultSet */ public interface SnowflakeResultSet { /** * @return the Snowflake query ID of the query which generated this result set * @throws SQLException if an error is encountered */ String getQueryID() throws SQLException; /** * Get a list of ResultSetSerializables for the ResultSet in order to parallel processing * * @param maxSizeInBytes The expected max data size wrapped in the ResultSetSerializables object. * NOTE: this parameter is intended to make the data size in each serializable object to be * less than it. But if user specifies a small value which may be smaller than the data size * of one result chunk. So the definition can't be guaranteed completely. For this special * case, one serializable object is used to wrap the data chunk. * @return a list of ResultSetSerializables * @throws SQLException if fails to get the ResultSetSerializable objects. */ List getResultSetSerializables(long maxSizeInBytes) throws SQLException; /** * Get an array of elements from a structured type (ARRAY) column. * *

This method is used to retrieve array elements with proper type conversion for Snowflake * structured types. * * @param the type of array elements * @param columnIndex the column index (1-based) * @param type the class of array elements * @return an array of elements, or null if the value was SQL NULL * @throws SQLException if the column is not a structured type or conversion fails */ T[] getArray(int columnIndex, Class type) throws SQLException; /** * Get a list of elements from a structured type (ARRAY) column. * *

This method is used to retrieve array elements as a List with proper type conversion for * Snowflake structured types. * * @param the type of list elements * @param columnIndex the column index (1-based) * @param type the class of list elements * @return a List of elements, or null if the value was SQL NULL * @throws SQLException if the column is not a structured type or conversion fails */ List getList(int columnIndex, Class type) throws SQLException; /** * Get a map of key-value pairs from a structured type (MAP or OBJECT) column. * *

This method is used to retrieve map entries with proper type conversion for Snowflake * structured types. * * @param the type of map values * @param columnIndex the column index (1-based) * @param type the class of map values * @return a Map of String keys to typed values, or null if the value was SQL NULL * @throws SQLException if the column is not a structured type or conversion fails */ Map getMap(int columnIndex, Class type) throws SQLException; } ================================================ FILE: src/main/java/net/snowflake/client/api/resultset/SnowflakeResultSetMetaData.java ================================================ package net.snowflake.client.api.resultset; import java.sql.SQLException; import java.util.List; public interface SnowflakeResultSetMetaData { String getQueryID() throws SQLException; List getColumnNames() throws SQLException; int getColumnIndex(String columnName) throws SQLException; int getInternalColumnType(int column) throws SQLException; List getColumnFields(int column) throws SQLException; /** * Get vector dimension * * @param column column index * @return vector dimension when the column is vector type or 0 when it is not vector type * @throws SQLException when cannot get column dimension */ int getVectorDimension(int column) throws SQLException; /** * Get vector dimension * * @param columnName column name * @return vector dimension when the column is vector type or 0 when it is not vector type * @throws SQLException when cannot get column dimension */ int getVectorDimension(String columnName) throws SQLException; } ================================================ FILE: src/main/java/net/snowflake/client/api/resultset/SnowflakeResultSetSerializable.java ================================================ package net.snowflake.client.api.resultset; import java.sql.ResultSet; import java.sql.SQLException; import java.util.Properties; /** * This interface defines Snowflake specific APIs to access the data wrapped in the result set * serializable object. */ public interface SnowflakeResultSetSerializable { // This wraps the required info for retrieving ResultSet class ResultSetRetrieveConfig { private Properties proxyProperties; private String sfFullURL; public ResultSetRetrieveConfig(Builder builder) { this.proxyProperties = builder.proxyProperties; this.sfFullURL = builder.sfFullURL; } public Properties getProxyProperties() { return proxyProperties; } public String getSfFullURL() { return sfFullURL; } // The inner builder class for ResultSetRetrieveConfig public static class Builder { private Builder() {} private Properties proxyProperties = null; private String sfFullURL = null; public static Builder newInstance() { return new Builder(); } public ResultSetRetrieveConfig build() throws IllegalArgumentException { // The SFURL must include protocol like https or http if (sfFullURL == null || !sfFullURL.toLowerCase().startsWith("http")) { throw new IllegalArgumentException( "The SF URL must include protocol. The invalid is: " + sfFullURL); } return new ResultSetRetrieveConfig(this); } public Builder setProxyProperties(Properties proxyProperties) { this.proxyProperties = proxyProperties; return this; } public Builder setSfFullURL(String sfFullURL) { this.sfFullURL = sfFullURL; return this; } } } /** * Get ResultSet from the ResultSet Serializable object so that the user can access the data. * * @param resultSetRetrieveConfig The extra info to retrieve the result set. 
* @return a ResultSet which represents for the data wrapped in the object * @throws SQLException if an error occurs */ ResultSet getResultSet(ResultSetRetrieveConfig resultSetRetrieveConfig) throws SQLException; /** * Retrieve total row count included in the ResultSet Serializable object. * * @return the total row count from metadata * @throws SQLException if an error occurs */ long getRowCount() throws SQLException; /** * Retrieve compressed data size included in the ResultSet Serializable object. * * @return the total compressed data size in bytes from metadata * @throws SQLException if an error occurs */ long getCompressedDataSizeInBytes() throws SQLException; /** * Retrieve uncompressed data size included in the ResultSet Serializable object. * * @return the total uncompressed data size in bytes from metadata * @throws SQLException if an error occurs */ long getUncompressedDataSizeInBytes() throws SQLException; } ================================================ FILE: src/main/java/net/snowflake/client/api/resultset/SnowflakeType.java ================================================ package net.snowflake.client.api.resultset; /** * Enumeration of Snowflake data types. * *

This enum represents the various data types supported by Snowflake. These type values are used in metadata operations such as {@link FieldMetadata#getBase()} to describe the underlying type of a field in structured types (OBJECT, ARRAY, MAP).

Usage Example:
{@code
 * import net.snowflake.client.api.resultset.FieldMetadata;
 * import net.snowflake.client.api.resultset.SnowflakeType;
 *
 * // Get field metadata from a structured type column
 * FieldMetadata field = resultSetMetaData.getColumnFields(1).get(0);
 * SnowflakeType baseType = field.getBase();
 *
 * if (baseType == SnowflakeType.TEXT) {
 *     System.out.println("Field is a text type");
 * } else if (baseType == SnowflakeType.INTEGER) {
 *     System.out.println("Field is an integer type");
 * }
 * }
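Since FieldMetadata.getFields() returns nested metadata for structured types, base types can be inspected recursively. A sketch, assuming a structured-type column at index 1 and that getColumnFields returns List<FieldMetadata> (the extraction above stripped the generics):

for (FieldMetadata field : resultSetMetaData.getColumnFields(1)) {
  System.out.println(field.getName() + ": " + field.getTypeName() + " (base " + field.getBase() + ")");
  for (FieldMetadata nested : field.getFields()) { // non-empty only for nested OBJECT/ARRAY/MAP fields
    System.out.println("  " + nested.getName() + ": " + nested.getTypeName());
  }
}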
Additionally, this enum includes JDBC type code extensions for Snowflake-specific data types not covered by {@link java.sql.Types}.

These constants can be used with {@link java.sql.PreparedStatement#setObject(int, Object, int)} to specify the target SQL type when binding parameters to Snowflake-specific types.

Example usage:

{@code
 * PreparedStatement pstmt = connection.prepareStatement("INSERT INTO t VALUES (?)");
 * pstmt.setObject(1, myTimestamp, SnowflakeType.EXTRA_TYPES_TIMESTAMP_NTZ);
 * }
 *
 * @since 4.0.0
 * @see FieldMetadata
 */
public enum SnowflakeType {
  /** Represents an ANY type (unspecified/dynamic) */
  ANY,
  /** Represents an ARRAY type */
  ARRAY,
  /** Represents a BINARY type */
  BINARY,
  /** Represents a BOOLEAN type */
  BOOLEAN,
  /** Represents a CHAR type */
  CHAR,
  /** Represents a DATE type */
  DATE,
  /** Represents a DECFLOAT (decimal floating-point) type */
  DECFLOAT,
  /** Represents a FIXED-point numeric type (NUMBER with scale) */
  FIXED,
  /** Represents an INTEGER type */
  INTEGER,
  /** Represents an OBJECT type (structured) */
  OBJECT,
  /** Represents a MAP type */
  MAP,
  /** Represents a REAL (floating-point) type */
  REAL,
  /** Represents a TEXT/VARCHAR type */
  TEXT,
  /** Represents a TIME type */
  TIME,
  /** Represents a TIMESTAMP type (no timezone) */
  TIMESTAMP,
  /** Represents a TIMESTAMP with local timezone */
  TIMESTAMP_LTZ,
  /** Represents a TIMESTAMP with no timezone */
  TIMESTAMP_NTZ,
  /** Represents a TIMESTAMP with timezone */
  TIMESTAMP_TZ,
  /** Represents an INTERVAL YEAR TO MONTH type */
  INTERVAL_YEAR_MONTH,
  /** Represents an INTERVAL DAY TO TIME type */
  INTERVAL_DAY_TIME,
  /** Represents a VARIANT (semi-structured) type */
  VARIANT,
  /** Represents a GEOGRAPHY type */
  GEOGRAPHY,
  /** Represents a GEOMETRY type */
  GEOMETRY,
  /** Represents a VECTOR type */
  VECTOR;

  public static final int EXTRA_TYPES_TIMESTAMP_LTZ = 50000;
  public static final int EXTRA_TYPES_TIMESTAMP_TZ = 50001;
  public static final int EXTRA_TYPES_TIMESTAMP_NTZ = 50002;
  public static final int EXTRA_TYPES_VECTOR = 50003;
  public static final int EXTRA_TYPES_DECFLOAT = 50004;
  public static final int EXTRA_TYPES_YEAR_MONTH_INTERVAL = 50005;
  public static final int EXTRA_TYPES_DAY_TIME_INTERVAL = 50006;
}

================================================
FILE: src/main/java/net/snowflake/client/api/statement/SnowflakePreparedStatement.java
================================================

package net.snowflake.client.api.statement;

import java.math.BigInteger;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Map;

public interface SnowflakePreparedStatement {
  /**
   * @return the Snowflake query ID of the latest executed query
   * @throws SQLException if an error occurs
   */
  String getQueryID() throws SQLException;

  /**
   * Execute a query asynchronously
   *
   * @return ResultSet containing results
   * @throws SQLException if an error occurs
   */
  ResultSet executeAsyncQuery() throws SQLException;

  /**
   * Sets the designated parameter to the given BigInteger value.
   *
   * @param parameterIndex the parameter index
   * @param x the BigInteger value
   * @throws SQLException if an error occurs
   */
  void setBigInteger(int parameterIndex, BigInteger x) throws SQLException;

  /**
   * Sets the designated parameter to the given Map instance.
   *
   * @param parameterIndex the parameter index
   * @param map the map instance
   * @param type the type
   * @param <T> generic type
   * @throws SQLException if an error occurs
   */
  <T> void setMap(int parameterIndex, Map<String, T> map, int type) throws SQLException;
}

================================================
FILE: src/main/java/net/snowflake/client/api/statement/SnowflakeStatement.java
================================================

package net.snowflake.client.api.statement;

import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.List;

/** This interface defines Snowflake specific APIs for Statement */
public interface SnowflakeStatement {
  /**
   * @return the Snowflake query ID of the latest executed query (even failed one) or null when the
   *     last query ID is not available
   * @throws SQLException if an error is encountered
   */
  String getQueryID() throws SQLException;

  /**
   * @return the Snowflake query IDs of the latest executed batch queries
   * @throws SQLException if an error is encountered
   */
  List<String> getBatchQueryIDs() throws SQLException;

  /**
   * Set statement level parameter
   *
   * @param name parameter name
   * @param value parameter value
   * @throws SQLException if an error is encountered
   */
  void setParameter(String name, Object value) throws SQLException;

  /**
   * Set batch ID
   *
   * @param batchID the batch ID
   */
  void setBatchID(String batchID);

  /**
   * Execute SQL query asynchronously
   *
   * @param sql sql statement
   * @return ResultSet
   * @throws SQLException if {@link #executeQueryInternal(String, Map)} throws an exception
   */
  // Should we return AsyncResultSet here? It would have to extend ResultSet
  ResultSet executeAsyncQuery(String sql) throws SQLException;

  /**
   * Sets the query timeout when running an async query.
   *
   * @param seconds The number of seconds until timeout.
   * @throws SQLException if an error is encountered
   */
  void setAsyncQueryTimeout(int seconds) throws SQLException;
}

================================================
FILE: src/main/java/net/snowflake/client/internal/api/implementation/connection/SnowflakeConnectionImpl.java
================================================

package net.snowflake.client.internal.api.implementation.connection;

import static net.snowflake.client.api.exception.ErrorCode.FEATURE_UNSUPPORTED;
import static net.snowflake.client.api.exception.ErrorCode.INVALID_CONNECT_STRING;
import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty;
import static net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.internalCallMarker;
import static net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.recordIfExternal;

import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
import java.io.InputStream;
import java.sql.Array;
import java.sql.Blob;
import java.sql.CallableStatement;
import java.sql.ClientInfoStatus;
import java.sql.Clob;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.DriverPropertyInfo;
import java.sql.JDBCType;
import java.sql.NClob;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLClientInfoException;
import java.sql.SQLException;
import java.sql.SQLFeatureNotSupportedException;
import java.sql.SQLWarning;
import java.sql.SQLXML;
import java.sql.Savepoint;
import java.sql.Statement;
import java.sql.Struct;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.zip.GZIPInputStream;
import net.snowflake.client.api.connection.DownloadStreamConfig;
import net.snowflake.client.api.connection.SnowflakeConnection;
import net.snowflake.client.api.connection.UploadStreamConfig;
import net.snowflake.client.api.exception.ErrorCode;
import net.snowflake.client.api.exception.SnowflakeSQLException;
import net.snowflake.client.api.resultset.QueryStatus;
import net.snowflake.client.internal.api.implementation.metadata.SnowflakeDatabaseMetaDataImpl;
import net.snowflake.client.internal.api.implementation.statement.SnowflakeCallableStatementImpl;
import net.snowflake.client.internal.api.implementation.statement.SnowflakePreparedStatementImpl;
import net.snowflake.client.internal.api.implementation.statement.SnowflakeStatementImpl;
import net.snowflake.client.internal.core.ObjectMapperFactory;
import net.snowflake.client.internal.core.SFBaseSession;
import net.snowflake.client.internal.core.SFException;
import net.snowflake.client.internal.core.SFSession;
import net.snowflake.client.internal.core.SfSqlArray;
import net.snowflake.client.internal.exception.SnowflakeSQLLoggedException;
import net.snowflake.client.internal.jdbc.DefaultSFConnectionHandler;
import net.snowflake.client.internal.jdbc.SFBaseFileTransferAgent;
import net.snowflake.client.internal.jdbc.SFConnectionHandler;
import net.snowflake.client.internal.jdbc.SnowflakeClob;
import net.snowflake.client.internal.jdbc.SnowflakeConnectString;
import net.snowflake.client.internal.jdbc.SnowflakeLoggedFeatureNotSupportedException;
import net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.InternalCallMarker;
import
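// ----------------------------------------------------------------------------
// A minimal usage sketch for the asynchronous statement API above (illustration
// only, not part of this file). Assumes an already-open java.sql.Connection
// named `conn`; the table name is a placeholder.
//
//   Statement stmt = conn.createStatement();
//   SnowflakeStatement sfStmt =
//       stmt.unwrap(net.snowflake.client.api.statement.SnowflakeStatement.class);
//   // Submits the query and returns without waiting for completion.
//   ResultSet rs = sfStmt.executeAsyncQuery("select count(*) from my_table");
//   // The query ID identifies the query on the server and can be used to
//   // check its status or to build a ResultSet from another connection.
//   String queryId = sfStmt.getQueryID();
// ----------------------------------------------------------------------------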
net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.client.internal.log.SFLoggerUtil; import net.snowflake.client.internal.util.Stopwatch; import net.snowflake.common.core.SqlState; /** Snowflake connection implementation */ public class SnowflakeConnectionImpl implements Connection, SnowflakeConnection { private static final SFLogger logger = SFLoggerFactory.getLogger(SnowflakeConnectionImpl.class); static { SFLoggerUtil.initializeSnowflakeLogger(); } /** Refer to all created and open statements from this connection */ private final Set openStatements = ConcurrentHashMap.newKeySet(); // Injected delay for the purpose of connection timeout testing // Any statement execution will sleep for the specified number of milliseconds private final AtomicInteger _injectedDelay = new AtomicInteger(0); private boolean isClosed; private SQLWarning sqlWarnings = null; private List missingProperties = null; /** * Amount of milliseconds a user is willing to tolerate for network related issues (e.g. HTTP * 503/504) or database transient issues (e.g. GS not responding) * *
A value of 0 means no timeout * *
Default: 300 seconds */ private int networkTimeoutInMilli = 0; // in milliseconds /* this should be set to Connection.TRANSACTION_READ_COMMITTED * There may not be many implications here since the call to * setTransactionIsolation doesn't do anything. */ private int transactionIsolation = Connection.TRANSACTION_NONE; private SFBaseSession sfSession; /** The SnowflakeConnectionImpl that provides the underlying physical-layer implementation */ private SFConnectionHandler sfConnectionHandler; private boolean showStatementParameters; private ObjectMapper objectMapper; /** * Instantiates a SnowflakeConnectionImpl with the passed-in SnowflakeConnectionImpl. * * @param sfConnectionHandler The SnowflakeConnectionImpl. * @throws SQLException if failed to instantiate a SnowflakeConnectionImpl. */ public SnowflakeConnectionImpl(SFConnectionHandler sfConnectionHandler) throws SQLException { initConnectionWithImpl(sfConnectionHandler, null, null); } /** * Instantiates a SnowflakeConnectionImpl with the passed-in SnowflakeConnectionImpl. * * @param sfConnectionHandler The SnowflakeConnectionImpl. * @param url The URL string. * @param info Connection properties. * @throws SQLException if failed to instantiate connection. */ public SnowflakeConnectionImpl( SFConnectionHandler sfConnectionHandler, String url, Properties info) throws SQLException { initConnectionWithImpl(sfConnectionHandler, url, info); } /** * A connection will establish a session token from snowflake * * @param url server url used to create snowflake connection * @param info properties about the snowflake connection * @throws SQLException if failed to create a snowflake connection i.e. username or password not * specified */ public SnowflakeConnectionImpl(String url, Properties info) throws SQLException { SnowflakeConnectString conStr = SnowflakeConnectString.parse(url, info); if (!conStr.isValid()) { throw new SnowflakeSQLException(INVALID_CONNECT_STRING, url); } initConnectionWithImpl(new DefaultSFConnectionHandler(conStr), url, info); appendWarnings(sfSession.getSqlWarnings()); isClosed = false; } public SnowflakeConnectionImpl(String url, Properties info, boolean fakeConnection) throws SQLException { SnowflakeConnectString conStr = SnowflakeConnectString.parse(url, info); if (!conStr.isValid()) { throw new SnowflakeSQLException( SqlState.CONNECTION_EXCEPTION, INVALID_CONNECT_STRING.getMessageCode(), url); } initConnectionWithImpl(new DefaultSFConnectionHandler(conStr, true), url, info); isClosed = false; } private void initConnectionWithImpl( SFConnectionHandler sfConnectionHandler, String url, Properties info) throws SQLException { Stopwatch stopwatch = new Stopwatch(); stopwatch.start(); logger.debug("Initializing new connection"); this.sfConnectionHandler = sfConnectionHandler; sfConnectionHandler.initializeConnection(url, info); this.sfSession = sfConnectionHandler.getSFSession(); this.objectMapper = ObjectMapperFactory.getObjectMapperForSession(sfSession); missingProperties = sfSession.checkProperties(); this.showStatementParameters = sfSession.getPreparedStatementLogging(); stopwatch.stop(); logger.debug( "Connection initialized successfully in {} ms. 
Session id: {}", stopwatch.elapsedMillis(), sfSession.getSessionId()); } public List returnMissingProperties() { return missingProperties; } private void raiseSQLExceptionIfConnectionIsClosed() throws SQLException { if (isClosed) { throw new SnowflakeSQLException(ErrorCode.CONNECTION_CLOSED); } } /** * Execute a statement where the result isn't needed, and the statement is closed before this * method returns * * @param stmtText text of the statement * @throws SQLException exception thrown it the statement fails to execute */ private void executeImmediate(String stmtText) throws SQLException { // execute the statement and auto-close it as well try (final Statement statement = this.createStatement()) { statement.execute(stmtText); } } /** * Create a statement * * @return statement statement object * @throws SQLException if failed to create a snowflake statement */ @Override public Statement createStatement() throws SQLException { raiseSQLExceptionIfConnectionIsClosed(); Statement stmt = createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY); openStatements.add(stmt); return stmt; } /** * Get an instance of a ResultSet object * * @param queryID the query ID * @return ResultSet * @throws SQLException if connection is closed */ public ResultSet createResultSet(String queryID) throws SQLException { raiseSQLExceptionIfConnectionIsClosed(); ResultSet rs = sfConnectionHandler.createResultSet(queryID, createStatement()); return rs; } /** * Return an array of child query ID for the given query ID. * *
If the given query ID is for a multiple statements query, it returns an array of its child * statements, otherwise, it returns an array to include the given query ID. * * @param queryID The given query ID * @return An array of child query IDs * @throws SQLException If the query is running or the corresponding query is FAILED. */ @Override public String[] getChildQueryIds(String queryID) throws SQLException { raiseSQLExceptionIfConnectionIsClosed(); // execute the statement and auto-close it as well try (final Statement statement = this.createStatement()) { return statement.unwrap(SnowflakeStatementImpl.class).getChildQueryIds(queryID); } } /** * Close the connection * * @throws SQLException failed to close the connection */ @Override public void close() throws SQLException { Stopwatch stopwatch = new Stopwatch(); stopwatch.start(); String sessionId = null; if (sfSession != null) { sessionId = sfSession.getSessionId(); logger.debug("Closing connection with session id: {}", sessionId); } else { logger.debug("Closing connection without associated session"); } if (isClosed) { logger.debug("Connection is already closed"); // No exception is raised even if the connection is closed. return; } isClosed = true; try { if (sfSession != null && sfSession.isSafeToClose()) { sfSession.close(internalCallMarker()); sfSession = null; } // make sure to close all created statements if (!openStatements.isEmpty()) { logger.debug("Closing {} opened statements", openStatements.size()); } for (Statement stmt : openStatements) { if (stmt != null && !stmt.isClosed()) { if (stmt.isWrapperFor(SnowflakeStatementImpl.class)) { stmt.unwrap(SnowflakeStatementImpl.class).close(false); } else { stmt.close(); } } } if (!openStatements.isEmpty()) { logger.debug("Statements closed successfully"); } openStatements.clear(); } catch (SFException ex) { throw new SnowflakeSQLLoggedException( sfSession, ex.getSqlState(), ex.getVendorCode(), ex.getCause(), ex.getParams()); } stopwatch.stop(); logger.debug( "Connection with session id: {} closed successfully in {} ms", sessionId, stopwatch.elapsedMillis()); } @Override public String getSessionID() throws SQLException { raiseSQLExceptionIfConnectionIsClosed(); return sfSession.getSessionId(); } @Override public QueryStatus getQueryStatus(String queryID) throws SQLException { raiseSQLExceptionIfConnectionIsClosed(); return sfSession.getQueryStatus(queryID); } @Override public String getRole() throws SQLException { raiseSQLExceptionIfConnectionIsClosed(); return sfSession.getRole(); } @Override public String getWarehouse() throws SQLException { raiseSQLExceptionIfConnectionIsClosed(); return sfSession.getWarehouse(); } @Override public String getDatabase() throws SQLException { raiseSQLExceptionIfConnectionIsClosed(); return sfSession.getDatabase(); } @Override public boolean isClosed() throws SQLException { logger.trace("boolean isClosed()", false); return isClosed; } /** * Return the database metadata * * @return Database metadata * @throws SQLException if any database error occurs */ @Override public DatabaseMetaData getMetaData() throws SQLException { logger.trace("DatabaseMetaData getMetaData()", false); raiseSQLExceptionIfConnectionIsClosed(); return new SnowflakeDatabaseMetaDataImpl(this); } @Override public CallableStatement prepareCall(String sql) throws SQLException { logger.trace("CallableStatement prepareCall(String sql)", false); raiseSQLExceptionIfConnectionIsClosed(); CallableStatement stmt = prepareCall(sql, false); openStatements.add(stmt); return stmt; } public 
CallableStatement prepareCall(String sql, boolean skipParsing) throws SQLException { logger.trace("CallableStatement prepareCall(String sql, boolean skipParsing)", false); raiseSQLExceptionIfConnectionIsClosed(); CallableStatement stmt = new SnowflakeCallableStatementImpl( this, sql, skipParsing, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.CLOSE_CURSORS_AT_COMMIT); openStatements.add(stmt); return stmt; } @Override public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { logger.trace( "CallableStatement prepareCall(String sql," + " int resultSetType,int resultSetConcurrency", false); CallableStatement stmt = prepareCall(sql, resultSetType, resultSetConcurrency, ResultSet.CLOSE_CURSORS_AT_COMMIT); openStatements.add(stmt); return stmt; } @Override public CallableStatement prepareCall( String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { logger.trace("CallableStatement prepareCall(String sql, int " + "resultSetType,", false); CallableStatement stmt = new SnowflakeCallableStatementImpl( this, sql, false, resultSetType, resultSetConcurrency, resultSetHoldability); openStatements.add(stmt); return stmt; } @Override public String nativeSQL(String sql) throws SQLException { logger.trace("String nativeSQL(String sql)", false); raiseSQLExceptionIfConnectionIsClosed(); return sql; } @Override public boolean getAutoCommit() throws SQLException { logger.trace("boolean getAutoCommit()", false); raiseSQLExceptionIfConnectionIsClosed(); return sfSession.getAutoCommit(); } @Override public void setAutoCommit(boolean isAutoCommit) throws SQLException { logger.trace("void setAutoCommit(boolean isAutoCommit)", false); boolean currentAutoCommit = this.getAutoCommit(); if (isAutoCommit != currentAutoCommit) { sfSession.setAutoCommit(isAutoCommit); this.executeImmediate( "alter session /* JDBC:SnowflakeConnectionImpl.setAutoCommit*/ set autocommit=" + isAutoCommit); } } @Override public void commit() throws SQLException { logger.trace("void commit()", false); this.executeImmediate("commit"); } @Override public void rollback() throws SQLException { logger.trace("void rollback()", false); this.executeImmediate("rollback"); } @Override public void rollback(Savepoint savepoint) throws SQLException { logger.trace("void rollback(Savepoint savepoint)", false); throw new SnowflakeLoggedFeatureNotSupportedException(sfSession); } @Override public boolean isReadOnly() throws SQLException { logger.trace("boolean isReadOnly()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public void setReadOnly(boolean readOnly) throws SQLException { logger.trace("void setReadOnly(boolean readOnly)", false); raiseSQLExceptionIfConnectionIsClosed(); if (readOnly) { logger.debug("setReadOnly not supported.", false); } } @Override public String getCatalog() throws SQLException { raiseSQLExceptionIfConnectionIsClosed(); return sfSession.getDatabase(); } @Override public void setCatalog(String catalog) throws SQLException { logger.trace("void setCatalog(String catalog)", false); // switch db by running "use db" this.executeImmediate("use database \"" + catalog + "\""); } @Override public int getTransactionIsolation() throws SQLException { logger.trace("int getTransactionIsolation()", false); raiseSQLExceptionIfConnectionIsClosed(); return this.transactionIsolation; } /** * Sets the transaction isolation level. 
* * @param level transaction level: TRANSACTION_NONE or TRANSACTION_READ_COMMITTED * @throws SQLException if any SQL error occurs */ @Override public void setTransactionIsolation(int level) throws SQLException { logger.trace("void setTransactionIsolation(int level), level = {}", level); raiseSQLExceptionIfConnectionIsClosed(); if (level == Connection.TRANSACTION_NONE || level == Connection.TRANSACTION_READ_COMMITTED) { this.transactionIsolation = level; } else { throw new SQLFeatureNotSupportedException( "Transaction Isolation " + level + " not supported.", FEATURE_UNSUPPORTED.getSqlState(), FEATURE_UNSUPPORTED.getMessageCode()); } } @Override public SQLWarning getWarnings() throws SQLException { logger.trace("SQLWarning getWarnings()", false); raiseSQLExceptionIfConnectionIsClosed(); return sqlWarnings; } @Override public void clearWarnings() throws SQLException { logger.trace("void clearWarnings()", false); raiseSQLExceptionIfConnectionIsClosed(); sfSession.clearSqlWarnings(); sqlWarnings = null; } @Override public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException { logger.trace( "Statement createStatement(int resultSetType, " + "int resultSetConcurrency)", false); Statement stmt = createStatement(resultSetType, resultSetConcurrency, ResultSet.CLOSE_CURSORS_AT_COMMIT); openStatements.add(stmt); return stmt; } @Override public Statement createStatement( int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { logger.trace( "Statement createStatement(int resultSetType, " + "int resultSetConcurrency, int resultSetHoldability", false); Statement stmt = new SnowflakeStatementImpl(this, resultSetType, resultSetConcurrency, resultSetHoldability); openStatements.add(stmt); return stmt; } @Override public PreparedStatement prepareStatement(String sql) throws SQLException { logger.trace("PreparedStatement prepareStatement(String sql)", false); raiseSQLExceptionIfConnectionIsClosed(); PreparedStatement stmt = prepareStatement(sql, false); openStatements.add(stmt); return stmt; } @Override public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException { logger.trace( "PreparedStatement prepareStatement(String sql, " + "int autoGeneratedKeys)", false); if (autoGeneratedKeys == Statement.NO_GENERATED_KEYS) { return prepareStatement(sql); } throw new SnowflakeLoggedFeatureNotSupportedException(sfSession); } @Override public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { logger.trace("PreparedStatement prepareStatement(String sql, " + "int[] columnIndexes)", false); throw new SnowflakeLoggedFeatureNotSupportedException(sfSession); } @Override public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException { logger.trace( "PreparedStatement prepareStatement(String sql, " + "String[] columnNames)", false); throw new SnowflakeLoggedFeatureNotSupportedException(sfSession); } @Override public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { logger.trace("PreparedStatement prepareStatement(String sql, " + "int resultSetType,", false); PreparedStatement stmt = prepareStatement( sql, resultSetType, resultSetConcurrency, ResultSet.CLOSE_CURSORS_AT_COMMIT); openStatements.add(stmt); return stmt; } @Override public PreparedStatement prepareStatement( String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { 
logger.trace("PreparedStatement prepareStatement(String sql, " + "int resultSetType,", false); PreparedStatement stmt = new SnowflakePreparedStatementImpl( this, sql, false, resultSetType, resultSetConcurrency, resultSetHoldability); openStatements.add(stmt); return stmt; } public PreparedStatement prepareStatement(String sql, boolean skipParsing) throws SQLException { logger.trace("PreparedStatement prepareStatement(String sql, boolean skipParsing)", false); raiseSQLExceptionIfConnectionIsClosed(); PreparedStatement stmt = new SnowflakePreparedStatementImpl( this, sql, skipParsing, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.CLOSE_CURSORS_AT_COMMIT); openStatements.add(stmt); return stmt; } @Override public Map> getTypeMap() throws SQLException { raiseSQLExceptionIfConnectionIsClosed(); return Collections.emptyMap(); // nop } @Override public void setTypeMap(Map> map) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(sfSession); } @Override public int getHoldability() throws SQLException { raiseSQLExceptionIfConnectionIsClosed(); return ResultSet.CLOSE_CURSORS_AT_COMMIT; // nop } @Override public void setHoldability(int holdability) throws SQLException { raiseSQLExceptionIfConnectionIsClosed(); if ((holdability != ResultSet.CLOSE_CURSORS_AT_COMMIT && holdability != ResultSet.HOLD_CURSORS_OVER_COMMIT)) { throw new SQLException("The given parameter is not a ResultSet holdability constant."); } // HOLD_CURSORS_OVER_COMMIT holdability is currently not supported. // no-op if the holdability is CLOSE_CURSORS_AT_COMMIT if (holdability == ResultSet.HOLD_CURSORS_OVER_COMMIT) { throw new SnowflakeLoggedFeatureNotSupportedException(sfSession); } } @Override public Savepoint setSavepoint() throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(sfSession); } @Override public Savepoint setSavepoint(String name) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(sfSession); } @Override public void releaseSavepoint(Savepoint savepoint) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(sfSession); } @Override public Blob createBlob() throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(sfSession); } @Override public Clob createClob() throws SQLException { raiseSQLExceptionIfConnectionIsClosed(); return new SnowflakeClob(); } @Override public NClob createNClob() throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(sfSession); } @Override public SQLXML createSQLXML() throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(sfSession); } @Override public boolean isValid(int timeout) throws SQLException { if (timeout < 0) { throw new SQLException("timeout is less than 0"); } else if (isClosed) { return false; } else { try { sfSession.callHeartBeat(timeout); } catch (SFException | Exception ex) { return false; } return true; } } @Override public void setClientInfo(String name, String value) throws SQLClientInfoException { Map failedProps = new HashMap<>(); failedProps.put(name, ClientInfoStatus.REASON_UNKNOWN_PROPERTY); raiseSetClientInfoException(failedProps); } private void raiseSetClientInfoException(Map failedProps) throws SQLClientInfoException { if (isClosed) { throw new SQLClientInfoException( "The connection is not opened.", ErrorCode.CONNECTION_CLOSED.getSqlState(), ErrorCode.CONNECTION_CLOSED.getMessageCode(), failedProps); } throw new SQLClientInfoException( "The client property cannot be set by setClientInfo.", 
ErrorCode.INVALID_PARAMETER_VALUE.getSqlState(), ErrorCode.INVALID_PARAMETER_VALUE.getMessageCode(), failedProps); } @Override public Properties getClientInfo() throws SQLException { logger.trace("Properties getClientInfo()", false); raiseSQLExceptionIfConnectionIsClosed(); // sfSession must not be null if the connection is not closed. return sfSession.getClientInfo(); } @Override public void setClientInfo(Properties properties) throws SQLClientInfoException { Map failedProps = new HashMap<>(); Enumeration propList = properties.propertyNames(); while (propList.hasMoreElements()) { String name = (String) propList.nextElement(); failedProps.put(name, ClientInfoStatus.REASON_UNKNOWN_PROPERTY); } raiseSetClientInfoException(failedProps); } @Override public String getClientInfo(String name) throws SQLException { logger.trace("String getClientInfo(String name)", false); raiseSQLExceptionIfConnectionIsClosed(); // sfSession must not be null if the connection is not closed. return sfSession.getClientInfo(name); } @Override public Array createArrayOf(String typeName, Object[] elements) throws SQLException { logger.trace("Array createArrayOf(String typeName, Object[] " + "elements)", false); return new SfSqlArray( JDBCType.valueOf(typeName.toUpperCase()).getVendorTypeNumber(), elements, sfSession, objectMapper); } @Override public Struct createStruct(String typeName, Object[] attributes) throws SQLException { logger.trace("Struct createStruct(String typeName, Object[] " + "attributes)", false); throw new SnowflakeLoggedFeatureNotSupportedException(sfSession); } @Override public String getSchema() throws SQLException { raiseSQLExceptionIfConnectionIsClosed(); return sfSession.getSchema(); } @Override public void setSchema(String schema) throws SQLException { logger.trace("void setSchema(String schema)", false); String databaseName = getCatalog(); // switch schema by running "use db.schema" if (databaseName == null) { this.executeImmediate("use schema \"" + schema + "\""); } else { this.executeImmediate("use schema \"" + databaseName + "\".\"" + schema + "\""); } } @Override public void abort(Executor executor) throws SQLException { logger.trace("void abort(Executor executor)", false); close(); } @Override public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException { logger.trace("void setNetworkTimeout(Executor executor, int " + "milliseconds)", false); raiseSQLExceptionIfConnectionIsClosed(); networkTimeoutInMilli = milliseconds; } @Override public int getNetworkTimeout() throws SQLException { logger.trace("int getNetworkTimeout()", false); raiseSQLExceptionIfConnectionIsClosed(); return networkTimeoutInMilli; } @Override public boolean isWrapperFor(Class iface) throws SQLException { logger.trace("boolean isWrapperFor(Class iface)", false); return iface.isInstance(this); } @SuppressWarnings("unchecked") @Override public T unwrap(Class iface) throws SQLException { logger.trace(" T unwrap(Class iface)", false); if (!iface.isInstance(this)) { throw new SQLException( this.getClass().getName() + " not unwrappable from " + iface.getName()); } return (T) this; } @Override public int getDatabaseMajorVersion() throws SQLException { raiseSQLExceptionIfConnectionIsClosed(); return sfSession.getDatabaseMajorVersion(); } @Override public int getDatabaseMinorVersion() throws SQLException { raiseSQLExceptionIfConnectionIsClosed(); return sfSession.getDatabaseMinorVersion(); } @Override public String getDatabaseVersion() throws SQLException { raiseSQLExceptionIfConnectionIsClosed(); 
return sfSession.getDatabaseVersion(); } public SFConnectionHandler getHandler() { return getHandler(null); } public SFConnectionHandler getHandler(InternalCallMarker internalCallMarker) { recordIfExternal("SnowflakeConnectionImpl", "getHandler", internalCallMarker); return sfConnectionHandler; } @Override public void uploadStream(String stageName, String destFileName, InputStream inputStream) throws SQLException { uploadStream(stageName, destFileName, inputStream, UploadStreamConfig.builder().build()); } @Override public void uploadStream( String stageName, String destFileName, InputStream inputStream, UploadStreamConfig config) throws SQLException { if (config == null) { throw new IllegalArgumentException("UploadStreamConfig cannot be null"); } uploadStreamInternal( stageName, config.getDestPrefix(), inputStream, destFileName, config.isCompressData()); } /** * Method to compress data from a stream and upload it at a stage location. The data will be * uploaded as one file. No splitting is done in this method. * *
Caller is responsible for releasing the inputStream after the method is called. * *
This method is deprecated * * @param stageName stage name: e.g. ~ or table name or stage name * @param destPrefix path prefix under which the data should be uploaded on the stage * @param inputStream input stream from which the data will be uploaded * @param destFileName destination file name to use * @throws SQLException failed to compress and put data from a stream at stage */ @Deprecated public void compressAndUploadStream( String stageName, String destPrefix, InputStream inputStream, String destFileName) throws SQLException { uploadStreamInternal(stageName, destPrefix, inputStream, destFileName, true); } /** * Method to put data from a stream at a stage location. The data will be uploaded as one file. No * splitting is done in this method. * *
Stream size must match the total size of data in the input stream unless the compressData parameter is set to true. * *
caller is responsible for passing the correct size for the data in the stream and releasing * the inputStream after the method is called. * * @param stageName stage name: e.g. ~ or table name or stage name * @param destPrefix path prefix under which the data should be uploaded on the stage * @param inputStream input stream from which the data will be uploaded * @param destFileName destination file name to use * @param compressData whether compression is requested fore uploading data * @throws SQLException raises if any error occurs */ private void uploadStreamInternal( String stageName, String destPrefix, InputStream inputStream, String destFileName, boolean compressData) throws SQLException { logger.debug( "Upload data from stream: stageName={}" + ", destPrefix={}, destFileName={}", stageName, destPrefix, destFileName); if (stageName == null) { throw new SnowflakeSQLLoggedException( sfSession, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "stage name is null"); } if (destFileName == null) { throw new SnowflakeSQLLoggedException( sfSession, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "stage name is null"); } SnowflakeStatementImpl stmt = this.createStatement().unwrap(SnowflakeStatementImpl.class); StringBuilder destStage = new StringBuilder(); // add stage name if (!(stageName.startsWith("@") || stageName.startsWith("'@") || stageName.startsWith("$$@"))) { destStage.append("@"); } destStage.append(stageName); // add dest prefix if (destPrefix != null) { if (!destPrefix.startsWith("/")) { destStage.append("/"); } destStage.append(destPrefix); } StringBuilder putCommand = new StringBuilder(); // use a placeholder for source file putCommand.append("put file:///tmp/placeholder "); putCommand.append(destStage.toString()); putCommand.append(" overwrite=true"); SFBaseFileTransferAgent transferAgent = sfConnectionHandler.getFileTransferAgent(putCommand.toString(), stmt.getSFBaseStatement()); transferAgent.setDestStagePath(destStage.toString()); transferAgent.setSourceStream(inputStream); transferAgent.setDestFileNameForStreamSource(destFileName); transferAgent.setCompressSourceFromStream(compressData); transferAgent.execute(); stmt.close(); } @Override public InputStream downloadStream(String stageName, String sourceFileName) throws SQLException { return downloadStream(stageName, sourceFileName, DownloadStreamConfig.builder().build()); } @Override public InputStream downloadStream( String stageName, String sourceFileName, DownloadStreamConfig config) throws SQLException { if (config == null) { throw new IllegalArgumentException("DownloadStreamConfig cannot be null"); } boolean decompress = config.isDecompress(); logger.debug( "Download data to stream: stageName={}" + ", sourceFileName={}", stageName, sourceFileName); if (isNullOrEmpty(stageName)) { throw new SnowflakeSQLLoggedException( sfSession, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "stage name is null or empty"); } if (isNullOrEmpty(sourceFileName)) { throw new SnowflakeSQLLoggedException( sfSession, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "source file name is null or empty"); } SnowflakeStatementImpl stmt = new SnowflakeStatementImpl( this, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.CLOSE_CURSORS_AT_COMMIT); StringBuilder getCommand = new StringBuilder(); getCommand.append("get "); if (!stageName.startsWith("@")) { getCommand.append("@"); } getCommand.append(stageName); getCommand.append("/"); if 
(sourceFileName.startsWith("/")) { sourceFileName = sourceFileName.substring(1); } getCommand.append(sourceFileName); // special characters and spaces require single quotes around stage name. boolean isSpecialChar = !sourceFileName.matches("^[a-zA-Z0-9_/.]*$"); if (isSpecialChar) { getCommand.insert(getCommand.indexOf("@"), "'"); getCommand.append("'"); } // this is a fake path, used to form Get query and retrieve stage info, // no file will be downloaded to this location getCommand.append(" file:///tmp/ /*jdbc download stream*/"); SFBaseFileTransferAgent transferAgent = sfConnectionHandler.getFileTransferAgent(getCommand.toString(), stmt.getSFBaseStatement()); InputStream stream = transferAgent.downloadStream(sourceFileName); if (decompress) { try { return new GZIPInputStream(stream); } catch (IOException ex) { throw new SnowflakeSQLLoggedException( sfSession, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, ex.getMessage()); } } else { return stream; } } public void setInjectedDelay(int delay) throws SQLException { raiseSQLExceptionIfConnectionIsClosed(); sfSession.setInjectedDelay(delay); } public void injectedDelay() throws SQLException { raiseSQLExceptionIfConnectionIsClosed(); int d = _injectedDelay.get(); if (d != 0) { _injectedDelay.set(0); try { logger.trace("delayed for {}", d); Thread.sleep(d); } catch (InterruptedException ex) { } } } public void setInjectFileUploadFailure(String fileToFail) throws SQLException { raiseSQLExceptionIfConnectionIsClosed(); sfSession.setInjectFileUploadFailure(fileToFail); } public SFBaseSession getSFBaseSession() { return getSFBaseSession(null); } public SFBaseSession getSFBaseSession(InternalCallMarker internalCallMarker) { recordIfExternal("SnowflakeConnectionImpl", "getSFBaseSession", internalCallMarker); return sfSession; } // Convenience method to return an SFSession-typed SFBaseSession object, but // performs the type-checking as necessary. 
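// Illustration only: a hypothetical helper (not part of the driver) showing how
// the streaming upload/download methods above fit together. Assumes the session
// can access a stage named "mystage" and that the default UploadStreamConfig
// leaves compression off.
private static String roundTripThroughStage(Connection conn) throws SQLException, IOException {
  SnowflakeConnection sfConn = conn.unwrap(SnowflakeConnection.class);
  byte[] payload = "hello stage".getBytes(java.nio.charset.StandardCharsets.UTF_8);
  // Upload the bytes as a single file on the stage; no local file is involved.
  sfConn.uploadStream("@mystage", "greeting.txt", new java.io.ByteArrayInputStream(payload));
  // Stream the same file back and collect it into a String.
  try (InputStream in = sfConn.downloadStream("@mystage", "greeting.txt")) {
    java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
    byte[] buffer = new byte[8192];
    int read;
    while ((read = in.read(buffer)) != -1) {
      out.write(buffer, 0, read);
    }
    return new String(out.toByteArray(), java.nio.charset.StandardCharsets.UTF_8);
  }
}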
public SFSession getSfSession() throws SnowflakeSQLException { return getSfSession(null); } public SFSession getSfSession(InternalCallMarker internalCallMarker) throws SnowflakeSQLException { recordIfExternal("SnowflakeConnectionImpl", "getSfSession", internalCallMarker); if (sfSession instanceof SFSession) { return (SFSession) sfSession; } throw new SnowflakeSQLException("getSFSession() called with a different SFBaseSession type."); } private void appendWarning(SQLWarning w) { if (sqlWarnings == null) { sqlWarnings = w; } else { sqlWarnings.setNextWarning(w); } } private void appendWarnings(List warnings) { for (SFException e : warnings) { appendWarning(new SQLWarning(e.getMessage(), e.getSqlState(), e.getVendorCode())); } } public boolean getShowStatementParameters() { return showStatementParameters; } public void removeClosedStatement(Statement stmt) { openStatements.remove(stmt); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/api/implementation/datasource/SnowflakeBasicDataSource.java ================================================ package net.snowflake.client.internal.api.implementation.datasource; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty; import java.io.IOException; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import java.io.PrintWriter; import java.io.Serializable; import java.security.PrivateKey; import java.sql.Connection; import java.sql.SQLException; import java.sql.SQLFeatureNotSupportedException; import java.util.List; import java.util.Properties; import java.util.logging.Logger; import net.snowflake.client.api.datasource.SnowflakeDataSource; import net.snowflake.client.api.driver.SnowflakeDriver; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.api.http.HttpHeadersCustomizer; import net.snowflake.client.internal.core.SFSessionProperty; import net.snowflake.client.internal.log.ArgSupplier; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; /** * Basic implementation of {@link SnowflakeDataSource} for Snowflake JDBC connections. * *
This class provides a simple, non-pooled DataSource implementation that creates new Snowflake * connections on demand. It is suitable for applications that do not require connection pooling or * for use with external connection pool managers. * *
Note: This class is not intended for direct instantiation. Use {@link * net.snowflake.client.api.datasource.SnowflakeDataSourceFactory#createDataSource()} instead. */ public class SnowflakeBasicDataSource implements SnowflakeDataSource, Serializable { private static final long serialVersionUID = 1L; private static final String AUTHENTICATOR_SNOWFLAKE_JWT = "SNOWFLAKE_JWT"; private static final String AUTHENTICATOR_OAUTH = "OAUTH"; private static final String AUTHENTICATOR_EXTERNAL_BROWSER = "EXTERNALBROWSER"; private static final String AUTHENTICATOR_USERNAME_PASSWORD_MFA = "USERNAME_PASSWORD_MFA"; private String url; private String serverName; private String user; private String password; private int portNumber = 0; private String authenticator; private Properties properties = new Properties(); private static final SFLogger logger = SFLoggerFactory.getLogger(SnowflakeBasicDataSource.class); static { try { Class.forName("net.snowflake.client.api.driver.SnowflakeDriver"); } catch (ClassNotFoundException e) { throw new IllegalStateException( "Unable to load " + "net.snowflake.client.api.driver.SnowflakeDriver. " + "Please check if you have proper Snowflake JDBC " + "Driver jar on the classpath", e); } } private void writeObjectHelper(ObjectOutputStream out) throws IOException { out.writeObject(url); out.writeObject(serverName); out.writeObject(user); out.writeObject(password); out.writeObject(portNumber); out.writeObject(authenticator); out.writeObject(properties); } private void readObjectHelper(ObjectInputStream in) throws IOException, ClassNotFoundException { url = (String) in.readObject(); serverName = (String) in.readObject(); user = (String) in.readObject(); password = (String) in.readObject(); portNumber = (int) in.readObject(); authenticator = (String) in.readObject(); properties = (Properties) in.readObject(); } private void writeObject(ObjectOutputStream out) throws IOException { writeObjectHelper(out); } private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { readObjectHelper(in); } @Override public Connection getConnection() throws SQLException { return getConnection(user, password); } @Override public Connection getConnection(String username, String password) throws SQLException { if (!AUTHENTICATOR_OAUTH.equalsIgnoreCase( authenticator)) { // For OAuth, no username is required if (username == null) { throw new SnowflakeSQLException( "Cannot create connection because username is missing in DataSource properties."); } properties.put(SFSessionProperty.USER.getPropertyKey(), username); } // The driver needs password for OAUTH as part of SNOW-533673 feature request. 
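// For reference, a typical configuration of this DataSource (a sketch, assuming
// SnowflakeDataSourceFactory.createDataSource() is the static factory named in
// the class javadoc; account, user, and credential values are placeholders):
//
//   SnowflakeDataSource ds = SnowflakeDataSourceFactory.createDataSource();
//   ds.setUrl("jdbc:snowflake://myaccount.snowflakecomputing.com");
//   ds.setUser("jsmith");
//   ds.setPassword("***");
//   ds.setWarehouse("MY_WH");
//   ds.setDatabaseName("MY_DB");
//   ds.setSchema("PUBLIC");
//   try (Connection conn = ds.getConnection()) {
//     // ... run statements ...
//   }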
if (!AUTHENTICATOR_SNOWFLAKE_JWT.equalsIgnoreCase(authenticator) && !AUTHENTICATOR_EXTERNAL_BROWSER.equalsIgnoreCase(authenticator)) { if (password == null) { throw new SnowflakeSQLException( "Cannot create connection because password is missing in DataSource properties."); } properties.put(SFSessionProperty.PASSWORD.getPropertyKey(), password); } try { Connection con = SnowflakeDriver.INSTANCE.connect(getUrl(), properties); logger.trace("Created a connection for {} at {}", user, (ArgSupplier) this::getUrl); return con; } catch (SQLException e) { logger.error("Failed to create a connection for {} at {}: {}", user, getUrl(), e); throw e; } } @Override public PrintWriter getLogWriter() throws SQLException { throw new SQLFeatureNotSupportedException(); } @Override public void setLogWriter(PrintWriter out) throws SQLException { throw new SQLFeatureNotSupportedException(); } @Override public int getLoginTimeout() { try { return Integer.parseInt( properties.getProperty(SFSessionProperty.LOGIN_TIMEOUT.getPropertyKey())); } catch (NumberFormatException e) { return 0; } } @Override public void setLoginTimeout(int seconds) throws SQLException { properties.put(SFSessionProperty.LOGIN_TIMEOUT.getPropertyKey(), Integer.toString(seconds)); } @Override public Logger getParentLogger() throws SQLFeatureNotSupportedException { throw new SQLFeatureNotSupportedException(); } @Override public boolean isWrapperFor(Class iface) { return false; } @Override public T unwrap(Class iface) { return null; } @Override public void setUrl(String url) { this.url = url; } @Override public void setDatabaseName(String databaseName) { properties.put(SFSessionProperty.DATABASE.getPropertyKey(), databaseName); } @Override public void setSchema(String schema) { properties.put(SFSessionProperty.SCHEMA.getPropertyKey(), schema); } @Override public void setWarehouse(String warehouse) { properties.put(SFSessionProperty.WAREHOUSE.getPropertyKey(), warehouse); } @Override public void setRole(String role) { properties.put(SFSessionProperty.ROLE.getPropertyKey(), role); } @Override public void setUser(String user) { this.user = user; } @Override public void setServerName(String serverName) { this.serverName = serverName; } @Override public void setPassword(String password) { this.password = password; } @Override public void setPortNumber(int portNumber) { this.portNumber = portNumber; } @Override public void setAccount(String account) { this.properties.put(SFSessionProperty.ACCOUNT.getPropertyKey(), account); } @Override public void setSsl(boolean ssl) { this.properties.put("ssl", String.valueOf(ssl)); } @Override public void setAuthenticator(String authenticator) { this.authenticator = authenticator; this.properties.put(SFSessionProperty.AUTHENTICATOR.getPropertyKey(), authenticator); } @Override public void setToken(String token) { this.properties.put(SFSessionProperty.TOKEN.getPropertyKey(), token); } @Override public String getUrl() { if (url != null) { return url; } else { // generate url; StringBuilder url = new StringBuilder(100); url.append("jdbc:snowflake://"); url.append(serverName); if (portNumber != 0) { url.append(":").append(portNumber); } return url.toString(); } } @Override public void setPrivateKey(PrivateKey privateKey) { this.setAuthenticator(AUTHENTICATOR_SNOWFLAKE_JWT); this.properties.put(SFSessionProperty.PRIVATE_KEY.getPropertyKey(), privateKey); } @Override public void setPrivateKeyFile(String location, String password) { this.setAuthenticator(AUTHENTICATOR_SNOWFLAKE_JWT); 
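// For example (a sketch; the key path and passphrase are placeholders):
//   ds.setPrivateKeyFile("/path/to/rsa_key.p8", "key-passphrase");
// switches the DataSource to key-pair (SNOWFLAKE_JWT) authentication, so
// getConnection() no longer requires a password.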
this.properties.put(SFSessionProperty.PRIVATE_KEY_FILE.getPropertyKey(), location); if (!isNullOrEmpty(password)) { this.properties.put(SFSessionProperty.PRIVATE_KEY_PWD.getPropertyKey(), password); } } @Override public void setPrivateKeyBase64(String privateKeyBase64, String password) { this.setAuthenticator(AUTHENTICATOR_SNOWFLAKE_JWT); this.properties.put(SFSessionProperty.PRIVATE_KEY_BASE64.getPropertyKey(), privateKeyBase64); if (!isNullOrEmpty(password)) { this.properties.put(SFSessionProperty.PRIVATE_KEY_PWD.getPropertyKey(), password); } } @Override public void setTracing(String tracing) { this.properties.put(SFSessionProperty.TRACING.getPropertyKey(), tracing); } @Override public Properties getProperties() { return this.properties; } @Override public void setAllowUnderscoresInHost(boolean allowUnderscoresInHost) { this.properties.put( SFSessionProperty.ALLOW_UNDERSCORES_IN_HOST.getPropertyKey(), String.valueOf(allowUnderscoresInHost)); } @Override public void setDisableGcsDefaultCredentials(boolean isGcsDefaultCredentialsDisabled) { this.properties.put( SFSessionProperty.DISABLE_GCS_DEFAULT_CREDENTIALS.getPropertyKey(), String.valueOf(isGcsDefaultCredentialsDisabled)); } @Override public void setDisableSamlURLCheck(boolean disableSamlURLCheck) { this.properties.put( SFSessionProperty.DISABLE_SAML_URL_CHECK.getPropertyKey(), String.valueOf(disableSamlURLCheck)); } @Override public void setPasscode(String passcode) { this.setAuthenticator(AUTHENTICATOR_USERNAME_PASSWORD_MFA); this.properties.put(SFSessionProperty.PASSCODE.getPropertyKey(), passcode); } @Override public void setPasscodeInPassword(boolean isPasscodeInPassword) { this.properties.put( SFSessionProperty.PASSCODE_IN_PASSWORD.getPropertyKey(), String.valueOf(isPasscodeInPassword)); if (isPasscodeInPassword) { this.setAuthenticator(AUTHENTICATOR_USERNAME_PASSWORD_MFA); } } @Override public void setDisableSocksProxy(boolean ignoreJvmSocksProxy) { this.properties.put( SFSessionProperty.DISABLE_SOCKS_PROXY.getPropertyKey(), String.valueOf(ignoreJvmSocksProxy)); } @Override public void setNonProxyHosts(String nonProxyHosts) { this.properties.put(SFSessionProperty.NON_PROXY_HOSTS.getPropertyKey(), nonProxyHosts); } @Override public void setProxyHost(String proxyHost) { this.properties.put(SFSessionProperty.PROXY_HOST.getPropertyKey(), proxyHost); } @Override public void setProxyPassword(String proxyPassword) { this.properties.put(SFSessionProperty.PROXY_PASSWORD.getPropertyKey(), proxyPassword); } @Override public void setProxyPort(int proxyPort) { this.properties.put(SFSessionProperty.PROXY_PORT.getPropertyKey(), Integer.toString(proxyPort)); } @Override public void setProxyProtocol(String proxyProtocol) { this.properties.put(SFSessionProperty.PROXY_PROTOCOL.getPropertyKey(), proxyProtocol); } @Override public void setProxyUser(String proxyUser) { this.properties.put(SFSessionProperty.PROXY_USER.getPropertyKey(), proxyUser); } @Override public void setUseProxy(boolean useProxy) { this.properties.put(SFSessionProperty.USE_PROXY.getPropertyKey(), String.valueOf(useProxy)); } @Override public void setNetworkTimeout(int networkTimeoutSeconds) { this.properties.put( SFSessionProperty.NETWORK_TIMEOUT.getPropertyKey(), Integer.toString(networkTimeoutSeconds)); } @Override public void setQueryTimeout(int queryTimeoutSeconds) { this.properties.put( SFSessionProperty.QUERY_TIMEOUT.getPropertyKey(), Integer.toString(queryTimeoutSeconds)); } @Override public void setApplication(String application) { 
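// The proxy-related setters above combine as follows (a sketch; host, port,
// and credentials are placeholders):
//   ds.setUseProxy(true);
//   ds.setProxyHost("proxy.example.com");
//   ds.setProxyPort(8080);
//   ds.setProxyUser("proxyuser");
//   ds.setProxyPassword("***");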
this.properties.put(SFSessionProperty.APPLICATION.getPropertyKey(), application); } @Override public void setClientConfigFile(String clientConfigFile) { this.properties.put(SFSessionProperty.CLIENT_CONFIG_FILE.getPropertyKey(), clientConfigFile); } @Override public void setEnablePatternSearch(boolean enablePatternSearch) { this.properties.put( SFSessionProperty.ENABLE_PATTERN_SEARCH.getPropertyKey(), String.valueOf(enablePatternSearch)); } @Override public void setEnablePutGet(boolean enablePutGet) { this.properties.put( SFSessionProperty.ENABLE_PUT_GET.getPropertyKey(), String.valueOf(enablePutGet)); } @Override public void setArrowTreatDecimalAsInt(boolean treatDecimalAsInt) { this.properties.put( SFSessionProperty.JDBC_ARROW_TREAT_DECIMAL_AS_INT.getPropertyKey(), String.valueOf(treatDecimalAsInt)); } @Override public void setMaxHttpRetries(int maxHttpRetries) { this.properties.put( SFSessionProperty.MAX_HTTP_RETRIES.getPropertyKey(), Integer.toString(maxHttpRetries)); } @Override public void setOcspFailOpen(boolean ocspFailOpen) { this.properties.put( SFSessionProperty.OCSP_FAIL_OPEN.getPropertyKey(), String.valueOf(ocspFailOpen)); } @Override public void setPutGetMaxRetries(int putGetMaxRetries) { this.properties.put( SFSessionProperty.PUT_GET_MAX_RETRIES.getPropertyKey(), Integer.toString(putGetMaxRetries)); } @Override public void setStringsQuotedForColumnDef(boolean stringsQuotedForColumnDef) { this.properties.put( SFSessionProperty.STRINGS_QUOTED.getPropertyKey(), String.valueOf(stringsQuotedForColumnDef)); } @Override public void setEnableDiagnostics(boolean enableDiagnostics) { this.properties.put( SFSessionProperty.ENABLE_DIAGNOSTICS.getPropertyKey(), String.valueOf(enableDiagnostics)); } @Override public void setDiagnosticsAllowlistFile(String diagnosticsAllowlistFile) { this.properties.put( SFSessionProperty.DIAGNOSTICS_ALLOWLIST_FILE.getPropertyKey(), diagnosticsAllowlistFile); } @Override public void setJDBCDefaultFormatDateWithTimezone(Boolean jdbcDefaultFormatDateWithTimezone) { this.properties.put( "JDBC_DEFAULT_FORMAT_DATE_WITH_TIMEZONE", jdbcDefaultFormatDateWithTimezone); } @Override public void setGetDateUseNullTimezone(Boolean getDateUseNullTimezone) { this.properties.put("JDBC_GET_DATE_USE_NULL_TIMEZONE", getDateUseNullTimezone); } @Override public void setEnableClientRequestMfaToken(boolean enableClientRequestMfaToken) { this.setAuthenticator(AUTHENTICATOR_USERNAME_PASSWORD_MFA); this.properties.put( SFSessionProperty.ENABLE_CLIENT_REQUEST_MFA_TOKEN.getPropertyKey(), enableClientRequestMfaToken); } @Override public void setEnableClientStoreTemporaryCredential( boolean enableClientStoreTemporaryCredential) { this.setAuthenticator(AUTHENTICATOR_EXTERNAL_BROWSER); this.properties.put( SFSessionProperty.ENABLE_CLIENT_STORE_TEMPORARY_CREDENTIAL.getPropertyKey(), enableClientStoreTemporaryCredential); } @Override public void setBrowserResponseTimeout(int seconds) { this.setAuthenticator(AUTHENTICATOR_EXTERNAL_BROWSER); this.properties.put("BROWSER_RESPONSE_TIMEOUT", Integer.toString(seconds)); } @Override public void setHttpHeadersCustomizers(List httpHeadersCustomizers) { this.properties.put( HttpHeadersCustomizer.HTTP_HEADER_CUSTOMIZERS_PROPERTY_KEY, httpHeadersCustomizers); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/api/implementation/metadata/SnowflakeDatabaseMetaDataImpl.java ================================================ package net.snowflake.client.internal.api.implementation.metadata; import static 
net.snowflake.client.internal.jdbc.DBMetadataResultSetMetadata.GET_CATALOGS; import static net.snowflake.client.internal.jdbc.DBMetadataResultSetMetadata.GET_COLUMNS; import static net.snowflake.client.internal.jdbc.DBMetadataResultSetMetadata.GET_COLUMNS_EXTENDED_SET; import static net.snowflake.client.internal.jdbc.DBMetadataResultSetMetadata.GET_FOREIGN_KEYS; import static net.snowflake.client.internal.jdbc.DBMetadataResultSetMetadata.GET_FUNCTIONS; import static net.snowflake.client.internal.jdbc.DBMetadataResultSetMetadata.GET_FUNCTION_COLUMNS; import static net.snowflake.client.internal.jdbc.DBMetadataResultSetMetadata.GET_PRIMARY_KEYS; import static net.snowflake.client.internal.jdbc.DBMetadataResultSetMetadata.GET_PROCEDURES; import static net.snowflake.client.internal.jdbc.DBMetadataResultSetMetadata.GET_PROCEDURE_COLUMNS; import static net.snowflake.client.internal.jdbc.DBMetadataResultSetMetadata.GET_SCHEMAS; import static net.snowflake.client.internal.jdbc.DBMetadataResultSetMetadata.GET_STREAMS; import static net.snowflake.client.internal.jdbc.DBMetadataResultSetMetadata.GET_TABLES; import static net.snowflake.client.internal.jdbc.DBMetadataResultSetMetadata.GET_TABLE_PRIVILEGES; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty; import static net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.internalCallMarker; import static net.snowflake.client.internal.jdbc.util.SnowflakeTypeHelper.convertStringToType; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ObjectNode; import java.sql.Connection; import java.sql.ResultSet; import java.sql.RowIdLifetime; import java.sql.SQLException; import java.sql.Statement; import java.sql.Types; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Optional; import java.util.Set; import java.util.regex.Pattern; import net.snowflake.client.api.connection.SnowflakeDatabaseMetaData; import net.snowflake.client.api.driver.SnowflakeDriver; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.api.resultset.SnowflakeResultSet; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.api.implementation.connection.SnowflakeConnectionImpl; import net.snowflake.client.internal.core.ObjectMapperFactory; import net.snowflake.client.internal.core.SFBaseSession; import net.snowflake.client.internal.jdbc.DBMetadataResultSetMetadata; import net.snowflake.client.internal.jdbc.SnowflakeColumnMetadata; import net.snowflake.client.internal.jdbc.SnowflakeDatabaseMetaDataQueryResultSet; import net.snowflake.client.internal.jdbc.SnowflakeDatabaseMetaDataResultSet; import net.snowflake.client.internal.jdbc.SnowflakeLoggedFeatureNotSupportedException; import net.snowflake.client.internal.jdbc.telemetry.Telemetry; import net.snowflake.client.internal.jdbc.telemetry.TelemetryData; import net.snowflake.client.internal.jdbc.telemetry.TelemetryField; import net.snowflake.client.internal.jdbc.telemetry.TelemetryUtil; import net.snowflake.client.internal.log.ArgSupplier; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.common.core.SqlState; import net.snowflake.common.util.Wildcard; public class SnowflakeDatabaseMetaDataImpl implements 
SnowflakeDatabaseMetaData { private static final SFLogger logger = SFLoggerFactory.getLogger(SnowflakeDatabaseMetaDataImpl.class); private static final ObjectMapper mapper = ObjectMapperFactory.getObjectMapper(); private static final String DatabaseProductName = "Snowflake"; private static final String DriverName = "Snowflake"; private static final char SEARCH_STRING_ESCAPE = '\\'; private static final String JDBCVersion = "4.2"; // Open Group CLI Functions // LOG10 is not supported public static final String NumericFunctionsSupported = "ABS,ACOS,ASIN,ATAN,ATAN2,CBRT,CEILING,COS,COT,DEGREES,EXP,FACTORIAL," + "FLOOR,HAVERSINE,LN,LOG,MOD,PI,POWER,RADIANS,RAND," + "ROUND,SIGN,SIN,SQRT,SQUARE,TAN,TRUNCATE"; // DIFFERENCE and SOUNDEX are not supported public static final String StringFunctionsSupported = "ASCII,BIT_LENGTH,CHAR,CONCAT,INSERT,LCASE,LEFT,LENGTH,LPAD," + "LOCATE,LTRIM,OCTET_LENGTH,PARSE_IP,PARSE_URL,REPEAT,REVERSE," + "REPLACE,RPAD,RTRIMMED_LENGTH,SPACE,SPLIT,SPLIT_PART," + "SPLIT_TO_TABLE,STRTOK,STRTOK_TO_ARRAY,STRTOK_SPLIT_TO_TABLE," + "TRANSLATE,TRIM,UNICODE,UUID_STRING,INITCAP,LOWER,UPPER,REGEXP," + "REGEXP_COUNT,REGEXP_INSTR,REGEXP_LIKE,REGEXP_REPLACE," + "REGEXP_SUBSTR,RLIKE,CHARINDEX,CONTAINS,EDITDISTANCE,ENDSWITH," + "ILIKE,ILIKE ANY,LIKE,LIKE ALL,LIKE ANY,POSITION,REPLACE,RIGHT," + "STARTSWITH,SUBSTRING,COMPRESS,DECOMPRESS_BINARY,DECOMPRESS_STRING," + "BASE64_DECODE_BINARY,BASE64_DECODE_STRING,BASE64_ENCODE," + "HEX_DECODE_BINARY,HEX_DECODE_STRING,HEX_ENCODE," + "TRY_BASE64_DECODE_BINARY,TRY_BASE64_DECODE_STRING," + "TRY_HEX_DECODE_BINARY,TRY_HEX_DECODE_STRING,MD_5,MD5_HEX," + "MD5_BINARY,SHA1,SHA1_HEX,SHA2,SHA1_BINARY,SHA2_HEX,SHA2_BINARY," + " HASH,HASH_AGG,COLLATE,COLLATION"; private static final String DateAndTimeFunctionsSupported = "CURDATE," + "CURTIME,DAYNAME,DAYOFMONTH,DAYOFWEEK,DAYOFYEAR,HOUR,MINUTE,MONTH," + "MONTHNAME,NOW,QUARTER,SECOND,TIMESTAMPADD,TIMESTAMPDIFF,WEEK,YEAR"; public static final String SystemFunctionsSupported = "DATABASE,IFNULL,USER"; // These are keywords not in SQL2003 standard private static final String notSQL2003Keywords = String.join( ",", "ACCOUNT", "ASOF", "BIT", "BYTEINT", "CONNECTION", "DATABASE", "DATETIME", "DATE_PART", "FIXED", "FOLLOWING", "GSCLUSTER", "GSPACKAGE", "IDENTIFIER", "ILIKE", "INCREMENT", "ISSUE", "LONG", "MAP", "MATCH_CONDITION", "MINUS", "NUMBER", "OBJECT", "ORGANIZATION", "QUALIFY", "REFERENCE", "REGEXP", "RLIKE", "SAMPLE", "SCHEMA", "STRING", "TEXT", "TIMESTAMPLTZ", "TIMESTAMPNTZ", "TIMESTAMPTZ", "TIMESTAMP_LTZ", "TIMESTAMP_NTZ", "TIMESTAMP_TZ", "TINYINT", "TRANSIT", "TRY_CAST", "VARIANT", "VECTOR", "VIEW"); private static final String MAX_VARCHAR_BINARY_SIZE_PARAM_NAME = "VARCHAR_AND_BINARY_MAX_SIZE_IN_RESULT"; // Defaults to 16MB private static final int DEFAULT_MAX_LOB_SIZE = 16777216; private final Connection connection; private final SFBaseSession session; private Telemetry ibInstance; private final boolean metadataRequestUseConnectionCtx; private boolean useSessionSchema = false; private final boolean metadataRequestUseSessionDatabase; private boolean stringsQuoted = false; // The number of columns for the result set returned from the current procedure. A value of -1 // means the procedure doesn't return a result set private int procedureResultsetColumnNum; // Indicates if pattern matching is allowed for all parameters. 
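// For example, since getSearchStringEscape() returns the backslash defined by
// SEARCH_STRING_ESCAPE above, a literal underscore in a metadata pattern must
// be escaped (a sketch; assumes an open Connection `conn`, and the catalog and
// schema names are placeholders):
//
//   DatabaseMetaData meta = conn.getMetaData();
//   try (ResultSet tables =
//       meta.getTables("MYDB", "PUBLIC", "RAW\\_EVENTS%", new String[] {"TABLE"})) {
//     while (tables.next()) {
//       System.out.println(tables.getString("TABLE_NAME"));
//     }
//   }
//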
private boolean isPatternMatchingEnabled = true; private boolean exactSchemaSearchEnabled; private boolean enableWildcardsInShowMetadataCommands; public SnowflakeDatabaseMetaDataImpl(Connection connection) throws SQLException { logger.trace("SnowflakeDatabaseMetaDataImpl(SnowflakeConnection connection)", false); this.connection = connection; this.session = connection.unwrap(SnowflakeConnectionImpl.class).getSFBaseSession(internalCallMarker()); this.metadataRequestUseConnectionCtx = session.getMetadataRequestUseConnectionCtx(); this.metadataRequestUseSessionDatabase = session.getMetadataRequestUseSessionDatabase(); this.stringsQuoted = session.isStringQuoted(); this.ibInstance = session.getTelemetryClient(internalCallMarker()); this.procedureResultsetColumnNum = -1; this.isPatternMatchingEnabled = session.getEnablePatternSearch(); this.exactSchemaSearchEnabled = session.getEnableExactSchemaSearch(); this.enableWildcardsInShowMetadataCommands = session.getEnableWildcardsInShowMetadataCommands(); } private void raiseSQLExceptionIfConnectionIsClosed() throws SQLException { if (connection.isClosed()) { throw new SnowflakeSQLException(ErrorCode.CONNECTION_CLOSED); } } /** * Function to send in-band telemetry data about DatabaseMetadata get API calls and their * associated SHOW commands * * @param resultSet The ResultSet generated from the SHOW command in the function call. Can be of * type SnowflakeResultSet or SnowflakeDatabaseMetaDataResultSet * @param functionName name of DatabaseMetadata API function call * @param catalog database * @param schema schema * @param generalNamePattern name of table, function, etc * @param specificNamePattern name of table column, function parameter name, etc */ private void sendInBandTelemetryMetadataMetrics( ResultSet resultSet, String functionName, String catalog, String schema, String generalNamePattern, String specificNamePattern) { String queryId = ""; try { if (resultSet.isWrapperFor(SnowflakeResultSet.class)) { queryId = resultSet.unwrap(SnowflakeResultSet.class).getQueryID(); } else if (resultSet.isWrapperFor(SnowflakeDatabaseMetaDataResultSet.class)) { queryId = resultSet.unwrap(SnowflakeDatabaseMetaDataResultSet.class).getQueryID(); } } catch (SQLException e) { // This should never be reached because resultSet should always be one of the 2 types // unwrapped above. // In case we get here, do nothing; just don't include query ID } ObjectNode ibValue = mapper.createObjectNode(); ibValue.put("type", TelemetryField.METADATA_METRICS.toString()); ibValue.put("query_id", queryId); ibValue.put("function_name", functionName); ibValue.with("function_parameters").put("catalog", catalog); ibValue.with("function_parameters").put("schema", schema); ibValue.with("function_parameters").put("general_name_pattern", generalNamePattern); ibValue.with("function_parameters").put("specific_name_pattern", specificNamePattern); ibValue.put("use_connection_context", metadataRequestUseConnectionCtx ? 
"true" : "false"); ibValue.put("session_database_name", session.getDatabase()); ibValue.put("session_schema_name", session.getSchema()); TelemetryData data = TelemetryUtil.buildJobData(ibValue); ibInstance.addLogToBatch(data); } // used to get convert string back to normal after its special characters have been escaped to // send it through Wildcard regex private String unescapeChars(String escapedString) { String unescapedString = escapedString.replace("\\_", "_"); unescapedString = unescapedString.replace("\\%", "%"); unescapedString = unescapedString.replace("\\\\", "\\"); unescapedString = escapeSqlQuotes(unescapedString); return unescapedString; } // In SQL, double quotes must be escaped with an additional pair of double quotes. Add additional // quotes to avoid syntax errors with SQL queries. private String escapeSqlQuotes(String originalString) { return originalString.replace("\"", "\"\""); } /** * This guards against SQL injections by ensuring that any single quote is escaped properly. * * @param arg the original schema * @return */ private String escapeSingleQuoteForLikeCommand(String arg) { if (arg == null) { return null; } int i = 0; int index = arg.indexOf("'", i); while (index != -1) { if (index == 0 || (index > 0 && arg.charAt(index - 1) != '\\')) { arg = arg.replace("'", "\\'"); i = index + 2; } else { i = index + 1; } index = i < arg.length() ? arg.indexOf("'", i) : -1; } return arg; } private boolean isSchemaNameWildcardPattern(String inputString) { // if schema contains wildcard, don't treat it as wildcard; treat as just a schema name if // session schema or wildcards in identifiers in show metadata queries disabled return (useSessionSchema || !enableWildcardsInShowMetadataCommands) ? false : Wildcard.isWildcardPatternStr(inputString); } @Override public boolean allProceduresAreCallable() throws SQLException { logger.trace("boolean allProceduresAreCallable()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean allTablesAreSelectable() throws SQLException { logger.trace("boolean allTablesAreSelectable()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public String getURL() throws SQLException { logger.trace("String getURL()", false); raiseSQLExceptionIfConnectionIsClosed(); String url = session.getUrl(); return url.startsWith("http://") ? url.replace("http://", "jdbc:snowflake://") : url.replace("https://", "jdbc:snowflake://"); } @Override public String getUserName() throws SQLException { logger.trace("String getUserName()", false); raiseSQLExceptionIfConnectionIsClosed(); return session.getUser(); } @Override public boolean isReadOnly() throws SQLException { logger.trace("boolean isReadOnly()", false); raiseSQLExceptionIfConnectionIsClosed(); // no read only mode is supported. 
return false; } @Override public boolean nullsAreSortedHigh() throws SQLException { logger.trace("boolean nullsAreSortedHigh()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public boolean nullsAreSortedLow() throws SQLException { logger.trace("boolean nullsAreSortedLow()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean nullsAreSortedAtStart() throws SQLException { logger.trace("boolean nullsAreSortedAtStart()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean nullsAreSortedAtEnd() throws SQLException { logger.trace("boolean nullsAreSortedAtEnd()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public String getDatabaseProductName() throws SQLException { logger.trace("String getDatabaseProductName()", false); raiseSQLExceptionIfConnectionIsClosed(); return DatabaseProductName; } @Override public String getDatabaseProductVersion() throws SQLException { logger.trace("String getDatabaseProductVersion()", false); raiseSQLExceptionIfConnectionIsClosed(); return connection.unwrap(SnowflakeConnectionImpl.class).getDatabaseVersion(); } @Override public String getDriverName() throws SQLException { logger.trace("String getDriverName()", false); raiseSQLExceptionIfConnectionIsClosed(); return DriverName; } @Override public String getDriverVersion() throws SQLException { logger.trace("String getDriverVersion()", false); raiseSQLExceptionIfConnectionIsClosed(); return SnowflakeDriver.INSTANCE.getMajorVersion() + "." + SnowflakeDriver.INSTANCE.getMinorVersion() + "." + SnowflakeDriver.INSTANCE.getPatchVersion(); } @Override public int getDriverMajorVersion() { logger.trace("int getDriverMajorVersion()", false); return SnowflakeDriver.INSTANCE.getMajorVersion(); } @Override public int getDriverMinorVersion() { logger.trace("int getDriverMinorVersion()", false); return SnowflakeDriver.INSTANCE.getMinorVersion(); } @Override public boolean usesLocalFiles() throws SQLException { logger.trace("boolean usesLocalFiles()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean usesLocalFilePerTable() throws SQLException { logger.trace("boolean usesLocalFilePerTable()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean supportsMixedCaseIdentifiers() throws SQLException { logger.trace("boolean supportsMixedCaseIdentifiers()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean storesUpperCaseIdentifiers() throws SQLException { logger.trace("boolean storesUpperCaseIdentifiers()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public boolean storesLowerCaseIdentifiers() throws SQLException { logger.trace("boolean storesLowerCaseIdentifiers()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean storesMixedCaseIdentifiers() throws SQLException { logger.trace("boolean storesMixedCaseIdentifiers()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException { logger.trace("boolean supportsMixedCaseQuotedIdentifiers()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public boolean storesUpperCaseQuotedIdentifiers() throws SQLException { logger.trace("boolean storesUpperCaseQuotedIdentifiers()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean 
storesLowerCaseQuotedIdentifiers() throws SQLException { logger.trace("boolean storesLowerCaseQuotedIdentifiers()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean storesMixedCaseQuotedIdentifiers() throws SQLException { logger.trace("boolean storesMixedCaseQuotedIdentifiers()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public String getIdentifierQuoteString() throws SQLException { logger.trace("String getIdentifierQuoteString()", false); raiseSQLExceptionIfConnectionIsClosed(); return "\""; } @Override public String getSQLKeywords() throws SQLException { logger.trace("String getSQLKeywords()", false); raiseSQLExceptionIfConnectionIsClosed(); return notSQL2003Keywords; } @Override public String getNumericFunctions() throws SQLException { logger.trace("String getNumericFunctions()", false); raiseSQLExceptionIfConnectionIsClosed(); return NumericFunctionsSupported; } @Override public String getStringFunctions() throws SQLException { logger.trace("String getStringFunctions()", false); raiseSQLExceptionIfConnectionIsClosed(); return StringFunctionsSupported; } @Override public String getSystemFunctions() throws SQLException { logger.trace("String getSystemFunctions()", false); raiseSQLExceptionIfConnectionIsClosed(); return SystemFunctionsSupported; } @Override public String getTimeDateFunctions() throws SQLException { logger.trace("String getTimeDateFunctions()", false); raiseSQLExceptionIfConnectionIsClosed(); return DateAndTimeFunctionsSupported; } @Override public String getSearchStringEscape() throws SQLException { logger.trace("String getSearchStringEscape()", false); raiseSQLExceptionIfConnectionIsClosed(); return Character.toString(SEARCH_STRING_ESCAPE); } @Override public String getExtraNameCharacters() throws SQLException { logger.trace("String getExtraNameCharacters()", false); raiseSQLExceptionIfConnectionIsClosed(); return "$"; } @Override public boolean supportsAlterTableWithAddColumn() throws SQLException { logger.trace("boolean supportsAlterTableWithAddColumn()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public boolean supportsAlterTableWithDropColumn() throws SQLException { logger.trace("boolean supportsAlterTableWithDropColumn()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public boolean supportsColumnAliasing() throws SQLException { logger.trace("boolean supportsColumnAliasing()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public boolean nullPlusNonNullIsNull() throws SQLException { logger.trace("boolean nullPlusNonNullIsNull()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public boolean supportsConvert() throws SQLException { logger.trace("boolean supportsConvert()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean supportsConvert(int fromType, int toType) throws SQLException { logger.trace("boolean supportsConvert(int fromType, int toType)", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean supportsTableCorrelationNames() throws SQLException { logger.trace("boolean supportsTableCorrelationNames()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public boolean supportsDifferentTableCorrelationNames() throws SQLException { logger.trace("boolean supportsDifferentTableCorrelationNames()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean 
supportsExpressionsInOrderBy() throws SQLException { logger.trace("boolean supportsExpressionsInOrderBy()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public boolean supportsOrderByUnrelated() throws SQLException { logger.trace("boolean supportsOrderByUnrelated()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public boolean supportsGroupBy() throws SQLException { logger.trace("boolean supportsGroupBy()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public boolean supportsGroupByUnrelated() throws SQLException { logger.trace("boolean supportsGroupByUnrelated()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean supportsGroupByBeyondSelect() throws SQLException { logger.trace("boolean supportsGroupByBeyondSelect()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public boolean supportsLikeEscapeClause() throws SQLException { logger.trace("boolean supportsLikeEscapeClause()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean supportsMultipleResultSets() throws SQLException { logger.trace("boolean supportsMultipleResultSets()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean supportsMultipleTransactions() throws SQLException { logger.trace("boolean supportsMultipleTransactions()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public boolean supportsNonNullableColumns() throws SQLException { logger.trace("boolean supportsNonNullableColumns()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public boolean supportsMinimumSQLGrammar() throws SQLException { logger.trace("boolean supportsMinimumSQLGrammar()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean supportsCoreSQLGrammar() throws SQLException { logger.trace("boolean supportsCoreSQLGrammar()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean supportsExtendedSQLGrammar() throws SQLException { logger.trace("boolean supportsExtendedSQLGrammar()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean supportsANSI92EntryLevelSQL() throws SQLException { logger.trace("boolean supportsANSI92EntryLevelSQL()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public boolean supportsANSI92IntermediateSQL() throws SQLException { logger.trace("boolean supportsANSI92IntermediateSQL()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean supportsANSI92FullSQL() throws SQLException { logger.trace("boolean supportsANSI92FullSQL()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean supportsIntegrityEnhancementFacility() throws SQLException { logger.trace("boolean supportsIntegrityEnhancementFacility()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean supportsOuterJoins() throws SQLException { logger.trace("boolean supportsOuterJoins()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public boolean supportsFullOuterJoins() throws SQLException { logger.trace("boolean supportsFullOuterJoins()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public boolean supportsLimitedOuterJoins() throws SQLException { logger.trace("boolean supportsLimitedOuterJoins()", false); 
raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public String getSchemaTerm() throws SQLException { logger.trace("String getSchemaTerm()", false); raiseSQLExceptionIfConnectionIsClosed(); return "schema"; } @Override public String getProcedureTerm() throws SQLException { logger.trace("String getProcedureTerm()", false); raiseSQLExceptionIfConnectionIsClosed(); return "procedure"; } @Override public String getCatalogTerm() throws SQLException { logger.trace("String getCatalogTerm()", false); raiseSQLExceptionIfConnectionIsClosed(); return "database"; } @Override public boolean isCatalogAtStart() throws SQLException { logger.trace("boolean isCatalogAtStart()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public String getCatalogSeparator() throws SQLException { logger.trace("String getCatalogSeparator()", false); raiseSQLExceptionIfConnectionIsClosed(); return "."; } @Override public boolean supportsSchemasInDataManipulation() throws SQLException { logger.trace("boolean supportsSchemasInDataManipulation()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public boolean supportsSchemasInProcedureCalls() throws SQLException { logger.trace("boolean supportsSchemasInProcedureCalls()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean supportsSchemasInTableDefinitions() throws SQLException { logger.trace("boolean supportsSchemasInTableDefinitions()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public boolean supportsSchemasInIndexDefinitions() throws SQLException { logger.trace("boolean supportsSchemasInIndexDefinitions()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean supportsSchemasInPrivilegeDefinitions() throws SQLException { logger.trace("boolean supportsSchemasInPrivilegeDefinitions()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean supportsCatalogsInDataManipulation() throws SQLException { logger.trace("boolean supportsCatalogsInDataManipulation()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public boolean supportsCatalogsInProcedureCalls() throws SQLException { logger.trace("boolean supportsCatalogsInProcedureCalls()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean supportsCatalogsInTableDefinitions() throws SQLException { logger.trace("boolean supportsCatalogsInTableDefinitions()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public boolean supportsCatalogsInIndexDefinitions() throws SQLException { logger.trace("boolean supportsCatalogsInIndexDefinitions()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean supportsCatalogsInPrivilegeDefinitions() throws SQLException { logger.trace("boolean supportsCatalogsInPrivilegeDefinitions()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean supportsPositionedDelete() throws SQLException { logger.trace("boolean supportsPositionedDelete()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean supportsPositionedUpdate() throws SQLException { logger.trace("boolean supportsPositionedUpdate()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean supportsSelectForUpdate() throws SQLException { logger.trace("boolean supportsSelectForUpdate()", false); 
raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean supportsStoredProcedures() throws SQLException { logger.trace("boolean supportsStoredProcedures()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public boolean supportsSubqueriesInComparisons() throws SQLException { logger.trace("boolean supportsSubqueriesInComparisons()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public boolean supportsSubqueriesInExists() throws SQLException { logger.trace("boolean supportsSubqueriesInExists()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public boolean supportsSubqueriesInIns() throws SQLException { logger.trace("boolean supportsSubqueriesInIns()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public boolean supportsSubqueriesInQuantifieds() throws SQLException { logger.trace("boolean supportsSubqueriesInQuantifieds()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean supportsCorrelatedSubqueries() throws SQLException { logger.trace("boolean supportsCorrelatedSubqueries()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public boolean supportsUnion() throws SQLException { logger.trace("boolean supportsUnion()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public boolean supportsUnionAll() throws SQLException { logger.trace("boolean supportsUnionAll()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public boolean supportsOpenCursorsAcrossCommit() throws SQLException { logger.trace("boolean supportsOpenCursorsAcrossCommit()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean supportsOpenCursorsAcrossRollback() throws SQLException { logger.trace("boolean supportsOpenCursorsAcrossRollback()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean supportsOpenStatementsAcrossCommit() throws SQLException { logger.trace("boolean supportsOpenStatementsAcrossCommit()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean supportsOpenStatementsAcrossRollback() throws SQLException { logger.trace("boolean supportsOpenStatementsAcrossRollback()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public int getMaxBinaryLiteralLength() throws SQLException { logger.trace("int getMaxBinaryLiteralLength()", false); raiseSQLExceptionIfConnectionIsClosed(); return getMaxCharLiteralLength() / 2; // binary literals are written in hex, two characters per byte, thus divided by 2 } @Override public int getMaxCharLiteralLength() throws SQLException { logger.trace("int getMaxCharLiteralLength()", false); raiseSQLExceptionIfConnectionIsClosed(); Optional<Integer> maxLiteralLengthFromSession = Optional.ofNullable( (Integer) session.getOtherParameter(MAX_VARCHAR_BINARY_SIZE_PARAM_NAME)); return maxLiteralLengthFromSession.orElse(DEFAULT_MAX_LOB_SIZE); } @Override public int getMaxColumnNameLength() throws SQLException { logger.trace("int getMaxColumnNameLength()", false); raiseSQLExceptionIfConnectionIsClosed(); return 255; } @Override public int getMaxColumnsInGroupBy() throws SQLException { logger.trace("int getMaxColumnsInGroupBy()", false); raiseSQLExceptionIfConnectionIsClosed(); return 0; } @Override public int getMaxColumnsInIndex() throws SQLException { logger.trace("int getMaxColumnsInIndex()", false); raiseSQLExceptionIfConnectionIsClosed(); return 0; } @Override
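// By JDBC convention, a zero returned from these getMax* limit methods means the limit is
// unknown or has no fixed maximum.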
public int getMaxColumnsInOrderBy() throws SQLException { logger.trace("int getMaxColumnsInOrderBy()", false); raiseSQLExceptionIfConnectionIsClosed(); return 0; } @Override public int getMaxColumnsInSelect() throws SQLException { logger.trace("int getMaxColumnsInSelect()", false); raiseSQLExceptionIfConnectionIsClosed(); return 0; } @Override public int getMaxColumnsInTable() throws SQLException { logger.trace("int getMaxColumnsInTable()", false); raiseSQLExceptionIfConnectionIsClosed(); return 0; } @Override public int getMaxConnections() throws SQLException { logger.trace("int getMaxConnections()", false); raiseSQLExceptionIfConnectionIsClosed(); return 0; } @Override public int getMaxCursorNameLength() throws SQLException { logger.trace("int getMaxCursorNameLength()", false); raiseSQLExceptionIfConnectionIsClosed(); return 0; } @Override public int getMaxIndexLength() throws SQLException { logger.trace("int getMaxIndexLength()", false); raiseSQLExceptionIfConnectionIsClosed(); return 0; } @Override public int getMaxSchemaNameLength() throws SQLException { logger.trace("int getMaxSchemaNameLength()", false); raiseSQLExceptionIfConnectionIsClosed(); return 255; } @Override public int getMaxProcedureNameLength() throws SQLException { logger.trace("int getMaxProcedureNameLength()", false); raiseSQLExceptionIfConnectionIsClosed(); return 0; } @Override public int getMaxCatalogNameLength() throws SQLException { logger.trace("int getMaxCatalogNameLength()", false); raiseSQLExceptionIfConnectionIsClosed(); return 255; } @Override public int getMaxRowSize() throws SQLException { logger.trace("int getMaxRowSize()", false); raiseSQLExceptionIfConnectionIsClosed(); return 0; } @Override public boolean doesMaxRowSizeIncludeBlobs() throws SQLException { logger.trace("boolean doesMaxRowSizeIncludeBlobs()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public int getMaxStatementLength() throws SQLException { logger.trace("int getMaxStatementLength()", false); raiseSQLExceptionIfConnectionIsClosed(); return 0; } @Override public int getMaxStatements() throws SQLException { logger.trace("int getMaxStatements()", false); raiseSQLExceptionIfConnectionIsClosed(); return 0; } @Override public int getMaxTableNameLength() throws SQLException { logger.trace("int getMaxTableNameLength()", false); raiseSQLExceptionIfConnectionIsClosed(); return 255; } @Override public int getMaxTablesInSelect() throws SQLException { logger.trace("int getMaxTablesInSelect()", false); raiseSQLExceptionIfConnectionIsClosed(); return 0; } @Override public int getMaxUserNameLength() throws SQLException { logger.trace("int getMaxUserNameLength()", false); raiseSQLExceptionIfConnectionIsClosed(); return 255; } @Override public int getDefaultTransactionIsolation() throws SQLException { logger.trace("int getDefaultTransactionIsolation()", false); raiseSQLExceptionIfConnectionIsClosed(); return Connection.TRANSACTION_READ_COMMITTED; } @Override public boolean supportsTransactions() throws SQLException { logger.trace("boolean supportsTransactions()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public boolean supportsTransactionIsolationLevel(int level) throws SQLException { logger.trace("boolean supportsTransactionIsolationLevel(int level)", false); raiseSQLExceptionIfConnectionIsClosed(); return (level == Connection.TRANSACTION_NONE) || (level == Connection.TRANSACTION_READ_COMMITTED); } @Override public boolean supportsDataDefinitionAndDataManipulationTransactions() throws 
SQLException { logger.trace("boolean supportsDataDefinitionAndDataManipulationTransactions()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public boolean supportsDataManipulationTransactionsOnly() throws SQLException { logger.trace("boolean supportsDataManipulationTransactionsOnly()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean dataDefinitionCausesTransactionCommit() throws SQLException { logger.trace("boolean dataDefinitionCausesTransactionCommit()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public boolean dataDefinitionIgnoredInTransactions() throws SQLException { logger.trace("boolean dataDefinitionIgnoredInTransactions()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public ResultSet getProcedures( final String originalCatalog, final String originalSchemaPattern, final String procedureNamePattern) throws SQLException { raiseSQLExceptionIfConnectionIsClosed(); Statement statement = connection.createStatement(); logger.trace( "public ResultSet getProcedures(String originalCatalog, " + "String originalSchemaPattern,String procedureNamePattern)", false); String showProcedureCommand = getFirstResultSetCommand( originalCatalog, originalSchemaPattern, procedureNamePattern, "procedures"); if (showProcedureCommand.isEmpty()) { return SnowflakeDatabaseMetaDataResultSet.getEmptyResultSet(GET_PROCEDURES, statement); } ContextAwareMetadataSearch result = applySessionContext(originalCatalog, originalSchemaPattern); String catalog = result.database(); String schemaPattern = result.schema(); boolean isExactSchema = result.isExactSchema(); final Pattern compiledSchemaPattern = Wildcard.toRegexPattern(schemaPattern, true); final Pattern compiledProcedurePattern = Wildcard.toRegexPattern(procedureNamePattern, true); ResultSet resultSet = executeAndReturnEmptyResultIfNotFound(statement, showProcedureCommand, GET_PROCEDURES); sendInBandTelemetryMetadataMetrics( resultSet, "getProcedures", catalog, schemaPattern, procedureNamePattern, "none"); return new SnowflakeDatabaseMetaDataQueryResultSet(GET_PROCEDURES, resultSet, statement) { public boolean next() throws SQLException { logger.trace("boolean next()", false); incrementRow(); // iterate throw the show table result until we find an entry // that matches the table name while (showObjectResultSet.next()) { String catalogName = showObjectResultSet.getString("catalog_name"); String schemaName = showObjectResultSet.getString("schema_name"); String procedureName = showObjectResultSet.getString("name"); String remarks = showObjectResultSet.getString("description"); String specificName = showObjectResultSet.getString("arguments"); short procedureType = procedureReturnsResult; if ((compiledProcedurePattern == null || compiledProcedurePattern.matcher(procedureName).matches()) && (compiledSchemaPattern == null || compiledSchemaPattern.matcher(schemaName).matches() || isExactSchema && schemaPattern.equals(schemaPattern))) { logger.trace("Found a matched function:" + schemaName + "." 
+ procedureName); nextRow[0] = catalogName; nextRow[1] = schemaName; nextRow[2] = procedureName; nextRow[3] = remarks; nextRow[4] = procedureType; nextRow[5] = specificName; return true; } } close(); return false; } }; } @Override public ResultSet getProcedureColumns( final String catalog, final String schemaPattern, final String procedureNamePattern, final String columnNamePattern) throws SQLException { logger.trace( "public ResultSet getProcedureColumns(String catalog, " + "String schemaPattern,String procedureNamePattern," + "String columnNamePattern)", false); raiseSQLExceptionIfConnectionIsClosed(); Statement statement = connection.createStatement(); boolean addAllRows = false; String showProcedureCommand = getFirstResultSetCommand(catalog, schemaPattern, procedureNamePattern, "procedures"); if (showProcedureCommand.isEmpty()) { return SnowflakeDatabaseMetaDataResultSet.getEmptyResultSet(GET_PROCEDURE_COLUMNS, statement); } final Pattern compiledSchemaPattern = Wildcard.toRegexPattern(schemaPattern, true); final Pattern compiledProcedurePattern = Wildcard.toRegexPattern(procedureNamePattern, true); if (columnNamePattern == null || columnNamePattern.isEmpty() || columnNamePattern.trim().equals("%") || columnNamePattern.trim().equals(".*")) { addAllRows = true; } ResultSet resultSetStepOne = executeAndReturnEmptyResultIfNotFound( statement, showProcedureCommand, GET_PROCEDURE_COLUMNS); sendInBandTelemetryMetadataMetrics( resultSetStepOne, "getProcedureColumns", catalog, schemaPattern, procedureNamePattern, columnNamePattern); ArrayList rows = new ArrayList(); while (resultSetStepOne.next()) { String procedureNameUnparsed = resultSetStepOne.getString("arguments").trim(); String procedureNameNoArgs = resultSetStepOne.getString("name"); String schemaName = resultSetStepOne.getString("schema_name"); // Check that schema name match the original input // And check special case - schema with special name in quotes boolean isSchemaNameMatch = compiledSchemaPattern != null && (compiledSchemaPattern.matcher(schemaName).matches() || (schemaName.startsWith("\"") && schemaName.endsWith("\"") && compiledSchemaPattern .matcher(schemaName) .region(1, schemaName.length() - 1) .matches())); // Check that procedure name and schema name match the original input in case wildcards have // been used. // Procedure name column check must occur later when columns are parsed. if ((compiledProcedurePattern != null && !compiledProcedurePattern.matcher(procedureNameNoArgs).matches()) || (compiledSchemaPattern != null && !isSchemaNameMatch)) { continue; } String catalogName = resultSetStepOne.getString("catalog_name"); String showProcedureColCommand = getSecondResultSetCommand(catalogName, schemaName, procedureNameUnparsed, "procedure"); ResultSet resultSetStepTwo = executeAndReturnEmptyResultIfNotFound( statement, showProcedureColCommand, GET_PROCEDURE_COLUMNS); if (resultSetStepTwo.next() == false) { continue; } // Retrieve the procedure arguments and procedure return values. 
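// The desc output is consumed positionally: the first row's "value" cell is expected to hold
// the argument signature and the second row's the return type, e.g. (illustrative values only)
// row 1 "value" = "(NUM NUMBER, MSG VARCHAR)" and row 2 "value" = "VARCHAR".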
String args = resultSetStepTwo.getString("value"); resultSetStepTwo.next(); String res = resultSetStepTwo.getString("value"); // parse procedure arguments and return values into a list of columns // result value(s) will be at the top of the list, followed by any arguments List procedureCols = parseColumns(res, args); String paramNames[] = new String[procedureCols.size() / 2]; String paramTypes[] = new String[procedureCols.size() / 2]; if (procedureCols.size() > 1) { for (int i = 0; i < procedureCols.size(); i++) { if (i % 2 == 0) { paramNames[i / 2] = procedureCols.get(i); } else { paramTypes[i / 2] = procedureCols.get(i); } } } for (int i = 0; i < paramNames.length; i++) { // if it's the 1st in for loop, it's the result if (i == 0 || paramNames[i].equalsIgnoreCase(columnNamePattern) || addAllRows) { Object[] nextRow = new Object[20]; // add a row to resultSet nextRow[0] = catalog; // catalog. Can be null. nextRow[1] = schemaName; // schema. Can be null. nextRow[2] = procedureNameNoArgs; // procedure name nextRow[3] = paramNames[i]; // column/parameter name // column type if (i == 0 && procedureResultsetColumnNum < 0) { nextRow[4] = procedureColumnReturn; } else if (procedureResultsetColumnNum >= 0 && i < procedureResultsetColumnNum) { nextRow[4] = procedureColumnResult; } else { nextRow[4] = procedureColumnIn; // kind of column/parameter } String typeName = paramTypes[i]; String typeNameTrimmed = typeName; // don't include nullability in type name, such as NUMBER NOT NULL. Just include NUMBER. if (typeName.contains(" NOT NULL")) { typeNameTrimmed = typeName.substring(0, typeName.indexOf(' ')); } // don't include column size in type name if (typeNameTrimmed.contains("(") && typeNameTrimmed.contains(")")) { typeNameTrimmed = typeNameTrimmed.substring(0, typeNameTrimmed.indexOf('(')); } int type = convertStringToType(typeName); nextRow[5] = type; // data type nextRow[6] = typeNameTrimmed; // type name // precision and scale. Values only exist for numbers int precision = 38; short scale = 0; if (type < 10) { if (typeName.contains("(") && typeName.contains(")")) { precision = Integer.parseInt( typeName.substring(typeName.indexOf('(') + 1, typeName.indexOf(','))); scale = Short.parseShort( typeName.substring(typeName.indexOf(',') + 1, typeName.indexOf(')'))); nextRow[7] = precision; nextRow[9] = scale; } else { nextRow[7] = precision; nextRow[9] = scale; } } else { nextRow[7] = 0; nextRow[9] = null; } nextRow[8] = 0; // length in bytes. not supported nextRow[10] = 10; // radix. Probably 10 is default, but unknown. // if type specifies "not null", no null values are allowed. if (typeName.toLowerCase().contains("not null")) { nextRow[11] = procedureNoNulls; nextRow[18] = "NO"; } // if the type is a return value (only when i = 0), it can always be specified as "not // null." The fact that // this isn't specified means it has nullable return values. else if (i == 0) { nextRow[11] = procedureNullable; nextRow[18] = "YES"; } // if the row is for an input parameter, it's impossible to know from the description // whether the values // are allowed to be null or not. Nullability is unknown. else { nextRow[11] = procedureNullableUnknown; // nullable. We don't know from current function info. nextRow[18] = ""; } nextRow[12] = resultSetStepOne.getString("description").trim(); // remarks nextRow[13] = null; // default value for column. 
Not supported nextRow[14] = 0; // Sql data type: reserved for future use nextRow[15] = 0; // sql datetime sub: reserved for future use // char octet length if (type == Types.BINARY || type == Types.VARBINARY || type == Types.CHAR || type == Types.VARCHAR) { if (typeName.contains("(") && typeName.contains(")")) { int char_octet_len = Integer.parseInt( typeName.substring(typeName.indexOf('(') + 1, typeName.indexOf(')'))); nextRow[16] = char_octet_len; } else if (type == Types.CHAR || type == Types.VARCHAR) { nextRow[16] = getMaxCharLiteralLength(); } else if (type == Types.BINARY || type == Types.VARBINARY) { nextRow[16] = getMaxBinaryLiteralLength(); } } else { nextRow[16] = null; } // the ordinal position is 0 for a return value. // for result set columns, the ordinal position is of the column in the result set // starting at 1 if (procedureResultsetColumnNum >= 0) { if (i < procedureResultsetColumnNum) { nextRow[17] = i + 1; } else { nextRow[17] = i - procedureResultsetColumnNum + 1; } } else { nextRow[17] = i; // ordinal position. } nextRow[19] = procedureNameUnparsed; // specific name rows.add(nextRow); } } } Object[][] resultRows = new Object[rows.size()][20]; for (int i = 0; i < resultRows.length; i++) { resultRows[i] = rows.get(i); } return new SnowflakeDatabaseMetaDataResultSet(GET_PROCEDURE_COLUMNS, resultRows, statement); } // apply session context when catalog is unspecified private ContextAwareMetadataSearch applySessionContext(String catalog, String schemaPattern) { if (metadataRequestUseConnectionCtx) { // CLIENT_METADATA_USE_SESSION_DATABASE = TRUE if (catalog == null) { catalog = session.getDatabase(); } if (schemaPattern == null) { schemaPattern = session.getSchema(); useSessionSchema = true; } } else { if (metadataRequestUseSessionDatabase) { if (catalog == null) { catalog = session.getDatabase(); } } } return new ContextAwareMetadataSearch( catalog, schemaPattern, (exactSchemaSearchEnabled && useSessionSchema) || !enableWildcardsInShowMetadataCommands); } /* helper function for getProcedures, getFunctionColumns, etc. Returns sql command to show some type of result such as procedures or udfs */ private String getFirstResultSetCommand( String catalog, String schemaPattern, String name, String type) { // apply session context when catalog is unspecified ContextAwareMetadataSearch result = applySessionContext(catalog, schemaPattern); catalog = result.database(); schemaPattern = result.schema(); boolean isExactSchema = result.isExactSchema(); String showProcedureCommand = "show /* JDBC:DatabaseMetaData.getProcedures() */ " + type; if (name != null && !name.isEmpty() && !name.trim().equals("%") && !name.trim().equals(".*")) { showProcedureCommand += " like '" + escapeSingleQuoteForLikeCommand(name) + "'"; } if (catalog == null) { showProcedureCommand += " in account"; } else if (catalog.isEmpty()) { return ""; } else { String catalogEscaped = escapeSqlQuotes(catalog); if (!isExactSchema && (schemaPattern == null || isSchemaNameWildcardPattern(schemaPattern))) { showProcedureCommand += " in database \"" + catalogEscaped + "\""; } else if (schemaPattern.isEmpty()) { return ""; } else { schemaPattern = unescapeChars(schemaPattern); showProcedureCommand += " in schema \"" + catalogEscaped + "\".\"" + schemaPattern + "\""; } } logger.debug("Sql command to get column metadata: {}", showProcedureCommand); return showProcedureCommand; } /* another helper function for getProcedures, getFunctionColumns, etc. 
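The name argument is expected in the form produced by the SHOW command's "arguments" column,
e.g. (illustrative) MYPROC(FLOAT, NUMBER) RETURN VARCHAR; the part before the first '(' is
wrapped in double quotes and the parenthesized argument list is appended to it.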
Returns sql command that describes procedures or functions */ private String getSecondResultSetCommand( String catalog, String schemaPattern, String name, String type) { if (isNullOrEmpty(name)) { return ""; } String procedureCols = name.substring(name.indexOf("("), name.indexOf(" RETURN")); String quotedName = "\"" + name.substring(0, name.indexOf("(")) + "\""; String procedureName = quotedName + procedureCols; String showProcedureColCommand; if (!isNullOrEmpty(catalog) && !isNullOrEmpty(schemaPattern)) { showProcedureColCommand = "desc " + type + " " + catalog + "." + schemaPattern + "." + procedureName; } else if (!isNullOrEmpty(schemaPattern)) { showProcedureColCommand = "desc " + type + " " + schemaPattern + "." + procedureName; } else { showProcedureColCommand = "desc " + type + " " + procedureName; } return showProcedureColCommand; } @Override public ResultSet getTables( String originalCatalog, String originalSchemaPattern, final String tableNamePattern, final String[] types) throws SQLException { logger.trace( "public ResultSet getTables(String catalog={}, String " + "schemaPattern={}, String tableNamePattern={}, String[] types={})", originalCatalog, originalSchemaPattern, tableNamePattern, (ArgSupplier) () -> Arrays.toString(types)); raiseSQLExceptionIfConnectionIsClosed(); Set supportedTableTypes = new HashSet<>(); ResultSet resultSet = getTableTypes(); while (resultSet.next()) { supportedTableTypes.add(resultSet.getString("TABLE_TYPE")); } resultSet.close(); List inputValidTableTypes = new ArrayList<>(); // then filter on the input table types; if (types != null) { for (String t : types) { if (supportedTableTypes.contains(t)) { inputValidTableTypes.add(t); } } } else { inputValidTableTypes = new ArrayList(supportedTableTypes); } // if the input table types don't have types supported by Snowflake, // then return an empty result set directly Statement statement = connection.createStatement(); if (inputValidTableTypes.size() == 0) { return SnowflakeDatabaseMetaDataResultSet.getEmptyResultSet(GET_TABLES, statement); } ContextAwareMetadataSearch result = applySessionContext(originalCatalog, originalSchemaPattern); String catalog = result.database(); String schemaPattern = result.schema(); boolean isExactSchema = result.isExactSchema(); final Pattern compiledSchemaPattern = Wildcard.toRegexPattern(schemaPattern, true); final Pattern compiledTablePattern = Wildcard.toRegexPattern(tableNamePattern, true); String showTablesCommand = null; final boolean viewOnly = inputValidTableTypes.size() == 1 && "VIEW".equalsIgnoreCase(inputValidTableTypes.get(0)); final boolean tableOnly = inputValidTableTypes.size() == 1 && "TABLE".equalsIgnoreCase(inputValidTableTypes.get(0)); if (viewOnly) { showTablesCommand = "show /* JDBC:DatabaseMetaData.getTables() */ views"; } else if (tableOnly) { showTablesCommand = "show /* JDBC:DatabaseMetaData.getTables() */ tables"; } else { showTablesCommand = "show /* JDBC:DatabaseMetaData.getTables() */ objects"; } // only add pattern if it is not empty and not matching all character. 
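// As a hypothetical example, tableNamePattern "ORDERS_%" yields: show ... tables like 'ORDERS_%',
// while "%" or ".*" is treated as match-everything and no LIKE clause is added.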
if (tableNamePattern != null && !tableNamePattern.isEmpty() && !tableNamePattern.trim().equals("%") && !tableNamePattern.trim().equals(".*")) { showTablesCommand += " like '" + escapeSingleQuoteForLikeCommand(tableNamePattern) + "'"; } if (catalog == null) { showTablesCommand += " in account"; } else if (catalog.isEmpty()) { return SnowflakeDatabaseMetaDataResultSet.getEmptyResultSet(GET_TABLES, statement); } else { String catalogEscaped = escapeSqlQuotes(catalog); // if the schema pattern is a deterministic identifier, specify schema // in the show command. This is necessary for us to see any tables in // a schema if the current schema a user is connected to is different // given that we don't support show tables without a known schema. if (schemaPattern == null || isSchemaNameWildcardPattern(schemaPattern)) { showTablesCommand += " in database \"" + catalogEscaped + "\""; } else if (schemaPattern.isEmpty()) { return SnowflakeDatabaseMetaDataResultSet.getEmptyResultSet(GET_TABLES, statement); } else { String schemaUnescaped = isExactSchema ? schemaPattern : unescapeChars(schemaPattern); showTablesCommand += " in schema \"" + catalogEscaped + "\".\"" + schemaUnescaped + "\""; } } logger.debug("Sql command to get table metadata: {}", showTablesCommand); resultSet = executeAndReturnEmptyResultIfNotFound(statement, showTablesCommand, GET_TABLES); sendInBandTelemetryMetadataMetrics( resultSet, "getTables", originalCatalog, originalSchemaPattern, tableNamePattern, Arrays.toString(types)); return new SnowflakeDatabaseMetaDataQueryResultSet(GET_TABLES, resultSet, statement) { @Override public boolean next() throws SQLException { logger.trace("boolean next()", false); incrementRow(); // iterate through the show table result until we find an entry // that matches the table name while (showObjectResultSet.next()) { String tableName = showObjectResultSet.getString(2); String dbName; String schemaName; String kind; String comment; if (viewOnly) { dbName = showObjectResultSet.getString(4); schemaName = showObjectResultSet.getString(5); kind = "VIEW"; comment = showObjectResultSet.getString(7); } else { dbName = showObjectResultSet.getString(3); schemaName = showObjectResultSet.getString(4); kind = showObjectResultSet.getString(5); comment = showObjectResultSet.getString(6); } if ((compiledTablePattern == null || compiledTablePattern.matcher(tableName).matches()) && (compiledSchemaPattern == null || compiledSchemaPattern.matcher(schemaName).matches())) { nextRow[0] = dbName; nextRow[1] = schemaName; nextRow[2] = tableName; nextRow[3] = kind; nextRow[4] = comment; nextRow[5] = null; nextRow[6] = null; nextRow[7] = null; nextRow[8] = null; nextRow[9] = null; return true; } } close(); return false; } }; } @Override public ResultSet getSchemas() throws SQLException { logger.trace("ResultSet getSchemas()", false); return getSchemas(null, null); } @Override public ResultSet getCatalogs() throws SQLException { logger.trace("ResultSet getCatalogs()", false); raiseSQLExceptionIfConnectionIsClosed(); String showDB = "show /* JDBC:DatabaseMetaData.getCatalogs() */ databases in account"; Statement statement = connection.createStatement(); return new SnowflakeDatabaseMetaDataQueryResultSet( GET_CATALOGS, statement.executeQuery(showDB), statement) { @Override public boolean next() throws SQLException { logger.trace("boolean next()", false); incrementRow(); // iterate through the show databases result if (showObjectResultSet.next()) { String dbName = showObjectResultSet.getString(2); nextRow[0] = dbName; return true; }
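// no more databases to return; close the result set and end the iteration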
close(); return false; } }; } @Override public ResultSet getTableTypes() throws SQLException { logger.trace("ResultSet getTableTypes()", false); raiseSQLExceptionIfConnectionIsClosed(); Statement statement = connection.createStatement(); // TODO: We should really get the list of table types from GS return new SnowflakeDatabaseMetaDataResultSet( Collections.singletonList("TABLE_TYPE"), Collections.singletonList("TEXT"), Collections.singletonList(Types.VARCHAR), new Object[][] {{"TABLE"}, {"VIEW"}}, statement); } @Override public ResultSet getColumns( String catalog, String schemaPattern, final String tableNamePattern, final String columnNamePattern) throws SQLException { return getColumns(catalog, schemaPattern, tableNamePattern, columnNamePattern, false); } public ResultSet getColumns( String originalCatalog, String originalSchemaPattern, final String tableNamePattern, final String columnNamePattern, final boolean extendedSet) throws SQLException { logger.trace( "public ResultSet getColumns(String catalog={}, String schemaPattern={}, " + "String tableNamePattern={}, String columnNamePattern={}, boolean extendedSet={}", originalCatalog, originalSchemaPattern, tableNamePattern, columnNamePattern, extendedSet); raiseSQLExceptionIfConnectionIsClosed(); Statement statement = connection.createStatement(); // apply session context when catalog is unspecified ContextAwareMetadataSearch result = applySessionContext(originalCatalog, originalSchemaPattern); String catalog = result.database(); String schemaPattern = result.schema(); boolean isExactSchema = result.isExactSchema(); final Pattern compiledSchemaPattern = Wildcard.toRegexPattern(schemaPattern, true); final Pattern compiledTablePattern = Wildcard.toRegexPattern(tableNamePattern, true); final Pattern compiledColumnPattern = Wildcard.toRegexPattern(columnNamePattern, true); String showColumnsCommand = "show /* JDBC:DatabaseMetaData.getColumns() */ columns"; if (columnNamePattern != null && !columnNamePattern.isEmpty() && !columnNamePattern.trim().equals("%") && !columnNamePattern.trim().equals(".*")) { showColumnsCommand += " like '" + escapeSingleQuoteForLikeCommand(columnNamePattern) + "'"; } if (catalog == null) { showColumnsCommand += " in account"; } else if (catalog.isEmpty()) { return SnowflakeDatabaseMetaDataResultSet.getEmptyResultSet( extendedSet ? GET_COLUMNS_EXTENDED_SET : GET_COLUMNS, statement); } else { String catalogEscaped = escapeSqlQuotes(catalog); if (schemaPattern == null || isSchemaNameWildcardPattern(schemaPattern)) { showColumnsCommand += " in database \"" + catalogEscaped + "\""; } else if (schemaPattern.isEmpty()) { return SnowflakeDatabaseMetaDataResultSet.getEmptyResultSet( extendedSet ? GET_COLUMNS_EXTENDED_SET : GET_COLUMNS, statement); } else { String schemaUnescaped = isExactSchema ? schemaPattern : unescapeChars(schemaPattern); if (tableNamePattern == null || (Wildcard.isWildcardPatternStr(tableNamePattern) && enableWildcardsInShowMetadataCommands)) { showColumnsCommand += " in schema \"" + catalogEscaped + "\".\"" + schemaUnescaped + "\""; } else if (tableNamePattern.isEmpty()) { return SnowflakeDatabaseMetaDataResultSet.getEmptyResultSet( extendedSet ? 
GET_COLUMNS_EXTENDED_SET : GET_COLUMNS, statement); } else { String tableNameUnescaped = unescapeChars(tableNamePattern); showColumnsCommand += " in table \"" + catalogEscaped + "\".\"" + schemaUnescaped + "\".\"" + tableNameUnescaped + "\""; } } } logger.debug("Sql command to get column metadata: {}", showColumnsCommand); ResultSet resultSet = executeAndReturnEmptyResultIfNotFound( statement, showColumnsCommand, extendedSet ? GET_COLUMNS_EXTENDED_SET : GET_COLUMNS); sendInBandTelemetryMetadataMetrics( resultSet, "getColumns", originalCatalog, originalSchemaPattern, tableNamePattern, columnNamePattern); return new SnowflakeDatabaseMetaDataQueryResultSet( extendedSet ? GET_COLUMNS_EXTENDED_SET : GET_COLUMNS, resultSet, statement) { int ordinalPosition = 0; String currentTableName = null; public boolean next() throws SQLException { logger.trace("boolean next()", false); incrementRow(); // iterate through the show columns result until we find an entry // that matches the requested patterns while (showObjectResultSet.next()) { String tableName = showObjectResultSet.getString(1); String schemaName = showObjectResultSet.getString(2); String columnName = showObjectResultSet.getString(3); String dataTypeStr = showObjectResultSet.getString(4); String defaultValue = showObjectResultSet.getString(6); defaultValue = defaultValue == null ? "" : defaultValue.trim(); if (defaultValue.isEmpty()) { defaultValue = null; } else if (!stringsQuoted) { if (defaultValue.startsWith("\'") && defaultValue.endsWith("\'")) { // remove extra set of single quotes defaultValue = defaultValue.substring(1, defaultValue.length() - 1); // scan for 2 single quotes in a row and remove one of them defaultValue = defaultValue.replace("''", "'"); } } String comment = showObjectResultSet.getString(9); String catalogName = showObjectResultSet.getString(10); String autoIncrement = showObjectResultSet.getString(11); if ((compiledTablePattern == null || compiledTablePattern.matcher(tableName).matches()) && (compiledSchemaPattern == null || compiledSchemaPattern.matcher(schemaName).matches()) && (compiledColumnPattern == null || compiledColumnPattern.matcher(columnName).matches())) { logger.debug("Found a matching column: {}.{}", tableName, columnName); // reset ordinal position for new table if (!tableName.equals(currentTableName)) { ordinalPosition = 1; currentTableName = tableName; } else { ordinalPosition++; } JsonNode jsonNode; try { jsonNode = mapper.readTree(dataTypeStr); } catch (Exception ex) { logger.error("Exception when parsing column result", ex); throw new SnowflakeSQLException( SqlState.INTERNAL_ERROR, ErrorCode.INTERNAL_ERROR.getMessageCode(), "error parsing data type: " + dataTypeStr); } logger.debug("Data type string: {}", dataTypeStr); SnowflakeColumnMetadata columnMetadata = new SnowflakeColumnMetadata(jsonNode, session.isJdbcTreatDecimalAsInt(), session); logger.debug("Nullable: {}", columnMetadata.isNullable()); // SNOW-16881: add catalog name nextRow[0] = catalogName; nextRow[1] = schemaName; nextRow[2] = tableName; nextRow[3] = columnName; int internalColumnType = columnMetadata.getType(); int externalColumnType = internalColumnType; if (internalColumnType == SnowflakeType.EXTRA_TYPES_TIMESTAMP_LTZ) { externalColumnType = Types.TIMESTAMP; } if (internalColumnType == SnowflakeType.EXTRA_TYPES_TIMESTAMP_TZ) { externalColumnType = session == null ? Types.TIMESTAMP_WITH_TIMEZONE : session.getEnableReturnTimestampWithTimeZone() ?
Types.TIMESTAMP_WITH_TIMEZONE : Types.TIMESTAMP; } nextRow[4] = externalColumnType; nextRow[5] = columnMetadata.getTypeName(); nextRow[6] = getColumnSize(columnMetadata); nextRow[7] = null; nextRow[8] = columnMetadata.getScale(); nextRow[9] = null; nextRow[10] = (columnMetadata.isNullable() ? columnNullable : columnNoNulls); logger.debug("Returning nullable: {}", nextRow[10]); nextRow[11] = comment; nextRow[12] = defaultValue; // snow-10597: sql data type is integer instead of string nextRow[13] = externalColumnType; nextRow[14] = null; nextRow[15] = (columnMetadata.getType() == Types.VARCHAR || columnMetadata.getType() == Types.CHAR) ? columnMetadata.getLength() : null; nextRow[16] = ordinalPosition; nextRow[17] = (columnMetadata.isNullable() ? "YES" : "NO"); nextRow[18] = null; nextRow[19] = null; nextRow[20] = null; nextRow[21] = null; nextRow[22] = "".equals(autoIncrement) ? "NO" : "YES"; nextRow[23] = "NO"; if (extendedSet) { nextRow[24] = columnMetadata.getBase().name(); } return true; } } close(); return false; } }; } static Integer getColumnSize(SnowflakeColumnMetadata columnMetadata) { // The COLUMN_SIZE column specifies the column size for the given column. For numeric data, this // is the maximum precision. For character data, this is the length in characters. For datetime // datatypes, this is the length in characters of the String representation (assuming the // maximum allowed precision of the fractional seconds component). For binary data, this is the // length in bytes. For the ROWID datatype, this is the length in bytes. Null is returned for // data types where the column size is not applicable. switch (columnMetadata.getType()) { // Character data types case Types.CHAR: case Types.VARCHAR: return columnMetadata.getLength(); // Binary data types case Types.BINARY: case Types.VARBINARY: return columnMetadata.getLength(); // All numeric and datetime types - getPrecision() handles both correctly case Types.DECIMAL: case Types.NUMERIC: case Types.BIGINT: case Types.INTEGER: case Types.SMALLINT: case Types.TINYINT: case Types.FLOAT: case Types.DOUBLE: case Types.REAL: case Types.DATE: case Types.TIME: case Types.TIMESTAMP: case Types.TIMESTAMP_WITH_TIMEZONE: case SnowflakeType.EXTRA_TYPES_TIMESTAMP_LTZ: case SnowflakeType.EXTRA_TYPES_TIMESTAMP_TZ: case SnowflakeType.EXTRA_TYPES_TIMESTAMP_NTZ: case SnowflakeType.EXTRA_TYPES_DECFLOAT: return columnMetadata.getPrecision(); // For VECTOR Snowflake type we consider dimension as the column size case SnowflakeType.EXTRA_TYPES_VECTOR: return columnMetadata.getDimension(); // For all other types (BOOLEAN, ARRAY, OBJECT, etc.) 
return null as per JDBC spec // requirement for non-applicable types default: return null; } } @Override public ResultSet getColumnPrivileges( String catalog, String schema, String table, String columnNamePattern) throws SQLException { logger.trace( "public ResultSet getColumnPrivileges(String catalog, " + "String schema,String table, String columnNamePattern)", false); raiseSQLExceptionIfConnectionIsClosed(); Statement statement = connection.createStatement(); return new SnowflakeDatabaseMetaDataResultSet( Arrays.asList( "TABLE_CAT", "TABLE_SCHEM", "TABLE_NAME", "COLUMN_NAME", "GRANTOR", "GRANTEE", "PRIVILEGE", "IS_GRANTABLE"), Arrays.asList("TEXT", "TEXT", "TEXT", "TEXT", "TEXT", "TEXT", "TEXT", "TEXT"), Arrays.asList( Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR), new Object[][] {}, statement); } @Override public ResultSet getTablePrivileges( String originalCatalog, String originalSchemaPattern, final String tableNamePattern) throws SQLException { logger.trace( "public ResultSet getTablePrivileges(String catalog, " + "String schemaPattern,String tableNamePattern)", false); raiseSQLExceptionIfConnectionIsClosed(); Statement statement = connection.createStatement(); if (tableNamePattern == null) { return SnowflakeDatabaseMetaDataResultSet.getEmptyResultSet(GET_TABLE_PRIVILEGES, statement); } // apply session context when catalog is unspecified ContextAwareMetadataSearch result = applySessionContext(originalCatalog, originalSchemaPattern); String catalog = result.database(); String schemaPattern = result.schema(); boolean isExactSchema = result.isExactSchema(); String showView = "select * from "; if (catalog != null && !catalog.isEmpty() && !catalog.trim().equals("%") && !catalog.trim().equals(".*")) { showView += "\"" + escapeSqlQuotes(catalog) + "\"."; } showView += "information_schema.table_privileges"; if (tableNamePattern != null && !tableNamePattern.isEmpty() && !tableNamePattern.trim().equals("%") && !tableNamePattern.trim().equals(".*")) { showView += " where table_name = '" + tableNamePattern + "'"; } if (schemaPattern != null && !schemaPattern.isEmpty() && !schemaPattern.trim().equals("%") && !schemaPattern.trim().equals(".*")) { String unescapedSchema = isExactSchema ? 
schemaPattern : unescapeChars(schemaPattern); if (showView.contains("where table_name")) { showView += " and table_schema = '" + unescapedSchema + "'"; } else { showView += " where table_schema = '" + unescapedSchema + "'"; } } showView += " order by table_catalog, table_schema, table_name, privilege_type"; final String catalogIn = catalog; final String schemaIn = schemaPattern; final String tableIn = tableNamePattern; ResultSet resultSet = executeAndReturnEmptyResultIfNotFound(statement, showView, GET_TABLE_PRIVILEGES); sendInBandTelemetryMetadataMetrics( resultSet, "getTablePrivileges", originalCatalog, originalSchemaPattern, tableNamePattern, "none"); return new SnowflakeDatabaseMetaDataQueryResultSet(GET_TABLE_PRIVILEGES, resultSet, statement) { @Override public boolean next() throws SQLException { logger.trace("boolean next()", false); incrementRow(); while (showObjectResultSet.next()) { String table_cat = showObjectResultSet.getString("TABLE_CATALOG"); String table_schema = showObjectResultSet.getString("TABLE_SCHEMA"); String table_name = showObjectResultSet.getString("TABLE_NAME"); String grantor = showObjectResultSet.getString("GRANTOR"); String grantee = showObjectResultSet.getString("GRANTEE"); String privilege = showObjectResultSet.getString("PRIVILEGE_TYPE"); String is_grantable = showObjectResultSet.getString("IS_GRANTABLE"); if ((catalogIn == null || catalogIn.trim().equals("%") || catalogIn.trim().equals(table_cat)) && (schemaIn == null || schemaIn.trim().equals("%") || schemaIn.trim().equals(table_schema)) && (tableIn.trim().equals(table_name) || tableIn.trim().equals("%"))) { nextRow[0] = table_cat; nextRow[1] = table_schema; nextRow[2] = table_name; nextRow[3] = grantor; nextRow[4] = grantee; nextRow[5] = privilege; nextRow[6] = is_grantable; return true; } } close(); return false; } }; } @Override public ResultSet getBestRowIdentifier( String catalog, String schema, String table, int scope, boolean nullable) throws SQLException { logger.trace( "public ResultSet getBestRowIdentifier(String catalog, " + "String schema,String table, int scope,boolean nullable)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public ResultSet getVersionColumns(String catalog, String schema, String table) throws SQLException { logger.trace( "public ResultSet getVersionColumns(String catalog, " + "String schema, String table)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public ResultSet getPrimaryKeys(String originalCatalog, String originalSchema, final String table) throws SQLException { logger.trace( "public ResultSet getPrimaryKeys(String catalog={}, " + "String schema={}, String table={})", originalCatalog, originalSchema, table); raiseSQLExceptionIfConnectionIsClosed(); Statement statement = connection.createStatement(); String showPKCommand = "show /* JDBC:DatabaseMetaData.getPrimaryKeys() */ primary keys in "; // apply session context when catalog is unspecified ContextAwareMetadataSearch result = applySessionContext(originalCatalog, originalSchema); String catalog = result.database(); String schema = result.schema(); boolean isExactSchema = result.isExactSchema(); // These Patterns will only be used if the connection property enablePatternSearch=true final Pattern compiledSchemaPattern = Wildcard.toRegexPattern(schema, true); final Pattern compiledTablePattern = Wildcard.toRegexPattern(table, true); if (catalog == null) { showPKCommand += "account"; } else if (catalog.isEmpty()) { return 
SnowflakeDatabaseMetaDataResultSet.getEmptyResultSet(GET_PRIMARY_KEYS, statement); } else { String catalogUnescaped = escapeSqlQuotes(catalog); if (schema == null || (isPatternMatchingEnabled && isSchemaNameWildcardPattern(schema))) { showPKCommand += "database \"" + catalogUnescaped + "\""; } else if (schema.isEmpty()) { return SnowflakeDatabaseMetaDataResultSet.getEmptyResultSet(GET_PRIMARY_KEYS, statement); } else { String schemaUnescaped = isExactSchema ? schema : unescapeChars(schema); if (table == null) { showPKCommand += "schema \"" + catalogUnescaped + "\".\"" + schemaUnescaped + "\""; } else if (table.isEmpty()) { return SnowflakeDatabaseMetaDataResultSet.getEmptyResultSet(GET_PRIMARY_KEYS, statement); } else { String tableUnescaped = unescapeChars(table); showPKCommand += "table \"" + catalogUnescaped + "\".\"" + schemaUnescaped + "\".\"" + tableUnescaped + "\""; } } } final String catalogIn = catalog; // These values for Schema and Table will only be used to filter results if the connection // property // enablePatternSearch=false final String schemaIn = schema; final String tableIn = table; logger.debug("Sql command to get primary key metadata: {}", showPKCommand); ResultSet resultSet = executeAndReturnEmptyResultIfNotFound(statement, showPKCommand, GET_PRIMARY_KEYS); sendInBandTelemetryMetadataMetrics( resultSet, "getPrimaryKeys", originalCatalog, originalSchema, table, "none"); // Wrap the show result set and post-filter each row against the requested catalog, schema and table return new SnowflakeDatabaseMetaDataQueryResultSet(GET_PRIMARY_KEYS, resultSet, statement) { @Override public boolean next() throws SQLException { logger.trace("boolean next()", false); incrementRow(); while (showObjectResultSet.next()) { // Get the values for each field to display String table_cat = showObjectResultSet.getString(2); String table_schem = showObjectResultSet.getString(3); String table_name = showObjectResultSet.getString(4); String column_name = showObjectResultSet.getString(5); int key_seq = showObjectResultSet.getInt(6); String pk_name = showObjectResultSet.getString(7); boolean isMatch = false; // Post filter based on the input if (isPatternMatchingEnabled) { isMatch = isPrimaryKeyPatternSearch( table_cat, table_schem, table_name, column_name, key_seq, pk_name); } else { isMatch = isPrimaryKeyExactSearch( table_cat, table_schem, table_name, column_name, key_seq, pk_name); } if (isMatch) { createPrimaryKeyRow(table_cat, table_schem, table_name, column_name, key_seq, pk_name); return true; } } close(); return false; } private boolean isPrimaryKeyExactSearch( String table_cat, String table_schem, String table_name, String column_name, int key_seq, String pk_name) { if ((catalogIn == null || catalogIn.equals(table_cat)) && (schemaIn == null || schemaIn.equals(table_schem)) && (tableIn == null || tableIn.equals(table_name))) { return true; } return false; } private boolean isPrimaryKeyPatternSearch( String table_cat, String table_schem, String table_name, String column_name, int key_seq, String pk_name) { if ((catalogIn == null || catalogIn.equals(table_cat)) && (compiledSchemaPattern == null || compiledSchemaPattern.equals(table_schem) || compiledSchemaPattern.matcher(table_schem).matches()) && (compiledTablePattern == null || compiledTablePattern.equals(table_name) || compiledTablePattern.matcher(table_name).matches())) { return true; } return false; } private void createPrimaryKeyRow( String table_cat, String table_schem, String table_name, String column_name, int key_seq, String pk_name) { nextRow[0] = table_cat;
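// --- Illustrative aside (editor's sketch, not part of the driver source) ---
// The assignments in createPrimaryKeyRow follow the JDBC getPrimaryKeys() column
// order: TABLE_CAT, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME, KEY_SEQ, PK_NAME.
// A minimal caller-side usage sketch; the connection variable and table name
// below are hypothetical:
//
//   DatabaseMetaData meta = conn.getMetaData();
//   try (ResultSet pk = meta.getPrimaryKeys(null, "PUBLIC", "ORDERS")) {
//     while (pk.next()) {
//       // KEY_SEQ is 1-based within a composite key; PK_NAME is the constraint name
//       System.out.printf("%s.%s.%s col=%s seq=%d pk=%s%n",
//           pk.getString("TABLE_CAT"), pk.getString("TABLE_SCHEM"),
//           pk.getString("TABLE_NAME"), pk.getString("COLUMN_NAME"),
//           pk.getInt("KEY_SEQ"), pk.getString("PK_NAME"));
//     }
//   }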
nextRow[1] = table_schem; nextRow[2] = table_name; nextRow[3] = column_name; nextRow[4] = key_seq; nextRow[5] = pk_name; } }; } /** * Retrieves the foreign keys * * @param client type of foreign key * @param originalParentCatalog database name * @param originalParentSchema schema name * @param parentTable table name * @param foreignCatalog other database name * @param foreignSchema other schema name * @param foreignTable other table name * @return foreign key columns in result set */ private ResultSet getForeignKeys( final String client, String originalParentCatalog, String originalParentSchema, final String parentTable, final String foreignCatalog, final String foreignSchema, final String foreignTable) throws SQLException { raiseSQLExceptionIfConnectionIsClosed(); Statement statement = connection.createStatement(); StringBuilder commandBuilder = new StringBuilder(); // apply session context when catalog is unspecified ContextAwareMetadataSearch result = applySessionContext(originalParentCatalog, originalParentSchema); String parentCatalog = result.database(); String parentSchema = result.schema(); boolean isExactSchema = result.isExactSchema(); // These Patterns will only be used to filter results if the connection property // enablePatternSearch=true final Pattern compiledSchemaPattern = Wildcard.toRegexPattern(parentSchema, true); final Pattern compiledParentTablePattern = Wildcard.toRegexPattern(parentTable, true); final Pattern compiledForeignSchemaPattern = Wildcard.toRegexPattern(foreignSchema, true); final Pattern compiledForeignTablePattern = Wildcard.toRegexPattern(foreignTable, true); if (client.equals("export") || client.equals("cross")) { commandBuilder.append( "show /* JDBC:DatabaseMetaData.getForeignKeys() */ " + "exported keys in "); } else if (client.equals("import")) { commandBuilder.append( "show /* JDBC:DatabaseMetaData.getForeignKeys() */ " + "imported keys in "); } if (parentCatalog == null) { commandBuilder.append("account"); } else if (parentCatalog.isEmpty()) { return SnowflakeDatabaseMetaDataResultSet.getEmptyResultSet(GET_FOREIGN_KEYS, statement); } else { String unescapedParentCatalog = escapeSqlQuotes(parentCatalog); if (parentSchema == null || (isPatternMatchingEnabled && isSchemaNameWildcardPattern(parentSchema))) { commandBuilder.append("database \"" + unescapedParentCatalog + "\""); } else if (parentSchema.isEmpty()) { return SnowflakeDatabaseMetaDataResultSet.getEmptyResultSet(GET_FOREIGN_KEYS, statement); } else { String unescapedParentSchema = isExactSchema ? 
parentSchema : unescapeChars(parentSchema); if (parentTable == null) { commandBuilder.append( "schema \"" + unescapedParentCatalog + "\".\"" + unescapedParentSchema + "\""); } else if (parentTable.isEmpty()) { return SnowflakeDatabaseMetaDataResultSet.getEmptyResultSet(GET_FOREIGN_KEYS, statement); } else { String unescapedParentTable = unescapeChars(parentTable); commandBuilder.append( "table \"" + unescapedParentCatalog + "\".\"" + unescapedParentSchema + "\".\"" + unescapedParentTable + "\""); } } } final String finalParentCatalog = parentCatalog; final String finalForeignCatalog = foreignCatalog; // These values will only be used to filter results if the connection property // enablePatternSearch=true final String finalParentSchema = parentSchema; final String finalParentTable = parentTable; final String finalForeignSchema = foreignSchema; final String finalForeignTable = foreignTable; String command = commandBuilder.toString(); ResultSet resultSet = executeAndReturnEmptyResultIfNotFound(statement, command, GET_FOREIGN_KEYS); sendInBandTelemetryMetadataMetrics( resultSet, "getForeignKeys", originalParentCatalog, originalParentSchema, parentTable, "none"); return new SnowflakeDatabaseMetaDataQueryResultSet(GET_FOREIGN_KEYS, resultSet, statement) { @Override public boolean next() throws SQLException { logger.trace("boolean next()", false); incrementRow(); while (showObjectResultSet.next()) { // Get the value for each field to display String pktable_cat = showObjectResultSet.getString(2); String pktable_schem = showObjectResultSet.getString(3); String pktable_name = showObjectResultSet.getString(4); String pkcolumn_name = showObjectResultSet.getString(5); String fktable_cat = showObjectResultSet.getString(6); String fktable_schem = showObjectResultSet.getString(7); String fktable_name = showObjectResultSet.getString(8); String fkcolumn_name = showObjectResultSet.getString(9); int key_seq = showObjectResultSet.getInt(10); short updateRule = getForeignKeyConstraintProperty("update", showObjectResultSet.getString(11)); short deleteRule = getForeignKeyConstraintProperty("delete", showObjectResultSet.getString(12)); String fk_name = showObjectResultSet.getString(13); String pk_name = showObjectResultSet.getString(14); short deferrability = getForeignKeyConstraintProperty("deferrability", showObjectResultSet.getString(15)); boolean passedFilter = false; if (isPatternMatchingEnabled) { passedFilter = isForeignKeyPatternMatch( fktable_cat, fktable_schem, fktable_name, passedFilter, pktable_cat, pktable_schem, pktable_name); } else { passedFilter = isForeignKeyExactMatch( fktable_cat, fktable_schem, fktable_name, passedFilter, pktable_cat, pktable_schem, pktable_name); } if (passedFilter) { createForeignKeyRow( pktable_cat, pktable_schem, pktable_name, pkcolumn_name, fktable_cat, fktable_schem, fktable_name, fkcolumn_name, key_seq, updateRule, deleteRule, fk_name, pk_name, deferrability); return true; } } close(); return false; } private void createForeignKeyRow( String pktable_cat, String pktable_schem, String pktable_name, String pkcolumn_name, String fktable_cat, String fktable_schem, String fktable_name, String fkcolumn_name, int key_seq, short updateRule, short deleteRule, String fk_name, String pk_name, short deferrability) { nextRow[0] = pktable_cat; nextRow[1] = pktable_schem; nextRow[2] = pktable_name; nextRow[3] = pkcolumn_name; nextRow[4] = fktable_cat; nextRow[5] = fktable_schem; nextRow[6] = fktable_name; nextRow[7] = fkcolumn_name; nextRow[8] = key_seq; nextRow[9] = updateRule;
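// --- Illustrative aside (editor's sketch, not part of the driver source) ---
// updateRule and deleteRule carry the standard java.sql.DatabaseMetaData
// importedKey* constants produced by getForeignKeyConstraintProperty() below.
// A hedged caller-side helper (the method name is hypothetical) showing how
// those shorts map back to their SQL meanings:
//
//   static String describeRule(short rule) {
//     switch (rule) {
//       case DatabaseMetaData.importedKeyCascade:    return "CASCADE";
//       case DatabaseMetaData.importedKeySetNull:    return "SET NULL";
//       case DatabaseMetaData.importedKeySetDefault: return "SET DEFAULT";
//       case DatabaseMetaData.importedKeyRestrict:   return "RESTRICT";
//       default:                                     return "NO ACTION";
//     }
//   }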
nextRow[10] = deleteRule; nextRow[11] = fk_name; nextRow[12] = pk_name; nextRow[13] = deferrability; } private boolean isForeignKeyExactMatch( String fktable_cat, String fktable_schem, String fktable_name, boolean passedFilter, String pktable_cat, String pktable_schem, String pktable_name) { // Post filter the results based on the client type if (client.equals("import")) { // For imported keys, filter on the foreign key table if ((finalParentCatalog == null || finalParentCatalog.equals(fktable_cat)) && (finalParentSchema == null || finalParentSchema.equals(fktable_schem)) && (finalParentTable == null || finalParentTable.equals(fktable_name))) { passedFilter = true; } } else if (client.equals("export")) { // For exported keys, filter on the primary key table if ((finalParentCatalog == null || finalParentCatalog.equals(pktable_cat)) && (finalParentSchema == null || finalParentSchema.equals(pktable_schem)) && (finalParentTable == null || finalParentTable.equals(pktable_name))) { passedFilter = true; } } else if (client.equals("cross")) { // For cross references, filter on both the primary key and foreign // key table if ((finalParentCatalog == null || finalParentCatalog.equals(pktable_cat)) && (finalParentSchema == null || finalParentSchema.equals(pktable_schem)) && (finalParentTable == null || finalParentTable.equals(pktable_name)) && (finalForeignCatalog == null || finalForeignCatalog.equals(fktable_cat)) && (finalForeignSchema == null || finalForeignSchema.equals(fktable_schem)) && (finalForeignTable == null || finalForeignTable.equals(fktable_name))) { passedFilter = true; } } return passedFilter; } private boolean isForeignKeyPatternMatch( String fktable_cat, String fktable_schem, String fktable_name, boolean passedFilter, String pktable_cat, String pktable_schem, String pktable_name) { // Post filter the results based on the client type if (client.equals("import")) { // For imported keys, filter on the foreign key table if ((finalParentCatalog == null || finalParentCatalog.equals(fktable_cat)) && (compiledSchemaPattern == null || compiledSchemaPattern.equals(fktable_schem) || compiledSchemaPattern.matcher(fktable_schem).matches()) && (compiledParentTablePattern == null || compiledParentTablePattern.equals(fktable_name) || compiledParentTablePattern.matcher(fktable_name).matches())) { passedFilter = true; } } else if (client.equals("export")) { // For exported keys, filter on the primary key table if ((finalParentCatalog == null || finalParentCatalog.equals(pktable_cat)) && (compiledSchemaPattern == null || compiledSchemaPattern.equals(pktable_schem) || compiledSchemaPattern.matcher(pktable_schem).matches()) && (compiledParentTablePattern == null || compiledParentTablePattern.equals(pktable_name) || compiledParentTablePattern.matcher(pktable_name).matches())) { passedFilter = true; } } else if (client.equals("cross")) { // For cross references, filter on both the primary key and foreign // key table if ((finalParentCatalog == null || finalParentCatalog.equals(pktable_cat)) && (compiledSchemaPattern == null || compiledSchemaPattern.equals(pktable_schem) || compiledSchemaPattern.matcher(pktable_schem).matches()) && (compiledParentTablePattern == null || compiledParentTablePattern.equals(pktable_name) || compiledParentTablePattern.matcher(pktable_name).matches()) && (foreignCatalog == null || foreignCatalog.equals(fktable_cat)) && (compiledForeignSchemaPattern == null || compiledForeignSchemaPattern.equals(fktable_schem) || compiledForeignSchemaPattern.matcher(fktable_schem).matches()) && 
(compiledForeignTablePattern == null || compiledForeignTablePattern.equals(fktable_name) || compiledForeignTablePattern.matcher(fktable_name).matches())) { passedFilter = true; } } return passedFilter; } }; } /** * Returns the JDBC standard constraint property value for the property string used in our show * constraint commands * * @param property_name operation type * @param property property value * @return metadata property value */ private short getForeignKeyConstraintProperty(String property_name, String property) { short result = 0; switch (property_name) { case "update": case "delete": switch (property) { case "NO ACTION": result = importedKeyNoAction; break; case "CASCADE": result = importedKeyCascade; break; case "SET NULL": result = importedKeySetNull; break; case "SET DEFAULT": result = importedKeySetDefault; break; case "RESTRICT": result = importedKeyRestrict; break; } break; case "deferrability": switch (property) { case "INITIALLY DEFERRED": result = importedKeyInitiallyDeferred; break; case "INITIALLY IMMEDIATE": result = importedKeyInitiallyImmediate; break; case "NOT DEFERRABLE": result = importedKeyNotDeferrable; break; } break; } return result; } @Override public ResultSet getImportedKeys(String originalCatalog, String originalSchema, String table) throws SQLException { logger.trace( "public ResultSet getImportedKeys(String catalog={}, " + "String schema={}, String table={})", originalCatalog, originalSchema, table); return getForeignKeys("import", originalCatalog, originalSchema, table, null, null, null); } @Override public ResultSet getExportedKeys(String catalog, String schema, String table) throws SQLException { logger.trace( "public ResultSet getExportedKeys(String catalog={}, " + "String schema={}, String table={})", catalog, schema, table); return getForeignKeys("export", catalog, schema, table, null, null, null); } @Override public ResultSet getCrossReference( String parentCatalog, String parentSchema, String parentTable, String foreignCatalog, String foreignSchema, String foreignTable) throws SQLException { logger.trace( "public ResultSet getCrossReference(String parentCatalog={}, " + "String parentSchema={}, String parentTable={}, " + "String foreignCatalog={}, String foreignSchema={}, " + "String foreignTable={})", parentCatalog, parentSchema, parentTable, foreignCatalog, foreignSchema, foreignTable); return getForeignKeys( "cross", parentCatalog, parentSchema, parentTable, foreignCatalog, foreignSchema, foreignTable); } @Override public ResultSet getTypeInfo() throws SQLException { logger.trace("ResultSet getTypeInfo()", false); raiseSQLExceptionIfConnectionIsClosed(); Statement statement = connection.createStatement(); // Return a fixed, hardcoded list of the Snowflake data types and their properties return new SnowflakeDatabaseMetaDataResultSet( Arrays.asList( "TYPE_NAME", "DATA_TYPE", "PRECISION", "LITERAL_PREFIX", "LITERAL_SUFFIX", "CREATE_PARAMS", "NULLABLE", "CASE_SENSITIVE", "SEARCHABLE", "UNSIGNED_ATTRIBUTE", "FIXED_PREC_SCALE", "AUTO_INCREMENT", "LOCAL_TYPE_NAME", "MINIMUM_SCALE", "MAXIMUM_SCALE", "SQL_DATA_TYPE", "SQL_DATETIME_SUB", "NUM_PREC_RADIX"), Arrays.asList( "TEXT", "INTEGER", "INTEGER", "TEXT", "TEXT", "TEXT", "SHORT", "BOOLEAN", "SHORT", "BOOLEAN", "BOOLEAN", "BOOLEAN", "TEXT", "SHORT", "SHORT", "INTEGER", "INTEGER", "INTEGER"), Arrays.asList( Types.VARCHAR, Types.INTEGER, Types.INTEGER, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.SMALLINT, Types.BOOLEAN, Types.SMALLINT, Types.BOOLEAN, Types.BOOLEAN, Types.BOOLEAN, Types.VARCHAR, Types.SMALLINT,
Types.SMALLINT, Types.INTEGER, Types.INTEGER, Types.INTEGER), new Object[][] { { "NUMBER", Types.DECIMAL, 38, null, null, null, typeNullable, false, typeSearchable, false, true, true, null, 0, 37, -1, -1, -1 }, { "INTEGER", Types.INTEGER, 38, null, null, null, typeNullable, false, typeSearchable, false, true, true, null, 0, 0, -1, -1, -1 }, { "DOUBLE", Types.DOUBLE, 38, null, null, null, typeNullable, false, typeSearchable, false, true, true, null, 0, 37, -1, -1, -1 }, { "VARCHAR", Types.VARCHAR, -1, null, null, null, typeNullable, false, typeSearchable, false, true, true, null, -1, -1, -1, -1, -1 }, { "DATE", Types.DATE, -1, null, null, null, typeNullable, false, typeSearchable, false, true, true, null, -1, -1, -1, -1, -1 }, { "TIME", Types.TIME, -1, null, null, null, typeNullable, false, typeSearchable, false, true, true, null, -1, -1, -1, -1, -1 }, { "TIMESTAMP", Types.TIMESTAMP, -1, null, null, null, typeNullable, false, typeSearchable, false, true, true, null, -1, -1, -1, -1, -1 }, { "BOOLEAN", Types.BOOLEAN, -1, null, null, null, typeNullable, false, typeSearchable, false, true, true, null, -1, -1, -1, -1, -1 } }, statement); } /** * Function to return a list of streams * * @param originalCatalog catalog name * @param originalSchemaPattern schema name pattern * @param streamName stream name * @return a result set * @throws SQLException if any SQL error occurs. */ public ResultSet getStreams( String originalCatalog, String originalSchemaPattern, String streamName) throws SQLException { logger.trace( "public ResultSet getStreams(String catalog={}, String schemaPattern={}" + "String streamName={}", originalCatalog, originalSchemaPattern, streamName); raiseSQLExceptionIfConnectionIsClosed(); Statement statement = connection.createStatement(); // apply session context when catalog is unspecified ContextAwareMetadataSearch result = applySessionContext(originalCatalog, originalSchemaPattern); String catalog = result.database(); String schemaPattern = result.schema(); boolean isExactSchema = result.isExactSchema(); final Pattern compiledSchemaPattern = Wildcard.toRegexPattern(schemaPattern, true); final Pattern compiledStreamNamePattern = Wildcard.toRegexPattern(streamName, true); String showStreamsCommand = "show streams"; if (streamName != null && !streamName.isEmpty() && !streamName.trim().equals("%") && !streamName.trim().equals(".*")) { showStreamsCommand += " like '" + escapeSingleQuoteForLikeCommand(streamName) + "'"; } if (catalog == null) { showStreamsCommand += " in account"; } else if (catalog.isEmpty()) { return SnowflakeDatabaseMetaDataResultSet.getEmptyResultSet(GET_STREAMS, statement); } else { String catalogEscaped = escapeSqlQuotes(catalog); if (schemaPattern == null || isSchemaNameWildcardPattern(schemaPattern)) { showStreamsCommand += " in database \"" + catalogEscaped + "\""; } else if (schemaPattern.isEmpty()) { return SnowflakeDatabaseMetaDataResultSet.getEmptyResultSet(GET_STREAMS, statement); } else { String schemaUnescaped = isExactSchema ? 
schemaPattern : unescapeChars(schemaPattern); showStreamsCommand += " in schema \"" + catalogEscaped + "\".\"" + schemaUnescaped + "\""; } } logger.debug("Sql command to get stream metadata: {}", showStreamsCommand); ResultSet resultSet = executeAndReturnEmptyResultIfNotFound(statement, showStreamsCommand, GET_STREAMS); sendInBandTelemetryMetadataMetrics( resultSet, "getStreams", originalCatalog, originalSchemaPattern, streamName, "none"); return new SnowflakeDatabaseMetaDataQueryResultSet(GET_STREAMS, resultSet, statement) { @Override public boolean next() throws SQLException { logger.trace("boolean next()"); incrementRow(); // iterate through the show streams result until we find an entry // that matches the stream name while (showObjectResultSet.next()) { String name = showObjectResultSet.getString("name"); String databaseName = showObjectResultSet.getString("database_name"); String schemaName = showObjectResultSet.getString("schema_name"); String owner = showObjectResultSet.getString("owner"); String comment = showObjectResultSet.getString("comment"); String tableName = showObjectResultSet.getString("table_name"); String sourceType = showObjectResultSet.getString("source_type"); String baseTables = showObjectResultSet.getString("base_tables"); String type = showObjectResultSet.getString("type"); String stale = showObjectResultSet.getString("stale"); String mode = showObjectResultSet.getString("mode"); if ((compiledStreamNamePattern == null || compiledStreamNamePattern.matcher(name).matches()) && (compiledSchemaPattern == null || compiledSchemaPattern.matcher(schemaName).matches())) { logger.debug("Found a matched stream:" + schemaName + "." + name); nextRow[0] = name; nextRow[1] = databaseName; nextRow[2] = schemaName; nextRow[3] = owner; nextRow[4] = comment; nextRow[5] = tableName; nextRow[6] = sourceType; nextRow[7] = baseTables; nextRow[8] = type; nextRow[9] = stale; nextRow[10] = mode; return true; } } close(); return false; } }; } @Override public ResultSet getIndexInfo( String catalog, String schema, String table, boolean unique, boolean approximate) throws SQLException { logger.trace( "public ResultSet getIndexInfo(String catalog, String schema, " + "String table,boolean unique, boolean approximate)", false); raiseSQLExceptionIfConnectionIsClosed(); Statement statement = connection.createStatement(); // Return an empty result set since Snowflake does not support indexes return new SnowflakeDatabaseMetaDataResultSet( Arrays.asList( "TABLE_CAT", "TABLE_SCHEM", "TABLE_NAME", "NON_UNIQUE", "INDEX_QUALIFIER", "INDEX_NAME", "TYPE", "ORDINAL_POSITION", "COLUMN_NAME", "ASC_OR_DESC", "CARDINALITY", "PAGES", "FILTER_CONDITION"), Arrays.asList( "TEXT", "TEXT", "TEXT", "BOOLEAN", "TEXT", "TEXT", "SHORT", "SHORT", "TEXT", "TEXT", "INTEGER", "INTEGER", "TEXT"), Arrays.asList( Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.BOOLEAN, Types.VARCHAR, Types.VARCHAR, Types.SMALLINT, Types.SMALLINT, Types.VARCHAR, Types.VARCHAR, Types.INTEGER, Types.INTEGER, Types.VARCHAR), new Object[][] {}, statement); } @Override public boolean supportsResultSetType(int type) throws SQLException { logger.trace("boolean supportsResultSetType(int type)", false); raiseSQLExceptionIfConnectionIsClosed(); return (type == ResultSet.TYPE_FORWARD_ONLY); } @Override public boolean supportsResultSetConcurrency(int type, int concurrency) throws SQLException { logger.trace( "public boolean supportsResultSetConcurrency(int type, " + "int concurrency)", false); raiseSQLExceptionIfConnectionIsClosed(); return (type ==
ResultSet.TYPE_FORWARD_ONLY && concurrency == ResultSet.CONCUR_READ_ONLY); } @Override public boolean ownUpdatesAreVisible(int type) throws SQLException { logger.trace("boolean ownUpdatesAreVisible(int type)", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean ownDeletesAreVisible(int type) throws SQLException { logger.trace("boolean ownDeletesAreVisible(int type)", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean ownInsertsAreVisible(int type) throws SQLException { logger.trace("boolean ownInsertsAreVisible(int type)", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean othersUpdatesAreVisible(int type) throws SQLException { logger.trace("boolean othersUpdatesAreVisible(int type)", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean othersDeletesAreVisible(int type) throws SQLException { logger.trace("boolean othersDeletesAreVisible(int type)", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean othersInsertsAreVisible(int type) throws SQLException { logger.trace("boolean othersInsertsAreVisible(int type)", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean updatesAreDetected(int type) throws SQLException { logger.trace("boolean updatesAreDetected(int type)", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean deletesAreDetected(int type) throws SQLException { logger.trace("boolean deletesAreDetected(int type)", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean insertsAreDetected(int type) throws SQLException { logger.trace("boolean insertsAreDetected(int type)", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean supportsBatchUpdates() throws SQLException { logger.trace("boolean supportsBatchUpdates()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public ResultSet getUDTs( String catalog, String schemaPattern, String typeNamePattern, int[] types) throws SQLException { logger.trace( "public ResultSet getUDTs(String catalog, " + "String schemaPattern,String typeNamePattern, int[] types)", false); raiseSQLExceptionIfConnectionIsClosed(); // We don't support user-defined types, so return an empty result set Statement statement = connection.createStatement(); return new SnowflakeDatabaseMetaDataResultSet( Arrays.asList( "TYPE_CAT", "TYPE_SCHEM", "TYPE_NAME", "CLASS_NAME", "DATA_TYPE", "REMARKS", "BASE_TYPE"), Arrays.asList("TEXT", "TEXT", "TEXT", "TEXT", "INTEGER", "TEXT", "SHORT"), Arrays.asList( Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.INTEGER, Types.VARCHAR, Types.SMALLINT), new Object[][] {}, statement); } @Override public Connection getConnection() throws SQLException { logger.trace("Connection getConnection()", false); raiseSQLExceptionIfConnectionIsClosed(); return connection; } @Override public boolean supportsSavepoints() throws SQLException { logger.trace("boolean supportsSavepoints()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean supportsNamedParameters() throws SQLException { logger.trace("boolean supportsNamedParameters()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean supportsMultipleOpenResults() throws SQLException { logger.trace("boolean supportsMultipleOpenResults()", false);
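// --- Illustrative aside (editor's sketch, not part of the driver source) ---
// The supports* probes in this class let callers adapt before creating
// statements. A minimal sketch, assuming a live Connection `conn`
// (hypothetical):
//
//   DatabaseMetaData meta = conn.getMetaData();
//   int type = meta.supportsResultSetType(ResultSet.TYPE_SCROLL_INSENSITIVE)
//       ? ResultSet.TYPE_SCROLL_INSENSITIVE
//       : ResultSet.TYPE_FORWARD_ONLY; // this driver reports forward-only
//   try (Statement stmt = conn.createStatement(type, ResultSet.CONCUR_READ_ONLY)) {
//     // use stmt...
//   }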
raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public boolean supportsGetGeneratedKeys() throws SQLException { logger.trace("boolean supportsGetGeneratedKeys()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public ResultSet getSuperTypes(String catalog, String schemaPattern, String typeNamePattern) throws SQLException { logger.trace( "public ResultSet getSuperTypes(String catalog, " + "String schemaPattern,String typeNamePattern)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws SQLException { logger.trace( "public ResultSet getSuperTables(String catalog, " + "String schemaPattern,String tableNamePattern)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public ResultSet getAttributes( String catalog, String schemaPattern, String typeNamePattern, String attributeNamePattern) throws SQLException { logger.trace( "public ResultSet getAttributes(String catalog, String " + "schemaPattern," + "String typeNamePattern,String attributeNamePattern)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public boolean supportsResultSetHoldability(int holdability) throws SQLException { logger.trace("boolean supportsResultSetHoldability(int holdability)", false); raiseSQLExceptionIfConnectionIsClosed(); return holdability == ResultSet.CLOSE_CURSORS_AT_COMMIT; } @Override public int getResultSetHoldability() throws SQLException { logger.trace("int getResultSetHoldability()", false); return ResultSet.CLOSE_CURSORS_AT_COMMIT; } @Override public int getDatabaseMajorVersion() throws SQLException { logger.trace("int getDatabaseMajorVersion()", false); raiseSQLExceptionIfConnectionIsClosed(); return connection.unwrap(SnowflakeConnectionImpl.class).getDatabaseMajorVersion(); } @Override public int getDatabaseMinorVersion() throws SQLException { logger.trace("int getDatabaseMinorVersion()", false); raiseSQLExceptionIfConnectionIsClosed(); return connection.unwrap(SnowflakeConnectionImpl.class).getDatabaseMinorVersion(); } @Override public int getJDBCMajorVersion() throws SQLException { logger.trace("int getJDBCMajorVersion()", false); raiseSQLExceptionIfConnectionIsClosed(); return Integer.parseInt(JDBCVersion.split("\\.", 2)[0]); } @Override public int getJDBCMinorVersion() throws SQLException { logger.trace("int getJDBCMinorVersion()", false); raiseSQLExceptionIfConnectionIsClosed(); return Integer.parseInt(JDBCVersion.split("\\.", 2)[1]); } @Override public int getSQLStateType() throws SQLException { logger.trace("int getSQLStateType()", false); return sqlStateSQL; } @Override public boolean locatorsUpdateCopy() { logger.trace("boolean locatorsUpdateCopy()", false); return false; } @Override public boolean supportsStatementPooling() throws SQLException { logger.trace("boolean supportsStatementPooling()", false); raiseSQLExceptionIfConnectionIsClosed(); return false; } @Override public RowIdLifetime getRowIdLifetime() throws SQLException { logger.trace("RowIdLifetime getRowIdLifetime()", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public ResultSet getSchemas(String originalCatalog, String originalSchema) throws SQLException { logger.trace( "public ResultSet getSchemas(String catalog={}, String " + "schemaPattern={})", originalCatalog, originalSchema); raiseSQLExceptionIfConnectionIsClosed(); // apply session context 
when catalog is unspecified ContextAwareMetadataSearch result = applySessionContext(originalCatalog, originalSchema); final String catalog = result.database(); final String schemaPattern = result.schema(); boolean isExactSchema = result.isExactSchema(); final Pattern compiledSchemaPattern = Wildcard.toRegexPattern(schemaPattern, true); StringBuilder showSchemas = new StringBuilder("show /* JDBC:DatabaseMetaData.getSchemas() */ schemas"); Statement statement = connection.createStatement(); if (isExactSchema && enableWildcardsInShowMetadataCommands) { String escapedSchema = schemaPattern.replaceAll("_", "\\\\\\\\_").replaceAll("%", "\\\\\\\\%"); showSchemas.append(" like '").append(escapedSchema).append("'"); } else if (schemaPattern != null && !schemaPattern.isEmpty() && !schemaPattern.trim().equals("%") && !schemaPattern.trim().equals(".*")) { // only add pattern if it is not empty and not matching all character. showSchemas .append(" like '") .append(escapeSingleQuoteForLikeCommand(schemaPattern)) .append("'"); } if (catalog == null) { showSchemas.append(" in account"); } else if (catalog.isEmpty()) { return SnowflakeDatabaseMetaDataResultSet.getEmptyResultSet(GET_SCHEMAS, statement); } else { showSchemas.append(" in database \"").append(escapeSqlQuotes(catalog)).append("\""); } String sqlQuery = showSchemas.toString(); logger.debug("Sql command to get schemas metadata: {}", sqlQuery); ResultSet resultSet = executeAndReturnEmptyResultIfNotFound(statement, sqlQuery, GET_SCHEMAS); sendInBandTelemetryMetadataMetrics( resultSet, "getSchemas", originalCatalog, originalSchema, "none", "none"); return new SnowflakeDatabaseMetaDataQueryResultSet(GET_SCHEMAS, resultSet, statement) { public boolean next() throws SQLException { logger.trace("boolean next()", false); incrementRow(); // iterate through the show schemas result until we find an entry // that matches the schema pattern while (showObjectResultSet.next()) { String schemaName = showObjectResultSet.getString(2); String dbName = showObjectResultSet.getString(5); if (compiledSchemaPattern == null || compiledSchemaPattern.matcher(schemaName).matches() || isExactSchema && schemaPattern.equals(schemaName)) { nextRow[0] = schemaName; nextRow[1] = dbName; return true; } } close(); return false; } }; } @Override public boolean supportsStoredFunctionsUsingCallSyntax() throws SQLException { logger.trace("boolean supportsStoredFunctionsUsingCallSyntax()", false); raiseSQLExceptionIfConnectionIsClosed(); return true; } @Override public boolean autoCommitFailureClosesAllResultSets() throws SQLException { logger.trace("boolean autoCommitFailureClosesAllResultSets()", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public ResultSet getClientInfoProperties() throws SQLException { logger.trace("ResultSet getClientInfoProperties()", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public ResultSet getFunctions( final String originalCatalog, final String originalSchemaPattern, final String functionNamePattern) throws SQLException { raiseSQLExceptionIfConnectionIsClosed(); Statement statement = connection.createStatement(); logger.trace( "public ResultSet getFunctions(String catalog={}, String schemaPattern={}, " + "String functionNamePattern={})", originalCatalog, originalSchemaPattern, functionNamePattern); String showFunctionCommand = getFirstResultSetCommand( originalCatalog, originalSchemaPattern, functionNamePattern, "functions"); if (showFunctionCommand.isEmpty()) { return
SnowflakeDatabaseMetaDataResultSet.getEmptyResultSet(GET_FUNCTIONS, statement); } ContextAwareMetadataSearch result = applySessionContext(originalCatalog, originalSchemaPattern); String catalog = result.database(); String schemaPattern = result.schema(); boolean isExactSchema = result.isExactSchema(); final Pattern compiledSchemaPattern = Wildcard.toRegexPattern(schemaPattern, true); final Pattern compiledFunctionPattern = Wildcard.toRegexPattern(functionNamePattern, true); ResultSet resultSet = executeAndReturnEmptyResultIfNotFound(statement, showFunctionCommand, GET_FUNCTIONS); sendInBandTelemetryMetadataMetrics( resultSet, "getFunctions", catalog, schemaPattern, functionNamePattern, "none"); return new SnowflakeDatabaseMetaDataQueryResultSet(GET_FUNCTIONS, resultSet, statement) { public boolean next() throws SQLException { logger.trace("boolean next()", false); incrementRow(); // iterate through the show functions result until we find an entry // that matches the function name while (showObjectResultSet.next()) { String catalogName = showObjectResultSet.getString(11); String schemaName = showObjectResultSet.getString(3); String functionName = showObjectResultSet.getString(2); String remarks = showObjectResultSet.getString(10); int functionType = ("Y".equals(showObjectResultSet.getString(12)) ? functionReturnsTable : functionNoTable); String specificName = functionName; if ((compiledFunctionPattern == null || compiledFunctionPattern.matcher(functionName).matches()) && (compiledSchemaPattern == null || compiledSchemaPattern.matcher(schemaName).matches() || isExactSchema && schemaPattern.equals(schemaName))) { logger.debug("Found a matched function:" + schemaName + "." + functionName); nextRow[0] = catalogName; nextRow[1] = schemaName; nextRow[2] = functionName; nextRow[3] = remarks; nextRow[4] = functionType; nextRow[5] = specificName; return true; } } close(); return false; } }; } /** * This is a function that takes in a string of return types and a string of parameter names and * types. It splits both strings into a single list of column names and column types. Counting * from 1, the names occupy the odd indices and the types the even indices.
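* <p>Illustrative example (editor's addition; the inputs are hypothetical, not from the driver's
* tests): given retType {@code "TABLE (ID NUMBER, NAME VARCHAR)"} and args {@code "(X FLOAT)"},
* the returned list is ["ID", "NUMBER", "NAME", "VARCHAR", "X", "FLOAT"] and
* procedureResultsetColumnNum is set to 2. For a scalar retType such as {@code "VARCHAR"}, the
* list starts with an empty name followed by the return type, and procedureResultsetColumnNum is
* set to -1.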
*/ private List<String> parseColumns(String retType, String args) { List<String> columns = new ArrayList<>(); if (retType.substring(0, 5).equalsIgnoreCase("table")) { // if return type is a table there will be a result set String typeStr = retType.substring(retType.indexOf('(') + 1, retType.lastIndexOf(')')); String[] types = typeStr.split("\\s+|, "); if (types.length != 1) { for (int i = 0; i < types.length; i++) { columns.add(types[i]); } procedureResultsetColumnNum = columns.size() / 2; } } else { // otherwise it will be a return value columns.add(""); // there is no name for this column columns.add(retType); procedureResultsetColumnNum = -1; } String argStr = args.substring(args.indexOf('(') + 1, args.lastIndexOf(')')); String arguments[] = argStr.split("\\s+|, "); if (arguments.length != 1) { for (int i = 0; i < arguments.length; i++) { columns.add(arguments[i]); } } return columns; } @Override public ResultSet getFunctionColumns( String catalog, String schemaPattern, String functionNamePattern, String columnNamePattern) throws SQLException { logger.trace( "public ResultSet getFunctionColumns(String catalog, " + "String schemaPattern,String functionNamePattern," + "String columnNamePattern)", false); raiseSQLExceptionIfConnectionIsClosed(); Statement statement = connection.createStatement(); boolean addAllRows = false; String showFunctionCommand = getFirstResultSetCommand(catalog, schemaPattern, functionNamePattern, "functions"); if (showFunctionCommand.isEmpty()) { return SnowflakeDatabaseMetaDataResultSet.getEmptyResultSet(GET_FUNCTION_COLUMNS, statement); } if (columnNamePattern == null || columnNamePattern.isEmpty() || columnNamePattern.trim().equals("%") || columnNamePattern.trim().equals(".*")) { addAllRows = true; } ResultSet resultSetStepOne = executeAndReturnEmptyResultIfNotFound(statement, showFunctionCommand, GET_FUNCTION_COLUMNS); sendInBandTelemetryMetadataMetrics( resultSetStepOne, "getFunctionColumns", catalog, schemaPattern, functionNamePattern, columnNamePattern); ArrayList<Object[]> rows = new ArrayList<>(); while (resultSetStepOne.next()) { String functionNameUnparsed = resultSetStepOne.getString("arguments").trim(); String functionNameNoArgs = resultSetStepOne.getString("name"); String realSchema = resultSetStepOne.getString("schema_name"); String realDatabase = resultSetStepOne.getString("catalog_name"); String showFunctionColCommand = getSecondResultSetCommand(realDatabase, realSchema, functionNameUnparsed, "function"); ResultSet resultSetStepTwo = executeAndReturnEmptyResultIfNotFound( statement, showFunctionColCommand, GET_FUNCTION_COLUMNS); if (!resultSetStepTwo.next()) { continue; } // Retrieve the function arguments and function return values.
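// (Editor's note, hedged: the two reads below assume the DESC FUNCTION output
// lists the "signature" property row first and the "returns" row second, so the
// first "value" read yields the argument list, e.g. "(X FLOAT)", and the second
// the return type, e.g. "TABLE (ID NUMBER)". The literal values are hypothetical.)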
String args = resultSetStepTwo.getString("value"); resultSetStepTwo.next(); String res = resultSetStepTwo.getString("value"); // parse function arguments and return values into a list of columns // result value(s) will be at the top of the list, followed by any arguments List<String> functionCols = parseColumns(res, args); String paramNames[] = new String[functionCols.size() / 2]; String paramTypes[] = new String[functionCols.size() / 2]; if (functionCols.size() > 1) { for (int i = 0; i < functionCols.size(); i++) { if (i % 2 == 0) { paramNames[i / 2] = functionCols.get(i); } else { paramTypes[i / 2] = functionCols.get(i); } } } for (int i = 0; i < paramNames.length; i++) { // if it's the 1st in for loop, it's the result if (i == 0 || paramNames[i].equalsIgnoreCase(columnNamePattern) || addAllRows) { Object[] nextRow = new Object[17]; // add a row to resultSet nextRow[0] = catalog; // function catalog. Can be null. nextRow[1] = schemaPattern; // function schema. Can be null. nextRow[2] = functionNameNoArgs; // function name nextRow[3] = paramNames[i]; // column/parameter name if (i == 0 && procedureResultsetColumnNum < 0) { nextRow[4] = functionReturn; } else if (procedureResultsetColumnNum >= 0 && i < procedureResultsetColumnNum) { nextRow[4] = functionColumnResult; } else { nextRow[4] = functionColumnIn; // kind of column/parameter } String typeName = paramTypes[i]; int type = convertStringToType(typeName); nextRow[5] = type; // data type nextRow[6] = typeName; // type name // precision and scale. Values only exist for numbers int precision = 38; short scale = 0; if (type < 10) { if (typeName.contains("(") && typeName.contains(")")) { precision = Integer.parseInt( typeName.substring(typeName.indexOf('(') + 1, typeName.indexOf(','))); scale = Short.parseShort( typeName.substring(typeName.indexOf(',') + 1, typeName.indexOf(')'))); nextRow[7] = precision; nextRow[9] = scale; } else if (type == Types.FLOAT) { nextRow[7] = 0; nextRow[9] = null; } else { nextRow[7] = precision; nextRow[9] = scale; } } else { nextRow[7] = 0; nextRow[9] = null; } nextRow[8] = 0; // length in bytes. not supported nextRow[10] = 10; // radix. Probably 10 is default, but unknown. nextRow[11] = functionNullableUnknown; // nullable. We don't know from current function info. nextRow[12] = resultSetStepOne.getString("description").trim(); // remarks if (type == Types.BINARY || type == Types.VARBINARY || type == Types.CHAR || type == Types.VARCHAR) { if (typeName.contains("(") && typeName.contains(")")) { int char_octet_len = Integer.parseInt( typeName.substring(typeName.indexOf('(') + 1, typeName.indexOf(')'))); nextRow[13] = char_octet_len; } else if (type == Types.CHAR || type == Types.VARCHAR) { nextRow[13] = getMaxCharLiteralLength(); } else if (type == Types.BINARY || type == Types.VARBINARY) { nextRow[13] = getMaxBinaryLiteralLength(); } } else { nextRow[13] = null; } // the ordinal position is 0 for a return value. // for result set columns, the ordinal position is of the column in the result set // starting at 1 if (procedureResultsetColumnNum >= 0) { if (i < procedureResultsetColumnNum) { nextRow[14] = i + 1; } else { nextRow[14] = i - procedureResultsetColumnNum + 1; } } else { nextRow[14] = i; // ordinal position. } nextRow[15] = ""; // nullability again. Not supported.
nextRow[16] = functionNameUnparsed; rows.add(nextRow); } } } Object[][] resultRows = new Object[rows.size()][17]; for (int i = 0; i < resultRows.length; i++) { resultRows[i] = rows.get(i); } return new SnowflakeDatabaseMetaDataResultSet(GET_FUNCTION_COLUMNS, resultRows, statement); } // @Override public ResultSet getPseudoColumns( String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException { logger.trace( "public ResultSet getPseudoColumns(String catalog, " + "String schemaPattern,String tableNamePattern," + "String columnNamePattern)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } // @Override public boolean generatedKeyAlwaysReturned() throws SQLException { logger.trace("boolean generatedKeyAlwaysReturned()", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } // unchecked @Override public <T> T unwrap(Class<T> iface) throws SQLException { logger.trace("<T> T unwrap(Class<T> iface)", false); if (!iface.isInstance(this)) { throw new SQLException( this.getClass().getName() + " not unwrappable from " + iface.getName()); } return (T) this; } @Override public boolean isWrapperFor(Class<?> iface) throws SQLException { logger.trace("boolean isWrapperFor(Class<?> iface)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } /** * A small helper function to execute a show command to get metadata, and if the object does not * exist, return an empty result set instead of throwing a SnowflakeSQLException */ private ResultSet executeAndReturnEmptyResultIfNotFound( Statement statement, String sql, DBMetadataResultSetMetadata metadataType) throws SQLException { ResultSet resultSet; if (isNullOrEmpty(sql)) { return SnowflakeDatabaseMetaDataResultSet.getEmptyResultSet(metadataType, statement); } try { resultSet = statement.executeQuery(sql); } catch (SnowflakeSQLException e) { if (e.getSQLState().equals(SqlState.NO_DATA) || e.getSQLState().equals(SqlState.BASE_TABLE_OR_VIEW_NOT_FOUND) || e.getMessage().contains("Operation is not supported in reader account")) { return SnowflakeDatabaseMetaDataResultSet.getEmptyResult( metadataType, statement, e.getQueryId()); } // When using this helper function for "desc function" calls, there are some built-in // functions with unusual argument syntax that throw an error when attempting to call // desc function on them. For example, AS_TIMESTAMP_LTZ([,VARIANT]) throws an exception. // Skip these built-in functions.
else if (sql.contains("desc function") && e.getSQLState().equals(SqlState.SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION)) { return SnowflakeDatabaseMetaDataResultSet.getEmptyResult( metadataType, statement, e.getQueryId()); } else { throw e; } } return resultSet; } private static class ContextAwareMetadataSearch { private final String database; private final String schema; private final boolean isExactSchema; public ContextAwareMetadataSearch(String database, String schema, boolean isExactSchema) { this.database = database; this.schema = schema; this.isExactSchema = isExactSchema; } public String database() { return database; } public String schema() { return schema; } public boolean isExactSchema() { return isExactSchema; } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/api/implementation/pooling/LogicalConnection.java ================================================ package net.snowflake.client.internal.api.implementation.pooling; import java.sql.Array; import java.sql.Blob; import java.sql.CallableStatement; import java.sql.Clob; import java.sql.Connection; import java.sql.DatabaseMetaData; import java.sql.NClob; import java.sql.PreparedStatement; import java.sql.SQLClientInfoException; import java.sql.SQLException; import java.sql.SQLWarning; import java.sql.SQLXML; import java.sql.Savepoint; import java.sql.Statement; import java.sql.Struct; import java.util.Properties; import java.util.concurrent.Executor; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.api.implementation.connection.SnowflakeConnectionImpl; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; /** * Logical connection is a wrapper class on top of SnowflakeConnectionImpl. Every method call is * delegated to SnowflakeConnectionImpl except for the close method. */ class LogicalConnection implements Connection { private static final SFLogger logger = SFLoggerFactory.getLogger(LogicalConnection.class); /** Physical connection to Snowflake, an instance of SnowflakeConnectionImpl */ private final Connection physicalConnection; /** Pooled connection object that created this logical connection */ private final SnowflakePooledConnection pooledConnection; /** * Flag indicating whether this logical connection is closed. Note: this is different from the * physical connection's closed state. */ private boolean isClosed; LogicalConnection(SnowflakePooledConnection pooledConnection) throws SQLException { this.physicalConnection = pooledConnection.getPhysicalConnection(); this.pooledConnection = pooledConnection; this.isClosed = physicalConnection.isClosed(); } @Override public Statement createStatement() throws SQLException { throwExceptionIfClosed(); try { return physicalConnection.createStatement(); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public PreparedStatement prepareStatement(String sql) throws SQLException { throwExceptionIfClosed(); try { return physicalConnection.prepareStatement(sql); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public CallableStatement prepareCall(String sql) throws SQLException { throwExceptionIfClosed(); try { return physicalConnection.prepareCall(sql); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public String nativeSQL(String sql)
throws SQLException { throwExceptionIfClosed(); try { return physicalConnection.nativeSQL(sql); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public void setAutoCommit(boolean autoCommit) throws SQLException { throwExceptionIfClosed(); try { physicalConnection.setAutoCommit(autoCommit); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public boolean getAutoCommit() throws SQLException { throwExceptionIfClosed(); try { return physicalConnection.getAutoCommit(); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public void commit() throws SQLException { throwExceptionIfClosed(); try { physicalConnection.commit(); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public void rollback() throws SQLException { throwExceptionIfClosed(); try { physicalConnection.rollback(); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } /** Logical connection will not close physical connection, but fire events */ @Override public void close() throws SQLException { if (isClosed) { return; } SnowflakeConnectionImpl sfConnection = physicalConnection.unwrap(SnowflakeConnectionImpl.class); logger.debug("Closing logical connection with session id: {}", sfConnection.getSessionID()); pooledConnection.fireConnectionCloseEvent(); isClosed = true; } @Override public boolean isClosed() throws SQLException { return isClosed; } @Override public DatabaseMetaData getMetaData() throws SQLException { throwExceptionIfClosed(); try { return physicalConnection.getMetaData(); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public void setReadOnly(boolean readOnly) throws SQLException { throwExceptionIfClosed(); try { physicalConnection.setReadOnly(readOnly); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public boolean isReadOnly() throws SQLException { throwExceptionIfClosed(); try { return physicalConnection.isReadOnly(); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public void setCatalog(String catalog) throws SQLException { throwExceptionIfClosed(); try { physicalConnection.setCatalog(catalog); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public String getCatalog() throws SQLException { throwExceptionIfClosed(); try { return physicalConnection.getCatalog(); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public void setTransactionIsolation(int level) throws SQLException { throwExceptionIfClosed(); try { physicalConnection.setTransactionIsolation(level); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public int getTransactionIsolation() throws SQLException { throwExceptionIfClosed(); try { return physicalConnection.getTransactionIsolation(); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public SQLWarning getWarnings() throws SQLException { throwExceptionIfClosed(); try { return physicalConnection.getWarnings(); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public void clearWarnings() throws SQLException { throwExceptionIfClosed(); try { physicalConnection.clearWarnings(); } catch (SQLException e) { 
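// --- Illustrative aside (editor's sketch, not part of the driver source) ---
// Every delegating method in this class follows the pattern seen here: notify
// the pool via fireConnectionErrorEvent(e), then rethrow. A hedged sketch of a
// pool-side listener consuming those events via the standard javax.sql API
// (the poolDataSource variable is hypothetical):
//
//   PooledConnection pc = poolDataSource.getPooledConnection();
//   pc.addConnectionEventListener(new ConnectionEventListener() {
//     @Override public void connectionClosed(ConnectionEvent ev) {
//       // logical close: return the physical connection to the pool
//     }
//     @Override public void connectionErrorOccurred(ConnectionEvent ev) {
//       // fatal error: evict the connection; ev.getSQLException() has the cause
//     }
//   });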
pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException { throwExceptionIfClosed(); try { return physicalConnection.createStatement(resultSetType, resultSetConcurrency); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { throwExceptionIfClosed(); try { return physicalConnection.prepareStatement(sql, resultSetType, resultSetConcurrency); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { throwExceptionIfClosed(); try { return physicalConnection.prepareCall(sql, resultSetType, resultSetConcurrency); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public java.util.Map<String, Class<?>> getTypeMap() throws SQLException { throwExceptionIfClosed(); try { return physicalConnection.getTypeMap(); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public void setTypeMap(java.util.Map<String, Class<?>> map) throws SQLException { throwExceptionIfClosed(); try { physicalConnection.setTypeMap(map); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public void setHoldability(int holdability) throws SQLException { throwExceptionIfClosed(); try { physicalConnection.setHoldability(holdability); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public int getHoldability() throws SQLException { throwExceptionIfClosed(); try { return physicalConnection.getHoldability(); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public Savepoint setSavepoint() throws SQLException { throwExceptionIfClosed(); try { return physicalConnection.setSavepoint(); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public Savepoint setSavepoint(String name) throws SQLException { throwExceptionIfClosed(); try { return physicalConnection.setSavepoint(name); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public void rollback(Savepoint savepoint) throws SQLException { throwExceptionIfClosed(); try { physicalConnection.rollback(savepoint); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public void releaseSavepoint(Savepoint savepoint) throws SQLException { throwExceptionIfClosed(); try { physicalConnection.releaseSavepoint(savepoint); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public Statement createStatement( int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { throwExceptionIfClosed(); try { return physicalConnection.createStatement( resultSetType, resultSetConcurrency, resultSetHoldability); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public PreparedStatement prepareStatement( String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { throwExceptionIfClosed(); try { return physicalConnection.prepareStatement( sql, resultSetType, resultSetConcurrency, resultSetHoldability); } catch
(SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public CallableStatement prepareCall( String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { throwExceptionIfClosed(); try { return physicalConnection.prepareCall( sql, resultSetType, resultSetConcurrency, resultSetHoldability); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException { throwExceptionIfClosed(); try { return physicalConnection.prepareStatement(sql, autoGeneratedKeys); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public PreparedStatement prepareStatement(String sql, int columnIndexes[]) throws SQLException { throwExceptionIfClosed(); try { return physicalConnection.prepareStatement(sql, columnIndexes); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public PreparedStatement prepareStatement(String sql, String columnNames[]) throws SQLException { throwExceptionIfClosed(); try { return physicalConnection.prepareStatement(sql, columnNames); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public Clob createClob() throws SQLException { throwExceptionIfClosed(); try { return physicalConnection.createClob(); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public Blob createBlob() throws SQLException { throwExceptionIfClosed(); try { return physicalConnection.createBlob(); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public NClob createNClob() throws SQLException { throwExceptionIfClosed(); try { return physicalConnection.createNClob(); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public SQLXML createSQLXML() throws SQLException { throwExceptionIfClosed(); try { return physicalConnection.createSQLXML(); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public boolean isValid(int timeout) throws SQLException { try { return !isClosed && physicalConnection.isValid(timeout); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public void setClientInfo(String name, String value) throws SQLClientInfoException { try { physicalConnection.setClientInfo(name, value); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public void setClientInfo(Properties properties) throws SQLClientInfoException { try { physicalConnection.setClientInfo(properties); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public String getClientInfo(String name) throws SQLException { try { return physicalConnection.getClientInfo(name); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public Properties getClientInfo() throws SQLException { throwExceptionIfClosed(); try { return physicalConnection.getClientInfo(); } catch (SQLException e) { pooledConnection.fireConnectionErrorEvent(e); throw e; } } @Override public Array createArrayOf(String typeName, Object[] elements) throws SQLException { throwExceptionIfClosed(); try { return physicalConnection.createArrayOf(typeName, elements); } catch (SQLException e) { 
pooledConnection.fireConnectionErrorEvent(e); throw e; } }

  @Override
  public Struct createStruct(String typeName, Object[] attributes) throws SQLException {
    throwExceptionIfClosed();
    try {
      return physicalConnection.createStruct(typeName, attributes);
    } catch (SQLException e) {
      pooledConnection.fireConnectionErrorEvent(e);
      throw e;
    }
  }

  @Override
  public void setSchema(String schema) throws SQLException {
    throwExceptionIfClosed();
    try {
      physicalConnection.setSchema(schema);
    } catch (SQLException e) {
      pooledConnection.fireConnectionErrorEvent(e);
      throw e;
    }
  }

  @Override
  public String getSchema() throws SQLException {
    throwExceptionIfClosed();
    try {
      return physicalConnection.getSchema();
    } catch (SQLException e) {
      pooledConnection.fireConnectionErrorEvent(e);
      throw e;
    }
  }

  @Override
  public void abort(Executor executor) throws SQLException {
    try {
      physicalConnection.abort(executor);
    } catch (SQLException e) {
      pooledConnection.fireConnectionErrorEvent(e);
      throw e;
    }
  }

  @Override
  public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException {
    throwExceptionIfClosed();
    try {
      physicalConnection.setNetworkTimeout(executor, milliseconds);
    } catch (SQLException e) {
      pooledConnection.fireConnectionErrorEvent(e);
      throw e;
    }
  }

  @Override
  public int getNetworkTimeout() throws SQLException {
    throwExceptionIfClosed();
    try {
      return physicalConnection.getNetworkTimeout();
    } catch (SQLException e) {
      pooledConnection.fireConnectionErrorEvent(e);
      throw e;
    }
  }

  @Override
  public boolean isWrapperFor(Class<?> iface) throws SQLException {
    try {
      return physicalConnection.isWrapperFor(iface);
    } catch (SQLException e) {
      pooledConnection.fireConnectionErrorEvent(e);
      throw e;
    }
  }

  @Override
  public <T> T unwrap(Class<T> iface) throws SQLException {
    try {
      return physicalConnection.unwrap(iface);
    } catch (SQLException e) {
      pooledConnection.fireConnectionErrorEvent(e);
      throw e;
    }
  }

  private void throwExceptionIfClosed() throws SQLException {
    if (isClosed) {
      throw new SnowflakeSQLException(ErrorCode.CONNECTION_CLOSED);
    }
  }
}

================================================
FILE: src/main/java/net/snowflake/client/internal/api/implementation/pooling/SnowflakeConnectionPoolDataSourceImpl.java
================================================
package net.snowflake.client.internal.api.implementation.pooling;

import java.sql.Connection;
import java.sql.SQLException;
import javax.sql.PooledConnection;
import net.snowflake.client.api.pooling.SnowflakeConnectionPoolDataSource;
import net.snowflake.client.internal.api.implementation.datasource.SnowflakeBasicDataSource;

public class SnowflakeConnectionPoolDataSourceImpl extends SnowflakeBasicDataSource
    implements SnowflakeConnectionPoolDataSource {

  @Override
  public PooledConnection getPooledConnection() throws SQLException {
    Connection connection = super.getConnection();
    return new SnowflakePooledConnection(connection);
  }

  @Override
  public PooledConnection getPooledConnection(String user, String password) throws SQLException {
    Connection connection = super.getConnection(user, password);
    return new SnowflakePooledConnection(connection);
  }
}
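A usage sketch may help tie the pooling classes together. The example below is illustrative only (it is not a file in this repository) and assumes the URL and credential setters inherited from SnowflakeBasicDataSource have already been called with real values:

import java.sql.Connection;
import javax.sql.ConnectionEvent;
import javax.sql.ConnectionEventListener;
import javax.sql.PooledConnection;
import net.snowflake.client.internal.api.implementation.pooling.SnowflakeConnectionPoolDataSourceImpl;

public class PooledConnectionSketch {
  public static void main(String[] args) throws Exception {
    SnowflakeConnectionPoolDataSourceImpl dataSource = new SnowflakeConnectionPoolDataSourceImpl();
    // Connection properties are assumed to be configured on the data source at this point.
    PooledConnection pooled = dataSource.getPooledConnection();
    pooled.addConnectionEventListener(
        new ConnectionEventListener() {
          @Override
          public void connectionClosed(ConnectionEvent event) {
            // A pool manager would return the physical connection to the pool here.
          }

          @Override
          public void connectionErrorOccurred(ConnectionEvent event) {
            // A pool manager would evict the broken physical connection here.
          }
        });
    Connection logical = pooled.getConnection(); // a LogicalConnection wrapper
    logical.close(); // fires connectionClosed; the physical connection stays open
    pooled.close(); // actually closes the physical connection
  }
}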
================================================
FILE: src/main/java/net/snowflake/client/internal/api/implementation/pooling/SnowflakePooledConnection.java
================================================
package net.snowflake.client.internal.api.implementation.pooling;

import java.sql.Connection;
import java.sql.SQLException;
import java.util.HashSet;
import java.util.Set;
import javax.sql.ConnectionEvent;
import javax.sql.ConnectionEventListener;
import javax.sql.PooledConnection;
import javax.sql.StatementEventListener;
import net.snowflake.client.internal.api.implementation.connection.SnowflakeConnectionImpl;
import net.snowflake.client.internal.log.SFLogger;
import net.snowflake.client.internal.log.SFLoggerFactory;

/** Snowflake implementation of pooled connection */
public class SnowflakePooledConnection implements PooledConnection {
  private static final SFLogger logger =
      SFLoggerFactory.getLogger(SnowflakePooledConnection.class);

  /** The physical connection, an instance of the SnowflakeConnectionImpl class */
  private Connection physicalConnection;

  /** Set of event listeners registered to listen for connection events */
  private final Set<ConnectionEventListener> eventListeners;

  public SnowflakePooledConnection(Connection physicalConnection) throws SQLException {
    this.physicalConnection = physicalConnection;
    SnowflakeConnectionImpl sfConnection = physicalConnection.unwrap(SnowflakeConnectionImpl.class);
    logger.debug("Creating new pooled connection with session id: {}", sfConnection.getSessionID());
    this.eventListeners = new HashSet<>();
  }

  @Override
  public Connection getConnection() throws SQLException {
    SnowflakeConnectionImpl sfConnection = physicalConnection.unwrap(SnowflakeConnectionImpl.class);
    logger.debug(
        "Creating new Logical Connection based on pooled connection with session id: {}",
        sfConnection.getSessionID());
    return new LogicalConnection(this);
  }

  Connection getPhysicalConnection() {
    return physicalConnection;
  }

  /** Fires a connection-closed event to all registered event listeners */
  void fireConnectionCloseEvent() {
    for (ConnectionEventListener connectionEventListener : eventListeners) {
      connectionEventListener.connectionClosed(new ConnectionEvent(this));
    }
  }

  /** Fires a connection-error event to all registered event listeners */
  void fireConnectionErrorEvent(SQLException e) {
    for (ConnectionEventListener connectionEventListener : eventListeners) {
      connectionEventListener.connectionErrorOccurred(new ConnectionEvent(this, e));
    }
  }

  @Override
  public void addConnectionEventListener(ConnectionEventListener eventListener) {
    this.eventListeners.add(eventListener);
  }

  @Override
  public void close() throws SQLException {
    if (this.physicalConnection != null) {
      SnowflakeConnectionImpl sfConnection =
          physicalConnection.unwrap(SnowflakeConnectionImpl.class);
      logger.debug("Closing pooled connection with session id: {}", sfConnection.getSessionID());
      this.physicalConnection.close();
      this.physicalConnection = null;
    }
    eventListeners.clear();
  }

  @Override
  public void removeConnectionEventListener(ConnectionEventListener eventListener) {
    this.eventListeners.remove(eventListener);
  }

  @Override
  public void addStatementEventListener(StatementEventListener eventListener) {
    // do nothing for now
  }

  @Override
  public void removeStatementEventListener(StatementEventListener eventListener) {
    // do nothing for now
  }
}
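To make the listener contract concrete, here is a minimal pool fragment (hypothetical; SimplePool is an invented name, not part of the repository) showing how a pool might react to the events fired by SnowflakePooledConnection:

import java.sql.SQLException;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import javax.sql.ConnectionEvent;
import javax.sql.ConnectionEventListener;
import javax.sql.PooledConnection;

class SimplePool implements ConnectionEventListener {
  private final Queue<PooledConnection> idle = new ConcurrentLinkedQueue<>();

  void register(PooledConnection pooled) {
    pooled.addConnectionEventListener(this);
  }

  @Override
  public void connectionClosed(ConnectionEvent event) {
    // Logical close: the physical connection is still healthy, so recycle it.
    idle.offer((PooledConnection) event.getSource());
  }

  @Override
  public void connectionErrorOccurred(ConnectionEvent event) {
    // Fatal error: close and drop the physical connection instead of recycling it.
    try {
      ((PooledConnection) event.getSource()).close();
    } catch (SQLException ignored) {
      // The connection is already broken; nothing more can be done.
    }
  }
}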
================================================
FILE: src/main/java/net/snowflake/client/internal/api/implementation/resultset/FieldMetadataImpl.java
================================================
package net.snowflake.client.internal.api.implementation.resultset;

import java.util.ArrayList;
import java.util.List;
import net.snowflake.client.api.resultset.FieldMetadata;
import net.snowflake.client.api.resultset.SnowflakeType;

/** Implementation of {@link FieldMetadata} for structured type field information. */
public class FieldMetadataImpl implements FieldMetadata {
  private String name;
  private String typeName;
  private int type;
  private boolean nullable;
  private int byteLength;
  private int precision;
  private int scale;
  private boolean fixed;
  private SnowflakeType base;
  private List<FieldMetadata> fields;

  public FieldMetadataImpl(
      String name,
      String typeName,
      int type,
      boolean nullable,
      int byteLength,
      int precision,
      int scale,
      boolean fixed,
      SnowflakeType base,
      List<FieldMetadata> fields) {
    this.name = name;
    this.typeName = typeName;
    this.type = type;
    this.nullable = nullable;
    this.byteLength = byteLength;
    this.precision = precision;
    this.scale = scale;
    this.fixed = fixed;
    this.base = base;
    this.fields = fields;
  }

  public FieldMetadataImpl() {
    this.fields = new ArrayList<>();
  }

  @Override
  public String getName() {
    return name;
  }

  public void setName(String name) {
    this.name = name;
  }

  @Override
  public String getTypeName() {
    return typeName;
  }

  public void setTypeName(String typeName) {
    this.typeName = typeName;
  }

  @Override
  public int getType() {
    return type;
  }

  public void setType(int type) {
    this.type = type;
  }

  @Override
  public boolean isNullable() {
    return nullable;
  }

  public void setNullable(boolean nullable) {
    this.nullable = nullable;
  }

  @Override
  public int getByteLength() {
    return byteLength;
  }

  public void setByteLength(int byteLength) {
    this.byteLength = byteLength;
  }

  @Override
  public int getPrecision() {
    return precision;
  }

  public void setPrecision(int precision) {
    this.precision = precision;
  }

  @Override
  public int getScale() {
    return scale;
  }

  public void setScale(int scale) {
    this.scale = scale;
  }

  @Override
  public boolean isFixed() {
    return fixed;
  }

  public void setFixed(boolean fixed) {
    this.fixed = fixed;
  }

  @Override
  public SnowflakeType getBase() {
    return base;
  }

  public void setBase(SnowflakeType base) {
    this.base = base;
  }

  @Override
  public List<FieldMetadata> getFields() {
    return fields;
  }

  public void setFields(List<FieldMetadata> fields) {
    this.fields = fields;
  }
}
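As a quick illustration (the values are hypothetical, not taken from the repository), a descriptor for a VARCHAR member of a structured OBJECT column could be built with the all-args constructor; the byte length shown assumes Snowflake's documented VARCHAR maximum, and SnowflakeType.TEXT is assumed to be the matching base type:

import java.util.ArrayList;
import net.snowflake.client.api.resultset.FieldMetadata;
import net.snowflake.client.api.resultset.SnowflakeType;
import net.snowflake.client.internal.api.implementation.resultset.FieldMetadataImpl;

class FieldMetadataSketch {
  static FieldMetadata varcharField() {
    // Arguments: name, typeName, JDBC type, nullable, byteLength, precision, scale,
    // fixed, base type, nested fields (empty for a scalar field).
    return new FieldMetadataImpl(
        "city", // hypothetical field name
        "VARCHAR",
        java.sql.Types.VARCHAR,
        true,
        16777216, // assumed maximum VARCHAR byte length
        0,
        0,
        false,
        SnowflakeType.TEXT, // assumed base type for VARCHAR fields
        new ArrayList<>());
  }
}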
================================================
FILE: src/main/java/net/snowflake/client/internal/api/implementation/resultset/SnowflakeBaseResultSet.java
================================================
package net.snowflake.client.internal.api.implementation.resultset;

import static net.snowflake.client.internal.jdbc.SnowflakeUtil.mapSFExceptionToSQLException;
import static net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.internalCallMarker;

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.InputStream;
import java.io.Reader;
import java.io.StringReader;
import java.math.BigDecimal;
import java.net.URL;
import java.sql.Array;
import java.sql.Blob;
import java.sql.Clob;
import java.sql.Date;
import java.sql.NClob;
import java.sql.Ref;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.RowId;
import java.sql.SQLData;
import java.sql.SQLException;
import java.sql.SQLInput;
import java.sql.SQLWarning;
import java.sql.SQLXML;
import java.sql.Statement;
import java.sql.Time;
import java.sql.Timestamp;
import java.time.Duration;
import java.time.Period;
import java.util.Arrays;
import java.util.Calendar;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.TimeZone;
import net.snowflake.client.api.exception.ErrorCode;
import net.snowflake.client.api.exception.SnowflakeSQLException;
import net.snowflake.client.api.resultset.FieldMetadata;
import net.snowflake.client.api.resultset.SnowflakeResultSet;
import net.snowflake.client.internal.api.implementation.connection.SnowflakeConnectionImpl;
import net.snowflake.client.internal.api.implementation.statement.SnowflakeStatementImpl;
import net.snowflake.client.internal.core.ColumnTypeHelper;
import net.snowflake.client.internal.core.JsonSqlInput;
import net.snowflake.client.internal.core.ObjectMapperFactory;
import net.snowflake.client.internal.core.SFBaseResultSet;
import net.snowflake.client.internal.core.SFBaseSession;
import net.snowflake.client.internal.core.SFException;
import net.snowflake.client.internal.core.arrow.StructObjectWrapper;
import net.snowflake.client.internal.core.structs.SQLDataCreationHelper;
import net.snowflake.client.internal.jdbc.SnowflakeClob;
import net.snowflake.client.internal.jdbc.SnowflakeLoggedFeatureNotSupportedException;
import net.snowflake.client.internal.jdbc.SnowflakeResultSetMetaDataV1;
import net.snowflake.client.internal.jdbc.SnowflakeResultSetSerializableV1;
import net.snowflake.client.internal.jdbc.SnowflakeUtil;
import net.snowflake.client.internal.log.SFLogger;
import net.snowflake.client.internal.log.SFLoggerFactory;
import net.snowflake.common.core.SqlState;

/** Base class for query result set and metadata result set */
public abstract class SnowflakeBaseResultSet implements ResultSet, SnowflakeResultSet {
  private static final SFLogger logger = SFLoggerFactory.getLogger(SnowflakeBaseResultSet.class);
  private final int resultSetType;
  private final int resultSetConcurrency;
  private final int resultSetHoldability;
  protected SFBaseResultSet sfBaseResultSet;
  // Snowflake supports sessionless result sets. In that case, there is no
  // statement for this result set.
  protected final Statement statement;
  protected SnowflakeResultSetMetaDataV1 resultSetMetaData = null;
  protected Map<String, Object> parameters = new HashMap<>();
  private int fetchSize = 0;
  protected SFBaseSession session;
  private final SnowflakeResultSetSerializableV1 serializable;
  private static final ObjectMapper OBJECT_MAPPER = ObjectMapperFactory.getObjectMapper();

  public SnowflakeBaseResultSet(Statement statement) throws SQLException {
    this.statement = statement;
    this.resultSetType = statement.getResultSetType();
    this.resultSetConcurrency = statement.getResultSetConcurrency();
    this.resultSetHoldability = statement.getResultSetHoldability();
    this.session = maybeGetSession(statement);
    this.serializable = null;
  }

  private static SFBaseSession maybeGetSession(Statement statement) {
    try {
      return ((SnowflakeConnectionImpl) statement.getConnection())
          .getSFBaseSession(internalCallMarker());
    } catch (SQLException e) {
      // This exception shouldn't be hit. Statement class should be able to be unwrapped.
      logger.error(
          "Unable to unwrap SnowflakeStatementImpl class to retrieve session. Session is null.",
          false);
      return null;
    }
  }

  /**
   * Creates a sessionless result set; there is no statement or session for the result set.
   *
   * @param resultSetSerializable The result set serializable object which includes all metadata to
   *     create the result set
   * @throws SQLException if an error occurs
   */
  public SnowflakeBaseResultSet(SnowflakeResultSetSerializableV1 resultSetSerializable)
      throws SQLException {
    // This is a sessionless result set, so there is no actual statement for it.
this.statement = new SnowflakeStatementImpl.NoOpSnowflakeStatementImpl(); this.resultSetType = resultSetSerializable.getResultSetType(); this.resultSetConcurrency = resultSetSerializable.getResultSetConcurrency(); this.resultSetHoldability = resultSetSerializable.getResultSetHoldability(); this.session = null; this.serializable = resultSetSerializable; } /** * This should never be used. Simply needed this for SFAsynchronousResult subclass * * @throws SQLException if an error occurs */ protected SnowflakeBaseResultSet() throws SQLException { this.resultSetType = 0; this.resultSetConcurrency = 0; this.resultSetHoldability = 0; this.statement = new SnowflakeStatementImpl.NoOpSnowflakeStatementImpl(); this.session = null; this.serializable = null; } @Override public abstract boolean next() throws SQLException; @Override public abstract boolean isClosed() throws SQLException; /** * Raises SQLException if the result set is closed * * @throws SQLException if the result set is closed. */ protected void raiseSQLExceptionIfResultSetIsClosed() throws SQLException { if (isClosed()) { throw new SnowflakeSQLException(ErrorCode.RESULTSET_ALREADY_CLOSED); } } @Override public abstract byte[] getBytes(int columnIndex) throws SQLException; /** * Get Date value * * @param columnIndex column index * @param tz timezone * @return Date value at column index * @throws SQLException if data at column index is incompatible with Date type */ public abstract Date getDate(int columnIndex, TimeZone tz) throws SQLException; private boolean getGetDateUseNullTimezone() { if (this.session != null) { return this.session.getGetDateUseNullTimezone(); } if (this.serializable != null) { return this.serializable.getGetDateUseNullTimezone(); } return false; } @Override public Date getDate(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return getDate(columnIndex, getGetDateUseNullTimezone() ? 
null : TimeZone.getDefault()); } @Override public abstract Time getTime(int columnIndex) throws SQLException; @Override public Timestamp getTimestamp(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return getTimestamp(columnIndex, (TimeZone) null); } /** * Get timestamp value * * @param columnIndex column index * @param tz timezone * @return timestamp value at column index * @throws SQLException if data at column index is incompatible with timestamp */ public abstract Timestamp getTimestamp(int columnIndex, TimeZone tz) throws SQLException; @Override public InputStream getAsciiStream(int columnIndex) throws SQLException { logger.trace("InputStream getAsciiStream(int columnIndex)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } /** * @deprecated */ @Deprecated @Override public InputStream getUnicodeStream(int columnIndex) throws SQLException { logger.trace("InputStream getUnicodeStream(int columnIndex)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public InputStream getBinaryStream(int columnIndex) throws SQLException { logger.trace("InputStream getBinaryStream(int columnIndex)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public String getString(String columnLabel) throws SQLException { logger.trace("String getString(String columnLabel)", false); return getString(findColumn(columnLabel)); } @Override public boolean getBoolean(String columnLabel) throws SQLException { logger.trace("boolean getBoolean(String columnLabel)", false); return getBoolean(findColumn(columnLabel)); } @Override public byte getByte(String columnLabel) throws SQLException { logger.trace("byte getByte(String columnLabel)", false); raiseSQLExceptionIfResultSetIsClosed(); return getByte(findColumn(columnLabel)); } @Override public short getShort(String columnLabel) throws SQLException { logger.trace("short getShort(String columnLabel)", false); return getShort(findColumn(columnLabel)); } @Override public int getInt(String columnLabel) throws SQLException { logger.trace("int getInt(String columnLabel)", false); return getInt(findColumn(columnLabel)); } @Override public long getLong(String columnLabel) throws SQLException { logger.trace("long getLong(String columnLabel)", false); return getLong(findColumn(columnLabel)); } @Override public float getFloat(String columnLabel) throws SQLException { logger.trace("float getFloat(String columnLabel)", false); return getFloat(findColumn(columnLabel)); } @Override public double getDouble(String columnLabel) throws SQLException { logger.trace("double getDouble(String columnLabel)", false); return getDouble(findColumn(columnLabel)); } /** * @deprecated */ @Deprecated @Override public BigDecimal getBigDecimal(String columnLabel, int scale) throws SQLException { logger.trace("BigDecimal getBigDecimal(String columnLabel, " + "int scale)", false); return getBigDecimal(findColumn(columnLabel), scale); } @Override public byte[] getBytes(String columnLabel) throws SQLException { logger.trace("byte[] getBytes(String columnLabel)", false); return getBytes(findColumn(columnLabel)); } @Override public Date getDate(String columnLabel) throws SQLException { logger.trace("Date getDate(String columnLabel)", false); return getDate(findColumn(columnLabel)); } @Override public Time getTime(String columnLabel) throws SQLException { logger.trace("Time getTime(String columnLabel)", false); return getTime(findColumn(columnLabel)); } @Override public Timestamp 
getTimestamp(String columnLabel) throws SQLException { logger.trace("Timestamp getTimestamp(String columnLabel)", false); return getTimestamp(findColumn(columnLabel)); } @Override public InputStream getAsciiStream(String columnLabel) throws SQLException { logger.trace("InputStream getAsciiStream(String columnLabel)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } /** * @deprecated */ @Deprecated @Override public InputStream getUnicodeStream(String columnLabel) throws SQLException { logger.trace("InputStream getUnicodeStream(String columnLabel)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public InputStream getBinaryStream(String columnLabel) throws SQLException { logger.trace("InputStream getBinaryStream(String columnLabel)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public SQLWarning getWarnings() throws SQLException { logger.trace("SQLWarning getWarnings()", false); raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public void clearWarnings() throws SQLException { logger.trace("void clearWarnings()", false); raiseSQLExceptionIfResultSetIsClosed(); } @Override public String getCursorName() throws SQLException { logger.trace("String getCursorName()", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public ResultSetMetaData getMetaData() throws SQLException { logger.trace("ResultSetMetaData getMetaData()", false); raiseSQLExceptionIfResultSetIsClosed(); return resultSetMetaData; } @Override public Object getObject(String columnLabel) throws SQLException { logger.trace("Object getObject(String columnLabel)", false); return getObject(findColumn(columnLabel)); } @Override public int findColumn(String columnLabel) throws SQLException { logger.trace("int findColumn(String columnLabel)", false); raiseSQLExceptionIfResultSetIsClosed(); int columnIndex = resultSetMetaData.getColumnIndex(columnLabel); if (columnIndex == -1) { throw new SQLException("Column not found: " + columnLabel, SqlState.UNDEFINED_COLUMN); } else { return ++columnIndex; } } @Override public Reader getCharacterStream(int columnIndex) throws SQLException { logger.trace("Reader getCharacterStream(int columnIndex)", false); raiseSQLExceptionIfResultSetIsClosed(); String streamData = getString(columnIndex); return (streamData == null) ? 
null : new StringReader(streamData); } @Override public Reader getCharacterStream(String columnLabel) throws SQLException { logger.trace("Reader getCharacterStream(String columnLabel)", false); return getCharacterStream(findColumn(columnLabel)); } @Override public BigDecimal getBigDecimal(String columnLabel) throws SQLException { logger.trace("BigDecimal getBigDecimal(String columnLabel)", false); return getBigDecimal(findColumn(columnLabel)); } @Override public void beforeFirst() throws SQLException { logger.trace("void beforeFirst()", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void afterLast() throws SQLException { logger.trace("void afterLast()", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public boolean first() throws SQLException { logger.trace("boolean first()", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public boolean last() throws SQLException { logger.trace("boolean last()", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public boolean absolute(int row) throws SQLException { logger.trace("boolean absolute(int row)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public boolean relative(int rows) throws SQLException { logger.trace("boolean relative(int rows)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public boolean previous() throws SQLException { logger.trace("boolean previous()", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public int getFetchDirection() throws SQLException { logger.trace("int getFetchDirection()", false); raiseSQLExceptionIfResultSetIsClosed(); return ResultSet.FETCH_FORWARD; } @Override public void setFetchDirection(int direction) throws SQLException { logger.trace("void setFetchDirection(int direction)", false); raiseSQLExceptionIfResultSetIsClosed(); if (direction != ResultSet.FETCH_FORWARD) { throw new SnowflakeLoggedFeatureNotSupportedException(session); } } @Override public int getFetchSize() throws SQLException { logger.trace("int getFetchSize()", false); raiseSQLExceptionIfResultSetIsClosed(); return this.fetchSize; } @Override public void setFetchSize(int rows) throws SQLException { logger.trace("void setFetchSize(int rows)", false); raiseSQLExceptionIfResultSetIsClosed(); this.fetchSize = rows; } @Override public int getType() throws SQLException { logger.trace("int getType()", false); raiseSQLExceptionIfResultSetIsClosed(); return resultSetType; } @Override public int getConcurrency() throws SQLException { logger.trace("int getConcurrency()", false); raiseSQLExceptionIfResultSetIsClosed(); return resultSetConcurrency; } @Override public boolean rowUpdated() throws SQLException { logger.trace("boolean rowUpdated()", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public boolean rowInserted() throws SQLException { logger.trace("boolean rowInserted()", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public boolean rowDeleted() throws SQLException { logger.trace("boolean rowDeleted()", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateNull(int columnIndex) throws SQLException { logger.trace("void updateNull(int columnIndex)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateBoolean(int columnIndex, 
boolean x) throws SQLException { logger.trace("void updateBoolean(int columnIndex, boolean x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateByte(int columnIndex, byte x) throws SQLException { logger.trace("void updateByte(int columnIndex, byte x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateShort(int columnIndex, short x) throws SQLException { logger.trace("void updateShort(int columnIndex, short x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateInt(int columnIndex, int x) throws SQLException { logger.trace("void updateInt(int columnIndex, int x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateLong(int columnIndex, long x) throws SQLException { logger.trace("void updateLong(int columnIndex, long x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateFloat(int columnIndex, float x) throws SQLException { logger.trace("void updateFloat(int columnIndex, float x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateDouble(int columnIndex, double x) throws SQLException { logger.trace("void updateDouble(int columnIndex, double x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { logger.trace("void updateBigDecimal(int columnIndex, BigDecimal x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateString(int columnIndex, String x) throws SQLException { logger.trace("void updateString(int columnIndex, String x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateBytes(int columnIndex, byte[] x) throws SQLException { logger.trace("void updateBytes(int columnIndex, byte[] x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateDate(int columnIndex, Date x) throws SQLException { logger.trace("void updateDate(int columnIndex, Date x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateTime(int columnIndex, Time x) throws SQLException { logger.trace("void updateTime(int columnIndex, Time x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { logger.trace("void updateTimestamp(int columnIndex, Timestamp x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { logger.trace( "public void updateAsciiStream(int columnIndex, " + "InputStream x, int length)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { logger.trace( "public void updateBinaryStream(int columnIndex, " + "InputStream x, int length)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { logger.trace( "public void updateCharacterStream(int columnIndex, " + "Reader x, int length)", false); throw new 
SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException { logger.trace( "public void updateObject(int columnIndex, Object x, " + "int scaleOrLength)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateObject(int columnIndex, Object x) throws SQLException { logger.trace("void updateObject(int columnIndex, Object x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateNull(String columnLabel) throws SQLException { logger.trace("void updateNull(String columnLabel)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateBoolean(String columnLabel, boolean x) throws SQLException { logger.trace("void updateBoolean(String columnLabel, boolean x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateByte(String columnLabel, byte x) throws SQLException { logger.trace("void updateByte(String columnLabel, byte x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateShort(String columnLabel, short x) throws SQLException { logger.trace("void updateShort(String columnLabel, short x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateInt(String columnLabel, int x) throws SQLException { logger.trace("void updateInt(String columnLabel, int x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateLong(String columnLabel, long x) throws SQLException { logger.trace("void updateLong(String columnLabel, long x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateFloat(String columnLabel, float x) throws SQLException { logger.trace("void updateFloat(String columnLabel, float x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateDouble(String columnLabel, double x) throws SQLException { logger.trace("void updateDouble(String columnLabel, double x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException { logger.trace("void updateBigDecimal(String columnLabel, " + "BigDecimal x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateString(String columnLabel, String x) throws SQLException { logger.trace("void updateString(String columnLabel, String x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateBytes(String columnLabel, byte[] x) throws SQLException { logger.trace("void updateBytes(String columnLabel, byte[] x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateDate(String columnLabel, Date x) throws SQLException { logger.trace("void updateDate(String columnLabel, Date x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateTime(String columnLabel, Time x) throws SQLException { logger.trace("void updateTime(String columnLabel, Time x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException { logger.trace("void updateTimestamp(String 
columnLabel, Timestamp x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException { logger.trace( "public void updateAsciiStream(String columnLabel, " + "InputStream x, int length)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateBinaryStream(String columnLabel, InputStream x, int length) throws SQLException { logger.trace( "public void updateBinaryStream(String columnLabel, " + "InputStream x, int length)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateCharacterStream(String columnLabel, Reader reader, int length) throws SQLException { logger.trace( "public void updateCharacterStream(String columnLabel, " + "Reader reader,int length)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException { logger.trace( "public void updateObject(String columnLabel, Object x, " + "int scaleOrLength)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateObject(String columnLabel, Object x) throws SQLException { logger.trace("void updateObject(String columnLabel, Object x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void insertRow() throws SQLException { logger.trace("void insertRow()", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateRow() throws SQLException { logger.trace("void updateRow()", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void deleteRow() throws SQLException { logger.trace("void deleteRow()", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void refreshRow() throws SQLException { logger.trace("void refreshRow()", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void cancelRowUpdates() throws SQLException { logger.trace("void cancelRowUpdates()", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void moveToInsertRow() throws SQLException { logger.trace("void moveToInsertRow()", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void moveToCurrentRow() throws SQLException { logger.trace("void moveToCurrentRow()", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public Statement getStatement() throws SQLException { logger.trace("Statement getStatement()", false); raiseSQLExceptionIfResultSetIsClosed(); return statement; } @Override public Object getObject(int columnIndex, Map> map) throws SQLException { logger.trace("Object getObject(int columnIndex, Map> map)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public Ref getRef(int columnIndex) throws SQLException { logger.trace("Ref getRef(int columnIndex)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public Blob getBlob(int columnIndex) throws SQLException { logger.trace("Blob getBlob(int columnIndex)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public Clob getClob(int columnIndex) throws SQLException { logger.trace("Clob getClob(int columnIndex)", false); String columnValue = 
getString(columnIndex); return columnValue == null ? null : new SnowflakeClob(columnValue); } @Override public Array getArray(int columnIndex) throws SQLException { logger.trace("Array getArray(int columnIndex)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public Object getObject(String columnLabel, Map> map) throws SQLException { logger.trace( "public Object getObject(String columnLabel, " + "Map> map)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public Ref getRef(String columnLabel) throws SQLException { logger.trace("Ref getRef(String columnLabel)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public Blob getBlob(String columnLabel) throws SQLException { logger.trace("Blob getBlob(String columnLabel)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public Clob getClob(String columnLabel) throws SQLException { logger.trace("Clob getClob(String columnLabel)", false); String columnValue = getString(columnLabel); return columnValue == null ? null : new SnowflakeClob(columnValue); } @Override public Array getArray(String columnLabel) throws SQLException { logger.trace("Array getArray(String columnLabel)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public Date getDate(int columnIndex, Calendar cal) throws SQLException { logger.trace("Date getDate(int columnIndex, Calendar cal)", false); return getDate(columnIndex, cal.getTimeZone()); } @Override public Date getDate(String columnLabel, Calendar cal) throws SQLException { logger.trace("Date getDate(String columnLabel, Calendar cal)", false); return getDate(findColumn(columnLabel), cal.getTimeZone()); } @Override public Time getTime(int columnIndex, Calendar cal) throws SQLException { logger.trace("Time getTime(int columnIndex, Calendar cal)", false); return getTime(columnIndex); } @Override public Time getTime(String columnLabel, Calendar cal) throws SQLException { logger.trace("Time getTime(String columnLabel, Calendar cal)", false); return getTime(columnLabel); } @Override public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { logger.trace("Timestamp getTimestamp(int columnIndex, Calendar cal)", false); return getTimestamp(columnIndex, cal.getTimeZone()); } @Override public Timestamp getTimestamp(String columnLabel, Calendar cal) throws SQLException { logger.trace("Timestamp getTimestamp(String columnLabel, " + "Calendar cal)", false); return getTimestamp(findColumn(columnLabel), cal.getTimeZone()); } @Override public URL getURL(int columnIndex) throws SQLException { logger.trace("URL getURL(int columnIndex)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public URL getURL(String columnLabel) throws SQLException { logger.trace("URL getURL(String columnLabel)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateRef(int columnIndex, Ref x) throws SQLException { logger.trace("void updateRef(int columnIndex, Ref x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateRef(String columnLabel, Ref x) throws SQLException { logger.trace("void updateRef(String columnLabel, Ref x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateBlob(int columnIndex, Blob x) throws SQLException { logger.trace("void updateBlob(int columnIndex, Blob x)", 
false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateBlob(String columnLabel, Blob x) throws SQLException { logger.trace("void updateBlob(String columnLabel, Blob x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateClob(int columnIndex, Clob x) throws SQLException { logger.trace("void updateClob(int columnIndex, Clob x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateClob(String columnLabel, Clob x) throws SQLException { logger.trace("void updateClob(String columnLabel, Clob x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateArray(int columnIndex, Array x) throws SQLException { logger.trace("void updateArray(int columnIndex, Array x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateArray(String columnLabel, Array x) throws SQLException { logger.trace("void updateArray(String columnLabel, Array x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public RowId getRowId(int columnIndex) throws SQLException { logger.trace("RowId getRowId(int columnIndex)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public RowId getRowId(String columnLabel) throws SQLException { logger.trace("RowId getRowId(String columnLabel)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateRowId(int columnIndex, RowId x) throws SQLException { logger.trace("void updateRowId(int columnIndex, RowId x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateRowId(String columnLabel, RowId x) throws SQLException { logger.trace("void updateRowId(String columnLabel, RowId x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public int getHoldability() throws SQLException { logger.trace("int getHoldability()", false); raiseSQLExceptionIfResultSetIsClosed(); return resultSetHoldability; } @Override public void updateNString(int columnIndex, String nString) throws SQLException { logger.trace("void updateNString(int columnIndex, String nString)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateNString(String columnLabel, String nString) throws SQLException { logger.trace("void updateNString(String columnLabel, String nString)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateNClob(int columnIndex, NClob nClob) throws SQLException { logger.trace("void updateNClob(int columnIndex, NClob nClob)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateNClob(String columnLabel, NClob nClob) throws SQLException { logger.trace("void updateNClob(String columnLabel, NClob nClob)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public NClob getNClob(int columnIndex) throws SQLException { logger.trace("NClob getNClob(int columnIndex)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public NClob getNClob(String columnLabel) throws SQLException { logger.trace("NClob getNClob(String columnLabel)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public SQLXML getSQLXML(int columnIndex) throws SQLException { 
logger.trace("SQLXML getSQLXML(int columnIndex)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public SQLXML getSQLXML(String columnLabel) throws SQLException { logger.trace("SQLXML getSQLXML(String columnLabel)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException { logger.trace("void updateSQLXML(int columnIndex, SQLXML xmlObject)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateSQLXML(String columnLabel, SQLXML xmlObject) throws SQLException { logger.trace("void updateSQLXML(String columnLabel, SQLXML xmlObject)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public String getNString(int columnIndex) throws SQLException { logger.trace("String getNString(int columnIndex)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public String getNString(String columnLabel) throws SQLException { logger.trace("String getNString(String columnLabel)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public Reader getNCharacterStream(int columnIndex) throws SQLException { logger.trace("Reader getNCharacterStream(int columnIndex)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public Reader getNCharacterStream(String columnLabel) throws SQLException { logger.trace("Reader getNCharacterStream(String columnLabel)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException { logger.trace( "public void updateNCharacterStream(int columnIndex, " + "Reader x, long length)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateNCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { logger.trace( "public void updateNCharacterStream(String columnLabel, " + "Reader reader,long length)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException { logger.trace( "public void updateAsciiStream(int columnIndex, " + "InputStream x, long length)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException { logger.trace( "public void updateBinaryStream(int columnIndex, " + "InputStream x, long length)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException { logger.trace( "public void updateCharacterStream(int columnIndex, Reader x, " + "long length)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateAsciiStream(String columnLabel, InputStream x, long length) throws SQLException { logger.trace( "public void updateAsciiStream(String columnLabel, " + "InputStream x, long length)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateBinaryStream(String columnLabel, InputStream x, long length) throws SQLException { logger.trace( "public void updateBinaryStream(String columnLabel, " + "InputStream x, long length)", 
false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { logger.trace( "public void updateCharacterStream(String columnLabel, " + "Reader reader,long length)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateBlob(int columnIndex, InputStream inputStream, long length) throws SQLException { logger.trace( "public void updateBlob(int columnIndex, InputStream " + "inputStream, long length)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateBlob(String columnLabel, InputStream inputStream, long length) throws SQLException { logger.trace( "public void updateBlob(String columnLabel, " + "InputStream inputStream,long length)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateClob(int columnIndex, Reader reader, long length) throws SQLException { logger.trace("void updateClob(int columnIndex, Reader reader, " + "long length)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateClob(String columnLabel, Reader reader, long length) throws SQLException { logger.trace( "public void updateClob(String columnLabel, Reader reader, " + "long length)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException { logger.trace( "public void updateNClob(int columnIndex, Reader reader, " + "long length)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateNClob(String columnLabel, Reader reader, long length) throws SQLException { logger.trace( "public void updateNClob(String columnLabel, Reader reader, " + "long length)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException { logger.trace("void updateNCharacterStream(int columnIndex, Reader x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateNCharacterStream(String columnLabel, Reader reader) throws SQLException { logger.trace( "public void updateNCharacterStream(String columnLabel, " + "Reader reader)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException { logger.trace("void updateAsciiStream(int columnIndex, InputStream x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException { logger.trace("void updateBinaryStream(int columnIndex, InputStream x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateCharacterStream(int columnIndex, Reader x) throws SQLException { logger.trace("void updateCharacterStream(int columnIndex, Reader x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException { logger.trace("void updateAsciiStream(String columnLabel, InputStream x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateBinaryStream(String columnLabel, InputStream x) throws 
SQLException { logger.trace("void updateBinaryStream(String columnLabel, InputStream x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateCharacterStream(String columnLabel, Reader reader) throws SQLException { logger.trace( "public void updateCharacterStream(String columnLabel, " + "Reader reader)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException { logger.trace("void updateBlob(int columnIndex, InputStream inputStream)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateBlob(String columnLabel, InputStream inputStream) throws SQLException { logger.trace("void updateBlob(String columnLabel, InputStream " + "inputStream)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateClob(int columnIndex, Reader reader) throws SQLException { logger.trace("void updateClob(int columnIndex, Reader reader)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateClob(String columnLabel, Reader reader) throws SQLException { logger.trace("void updateClob(String columnLabel, Reader reader)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateNClob(int columnIndex, Reader reader) throws SQLException { logger.trace("void updateNClob(int columnIndex, Reader reader)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void updateNClob(String columnLabel, Reader reader) throws SQLException { logger.trace("void updateNClob(String columnLabel, Reader reader)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public T getObject(int columnIndex, Class type) throws SQLException { logger.trace(" T getObject(int columnIndex,Class type)", false); if (resultSetMetaData.isStructuredTypeColumn(columnIndex)) { if (SQLData.class.isAssignableFrom(type)) { SQLInput sqlInput = SnowflakeUtil.mapSFExceptionToSQLException( () -> { StructObjectWrapper structObjectWrapper = (StructObjectWrapper) sfBaseResultSet.getObjectWithoutString(columnIndex); if (structObjectWrapper == null) { return null; } return (SQLInput) structObjectWrapper.getObject(); }); if (sqlInput == null) { return null; } else { SQLData instance = (SQLData) SQLDataCreationHelper.create(type); instance.readSQL(sqlInput, null); return (T) instance; } } else if (Map.class.isAssignableFrom(type)) { Object object = getObject(columnIndex); if (object == null) { return null; } else if (object instanceof Map) { throw new SQLException( "Arrow native struct couldn't be converted to String. 
To map to SqlData the method getObject(int columnIndex, Class type) should be used"); } else { try { return (T) OBJECT_MAPPER.readValue( (String) object, new TypeReference>() {}); } catch (JsonProcessingException e) { throw new SQLException("Value couldn't be converted to Map"); } } } } if (String.class.isAssignableFrom(type)) { return (T) getString(columnIndex); } else if (Boolean.class.isAssignableFrom(type)) { return (T) (Boolean) getBoolean(columnIndex); } else if (Byte.class.isAssignableFrom(type)) { return (T) (Byte) getByte(columnIndex); } else if (Short.class.isAssignableFrom(type)) { return (T) (Short) getShort(columnIndex); } else if (Integer.class.isAssignableFrom(type)) { return (T) (Integer) getInt(columnIndex); } else if (Long.class.isAssignableFrom(type)) { return (T) (Long) getLong(columnIndex); } else if (Float.class.isAssignableFrom(type)) { return (T) (Float) getFloat(columnIndex); } else if (Double.class.isAssignableFrom(type)) { return (T) (Double) getDouble(columnIndex); } else if (Date.class.isAssignableFrom(type)) { return (T) getDate(columnIndex); } else if (Time.class.isAssignableFrom(type)) { return (T) getTime(columnIndex); } else if (Timestamp.class.isAssignableFrom(type)) { return (T) getTimestamp(columnIndex); } else if (BigDecimal.class.isAssignableFrom(type)) { return (T) getBigDecimal(columnIndex); } else if (Period.class.isAssignableFrom(type)) { try { return (T) sfBaseResultSet.getPeriod(columnIndex); } catch (SFException e) { throw new SQLException( "Type passed to 'getObject(int columnIndex,Class type)' is unsupported. Type: " + type.getName()); } } else if (Duration.class.isAssignableFrom(type)) { try { return (T) sfBaseResultSet.getDuration(columnIndex); } catch (SFException e) { throw new SQLException( "Type passed to 'getObject(int columnIndex,Class type)' is unsupported. Type: " + type.getName()); } } else { logger.debug( "Unsupported type passed to getObject(int columnIndex,Class type): " + type.getName()); throw new SQLException( "Type passed to 'getObject(int columnIndex,Class type)' is unsupported. 
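  /*
   * A hedged usage sketch for getObject(int, Class<T>) above. The Address class, the query, and
   * the assumption that structured-type support is enabled on the session are all illustrative,
   * not taken from the repository:
   *
   *   public static class Address implements SQLData {
   *     public String city;
   *     public String street;
   *     private String sqlTypeName;
   *
   *     public String getSQLTypeName() { return sqlTypeName; }
   *
   *     public void readSQL(SQLInput stream, String typeName) throws SQLException {
   *       sqlTypeName = typeName;
   *       city = stream.readString();
   *       street = stream.readString();
   *     }
   *
   *     public void writeSQL(SQLOutput stream) throws SQLException {
   *       stream.writeString(city);
   *       stream.writeString(street);
   *     }
   *   }
   *
   *   try (ResultSet rs = statement.executeQuery(
   *       "SELECT {'city': 'Warsaw', 'street': 'Main'}::OBJECT(city VARCHAR, street VARCHAR)")) {
   *     rs.next();
   *     Address address = rs.getObject(1, Address.class); // SQLData path
   *     Map<String, Object> asMap = rs.getObject(1, Map.class); // JSON-backed Map path
   *   }
   */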
Type: " + type.getName()); } } public List getList(int columnIndex, Class type) throws SQLException { logger.trace(" List getList(int columnIndex, Class type)", false); if (!resultSetMetaData.isStructuredTypeColumn(columnIndex)) { throw new SnowflakeLoggedFeatureNotSupportedException(session); } T[] sqlInputs = getArray(columnIndex, type); return Arrays.asList(sqlInputs); } public T[] getArray(int columnIndex, Class type) throws SQLException { logger.trace(" T[] getArray(int columnIndex, Class type)", false); if (!resultSetMetaData.isStructuredTypeColumn(columnIndex)) { throw new SnowflakeLoggedFeatureNotSupportedException(session); } List fieldMetadataList = resultSetMetaData.getColumnFields(columnIndex); if (fieldMetadataList.size() != 1) { throw new SQLException("Wrong size of fields for array type " + fieldMetadataList.size()); } FieldMetadata fieldMetadata = fieldMetadataList.get(0); int columnSubType = fieldMetadata.getType(); int columnType = ColumnTypeHelper.getColumnType(fieldMetadata.getType(), session); int scale = fieldMetadata.getScale(); TimeZone tz = sfBaseResultSet.getSessionTimeZone(); Array array = getArray(columnIndex); if (array == null) { return null; } Object[] objects = (Object[]) array.getArray(); T[] arr = (T[]) java.lang.reflect.Array.newInstance(type, objects.length); int counter = 0; for (Object value : objects) { if (value == null) { arr[counter++] = null; } else if (type.isAssignableFrom(value.getClass())) { arr[counter++] = (T) value; } else { if (SQLData.class.isAssignableFrom(type)) { SQLData instance = (SQLData) SQLDataCreationHelper.create(type); SQLInput sqlInput = sfBaseResultSet.createSqlInputForColumn( value, objects.getClass(), columnIndex, session, fieldMetadata.getFields()); instance.readSQL(sqlInput, null); arr[counter++] = (T) instance; } else if (String.class.isAssignableFrom(type)) { arr[counter++] = mapSFExceptionToSQLException( () -> (T) sfBaseResultSet .getConverters() .getStringConverter() .getString(value, columnType, columnSubType, scale)); } else if (Boolean.class.isAssignableFrom(type)) { arr[counter++] = mapSFExceptionToSQLException( () -> (T) sfBaseResultSet .getConverters() .getBooleanConverter() .getBoolean(value, columnType)); } else if (Byte.class.isAssignableFrom(type)) { arr[counter++] = mapSFExceptionToSQLException( () -> (T) sfBaseResultSet .getConverters() .getBytesConverter() .getBytes(value, columnType, columnSubType, scale)); } else if (Short.class.isAssignableFrom(type)) { arr[counter++] = mapSFExceptionToSQLException( () -> (T) Short.valueOf( sfBaseResultSet .getConverters() .getNumberConverter() .getShort(value, columnType))); } else if (Integer.class.isAssignableFrom(type)) { arr[counter++] = mapSFExceptionToSQLException( () -> (T) Integer.valueOf( sfBaseResultSet .getConverters() .getNumberConverter() .getInt(value, columnType))); } else if (Long.class.isAssignableFrom(type)) { arr[counter++] = mapSFExceptionToSQLException( () -> (T) Long.valueOf( sfBaseResultSet .getConverters() .getNumberConverter() .getLong(value, columnType))); } else if (Float.class.isAssignableFrom(type)) { arr[counter++] = mapSFExceptionToSQLException( () -> (T) Float.valueOf( sfBaseResultSet .getConverters() .getNumberConverter() .getFloat(value, columnType))); } else if (Double.class.isAssignableFrom(type)) { arr[counter++] = mapSFExceptionToSQLException( () -> (T) Double.valueOf( sfBaseResultSet .getConverters() .getNumberConverter() .getDouble(value, columnType))); } else if (Date.class.isAssignableFrom(type)) { arr[counter++] = 
mapSFExceptionToSQLException( () -> (T) sfBaseResultSet .getConverters() .dateStringConverter(session) .convert((String) value)); } else if (Time.class.isAssignableFrom(type)) { arr[counter++] = mapSFExceptionToSQLException( () -> (T) sfBaseResultSet .getConverters() .timeFromStringConverter(session) .convert((String) value)); } else if (Timestamp.class.isAssignableFrom(type)) { mapSFExceptionToSQLException( () -> (T) sfBaseResultSet .getConverters() .timestampFromStringConverter( columnSubType, columnType, scale, session, null, tz) .convert((String) value)); } else if (BigDecimal.class.isAssignableFrom(type)) { arr[counter++] = (T) getBigDecimal(columnIndex); } else { logger.debug( "Unsupported type passed to getArray(int columnIndex, Class type): " + type.getName()); throw new SQLException( "Type passed to 'getObject(int columnIndex,Class type)' is unsupported. Type: " + type.getName()); } } } return arr; } public Map getMap(int columnIndex, Class type) throws SQLException { logger.trace(" Map getMap(int columnIndex, Class type)", false); if (!resultSetMetaData.isStructuredTypeColumn(columnIndex)) { throw new SnowflakeLoggedFeatureNotSupportedException(session); } List fieldMetadataList = resultSetMetaData.getColumnFields(columnIndex); if (fieldMetadataList.size() != 2) { throw new SQLException( "Wrong size of fields metadata for map type " + fieldMetadataList.size()); } FieldMetadata valueFieldMetadata = fieldMetadataList.get(1); int columnSubType = valueFieldMetadata.getType(); int columnType = ColumnTypeHelper.getColumnType(valueFieldMetadata.getType(), session); int scale = valueFieldMetadata.getScale(); TimeZone tz = sfBaseResultSet.getSessionTimeZone(); StructObjectWrapper structObjectWrapper = (StructObjectWrapper) SnowflakeUtil.mapSFExceptionToSQLException( () -> sfBaseResultSet.getObjectWithoutString(columnIndex)); if (structObjectWrapper == null || structObjectWrapper.getObject() == null) { return null; } Map map = mapSFExceptionToSQLException( () -> prepareMapWithValues(structObjectWrapper.getObject(), type)); Map resultMap = new HashMap<>(); for (Map.Entry entry : map.entrySet()) { if (SQLData.class.isAssignableFrom(type)) { SQLData instance = (SQLData) SQLDataCreationHelper.create(type); SQLInput sqlInput = sfBaseResultSet.createSqlInputForColumn( entry.getValue(), structObjectWrapper.getObject().getClass(), columnIndex, session, valueFieldMetadata.getFields()); instance.readSQL(sqlInput, null); resultMap.put(entry.getKey(), (T) instance); } else if (String.class.isAssignableFrom(type)) { resultMap.put( entry.getKey(), mapSFExceptionToSQLException( () -> (T) sfBaseResultSet .getConverters() .getStringConverter() .getString(entry.getValue(), columnType, columnSubType, scale))); } else if (Boolean.class.isAssignableFrom(type)) { resultMap.put( entry.getKey(), mapSFExceptionToSQLException( () -> (T) sfBaseResultSet .getConverters() .getBooleanConverter() .getBoolean(entry.getValue(), columnType))); } else if (Byte.class.isAssignableFrom(type)) { resultMap.put( entry.getKey(), mapSFExceptionToSQLException( () -> (T) sfBaseResultSet .getConverters() .getBytesConverter() .getBytes(entry.getValue(), columnType, columnSubType, scale))); } else if (Short.class.isAssignableFrom(type)) { resultMap.put( entry.getKey(), mapSFExceptionToSQLException( () -> (T) (Short) sfBaseResultSet .getConverters() .getNumberConverter() .getShort(entry.getValue(), columnType))); } else if (Integer.class.isAssignableFrom(type)) { resultMap.put( entry.getKey(), mapSFExceptionToSQLException( () -> (T) 
(Integer) sfBaseResultSet .getConverters() .getNumberConverter() .getInt(entry.getValue(), columnType))); } else if (Long.class.isAssignableFrom(type)) { resultMap.put( entry.getKey(), mapSFExceptionToSQLException( () -> (T) (Long) sfBaseResultSet .getConverters() .getNumberConverter() .getLong(entry.getValue(), columnType))); } else if (Float.class.isAssignableFrom(type)) { resultMap.put( entry.getKey(), mapSFExceptionToSQLException( () -> (T) (Float) sfBaseResultSet .getConverters() .getNumberConverter() .getFloat(entry.getValue(), columnType))); } else if (Double.class.isAssignableFrom(type)) { resultMap.put( entry.getKey(), mapSFExceptionToSQLException( () -> (T) (Double) sfBaseResultSet .getConverters() .getNumberConverter() .getDouble(entry.getValue(), columnType))); } else if (BigDecimal.class.isAssignableFrom(type)) { resultMap.put( entry.getKey(), mapSFExceptionToSQLException( () -> (T) sfBaseResultSet .getConverters() .getNumberConverter() .getBigDecimal(entry.getValue(), columnType))); } else if (Date.class.isAssignableFrom(type)) { resultMap.put( entry.getKey(), mapSFExceptionToSQLException( () -> (T) sfBaseResultSet.convertToDate(entry.getValue(), tz))); } else if (Time.class.isAssignableFrom(type)) { resultMap.put( entry.getKey(), mapSFExceptionToSQLException( () -> (T) sfBaseResultSet.convertToTime(entry.getValue(), scale))); } else if (Timestamp.class.isAssignableFrom(type)) { resultMap.put( entry.getKey(), mapSFExceptionToSQLException( () -> (T) sfBaseResultSet.convertToTimestamp( entry.getValue(), columnType, columnSubType, tz, scale))); } else { logger.debug( "Unsupported type passed to getObject(int columnIndex,Class type): " + type.getName()); throw new SQLException( "Type passed to 'getObject(int columnIndex,Class type)' is unsupported. Type: " + type.getName()); } } return resultMap; } @Override public T getObject(String columnLabel, Class type) throws SQLException { logger.trace(" T getObject(String columnLabel,Class type)", false); return getObject(findColumn(columnLabel), type); } @SuppressWarnings("unchecked") @Override public T unwrap(Class iface) throws SQLException { logger.trace(" T unwrap(Class iface)", false); if (!iface.isInstance(this)) { throw new SQLException( this.getClass().getName() + " not unwrappable from " + iface.getName()); } return (T) this; } @Override public boolean isWrapperFor(Class iface) throws SQLException { logger.trace("boolean isWrapperFor(Class iface)", false); return iface.isInstance(this); } private Map prepareMapWithValues(Object object, Class type) throws SFException { if (object instanceof JsonSqlInput) { Map map = new HashMap<>(); JsonNode jsonNode = ((JsonSqlInput) object).getInput(); for (Iterator it = jsonNode.fieldNames(); it.hasNext(); ) { String name = it.next(); map.put( name, SQLData.class.isAssignableFrom(type) ? 
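/*
 * A brief usage sketch for the getList/getMap extensions above, assuming columns of structured
 * types ARRAY(NUMBER) and MAP(VARCHAR, VARCHAR) and assuming the result set unwraps to this
 * implementation class; column positions and names are illustrative.
 *
 * SnowflakeBaseResultSet sfRs = rs.unwrap(SnowflakeBaseResultSet.class);
 * List<Long> ids = sfRs.getList(1, Long.class);             // ARRAY(NUMBER) column
 * Map<String, String> tags = sfRs.getMap(2, String.class);  // MAP(VARCHAR, VARCHAR) column
 */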
================================================
FILE: src/main/java/net/snowflake/client/internal/api/implementation/statement/SnowflakeCallableStatementImpl.java
================================================
package net.snowflake.client.internal.api.implementation.statement;

import static net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.internalCallMarker;

import java.io.InputStream;
import java.io.Reader;
import java.math.BigDecimal;
import java.net.URL;
import java.sql.Array;
import java.sql.Blob;
import java.sql.CallableStatement;
import java.sql.Clob;
import java.sql.Date;
import java.sql.NClob;
import java.sql.Ref;
import java.sql.RowId;
import java.sql.SQLException;
import java.sql.SQLXML;
import java.sql.Time;
import java.sql.Timestamp;
import java.util.Calendar;
import java.util.Map;
import net.snowflake.client.internal.api.implementation.connection.SnowflakeConnectionImpl;
import net.snowflake.client.internal.jdbc.SnowflakeLoggedFeatureNotSupportedException;
import net.snowflake.client.internal.log.SFLogger;
import net.snowflake.client.internal.log.SFLoggerFactory;

public final class SnowflakeCallableStatementImpl extends SnowflakePreparedStatementImpl
    implements CallableStatement {
  private static final SFLogger logger =
      SFLoggerFactory.getLogger(SnowflakeCallableStatementImpl.class);

  /**
   * Construct SnowflakeCallableStatementImpl
   *
   * @param connection connection object
   * @param sql sql
   * @param skipParsing true if the applications want to skip parsing to get metadata. false by
   *     default.
   * @param resultSetType result set type: ResultSet.TYPE_FORWARD_ONLY.
   * @param resultSetConcurrency result set concurrency: ResultSet.CONCUR_READ_ONLY.
   * @param resultSetHoldability result set holdability: ResultSet.CLOSE_CURSORS_AT_COMMIT
   * @throws SQLException if any SQL error occurs.
   */
  public SnowflakeCallableStatementImpl(
      SnowflakeConnectionImpl connection,
      String sql,
      boolean skipParsing,
      int resultSetType,
      int resultSetConcurrency,
      int resultSetHoldability)
      throws SQLException {
    super(
        connection,
        parseSqlEscapeSyntax(sql),
        skipParsing,
        resultSetType,
        resultSetConcurrency,
        resultSetHoldability);
  }

  /**
   * Helper function to remove curly brackets for CallableStatement procedure calls, since the GS
   * parser does not support escape syntax for curly brackets
   *
   * @param originalSql original SQL text, possibly with curly brackets
   * @return a string of SQL text with curly brackets removed
   */
  public static String parseSqlEscapeSyntax(String originalSql) {
    originalSql = originalSql.trim();
    if (originalSql.startsWith("{") && originalSql.endsWith("}")) {
      logger.debug("Curly brackets {} removed before sending sql to server.", false);
      return originalSql.substring(1, originalSql.length() - 1);
    }
    return originalSql;
  }
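  /*
   * A quick illustration of the transformation above (the procedure name is hypothetical): the
   * JDBC escape form "{call add_numbers(?, ?)}" is trimmed and sent to the server as
   * "call add_numbers(?, ?)"; SQL without surrounding braces passes through unchanged.
   *
   * CallableStatement cs = connection.prepareCall("{call add_numbers(?, ?)}");
   * cs.setInt(1, 1);
   * cs.setInt(2, 2);
   * try (ResultSet rs = cs.executeQuery()) {
   *   rs.next(); // the procedure's return value comes back as a result set
   * }
   */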
  /*
   * The Snowflake database does not accept OUT or INOUT parameters, so the registerOutParameter
   * functions and the get functions (which get values of OUT parameters) will remain not
   * implemented
   */
  @Override public void registerOutParameter(int parameterIndex, int sqlType) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public void registerOutParameter(int parameterIndex, int sqlType, int scale) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public void registerOutParameter(int parameterIndex, int sqlType, String typeName) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public void registerOutParameter(String parameterName, int sqlType) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public void registerOutParameter(String parameterName, int sqlType, int scale) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public void registerOutParameter(String parameterName, int sqlType, String typeName) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public boolean wasNull() throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public String getString(int parameterIndex) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public String getString(String parameterName) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public boolean getBoolean(int parameterIndex) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public boolean getBoolean(String parameterName) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public byte getByte(int parameterIndex) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public byte getByte(String parameterName) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public short getShort(int parameterIndex) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public short getShort(String parameterName) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public int getInt(int parameterIndex) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public int getInt(String parameterName) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public long getLong(int parameterIndex) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public long getLong(String parameterName) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public float getFloat(int parameterIndex) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public float getFloat(String parameterName) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public double getDouble(int parameterIndex) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public double getDouble(String parameterName) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override @Deprecated public BigDecimal getBigDecimal(int parameterIndex, int scale) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public BigDecimal getBigDecimal(String parameterName) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public BigDecimal getBigDecimal(int parameterIndex) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public byte[] getBytes(int parameterIndex) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public byte[] getBytes(String parameterName) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public Date getDate(int parameterIndex) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public Date getDate(String parameterName) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public Time getTime(int parameterIndex) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public Time getTime(String parameterName) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public Time getTime(String parameterName, Calendar cal) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public Timestamp getTimestamp(int parameterIndex) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public Timestamp getTimestamp(String parameterName) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public Timestamp getTimestamp(String parameterName, Calendar cal) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public Object getObject(int parameterIndex) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public Object getObject(int parameterIndex, Map<String, Class<?>> map) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public Object getObject(String parameterName) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public Object getObject(String parameterName, Map<String, Class<?>> map) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public Ref getRef(int parameterIndex) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public Ref getRef(String parameterName) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public Blob getBlob(int parameterIndex) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public Blob getBlob(String parameterName) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public Clob getClob(int parameterIndex) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public Clob getClob(String parameterName) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public Array getArray(int parameterIndex) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public Array getArray(String parameterName) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public Date getDate(int parameterIndex, Calendar cal) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public Date getDate(String parameterName, Calendar cal) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(
connection.getSFBaseSession(internalCallMarker())); } @Override public Time getTime(int parameterIndex, Calendar cal) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public Timestamp getTimestamp(int parameterIndex, Calendar cal) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public URL getURL(int parameterIndex) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public URL getURL(String parameterName) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public RowId getRowId(int parameterIndex) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public RowId getRowId(String parameterName) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public SQLXML getSQLXML(int parameterIndex) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public SQLXML getSQLXML(String parameterName) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public String getNString(int parameterIndex) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public String getNString(String parameterName) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public Reader getNCharacterStream(int parameterIndex) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public Reader getNCharacterStream(String parameterName) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public Reader getCharacterStream(int parameterIndex) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public Reader getCharacterStream(String parameterName) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } /* JDBC is not currently set up to store the parameter names, just the parameter index. Callable Statement can set parameters using the functions in PreparedStatement based on parameter index. 
   */
  @Override public void setSQLXML(String parameterName, SQLXML xmlObject) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public void setRowId(String parameterName, RowId x) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public void setNString(String parameterName, String value) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public void setNCharacterStream(String parameterName, Reader value) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public void setNCharacterStream(String parameterName, Reader value, long length) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public void setNClob(String parameterName, NClob value) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public void setClob(String parameterName, Reader reader, long length) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public void setClob(String parameterName, Reader reader) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public void setClob(String parameterName, Clob x) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public void setBlob(String parameterName, InputStream inputStream, long length) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public void setBlob(String parameterName, InputStream inputStream) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public void setNClob(String parameterName, Reader reader) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public <T> T getObject(int parameterIndex, Class<T> type) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public <T> T getObject(String parameterName, Class<T> type) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public void setBlob(String parameterName, Blob x) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public void setNClob(String parameterName, Reader reader, long length) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public NClob getNClob(int parameterIndex) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public NClob getNClob(String parameterName) throws SQLException { throw new
SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setURL(String parameterName, URL val) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setNull(String parameterName, int sqlType) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setBoolean(String parameterName, boolean x) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setByte(String parameterName, byte x) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setShort(String parameterName, short x) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setInt(String parameterName, int x) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setLong(String parameterName, long x) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setFloat(String parameterName, float x) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setDouble(String parameterName, double x) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setBigDecimal(String parameterName, BigDecimal x) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setString(String parameterName, String x) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setBytes(String parameterName, byte[] x) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setDate(String parameterName, Date x) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setTime(String parameterName, Time x) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setTimestamp(String parameterName, Timestamp x) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setAsciiStream(String parameterName, InputStream x) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setAsciiStream(String parameterName, InputStream x, int length) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setAsciiStream(String parameterName, InputStream x, long length) throws SQLException { throw new 
SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public void setBinaryStream(String parameterName, InputStream x) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public void setBinaryStream(String parameterName, InputStream x, int length) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public void setBinaryStream(String parameterName, InputStream x, long length) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public void setObject(String parameterName, Object x, int targetSqlType, int scale) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public void setObject(String parameterName, Object x, int targetSqlType) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public void setObject(String parameterName, Object x) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public void setCharacterStream(String parameterName, Reader reader, int length) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public void setCharacterStream(String parameterName, Reader reader, long length) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public void setCharacterStream(String parameterName, Reader reader) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public void setDate(String parameterName, Date x, Calendar cal) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public void setTime(String parameterName, Time x, Calendar cal) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public void setTimestamp(String parameterName, Timestamp x, Calendar cal) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
  @Override public void setNull(String parameterName, int sqlType, String typeName) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(connection.getSFBaseSession(internalCallMarker())); }
}
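/*
 * A short sketch of the unsupported-parameter behavior above: Snowflake has no OUT/INOUT
 * parameters, so registering one fails fast and the procedure's return value must be read from
 * the result set instead (the procedure name is illustrative).
 *
 * CallableStatement cs = connection.prepareCall("{call area_of_circle(?)}");
 * try {
 *   cs.registerOutParameter(1, Types.DOUBLE); // throws a feature-not-supported SQLException
 * } catch (SQLException e) {
 *   // expected: bind inputs by index and read the return value via executeQuery() instead
 * }
 */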
================================================
FILE: src/main/java/net/snowflake/client/internal/api/implementation/statement/SnowflakePreparedStatementImpl.java
================================================
package net.snowflake.client.internal.api.implementation.statement;

import static net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.internalCallMarker;

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.InputStream;
import java.io.Reader;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.net.URL;
import java.sql.Array;
import java.sql.Blob;
import java.sql.Clob;
import java.sql.Date;
import java.sql.NClob;
import java.sql.ParameterMetaData;
import java.sql.PreparedStatement;
import java.sql.Ref;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.RowId;
import java.sql.SQLData;
import java.sql.SQLException;
import java.sql.SQLXML;
import java.sql.Time;
import java.sql.Timestamp;
import java.sql.Types;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TimeZone;
import net.snowflake.client.api.exception.ErrorCode;
import net.snowflake.client.api.exception.SnowflakeSQLException;
import net.snowflake.client.api.resultset.SnowflakeType;
import net.snowflake.client.api.statement.SnowflakePreparedStatement;
import net.snowflake.client.internal.api.implementation.connection.SnowflakeConnectionImpl;
import net.snowflake.client.internal.common.core.SFBinary;
import net.snowflake.client.internal.core.FieldSchemaCreator;
import net.snowflake.client.internal.core.JsonSqlOutput;
import net.snowflake.client.internal.core.ObjectMapperFactory;
import net.snowflake.client.internal.core.ParameterBindingDTO;
import net.snowflake.client.internal.core.ResultUtil;
import net.snowflake.client.internal.core.SFBaseResultSet;
import net.snowflake.client.internal.core.SFException;
import net.snowflake.client.internal.core.SFPreparedStatementMetaData;
import net.snowflake.client.internal.core.SfSqlArray;
import net.snowflake.client.internal.core.SfTimestampUtil;
import net.snowflake.client.internal.core.StmtUtil;
import net.snowflake.client.internal.exception.SnowflakeSQLLoggedException;
import net.snowflake.client.internal.jdbc.BindingParameterMetadata;
import net.snowflake.client.internal.jdbc.SnowflakeLoggedFeatureNotSupportedException;
import net.snowflake.client.internal.jdbc.SnowflakeParameterMetadata;
import net.snowflake.client.internal.jdbc.SnowflakeResultSetMetaDataV1;
import net.snowflake.client.internal.jdbc.SnowflakeUtil;
import net.snowflake.client.internal.jdbc.telemetry.ExecTimeTelemetryData;
import net.snowflake.client.internal.jdbc.util.SnowflakeTypeHelper;
import net.snowflake.client.internal.jdbc.util.SnowflakeTypeUtil;
import net.snowflake.client.internal.log.SFLogger;
import net.snowflake.client.internal.log.SFLoggerFactory;
import net.snowflake.client.internal.util.VariableTypeArray;
import net.snowflake.common.core.SqlState;

public class SnowflakePreparedStatementImpl extends SnowflakeStatementImpl
    implements PreparedStatement, SnowflakePreparedStatement {
  private static final SFLogger logger =
      SFLoggerFactory.getLogger(SnowflakePreparedStatementImpl.class);

  /** Error code returned when describing a statement that is binding table name */
  private static final Integer ERROR_CODE_TABLE_BIND_VARIABLE_NOT_SET = 2128;
  /** Error code when preparing statement with binding object names */
  private static final Integer ERROR_CODE_OBJECT_BIND_NOT_SET = 2129;
  /** Error code returned when describing a ddl command */
  private static final Integer ERROR_CODE_STATEMENT_CANNOT_BE_PREPARED = 7;
  /** snow-44393 Workaround: the compiler cannot prepare to_timestamp(?, 3) */
  private static final Integer ERROR_CODE_FORMAT_ARGUMENT_NOT_STRING = 1026;

  /** A hash set of the error codes that will not lead to an exception in describe mode */
  private static final Set<Integer> errorCodesIgnoredInDescribeMode =
      new HashSet<>(
          Arrays.asList(
              ERROR_CODE_TABLE_BIND_VARIABLE_NOT_SET,
              ERROR_CODE_STATEMENT_CANNOT_BE_PREPARED,
              ERROR_CODE_OBJECT_BIND_NOT_SET,
              ERROR_CODE_FORMAT_ARGUMENT_NOT_STRING));
  private final String sql;

  /** statement and result metadata from describe phase */
  private SFPreparedStatementMetaData preparedStatementMetaData;

  private boolean showStatementParameters;

  /**
   * map of bind name to bind values for single query execution
   *
   * <p>Currently, bind name is just value index
   */
  private Map<String, ParameterBindingDTO> parameterBindings = new HashMap<>();

  /** map of bind values for batch query executions */
  private Map<String, ParameterBindingDTO> batchParameterBindings = new HashMap<>();

  private Map<String, Boolean> wasPrevValueNull = new HashMap<>();

  /** Counter for batch size if we are executing a statement with array bind supported */
  private int batchSize = 0;

  private boolean alreadyDescribed = false;
  private final ObjectMapper objectMapper;

  /**
   * Construct SnowflakePreparedStatementImpl
   *
   * @param connection connection object
   * @param sql sql
   * @param skipParsing true if the applications want to skip parsing to get metadata. false by
   *     default.
   * @param resultSetType result set type: ResultSet.TYPE_FORWARD_ONLY.
   * @param resultSetConcurrency result set concurrency: ResultSet.CONCUR_READ_ONLY.
   * @param resultSetHoldability result set holdability: ResultSet.CLOSE_CURSORS_AT_COMMIT
   * @throws SQLException if any SQL error occurs.
   */
  public SnowflakePreparedStatementImpl(
      SnowflakeConnectionImpl connection,
      String sql,
      boolean skipParsing,
      int resultSetType,
      int resultSetConcurrency,
      int resultSetHoldability)
      throws SQLException {
    super(connection, resultSetType, resultSetConcurrency, resultSetHoldability);
    this.sql = sql;
    this.preparedStatementMetaData = SFPreparedStatementMetaData.emptyMetaData();
    showStatementParameters = connection.getShowStatementParameters();
    objectMapper =
        ObjectMapperFactory.getObjectMapperForSession(
            connection.getSFBaseSession(internalCallMarker()));
  }

  /**
   * This method checks the alreadyDescribed flag. If it is false, it issues a describe request to
   * the server; if true, it skips the describe request.
   *
   * @throws SQLException if the describe request fails
   */
  private void describeSqlIfNotTried() throws SQLException {
    if (!alreadyDescribed) {
      try {
        this.preparedStatementMetaData = sfBaseStatement.describe(sql);
        if (preparedStatementMetaData != null
            && !preparedStatementMetaData.isArrayBindSupported()) {
          logger.debug(
              "Array bind is not supported - each batch entry will be executed as a single request for query: {}",
              sql);
        }
      } catch (SFException e) {
        throw new SnowflakeSQLLoggedException(connection.getSFBaseSession(internalCallMarker()), e);
      } catch (SnowflakeSQLException e) {
        if (!errorCodesIgnoredInDescribeMode.contains(e.getErrorCode())) {
          throw e;
        } else {
          preparedStatementMetaData = SFPreparedStatementMetaData.emptyMetaData();
        }
      }
      alreadyDescribed = true;
    }
  }

  @Override
  public ResultSet executeQuery() throws SQLException {
    ExecTimeTelemetryData execTimeData =
        new ExecTimeTelemetryData("ResultSet PreparedStatement.executeQuery(String)", this.batchID);
    if (showStatementParameters) {
      logger.info("executeQuery()", false);
    } else {
      logger.trace("executeQuery()", false);
    }
    ResultSet rs = executeQueryInternal(sql, false, parameterBindings, execTimeData);
    execTimeData.setQueryEnd();
    execTimeData.generateTelemetry();
    logger.debug("Query completed. {}", execTimeData);
    return rs;
  }

  /**
   * Execute a query asynchronously
   *
   * @return ResultSet containing results
   * @throws SQLException if any SQL error occurs
   */
  public ResultSet executeAsyncQuery() throws SQLException {
    ExecTimeTelemetryData execTimeData =
        new ExecTimeTelemetryData(
            "ResultSet PreparedStatement.executeAsyncQuery(String)", this.batchID);
    if (showStatementParameters) {
      logger.info("executeAsyncQuery()", false);
    } else {
      logger.trace("executeAsyncQuery()", false);
    }
    ResultSet rs = executeQueryInternal(sql, true, parameterBindings, execTimeData);
    execTimeData.setQueryEnd();
    execTimeData.generateTelemetry();
    logger.debug("Query completed. {}", execTimeData);
    return rs;
  }
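  /*
   * A minimal async-execution sketch for executeAsyncQuery above; the unwrap pattern is the
   * documented way to reach this non-JDBC method, and the query text is illustrative.
   *
   * PreparedStatement ps = connection.prepareStatement("SELECT count(*) FROM big_table");
   * ResultSet rs = ps.unwrap(SnowflakePreparedStatement.class).executeAsyncQuery();
   * QueryStatus status = rs.unwrap(SnowflakeResultSet.class).getStatus();
   * while (status == QueryStatus.RUNNING || status == QueryStatus.RESUMING_WAREHOUSE) {
   *   Thread.sleep(1000); // handle InterruptedException in real code
   *   status = rs.unwrap(SnowflakeResultSet.class).getStatus();
   * }
   * rs.next(); // blocks until results are ready if the query is still running
   */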
{}", execTimeData); return rs; } @Override public long executeLargeUpdate() throws SQLException { ExecTimeTelemetryData execTimeTelemetryData = new ExecTimeTelemetryData("long PreparedStatement.executeLargeUpdate()", this.batchID); logger.trace("executeLargeUpdate()", false); long updates = executeUpdateInternal(sql, parameterBindings, true, execTimeTelemetryData); execTimeTelemetryData.setQueryEnd(); execTimeTelemetryData.generateTelemetry(); logger.debug("Query completed. {}", execTimeTelemetryData); return updates; } @Override public int executeUpdate() throws SQLException { logger.trace("executeUpdate()", false); return (int) executeLargeUpdate(); } @Override public void setNull(int parameterIndex, int sqlType) throws SQLException { logger.trace( "setNull(parameterIndex: {}, sqlType: {})", parameterIndex, SnowflakeTypeHelper.JavaSQLType.find(sqlType)); raiseSQLExceptionIfStatementIsClosed(); ParameterBindingDTO binding = new ParameterBindingDTO(SnowflakeType.ANY.toString(), null); parameterBindings.put(String.valueOf(parameterIndex), binding); } @Override public void setBoolean(int parameterIndex, boolean x) throws SQLException { logger.trace("setBoolean(parameterIndex: {}, boolean x)", parameterIndex); ParameterBindingDTO binding = new ParameterBindingDTO( SnowflakeUtil.javaTypeToSFTypeString( Types.BOOLEAN, connection.getSFBaseSession(internalCallMarker())), String.valueOf(x)); parameterBindings.put(String.valueOf(parameterIndex), binding); } @Override public void setByte(int parameterIndex, byte x) throws SQLException { logger.trace("setByte(parameterIndex: {}, byte x)", parameterIndex); ParameterBindingDTO binding = new ParameterBindingDTO( SnowflakeUtil.javaTypeToSFTypeString( Types.TINYINT, connection.getSFBaseSession(internalCallMarker())), String.valueOf(x)); parameterBindings.put(String.valueOf(parameterIndex), binding); } @Override public void setShort(int parameterIndex, short x) throws SQLException { logger.trace("setShort(parameterIndex: {}, short x)", parameterIndex); ParameterBindingDTO binding = new ParameterBindingDTO( SnowflakeUtil.javaTypeToSFTypeString( Types.SMALLINT, connection.getSFBaseSession(internalCallMarker())), String.valueOf(x)); parameterBindings.put(String.valueOf(parameterIndex), binding); } @Override public void setInt(int parameterIndex, int x) throws SQLException { logger.trace("setInt(parameterIndex: {}, int x)", parameterIndex); ParameterBindingDTO binding = new ParameterBindingDTO( SnowflakeUtil.javaTypeToSFTypeString( Types.INTEGER, connection.getSFBaseSession(internalCallMarker())), String.valueOf(x)); parameterBindings.put(String.valueOf(parameterIndex), binding); } @Override public void setLong(int parameterIndex, long x) throws SQLException { logger.trace("setLong(parameterIndex: {}, long x)", parameterIndex); ParameterBindingDTO binding = new ParameterBindingDTO( SnowflakeUtil.javaTypeToSFTypeString( Types.BIGINT, connection.getSFBaseSession(internalCallMarker())), String.valueOf(x)); parameterBindings.put(String.valueOf(parameterIndex), binding); } @Override public void setBigInteger(int parameterIndex, BigInteger x) throws SQLException { logger.trace("setBigInteger(parameterIndex: {}, BigInteger x)", parameterIndex); ParameterBindingDTO binding = new ParameterBindingDTO( SnowflakeUtil.javaTypeToSFTypeString( Types.BIGINT, connection.getSFBaseSession(internalCallMarker())), String.valueOf(x)); parameterBindings.put(String.valueOf(parameterIndex), binding); } @Override public void setFloat(int parameterIndex, float x) throws SQLException { 
logger.trace("setFloat(parameterIndex: {}, float x)", parameterIndex); ParameterBindingDTO binding = new ParameterBindingDTO( SnowflakeUtil.javaTypeToSFTypeString( Types.FLOAT, connection.getSFBaseSession(internalCallMarker())), String.valueOf(x)); parameterBindings.put(String.valueOf(parameterIndex), binding); } @Override public void setDouble(int parameterIndex, double x) throws SQLException { logger.trace("setDouble(parameterIndex: {}, double x)", parameterIndex); ParameterBindingDTO binding = new ParameterBindingDTO( SnowflakeUtil.javaTypeToSFTypeString( Types.DOUBLE, connection.getSFBaseSession(internalCallMarker())), String.valueOf(x)); parameterBindings.put(String.valueOf(parameterIndex), binding); } @Override public void setBigDecimal(int parameterIndex, BigDecimal x) throws SQLException { logger.trace("setBigDecimal(parameterIndex: {}, BigDecimal x)", parameterIndex); if (x == null) { setNull(parameterIndex, Types.DECIMAL); } else { ParameterBindingDTO binding = new ParameterBindingDTO( SnowflakeUtil.javaTypeToSFTypeString( Types.DECIMAL, connection.getSFBaseSession(internalCallMarker())), String.valueOf(x)); parameterBindings.put(String.valueOf(parameterIndex), binding); } } @Override public void setString(int parameterIndex, String x) throws SQLException { logger.trace("setString(parameterIndex: {}, String x)", parameterIndex); ParameterBindingDTO binding = new ParameterBindingDTO( SnowflakeUtil.javaTypeToSFTypeString( Types.VARCHAR, connection.getSFBaseSession(internalCallMarker())), x); parameterBindings.put(String.valueOf(parameterIndex), binding); } @Override public void setBytes(int parameterIndex, byte[] x) throws SQLException { logger.trace("setBytes(parameterIndex: {}, byte[] x)", parameterIndex); ParameterBindingDTO binding = new ParameterBindingDTO( SnowflakeUtil.javaTypeToSFTypeString( Types.BINARY, connection.getSFBaseSession(internalCallMarker())), new SFBinary(x).toHex()); parameterBindings.put(String.valueOf(parameterIndex), binding); } private void setObjectInternal(int parameterIndex, SQLData sqlData) throws SQLException { logger.debug("setObjectInternal(parameterIndex: {}, SqlData sqlData)", parameterIndex); JsonSqlOutput stream = new JsonSqlOutput(sqlData, connection.getSFBaseSession(internalCallMarker())); sqlData.writeSQL(stream); ParameterBindingDTO binding = new ParameterBindingDTO( "json", SnowflakeUtil.javaTypeToSFTypeString( Types.STRUCT, connection.getSFBaseSession(internalCallMarker())), stream.getJsonString(), stream.getSchema()); parameterBindings.put(String.valueOf(parameterIndex), binding); } @Override public void setDate(int parameterIndex, Date x) throws SQLException { logger.trace("setDate(parameterIndex: {}, Date x)", parameterIndex); if (x == null) { setNull(parameterIndex, Types.DATE); } else { ParameterBindingDTO binding = new ParameterBindingDTO( SnowflakeUtil.javaTypeToSFTypeString( Types.DATE, connection.getSFBaseSession(internalCallMarker())), String.valueOf( x.getTime() + TimeZone.getDefault().getOffset(x.getTime()) - ResultUtil.msDiffJulianToGregorian(x))); parameterBindings.put(String.valueOf(parameterIndex), binding); } } @Override public void setTime(int parameterIndex, Time x) throws SQLException { logger.trace("setTime(parameterIndex: {}, Time x)", parameterIndex); if (x == null) { setNull(parameterIndex, Types.TIME); } else { String value; if (connection.getSFBaseSession(internalCallMarker()).getTreatTimeAsWallClockTime()) { value = String.valueOf(x.toLocalTime().toNanoOfDay()); } else { value = 
String.valueOf(SfTimestampUtil.getTimeInNanoseconds(x)); } ParameterBindingDTO binding = new ParameterBindingDTO( SnowflakeUtil.javaTypeToSFTypeString( Types.TIME, connection.getSFBaseSession(internalCallMarker())), value); parameterBindings.put(String.valueOf(parameterIndex), binding); } } @Override public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException { logger.trace("setTimestamp(parameterIndex: {}, Timestamp x)", parameterIndex); setTimestampWithType(parameterIndex, x, Types.TIMESTAMP); } private void setTimestampWithType(int parameterIndex, Timestamp x, int snowflakeType) throws SQLException { // convert the timestamp from being in local time zone to be in UTC timezone String value = x == null ? null : String.valueOf( BigDecimal.valueOf((x.getTime() - ResultUtil.msDiffJulianToGregorian(x)) / 1000) .scaleByPowerOfTen(9) .add(BigDecimal.valueOf(x.getNanos()))); String bindingTypeName; switch (snowflakeType) { case SnowflakeType.EXTRA_TYPES_TIMESTAMP_LTZ: bindingTypeName = SnowflakeType.TIMESTAMP_LTZ.name(); break; case SnowflakeType.EXTRA_TYPES_TIMESTAMP_NTZ: bindingTypeName = SnowflakeType.TIMESTAMP_NTZ.name(); break; default: bindingTypeName = connection.getSFBaseSession(internalCallMarker()).getTimestampMappedType().name(); break; } ParameterBindingDTO binding = new ParameterBindingDTO(bindingTypeName, value); parameterBindings.put(String.valueOf(parameterIndex), binding); } @Override public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override @Deprecated public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void clearParameters() throws SQLException { parameterBindings.clear(); } @Override public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException { if (x == null) { setNull(parameterIndex, targetSqlType); } else if (targetSqlType == Types.DATE) { setDate(parameterIndex, (Date) x); } else if (targetSqlType == Types.TIME) { setTime(parameterIndex, (Time) x); } else if (targetSqlType == Types.TIMESTAMP) { setTimestamp(parameterIndex, (Timestamp) x); } else if (targetSqlType == SnowflakeType.EXTRA_TYPES_TIMESTAMP_LTZ || targetSqlType == SnowflakeType.EXTRA_TYPES_TIMESTAMP_NTZ) { setTimestampWithType(parameterIndex, (Timestamp) x, targetSqlType); } else if (targetSqlType == SnowflakeType.EXTRA_TYPES_DECFLOAT) { setDecfloat(parameterIndex, (BigDecimal) x); } else if (targetSqlType == SnowflakeType.EXTRA_TYPES_YEAR_MONTH_INTERVAL) { setYearMonthInterval(parameterIndex, (String) x); } else if (targetSqlType == SnowflakeType.EXTRA_TYPES_DAY_TIME_INTERVAL) { setDayTimeInterval(parameterIndex, (String) x); } else { logger.trace( "setObject(parameterIndex: {}, Object x, sqlType: {})", parameterIndex, SnowflakeTypeHelper.JavaSQLType.find(targetSqlType)); ParameterBindingDTO binding = new ParameterBindingDTO( SnowflakeUtil.javaTypeToSFTypeString( targetSqlType, connection.getSFBaseSession(internalCallMarker())), String.valueOf(x)); parameterBindings.put(String.valueOf(parameterIndex), binding); } } private void 
setYearMonthInterval(int parameterIndex, String x) throws SQLException { logger.trace("setYearMonthInterval(parameterIndex: {}, String x)", parameterIndex); if (x == null) { setNull(parameterIndex, SnowflakeType.EXTRA_TYPES_YEAR_MONTH_INTERVAL); } else { ParameterBindingDTO binding = new ParameterBindingDTO(SnowflakeType.INTERVAL_YEAR_MONTH.name(), x); parameterBindings.put(String.valueOf(parameterIndex), binding); } } private void setDayTimeInterval(int parameterIndex, String x) throws SQLException { logger.trace("setDayTimeInterval(parameterIndex: {}, String x)", parameterIndex); if (x == null) { setNull(parameterIndex, SnowflakeType.EXTRA_TYPES_DAY_TIME_INTERVAL); } else { ParameterBindingDTO binding = new ParameterBindingDTO(SnowflakeType.INTERVAL_DAY_TIME.name(), x); parameterBindings.put(String.valueOf(parameterIndex), binding); } } private void setDecfloat(int parameterIndex, BigDecimal x) throws SQLException { logger.trace("setDecfloat(parameterIndex: {}, BigDecimal x)", parameterIndex); if (x == null) { setNull(parameterIndex, SnowflakeType.EXTRA_TYPES_DECFLOAT); } else { ParameterBindingDTO binding = new ParameterBindingDTO(SnowflakeType.DECFLOAT.name(), String.valueOf(x)); parameterBindings.put(String.valueOf(parameterIndex), binding); } } @Override public void setObject(int parameterIndex, Object x) throws SQLException { if (x == null) { setNull(parameterIndex, Types.NULL); } else if (x instanceof String) { setString(parameterIndex, (String) x); } else if (x instanceof BigDecimal) { setBigDecimal(parameterIndex, (BigDecimal) x); } else if (x instanceof Short) { setShort(parameterIndex, (Short) x); } else if (x instanceof Integer) { setInt(parameterIndex, (Integer) x); } else if (x instanceof Long) { setLong(parameterIndex, (Long) x); } else if (x instanceof BigInteger) { setBigInteger(parameterIndex, (BigInteger) x); } else if (x instanceof Float) { setFloat(parameterIndex, (Float) x); } else if (x instanceof Double) { setDouble(parameterIndex, (Double) x); } else if (x instanceof Date) { setDate(parameterIndex, (Date) x); } else if (x instanceof Time) { setTime(parameterIndex, (Time) x); } else if (x instanceof Timestamp) { setTimestamp(parameterIndex, (Timestamp) x); } else if (x instanceof Boolean) { setBoolean(parameterIndex, (Boolean) x); } else if (x instanceof byte[]) { setBytes(parameterIndex, (byte[]) x); } else if (x instanceof SQLData) { setObjectInternal(parameterIndex, (SQLData) x); } else { throw new SnowflakeSQLLoggedException( connection.getSFBaseSession(internalCallMarker()), ErrorCode.DATA_TYPE_NOT_SUPPORTED.getMessageCode(), SqlState.FEATURE_NOT_SUPPORTED, "Object type: " + x.getClass()); } } @Override public boolean execute() throws SQLException { ExecTimeTelemetryData execTimeData = new ExecTimeTelemetryData("boolean PreparedStatement.execute(String)", this.batchID); logger.debug("Execute: {}", sql); boolean success = executeInternal(sql, parameterBindings, execTimeData); execTimeData.setQueryEnd(); execTimeData.generateTelemetry(); logger.debug("Query completed. 
{}", execTimeData); return success; } @Override public void addBatch() throws SQLException { logger.trace("addBatch()", false); raiseSQLExceptionIfStatementIsClosed(); describeSqlIfNotTried(); if (preparedStatementMetaData.isArrayBindSupported()) { for (Map.Entry binding : parameterBindings.entrySet()) { // get the entry for the bind variable in the batch binding map ParameterBindingDTO bindingValueAndType = batchParameterBindings.get(binding.getKey()); List values; Object newValue = binding.getValue().getValue(); // create binding value and type for the first time if (bindingValueAndType == null) { // create the value list values = new ArrayList<>(); bindingValueAndType = new ParameterBindingDTO(binding.getValue().getType(), values); // put the new map into the batch batchParameterBindings.put(binding.getKey(), bindingValueAndType); wasPrevValueNull.put(binding.getKey(), binding.getValue().getValue() == null); } else { // make sure type matches except for null values String prevType = bindingValueAndType.getType(); String newType = binding.getValue().getType(); if (wasPrevValueNull.get(binding.getKey()) && newValue != null) { // if previous value is null and the current value is not null // override the data type. bindingValueAndType = batchParameterBindings.remove(binding.getKey()); bindingValueAndType.setType(newType); batchParameterBindings.put(binding.getKey(), bindingValueAndType); prevType = newType; wasPrevValueNull.put(binding.getKey(), false); } // if previous type is null, replace it with new type if (SnowflakeType.ANY.name().equalsIgnoreCase(prevType) && !SnowflakeType.ANY.name().equalsIgnoreCase(newType)) { bindingValueAndType.setType(newType); } else if (binding.getValue().getValue() != null && !prevType.equalsIgnoreCase(newType)) { String row = "Unknown"; if (bindingValueAndType.getValue() instanceof Collection) { final List typeCheckedList = (List) bindingValueAndType.getValue(); values = typeCheckedList; row = Integer.toString(values.size() + 1); } throw new SnowflakeSQLLoggedException( connection.getSFBaseSession(internalCallMarker()), ErrorCode.ARRAY_BIND_MIXED_TYPES_NOT_SUPPORTED.getMessageCode(), SqlState.FEATURE_NOT_SUPPORTED, SnowflakeTypeUtil.getJavaType(SnowflakeTypeUtil.fromString(prevType), false).name(), SnowflakeTypeUtil.getJavaType(SnowflakeTypeUtil.fromString(newType), false).name(), binding.getKey(), row); } // found the existing map so just get the value list final List typeCheckedList = (List) bindingValueAndType.getValue(); values = typeCheckedList; } // add the value to the list of values in batch binding map values.add((String) newValue); bindingValueAndType.setValue(values); } batchSize++; } else { batch.add(new BatchEntry(this.sql, parameterBindings)); parameterBindings = new HashMap<>(); } } @Override public void setCharacterStream(int parameterIndex, Reader reader, int length) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setRef(int parameterIndex, Ref x) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setBlob(int parameterIndex, Blob x) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setClob(int parameterIndex, Clob x) throws SQLException { setString(parameterIndex, x == null ? 
null : x.toString()); } @Override public void setArray(int parameterIndex, Array array) throws SQLException { if (array instanceof SfSqlArray) { SfSqlArray sfArray = (SfSqlArray) array; ParameterBindingDTO binding = new ParameterBindingDTO( "json", SnowflakeUtil.javaTypeToSFTypeString( Types.ARRAY, connection.getSFBaseSession(internalCallMarker())), sfArray.getJsonString(), sfArray.getSchema()); parameterBindings.put(String.valueOf(parameterIndex), binding); } else { SfSqlArray sfArray = new SfSqlArray( Types.INTEGER, array, connection.getSFBaseSession(internalCallMarker()), objectMapper); ParameterBindingDTO binding = new ParameterBindingDTO( "json", SnowflakeUtil.javaTypeToSFTypeString( Types.ARRAY, connection.getSFBaseSession(internalCallMarker())), sfArray.getJsonString(), sfArray.getSchema()); parameterBindings.put(String.valueOf(parameterIndex), binding); } } @Override public void setMap(int parameterIndex, Map map, int type) throws SQLException { BindingParameterMetadata valueTypeSchema; if (Types.STRUCT == type) { SQLData sqlData = (SQLData) map.values().stream().findFirst().orElse(null); JsonSqlOutput stream = new JsonSqlOutput(sqlData, connection.getSFBaseSession(internalCallMarker())); sqlData.writeSQL(stream); valueTypeSchema = stream.getSchema(); } else { valueTypeSchema = FieldSchemaCreator.buildBindingSchemaForType(type, false); } BindingParameterMetadata schema = BindingParameterMetadata.BindingParameterMetadataBuilder.bindingParameterMetadata() .withType("map") .withFields( Arrays.asList( FieldSchemaCreator.buildBindingSchemaForType(Types.VARCHAR, false), valueTypeSchema)) .build(); ParameterBindingDTO binding = null; try { binding = new ParameterBindingDTO( "json", SnowflakeUtil.javaTypeToSFTypeString( Types.STRUCT, connection.getSFBaseSession(internalCallMarker())), SnowflakeUtil.mapJson(map), schema); } catch (JsonProcessingException e) { throw new RuntimeException(e); } parameterBindings.put(String.valueOf(parameterIndex), binding); } @Override public ResultSetMetaData getMetaData() throws SQLException { logger.trace("getMetaData()", false); raiseSQLExceptionIfStatementIsClosed(); describeSqlIfNotTried(); return new SnowflakeResultSetMetaDataV1(this.preparedStatementMetaData.getResultSetMetaData()); } @Override public void setDate(int parameterIndex, Date x, Calendar cal) throws SQLException { logger.trace("setDate(int parameterIndex, Date x, Calendar cal)", false); raiseSQLExceptionIfStatementIsClosed(); if (x == null) { setNull(parameterIndex, Types.DATE); } else { // convert the date from to be in local time zone to be in UTC String value = String.valueOf( x.getTime() + cal.getTimeZone().getOffset(x.getTime()) - ResultUtil.msDiffJulianToGregorian(x)); ParameterBindingDTO binding = new ParameterBindingDTO( SnowflakeUtil.javaTypeToSFTypeString( Types.DATE, connection.getSFBaseSession(internalCallMarker())), value); parameterBindings.put(String.valueOf(parameterIndex), binding); } } @Override public void setTime(int parameterIndex, Time x, Calendar cal) throws SQLException { logger.trace("setTime(int parameterIndex, Time x, Calendar cal)", false); raiseSQLExceptionIfStatementIsClosed(); setTime(parameterIndex, x); } @Override public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException { logger.trace("setTimestamp(int parameterIndex, Timestamp x, Calendar cal)", false); raiseSQLExceptionIfStatementIsClosed(); // convert the time from being in UTC to be in local time zone String value = null; SnowflakeType sfType = 
SnowflakeUtil.javaTypeToSFType( Types.TIMESTAMP, connection.getSFBaseSession(internalCallMarker())); if (x != null) { long milliSecSinceEpoch = x.getTime(); if (sfType == SnowflakeType.TIMESTAMP) { sfType = connection.getSFBaseSession(internalCallMarker()).getTimestampMappedType(); } // if type is timestamp_tz, keep the offset and the time value separate. // store the offset, in minutes, as amount it's off from UTC if (sfType == SnowflakeType.TIMESTAMP_TZ) { value = String.valueOf( BigDecimal.valueOf(milliSecSinceEpoch / 1000) .scaleByPowerOfTen(9) .add(BigDecimal.valueOf(x.getNanos()))); int offset = cal.getTimeZone().getOffset(milliSecSinceEpoch) / 60000 + 1440; value += " " + offset; } else { milliSecSinceEpoch = milliSecSinceEpoch + cal.getTimeZone().getOffset(milliSecSinceEpoch); value = String.valueOf( BigDecimal.valueOf(milliSecSinceEpoch / 1000) .scaleByPowerOfTen(9) .add(BigDecimal.valueOf(x.getNanos()))); } } ParameterBindingDTO binding = new ParameterBindingDTO(sfType.name(), value); parameterBindings.put(String.valueOf(parameterIndex), binding); } @Override public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException { logger.trace("setNull(int parameterIndex, int sqlType, String typeName)", false); setNull(parameterIndex, sqlType); } @Override public void setURL(int parameterIndex, URL x) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public ParameterMetaData getParameterMetaData() throws SQLException { describeSqlIfNotTried(); return new SnowflakeParameterMetadata( preparedStatementMetaData, connection.getSFBaseSession(internalCallMarker())); } @Override public void setRowId(int parameterIndex, RowId x) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setNString(int parameterIndex, String value) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setNCharacterStream(int parameterIndex, Reader value, long length) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setNClob(int parameterIndex, NClob value) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setClob(int parameterIndex, Reader reader, long length) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setBlob(int parameterIndex, InputStream inputStream, long length) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException { logger.trace( "setObject(int parameterIndex, Object x, int targetSqlType, int 
scaleOrLength)", false); raiseSQLExceptionIfStatementIsClosed(); if (x == null) { setNull(parameterIndex, targetSqlType); } else if (targetSqlType == Types.DECIMAL || targetSqlType == Types.NUMERIC) { BigDecimal decimalObj = new BigDecimal(String.valueOf(x)); decimalObj.setScale(scaleOrLength); setBigDecimal(parameterIndex, decimalObj); } else { setObject(parameterIndex, x, targetSqlType); } } @Override public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setCharacterStream(int parameterIndex, Reader reader, long length) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setClob(int parameterIndex, Reader reader) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setNClob(int parameterIndex, Reader reader) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public int executeUpdate(String sql) throws SQLException { logger.debug("executeUpdate(String sql)", false); throw new SnowflakeSQLException( ErrorCode.UNSUPPORTED_STATEMENT_TYPE_IN_EXECUTION_API, StmtUtil.truncateSQL(sql)); } @Override public boolean execute(String sql) throws SQLException { logger.debug("execute(String sql)", false); throw new SnowflakeSQLException( ErrorCode.UNSUPPORTED_STATEMENT_TYPE_IN_EXECUTION_API, StmtUtil.truncateSQL(sql)); } @Override public void addBatch(String sql) throws SQLException { throw new SnowflakeSQLException( ErrorCode.UNSUPPORTED_STATEMENT_TYPE_IN_EXECUTION_API, StmtUtil.truncateSQL(sql)); } @Override public void clearBatch() throws SQLException { super.clearBatch(); batchParameterBindings.clear(); parameterBindings.clear(); wasPrevValueNull.clear(); batchSize = 0; } @Override public int[] executeBatch() throws SQLException { logger.trace("executeBatch()", false); return executeBatchInternalWithArrayBind(false).intArr; } @Override public long[] executeLargeBatch() throws SQLException { 
logger.trace("executeLargeBatch()", false); return executeBatchInternalWithArrayBind(true).longArr; } VariableTypeArray executeBatchInternalWithArrayBind(boolean isLong) throws SQLException { raiseSQLExceptionIfStatementIsClosed(); describeSqlIfNotTried(); if (this.preparedStatementMetaData.getStatementType().isGenerateResultSet()) { throw new SnowflakeSQLException( ErrorCode.UNSUPPORTED_STATEMENT_TYPE_IN_EXECUTION_API, StmtUtil.truncateSQL(sql)); } VariableTypeArray updateCounts; if (isLong) { long[] arr = new long[batch.size()]; updateCounts = new VariableTypeArray(null, arr); } else { int size = batch.size(); int[] arr = new int[size]; updateCounts = new VariableTypeArray(arr, null); } try { if (this.preparedStatementMetaData.isArrayBindSupported()) { if (batchSize <= 0) { if (isLong) { logger.debug( "executeLargeBatch() using array bind with no batch data. Return long[0] directly", false); return new VariableTypeArray(null, new long[0]); } else { logger.debug( "executeBatch() using array bind with no batch data. Return int[0] directly", false); return new VariableTypeArray(new int[0], null); } } int updateCount = (int) executeUpdateInternal( this.sql, batchParameterBindings, false, new ExecTimeTelemetryData()); // when update count is the same as the number of bindings in the batch, // expand the update count into an array (SNOW-14034) if (updateCount == batchSize) { if (isLong) { updateCounts = new VariableTypeArray(null, new long[updateCount]); for (int idx = 0; idx < updateCount; idx++) { updateCounts.longArr[idx] = 1; } } else { updateCounts = new VariableTypeArray(new int[updateCount], null); for (int idx = 0; idx < updateCount; idx++) { updateCounts.intArr[idx] = 1; } } } else { if (isLong) { updateCounts.longArr = new long[] {updateCount}; } else { updateCounts.intArr = new int[] {updateCount}; } } } else { // Array binding is not supported if (isLong) { updateCounts.longArr = executeBatchInternal(true).longArr; } else { updateCounts.intArr = executeBatchInternal(false).intArr; } } if (this.getSFBaseStatement() .getSFBaseSession(internalCallMarker()) .getClearBatchOnlyAfterSuccessfulExecution()) { clearBatch(); } } finally { if (!this.getSFBaseStatement() .getSFBaseSession(internalCallMarker()) .getClearBatchOnlyAfterSuccessfulExecution()) { clearBatch(); } } return updateCounts; } @Override public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public int executeUpdate(String sql, String[] columnNames) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public boolean execute(String sql, int[] columnIndexes) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public boolean execute(String sql, String[] columnNames) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } // For testing use only 
public Map getBatchParameterBindings() { return batchParameterBindings; } // package private for testing purpose only public Map getParameterBindings() { return parameterBindings; } // For testing use only public boolean isAlreadyDescribed() { return this.alreadyDescribed; } // For testing use only public boolean isArrayBindSupported() { return this.preparedStatementMetaData.isArrayBindSupported(); } @Override public void resultSetMetadataHandler(SFBaseResultSet resultSet) throws SQLException { if (!this.preparedStatementMetaData.isValidMetaData()) { this.preparedStatementMetaData = new SFPreparedStatementMetaData( resultSet.getMetaData(), resultSet.getStatementType(), resultSet.getNumberOfBinds(), resultSet.isArrayBindSupported(), resultSet.getMetaDataOfBinds(), true); alreadyDescribed = true; } } public String toString() { return (this.sql != null) ? this.sql + " - Query ID: " + this.getQueryID() : super.toString(); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/api/implementation/statement/SnowflakeStatementImpl.java ================================================ package net.snowflake.client.internal.api.implementation.statement; import static net.snowflake.client.api.exception.ErrorCode.FEATURE_UNSUPPORTED; import static net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.internalCallMarker; import java.sql.BatchUpdateException; import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.SQLFeatureNotSupportedException; import java.sql.SQLWarning; import java.sql.Statement; import java.util.ArrayList; import java.util.Collections; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.api.statement.SnowflakeStatement; import net.snowflake.client.internal.api.implementation.connection.SnowflakeConnectionImpl; import net.snowflake.client.internal.core.CancellationReason; import net.snowflake.client.internal.core.ParameterBindingDTO; import net.snowflake.client.internal.core.ResultUtil; import net.snowflake.client.internal.core.SFBaseResultSet; import net.snowflake.client.internal.core.SFBaseStatement; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.core.SFStatement; import net.snowflake.client.internal.core.SFStatementType; import net.snowflake.client.internal.core.StmtUtil; import net.snowflake.client.internal.exception.SnowflakeSQLLoggedException; import net.snowflake.client.internal.jdbc.QueryIdValidator; import net.snowflake.client.internal.jdbc.SnowflakeLoggedFeatureNotSupportedException; import net.snowflake.client.internal.jdbc.SnowflakeResultSetV1; import net.snowflake.client.internal.jdbc.telemetry.ExecTimeTelemetryData; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.client.internal.util.VariableTypeArray; import net.snowflake.common.core.SqlState; /** Snowflake statement */ public class SnowflakeStatementImpl implements Statement, SnowflakeStatement { private static final SFLogger logger = SFLoggerFactory.getLogger(SnowflakeStatementImpl.class); private static final String NOOP_MESSAGE = "This is a dummy SnowflakeStatement, " + "no member function should be called for it."; private static final long 
NO_UPDATES = -1; public final SnowflakeConnectionImpl connection; protected final int resultSetType; protected final int resultSetConcurrency; protected final int resultSetHoldability; protected String batchID = ""; /* * The maximum number of rows this statement should return (0 => all rows). */ private int maxRows = 0; // Refer to all open resultSets from this statement private final Set<ResultSet> openResultSets = ConcurrentHashMap.newKeySet(); // result set currently in use private ResultSet resultSet = null; private int fetchSize = 50; private Boolean isClosed = false; private long updateCount = NO_UPDATES; // timeout in seconds private int queryTimeout = 0; SFBaseStatement sfBaseStatement; private boolean poolable; /** Snowflake query ID from the latest executed query */ private String queryID; /** Snowflake query IDs from the latest executed batch */ private List<String> batchQueryIDs = new LinkedList<>(); /** batch of sql strings added by addBatch */ protected final List<BatchEntry> batch = new ArrayList<>(); private SQLWarning sqlWarnings; /** * Construct SnowflakeStatementImpl * * @param connection connection object * @param resultSetType result set type: ResultSet.TYPE_FORWARD_ONLY. * @param resultSetConcurrency result set concurrency: ResultSet.CONCUR_READ_ONLY. * @param resultSetHoldability result set holdability: ResultSet.CLOSE_CURSORS_AT_COMMIT * @throws SQLException if any SQL error occurs. */ public SnowflakeStatementImpl( SnowflakeConnectionImpl connection, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { logger.trace("SnowflakeStatement(SnowflakeConnectionImpl conn)", false); this.connection = connection; if (resultSetType != ResultSet.TYPE_FORWARD_ONLY) { throw new SQLFeatureNotSupportedException( String.format("ResultSet type %d is not supported.", resultSetType), FEATURE_UNSUPPORTED.getSqlState(), FEATURE_UNSUPPORTED.getMessageCode()); } if (resultSetConcurrency != ResultSet.CONCUR_READ_ONLY) { throw new SQLFeatureNotSupportedException( String.format("ResultSet concurrency %d is not supported.", resultSetConcurrency), FEATURE_UNSUPPORTED.getSqlState(), FEATURE_UNSUPPORTED.getMessageCode()); } if (resultSetHoldability != ResultSet.CLOSE_CURSORS_AT_COMMIT) { throw new SQLFeatureNotSupportedException( String.format("ResultSet holdability %d is not supported.", resultSetHoldability), FEATURE_UNSUPPORTED.getSqlState(), FEATURE_UNSUPPORTED.getMessageCode()); } this.resultSetType = resultSetType; this.resultSetConcurrency = resultSetConcurrency; this.resultSetHoldability = resultSetHoldability; sfBaseStatement = (connection != null) ? connection.getHandler(internalCallMarker()).getSFStatement() : null; } protected void raiseSQLExceptionIfStatementIsClosed() throws SQLException { if (isClosed) { throw new SnowflakeSQLException(ErrorCode.STATEMENT_CLOSED); } } /** * Execute SQL query * * @param sql sql statement * @return ResultSet * @throws SQLException if {@link #executeQueryInternal(String, boolean, Map, ExecTimeTelemetryData)} throws an exception */ @Override public ResultSet executeQuery(String sql) throws SQLException { ExecTimeTelemetryData execTimeData = new ExecTimeTelemetryData("ResultSet Statement.executeQuery(String)", this.batchID); raiseSQLExceptionIfStatementIsClosed(); ResultSet rs = executeQueryInternal(sql, false, null, execTimeData); execTimeData.setQueryEnd(); execTimeData.generateTelemetry(); logger.debug("Query completed. 
{}", execTimeData); return rs; } /** * Execute SQL query asynchronously * * @param sql sql statement * @return ResultSet * @throws SQLException if @link{#executeQueryInternal(String, Map)} throws an exception */ public ResultSet executeAsyncQuery(String sql) throws SQLException { ExecTimeTelemetryData execTimeData = new ExecTimeTelemetryData("ResultSet Statement.executeAsyncQuery(String)", this.batchID); raiseSQLExceptionIfStatementIsClosed(); ResultSet rs = executeQueryInternal(sql, true, null, execTimeData); execTimeData.setQueryEnd(); execTimeData.generateTelemetry(); logger.debug("Query completed. {}", queryID, execTimeData); return rs; } /** * Hook for subclasses to process result set metadata after query execution. This method is called * internally after executing a query to allow subclasses to extract and cache metadata from the * result set. * *

The default implementation does nothing. Subclasses (like PreparedStatementImpl) can * override this to capture metadata such as parameter metadata. * * @param resultSet the internal result set containing metadata * @throws SQLException if an error occurs while processing metadata */ protected void resultSetMetadataHandler(SFBaseResultSet resultSet) throws SQLException { // No-Op - subclasses can override } /** * Execute an update statement * * @param sql sql statement * @return number of rows updated * @throws SQLException if @link{#executeUpdateInternal(String, Map)} throws exception */ @Override public int executeUpdate(String sql) throws SQLException { return (int) this.executeLargeUpdate(sql); } /** * Execute an update statement returning the number of affected rows in long * * @param sql sql statement * @return number of rows updated in long * @throws SQLException if @link{#executeUpdateInternal(String, Map)} throws exception */ @Override public long executeLargeUpdate(String sql) throws SQLException { ExecTimeTelemetryData execTimeData = new ExecTimeTelemetryData("ResultSet Statement.executeLargeUpdate(String)", this.batchID); long res = executeUpdateInternal(sql, null, true, execTimeData); execTimeData.setQueryEnd(); execTimeData.generateTelemetry(); logger.debug("Query completed. {}", queryID, execTimeData); return res; } public long executeUpdateInternal( String sql, Map parameterBindings, boolean updateQueryRequired, ExecTimeTelemetryData execTimeData) throws SQLException { execTimeData.setQueryText(sql); execTimeData.setSessionId(connection.getSessionID()); raiseSQLExceptionIfStatementIsClosed(); /* If sql command is a staging command that has parameter binding, throw an exception because parameter binding is not supported for staging commands. */ if (StmtUtil.checkStageManageCommand(sql) != null && parameterBindings != null) { throw new SnowflakeSQLLoggedException( connection.getSFBaseSession(internalCallMarker()), ErrorCode.UNSUPPORTED_STATEMENT_TYPE_IN_EXECUTION_API, StmtUtil.truncateSQL(sql)); } SFBaseResultSet sfResultSet; try { sfResultSet = sfBaseStatement.execute( sql, parameterBindings, SFBaseStatement.CallingMethod.EXECUTE_UPDATE, execTimeData); sfResultSet.setSession(this.connection.getSFBaseSession(internalCallMarker())); updateCount = ResultUtil.calculateUpdateCount(sfResultSet); queryID = sfResultSet.getQueryId(); resultSetMetadataHandler(sfResultSet); } catch (SnowflakeSQLException ex) { setQueryIdWhenValidOrNull(ex.getQueryId()); throw ex; } catch (SFException ex) { setQueryIdWhenValidOrNull(ex.getQueryId()); throw new SnowflakeSQLException( ex.getCause(), ex.getSqlState(), ex.getVendorCode(), ex.getParams()); } finally { if (resultSet != null && !resultSet.isClosed()) { openResultSets.add(resultSet); } resultSet = null; } if (updateCount == NO_UPDATES && updateQueryRequired) { throw new SnowflakeSQLLoggedException( connection.getSFBaseSession(internalCallMarker()), ErrorCode.UNSUPPORTED_STATEMENT_TYPE_IN_EXECUTION_API, StmtUtil.truncateSQL(sql)); } return updateCount; } private void setQueryIdWhenValidOrNull(String queryId) { if (QueryIdValidator.isValid(queryId)) { this.queryID = queryId; } else { this.queryID = null; } } /** * Internal method for executing a query with bindings accepted. 
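* <p>Illustrative note (not in the original javadoc): the bindings map is keyed by the 1-based parameter position rendered as a string (e.g. "1"), and each value is a ParameterBindingDTO pairing a Snowflake type name with a string-encoded value.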
* * @param sql sql statement * @param asyncExec execute query asynchronously * @param parameterBindings parameters bindings * @return query result set * @throws SQLException if @link{SFStatement.execute(String)} throws exception */ ResultSet executeQueryInternal( String sql, boolean asyncExec, Map parameterBindings, ExecTimeTelemetryData execTimeData) throws SQLException { execTimeData.setQueryText(sql); execTimeData.setSessionId(connection.getSessionID()); SFBaseResultSet sfResultSet; try { if (asyncExec) { if (!connection.getHandler(internalCallMarker()).supportsAsyncQuery()) { throw new SQLFeatureNotSupportedException( "Async execution not supported in current context."); } sfResultSet = sfBaseStatement.asyncExecute( sql, parameterBindings, SFBaseStatement.CallingMethod.EXECUTE_QUERY, execTimeData); } else { sfResultSet = sfBaseStatement.execute( sql, parameterBindings, SFBaseStatement.CallingMethod.EXECUTE_QUERY, execTimeData); resultSetMetadataHandler(sfResultSet); } sfResultSet.setSession(this.connection.getSFBaseSession(internalCallMarker())); queryID = sfResultSet.getQueryId(); } catch (SnowflakeSQLException ex) { setQueryIdWhenValidOrNull(ex.getQueryId()); throw ex; } catch (SFException ex) { setQueryIdWhenValidOrNull(ex.getQueryId()); throw new SnowflakeSQLException( ex.getCause(), ex.getSqlState(), ex.getVendorCode(), ex.getParams()); } if (resultSet != null && !resultSet.isClosed()) { openResultSets.add(resultSet); } if (asyncExec) { resultSet = connection.getHandler(internalCallMarker()).createAsyncResultSet(sfResultSet, this); } else { resultSet = connection.getHandler(internalCallMarker()).createResultSet(sfResultSet, this); } return getResultSet(); } /** * Execute sql * * @param sql sql statement * @param parameterBindings a map of binds to use for this query * @return whether there is result set or not * @throws SQLException if @link{#executeQuery(String)} throws exception */ boolean executeInternal( String sql, Map parameterBindings, ExecTimeTelemetryData execTimeData) throws SQLException { raiseSQLExceptionIfStatementIsClosed(); execTimeData.setQueryText(sql); execTimeData.setSessionId(connection.getSessionID()); connection.injectedDelay(); logger.debug("Execute: {}", sql); String trimmedSql = sql.trim(); if (trimmedSql.length() >= 20 && trimmedSql.toLowerCase().startsWith("set-sf-property")) { // deprecated: sfsql executeSetProperty(sql); return false; } SFBaseResultSet sfResultSet; try { sfResultSet = sfBaseStatement.execute( sql, parameterBindings, SFBaseStatement.CallingMethod.EXECUTE, execTimeData); sfResultSet.setSession(this.connection.getSFBaseSession(internalCallMarker())); resultSetMetadataHandler(sfResultSet); if (resultSet != null && !resultSet.isClosed()) { openResultSets.add(resultSet); } resultSet = connection.getHandler(internalCallMarker()).createResultSet(sfResultSet, this); queryID = sfResultSet.getQueryId(); // Legacy behavior treats update counts as result sets for single- // statement execute, so we only treat update counts as update counts // if CLIENT_SFSQL is not set, or if a statement // is multi-statement boolean copyResultSetEnabled = sfResultSet.getStatementType() == SFStatementType.COPY && connection.getSFBaseSession(internalCallMarker()).isEnableCopyResultSet(); if (!copyResultSetEnabled && !sfResultSet.getStatementType().isGenerateResultSet() && (!connection.getSFBaseSession(internalCallMarker()).isSfSQLMode() || sfBaseStatement.hasChildren())) { updateCount = ResultUtil.calculateUpdateCount(sfResultSet); if (resultSet != null && 
!resultSet.isClosed()) { openResultSets.add(resultSet); } resultSet = null; return false; } updateCount = NO_UPDATES; return true; } catch (SnowflakeSQLException ex) { setQueryIdWhenValidOrNull(ex.getQueryId()); throw ex; } catch (SFException ex) { setQueryIdWhenValidOrNull(ex.getQueryId()); throw new SnowflakeSQLException( ex.getCause(), ex.getSqlState(), ex.getVendorCode(), ex.getParams()); } } /** * @return the query ID of the latest executed query */ public String getQueryID() { return queryID; } /** * @return the query IDs of the latest executed batch queries */ public List<String> getBatchQueryIDs() { return Collections.unmodifiableList(batchQueryIDs); } /** * @return the child query IDs for the multiple statements query. */ public String[] getChildQueryIds(String queryID) throws SQLException { return sfBaseStatement.getChildQueryIds(queryID); } /** * @return the open resultSets from this statement */ public Set<ResultSet> getOpenResultSets() { return openResultSets; } /** * Execute sql * * @param sql sql statement * @return whether there is result set or not * @throws SQLException if {@link #executeQuery(String)} throws exception */ @Override public boolean execute(String sql) throws SQLException { ExecTimeTelemetryData execTimeData = new ExecTimeTelemetryData("ResultSet Statement.execute(String)", this.batchID); boolean res = executeInternal(sql, null, execTimeData); execTimeData.setQueryEnd(); execTimeData.generateTelemetry(); logger.debug("Query completed. {}", queryID, execTimeData); return res; } @Override public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { logger.trace("execute(String sql, int autoGeneratedKeys)", false); if (autoGeneratedKeys == Statement.NO_GENERATED_KEYS) { return execute(sql); } else { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } } @Override public boolean execute(String sql, int[] columnIndexes) throws SQLException { logger.trace("execute(String sql, int[] columnIndexes)", false); throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public boolean execute(String sql, String[] columnNames) throws SQLException { logger.trace("execute(String sql, String[] columnNames)", false); throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } /** * Batch Execute. If one of the commands in the batch fails, JDBC will continue processing and * throw a BatchUpdateException after all commands are processed. * * @return an array of update counts * @throws SQLException if any error occurs. */ @Override public int[] executeBatch() throws SQLException { logger.trace("int[] executeBatch()", false); return executeBatchInternal(false).intArr; } /** * Batch Execute. If one of the commands in the batch fails, JDBC will continue processing and * throw a BatchUpdateException after all commands are processed. * * @return an array of update counts * @throws SQLException if any error occurs. */ @Override public long[] executeLargeBatch() throws SQLException { logger.trace("executeLargeBatch()", false); return executeBatchInternal(true).longArr; } /** * This method will iterate through the batch and provide the sql and bindings to the underlying * SFStatement to get a result set. * *
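<p>Behavior sketch (summary added for clarity): each entry is executed in order via executeUpdateInternal; a failing entry records EXECUTE_FAILED in its slot, later entries still run, and the first exception is rethrown at the end as a BatchUpdateException carrying the per-entry update counts. * *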

Note, array binds use a different code path since only one network roundtrip in the array * bind execution case. * * @return the number of updated rows * @throws SQLException raises if statement is closed or any db error occurs */ VariableTypeArray executeBatchInternal(boolean isLong) throws SQLException { raiseSQLExceptionIfStatementIsClosed(); SQLException exceptionReturned = null; VariableTypeArray updateCounts; if (isLong) { long[] arr = new long[batch.size()]; updateCounts = new VariableTypeArray(null, arr); } else { int size = batch.size(); int[] arr = new int[size]; updateCounts = new VariableTypeArray(arr, null); } batchQueryIDs.clear(); for (int i = 0; i < batch.size(); i++) { BatchEntry b = batch.get(i); try { long cnt = this.executeUpdateInternal( b.getSql(), b.getParameterBindings(), false, new ExecTimeTelemetryData()); if (cnt == NO_UPDATES) { // in executeBatch we set updateCount to SUCCESS_NO_INFO // for successful query with no updates cnt = SUCCESS_NO_INFO; } if (isLong) { updateCounts.longArr[i] = cnt; } else if (cnt <= Integer.MAX_VALUE) { updateCounts.intArr[i] = (int) cnt; } else { throw new SnowflakeSQLLoggedException( connection.getSFBaseSession(internalCallMarker()), ErrorCode.EXECUTE_BATCH_INTEGER_OVERFLOW.getMessageCode(), SqlState.NUMERIC_VALUE_OUT_OF_RANGE, i); } batchQueryIDs.add(queryID); } catch (SQLException e) { exceptionReturned = exceptionReturned == null ? e : exceptionReturned; if (isLong) { updateCounts.longArr[i] = (long) EXECUTE_FAILED; } else { updateCounts.intArr[i] = EXECUTE_FAILED; } } } if (exceptionReturned != null && isLong) { throw new BatchUpdateException( exceptionReturned.getLocalizedMessage(), exceptionReturned.getSQLState(), exceptionReturned.getErrorCode(), updateCounts.longArr, exceptionReturned); } else if (exceptionReturned != null) { throw new BatchUpdateException( exceptionReturned.getLocalizedMessage(), exceptionReturned.getSQLState(), exceptionReturned.getErrorCode(), updateCounts.intArr, exceptionReturned); } if (this.getSFBaseStatement() .getSFBaseSession(internalCallMarker()) .getClearBatchOnlyAfterSuccessfulExecution()) { clearBatch(); } return updateCounts; } @Override public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { logger.trace("executeUpdate(String sql, int autoGeneratedKeys)", false); return (int) this.executeLargeUpdate(sql, autoGeneratedKeys); } @Override public long executeLargeUpdate(String sql, int autoGeneratedKeys) throws SQLException { logger.trace("executeUpdate(String sql, int autoGeneratedKeys)", false); if (autoGeneratedKeys == Statement.NO_GENERATED_KEYS) { return executeLargeUpdate(sql); } else { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } } @Override public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { logger.trace("executeUpdate(String sql, int[] columnIndexes)", false); throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public long executeLargeUpdate(String sql, int[] columnIndexes) throws SQLException { logger.trace("executeLargeUpdate(String sql, int[] columnIndexes)", false); throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public int executeUpdate(String sql, String[] columnNames) throws SQLException { logger.trace("executeUpdate(String sql, String[] columnNames)", false); throw new SnowflakeLoggedFeatureNotSupportedException( 
connection.getSFBaseSession(internalCallMarker())); } @Override public long executeLargeUpdate(String sql, String[] columnNames) throws SQLException { logger.trace("executeUpdate(String sql, String[] columnNames)", false); throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public Connection getConnection() throws SQLException { logger.trace("getConnection()", false); raiseSQLExceptionIfStatementIsClosed(); return connection; } @Override public int getFetchDirection() throws SQLException { logger.trace("getFetchDirection()", false); raiseSQLExceptionIfStatementIsClosed(); return ResultSet.FETCH_FORWARD; } @Override public int getFetchSize() throws SQLException { logger.trace("getFetchSize()", false); raiseSQLExceptionIfStatementIsClosed(); return fetchSize; } @Override public ResultSet getGeneratedKeys() throws SQLException { logger.trace("getGeneratedKeys()", false); raiseSQLExceptionIfStatementIsClosed(); return new SnowflakeResultSetV1.EmptyResultSet(); } @Override public int getMaxFieldSize() throws SQLException { logger.trace("getMaxFieldSize()", false); raiseSQLExceptionIfStatementIsClosed(); return connection.getMetaData().getMaxCharLiteralLength(); } @Override public int getMaxRows() throws SQLException { logger.trace("getMaxRows()", false); raiseSQLExceptionIfStatementIsClosed(); return maxRows; } @Override public boolean getMoreResults() throws SQLException { logger.trace("getMoreResults()", false); return getMoreResults(Statement.CLOSE_CURRENT_RESULT); } @Override public boolean getMoreResults(int current) throws SQLException { logger.trace("getMoreResults(int current)", false); raiseSQLExceptionIfStatementIsClosed(); // clean up the current result set, if it exists if (resultSet != null && (current == Statement.CLOSE_CURRENT_RESULT || current == Statement.CLOSE_ALL_RESULTS)) { resultSet.close(); } boolean hasResultSet = sfBaseStatement.getMoreResults(current); SFBaseResultSet sfResultSet = sfBaseStatement.getResultSet(); if (hasResultSet) // result set returned { sfResultSet.setSession(this.connection.getSFBaseSession(internalCallMarker())); if (resultSet != null && !resultSet.isClosed()) { openResultSets.add(resultSet); } resultSet = connection.getHandler(internalCallMarker()).createResultSet(sfResultSet, this); updateCount = NO_UPDATES; return true; } else if (sfResultSet != null) // update count returned { if (resultSet != null && !resultSet.isClosed()) { openResultSets.add(resultSet); } resultSet = null; try { updateCount = ResultUtil.calculateUpdateCount(sfResultSet); } catch (SFException ex) { throw new SnowflakeSQLLoggedException( connection.getSFBaseSession(internalCallMarker()), ex); } // Multi statement queries should return true while there are still statements to iterate // through. 
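// Illustrative caller-side loop (a sketch, not from the original source; assumes a
// multi-statement query was enabled, e.g. via the MULTI_STATEMENT_COUNT parameter):
//   boolean hasRs = stmt.execute("select 1; select 2");
//   while (hasRs || stmt.getUpdateCount() != -1) {
//     if (hasRs) {
//       try (ResultSet rs = stmt.getResultSet()) { /* consume rows */ }
//     }
//     hasRs = stmt.getMoreResults();
//   }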
if (queryID != null && sfBaseStatement.hasChildren() && sfBaseStatement.getChildQueryIds(queryID).length > 0) { return true; } return false; } else // no more results { updateCount = NO_UPDATES; return false; } } @Override public int getQueryTimeout() throws SQLException { logger.trace("getQueryTimeout()", false); raiseSQLExceptionIfStatementIsClosed(); return this.queryTimeout; } @Override public ResultSet getResultSet() throws SQLException { logger.trace("getResultSet()", false); raiseSQLExceptionIfStatementIsClosed(); return resultSet; } @Override public int getResultSetConcurrency() throws SQLException { logger.trace("getResultSetConcurrency()", false); raiseSQLExceptionIfStatementIsClosed(); return resultSetConcurrency; } @Override public int getResultSetHoldability() throws SQLException { logger.trace("getResultSetHoldability()", false); raiseSQLExceptionIfStatementIsClosed(); return resultSetHoldability; } @Override public int getResultSetType() throws SQLException { logger.trace("getResultSetType()", false); raiseSQLExceptionIfStatementIsClosed(); return this.resultSetType; } @Override public int getUpdateCount() throws SQLException { logger.trace("getUpdateCount()", false); return (int) getUpdateCountIfDML(); } @Override public long getLargeUpdateCount() throws SQLException { logger.trace("getLargeUpdateCount()", false); return getUpdateCountIfDML(); } private long getUpdateCountIfDML() throws SQLException { raiseSQLExceptionIfStatementIsClosed(); return updateCount; } @Override public SQLWarning getWarnings() throws SQLException { logger.trace("getWarnings()", false); raiseSQLExceptionIfStatementIsClosed(); return sqlWarnings; } @Override public boolean isClosed() throws SQLException { logger.trace("isClosed()", false); return isClosed; // no exception } @Override public boolean isPoolable() throws SQLException { logger.trace("isPoolable()", false); raiseSQLExceptionIfStatementIsClosed(); return poolable; } @Override public void setCursorName(String name) throws SQLException { logger.trace("setCursorName(String name)", false); throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setEscapeProcessing(boolean enable) throws SQLException { logger.trace("setEscapeProcessing(boolean enable)", false); // NOTE: We could raise an exception here, because not implemented // but it may break the existing applications. For now returning nothing. // we should revisit. 
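// In practice only the closed-state check below runs; the enable flag is ignored.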
raiseSQLExceptionIfStatementIsClosed(); } @Override public void setFetchDirection(int direction) throws SQLException { logger.trace("setFetchDirection(int direction)", false); raiseSQLExceptionIfStatementIsClosed(); if (direction != ResultSet.FETCH_FORWARD) { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } } @Override public void setFetchSize(int rows) throws SQLException { logger.trace("setFetchSize(int rows), rows={}", rows); raiseSQLExceptionIfStatementIsClosed(); fetchSize = rows; } @Override public void setMaxFieldSize(int max) throws SQLException { logger.trace("setMaxFieldSize(int max)", false); throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void setMaxRows(int max) throws SQLException { logger.trace("setMaxRows(int max)", false); raiseSQLExceptionIfStatementIsClosed(); this.maxRows = max; try { if (this.sfBaseStatement != null) { this.sfBaseStatement.addProperty("rows_per_resultset", max); } } catch (SFException ex) { throw new SnowflakeSQLException( ex.getCause(), ex.getSqlState(), ex.getVendorCode(), ex.getParams()); } } @Override public void setPoolable(boolean poolable) throws SQLException { logger.trace("setPoolable(boolean poolable)", false); raiseSQLExceptionIfStatementIsClosed(); if (poolable) { throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } this.poolable = poolable; } /** * Sets a parameter at the statement level. * * @param name parameter name. * @param value parameter value. * @throws SQLException if any SQL error occurs. */ public void setParameter(String name, Object value) throws SQLException { logger.trace("setParameter", false); try { if (this.sfBaseStatement != null) { this.sfBaseStatement.addProperty(name, value); } } catch (SFException ex) { throw new SnowflakeSQLException(ex); } } @Override public void setBatchID(String batchID) { this.batchID = batchID; } @Override public void setQueryTimeout(int seconds) throws SQLException { logger.trace("setQueryTimeout(int seconds)", false); raiseSQLExceptionIfStatementIsClosed(); this.queryTimeout = seconds; try { if (this.sfBaseStatement != null) { this.sfBaseStatement.addProperty("query_timeout", seconds); } } catch (SFException ex) { throw new SnowflakeSQLException( ex.getCause(), ex.getSqlState(), ex.getVendorCode(), ex.getParams()); } } @Override public void setAsyncQueryTimeout(int seconds) throws SQLException { logger.trace("setAsyncQueryTimeout(int seconds)", false); raiseSQLExceptionIfStatementIsClosed(); try { if (this.sfBaseStatement != null) { this.sfBaseStatement.addProperty("STATEMENT_TIMEOUT_IN_SECONDS", seconds); // disable statement level query timeout to avoid override by connection parameter this.setQueryTimeout(0); } } catch (SFException ex) { throw new SnowflakeSQLException( null, ex.getCause(), ex.getSqlState(), ex.getVendorCode(), ex.getParams()); } } @Override public boolean isWrapperFor(Class iface) throws SQLException { logger.trace("isWrapperFor(Class iface)", false); return iface.isInstance(this); } @SuppressWarnings("unchecked") @Override public T unwrap(Class iface) throws SQLException { logger.trace("unwrap(Class iface)", false); if (!iface.isInstance(this)) { throw new SQLException( this.getClass().getName() + " not unwrappable from " + iface.getName()); } return (T) this; } @Override public void closeOnCompletion() throws SQLException { logger.trace("closeOnCompletion()", false); throw new 
SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public boolean isCloseOnCompletion() throws SQLException { logger.trace("isCloseOnCompletion()", false); throw new SnowflakeLoggedFeatureNotSupportedException( connection.getSFBaseSession(internalCallMarker())); } @Override public void close() throws SQLException { close(true); } public void close(boolean removeClosedStatementFromConnection) throws SQLException { logger.trace("close()", false); // No exception is raised even if the statement is closed. if (resultSet != null) { resultSet.close(); resultSet = null; } isClosed = true; batch.clear(); // also make sure to close all created resultSets from this statement for (ResultSet rs : openResultSets) { if (rs != null && !rs.isClosed()) { if (rs.isWrapperFor(SnowflakeResultSetV1.class)) { rs.unwrap(SnowflakeResultSetV1.class).close(false); } else { rs.close(); } } } openResultSets.clear(); sfBaseStatement.close(); if (removeClosedStatementFromConnection) { connection.removeClosedStatement(this); } } @Override public void cancel() throws SQLException { logger.trace("cancel()", false); raiseSQLExceptionIfStatementIsClosed(); try { sfBaseStatement.cancel(CancellationReason.CLIENT_REQUESTED); } catch (SFException ex) { throw new SnowflakeSQLException(ex, ex.getSqlState(), ex.getVendorCode(), ex.getParams()); } } @Override public void clearWarnings() throws SQLException { logger.trace("clearWarnings()", false); raiseSQLExceptionIfStatementIsClosed(); sqlWarnings = null; } @Override public void addBatch(String sql) throws SQLException { logger.trace("addBatch(String sql)", false); raiseSQLExceptionIfStatementIsClosed(); batch.add(new BatchEntry(sql, null)); } @Override public void clearBatch() throws SQLException { logger.trace("clearBatch()", false); raiseSQLExceptionIfStatementIsClosed(); batch.clear(); } private void executeSetProperty(final String sql) { logger.trace("setting property", false); // tokenize the sql String[] tokens = sql.split("\\s+"); if (tokens.length < 2) { return; } if ("tracing".equalsIgnoreCase(tokens[1])) { if (tokens.length >= 3) { /*connection.tracingLevel = Level.parse(tokens[2].toUpperCase()); if (connection.tracingLevel != null) { Logger snowflakeLogger = Logger.getLogger("net.snowflake"); snowflakeLogger.setLevel(connection.tracingLevel); }*/ } } else { this.sfBaseStatement.executeSetProperty(sql); } } public SFBaseStatement getSFBaseStatement() throws SQLException { return sfBaseStatement; } // Convenience method to return an SFStatement-typed SFStatementInterface object, but // performs the type-checking as necessary. public SFStatement getSfStatement() throws SnowflakeSQLException { if (sfBaseStatement instanceof SFStatement) { return (SFStatement) sfBaseStatement; } throw new SnowflakeSQLException( "getSfStatement() called with a different SFStatementInterface type."); } public void removeClosedResultSet(ResultSet rs) { openResultSets.remove(rs); } final class BatchEntry { private final String sql; private final Map parameterBindings; BatchEntry(String sql, Map parameterBindings) { this.sql = sql; this.parameterBindings = parameterBindings; } public String getSql() { return sql; } public Map getParameterBindings() { return parameterBindings; } } /** * This is a No Operation Statement to avoid null pointer exception for sessionless result set. 
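* <p>Summary added for clarity: execution- and getter-style methods throw SQLException(NOOP_MESSAGE), while the setter-style methods, close(), and cancel() are silent no-ops.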
*/ public static class NoOpSnowflakeStatementImpl extends SnowflakeStatementImpl { public NoOpSnowflakeStatementImpl() throws SQLException { super( null, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.CLOSE_CURSORS_AT_COMMIT); } @Override public ResultSet executeQuery(String sql) throws SQLException { throw new SQLException(NOOP_MESSAGE); } @Override public int executeUpdate(String sql) throws SQLException { throw new SQLException(NOOP_MESSAGE); } @Override public long executeLargeUpdate(String sql) throws SQLException { throw new SQLException(NOOP_MESSAGE); } @Override public String getQueryID() { return "invalid_query_id"; } @Override public List getBatchQueryIDs() { return new ArrayList<>(); } @Override public boolean execute(String sql) throws SQLException { throw new SQLException(NOOP_MESSAGE); } @Override public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { throw new SQLException(NOOP_MESSAGE); } @Override public boolean execute(String sql, int[] columnIndexes) throws SQLException { throw new SQLException(NOOP_MESSAGE); } @Override public boolean execute(String sql, String[] columnNames) throws SQLException { throw new SQLException(NOOP_MESSAGE); } @Override public int[] executeBatch() throws SQLException { throw new SQLException(NOOP_MESSAGE); } @Override public long[] executeLargeBatch() throws SQLException { throw new SQLException(NOOP_MESSAGE); } @Override public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { throw new SQLException(NOOP_MESSAGE); } @Override public long executeLargeUpdate(String sql, int autoGeneratedKeys) throws SQLException { throw new SQLException(NOOP_MESSAGE); } @Override public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { throw new SQLException(NOOP_MESSAGE); } @Override public long executeLargeUpdate(String sql, int[] columnIndexes) throws SQLException { throw new SQLException(NOOP_MESSAGE); } @Override public int executeUpdate(String sql, String[] columnNames) throws SQLException { throw new SQLException(NOOP_MESSAGE); } @Override public long executeLargeUpdate(String sql, String[] columnNames) throws SQLException { throw new SQLException(NOOP_MESSAGE); } @Override public Connection getConnection() throws SQLException { throw new SQLException(NOOP_MESSAGE); } @Override public int getFetchDirection() throws SQLException { throw new SQLException(NOOP_MESSAGE); } @Override public int getFetchSize() throws SQLException { throw new SQLException(NOOP_MESSAGE); } @Override public ResultSet getGeneratedKeys() throws SQLException { throw new SQLException(NOOP_MESSAGE); } @Override public int getMaxFieldSize() throws SQLException { throw new SQLException(NOOP_MESSAGE); } @Override public int getMaxRows() throws SQLException { throw new SQLException(NOOP_MESSAGE); } @Override public boolean getMoreResults() throws SQLException { throw new SQLException(NOOP_MESSAGE); } @Override public boolean getMoreResults(int current) throws SQLException { throw new SQLException(NOOP_MESSAGE); } @Override public int getQueryTimeout() throws SQLException { throw new SQLException(NOOP_MESSAGE); } @Override public ResultSet getResultSet() throws SQLException { throw new SQLException(NOOP_MESSAGE); } @Override public int getResultSetConcurrency() throws SQLException { throw new SQLException(NOOP_MESSAGE); } @Override public int getResultSetHoldability() throws SQLException { throw new SQLException(NOOP_MESSAGE); } @Override public int getResultSetType() throws SQLException { throw 
new SQLException(NOOP_MESSAGE); } @Override public int getUpdateCount() throws SQLException { throw new SQLException(NOOP_MESSAGE); } @Override public long getLargeUpdateCount() throws SQLException { throw new SQLException(NOOP_MESSAGE); } @Override public SQLWarning getWarnings() throws SQLException { throw new SQLException(NOOP_MESSAGE); } @Override public boolean isClosed() throws SQLException { throw new SQLException(NOOP_MESSAGE); } @Override public boolean isPoolable() throws SQLException { throw new SQLException(NOOP_MESSAGE); } @Override public void setCursorName(String name) throws SQLException {} @Override public void setEscapeProcessing(boolean enable) throws SQLException {} @Override public void setFetchDirection(int direction) throws SQLException {} @Override public void setFetchSize(int rows) throws SQLException {} @Override public void setMaxFieldSize(int max) throws SQLException {} @Override public void setMaxRows(int max) throws SQLException {} @Override public void setPoolable(boolean poolable) throws SQLException {} @Override public void setParameter(String name, Object value) throws SQLException {} @Override public void setQueryTimeout(int seconds) throws SQLException {} @Override public void setAsyncQueryTimeout(int seconds) throws SQLException {} @Override public boolean isWrapperFor(Class iface) throws SQLException { logger.trace("isWrapperFor(Class iface)", false); return iface.isInstance(this); } @SuppressWarnings("unchecked") @Override public T unwrap(Class iface) throws SQLException { logger.trace("unwrap(Class iface)", false); if (!iface.isInstance(this)) { throw new SQLException( this.getClass().getName() + " not unwrappable from " + iface.getName()); } return (T) this; } @Override public void closeOnCompletion() throws SQLException {} @Override public boolean isCloseOnCompletion() throws SQLException { throw new SQLException(NOOP_MESSAGE); } @Override public void close() throws SQLException {} @Override public void close(boolean removeClosedStatementFromConnection) throws SQLException {} @Override public void cancel() throws SQLException {} @Override public void clearWarnings() throws SQLException {} @Override public void addBatch(String sql) throws SQLException {} @Override public void clearBatch() throws SQLException {} @Override public void removeClosedResultSet(ResultSet rs) {} } } ================================================ FILE: src/main/java/net/snowflake/client/internal/common/core/SFBinary.java ================================================ package net.snowflake.client.internal.common.core; import java.util.Arrays; import java.util.Objects; import org.apache.commons.codec.binary.Base16; import org.apache.commons.codec.binary.Base64; /** * Represents binary values. * *

Just a wrapper around a byte array. * *

Instances of this class are immutable. * * @author mkember, copied from net.snowflake.snowflake-common artifact */ public class SFBinary { private static final Base16 INSTANCE = new Base16(); /** An empty SFBinary. */ public static final SFBinary EMPTY = new SFBinary(new byte[] {}); /** * Special SFBinary that is greater than all others. * *

Specifically, MAXIMUM.compareTo(x) > 0 for all x != MAXIMUM. This value has no byte array * representation, so calling getBytes, toBase64, or concat on it is NOT allowed. It does, * however, have a special representation "Z", so SFBinary.fromHex("Z") returns MAXIMUM, and * MAXIMUM.toHex() returns "Z". */ public static final SFBinary MAXIMUM = new SFBinary(null); private static final String MAXIMUM_HEX = "Z"; // Used for validating hex-encoded strings. private static final byte[] HEX_TABLE; private static final byte INVALID = 0; private static final byte HEX_DIGIT = 1; private static final byte WHITESPACE = 2; static { // Initialize HEX_TABLE with 0-9, a-f, A-F, and whitespace. byte[] temp = new byte['f' + 1]; for (char c = '0'; c <= '9'; c++) { temp[c] = HEX_DIGIT; } for (char c = 'A'; c <= 'F'; c++) { temp[c] = HEX_DIGIT; } for (char c = 'a'; c <= 'f'; c++) { temp[c] = HEX_DIGIT; } temp[' '] = WHITESPACE; temp['\n'] = WHITESPACE; temp['\r'] = WHITESPACE; HEX_TABLE = temp; } private final byte[] bytes; /** * Constructs an SFBinary from a byte array. * * @param bytes a byte array */ public SFBinary(byte[] bytes) { this.bytes = bytes; } /** * Returns true if it's safe to call SFBinary.fromHex(str). * *

This is meant for checking user input, so it allows spaces, newlines, and carriage returns. * It returns false for the special value "Z". * * @param str a string * @return true if a string is a hexadecimal value otherwise false */ public static boolean validHex(String str) { int count = 0; for (int i = 0; i < str.length(); i++) { char c = str.charAt(i); int type = c < HEX_TABLE.length ? HEX_TABLE[c] : INVALID; if (type == INVALID) { return false; } if (type == HEX_DIGIT) { count++; } } return count % 2 == 0; } /** * Creates an SFBinary by decoding a hex-encoded string (uppercase letters). * *

Handles the special value "Z" by returning MAXIMUM. * * @param str a string * @return SFBinary * @throws IllegalArgumentException if the string is not hex-encoded. */ public static SFBinary fromHex(String str) { if (str.equals(MAXIMUM_HEX)) { return MAXIMUM; } if (!validHex(str)) { throw new IllegalArgumentException("Invalid hex in '" + str + "'"); } return new SFBinary(INSTANCE.decode(str)); } /** * Creates an SFBinary by decoding a Base64-encoded string (RFC 4648). * * @param str a string. * @return SFBinary * @throws IllegalArgumentException if the string is not Base64-encoded. */ public static SFBinary fromBase64(String str) { return new SFBinary(INSTANCE.decode(str)); } /** * Returns the underlying byte array. * * @return a byte array */ public byte[] getBytes() { assert !this.equals(MAXIMUM); return bytes; } /** * Returns the length of the SFBinary in bytes. * * @return byte length */ public int length() { return bytes.length; } /** * Encodes the binary value as a hex string (uppercase letters). * *

Handles MAXIMUM by returning the special value "Z". * * @return a hexadecimal string */ public String toHex() { if (this.equals(MAXIMUM)) { return MAXIMUM_HEX; } return INSTANCE.encodeAsString(bytes); } /** * Encodes the binary value as a Base64 string (RFC 4648). * * @return a base64 string */ public String toBase64() { assert !this.equals(MAXIMUM); return Base64.encodeBase64String(bytes); } /** * Returns a new SFBinary that is a substring of this SFBinary. * *

Same semantics as String.substring: 'start' is inclusive, 'end' is exclusive, and 'start' * cannot be greater than 'end'. * * @param start the starting index of byte array * @param end the ending index of byte array * @return SFBinary */ public SFBinary substring(int start, int end) { if (start == end) { return EMPTY; } return new SFBinary(Arrays.copyOfRange(bytes, start, end)); } /** * Concatenates two binary values. * *

Concatenates the bytes of this SFBinary with the bytes of the other SFBinary, returning a * new SFBinary instance. * * @param other SFBinary to append * @return concatenated SFBinary */ public SFBinary concat(SFBinary other) { assert !this.equals(MAXIMUM); assert !Objects.equals(other, MAXIMUM); byte[] result = Arrays.copyOf(bytes, bytes.length + other.bytes.length); System.arraycopy( other.bytes, 0, // source result, bytes.length, // destination other.bytes.length); // length return new SFBinary(result); } /** * Compares SFBinary * * @param other the target SFBinary * @return 1 if this SFBinary is larger, 0 if identical otherwise -1 */ public int compareTo(SFBinary other) { if (this.equals(MAXIMUM) && Objects.equals(other, MAXIMUM)) { // This logic is correct for most cases. For example, when choosing the // larger of two EP maxes, and both are Z, it doesn't matter what this // returns. It *would* be a problem if min and max were both Z and this // returned 0 (implying the expression is constant), but that comparison // will never happen, because XP never produces Z for the min. return 0; } if (this.equals(MAXIMUM)) { return 1; } else if (Objects.equals(other, MAXIMUM)) { return -1; } // Compare the byte arrays lexicographically. for (int i = 0; i < bytes.length && i < other.bytes.length; i++) { int a = bytes[i] & 0xFF; int b = other.bytes[i] & 0xFF; if (a > b) { return 1; } else if (a < b) { return -1; } } return bytes.length - other.bytes.length; } /** {@inheritDoc} */ @Override public int hashCode() { if (this.equals(MAXIMUM)) { return 0; } return Arrays.hashCode(bytes); } /** {@inheritDoc} */ @Override public boolean equals(Object other) { if (this == MAXIMUM || other == MAXIMUM) { return this == MAXIMUM && other == MAXIMUM; } return other instanceof SFBinary && Arrays.equals(bytes, ((SFBinary) other).bytes); } /** {@inheritDoc} */ @Override public String toString() { return "SFBinary(hex=" + toHex() + ")"; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/common/core/SFBinaryFormat.java ================================================ package net.snowflake.client.internal.common.core; import java.nio.ByteBuffer; import java.nio.CharBuffer; import java.nio.charset.CharacterCodingException; import java.nio.charset.CharsetDecoder; import java.nio.charset.CodingErrorAction; import java.nio.charset.StandardCharsets; /** * Format (encoding scheme) for binary values. * *
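Before moving on, a short sketch of the ordering and sentinel semantics of the SFBinary class above (values illustrative):

    SFBinary a = SFBinary.fromHex("00FF");
    SFBinary b = SFBinary.fromHex("01");
    assert a.compareTo(b) < 0;                        // unsigned, lexicographic: 0x00 < 0x01
    assert SFBinary.MAXIMUM.compareTo(a) > 0;         // MAXIMUM sorts after every ordinary value
    assert SFBinary.fromHex("Z") == SFBinary.MAXIMUM; // the sentinel round-trips via "Z"
    SFBinary joined = a.concat(b);                    // hex "00FF01"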

This corresponds to the BinaryFormat class in XP. * *

There are three formats: 1. HEX (hexadecimal encoding) 2. BASE64 (RFC 4648 Base64 encoding) 3. * UTF-8 or UTF8 * *

Each kind does two things: 1. format - convert from SFBinary to String 2. parse - convert from * String to SFBinary * *

For HEX and BASE64, "format" means "encode", and "parse" (which can fail) means "decode". For * UTF-8, it is the reverse: "format" means to decode bytes as Unicode characters (which can fail), * and "parse" means to encode a string as UTF-8 bytes (which always succeeds). * * @author mkember, copied from net.snowflake.snowflake-common artifact */ public enum SFBinaryFormat { HEX { @Override public String format(SFBinary binary) { return binary.toHex(); } @Override public SFBinary parse(String string) { return SFBinary.fromHex(string); } }, BASE64 { @Override public String format(SFBinary binary) { return binary.toBase64(); } @Override public SFBinary parse(String string) { return SFBinary.fromBase64(string); } }, UTF8 { @Override public String format(SFBinary binary) { final CharBuffer buf; try { buf = UTF8_DECODER.decode(ByteBuffer.wrap(binary.getBytes())); } catch (CharacterCodingException ex) { throw new IllegalArgumentException("Invalid UTF-8"); } return buf.toString(); } @Override public SFBinary parse(String string) { return new SFBinary(string.getBytes(StandardCharsets.UTF_8)); } }; /** * Format a binary value as a string. * * @param binary SFBinary * @return formatted binary value * @throws IllegalArgumentException if the binary cannot be formatted. */ public abstract String format(SFBinary binary); /** * Parse a string into a binary value. * * @param string a string * @return SFBinary instance * @throws IllegalArgumentException if the string cannot be parsed. */ public abstract SFBinary parse(String string); /** * The default binary format. * *
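A compact sketch of the format/parse duality described at the top of this enum (illustrative strings): for HEX, format encodes and parse decodes; for UTF8 the directions flip:

    SFBinary bytes = SFBinaryFormat.UTF8.parse("héllo"); // String -> UTF-8 bytes, never fails
    String text = SFBinaryFormat.UTF8.format(bytes);     // bytes -> String, fails on invalid UTF-8
    String hex = SFBinaryFormat.HEX.format(bytes);       // bytes -> hex String
    SFBinary back = SFBinaryFormat.HEX.parse(hex);       // hex String -> bytes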

NOTE: Keep this in sync with BinaryFormat::DEFAULT in XP. */ public static final SFBinaryFormat DEFAULT = HEX; /** Decoder object used for parsing and formatting UTF-8. */ private static final CharsetDecoder UTF8_DECODER = StandardCharsets.UTF_8 .newDecoder() .onMalformedInput(CodingErrorAction.REPORT) .onUnmappableCharacter(CodingErrorAction.REPORT); /** * Looks up a binary format by case-insensitive name. HEX, BASE64 or UTF-8 * * @param name binary format * @return SFBinaryFormat instance * @throws IllegalArgumentException if the name is invalid. */ public static SFBinaryFormat getFormat(String name) { try { return lookup(name); } catch (IllegalArgumentException ex) { throw new IllegalArgumentException("Must be 'HEX', 'BASE64', or 'UTF-8'"); } } /** * Looks up a binary format suitable for text output. * *
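A sketch of the two lookup helpers defined just below: getFormat accepts all three names case-insensitively, while getSafeOutputFormat additionally rejects UTF-8:

    SFBinaryFormat f = SFBinaryFormat.getFormat("utf-8");         // returns UTF8
    SFBinaryFormat g = SFBinaryFormat.getSafeOutputFormat("hex"); // returns HEX
    try {
      SFBinaryFormat.getSafeOutputFormat("UTF-8");
    } catch (IllegalArgumentException e) {
      // "Must be 'HEX' or 'BASE64'"
    }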

Specifically, does not allow UTF-8 because not all binary values can be formatted with the * UTF-8 format. * * @param name binary format * @return SFBinaryFormat instance * @throws IllegalArgumentException if the name is invalid. */ public static SFBinaryFormat getSafeOutputFormat(String name) { try { SFBinaryFormat fmt = lookup(name); if (fmt == UTF8) { throw new IllegalArgumentException(); } return fmt; } catch (IllegalArgumentException ex) { throw new IllegalArgumentException("Must be 'HEX' or 'BASE64'"); } } /** * Helper function to look up a binary format by case-insensitive name. * * @param name binary format name * @return SFBinaryFormat instance * @throws IllegalArgumentException if the name is invalid. */ private static SFBinaryFormat lookup(String name) { name = name.toUpperCase(); if (name.equals("UTF-8")) { return UTF8; } return SFBinaryFormat.valueOf(name); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/config/ConnectionParameters.java ================================================ package net.snowflake.client.internal.config; import java.util.Properties; public class ConnectionParameters { private final String url; private final Properties params; public ConnectionParameters(String uri, Properties params) { this.url = uri; this.params = params; } public String getUrl() { return url; } public Properties getParams() { return params; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/config/SFClientConfig.java ================================================ package net.snowflake.client.internal.config; import com.fasterxml.jackson.annotation.JsonAnySetter; import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonProperty; import java.util.LinkedHashMap; import java.util.LinkedHashSet; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; /** POJO class for Snowflake's client config. 
*/ public class SFClientConfig { // Used to keep the unknown properties when deserializing @JsonIgnore @JsonAnySetter private Map unknownParams = new LinkedHashMap<>(); @JsonProperty("common") private CommonProps commonProps; @JsonIgnore private String configFilePath; public SFClientConfig() {} public SFClientConfig(CommonProps commonProps) { this.commonProps = commonProps; } public CommonProps getCommonProps() { return commonProps; } public void setCommonProps(CommonProps commonProps) { this.commonProps = commonProps; } public String getConfigFilePath() { return configFilePath; } public void setConfigFilePath(String configFilePath) { this.configFilePath = configFilePath; } Set getUnknownParamKeys() { Set unknownParamKeys = new LinkedHashSet<>(unknownParams.keySet()); if (!commonProps.unknownParams.isEmpty()) { unknownParamKeys.addAll( commonProps.unknownParams.keySet().stream() .map(s -> "common:" + s) .collect(Collectors.toCollection(LinkedHashSet::new))); } return unknownParamKeys; } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } SFClientConfig that = (SFClientConfig) o; return Objects.equals(commonProps, that.commonProps); } @Override public int hashCode() { return Objects.hash(commonProps); } public static class CommonProps { // Used to keep the unknown properties when deserializing @JsonIgnore @JsonAnySetter Map unknownParams = new LinkedHashMap<>(); @JsonProperty("log_level") private String logLevel; @JsonProperty("log_path") private String logPath; public CommonProps() {} public CommonProps(String logLevel, String logPath) { this.logLevel = logLevel; this.logPath = logPath; } public String getLogLevel() { return logLevel; } public void setLogLevel(String logLevel) { this.logLevel = logLevel; } public String getLogPath() { return logPath; } public void setLogPath(String logPath) { this.logPath = logPath; } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } CommonProps that = (CommonProps) o; return Objects.equals(logLevel, that.logLevel) && Objects.equals(logPath, that.logPath); } @Override public int hashCode() { return Objects.hash(logLevel, logPath); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/config/SFClientConfigParser.java ================================================ package net.snowflake.client.internal.config; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetEnv; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import com.fasterxml.jackson.databind.ObjectMapper; import java.io.File; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Paths; import java.nio.file.attribute.PosixFilePermission; import java.util.Set; import java.util.regex.Matcher; import java.util.regex.Pattern; import net.snowflake.client.api.driver.SnowflakeDriver; import net.snowflake.client.internal.core.Constants; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; public class SFClientConfigParser { private static final SFLogger logger = SFLoggerFactory.getLogger(SFClientConfigParser.class); public static final String SF_CLIENT_CONFIG_FILE_NAME = "sf_client_config.json"; public static final String SF_CLIENT_CONFIG_ENV_NAME = "SF_CLIENT_CONFIG_FILE"; /** * Construct SFClientConfig from client config file passed by user.
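For orientation, the file this parser consumes is plain JSON shaped like the SFClientConfig POJO above; a minimal hypothetical example (path and values invented), read with the same Jackson call the parser uses:

    // sf_client_config.json:
    // { "common": { "log_level": "DEBUG", "log_path": "/tmp/jdbc-logs" } }
    ObjectMapper mapper = new ObjectMapper();
    SFClientConfig cfg =
        mapper.readValue(new File("/tmp/sf_client_config.json"), SFClientConfig.class); // throws IOException
    String level = cfg.getCommonProps().getLogLevel(); // "DEBUG"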
This method searches the * config file in the following order: 1. configFilePath param which is read from connection URL or * connection property. 2. Environment variable: SF_CLIENT_CONFIG_FILE containing full path to * sf_client_config file. 3. Searches for default config file name (sf_client_config.json) under the * driver directory from where the driver gets loaded. 4. Searches for default config file * name (sf_client_config.json) under the user home directory. * * @param configFilePath SF_CLIENT_CONFIG_FILE parameter read from connection URL or connection * properties * @return SFClientConfig * @throws IOException if exception encountered when reading config file. */ public static SFClientConfig loadSFClientConfig(String configFilePath) throws IOException { try { return loadSFClientConfigInsecure(configFilePath); } catch (SecurityException e) { logger.debug("Cannot load config file due to security exception: {}", e.getMessage()); return null; } } private static SFClientConfig loadSFClientConfigInsecure(String configFilePath) throws IOException { if (configFilePath != null) { logger.debug("Attempting to enable easy logging with file path {}", configFilePath); } String derivedConfigFilePath = null; if (configFilePath != null && !configFilePath.isEmpty()) { // 1. Try to read the file at configFilePath. logger.debug("Using config file specified from connection string: {}", configFilePath); derivedConfigFilePath = configFilePath; } else if (systemGetEnv(SF_CLIENT_CONFIG_ENV_NAME) != null) { // 2. If SF_CLIENT_CONFIG_ENV_NAME is set, read from env. String filePath = systemGetEnv(SF_CLIENT_CONFIG_ENV_NAME); logger.debug("Using config file specified from environment variable: {}", filePath); derivedConfigFilePath = filePath; } else { // 3. Read SF_CLIENT_CONFIG_FILE_NAME from where jdbc jar is loaded. String driverLocation = Paths.get(getConfigFilePathFromJDBCJarLocation(), SF_CLIENT_CONFIG_FILE_NAME).toString(); if (Files.exists(Paths.get(driverLocation))) { logger.debug("Using config file specified from driver directory: {}", driverLocation); derivedConfigFilePath = driverLocation; } else { // 4. Read SF_CLIENT_CONFIG_FILE_NAME if it is present in user home directory. String homeDirectory = systemGetProperty("user.home"); if (homeDirectory != null) { String userHomeFilePath = Paths.get(homeDirectory, SF_CLIENT_CONFIG_FILE_NAME).toString(); if (Files.exists(Paths.get(userHomeFilePath))) { logger.debug("Using config file specified from home directory: {}", userHomeFilePath); derivedConfigFilePath = userHomeFilePath; } } } } if (derivedConfigFilePath != null) { try { File configFile = new File(derivedConfigFilePath); ObjectMapper objectMapper = new ObjectMapper(); SFClientConfig clientConfig = objectMapper.readValue(configFile, SFClientConfig.class); logger.debug( "Reading values logLevel {} and logPath {} from client configuration", clientConfig.getCommonProps().getLogLevel(), clientConfig.getCommonProps().getLogPath()); Set unknownParams = clientConfig.getUnknownParamKeys(); if (!unknownParams.isEmpty()) { for (String unknownParam : unknownParams) { logger.warn("Unknown field from config: {}", unknownParam); } } clientConfig.setConfigFilePath(derivedConfigFilePath); return clientConfig; } catch (IOException e) { String customErrorMessage = "Error while reading config file: " + derivedConfigFilePath; throw new IOException(customErrorMessage, e); } } // return null if none of the above conditions are satisfied.
return null; } public static String getConfigFilePathFromJDBCJarLocation() { try { if (SnowflakeDriver.class.getProtectionDomain() != null && SnowflakeDriver.class.getProtectionDomain().getCodeSource() != null && SnowflakeDriver.class.getProtectionDomain().getCodeSource().getLocation() != null) { String jarPath = SnowflakeDriver.class.getProtectionDomain().getCodeSource().getLocation().getPath(); // remove /snowflake-jdbc-3.13.29.jar and anything that follows it from the path. String updatedPath = new File(jarPath).getParentFile().getPath(); if (systemGetProperty("os.name") != null && systemGetProperty("os.name").toLowerCase().startsWith("windows")) { updatedPath = convertToWindowsPath(updatedPath); } return updatedPath; } return ""; } catch (Exception ex) { // return empty path and move to step 4 of loadSFClientConfig() return ""; } } public static void checkConfigFilePermissions(String derivedConfigFilePath) throws IOException { try { if (Constants.getOS().isPosix()) { // Check permissions of config file if (checkGroupOthersWritePermissions(derivedConfigFilePath)) { String error = String.format( "Error due to other users having permission to modify the config file: %s", derivedConfigFilePath); // TODO: SNOW-1503722 to change warning log to throw an error instead logger.warn(error); } } } catch (IOException e) { throw e; } } static Boolean checkGroupOthersWritePermissions(String configFilePath) throws IOException { Set folderPermissions = Files.getPosixFilePermissions(Paths.get(configFilePath)); return folderPermissions.contains(PosixFilePermission.GROUP_WRITE) || folderPermissions.contains(PosixFilePermission.OTHERS_WRITE); } static String convertToWindowsPath(String filePath) { // Find the Windows file path pattern: ex) C:\ or D:\ Pattern windowsFilePattern = Pattern.compile("[C-Z]:[\\\\/]"); Matcher matcher = windowsFilePattern.matcher(filePath); String prefix = ""; // Path translation for windows if (filePath.startsWith("/")) { filePath = filePath.substring(1); } else if (filePath.startsWith("file:\\")) { filePath = filePath.substring(6); } else if (filePath.startsWith("nested:\\")) { filePath = filePath.substring(8); } else if (filePath.startsWith("\\")) { filePath = filePath.substring(2); } else if (matcher.find() && matcher.start() != 0) { prefix = filePath.substring(0, matcher.start()); filePath = filePath.substring(matcher.start()); } filePath = prefix + filePath.replace("/", "\\"); return filePath; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/config/SFConnectionConfigParser.java ================================================ package net.snowflake.client.internal.config; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.convertSystemGetEnvToBooleanValue; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isBlank; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isWindows; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetEnv; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import com.fasterxml.jackson.dataformat.toml.TomlMapper; import java.io.File; import java.io.IOException; import java.io.UnsupportedEncodingException; import java.net.URLDecoder; import java.nio.charset.Charset; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.nio.file.attribute.PosixFileAttributeView; import 
java.nio.file.attribute.PosixFilePermission; import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Properties; import java.util.Set; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; public class SFConnectionConfigParser { private static final SFLogger logger = SFLoggerFactory.getLogger(SFConnectionConfigParser.class); private static final TomlMapper mapper = new TomlMapper(); public static final String SNOWFLAKE_HOME_KEY = "SNOWFLAKE_HOME"; public static final String SNOWFLAKE_DIR = ".snowflake"; public static final String SNOWFLAKE_DEFAULT_CONNECTION_NAME_KEY = "SNOWFLAKE_DEFAULT_CONNECTION_NAME"; public static final String DEFAULT = "default"; public static final String SNOWFLAKE_TOKEN_FILE_PATH = "/snowflake/session/token"; public static final String SKIP_TOKEN_FILE_PERMISSIONS_VERIFICATION = "SKIP_TOKEN_FILE_PERMISSIONS_VERIFICATION"; public static final String SF_SKIP_WARNING_FOR_READ_PERMISSIONS_ON_CONFIG_FILE = "SF_SKIP_WARNING_FOR_READ_PERMISSIONS_ON_CONFIG_FILE"; private static final List REQUIRED_PERMISSIONS = Arrays.asList(PosixFilePermission.OWNER_WRITE, PosixFilePermission.OWNER_READ); public static ConnectionParameters buildConnectionParameters(String connectionUrl) throws SnowflakeSQLException { Map urlParameters = parseAutoConfigJdbcUrlParameters(connectionUrl); String defaultConnectionName = urlParameters.get("connectionName"); if (isBlank(defaultConnectionName)) { defaultConnectionName = Optional.ofNullable(systemGetEnv(SNOWFLAKE_DEFAULT_CONNECTION_NAME_KEY)).orElse(DEFAULT); } logger.debug("Attempting to load the configuration {} from toml file.", defaultConnectionName); Map fileConnectionConfiguration = loadDefaultConnectionConfiguration(defaultConnectionName); if (fileConnectionConfiguration != null && !fileConnectionConfiguration.isEmpty()) { mergeUrlParametersIntoConfiguration(fileConnectionConfiguration, urlParameters); Properties connectionProperties = new Properties(); connectionProperties.putAll(fileConnectionConfiguration); String url = createUrl(fileConnectionConfiguration); logger.debug("Url created using parameters from connection configuration file: {}", url); if ("oauth".equals(fileConnectionConfiguration.get("authenticator")) && fileConnectionConfiguration.get("token") == null) { Path path = Paths.get( Optional.ofNullable(fileConnectionConfiguration.get("token_file_path")) .orElse(SNOWFLAKE_TOKEN_FILE_PATH)); logger.debug("Token used in connect is read from file: {}", path); try { boolean shouldSkipTokenFilePermissionsVerification = convertSystemGetEnvToBooleanValue(SKIP_TOKEN_FILE_PERMISSIONS_VERIFICATION, false); if (!shouldSkipTokenFilePermissionsVerification) { verifyFilePermissionSecure(path); } else { logger.debug("Skip token file permissions verification"); } String token = new String(Files.readAllBytes(path), Charset.defaultCharset()); if (!token.isEmpty()) { putPropertyIfNotNull(connectionProperties, "token", token.trim()); } else { throw new SnowflakeSQLException( "Non-empty token must be set when the authenticator type is OAUTH"); } } catch (Exception ex) { throw new SnowflakeSQLException(ex, "There is a problem during reading token from file"); } } return new ConnectionParameters(url, connectionProperties); } else { return null; } } static String getConnectionNameFromUrl(String connectionUrl) { Map autoConfigJdbcUrlParameters = 
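(For reference while reading this parser: a hypothetical connections.toml, all values invented, using only keys the code below consumes. The section is chosen by the connectionName URL parameter or the SNOWFLAKE_DEFAULT_CONNECTION_NAME environment variable, defaulting to [default].)

    [default]
    account = "myorg-myaccount"
    authenticator = "oauth"
    token_file_path = "/snowflake/session/token"

    [dev]
    host = "myorg-myaccount.snowflakecomputing.com"
    protocol = "https"
    port = "443"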
parseAutoConfigJdbcUrlParameters(connectionUrl); String connectionNameValue = autoConfigJdbcUrlParameters.get("connectionName"); if (isBlank(connectionNameValue)) { logger.debug("'connectionName' parameter is not configured"); return ""; } else { logger.debug("'connectionName' parameter is configured. The value is " + connectionNameValue); return connectionNameValue; } } private static Map parseAutoConfigJdbcUrlParameters(String connectionUrl) { Map paramMap = new HashMap<>(); int queryStart = connectionUrl.indexOf('?'); if (queryStart == -1) { return paramMap; } String query = connectionUrl.substring(queryStart + 1); String[] propertyPairs = query.split("&"); for (String property : propertyPairs) { String[] propertyKeyVal = property.split("=", 2); if (propertyKeyVal.length == 2) { try { String key = URLDecoder.decode(propertyKeyVal[0], "UTF-8"); String value = URLDecoder.decode(propertyKeyVal[1], "UTF-8"); paramMap.put(key, value); } catch (UnsupportedEncodingException e) { logger.warn("Failed to decode a parameter {}. Ignored.", property); } } } return paramMap; } private static void mergeUrlParametersIntoConfiguration( Map fileConfig, Map urlParameters) { for (Map.Entry entry : urlParameters.entrySet()) { String key = entry.getKey(); if ("connectionName".equalsIgnoreCase(key)) { continue; } String urlValue = entry.getValue(); String tomlValue = fileConfig.get(key); if (tomlValue != null && !tomlValue.equalsIgnoreCase(urlValue)) { logger.debug( "For config item '{}' the values from connections.toml and the connection string" + " differ; the connection string value will be applied.", key); } fileConfig.put(key, urlValue); } } private static Map loadDefaultConnectionConfiguration( String defaultConnectionName) throws SnowflakeSQLException { String configDirectory = systemGetEnv(SNOWFLAKE_HOME_KEY); if (configDirectory == null) { String homeDir = systemGetProperty("user.home"); if (homeDir == null) { logger.debug("cannot determine user home directory"); return new HashMap<>(); } configDirectory = Paths.get(homeDir, SNOWFLAKE_DIR).toString(); } Path configFilePath = Paths.get(configDirectory, "connections.toml"); if (Files.exists(configFilePath)) { logger.debug( "Reading connection parameters from file {} using key: {}", configFilePath, defaultConnectionName); Map parametersMap = readParametersMap(configFilePath); Map defaultConnectionParametersMap = parametersMap.get(defaultConnectionName); if (defaultConnectionParametersMap == null) { logger.debug("The Connection {} not found in connections.toml.", defaultConnectionName); throw new SnowflakeSQLException( "The Connection " + defaultConnectionName + " not found in connections.toml file."); } else { logger.debug("The Connection {} found in connections.toml.", defaultConnectionName); } return defaultConnectionParametersMap; } else { logger.debug("Connection configuration file does not exist"); return new HashMap<>(); } } private static Map readParametersMap(Path configFilePath) throws SnowflakeSQLException { try { File file = new File(configFilePath.toUri()); boolean shouldSkipTokenFilePermissionsVerification = convertSystemGetEnvToBooleanValue(SKIP_TOKEN_FILE_PERMISSIONS_VERIFICATION, false); if (!shouldSkipTokenFilePermissionsVerification) { verifyFilePermissionSecure(configFilePath); } else { logger.debug( "Skip connection configuration file permissions verification for {}", configFilePath); } return mapper.readValue(file, Map.class); } catch (IOException ex) { throw new SnowflakeSQLException(ex, "Problem during reading a configuration
file."); } } static void verifyFilePermissionSecure(Path configFilePath) throws IOException, SnowflakeSQLException { final String fileName = "connections.toml"; if (!isWindows()) { if (configFilePath.getFileName().toString().equals(fileName)) { boolean shouldSkipWarningForReadPermissions = convertSystemGetEnvToBooleanValue( SF_SKIP_WARNING_FOR_READ_PERMISSIONS_ON_CONFIG_FILE, false); PosixFileAttributeView posixFileAttributeView = Files.getFileAttributeView(configFilePath, PosixFileAttributeView.class); Set permissions = posixFileAttributeView.readAttributes().permissions(); if (!shouldSkipWarningForReadPermissions) { boolean groupRead = permissions.contains(PosixFilePermission.GROUP_READ); boolean othersRead = permissions.contains(PosixFilePermission.OTHERS_READ); // Warning if readable by group/others (must be 600 or stricter) if (groupRead || othersRead) { logger.warn( "File %s is readable by group or others. Permissions should be 600 or stricter for maximum security.", configFilePath); } } boolean groupWrite = permissions.contains(PosixFilePermission.GROUP_WRITE); boolean othersWrite = permissions.contains(PosixFilePermission.OTHERS_WRITE); // Error if writable by group/others (must be 644 or stricter) if (groupWrite || othersWrite) { logger.error( "File %s is writable by group or others. Permissions must be 644 or stricter.", configFilePath); throw new SnowflakeSQLException( String.format( "File %s is writable by group or others. Permissions must be 644 or stricter.", configFilePath)); } // Error if executable by anyone boolean ownerExec = permissions.contains(PosixFilePermission.OWNER_EXECUTE); boolean groupExec = permissions.contains(PosixFilePermission.GROUP_EXECUTE); boolean othersExec = permissions.contains(PosixFilePermission.OTHERS_EXECUTE); // Executable permission is not allowed if (ownerExec || groupExec || othersExec) { logger.error( "File %s is executable. Executable permission is not allowed.", configFilePath); throw new SnowflakeSQLException( String.format( "File %s is executable. Executable permission is not allowed.", configFilePath)); } } else { PosixFileAttributeView posixFileAttributeView = Files.getFileAttributeView(configFilePath, PosixFileAttributeView.class); if (!posixFileAttributeView.readAttributes().permissions().stream() .allMatch(o -> REQUIRED_PERMISSIONS.contains(o))) { logger.error( "Reading from file %s is not safe because file permissions are different than read/write for user", configFilePath); throw new SnowflakeSQLException( String.format( "Reading from file %s is not safe because file permissions are different than read/write for user", configFilePath)); } } } } private static String createUrl(Map fileConnectionConfiguration) throws SnowflakeSQLException { Optional maybeAccount = Optional.ofNullable(fileConnectionConfiguration.get("account")); Optional maybeHost = Optional.ofNullable(fileConnectionConfiguration.get("host")); if (maybeAccount.isPresent() && maybeHost.isPresent() && !maybeHost.get().contains(maybeAccount.get())) { logger.warn( String.format( "Inconsistent host and account values in file configuration. ACCOUNT: {} , HOST: {}. 
The host value will be used.", maybeAccount.get(), maybeHost.get())); } String host = maybeHost.orElse( maybeAccount .map(acnt -> String.format("%s.snowflakecomputing.com", acnt)) .orElse(null)); if (host == null || host.isEmpty()) { logger.warn("Neither host nor account is specified in connection parameters"); throw new SnowflakeSQLException( "Unable to connect because neither host nor account is specified in connection parameters"); } logger.debug("Host created using parameters from connection configuration file: {}", host); String port = fileConnectionConfiguration.get("port"); String protocol = fileConnectionConfiguration.get("protocol"); if ("http".equalsIgnoreCase(protocol)) { return String.format( "jdbc:snowflake://http://%s:%s", host, isNullOrEmpty(port) ? "80" : port); } return String.format("jdbc:snowflake://%s:%s", host, isNullOrEmpty(port) ? "443" : port); } private static void putPropertyIfNotNull(Properties props, Object key, Object value) { if (key != null && value != null) { props.put(key, value); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/ArrowSqlInput.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.mapSFExceptionToSQLException; import java.math.BigDecimal; import java.sql.Date; import java.sql.SQLData; import java.sql.SQLException; import java.sql.SQLInput; import java.sql.Time; import java.sql.Timestamp; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.TimeZone; import net.snowflake.client.api.resultset.FieldMetadata; import net.snowflake.client.internal.core.json.Converters; import net.snowflake.client.internal.core.structs.SQLDataCreationHelper; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.client.internal.util.ThrowingBiFunction; import org.apache.arrow.vector.util.JsonStringArrayList; import org.apache.arrow.vector.util.JsonStringHashMap; public class ArrowSqlInput extends BaseSqlInput { private static final SFLogger logger = SFLoggerFactory.getLogger(ArrowSqlInput.class); private final Map input; private int currentIndex = 0; private boolean wasNull = false; public ArrowSqlInput( Map input, SFBaseSession session, Converters converters, List fields) { super(session, converters, fields); this.input = input; } public Map getInput() { return input; } @Override public String readString() throws SQLException { return withNextValue((this::convertString)); } @Override public boolean readBoolean() throws SQLException { return withNextValue(this::convertBoolean); } @Override public byte readByte() throws SQLException { return withNextValue( (value, fieldMetadata) -> mapSFExceptionToSQLException(() -> converters.getNumberConverter().getByte(value))); } @Override public short readShort() throws SQLException { return withNextValue(this::convertShort); } @Override public int readInt() throws SQLException { return withNextValue(this::convertInt); } @Override public long readLong() throws SQLException { return withNextValue(this::convertLong); } @Override public float readFloat() throws SQLException { return withNextValue(this::convertFloat); } @Override public double readDouble() throws SQLException { return withNextValue(this::convertDouble); } @Override public BigDecimal readBigDecimal() throws SQLException { return 
withNextValue(this::convertBigDecimal); } @Override public byte[] readBytes() throws SQLException { return withNextValue(this::convertBytes); } @Override public Date readDate() throws SQLException { return withNextValue( (value, fieldMetadata) -> { if (value == null) { return null; } return convertDate((int) value); }); } private Date convertDate(int value) throws SQLException { return mapSFExceptionToSQLException( () -> converters.getStructuredTypeDateTimeConverter().getDate(value, TimeZone.getDefault())); } @Override public Time readTime() throws SQLException { return withNextValue( (value, fieldMetadata) -> { if (value == null) { return null; } return convertTime((long) value, fieldMetadata); }); } private Time convertTime(long value, FieldMetadata fieldMetadata) throws SQLException { return mapSFExceptionToSQLException( () -> { int scale = fieldMetadata.getScale(); return converters.getStructuredTypeDateTimeConverter().getTime(value, scale); }); } @Override public Timestamp readTimestamp(TimeZone tz) throws SQLException { return withNextValue((value, fieldMetadata) -> convertTimestamp(tz, value, fieldMetadata)); } private Timestamp convertTimestamp(TimeZone tz, Object value, FieldMetadata fieldMetadata) throws SQLException { if (value == null) { return null; } int columnType = ColumnTypeHelper.getColumnType(fieldMetadata.getType(), session); int columnSubType = fieldMetadata.getType(); int scale = fieldMetadata.getScale(); return mapSFExceptionToSQLException( () -> converters .getStructuredTypeDateTimeConverter() .getTimestamp((Map) value, columnType, columnSubType, tz, scale)); } @Override public Object readObject() throws SQLException { return withNextValue( (value, fieldMetadata) -> { if (!(value instanceof JsonStringHashMap)) { throw new SQLException( "Invalid value passed to 'readObject()', expected Map; got: " + value.getClass()); } return value; }); } @Override public T readObject(Class type) throws SQLException { return readObject(type, TimeZone.getDefault()); } @Override public T readObject(Class type, TimeZone tz) throws SQLException { return withNextValue((value, fieldMetadata) -> convertObject(type, tz, value, fieldMetadata)); } private T convertObject(Class type, TimeZone tz, Object value, FieldMetadata fieldMetadata) throws SQLException { if (value == null) { return null; } else if (SQLData.class.isAssignableFrom(type)) { ArrowSqlInput sqlInput = new ArrowSqlInput( (Map) value, session, converters, fieldMetadata.getFields()); SQLData instance = (SQLData) SQLDataCreationHelper.create(type); instance.readSQL(sqlInput, null); return (T) instance; } else if (Map.class.isAssignableFrom(type)) { return (T) convertSqlInputToMap((SQLInput) value); } else if (String.class.isAssignableFrom(type)) { return (T) convertString(value, fieldMetadata); } else if (Boolean.class.isAssignableFrom(type)) { return (T) convertBoolean(value, fieldMetadata); } else if (Byte.class.isAssignableFrom(type)) { return (T) convertBytes(value, fieldMetadata); } else if (Short.class.isAssignableFrom(type)) { return (T) convertShort(value, fieldMetadata); } else if (Integer.class.isAssignableFrom(type)) { return (T) convertInt(value, fieldMetadata); } else if (Long.class.isAssignableFrom(type)) { return (T) convertLong(value, fieldMetadata); } else if (Float.class.isAssignableFrom(type)) { return (T) convertFloat(value, fieldMetadata); } else if (Double.class.isAssignableFrom(type)) { return (T) convertDouble(value, fieldMetadata); } else if (Date.class.isAssignableFrom(type)) { return (T) 
convertDate((int) value); } else if (Time.class.isAssignableFrom(type)) { return (T) convertTime((long) value, fieldMetadata); } else if (Timestamp.class.isAssignableFrom(type)) { return (T) convertTimestamp(tz, value, fieldMetadata); } else if (BigDecimal.class.isAssignableFrom(type)) { return (T) convertBigDecimal(value, fieldMetadata); } else if (byte[].class.isAssignableFrom(type)) { return (T) convertBytes(value, fieldMetadata); } else { logger.debug( "Unsupported type passed to readObject(int columnIndex,Class type): " + type.getName()); throw new SQLException( "Type passed to 'getObject(int columnIndex,Class type)' is unsupported. Type: " + type.getName()); } } @Override public List readList(Class type) throws SQLException { return withNextValue( (value, fieldMetadata) -> { if (value == null) { return null; } List result = new ArrayList(); JsonStringArrayList maps = (JsonStringArrayList) value; for (Object ob : maps) { result.add( convertObject(type, TimeZone.getDefault(), ob, fieldMetadata.getFields().get(0))); } return result; }); } @Override public T[] readArray(Class type) throws SQLException { return withNextValue( (value, fieldMetadata) -> { if (value == null) { return null; } JsonStringArrayList internalValues = (JsonStringArrayList) value; T[] array = (T[]) java.lang.reflect.Array.newInstance(type, internalValues.size()); int counter = 0; for (Object ob : internalValues) { array[counter++] = convertObject(type, TimeZone.getDefault(), ob, fieldMetadata.getFields().get(0)); } return array; }); } @Override public Map readMap(Class type) throws SQLException { return withNextValue( (value, fieldMetadata) -> { if (value == null) { return null; } Map result = new HashMap(); JsonStringArrayList maps = (JsonStringArrayList) value; for (Map map : maps) { result.put( map.get("key").toString(), convertObject( type, TimeZone.getDefault(), map.get("value"), fieldMetadata.getFields().get(1))); } return result; }); } @Override public boolean wasNull() { return wasNull; } @Override Map convertSqlInputToMap(SQLInput sqlInput) { return ((ArrowSqlInput) sqlInput).getInput(); } private T withNextValue(ThrowingBiFunction action) throws SQLException { FieldMetadata field = fields.get(currentIndex++); Object value = input.get(field.getName()); wasNull = value == null; return action.apply(value, field); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/AssertUtil.java ================================================ package net.snowflake.client.internal.core; import net.snowflake.client.api.exception.ErrorCode; public class AssertUtil { /** * Assert the condition is true, otherwise throw an internal error exception with the given * message. 
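To ground the readObject(Class) path of ArrowSqlInput above: a hypothetical SQLData implementation (the Address class and its fields are invented) whose readSQL consumes fields positionally, mirroring the withNextValue cursor:

    public class Address implements SQLData {
      private String city;
      private int zip;

      @Override
      public String getSQLTypeName() { return "ADDRESS"; }

      @Override
      public void readSQL(SQLInput in, String typeName) throws SQLException {
        city = in.readString(); // field order must match the structured type's metadata
        zip = in.readInt();
      }

      @Override
      public void writeSQL(SQLOutput out) throws SQLException {
        out.writeString(city);
        out.writeInt(zip);
      }
    }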
* * @param condition The variable to test the 'truthiness' of * @param internalErrorMesg The error message to display if condition is false * @throws SFException Will be thrown if condition is false */ public static void assertTrue(boolean condition, String internalErrorMesg) throws SFException { if (!condition) { throw new SFException(ErrorCode.INTERNAL_ERROR, internalErrorMesg); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/AttributeEnhancingHttpRequestRetryHandler.java ================================================ package net.snowflake.client.internal.core; import java.io.IOException; import org.apache.http.impl.client.DefaultHttpRequestRetryHandler; import org.apache.http.protocol.HttpContext; /** * Extends {@link DefaultHttpRequestRetryHandler} to store the current execution count (attempt * number) in the {@link HttpContext}. This allows interceptors to identify retry attempts. * *
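For example, a sketch of an interceptor that consumes this attribute (the header name is illustrative, not part of the driver):

    HttpRequestInterceptor attemptLogger =
        (request, context) -> {
          Object count = context.getAttribute("net.snowflake.client.core.execution-count");
          if (count != null) { // present only on retries
            request.addHeader("X-Retry-Attempt", String.valueOf(count));
          }
        };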

The execution count is stored using the key defined by {@link #EXECUTION_COUNT_ATTRIBUTE}. */ class AttributeEnhancingHttpRequestRetryHandler extends DefaultHttpRequestRetryHandler { /** * The key used to store the current execution count (attempt number) in the {@link HttpContext}. * Interceptors can use this key to retrieve the count. The value stored will be an {@link * Integer}. */ static final String EXECUTION_COUNT_ATTRIBUTE = "net.snowflake.client.core.execution-count"; @Override public boolean retryRequest(IOException exception, int executionCount, HttpContext context) { context.setAttribute(EXECUTION_COUNT_ATTRIBUTE, executionCount); return super.retryRequest(exception, executionCount, context); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/BaseSqlInput.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.mapSFExceptionToSQLException; import java.io.InputStream; import java.io.Reader; import java.math.BigDecimal; import java.net.URL; import java.sql.Array; import java.sql.Blob; import java.sql.Clob; import java.sql.NClob; import java.sql.Ref; import java.sql.RowId; import java.sql.SQLException; import java.sql.SQLInput; import java.sql.SQLXML; import java.sql.Timestamp; import java.util.List; import java.util.Map; import net.snowflake.client.api.resultset.FieldMetadata; import net.snowflake.client.internal.core.json.Converters; import net.snowflake.client.internal.jdbc.SnowflakeLoggedFeatureNotSupportedException; public abstract class BaseSqlInput implements SFSqlInput { protected final SFBaseSession session; protected final Converters converters; protected final List fields; protected BaseSqlInput(SFBaseSession session, Converters converters, List fields) { this.session = session; this.converters = converters; this.fields = fields; } @Override public Timestamp readTimestamp() throws SQLException { return readTimestamp(null); } @Override public Reader readCharacterStream() throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(session, "readCharacterStream"); } @Override public InputStream readAsciiStream() throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(session, "readAsciiStream"); } @Override public InputStream readBinaryStream() throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(session, "readBinaryStream"); } @Override public Ref readRef() throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(session, "readRef"); } @Override public Blob readBlob() throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(session, "readBlob"); } @Override public Clob readClob() throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(session, "readClob"); } @Override public Array readArray() throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(session, "readArray"); } @Override public boolean wasNull() throws SQLException { return false; // nulls are not allowed in structure types } @Override public URL readURL() throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(session, "readCharacterStream"); } @Override public NClob readNClob() throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(session, "readNClob"); } @Override public String readNString() throws SQLException { throw new 
SnowflakeLoggedFeatureNotSupportedException(session, "readNString"); } @Override public SQLXML readSQLXML() throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(session, "readSQLXML"); } @Override public RowId readRowId() throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(session, "readRowId"); } abstract Map convertSqlInputToMap(SQLInput sqlInput); protected String convertString(Object value, FieldMetadata fieldMetadata) throws SQLException { int columnType = ColumnTypeHelper.getColumnType(fieldMetadata.getType(), session); int columnSubType = fieldMetadata.getType(); int scale = fieldMetadata.getScale(); return mapSFExceptionToSQLException( () -> converters.getStringConverter().getString(value, columnType, columnSubType, scale)); } protected Boolean convertBoolean(Object value, FieldMetadata fieldMetadata) throws SQLException { int columnType = ColumnTypeHelper.getColumnType(fieldMetadata.getType(), session); return mapSFExceptionToSQLException( () -> converters.getBooleanConverter().getBoolean(value, columnType)); } protected Short convertShort(Object value, FieldMetadata fieldMetadata) throws SQLException { int columnType = ColumnTypeHelper.getColumnType(fieldMetadata.getType(), session); return mapSFExceptionToSQLException( () -> converters.getNumberConverter().getShort(value, columnType)); } protected Integer convertInt(Object value, FieldMetadata fieldMetadata) throws SQLException { int columnType = ColumnTypeHelper.getColumnType(fieldMetadata.getType(), session); return mapSFExceptionToSQLException( () -> converters.getNumberConverter().getInt(value, columnType)); } protected Long convertLong(Object value, FieldMetadata fieldMetadata) throws SQLException { int columnType = ColumnTypeHelper.getColumnType(fieldMetadata.getType(), session); return mapSFExceptionToSQLException( () -> converters.getNumberConverter().getLong(value, columnType)); } protected Float convertFloat(Object value, FieldMetadata fieldMetadata) throws SQLException { int columnType = ColumnTypeHelper.getColumnType(fieldMetadata.getType(), session); return mapSFExceptionToSQLException( () -> converters.getNumberConverter().getFloat(value, columnType)); } protected Double convertDouble(Object value, FieldMetadata fieldMetadata) throws SQLException { int columnType = ColumnTypeHelper.getColumnType(fieldMetadata.getType(), session); return mapSFExceptionToSQLException( () -> converters.getNumberConverter().getDouble(value, columnType)); } protected BigDecimal convertBigDecimal(Object value, FieldMetadata fieldMetadata) throws SQLException { int columnType = ColumnTypeHelper.getColumnType(fieldMetadata.getType(), session); return mapSFExceptionToSQLException( () -> converters.getNumberConverter().getBigDecimal(value, columnType)); } protected byte[] convertBytes(Object value, FieldMetadata fieldMetadata) throws SQLException { int columnType = ColumnTypeHelper.getColumnType(fieldMetadata.getType(), session); int columnSubType = fieldMetadata.getType(); int scale = fieldMetadata.getScale(); return mapSFExceptionToSQLException( () -> converters.getBytesConverter().getBytes(value, columnType, columnSubType, scale)); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/BasicEvent.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; /** Base Event class for events that don't need to deviate from the 
default flush behavior. */ public class BasicEvent extends Event { // Format strings for query state transitions private static final String requestId = "requestId: %s"; private static final String numPings = "numberPings: %d"; private static final String jobId = "jobId: %s"; private static final String chunkIdx = "chunkIndex: %d"; private static final String EVENT_DUMP_PROP = "snowflake.dump_events"; private static final Boolean doDump = systemGetProperty(EVENT_DUMP_PROP) != null; public enum QueryState { QUERY_STARTED(1, "Query Started", "{" + requestId + "}"), SENDING_QUERY(2, "Sending Query", "{" + requestId + "}"), WAITING_FOR_RESULT(3, "Waiting for Result", "{" + requestId + "," + numPings + "}"), PROCESSING_RESULT(4, "Processing Result", "{" + requestId + "}"), CONSUMING_RESULT(5, "Consuming Result", "{" + jobId + "," + chunkIdx + "}"), QUERY_ENDED(6, "Query ended", "{" + requestId + "}"), GETTING_FILES(8, "Getting Files", "{" + requestId + "}"), PUTTING_FILES(9, "Putting Files", "{" + requestId + "}"), ; QueryState(int id, String description, String argString) { this.id = id; this.description = description; this.argString = argString; } public int getId() { return id; } public String getDescription() { return description; } public String getArgString() { return argString; } private final int id; private final String description; private final String argString; } public BasicEvent(Event.EventType type, String message) { super(type, message); } @Override public void flush() { if (doDump) { // this.writeEventDumpLine("Event: " + getType() + "; Message: " + getMessage()); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/CachedCredentialType.java ================================================ package net.snowflake.client.internal.core; enum CachedCredentialType { ID_TOKEN("ID_TOKEN"), MFA_TOKEN("MFATOKEN"), OAUTH_ACCESS_TOKEN("OAUTH_ACCESS_TOKEN"), OAUTH_REFRESH_TOKEN("OAUTH_REFRESH_TOKEN"), DPOP_BUNDLED_ACCESS_TOKEN( "DPOP_BUNDLED_ACCESS_TOKEN"); // contains '.' separated, base64 encoded access token and DPoP // public key private final String value; CachedCredentialType(String value) { this.value = value; } String getValue() { return value; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/CancellationReason.java ================================================ package net.snowflake.client.internal.core; public enum CancellationReason { UNKNOWN, CLIENT_REQUESTED, TIMEOUT } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/ChunkDownloader.java ================================================ package net.snowflake.client.internal.core; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.jdbc.SnowflakeResultChunk; /** Provide offline result chunk (which contains result data) to back to result set */ public interface ChunkDownloader { /** * Get next SnowflakeResultChunk that is ready to be consumed by the main thread. 
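A sketch of the consumption loop this interface implies, assuming (not stated in the contract below) that a null chunk marks the end of the result set:

    SnowflakeResultChunk chunk;
    while ((chunk = downloader.getNextChunkToConsume()) != null) { // blocks until loaded
      // consume rows from the chunk
    }
    DownloaderMetrics metrics = downloader.terminate(); // release resources, collect metrics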
The caller will * be blocked if the chunk is not ready to be consumed (a.k.a not loaded into memory yet) * * @return result chunk with data loaded * @throws InterruptedException if downloading thread was interrupted * @throws SnowflakeSQLException if downloader encountered an error */ SnowflakeResultChunk getNextChunkToConsume() throws InterruptedException, SnowflakeSQLException; /** * Terminate the chunk downloader, release all resources allocated * * @return metrics measuring downloader performance * @throws InterruptedException if error encountered */ DownloaderMetrics terminate() throws InterruptedException; } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/ColumnTypeHelper.java ================================================ package net.snowflake.client.internal.core; import java.sql.Types; import net.snowflake.client.api.resultset.SnowflakeType; public class ColumnTypeHelper { public static int getColumnType(int internalColumnType, SFBaseSession session) { int externalColumnType = internalColumnType; if (internalColumnType == SnowflakeType.EXTRA_TYPES_TIMESTAMP_LTZ) { externalColumnType = Types.TIMESTAMP; } else if (internalColumnType == SnowflakeType.EXTRA_TYPES_TIMESTAMP_TZ) { externalColumnType = session == null ? Types.TIMESTAMP_WITH_TIMEZONE : session.getEnableReturnTimestampWithTimeZone() ? Types.TIMESTAMP_WITH_TIMEZONE : Types.TIMESTAMP; } return externalColumnType; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/Constants.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import java.util.Optional; import net.snowflake.client.internal.jdbc.SnowflakeUtil; /* * Constants used in JDBC implementation */ public final class Constants { // Session expired error code as returned from Snowflake public static final int SESSION_EXPIRED_GS_CODE = 390112; // Cloud storage credentials expired error code public static final int CLOUD_STORAGE_CREDENTIALS_EXPIRED = 240001; // Session gone error code as returned from Snowflake public static final int SESSION_GONE = 390111; // Error code for all invalid id token cases during login request public static final int ID_TOKEN_INVALID_LOGIN_REQUEST_GS_CODE = 390195; public static final int OAUTH_ACCESS_TOKEN_EXPIRED_GS_CODE = 390318; public static final int OAUTH_ACCESS_TOKEN_INVALID_GS_CODE = 390303; // Error message for IOException when no space is left for GET public static final String NO_SPACE_LEFT_ON_DEVICE_ERR = "No space left on device"; public enum OS { WINDOWS, LINUX, MAC, SOLARIS, UNKNOWN; /** * Returns true if this OS supports POSIX file APIs (e.g., PosixFilePermissions). UNKNOWN * returns false to fail safely when OS cannot be determined. 
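This is the guard pattern used elsewhere in the driver (for example around Files.getPosixFilePermissions in SFClientConfigParser); a minimal sketch with a hypothetical path:

    if (Constants.getOS().isPosix()) {
      Set<PosixFilePermission> perms =
          Files.getPosixFilePermissions(Paths.get("/tmp/sf_client_config.json")); // throws IOException
      boolean groupWrite = perms.contains(PosixFilePermission.GROUP_WRITE);
    }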
*/ public boolean isPosix() { return this == LINUX || this == MAC || this == SOLARIS; } } public enum Architecture { X86_64("x86_64"), AARCH64("aarch64"), PPC64("ppc64"), X86("x86"), UNKNOWN("unknown"); private final String identifier; Architecture(String identifier) { this.identifier = identifier; } public String getIdentifier() { return identifier; } } private static OS os = null; private static Architecture architecture = null; private static Boolean isAix = null; public static synchronized OS getOS() { if (os == null) { String operSys = Optional.ofNullable(systemGetProperty("os.name")).map(String::toLowerCase).orElse(""); if (operSys.contains("win")) { os = OS.WINDOWS; } else if (operSys.contains("nix") || operSys.contains("nux") || operSys.contains("aix")) { os = OS.LINUX; } else if (operSys.contains("mac")) { os = OS.MAC; } else if (operSys.contains("sunos")) { os = OS.SOLARIS; } else { os = OS.UNKNOWN; } } return os; } public static synchronized Architecture getArchitecture() { if (architecture == null) { architecture = Architecture.UNKNOWN; String osArch = systemGetProperty("os.arch"); if (!SnowflakeUtil.isNullOrEmpty(osArch)) { osArch = osArch.toLowerCase(); if (osArch.contains("amd64") || osArch.contains("x86_64")) { architecture = Architecture.X86_64; } else if (osArch.contains("aarch64") || osArch.contains("arm64")) { architecture = Architecture.AARCH64; } else if (osArch.contains("ppc64")) { architecture = Architecture.PPC64; } else if (osArch.contains("x86") || osArch.contains("i386") || osArch.contains("i686")) { architecture = Architecture.X86; } } } return architecture; } public static boolean isAix() { if (isAix == null) { String osName = systemGetProperty("os.name"); isAix = osName != null && osName.toLowerCase().contains("aix"); } return isAix; } public static void clearOSForTesting() { os = null; architecture = null; } public static final int MB = 1024 * 1024; public static final long GB = 1024 * 1024 * 1024; } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/CredentialManager.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty; import java.net.URI; import java.nio.charset.StandardCharsets; import java.util.Base64; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; public class CredentialManager { private static final SFLogger logger = SFLoggerFactory.getLogger(CredentialManager.class); private SecureStorageManager secureStorageManager; private CredentialManager() { initSecureStorageManager(); } private void initSecureStorageManager() { try { if (Constants.getOS() == Constants.OS.MAC) { secureStorageManager = SecureStorageAppleManager.builder(); } else if (Constants.getOS() == Constants.OS.WINDOWS) { secureStorageManager = SecureStorageWindowsManager.builder(); } else if (Constants.getOS() == Constants.OS.LINUX) { secureStorageManager = SecureStorageLinuxManager.getInstance(); } else { logger.error("Unsupported Operating System. Expected: OSX, Windows, Linux", false); } } catch (NoClassDefFoundError error) { logMissingJnaJarForSecureLocalStorage(); } } /** Helper function for tests to go back to normal settings. */ static void resetSecureStorageManager() { logger.debug("Resetting the secure storage manager"); getInstance().initSecureStorageManager(); } /** * Testing purpose. 
 Inject a mock manager. * * @param manager SecureStorageManager */ static void injectSecureStorageManager(SecureStorageManager manager) { logger.debug("Injecting secure storage manager"); getInstance().secureStorageManager = manager; } private static class CredentialManagerHolder { private static final CredentialManager INSTANCE = new CredentialManager(); } public static CredentialManager getInstance() { return CredentialManagerHolder.INSTANCE; } /** * Reuse the cached id token stored locally * * @param loginInput login input to attach id token */ static void fillCachedIdToken(SFLoginInput loginInput) throws SFException { logger.debug( "Looking for cached id token for user: {}, host: {}", loginInput.getUserName(), loginInput.getHostFromServerUrl()); getInstance() .fillCachedCredential( loginInput, loginInput.getHostFromServerUrl(), loginInput.getUserName(), CachedCredentialType.ID_TOKEN); } /** * Reuse the cached mfa token stored locally * * @param loginInput login input to attach mfa token */ static void fillCachedMfaToken(SFLoginInput loginInput) throws SFException { logger.debug( "Looking for cached mfa token for user: {}, host: {}", loginInput.getUserName(), loginInput.getHostFromServerUrl()); getInstance() .fillCachedCredential( loginInput, loginInput.getHostFromServerUrl(), loginInput.getUserName(), CachedCredentialType.MFA_TOKEN); } /** * Reuse the cached OAuth access token stored locally * * @param loginInput login input to attach access token */ static void fillCachedOAuthAccessToken(SFLoginInput loginInput) throws SFException { String host = getHostForOAuthCacheKey(loginInput); logger.debug( "Looking for cached OAuth access token for user: {}, host: {}", loginInput.getUserName(), host); getInstance() .fillCachedCredential( loginInput, host, loginInput.getUserName(), CachedCredentialType.OAUTH_ACCESS_TOKEN); } /** * Reuse the cached OAuth refresh token stored locally * * @param loginInput login input to attach refresh token */ static void fillCachedOAuthRefreshToken(SFLoginInput loginInput) throws SFException { String host = getHostForOAuthCacheKey(loginInput); logger.debug( "Looking for cached OAuth refresh token for user: {}, host: {}", loginInput.getUserName(), host); getInstance() .fillCachedCredential( loginInput, host, loginInput.getUserName(), CachedCredentialType.OAUTH_REFRESH_TOKEN); } /** * Reuse the cached OAuth access token & DPoP public key tied to it * * @param loginInput login input to attach access token and DPoP public key */ static void fillCachedDPoPBundledAccessToken(SFLoginInput loginInput) throws SFException { String host = getHostForOAuthCacheKey(loginInput); logger.debug( "Looking for cached DPoP public key for user: {}, host: {}", loginInput.getUserName(), host); getInstance() .fillCachedCredential( loginInput, host, loginInput.getUserName(), CachedCredentialType.DPOP_BUNDLED_ACCESS_TOKEN); } /** Reuse the cached token stored locally */ synchronized void fillCachedCredential( SFLoginInput loginInput, String host, String username, CachedCredentialType credType) throws SFException { if (isNullOrEmpty(username)) { logger.debug("Missing username; Cannot read from credential cache"); return; } if (secureStorageManager == null) { logMissingJnaJarForSecureLocalStorage(); return; } String base64EncodedCred, cred = null; try { base64EncodedCred = secureStorageManager.getCredential(host, username, credType.getValue()); } catch (NoClassDefFoundError error) { logMissingJnaJarForSecureLocalStorage(); return; } if (base64EncodedCred == null) { logger.debug("Retrieved {} is null",
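/*
 * Illustrative sketch (not from the driver source): cached values are stored
 * base64-encoded, so the read path above decodes with the same JDK codec the
 * write path uses; legacy plain-text entries that fail to decode are evicted
 * instead. The token value below is hypothetical.
 *
 *   String raw = "my-id-token";
 *   String stored = Base64.getEncoder().encodeToString(raw.getBytes(StandardCharsets.UTF_8));
 *   String restored = new String(Base64.getDecoder().decode(stored)); // equals raw
 */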
credType); } logger.debug( "Setting {}{} token for user: {}, host: {}", base64EncodedCred == null ? "null " : "", credType.getValue(), username, host); if (base64EncodedCred != null && credType != CachedCredentialType.DPOP_BUNDLED_ACCESS_TOKEN) { try { cred = new String(Base64.getDecoder().decode(base64EncodedCred)); } catch (Exception e) { // handle legacy non-base64 encoded cache values (CredentialManager fails to decode) deleteTemporaryCredential(host, username, credType); return; } } switch (credType) { case ID_TOKEN: loginInput.setIdToken(cred); break; case MFA_TOKEN: loginInput.setMfaToken(cred); break; case OAUTH_ACCESS_TOKEN: loginInput.setOauthAccessToken(cred); break; case OAUTH_REFRESH_TOKEN: loginInput.setOauthRefreshToken(cred); break; case DPOP_BUNDLED_ACCESS_TOKEN: updateInputWithTokenAndPublicKey(base64EncodedCred, loginInput); break; default: throw new SFException( ErrorCode.INTERNAL_ERROR, "Unrecognized type {} for local cached credential", credType); } } private void updateInputWithTokenAndPublicKey(String cred, SFLoginInput loginInput) throws SFException { if (!isNullOrEmpty(cred)) { String[] values = cred.split("\\."); if (values.length != 2) { throw new SFException( ErrorCode.INTERNAL_ERROR, "Invalid DPoP bundled access token credential format"); } Base64.Decoder decoder = Base64.getDecoder(); loginInput.setOauthAccessToken(new String(decoder.decode(values[0]))); loginInput.setDPoPPublicKey(new String(decoder.decode(values[1]))); } } static void writeIdToken(SFLoginInput loginInput, String idToken) throws SFException { logger.debug( "Caching id token in a secure storage for user: {}, host: {}", loginInput.getUserName(), loginInput.getHostFromServerUrl()); getInstance() .writeTemporaryCredential( loginInput.getHostFromServerUrl(), loginInput.getUserName(), idToken, CachedCredentialType.ID_TOKEN); } static void writeMfaToken(SFLoginInput loginInput, String mfaToken) throws SFException { logger.debug( "Caching mfa token in a secure storage for user: {}, host: {}", loginInput.getUserName(), loginInput.getHostFromServerUrl()); getInstance() .writeTemporaryCredential( loginInput.getHostFromServerUrl(), loginInput.getUserName(), mfaToken, CachedCredentialType.MFA_TOKEN); } /** * Store OAuth Access Token * * @param loginInput login input providing the token and cache key */ static void writeOAuthAccessToken(SFLoginInput loginInput) throws SFException { String host = getHostForOAuthCacheKey(loginInput); logger.debug( "Caching OAuth access token in a secure storage for user: {}, host: {}", loginInput.getUserName(), host); getInstance() .writeTemporaryCredential( host, loginInput.getUserName(), loginInput.getOauthAccessToken(), CachedCredentialType.OAUTH_ACCESS_TOKEN); } /** * Store OAuth Refresh Token * * @param loginInput login input providing the token and cache key */ static void writeOAuthRefreshToken(SFLoginInput loginInput) throws SFException { String host = getHostForOAuthCacheKey(loginInput); logger.debug( "Caching OAuth refresh token in a secure storage for user: {}, host: {}", loginInput.getUserName(), host); getInstance() .writeTemporaryCredential( host, loginInput.getUserName(), loginInput.getOauthRefreshToken(), CachedCredentialType.OAUTH_REFRESH_TOKEN); } /** * Store OAuth DPoP Public Key With Token * * @param loginInput login input providing the token and cache key */ static void writeDPoPBundledAccessToken(SFLoginInput loginInput) throws SFException { String host = getHostForOAuthCacheKey(loginInput); logger.debug( "Caching DPoP public key in a secure storage for user: {}, host: {}",
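/*
 * Sketch of the DPoP bundle format produced below (token and key values are
 * hypothetical): the access token and the DPoP public key are base64-encoded
 * separately and joined with '.', which updateInputWithTokenAndPublicKey
 * splits back into exactly two parts on read. '.' is a safe separator because
 * it is not part of the standard base64 alphabet.
 *
 *   Base64.Encoder enc = Base64.getEncoder();
 *   String bundle =
 *       enc.encodeToString("access-token".getBytes(StandardCharsets.UTF_8))
 *           + "."
 *           + enc.encodeToString("public-key".getBytes(StandardCharsets.UTF_8));
 *   String[] parts = bundle.split("\\.");  // expected length == 2
 */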
loginInput.getUserName(), host); Base64.Encoder encoder = Base64.getEncoder(); String tokenBase64 = encoder.encodeToString(loginInput.getOauthAccessToken().getBytes(StandardCharsets.UTF_8)); String publicKeyBase64 = encoder.encodeToString(loginInput.getDPoPPublicKey().getBytes(StandardCharsets.UTF_8)); getInstance() .writeTemporaryCredential( host, loginInput.getUserName(), tokenBase64 + "." + publicKeyBase64, CachedCredentialType.DPOP_BUNDLED_ACCESS_TOKEN); } /** Store the temporary credential */ synchronized void writeTemporaryCredential( String host, String user, String cred, CachedCredentialType credType) { if (isNullOrEmpty(user)) { logger.debug("Missing username; Cannot write to credential cache"); return; } if (isNullOrEmpty(cred)) { logger.debug("No {} is given.", credType); return; // no credential } if (secureStorageManager == null) { logMissingJnaJarForSecureLocalStorage(); return; } try { if (credType == CachedCredentialType.DPOP_BUNDLED_ACCESS_TOKEN) { // DPOP_ACCESS_TOKEN is already preformatted and Base64 encoded secureStorageManager.setCredential(host, user, credType.getValue(), cred); } else { String base64EncodedCred = Base64.getEncoder().encodeToString(cred.getBytes(StandardCharsets.UTF_8)); secureStorageManager.setCredential(host, user, credType.getValue(), base64EncodedCred); } } catch (NoClassDefFoundError error) { logMissingJnaJarForSecureLocalStorage(); } } /** Delete the id token cache */ static void deleteIdTokenCacheEntry(String host, String user) { logger.debug( "Removing cached id token from a secure storage for user: {}, host: {}", user, host); getInstance().deleteTemporaryCredential(host, user, CachedCredentialType.ID_TOKEN); } /** Delete the mfa token cache */ static void deleteMfaTokenCacheEntry(String host, String user) { logger.debug( "Removing cached mfa token from a secure storage for user: {}, host: {}", user, host); getInstance().deleteTemporaryCredential(host, user, CachedCredentialType.MFA_TOKEN); } /** Delete the Oauth access token cache */ static void deleteOAuthAccessTokenCacheEntry(String host, String user) { logger.debug( "Removing cached oauth access token from a secure storage for user: {}, host: {}", user, host); getInstance().deleteTemporaryCredential(host, user, CachedCredentialType.OAUTH_ACCESS_TOKEN); } /** Delete the Oauth refresh token cache */ static void deleteOAuthRefreshTokenCacheEntry(String host, String user) { logger.debug( "Removing cached OAuth refresh token from a secure storage for user: {}, host: {}", user, host); getInstance().deleteTemporaryCredential(host, user, CachedCredentialType.OAUTH_REFRESH_TOKEN); } /** Delete the DPoP bundled access token cache */ static void deleteDPoPBundledAccessTokenCacheEntry(String host, String user) { logger.debug( "Removing cached DPoP public key from a secure storage for user: {}, host: {}", user, host); getInstance() .deleteTemporaryCredential(host, user, CachedCredentialType.DPOP_BUNDLED_ACCESS_TOKEN); } /** Delete the OAuth access token cache */ static void deleteOAuthAccessTokenCacheEntry(SFLoginInput loginInput) throws SFException { String host = getHostForOAuthCacheKey(loginInput); deleteOAuthAccessTokenCacheEntry(host, loginInput.getUserName()); } /** Delete the OAuth refresh token cache */ static void deleteOAuthRefreshTokenCacheEntry(SFLoginInput loginInput) throws SFException { String host = getHostForOAuthCacheKey(loginInput); deleteOAuthRefreshTokenCacheEntry(host, loginInput.getUserName()); } /** Delete the DPoP bundled access token cache */ static void 
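/*
 * Sketch of the cache-key host resolution used by the OAuth entries above and
 * implemented in getHostForOAuthCacheKey below (URL is hypothetical): when an
 * external IdP token endpoint is configured, the IdP host keys the cache
 * entry; otherwise the Snowflake account host from the server URL is used.
 *
 *   String tokenRequestUrl = "https://idp.example.com/oauth/token";
 *   String cacheHost = URI.create(tokenRequestUrl).getHost(); // "idp.example.com"
 */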
deleteDPoPBundledAccessTokenCacheEntry(SFLoginInput loginInput) throws SFException { String host = getHostForOAuthCacheKey(loginInput); deleteDPoPBundledAccessTokenCacheEntry(host, loginInput.getUserName()); } /** * Method required for OAuth token caching, since actual token is not Snowflake account-specific, * but rather IdP-specific */ static String getHostForOAuthCacheKey(SFLoginInput loginInput) throws SFException { String oauthTokenRequestUrl = loginInput.getOauthLoginInput().getTokenRequestUrl(); if (oauthTokenRequestUrl != null) { URI parsedUrl = URI.create(oauthTokenRequestUrl); return parsedUrl.getHost(); } else { return loginInput.getHostFromServerUrl(); } } /** * Delete the temporary credential * * @param host host name * @param user user name * @param credType type of the credential */ synchronized void deleteTemporaryCredential( String host, String user, CachedCredentialType credType) { if (secureStorageManager == null) { logMissingJnaJarForSecureLocalStorage(); return; } if (isNullOrEmpty(user)) { logger.debug("Missing username; Cannot delete from credential cache"); return; } try { secureStorageManager.deleteCredential(host, user, credType.getValue()); } catch (NoClassDefFoundError error) { logMissingJnaJarForSecureLocalStorage(); } } private static void logMissingJnaJarForSecureLocalStorage() { logger.warn( "JNA jar files are needed for Secure Local Storage service. Please follow the Snowflake JDBC instruction for Secure Local Storage feature. Fall back to normal process.", false); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/DataConversionContext.java ================================================ package net.snowflake.client.internal.core; import java.util.TimeZone; import net.snowflake.client.internal.common.core.SFBinaryFormat; import net.snowflake.common.core.SnowflakeDateTimeFormat; /** * This class contains formatter info about each data type and related flags etc. And it is scoped * to a single result set. 
i.e., each result set object should have its own formatter info */ public interface DataConversionContext { /** * @return timestamp_ltz formatter */ SnowflakeDateTimeFormat getTimestampLTZFormatter(); /** * @return timestamp_ntz formatter */ SnowflakeDateTimeFormat getTimestampNTZFormatter(); /** * @return timestamp_tz formatter */ SnowflakeDateTimeFormat getTimestampTZFormatter(); /** * @return date formatter */ SnowflakeDateTimeFormat getDateFormatter(); /** * @return time formatter */ SnowflakeDateTimeFormat getTimeFormatter(); /** * @return binary formatter */ SFBinaryFormat getBinaryFormatter(); /** * get scale from Snowflake metadata * * @param columnIndex column index * @return scale value */ int getScale(int columnIndex); /** * @return current session */ SFBaseSession getSession(); /** * @return session time zone */ TimeZone getTimeZone(); /** * @return whether to honor client time zone for timestamp_ntz */ boolean getHonorClientTZForTimestampNTZ(); /** * @return result version */ long getResultVersion(); } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/DefaultFileCacheManager.java ================================================ package net.snowflake.client.internal.core; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStreamReader; import java.io.OutputStreamWriter; import java.io.Reader; import java.io.Writer; import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.nio.file.attribute.BasicFileAttributes; import java.util.Date; import java.util.function.Supplier; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; class DefaultFileCacheManager implements FileCacheManager { private static final SFLogger logger = SFLoggerFactory.getLogger(DefaultFileCacheManager.class); private static final ObjectMapper OBJECT_MAPPER = ObjectMapperFactory.getObjectMapper(); private static final Charset DEFAULT_FILE_ENCODING = StandardCharsets.UTF_8; private File cacheFile; private File cacheLockFile; private String baseCacheFileName; private final boolean onlyOwnerPermissions; private final long cacheFileLockExpirationInMilliseconds; DefaultFileCacheManager( File cacheFile, File cacheLockFile, String baseCacheFileName, boolean onlyOwnerPermissions, long cacheFileLockExpirationInMilliseconds) { this.cacheFile = cacheFile; this.cacheLockFile = cacheLockFile; this.baseCacheFileName = baseCacheFileName; this.onlyOwnerPermissions = onlyOwnerPermissions; this.cacheFileLockExpirationInMilliseconds = cacheFileLockExpirationInMilliseconds; logger.debug("Using cache file: {}", cacheFile.getAbsolutePath()); } @Override public synchronized String getCacheFilePath() { return cacheFile.getAbsolutePath(); } @Override public synchronized void overrideCacheFile(File newCacheFile) { if (!FileUtil.exists(newCacheFile)) { logger.debug("Cache file doesn't exist.
File: {}", newCacheFile); } if (onlyOwnerPermissions) { FileUtil.handleWhenFilePermissionsWiderThanUserOnly(newCacheFile, "Override cache file"); FileUtil.handleWhenParentDirectoryPermissionsWiderThanUserOnly( newCacheFile, "Override cache file"); } else { FileUtil.logFileUsage(newCacheFile, "Override cache file", false); } this.cacheFile = newCacheFile; this.baseCacheFileName = newCacheFile.getName(); this.cacheLockFile = new File(newCacheFile.getParentFile(), this.baseCacheFileName + ".lck"); } @Override public synchronized T withLock(Supplier supplier) { if (cacheLockFile.exists()) { deleteCacheLockIfExpired(); } if (!tryToLockCacheFile()) { logger.debug("Failed to lock the file. Skipping cache operation", false); return null; } try { return supplier.get(); } finally { if (!unlockCacheFile()) { logger.debug("Failed to unlock cache file", false); } } } @Override public synchronized JsonNode readCacheFile() { try { if (!cacheFile.exists()) { logger.debug("Cache file doesn't exist. Ignoring read. File: {}", cacheFile); return null; } try (Reader reader = new InputStreamReader(new FileInputStream(cacheFile), DEFAULT_FILE_ENCODING)) { if (onlyOwnerPermissions) { FileUtil.handleWhenFilePermissionsWiderThanUserOnly(cacheFile, "Read cache"); FileUtil.handleWhenParentDirectoryPermissionsWiderThanUserOnly(cacheFile, "Read cache"); FileUtil.throwWhenOwnerDifferentThanCurrentUser(cacheFile, "Read cache"); } else { FileUtil.logFileUsage(cacheFile, "Read cache", false); } return OBJECT_MAPPER.readTree(reader); } } catch (IOException ex) { logger.debug("Failed to read the cache file. No worry. File: {}, Err: {}", cacheFile, ex); } return null; } @Override public synchronized void writeCacheFile(JsonNode input) { logger.debug("Writing cache file. File: {}", cacheFile); try { if (input == null || !cacheFile.exists()) { logger.debug( "Cache file doesn't exist or input is null. Ignoring write. File: {}", cacheFile); return; } try (Writer writer = new OutputStreamWriter(new FileOutputStream(cacheFile), DEFAULT_FILE_ENCODING)) { if (onlyOwnerPermissions) { FileUtil.handleWhenFilePermissionsWiderThanUserOnly(cacheFile, "Write to cache"); FileUtil.handleWhenParentDirectoryPermissionsWiderThanUserOnly( cacheFile, "Write to cache"); } else { FileUtil.logFileUsage(cacheFile, "Write to cache", false); } writer.write(input.toString()); } } catch (IOException ex) { logger.debug("Failed to write the cache file. File: {}", cacheFile); } } @Override public synchronized void deleteCacheFile() { logger.debug("Deleting cache file. File: {}, lock file: {}", cacheFile, cacheLockFile); unlockCacheFile(); if (!cacheFile.delete()) { logger.debug("Failed to delete the file: {}", cacheFile); } } private synchronized boolean tryToLockCacheFile() { int cnt = 0; boolean locked = false; while (cnt < 5 && !(locked = lockCacheFile())) { try { Thread.sleep(10); } catch (InterruptedException ex) { // doesn't matter } ++cnt; } if (!locked) { deleteCacheLockIfExpired(); if (!lockCacheFile()) { logger.debug("Failed to lock the cache file.", false); } } return locked; } private synchronized void deleteCacheLockIfExpired() { long currentTime = new Date().getTime(); long lockFileTs = fileCreationTime(cacheLockFile); if (lockFileTs < 0) { logger.debug("Failed to get the timestamp of lock directory"); } else if (lockFileTs < currentTime - this.cacheFileLockExpirationInMilliseconds) { try { if (!cacheLockFile.delete()) { logger.debug("Failed to delete the directory. 
Dir: {}", cacheLockFile); } else { logger.debug("Deleted expired cache lock directory.", false); } } catch (Exception e) { logger.debug( "Failed to delete the directory. Dir: {}, Error: {}", cacheLockFile, e.getMessage()); } } } private static synchronized long fileCreationTime(File targetFile) { if (!FileUtil.exists(targetFile)) { logger.debug("File does not exist. File: {}", targetFile); return -1; } try { Path cacheFileLockPath = Paths.get(targetFile.getAbsolutePath()); BasicFileAttributes attr = Files.readAttributes(cacheFileLockPath, BasicFileAttributes.class); return attr.creationTime().toMillis(); } catch (IOException ex) { logger.debug("Failed to get creation time. File/Dir: {}, Err: {}", targetFile, ex); } return -1; } private synchronized boolean lockCacheFile() { return cacheLockFile.mkdirs(); } private synchronized boolean unlockCacheFile() { return cacheLockFile.delete(); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/DownloaderMetrics.java ================================================ package net.snowflake.client.internal.core; /** Metrics related to chunk downloader performance */ public class DownloaderMetrics { /** time in millis that main thread is blocked and waits for chunk is ready */ private final long millisWaiting; /** time in millis that background thread is downloading the data */ private final long millisDownloading; /** time in millis that background thread is parsing data */ private final long millisParsing; public DownloaderMetrics(long millisWaiting, long millisDownloading, long millisParsing) { this.millisWaiting = millisWaiting; this.millisDownloading = millisDownloading; this.millisParsing = millisParsing; } long getMillisWaiting() { return millisWaiting; } long getMillisDownloading() { return millisDownloading; } long getMillisParsing() { return millisParsing; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/Event.java ================================================ package net.snowflake.client.internal.core; import com.google.common.base.Preconditions; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.io.OutputStream; import java.io.PrintWriter; import java.util.zip.GZIPOutputStream; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; /** Abstract class to encapsulate a Client-side Event and any methods associated with it. 
*/ public abstract class Event { private static final SFLogger logger = SFLoggerFactory.getLogger(Event.class); private static final String EVENT_DUMP_FILE_NAME = "sf_event_"; private static final String EVENT_DUMP_FILE_EXT = ".dmp.gz"; // need to check if directory exists, if not try to create it // need to check file size, see if it exceeds maximum (need parameter for this) public static enum EventType { NETWORK_ERROR(1, "NETWORK ERROR", BasicEvent.class), STATE_TRANSITION(2, "STATE TRANSITION", BasicEvent.class), NONE(100, "NONE", BasicEvent.class); public int getId() { return id; } public String getDescription() { return description; } public Class getEventClass() { return eventClass; } EventType(int id, String description, Class eventClass) { this.id = id; this.description = description; this.eventClass = eventClass; } private final int id; private final String description; private final Class eventClass; } private EventType type; private String message; public Event(EventType type, String message) { Preconditions.checkArgument(type.getEventClass() == this.getClass()); this.type = type; this.message = message; } public EventType getType() { return this.type; } public void setType(EventType type) { this.type = type; } public String getMessage() { return this.message; } public void setMessage(String message) { this.message = message; } protected void writeEventDumpLine(String message) { final String eventDumpPath = EventUtil.getDumpPathPrefix() + "/" + EVENT_DUMP_FILE_NAME + EventUtil.getDumpFileId() + EVENT_DUMP_FILE_EXT; // If the event dump file is too large, truncate if (new File(eventDumpPath).length() < EventUtil.getmaxDumpFileSizeBytes()) { try { final OutputStream outStream = new GZIPOutputStream(new FileOutputStream(eventDumpPath, true)); PrintWriter eventDumper = new PrintWriter(outStream, true); eventDumper.println(message); eventDumper.flush(); eventDumper.close(); } catch (IOException ex) { logger.error( "Could not open Event dump file {}, exception:{}", eventDumpPath, ex.getMessage()); } } else { logger.error( "Failed to dump Event because dump file is " + "too large. Delete dump file or increase maximum dump file size.", false); } } /* * An event is "flushed" when it is leaving the system, hence the behavior * defined by flush dictates what to do with an Event when the Handler is * finished buffering it. 
*/ public abstract void flush(); } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/EventHandler.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.io.OutputStream; import java.io.PrintWriter; import java.text.DateFormat; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Comparator; import java.util.Date; import java.util.TimeZone; import java.util.TreeSet; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; import java.util.logging.Formatter; import java.util.logging.Handler; import java.util.logging.LogRecord; import java.util.zip.GZIPOutputStream; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; public class EventHandler extends Handler { private static final SFLogger logger = SFLoggerFactory.getLogger(EventHandler.class); // Number of entries in the log buffer in memory protected static final long LOG_BUFFER_SIZE = (1L << 14); // Maximum amount of time a Snowflake dump file can exist before being // delete upon the next attempt to dump (1 week) protected static final long FILE_EXPN_TIME_MS = 7L * 24L * 3600L * 1000L; // Dump file properties protected static final String LOG_DUMP_FILE_NAME = "sf_log_"; protected static final String LOG_DUMP_FILE_EXT = ".dmp"; protected static final String LOG_DUMP_COMP_EXT = ".gz"; // Property to control time (in hours) an incident signature will be throttled for protected static final String THROTTLE_DURATION_PROP = "snowflake.throttle_duration"; // Property to control number of times an incident signature can be seen // before being throttled protected static final String THROTTLE_LIMIT_PROP = "snowflake.throttle_limit"; // Property to disable dumps completely protected static final String DISABLE_DUMPS_PROP = "snowflake.disable_debug_dumps"; // Property to set the number of allowable snowflake dump files protected static final String MAX_NUM_DUMP_FILES_PROP = "snowflake.max_dumpfiles"; // Property to set the number of allowable snowflake dump files protected static final String MAX_SIZE_DUMPS_MB_PROP = "snowflake.max_dumpdir_size_mb"; // Property to disable GZIP compression of log dump files protected static final String DISABLE_DUMP_COMPR_PROP = "snowflake.disable_log_compression"; private static final int THROTTLE_DURATION_HRS = systemGetProperty(THROTTLE_DURATION_PROP) == null ? 1 : Integer.valueOf(systemGetProperty(THROTTLE_DURATION_PROP)); private static final int INCIDENT_THROTTLE_LIMIT_PER_HR = systemGetProperty(THROTTLE_LIMIT_PROP) == null ? 
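/*
 * All of the knobs above are plain JVM system properties read once at class
 * load; a hypothetical configuration could be passed on the command line,
 * e.g.:
 *
 *   java -Dsnowflake.throttle_duration=2 \
 *        -Dsnowflake.max_dumpfiles=50 \
 *        -Dsnowflake.max_dumpdir_size_mb=64 \
 *        -Dsnowflake.disable_log_compression=true \
 *        -jar my-app.jar
 */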
1 : Integer.valueOf(systemGetProperty(THROTTLE_LIMIT_PROP)); // Default values private static final int DEFAULT_MAX_DUMP_FILES = 100; private static final int DEFAULT_MAX_DUMPDIR_SIZE_MB = 128; /** Runnable to handle periodic flushing of the event buffer */ private class QueueFlusher implements Runnable { @Override public void run() { flushEventBuffer(); } } // Location to dump log entries to private final String logDumpPathPrefix; // Max size the event buffer can reach before being forcibly flushed private final int maxEntries; // Period of time (in ms) to wait before waking the QueueFlusher private final int flushPeriodMs; // Queue to buffer events while they are waiting to be flushed private final ArrayList eventBuffer; // Queue to buffer log messages private final ArrayList logBuffer; // Executor to periodically flush the eventBuffer private ScheduledExecutorService flusher; public EventHandler(int maxEntries, int flushPeriodMs) { this.maxEntries = maxEntries; this.flushPeriodMs = flushPeriodMs; eventBuffer = new ArrayList<>(); logBuffer = new ArrayList<>(); logDumpPathPrefix = EventUtil.getDumpPathPrefix(); } /** * Returns current size of the event buffer * * @return size of eventBuffer */ public synchronized int getBufferSize() { return eventBuffer.size(); } /** * Returns the current size of the log buffer * * @return size of log buffer */ public synchronized long getLogBufferSize() { return logBuffer.size(); } /** Creates and runs a new QueueFlusher thread */ synchronized void startFlusher() { // Create a new scheduled executor service with a thread factory that // creates daemonized threads; this way if the user doesn't exit nicely // the JVM Runtime won't hang flusher = Executors.newScheduledThreadPool( 1, new ThreadFactory() { @Override public Thread newThread(Runnable r) { Thread t = Executors.defaultThreadFactory().newThread(r); t.setDaemon(true); return t; } }); flusher.scheduleWithFixedDelay(new QueueFlusher(), 0, flushPeriodMs, TimeUnit.MILLISECONDS); } /** Stops the running QueueFlusher thread, if any */ synchronized void stopFlusher() { if (flusher != null) { flusher.shutdown(); } } /* * Pushes an event onto the event buffer and flushes if specified or if * the buffer has reached maximum capacity. 
*/ private synchronized void pushEvent(Event event, boolean flushBuffer) { eventBuffer.add(event); if (flushBuffer || eventBuffer.size() >= maxEntries) { this.flushEventBuffer(); } } /** * Triggers a new event of type @type with message @message and flushes the eventBuffer if full * * @param type event type * @param message triggering message */ void triggerBasicEvent(Event.EventType type, String message) { triggerBasicEvent(type, message, false); } /** * Triggers a new BaseEvent of type @type with message @message and flushes the eventBuffer if * full or @flushBuffer is true * * @param type event type * @param message trigger message * @param flushBuffer true if push the event to flush buffer */ void triggerBasicEvent(Event.EventType type, String message, boolean flushBuffer) { Event triggeredEvent = new BasicEvent(type, message); pushEvent(triggeredEvent, flushBuffer); } /** * Triggers a state transition event to @newState with an identifier (eg, requestId, jobUUID, etc) * * @param newState new state * @param identifier event id */ void triggerStateTransition(BasicEvent.QueryState newState, String identifier) { String msg = "{newState: " + newState.getDescription() + ", " + "info: " + identifier + ", " + "timestamp: " + getCurrentTimestamp() + "}"; Event triggeredEvent = new BasicEvent(Event.EventType.STATE_TRANSITION, msg); pushEvent(triggeredEvent, false); } static String getCurrentTimestamp() { DateFormat fmt = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'"); fmt.setTimeZone(TimeZone.getTimeZone("UTC")); return fmt.format(new Date()); } /** * Dumps the contents of the in-memory log buffer to disk and clears the buffer. * * @param identifier event id */ public void dumpLogBuffer(String identifier) { final ArrayList logBufferCopy; final PrintWriter logDumper; final OutputStream outStream; Formatter formatter = this.getFormatter(); // Check if compression of dump file is enabled boolean disableCompression = systemGetProperty(DISABLE_DUMP_COMPR_PROP) != null; // If no identifying factor (eg, an incident id) was provided, get one if (identifier == null) { identifier = EventUtil.getDumpFileId(); } // Do some sanity checking to make sure we're not flooding the user's // disk with dump files cleanupSfDumps(true); String logDumpPath = logDumpPathPrefix + File.separator + LOG_DUMP_FILE_NAME + identifier + LOG_DUMP_FILE_EXT; if (!disableCompression) { logDumpPath += LOG_DUMP_COMP_EXT; } logger.debug("EventHandler dumping log buffer to {}", logDumpPath); // Copy logBuffer because this is potentially long running. synchronized (this) { logBufferCopy = new ArrayList<>(logBuffer); logBuffer.clear(); } File outputFile = new File(logDumpPath); /* * Because log files could potentially be very large, we should never open * them in append mode. It's rare that this should happen anyways... */ try { // If the dump path doesn't already exist, create it. if (outputFile.getParentFile() != null) { outputFile.getParentFile().mkdirs(); } outStream = disableCompression ? new FileOutputStream(logDumpPath, false) : new GZIPOutputStream(new FileOutputStream(logDumpPath, false)); logDumper = new PrintWriter(outStream, true); } catch (IOException exc) { // Not much to do here, can't dump logs so exit out. logger.debug("Log dump failed, exception: {}", exc.getMessage()); return; } // Iterate over log entries, format them, then dump them. for (LogRecord entry : logBufferCopy) { logDumper.write(formatter != null ? 
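/*
 * Each buffered LogRecord is rendered with the java.util.logging.Formatter
 * configured on this Handler when one is present, else its raw message is
 * written. A minimal sketch of the same choice using a stock JDK formatter:
 *
 *   Formatter f = new java.util.logging.SimpleFormatter();
 *   LogRecord r = new LogRecord(java.util.logging.Level.INFO, "example");
 *   String line = (f != null) ? f.format(r) : r.getMessage();
 */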
formatter.format(entry) : entry.getMessage()); } // Clean up logDumper.flush(); logDumper.close(); } /** * Function to remove old Snowflake Dump files to make room for new ones. * * @param deleteOldest if true, always deletes the oldest file found if max number of dump files * has been reached */ protected void cleanupSfDumps(boolean deleteOldest) { // Check what the maximum number of dumpfiles and the max allowable // aggregate dump file size is. int maxDumpFiles = systemGetProperty(MAX_NUM_DUMP_FILES_PROP) != null ? Integer.valueOf(systemGetProperty(MAX_NUM_DUMP_FILES_PROP)) : DEFAULT_MAX_DUMP_FILES; int maxDumpDirSizeMB = systemGetProperty(MAX_SIZE_DUMPS_MB_PROP) != null ? Integer.valueOf(systemGetProperty(MAX_SIZE_DUMPS_MB_PROP)) : DEFAULT_MAX_DUMPDIR_SIZE_MB; File dumpDir = new File(logDumpPathPrefix); long dirSizeBytes = 0; if (dumpDir.listFiles() == null) { return; } // Keep a sorted list of files by size as we go in case we need to // delete some TreeSet fileList = new TreeSet<>( new Comparator() { @Override public int compare(File a, File b) { return a.length() < b.length() ? -1 : 1; } }); // Loop over files in this directory and get rid of old ones // while accumulating the total size for (File file : dumpDir.listFiles()) { if ((!file.getName().startsWith(LOG_DUMP_FILE_NAME) && !file.getName().startsWith("sf_incident_")) || (System.currentTimeMillis() - file.lastModified() > FILE_EXPN_TIME_MS && file.delete())) { continue; } dirSizeBytes += file.length(); fileList.add(file); } // If we're exceeding our max allotted disk usage, cut some stuff out; // else if we need to make space for a new dump delete the oldest. if (dirSizeBytes >= ((long) maxDumpDirSizeMB << 20)) { // While we take up more than half the allotted disk usage, keep deleting. for (File file : fileList) { if (dirSizeBytes < ((long) maxDumpDirSizeMB << 19)) { break; } long victimSize = file.length(); if (file.delete()) { dirSizeBytes -= victimSize; } } } else if (deleteOldest && fileList.size() >= maxDumpFiles) { fileList.first().delete(); } } // cleanupSfDumps(...) /** * Function to copy the event buffer, clear it, and iterate of the copy, calling each event's * flush() method one by one. * *

NOTE: This function is subject to a race condition; while the buffer copy is being iterated * over, the next round of buffer entries could be flushed creating a flush order that is not * "strictly consistent". While this could hypothetically also cause the system to run out of * memory due to an unbounded number of eventBuffer copies, that scenario is unlikely. */ private void flushEventBuffer() { ArrayList eventBufferCopy; logger.debug("Flushing eventBuffer", false); // Copy event buffer because this may be long running synchronized (this) { eventBufferCopy = new ArrayList<>(eventBuffer); eventBuffer.clear(); } for (Event event : eventBufferCopy) { event.flush(); } } /* Overridden methods for Handler interface */ /** Flushes all eventBuffer entries. */ @Override public synchronized void flush() { logger.debug("EventHandler flushing logger buffer", false); dumpLogBuffer(""); } /** * Overridden Logger.Handler publish(...) method. Buffers unformatted log records in memory in a * circular buffer-like fashion. * * @param record log record */ @Override public synchronized void publish(LogRecord record) { if (!super.isLoggable(record) || this.getLevel() != null && record.getLevel().intValue() < this.getLevel().intValue()) { return; } synchronized (logBuffer) { if (logBuffer.size() == LOG_BUFFER_SIZE) { logBuffer.remove(0); } logBuffer.add(record); } } @Override public void close() { this.flushEventBuffer(); this.stopFlusher(); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/EventUtil.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import java.io.File; import java.util.concurrent.atomic.AtomicReference; /** Utility class to encapsulate support information pertaining to the EventHandler and events. */ public class EventUtil { public static final String DUMP_PATH_PROP = "snowflake.dump_path"; public static final String DUMP_SIZE_PROP = "snowflake.max_dump_size"; public static final String DUMP_SUBDIR = "snowflake_dumps"; private static final String DUMP_FILE_ID = UUIDUtils.getUUID().toString(); private static String DUMP_PATH_PREFIX = systemGetProperty(DUMP_PATH_PROP) == null ? "/tmp" : systemGetProperty(DUMP_PATH_PROP); private static final long MAX_DUMP_FILE_SIZE_BYTES = systemGetProperty(DUMP_SIZE_PROP) == null ? 
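/*
 * The default just below is (10 << 20) bytes, i.e. 10 MiB. Both knobs are
 * read once during class initialization, so they must be set before this
 * class loads; the values here are hypothetical:
 *
 *   java -Dsnowflake.dump_path=/var/tmp -Dsnowflake.max_dump_size=20971520 ...
 *   // dumps are then written under /var/tmp/snowflake_dumps
 */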
(10 << 20) : Long.valueOf(systemGetProperty(DUMP_SIZE_PROP)); private static AtomicReference eventHandler = new AtomicReference<>(null); private static int MAX_ENTRIES = 1000; private static int FLUSH_PERIOD_MS = 10000; /** * Junit is not recognizing the system properties for EventTest, so overriding the value here * * @param value string value */ public static void setDumpPathPrefixForTesting(String value) { DUMP_PATH_PREFIX = value; } /** * Initializes the common eventHandler instance for all sessions/threads * * @param maxEntries - maximum number of buffered events before flush * @param flushPeriodMs - period of time between asynchronous buffer flushes */ public static synchronized void initEventHandlerInstance(int maxEntries, int flushPeriodMs) { if (eventHandler.get() == null) { eventHandler.set(new EventHandler(maxEntries, flushPeriodMs)); } // eventHandler.startFlusher(); } /** * @return the shared EventHandler instance */ public static EventHandler getEventHandlerInstance() { if (eventHandler.get() == null) { initEventHandlerInstance(MAX_ENTRIES, FLUSH_PERIOD_MS); } return eventHandler.get(); } public static void triggerBasicEvent(Event.EventType type, String message, boolean flushBuffer) { EventHandler eh = eventHandler.get(); if (eh != null) { eh.triggerBasicEvent(type, message, flushBuffer); } } public static void triggerStateTransition(BasicEvent.QueryState newState, String identifier) { EventHandler eh = eventHandler.get(); if (eh != null) { eh.triggerStateTransition(newState, identifier); } } public static String getDumpPathPrefix() { return DUMP_PATH_PREFIX + File.separator + DUMP_SUBDIR; } public static String getDumpFileId() { return DUMP_FILE_ID; } public static long getmaxDumpFileSizeBytes() { return MAX_DUMP_FILE_SIZE_BYTES; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/FieldSchemaCreator.java ================================================ package net.snowflake.client.internal.core; import java.sql.SQLException; import java.sql.Types; import java.util.Optional; import net.snowflake.client.internal.jdbc.BindingParameterMetadata; import net.snowflake.client.internal.jdbc.SnowflakeColumn; import net.snowflake.client.internal.jdbc.util.SnowflakeTypeUtil; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; public class FieldSchemaCreator { static final SFLogger logger = SFLoggerFactory.getLogger(FieldSchemaCreator.class); public static final int MAX_TEXT_COLUMN_SIZE = 134217728; public static final int MAX_BINARY_COLUMN_SIZE = 67108864; public static BindingParameterMetadata buildSchemaForText( String fieldName, Optional maybeColumn) { return BindingParameterMetadata.BindingParameterMetadataBuilder.bindingParameterMetadata() .withType(maybeColumn.map(cl -> cl.type()).filter(str -> !str.isEmpty()).orElse("text")) .withLength(maybeColumn.map(cl -> cl.length()).orElse(MAX_TEXT_COLUMN_SIZE)) .withName(maybeColumn.map(cl -> cl.name()).filter(str -> !str.isEmpty()).orElse(fieldName)) .build(); } public static BindingParameterMetadata buildSchemaForBytesType( String fieldName, Optional maybeColumn) { return BindingParameterMetadata.BindingParameterMetadataBuilder.bindingParameterMetadata() .withType(maybeColumn.map(cl -> cl.type()).filter(str -> !str.isEmpty()).orElse("binary")) .withName(maybeColumn.map(cl -> cl.name()).filter(str -> !str.isEmpty()).orElse(fieldName)) .withLength(maybeColumn.map(cl -> cl.precision()).orElse(MAX_TEXT_COLUMN_SIZE)) 
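/*
 * Illustrative call (a sketch, not from the driver source): mapping a JDBC
 * type to a Snowflake binding schema via the switch in
 * buildBindingSchemaForType defined later in this file; java.sql.Types.TIMESTAMP,
 * for example, yields a "timestamp" schema with scale 9. The call throws
 * SQLException for unsupported types.
 *
 *   BindingParameterMetadata meta =
 *       FieldSchemaCreator.buildBindingSchemaForType(java.sql.Types.TIMESTAMP);
 */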
.withByteLength(maybeColumn.map(cl -> cl.byteLength()).orElse(MAX_BINARY_COLUMN_SIZE)) .build(); } public static BindingParameterMetadata buildSchemaTypeAndNameOnly( String fieldName, String type, Optional maybeColumn) { return BindingParameterMetadata.BindingParameterMetadataBuilder.bindingParameterMetadata() .withType(maybeColumn.map(cl -> cl.type()).filter(str -> !str.isEmpty()).orElse(type)) .withName(maybeColumn.map(cl -> cl.name()).filter(str -> !str.isEmpty()).orElse(fieldName)) .build(); } public static BindingParameterMetadata buildSchemaWithScaleAndPrecision( String fieldName, String type, int scale, int precision, Optional maybeColumn) { return BindingParameterMetadata.BindingParameterMetadataBuilder.bindingParameterMetadata() .withType(maybeColumn.map(cl -> cl.type()).filter(str -> !str.isEmpty()).orElse(type)) .withScale(maybeColumn.map(cl -> cl.scale()).filter(i -> i > 0).orElse(scale)) .withName(maybeColumn.map(cl -> cl.name()).filter(str -> !str.isEmpty()).orElse(fieldName)) .withPrecision(maybeColumn.map(cl -> cl.precision()).filter(i -> i > 0).orElse(precision)) .build(); } public static BindingParameterMetadata buildBindingSchemaForType(int baseType) throws SQLException { return buildBindingSchemaForType(baseType, true); } public static BindingParameterMetadata buildBindingSchemaForType(int baseType, boolean addName) throws SQLException { String name = addName ? SnowflakeTypeUtil.javaTypeToSFType(baseType, null).name() : null; switch (baseType) { case Types.VARCHAR: case Types.CHAR: return FieldSchemaCreator.buildSchemaForText(name, Optional.empty()); case Types.FLOAT: case Types.DOUBLE: case Types.DECIMAL: return FieldSchemaCreator.buildSchemaWithScaleAndPrecision( name, "real", 9, 38, Optional.empty()); case Types.NUMERIC: case Types.INTEGER: case Types.SMALLINT: case Types.TINYINT: case Types.BIGINT: return FieldSchemaCreator.buildSchemaWithScaleAndPrecision( null, "fixed", 0, 38, Optional.empty()); case Types.BOOLEAN: return FieldSchemaCreator.buildSchemaTypeAndNameOnly(name, "boolean", Optional.empty()); case Types.DATE: return FieldSchemaCreator.buildSchemaTypeAndNameOnly(name, "date", Optional.empty()); case Types.TIMESTAMP: return FieldSchemaCreator.buildSchemaWithScaleAndPrecision( name, "timestamp", 9, 0, Optional.empty()); case Types.TIME: return FieldSchemaCreator.buildSchemaWithScaleAndPrecision( name, "time", 9, 0, Optional.empty()); default: logger.error("Could not create schema for type : " + baseType); throw new SQLException("Could not create schema for type : " + baseType); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/FileCacheManager.java ================================================ package net.snowflake.client.internal.core; import com.fasterxml.jackson.databind.JsonNode; import java.io.File; import java.util.function.Supplier; interface FileCacheManager { String getCacheFilePath(); void overrideCacheFile(File newCacheFile); T withLock(Supplier supplier); JsonNode readCacheFile(); void writeCacheFile(JsonNode input); void deleteCacheFile(); } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/FileCacheManagerBuilder.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isWindows; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetEnv; import static 
net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import java.io.File; import java.io.IOException; import java.nio.file.Files; import java.nio.file.attribute.PosixFilePermission; import java.nio.file.attribute.PosixFilePermissions; import java.util.stream.Collectors; import java.util.stream.Stream; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; class FileCacheManagerBuilder { private static final SFLogger logger = SFLoggerFactory.getLogger(FileCacheManagerBuilder.class); private String cacheDirectorySystemProperty; private String cacheDirectoryEnvironmentVariable; private String baseCacheFileName; private long cacheFileLockExpirationInMilliseconds; private boolean onlyOwnerPermissions = true; FileCacheManagerBuilder() {} FileCacheManagerBuilder setCacheDirectorySystemProperty(String cacheDirectorySystemProperty) { this.cacheDirectorySystemProperty = cacheDirectorySystemProperty; return this; } FileCacheManagerBuilder setCacheDirectoryEnvironmentVariable( String cacheDirectoryEnvironmentVariable) { this.cacheDirectoryEnvironmentVariable = cacheDirectoryEnvironmentVariable; return this; } FileCacheManagerBuilder setBaseCacheFileName(String baseCacheFileName) { this.baseCacheFileName = baseCacheFileName; return this; } FileCacheManagerBuilder setCacheFileLockExpirationInSeconds( long cacheFileLockExpirationInSeconds) { this.cacheFileLockExpirationInMilliseconds = cacheFileLockExpirationInSeconds * 1000; return this; } FileCacheManagerBuilder setOnlyOwnerPermissions(boolean onlyOwnerPermissions) { this.onlyOwnerPermissions = onlyOwnerPermissions; return this; } FileCacheManager build() { String cacheDirPath = this.cacheDirectorySystemProperty != null ? systemGetProperty(this.cacheDirectorySystemProperty) : null; if (cacheDirPath == null) { try { cacheDirPath = this.cacheDirectoryEnvironmentVariable != null ? systemGetEnv(this.cacheDirectoryEnvironmentVariable) : null; } catch (Exception ex) { logger.debug( "Cannot get environment variable for cache directory, skip using cache", false); return new NoOpFileCacheManager(); } } File cacheDir; if (cacheDirPath != null) { cacheDir = new File(cacheDirPath); } else { cacheDir = FileCacheUtil.getDefaultCacheDir(); } if (cacheDir == null) { return new NoOpFileCacheManager(); } if (!cacheDir.exists()) { try { if (!isWindows() && onlyOwnerPermissions) { Files.createDirectories( cacheDir.toPath(), PosixFilePermissions.asFileAttribute( Stream.of( PosixFilePermission.OWNER_READ, PosixFilePermission.OWNER_WRITE, PosixFilePermission.OWNER_EXECUTE) .collect(Collectors.toSet()))); } else { Files.createDirectories(cacheDir.toPath()); } } catch (IOException e) { logger.info( "Failed to create the cache directory: {}. Ignored. {}", e.getMessage(), cacheDir.getAbsoluteFile()); return new NoOpFileCacheManager(); } } if (!cacheDir.exists()) { logger.debug("Cannot create the cache directory {}. 
Giving up.", cacheDir.getAbsolutePath()); return new NoOpFileCacheManager(); } logger.debug("Verified Directory {}", cacheDir.getAbsolutePath()); File cacheFileTmp = new File(cacheDir, this.baseCacheFileName).getAbsoluteFile(); try { if (!cacheFileTmp.exists()) { if (!isWindows() && onlyOwnerPermissions) { Files.createFile( cacheFileTmp.toPath(), PosixFilePermissions.asFileAttribute( Stream.of(PosixFilePermission.OWNER_READ, PosixFilePermission.OWNER_WRITE) .collect(Collectors.toSet()))); } else { Files.createFile(cacheFileTmp.toPath()); } logger.debug("Successfully created a cache file {}", cacheFileTmp); } else { logger.debug("Cache file already exists {}", cacheFileTmp); } FileUtil.logFileUsage(cacheFileTmp, "Cache file creation", false); File cacheFile = cacheFileTmp.getCanonicalFile(); File cacheLockFile = new File(cacheFile.getParentFile(), this.baseCacheFileName + ".lck"); return new DefaultFileCacheManager( cacheFile, cacheLockFile, this.baseCacheFileName, this.onlyOwnerPermissions, this.cacheFileLockExpirationInMilliseconds); } catch (IOException | SecurityException ex) { logger.info( "Failed to touch the cache file: {}. Ignored. {}", ex.getMessage(), cacheFileTmp.getAbsoluteFile()); } return new NoOpFileCacheManager(); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/FileCacheUtil.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.core.FileUtil.isWritable; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetEnv; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import java.io.File; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; public class FileCacheUtil { private static final SFLogger logger = SFLoggerFactory.getLogger(FileCacheUtil.class); public static File getDefaultCacheDir() { if (Constants.getOS() == Constants.OS.LINUX) { String xdgCacheHome = getDir(systemGetEnv("XDG_CACHE_HOME")); if (xdgCacheHome != null) { return new File(xdgCacheHome, "snowflake"); } logger.debug("XDG cache home directory is not set or not writable."); } String homeDir = getDir(systemGetProperty("user.home")); if (homeDir == null) { logger.debug("Home directory is not set or not writable, no cache dir is set."); return null; } if (Constants.getOS() == Constants.OS.WINDOWS) { return new File( new File(new File(new File(homeDir, "AppData"), "Local"), "Snowflake"), "Caches"); } else if (Constants.getOS() == Constants.OS.MAC) { return new File(new File(new File(homeDir, "Library"), "Caches"), "Snowflake"); } else { return new File(new File(homeDir, ".cache"), "snowflake"); } } private static String getDir(String dir) { if (dir != null && isWritable(dir)) { return dir; } return null; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/FileTypeDetector.java ================================================ package net.snowflake.client.internal.core; import java.io.IOException; import java.nio.file.Path; import org.apache.tika.Tika; /** Use Tika to detect the mime type of files */ public class FileTypeDetector extends java.nio.file.spi.FileTypeDetector { private final Tika tika = new Tika(); @Override public String probeContentType(Path path) throws IOException { return tika.detect(path.toFile()); } } ================================================ FILE: 
src/main/java/net/snowflake/client/internal/core/FileUtil.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isWindows; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import java.io.File; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.nio.file.attribute.FileOwnerAttributeView; import java.nio.file.attribute.PosixFilePermission; import java.util.Arrays; import java.util.Collection; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; public class FileUtil { private static final SFLogger logger = SFLoggerFactory.getLogger(FileUtil.class); /** * Placeholder value returned by the JVM for {@code user.name} when the current OS user has no * matching entry in the system user database (e.g. a container started with {@code runAsUser} and * no {@code /etc/passwd} entry). Treated as "unknown user" — file owner validation is skipped in * this case. */ static final String UNKNOWN_USER_NAME_MARKER = "?"; private static final Collection WRITE_BY_OTHERS = Arrays.asList(PosixFilePermission.GROUP_WRITE, PosixFilePermission.OTHERS_WRITE); private static final Collection READ_BY_OTHERS = Arrays.asList(PosixFilePermission.GROUP_READ, PosixFilePermission.OTHERS_READ); private static final Collection EXECUTABLE = Arrays.asList( PosixFilePermission.OWNER_EXECUTE, PosixFilePermission.GROUP_EXECUTE, PosixFilePermission.OTHERS_EXECUTE); public static void logFileUsage(Path filePath, String context, boolean logReadAccess) { logWarnWhenAccessibleByOthers(filePath, context, logReadAccess); } public static void logFileUsage(File file, String context, boolean logReadAccess) { logFileUsage(file.toPath(), context, logReadAccess); } public static void logFileUsage(String stringPath, String context, boolean logReadAccess) { Path path = Paths.get(stringPath); logFileUsage(path, context, logReadAccess); } public static boolean isWritable(String path) { File file = new File(path); if (!file.canWrite()) { logger.debug("File/directory not writeable: {}", path); return false; } return true; } public static void handleWhenParentDirectoryPermissionsWiderThanUserOnly( File file, String context) { handleWhenDirectoryPermissionsWiderThanUserOnly(file.getParentFile(), context); } public static void handleWhenFilePermissionsWiderThanUserOnly(File file, String context) { if (Files.isSymbolicLink(file.toPath())) { throw new SecurityException("Symbolic link is not allowed for file cache: " + file); } handleWhenPermissionsWiderThanUserOnly(file.toPath(), context, false); } public static void handleWhenDirectoryPermissionsWiderThanUserOnly(File file, String context) { handleWhenPermissionsWiderThanUserOnly(file.toPath(), context, true); } public static void handleWhenPermissionsWiderThanUserOnly( Path filePath, String context, boolean isDirectory) { // we do not check the permissions for Windows if (isWindows()) { return; } try { Collection filePermissions = Files.getPosixFilePermissions(filePath); boolean isWritableByOthers = isPermPresent(filePermissions, WRITE_BY_OTHERS); boolean isReadableByOthers = isPermPresent(filePermissions, READ_BY_OTHERS); boolean isExecutable = isPermPresent(filePermissions, EXECUTABLE); boolean permissionsTooOpen; if (isDirectory) { permissionsTooOpen = 
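/*
 * Sketch of the group/other permission test being assigned here (the path is
 * hypothetical): for directories any group/other read or write access counts
 * as "too open"; regular files additionally reject any execute bit, as the
 * next branch shows.
 *
 *   Set<PosixFilePermission> perms =
 *       Files.getPosixFilePermissions(Paths.get("/tmp/example-cache"));
 *   boolean writableByOthers =
 *       perms.contains(PosixFilePermission.GROUP_WRITE)
 *           || perms.contains(PosixFilePermission.OTHERS_WRITE);
 */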
isWritableByOthers || isReadableByOthers; } else { permissionsTooOpen = isWritableByOthers || isReadableByOthers || isExecutable; } if (permissionsTooOpen) { logger.debug( "{}File/directory {} access rights: {}", getContextStr(context), filePath, filePermissions); String message = String.format( "Access to file or directory %s is wider than allowed. Remove cache file/directory and re-run the driver.", filePath); if (isDirectory) { logger.warn(message); } else { throw new SecurityException(message); } } else { if (!isDirectory && Files.isSymbolicLink(filePath)) { throw new SecurityException("Symbolic link is not allowed for file cache: " + filePath); } } } catch (IOException e) { String message = String.format( "%s Unable to access the file/directory to check the permissions. Error: %s", filePath, e); if (isDirectory) { logger.warn(message); } else { throw new SecurityException(message); } } } private static void logWarnWhenAccessibleByOthers( Path filePath, String context, boolean logReadAccess) { // we do not check the permissions for Windows if (isWindows()) { return; } try { Collection<PosixFilePermission> filePermissions = Files.getPosixFilePermissions(filePath); logger.debug( "{}File {} access rights: {}", getContextStr(context), filePath, filePermissions); boolean isWritableByOthers = isPermPresent(filePermissions, WRITE_BY_OTHERS); boolean isReadableByOthers = isPermPresent(filePermissions, READ_BY_OTHERS); boolean isExecutable = isPermPresent(filePermissions, EXECUTABLE); if (isWritableByOthers || (isReadableByOthers && logReadAccess) || isExecutable) { logger.warn( "{}File {} is accessible by others to:{}{}{}", getContextStr(context), filePath, isReadableByOthers && logReadAccess ? " read" : "", isWritableByOthers ? " write" : "", isExecutable ? " execute" : ""); } } catch (IOException e) { logger.warn( "{}Unable to access the file to check the permissions: {}. Error: {}", getContextStr(context), filePath, e); } } public static void throwWhenOwnerDifferentThanCurrentUser(File file, String context) { // we do not check the permissions for Windows if (isWindows()) { return; } Path filePath = Paths.get(file.getPath()); try { String fileOwnerName = getFileOwnerName(filePath); String currentUser = systemGetProperty("user.name"); if (currentUser == null || UNKNOWN_USER_NAME_MARKER.equals(currentUser)) { logger.warn( "Cannot determine user (possibly due to security manager restrictions or missing passwd entry in container), skipping file owner validation"); return; } if (!currentUser.equalsIgnoreCase(fileOwnerName)) { logger.debug( "The file owner: {} is different than current user: {}", fileOwnerName, currentUser); throw new SecurityException("The file owner is different than current user"); } } catch (IOException e) { logger.warn( "{}Unable to access the file to check the owner: {}. Error: {}", getContextStr(context), filePath, e); } } private static boolean isPermPresent( Collection<PosixFilePermission> filePerms, Collection<PosixFilePermission> permsToCheck) throws IOException { return filePerms.stream().anyMatch(permsToCheck::contains); } static String getFileOwnerName(Path filePath) throws IOException { FileOwnerAttributeView ownerAttributeView = Files.getFileAttributeView(filePath, FileOwnerAttributeView.class); return ownerAttributeView.getOwner().getName(); } private static String getContextStr(String context) { return isNullOrEmpty(context) ?
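/*
 * Sketch of the owner lookup behind throwWhenOwnerDifferentThanCurrentUser
 * above (path hypothetical): the file owner reported by the filesystem is
 * compared case-insensitively against the "user.name" system property, and
 * validation is skipped when the JVM reports the unknown-user marker "?".
 *
 *   FileOwnerAttributeView view = Files.getFileAttributeView(
 *       Paths.get("/tmp/cache.json"), FileOwnerAttributeView.class);
 *   String owner = view.getOwner().getName();
 */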
"" : context + ": "; } public static boolean exists(File file) { if (file == null) { return false; } return file.exists(); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/HeaderCustomizerHttpRequestInterceptor.java ================================================ package net.snowflake.client.internal.core; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; import net.snowflake.client.api.http.HttpHeadersCustomizer; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import org.apache.http.Header; import org.apache.http.HttpException; import org.apache.http.HttpRequest; import org.apache.http.HttpRequestInterceptor; import org.apache.http.protocol.HttpContext; import software.amazon.awssdk.core.interceptor.Context; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; import software.amazon.awssdk.http.SdkHttpRequest; /** * Implements Apache HttpClient's {@link HttpRequestInterceptor} and {@link ExecutionInterceptor} to * provide a mechanism for adding custom HTTP headers to outgoing requests made by the Snowflake * JDBC driver. * *
<p>
This class iterates through a list of user-provided {@link HttpHeadersCustomizer} * implementations. For each customizer, it checks if it applies to the current request. If it does, * it retrieves new headers from the customizer and adds them to the request, ensuring that existing * driver-set headers are not overridden. * *
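 * <p>A minimal sketch of a customizer (illustrative only: the header name and value are made up,
 * and the parameter names are assumptions abridged from how this class invokes the interface):
 *
 * <pre>{@code
 * public class TraceIdCustomizer implements HttpHeadersCustomizer {
 *   @Override
 *   public boolean applies(String method, String uri, Map<String, List<String>> currentHeaders) {
 *     return true; // customize every outgoing request
 *   }
 *
 *   @Override
 *   public Map<String, List<String>> newHeaders() {
 *     return Collections.singletonMap("X-Trace-Id", Collections.singletonList("abc-123"));
 *   }
 *
 *   @Override
 *   public boolean invokeOnce() {
 *     return false; // re-evaluated on retries as well
 *   }
 * }
 * }</pre>
 *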
<p>
For Apache HttpClient, retry detection is handled by checking the {@link * AttributeEnhancingHttpRequestRetryHandler#EXECUTION_COUNT_ATTRIBUTE} attribute in the {@link * HttpContext} set by {@link AttributeEnhancingHttpRequestRetryHandler} to honor the {@code * invokeOnce()} contract of the customizer. * * @see HttpHeadersCustomizer */ public class HeaderCustomizerHttpRequestInterceptor implements HttpRequestInterceptor, ExecutionInterceptor { private static final SFLogger logger = SFLoggerFactory.getLogger(HeaderCustomizerHttpRequestInterceptor.class); private final List headersCustomizers; public HeaderCustomizerHttpRequestInterceptor(List headersCustomizers) { if (headersCustomizers != null) { this.headersCustomizers = new ArrayList<>(headersCustomizers); // Defensive copy } else { this.headersCustomizers = new ArrayList<>(); } } /** * Processes an Apache HttpClient {@link HttpRequest} before it is sent. It iterates through * registered {@link HttpHeadersCustomizer}s, checks applicability, retrieves new headers, * verifies against overriding driver headers, and adds them to the request. Handles the {@code * invokeOnce()} flag based on the "execution-count" attribute in the {@link HttpContext}. * * @param httpRequest The HTTP request to process. * @param httpContext The context for the HTTP request execution, used to retrieve retry count. */ @Override public void process(HttpRequest httpRequest, HttpContext httpContext) throws HttpException, IOException { if (this.headersCustomizers.isEmpty()) { return; } String httpMethod = httpRequest.getRequestLine().getMethod(); String uri = httpRequest.getRequestLine().getUri(); Map> currentHeaders = extractHeaders(httpRequest); // convert header names to lower case for case in-sensitive lookup Set protectedHeaders = currentHeaders.keySet().stream().map(String::toLowerCase).collect(Collectors.toSet()); Integer executionCount = (Integer) httpContext.getAttribute( AttributeEnhancingHttpRequestRetryHandler.EXECUTION_COUNT_ATTRIBUTE); boolean isRetry = (executionCount == null || executionCount > 0); for (HttpHeadersCustomizer customizer : this.headersCustomizers) { if (customizer.applies(httpMethod, uri, currentHeaders)) { if (customizer.invokeOnce() && isRetry) { logger.debug( "Customizer {} should only run on the first attempt and this is a {} retry. Skipping.", customizer.getClass().getCanonicalName(), executionCount); continue; } Map> newHeaders = customizer.newHeaders(); logger.debug( "Customizer {} is adding headers {}", customizer.getClass().getCanonicalName(), newHeaders.keySet()); for (Map.Entry> entry : newHeaders.entrySet()) { if (isTryingToOverrideDriverHeader(entry, protectedHeaders)) { logger.debug( "Customizer {} attempted to override existing driver header {} which is not allowed. Skipping.", customizer.getClass().getCanonicalName(), entry.getKey()); } else { for (String value : entry.getValue()) { httpRequest.addHeader(entry.getKey(), value); } } } } } } /** * Modifies an AWS HTTP {@link SdkHttpRequest}. It iterates through registered {@link * HttpHeadersCustomizer}s, checks applicability, retrieves new headers, verifies against * overriding driver headers, and adds them to the request. Ignores the {@code invokeOnce()} flag. * * @param context The AWS request context to process. * @param executionAttributes The AWS execution attributes. 
*/ @Override public SdkHttpRequest modifyHttpRequest( Context.ModifyHttpRequest context, ExecutionAttributes executionAttributes) { if (this.headersCustomizers.isEmpty()) { return context.httpRequest(); } SdkHttpRequest httpRequest = context.httpRequest(); SdkHttpRequest.Builder requestBuilder = context.httpRequest().toBuilder(); String httpMethod = httpRequest.method().name(); String uri = httpRequest.getUri().toString(); Map> currentHeaders = httpRequest.headers(); Set protectedHeaders = currentHeaders.keySet().stream() .map(String::toLowerCase) .collect(Collectors.toSet()); // convert to lower case for case in-sensitive lookup for (HttpHeadersCustomizer customizer : this.headersCustomizers) { if (customizer.applies(httpMethod, uri, currentHeaders)) { Map> newHeaders = customizer.newHeaders(); logger.debug( "Customizer {} is adding headers {}", customizer.getClass().getCanonicalName(), newHeaders.keySet()); for (Map.Entry> entry : newHeaders.entrySet()) { if (isTryingToOverrideDriverHeader(entry, protectedHeaders)) { logger.debug( "Customizer {} attempted to override existing driver header {} which is not allowed. Skipping.", customizer.getClass().getCanonicalName(), entry.getKey()); } else { for (String value : entry.getValue()) { requestBuilder.appendHeader(entry.getKey(), value); } } } } } return requestBuilder.build(); } private static boolean isTryingToOverrideDriverHeader( Map.Entry> entry, Set protectedHeaders) { return protectedHeaders.contains(entry.getKey().toLowerCase()); } private static Map> extractHeaders(HttpRequest request) { Map> headerMap = new HashMap<>(); for (Header header : request.getAllHeaders()) { headerMap.computeIfAbsent(header.getName(), k -> new ArrayList<>()).add(header.getValue()); } return headerMap; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/HeartbeatIntervalSelector.java ================================================ package net.snowflake.client.internal.core; import java.util.Set; /** * Selects the best existing heartbeat interval when a new interval cannot be created due to thread * limits. * *
<p>
The selector always prefers shorter (more frequent) intervals to prevent session token * expiration. It never selects an interval longer than requested unless no shorter alternatives * exist. */ class HeartbeatIntervalSelector { /** * Find the best existing interval to use when the requested interval cannot be created. * *
<p>
Selection strategy:
 * 1. First choice: Closest interval that is <= requested (more frequent or equal)
 * 2. Fallback: If no smaller intervals exist, use the SHORTEST available interval (most
 *    frequent), even if it is still longer than requested
 *
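 * <p>A worked example (hypothetical values, not from the driver): with existing intervals
 * {60, 300}, a request for 120s maps to 60s (closest smaller-or-equal), while a request for 30s
 * falls back to 60s, the shortest available:
 *
 * <pre>{@code
 * Set<Long> existing = new HashSet<>(Arrays.asList(60L, 300L));
 * HeartbeatIntervalSelector.selectBestInterval(120L, existing); // -> 60
 * HeartbeatIntervalSelector.selectBestInterval(30L, existing); // -> 60
 * }</pre>
 *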
<p>
This is a best-effort strategy to keep heartbeats as frequent as possible with existing * intervals, but it cannot guarantee avoiding less frequent heartbeats or token expiration when * only longer intervals are available. * * @param requestedInterval The interval that was requested but cannot be created * @param existingIntervals The set of intervals that currently have threads * @return The best existing interval to use * @throws IllegalArgumentException if existingIntervals is null, empty, or requestedInterval <= 0 */ static long selectBestInterval(long requestedInterval, Set existingIntervals) { if (requestedInterval <= 0) { throw new IllegalArgumentException( "Requested interval must be positive: " + requestedInterval); } if (existingIntervals == null || existingIntervals.isEmpty()) { throw new IllegalArgumentException("Existing intervals cannot be null or empty"); } Long closestSmallerOrEqual = null; Long shortestOverall = null; for (Long existingInterval : existingIntervals) { // Track the shortest interval overall (fallback) if (shortestOverall == null || existingInterval < shortestOverall) { shortestOverall = existingInterval; } // Find the closest interval that is <= requested (more frequent or equal) if (existingInterval <= requestedInterval) { if (closestSmallerOrEqual == null || existingInterval > closestSmallerOrEqual) { closestSmallerOrEqual = existingInterval; } } } // Prefer smaller-or-equal interval; fallback to shortest if none exists return closestSmallerOrEqual != null ? closestSmallerOrEqual : shortestOverall; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/HeartbeatRegistry.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.internalCallMarker; import com.fasterxml.jackson.databind.node.ObjectNode; import com.google.common.annotations.VisibleForTesting; import com.google.common.util.concurrent.ThreadFactoryBuilder; import java.time.Clock; import java.util.Collections; import java.util.Map; import java.util.WeakHashMap; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import net.snowflake.client.internal.jdbc.telemetry.Telemetry; import net.snowflake.client.internal.jdbc.telemetry.TelemetryClient; import net.snowflake.client.internal.jdbc.telemetry.TelemetryField; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; /** * Registry that manages multiple HeartbeatThread instances. * *
<p>
Each unique heartbeat interval gets its own thread. This solves the problem where a session * with short interval could expire when a session with long interval is added. * *
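 * <p>Usage sketch (the sessions and numbers below are hypothetical): the effective interval is
 * {@code max(min(heartbeatFrequency, masterTokenValidity / 4), 1)} seconds, so the two sessions
 * below are served by two different heartbeat threads:
 *
 * <pre>{@code
 * HeartbeatRegistry registry = HeartbeatRegistry.getInstance();
 * registry.addSession(sessionA, 14400, 3600); // min(3600, 14400 / 4) -> 3600s interval
 * registry.addSession(sessionB, 3600, 3600); // min(3600, 3600 / 4) -> 900s interval
 * registry.removeSession(sessionB); // its 900s thread is shut down once empty
 * }</pre>
 *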
<p>
Replaces the singleton HeartbeatBackground. * *
<p>
Thread-safe: Uses concurrent data structures and synchronization where needed. */ public class HeartbeatRegistry { private static final SFLogger logger = SFLoggerFactory.getLogger(HeartbeatRegistry.class); /** Maximum number of different heartbeat intervals (prevents thread explosion) */ private static final int MAX_HEARTBEAT_THREADS = 10; /** Singleton instance */ private static volatile HeartbeatRegistry instance; /** Map: heartbeat interval (seconds) -> thread managing that interval */ private final ConcurrentHashMap threads = new ConcurrentHashMap<>(); /** * Map: session -> its required interval (for removal). Uses WeakHashMap for automatic cleanup if * session is garbage collected. */ private final Map sessionIntervals = Collections.synchronizedMap(new WeakHashMap<>()); private final ScheduledExecutorService executor; private final Clock clock; private volatile boolean isShutdown = false; /** Private constructor for singleton. */ private HeartbeatRegistry() { this( Executors.newScheduledThreadPool( MAX_HEARTBEAT_THREADS, new ThreadFactoryBuilder().setNameFormat("heartbeat-pool-%d").setDaemon(true).build()), Clock.systemUTC()); // Register shutdown hook to clean up resources on JVM exit try { Runtime.getRuntime() .addShutdownHook( new Thread( () -> { logger.debug("JVM shutdown detected - cleaning up HeartbeatRegistry"); shutdown(); }, "heartbeat-registry-shutdown")); } catch (SecurityException | IllegalStateException ex) { logger.debug("Unable to register HeartbeatRegistry shutdown hook; continuing", ex); } } /** * Package-private constructor for testing. * * @param executor Scheduler for heartbeat tasks * @param clock Time source (injectable for testing) */ HeartbeatRegistry(ScheduledExecutorService executor, Clock clock) { if (executor == null) { throw new IllegalArgumentException("Executor cannot be null"); } if (clock == null) { throw new IllegalArgumentException("Clock cannot be null"); } this.executor = executor; this.clock = clock; logger.debug("HeartbeatRegistry initialized"); } /** * Get singleton instance. * * @return The singleton HeartbeatRegistry instance */ public static HeartbeatRegistry getInstance() { if (instance == null) { synchronized (HeartbeatRegistry.class) { if (instance == null) { instance = new HeartbeatRegistry(); } } } return instance; } /** * Add session to heartbeat management. * *
<p>
Calculates the required heartbeat interval and adds the session to the appropriate * HeartbeatThread. If no thread exists for this interval, creates one. If a thread already * exists, the session is simply added (no rescheduling). * * @param session The session to heartbeat * @param masterTokenValidityInSecs Master token validity in seconds * @param heartbeatFrequencyInSecs Desired heartbeat frequency in seconds */ public void addSession( SFSession session, long masterTokenValidityInSecs, int heartbeatFrequencyInSecs) { if (session == null) { throw new IllegalArgumentException("Session cannot be null"); } if (masterTokenValidityInSecs <= 0) { throw new IllegalArgumentException( "Master token validity must be positive: " + masterTokenValidityInSecs); } if (heartbeatFrequencyInSecs <= 0) { throw new IllegalArgumentException( "Heartbeat frequency must be positive: " + heartbeatFrequencyInSecs); } if (isShutdown) { throw new IllegalStateException("Cannot add session to shutdown HeartbeatRegistry"); } // Calculate required interval: min of requested frequency and 1/4 of token validity long requiredInterval = Math.min(heartbeatFrequencyInSecs, masterTokenValidityInSecs / 4); // Enforce minimum interval of 1 second requiredInterval = Math.max(requiredInterval, 1); logger.debug( "Adding session {} with interval {}s (requested: {}s, validity: {}s)", session.getSessionId(), requiredInterval, heartbeatFrequencyInSecs, masterTokenValidityInSecs); // Synchronize thread creation to prevent race conditions with limit check synchronized (this) { if (isShutdown) { throw new IllegalStateException("Cannot add session to shutdown HeartbeatRegistry"); } // Check thread count limit if (threads.size() >= MAX_HEARTBEAT_THREADS && !threads.containsKey(requiredInterval)) { logger.debug("Maximum threads reached - attempting to prune empty threads before fallback"); pruneEmptyThreads(); // Check again after pruning if (threads.size() >= MAX_HEARTBEAT_THREADS && !threads.containsKey(requiredInterval)) { logger.warn( "Maximum heartbeat threads ({}) reached. Session {} will use closest existing interval.", MAX_HEARTBEAT_THREADS, session.getSessionId()); // Send telemetry event sendMaxThreadsExceededTelemetry(session, requiredInterval); final int requestedInterval = (int) requiredInterval; // Find best existing interval (prefers shorter intervals for safety) requiredInterval = HeartbeatIntervalSelector.selectBestInterval(requiredInterval, threads.keySet()); logger.debug( "Mapped requested heartbeat interval {}s to existing interval {}s as best-effort fallback after reaching maximum heartbeat threads.", requestedInterval, requiredInterval); } } // Get or create a live thread for this interval, retrying if a stale shutdown thread is // found. final int maxAddAttempts = 2; boolean added = false; for (int attempt = 0; attempt < maxAddAttempts; attempt++) { HeartbeatThread thread = threads.computeIfAbsent( requiredInterval, interval -> new HeartbeatThread(interval, executor, clock)); if (thread.addSession(session)) { sessionIntervals.put(session, requiredInterval); logger.debug( "Session {} added to interval {}s. Active threads: {}", session.getSessionId(), requiredInterval, threads.size()); added = true; break; } logger.debug( "Heartbeat thread for interval {}s was shutdown before session {} could be added. 
Retrying.", requiredInterval, session.getSessionId()); threads.remove(requiredInterval, thread); } if (!added) { throw new IllegalStateException( "Unable to add session " + session.getSessionId() + " to a live heartbeat thread"); } } } /** * Send telemetry event when max heartbeat threads limit is exceeded. * * @param session The session that triggered the limit * @param requestedInterval The interval that was requested but couldn't be created */ private void sendMaxThreadsExceededTelemetry(SFSession session, long requestedInterval) { try { Telemetry telemetry = session.getTelemetryClient(internalCallMarker()); if (!(telemetry instanceof TelemetryClient)) { logger.trace("Telemetry client not available, skipping max threads telemetry"); return; } TelemetryClient telemetryClient = (TelemetryClient) telemetry; ObjectNode telemetryData = ObjectMapperFactory.getObjectMapper().createObjectNode(); telemetryData.put( TelemetryField.TYPE.toString(), TelemetryField.HEARTBEAT_MAX_THREADS_EXCEEDED.toString()); telemetryData.put("max_heartbeat_threads", MAX_HEARTBEAT_THREADS); telemetryData.put("active_threads", threads.size()); telemetryData.put("requested_interval", requestedInterval); telemetryData.put("session_id", session.getSessionId()); telemetryClient.addLogToBatch(telemetryData, System.currentTimeMillis()); logger.trace("Queued max threads exceeded telemetry for sending"); } catch (Exception e) { // Never fail the session due to telemetry logger.trace("Failed to send max threads exceeded telemetry: {}", e.getMessage()); } } /** * Remove empty threads from the registry. * *
<p>
Sessions in HeartbeatThread are stored in a WeakHashMap. When sessions are garbage collected * without explicit removal, the thread becomes empty but remains in the registry. This method * cleans up such threads to free up thread slots. * *
<p>
Called when approaching thread limit before falling back to existing intervals. Uses atomic * operations to prevent races with concurrent addSession calls. */ private void pruneEmptyThreads() { int beforeCount = threads.size(); // Use computeIfPresent to atomically check and remove empty threads threads.forEach( (interval, thread) -> { threads.computeIfPresent( interval, (key, t) -> { // Double-check it's the same thread and it's empty if (t == thread && t.isEmpty()) { logger.debug( "Pruning empty heartbeat thread for interval {}s (sessions were GC'd)", interval); t.shutdown(); return null; // Remove from map } return t; // Keep in map }); }); int prunedCount = beforeCount - threads.size(); if (prunedCount > 0) { logger.debug( "Pruned {} empty heartbeat threads. Active threads: {} -> {}", prunedCount, beforeCount, threads.size()); } } /** * Remove session from heartbeat management. * *
<p>
Session's interval is looked up from internal tracking. If the HeartbeatThread becomes * empty, it is shutdown and removed atomically to prevent races with concurrent addSession calls. * * @param session The session to remove */ public void removeSession(SFSession session) { if (session == null) { logger.debug("Attempted to remove null session"); return; } if (isShutdown) { logger.debug("Registry is shutdown, ignoring removeSession for {}", session.getSessionId()); return; } synchronized (this) { Long interval = sessionIntervals.remove(session); if (interval == null) { logger.debug("Session {} not found in registry", session.getSessionId()); return; } logger.debug("Removing session {} with interval {}s", session.getSessionId(), interval); // Remove session from thread and cleanup if empty - atomic operation threads.computeIfPresent( interval, (key, thread) -> { thread.removeSession(session); // If thread is now empty, shut it down and return null to remove from map if (thread.isEmpty()) { logger.debug( "Removed empty heartbeat thread for interval {}s. Remaining threads: {}", interval, threads.size() - 1); // -1 because we're about to remove this one thread.shutdown(); return null; // Remove from map } return thread; // Keep in map }); } } // === Testing Support === /** * Trigger heartbeat immediately for a specific interval (for testing). * * @param intervalSeconds The interval to trigger */ @VisibleForTesting public void triggerHeartbeatForInterval(long intervalSeconds) { HeartbeatThread thread = threads.get(intervalSeconds); if (thread != null) { thread.triggerHeartbeatNow(); } else { logger.debug("No heartbeat thread found for interval {}s", intervalSeconds); } } /** * Get active thread count (for testing/monitoring). * * @return Number of active heartbeat threads */ @VisibleForTesting public int getActiveThreadCount() { return threads.size(); } /** * Get session count for a specific interval (for testing). * * @param intervalSeconds The interval to query * @return Number of sessions, or 0 if no thread exists */ @VisibleForTesting public int getSessionCountForInterval(long intervalSeconds) { HeartbeatThread thread = threads.get(intervalSeconds); return thread != null ? thread.getSessionCount() : 0; } /** * Shutdown all threads and cleanup. * *
<p>
Should be called during application shutdown to cleanly release resources. Also used in test * cleanup. Idempotent - safe to call multiple times. */ public synchronized void shutdown() { if (isShutdown) { logger.debug("HeartbeatRegistry already shutdown"); return; } logger.debug("Shutting down HeartbeatRegistry"); isShutdown = true; threads.values().forEach(HeartbeatThread::shutdown); threads.clear(); sessionIntervals.clear(); executor.shutdown(); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/HeartbeatThread.java ================================================ package net.snowflake.client.internal.core; import com.google.common.annotations.VisibleForTesting; import java.time.Clock; import java.util.Collections; import java.util.HashSet; import java.util.Set; import java.util.WeakHashMap; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; /** * Manages heartbeat for all sessions that share the same heartbeat interval. * *
<p>
Each unique interval gets its own HeartbeatThread instance. Uses WeakHashMap to automatically * clean up sessions that are garbage collected. * *
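 * <p>The session store is the standard weak-set idiom (a sketch of the underlying data structure,
 * not a separate API):
 *
 * <pre>{@code
 * Set<SFSession> sessions = Collections.newSetFromMap(new WeakHashMap<>());
 * sessions.add(session); // the entry vanishes once the session is only weakly reachable
 * }</pre>
 *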
<p>
Thread-safe: Session management methods are synchronized. The scheduled {@code run()} method * coordinates with shared state using the class's existing concurrency controls. */ class HeartbeatThread implements Runnable { private static final SFLogger logger = SFLoggerFactory.getLogger(HeartbeatThread.class); private final long intervalSeconds; private final ScheduledExecutorService executor; private final Clock clock; /** * Sessions managed by this thread. Uses a Set backed by WeakHashMap to automatically remove * sessions when garbage collected, preventing memory leaks if cleanup fails. */ private final Set sessions = Collections.newSetFromMap(new WeakHashMap<>()); private volatile ScheduledFuture scheduledTask; private volatile long lastHeartbeatStartTimeInSecs; private volatile boolean isShutdown = false; /** * Creates a new HeartbeatThread for a specific interval. * * @param intervalSeconds How often to heartbeat (in seconds) * @param executor Scheduler for running heartbeat tasks * @param clock Time source (injectable for testing) */ HeartbeatThread(long intervalSeconds, ScheduledExecutorService executor, Clock clock) { if (intervalSeconds <= 0) { throw new IllegalArgumentException("Interval must be positive: " + intervalSeconds); } if (executor == null) { throw new IllegalArgumentException("Executor cannot be null"); } if (clock == null) { throw new IllegalArgumentException("Clock cannot be null"); } this.intervalSeconds = intervalSeconds; this.executor = executor; this.clock = clock; this.lastHeartbeatStartTimeInSecs = 0; logger.debug("Created HeartbeatThread for interval {}s", intervalSeconds); } /** * Add session to this heartbeat thread. * *
<p>
If this is the first session, starts the heartbeat task. If thread is already running, the * new session will be included in the next scheduled heartbeat. * * @param session The session to add * @return true if session was added, false if thread is shutdown * @throws IllegalArgumentException if session is null */ synchronized boolean addSession(SFSession session) { if (session == null) { throw new IllegalArgumentException("Session cannot be null"); } if (isShutdown) { logger.warn( "Attempted to add session {} to shutdown HeartbeatThread (interval={}s)", session.getSessionId(), intervalSeconds); return false; } boolean wasEmpty = sessions.isEmpty(); sessions.add(session); logger.debug( "Added session {} to HeartbeatThread (interval={}s), total sessions: {}", session.getSessionId(), intervalSeconds, sessions.size()); // Start heartbeat if this is the first session if (wasEmpty) { start(); } return true; } /** * Remove session from this heartbeat thread. * *
<p>
WeakHashMap ensures sessions are cleaned up even if this method isn't called. * * @param session The session to remove */ synchronized void removeSession(SFSession session) { sessions.remove(session); logger.debug( "Removed session {} from HeartbeatThread (interval={}s), remaining sessions: {}", session.getSessionId(), intervalSeconds, sessions.size()); // Don't auto-shutdown here - let the registry decide when to cleanup } /** * Check if no sessions remain. * * @return true if this thread has no sessions */ synchronized boolean isEmpty() { return sessions.isEmpty(); } /** * Get the current number of sessions. * * @return Number of sessions managed by this thread */ @VisibleForTesting synchronized int getSessionCount() { return sessions.size(); } /** * Get the heartbeat interval in seconds. * * @return Interval in seconds */ long getIntervalSeconds() { return intervalSeconds; } /** Start scheduled heartbeat task. Called automatically when first session is added. */ private synchronized void start() { if (isShutdown) { logger.warn("Cannot start shutdown HeartbeatThread (interval={}s)", intervalSeconds); return; } if (scheduledTask != null) { logger.debug("HeartbeatThread (interval={}s) already started", intervalSeconds); return; } logger.debug("Starting HeartbeatThread (interval={}s)", intervalSeconds); scheduleHeartbeat(); } /** Schedule the next heartbeat task. */ private void scheduleHeartbeat() { // Calculate elapsed time since last heartbeat long currentTimeInSecs = clock.millis() / 1000; long elapsedSecsSinceLastHeartbeat = currentTimeInSecs - lastHeartbeatStartTimeInSecs; /* * The initial delay is 0 if enough time has elapsed, * otherwise it's the remaining time until the next interval. */ long initialDelay = Math.max(intervalSeconds - elapsedSecsSinceLastHeartbeat, 0); logger.debug( "Scheduling heartbeat task (interval={}s) with initial delay of {}s", intervalSeconds, initialDelay); // Use schedule() for single execution, not scheduleAtFixedRate() // We manually reschedule after each run to: // 1. Stop scheduling when sessions list becomes empty // 2. Handle exceptions without stopping future executions try { scheduledTask = executor.schedule(this, initialDelay, TimeUnit.SECONDS); } catch (RejectedExecutionException e) { logger.error( "Failed to schedule heartbeat task (interval={}s): executor rejected task. Marking thread as shutdown.", intervalSeconds, e); isShutdown = true; } } /** * Stop scheduled heartbeat task and prevent new sessions from being added. * *
<p>
Called when no sessions remain or during cleanup. */ synchronized void shutdown() { if (isShutdown) { logger.debug( "HeartbeatThread (interval={}s) already shutdown; performing cleanup", intervalSeconds); } else { logger.debug("Shutting down HeartbeatThread (interval={}s)", intervalSeconds); } isShutdown = true; if (scheduledTask != null) { scheduledTask.cancel(false); scheduledTask = null; } sessions.clear(); } /** * Run heartbeat for all sessions in this thread. * *
<p>
Called by scheduler at fixed interval. Synchronization is only around accessing the sessions * map, not around the actual heartbeat calls, to minimize blocking. */ @Override public void run() { if (isShutdown) { logger.debug("Skipping heartbeat run for shutdown thread (interval={}s)", intervalSeconds); return; } // Record current time as heartbeat start time lastHeartbeatStartTimeInSecs = clock.millis() / 1000; // Get a copy of sessions to heartbeat (outside synchronized block) Set sessionsToHeartbeat = new HashSet<>(); synchronized (this) { sessionsToHeartbeat.addAll(sessions); } logger.debug( "Running heartbeat for {} sessions (interval={}s)", sessionsToHeartbeat.size(), intervalSeconds); // Heartbeat each session (outside synchronized block to avoid blocking) for (SFSession session : sessionsToHeartbeat) { try { session.heartbeat(); } catch (Throwable ex) { logger.error( "Heartbeat error for session {} - message={}", session.getSessionId(), ex.getMessage(), ex); } } // Schedule next heartbeat if there are still sessions synchronized (this) { if (!isShutdown && !sessions.isEmpty()) { logger.debug("Scheduling next heartbeat run (interval={}s)", intervalSeconds); scheduleHeartbeat(); } else { logger.debug( "Not scheduling next heartbeat (shutdown={}, sessions={})", isShutdown, sessions.size()); scheduledTask = null; } } } /** * Trigger heartbeat immediately without waiting for scheduled time. * *
<p>
For testing purposes only. */ @VisibleForTesting synchronized void triggerHeartbeatNow() { if (!isShutdown) { run(); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/HttpClientSettingsKey.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import java.io.Serializable; import java.util.Objects; import net.snowflake.client.internal.core.crl.CertRevocationCheckMode; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; /** * This class defines all non-static parameters needed to create an HttpClient object. It is used as * the key for the static hashmap of reusable http clients. */ public class HttpClientSettingsKey implements Serializable { private static final SFLogger logger = SFLoggerFactory.getLogger(HttpClientSettingsKey.class); private static final int DEFAULT_OCSP_RESPONDER_CONNECTION_TIMEOUT = 10000; private static final String SF_OCSP_TEST_OCSP_RESPONDER_TIMEOUT = "SF_OCSP_TEST_OCSP_RESPONDER_TIMEOUT"; private OCSPMode ocspMode; private CertRevocationCheckMode revocationCheckMode; private boolean allowCertificatesWithoutCrlUrl; private boolean useProxy; private String proxyHost = ""; private int proxyPort = 0; private String nonProxyHosts = ""; private String proxyUser = ""; private String proxyPassword = ""; private String proxyProtocol = "http"; // Adds a suffix to the user agent header in the http requests made by the jdbc driver. // More details in SNOW-717606 private String userAgentSuffix = ""; private Boolean gzipDisabled = false; private Integer ocspTimeout = null; public HttpClientSettingsKey( OCSPMode mode, String host, int port, String nonProxyHosts, String user, String password, String scheme, String userAgentSuffix, Boolean gzipDisabled) { this.useProxy = true; this.ocspMode = mode != null ? mode : OCSPMode.FAIL_OPEN; this.ocspTimeout = getDefaultOcspTimeout(); this.proxyHost = !isNullOrEmpty(host) ? host.trim() : ""; this.proxyPort = port; this.nonProxyHosts = !isNullOrEmpty(nonProxyHosts) ? nonProxyHosts.trim() : ""; this.proxyUser = !isNullOrEmpty(user) ? user.trim() : ""; this.proxyPassword = !isNullOrEmpty(password) ? password.trim() : ""; this.proxyProtocol = !isNullOrEmpty(scheme) ? scheme.trim() : "http"; this.gzipDisabled = gzipDisabled; this.userAgentSuffix = !isNullOrEmpty(userAgentSuffix) ? userAgentSuffix.trim() : ""; } public HttpClientSettingsKey(OCSPMode mode) { this.useProxy = false; this.ocspMode = mode != null ? mode : OCSPMode.FAIL_OPEN; this.ocspTimeout = getDefaultOcspTimeout(); } HttpClientSettingsKey(OCSPMode mode, String userAgentSuffix, Boolean gzipDisabled) { this(mode); this.userAgentSuffix = !isNullOrEmpty(userAgentSuffix) ? 
userAgentSuffix.trim() : ""; this.gzipDisabled = gzipDisabled; } @Override public boolean equals(final Object obj) { if (obj instanceof HttpClientSettingsKey) { HttpClientSettingsKey comparisonKey = (HttpClientSettingsKey) obj; if (comparisonKey.ocspMode.getValue() == this.ocspMode.getValue()) { if (comparisonKey.revocationCheckMode == this.revocationCheckMode && comparisonKey.allowCertificatesWithoutCrlUrl == this.allowCertificatesWithoutCrlUrl) { if (comparisonKey.gzipDisabled.equals(this.gzipDisabled)) { if (comparisonKey.userAgentSuffix.equalsIgnoreCase(this.userAgentSuffix)) { if (Objects.equals(comparisonKey.ocspTimeout, this.ocspTimeout)) { if (!comparisonKey.useProxy && !this.useProxy) { return true; } else if (comparisonKey.proxyHost.equalsIgnoreCase(this.proxyHost) && comparisonKey.proxyPort == this.proxyPort && comparisonKey.proxyUser.equalsIgnoreCase(this.proxyUser) && comparisonKey.proxyPassword.equalsIgnoreCase(this.proxyPassword) && comparisonKey.proxyProtocol.equalsIgnoreCase(this.proxyProtocol)) { // update nonProxyHost if changed if (!this.nonProxyHosts.equalsIgnoreCase(comparisonKey.nonProxyHosts)) { comparisonKey.nonProxyHosts = this.nonProxyHosts; } return true; } } } } } } } return false; } @Override public int hashCode() { return this.ocspMode.getValue() + (this.proxyHost + this.proxyPort + this.proxyUser + this.proxyPassword + this.proxyProtocol) .hashCode() + Objects.hash( this.revocationCheckMode, this.allowCertificatesWithoutCrlUrl, this.ocspTimeout); } public OCSPMode getOcspMode() { return this.ocspMode; } public boolean usesProxy() { return this.useProxy; } public String getProxyHost() { return this.proxyHost; } public int getProxyPort() { return this.proxyPort; } public String getProxyUser() { return this.proxyUser; } public String getUserAgentSuffix() { return this.userAgentSuffix; } /** * Be careful of using this! Should only be called when password is later masked. * * @return proxy password */ public String getProxyPassword() { return this.proxyPassword; } public String getNonProxyHosts() { return this.nonProxyHosts; } public HttpProtocol getProxyHttpProtocol() { return this.proxyProtocol.equalsIgnoreCase("https") ? 
HttpProtocol.HTTPS : HttpProtocol.HTTP; } public Boolean getGzipDisabled() { return gzipDisabled; } public CertRevocationCheckMode getRevocationCheckMode() { return revocationCheckMode; } public boolean isAllowCertificatesWithoutCrlUrl() { return allowCertificatesWithoutCrlUrl; } public void setRevocationCheckMode(CertRevocationCheckMode revocationCheckMode) { this.revocationCheckMode = revocationCheckMode; } public void setAllowCertificatesWithoutCrlUrl(boolean allowCertificatesWithoutCrlUrl) { this.allowCertificatesWithoutCrlUrl = allowCertificatesWithoutCrlUrl; } public int getOcspTimeout() { return ocspTimeout; } private static int getDefaultOcspTimeout() { int timeout = DEFAULT_OCSP_RESPONDER_CONNECTION_TIMEOUT; String configuredTimeout = systemGetProperty(SF_OCSP_TEST_OCSP_RESPONDER_TIMEOUT); if (!isNullOrEmpty(configuredTimeout)) { try { timeout = Integer.parseInt(configuredTimeout); } catch (Exception ex) { // ignore invalid override and keep default logger.debug("Invalid override for OCSP timeout: {}", configuredTimeout); } } return timeout; } @Override public String toString() { return "HttpClientSettingsKey[" + "ocspMode=" + ocspMode + ", revocationCheckMode=" + revocationCheckMode + ", allowCertificatesWithoutCrlUrl=" + allowCertificatesWithoutCrlUrl + ", useProxy=" + useProxy + ", proxyHost='" + proxyHost + '\'' + ", proxyPort=" + proxyPort + ", nonProxyHosts='" + nonProxyHosts + '\'' + ", proxyUser='" + proxyUser + '\'' + ", proxyPassword is " + (proxyPassword.isEmpty() ? "not set" : "set") + ", proxyProtocol='" + proxyProtocol + '\'' + ", userAgentSuffix='" + userAgentSuffix + '\'' + ", gzipDisabled=" + gzipDisabled + ", ocspTimeout=" + ocspTimeout + ']'; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/HttpExecutingContext.java ================================================ package net.snowflake.client.internal.core; import java.util.concurrent.atomic.AtomicBoolean; import net.snowflake.client.internal.util.DecorrelatedJitterBackoff; public class HttpExecutingContext { // min backoff in milli before we retry due to transient issues private static final long minBackoffMillis = 1000; // max backoff in milli before we retry due to transient issues // we double the backoff after each retry till we reach the max backoff private static final long maxBackoffMillis = 16000; // retry at least once even if timeout limit has been reached private static final int MIN_RETRY_COUNT = 1; // retry at least once even if timeout limit has been reached private static final int DEFAULT_RETRY_TIMEOUT = 300; private final String requestId; private final String requestInfoScrubbed; private final long startTime; // start time for each request, // used for keeping track how much time we have spent // due to network issues so that we can compare against the user // specified network timeout to make sure we do not retry infinitely // when there are transient network/GS issues private long startTimePerRequest; // Used to indicate that this is a login/auth request and will be using the new retry strategy. 
private boolean isLoginRequest; // Tracks the total time spent handling transient network issues and retries during HTTP requests private long elapsedMilliForTransientIssues; private long retryTimeout; private long authTimeout; private DecorrelatedJitterBackoff backoff; private long backoffInMillis; private int origSocketTimeout; private String breakRetryReason; private String breakRetryEventName; private String lastStatusCodeForRetry; private int retryCount; private int maxRetries; private boolean noRetry; private int injectSocketTimeout; private boolean retryHTTP403; private boolean shouldRetry; private boolean skipRetriesBecauseOf200; // todo create skip retry reason enum private boolean withoutCookies; private boolean includeRetryParameters; private boolean includeSnowflakeHeaders; private boolean unpackResponse; private AtomicBoolean canceling; private SFBaseSession sfSession; public HttpExecutingContext(String requestIdStr, String requestInfoScrubbed) { this.requestId = requestIdStr; this.requestInfoScrubbed = requestInfoScrubbed; this.startTime = System.currentTimeMillis(); this.startTimePerRequest = startTime; this.backoff = new DecorrelatedJitterBackoff(getMinBackoffInMillis(), getMaxBackoffInMilli()); this.backoffInMillis = minBackoffMillis; } public String getRequestId() { return requestId; } public long getStartTime() { return startTime; } public long getStartTimePerRequest() { return startTimePerRequest; } public void setStartTimePerRequest(long startTimePerRequest) { this.startTimePerRequest = startTimePerRequest; } public boolean isLoginRequest() { return isLoginRequest; } public void setLoginRequest(boolean loginRequest) { isLoginRequest = loginRequest; } public long getElapsedMilliForTransientIssues() { return elapsedMilliForTransientIssues; } public long getRetryTimeoutInMilliseconds() { return retryTimeout * 1000; } public long getRetryTimeout() { return retryTimeout; } public void setRetryTimeout(long retryTimeout) { this.retryTimeout = retryTimeout; } public long getMinBackoffInMillis() { return minBackoffMillis; } public long getBackoffInMillis() { return backoffInMillis; } public void setBackoffInMillis(long backoffInMillis) { this.backoffInMillis = backoffInMillis; } public long getMaxBackoffInMilli() { return maxBackoffMillis; } public long getAuthTimeout() { return authTimeout; } public long getAuthTimeoutInMilliseconds() { return authTimeout * 1000; } public void setAuthTimeout(long authTimeout) { this.authTimeout = authTimeout; } public DecorrelatedJitterBackoff getBackoff() { return backoff; } public void setBackoff(DecorrelatedJitterBackoff backoff) { this.backoff = backoff; } public int getOrigSocketTimeout() { return origSocketTimeout; } public void setOrigSocketTimeout(int origSocketTimeout) { this.origSocketTimeout = origSocketTimeout; } public String getBreakRetryReason() { return breakRetryReason; } public void setBreakRetryReason(String breakRetryReason) { this.breakRetryReason = breakRetryReason; } public String getBreakRetryEventName() { return breakRetryEventName; } public void setBreakRetryEventName(String breakRetryEventName) { this.breakRetryEventName = breakRetryEventName; } public String getLastStatusCodeForRetry() { return lastStatusCodeForRetry; } public void setLastStatusCodeForRetry(String lastStatusCodeForRetry) { this.lastStatusCodeForRetry = lastStatusCodeForRetry; } public int getRetryCount() { return retryCount; } public void setRetryCount(int retryCount) { this.retryCount = retryCount; } public void resetRetryCount() { this.retryCount 
= 0; } public void incrementRetryCount() { this.retryCount++; } public int getMaxRetries() { return maxRetries; } public void setMaxRetries(int maxRetries) { this.maxRetries = maxRetries; } public String getRequestInfoScrubbed() { return requestInfoScrubbed; } public boolean isNoRetry() { return noRetry; } public void setNoRetry(boolean noRetry) { this.noRetry = noRetry; } public boolean isRetryHTTP403() { return retryHTTP403; } public void setRetryHTTP403(boolean retryHTTP403) { this.retryHTTP403 = retryHTTP403; } public boolean isShouldRetry() { return shouldRetry; } public void setShouldRetry(boolean shouldRetry) { this.shouldRetry = shouldRetry; } public void increaseElapsedMilliForTransientIssues(long elapsedMilliForLastCall) { this.elapsedMilliForTransientIssues += elapsedMilliForLastCall; } public boolean elapsedTimeExceeded() { return elapsedMilliForTransientIssues > getRetryTimeoutInMilliseconds(); } public boolean moreThanMinRetries() { return retryCount >= MIN_RETRY_COUNT; } public boolean maxRetriesExceeded() { return maxRetries > 0 && retryCount >= maxRetries; } public boolean socketOrConnectTimeoutReached() { return authTimeout > 0 && elapsedMilliForTransientIssues > getAuthTimeoutInMilliseconds() && (origSocketTimeout == 0 || elapsedMilliForTransientIssues < origSocketTimeout); } public AtomicBoolean getCanceling() { return canceling; } public void setCanceling(AtomicBoolean canceling) { this.canceling = canceling; } public boolean isIncludeSnowflakeHeaders() { return includeSnowflakeHeaders; } public void setIncludeSnowflakeHeaders(boolean includeSnowflakeHeaders) { this.includeSnowflakeHeaders = includeSnowflakeHeaders; } public boolean isWithoutCookies() { return withoutCookies; } public void setWithoutCookies(boolean withoutCookies) { this.withoutCookies = withoutCookies; } public int isInjectSocketTimeout() { return injectSocketTimeout; } public void setInjectSocketTimeout(int injectSocketTimeout) { this.injectSocketTimeout = injectSocketTimeout; } public int getInjectSocketTimeout() { return injectSocketTimeout; } public boolean isIncludeRetryParameters() { return includeRetryParameters; } public boolean isUnpackResponse() { return unpackResponse; } public void setUnpackResponse(boolean unpackResponse) { this.unpackResponse = unpackResponse; } public void setIncludeRetryParameters(boolean includeRetryParameters) { this.includeRetryParameters = includeRetryParameters; } public boolean isSkipRetriesBecauseOf200() { return skipRetriesBecauseOf200; } public void setSkipRetriesBecauseOf200(boolean skipRetriesBecauseOf200) { this.skipRetriesBecauseOf200 = skipRetriesBecauseOf200; } public SFBaseSession getSfSession() { return sfSession; } public void setSfSession(SFBaseSession sfSession) { this.sfSession = sfSession; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/HttpExecutingContextBuilder.java ================================================ package net.snowflake.client.internal.core; import java.util.concurrent.atomic.AtomicBoolean; /** * Builder class for {@link HttpExecutingContext}. Provides a fluent interface for constructing * HttpExecutingContext instances with many optional parameters. 
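 *
 * <p>A typical construction (the request id, scrubbed info, session, and timeout values below are
 * illustrative placeholders):
 *
 * <pre>{@code
 * HttpExecutingContext context =
 *     HttpExecutingContextBuilder.forQuery(requestId, requestInfoScrubbed)
 *         .retryTimeout(300) // seconds
 *         .maxRetries(7)
 *         .withSfSession(session)
 *         .build();
 * }</pre>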
*/ public class HttpExecutingContextBuilder { private final String requestId; private final String requestInfoScrubbed; private long retryTimeout; private long authTimeout; private int origSocketTimeout; private int maxRetries; private int injectSocketTimeout; private AtomicBoolean canceling; private boolean withoutCookies; private boolean includeRetryParameters; private boolean includeSnowflakeHeaders; private boolean retryHTTP403; private boolean noRetry; private boolean unpackResponse; private boolean isLoginRequest; private SFBaseSession sfSession; /** * Creates a new builder instance with required parameters. * * @param requestId Request ID for logging and tracking * @param requestInfoScrubbed Scrubbed request info for logging */ public HttpExecutingContextBuilder(String requestId, String requestInfoScrubbed) { this.requestId = requestId; this.requestInfoScrubbed = requestInfoScrubbed; } /** * Copy constructor to create a new builder from an existing HttpExecutingContext. * * @param context The context to copy settings from */ public HttpExecutingContextBuilder(HttpExecutingContext context) { this.requestId = context.getRequestId(); this.requestInfoScrubbed = context.getRequestInfoScrubbed(); this.retryTimeout = context.getRetryTimeout(); this.authTimeout = context.getAuthTimeout(); this.origSocketTimeout = context.getOrigSocketTimeout(); this.maxRetries = context.getMaxRetries(); this.injectSocketTimeout = context.getInjectSocketTimeout(); this.canceling = context.getCanceling(); this.withoutCookies = context.isWithoutCookies(); this.includeRetryParameters = context.isIncludeRetryParameters(); this.includeSnowflakeHeaders = context.isIncludeSnowflakeHeaders(); this.retryHTTP403 = context.isRetryHTTP403(); this.noRetry = context.isNoRetry(); this.unpackResponse = context.isUnpackResponse(); this.isLoginRequest = context.isLoginRequest(); } /** * Creates a new builder for a login request with common defaults. * * @param requestId Request ID for logging and tracking * @param requestInfoScrubbed Scrubbed request info for logging * @return A new builder instance configured for login requests */ public static HttpExecutingContextBuilder forLogin(String requestId, String requestInfoScrubbed) { return new HttpExecutingContextBuilder(requestId, requestInfoScrubbed) .loginRequest(true) .includeSnowflakeHeaders(true) .retryHTTP403(true); } /** * Creates a new builder for a query request with common defaults. * * @param requestId Request ID for logging and tracking * @param requestInfoScrubbed Scrubbed request info for logging * @return A new builder instance configured for query requests */ public static HttpExecutingContextBuilder forQuery(String requestId, String requestInfoScrubbed) { return new HttpExecutingContextBuilder(requestId, requestInfoScrubbed) .includeRetryParameters(true) .includeSnowflakeHeaders(true) .unpackResponse(true); } /** * Creates a new builder for a simple HTTP request with minimal retry settings. * * @param requestId Request ID for logging and tracking * @param requestInfoScrubbed Scrubbed request info for logging * @return A new builder instance configured for simple requests */ public static HttpExecutingContextBuilder forSimpleRequest( String requestId, String requestInfoScrubbed) { return new HttpExecutingContextBuilder(requestId, requestInfoScrubbed) .noRetry(true) .includeSnowflakeHeaders(true); } /** * Creates a new builder with default settings for retryable requests. 
* * @param requestId Request ID for logging and tracking * @param requestInfoScrubbed Scrubbed request info for logging * @return A new builder instance with default retry settings */ public static HttpExecutingContextBuilder withRequest( String requestId, String requestInfoScrubbed) { return new HttpExecutingContextBuilder(requestId, requestInfoScrubbed); } /** * Sets the retry timeout in seconds. * * @param retryTimeout Retry timeout in seconds * @return this builder instance */ public HttpExecutingContextBuilder retryTimeout(long retryTimeout) { this.retryTimeout = retryTimeout; return this; } /** * Sets the authentication timeout in seconds. * * @param authTimeout Authentication timeout in seconds * @return this builder instance */ public HttpExecutingContextBuilder authTimeout(long authTimeout) { this.authTimeout = authTimeout; return this; } /** * Sets the original socket timeout in milliseconds. * * @param origSocketTimeout Socket timeout in milliseconds * @return this builder instance */ public HttpExecutingContextBuilder origSocketTimeout(int origSocketTimeout) { this.origSocketTimeout = origSocketTimeout; return this; } /** * Sets the maximum number of retries. * * @param maxRetries Maximum number of retries * @return this builder instance */ public HttpExecutingContextBuilder maxRetries(int maxRetries) { this.maxRetries = maxRetries; return this; } /** * Sets the injected socket timeout for testing. * * @param injectSocketTimeout Socket timeout to inject * @return this builder instance */ public HttpExecutingContextBuilder injectSocketTimeout(int injectSocketTimeout) { this.injectSocketTimeout = injectSocketTimeout; return this; } /** * Sets the canceling flag. * * @param canceling AtomicBoolean for cancellation * @return this builder instance */ public HttpExecutingContextBuilder canceling(AtomicBoolean canceling) { this.canceling = canceling; return this; } /** * Sets whether to disable cookies. * * @param withoutCookies true to disable cookies * @return this builder instance */ public HttpExecutingContextBuilder withoutCookies(boolean withoutCookies) { this.withoutCookies = withoutCookies; return this; } /** * Sets whether to include retry parameters in requests. * * @param includeRetryParameters true to include retry parameters * @return this builder instance */ public HttpExecutingContextBuilder includeRetryParameters(boolean includeRetryParameters) { this.includeRetryParameters = includeRetryParameters; return this; } /** * Sets whether to include request GUID. * * @param includeSnowflakeHeaders true to include request GUID and other Snowflake headers * @return this builder instance */ public HttpExecutingContextBuilder includeSnowflakeHeaders(boolean includeSnowflakeHeaders) { this.includeSnowflakeHeaders = includeSnowflakeHeaders; return this; } /** * Sets whether to retry on HTTP 403 errors. * * @param retryHTTP403 true to retry on HTTP 403 * @return this builder instance */ public HttpExecutingContextBuilder retryHTTP403(boolean retryHTTP403) { this.retryHTTP403 = retryHTTP403; return this; } /** * Sets whether to disable retries. * * @param noRetry true to disable retries * @return this builder instance */ public HttpExecutingContextBuilder noRetry(boolean noRetry) { this.noRetry = noRetry; return this; } /** * Sets whether to unpack the response. 
* * @param unpackResponse true to unpack response * @return this builder instance */ public HttpExecutingContextBuilder unpackResponse(boolean unpackResponse) { this.unpackResponse = unpackResponse; return this; } /** * Sets whether this is a login request. * * @param isLoginRequest true if this is a login request * @return this builder instance */ public HttpExecutingContextBuilder loginRequest(boolean isLoginRequest) { this.isLoginRequest = isLoginRequest; return this; } /** * Sets the session associated with this context. * * @param sfSession SFBaseSession to associate with this context * @return this builder instance */ public HttpExecutingContextBuilder withSfSession(SFBaseSession sfSession) { this.sfSession = sfSession; return this; } /** * Builds and returns a new HttpExecutingContext instance with the configured parameters. * * @return A new HttpExecutingContext instance */ public HttpExecutingContext build() { HttpExecutingContext context = new HttpExecutingContext(requestId, requestInfoScrubbed); context.setRetryTimeout(retryTimeout); context.setAuthTimeout(authTimeout); context.setOrigSocketTimeout(origSocketTimeout); context.setMaxRetries(maxRetries); context.setInjectSocketTimeout(injectSocketTimeout); context.setCanceling(canceling); context.setWithoutCookies(withoutCookies); context.setIncludeRetryParameters(includeRetryParameters); context.setIncludeSnowflakeHeaders(includeSnowflakeHeaders); context.setRetryHTTP403(retryHTTP403); context.setNoRetry(noRetry); context.setUnpackResponse(unpackResponse); context.setLoginRequest(isLoginRequest); context.setSfSession(sfSession); return context; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/HttpProtocol.java ================================================ package net.snowflake.client.internal.core; public enum HttpProtocol { HTTP("http"), HTTPS("https"); private final String scheme; HttpProtocol(String scheme) { this.scheme = scheme; } public String getScheme() { return scheme; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/HttpResponseContextDto.java ================================================ package net.snowflake.client.internal.core; import org.apache.http.client.methods.CloseableHttpResponse; public class HttpResponseContextDto { private CloseableHttpResponse httpResponse; private String unpackedCloseableHttpResponse; private Exception savedEx; // Constructors public HttpResponseContextDto() {} public HttpResponseContextDto( CloseableHttpResponse httpResponse, String unpackedCloseableHttpResponse) { this.httpResponse = httpResponse; this.unpackedCloseableHttpResponse = unpackedCloseableHttpResponse; } public CloseableHttpResponse getHttpResponse() { return httpResponse; } public void setHttpResponse(CloseableHttpResponse httpResponse) { this.httpResponse = httpResponse; } public String getUnpackedCloseableHttpResponse() { return unpackedCloseableHttpResponse; } public void setUnpackedCloseableHttpResponse(String unpackedCloseableHttpResponse) { this.unpackedCloseableHttpResponse = unpackedCloseableHttpResponse; } public Exception getSavedEx() { return savedEx; } public void setSavedEx(Exception savedEx) { this.savedEx = savedEx; } @Override public String toString() { return "CloseableHttpResponseContextDto{" + "httpResponse=" + httpResponse + ", unpackedCloseableHttpResponse=" + unpackedCloseableHttpResponse + '}'; } } ================================================ FILE: 
src/main/java/net/snowflake/client/internal/core/HttpResponseWithHeaders.java ================================================ package net.snowflake.client.internal.core; import java.util.Collections; import java.util.Map; /** * Simple container for HTTP response data including both body and headers. This provides a clean * interface for methods that need access to response headers without exposing internal HTTP client * implementation details. */ public class HttpResponseWithHeaders { private final String responseBody; private final Map headers; public HttpResponseWithHeaders(String responseBody, Map headers) { this.responseBody = responseBody; this.headers = headers != null ? Collections.unmodifiableMap(headers) : Collections.emptyMap(); } /** * Get the HTTP response body as a string. * * @return the response body */ public String getResponseBody() { return responseBody; } /** * Get the HTTP response headers as an immutable map. If multiple headers exist with the same * name, only the last value is included. * * @return immutable map of header name to header value */ public Map getHeaders() { return headers; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/HttpUtil.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import static org.apache.http.client.config.CookieSpecs.DEFAULT; import static org.apache.http.client.config.CookieSpecs.IGNORE_COOKIES; import com.google.common.annotations.VisibleForTesting; import java.io.File; import java.io.IOException; import java.io.InputStream; import java.io.PrintWriter; import java.io.StringWriter; import java.net.Proxy; import java.net.Socket; import java.security.KeyManagementException; import java.security.NoSuchAlgorithmException; import java.security.cert.CertificateException; import java.time.Duration; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import javax.annotation.Nullable; import javax.net.ssl.TrustManager; import net.snowflake.client.api.driver.SnowflakeDriver; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.api.http.HttpHeadersCustomizer; import net.snowflake.client.internal.core.crl.CertRevocationCheckMode; import net.snowflake.client.internal.jdbc.RestRequest; import net.snowflake.client.internal.jdbc.RetryContextManager; import net.snowflake.client.internal.jdbc.telemetry.ExecTimeTelemetryData; import net.snowflake.client.internal.log.ArgSupplier; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.client.internal.log.SFLoggerUtil; import net.snowflake.client.internal.util.SecretDetector; import net.snowflake.client.internal.util.Stopwatch; import org.apache.http.Header; import org.apache.http.HttpHost; import org.apache.http.HttpResponse; import org.apache.http.auth.AuthScope; import org.apache.http.auth.Credentials; import org.apache.http.auth.UsernamePasswordCredentials; import org.apache.http.client.CredentialsProvider; import org.apache.http.client.config.RequestConfig; import org.apache.http.client.methods.HttpRequestBase; import org.apache.http.config.Registry; import 
org.apache.http.config.RegistryBuilder; import org.apache.http.conn.ClientConnectionManager; import org.apache.http.conn.HttpClientConnectionManager; import org.apache.http.conn.socket.ConnectionSocketFactory; import org.apache.http.conn.socket.PlainConnectionSocketFactory; import org.apache.http.impl.client.BasicCredentialsProvider; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.DefaultRedirectStrategy; import org.apache.http.impl.client.HttpClientBuilder; import org.apache.http.impl.client.LaxRedirectStrategy; import org.apache.http.impl.conn.PoolingHttpClientConnectionManager; import org.apache.http.protocol.HttpContext; import org.apache.http.ssl.SSLInitializationException; /** HttpUtil class */ public class HttpUtil { private static final SFLogger logger = SFLoggerFactory.getLogger(HttpUtil.class); static final int DEFAULT_MAX_CONNECTIONS = 300; static final int DEFAULT_MAX_CONNECTIONS_PER_ROUTE = 300; private static final int DEFAULT_HTTP_CLIENT_CONNECTION_TIMEOUT_IN_MS = 60000; static final int DEFAULT_HTTP_CLIENT_SOCKET_TIMEOUT_IN_MS = 300000; // ms static final int DEFAULT_TTL = 60; // secs static final int DEFAULT_IDLE_CONNECTION_TIMEOUT = 30; // secs static final int DEFAULT_DOWNLOADED_CONDITION_TIMEOUT = 3600; // secs public static final String JDBC_TTL = "net.snowflake.jdbc.ttl"; public static final String JDBC_IDLE_CONNECTION_PROPERTY = "net.snowflake.jdbc.idle_connection_timeout"; public static final String JDBC_MAX_CONNECTIONS_PROPERTY = "net.snowflake.jdbc.max_connections"; public static final String JDBC_MAX_CONNECTIONS_PER_ROUTE_PROPERTY = "net.snowflake.jdbc.max_connections_per_route"; private static Duration connectionTimeout; private static Duration socketTimeout; /** * The unique httpClient shared by all connections. This will benefit long-lived clients. Key = * proxy host + proxy port + nonProxyHosts, Value = Map of [ClientSettings, HttpClient] */ public static Map<HttpClientSettingsKey, CloseableHttpClient> httpClient = new ConcurrentHashMap<>(); /** * The unique httpClient map shared by all connections that don't want decompression. This will * benefit long-lived clients. Key = proxy host + proxy port + nonProxyHosts, Value = Map * [ClientSettings, HttpClient] */ private static Map<HttpClientSettingsKey, CloseableHttpClient> httpClientWithoutDecompression = new ConcurrentHashMap<>(); /** The map of snowflake route planners */ static Map<HttpClientSettingsKey, SnowflakeMutableProxyRoutePlanner> httpClientRoutePlanner = new ConcurrentHashMap<>(); /** Handle on the static connection manager, to gather statistics mainly */ private static final Map<HttpClientSettingsKey, PoolingHttpClientConnectionManager> connectionManagers = new ConcurrentHashMap<>(); /** default request configuration, to be copied on individual requests. */ private static RequestConfig defaultRequestConfig = null; private static boolean socksProxyDisabled = false; public static void reset() { setConnectionTimeout(DEFAULT_HTTP_CLIENT_CONNECTION_TIMEOUT_IN_MS); setSocketTimeout(DEFAULT_HTTP_CLIENT_SOCKET_TIMEOUT_IN_MS); httpClient .values() .forEach( client -> { try { client.close(); } catch (IOException e) { logger.warn("Cannot close HTTP client", e); } }); httpClient.clear(); httpClientWithoutDecompression .values() .forEach( client -> { try { client.close(); } catch (IOException e) { logger.warn("Cannot close HTTP client", e); } }); httpClientWithoutDecompression.clear(); httpClientRoutePlanner.clear(); connectionManagers.values().forEach(PoolingHttpClientConnectionManager::close); connectionManagers.clear(); defaultRequestConfig = null; } public static Duration getConnectionTimeout() { return connectionTimeout != null ? 
connectionTimeout : Duration.ofMillis(DEFAULT_HTTP_CLIENT_CONNECTION_TIMEOUT_IN_MS); } public static Duration getSocketTimeout() { return socketTimeout != null ? socketTimeout : Duration.ofMillis(DEFAULT_HTTP_CLIENT_SOCKET_TIMEOUT_IN_MS); } public static void setConnectionTimeout(int timeout) { connectionTimeout = Duration.ofMillis(timeout); initDefaultRequestConfig(connectionTimeout.toMillis(), getSocketTimeout().toMillis()); } public static void setSocketTimeout(int timeout) { socketTimeout = Duration.ofMillis(timeout); initDefaultRequestConfig(getConnectionTimeout().toMillis(), socketTimeout.toMillis()); } public static long getDownloadedConditionTimeoutInSeconds() { return DEFAULT_DOWNLOADED_CONDITION_TIMEOUT; } /** * Constructs a user-agent header with the following pattern: connector_name/connector_version * (os-platform_info) language_implementation/language_version * * @param customSuffix custom suffix that would be appended to user agent to identify the jdbc * usage. * @return string for user-agent header */ @VisibleForTesting static String buildUserAgent(String customSuffix) { // Start with connector name StringBuilder builder = new StringBuilder("JDBC/"); // Append connector version and parenthesis start builder.append(SnowflakeDriver.getImplementationVersion()); builder.append(" ("); // Generate OS platform and version from system properties String osPlatform = (systemGetProperty("os.name") != null) ? systemGetProperty("os.name") : ""; String osVersion = (systemGetProperty("os.version") != null) ? systemGetProperty("os.version") : ""; // Append OS platform and version separated by a space builder.append(osPlatform); builder.append(" "); builder.append(osVersion); // Append language name builder.append(") JAVA/"); // Generate string for language version from system properties and append it String languageVersion = (systemGetProperty("java.version") != null) ? systemGetProperty("java.version") : ""; builder.append(languageVersion); if (!customSuffix.isEmpty()) { builder.append(" " + customSuffix); } String userAgent = builder.toString(); return userAgent; } /** * Build an HTTP client using our set of defaults. * * @param key Key to HttpClient hashmap containing OCSP mode and proxy information, could be null * @param ocspCacheFile OCSP response cache file. If null, the default OCSP response file will be * used. * @param downloadUnCompressed Whether the HTTP client should be built requesting no decompression * @return HttpClient object */ public static CloseableHttpClient buildHttpClient( @Nullable HttpClientSettingsKey key, File ocspCacheFile, boolean downloadUnCompressed) { return buildHttpClient(key, ocspCacheFile, downloadUnCompressed, null); } /** * Build an HTTP client using our set of defaults. * * @param key Key to HttpClient hashmap containing OCSP mode and proxy information, could be null * @param ocspCacheFile OCSP response cache file. If null, the default OCSP response file will be * used. 
* @param downloadUnCompressed Whether the HTTP client should be built requesting no decompression * @param httpHeadersCustomizers List of HTTP headers customizers * @return HttpClient object */ public static CloseableHttpClient buildHttpClient( @Nullable HttpClientSettingsKey key, File ocspCacheFile, boolean downloadUnCompressed, List httpHeadersCustomizers) { int idleConnectionTimeout = SystemUtil.convertSystemPropertyToIntValue( JDBC_IDLE_CONNECTION_PROPERTY, DEFAULT_IDLE_CONNECTION_TIMEOUT); logger.debug( "Building http client with client settings key: {}, ocsp cache file: {}, download uncompressed: {}", key != null ? key.toString() : null, ocspCacheFile, downloadUnCompressed); // Build a connection manager with enough connections HttpClientConnectionManager connectionManager = connectionManagers.computeIfAbsent( key, httpClientSettingsKey -> initHttpClientConnectionManager(key, ocspCacheFile)); logger.debug("Disabling cookie management for http client"); String userAgentSuffix = key != null ? key.getUserAgentSuffix() : ""; HttpClientBuilder httpClientBuilder = HttpClientBuilder.create() .setConnectionManager(connectionManager) // Support JVM proxy settings .useSystemProperties() .evictIdleConnections(idleConnectionTimeout, TimeUnit.SECONDS) .evictExpiredConnections() .setRedirectStrategy( new LaxRedirectStrategy()) // handle /POST redirects and retry if failed .setUserAgent(buildUserAgent(userAgentSuffix)) // needed for Okta .disableCookieManagement() // SNOW-39748 .setDefaultRequestConfig(defaultRequestConfig); if (key != null && key.usesProxy()) { HttpHost proxy = new HttpHost( key.getProxyHost(), key.getProxyPort(), key.getProxyHttpProtocol().getScheme()); logger.debug( "Configuring proxy and route planner - host: {}, port: {}, scheme: {}, nonProxyHosts: {}", key.getProxyHost(), key.getProxyPort(), key.getProxyHttpProtocol().getScheme(), key.getNonProxyHosts()); // use the custom proxy properties SnowflakeMutableProxyRoutePlanner sdkProxyRoutePlanner = httpClientRoutePlanner.computeIfAbsent( key, k -> new SnowflakeMutableProxyRoutePlanner( key.getProxyHost(), key.getProxyPort(), key.getProxyHttpProtocol(), key.getNonProxyHosts())); httpClientBuilder.setProxy(proxy).setRoutePlanner(sdkProxyRoutePlanner); if (!isNullOrEmpty(key.getProxyUser()) && !isNullOrEmpty(key.getProxyPassword())) { Credentials credentials = new UsernamePasswordCredentials(key.getProxyUser(), key.getProxyPassword()); AuthScope authScope = new AuthScope(key.getProxyHost(), key.getProxyPort()); CredentialsProvider credentialsProvider = new BasicCredentialsProvider(); logger.debug( "Using user: {}, password is {} for proxy host: {}, port: {}", key.getProxyUser(), SFLoggerUtil.isVariableProvided(key.getProxyPassword()), key.getProxyHost(), key.getProxyPort()); credentialsProvider.setCredentials(authScope, credentials); httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider); } } if (downloadUnCompressed) { logger.debug("Disabling content compression for http client"); httpClientBuilder.disableContentCompression(); } if (httpHeadersCustomizers != null && !httpHeadersCustomizers.isEmpty()) { logger.debug("Setting up http headers customizers"); httpClientBuilder.setRetryHandler(new AttributeEnhancingHttpRequestRetryHandler()); httpClientBuilder.addInterceptorLast( new HeaderCustomizerHttpRequestInterceptor(httpHeadersCustomizers)); } return httpClientBuilder.build(); } private static PoolingHttpClientConnectionManager initHttpClientConnectionManager( HttpClientSettingsKey key, File ocspCacheFile) { int 
timeToLiveSeconds = SystemUtil.convertSystemPropertyToIntValue(JDBC_TTL, DEFAULT_TTL); long validateAfterInactivitySeconds = SystemUtil.convertSystemPropertyToIntValue( JDBC_IDLE_CONNECTION_PROPERTY, DEFAULT_IDLE_CONNECTION_TIMEOUT); long connectTimeout = getConnectionTimeout().toMillis(); long socketTimeout = getSocketTimeout().toMillis(); logger.debug( "Connection pooling manager connect timeout: {} ms, socket timeout: {} ms, ttl: {} s, validate after inactivity: {} s", connectTimeout, socketTimeout, timeToLiveSeconds, validateAfterInactivitySeconds); // Create default request config without proxy since different connections could use different // proxies in multi-tenant environments // Proxy is set later with route planner if (defaultRequestConfig == null) { initDefaultRequestConfig(connectTimeout, socketTimeout); } try { TrustManager[] trustManagers = configureTrustManagerIfNeeded(key, ocspCacheFile); logger.debug( "Registering https connection socket factory with socks proxy disabled: {} and http " + "connection socket factory", socksProxyDisabled); Registry<ConnectionSocketFactory> registry = RegistryBuilder.<ConnectionSocketFactory>create() .register( "https", new SFSSLConnectionSocketFactory(trustManagers, socksProxyDisabled)) .register("http", new SFConnectionSocketFactory()) .build(); PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager( registry, null, null, null, timeToLiveSeconds, TimeUnit.SECONDS); int maxConnections = SystemUtil.convertSystemPropertyToIntValue( JDBC_MAX_CONNECTIONS_PROPERTY, DEFAULT_MAX_CONNECTIONS); int maxConnectionsPerRoute = SystemUtil.convertSystemPropertyToIntValue( JDBC_MAX_CONNECTIONS_PER_ROUTE_PROPERTY, DEFAULT_MAX_CONNECTIONS_PER_ROUTE); logger.debug( "Max connections total in connection pooling manager: {}; max connections per route: {}", maxConnections, maxConnectionsPerRoute); connectionManager.setMaxTotal(maxConnections); connectionManager.setDefaultMaxPerRoute(maxConnectionsPerRoute); connectionManager.setValidateAfterInactivity((int) validateAfterInactivitySeconds); return connectionManager; } catch (NoSuchAlgorithmException | KeyManagementException ex) { throw new SSLInitializationException(ex.getMessage(), ex); } } static TrustManager[] configureTrustManagerIfNeeded( HttpClientSettingsKey key, File ocspCacheFile) { if (key != null && key.getOcspMode() != OCSPMode.DISABLE_OCSP_CHECKS) { // A custom TrustManager is required only if disableOCSPChecks is disabled, // which is the default in production. disableOCSPChecks can be enabled when // 1) the OCSP service is down for some reason, or 2) a PowerMock test doesn't // care about OCSP checks. 
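// Illustrative sketch (not part of the original file): the OCSP mode tested
// above is driven by connection properties supplied by the caller. The exact
// property name below follows the driver's documented disableOCSPChecks flag
// and should be treated as an assumption of this sketch, e.g. for a test setup:
//
//   Properties props = new Properties();
//   props.put("user", "jdoe"); // hypothetical credentials
//   props.put("disableOCSPChecks", "true"); // maps to OCSPMode.DISABLE_OCSP_CHECKS
//   Connection con = DriverManager.getConnection(url, props);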
// OCSP FailOpen is ON by default try { if (ocspCacheFile == null) { logger.debug("Instantiating trust manager with default ocsp cache file"); } else { logger.debug("Instantiating trust manager with ocsp cache file: {}", ocspCacheFile); } return new TrustManager[] {new SFTrustManager(key, ocspCacheFile)}; } catch (Exception | Error err) { // dump error stack StringWriter errors = new StringWriter(); err.printStackTrace(new PrintWriter(errors)); logger.error(errors.toString(), true); throw new RuntimeException(err); // rethrow the exception } } else if (key != null && key.getOcspMode() == OCSPMode.DISABLE_OCSP_CHECKS && crlRevocationChecksDisabled(key)) { logger.debug( "Omitting trust manager instantiation as OCSP mode is set to {}", key.getOcspMode()); } else if (key != null && !crlRevocationChecksDisabled(key)) { try { logger.debug("Instantiating trust manager with CRL based revocation checks"); return new TrustManager[] {SFCrlTrustManagerFactory.createCrlTrustManager(key)}; } catch (CertificateException e) { throw new SSLInitializationException(e.getMessage(), e); } } logger.debug("Omitting trust manager instantiation as configuration is not provided"); return null; } private static boolean crlRevocationChecksDisabled(HttpClientSettingsKey key) { return key.getRevocationCheckMode() == null || key.getRevocationCheckMode() == CertRevocationCheckMode.DISABLED; } private static void initDefaultRequestConfig(long connectTimeout, long socketTimeout) { RequestConfig.Builder builder = RequestConfig.custom() .setConnectTimeout((int) connectTimeout) .setConnectionRequestTimeout((int) connectTimeout) .setSocketTimeout((int) socketTimeout); logger.debug( "Rebuilding request config. Connect timeout: {} ms, connection request timeout: {} ms, socket timeout: {} ms", connectTimeout, connectTimeout, socketTimeout); defaultRequestConfig = builder.build(); } public static void updateRoutePlanner(HttpClientSettingsKey key) { if (httpClientRoutePlanner.containsKey(key) && !httpClientRoutePlanner .get(key) .getNonProxyHosts() .equalsIgnoreCase(key.getNonProxyHosts())) { logger.debug( "Updating route planner non-proxy hosts for proxy: {}:{} to: {}", key.getProxyHost(), key.getProxyPort(), key.getNonProxyHosts()); httpClientRoutePlanner.get(key).setNonProxyHosts(key.getNonProxyHosts()); } } /** * Gets HttpClient with insecureMode false * * @param ocspAndProxyKey OCSP mode and proxy settings for httpclient * @return HttpClient object shared across all connections */ public static CloseableHttpClient getHttpClient(HttpClientSettingsKey ocspAndProxyKey) { return initHttpClient(ocspAndProxyKey, null, null); } /** * Gets HttpClient with insecureMode false * * @param ocspAndProxyKey OCSP mode and proxy settings for httpclient * @param httpHeadersCustomizers List of HTTP headers customizers * @return HttpClient object shared across all connections */ public static CloseableHttpClient getHttpClient( HttpClientSettingsKey ocspAndProxyKey, List httpHeadersCustomizers) { return initHttpClient(ocspAndProxyKey, null, httpHeadersCustomizers); } /** * Gets HttpClient with insecureMode false and disabling decompression * * @param ocspAndProxyKey OCSP mode and proxy settings for httpclient * @param httpHeadersCustomizers List of HTTP headers customizers * @return HttpClient object shared across all connections */ public static CloseableHttpClient getHttpClientWithoutDecompression( HttpClientSettingsKey ocspAndProxyKey, List httpHeadersCustomizers) { return initHttpClientWithoutDecompression(ocspAndProxyKey, null, 
httpHeadersCustomizers); } /** * Accessor for the HTTP client singleton. * * @param key contains information needed to build specific HttpClient * @param ocspCacheFile OCSP response cache file name. if null, the default file will be used. * @param httpHeadersCustomizers List of HTTP headers customizers * @return HttpClient object shared across all connections */ public static CloseableHttpClient initHttpClientWithoutDecompression( HttpClientSettingsKey key, File ocspCacheFile, List<HttpHeadersCustomizer> httpHeadersCustomizers) { updateRoutePlanner(key); return httpClientWithoutDecompression.computeIfAbsent( key, k -> buildHttpClient(key, ocspCacheFile, true, httpHeadersCustomizers)); } /** * Accessor for the HTTP client singleton. * * @param key contains information needed to build specific HttpClient * @param ocspCacheFile OCSP response cache file name. if null, the default file will be used. * @param httpHeadersCustomizers List of HTTP headers customizers * @return HttpClient object shared across all connections */ public static CloseableHttpClient initHttpClient( HttpClientSettingsKey key, File ocspCacheFile, List<HttpHeadersCustomizer> httpHeadersCustomizers) { updateRoutePlanner(key); return httpClient.computeIfAbsent( key, k -> buildHttpClient(key, ocspCacheFile, key.getGzipDisabled(), httpHeadersCustomizers)); } /** * Return a request configuration inheriting from the default request configuration of the shared * HttpClient with a different socket timeout. * * @param soTimeoutMs - custom socket timeout in milliseconds * @param withoutCookies - whether this request should ignore cookies or not * @return RequestConfig object */ public static RequestConfig getDefaultRequestConfigWithSocketTimeout( int soTimeoutMs, boolean withoutCookies) { final String cookieSpec = withoutCookies ? IGNORE_COOKIES : DEFAULT; return RequestConfig.copy(defaultRequestConfig) .setSocketTimeout(soTimeoutMs) .setCookieSpec(cookieSpec) .build(); } /** * Return a request configuration inheriting from the default request configuration of the shared * HttpClient with a different socket and connect timeout. * * @param requestSocketAndConnectTimeout - custom socket and connect timeout in milliseconds * @param withoutCookies - whether this request should ignore cookies or not * @return RequestConfig object */ public static RequestConfig getDefaultRequestConfigWithSocketAndConnectTimeout( int requestSocketAndConnectTimeout, boolean withoutCookies) { final String cookieSpec = withoutCookies ? IGNORE_COOKIES : DEFAULT; return RequestConfig.copy(defaultRequestConfig) .setSocketTimeout(requestSocketAndConnectTimeout) .setConnectTimeout(requestSocketAndConnectTimeout) .setCookieSpec(cookieSpec) .build(); } /** * Return a request configuration inheriting from the default request configuration of the shared * HttpClient with the cookie spec set to ignore. * * @return RequestConfig object */ public static RequestConfig getRequestConfigWithoutCookies() { return RequestConfig.copy(defaultRequestConfig).setCookieSpec(IGNORE_COOKIES).build(); } public static void setRequestConfig(RequestConfig requestConfig) { logger.debug("Setting default request config to: {}", requestConfig); defaultRequestConfig = requestConfig; } /** * Helper reporting connection pool statistics for an HTTP client connection manager. 
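* <p>Used only to decorate debug log lines with connection pool usage; returns an empty string when the manager does not support pooling.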
* * @return HTTP Client stats in string representation */ private static String getHttpClientStats(ClientConnectionManager connectionManager) { if (!(connectionManager instanceof PoolingHttpClientConnectionManager)) { return ""; } else { return ((PoolingHttpClientConnectionManager) connectionManager).getTotalStats().toString(); } } /** * Enables/disables use of the SOCKS proxy when creating sockets * * @param socksProxyDisabled new value */ public static void setSocksProxyDisabled(boolean socksProxyDisabled) { logger.debug("Setting socks proxy disabled to {}", socksProxyDisabled); HttpUtil.socksProxyDisabled = socksProxyDisabled; } /** * Returns whether the SOCKS proxy is disabled for this JVM * * @return whether the SOCKS proxy is disabled */ public static boolean isSocksProxyDisabled() { return HttpUtil.socksProxyDisabled; } /** * Executes an HTTP request with the cookie spec set to IGNORE_COOKIES * * @param httpRequest HttpRequestBase * @param retryTimeout retry timeout * @param authTimeout authenticator specific timeout * @param socketTimeout socket timeout (in ms) * @param retryCount max retry count for the request - if it is set to 0, it will be ignored and * only retryTimeout will determine when to end the retries * @param injectSocketTimeout injecting socket timeout * @param canceling canceling? * @param ocspAndProxyKey OCSP mode and proxy settings for httpclient * @return response * @throws SnowflakeSQLException if Snowflake error occurs * @throws IOException raises if a general IO error occurs */ @Deprecated static String executeRequestWithoutCookies( HttpRequestBase httpRequest, int retryTimeout, int authTimeout, int socketTimeout, int retryCount, int injectSocketTimeout, AtomicBoolean canceling, HttpClientSettingsKey ocspAndProxyKey) throws SnowflakeSQLException, IOException { return executeRequestWithoutCookies( httpRequest, retryTimeout, authTimeout, socketTimeout, retryCount, injectSocketTimeout, canceling, ocspAndProxyKey, null); } /** * Executes an HTTP request with the cookie spec set to IGNORE_COOKIES * * @param httpRequest HttpRequestBase * @param retryTimeout retry timeout * @param authTimeout authenticator specific timeout * @param socketTimeout socket timeout (in ms) * @param retryCount max retry count for the request - if it is set to 0, it will be ignored and * only retryTimeout will determine when to end the retries * @param injectSocketTimeout injecting socket timeout * @param canceling canceling? * @param ocspAndProxyKey OCSP mode and proxy settings for httpclient * @param sfSession the session associated with the request * @return response * @throws SnowflakeSQLException if Snowflake error occurs * @throws IOException raises if a general IO error occurs */ static String executeRequestWithoutCookies( HttpRequestBase httpRequest, int retryTimeout, int authTimeout, int socketTimeout, int retryCount, int injectSocketTimeout, AtomicBoolean canceling, HttpClientSettingsKey ocspAndProxyKey, SFBaseSession sfSession) throws SnowflakeSQLException, IOException { logger.debug("Executing request without cookies"); return executeRequestInternal( httpRequest, retryTimeout, authTimeout, socketTimeout, retryCount, injectSocketTimeout, canceling, true, // no cookie false, // no retry parameter true, // guid? (do we need this?) false, // no retry on HTTP 403 getHttpClient(ocspAndProxyKey, null), new ExecTimeTelemetryData(), null, sfSession, ocspAndProxyKey, null, false); } /** * Executes an HTTP request for Snowflake. 
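* <p>A minimal calling sketch, added for illustration; the endpoint, the timeout values, and the
* construction of {@code key} are hypothetical rather than prescribed by this class:
*
* <pre>{@code
* HttpPost request = new HttpPost("https://myaccount.snowflakecomputing.com/session/heartbeat");
* HttpClientSettingsKey key = ...; // OCSP mode + proxy settings for the shared client
* String responseBody =
*     HttpUtil.executeGeneralRequest(
*         request,
*         60, // retryTimeout in seconds
*         0, // authTimeout in seconds
*         300000, // socketTimeout in ms
*         0, // retryCount; 0 means retryTimeout alone bounds the retries
*         key,
*         session);
* }</pre>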
* * @param httpRequest HttpRequestBase * @param retryTimeout retry timeout * @param authTimeout authenticator specific timeout * @param socketTimeout socket timeout (in ms) * @param retryCount max retry count for the request - if it is set to 0, it will be ignored and * only retryTimeout will determine when to end the retries * @param ocspAndProxyAndGzipKey OCSP mode and proxy settings for httpclient * @param sfSession the session associated with the request * @return response * @throws SnowflakeSQLException if Snowflake error occurs * @throws IOException raises if a general IO error occurs */ public static String executeGeneralRequest( HttpRequestBase httpRequest, int retryTimeout, int authTimeout, int socketTimeout, int retryCount, HttpClientSettingsKey ocspAndProxyAndGzipKey, SFBaseSession sfSession) throws SnowflakeSQLException, IOException { return executeGeneralRequest( httpRequest, retryTimeout, authTimeout, socketTimeout, retryCount, ocspAndProxyAndGzipKey, null, sfSession); } public static String executeGeneralRequestOmitSnowflakeHeaders( HttpRequestBase httpRequest, int retryTimeout, int authTimeout, int socketTimeout, int retryCount, HttpClientSettingsKey ocspAndProxyAndGzipKey, SFBaseSession sfSession) throws SnowflakeSQLException, IOException { return executeRequestInternal( httpRequest, retryTimeout, authTimeout, socketTimeout, retryCount, 0, null, false, false, false, false, getHttpClient(ocspAndProxyAndGzipKey, null), new ExecTimeTelemetryData(), null, sfSession, ocspAndProxyAndGzipKey, null, false); } /** * Executes an HTTP request for Snowflake. * * @param httpRequest HttpRequestBase * @param retryTimeout retry timeout * @param authTimeout authenticator specific timeout * @param socketTimeout socket timeout (in ms) * @param retryCount max retry count for the request - if it is set to 0, it will be ignored and * only retryTimeout will determine when to end the retries * @param ocspAndProxyAndGzipKey OCSP mode and proxy settings for httpclient * @param retryContextManager RetryContext used to customize retry handling functionality * @param sfSession the session associated with the request * @return response * @throws SnowflakeSQLException if Snowflake error occurs * @throws IOException raises if a general IO error occurs */ public static String executeGeneralRequest( HttpRequestBase httpRequest, int retryTimeout, int authTimeout, int socketTimeout, int retryCount, HttpClientSettingsKey ocspAndProxyAndGzipKey, RetryContextManager retryContextManager, SFBaseSession sfSession) throws SnowflakeSQLException, IOException { logger.debug("Executing general request"); return executeRequest( httpRequest, retryTimeout, authTimeout, socketTimeout, retryCount, 0, // no inject socket timeout null, // no canceling false, // no retry parameter false, // no retry on HTTP 403 ocspAndProxyAndGzipKey, new ExecTimeTelemetryData(), retryContextManager, sfSession); } /** * Executes an HTTP request for Snowflake and returns response with headers. This variant allows * access to both the response body and HTTP headers as a simple map. * * @param httpRequest HttpRequestBase * @param retryTimeout retry timeout * @param authTimeout authenticator specific timeout * @param socketTimeout socket timeout (in ms) * @param maxRetryCount max retry count for the request - if it is set to 0, it will be ignored * and only retryTimeout will determine when to end the retries * @param retriedCount The number of retries attempted to execute the request. 
* @param ocspAndProxyAndGzipKey OCSP mode and proxy settings for httpclient * @param sfSession the session associated with the request * @return HttpResponseWithHeaders containing response body and headers as a map * @throws SnowflakeSQLException if Snowflake error occurs * @throws IOException raises if a general IO error occurs */ public static HttpResponseWithHeaders executeGeneralRequestWithContext( HttpRequestBase httpRequest, int retryTimeout, int authTimeout, int socketTimeout, int maxRetryCount, int retriedCount, HttpClientSettingsKey ocspAndProxyAndGzipKey, SFBaseSession sfSession) throws SnowflakeSQLException, IOException { // Set up telemetry with OCSP status boolean ocspEnabled = !(ocspAndProxyAndGzipKey.getOcspMode().equals(OCSPMode.DISABLE_OCSP_CHECKS)); logger.debug("Executing general request with OCSP enabled: {}", ocspEnabled); ExecTimeTelemetryData execTimeData = new ExecTimeTelemetryData(); execTimeData.setOCSPStatus(ocspEnabled); // Delegate to common internal method with appropriate defaults HttpResponseContextDto responseContext = executeRequestInternalWithContext( httpRequest, retryTimeout, authTimeout, socketTimeout, maxRetryCount, retriedCount, 0, // no inject socket timeout null, // no canceling false, // with cookie false, // no retry parameters true, // include request GUID false, // no retry on HTTP 403 getHttpClient(ocspAndProxyAndGzipKey, null), execTimeData, null, // no retry context manager sfSession, ocspAndProxyAndGzipKey, null, // no custom headers false); // no decompression // Convert to clean response object with headers as map return convertToHttpResponseWithHeaders(responseContext); } private static HttpResponseWithHeaders convertToHttpResponseWithHeaders( HttpResponseContextDto responseContext) { String responseBody = responseContext.getUnpackedCloseableHttpResponse(); return new HttpResponseWithHeaders( responseBody, extractHeadersAsMap(responseContext.getHttpResponse())); } public static Map extractHeadersAsMap(HttpResponse httpResponse) { Map headersMap = new HashMap<>(); if (httpResponse != null) { Header[] headers = httpResponse.getAllHeaders(); if (headers != null) { for (Header header : headers) { headersMap.put(header.getName(), header.getValue()); } } } return headersMap; } /** * Executes an HTTP request for Snowflake * * @param httpRequest HttpRequestBase * @param retryTimeout retry timeout * @param authTimeout authenticator specific timeout * @param socketTimeout socket timeout (in ms) * @param retryCount max retry count for the request - if it is set to 0, it will be ignored and * only retryTimeout will determine when to end the retries * @param httpClient client object used to communicate with other machine * @param sfSession the session associated with the request * @return response * @throws SnowflakeSQLException if Snowflake error occurs * @throws IOException raises if a general IO error occurs */ public static String executeGeneralRequest( HttpRequestBase httpRequest, int retryTimeout, int authTimeout, int socketTimeout, int retryCount, CloseableHttpClient httpClient, SFBaseSession sfSession) throws SnowflakeSQLException, IOException { logger.debug("Executing general request"); return executeRequestInternal( httpRequest, retryTimeout, authTimeout, socketTimeout, retryCount, 0, // no inject socket timeout null, // no canceling false, // with cookie false, // no retry parameter true, // include request GUID false, // no retry on HTTP 403 httpClient, new ExecTimeTelemetryData(), null, sfSession, null, null, false); } /** * Executes an 
HTTP request for Snowflake. * * @param httpRequest HttpRequestBase * @param retryTimeout retry timeout * @param authTimeout authenticator timeout * @param socketTimeout socket timeout (in ms) * @param maxRetries retry count for the request * @param injectSocketTimeout injecting socket timeout * @param canceling canceling? * @param includeRetryParameters whether to include retry parameters in retried requests * @param retryOnHTTP403 whether to retry on HTTP 403 or not * @param ocspAndProxyKey OCSP mode and proxy settings for httpclient * @param execTimeData query execution time telemetry data object * @return response * @throws SnowflakeSQLException if Snowflake error occurs * @throws IOException raises if a general IO error occurs */ @Deprecated public static String executeRequest( HttpRequestBase httpRequest, int retryTimeout, int authTimeout, int socketTimeout, int maxRetries, int injectSocketTimeout, AtomicBoolean canceling, boolean includeRetryParameters, boolean retryOnHTTP403, HttpClientSettingsKey ocspAndProxyKey, ExecTimeTelemetryData execTimeData) throws SnowflakeSQLException, IOException { return executeRequest( httpRequest, retryTimeout, authTimeout, socketTimeout, maxRetries, injectSocketTimeout, canceling, includeRetryParameters, retryOnHTTP403, ocspAndProxyKey, execTimeData, (SFBaseSession) null); } /** * Executes an HTTP request for Snowflake. * * @param httpRequest HttpRequestBase * @param retryTimeout retry timeout * @param authTimeout authenticator timeout * @param socketTimeout socket timeout (in ms) * @param maxRetries retry count for the request * @param injectSocketTimeout injecting socket timeout * @param canceling canceling? * @param includeRetryParameters whether to include retry parameters in retried requests * @param retryOnHTTP403 whether to retry on HTTP 403 or not * @param ocspAndProxyKey OCSP mode and proxy settings for httpclient * @param execTimeData query execution time telemetry data object * @param sfSession the session associated with the request * @return response * @throws SnowflakeSQLException if Snowflake error occurs * @throws IOException raises if a general IO error occurs */ public static String executeRequest( HttpRequestBase httpRequest, int retryTimeout, int authTimeout, int socketTimeout, int maxRetries, int injectSocketTimeout, AtomicBoolean canceling, boolean includeRetryParameters, boolean retryOnHTTP403, HttpClientSettingsKey ocspAndProxyKey, ExecTimeTelemetryData execTimeData, SFBaseSession sfSession) throws SnowflakeSQLException, IOException { return executeRequest( httpRequest, retryTimeout, authTimeout, socketTimeout, maxRetries, injectSocketTimeout, canceling, includeRetryParameters, retryOnHTTP403, ocspAndProxyKey, execTimeData, null, sfSession); } /** * Executes an HTTP request for Snowflake. * * @param httpRequest HttpRequestBase * @param retryTimeout retry timeout * @param authTimeout authenticator timeout * @param socketTimeout socket timeout (in ms) * @param maxRetries retry count for the request * @param injectSocketTimeout injecting socket timeout * @param canceling canceling? 
* @param includeRetryParameters whether to include retry parameters in retried requests * @param retryOnHTTP403 whether to retry on HTTP 403 or not * @param ocspAndProxyKey OCSP mode and proxy settings for httpclient * @param execTimeData query execution time telemetry data object * @param retryContextManager RetryContext used to customize retry handling functionality * @return response * @throws SnowflakeSQLException if Snowflake error occurs * @throws IOException raises if a general IO error occurs */ @Deprecated public static String executeRequest( HttpRequestBase httpRequest, int retryTimeout, int authTimeout, int socketTimeout, int maxRetries, int injectSocketTimeout, AtomicBoolean canceling, boolean includeRetryParameters, boolean retryOnHTTP403, HttpClientSettingsKey ocspAndProxyKey, ExecTimeTelemetryData execTimeData, RetryContextManager retryContextManager) throws SnowflakeSQLException, IOException { return executeRequest( httpRequest, retryTimeout, authTimeout, socketTimeout, maxRetries, injectSocketTimeout, canceling, includeRetryParameters, retryOnHTTP403, ocspAndProxyKey, execTimeData, retryContextManager, null); } /** * Executes an HTTP request for Snowflake. * * @param httpRequest HttpRequestBase * @param retryTimeout retry timeout * @param authTimeout authenticator timeout * @param socketTimeout socket timeout (in ms) * @param maxRetries retry count for the request * @param injectSocketTimeout injecting socket timeout * @param canceling canceling? * @param includeRetryParameters whether to include retry parameters in retried requests * @param retryOnHTTP403 whether to retry on HTTP 403 or not * @param ocspAndProxyKey OCSP mode and proxy settings for httpclient * @param execTimeData query execution time telemetry data object * @param retryContextManager RetryContext used to customize retry handling functionality * @param sfSession the session associated with the request * @return response * @throws SnowflakeSQLException if Snowflake error occurs * @throws IOException raises if a general IO error occurs */ public static String executeRequest( HttpRequestBase httpRequest, int retryTimeout, int authTimeout, int socketTimeout, int maxRetries, int injectSocketTimeout, AtomicBoolean canceling, boolean includeRetryParameters, boolean retryOnHTTP403, HttpClientSettingsKey ocspAndProxyKey, ExecTimeTelemetryData execTimeData, RetryContextManager retryContextManager, SFBaseSession sfSession) throws SnowflakeSQLException, IOException { boolean ocspEnabled = !(ocspAndProxyKey.getOcspMode().equals(OCSPMode.DISABLE_OCSP_CHECKS)); logger.debug("Executing request with OCSP enabled: {}", ocspEnabled); execTimeData.setOCSPStatus(ocspEnabled); return executeRequestInternal( httpRequest, retryTimeout, authTimeout, socketTimeout, maxRetries, injectSocketTimeout, canceling, false, // with cookie (do we need cookie?) includeRetryParameters, true, // include request GUID retryOnHTTP403, getHttpClient(ocspAndProxyKey, null), execTimeData, retryContextManager, sfSession, ocspAndProxyKey, null, false); } /** * Helper to execute a request with retries, check the response, and throw an exception if the * response is not a success. This should be used only for small requests, as it executes the REST * request and gets back the result as a string. * *
<p>
Connection under the httpRequest is released. * * @param httpRequest request object contains all the information * @param retryTimeout retry timeout (in seconds) * @param authTimeout authenticator specific timeout (in seconds) * @param socketTimeout socket timeout (in ms) * @param maxRetries retry count for the request * @param injectSocketTimeout simulate socket timeout * @param canceling canceling flag * @param withoutCookies whether this request should ignore cookies * @param includeRetryParameters whether to include retry parameters in retried requests * @param includeSnowflakeHeaders whether to include Snowflake headers (incl. request_guid) * @param retryOnHTTP403 whether to retry on HTTP 403 * @param httpClient client object used to communicate with other machine * @param retryContextManager RetryContext used to customize retry handling functionality * @param sfSession the session associated with the request * @param key HttpClientSettingsKey object * @param httpHeaderCustomizer HttpHeadersCustomizer object for customization of HTTP headers for * requests sent by the Snowflake JDBC driver. * @param isHttpClientWithoutDecompression flag for create client without Decompression * @return response in String * @throws SnowflakeSQLException if Snowflake error occurs * @throws IOException raises if a general IO error occurs */ private static String executeRequestInternal( HttpRequestBase httpRequest, int retryTimeout, int authTimeout, int socketTimeout, int maxRetries, int injectSocketTimeout, AtomicBoolean canceling, boolean withoutCookies, boolean includeRetryParameters, boolean includeSnowflakeHeaders, boolean retryOnHTTP403, CloseableHttpClient httpClient, ExecTimeTelemetryData execTimeData, RetryContextManager retryContextManager, SFBaseSession sfSession, HttpClientSettingsKey key, List httpHeaderCustomizer, boolean isHttpClientWithoutDecompression) throws SnowflakeSQLException, IOException { // Delegate to common method and extract string from response context HttpResponseContextDto responseContext = executeRequestInternalWithContext( httpRequest, retryTimeout, authTimeout, socketTimeout, maxRetries, injectSocketTimeout, canceling, withoutCookies, includeRetryParameters, includeSnowflakeHeaders, retryOnHTTP403, httpClient, execTimeData, retryContextManager, sfSession, key, httpHeaderCustomizer, isHttpClientWithoutDecompression); return responseContext.getUnpackedCloseableHttpResponse(); } /** * Common internal method to execute HTTP requests and return full response context. This method * contains the shared logic for building the request context and executing the request with * retries. * * @param httpRequest request object contains all the information * @param retryTimeout retry timeout (in seconds) * @param authTimeout authenticator specific timeout (in seconds) * @param socketTimeout socket timeout (in ms) * @param maxRetries retry count for the request * @param injectSocketTimeout simulate socket timeout * @param canceling canceling flag * @param withoutCookies whether this request should ignore cookies * @param includeRetryParameters whether to include retry parameters in retried requests * @param includeSnowflakeHeaders whether to include Snowflake headers (incl. 
request_guid) * @param retryOnHTTP403 whether to retry on HTTP 403 * @param httpClient client object used to communicate with other machine * @param execTimeData execution time telemetry data * @param retryContextManager RetryContext used to customize retry handling functionality * @param sfSession the session associated with the request * @param key HttpClientSettingsKey object * @param httpHeaderCustomizer HttpHeadersCustomizer object for customization of HTTP headers * @param isHttpClientWithoutDecompression flag for create client without Decompression * @return HttpResponseContextDto containing both response body and headers * @throws SnowflakeSQLException if Snowflake error occurs * @throws IOException raises if a general IO error occurs */ private static HttpResponseContextDto executeRequestInternalWithContext( HttpRequestBase httpRequest, int retryTimeout, int authTimeout, int socketTimeout, int maxRetries, int injectSocketTimeout, AtomicBoolean canceling, boolean withoutCookies, boolean includeRetryParameters, boolean includeSnowflakeHeaders, boolean retryOnHTTP403, CloseableHttpClient httpClient, ExecTimeTelemetryData execTimeData, RetryContextManager retryContextManager, SFBaseSession sfSession, HttpClientSettingsKey key, List httpHeaderCustomizer, boolean isHttpClientWithoutDecompression) throws SnowflakeSQLException, IOException { return executeRequestInternalWithContext( httpRequest, retryTimeout, authTimeout, socketTimeout, maxRetries, 0, injectSocketTimeout, canceling, withoutCookies, includeRetryParameters, includeSnowflakeHeaders, retryOnHTTP403, httpClient, execTimeData, retryContextManager, sfSession, key, httpHeaderCustomizer, isHttpClientWithoutDecompression); } /** * Common internal method to execute HTTP requests and return full response context. This method * contains the shared logic for building the request context and executing the request with * retries. * * @param httpRequest request object contains all the information * @param retryTimeout retry timeout (in seconds) * @param authTimeout authenticator specific timeout (in seconds) * @param socketTimeout socket timeout (in ms) * @param maxRetries retry count for the request * @param retriedCount The number of retries attempted to execute the request. * @param injectSocketTimeout simulate socket timeout * @param canceling canceling flag * @param withoutCookies whether this request should ignore cookies * @param includeRetryParameters whether to include retry parameters in retried requests * @param includeSnowflakeHeaders whether to include Snowflake headers (incl. 
request_guid) * @param retryOnHTTP403 whether to retry on HTTP 403 * @param httpClient client object used to communicate with other machine * @param execTimeData execution time telemetry data * @param retryContextManager RetryContext used to customize retry handling functionality * @param sfSession the session associated with the request * @param key HttpClientSettingsKey object * @param httpHeaderCustomizer HttpHeadersCustomizer object for customization of HTTP headers * @param isHttpClientWithoutDecompression flag for create client without Decompression * @return HttpResponseContextDto containing both response body and headers * @throws SnowflakeSQLException if Snowflake error occurs * @throws IOException raises if a general IO error occurs */ private static HttpResponseContextDto executeRequestInternalWithContext( HttpRequestBase httpRequest, int retryTimeout, int authTimeout, int socketTimeout, int maxRetries, int retriedCount, int injectSocketTimeout, AtomicBoolean canceling, boolean withoutCookies, boolean includeRetryParameters, boolean includeSnowflakeHeaders, boolean retryOnHTTP403, CloseableHttpClient httpClient, ExecTimeTelemetryData execTimeData, RetryContextManager retryContextManager, SFBaseSession sfSession, HttpClientSettingsKey key, List httpHeaderCustomizer, boolean isHttpClientWithoutDecompression) throws SnowflakeSQLException, IOException { String requestInfoScrubbed = SecretDetector.maskSASToken(httpRequest.toString()); logger.debug( "Pool: {} Executing: {}", (ArgSupplier) () -> getHttpClientStats(httpClient.getConnectionManager()), requestInfoScrubbed); Stopwatch stopwatch = null; String requestIdStr = URLUtil.getRequestIdLogStr(httpRequest.getURI()); HttpExecutingContext context = HttpExecutingContextBuilder.forSimpleRequest(requestIdStr, requestInfoScrubbed) .retryTimeout(retryTimeout) .authTimeout(authTimeout) .origSocketTimeout(socketTimeout) .maxRetries(maxRetries) .injectSocketTimeout(injectSocketTimeout) .canceling(canceling) .withoutCookies(withoutCookies) .includeRetryParameters(includeRetryParameters) .includeSnowflakeHeaders(includeSnowflakeHeaders) .retryHTTP403(retryOnHTTP403) .unpackResponse(true) .noRetry(false) .loginRequest(SessionUtil.isNewRetryStrategyRequest(httpRequest)) .withSfSession(sfSession) .build(); context.setRetryCount(retriedCount); HttpResponseContextDto responseContext = RestRequest.executeWithRetries( httpClient, httpRequest, context, execTimeData, retryContextManager, key, httpHeaderCustomizer, isHttpClientWithoutDecompression); logger.debug( "Pool: {} Request returned for: {} took {} ms", (ArgSupplier) () -> HttpUtil.getHttpClientStats(httpClient.getConnectionManager()), requestInfoScrubbed, stopwatch == null ? "n/a" : stopwatch.elapsedMillis()); return responseContext; } // This is a workaround for JDK-7036144. // // The GZIPInputStream prematurely closes its input if a) it finds // a whole GZIP block and b) input.available() returns 0. In order // to work around this issue, we inject a thin wrapper for the // InputStream whose available() method always returns at least 1. // // Further details on this bug: // http://bugs.java.com/bugdatabase/view_bug.do?bug_id=7036144 public static final class HttpInputStream extends InputStream { private final InputStream httpIn; public HttpInputStream(InputStream httpIn) { this.httpIn = httpIn; } // This is the only modified function, all other // methods are simple wrapper around the HTTP stream. 
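// Illustrative usage (not in the original source): a caller would wrap the raw
// HTTP entity stream before gzip-decoding it, so that available() never
// reports 0 in the middle of a complete GZIP block:
//
//   InputStream raw = response.getEntity().getContent();
//   InputStream decoded = new GZIPInputStream(new HttpUtil.HttpInputStream(raw));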
@Override public final int available() throws IOException { int available = httpIn.available(); return available == 0 ? 1 : available; } // ONLY WRAPPER METHODS FROM HERE ON. @Override public final int read() throws IOException { return httpIn.read(); } @Override public final int read(byte b[]) throws IOException { return httpIn.read(b); } @Override public final int read(byte b[], int off, int len) throws IOException { return httpIn.read(b, off, len); } @Override public final long skip(long n) throws IOException { return httpIn.skip(n); } @Override public final void close() throws IOException { httpIn.close(); } @Override public synchronized void mark(int readlimit) { httpIn.mark(readlimit); } @Override public synchronized void reset() throws IOException { httpIn.reset(); } @Override public final boolean markSupported() { return httpIn.markSupported(); } } static final class SFConnectionSocketFactory extends PlainConnectionSocketFactory { @Override public Socket createSocket(HttpContext ctx) throws IOException { if (socksProxyDisabled) { logger.trace("Creating socket with no proxy"); return new Socket(Proxy.NO_PROXY); } logger.trace("Creating socket with proxy"); return super.createSocket(ctx); } } /** * Helper function to attach additional headers to a request if present. This takes a (nullable) * map of headers in <String, String> format and adds them to the incoming request using addHeader. * *
<p>
Snowsight uses this to attach headers with additional telemetry information, see * https://snowflakecomputing.atlassian.net/wiki/spaces/EN/pages/2960557006/GS+Communication * * @param request The request to add headers to. Must not be null. * @param additionalHeaders The headers to add. May be null. */ static void applyAdditionalHeadersForSnowsight( HttpRequestBase request, Map additionalHeaders) { if (additionalHeaders != null && !additionalHeaders.isEmpty()) { additionalHeaders.forEach(request::addHeader); } } public static CloseableHttpClient getHttpClientForCrl(HttpClientSettingsKey key) { return getHttpClient((int) HttpUtil.getConnectionTimeout().toMillis(), key); } public static CloseableHttpClient getHttpClientForOcsp(HttpClientSettingsKey key) { return getHttpClient(key.getOcspTimeout(), key); } private static CloseableHttpClient getHttpClient(int timeout, HttpClientSettingsKey key) { int idleConnectionTimeout = SystemUtil.convertSystemPropertyToIntValue( JDBC_IDLE_CONNECTION_PROPERTY, DEFAULT_IDLE_CONNECTION_TIMEOUT); RequestConfig config = RequestConfig.custom() .setConnectTimeout(timeout) .setConnectionRequestTimeout(timeout) .setSocketTimeout(timeout) .build(); Registry registry = RegistryBuilder.create() .register("http", new HttpUtil.SFConnectionSocketFactory()) .build(); // Build a connection manager with enough connections PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager(registry); connectionManager.setMaxTotal(1); connectionManager.setValidateAfterInactivity(idleConnectionTimeout); HttpClientBuilder httpClientBuilder = HttpClientBuilder.create() .setDefaultRequestConfig(config) .setConnectionManager(connectionManager) .evictExpiredConnections() .evictIdleConnections(idleConnectionTimeout, TimeUnit.SECONDS) // Support JVM proxy settings .useSystemProperties() .setRedirectStrategy(new DefaultRedirectStrategy()) .disableCookieManagement(); if (key.usesProxy()) { // use the custom proxy properties HttpHost proxy = new HttpHost(key.getProxyHost(), key.getProxyPort()); SnowflakeMutableProxyRoutePlanner sdkProxyRoutePlanner = new SnowflakeMutableProxyRoutePlanner( key.getProxyHost(), key.getProxyPort(), HttpProtocol.HTTP, key.getNonProxyHosts()); httpClientBuilder.setProxy(proxy).setRoutePlanner(sdkProxyRoutePlanner); if (!isNullOrEmpty(key.getProxyUser()) && !isNullOrEmpty(key.getProxyPassword())) { Credentials credentials = new UsernamePasswordCredentials(key.getProxyUser(), key.getProxyPassword()); AuthScope authScope = new AuthScope(key.getProxyHost(), key.getProxyPort()); CredentialsProvider credentialsProvider = new BasicCredentialsProvider(); credentialsProvider.setCredentials(authScope, credentials); httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider); } } return httpClientBuilder.build(); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/JsonSqlInput.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.core.SFBaseResultSet.OBJECT_MAPPER; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.mapSFExceptionToSQLException; import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.ObjectNode; import java.math.BigDecimal; import java.sql.Date; import java.sql.SQLData; import java.sql.SQLException; import java.sql.SQLInput; 
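// Illustrative sketch (not part of the original file): JsonSqlInput is the
// SQLInput handed to a user-defined SQLData class when a JSON structured-type
// value is read; the Point class below is hypothetical, and the read order
// must match the field metadata:
//
//   public class Point implements SQLData {
//     private int x;
//     private int y;
//
//     public void readSQL(SQLInput in, String typeName) throws SQLException {
//       x = in.readInt();
//       y = in.readInt();
//     }
//
//     public void writeSQL(SQLOutput out) throws SQLException { ... }
//
//     public String getSQLTypeName() throws SQLException { return "POINT"; }
//   }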
import java.sql.Time; import java.sql.Timestamp; import java.time.Instant; import java.time.ZoneOffset; import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.TimeZone; import net.snowflake.client.api.resultset.FieldMetadata; import net.snowflake.client.internal.core.json.Converters; import net.snowflake.client.internal.core.structs.SQLDataCreationHelper; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.client.internal.util.ThrowingBiFunction; import net.snowflake.common.core.SFTimestamp; import net.snowflake.common.core.SnowflakeDateTimeFormat; public class JsonSqlInput extends BaseSqlInput { private static final SFLogger logger = SFLoggerFactory.getLogger(JsonSqlInput.class); private final String text; private final JsonNode input; private final Iterator elements; private final TimeZone sessionTimeZone; private int currentIndex = 0; private boolean wasNull = false; public JsonSqlInput( String text, JsonNode input, SFBaseSession session, Converters converters, List fields, TimeZone sessionTimeZone) { super(session, converters, fields); this.text = text; this.input = input; this.elements = input.elements(); this.sessionTimeZone = sessionTimeZone; } public JsonNode getInput() { return input; } public String getText() { return text; } @Override public String readString() throws SQLException { return withNextValue((this::convertString)); } @Override public boolean readBoolean() throws SQLException { return withNextValue(this::convertBoolean); } @Override public byte readByte() throws SQLException { return withNextValue( (value, fieldMetadata) -> mapSFExceptionToSQLException(() -> converters.getNumberConverter().getByte(value))); } @Override public short readShort() throws SQLException { return withNextValue(this::convertShort); } @Override public int readInt() throws SQLException { return withNextValue(this::convertInt); } @Override public long readLong() throws SQLException { return withNextValue(this::convertLong); } @Override public float readFloat() throws SQLException { return withNextValue(this::convertFloat); } @Override public double readDouble() throws SQLException { return withNextValue(this::convertDouble); } @Override public BigDecimal readBigDecimal() throws SQLException { return withNextValue(this::convertBigDecimal); } @Override public byte[] readBytes() throws SQLException { return withNextValue(this::convertBytes); } @Override public Date readDate() throws SQLException { return withNextValue( (value, fieldMetadata) -> { if (value == null) { return null; } return convertDate((String) value); }); } private Date convertDate(String value) { SnowflakeDateTimeFormat formatter = getFormat(session, "DATE_OUTPUT_FORMAT"); SFTimestamp timestamp = formatter.parse(value); return Date.valueOf( Instant.ofEpochMilli(timestamp.getTime()).atZone(ZoneOffset.UTC).toLocalDate()); } @Override public Time readTime() throws SQLException { return withNextValue( (value, fieldMetadata) -> { if (value == null) { return null; } return convertTime((String) value); }); } private Time convertTime(String value) { SnowflakeDateTimeFormat formatter = getFormat(session, "TIME_OUTPUT_FORMAT"); SFTimestamp timestamp = formatter.parse(value); return Time.valueOf( Instant.ofEpochMilli(timestamp.getTime()).atZone(ZoneOffset.UTC).toLocalTime()); } @Override public Timestamp readTimestamp() throws SQLException { return readTimestamp(null); } @Override 
public Timestamp readTimestamp(TimeZone tz) throws SQLException { return withNextValue( (value, fieldMetadata) -> { if (value == null) { return null; } return convertTimestamp(tz, value, fieldMetadata); }); } @Override public T readObject(Class type, TimeZone tz) throws SQLException { return withNextValue((value, fieldMetadata) -> convertObject(type, tz, value, fieldMetadata)); } private T convertObject(Class type, TimeZone tz, Object value, FieldMetadata fieldMetadata) throws SQLException { if (value == null) { return null; } else if (SQLData.class.isAssignableFrom(type)) { if (!JsonNode.class.isAssignableFrom(value.getClass())) { logger.error("Object of class JsonNode is expected to convert to SqlData"); return null; } JsonNode jsonNode = (JsonNode) value; SQLInput sqlInput = new JsonSqlInput( null, jsonNode, session, converters, fieldMetadata.getFields(), sessionTimeZone); SQLData instance = (SQLData) SQLDataCreationHelper.create(type); instance.readSQL(sqlInput, null); return (T) instance; } else if (Map.class.isAssignableFrom(type)) { if (value == null) { return null; } else { return (T) convertSqlInputToMap((SQLInput) value); } } else if (String.class.isAssignableFrom(type)) { return (T) convertString(value, fieldMetadata); } else if (Boolean.class.isAssignableFrom(type)) { return (T) convertBoolean(value, fieldMetadata); } else if (Byte.class.isAssignableFrom(type)) { return (T) convertString(value, fieldMetadata); } else if (Short.class.isAssignableFrom(type)) { return (T) convertShort(value, fieldMetadata); } else if (Integer.class.isAssignableFrom(type)) { return (T) convertInt(value, fieldMetadata); } else if (Long.class.isAssignableFrom(type)) { return (T) convertLong(value, fieldMetadata); } else if (Float.class.isAssignableFrom(type)) { return (T) convertFloat(value, fieldMetadata); } else if (Double.class.isAssignableFrom(type)) { return (T) convertFloat(value, fieldMetadata); } else if (Date.class.isAssignableFrom(type)) { return (T) convertDate((String) value); } else if (Time.class.isAssignableFrom(type)) { return (T) convertTime((String) value); } else if (Timestamp.class.isAssignableFrom(type)) { return (T) convertTimestamp(tz, value, fieldMetadata); } else if (BigDecimal.class.isAssignableFrom(type)) { return (T) convertBigDecimal(value, fieldMetadata); } else if (byte[].class.isAssignableFrom(type)) { return (T) convertBytes(value, fieldMetadata); } else { logger.debug( "Unsupported type passed to readObject(int columnIndex,Class type): " + type.getName()); throw new SQLException( "Type passed to 'getObject(int columnIndex,Class type)' is unsupported. 
Type: " + type.getName()); } } @Override public List readList(Class type) throws SQLException { return withNextValue( (value, fieldMetadata) -> { if (value == null) { return null; } List result = new ArrayList(); if (ArrayNode.class.isAssignableFrom(value.getClass())) { for (JsonNode node : (ArrayNode) value) { result.add( convertObject( type, TimeZone.getDefault(), getValue(node), fieldMetadata.getFields().get(0))); } return result; } else { logger.debug("Given object could not be converted to List of type: " + type.getName()); throw new SQLException( "Given object could not be converted to List of type: " + type.getName()); } }); } @Override public T[] readArray(Class type) throws SQLException { return withNextValue( (value, fieldMetadata) -> { if (value == null) { return null; } if (ArrayNode.class.isAssignableFrom(value.getClass())) { ArrayNode valueNodes = (ArrayNode) value; T[] array = (T[]) java.lang.reflect.Array.newInstance(type, valueNodes.size()); int counter = 0; for (JsonNode node : valueNodes) { array[counter++] = convertObject( type, TimeZone.getDefault(), getValue(node), fieldMetadata.getFields().get(0)); } return array; } else { logger.debug("Given object could not be converted to Array of type: " + type.getName()); throw new SQLException( "Given object could not be converted to List of type: " + type.getName()); } }); } @Override public Map readMap(Class type) throws SQLException { return withNextValue( (value, fieldMetadata) -> { if (value == null) { return null; } if (ObjectNode.class.isAssignableFrom(value.getClass())) { Map result = new HashMap<>(); ObjectNode arrayNode = (ObjectNode) value; for (Iterator it = arrayNode.fieldNames(); it.hasNext(); ) { String key = it.next(); result.put( key, convertObject( type, TimeZone.getDefault(), getValue(arrayNode.get(key)), fieldMetadata)); } return result; } else { logger.debug( "Given object could not be converted to Map of String and type: " + type.getName()); throw new SQLException( "Given object could not be converted to Map of String and type: " + type.getName()); } }); } private Timestamp convertTimestamp(TimeZone tz, Object value, FieldMetadata fieldMetadata) throws SQLException { if (value == null) { return null; } int columnType = ColumnTypeHelper.getColumnType(fieldMetadata.getType(), session); int columnSubType = fieldMetadata.getType(); int scale = fieldMetadata.getScale(); Timestamp result = SfTimestampUtil.getTimestampFromType( columnSubType, (String) value, session, sessionTimeZone, tz); if (result != null) { return result; } return mapSFExceptionToSQLException( () -> converters .getDateTimeConverter() .getTimestamp(value, columnType, columnSubType, tz, scale)); } @Override public Object readObject() throws SQLException { return withNextValue((value, fieldMetadata) -> value); } @Override public T readObject(Class type) throws SQLException { return readObject(type, sessionTimeZone); } public boolean wasNull() { return wasNull; } @Override Map convertSqlInputToMap(SQLInput sqlInput) { return OBJECT_MAPPER.convertValue( ((JsonSqlInput) sqlInput).getInput(), new TypeReference>() {}); } private T withNextValue(ThrowingBiFunction action) throws SQLException { JsonNode jsonNode = elements.next(); Object value = getValue(jsonNode); wasNull = value == null; return action.apply(value, fields.get(currentIndex++)); } private Object getValue(JsonNode jsonNode) { if (jsonNode.isTextual()) { return jsonNode.textValue(); } else if (jsonNode.isBoolean()) { return jsonNode.booleanValue(); } else if (jsonNode.isNumber()) { return 
jsonNode.numberValue(); } else if (jsonNode.isObject() || jsonNode.isArray()) { return jsonNode; } return null; } private static SnowflakeDateTimeFormat getFormat(SFBaseSession session, String format) { return SnowflakeDateTimeFormat.fromSqlFormat( (String) session.getCommonParameters().get(format)); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/JsonSqlOutput.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.core.FieldSchemaCreator.buildSchemaTypeAndNameOnly; import static net.snowflake.client.internal.core.FieldSchemaCreator.buildSchemaWithScaleAndPrecision; import java.io.InputStream; import java.io.Reader; import java.lang.reflect.Field; import java.lang.reflect.Modifier; import java.math.BigDecimal; import java.net.URL; import java.sql.Array; import java.sql.Blob; import java.sql.Clob; import java.sql.Date; import java.sql.NClob; import java.sql.Ref; import java.sql.RowId; import java.sql.SQLData; import java.sql.SQLException; import java.sql.SQLOutput; import java.sql.SQLXML; import java.sql.Struct; import java.sql.Time; import java.sql.Timestamp; import java.util.ArrayList; import java.util.Arrays; import java.util.Iterator; import java.util.List; import java.util.Optional; import java.util.TimeZone; import java.util.stream.Collectors; import net.minidev.json.JSONObject; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.common.core.SFBinary; import net.snowflake.client.internal.jdbc.BindingParameterMetadata; import net.snowflake.client.internal.jdbc.SnowflakeColumn; import net.snowflake.client.internal.jdbc.SnowflakeLoggedFeatureNotSupportedException; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import net.snowflake.client.internal.jdbc.util.SnowflakeTypeUtil; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.client.internal.util.ThrowingTriCallable; import net.snowflake.common.core.SFTime; import net.snowflake.common.core.SFTimestamp; import net.snowflake.common.core.SnowflakeDateTimeFormat; public class JsonSqlOutput implements SQLOutput { static final SFLogger logger = SFLoggerFactory.getLogger(JsonSqlOutput.class); private JSONObject json; private SQLData original; private SFBaseSession session; private Iterator fields; private BindingParameterMetadata schema; private TimeZone sessionTimezone; public JsonSqlOutput(SQLData original, SFBaseSession sfBaseSession) { this.original = original; this.session = sfBaseSession; this.sessionTimezone = getSessionTimezone(sfBaseSession); fields = getClassFields(original).iterator(); schema = new BindingParameterMetadata("object"); schema.setFields(new ArrayList<>()); json = new JSONObject(); } private TimeZone getSessionTimezone(SFBaseSession sfBaseSession) { String timeZoneName = (String) ResultUtil.effectiveParamValue(sfBaseSession.getCommonParameters(), "TIMEZONE"); return TimeZone.getTimeZone(timeZoneName); } private static List getClassFields(SQLData original) { return Arrays.stream(original.getClass().getDeclaredFields()) .filter( field -> !Modifier.isStatic(field.getModifiers()) && !Modifier.isTransient(field.getModifiers())) .collect(Collectors.toList()); } @Override public void writeString(String value) throws SQLException { withNextValue( ((json, fieldName, maybeColumn) -> { json.put(fieldName, value); 
schema.getFields().add(FieldSchemaCreator.buildSchemaForText(fieldName, maybeColumn)); })); } @Override public void writeBoolean(boolean value) throws SQLException { withNextValue( ((json, fieldName, maybeColumn) -> { json.put(fieldName, value); schema.getFields().add(buildSchemaTypeAndNameOnly(fieldName, "boolean", maybeColumn)); })); } @Override public void writeByte(byte value) throws SQLException { withNextValue( ((json, fieldName, maybeColumn) -> { json.put(fieldName, value); schema .getFields() .add(buildSchemaWithScaleAndPrecision(fieldName, "fixed", 0, 38, maybeColumn)); })); } @Override public void writeShort(short value) throws SQLException { withNextValue( ((json, fieldName, maybeColumn) -> { json.put(fieldName, value); schema .getFields() .add(buildSchemaWithScaleAndPrecision(fieldName, "fixed", 0, 38, maybeColumn)); })); } @Override public void writeInt(int input) throws SQLException { withNextValue( ((json, fieldName, maybeColumn) -> { json.put(fieldName, input); schema .getFields() .add(buildSchemaWithScaleAndPrecision(fieldName, "fixed", 0, 38, maybeColumn)); })); } @Override public void writeLong(long value) throws SQLException { withNextValue( ((json, fieldName, maybeColumn) -> { json.put(fieldName, value); schema .getFields() .add(buildSchemaWithScaleAndPrecision(fieldName, "fixed", 0, 38, maybeColumn)); })); } @Override public void writeFloat(float value) throws SQLException { withNextValue( ((json, fieldName, maybeColumn) -> { json.put(fieldName, value); schema.getFields().add(buildSchemaTypeAndNameOnly(fieldName, "real", maybeColumn)); })); } @Override public void writeDouble(double value) throws SQLException { withNextValue( ((json, fieldName, maybeColumn) -> { json.put(fieldName, value); schema.getFields().add(buildSchemaTypeAndNameOnly(fieldName, "real", maybeColumn)); })); } @Override public void writeBigDecimal(BigDecimal value) throws SQLException { withNextValue( ((json, fieldName, maybeColumn) -> { json.put(fieldName, value); schema .getFields() .add( buildSchemaWithScaleAndPrecision( fieldName, "fixed", value.scale(), 38, maybeColumn)); })); } @Override public void writeBytes(byte[] value) throws SQLException { withNextValue( ((json, fieldName, maybeColumn) -> { json.put(fieldName, new SFBinary(value).toHex()); schema .getFields() .add(FieldSchemaCreator.buildSchemaForBytesType(fieldName, maybeColumn)); })); } @Override public void writeDate(Date value) throws SQLException { withNextValue( ((json, fieldName, maybeColumn) -> { json.put( fieldName, ResultUtil.getDateAsString(value, getDateTimeFormat("DATE_OUTPUT_FORMAT"))); schema.getFields().add(buildSchemaTypeAndNameOnly(fieldName, "date", maybeColumn)); })); } @Override public void writeTime(Time x) throws SQLException { withNextValue( ((json, fieldName, maybeColumn) -> { long nanosSinceMidnight; if (session.getTreatTimeAsWallClockTime()) { nanosSinceMidnight = x.toLocalTime().toNanoOfDay(); } else { nanosSinceMidnight = SfTimestampUtil.getTimeInNanoseconds(x); } String result = ResultUtil.getSFTimeAsString( SFTime.fromNanoseconds(nanosSinceMidnight), 9, getDateTimeFormat("TIME_OUTPUT_FORMAT")); json.put(fieldName, result); schema .getFields() .add(buildSchemaWithScaleAndPrecision(fieldName, "time", 9, 0, maybeColumn)); })); } @Override public void writeTimestamp(Timestamp value) throws SQLException { withNextValue( ((json, fieldName, maybeColumn) -> { String timestampSessionType = (String) ResultUtil.effectiveParamValue( session.getCommonParameters(), "CLIENT_TIMESTAMP_TYPE_MAPPING"); SnowflakeType 
snowflakeType = SnowflakeTypeUtil.fromString( maybeColumn .map(cl -> cl.type()) .filter(str -> !str.isEmpty()) .orElse(timestampSessionType)); int columnType = snowflakeTypeToJavaType(snowflakeType); TimeZone timeZone = timeZoneDependOnType(snowflakeType, session, null); String timestampAsString = SnowflakeUtil.mapSFExceptionToSQLException( () -> ResultUtil.getSFTimestampAsString( new SFTimestamp(value, timeZone), columnType, 9, getDateTimeFormat("TIMESTAMP_NTZ_OUTPUT_FORMAT"), getDateTimeFormat("TIMESTAMP_LTZ_OUTPUT_FORMAT"), getDateTimeFormat("TIMESTAMP_TZ_OUTPUT_FORMAT"), session)); json.put(fieldName, timestampAsString); schema .getFields() .add( buildSchemaWithScaleAndPrecision( fieldName, snowflakeType.name(), 9, 0, maybeColumn)); })); } @Override public void writeCharacterStream(Reader x) throws SQLException { logger.debug(" Unsupported method writeCharacterStream(Reader x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void writeAsciiStream(InputStream x) throws SQLException { logger.debug("Unsupported method writeAsciiStream(InputStream x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void writeBinaryStream(InputStream x) throws SQLException { logger.debug("Unsupported method writeBinaryStream(InputStream x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void writeObject(SQLData sqlData) throws SQLException { withNextValue( ((json, fieldName, maybeColumn) -> { JsonSqlOutput jsonSqlOutput = new JsonSqlOutput(sqlData, session); sqlData.writeSQL(jsonSqlOutput); json.put(fieldName, jsonSqlOutput.getJsonObject()); BindingParameterMetadata structSchema = jsonSqlOutput.getSchema(); structSchema.setName(fieldName); schema.getFields().add(structSchema); })); } @Override public void writeRef(Ref x) throws SQLException { logger.debug("Unsupported method writeRef(Ref x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void writeBlob(Blob x) throws SQLException { logger.debug("Unsupported method writeBlob(Blob x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void writeClob(Clob x) throws SQLException { logger.debug("Unsupported method writeClob(Clob x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void writeStruct(Struct x) throws SQLException { logger.debug("Unsupported method writeStruct(Struct x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void writeArray(Array x) throws SQLException { logger.debug("Unsupported method writeArray(Array x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void writeURL(URL x) throws SQLException { logger.debug("Unsupported method writeURL(URL x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void writeNString(String x) throws SQLException { logger.debug("Unsupported method writeNString(String x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void writeNClob(NClob x) throws SQLException { logger.debug("Unsupported method writeNClob(NClob x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public void writeRowId(RowId x) throws SQLException { logger.debug("Unsupported method writeRowId(RowId x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } 
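// A minimal usage sketch (the Address class below is hypothetical, not part of this driver):
// a custom SQLData implementation drives JsonSqlOutput through its writeSQL callback.
//
//   public class Address implements SQLData {
//     private String street;
//     private int zipCode;
//
//     @Override
//     public void writeSQL(SQLOutput out) throws SQLException {
//       out.writeString(street); // recorded as {"street": ...} plus a text schema entry
//       out.writeInt(zipCode);   // recorded as {"zipCode": ...} plus a fixed schema entry (scale 0, precision 38)
//     }
//     // readSQL and getSQLTypeName omitted for brevity
//   }
//
// The order of writes in writeSQL must match the declared (non-static, non-transient) field
// order, because withNextValue(...) below walks the class fields with an iterator.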
@Override public void writeSQLXML(SQLXML x) throws SQLException { logger.debug("Unsupported method writeSQLXML(SQLXML x)", false); throw new SnowflakeLoggedFeatureNotSupportedException(session); } public String getJsonString() { return json.toJSONString(); } public JSONObject getJsonObject() { return json; } private void withNextValue( ThrowingTriCallable<JSONObject, String, Optional<SnowflakeColumn>, SQLException> action) throws SQLException { Field field = fields.next(); String fieldName = field.getName(); Optional<SnowflakeColumn> maybeColumn = Optional.ofNullable(field.getAnnotation(SnowflakeColumn.class)); action.apply(json, fieldName, maybeColumn); } private SnowflakeDateTimeFormat getDateTimeFormat(String format) { String rawFormat = (String) session.getCommonParameters().get(format); if (rawFormat == null || rawFormat.isEmpty()) { rawFormat = (String) session.getCommonParameters().get("TIMESTAMP_OUTPUT_FORMAT"); } SnowflakeDateTimeFormat formatter = SnowflakeDateTimeFormat.fromSqlFormat(rawFormat); return formatter; } public BindingParameterMetadata getSchema() { return schema; } private TimeZone timeZoneDependOnType( SnowflakeType snowflakeType, SFBaseSession session, TimeZone tz) { if (snowflakeType == SnowflakeType.TIMESTAMP_NTZ) { return null; } else if (snowflakeType == SnowflakeType.TIMESTAMP_LTZ) { return getSessionTimezone(session); } else if (snowflakeType == SnowflakeType.TIMESTAMP_TZ) { return Optional.ofNullable(tz).orElse(sessionTimezone); } return TimeZone.getDefault(); } private int snowflakeTypeToJavaType(SnowflakeType snowflakeType) { if (snowflakeType == SnowflakeType.TIMESTAMP_NTZ) { return SnowflakeType.EXTRA_TYPES_TIMESTAMP_NTZ; } else if (snowflakeType == SnowflakeType.TIMESTAMP_LTZ) { return SnowflakeType.EXTRA_TYPES_TIMESTAMP_LTZ; } return SnowflakeType.EXTRA_TYPES_TIMESTAMP_TZ; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/MetaDataOfBinds.java ================================================ package net.snowflake.client.internal.core; import java.io.Serializable; /** * Class that stores information about a binding parameter's metadata. Each instantiation of a * MetaDataOfBinds object corresponds to one binding parameter; an ArrayList of MetaDataOfBinds * corresponds to a list of binding parameters in a prepared statement.
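 *
 * <p>A minimal construction sketch (argument values are illustrative only):
 *
 * <pre>{@code
 * MetaDataOfBinds meta = new MetaDataOfBinds(38, true, 0, 0, 0, "ID", "FIXED");
 * }</pre>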
*/ public class MetaDataOfBinds implements Serializable { private static final long serialVersionUID = 1L; private int precision; private boolean nullable; private int scale; private int byteLength; private int length; private String name; private String type; public MetaDataOfBinds(int prec, boolean n, int sc, int bL, int len, String name, String type) { this.precision = prec; this.nullable = n; this.scale = sc; this.byteLength = bL; this.length = len; this.name = name; this.type = type; } public int getPrecision() { return this.precision; } public boolean isNullable() { return this.nullable; } public int getScale() { return this.scale; } public int getByteLength() { return this.byteLength; } public int getLength() { return this.length; } public String getName() { return this.name; } public String getTypeName() { return this.type; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/NoOpFileCacheManager.java ================================================ package net.snowflake.client.internal.core; import com.fasterxml.jackson.databind.JsonNode; import java.io.File; import java.util.function.Supplier; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; class NoOpFileCacheManager implements FileCacheManager { private static final SFLogger logger = SFLoggerFactory.getLogger(NoOpFileCacheManager.class); NoOpFileCacheManager() { logger.warn( "Cache is not available. Caching will be disabled. " + "Set the HOME, SF_TEMPORARY_CREDENTIAL_CACHE_DIR, " + "or net.snowflake.jdbc.temporaryCredentialCacheDir to enable caching."); } @Override public String getCacheFilePath() { return null; } @Override public void overrideCacheFile(File newCacheFile) { logger.debug("Cache is not enabled; ignoring override", false); } @Override public <T> T withLock(Supplier<T> supplier) { return null; } @Override public JsonNode readCacheFile() { return null; } @Override public void writeCacheFile(JsonNode input) {} @Override public void deleteCacheFile() {} } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/OCSPMode.java ================================================ package net.snowflake.client.internal.core; /** OCSP mode */ public enum OCSPMode { /** * Fail closed, a.k.a. hard failure mode. The connection is blocked if the revocation status is * revoked or the status cannot be determined. */ FAIL_CLOSED(0), /** * Fail open, a.k.a. soft failure mode. The connection is blocked only if the revocation status is * revoked; otherwise it is opened for any other reason, including the case where the revocation * status cannot be retrieved. */ FAIL_OPEN(1), /** * @deprecated Use {@link #DISABLE_OCSP_CHECKS} for clarity. This configuration option is used to * disable OCSP verification. Insecure mode: no OCSP check is made. */ @Deprecated INSECURE(2), /** Disable OCSP checks. It's used to disable OCSP verification.
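 * Intended as the clearer replacement for the deprecated {@link #INSECURE} mode.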
*/ DISABLE_OCSP_CHECKS(3); private final int value; OCSPMode(int value) { this.value = value; } public int getValue() { return this.value; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/OCSPTelemetryData.java ================================================ package net.snowflake.client.internal.core; import java.security.cert.CertificateException; import net.minidev.json.JSONObject; import net.snowflake.client.internal.jdbc.telemetryOOB.TelemetryService; public class OCSPTelemetryData { private String certId; private String sfcPeerHost; private String ocspUrl; private String ocspReq; private Boolean cacheEnabled; private Boolean cacheHit; private OCSPMode ocspMode; public OCSPTelemetryData() { this.ocspMode = OCSPMode.FAIL_OPEN; this.cacheEnabled = true; } public void setCertId(String certId) { this.certId = certId; } public void setSfcPeerHost(String sfcPeerHost) { this.sfcPeerHost = sfcPeerHost; } public void setOcspUrl(String ocspUrl) { this.ocspUrl = ocspUrl; } public void setOcspReq(String ocspReq) { this.ocspReq = ocspReq; } public void setCacheEnabled(Boolean cacheEnabled) { this.cacheEnabled = cacheEnabled; if (!cacheEnabled) { this.cacheHit = false; } } public void setCacheHit(Boolean cacheHit) { if (!this.cacheEnabled) { this.cacheHit = false; } else { this.cacheHit = cacheHit; } } public void setOCSPMode(OCSPMode ocspMode) { this.ocspMode = ocspMode; } public String generateTelemetry(String eventType, CertificateException ex) { JSONObject value = new JSONObject(); String valueStr; value.put("eventType", eventType); value.put("sfcPeerHost", this.sfcPeerHost); value.put("certId", this.certId); value.put("ocspResponderURL", this.ocspUrl); value.put("ocspReqBase64", this.ocspReq); value.put("ocspMode", this.ocspMode.name()); value.put("cacheEnabled", this.cacheEnabled); value.put("cacheHit", this.cacheHit); valueStr = value.toString(); // Avoid adding exception stacktrace to user logs. TelemetryService.getInstance().logOCSPExceptionTelemetryEvent(eventType, value, ex); return valueStr; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/ObjectMapperFactory.java ================================================ package net.snowflake.client.internal.core; import com.fasterxml.jackson.core.StreamReadConstraints; import com.fasterxml.jackson.databind.DeserializationFeature; import com.fasterxml.jackson.databind.MapperFeature; import com.fasterxml.jackson.databind.ObjectMapper; import java.text.SimpleDateFormat; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.common.core.SnowflakeDateTimeFormat; /** * Factory method used to create ObjectMapper instances. All object mappers in JDBC should be * created by this method.
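 *
 * <p>A minimal usage sketch:
 *
 * <pre>{@code
 * ObjectMapper mapper = ObjectMapperFactory.getObjectMapper();
 * JsonNode node = mapper.readTree("{\"answer\": 42}");
 * }</pre>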
*/ public class ObjectMapperFactory { private static final SFLogger log = SFLoggerFactory.getLogger(ObjectMapperFactory.class); // Snowflake allows up to 128M (after updating Max LOB size) string size and returns base64 // encoded value that makes it up to 180M public static final int DEFAULT_MAX_JSON_STRING_LEN = 180_000_000; public static final String MAX_JSON_STRING_LENGTH_JVM = "net.snowflake.jdbc.objectMapper.maxJsonStringLength"; public static ObjectMapper getObjectMapper() { ObjectMapper mapper = new ObjectMapper(); mapper.configure(MapperFeature.OVERRIDE_PUBLIC_ACCESS_MODIFIERS, false); mapper.configure(MapperFeature.CAN_OVERRIDE_ACCESS_MODIFIERS, false); mapper.enable(DeserializationFeature.USE_BIG_DECIMAL_FOR_FLOATS); // override the maxStringLength value in ObjectMapper int maxJsonStringLength = SystemUtil.convertSystemPropertyToIntValue( MAX_JSON_STRING_LENGTH_JVM, DEFAULT_MAX_JSON_STRING_LEN); mapper .getFactory() .setStreamReadConstraints( StreamReadConstraints.builder().maxStringLength(maxJsonStringLength).build()); return mapper; } public static ObjectMapper getObjectMapperForSession(SFBaseSession session) { ObjectMapper mapper = getObjectMapper(); if (session != null && session.getCommonParameters() != null) { // Set the mapper to use the session's object mapper settings Object dateOutputFormat = session.getCommonParameters().get("DATE_OUTPUT_FORMAT"); if (dateOutputFormat != null) { String dateFormat = SnowflakeDateTimeFormat.fromSqlFormat(String.valueOf(dateOutputFormat)) .toSimpleDateTimePattern(); mapper.setDateFormat(new SimpleDateFormat(dateFormat)); } else { log.debug("DATE_OUTPUT_FORMAT is not set in session parameters."); } } else { log.debug("Initialized object mapper without session or parameter settings."); } return mapper; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/OpaqueContextDTO.java ================================================ package net.snowflake.client.internal.core; import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.annotation.JsonProperty; /** * This context is opaque to the JDBC driver. It is generated by the Cloud service and pushed back * on each request invocation. */ public class OpaqueContextDTO { @JsonInclude(JsonInclude.Include.NON_NULL) private String base64Data; @JsonCreator public OpaqueContextDTO(@JsonProperty("base64Data") String base64Data) { this.base64Data = base64Data; } public String getBase64Data() { return base64Data; } public void setBase64Data(String base64Data) { this.base64Data = base64Data; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/ParameterBindingDTO.java ================================================ package net.snowflake.client.internal.core; import net.snowflake.client.internal.jdbc.BindingParameterMetadata; /** This class represents a binding object passed to server side. 
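 *
 * <p>A minimal construction sketch (type and values are illustrative only):
 *
 * <pre>{@code
 * ParameterBindingDTO singleBind = new ParameterBindingDTO("TEXT", "hello");
 * ParameterBindingDTO arrayBind = new ParameterBindingDTO("TEXT", new String[] {"a", "b"});
 * }</pre>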
*/ public class ParameterBindingDTO { /** Type of binding */ private String type; private String fmt; private BindingParameterMetadata schema; /** Value is a String object if it's a single bind, otherwise is an array of String */ private Object value; public ParameterBindingDTO( String fmt, String type, Object value, BindingParameterMetadata schema) { this.fmt = fmt; this.type = type; this.value = value; this.schema = schema; } public ParameterBindingDTO(String fmt, String type, Object value) { this(fmt, type, value, null); } public ParameterBindingDTO(String type, Object value) { this(null, type, value, null); } public Object getValue() { return value; } public String getType() { return type; } public void setType(String type) { this.type = type; } public void setValue(Object value) { this.value = value; } public String getFmt() { return fmt; } public void setFmt(String fmt) { this.fmt = fmt; } public BindingParameterMetadata getSchema() { return schema; } public void setSchema(BindingParameterMetadata schema) { this.schema = schema; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/PrivateLinkDetector.java ================================================ package net.snowflake.client.internal.core; public class PrivateLinkDetector { /** * We can only tell that private link is enabled for certain hosts when the hostname contains the * word 'privatelink'; we don't have a good way of telling whether a private link connection is * expected for, e.g., internal stages. * * @param host host * @return true if the host is considered a privatelink environment */ public static boolean isPrivateLink(String host) { return host.toLowerCase().contains(".privatelink.snowflakecomputing."); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/QueryContextCache.java ================================================ package net.snowflake.client.internal.core; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import java.io.IOException; import java.util.ArrayList; import java.util.Comparator; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.TreeSet; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; /** * Most Recently Used and Priority based cache. A separate cache for each connection in the driver. */ public class QueryContextCache { private final int capacity; // Capacity of the cache private final HashMap<Long, QueryContextElement> idMap; // Map for id and QCC private final TreeSet<QueryContextElement> treeSet; // Order data as per priority private final HashMap<Long, QueryContextElement> priorityMap; // Map for priority and QCC private final HashMap<Long, QueryContextElement> newPriorityMap; // Intermediate map for priority and QCC for current round of merging private static final SFLogger logger = SFLoggerFactory.getLogger(QueryContextCache.class); private static final ObjectMapper jsonObjectMapper = new ObjectMapper(); /** * Constructor. * * @param capacity Maximum capacity of the cache. */ public QueryContextCache(int capacity) { this.capacity = capacity; idMap = new HashMap<>(); priorityMap = new HashMap<>(); newPriorityMap = new HashMap<>(); treeSet = new TreeSet<>( Comparator.comparingLong(QueryContextElement::getPriority) .thenComparingLong(QueryContextElement::getId) .thenComparingLong(QueryContextElement::getReadTimestamp)); } /** * Merge a new element that comes from the server into the existing cache. Merge is based on read * timestamp for the same id, and on priority for two different ids.
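 *
 * <p>A worked example: merging {@code (id=1, readTimestamp=10, priority=0)} into a cache that
 * already holds {@code (id=1, readTimestamp=5, priority=0)} overwrites that entry in place;
 * merging {@code (id=2, readTimestamp=10, priority=0)} instead evicts the id-1 entry, because
 * priority 0 is already taken by a different id.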
 * * @param id Database id. * @param readTimestamp Last time the metadata was read from FDB. * @param priority 0 to N number, where 0 is the highest priority. Eviction policy is based on * priority. * @param context Opaque query context. */ void merge(long id, long readTimestamp, long priority, String context) { if (idMap.containsKey(id)) { // ID found in the cache QueryContextElement qce = idMap.get(id); if (readTimestamp > qce.readTimestamp) { if (qce.priority == priority) { // Same priority, overwrite new data at same place qce.readTimestamp = readTimestamp; qce.context = context; } else { // Change in priority QueryContextElement newQCE = new QueryContextElement(id, readTimestamp, priority, context); replaceQCE(qce, newQCE); } // new priority } // new data is recent else if (readTimestamp == qce.readTimestamp && qce.priority != priority) { // Same read timestamp but change in priority QueryContextElement newQCE = new QueryContextElement(id, readTimestamp, priority, context); replaceQCE(qce, newQCE); } } // id found else { // new id if (priorityMap.containsKey(priority)) { // Same priority with different id QueryContextElement qce = priorityMap.get(priority); // Replace with new data QueryContextElement newQCE = new QueryContextElement(id, readTimestamp, priority, context); replaceQCE(qce, newQCE); } else { // new priority // Add new element in the cache QueryContextElement newQCE = new QueryContextElement(id, readTimestamp, priority, context); addQCE(newQCE); } } } /** Sync the newPriorityMap with the priorityMap at the end of the current round of merge */ void syncPriorityMap() { logger.debug( "syncPriorityMap called priorityMap size: {}, newPriorityMap size: {}", priorityMap.size(), newPriorityMap.size()); for (Map.Entry<Long, QueryContextElement> entry : newPriorityMap.entrySet()) { priorityMap.put(entry.getKey(), entry.getValue()); } // clear the newPriorityMap for the next round of QCC merge (a round consists of multiple entries) newPriorityMap.clear(); } /** * After the merge, loop through the priority list and make sure the cache holds at most * {@code capacity} elements. Remove all excess elements from the list based on priority. */ void checkCacheCapacity() { logger.debug( "checkCacheCapacity() called. treeSet size: {} cache capacity: {}", treeSet.size(), capacity); if (treeSet.size() > capacity) { // remove elements based on priority while (treeSet.size() > capacity) { QueryContextElement qce = treeSet.last(); removeQCE(qce); } } logger.debug( "checkCacheCapacity() returns. treeSet size: {} cache capacity: {}", treeSet.size(), capacity); } /** Clear the cache. */ public void clearCache() { logger.trace("clearCache() called"); idMap.clear(); priorityMap.clear(); treeSet.clear(); logger.trace("clearCache() returns. Number of entries in cache now: {}", treeSet.size()); } /** * @param data the QueryContext object serialized as a JSON string */ public void deserializeQueryContextJson(String data) { synchronized (this) { // Log existing cache entries logCacheEntries(); if (data == null || data.length() == 0) { // Clear the cache clearCache(); return; } try { JsonNode rootNode = jsonObjectMapper.readTree(data); // Deserialize the entries. The first entry, with priority 0, is the main entry. On the JDBC side, // we save all entries into one list to simplify the logic.
An example JSON is: // { // "entries": [ // { // "id": 0, // "read_timestamp": 123456789, // "priority": 0, // "context": "base64 encoded context" // }, // { // "id": 1, // "read_timestamp": 123456789, // "priority": 1, // "context": "base64 encoded context" // }, // { // "id": 2, // "read_timestamp": 123456789, // "priority": 2, // "context": "base64 encoded context" // } // ] // } JsonNode entriesNode = rootNode.path("entries"); if (entriesNode != null && entriesNode.isArray()) { for (JsonNode entryNode : entriesNode) { QueryContextElement entry = deserializeQueryContextElement(entryNode); if (entry != null) { merge(entry.id, entry.readTimestamp, entry.priority, entry.context); } else { logger.warn( "deserializeQueryContextJson: deserializeQueryContextElement encountered a mismatched field type. Clear the QueryContextCache."); clearCache(); return; } } // after merging all entries, sync the internal priority map to the priority map. Because of // the priority switch from the GS side, // there could be a priority key conflict if we operated directly on the priorityMap during // a round of merge. syncPriorityMap(); } } catch (Exception e) { logger.debug("deserializeQueryContextJson: Exception: {}", e.getMessage()); // Not rethrowing. Clear the cache, as an incomplete merge can lead to unexpected behavior. clearCache(); } // After merging all entries, truncate to capacity checkCacheCapacity(); // Log existing cache entries logCacheEntries(); } // Synchronized } private static QueryContextElement deserializeQueryContextElement(JsonNode node) throws IOException { QueryContextElement entry = new QueryContextElement(); JsonNode idNode = node.path("id"); if (idNode.isNumber()) { entry.setId(idNode.asLong()); } else { logger.warn("deserializeQueryContextElement: `id` field is not Number type"); return null; } JsonNode timestampNode = node.path("timestamp"); if (timestampNode.isNumber()) { entry.setReadTimestamp(timestampNode.asLong()); } else { logger.warn("deserializeQueryContextElement: `timestamp` field is not Long type"); return null; } JsonNode priorityNode = node.path("priority"); if (priorityNode.isNumber()) { entry.setPriority(priorityNode.asLong()); } else { logger.warn("deserializeQueryContextElement: `priority` field is not Long type"); return null; } JsonNode contextNode = node.path("context"); if (contextNode.isTextual()) { String contextBytes = contextNode.asText(); entry.setContext(contextBytes); } else if (contextNode.isEmpty()) { // Currently the OpaqueContext field is empty in the JSON received from GS. In the future, it // will be filled with an OpaqueContext object in base64 format. logger.debug("deserializeQueryContextElement `context` field is empty"); } else { logger.warn("deserializeQueryContextElement: `context` field is not String type"); return null; } return entry; } /** * Deserialize the QueryContext cache from a QueryContextDTO object. This function currently is * only used in QueryContextCacheTest.java where we check that after serialization and * deserialization, the cache is the same as before. * * @param queryContextDTO QueryContextDTO to deserialize.
*/ public void deserializeQueryContextDTO(QueryContextDTO queryContextDTO) { synchronized (this) { // Log existing cache entries logCacheEntries(); if (queryContextDTO == null) { // Clear the cache clearCache(); // Log existing cache entries logCacheEntries(); return; } try { List<QueryContextEntryDTO> entries = queryContextDTO.getEntries(); if (entries != null) { for (QueryContextEntryDTO entryDTO : entries) { // The main entry priority will always be 0, we simply save a list of // QueryContextEntryDTO in QueryContextDTO QueryContextElement entry = deserializeQueryContextElementDTO(entryDTO); merge(entry.id, entry.readTimestamp, entry.priority, entry.context); logCacheEntries(); } } // after merging all entries, sync the internal priority map to the priority map. Because of // the priority switch from the GS side, // there could be a priority key conflict if we operated directly on the priorityMap during a // round of merge. syncPriorityMap(); } catch (Exception e) { logger.debug("deserializeQueryContextDTO: Exception: {}", e.getMessage()); // Not rethrowing. Clear the cache, as an incomplete merge can lead to unexpected behavior. clearCache(); } // After merging all entries, truncate to capacity checkCacheCapacity(); // Log existing cache entries logCacheEntries(); } // Synchronized } private static QueryContextElement deserializeQueryContextElementDTO( QueryContextEntryDTO entryDTO) throws IOException { QueryContextElement entry = new QueryContextElement( entryDTO.getId(), entryDTO.getTimestamp(), entryDTO.getPriority(), entryDTO.getContext().getBase64Data()); return entry; } /** * Serialize the QueryContext cache to a QueryContextDTO object, which can be serialized to JSON * automatically later. * * @return {@link QueryContextDTO} */ public QueryContextDTO serializeQueryContextDTO() { synchronized (this) { // Log existing cache entries logCacheEntries(); TreeSet<QueryContextElement> elements = getElements(); if (elements.size() == 0) { return null; } try { QueryContextDTO queryContextDTO = new QueryContextDTO(); List<QueryContextEntryDTO> entries = new ArrayList<>(); // the first element is the main entry with priority 0. We use a list of // QueryContextEntryDTO to store all entries in QueryContextDTO // to simplify the JDBC side QueryContextCache design. for (final QueryContextElement elem : elements) { QueryContextEntryDTO queryContextElementDTO = serializeQueryContextEntryDTO(elem); entries.add(queryContextElementDTO); } queryContextDTO.setEntries(entries); return queryContextDTO; } catch (Exception e) { logger.debug("serializeQueryContextDTO(): Exception: {}", e.getMessage()); return null; } } } private QueryContextEntryDTO serializeQueryContextEntryDTO(QueryContextElement entry) throws IOException { // OpaqueContextDTO contains a base64 encoded byte array. On the JDBC side, we do not decode // and re-encode it. QueryContextEntryDTO entryDTO = new QueryContextEntryDTO( entry.getId(), entry.getReadTimestamp(), entry.getPriority(), new OpaqueContextDTO(entry.getContext())); return entryDTO; } /** * @param id the id of the element * @param timestamp the last update timestamp * @param priority the priority of the element * @param opaqueContext the binary data of the opaque context * @return a query context element */ private static QueryContextElement createElement( long id, long timestamp, long priority, String opaqueContext) { return new QueryContextElement(id, timestamp, priority, opaqueContext); } /** * Add an element to the cache.
 * * @param qce element to add */ private void addQCE(QueryContextElement qce) { idMap.put(qce.id, qce); priorityMap.put(qce.priority, qce); treeSet.add(qce); } /** * Remove an element from the cache. * * @param qce element to remove. */ private void removeQCE(QueryContextElement qce) { treeSet.remove(qce); priorityMap.remove(qce.priority); idMap.remove(qce.id); } /** * Replace a cache element with a new response element: remove the old element that exists in the * cache and add the newly received element. * * @param oldQCE an element existing in the cache * @param newQCE a new element just received. */ private void replaceQCE(QueryContextElement oldQCE, QueryContextElement newQCE) { // Remove old element from the cache removeQCE(oldQCE); // Add new element in the cache addQCE(newQCE); } /** * Get all elements in the cache in the order of the priority. * * @return TreeSet containing cache elements */ private TreeSet<QueryContextElement> getElements() { return treeSet; } int getSize() { return treeSet.size(); } void getElements(long[] ids, long[] readTimestamps, long[] priorities, String[] contexts) { TreeSet<QueryContextElement> elems = getElements(); int i = 0; for (QueryContextElement elem : elems) { ids[i] = elem.id; readTimestamps[i] = elem.readTimestamp; priorities[i] = elem.priority; contexts[i] = elem.context; i++; } } /** For debugging purposes, log all entries in the cache. */ void logCacheEntries() { if (logger.isDebugEnabled()) { TreeSet<QueryContextElement> elements = getElements(); for (final QueryContextElement elem : elements) { logger.debug( " Cache Entry: id: {} readTimestamp: {} priority: {}", elem.id, elem.readTimestamp, elem.priority); } } } /** Query context information. */ private static class QueryContextElement implements Comparable<QueryContextElement> { long id; // database id as key. (bigint) long readTimestamp; // When the query context was read (bigint). Compared for the same id. long priority; // Priority of the query context (bigint). Compared for different ids. String context; // Opaque information (varbinary). public QueryContextElement() { // Default constructor } /** * Constructor. * * @param id database id * @param readTimestamp Server time when this entry was read * @param priority Priority of this entry w.r.t. other ids * @param context Opaque query context, used by the query processor in the server. */ public QueryContextElement(long id, long readTimestamp, long priority, String context) { this.id = id; this.readTimestamp = readTimestamp; this.priority = priority; this.context = context; } @Override public boolean equals(Object obj) { if (obj == this) { return true; } if (!(obj instanceof QueryContextElement)) { return super.equals(obj); } QueryContextElement other = (QueryContextElement) obj; return (id == other.id && readTimestamp == other.readTimestamp && priority == other.priority && context.equals(other.context)); } @Override public int hashCode() { int hash = 31; hash = hash * 31 + (int) id; hash += (hash * 31) + (int) readTimestamp; hash += (hash * 31) + (int) priority; hash += (hash * 31) + context.hashCode(); return hash; } /** * Keep elements in ascending order of priority. This method is called by TreeSet. * * @param obj the object to be compared. * @return 0 if equal, -1 if this element is less than the new element, otherwise 1. */ public int compareTo(QueryContextElement obj) { return (priority == obj.priority) ? 0 : (((priority - obj.priority) < 0) ?
-1 : 1); } public void setId(long id) { this.id = id; } public void setPriority(long priority) { this.priority = priority; } public void setContext(String context) { this.context = context; } public void setReadTimestamp(long readTimestamp) { this.readTimestamp = readTimestamp; } public long getId() { return id; } public long getReadTimestamp() { return readTimestamp; } public long getPriority() { return priority; } public String getContext() { return context; } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/QueryContextDTO.java ================================================ package net.snowflake.client.internal.core; import com.fasterxml.jackson.annotation.JsonInclude; import java.util.List; // The POJO object used by both JDBC and the Cloud service to exchange opaque information. @JsonInclude(JsonInclude.Include.NON_NULL) public class QueryContextDTO { // QueryContextDTO is a list of QueryContextEntryDTO. The first entry is the main entry with // priority 0. private List<QueryContextEntryDTO> entries; public QueryContextDTO() { entries = null; } public QueryContextDTO(List<QueryContextEntryDTO> entries) { this.entries = entries; } public List<QueryContextEntryDTO> getEntries() { return entries; } public void setEntries(List<QueryContextEntryDTO> entries) { this.entries = entries; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/QueryContextEntryDTO.java ================================================ package net.snowflake.client.internal.core; /** * An entry in the set of query context exchanged with Cloud Services. This includes a domain * identifier (id), a timestamp that is monotonically increasing, a priority for eviction, and the * opaque information sent from the Cloud service. */ public class QueryContextEntryDTO { private long id; private long timestamp; private long priority; private OpaqueContextDTO context; public QueryContextEntryDTO() { // empty constructor } public QueryContextEntryDTO(long id, long timestamp, long priority, OpaqueContextDTO context) { this.id = id; this.timestamp = timestamp; this.priority = priority; this.context = context; } public long getId() { return id; } public void setId(long id) { this.id = id; } public long getTimestamp() { return timestamp; } public void setTimestamp(long timestamp) { this.timestamp = timestamp; } public long getPriority() { return priority; } public void setPriority(long priority) { this.priority = priority; } public OpaqueContextDTO getContext() { return context; } public void setContext(OpaqueContextDTO context) { this.context = context; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/QueryExecDTO.java ================================================ package net.snowflake.client.internal.core; import java.util.Map; /** Body of a query request */ public class QueryExecDTO { private String sqlText; @Deprecated private Integer sequenceId; private Map bindings; private String bindStage; private boolean describeOnly; private Map parameters; // Optional query context sent to the JDBC driver from the Cloud Service.
private QueryContextDTO queryContextDTO; private String describedJobId; private long querySubmissionTime; private boolean isInternal; // Boolean value that, if true, indicates query should be asynchronous private boolean asyncExec; public QueryExecDTO( String sqlText, boolean describeOnly, Integer sequenceId, Map bindings, String bindStage, Map parameters, QueryContextDTO queryContext, long querySubmissionTime, boolean internal, boolean asyncExec) { this.sqlText = sqlText; this.describeOnly = describeOnly; this.sequenceId = sequenceId; this.bindings = bindings; this.bindStage = bindStage; this.parameters = parameters; this.queryContextDTO = queryContext; this.querySubmissionTime = querySubmissionTime; this.isInternal = internal; this.asyncExec = asyncExec; // indicates whether query should be asynchronous } public String getSqlText() { return sqlText; } public void setSqlText(String sqlText) { this.sqlText = sqlText; } @Deprecated public Integer getSequenceId() { return sequenceId; } @Deprecated public void setSequenceId(Integer sequenceId) { this.sequenceId = sequenceId; } public Map getBindings() { return bindings; } public void setBindings(Map bindings) { this.bindings = bindings; } public String getBindStage() { return bindStage; } public void setBindStage(String bindStage) { this.bindStage = bindStage; } public boolean isDescribeOnly() { return describeOnly; } public void setDescribeOnly(boolean describeOnly) { this.describeOnly = describeOnly; } public Map getParameters() { return parameters; } public void setParameters(Map parameters) { this.parameters = parameters; } public QueryContextDTO getqueryContextDTO() { return queryContextDTO; } public void queryContextDTO(QueryContextDTO queryContext) { this.queryContextDTO = queryContext; } public String getDescribedJobId() { return describedJobId; } public void setDescribedJobId(String describedJobId) { this.describedJobId = describedJobId; } public long getQuerySubmissionTime() { return querySubmissionTime; } public void setQuerySubmissionTime(long querySubmissionTime) { this.querySubmissionTime = querySubmissionTime; } public void setIsInternal(boolean isInternal) { this.isInternal = isInternal; } public boolean getIsInternal() { return this.isInternal; } public void setAsyncExec(boolean asyncExec) { this.asyncExec = asyncExec; } public boolean getAsyncExec() { return this.asyncExec; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/QueryResultFormat.java ================================================ package net.snowflake.client.internal.core; import java.util.Optional; public enum QueryResultFormat { JSON("json"), ARROW("arrow"); private String name; QueryResultFormat(String name) { this.name = name; } public static Optional lookupByName(String n) { for (QueryResultFormat format : QueryResultFormat.values()) { if (format.name.equalsIgnoreCase(n)) { return Optional.of(format); } } return Optional.empty(); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/ResultUtil.java ================================================ package net.snowflake.client.internal.core; import com.fasterxml.jackson.databind.JsonNode; import java.math.BigDecimal; import java.sql.Date; import java.sql.SQLException; import java.sql.Timestamp; import java.sql.Types; import java.util.ArrayList; import java.util.Arrays; import java.util.Calendar; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; 
import java.util.TimeZone; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.log.ArgSupplier; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.common.core.SFTime; import net.snowflake.common.core.SFTimestamp; import net.snowflake.common.core.SnowflakeDateTimeFormat; import net.snowflake.common.util.TimeUtil; public class ResultUtil { private static final SFLogger logger = SFLoggerFactory.getLogger(ResultUtil.class); public static final int MILLIS_IN_ONE_DAY = 86400000; public static final int DEFAULT_SCALE_OF_SFTIME_FRACTION_SECONDS = 3; // default scale for SFTime fraction seconds // Map of default parameter values, used by effectiveParamValue(). private static final Map defaultParameters; static { Map map = new HashMap<>(); // IMPORTANT: This must be consistent with CommonParameterEnum map.put("TIMEZONE", "America/Los_Angeles"); map.put("TIMESTAMP_OUTPUT_FORMAT", "DY, DD MON YYYY HH24:MI:SS TZHTZM"); map.put("TIMESTAMP_NTZ_OUTPUT_FORMAT", ""); map.put("TIMESTAMP_LTZ_OUTPUT_FORMAT", ""); map.put("TIMESTAMP_TZ_OUTPUT_FORMAT", ""); map.put("DATE_OUTPUT_FORMAT", "YYYY-MM-DD"); map.put("TIME_OUTPUT_FORMAT", "HH24:MI:SS"); map.put("CLIENT_HONOR_CLIENT_TZ_FOR_TIMESTAMP_NTZ", Boolean.TRUE); map.put("CLIENT_DISABLE_INCIDENTS", Boolean.TRUE); map.put("BINARY_OUTPUT_FORMAT", "HEX"); defaultParameters = map; } /** * Returns the effective parameter value, using the value explicitly provided in parameters, or * the default if absent * * @param parameters keyed in parameter name and valued in parameter value * @param paramName Parameter to return the value of * @return Effective value */ public static Object effectiveParamValue(Map parameters, String paramName) { String upper = paramName.toUpperCase(); Object value = parameters.get(upper); if (value != null) { return value; } value = defaultParameters.get(upper); if (value != null) { return value; } logger.debug("Unknown Common Parameter: {}", paramName); return null; } /** * Helper function building a formatter for a specialized timestamp type. Note that it will be * based on either the 'param' value if set, or the default format provided. 
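 *
 * <p>A minimal usage sketch, assuming {@code parameters} is a session parameter map (the format
 * strings are illustrative only):
 *
 * <pre>{@code
 * SnowflakeDateTimeFormat formatter =
 *     ResultUtil.specializedFormatter(
 *         parameters, "timestamp_ntz", "TIMESTAMP_NTZ_OUTPUT_FORMAT", "YYYY-MM-DD HH24:MI:SS.FF3");
 * }</pre>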
* * @param parameters keyed in parameter name and valued in parameter value * @param id id * @param param timestamp output format param * @param defaultFormat default format * @return {@link SnowflakeDateTimeFormat} */ public static SnowflakeDateTimeFormat specializedFormatter( Map parameters, String id, String param, String defaultFormat) { String sqlFormat = SnowflakeDateTimeFormat.effectiveSpecializedTimestampFormat( (String) effectiveParamValue(parameters, param), defaultFormat); SnowflakeDateTimeFormat formatter = SnowflakeDateTimeFormat.fromSqlFormat(sqlFormat); logger.debug( "sql {} format: {}, java {} format: {}", id, sqlFormat, id, (ArgSupplier) formatter::toSimpleDateTimePattern); return formatter; } /** * Adjust timestamp for dates before 1582-10-05 * * @param timestamp needs to be adjusted * @return adjusted timestamp */ public static Timestamp adjustTimestamp(Timestamp timestamp) { long milliToAdjust = ResultUtil.msDiffJulianToGregorian(timestamp); if (milliToAdjust != 0) { logger.debug( "adjust timestamp by {} days", (ArgSupplier) () -> milliToAdjust / MILLIS_IN_ONE_DAY); Timestamp newTimestamp = new Timestamp(timestamp.getTime() + milliToAdjust); newTimestamp.setNanos(timestamp.getNanos()); return newTimestamp; } else { return timestamp; } } /** * For dates before 1582-10-05, calculate the number of millis to adjust. * * @param date date before 1582-10-05 * @return millis needs to be adjusted */ public static long msDiffJulianToGregorian(java.util.Date date) { // if date is before 1582-10-05, apply the difference // by (H-(H/4)-2) where H is the hundreds digit of the year according to: // http://en.wikipedia.org/wiki/Gregorian_calendar if (date.getTime() < -12220156800000L) { // get the year of the date Calendar cal = Calendar.getInstance(); cal.setTime(date); int year = cal.get(Calendar.YEAR); int month = cal.get(Calendar.MONTH); int dayOfMonth = cal.get(Calendar.DAY_OF_MONTH); // for dates on or before 02/28, use the previous year otherwise use // current year. // TODO: we need to revisit this since there is a potential issue using // the year/month/day from the calendar since that may not be the same // year/month/day as the original date (which is the problem we are // trying to solve here). if (month == 0 || (month == 1 && dayOfMonth <= 28)) { year = year - 1; } int hundreds = year / 100; int differenceInDays = hundreds - (hundreds / 4) - 2; return differenceInDays * MILLIS_IN_ONE_DAY; } else { return 0; } } /** * Convert a timestamp internal value (scaled number of seconds + fractional seconds) into a * SFTimestamp. * * @param timestampStr timestamp object * @param scale timestamp scale * @param internalColumnType snowflake timestamp type * @param resultVersion For new result version, timestamp with timezone is formatted as the * seconds since epoch with fractional part in the decimal followed by time zone index. E.g.: * "123.456 1440". Here 123.456 is the * number of seconds since epoch and 1440 is the * timezone index. 
* @param sessionTZ session timezone * @param session session object * @return converted snowflake timestamp object * @throws SFException if timestampStr is an invalid timestamp */ public static SFTimestamp getSFTimestamp( String timestampStr, int scale, int internalColumnType, long resultVersion, TimeZone sessionTZ, SFBaseSession session) throws SFException { logger.trace("Timestamp getTimestamp(int columnIndex)", false); try { TimeUtil.TimestampType tsType = null; switch (internalColumnType) { case Types.TIMESTAMP: tsType = TimeUtil.TimestampType.TIMESTAMP_NTZ; break; case SnowflakeType.EXTRA_TYPES_TIMESTAMP_TZ: tsType = TimeUtil.TimestampType.TIMESTAMP_TZ; logger.trace( "Handle timestamp with timezone {} encoding: {}", (resultVersion > 0 ? "new" : "old"), timestampStr); break; case SnowflakeType.EXTRA_TYPES_TIMESTAMP_LTZ: tsType = TimeUtil.TimestampType.TIMESTAMP_LTZ; break; } // Construct a timestamp return TimeUtil.getSFTimestamp(timestampStr, scale, tsType, resultVersion, sessionTZ); } catch (IllegalArgumentException ex) { throw new SFException(ErrorCode.IO_ERROR, "Invalid timestamp value: " + timestampStr); } } /** * Convert a time internal value (scaled number of seconds + fractional seconds) into an SFTime. * *

Example: getSFTime("123.456", 5) returns an SFTime for 00:02:03.45600. * * @param obj time object * @param scale time scale * @param session session object * @return snowflake time object * @throws SFException if time is invalid */ public static SFTime getSFTime(String obj, int scale, SFBaseSession session) throws SFException { try { return TimeUtil.getSFTime(obj, scale); } catch (IllegalArgumentException ex) { throw new SFException(ErrorCode.INTERNAL_ERROR, "Invalid time value: " + obj); } } /** * Convert a time value into a string * * @param sft snowflake time object * @param scale time scale * @param timeFormatter time formatter * @return time in string */ public static String getSFTimeAsString( SFTime sft, int scale, SnowflakeDateTimeFormat timeFormatter) { return timeFormatter.format(sft, scale); } /** * Convert a boolean to a string * * @param bool boolean * @return boolean in string */ public static String getBooleanAsString(boolean bool) { return bool ? "TRUE" : "FALSE"; } /** * Convert an SFTimestamp to a string value. * * @param sfTS snowflake timestamp object * @param columnType internal snowflake type * @param scale timestamp scale * @param timestampNTZFormatter snowflake timestamp ntz format * @param timestampLTZFormatter snowflake timestamp ltz format * @param timestampTZFormatter snowflake timestamp tz format * @param session session object * @return timestamp in string in desired format * @throws SFException if the timestamp formatter is missing */ public static String getSFTimestampAsString( SFTimestamp sfTS, int columnType, int scale, SnowflakeDateTimeFormat timestampNTZFormatter, SnowflakeDateTimeFormat timestampLTZFormatter, SnowflakeDateTimeFormat timestampTZFormatter, SFBaseSession session) throws SFException { // Derive the timestamp formatter to use SnowflakeDateTimeFormat formatter; if (columnType == Types.TIMESTAMP || columnType == SnowflakeType.EXTRA_TYPES_TIMESTAMP_NTZ) { formatter = timestampNTZFormatter; } else if (columnType == SnowflakeType.EXTRA_TYPES_TIMESTAMP_LTZ) { formatter = timestampLTZFormatter; } else // TZ { formatter = timestampTZFormatter; } if (formatter == null) { throw new SFException(ErrorCode.INTERNAL_ERROR, "missing timestamp formatter"); } try { Timestamp adjustedTimestamp = ResultUtil.adjustTimestamp(sfTS.getTimestamp()); return formatter.format(adjustedTimestamp, sfTS.getTimeZone(), scale); } catch (SFTimestamp.TimestampOperationNotAvailableException e) { // this timestamp doesn't fit into a Java timestamp, and therefore we // can't format it (for now). Just print it out as seconds since epoch. BigDecimal nanosSinceEpoch = sfTS.getNanosSinceEpoch(); BigDecimal secondsSinceEpoch = nanosSinceEpoch.scaleByPowerOfTen(-9); return secondsSinceEpoch.setScale(scale).toPlainString(); } } /** * Convert a date value into a string * * @param date date to be converted * @param dateFormatter date format * @return date in string */ public static String getDateAsString(Date date, SnowflakeDateTimeFormat dateFormatter) { return dateFormatter.format(date, TimeZone.getDefault()); } /** * Adjust dates before 1582-10-05 * * @param date date before 1582-10-05 * @return adjusted date */ public static Date adjustDate(Date date) { long milliToAdjust = ResultUtil.msDiffJulianToGregorian(date); if (milliToAdjust != 0) { // add the difference to the new date return new Date(date.getTime() + milliToAdjust); } else { return date; } } /** * Convert a date internal object to a Date object in the specified timezone.
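 *
 * <p>The wire value is the number of days since the epoch, so, as a minimal sketch assuming a
 * {@code session} is at hand, {@code getDate("0", TimeZone.getTimeZone("UTC"), session)} yields
 * 1970-01-01.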
 * * @param str snowflake date object * @param tz timezone we want to convert to * @param session snowflake session object * @return java date object * @throws SFException if date is invalid */ @Deprecated public static Date getDate(String str, TimeZone tz, SFBaseSession session) throws SFException { try { long milliSecsSinceEpoch = Long.valueOf(str) * MILLIS_IN_ONE_DAY; SFTimestamp tsInUTC = SFTimestamp.fromDate(new Date(milliSecsSinceEpoch), 0, TimeZone.getTimeZone("UTC")); SFTimestamp tsInClientTZ = tsInUTC.moveToTimeZone(tz); logger.debug( "getDate: tz offset={}", (ArgSupplier) () -> tsInClientTZ.getTimeZone().getOffset(tsInClientTZ.getTime())); // return the date adjusted to the JVM default time zone Date preDate = new Date(tsInClientTZ.getTime()); // if date is on or before 1582-10-04, apply the difference // by (H-H/4-2) where H is the hundreds digit of the year according to: // http://en.wikipedia.org/wiki/Gregorian_calendar Date newDate = adjustDate(preDate); logger.debug( "Adjust date from {} to {}", (ArgSupplier) preDate::toString, (ArgSupplier) newDate::toString); return newDate; } catch (NumberFormatException ex) { throw new SFException(ErrorCode.INTERNAL_ERROR, "Invalid date value: " + str); } } /** * Convert snowflake bool to java boolean * * @param str boolean type in string representation * @return true if the value indicates true, otherwise false */ public static boolean getBoolean(String str) { return str.equalsIgnoreCase(Boolean.TRUE.toString()) || str.equals("1"); } /** * Calculate the number of rows updated given a result set. The result format is interpreted * based on the result set's statement type. * * @param resultSet result set to extract the update count from * @return the number of rows updated * @throws SFException if failed to calculate the update count * @throws SQLException if failed to calculate the update count */ public static long calculateUpdateCount(SFBaseResultSet resultSet) throws SFException, SQLException { long updateCount = 0; SFStatementType statementType = resultSet.getStatementType(); if (statementType.isDML()) { while (resultSet.next()) { if (statementType == SFStatementType.COPY) { SFResultSetMetaData resultSetMetaData = resultSet.getMetaData(); int columnIndex = resultSetMetaData.getColumnIndex("rows_loaded"); updateCount += columnIndex == -1 ? 0 : resultSet.getLong(columnIndex + 1); } else if (statementType == SFStatementType.INSERT || statementType == SFStatementType.UPDATE || statementType == SFStatementType.DELETE || statementType == SFStatementType.MERGE || statementType == SFStatementType.MULTI_INSERT) { int columnCount = resultSet.getMetaData().getColumnCount(); for (int i = 0; i < columnCount; i++) { updateCount += resultSet.getLong(i + 1); // add up number of rows updated } } else { updateCount = 0; } } } else { updateCount = statementType.isGenerateResultSet() ? -1 : 0; } return updateCount; } /** * Given a list of Strings, do a case-insensitive search for the target string. Used by * resultSetMetadata to search for the target column name. * * @param source source string list * @param target target string to match * @return index in the source string list that matches the target string; the index starts from * zero */ public static int listSearchCaseInsensitive(List<String> source, String target) { for (int i = 0; i < source.size(); i++) { if (target.equalsIgnoreCase(source.get(i))) { return i; } } return -1; } /** * Return the list of result IDs provided in a result, if available; otherwise return an empty * list.
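 *
 * <p>For example, a {@code data.resultIds} value of {@code "id1,id2"} (ids illustrative) yields a
 * two-element list, since the string is split on commas.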
* * @param result result json * @return list of result IDs which can be used for result scans */ private static List getResultIds(JsonNode result) { JsonNode resultIds = result.path("data").path("resultIds"); if (resultIds.isNull() || resultIds.isMissingNode() || resultIds.asText().isEmpty()) { return Collections.emptyList(); } return new ArrayList<>(Arrays.asList(resultIds.asText().split(","))); } /** * Return the list of result types provided in a result, if available; otherwise return an empty * list. * * @param result result json * @return list of result IDs which can be used for result scans */ private static List getResultTypes(JsonNode result) { JsonNode resultTypes = result.path("data").path("resultTypes"); if (resultTypes.isNull() || resultTypes.isMissingNode() || resultTypes.asText().isEmpty()) { return Collections.emptyList(); } String[] typeStrs = resultTypes.asText().split(","); List res = new ArrayList<>(); for (String typeStr : typeStrs) { long typeId = Long.valueOf(typeStr); res.add(SFStatementType.lookUpTypeById(typeId)); } return res; } /** * Return the list of child results provided in a result, if available; otherwise return an empty * list * * @param session the current session * @param requestId the current request id * @param result result json * @return list of child results * @throws SFException if the number of child IDs does not match child statement types */ public static List getChildResults( SFBaseSession session, String requestId, JsonNode result) throws SFException { List ids = getResultIds(result); List types = getResultTypes(result); if (ids.size() != types.size()) { throw new SFException( ErrorCode.CHILD_RESULT_IDS_AND_TYPES_DIFFERENT_SIZES, ids.size(), types.size()); } List res = new ArrayList<>(); for (int i = 0; i < ids.size(); i++) { res.add(new SFChildResult(ids.get(i), types.get(i))); } return res; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SFArrowResultSet.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.core.StmtUtil.eventHandler; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import static net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.internalCallMarker; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import java.io.ByteArrayInputStream; import java.io.IOException; import java.math.BigDecimal; import java.math.RoundingMode; import java.sql.Array; import java.sql.Date; import java.sql.SQLException; import java.sql.SQLInput; import java.sql.Time; import java.sql.Timestamp; import java.sql.Types; import java.time.Duration; import java.time.Period; import java.util.List; import java.util.Map; import java.util.TimeZone; import java.util.stream.Stream; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.api.resultset.FieldMetadata; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.common.core.SFBinaryFormat; import net.snowflake.client.internal.core.arrow.ArrayConverter; import net.snowflake.client.internal.core.arrow.ArrowVectorConverter; import net.snowflake.client.internal.core.arrow.MapConverter; import net.snowflake.client.internal.core.arrow.StructConverter; import 
net.snowflake.client.internal.core.arrow.StructObjectWrapper;
import net.snowflake.client.internal.core.arrow.VarCharConverter;
import net.snowflake.client.internal.core.arrow.VectorTypeConverter;
import net.snowflake.client.internal.core.json.Converters;
import net.snowflake.client.internal.exception.SnowflakeSQLLoggedException;
import net.snowflake.client.internal.jdbc.ArrowResultChunk;
import net.snowflake.client.internal.jdbc.ArrowResultChunk.ArrowChunkIterator;
import net.snowflake.client.internal.jdbc.SnowflakeResultSetSerializableV1;
import net.snowflake.client.internal.jdbc.telemetry.Telemetry;
import net.snowflake.client.internal.jdbc.telemetry.TelemetryData;
import net.snowflake.client.internal.jdbc.telemetry.TelemetryField;
import net.snowflake.client.internal.jdbc.telemetry.TelemetryUtil;
import net.snowflake.client.internal.log.SFLogger;
import net.snowflake.client.internal.log.SFLoggerFactory;
import net.snowflake.client.internal.util.Converter;
import net.snowflake.common.core.SnowflakeDateTimeFormat;
import net.snowflake.common.core.SqlState;
import org.apache.arrow.memory.RootAllocator;
import org.apache.arrow.vector.util.JsonStringHashMap;

/** Arrow result set implementation */
public class SFArrowResultSet extends SFBaseResultSet implements DataConversionContext {
  private static final SFLogger logger = SFLoggerFactory.getLogger(SFArrowResultSet.class);

  /** iterator over the current arrow result chunk */
  private ArrowChunkIterator currentChunkIterator;

  /** current query id */
  private String queryId;

  /** type of the statement that generated this result set */
  private SFStatementType statementType;

  private boolean totalRowCountTruncated;

  /** true if the first chunk should be sorted */
  private boolean sortResult;

  /** statement that generated the current result set */
  protected SFBaseStatement statement;

  /** is array bind supported */
  private final boolean arrayBindSupported;

  /** index of the next chunk to consume */
  private long nextChunkIndex = 0;

  /** total chunk count, not including the first chunk */
  private final long chunkCount;

  /** chunk downloader */
  private ChunkDownloader chunkDownloader;

  /** time when the first chunk arrived */
  private final long firstChunkTime;

  /** telemetry client to push stats to the server */
  private final Telemetry telemetryClient;

  /**
   * memory allocator for Arrow. Each SFArrowResultSet contains one rootAllocator. This
   * rootAllocator will be cleared and closed when the resultSet is closed
   */
  private RootAllocator rootAllocator;

  /**
   * If the customer wants Timestamp_NTZ values to be stored in UTC time instead of a local/session
   * timezone, set to true
   */
  private boolean treatNTZAsUTC;

  /** Set to true to use wall clock time */
  private boolean useSessionTimezone;

  /**
   * If the customer wants the getDate(int col, Calendar cal) function to format the date with the
   * Calendar timezone, set to true
   */
  private boolean formatDateWithTimezone;

  private Converters converters;

  private final ObjectMapper objectMapper;

  /**
   * Constructor takes a result from the API response that we get from executing a SQL statement.
   *
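   * <p>Informal usage sketch (hypothetical caller code, exception handling omitted; the argument
   * names match the parameters documented below):
   * <pre>{@code
   * SFArrowResultSet rs = new SFArrowResultSet(serializable, session, statement, false);
   * while (rs.next()) {
   *   String firstColumn = rs.getString(1);
   * }
   * }</pre>
   *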

The constructor will initialize the ResultSetMetaData. * * @param resultSetSerializable result data after parsing json * @param session SFBaseSession object * @param statement statement object * @param sortResult true if sort results otherwise false * @throws SQLException exception raised from general SQL layers */ public SFArrowResultSet( SnowflakeResultSetSerializableV1 resultSetSerializable, SFBaseSession session, SFBaseStatement statement, boolean sortResult) throws SQLException { this(resultSetSerializable, session.getTelemetryClient(internalCallMarker()), sortResult); this.converters = new Converters( resultSetSerializable.getTimeZone(), session, resultSetSerializable.getResultVersion(), resultSetSerializable.isHonorClientTZForTimestampNTZ(), resultSetSerializable.getTreatNTZAsUTC(), resultSetSerializable.getUseSessionTimezone(), resultSetSerializable.getFormatDateWithTimeZone(), resultSetSerializable.getBinaryFormatter(), resultSetSerializable.getDateFormatter(), resultSetSerializable.getTimeFormatter(), resultSetSerializable.getTimestampNTZFormatter(), resultSetSerializable.getTimestampLTZFormatter(), resultSetSerializable.getTimestampTZFormatter()); // update the session db/schema/wh/role etc this.statement = statement; session.setDatabase(resultSetSerializable.getFinalDatabaseName()); session.setSchema(resultSetSerializable.getFinalSchemaName()); session.setRole(resultSetSerializable.getFinalRoleName()); session.setWarehouse(resultSetSerializable.getFinalWarehouseName()); treatNTZAsUTC = resultSetSerializable.getTreatNTZAsUTC(); formatDateWithTimezone = resultSetSerializable.getFormatDateWithTimeZone(); useSessionTimezone = resultSetSerializable.getUseSessionTimezone(); // update the driver/session with common parameters from GS SessionUtil.updateSfDriverParamValues( this.parameters, statement.getSFBaseSession(internalCallMarker())); // if server gives a send time, log time it took to arrive if (resultSetSerializable.getSendResultTime() != 0) { long timeConsumeFirstResult = this.firstChunkTime - resultSetSerializable.getSendResultTime(); logMetric(TelemetryField.TIME_CONSUME_FIRST_RESULT, timeConsumeFirstResult); } eventHandler.triggerStateTransition( BasicEvent.QueryState.CONSUMING_RESULT, String.format(BasicEvent.QueryState.CONSUMING_RESULT.getArgString(), queryId, 0)); } /** * This is a minimum initialization for SFArrowResult. Mainly used for testing purpose. 
However, * real prod constructor will call this constructor as well * * @param resultSetSerializable data returned in query response * @param telemetryClient telemetryClient * @param sortResult set if results should be sorted * @throws SQLException if exception encountered */ public SFArrowResultSet( SnowflakeResultSetSerializableV1 resultSetSerializable, Telemetry telemetryClient, boolean sortResult) throws SQLException { this.resultSetSerializable = resultSetSerializable; this.rootAllocator = resultSetSerializable.getRootAllocator(); this.sortResult = sortResult; this.queryId = resultSetSerializable.getQueryId(); this.statementType = resultSetSerializable.getStatementType(); this.totalRowCountTruncated = resultSetSerializable.isTotalRowCountTruncated(); this.parameters = resultSetSerializable.getParameters(); this.chunkCount = resultSetSerializable.getChunkFileCount(); this.chunkDownloader = resultSetSerializable.getChunkDownloader(); this.honorClientTZForTimestampNTZ = resultSetSerializable.isHonorClientTZForTimestampNTZ(); this.resultVersion = resultSetSerializable.getResultVersion(); this.numberOfBinds = resultSetSerializable.getNumberOfBinds(); this.arrayBindSupported = resultSetSerializable.isArrayBindSupported(); this.metaDataOfBinds = resultSetSerializable.getMetaDataOfBinds(); this.telemetryClient = telemetryClient; this.firstChunkTime = System.currentTimeMillis(); this.timestampNTZFormatter = resultSetSerializable.getTimestampNTZFormatter(); this.timestampLTZFormatter = resultSetSerializable.getTimestampLTZFormatter(); this.timestampTZFormatter = resultSetSerializable.getTimestampTZFormatter(); this.dateFormatter = resultSetSerializable.getDateFormatter(); this.timeFormatter = resultSetSerializable.getTimeFormatter(); this.sessionTimeZone = resultSetSerializable.getTimeZone(); this.binaryFormatter = resultSetSerializable.getBinaryFormatter(); this.resultSetMetaData = resultSetSerializable.getSFResultSetMetaData(internalCallMarker()); this.treatNTZAsUTC = resultSetSerializable.getTreatNTZAsUTC(); this.formatDateWithTimezone = resultSetSerializable.getFormatDateWithTimeZone(); this.useSessionTimezone = resultSetSerializable.getUseSessionTimezone(); objectMapper = ObjectMapperFactory.getObjectMapperForSession(session); // sort result set if needed String rowsetBase64 = resultSetSerializable.getFirstChunkStringData(); if (rowsetBase64 == null || rowsetBase64.isEmpty()) { this.currentChunkIterator = ArrowResultChunk.getEmptyChunkIterator(); } else { if (sortResult) { // we don't support sort result when there are offline chunks if (resultSetSerializable.getChunkFileCount() > 0) { throw new SnowflakeSQLLoggedException( queryId, session, ErrorCode.CLIENT_SIDE_SORTING_NOT_SUPPORTED.getMessageCode(), SqlState.FEATURE_NOT_SUPPORTED); } this.currentChunkIterator = getSortedFirstResultChunk(resultSetSerializable.getFirstChunkByteData()) .getIterator(this); } else { this.currentChunkIterator = buildFirstChunk(resultSetSerializable.getFirstChunkByteData()).getIterator(this); } } } private boolean fetchNextRow() throws SnowflakeSQLException { if (sortResult) { return fetchNextRowSorted(); } else { return fetchNextRowUnsorted(); } } /** * Goto next row. If end of current chunk, update currentChunkIterator to the beginning of next * chunk, if any chunk not being consumed yet. 
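   * <p>Informal sketch of the strategy, restating the implementation that follows: if the current
   * chunk has another row, consume it; otherwise, if more chunks remain, free the current chunk
   * and continue with the next one fetched from the ChunkDownloader; otherwise free the current
   * chunk, terminate the downloader, and report end of data.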
   *
   * @return true if there are still rows, false otherwise
   */
  private boolean fetchNextRowUnsorted() throws SnowflakeSQLException {
    boolean hasNext = currentChunkIterator.next();

    if (hasNext) {
      return true;
    } else {
      if (nextChunkIndex < chunkCount) {
        try {
          eventHandler.triggerStateTransition(
              BasicEvent.QueryState.CONSUMING_RESULT,
              String.format(
                  BasicEvent.QueryState.CONSUMING_RESULT.getArgString(), queryId, nextChunkIndex));

          ArrowResultChunk nextChunk = (ArrowResultChunk) chunkDownloader.getNextChunkToConsume();

          if (nextChunk == null) {
            throw new SnowflakeSQLLoggedException(
                queryId,
                session,
                ErrorCode.INTERNAL_ERROR.getMessageCode(),
                SqlState.INTERNAL_ERROR,
                "Expect chunk but got null for chunk index " + nextChunkIndex);
          }

          currentChunkIterator.getChunk().freeData();
          currentChunkIterator = nextChunk.getIterator(this);
          if (currentChunkIterator.next()) {
            logger.debug(
                "Moving to chunk index: {}, row count: {}",
                nextChunkIndex,
                nextChunk.getRowCount());
            nextChunkIndex++;
            return true;
          } else {
            return false;
          }
        } catch (InterruptedException ex) {
          throw new SnowflakeSQLLoggedException(
              queryId, session, ErrorCode.INTERRUPTED.getMessageCode(), SqlState.QUERY_CANCELED);
        }
      } else {
        // always free current chunk
        try {
          currentChunkIterator.getChunk().freeData();
          if (chunkCount > 0) {
            logger.debug("End of chunks", false);
            DownloaderMetrics metrics = chunkDownloader.terminate();
            logChunkDownloaderMetrics(metrics);
          }
        } catch (InterruptedException e) {
          throw new SnowflakeSQLLoggedException(
              queryId, session, ErrorCode.INTERRUPTED.getMessageCode(), SqlState.QUERY_CANCELED);
        }
      }
      return false;
    }
  }

  /**
   * Decode the rowset returned in the query response and load the data into arrow vectors
   *
   * @param firstChunk first chunk of the rowset in arrow format
   * @return result chunk with the arrow data already loaded
   */
  private ArrowResultChunk buildFirstChunk(byte[] firstChunk) throws SQLException {
    ByteArrayInputStream inputStream = new ByteArrayInputStream(firstChunk);

    // create a result chunk
    ArrowResultChunk resultChunk = new ArrowResultChunk("", 0, 0, 0, rootAllocator, session);

    try {
      resultChunk.readArrowStream(inputStream);
    } catch (IOException e) {
      throw new SnowflakeSQLLoggedException(
          queryId,
          session,
          ErrorCode.INTERNAL_ERROR,
          "Failed to " + "load data in first chunk into arrow vector ex: " + e.getMessage());
    }

    return resultChunk;
  }

  /**
   * Decode the rowset returned in the query response, load the data into arrow vectors, and sort
   * the data
   *
   * @param firstChunk first chunk of the rowset in arrow format
   * @return result chunk with the arrow data already loaded
   */
  private ArrowResultChunk getSortedFirstResultChunk(byte[] firstChunk) throws SQLException {
    ArrowResultChunk resultChunk = buildFirstChunk(firstChunk);

    // enable sorted chunk; the sorting happens when the result chunk is ready to consume
    resultChunk.enableSortFirstResultChunk();
    return resultChunk;
  }

  /**
   * Fetch the next row of the first chunk in sorted order. If the result set is huge, the rest of
   * the chunks are ignored.
*/ private boolean fetchNextRowSorted() throws SnowflakeSQLException { boolean hasNext = currentChunkIterator.next(); if (hasNext) { return true; } else { currentChunkIterator.getChunk().freeData(); // no more chunks as sorted is only supported // for one chunk return false; } } @Override public Converters getConverters() { return converters; } @Override public SQLInput createSqlInputForColumn( Object input, Class parentObjectClass, int columnIndex, SFBaseSession session, List fields) { if (parentObjectClass.equals(JsonSqlInput.class)) { return createJsonSqlInputForColumn(input, session, fields); } else { return new ArrowSqlInput((Map) input, session, converters, fields); } } @Override public Date convertToDate(Object object, TimeZone tz) throws SFException { if (object instanceof String) { return convertStringToDate((String) object, tz); } return converters.getStructuredTypeDateTimeConverter().getDate((int) object, tz); } @Override public Time convertToTime(Object object, int scale) throws SFException { if (object instanceof String) { return convertStringToTime((String) object, scale); } return converters.getStructuredTypeDateTimeConverter().getTime((long) object, scale); } @Override public Timestamp convertToTimestamp( Object object, int columnType, int columnSubType, TimeZone tz, int scale) throws SFException { if (object instanceof String) { return convertStringToTimestamp((String) object, columnType, columnSubType, tz, scale); } return converters .getStructuredTypeDateTimeConverter() .getTimestamp( (JsonStringHashMap) object, columnType, columnSubType, tz, scale); } /** * Advance to next row * * @return true if next row exists, false otherwise */ @Override public boolean next() throws SFException, SnowflakeSQLException { if (isClosed()) { return false; } // otherwise try to fetch again if (fetchNextRow()) { row++; if (isLast()) { long timeConsumeLastResult = System.currentTimeMillis() - this.firstChunkTime; logMetric(TelemetryField.TIME_CONSUME_LAST_RESULT, timeConsumeLastResult); } return true; } else { logger.debug("End of result", false); /* * Here we check if the result has been truncated and throw exception if * so. 
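       * A hypothetical illustration: running the JVM with
       * -Dsnowflake.enable_incident_test2=true makes the check below throw
       * MAX_RESULT_LIMIT_EXCEEDED even when no truncation occurred (test hook only).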
*/ if (totalRowCountTruncated || Boolean.TRUE .toString() .equalsIgnoreCase(systemGetProperty("snowflake.enable_incident_test2"))) { throw new SFException(queryId, ErrorCode.MAX_RESULT_LIMIT_EXCEEDED); } // mark end of result return false; } } @Override public byte getByte(int columnIndex) throws SFException { ArrowVectorConverter converter = currentChunkIterator.getCurrentConverter(columnIndex - 1); int index = currentChunkIterator.getCurrentRowInRecordBatch(); wasNull = converter.isNull(index); return converter.toByte(index); } @Override public String getString(int columnIndex) throws SFException { ArrowVectorConverter converter = currentChunkIterator.getCurrentConverter(columnIndex - 1); int index = currentChunkIterator.getCurrentRowInRecordBatch(); wasNull = converter.isNull(index); return converter.toString(index); } @Override public boolean getBoolean(int columnIndex) throws SFException { ArrowVectorConverter converter = currentChunkIterator.getCurrentConverter(columnIndex - 1); int index = currentChunkIterator.getCurrentRowInRecordBatch(); wasNull = converter.isNull(index); return converter.toBoolean(index); } @Override public Period getPeriod(int columnIndex) throws SFException { ArrowVectorConverter converter = currentChunkIterator.getCurrentConverter(columnIndex - 1); int index = currentChunkIterator.getCurrentRowInRecordBatch(); wasNull = converter.isNull(index); return converter.toPeriod(index); } @Override public Duration getDuration(int columnIndex) throws SFException { ArrowVectorConverter converter = currentChunkIterator.getCurrentConverter(columnIndex - 1); int index = currentChunkIterator.getCurrentRowInRecordBatch(); wasNull = converter.isNull(index); return converter.toDuration(index); } @Override public short getShort(int columnIndex) throws SFException { ArrowVectorConverter converter = currentChunkIterator.getCurrentConverter(columnIndex - 1); int index = currentChunkIterator.getCurrentRowInRecordBatch(); wasNull = converter.isNull(index); return converter.toShort(index); } @Override public int getInt(int columnIndex) throws SFException { ArrowVectorConverter converter = currentChunkIterator.getCurrentConverter(columnIndex - 1); int index = currentChunkIterator.getCurrentRowInRecordBatch(); wasNull = converter.isNull(index); return converter.toInt(index); } @Override public long getLong(int columnIndex) throws SFException { ArrowVectorConverter converter = currentChunkIterator.getCurrentConverter(columnIndex - 1); int index = currentChunkIterator.getCurrentRowInRecordBatch(); wasNull = converter.isNull(index); return converter.toLong(index); } @Override public float getFloat(int columnIndex) throws SFException { ArrowVectorConverter converter = currentChunkIterator.getCurrentConverter(columnIndex - 1); int index = currentChunkIterator.getCurrentRowInRecordBatch(); wasNull = converter.isNull(index); return converter.toFloat(index); } @Override public double getDouble(int columnIndex) throws SFException { ArrowVectorConverter converter = currentChunkIterator.getCurrentConverter(columnIndex - 1); int index = currentChunkIterator.getCurrentRowInRecordBatch(); wasNull = converter.isNull(index); return converter.toDouble(index); } @Override public byte[] getBytes(int columnIndex) throws SFException { ArrowVectorConverter converter = currentChunkIterator.getCurrentConverter(columnIndex - 1); int index = currentChunkIterator.getCurrentRowInRecordBatch(); wasNull = converter.isNull(index); return converter.toBytes(index); } @Override public Date getDate(int columnIndex, 
TimeZone tz) throws SFException { ArrowVectorConverter converter = currentChunkIterator.getCurrentConverter(columnIndex - 1); int index = currentChunkIterator.getCurrentRowInRecordBatch(); wasNull = converter.isNull(index); converter.setSessionTimeZone(sessionTimeZone); converter.setUseSessionTimezone(useSessionTimezone); return converter.toDate(index, tz, resultSetSerializable.getFormatDateWithTimeZone()); } @Override public Time getTime(int columnIndex) throws SFException { ArrowVectorConverter converter = currentChunkIterator.getCurrentConverter(columnIndex - 1); int index = currentChunkIterator.getCurrentRowInRecordBatch(); wasNull = converter.isNull(index); converter.setSessionTimeZone(sessionTimeZone); converter.setUseSessionTimezone(useSessionTimezone); return converter.toTime(index); } @Override public Timestamp getTimestamp(int columnIndex, TimeZone tz) throws SFException { ArrowVectorConverter converter = currentChunkIterator.getCurrentConverter(columnIndex - 1); int index = currentChunkIterator.getCurrentRowInRecordBatch(); converter.setSessionTimeZone(sessionTimeZone); converter.setUseSessionTimezone(useSessionTimezone); wasNull = converter.isNull(index); return converter.toTimestamp(index, tz); } @Override public Object getObject(int columnIndex) throws SFException { return getObjectRepresentation(columnIndex, true); } @Override public Object getObjectWithoutString(int columnIndex) throws SFException { return getObjectRepresentation(columnIndex, false); } private StructObjectWrapper getObjectRepresentation(int columnIndex, boolean withString) throws SFException { int type = resultSetMetaData.getColumnType(columnIndex); if (type == SnowflakeType.EXTRA_TYPES_VECTOR) { return new StructObjectWrapper(getString(columnIndex), null); } ArrowVectorConverter converter = currentChunkIterator.getCurrentConverter(columnIndex - 1); int index = currentChunkIterator.getCurrentRowInRecordBatch(); wasNull = converter.isNull(index); converter.setTreatNTZAsUTC(treatNTZAsUTC); converter.setUseSessionTimezone(useSessionTimezone); converter.setSessionTimeZone(sessionTimeZone); Object obj = converter.toObject(index); if (obj == null) { return null; } boolean isStructuredType = resultSetMetaData.isStructuredTypeColumn(columnIndex); if (isStructuredType) { if (converter instanceof VarCharConverter) { if (type == Types.STRUCT) { JsonSqlInput jsonSqlInput = createJsonSqlInput(columnIndex, obj); return new StructObjectWrapper(jsonSqlInput.getText(), jsonSqlInput); } else if (type == Types.ARRAY) { SfSqlArray sfArray = getJsonArray((String) obj, columnIndex, objectMapper); return new StructObjectWrapper(sfArray.getText(), sfArray); } else { throw new SFException(queryId, ErrorCode.INVALID_STRUCT_DATA); } } else if (converter instanceof StructConverter) { String jsonString = withString ? converter.toString(index) : null; return new StructObjectWrapper( jsonString, createArrowSqlInput(columnIndex, (Map) obj)); } else if (converter instanceof MapConverter) { String jsonString = withString ? 
converter.toString(index) : null; return new StructObjectWrapper(jsonString, obj); } else if (converter instanceof ArrayConverter || converter instanceof VectorTypeConverter) { String jsonString = converter.toString(index); return new StructObjectWrapper(jsonString, obj); } else { throw new SFException(queryId, ErrorCode.INVALID_STRUCT_DATA); } } else { return new StructObjectWrapper(null, obj); } } private SQLInput createArrowSqlInput(int columnIndex, Map input) throws SFException { if (input == null) { return null; } return new ArrowSqlInput( input, session, converters, resultSetMetaData.getColumnFields(columnIndex)); } private boolean isVarcharConvertedStruct(int type, ArrowVectorConverter converter) { return (type == Types.STRUCT || type == Types.ARRAY) && converter instanceof VarCharConverter; } private JsonSqlInput createJsonSqlInput(int columnIndex, Object obj) throws SFException { try { if (obj == null) { return null; } String text = (String) obj; JsonNode jsonNode = objectMapper.readTree(text); return new JsonSqlInput( text, jsonNode, session, converters, resultSetMetaData.getColumnFields(columnIndex), sessionTimeZone); } catch (JsonProcessingException e) { throw new SFException(queryId, e, ErrorCode.INVALID_STRUCT_DATA); } } @Override public Array getArray(int columnIndex) throws SFException { ArrowVectorConverter converter = currentChunkIterator.getCurrentConverter(columnIndex - 1); int index = currentChunkIterator.getCurrentRowInRecordBatch(); wasNull = converter.isNull(index); Object obj = converter.toObject(index); if (obj == null) { return null; } if (converter instanceof VarCharConverter) { return getJsonArray((String) obj, columnIndex, objectMapper); } else if (converter instanceof ArrayConverter || converter instanceof VectorTypeConverter) { String jsonString = converter.toString(index); return getArrowArray(jsonString, (List) obj, columnIndex); } else { throw new SFException(queryId, ErrorCode.INVALID_STRUCT_DATA); } } private SfSqlArray getArrowArray(String text, List elements, int columnIndex) throws SFException { try { List fieldMetadataList = resultSetMetaData.getColumnFields(columnIndex); if (fieldMetadataList.size() != 1) { throw new SFException( queryId, ErrorCode.INVALID_STRUCT_DATA, "Wrong size of fields for array type " + fieldMetadataList.size()); } FieldMetadata fieldMetadata = fieldMetadataList.get(0); int columnSubType = fieldMetadata.getType(); int columnType = ColumnTypeHelper.getColumnType(columnSubType, session); int scale = fieldMetadata.getScale(); switch (columnType) { case Types.INTEGER: return getSfSqlArray( text, columnSubType, mapAndConvert(elements, converters.integerConverter(columnType)) .toArray(Integer[]::new)); case Types.SMALLINT: return getSfSqlArray( text, columnSubType, mapAndConvert(elements, converters.smallIntConverter(columnType)) .toArray(Short[]::new)); case Types.TINYINT: return getSfSqlArray( text, columnSubType, mapAndConvert(elements, converters.tinyIntConverter(columnType)) .toArray(Byte[]::new)); case Types.BIGINT: return getSfSqlArray( text, columnSubType, mapAndConvert(elements, converters.bigIntConverter(columnType)).toArray(Long[]::new)); case Types.DECIMAL: case Types.NUMERIC: return getSfSqlArray( text, columnSubType, mapAndConvert(elements, converters.bigDecimalConverter(columnType)) .toArray(BigDecimal[]::new)); case Types.CHAR: case Types.VARCHAR: case Types.LONGNVARCHAR: return getSfSqlArray( text, columnSubType, mapAndConvert(elements, converters.varcharConverter(columnType, columnSubType, scale)) 
.toArray(String[]::new)); case Types.BINARY: return getSfSqlArray( text, columnSubType, mapAndConvert(elements, converters.bytesConverter(columnType, scale)) .toArray(Byte[][]::new)); case Types.FLOAT: case Types.REAL: return getSfSqlArray( text, columnSubType, mapAndConvert(elements, converters.floatConverter(columnType)).toArray(Float[]::new)); case Types.DOUBLE: return getSfSqlArray( text, columnSubType, mapAndConvert(elements, converters.doubleConverter(columnType)) .toArray(Double[]::new)); case Types.DATE: return getSfSqlArray( text, columnSubType, mapAndConvert(elements, converters.dateFromIntConverter(sessionTimeZone)) .toArray(Date[]::new)); case Types.TIME: return getSfSqlArray( text, columnSubType, mapAndConvert(elements, converters.timeFromIntConverter(scale)).toArray(Time[]::new)); case Types.TIMESTAMP: return getSfSqlArray( text, columnSubType, mapAndConvert( elements, converters.timestampFromStructConverter( columnType, columnSubType, sessionTimeZone, scale)) .toArray(Timestamp[]::new)); case Types.BOOLEAN: return getSfSqlArray( text, columnSubType, mapAndConvert(elements, converters.booleanConverter(columnType)) .toArray(Boolean[]::new)); case Types.STRUCT: return getSfSqlArray( text, columnSubType, mapAndConvert(elements, e -> e).toArray(Map[]::new)); case Types.ARRAY: return getSfSqlArray( text, columnSubType, mapAndConvert(elements, e -> ((List) e).stream().toArray(Map[]::new)) .toArray(Map[][]::new)); default: throw new SFException( queryId, ErrorCode.FEATURE_UNSUPPORTED, "Can't construct array for data type: " + columnSubType); } } catch (RuntimeException e) { throw new SFException(queryId, e, ErrorCode.INVALID_STRUCT_DATA); } } private SfSqlArray getSfSqlArray(String text, int columnSubType, Object[] array) { return new SfSqlArray(text, columnSubType, array, session, objectMapper); } private Stream mapAndConvert(List elements, Converter converter) { return elements.stream() .map( obj -> { try { return converter.convert(obj); } catch (SFException e) { throw new RuntimeException(e); } }); } @Override public BigDecimal getBigDecimal(int columnIndex) throws SFException { ArrowVectorConverter converter = currentChunkIterator.getCurrentConverter(columnIndex - 1); int index = currentChunkIterator.getCurrentRowInRecordBatch(); wasNull = converter.isNull(index); converter.setSessionTimeZone(sessionTimeZone); converter.setUseSessionTimezone(useSessionTimezone); return converter.toBigDecimal(index); } @Override public BigDecimal getBigDecimal(int columnIndex, int scale) throws SFException { BigDecimal bigDec = getBigDecimal(columnIndex); return bigDec == null ? 
null : bigDec.setScale(scale, RoundingMode.HALF_UP); } @Override public boolean isLast() { return nextChunkIndex == chunkCount && currentChunkIterator.isLast(); } @Override public boolean isAfterLast() { return nextChunkIndex == chunkCount && currentChunkIterator.isAfterLast(); } @Override public void close() throws SnowflakeSQLException { super.close(); // always make sure to free this current chunk currentChunkIterator.getChunk().freeData(); try { if (chunkDownloader != null) { DownloaderMetrics metrics = chunkDownloader.terminate(); logChunkDownloaderMetrics(metrics); } else { // always close root allocator closeRootAllocator(rootAllocator); } } catch (InterruptedException ex) { throw new SnowflakeSQLLoggedException( queryId, session, ErrorCode.INTERRUPTED.getMessageCode(), SqlState.QUERY_CANCELED); } } public static void closeRootAllocator(RootAllocator rootAllocator) { long rest = rootAllocator.getAllocatedMemory(); int count = 3; try { while (rest > 0 && count-- > 0) { // this case should only happen when the resultSet is closed before consuming all chunks // otherwise, the memory usage for each chunk will be cleared right after it has been fully // consumed // The reason is that it is possible that one downloading thread is pending to close when // the main thread // reaches here. A retry is to wait for the downloading thread to finish closing incoming // streams and arrow // resources. Thread.sleep(10); rest = rootAllocator.getAllocatedMemory(); } if (rest == 0) { rootAllocator.close(); } } catch (InterruptedException ie) { logger.debug("Interrupted during closing root allocator", false); } catch (Exception e) { logger.debug("Exception happened when closing rootAllocator: ", e.getLocalizedMessage()); } } @Override public SFStatementType getStatementType() { return statementType; } @Override public void setStatementType(SFStatementType statementType) { this.statementType = statementType; } @Override public boolean isArrayBindSupported() { return this.arrayBindSupported; } @Override public String getQueryId() { return queryId; } private void logMetric(TelemetryField field, long value) { TelemetryData data = TelemetryUtil.buildJobData(this.queryId, field, value); this.telemetryClient.addLogToBatch(data); } private void logChunkDownloaderMetrics(DownloaderMetrics metrics) { if (metrics != null) { logMetric(TelemetryField.TIME_WAITING_FOR_CHUNKS, metrics.getMillisWaiting()); logMetric(TelemetryField.TIME_DOWNLOADING_CHUNKS, metrics.getMillisDownloading()); logMetric(TelemetryField.TIME_PARSING_CHUNKS, metrics.getMillisParsing()); } } @Override public SnowflakeDateTimeFormat getTimestampLTZFormatter() { return timestampLTZFormatter; } @Override public SnowflakeDateTimeFormat getTimestampNTZFormatter() { return timestampNTZFormatter; } @Override public SnowflakeDateTimeFormat getTimestampTZFormatter() { return timestampTZFormatter; } @Override public SnowflakeDateTimeFormat getDateFormatter() { return dateFormatter; } @Override public SnowflakeDateTimeFormat getTimeFormatter() { return timeFormatter; } @Override public SFBinaryFormat getBinaryFormatter() { return binaryFormatter; } @Override public int getScale(int columnIndex) { return resultSetMetaData.getScale(columnIndex); } @Override public TimeZone getTimeZone() { return sessionTimeZone; } @Override public boolean getHonorClientTZForTimestampNTZ() { return honorClientTZForTimestampNTZ; } @Override public long getResultVersion() { return resultVersion; } } ================================================ FILE: 
src/main/java/net/snowflake/client/internal/core/SFBaseResultSet.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.getJsonNodeStringValue; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ArrayNode; import java.math.BigDecimal; import java.sql.Array; import java.sql.Date; import java.sql.SQLException; import java.sql.SQLInput; import java.sql.Time; import java.sql.Timestamp; import java.sql.Types; import java.time.Duration; import java.time.Period; import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Spliterator; import java.util.Spliterators; import java.util.TimeZone; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Stream; import java.util.stream.StreamSupport; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.api.resultset.FieldMetadata; import net.snowflake.client.api.resultset.SnowflakeResultSetSerializable; import net.snowflake.client.internal.common.core.SFBinaryFormat; import net.snowflake.client.internal.core.json.Converters; import net.snowflake.client.internal.jdbc.SnowflakeResultSetSerializableV1; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.client.internal.util.Converter; import net.snowflake.common.core.SnowflakeDateTimeFormat; /** Base class for query result set and metadata result set */ public abstract class SFBaseResultSet { private static final SFLogger logger = SFLoggerFactory.getLogger(SFBaseResultSet.class); protected static final ObjectMapper OBJECT_MAPPER = ObjectMapperFactory.getObjectMapper(); boolean wasNull = false; protected SFResultSetMetaData resultSetMetaData = null; protected int row = 0; protected Map parameters = new HashMap<>(); // Formatters for different datatypes // TODO move all formatter to DataConversionContext.java SnowflakeDateTimeFormat timestampNTZFormatter; SnowflakeDateTimeFormat timestampLTZFormatter; SnowflakeDateTimeFormat timestampTZFormatter; SnowflakeDateTimeFormat dateFormatter; SnowflakeDateTimeFormat timeFormatter; boolean honorClientTZForTimestampNTZ = true; SFBinaryFormat binaryFormatter; protected long resultVersion = 0; protected int numberOfBinds = 0; protected List metaDataOfBinds = new ArrayList<>(); // For creating incidents protected SFBaseSession session; // indicate whether the result set has been closed or not. 
protected boolean isClosed; // The serializable object which can serialize the metadata for this // result set protected SnowflakeResultSetSerializableV1 resultSetSerializable; protected TimeZone sessionTimeZone; public abstract boolean isLast(); public abstract boolean isAfterLast(); public abstract String getString(int columnIndex) throws SFException; public abstract boolean getBoolean(int columnIndex) throws SFException; public abstract byte getByte(int columnIndex) throws SFException; public abstract Period getPeriod(int columnIndex) throws SFException; public abstract Duration getDuration(int columnIndex) throws SFException; public abstract short getShort(int columnIndex) throws SFException; public abstract int getInt(int columnIndex) throws SFException; public abstract long getLong(int columnIndex) throws SFException; public abstract float getFloat(int columnIndex) throws SFException; public abstract double getDouble(int columnIndex) throws SFException; public abstract byte[] getBytes(int columnIndex) throws SFException; public abstract Time getTime(int columnIndex) throws SFException; public abstract Timestamp getTimestamp(int columnIndex, TimeZone tz) throws SFException; public abstract Date getDate(int columnIndex, TimeZone tz) throws SFException; public abstract Object getObject(int columnIndex) throws SFException; public abstract Object getObjectWithoutString(int columnIndex) throws SFException; public Array getArray(int columnIndex) throws SFException { throw new UnsupportedOperationException(); } public abstract BigDecimal getBigDecimal(int columnIndex) throws SFException; public abstract BigDecimal getBigDecimal(int columnIndex, int scale) throws SFException; public abstract SFStatementType getStatementType(); // this is useful to override the initial statement type if it is incorrect // (e.g. result scan yields a query type, but the results are from a DML statement) public abstract void setStatementType(SFStatementType statementType) throws SQLException; public abstract String getQueryId(); public void setSession(SFBaseSession session) { this.session = session; } public SFBaseSession getSession() { return this.session; } // default implementation public boolean next() throws SFException, SnowflakeSQLException { logger.trace("boolean next()", false); return false; } public void close() throws SnowflakeSQLException { logger.trace("void close()", false); // no exception even if already closed. 
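    // Illustrative caller-side note (hypothetical snippet): close() is idempotent, so a second
    // call is a harmless no-op:
    //   rs.close();
    //   rs.close(); // no exception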
resultSetMetaData = null; isClosed = true; } public boolean wasNull() { logger.trace("boolean wasNull() returning {}", wasNull); return wasNull; } public SFResultSetMetaData getMetaData() { return resultSetMetaData; } public TimeZone getSessionTimezone() { return sessionTimeZone; } public int getRow() throws SQLException { return row; } public boolean absolute(int row) throws SFException { throw new SFException(ErrorCode.FEATURE_UNSUPPORTED, "seek to a specific row"); } public boolean relative(int rows) throws SFException { throw new SFException(ErrorCode.FEATURE_UNSUPPORTED, "seek to a row relative to current row"); } public boolean previous() throws SFException { throw new SFException(ErrorCode.FEATURE_UNSUPPORTED, "seek to a previous row"); } public int getNumberOfBinds() { return numberOfBinds; } public List getMetaDataOfBinds() { return metaDataOfBinds; } public boolean isFirst() { return row == 1; } public boolean isBeforeFirst() { return row == 0; } public boolean isClosed() { return isClosed; } public boolean isArrayBindSupported() { return false; } /** * Split this whole SnowflakeResultSetSerializable into small pieces based on the user specified * data size. * * @param maxSizeInBytes The expected max data size wrapped in the ResultSetSerializables object. * NOTE: this parameter is intended to make the data size in each serializable object to be * less than it. But if user specifies a small value which may be smaller than the data size * of one result chunk. So the definition can't be guaranteed completely. For this special * case, one serializable object is used to wrap the data chunk. * @return a list of SnowflakeResultSetSerializable * @throws SQLException if fails to split objects. */ public List getResultSetSerializables(long maxSizeInBytes) throws SQLException { return this.resultSetSerializable.splitBySize(maxSizeInBytes); } public Converters getConverters() { logger.debug("Json converters weren't created"); return null; } public TimeZone getSessionTimeZone() { return resultSetSerializable.getTimeZone(); } public SQLInput createSqlInputForColumn( Object input, Class parentObjectClass, int columnIndex, SFBaseSession session, List fields) { throw new UnsupportedOperationException(); } public Date convertToDate(Object object, TimeZone tz) throws SFException { throw new UnsupportedOperationException(); } public Time convertToTime(Object object, int scale) throws SFException { throw new UnsupportedOperationException(); } public Timestamp convertToTimestamp( Object object, int columnType, int columnSubType, TimeZone tz, int scale) throws SFException { throw new UnsupportedOperationException(); } protected SQLInput createJsonSqlInputForColumn( Object input, SFBaseSession session, List fields) { JsonNode inputNode; if (input instanceof JsonNode) { inputNode = (JsonNode) input; } else { inputNode = OBJECT_MAPPER.convertValue(input, JsonNode.class); } return new JsonSqlInput( input.toString(), inputNode, session, getConverters(), fields, sessionTimeZone); } protected SfSqlArray getJsonArray(String arrayString, int columnIndex, ObjectMapper objectMapper) throws SFException { try { List fieldMetadataList = resultSetMetaData.getColumnFields(columnIndex); if (fieldMetadataList.size() != 1) { throw new SFException( ErrorCode.FEATURE_UNSUPPORTED, "Wrong size of fields for array type " + fieldMetadataList.size()); } FieldMetadata fieldMetadata = fieldMetadataList.get(0); int columnSubType = fieldMetadata.getType(); int columnType = ColumnTypeHelper.getColumnType(columnSubType, session); int 
scale = fieldMetadata.getScale(); ArrayNode arrayNode = (ArrayNode) OBJECT_MAPPER.readTree(arrayString); Iterator nodeElements = arrayNode.elements(); switch (columnType) { case Types.INTEGER: return new SfSqlArray( arrayString, columnSubType, getStream(nodeElements, getConverters().integerConverter(columnType)) .toArray(Integer[]::new), session, objectMapper); case Types.SMALLINT: return new SfSqlArray( arrayString, columnSubType, getStream(nodeElements, getConverters().smallIntConverter(columnType)) .toArray(Short[]::new), session, objectMapper); case Types.TINYINT: return new SfSqlArray( arrayString, columnSubType, getStream(nodeElements, getConverters().tinyIntConverter(columnType)) .toArray(Byte[]::new), session, objectMapper); case Types.BIGINT: return new SfSqlArray( arrayString, columnSubType, getStream(nodeElements, getConverters().bigIntConverter(columnType)) .toArray(Long[]::new), session, objectMapper); case Types.DECIMAL: case Types.NUMERIC: return new SfSqlArray( arrayString, columnSubType, convertToFixedArray( getStream(nodeElements, getConverters().bigDecimalConverter(columnType))), session, objectMapper); case Types.CHAR: case Types.VARCHAR: case Types.LONGNVARCHAR: return new SfSqlArray( arrayString, columnSubType, getStream( nodeElements, getConverters().varcharConverter(columnType, columnSubType, scale)) .toArray(String[]::new), session, objectMapper); case Types.BINARY: return new SfSqlArray( arrayString, columnSubType, getStream(nodeElements, getConverters().bytesConverter(columnType, scale)) .toArray(Byte[][]::new), session, objectMapper); case Types.FLOAT: case Types.REAL: return new SfSqlArray( arrayString, columnSubType, getStream(nodeElements, getConverters().floatConverter(columnType)) .toArray(Float[]::new), session, objectMapper); case Types.DOUBLE: return new SfSqlArray( arrayString, columnSubType, getStream(nodeElements, getConverters().doubleConverter(columnType)) .toArray(Double[]::new), session, objectMapper); case Types.DATE: return new SfSqlArray( arrayString, columnSubType, getStream(nodeElements, getConverters().dateStringConverter(session)) .toArray(Date[]::new), session, objectMapper); case Types.TIME: return new SfSqlArray( arrayString, columnSubType, getStream(nodeElements, getConverters().timeFromStringConverter(session)) .toArray(Time[]::new), session, objectMapper); case Types.TIMESTAMP: return new SfSqlArray( arrayString, columnSubType, getStream( nodeElements, getConverters() .timestampFromStringConverter( columnSubType, columnType, scale, session, null, sessionTimeZone)) .toArray(Timestamp[]::new), session, objectMapper); case Types.BOOLEAN: return new SfSqlArray( arrayString, columnSubType, getStream(nodeElements, getConverters().booleanConverter(columnType)) .toArray(Boolean[]::new), session, objectMapper); case Types.STRUCT: return new SfSqlArray( arrayString, columnSubType, getStream(nodeElements, getConverters().structConverter(OBJECT_MAPPER)) .toArray(Map[]::new), session, objectMapper); case Types.ARRAY: return new SfSqlArray( arrayString, columnSubType, getStream(nodeElements, getConverters().arrayConverter(OBJECT_MAPPER)) .toArray(Map[][]::new), session, objectMapper); default: throw new SFException( ErrorCode.FEATURE_UNSUPPORTED, "Can't construct array for data type: " + columnSubType); } } catch (JsonProcessingException e) { throw new SFException(e, ErrorCode.INVALID_STRUCT_DATA); } } protected Date convertStringToDate(String object, TimeZone tz) throws SFException { return (Date) 
getConverters().dateStringConverter(session).convert(object); } protected Time convertStringToTime(String object, int scale) throws SFException { return (Time) getConverters().timeFromStringConverter(session).convert(object); } protected Timestamp convertStringToTimestamp( String object, int columnType, int columnSubType, TimeZone tz, int scale) throws SFException { return (Timestamp) getConverters() .timestampFromStringConverter(columnSubType, columnType, scale, session, null, tz) .convert(object); } private Stream getStream(Iterator nodeElements, Converter converter) { return StreamSupport.stream( Spliterators.spliteratorUnknownSize(nodeElements, Spliterator.ORDERED), false) .map( elem -> { try { return convert(converter, (JsonNode) elem); } catch (SFException e) { throw new RuntimeException(e); } }); } private static Object convert(Converter converter, JsonNode node) throws SFException { String nodeValue = getJsonNodeStringValue(node); return converter.convert(nodeValue); } private Object[] convertToFixedArray(Stream inputStream) { AtomicInteger bigDecimalCount = new AtomicInteger(); Object[] elements = inputStream .peek( elem -> { if (elem instanceof BigDecimal) { bigDecimalCount.incrementAndGet(); } }) .toArray( size -> { boolean shouldReturnAsBigDecimal = bigDecimalCount.get() > 0; Class returnedClass = shouldReturnAsBigDecimal ? BigDecimal.class : Long.class; return java.lang.reflect.Array.newInstance(returnedClass, size); }); return elements; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SFBaseSession.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetEnv; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import java.sql.DriverPropertyInfo; import java.sql.SQLException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.api.resultset.QueryStatus; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.core.crl.CertRevocationCheckMode; import net.snowflake.client.internal.jdbc.SFConnectionHandler; import net.snowflake.client.internal.jdbc.SnowflakeConnectString; import net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.InternalCallMarker; import net.snowflake.client.internal.jdbc.telemetry.Telemetry; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; /** * Snowflake session implementation base. The methods and fields contained within this class are * setters and getters for shared session properties, i.e., those that may be set via connection * time, in properties, as well as those that may be parsed from response headers from Snowflake * (i.e., session parameters). * *

New connection properties and session parameters can be added here, as SFBaseSession contains * the logic for storing, setting, and getting these session properties. * *
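 * <p>A minimal sketch of that flow (illustrative only; the property key is a real
 * SFSessionProperty entry, the URL is a placeholder, and exception handling is omitted):
 *
 * <pre>{@code
 * session.addProperty(SFSessionProperty.SERVER_URL, "https://account.snowflakecomputing.com");
 * String url = session.getServerUrl(); // reads the same entry back
 * }</pre>
 *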

For logic that is specific to a particular type of Session, the following methods need to be * implemented: * *

open(), for establishing a connection. close(), for closing a connection. isSafeToClose(), for * checking whether the connection can be closed. checkProperties(), invoked at connection time to * verify if the requisite properties are set; and if not, returns a list of missing properties * raiseError(), which handles exceptions that may be raised in the session isTelemetryEnabled(), * which signals whether to enable client telemetry */ public abstract class SFBaseSession { private static final Set STICKY_HEADERS_NAMES = new HashSet<>(Collections.singletonList("x-snowflake-session")); private static final SFLogger logger = SFLoggerFactory.getLogger(SFBaseSession.class); private final Properties clientInfo = new Properties(); private final AtomicBoolean autoCommit = new AtomicBoolean(true); // Injected delay for the purpose of connection timeout testing // Any statement execution will sleep for the specified number of milliseconds private final AtomicInteger _injectedDelay = new AtomicInteger(0); // Connection properties map private final Map connectionPropertiesMap = new HashMap<>(); // Custom key-value map for other options values *not* defined in SFSessionProperties, // i.e., session-implementation specific private final Map customSessionProperties = new HashMap<>(1); private SFConnectionHandler sfConnectionHandler; protected List sqlWarnings = new ArrayList<>(); // Unique Session ID private String sessionId; // Database versions private String databaseVersion = null; private int databaseMajorVersion = 0; private int databaseMinorVersion = 0; // Used for parsing results private SnowflakeType timestampMappedType = SnowflakeType.TIMESTAMP_LTZ; private boolean isResultColumnCaseInsensitive; private boolean isJdbcTreatDecimalAsInt = true; private boolean treatNTZAsUTC; private boolean preparedStatementLogging = false; // Inject failure for testing private String injectFileUploadFailure; private boolean enableHeartbeat; protected int heartbeatFrequency = 3600; private boolean formatDateWithTimezone; private boolean enableCombineDescribe; private boolean clientTelemetryEnabled = false; private boolean useSessionTimezone; private boolean defaultFormatDateWithTimezone = true; private boolean getDateUseNullTimezone = true; // The server can read array binds from a stage instead of query payload. // When there as many bind values as this threshold, we should upload them to a stage. private int arrayBindStageThreshold = 0; private boolean storeTemporaryCredential; private String serviceName; private boolean sfSQLMode; // whether to enable conservative memory usage mode private boolean enableConservativeMemoryUsage; // the step in MB to adjust memory usage private int conservativeMemoryAdjustStep = 64; private int clientMemoryLimit; private int clientResultChunkSize; private int clientPrefetchThreads; // validate the default parameters by GS? private boolean validateDefaultParameters; // Current session context w/ re to database, schema, role, warehouse private String database; private String schema; private String role; private String warehouse; // For Metadata request(i.e. DatabaseMetadata.getTables or // DatabaseMetadata.getSchemas,), whether to use connection ctx to // improve the request time private boolean metadataRequestUseConnectionCtx = false; // For Metadata request(i.e. 
DatabaseMetadata.getTables or // DatabaseMetadata.getSchemas), whether to search using multiple schemas with // session database private boolean metadataRequestUseSessionDatabase = false; // Forces to use regional s3 end point API to generate regional url for aws endpoints private boolean useRegionalS3EndpointsForPresignedURL = false; // Stores other parameters sent by server private final Map otherParameters = new HashMap<>(); private HttpClientSettingsKey ocspAndProxyAndGzipKey = null; // Default value for memory limit in SFBaseSession public static long MEMORY_LIMIT_UNSET = -1; // Memory limit for SnowflakeChunkDownloader. This gets set from SFBaseSession for testing // purposes only. private long memoryLimitForTesting = MEMORY_LIMIT_UNSET; // name of temporary stage to upload array binds to; null if none has been created yet private String arrayBindStage = null; // Maximum size of the query context cache for current session private int queryContextCacheSize = 5; // Whether enable returning timestamp with timezone as data type private boolean enableReturnTimestampWithTimeZone = true; // Server side value private boolean jdbcEnablePutGet = true; // Connection string setting private boolean enablePutGet = true; private boolean enableCopyResultSet = false; // Enables the use of pattern searches for certain DatabaseMetaData methods // which do not by definition allow the use of patterns, but // we need to allow for it to maintain backwards compatibility. private boolean enablePatternSearch = true; // Enables the use of exact schema searches for certain DatabaseMetaData methods // that should use schema from context (CLIENT_METADATA_REQUEST_USE_CONNECTION_CTX=true) // value is false for backwards compatibility. private boolean enableExactSchemaSearch = false; // This is true by default, but can be set to false to disable pattern matching in cases when // wildcards are used as a part of identifiers eg. "my_table" or "my_schema" private boolean enableWildcardsInShowMetadataCommands = true; /** Disable lookup for default credentials by GCS library */ private boolean disableGcsDefaultCredentials = true; private Map commonParameters; // Headers that once they are returned from Snowflake, will then be added to each subsequent HTTP // request // e.g. x-snowflake-session header private final Map stickyHttpHeaders = new HashMap<>(); private boolean isJdbcArrowTreatDecimalAsInt = true; private boolean implicitServerSideQueryTimeout = false; private boolean clearBatchOnlyAfterSuccessfulExecution = false; /** Treat java.sql.Time as wall clock time without converting it to UTC */ private boolean treatTimeAsWallClockTime = false; private boolean ownerOnlyStageFilePermissionsEnabled = false; private boolean allowCertificatesWithoutCrlUrl = false; protected SFBaseSession(SFConnectionHandler sfConnectionHandler) { this.sfConnectionHandler = sfConnectionHandler; } public void setMemoryLimitForTesting(long memLimit) { this.memoryLimitForTesting = memLimit; } public long getMemoryLimitForTesting() { return this.memoryLimitForTesting; } /** * Part of the JDBC API, where client applications may fetch a Map of Properties to set various * attributes. This is not used internally by any driver component, but should be maintained by * the Session object. 
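   * <p>The implementation hands back a defensive copy (see the comment in the method body), so
   * mutating the returned Properties does not change the session's state. Illustrative only:
   * <pre>{@code
   * Properties info = session.getClientInfo();
   * info.setProperty("someKey", "someValue"); // the session's own client info is unaffected
   * }</pre>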
* * @return client info as Properties */ public Properties getClientInfo() { // defensive copy to avoid client from changing the properties // directly w/o going through the API Properties copy = new Properties(); copy.putAll(this.clientInfo); return copy; } /** * Set common parameters * * @param parameters the parameters to set */ public void setCommonParameters(Map parameters) { this.commonParameters = parameters; } /** * Get common parameters * * @return Map of common parameters */ public Map getCommonParameters() { return this.commonParameters; } /** * Gets the Property associated with the key 'name' in the ClientInfo map. * * @param name The key from which to fetch the Property. * @return The ClientInfo entry property. */ public String getClientInfo(String name) { return this.clientInfo.getProperty(name); } /** * Returns a unique id for this session. * * @return unique id for session */ public String getSessionId() { return sessionId; } /** * Sets the session-id attribute in this session. * * @param sessionId The session id as a string. */ public void setSessionId(String sessionId) { this.sessionId = sessionId; } /** * @return true if session is in SQLMode */ public boolean isSfSQLMode() { return sfSQLMode; } /** * Set sfSQLMode * * @param sfSQLMode boolean */ public void setSfSQLMode(boolean sfSQLMode) { this.sfSQLMode = sfSQLMode; } /** * Get the database version * * @return database version */ public String getDatabaseVersion() { return databaseVersion; } /** * Set database version * * @param databaseVersion the version to set */ public void setDatabaseVersion(String databaseVersion) { this.databaseVersion = databaseVersion; } /** * Get databse major version * * @return the database major version */ public int getDatabaseMajorVersion() { return databaseMajorVersion; } /** * Set database major version * * @param databaseMajorVersion the database major version */ public void setDatabaseMajorVersion(int databaseMajorVersion) { this.databaseMajorVersion = databaseMajorVersion; } /** * Get the database minor version * * @return database minor version */ public int getDatabaseMinorVersion() { return databaseMinorVersion; } /** * Set the database minor version * * @param databaseMinorVersion the minor version */ public void setDatabaseMinorVersion(int databaseMinorVersion) { this.databaseMinorVersion = databaseMinorVersion; } /** * Gets the value of CLIENT_ENABLE_LOG_INFO_STATEMENT_PARAMETERS if one has been set. False by * default. * * @see CLIENT_ENABLE_LOG_INFO_STATEMENT_PARAMETERS * @return true if enabled */ public boolean getPreparedStatementLogging() { return this.preparedStatementLogging; } /** * Set prepared statement logging * * @see SFBaseSession#getPreparedStatementLogging() * @param value boolean */ public void setPreparedStatementLogging(boolean value) { this.preparedStatementLogging = value; } /** * Get inject file upload failure. Note: Should only be used in internal tests! * * @return file to fail */ public String getInjectFileUploadFailure() { return this.injectFileUploadFailure; } /** * Set inject file upload failure Note: Should only be used in internal tests! 
* * @param fileToFail the file to fail */ public void setInjectFileUploadFailure(String fileToFail) { this.injectFileUploadFailure = fileToFail; } /** * Get timestamp mapped type * * @see CLIENT_TIMESTAMP_TYPE_MAPPING * @return {@link SnowflakeType} */ public SnowflakeType getTimestampMappedType() { return timestampMappedType; } /** * Set the timestamp mapped type * * @see SFBaseSession#getTimestampMappedType() * @param timestampMappedType SnowflakeType */ public void setTimestampMappedType(SnowflakeType timestampMappedType) { this.timestampMappedType = timestampMappedType; } /** * Get if result column is case-insensitive * * @see SFBaseSession#setResultColumnCaseInsensitive(boolean) * @return true if result column is case-insensitive */ public boolean isResultColumnCaseInsensitive() { return isResultColumnCaseInsensitive; } /** * Set if result column is case-insensitive * * @see CLIENT_RESULT_COLUMN_CASE_INSENSITIVE * @param resultColumnCaseInsensitive boolean */ public void setResultColumnCaseInsensitive(boolean resultColumnCaseInsensitive) { isResultColumnCaseInsensitive = resultColumnCaseInsensitive; } /** * Check if we want to treat decimal as int JDBC types * * @see JDBC_TREAT_DECIMAL_AS_INT * @return true if decimal is treated as int */ public boolean isJdbcTreatDecimalAsInt() { return isJdbcTreatDecimalAsInt; } /** * Set if decimal should be treated as int type * * @see SFBaseSession#isJdbcTreatDecimalAsInt() * @param jdbcTreatDecimalAsInt boolean */ public void setJdbcTreatDecimalAsInt(boolean jdbcTreatDecimalAsInt) { isJdbcTreatDecimalAsInt = jdbcTreatDecimalAsInt; } /** * @return true if decimal should be treated as int for arrow types */ public boolean isJdbcArrowTreatDecimalAsInt() { return isJdbcArrowTreatDecimalAsInt; } /** * Set if decimal should be treated as int for arrow types * * @param jdbcArrowTreatDecimalAsInt boolean */ public void setJdbcArrowTreatDecimalAsInt(boolean jdbcArrowTreatDecimalAsInt) { isJdbcArrowTreatDecimalAsInt = jdbcArrowTreatDecimalAsInt; } /** * Get the server url * * @return the server url or null if it is not set */ public String getServerUrl() { if (connectionPropertiesMap.containsKey(SFSessionProperty.SERVER_URL)) { return (String) connectionPropertiesMap.get(SFSessionProperty.SERVER_URL); } return null; } /** * Get whether columns strings are quoted. * * @return value of 'stringsQuotedForColumnDef' connection property or false if not set. */ public boolean isStringQuoted() { if (connectionPropertiesMap.containsKey(SFSessionProperty.STRINGS_QUOTED)) { return (Boolean) connectionPropertiesMap.get(SFSessionProperty.STRINGS_QUOTED); } return false; } /** * Wrapper function for the other addProperty(String, Object) method that takes an * SFSessionProperty instead of a String key. * * @param sfSessionProperty The property for which to set the value. * @param propertyValue The value to set for the property. * @throws SFException If the value already exists for the given key (should only be set once), or * if the value is invalid. */ public void addProperty(SFSessionProperty sfSessionProperty, Object propertyValue) throws SFException { addProperty(sfSessionProperty.getPropertyKey(), propertyValue); } /** * Adds a connection property to the connection-properties map. Connection properties are those * that are defined in SFSessionProperty. They are set typically through instantiation of the * Session. * * @param propertyName The name of the property, as a string. Recognized ones are defined in the * SFSessionProperty enum. 
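* <p>(Illustrative, hypothetical value) an {@code authenticator} property may bundle the Okta
* user name after a semicolon, e.g. {@code "https://example.okta.com;okta_username=jdoe"};
* the implementation below splits this into separate AUTHENTICATOR and OKTA_USERNAME entries.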
* @param propertyValue The value to set for this key. * @throws SFException If the value already exists for the given key (should only be set once), or * if the value is invalid. */ public void addProperty(String propertyName, Object propertyValue) throws SFException { SFSessionProperty connectionProperty = SFSessionProperty.lookupByKey(propertyName); // check if the value type is as expected propertyValue = SFSessionProperty.checkPropertyValue(connectionProperty, propertyValue); if (connectionPropertiesMap.containsKey(connectionProperty)) { throw new SFException(ErrorCode.DUPLICATE_CONNECTION_PROPERTY_SPECIFIED, propertyName); } else if (propertyValue != null && connectionProperty == SFSessionProperty.AUTHENTICATOR) { String[] authenticatorWithParams = propertyValue.toString().split(";"); if (authenticatorWithParams.length == 1) { connectionPropertiesMap.put(connectionProperty, propertyValue); } else { String[] oktaUserKeyPair = authenticatorWithParams[1].split("="); if (oktaUserKeyPair.length == 2) { connectionPropertiesMap.put(connectionProperty, authenticatorWithParams[0]); connectionPropertiesMap.put(SFSessionProperty.OKTA_USERNAME, oktaUserKeyPair[1]); } else { throw new SFException(ErrorCode.INVALID_OKTA_USERNAME, propertyName); } } } else { connectionPropertiesMap.put(connectionProperty, propertyValue); } } /** * Get the connection properties map * * @return the connection properties map */ public Map getConnectionPropertiesMap() { return connectionPropertiesMap; } /** * Get the http client key * * @return HttpClientSettingsKey * @throws SnowflakeSQLException if exception encountered */ public HttpClientSettingsKey getHttpClientKey() throws SnowflakeSQLException { // if key is already created, return it without making a new one if (ocspAndProxyAndGzipKey != null) { return ocspAndProxyAndGzipKey; } OCSPMode ocspMode = getOCSPMode(); CertRevocationCheckMode certRevocationCheckMode = getCertRevocationCheckMode(); if (certRevocationCheckMode != CertRevocationCheckMode.DISABLED && ocspMode != OCSPMode.DISABLE_OCSP_CHECKS) { throw new SnowflakeSQLException(ErrorCode.BOTH_OCSP_AND_CERT_REVOCATION_CHECK); } Boolean gzipDisabled = false; if (connectionPropertiesMap.containsKey(SFSessionProperty.GZIP_DISABLED)) { gzipDisabled = (Boolean) connectionPropertiesMap.get(SFSessionProperty.GZIP_DISABLED); } // if not, create a new key boolean useProxy = false; if (connectionPropertiesMap.containsKey(SFSessionProperty.USE_PROXY)) { useProxy = (boolean) connectionPropertiesMap.get(SFSessionProperty.USE_PROXY); } // Check for any user agent suffix String userAgentSuffix = ""; if (connectionPropertiesMap.containsKey(SFSessionProperty.USER_AGENT_SUFFIX)) { userAgentSuffix = (String) connectionPropertiesMap.get(SFSessionProperty.USER_AGENT_SUFFIX); } if (useProxy) { int proxyPort; try { proxyPort = Integer.parseInt(connectionPropertiesMap.get(SFSessionProperty.PROXY_PORT).toString()); } catch (NumberFormatException | NullPointerException e) { throw new SnowflakeSQLException( ErrorCode.INVALID_PROXY_PROPERTIES, "Could not parse port number"); } String proxyHost = (String) connectionPropertiesMap.get(SFSessionProperty.PROXY_HOST); String proxyUser = (String) connectionPropertiesMap.get(SFSessionProperty.PROXY_USER); String proxyPassword = (String) connectionPropertiesMap.get(SFSessionProperty.PROXY_PASSWORD); String nonProxyHosts = (String) connectionPropertiesMap.get(SFSessionProperty.NON_PROXY_HOSTS); String proxyProtocol = (String) connectionPropertiesMap.get(SFSessionProperty.PROXY_PROTOCOL); 
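// Illustrative sketch (comment only; host/port values are hypothetical): the properties read
// above typically originate from client configuration such as
//   Properties props = new Properties();
//   props.put("useProxy", "true");
//   props.put("proxyHost", "proxy.example.com");
//   props.put("proxyPort", "8080");
//   props.put("nonProxyHosts", "*.internal.example.com|localhost");
// and are folded into the HttpClientSettingsKey constructed below.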
ocspAndProxyAndGzipKey = new HttpClientSettingsKey( ocspMode, proxyHost, proxyPort, nonProxyHosts, proxyUser, proxyPassword, proxyProtocol, userAgentSuffix, gzipDisabled); logHttpClientInitInfo(ocspAndProxyAndGzipKey); } // If JVM proxy parameters are specified, proxies need to go through the JDBC driver's // HttpClientSettingsKey logic in order to work properly. else { boolean httpUseProxy = Boolean.parseBoolean(systemGetProperty("http.useProxy")); String httpProxyHost = systemGetProperty("http.proxyHost"); String httpProxyPort = systemGetProperty("http.proxyPort"); String httpProxyUser = systemGetProperty("http.proxyUser"); String httpProxyPassword = systemGetProperty("http.proxyPassword"); String httpsProxyHost = systemGetProperty("https.proxyHost"); String httpsProxyPort = systemGetProperty("https.proxyPort"); String httpsProxyUser = systemGetProperty("https.proxyUser"); String httpsProxyPassword = systemGetProperty("https.proxyPassword"); String httpProxyProtocol = systemGetProperty("http.proxyProtocol"); String noProxy = systemGetEnv("NO_PROXY"); String nonProxyHosts = systemGetProperty("http.nonProxyHosts"); // log the JVM parameters that are being used if (httpUseProxy) { logger.debug( "Using JVM parameters for proxy setup: http.useProxy={}, http.proxyHost={}, http.proxyPort={}, http.proxyUser={}, " + "http.proxyPassword is {}, https.proxyHost={}, https.proxyPort={}, https.proxyUser={}, " + "https.proxyPassword is {}, http.nonProxyHosts={}, NO_PROXY={}, http.proxyProtocol={}, " + "user agent suffix={}, gzip disabled={}", httpUseProxy, httpProxyHost, httpProxyPort, httpProxyUser, httpProxyPassword == null || httpProxyPassword.isEmpty() ? "not set" : "set", httpsProxyHost, httpsProxyPort, httpsProxyUser, httpsProxyPassword == null || httpsProxyPassword.isEmpty() ? "not set" : "set", nonProxyHosts, noProxy, httpProxyProtocol, userAgentSuffix, gzipDisabled); // There are two possible parameters for non-proxy hosts; they can be combined into one String combinedNonProxyHosts = isNullOrEmpty(nonProxyHosts) ? "" : nonProxyHosts; if (!isNullOrEmpty(noProxy)) { combinedNonProxyHosts += combinedNonProxyHosts.length() == 0 ? "" : "|"; combinedNonProxyHosts += noProxy; } // It is possible that a user can have both http and https proxies specified in the JVM // parameters. The default protocol is http.
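// Illustrative sketch (comment only; values hypothetical): JVM flags feeding the resolution
// below could look like
//   -Dhttp.useProxy=true -Dhttps.proxyHost=proxy.example.com -Dhttps.proxyPort=8443
// With only the https.* pair set, the protocol resolves to "https"; if the http.* pair is
// also set and http.proxyProtocol is absent, it stays at the "http" default.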
String proxyProtocol = "http"; if (!isNullOrEmpty(httpProxyProtocol)) { proxyProtocol = httpProxyProtocol; } else if (!isNullOrEmpty(httpsProxyHost) && !isNullOrEmpty(httpsProxyPort) && isNullOrEmpty(httpProxyHost) && isNullOrEmpty(httpProxyPort)) { proxyProtocol = "https"; } if (proxyProtocol.equals("https") && !isNullOrEmpty(httpsProxyHost) && !isNullOrEmpty(httpsProxyPort)) { logger.debug("Using https proxy configuration from JVM parameters"); int proxyPort; try { proxyPort = Integer.parseInt(httpsProxyPort); } catch (NumberFormatException | NullPointerException e) { throw new SnowflakeSQLException( ErrorCode.INVALID_PROXY_PROPERTIES, "Could not parse port number"); } ocspAndProxyAndGzipKey = new HttpClientSettingsKey( ocspMode, httpsProxyHost, proxyPort, combinedNonProxyHosts, httpsProxyUser, httpsProxyPassword, "https", userAgentSuffix, gzipDisabled); logHttpClientInitInfo(ocspAndProxyAndGzipKey); } else if (proxyProtocol.equals("http") && !isNullOrEmpty(httpProxyHost) && !isNullOrEmpty(httpProxyPort)) { logger.debug("Using http proxy configuration from JVM parameters"); int proxyPort; try { proxyPort = Integer.parseInt(httpProxyPort); } catch (NumberFormatException | NullPointerException e) { throw new SnowflakeSQLException( ErrorCode.INVALID_PROXY_PROPERTIES, "Could not parse port number"); } ocspAndProxyAndGzipKey = new HttpClientSettingsKey( ocspMode, httpProxyHost, proxyPort, combinedNonProxyHosts, httpProxyUser, httpProxyPassword, "http", userAgentSuffix, gzipDisabled); logHttpClientInitInfo(ocspAndProxyAndGzipKey); } else { // Not enough parameters set to use the proxy. logger.warn( "Failed parsing the proxy settings from JVM parameters as http.useProxy={}," + " but valid host and port were not provided.", httpUseProxy); ocspAndProxyAndGzipKey = new HttpClientSettingsKey(ocspMode, userAgentSuffix, gzipDisabled); logHttpClientInitInfo(ocspAndProxyAndGzipKey); } } else { // If no proxy is used or JVM http proxy is used, no need for setting parameters logger.debug("http.useProxy={}. JVM proxy not used.", httpUseProxy); unsetInvalidProxyHostAndPort(); ocspAndProxyAndGzipKey = new HttpClientSettingsKey(ocspMode, userAgentSuffix, gzipDisabled); logHttpClientInitInfo(ocspAndProxyAndGzipKey); } } ocspAndProxyAndGzipKey.setRevocationCheckMode(certRevocationCheckMode); ocspAndProxyAndGzipKey.setAllowCertificatesWithoutCrlUrl(allowCertificatesWithoutCrlUrl); return ocspAndProxyAndGzipKey; } private void logHttpClientInitInfo(HttpClientSettingsKey key) { if (key.usesProxy()) { logger.info( "Driver OCSP mode: {}, gzip disabled: {}, proxy protocol: {}," + " proxy host: {}, proxy port: {}, non proxy hosts: {}, proxy user: {}, proxy password is {}", key.getOcspMode(), key.getGzipDisabled(), key.getProxyHttpProtocol(), key.getProxyHost(), key.getProxyPort(), key.getNonProxyHosts(), key.getProxyUser(), key.getProxyPassword().isEmpty() ? "not set" : "set"); } else { logger.debug( "Driver OCSP mode: {}, gzip disabled: {} and no proxy", key.getOcspMode(), key.getGzipDisabled()); } } /** Unset invalid proxy host and port values. */ public void unsetInvalidProxyHostAndPort() { // If proxyHost and proxyPort are used without http or https unset them, so they are not used // later by the ProxySelector. 
if (!isNullOrEmpty(systemGetProperty("proxyHost"))) { System.clearProperty("proxyHost"); } if (!isNullOrEmpty(systemGetProperty("proxyPort"))) { System.clearProperty("proxyPort"); } } /** * Get OCSP mode * * @return {@link OCSPMode} * @throws SnowflakeSQLException */ public OCSPMode getOCSPMode() throws SnowflakeSQLException { OCSPMode ret; Boolean disableOCSPChecks = (Boolean) connectionPropertiesMap.get(SFSessionProperty.DISABLE_OCSP_CHECKS); Boolean insecureMode = (Boolean) connectionPropertiesMap.get(SFSessionProperty.INSECURE_MODE); if (insecureMode != null && insecureMode) { logger.warn( "The 'insecureMode' connection property is deprecated. Please use 'disableOCSPChecks' instead."); } if ((disableOCSPChecks != null && insecureMode != null) && (disableOCSPChecks != insecureMode)) { logger.error( "The values for 'disableOCSPChecks' and 'insecureMode' must be identical. " + "Please unset insecureMode."); throw new SnowflakeSQLException( ErrorCode.DISABLEOCSP_INSECUREMODE_VALUE_MISMATCH, "The values for 'disableOCSPChecks' and 'insecureMode' " + "must be identical."); } if ((disableOCSPChecks != null && disableOCSPChecks) || (insecureMode != null && insecureMode)) { // skip OCSP checks ret = OCSPMode.DISABLE_OCSP_CHECKS; } else if (!connectionPropertiesMap.containsKey(SFSessionProperty.OCSP_FAIL_OPEN) || (boolean) connectionPropertiesMap.get(SFSessionProperty.OCSP_FAIL_OPEN)) { // fail open (by default, not set) ret = OCSPMode.FAIL_OPEN; } else { // explicitly set ocspFailOpen=false ret = OCSPMode.FAIL_CLOSED; } return ret; } public CertRevocationCheckMode getCertRevocationCheckMode() throws SnowflakeSQLException { String certRevocationCheckModeStr = (String) connectionPropertiesMap.getOrDefault( SFSessionProperty.CERT_REVOCATION_CHECK_MODE, CertRevocationCheckMode.DISABLED.name()); try { return CertRevocationCheckMode.valueOf(certRevocationCheckModeStr); } catch (IllegalArgumentException e) { throw new SnowflakeSQLException( ErrorCode.UNKNOWN_CERT_REVOCATION_CHECK_MODE, "The value passed for " + SFSessionProperty.CERT_REVOCATION_CHECK_MODE + " is invalid. Possible values are " + Arrays.toString(CertRevocationCheckMode.values())); } } /** * Get the query timeout * * @return the query timeout value */ public Integer getQueryTimeout() { return (Integer) this.connectionPropertiesMap.get(SFSessionProperty.QUERY_TIMEOUT); } /** * Get the user name * * @return user name */ public String getUser() { return (String) this.connectionPropertiesMap.get(SFSessionProperty.USER); } /** * Get the server URL * * @return the server URL */ public String getUrl() { return (String) this.connectionPropertiesMap.get(SFSessionProperty.SERVER_URL); } /** * Get inject wait input * * @return the value of 'inject_wait_in_put' or 0 if not set */ public int getInjectWaitInPut() { Object retVal = this.connectionPropertiesMap.get(SFSessionProperty.INJECT_WAIT_IN_PUT); if (retVal != null) { try { return (int) retVal; } catch (Exception e) { return 0; } } return 0; } /** * Get whether the metadata request should use the session database. * * @return true if it should use the session database */ public boolean getMetadataRequestUseSessionDatabase() { return metadataRequestUseSessionDatabase; } /** * Set to true if the metadata request should use the session database. 
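* <p>Illustrative effect (hypothetical call site; exact scoping depends on server-side
* metadata handling): with this flag enabled, a metadata call such as
* {@code databaseMetaData.getSchemas()} is resolved against the session's current database
* rather than across all accessible databases.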
* * @param enabled boolean */ public void setMetadataRequestUseSessionDatabase(boolean enabled) { this.metadataRequestUseSessionDatabase = enabled; } /** * Get if metadata request should use the connection ctx * * @return true if it should use the connection ctx */ public boolean getMetadataRequestUseConnectionCtx() { return this.metadataRequestUseConnectionCtx; } /** * Set to true if metadata request should use connection ctx * * @param enabled boolean */ public void setMetadataRequestUseConnectionCtx(boolean enabled) { this.metadataRequestUseConnectionCtx = enabled; } /** * Get injected delay * * @return {@link AtomicInteger} */ AtomicInteger getInjectedDelay() { return _injectedDelay; } /** * Set the injected delay * * @param injectedDelay injectedDelay value */ public void setInjectedDelay(int injectedDelay) { this._injectedDelay.set(injectedDelay); } /** * Get if NTZ should be treated as UTC * * @return true if NTZ should be treated as UTC */ public boolean getTreatNTZAsUTC() { return treatNTZAsUTC; } /** * Set whether NTZ should be treated as UTC * * @param treatNTZAsUTC boolean */ public void setTreatNTZAsUTC(boolean treatNTZAsUTC) { this.treatNTZAsUTC = treatNTZAsUTC; } /** * Get if heartbeat is enabled * * @return true if enabled */ public boolean getEnableHeartbeat() { return enableHeartbeat; } /** * Set if heartbeat is enabled * * @param enableHeartbeat boolean */ public void setEnableHeartbeat(boolean enableHeartbeat) { this.enableHeartbeat = enableHeartbeat; } /** * Set the heartbeat frequency in seconds. This is the frequency with which the session token is * refreshed. * * @param frequency heartbeat frequency in seconds */ public void setHeartbeatFrequency(int frequency) { if (frequency < 900) { this.heartbeatFrequency = 900; } else if (frequency > 3600) { this.heartbeatFrequency = 3600; } else { this.heartbeatFrequency = frequency; } } /** * Retrieve session heartbeat frequency in seconds * * @return the heartbeat frequency in seconds */ public int getHeartbeatFrequency() { return this.heartbeatFrequency; } /** * autoCommit field specifies whether autocommit is enabled for the session. Autocommit determines * whether a DML statement, when executed without an active transaction, is automatically * committed after the statement successfully completes. default: true * * @see Transactions/Autocommit * @return a boolean value of autocommit field */ public boolean getAutoCommit() { return autoCommit.get(); } /** * Sets value of autoCommit field * * @see SFBaseSession#getAutoCommit() * @param autoCommit boolean */ public void setAutoCommit(boolean autoCommit) { this.autoCommit.set(autoCommit); } /** * Get if date should be formatted with timezone * * @return true if date should be formatted with timezone */ public boolean getFormatDateWithTimezone() { return formatDateWithTimezone; } /** * Set if date should be formatted with timezone * * @param formatDateWithTimezone boolean */ public void setFormatDateWithTimezone(boolean formatDateWithTimezone) { this.formatDateWithTimezone = formatDateWithTimezone; } /** * Get if session timezone should be used. * * @return true if using session timezone */ public boolean getUseSessionTimezone() { return useSessionTimezone; } /** * Get if using default date format with timezone. * * @return true if using default date format with timezone. */ public boolean getDefaultFormatDateWithTimezone() { return defaultFormatDateWithTimezone; } /** * Set if session timezone should be used. 
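* <p>Illustrative effect (hypothetical call site): when enabled, temporal getters such as
* {@code resultSet.getTimestamp(1)} render values in the session timezone rather than the JVM
* default timezone.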
* * @param useSessionTimezone boolean */ public void setUseSessionTimezone(boolean useSessionTimezone) { this.useSessionTimezone = useSessionTimezone; } /** * Set if default date format with timezone should be used * * @param defaultFormatDateWithTimezone boolean */ public void setDefaultFormatDateWithTimezone(boolean defaultFormatDateWithTimezone) { this.defaultFormatDateWithTimezone = defaultFormatDateWithTimezone; } public boolean getGetDateUseNullTimezone() { return getDateUseNullTimezone; } public void setGetDateUseNullTimezone(boolean getDateUseNullTimezone) { this.getDateUseNullTimezone = getDateUseNullTimezone; } public boolean getEnableCombineDescribe() { return enableCombineDescribe; } public void setEnableCombineDescribe(boolean enableCombineDescribe) { this.enableCombineDescribe = enableCombineDescribe; } public boolean isClientTelemetryEnabled() { return clientTelemetryEnabled; } public void setClientTelemetryEnabled(boolean clientTelemetryEnabled) { this.clientTelemetryEnabled = clientTelemetryEnabled; } public int getArrayBindStageThreshold() { return arrayBindStageThreshold; } public void setArrayBindStageThreshold(int arrayBindStageThreshold) { this.arrayBindStageThreshold = arrayBindStageThreshold; } public boolean getStoreTemporaryCredential() { return storeTemporaryCredential; } public void setStoreTemporaryCredential(boolean storeTemporaryCredential) { this.storeTemporaryCredential = storeTemporaryCredential; } public String getServiceName() { return serviceName; } public void setServiceName(String serviceName) { this.serviceName = serviceName; } public void setEnableConservativeMemoryUsage(boolean enableConservativeMemoryUsage) { this.enableConservativeMemoryUsage = enableConservativeMemoryUsage; } public boolean isConservativeMemoryUsageEnabled() { return enableConservativeMemoryUsage; } public int getConservativeMemoryAdjustStep() { return conservativeMemoryAdjustStep; } public void setConservativeMemoryAdjustStep(int conservativeMemoryAdjustStep) { this.conservativeMemoryAdjustStep = conservativeMemoryAdjustStep; } public int getClientMemoryLimit() { return clientMemoryLimit; } public void setClientMemoryLimit(int clientMemoryLimit) { this.clientMemoryLimit = clientMemoryLimit; } public int getQueryContextCacheSize() { return queryContextCacheSize; } public void setQueryContextCacheSize(int queryContextCacheSize) { this.queryContextCacheSize = queryContextCacheSize; } public boolean getJdbcEnablePutGet() { return jdbcEnablePutGet; } public void setJdbcEnablePutGet(boolean jdbcEnablePutGet) { this.jdbcEnablePutGet = jdbcEnablePutGet; } public boolean getEnablePutGet() { return enablePutGet; } public boolean setEnablePutGet(boolean enablePutGet) { return this.enablePutGet = enablePutGet; } public boolean isEnableCopyResultSet() { return enableCopyResultSet; } public boolean setEnableCopyResultSet(boolean enableCopyResultSet) { return this.enableCopyResultSet = enableCopyResultSet; } public boolean getEnablePatternSearch() { return enablePatternSearch; } public void setEnablePatternSearch(boolean enablePatternSearch) { this.enablePatternSearch = enablePatternSearch; } public boolean getEnableExactSchemaSearch() { return enableExactSchemaSearch; } void setEnableExactSchemaSearch(boolean enableExactSchemaSearch) { this.enableExactSchemaSearch = enableExactSchemaSearch; } public boolean getEnableWildcardsInShowMetadataCommands() { return enableWildcardsInShowMetadataCommands; } void setEnableWildcardsInShowMetadataCommands(boolean enableWildcardsInShowMetadataCommands) { 
this.enableWildcardsInShowMetadataCommands = enableWildcardsInShowMetadataCommands; } public boolean getDisableGcsDefaultCredentials() { return disableGcsDefaultCredentials; } public void setDisableGcsDefaultCredentials(boolean disableGcsDefaultCredentials) { this.disableGcsDefaultCredentials = disableGcsDefaultCredentials; } public int getClientResultChunkSize() { return clientResultChunkSize; } public void setClientResultChunkSize(int clientResultChunkSize) { this.clientResultChunkSize = clientResultChunkSize; } public Object getOtherParameter(String key) { return this.otherParameters.get(key); } public void setOtherParameter(String key, Object value) { this.otherParameters.put(key, value); } public int getClientPrefetchThreads() { return clientPrefetchThreads; } public void setClientPrefetchThreads(int clientPrefetchThreads) { this.clientPrefetchThreads = clientPrefetchThreads; } public boolean getValidateDefaultParameters() { return validateDefaultParameters; } public void setValidateDefaultParameters(boolean validateDefaultParameters) { this.validateDefaultParameters = validateDefaultParameters; } public String getDatabase() { return database; } public void setDatabase(String database) { if (!isNullOrEmpty(database)) { this.database = database; } } public String getSchema() { return schema; } public void setSchema(String schema) { if (!isNullOrEmpty(schema)) { this.schema = schema; } } public String getRole() { return role; } public void setRole(String role) { this.role = role; } public String getWarehouse() { return warehouse; } public void setWarehouse(String warehouse) { if (!isNullOrEmpty(warehouse)) { this.warehouse = warehouse; } } public void setUseRegionalS3EndpointsForPresignedURL(boolean regionalS3Endpoint) { this.useRegionalS3EndpointsForPresignedURL = regionalS3Endpoint; } public boolean getUseRegionalS3EndpointsForPresignedURL() { return useRegionalS3EndpointsForPresignedURL; } public String getArrayBindStage() { return arrayBindStage; } public void setArrayBindStage(String arrayBindStage) { this.arrayBindStage = String.format("%s.%s.%s", getDatabase(), getSchema(), arrayBindStage); } /** * Enables setting a value in the custom-properties map. This is used for properties that are * implementation specific to the session, and not shared by the different implementations. * * @param propertyName A string key for the property to set. * @param propertyValue The property value. */ public void setSessionPropertyByKey(String propertyName, Object propertyValue) { this.customSessionProperties.put(propertyName, propertyValue); } /** * Fetch the value for a custom session property. * * @param propertyName The key of the session property to fetch. * @return session property value */ public Object getSessionPropertyByKey(String propertyName) { return this.customSessionProperties.get(propertyName); } /** * Function that checks if the active session can be closed when the connection is closed. Called * by SnowflakeConnectionImpl. * * @return true if the active session is safe to close. */ public abstract boolean isSafeToClose(); /** * @param queryID query ID of the query whose status is being investigated * @return QueryStatus indicating the query's status * @throws SQLException if error encountered */ public abstract QueryStatus getQueryStatus(String queryID) throws SQLException; /** * Validates the connection properties used by this session, and returns a list of missing * properties. 
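* <p>Hypothetical usage sketch (the {@code session} reference is assumed):
*
* <pre>{@code
* for (DriverPropertyInfo info : session.checkProperties()) {
*   System.err.println("Missing connection property: " + info.name);
* }
* }</pre>
*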
* * @return List of DriverPropertyInfo */ public abstract List<DriverPropertyInfo> checkProperties(); /** * Close the connection * * @throws SnowflakeSQLException if failed to close the connection * @throws SFException if failed to close the connection */ public void close() throws SFException, SnowflakeSQLException { close(null); } /** Marker-aware overload for internal call paths. */ public abstract void close(InternalCallMarker internalCallMarker) throws SFException, SnowflakeSQLException; /** * @return Returns the telemetry client, if supported by this session. If not, should return a * NoOpTelemetryClient. */ public Telemetry getTelemetryClient() { return getTelemetryClient(null); } /** * Marker-aware overload for internal call paths. Implementations may override to bypass * external-usage telemetry tracking. */ public abstract Telemetry getTelemetryClient(InternalCallMarker internalCallMarker); /** * Makes a heartbeat call to check for session validity. * * @param timeout timeout value * @throws Exception if an error occurs * @throws SFException if a Snowflake error occurs */ public abstract void callHeartBeat(int timeout) throws Exception, SFException; /** * JDBC API. Returns a list of warnings generated since starting this session, or since the last * time it was cleared. * * @return List of SFExceptions */ public List<SFException> getSqlWarnings() { return sqlWarnings; } /** * JDBC API. Clears the list of warnings generated since the start of the session, or the last * time it was cleared. */ public void clearSqlWarnings() { sqlWarnings.clear(); } /** * Get the SFConnectionHandler * * @return {@link SFConnectionHandler} */ public SFConnectionHandler getSfConnectionHandler() { return sfConnectionHandler; } /** * Get network timeout in milliseconds * * @return network timeout in milliseconds */ public abstract int getNetworkTimeoutInMilli(); /** * @return auth timeout in seconds */ @Deprecated public abstract int getAuthTimeout(); /** * @return max http retries */ public abstract int getMaxHttpRetries(); /** * @return {@link SnowflakeConnectString} */ public abstract SnowflakeConnectString getSnowflakeConnectionString(); /** * @return true if this is an async session */ public abstract boolean isAsyncSession(); /** * @return QueryContextDTO containing opaque information shared with the cloud service. */ public abstract QueryContextDTO getQueryContextDTO(); /** * Set query context * * @param queryContext the query context string */ public abstract void setQueryContext(String queryContext); /** * @return If true, JDBC will enable returning TIMESTAMP_WITH_TIMEZONE as a column type; otherwise * it will not. This function always returns true for the JDBC client, so that client JDBC * behavior does not change. Stored proc JDBC overrides this function to return * the value of SP_JDBC_ENABLE_TIMESTAMP_WITH_TIMEZONE from the server for backward compatibility.
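* <p>Illustrative consequence (hypothetical column): when this returns true, a TIMESTAMP_TZ
* column is reported as {@code java.sql.Types.TIMESTAMP_WITH_TIMEZONE} by
* {@code ResultSetMetaData.getColumnType}; otherwise it is reported as a plain TIMESTAMP.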
*/ public boolean getEnableReturnTimestampWithTimeZone() { return enableReturnTimestampWithTimeZone; } boolean getImplicitServerSideQueryTimeout() { return implicitServerSideQueryTimeout; } void setImplicitServerSideQueryTimeout(boolean value) { this.implicitServerSideQueryTimeout = value; } void setClearBatchOnlyAfterSuccessfulExecution(boolean value) { this.clearBatchOnlyAfterSuccessfulExecution = value; } public boolean getClearBatchOnlyAfterSuccessfulExecution() { return this.clearBatchOnlyAfterSuccessfulExecution; } public boolean getTreatTimeAsWallClockTime() { return treatTimeAsWallClockTime; } public void setTreatTimeAsWallClockTime(boolean treatTimeAsWallClockTime) { this.treatTimeAsWallClockTime = treatTimeAsWallClockTime; } /** * Get if owner-only stage file permissions feature is enabled. * * @return true if owner-only stage file permissions feature is enabled */ public boolean isOwnerOnlyStageFilePermissionsEnabled() { return ownerOnlyStageFilePermissionsEnabled; } public void setOwnerOnlyStageFilePermissionsEnabled(boolean booleanValue) { this.ownerOnlyStageFilePermissionsEnabled = booleanValue; } public boolean isAllowCertificatesWithoutCrlUrl() { return allowCertificatesWithoutCrlUrl; } public void setAllowCertificatesWithoutCrlUrl(boolean allowCertificatesWithoutCrlUrl) { this.allowCertificatesWithoutCrlUrl = allowCertificatesWithoutCrlUrl; } public Map getStickyHttpHeaders() { return stickyHttpHeaders; } public void extractAndUpdateStickyHttpHeaders(Map allHeaders) { this.stickyHttpHeaders.putAll(filterStickyHeaders(allHeaders)); } private static Map filterStickyHeaders(Map headers) { return headers.entrySet().stream() .filter(entry -> STICKY_HEADERS_NAMES.contains(entry.getKey().toLowerCase())) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SFBaseStatement.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.internalCallMarker; import java.sql.SQLException; import java.util.HashMap; import java.util.Map; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.internal.jdbc.telemetry.ExecTimeTelemetryData; import net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.InternalCallMarker; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; /** * Base abstract class for an SFStatement implementation. Statements are used in executing queries, * both in standard and prepared forms. They are accessed by users via the public API class, * SnowflakeStatementV(x). */ public abstract class SFBaseStatement { // maximum number of parameters for the statement; if this threshold is exceeded, // we throw an exception protected static final int MAX_STATEMENT_PARAMETERS = 1000; private static final SFLogger logger = SFLoggerFactory.getLogger(SFBaseStatement.class); // statement level parameters; just a string-key, object-value map. protected final Map statementParametersMap = new HashMap<>(); // timeout in seconds for queries protected int queryTimeout = 0; /** * Add a statement parameter * *

Make sure a property is not added more than once and the number of properties does not * exceed the limit. * * @param propertyName property name * @param propertyValue property value * @throws SFException if there are too many parameters for a statement */ public void addProperty(String propertyName, Object propertyValue) throws SFException { statementParametersMap.put(propertyName, propertyValue); if ("query_timeout".equalsIgnoreCase(propertyName)) { queryTimeout = (Integer) propertyValue; } // check that the number of statement parameters does not exceed the limit if (statementParametersMap.size() > MAX_STATEMENT_PARAMETERS) { throw new SFException(ErrorCode.TOO_MANY_STATEMENT_PARAMETERS, MAX_STATEMENT_PARAMETERS); } } public Map<String, Object> getStatementParameters() { return statementParametersMap; } /** * Describe a statement. This is invoked when prepareStatement() occurs. SFStatementMetadata * should be returned by this action, which contains metadata such as the schema of the result. * * @param sql The SQL string of the query/statement. * @return metadata of the statement, including result set metadata and binding information * @throws SQLException if the connection is already closed * @throws SFException if the result set is null */ public abstract SFPreparedStatementMetaData describe(String sql) throws SFException, SQLException; /** * Executes the given SQL string. * * @param sql The SQL string to execute, synchronously. * @param parametersBinding parameters to bind * @param caller the JDBC interface method that called this method, if any * @param execTimeData OOB telemetry object to record timings * @return whether there is a result set or not * @throws SQLException if failed to execute the sql * @throws SFException exception raised from Snowflake components */ public abstract SFBaseResultSet execute( String sql, Map<String, ParameterBindingDTO> parametersBinding, CallingMethod caller, ExecTimeTelemetryData execTimeData) throws SQLException, SFException; /** * Execute sql asynchronously. Note that at a minimum, this does not have to be supported; if * executeAsyncQuery() is called from SnowflakeStatement and the SFConnectionHandler's * supportsAsyncQuery() returns false, an exception is thrown. If this is unimplemented, then * supportsAsyncQuery() should return false. * * @param sql sql statement. * @param parametersBinding parameters to bind * @param caller the JDBC interface method that called this method, if any * @param execTimeData ExecTimeTelemetryData * @return whether there is a result set or not * @throws SQLException if failed to execute the sql * @throws SFException exception raised from Snowflake components */ public abstract SFBaseResultSet asyncExecute( String sql, Map<String, ParameterBindingDTO> parametersBinding, CallingMethod caller, ExecTimeTelemetryData execTimeData) throws SQLException, SFException; /** * Closes the statement. Open result sets are closed, connections are terminated, state is * cleared, etc. */ public abstract void close(); /** * Aborts the statement. * * @throws SFException if the statement is already closed. * @throws SQLException if there are server-side errors from trying to abort. * @deprecated use {@link #cancel(CancellationReason)} instead */ @Deprecated public abstract void cancel() throws SFException, SQLException; /** * Aborts the statement. * * @param cancellationReason reason for the cancellation * @throws SFException if the statement is already closed. * @throws SQLException if there are server-side errors from trying to abort.
*/ public void cancel(CancellationReason cancellationReason) throws SFException, SQLException { cancel(); // default cancel is called to keep interface backward compatibility } /** * Sets a property within session properties, i.e., if the sql is using set-sf-property * * @param sql the set property sql */ public void executeSetProperty(final String sql) { logger.trace("Setting property", false); // tokenize the sql String[] tokens = sql.split("\\s+"); if (tokens.length < 2) { return; } if ("sort".equalsIgnoreCase(tokens[1])) { if (tokens.length >= 3 && "on".equalsIgnoreCase(tokens[2])) { logger.debug("Setting sort on", false); this.getSFBaseSession(internalCallMarker()).setSessionPropertyByKey("sort", true); } else { logger.debug("Setting sort off", false); this.getSFBaseSession(internalCallMarker()).setSessionPropertyByKey("sort", false); } } } /** * A method to check if a sql is file upload statement with consideration for potential comments * in front of put keyword. * * @param sql sql statement * @return true if the command is upload statement */ public static boolean isFileTransfer(String sql) { SFStatementType statementType = StmtUtil.checkStageManageCommand(sql); return statementType == SFStatementType.PUT || statementType == SFStatementType.GET; } /** * If this is a multi-statement, i.e., has child results. * * @return true if has child results */ public abstract boolean hasChildren(); /** * Get the SFBaseSession associated with this SFBaseStatement. * * @return The SFBaseSession associated with this SFBaseStatement. */ public abstract SFBaseSession getSFBaseSession(); /** * Marker-aware overload for internal call paths. Implementations may override to bypass * external-usage telemetry tracking. */ public abstract SFBaseSession getSFBaseSession(InternalCallMarker internalCallMarker); /** * Retrieves the current result as a ResultSet, if any. This is invoked by SnowflakeStatement and * should return an SFBaseResultSet, which is then wrapped in a SnowflakeResultSet. * * @return {@link SFBaseResultSet} */ public abstract SFBaseResultSet getResultSet(); /** * Sets the result set to the next one, if available. * * @param current What to do with the current result. One of Statement.CLOSE_CURRENT_RESULT, * Statement.CLOSE_ALL_RESULTS, or Statement.KEEP_CURRENT_RESULT * @return true if there is a next result and it's a result set false if there are no more * results, or there is a next result and it's an update count * @throws SQLException if something fails while getting the next result */ public abstract boolean getMoreResults(int current) throws SQLException; /** The type of query that is being executed. Used internally by SnowflakeStatementV(x). */ public enum CallingMethod { EXECUTE, EXECUTE_UPDATE, EXECUTE_QUERY } public abstract long getConservativeMemoryLimit(); public abstract int getConservativePrefetchThreads(); /** * @param queryID the queryID * @return the child query IDs for the multiple statements query. 
* @throws SQLException if an error occurs while getting child query ID's */ public abstract String[] getChildQueryIds(String queryID) throws SQLException; } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SFBasicCrlTrustManager.java ================================================ package net.snowflake.client.internal.core; import java.security.cert.CertificateException; import java.security.cert.X509Certificate; import javax.net.ssl.X509TrustManager; import net.snowflake.client.internal.core.crl.CrlRevocationManager; public class SFBasicCrlTrustManager implements X509TrustManager { private final X509TrustManager trustManager; private final CrlRevocationManager revocationManager; public SFBasicCrlTrustManager( CrlRevocationManager revocationManager, X509TrustManager trustManager) { this.revocationManager = revocationManager; this.trustManager = trustManager; } @Override public void checkClientTrusted(X509Certificate[] chain, String authType) throws CertificateException { trustManager.checkClientTrusted(chain, authType); } @Override public void checkServerTrusted(X509Certificate[] chain, String authType) throws CertificateException { trustManager.checkServerTrusted(chain, authType); revocationManager.validateRevocationStatus(chain, authType); } @Override public X509Certificate[] getAcceptedIssuers() { return trustManager.getAcceptedIssuers(); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SFChildResult.java ================================================ package net.snowflake.client.internal.core; /** Data class to wrap information about child job results */ public class SFChildResult { // query id of child query, to look up child result private final String id; // statement type of child query, to properly interpret result private final SFStatementType type; public SFChildResult(String id, SFStatementType type) { this.id = id; this.type = type; } // For Snowflake internal use public String getId() { return id; } // For Snowflake internal use public SFStatementType getType() { return type; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SFCrlTrustManagerFactory.java ================================================ package net.snowflake.client.internal.core; import java.security.KeyStore; import java.security.KeyStoreException; import java.security.NoSuchAlgorithmException; import java.security.cert.CertificateException; import javax.net.ssl.TrustManager; import javax.net.ssl.TrustManagerFactory; import javax.net.ssl.X509ExtendedTrustManager; import javax.net.ssl.X509TrustManager; import net.snowflake.client.internal.core.crl.CrlRevocationManager; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import org.apache.http.ssl.SSLInitializationException; class SFCrlTrustManagerFactory { private static final SFLogger logger = SFLoggerFactory.getLogger(SFCrlTrustManagerFactory.class); static X509TrustManager createCrlTrustManager(HttpClientSettingsKey key) throws CertificateException { X509TrustManager systemTrustManager = getSystemTrustManager(); CrlRevocationManager revocationManager = new CrlRevocationManager(key, systemTrustManager); if (systemTrustManager instanceof X509ExtendedTrustManager) { logger.debug("JVM provides X509ExtendedTrustManager - creating extended CRL trust manager"); return new SFExtendedCrlTrustManager( revocationManager, 
(X509ExtendedTrustManager) systemTrustManager); } else { logger.debug("JVM provides basic X509TrustManager - creating basic CRL trust manager"); return new SFBasicCrlTrustManager(revocationManager, systemTrustManager); } } private static X509TrustManager getSystemTrustManager() { try { TrustManagerFactory factory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); factory.init((KeyStore) null); for (TrustManager tm : factory.getTrustManagers()) { if (tm instanceof X509TrustManager) { return (X509TrustManager) tm; } } throw new SSLInitializationException( "No X509TrustManager found in default trust managers", null); } catch (NoSuchAlgorithmException | KeyStoreException ex) { throw new SSLInitializationException("Failed to initialize default trust manager", ex); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SFException.java ================================================ package net.snowflake.client.internal.core; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.common.core.ResourceBundleManager; public class SFException extends Throwable { private static final SFLogger logger = SFLoggerFactory.getLogger(SFException.class); private static final long serialVersionUID = 1L; static final ResourceBundleManager errorResourceBundleManager = ResourceBundleManager.getSingleton(ErrorCode.errorMessageResource); private Throwable cause; private String queryId; private String sqlState; private int vendorCode; private Object[] params; /** * Use {@link SFException#SFException(String, Throwable, ErrorCode, Object...)} * * @param errorCode the error code * @param params additional params */ @Deprecated public SFException(ErrorCode errorCode, Object... params) { this(null, null, errorCode, params); } /** * use {@link SFException#SFException(String, Throwable, ErrorCode, Object...)} * * @param queryID the query id * @param errorCode the error code * @param params additional params */ @Deprecated public SFException(String queryID, ErrorCode errorCode, Object... params) { this(queryID, null, errorCode, params); } /** * use {@link SFException#SFException(String, Throwable, ErrorCode, Object...)} * * @param cause throwable * @param errorCode error code * @param params additional params */ @Deprecated public SFException(Throwable cause, ErrorCode errorCode, Object... params) { this(null, cause, errorCode, params); } /** * @param queryId query ID * @param cause throwable * @param errorCode error code * @param params additional params */ public SFException(String queryId, Throwable cause, ErrorCode errorCode, Object... 
params) { super( errorResourceBundleManager.getLocalizedMessage( String.valueOf(errorCode.getMessageCode()), params), cause); this.cause = cause; // keep the field in sync so that getCause() below does not always return null this.queryId = queryId; this.sqlState = errorCode.getSqlState(); this.vendorCode = errorCode.getMessageCode(); this.params = params; } /** * Get the error cause * * @return Throwable */ public Throwable getCause() { return cause; } /** * Get the query ID * * @return query ID string */ public String getQueryId() { return queryId; } /** * Get the SQL state * * @return SQL state string */ public String getSqlState() { return sqlState; } /** * Get the vendor code * * @return vendor code */ public int getVendorCode() { return vendorCode; } /** * Get additional parameters * * @return parameter array */ public Object[] getParams() { return params; } public static String oneLiner(String prefix, Throwable thrown) { StackTraceElement[] stack = thrown.getStackTrace(); String topOfStack = ""; // empty rather than null, so an empty stack does not append "null" if (stack.length > 0) { topOfStack = " at " + stack[0]; } return prefix + " " + thrown + topOfStack; } @Override public String toString() { return super.toString() + (getQueryId() != null ? ", query id = " + getQueryId() : "") + (getSqlState() != null ? ", sql state = " + getSqlState() : ""); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SFExtendedCrlTrustManager.java ================================================ package net.snowflake.client.internal.core; import java.net.Socket; import java.security.cert.CertificateException; import java.security.cert.X509Certificate; import javax.net.ssl.SSLEngine; import javax.net.ssl.X509ExtendedTrustManager; import net.snowflake.client.internal.core.crl.CrlRevocationManager; public class SFExtendedCrlTrustManager extends X509ExtendedTrustManager { private final X509ExtendedTrustManager exTrustManager; private final CrlRevocationManager revocationManager; public SFExtendedCrlTrustManager( CrlRevocationManager revocationManager, X509ExtendedTrustManager trustManager) { this.revocationManager = revocationManager; this.exTrustManager = trustManager; } @Override public void checkClientTrusted(X509Certificate[] chain, String authType) throws CertificateException { exTrustManager.checkClientTrusted(chain, authType); } @Override public void checkClientTrusted(X509Certificate[] chain, String authType, Socket socket) throws CertificateException { exTrustManager.checkClientTrusted(chain, authType, socket); } @Override public void checkClientTrusted(X509Certificate[] chain, String authType, SSLEngine engine) throws CertificateException { exTrustManager.checkClientTrusted(chain, authType, engine); } @Override public void checkServerTrusted(X509Certificate[] chain, String authType) throws CertificateException { exTrustManager.checkServerTrusted(chain, authType); revocationManager.validateRevocationStatus(chain, authType); } @Override public void checkServerTrusted(X509Certificate[] chain, String authType, SSLEngine engine) throws CertificateException { exTrustManager.checkServerTrusted(chain, authType, engine); revocationManager.validateRevocationStatus(chain, authType); } @Override public void checkServerTrusted(X509Certificate[] chain, String authType, Socket socket) throws CertificateException { exTrustManager.checkServerTrusted(chain, authType, socket); revocationManager.validateRevocationStatus(chain, authType); } @Override public X509Certificate[] getAcceptedIssuers() { return exTrustManager.getAcceptedIssuers(); } } ================================================ FILE:
src/main/java/net/snowflake/client/internal/core/SFFixedViewResultSet.java ================================================ package net.snowflake.client.internal.core; import java.sql.SQLException; import java.util.List; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.common.core.SFBinaryFormat; import net.snowflake.client.internal.core.json.Converters; import net.snowflake.client.internal.exception.SnowflakeSQLLoggedException; import net.snowflake.client.internal.jdbc.SFBaseFileTransferAgent; import net.snowflake.client.internal.jdbc.SFBaseFileTransferAgent.CommandType; import net.snowflake.client.internal.jdbc.SnowflakeFixedView; import net.snowflake.client.internal.jdbc.SnowflakeLoggedFeatureNotSupportedException; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.common.core.SqlState; /** * Fixed view result set. This class iterates through any fixed view implementation and return the * objects as rows */ // Works only for strings, numbers, etc, does not work for timestamps, dates, times etc. public class SFFixedViewResultSet extends SFJsonResultSet { private static final SFLogger logger = SFLoggerFactory.getLogger(SFFixedViewResultSet.class); private SnowflakeFixedView fixedView; private Object[] nextRow = null; private final CommandType commandType; private final String queryID; public SFFixedViewResultSet(SnowflakeFixedView fixedView, CommandType commandType, String queryID) throws SnowflakeSQLException { super( null, new Converters( null, new SFSession(), 0, false, false, false, false, SFBinaryFormat.BASE64, null, null, null, null, null)); this.fixedView = fixedView; this.commandType = commandType; this.queryID = queryID; try { resultSetMetaData = new SFResultSetMetaData( fixedView.describeColumns(session), session, timestampNTZFormatter, timestampLTZFormatter, timestampTZFormatter, dateFormatter, timeFormatter); } catch (Exception ex) { throw new SnowflakeSQLLoggedException( queryID, session, SqlState.INTERNAL_ERROR, ErrorCode.INTERNAL_ERROR.getMessageCode(), ex, "Failed to describe fixed view: " + fixedView.getClass().getName()); } } /** * Advance to next row * * @return true if next row exists, false otherwise * @throws net.snowflake.client.internal.core.SFException if failed to get next row */ @Override public boolean next() throws SFException { logger.trace("next called", false); List nextRowList; try { // call the fixed view next row method nextRowList = fixedView.getNextRow(); } catch (Exception ex) { throw new SFException( queryID, ErrorCode.INTERNAL_ERROR, SFException.oneLiner("Error getting next row from " + "fixed view:", ex)); } row++; if (nextRowList == null) { logger.debug("End of result", false); return false; } if (nextRow == null) { nextRow = new Object[nextRowList.size()]; } nextRow = nextRowList.toArray(nextRow); return true; } @Override protected Object getObjectInternal(int columnIndex) throws SFException { logger.trace("Object getObjectInternal(int columnIndex)", false); if (nextRow == null) { throw new SFException(queryID, ErrorCode.ROW_DOES_NOT_EXIST); } if (columnIndex <= 0 || columnIndex > nextRow.length) { throw new SFException(queryID, ErrorCode.COLUMN_DOES_NOT_EXIST, columnIndex); } wasNull = nextRow[columnIndex - 1] == null; return nextRow[columnIndex - 1]; } @Override public void close() throws SnowflakeSQLException { super.close(); // free the object so that they can 
be Garbage collected nextRow = null; fixedView = null; } @Override public SFStatementType getStatementType() { if (this.commandType == SFBaseFileTransferAgent.CommandType.DOWNLOAD) { return SFStatementType.GET; } else { return SFStatementType.PUT; } } @Override public void setStatementType(SFStatementType statementType) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public boolean isLast() { return row == fixedView.getTotalRows(); } @Override public boolean isAfterLast() { return row > fixedView.getTotalRows(); } @Override public String getQueryId() { return queryID; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SFJsonResultSet.java ================================================ package net.snowflake.client.internal.core; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import java.math.BigDecimal; import java.sql.Array; import java.sql.Date; import java.sql.SQLInput; import java.sql.Time; import java.sql.Timestamp; import java.sql.Types; import java.time.Duration; import java.time.Period; import java.util.List; import java.util.TimeZone; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.resultset.FieldMetadata; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.core.arrow.StructObjectWrapper; import net.snowflake.client.internal.core.json.Converters; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; /** Abstract class used to represent snowflake result set in json format */ public abstract class SFJsonResultSet extends SFBaseResultSet { private static final SFLogger logger = SFLoggerFactory.getLogger(SFJsonResultSet.class); protected final Converters converters; protected final ObjectMapper objectMapper; protected SFJsonResultSet(TimeZone sessionTimeZone, Converters converters) { this.sessionTimeZone = sessionTimeZone; this.converters = converters; this.objectMapper = ObjectMapperFactory.getObjectMapperForSession(session); } /** * Given a column index, get current row's value as an object * * @param columnIndex index of columns * @return an object * @throws SFException raises if any error occurs */ protected abstract Object getObjectInternal(int columnIndex) throws SFException; public Object getObject(int columnIndex) throws SFException { int type = resultSetMetaData.getColumnType(columnIndex); Object obj = getObjectInternal(columnIndex); if (obj == null) { return null; } switch (type) { case Types.VARCHAR: case Types.CHAR: case SnowflakeType.EXTRA_TYPES_VECTOR: return getString(columnIndex); case Types.BINARY: return getBytes(columnIndex); case Types.INTEGER: return getInt(columnIndex); case Types.DECIMAL: return getBigDecimal(columnIndex); case Types.BIGINT: return getBigInt(columnIndex, obj); case Types.DOUBLE: return getDouble(columnIndex); case Types.TIMESTAMP: case Types.TIMESTAMP_WITH_TIMEZONE: return getTimestamp(columnIndex); case Types.DATE: return getDate(columnIndex); case Types.TIME: return getTime(columnIndex); case Types.BOOLEAN: return getBoolean(columnIndex); case Types.STRUCT: if (resultSetMetaData.isStructuredTypeColumn(columnIndex)) { return new StructObjectWrapper((String) obj, getSqlInput((String) obj, columnIndex)); } else { throw new SFException(ErrorCode.FEATURE_UNSUPPORTED, "data type: " + type); } case 
Types.ARRAY: if (resultSetMetaData.isStructuredTypeColumn(columnIndex)) { return new StructObjectWrapper((String) obj, getArray(columnIndex)); } else { throw new SFException(ErrorCode.FEATURE_UNSUPPORTED, "data type: " + type); } default: throw new SFException(ErrorCode.FEATURE_UNSUPPORTED, "data type: " + type); } } @Override public Object getObjectWithoutString(int columnIndex) throws SFException { return getObject(columnIndex); } /** * Sometimes large BIGINTS overflow the java Long type. In these cases, return a BigDecimal type * instead. * * @param columnIndex the column index * @return an object of type long or BigDecimal depending on number size * @throws SFException if an error occurs */ private Object getBigInt(int columnIndex, Object obj) throws SFException { return converters.getNumberConverter().getBigInt(obj, columnIndex); } @Override public Array getArray(int columnIndex) throws SFException { Object obj = getObjectInternal(columnIndex); if (obj == null) { return null; } return getJsonArray((String) obj, columnIndex, objectMapper); } @Override public String getString(int columnIndex) throws SFException { logger.trace("String getString(int columnIndex)", false); Object obj = getObjectInternal(columnIndex); int columnType = resultSetMetaData.getInternalColumnType(columnIndex); int columnSubType = resultSetMetaData.getInternalColumnType(columnIndex); int scale = resultSetMetaData.getScale(columnIndex); return converters.getStringConverter().getString(obj, columnType, columnSubType, scale); } @Override public boolean getBoolean(int columnIndex) throws SFException { logger.trace("boolean getBoolean(int columnIndex)", false); int columnType = resultSetMetaData.getColumnType(columnIndex); return converters.getBooleanConverter().getBoolean(getObjectInternal(columnIndex), columnType); } @Override public byte getByte(int columnIndex) throws SFException { logger.trace("short getByte(int columnIndex)", false); Object obj = getObjectInternal(columnIndex); return converters.getNumberConverter().getByte(obj); } @Override public Period getPeriod(int columnIndex) throws SFException { logger.trace("Period getPeriod(int columnIndex)", false); Object obj = getObjectInternal(columnIndex); int columnType = resultSetMetaData.getColumnType(columnIndex); return converters.getNumberConverter().getPeriod(obj, columnType); } @Override public Duration getDuration(int columnIndex) throws SFException { logger.trace("Duration getDuration(int columnIndex)", false); Object obj = getObjectInternal(columnIndex); int columnType = resultSetMetaData.getColumnType(columnIndex); return converters.getNumberConverter().getDuration(obj, columnType); } @Override public short getShort(int columnIndex) throws SFException { logger.trace("short getShort(int columnIndex)", false); Object obj = getObjectInternal(columnIndex); int columnType = resultSetMetaData.getColumnType(columnIndex); return converters.getNumberConverter().getShort(obj, columnType); } @Override public int getInt(int columnIndex) throws SFException { logger.trace("int getInt(int columnIndex)", false); Object obj = getObjectInternal(columnIndex); int columnType = resultSetMetaData.getColumnType(columnIndex); return converters.getNumberConverter().getInt(obj, columnType); } @Override public long getLong(int columnIndex) throws SFException { logger.trace("long getLong(int columnIndex)", false); Object obj = getObjectInternal(columnIndex); int columnType = resultSetMetaData.getColumnType(columnIndex); return converters.getNumberConverter().getLong(obj, 
columnType); } @Override public BigDecimal getBigDecimal(int columnIndex) throws SFException { logger.trace("BigDecimal getBigDecimal(int columnIndex)", false); Object obj = getObjectInternal(columnIndex); int columnType = resultSetMetaData.getColumnType(columnIndex); return converters.getNumberConverter().getBigDecimal(obj, columnType); } @Override public BigDecimal getBigDecimal(int columnIndex, int scale) throws SFException { logger.trace("BigDecimal getBigDecimal(int columnIndex, int scale)", false); Object obj = getObjectInternal(columnIndex); int columnType = resultSetMetaData.getColumnType(columnIndex); return converters.getNumberConverter().getBigDecimal(obj, columnType, scale); } @Override public Time getTime(int columnIndex) throws SFException { logger.trace("Time getTime(int columnIndex)", false); Object obj = getObjectInternal(columnIndex); int columnType = resultSetMetaData.getColumnType(columnIndex); int columnSubType = resultSetMetaData.getInternalColumnType(columnIndex); int scale = resultSetMetaData.getScale(columnIndex); return converters .getDateTimeConverter() .getTime(obj, columnType, columnSubType, TimeZone.getDefault(), scale); } @Override public Timestamp getTimestamp(int columnIndex, TimeZone tz) throws SFException { logger.trace("Timestamp getTimestamp(int columnIndex, TimeZone tz)", false); Object obj = getObjectInternal(columnIndex); int columnType = resultSetMetaData.getColumnType(columnIndex); int columnSubType = resultSetMetaData.getInternalColumnType(columnIndex); int scale = resultSetMetaData.getScale(columnIndex); return converters .getDateTimeConverter() .getTimestamp(obj, columnType, columnSubType, tz, scale); } @Override public float getFloat(int columnIndex) throws SFException { logger.trace("float getFloat(int columnIndex)", false); Object obj = getObjectInternal(columnIndex); int columnType = resultSetMetaData.getColumnType(columnIndex); return converters.getNumberConverter().getFloat(obj, columnType); } @Override public double getDouble(int columnIndex) throws SFException { logger.trace("double getDouble(int columnIndex)", false); Object obj = getObjectInternal(columnIndex); int columnType = resultSetMetaData.getColumnType(columnIndex); return converters.getNumberConverter().getDouble(obj, columnType); } @Override public byte[] getBytes(int columnIndex) throws SFException { logger.trace("byte[] getBytes(int columnIndex)", false); Object obj = getObjectInternal(columnIndex); int columnType = resultSetMetaData.getColumnType(columnIndex); int columnSubType = resultSetMetaData.getInternalColumnType(columnIndex); int scale = resultSetMetaData.getScale(columnIndex); return converters.getBytesConverter().getBytes(obj, columnType, columnSubType, scale); } public Date getDate(int columnIndex) throws SFException { return getDate(columnIndex, TimeZone.getDefault()); } @Override public Date getDate(int columnIndex, TimeZone tz) throws SFException { logger.trace("Date getDate(int columnIndex, TimeZone tz)", false); Object obj = getObjectInternal(columnIndex); int columnType = resultSetMetaData.getColumnType(columnIndex); int columnSubType = resultSetMetaData.getInternalColumnType(columnIndex); int scale = resultSetMetaData.getScale(columnIndex); return converters.getDateTimeConverter().getDate(obj, columnType, columnSubType, tz, scale); } @Override public SQLInput createSqlInputForColumn( Object input, Class<?> parentObjectClass, int columnIndex, SFBaseSession session, List<FieldMetadata> fields) { return createJsonSqlInputForColumn(input, session, fields); } @Override public Date convertToDate(Object object,
TimeZone tz) throws SFException { return convertStringToDate((String) object, tz); } @Override public Time convertToTime(Object object, int scale) throws SFException { return convertStringToTime((String) object, scale); } @Override public Timestamp convertToTimestamp( Object object, int columnType, int columnSubType, TimeZone tz, int scale) throws SFException { return convertStringToTimestamp((String) object, columnType, columnSubType, tz, scale); } private Timestamp getTimestamp(int columnIndex) throws SFException { return getTimestamp(columnIndex, TimeZone.getDefault()); } @Override public Converters getConverters() { return converters; } private Object getSqlInput(String input, int columnIndex) throws SFException { try { JsonNode jsonNode = OBJECT_MAPPER.readTree(input); return new JsonSqlInput( input, jsonNode, session, converters, resultSetMetaData.getColumnFields(columnIndex), sessionTimeZone); } catch (JsonProcessingException e) { throw new SFException(e, ErrorCode.INVALID_STRUCT_DATA); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SFLoginInput.java ================================================ package net.snowflake.client.internal.core; import java.net.MalformedURLException; import java.net.URL; import java.security.PrivateKey; import java.time.Duration; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.stream.Collectors; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.internal.core.auth.wif.WorkloadIdentityAttestation; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import org.apache.http.client.methods.HttpRequestBase; /** A class for holding all information required for login */ public class SFLoginInput { private String serverUrl; private String databaseName; private String schemaName; private String warehouse; private String role; private boolean validateDefaultParameters; private String originalAuthenticator; private String authenticator; private String oktaUserName; private String accountName; private int loginTimeout = -1; // default is invalid private int retryTimeout = 300; private int authTimeout = 0; private String userName; private String password; private boolean passcodeInPassword; private String passcode; private String token; private Duration connectionTimeout = HttpUtil.getConnectionTimeout(); private Duration socketTimeout = HttpUtil.getSocketTimeout(); private String appId; private String appVersion; private String sessionToken; private String masterToken; private Map sessionParameters; private PrivateKey privateKey; private String application; private String idToken; private String mfaToken; private String oauthAccessToken; private String oauthRefreshToken; private String dpopPublicKey; private boolean dpopEnabled = false; private String serviceName; private OCSPMode ocspMode; private HttpClientSettingsKey httpClientKey; private String privateKeyFile; private String privateKeyBase64; private String privateKeyPwd; private String inFlightCtx; // Opaque string sent for Snowsight account activation private int platformDetectionTimeoutMs = 200; // Default 200ms timeout for platform detection private boolean disablePlatformDetection = false; // Default false - platform detection enabled private int maxRetryCount; private SFOauthLoginInput oauthLoginInput; private boolean disableConsoleLogin = true; private boolean disableSamlURLCheck = false; private boolean 
enableClientStoreTemporaryCredential; private boolean enableClientRequestMfaToken; // Workload Identity Federation private String workloadIdentityProvider; private WorkloadIdentityAttestation workloadIdentityAttestation; private String workloadIdentityEntraResource; private List workloadIdentityImpersonationPath = Collections.emptyList(); private String workloadIdentityAwsExternalId; // OAuth private int redirectUriPort = -1; private String clientId; private String clientSecret; private SessionUtilExternalBrowser.AuthExternalBrowserHandlers browserHandler = new SessionUtilExternalBrowser.DefaultAuthExternalBrowserHandlers(); private Duration browserResponseTimeout; // Additional headers to add for Snowsight. Map additionalHttpHeadersForSnowsight; public SFLoginInput() {} public Duration getBrowserResponseTimeout() { return browserResponseTimeout; } public SFLoginInput setBrowserResponseTimeout(Duration browserResponseTimeout) { this.browserResponseTimeout = browserResponseTimeout; return this; } public String getServerUrl() { return serverUrl; } public SFLoginInput setServerUrl(String serverUrl) { this.serverUrl = serverUrl; return this; } public boolean getDisableConsoleLogin() { return disableConsoleLogin; } SFLoginInput setDisableConsoleLogin(boolean disableConsoleLogin) { this.disableConsoleLogin = disableConsoleLogin; return this; } String getDatabaseName() { return databaseName; } SFLoginInput setDatabaseName(String databaseName) { this.databaseName = databaseName; return this; } public String getSchemaName() { return schemaName; } public SFLoginInput setSchemaName(String schemaName) { this.schemaName = schemaName; return this; } public String getWarehouse() { return warehouse; } public SFLoginInput setWarehouse(String warehouse) { this.warehouse = warehouse; return this; } public String getRole() { return role; } public SFLoginInput setRole(String role) { this.role = role; return this; } public boolean isValidateDefaultParameters() { return validateDefaultParameters; } public SFLoginInput setValidateDefaultParameters(Object v) { validateDefaultParameters = getBooleanValue(v); return this; } public String getAuthenticator() { return authenticator; } public SFLoginInput setAuthenticator(String authenticator) { this.authenticator = authenticator; return this; } public String getOKTAUserName() { return oktaUserName; } public SFLoginInput setOKTAUserName(String oktaUserName) { this.oktaUserName = oktaUserName; return this; } public String getAccountName() { return accountName; } public SFLoginInput setAccountName(String accountName) { this.accountName = accountName; return this; } public int getLoginTimeout() { return loginTimeout; } // We want to choose the smaller of the two values between retryTimeout and loginTimeout for the // new retry strategy. 
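// For example: with the default retryTimeout of 300 seconds, setLoginTimeout(500) stores an
// effective login timeout of 300, while setLoginTimeout(60) stores 60 unchanged. A retryTimeout
// of 0 disables the cap entirely and the requested loginTimeout is kept as-is.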
SFLoginInput setLoginTimeout(int loginTimeout) { if (loginTimeout > retryTimeout && retryTimeout != 0) { this.loginTimeout = retryTimeout; } else { this.loginTimeout = loginTimeout; } return this; } int getRetryTimeout() { return retryTimeout; } SFLoginInput setRetryTimeout(int retryTimeout) { this.retryTimeout = retryTimeout; return this; } public int getAuthTimeout() { return authTimeout; } SFLoginInput setAuthTimeout(int authTimeout) { this.authTimeout = authTimeout; return this; } public String getUserName() { return userName; } SFLoginInput setUserName(String userName) { this.userName = userName; return this; } public String getPassword() { return password; } public SFLoginInput setPassword(String password) { this.password = password; return this; } String getPasscode() { return passcode; } SFLoginInput setPasscode(String passcode) { this.passcode = passcode; return this; } public String getToken() { return token; } public SFLoginInput setToken(String token) { this.token = token; return this; } int getConnectionTimeoutInMillis() { return (int) connectionTimeout.toMillis(); } SFLoginInput setConnectionTimeout(Duration connectionTimeout) { this.connectionTimeout = connectionTimeout; return this; } public int getSocketTimeoutInMillis() { return (int) socketTimeout.toMillis(); } public SFLoginInput setSocketTimeout(Duration socketTimeout) { this.socketTimeout = socketTimeout; return this; } boolean isPasscodeInPassword() { return passcodeInPassword; } SFLoginInput setPasscodeInPassword(boolean passcodeInPassword) { this.passcodeInPassword = passcodeInPassword; return this; } String getAppId() { return appId; } SFLoginInput setAppId(String appId) { this.appId = appId; return this; } String getAppVersion() { return appVersion; } SFLoginInput setAppVersion(String appVersion) { this.appVersion = appVersion; return this; } public String getSessionToken() { return sessionToken; } public SFLoginInput setSessionToken(String sessionToken) { this.sessionToken = sessionToken; return this; } String getMasterToken() { return masterToken; } SFLoginInput setMasterToken(String masterToken) { this.masterToken = masterToken; return this; } String getIdToken() { return idToken; } SFLoginInput setIdToken(String idToken) { this.idToken = idToken; return this; } String getMfaToken() { return mfaToken; } SFLoginInput setMfaToken(String mfaToken) { this.mfaToken = mfaToken; return this; } String getOauthAccessToken() { return oauthAccessToken; } SFLoginInput setOauthAccessToken(String oauthAccessToken) { this.oauthAccessToken = oauthAccessToken; return this; } public String getOauthRefreshToken() { return oauthRefreshToken; } SFLoginInput setOauthRefreshToken(String oauthRefreshToken) { this.oauthRefreshToken = oauthRefreshToken; return this; } String getWorkloadIdentityProvider() { return workloadIdentityProvider; } SFLoginInput setWorkloadIdentityProvider(String workloadIdentityProvider) { this.workloadIdentityProvider = workloadIdentityProvider; return this; } public String getDPoPPublicKey() { return dpopPublicKey; } SFLoginInput setDPoPPublicKey(String dpopPublicKey) { this.dpopPublicKey = dpopPublicKey; return this; } public boolean isDPoPEnabled() { return dpopEnabled; } // Currently only used for testing purpose public void setDPoPEnabled(boolean dpopEnabled) { this.dpopEnabled = dpopEnabled; } Map getSessionParameters() { return sessionParameters; } SFLoginInput setSessionParameters(Map sessionParameters) { this.sessionParameters = sessionParameters; return this; } PrivateKey getPrivateKey() { return 
privateKey; } SFLoginInput setPrivateKey(PrivateKey privateKey) { this.privateKey = privateKey; return this; } String getPrivateKeyBase64() { return privateKeyBase64; } SFLoginInput setPrivateKeyBase64(String privateKeyBase64) { this.privateKeyBase64 = privateKeyBase64; return this; } SFLoginInput setPrivateKeyFile(String privateKeyFile) { this.privateKeyFile = privateKeyFile; return this; } SFLoginInput setPrivateKeyPwd(String privateKeyPwd) { this.privateKeyPwd = privateKeyPwd; return this; } String getPrivateKeyFile() { return privateKeyFile; } String getPrivateKeyPwd() { return privateKeyPwd; } boolean isPrivateKeyProvided() { return (getPrivateKey() != null || getPrivateKeyFile() != null || getPrivateKeyBase64() != null); } public String getApplication() { return application; } public SFLoginInput setApplication(String application) { this.application = application; return this; } String getServiceName() { return serviceName; } SFLoginInput setServiceName(String serviceName) { this.serviceName = serviceName; return this; } OCSPMode getOCSPMode() { return ocspMode; } SFLoginInput setOCSPMode(OCSPMode ocspMode) { this.ocspMode = ocspMode; return this; } public HttpClientSettingsKey getHttpClientSettingsKey() { return httpClientKey; } public SFLoginInput setHttpClientSettingsKey(HttpClientSettingsKey key) { this.httpClientKey = key; return this; } // Opaque string sent for Snowsight account activation String getInFlightCtx() { return inFlightCtx; } // Opaque string sent for Snowsight account activation SFLoginInput setInFlightCtx(String inFlightCtx) { this.inFlightCtx = inFlightCtx; return this; } boolean getDisableSamlURLCheck() { return disableSamlURLCheck; } SFLoginInput setDisableSamlURLCheck(boolean disableSamlURLCheck) { this.disableSamlURLCheck = disableSamlURLCheck; return this; } public int getRedirectUriPort() { return redirectUriPort; } public SFLoginInput setRedirectUriPort(int redirectUriPort) { this.redirectUriPort = redirectUriPort; return this; } public String getClientId() { return clientId; } public SFLoginInput setClientId(String clientId) { this.clientId = clientId; return this; } public String getClientSecret() { return clientSecret; } public SFLoginInput setClientSecret(String clientSecret) { this.clientSecret = clientSecret; return this; } Map getAdditionalHttpHeadersForSnowsight() { return additionalHttpHeadersForSnowsight; } /** * Set additional http headers to apply to the outgoing request. The additional headers cannot be * used to replace or overwrite a header in use by the driver. These will be applied to the * outgoing request. 
Primarily used by Snowsight, as described in {@link * HttpUtil#applyAdditionalHeadersForSnowsight(HttpRequestBase, Map)} * * @param additionalHttpHeaders The new headers to add * @return The input object, for chaining * @see HttpUtil#applyAdditionalHeadersForSnowsight(HttpRequestBase, Map) */ public SFLoginInput setAdditionalHttpHeadersForSnowsight( Map additionalHttpHeaders) { this.additionalHttpHeadersForSnowsight = additionalHttpHeaders; return this; } static boolean getBooleanValue(Object v) { if (v instanceof Boolean) { return (Boolean) v; } else if (v instanceof String) { return !Boolean.FALSE.toString().equalsIgnoreCase((String) v) && !"off".equalsIgnoreCase((String) v) && (Boolean.TRUE.toString().equalsIgnoreCase((String) v) || "on".equalsIgnoreCase((String) v)); } return false; } String getHostFromServerUrl() throws SFException { URL url; try { if (!serverUrl.startsWith("http")) { url = new URL("https://" + serverUrl); } else { url = new URL(serverUrl); } } catch (MalformedURLException e) { throw new SFException( e, ErrorCode.INTERNAL_ERROR, "Invalid serverUrl for retrieving host name"); } return url.getHost(); } boolean isEnableClientStoreTemporaryCredential() { return enableClientStoreTemporaryCredential; } SFLoginInput setEnableClientStoreTemporaryCredential( boolean enableClientStoreTemporaryCredential) { this.enableClientStoreTemporaryCredential = enableClientStoreTemporaryCredential; return this; } boolean isEnableClientRequestMfaToken() { return enableClientRequestMfaToken; } SFLoginInput setEnableClientRequestMfaToken(boolean enableClientRequestMfaToken) { this.enableClientRequestMfaToken = enableClientRequestMfaToken; return this; } public SFOauthLoginInput getOauthLoginInput() { return oauthLoginInput; } public SFLoginInput setOauthLoginInput(SFOauthLoginInput oauthLoginInput) { this.oauthLoginInput = oauthLoginInput; return this; } void restoreOriginalAuthenticator() { this.authenticator = this.originalAuthenticator; } String getOriginalAuthenticator() { return this.originalAuthenticator; } SFLoginInput setOriginalAuthenticator(String originalAuthenticator) { this.originalAuthenticator = originalAuthenticator; return this; } public void setWorkloadIdentityAttestation( WorkloadIdentityAttestation workloadIdentityAttestation) { this.workloadIdentityAttestation = workloadIdentityAttestation; } public WorkloadIdentityAttestation getWorkloadIdentityAttestation() { return workloadIdentityAttestation; } public String getWorkloadIdentityEntraResource() { return this.workloadIdentityEntraResource; } public SFLoginInput setWorkloadIdentityEntraResource(String workloadIdentityEntraResource) { this.workloadIdentityEntraResource = workloadIdentityEntraResource; return this; } public List getWorkloadIdentityImpersonationPath() { return workloadIdentityImpersonationPath; } public SFLoginInput setWorkloadIdentityImpersonationPath( String workloadIdentityImpersonationPath) { if (!SnowflakeUtil.isNullOrEmpty(workloadIdentityImpersonationPath)) { this.workloadIdentityImpersonationPath = Arrays.stream(workloadIdentityImpersonationPath.split(",")) .map(String::trim) .filter(s -> !s.isEmpty()) .collect(Collectors.toList()); } return this; } public String getWorkloadIdentityAwsExternalId() { return workloadIdentityAwsExternalId; } public SFLoginInput setWorkloadIdentityAwsExternalId(String workloadIdentityAwsExternalId) { this.workloadIdentityAwsExternalId = workloadIdentityAwsExternalId; return this; } public SessionUtilExternalBrowser.AuthExternalBrowserHandlers getBrowserHandler() { return 
browserHandler; } public void setBrowserHandler( SessionUtilExternalBrowser.AuthExternalBrowserHandlers browserHandler) { this.browserHandler = browserHandler; } public int getPlatformDetectionTimeoutMs() { return platformDetectionTimeoutMs; } public SFLoginInput setPlatformDetectionTimeoutMs(int platformDetectionTimeoutMs) { this.platformDetectionTimeoutMs = platformDetectionTimeoutMs; return this; } public boolean isDisablePlatformDetection() { return disablePlatformDetection; } public SFLoginInput setDisablePlatformDetection(boolean disablePlatformDetection) { this.disablePlatformDetection = disablePlatformDetection; return this; } public int getMaxRetryCount() { return maxRetryCount; } public SFLoginInput setMaxRetryCount(int maxRetryCount) { this.maxRetryCount = maxRetryCount; return this; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SFLoginOutput.java ================================================ package net.snowflake.client.internal.core; import java.time.Duration; import java.util.Map; /** Login output information including session tokens, database versions */ public class SFLoginOutput { private String sessionToken; private String masterToken; private long masterTokenValidityInSeconds; private String idToken; private String mfaToken; private String oauthAccessToken; private String oauthRefreshToken; private String databaseVersion; private int databaseMajorVersion; private int databaseMinorVersion; private Duration httpClientSocketTimeout; private Duration httpClientConnectionTimeout; private String sessionDatabase; private String sessionSchema; private String sessionRole; private String sessionWarehouse; private Map commonParams; private Map loginResponseHeaders; private String sessionId; SFLoginOutput() {} SFLoginOutput( String sessionToken, String masterToken, long masterTokenValidityInSeconds, String idToken, String mfaToken, String oauthAccessToken, String oauthRefreshToken, String databaseVersion, int databaseMajorVersion, int databaseMinorVersion, int httpClientSocketTimeout, int httpClientConnectionTimeout, String sessionDatabase, String sessionSchema, String sessionRole, String sessionWarehouse, String sessionId, Map commonParams, Map loginResponseHeaders) { this.sessionToken = sessionToken; this.masterToken = masterToken; this.idToken = idToken; this.mfaToken = mfaToken; this.oauthAccessToken = oauthAccessToken; this.oauthRefreshToken = oauthRefreshToken; this.databaseVersion = databaseVersion; this.databaseMajorVersion = databaseMajorVersion; this.databaseMinorVersion = databaseMinorVersion; this.httpClientSocketTimeout = Duration.ofMillis(httpClientSocketTimeout); this.httpClientConnectionTimeout = Duration.ofMillis(httpClientConnectionTimeout); this.sessionDatabase = sessionDatabase; this.sessionSchema = sessionSchema; this.sessionRole = sessionRole; this.sessionWarehouse = sessionWarehouse; this.commonParams = commonParams; this.masterTokenValidityInSeconds = masterTokenValidityInSeconds; this.sessionId = sessionId; this.loginResponseHeaders = loginResponseHeaders; } public boolean getAutoCommit() { return (Boolean) this.commonParams.get("AUTOCOMMIT"); } public String getSessionId() { return sessionId; } public String getSessionToken() { return sessionToken; } public SFLoginOutput setSessionToken(String sessionToken) { this.sessionToken = sessionToken; return this; } String getMasterToken() { return masterToken; } SFLoginOutput setMasterToken(String masterToken) { this.masterToken = masterToken; return 
this; } String getIdToken() { return idToken; } String getMfaToken() { return mfaToken; } String getOauthAccessToken() { return oauthAccessToken; } String getOauthRefreshToken() { return oauthRefreshToken; } String getDatabaseVersion() { return databaseVersion; } int getDatabaseMajorVersion() { return databaseMajorVersion; } int getDatabaseMinorVersion() { return databaseMinorVersion; } Duration getHttpClientSocketTimeout() { return httpClientSocketTimeout; } Duration getHttpClientConnectionTimeout() { return httpClientConnectionTimeout; } Map getCommonParams() { return commonParams; } String getSessionDatabase() { return sessionDatabase; } String getSessionSchema() { return sessionSchema; } String getSessionRole() { return sessionRole; } String getSessionWarehouse() { return sessionWarehouse; } long getMasterTokenValidityInSeconds() { return masterTokenValidityInSeconds; } public Map getLoginResponseHeaders() { return loginResponseHeaders; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SFOCSPException.java ================================================ package net.snowflake.client.internal.core; import net.snowflake.client.internal.jdbc.OCSPErrorCode; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; public class SFOCSPException extends Throwable { private static final SFLogger logger = SFLoggerFactory.getLogger(SFOCSPException.class); private static final long serialVersionUID = 1L; private final OCSPErrorCode errorCode; public SFOCSPException(OCSPErrorCode errorCode, String errorMsg) { this(errorCode, errorMsg, null); } public SFOCSPException(OCSPErrorCode errorCode, String errorMsg, Throwable cause) { super(errorMsg); this.errorCode = errorCode; if (cause != null) { this.initCause(cause); } } public OCSPErrorCode getErrorCode() { return errorCode; } @Override public String toString() { return super.toString() + (getErrorCode() != null ? ", errorCode = " + getErrorCode() : "") + (getMessage() != null ? 
", errorMsg = " + getMessage() : ""); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SFOauthLoginInput.java ================================================ package net.snowflake.client.internal.core; public class SFOauthLoginInput { private static final String LOCAL_APPLICATION_CLIENT_CREDENTIAL = "LOCAL_APPLICATION"; private String clientId; private String clientSecret; private final String redirectUri; private final String authorizationUrl; private final String tokenRequestUrl; private final String scope; private final boolean enableSingleUseRefreshTokens; public SFOauthLoginInput( String clientId, String clientSecret, String redirectUri, String authorizationUrl, String tokenRequestUrl, String scope) { this(clientId, clientSecret, redirectUri, authorizationUrl, tokenRequestUrl, scope, false); } public SFOauthLoginInput( String clientId, String clientSecret, String redirectUri, String authorizationUrl, String tokenRequestUrl, String scope, boolean enableSingleUseRefreshTokens) { this.redirectUri = redirectUri; this.clientId = clientId; this.clientSecret = clientSecret; this.authorizationUrl = authorizationUrl; this.tokenRequestUrl = tokenRequestUrl; this.scope = scope; this.enableSingleUseRefreshTokens = enableSingleUseRefreshTokens; } public String getRedirectUri() { return redirectUri; } public String getClientId() { return clientId; } public String getClientSecret() { return clientSecret; } public String getAuthorizationUrl() { return authorizationUrl; } public String getTokenRequestUrl() { return tokenRequestUrl; } public String getScope() { return scope; } public boolean getEnableSingleUseRefreshTokens() { return enableSingleUseRefreshTokens; } public void setLocalApplicationClientCredential() { this.clientId = LOCAL_APPLICATION_CLIENT_CREDENTIAL; this.clientSecret = LOCAL_APPLICATION_CLIENT_CREDENTIAL; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SFPreparedStatementMetaData.java ================================================ package net.snowflake.client.internal.core; import java.sql.SQLException; import java.util.ArrayList; import java.util.Collections; import java.util.List; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.common.core.SqlState; /** Statement metadata which includes the result metadata and bind information. 
*/ public class SFPreparedStatementMetaData { // result metadata private SFResultSetMetaData resultSetMetaData; // number of binds private int numberOfBinds; private final SFStatementType statementType; private final boolean arrayBindSupported; private List<MetaDataOfBinds> metaDataOfBinds; private final boolean isValidMetaData; public SFPreparedStatementMetaData( SFResultSetMetaData resultSetMetaData, SFStatementType statementType, int numberOfBinds, boolean arrayBindSupported, List<MetaDataOfBinds> metaDataOfBinds, boolean isValidMetaData) { this.resultSetMetaData = resultSetMetaData; this.statementType = statementType; this.numberOfBinds = numberOfBinds; this.arrayBindSupported = arrayBindSupported; this.metaDataOfBinds = metaDataOfBinds; this.isValidMetaData = isValidMetaData; } public SFResultSetMetaData getResultSetMetaData() { return resultSetMetaData; } public void setResultSetMetaData(SFResultSetMetaData resultSetMetaData) { this.resultSetMetaData = resultSetMetaData; } public int getNumberOfBinds() { return numberOfBinds; } public MetaDataOfBinds getMetaDataForBindParam(int param) throws SQLException { if (param < 1 || param > numberOfBinds) { throw new SnowflakeSQLException( SqlState.NUMERIC_VALUE_OUT_OF_RANGE, ErrorCode.NUMERIC_VALUE_OUT_OF_RANGE.getMessageCode(), param, numberOfBinds); } if (numberOfBinds != metaDataOfBinds.size() || metaDataOfBinds.size() == 0) { throw new SnowflakeSQLException( ((String) null), SqlState.NO_DATA, ErrorCode.NO_VALID_DATA.getMessageCode()); } return metaDataOfBinds.get(param - 1); } public void setNumberOfBinds(int numberOfBinds) { this.numberOfBinds = numberOfBinds; } /** * Whether this metadata is valid. If true, this object is valid metadata from a describe * request. If false, it is a dummy/empty metadata generated because the prepare statement * failed. * *
<p>
This is used to determine if the content is valid, e.g., the number of bind parameters. * * @return true or false */ public boolean isValidMetaData() { return isValidMetaData; } /** * Based on the StatementType, decides whether array binds are supported. * *
<p>
Currently, only INSERT supports array bind * * @return true if array binds is supported. */ public boolean isArrayBindSupported() { return this.arrayBindSupported; } public SFStatementType getStatementType() { return this.statementType; } /** * Generates an empty/invalid metadata for placeholder. * * @return statement metadata */ public static SFPreparedStatementMetaData emptyMetaData() { return new SFPreparedStatementMetaData( new SFResultSetMetaData( 0, Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), null), SFStatementType.UNKNOWN, 0, false, new ArrayList<>(), false); // invalid metadata } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SFPubKeysInternal.java ================================================ ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SFResultSet.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.core.StmtUtil.eventHandler; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import static net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.internalCallMarker; import com.fasterxml.jackson.databind.JsonNode; import java.sql.SQLException; import java.util.Arrays; import java.util.Comparator; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.core.BasicEvent.QueryState; import net.snowflake.client.internal.core.json.Converters; import net.snowflake.client.internal.exception.SnowflakeSQLLoggedException; import net.snowflake.client.internal.jdbc.JsonResultChunk; import net.snowflake.client.internal.jdbc.SnowflakeResultChunk; import net.snowflake.client.internal.jdbc.SnowflakeResultSetSerializableV1; import net.snowflake.client.internal.jdbc.telemetry.Telemetry; import net.snowflake.client.internal.jdbc.telemetry.TelemetryData; import net.snowflake.client.internal.jdbc.telemetry.TelemetryField; import net.snowflake.client.internal.jdbc.telemetry.TelemetryUtil; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.common.core.SqlState; /** Snowflake ResultSet implementation */ public class SFResultSet extends SFJsonResultSet { private static final SFLogger logger = SFLoggerFactory.getLogger(SFResultSet.class); private int columnCount = 0; private int currentChunkRowCount = 0; private int currentChunkRowIndex = -1; private JsonNode firstChunkRowset = null; private JsonResultChunk currentChunk = null; private String queryId; private SFStatementType statementType; private boolean totalRowCountTruncated; private boolean sortResult = false; private Object[][] firstChunkSortedRowSet; // time the first chunk is consumed at (timestamp taken at object creation) private final long firstChunkTime; private long chunkCount = 0; private long nextChunkIndex = 0; private ChunkDownloader chunkDownloader; protected SFBaseStatement statement; private final boolean arrayBindSupported; private Telemetry telemetryClient; // If customer wants Timestamp_NTZ values to be stored in UTC time // instead of a local/session timezone, set to true private boolean treatNTZAsUTC; private boolean formatDateWithTimezone; /** * Constructor takes a result from the API response that we get from executing a SQL statement. * *
<p>
The constructor will initialize the ResultSetMetaData. * * @param resultSetSerializable result data after parsing * @param statement statement object * @param sortResult true if sort results otherwise false * @throws SQLException exception raised from general SQL layers */ public SFResultSet( SnowflakeResultSetSerializableV1 resultSetSerializable, SFBaseStatement statement, boolean sortResult) throws SQLException { this( resultSetSerializable, statement.getSFBaseSession(internalCallMarker()), statement.getSFBaseSession(internalCallMarker()).getTelemetryClient(internalCallMarker()), sortResult); this.statement = statement; SFBaseSession session = statement.getSFBaseSession(internalCallMarker()); session.setDatabase(resultSetSerializable.getFinalDatabaseName()); session.setSchema(resultSetSerializable.getFinalSchemaName()); session.setRole(resultSetSerializable.getFinalRoleName()); session.setWarehouse(resultSetSerializable.getFinalWarehouseName()); this.treatNTZAsUTC = resultSetSerializable.getTreatNTZAsUTC(); this.formatDateWithTimezone = resultSetSerializable.getFormatDateWithTimeZone(); // update the driver/session with common parameters from GS SessionUtil.updateSfDriverParamValues( this.parameters, statement.getSFBaseSession(internalCallMarker())); // if server gives a send time, log time it took to arrive if (resultSetSerializable.getSendResultTime() != 0) { long timeConsumeFirstResult = this.firstChunkTime - resultSetSerializable.getSendResultTime(); logMetric(TelemetryField.TIME_CONSUME_FIRST_RESULT, timeConsumeFirstResult); } eventHandler.triggerStateTransition( BasicEvent.QueryState.CONSUMING_RESULT, String.format(QueryState.CONSUMING_RESULT.getArgString(), queryId, 0)); } /** * This is a minimum initialization for SFResultSet. Mainly used for testing purpose. However, * real prod constructor will call this constructor as well * * @param resultSetSerializable data returned in query response * @param telemetryClient telemetryClient * @param sortResult should sorting take place * @throws SQLException if exception is encountered */ public SFResultSet( SnowflakeResultSetSerializableV1 resultSetSerializable, Telemetry telemetryClient, boolean sortResult) throws SQLException { this(resultSetSerializable, new SFSession(), telemetryClient, sortResult); } /** * This is a minimum initialization for SFResultSet. Mainly used for testing purpose. However, * real prod constructor will call this constructor as well * * @param resultSetSerializable data returned in query response * @param session snowflake session * @param telemetryClient telemetryClient * @param sortResult should sorting take place * @throws SQLException if an exception is encountered. 
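* Note that when {@code sortResult} is true, only results contained entirely in the first
* chunk can be sorted; results with additional chunk files fail with
* CLIENT_SIDE_SORTING_NOT_SUPPORTED (see the sortResult check in the constructor body).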
*/ public SFResultSet( SnowflakeResultSetSerializableV1 resultSetSerializable, SFBaseSession session, Telemetry telemetryClient, boolean sortResult) throws SQLException { super(resultSetSerializable.getTimeZone(), new Converters(session, resultSetSerializable)); this.resultSetSerializable = resultSetSerializable; this.columnCount = 0; this.sortResult = sortResult; this.firstChunkTime = System.currentTimeMillis(); this.telemetryClient = telemetryClient; this.queryId = resultSetSerializable.getQueryId(); this.statementType = resultSetSerializable.getStatementType(); this.totalRowCountTruncated = resultSetSerializable.isTotalRowCountTruncated(); this.parameters = resultSetSerializable.getParameters(); this.columnCount = resultSetSerializable.getColumnCount(); this.firstChunkRowset = resultSetSerializable.getAndClearFirstChunkRowset(); this.currentChunkRowCount = resultSetSerializable.getFirstChunkRowCount(); this.chunkCount = resultSetSerializable.getChunkFileCount(); this.chunkDownloader = resultSetSerializable.getChunkDownloader(); this.timestampNTZFormatter = resultSetSerializable.getTimestampNTZFormatter(); this.timestampLTZFormatter = resultSetSerializable.getTimestampLTZFormatter(); this.timestampTZFormatter = resultSetSerializable.getTimestampTZFormatter(); this.dateFormatter = resultSetSerializable.getDateFormatter(); this.timeFormatter = resultSetSerializable.getTimeFormatter(); this.honorClientTZForTimestampNTZ = resultSetSerializable.isHonorClientTZForTimestampNTZ(); this.binaryFormatter = resultSetSerializable.getBinaryFormatter(); this.resultVersion = resultSetSerializable.getResultVersion(); this.numberOfBinds = resultSetSerializable.getNumberOfBinds(); this.arrayBindSupported = resultSetSerializable.isArrayBindSupported(); this.metaDataOfBinds = resultSetSerializable.getMetaDataOfBinds(); this.resultSetMetaData = resultSetSerializable.getSFResultSetMetaData(internalCallMarker()); this.treatNTZAsUTC = resultSetSerializable.getTreatNTZAsUTC(); this.formatDateWithTimezone = resultSetSerializable.getFormatDateWithTimeZone(); // sort result set if needed if (sortResult) { // we don't support sort result when there are offline chunks if (chunkCount > 0) { throw new SnowflakeSQLLoggedException( queryId, session, ErrorCode.CLIENT_SIDE_SORTING_NOT_SUPPORTED.getMessageCode(), SqlState.FEATURE_NOT_SUPPORTED); } sortResultSet(); } } private boolean fetchNextRow() throws SFException, SnowflakeSQLException { if (sortResult) { return fetchNextRowSorted(); } else { return fetchNextRowUnsorted(); } } private boolean fetchNextRowSorted() { currentChunkRowIndex++; if (currentChunkRowIndex < currentChunkRowCount) { return true; } firstChunkSortedRowSet = null; // no more chunks as sorted is only supported // for one chunk return false; } private boolean fetchNextRowUnsorted() throws SFException, SnowflakeSQLException { currentChunkRowIndex++; if (currentChunkRowIndex < currentChunkRowCount) { return true; } // let GC collect first rowset firstChunkRowset = null; if (nextChunkIndex < chunkCount) { try { eventHandler.triggerStateTransition( BasicEvent.QueryState.CONSUMING_RESULT, String.format(QueryState.CONSUMING_RESULT.getArgString(), queryId, nextChunkIndex)); SnowflakeResultChunk nextChunk = chunkDownloader.getNextChunkToConsume(); if (nextChunk == null) { throw new SnowflakeSQLLoggedException( queryId, session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "Expect chunk but got null for chunk index " + nextChunkIndex); } currentChunkRowIndex = 0; currentChunkRowCount = 
nextChunk.getRowCount(); currentChunk = (JsonResultChunk) nextChunk; logger.debug( "Moving to chunk index {}, row count={}", nextChunkIndex, currentChunkRowCount); nextChunkIndex++; return true; } catch (InterruptedException ex) { throw new SnowflakeSQLLoggedException( queryId, session, ErrorCode.INTERRUPTED.getMessageCode(), SqlState.QUERY_CANCELED); } } else if (chunkCount > 0) { try { logger.debug("End of chunks", false); DownloaderMetrics metrics = chunkDownloader.terminate(); logChunkDownloaderMetrics(metrics); } catch (InterruptedException ex) { throw new SnowflakeSQLLoggedException( queryId, session, ErrorCode.INTERRUPTED.getMessageCode(), SqlState.QUERY_CANCELED); } } return false; } private void logMetric(TelemetryField field, long value) { TelemetryData data = TelemetryUtil.buildJobData(this.queryId, field, value); this.telemetryClient.addLogToBatch(data); } private void logChunkDownloaderMetrics(DownloaderMetrics metrics) { if (metrics != null) { logMetric(TelemetryField.TIME_WAITING_FOR_CHUNKS, metrics.getMillisWaiting()); logMetric(TelemetryField.TIME_DOWNLOADING_CHUNKS, metrics.getMillisDownloading()); logMetric(TelemetryField.TIME_PARSING_CHUNKS, metrics.getMillisParsing()); } } /** * Advance to next row * * @return true if next row exists, false otherwise */ @Override public boolean next() throws SFException, SnowflakeSQLException { if (isClosed()) { return false; } // otherwise try to fetch again if (fetchNextRow()) { row++; if (isLast()) { long timeConsumeLastResult = System.currentTimeMillis() - this.firstChunkTime; logMetric(TelemetryField.TIME_CONSUME_LAST_RESULT, timeConsumeLastResult); } return true; } else { logger.debug("End of result", false); /* * Here we check if the result has been truncated and throw exception if * so. 
*/ if (totalRowCountTruncated || Boolean.TRUE .toString() .equalsIgnoreCase(systemGetProperty("snowflake.enable_incident_test2"))) { throw new SFException(queryId, ErrorCode.MAX_RESULT_LIMIT_EXCEEDED); } // mark end of result return false; } } @Override protected Object getObjectInternal(int columnIndex) throws SFException { if (columnIndex <= 0 || columnIndex > resultSetMetaData.getColumnCount()) { throw new SFException(queryId, ErrorCode.COLUMN_DOES_NOT_EXIST, columnIndex); } final int internalColumnIndex = columnIndex - 1; Object retValue; if (sortResult) { retValue = firstChunkSortedRowSet[currentChunkRowIndex][internalColumnIndex]; } else if (firstChunkRowset != null) { retValue = JsonResultChunk.extractCell(firstChunkRowset, currentChunkRowIndex, internalColumnIndex); } else if (currentChunk != null) { retValue = currentChunk.getCell(currentChunkRowIndex, internalColumnIndex); } else { throw new SFException(queryId, ErrorCode.COLUMN_DOES_NOT_EXIST, columnIndex); } wasNull = retValue == null; return retValue; } private void sortResultSet() { // first fetch rows into firstChunkSortedRowSet firstChunkSortedRowSet = new Object[currentChunkRowCount][]; for (int rowIdx = 0; rowIdx < currentChunkRowCount; rowIdx++) { firstChunkSortedRowSet[rowIdx] = new Object[columnCount]; for (int colIdx = 0; colIdx < columnCount; colIdx++) { firstChunkSortedRowSet[rowIdx][colIdx] = JsonResultChunk.extractCell(firstChunkRowset, rowIdx, colIdx); } } // now sort it Arrays.sort( firstChunkSortedRowSet, new Comparator<Object[]>() { public int compare(Object[] a, Object[] b) { int numCols = a.length; for (int colIdx = 0; colIdx < numCols; colIdx++) { if (a[colIdx] == null && b[colIdx] == null) { continue; } // null is considered bigger than all values if (a[colIdx] == null) { return 1; } if (b[colIdx] == null) { return -1; } int res = a[colIdx].toString().compareTo(b[colIdx].toString()); // continue to next column if no difference if (res == 0) { continue; } return res; } // all columns are the same return 0; } }); } @Override public boolean isLast() { return nextChunkIndex == chunkCount && currentChunkRowIndex + 1 == currentChunkRowCount; } @Override public boolean isAfterLast() { return nextChunkIndex == chunkCount && currentChunkRowIndex >= currentChunkRowCount; } @Override public void close() throws SnowflakeSQLException { super.close(); try { if (chunkDownloader != null) { DownloaderMetrics metrics = chunkDownloader.terminate(); logChunkDownloaderMetrics(metrics); firstChunkSortedRowSet = null; firstChunkRowset = null; currentChunk = null; } } catch (InterruptedException ex) { throw new SnowflakeSQLLoggedException( queryId, session, ErrorCode.INTERRUPTED.getMessageCode(), SqlState.QUERY_CANCELED); } } @Override public SFStatementType getStatementType() { return statementType; } @Override public void setStatementType(SFStatementType statementType) { this.statementType = statementType; } @Override public boolean isArrayBindSupported() { return this.arrayBindSupported; } @Override public String getQueryId() { return queryId; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SFResultSetFactory.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.internalCallMarker; import com.fasterxml.jackson.databind.JsonNode; import java.sql.SQLException; import net.snowflake.client.api.exception.ErrorCode; import
net.snowflake.client.internal.exception.SnowflakeSQLLoggedException; import net.snowflake.client.internal.jdbc.SnowflakeResultSetSerializableV1; import net.snowflake.client.internal.jdbc.telemetry.ExecTimeTelemetryData; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; /** * Factory class to create SFBaseResultSet class. Depending on result format, different instance * will be created */ class SFResultSetFactory { private static final SFLogger logger = SFLoggerFactory.getLogger(SFResultSetFactory.class); /** * Factory class used to generate ResultSet object according to query result format * * @param result raw response from server * @param statement statement that created current resultset * @param sortResult true if sort first chunk * @return result set object */ static SFBaseResultSet getResultSet( JsonNode result, SFStatement statement, boolean sortResult, ExecTimeTelemetryData execTimeData) throws SQLException { execTimeData.setProcessResultChunkStart(); SnowflakeResultSetSerializableV1 resultSetSerializable = SnowflakeResultSetSerializableV1.create( result, statement.getSFBaseSession(internalCallMarker()), statement, internalCallMarker()); execTimeData.setProcessResultChunkEnd(); SFBaseResultSet rs; execTimeData.setCreateResultSetStart(); switch (resultSetSerializable.getQueryResultFormat()) { case ARROW: logger.debug("Query result received in ARROW format. Processing with SFArrowResultSet."); rs = new SFArrowResultSet( resultSetSerializable, statement.getSFBaseSession(internalCallMarker()), statement, sortResult); break; case JSON: rs = new SFResultSet(resultSetSerializable, statement, sortResult); break; default: rs = null; break; } execTimeData.setCreateResultSetEnd(); if (rs == null) { throw new SnowflakeSQLLoggedException( statement.getSFBaseSession(internalCallMarker()), ErrorCode.INTERNAL_ERROR, "Unsupported query result format: " + resultSetSerializable.getQueryResultFormat().name()); } return rs; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SFResultSetMetaData.java ================================================ package net.snowflake.client.internal.core; import java.sql.Date; import java.sql.ResultSetMetaData; import java.sql.Types; import java.util.ArrayList; import java.util.Calendar; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.TimeZone; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.resultset.FieldMetadata; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.jdbc.SnowflakeColumnMetadata; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.common.core.SFTime; import net.snowflake.common.core.SFTimestamp; import net.snowflake.common.core.SnowflakeDateTimeFormat; /** Snowflake ResultSetMetaData */ public class SFResultSetMetaData { private static final SFLogger logger = SFLoggerFactory.getLogger(SFResultSetMetaData.class); private int columnCount = 0; private List columnNames; private List columnTypeNames; private List columnTypes; private List precisions; private List dimensions; private List scales; private List nullables; private List columnSrcTables; private List columnSrcSchemas; private List columnSrcDatabases; private List columnDisplaySizes; private List columnMetadata = new ArrayList<>(); private String queryId; private Map columnNamePositionMap 
= new HashMap<>(); private Map columnNameUpperCasePositionMap = new HashMap<>(); // For creating incidents private SFBaseSession session; // Date time formatter for calculating the display size private SnowflakeDateTimeFormat timestampNTZFormatter; private SnowflakeDateTimeFormat timestampLTZFormatter; private SnowflakeDateTimeFormat timestampTZFormatter; private SnowflakeDateTimeFormat timeFormatter; private SnowflakeDateTimeFormat dateFormatter; // provide default display size for databasemetadata result set. // i.e. result set returned calling getTables etc private int timestampNTZStringLength = 30; private int timestampLTZStringLength = 30; private int timestampTZStringLength = 30; private int timeStringLength = 18; private int dateStringLength = 10; private boolean isResultColumnCaseInsensitive = false; private List isAutoIncrementList; public SFResultSetMetaData( int columnCount, List columnNames, List columnTypeNames, List columnTypes, SFBaseSession session) { this.columnCount = columnCount; this.columnNames = columnNames; this.columnTypeNames = columnTypeNames; this.columnTypes = columnTypes; this.session = session; } public SFResultSetMetaData( List columnMetadata, SFBaseSession session, SnowflakeDateTimeFormat timestampNTZFormatter, SnowflakeDateTimeFormat timestampLTZFormatter, SnowflakeDateTimeFormat timestampTZFormatter, SnowflakeDateTimeFormat dateFormatter, SnowflakeDateTimeFormat timeFormatter) { this( columnMetadata, "none", session, (session != null) && session.isResultColumnCaseInsensitive(), timestampNTZFormatter, timestampLTZFormatter, timestampTZFormatter, dateFormatter, timeFormatter); } public SFResultSetMetaData( List columnMetadata, String queryId, SFBaseSession session, boolean isResultColumnCaseInsensitive, SnowflakeDateTimeFormat timestampNTZFormatter, SnowflakeDateTimeFormat timestampLTZFormatter, SnowflakeDateTimeFormat timestampTZFormatter, SnowflakeDateTimeFormat dateFormatter, SnowflakeDateTimeFormat timeFormatter) { this.columnCount = columnMetadata.size(); this.columnMetadata = columnMetadata; this.queryId = queryId; this.timestampNTZFormatter = timestampNTZFormatter; this.timestampLTZFormatter = timestampLTZFormatter; this.timestampTZFormatter = timestampTZFormatter; this.dateFormatter = dateFormatter; this.timeFormatter = timeFormatter; calculateDateTimeStringLength(); this.columnNames = new ArrayList<>(this.columnCount); this.columnTypeNames = new ArrayList<>(this.columnCount); this.columnTypes = new ArrayList<>(this.columnCount); this.precisions = new ArrayList<>(this.columnCount); this.dimensions = new ArrayList<>(this.columnCount); this.scales = new ArrayList<>(this.columnCount); this.nullables = new ArrayList<>(this.columnCount); this.columnSrcDatabases = new ArrayList<>(this.columnCount); this.columnSrcSchemas = new ArrayList<>(this.columnCount); this.columnSrcTables = new ArrayList<>(this.columnCount); this.columnDisplaySizes = new ArrayList<>(this.columnCount); this.isAutoIncrementList = new ArrayList<>(this.columnCount); this.isResultColumnCaseInsensitive = isResultColumnCaseInsensitive; for (int colIdx = 0; colIdx < columnCount; colIdx++) { columnNames.add(columnMetadata.get(colIdx).getName()); columnTypeNames.add(columnMetadata.get(colIdx).getTypeName()); precisions.add(calculatePrecision(columnMetadata.get(colIdx))); dimensions.add(calculateDimension(columnMetadata.get(colIdx))); columnTypes.add(columnMetadata.get(colIdx).getType()); scales.add(columnMetadata.get(colIdx).getScale()); nullables.add( columnMetadata.get(colIdx).isNullable() ? 
ResultSetMetaData.columnNullable : ResultSetMetaData.columnNoNulls); columnSrcDatabases.add(columnMetadata.get(colIdx).getColumnSrcDatabase()); columnSrcSchemas.add(columnMetadata.get(colIdx).getColumnSrcSchema()); columnSrcTables.add(columnMetadata.get(colIdx).getColumnSrcTable()); columnDisplaySizes.add(calculateDisplaySize(columnMetadata.get(colIdx))); isAutoIncrementList.add(columnMetadata.get(colIdx).isAutoIncrement()); } this.session = session; } private Integer calculatePrecision(SnowflakeColumnMetadata columnMetadata) { int columnType = columnMetadata.getType(); switch (columnType) { case Types.CHAR: case Types.VARCHAR: case Types.BINARY: return columnMetadata.getLength(); case Types.INTEGER: case Types.DECIMAL: case Types.BIGINT: return columnMetadata.getPrecision(); case Types.DATE: return dateStringLength; case Types.TIME: return timeStringLength; case SnowflakeType.EXTRA_TYPES_TIMESTAMP_LTZ: return timestampLTZStringLength; case SnowflakeType.EXTRA_TYPES_TIMESTAMP_TZ: return timestampTZStringLength; case Types.TIMESTAMP: return timestampNTZStringLength; // for double and boolean // Precision is not applicable hence return 0 default: return 0; } } private Integer calculateDimension(SnowflakeColumnMetadata columnMetadata) { int columnType = columnMetadata.getType(); if (columnType == SnowflakeType.EXTRA_TYPES_VECTOR) { return columnMetadata.getDimension(); } return 0; } private Integer calculateDisplaySize(SnowflakeColumnMetadata columnMetadata) { int columnType = columnMetadata.getType(); switch (columnType) { case Types.CHAR: case Types.VARCHAR: case Types.BINARY: return columnMetadata.getLength(); case Types.INTEGER: case Types.BIGINT: // + 1 because number can be negative, it could be -20 for number(2,0) return columnMetadata.getPrecision() + 1; case Types.DECIMAL: // first + 1 because number can be negative, second + 1 because it always // include decimal point. // i.e. number(2, 1) could be -1.3 return columnMetadata.getPrecision() + 1 + 1; case Types.DOUBLE: // Hard code as 24 since the longest float // represented in char is // -2.2250738585072020E−308 return 24; case Types.DATE: return dateStringLength; case Types.TIME: return timeStringLength; case SnowflakeType.EXTRA_TYPES_TIMESTAMP_LTZ: return timestampLTZStringLength; case SnowflakeType.EXTRA_TYPES_TIMESTAMP_TZ: return timestampTZStringLength; case Types.TIMESTAMP: return timestampNTZStringLength; case Types.BOOLEAN: // Hard code as 5 since the longest char to represent // a boolean would be false, which is 5. 
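// ("false".length() == 5, whereas "true" is only 4 characters)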
return 5; default: return 25; } } private void calculateDateTimeStringLength() { SFTimestamp ts = SFTimestamp.fromMilliseconds(System.currentTimeMillis(), TimeZone.getDefault()); try { if (timestampNTZFormatter != null) { String tsNTZStr = ResultUtil.getSFTimestampAsString( ts, Types.TIMESTAMP, 9, timestampNTZFormatter, timestampLTZFormatter, timestampTZFormatter, session); timestampNTZStringLength = tsNTZStr.length(); } if (timestampLTZFormatter != null) { String tsLTZStr = ResultUtil.getSFTimestampAsString( ts, SnowflakeType.EXTRA_TYPES_TIMESTAMP_LTZ, 9, timestampNTZFormatter, timestampLTZFormatter, timestampTZFormatter, session); timestampLTZStringLength = tsLTZStr.length(); } if (timestampTZFormatter != null) { String tsTZStr = ResultUtil.getSFTimestampAsString( ts, SnowflakeType.EXTRA_TYPES_TIMESTAMP_TZ, 9, timestampNTZFormatter, timestampLTZFormatter, timestampTZFormatter, session); timestampTZStringLength = tsTZStr.length(); } SFTime time = SFTime.fromTimestamp(ts); if (timeFormatter != null) { timeStringLength = ResultUtil.getSFTimeAsString(time, 9, timeFormatter).length(); } if (dateFormatter != null) { final Calendar calendar = Calendar.getInstance(); calendar.set(2015, Calendar.DECEMBER, 11); dateStringLength = ResultUtil.getDateAsString(new Date(calendar.getTimeInMillis()), dateFormatter) .length(); } } catch (SFException e) { logger.debug("Failed to calculate the display size. Use default one.", false); } } /** * get the query id * * @return query id */ public String getQueryId() { return queryId; } /** * get the session * * @return session object */ public SFBaseSession getSession() { return session; } /** * Get the list of column names * * @return column names in list */ public List getColumnNames() { return columnNames; } /** * Get the index of the column by name * * @param columnName column name * @return index of the column that names matches the column name */ public int getColumnIndex(String columnName) { columnName = isResultColumnCaseInsensitive ? columnName.toUpperCase() : columnName; Map nameToIndexMap = isResultColumnCaseInsensitive ? columnNameUpperCasePositionMap : columnNamePositionMap; if (nameToIndexMap.get(columnName) != null) { return nameToIndexMap.get(columnName); } else { int columnIndex = isResultColumnCaseInsensitive ? 
ResultUtil.listSearchCaseInsensitive(columnNames, columnName) : columnNames.indexOf(columnName); nameToIndexMap.put(columnName, columnIndex); return columnIndex; } } /** * Get number of columns * * @return column count */ public int getColumnCount() { return columnCount; } public int getColumnType(int column) throws SFException { return ColumnTypeHelper.getColumnType(getInternalColumnType(column), session); } public int getInternalColumnType(int column) throws SFException { int columnIdx = column - 1; if (column < 1 || column > columnTypes.size()) { throw new SFException(queryId, ErrorCode.COLUMN_DOES_NOT_EXIST, column); } if (columnTypes.get(columnIdx) == null) { throw new SFException( queryId, ErrorCode.INTERNAL_ERROR, "Missing column type for column " + column); } return columnTypes.get(columnIdx); } public String getColumnTypeName(int column) throws SFException { if (column < 1 || column > columnTypeNames.size()) { throw new SFException(queryId, ErrorCode.COLUMN_DOES_NOT_EXIST, column); } if (columnTypeNames.get(column - 1) == null) { throw new SFException( queryId, ErrorCode.INTERNAL_ERROR, "Missing column type for column " + column); } return columnTypeNames.get(column - 1); } public int getScale(int column) { if (scales != null && scales.size() >= column) { return scales.get(column - 1); } else { // TODO: fix this later to use different defaults for number or timestamp return 9; } } public int getPrecision(int column) { if (precisions != null && precisions.size() >= column) { return precisions.get(column - 1); } else { // TODO: fix this later to use different defaults for number or timestamp return 9; } } public int getDimension(int column) { if (dimensions != null && dimensions.size() >= column && column > 0) { return dimensions.get(column - 1); } else { return 0; } } public boolean isSigned(int column) { return (columnTypes.get(column - 1) == Types.INTEGER || columnTypes.get(column - 1) == Types.DECIMAL || columnTypes.get(column - 1) == Types.BIGINT || columnTypes.get(column - 1) == Types.DOUBLE); } public String getColumnLabel(int column) { if (columnNames != null) { return columnNames.get(column - 1); } else { return "C" + Integer.toString(column - 1); } } public String getColumnName(int column) { if (columnNames != null) { return columnNames.get(column - 1); } else { return "C" + Integer.toString(column - 1); } } public int isNullable(int column) { if (nullables != null) { return nullables.get(column - 1); } else { return ResultSetMetaData.columnNullableUnknown; } } public String getCatalogName(int column) { if (columnSrcDatabases == null) { return ""; } return columnSrcDatabases.get(column - 1); } public String getSchemaName(int column) { if (columnSrcDatabases == null) { return ""; } return columnSrcSchemas.get(column - 1); } public String getTableName(int column) { if (columnSrcDatabases == null) { return "T"; } return columnSrcTables.get(column - 1); } public Integer getColumnDisplaySize(int column) { if (columnDisplaySizes == null) { return 25; } return columnDisplaySizes.get(column - 1); } public boolean getIsAutoIncrement(int column) { if (isAutoIncrementList == null || isAutoIncrementList.size() == 0) { return false; } return isAutoIncrementList.get(column - 1); } public List getIsAutoIncrementList() { return isAutoIncrementList; } public List getColumnFields(int column) throws SFException { if (column < 1 || column > columnMetadata.size()) { throw new SFException(queryId, ErrorCode.COLUMN_DOES_NOT_EXIST, column); } if (columnMetadata.get(column - 1) == null) { throw 
new SFException( queryId, ErrorCode.INTERNAL_ERROR, "Missing column fields for column " + column); } return columnMetadata.get(column - 1).getFields(); } public boolean isStructuredTypeColumn(int columnIndex) { return columnMetadata.get(columnIndex - 1).getFields() != null && !columnMetadata.get(columnIndex - 1).getFields().isEmpty(); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SFSSLConnectionSocketFactory.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import java.io.IOException; import java.net.Proxy; import java.net.Socket; import java.security.KeyManagementException; import java.security.NoSuchAlgorithmException; import java.util.Arrays; import java.util.List; import java.util.stream.Collectors; import javax.net.ssl.SSLContext; import javax.net.ssl.SSLServerSocketFactory; import javax.net.ssl.TrustManager; import net.snowflake.client.internal.log.ArgSupplier; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import org.apache.http.conn.ssl.SSLConnectionSocketFactory; import org.apache.http.protocol.HttpContext; /** Snowflake custom SSLConnectionSocketFactory */ public class SFSSLConnectionSocketFactory extends SSLConnectionSocketFactory { private static final SFLogger logger = SFLoggerFactory.getLogger(SFSSLConnectionSocketFactory.class); private static TlsVersion minTlsVersion = TlsVersion.TLS_1_2; private static TlsVersion maxTlsVersion = TlsVersion.TLS_1_3; private final boolean socksProxyDisabled; public SFSSLConnectionSocketFactory(TrustManager[] trustManagers, boolean socksProxyDisabled) throws NoSuchAlgorithmException, KeyManagementException { super( initSSLContext(trustManagers), getSupportedTlsVersions(), decideCipherSuites(), SSLConnectionSocketFactory.getDefaultHostnameVerifier()); this.socksProxyDisabled = socksProxyDisabled; } private static String[] getSupportedTlsVersions() { if (minTlsVersion.compareTo(maxTlsVersion) > 0) { throw new IllegalArgumentException( String.format( "Minimum TLS version %s cannot be greater than the maximum TLS version %s", minTlsVersion.getProtocolName(), maxTlsVersion.getProtocolName())); } List supported = Arrays.stream(TlsVersion.values()) .filter(TlsVersion::isAvailable) .filter(v -> v.compareTo(minTlsVersion) >= 0) .filter(v -> v.compareTo(maxTlsVersion) <= 0) .map(TlsVersion::getProtocolName) .collect(Collectors.toList()); if (supported.isEmpty()) { throw new IllegalStateException( String.format( "No TLS versions match constraints: min=%s, max=%s", minTlsVersion.getProtocolName(), maxTlsVersion.getProtocolName())); } return supported.toArray(new String[0]); } private static SSLContext initSSLContext(TrustManager[] trustManagers) throws NoSuchAlgorithmException, KeyManagementException { // Use generic TLS context to support multiple versions SSLContext sslContext = SSLContext.getInstance("TLS"); sslContext.init( null, // key manager trustManagers, // trust manager null); // secure random return sslContext; } @Override public Socket createSocket(HttpContext ctx) throws IOException { return socksProxyDisabled ? new Socket(Proxy.NO_PROXY) : super.createSocket(ctx); } /** * Decide cipher suites that will be passed into the SSLConnectionSocketFactory * * @return List of cipher suites. 
*/ private static String[] decideCipherSuites() { String sysCipherSuites = systemGetProperty("https.cipherSuites"); String[] cipherSuites = sysCipherSuites != null ? sysCipherSuites.split(",") : // use jdk default cipher suites ((SSLServerSocketFactory) SSLServerSocketFactory.getDefault()).getDefaultCipherSuites(); // cipher suites need to be picked up in code explicitly for jdk 1.7 // https://stackoverflow.com/questions/44378970/ logger.trace("Cipher suites used: {}", (ArgSupplier) () -> Arrays.toString(cipherSuites)); return cipherSuites; } public static void setMinTlsVersion(String minTlsVersion) { logger.debug("Setting minimum TLS version to: {}", minTlsVersion); SFSSLConnectionSocketFactory.minTlsVersion = TlsVersion.fromString(minTlsVersion); } public static void setMaxTlsVersion(String maxTlsVersion) { logger.debug("Setting maximum TLS version to: {}", maxTlsVersion); SFSSLConnectionSocketFactory.maxTlsVersion = TlsVersion.fromString(maxTlsVersion); } private enum TlsVersion { TLS_1_2("TLSv1.2"), TLS_1_3("TLSv1.3"); private final String protocolName; TlsVersion(String protocolName) { this.protocolName = protocolName; } String getProtocolName() { return protocolName; } boolean isAvailable() { try { SSLContext.getInstance(this.protocolName); return true; } catch (NoSuchAlgorithmException e) { logger.debug("TLS protocol {} is not available", this.protocolName); return false; } } static TlsVersion fromString(String text) { if (text == null) { return null; } for (TlsVersion v : TlsVersion.values()) { if (v.protocolName.equalsIgnoreCase(text)) { return v; } } throw new IllegalArgumentException("Unsupported TLS version: " + text); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SFSession.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.core.SFLoginInput.getBooleanValue; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty; import static net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.internalCallMarker; import static net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.recordIfExternal; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.annotations.VisibleForTesting; import java.security.PrivateKey; import java.sql.DriverPropertyInfo; import java.sql.SQLException; import java.time.Duration; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; import java.util.logging.Level; import net.snowflake.client.api.auth.AuthenticatorType; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.api.http.HttpHeadersCustomizer; import net.snowflake.client.api.resultset.QueryStatus; import net.snowflake.client.internal.config.SFClientConfig; import net.snowflake.client.internal.core.crl.CRLValidator; import net.snowflake.client.internal.core.minicore.MinicoreTelemetry; import 
net.snowflake.client.internal.exception.SnowflakeSQLLoggedException; import net.snowflake.client.internal.jdbc.DefaultSFConnectionHandler; import net.snowflake.client.internal.jdbc.SnowflakeConnectString; import net.snowflake.client.internal.jdbc.SnowflakeReauthenticationRequest; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import net.snowflake.client.internal.jdbc.diagnostic.DiagnosticContext; import net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker; import net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.InternalCallMarker; import net.snowflake.client.internal.jdbc.telemetry.NoOpTelemetryClient; import net.snowflake.client.internal.jdbc.telemetry.Telemetry; import net.snowflake.client.internal.jdbc.telemetry.TelemetryClient; import net.snowflake.client.internal.jdbc.telemetryOOB.TelemetryService; import net.snowflake.client.internal.log.JDK14Logger; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.client.internal.log.SFLoggerUtil; import net.snowflake.client.internal.util.Stopwatch; import net.snowflake.common.core.SqlState; import org.apache.http.HttpHeaders; import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.utils.URIBuilder; /** Snowflake session implementation */ public class SFSession extends SFBaseSession { public static final String SF_QUERY_REQUEST_ID = "requestId"; public static final String SF_HEADER_AUTHORIZATION = HttpHeaders.AUTHORIZATION; public static final String SF_HEADER_SNOWFLAKE_AUTHTYPE = "Snowflake"; public static final String SF_HEADER_TOKEN_TAG = "Token"; private static final String SF_ENABLE_WIF_AWS_EXTERNAL_ID = "SF_ENABLE_WIF_AWS_EXTERNAL_ID"; private static final SFLogger logger = SFLoggerFactory.getLogger(SFSession.class); private static final ObjectMapper OBJECT_MAPPER = ObjectMapperFactory.getObjectMapper(); private static final String SF_PATH_SESSION_HEARTBEAT = "/session/heartbeat"; private static final String SF_PATH_QUERY_MONITOR = "/monitoring/queries/"; private static final int MAX_SESSION_PARAMETERS = 1000; // this constant was public - let's not change it public static final int DEFAULT_HTTP_CLIENT_SOCKET_TIMEOUT = HttpUtil.DEFAULT_HTTP_CLIENT_SOCKET_TIMEOUT_IN_MS; private final AtomicInteger sequenceId = new AtomicInteger(0); private final List missingProperties = new ArrayList<>(); // list of active asynchronous queries. Used to see if session should be closed when connection // closes private Set activeAsyncQueries = ConcurrentHashMap.newKeySet(); private boolean isClosed = true; private String sessionToken; private String masterToken; private long masterTokenValidityInSeconds; private String idToken; private String mfaToken; private String oauthAccessToken; private String oauthRefreshToken; private String privateKeyFileLocation; private String privateKeyBase64; private String privateKeyPassword; private PrivateKey privateKey; private SFClientConfig sfClientConfig; /** * Amount of seconds a user is willing to tolerate for establishing the connection with database. * In our case, it means the first login request to get authorization token. * *
Default:300 seconds */ private int loginTimeout = 300; /** * Amount of milliseconds a user is willing to tolerate for network related issues (e.g. HTTP * 503/504) or database transient issues (e.g. GS not responding) * *
A value of 0 means no timeout * *
Default: 0 */ private int networkTimeoutInMilli = 0; // in milliseconds @Deprecated private int authTimeout = 0; private boolean enableCombineDescribe = false; private Duration httpClientConnectionTimeout = HttpUtil.getConnectionTimeout(); private Duration httpClientSocketTimeout = HttpUtil.getSocketTimeout(); // whether we try to simulate a socket timeout (a default value of 0 means // no simulation). The value is in milliseconds private int injectSocketTimeout = 0; // simulate client pause after initial execute and before first get-result // call ( a default value of 0 means no pause). The value is in seconds private int injectClientPause = 0; // session parameters private Map sessionParametersMap = new HashMap<>(); private boolean passcodeInPassword = false; // deprecated private Level tracingLevel = Level.INFO; // client to log session metrics to telemetry in GS private Telemetry telemetryClient; private SnowflakeConnectString sfConnStr; // The cache of query context sent from Cloud Service. QueryContextCache qcc; // Max retries for outgoing http requests. private int maxHttpRetries = 7; /** * Retry timeout in seconds. Cannot be less than 300. * *
Default: 300 */ private int retryTimeout = 300; private int defaultPlatformDetectionTimeoutMs = 200; private boolean enableClientStoreTemporaryCredential = true; private boolean enableClientRequestMfaToken = true; /** * Max timeout for external browser authentication in seconds * *
Default: 120 */ private Duration browserResponseTimeout = Duration.ofSeconds(120); private boolean javaUtilLoggingConsoleOut = false; private String javaUtilLoggingConsoleOutThreshold = null; private List httpHeadersCustomizers; // This constructor is used only by tests with no real connection. // For real connections, the other constructor is always used. @VisibleForTesting public SFSession() { this(new DefaultSFConnectionHandler(null)); } public SFSession(DefaultSFConnectionHandler sfConnectionHandler) { super(sfConnectionHandler); } /** * Function that checks if the active session can be closed when the connection is closed. If * there are active asynchronous queries running, the session should stay open even if the * connection closes so that the queries can finish running. * * @return true if it is safe to close this session, false if not */ @Override public boolean isSafeToClose() { boolean canClose = true; // if the set of asynchronous queries is empty, return true if (this.activeAsyncQueries.isEmpty()) { return canClose; } // if the set is not empty, iterate through each query and check its status for (String query : this.activeAsyncQueries) { try { QueryStatus queryStatus = getQueryStatus(query); // if any query is still running, it is not safe to close. if (queryStatus.isStillRunning()) { canClose = false; } } catch (SQLException e) { logger.error(e.getMessage(), true); } } return canClose; } /** * Add async query to list of active async queries based on its query ID * * @param queryID query ID */ public void addQueryToActiveQueryList(String queryID) { activeAsyncQueries.add(queryID); } private JsonNode getQueryMetadata(String queryID) throws SQLException { // create the URL to check the query monitoring endpoint String statusUrl = ""; String sessionUrl = getUrl(); if (sessionUrl.endsWith("/")) { statusUrl = sessionUrl.substring(0, sessionUrl.length() - 1) + SF_PATH_QUERY_MONITOR + queryID; } else { statusUrl = sessionUrl + SF_PATH_QUERY_MONITOR + queryID; } // Create a new HTTP GET object and set appropriate headers HttpGet get = new HttpGet(statusUrl); String response = null; JsonNode jsonNode = null; boolean sessionRenewed; // Do this while the session hasn't been renewed do { sessionRenewed = false; try { get.setHeader("Content-type", "application/json"); get.setHeader("Authorization", "Snowflake Token=\"" + this.sessionToken + "\""); response = HttpUtil.executeGeneralRequest( get, loginTimeout, 0, (int) httpClientSocketTimeout.toMillis(), maxHttpRetries, getHttpClientKey(), this); jsonNode = OBJECT_MAPPER.readTree(response); } catch (Exception e) { throw new SnowflakeSQLLoggedException( queryID, this, e.getMessage(), "No response or invalid response from GET request. 
Error: " + e.getMessage(), e); } // Get response as JSON and parse it to get the query status // check the success field first if (!jsonNode.path("success").asBoolean()) { logger.debug("Response: {}", response); int errorCode = jsonNode.path("code").asInt(); // If the error is due to an expired session token, try renewing the session and trying // again if (errorCode == Constants.SESSION_EXPIRED_GS_CODE) { try { this.renewSession(this.sessionToken); } catch (SnowflakeReauthenticationRequest | SFException ex) { // If we fail to renew the session based on a re-authentication error, try to // re-authenticate the session first if (ex instanceof SnowflakeReauthenticationRequest && this.isExternalbrowserOrOAuthFullFlowAuthenticator()) { try { this.open(internalCallMarker()); } catch (SFException e) { throw new SnowflakeSQLException(e); } } // If we reach a re-authentication error but cannot re-authenticate, throw an exception else if (ex instanceof SnowflakeReauthenticationRequest) { throw (SnowflakeSQLException) ex; } // If trying to renew the session results in an error for any other reason, throw an // exception else if (ex instanceof SFException) { throw new SnowflakeSQLException((SFException) ex); } throw new SnowflakeSQLException(null, queryID, ex.getMessage()); } sessionRenewed = true; // If the error code was not due to session renewal issues, throw an exception } else { throw new SnowflakeSQLException( queryID, jsonNode.path("message").asText(), SqlState.SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION, errorCode); } } } while (sessionRenewed); return jsonNode.path("data").path("queries"); } /** * @param queryID query ID of the query whose status is being investigated * @return a QueryStatus instance indicating the query's status * @throws SQLException if an error is encountered */ @Override public QueryStatus getQueryStatus(String queryID) throws SQLException { JsonNode queryNode = getQueryMetadata(queryID); logger.debug("Query status: {}", queryNode.asText()); if (queryNode.isEmpty()) { return QueryStatus.empty(); } JsonNode node = queryNode.get(0); long endTime = node.path("endTime").asLong(0); int errorCode = node.path("errorCode").asInt(0); String errorMessage = node.path("errorMessage").asText("No error reported"); String id = node.path("id").asText(""); String name = node.path("status").asText(""); long sessionId = node.path("sessionId").asLong(0); String sqlText = node.path("sqlText").asText(""); long startTime = node.path("startTime").asLong(0); String state = node.path("state").asText(""); int totalDuration = node.path("totalDuration").asInt(0); String warehouseExternalSize = node.path("warehouseExternalSize").asText(null); int warehouseId = node.path("warehouseId").asInt(0); String warehouseName = node.path("warehouseName").asText(null); String warehouseServerType = node.path("warehouseServerType").asText(null); QueryStatus result = new QueryStatus( endTime, errorCode, errorMessage, id, name, sessionId, sqlText, startTime, state, totalDuration, warehouseExternalSize, warehouseId, warehouseName, warehouseServerType); if (!result.isStillRunning()) { activeAsyncQueries.remove(queryID); } return result; } /** * Add a property If a property is known for connection, add it to connection properties If not, * add it as a dynamic session parameters * *
Make sure a property is not added more than once and the number of properties does not * exceed limit. * * @param propertyName property name * @param propertyValue property value * @throws SFException exception raised from Snowflake components */ public void addSFSessionProperty(String propertyName, Object propertyValue) throws SFException { SFSessionProperty connectionProperty = SFSessionProperty.lookupByKey(propertyName); if (connectionProperty != null) { addProperty(propertyName, propertyValue); // check if the value type is as expected propertyValue = SFSessionProperty.checkPropertyValue(connectionProperty, propertyValue); switch (connectionProperty) { case LOGIN_TIMEOUT: if (propertyValue != null) { loginTimeout = (Integer) propertyValue; } break; case NETWORK_TIMEOUT: if (propertyValue != null) { networkTimeoutInMilli = (Integer) propertyValue; } break; case INJECT_CLIENT_PAUSE: if (propertyValue != null) { injectClientPause = (Integer) propertyValue; } break; case INJECT_SOCKET_TIMEOUT: if (propertyValue != null) { injectSocketTimeout = (Integer) propertyValue; } break; case PASSCODE_IN_PASSWORD: passcodeInPassword = (propertyValue != null && (Boolean) propertyValue); break; case TRACING: if (propertyValue != null) { tracingLevel = Level.parse(((String) propertyValue).toUpperCase()); } break; case JAVA_LOGGING_CONSOLE_STD_OUT: if (propertyValue != null) { javaUtilLoggingConsoleOut = (Boolean) propertyValue; } break; case JAVA_LOGGING_CONSOLE_STD_OUT_THRESHOLD: if (propertyValue != null) { javaUtilLoggingConsoleOutThreshold = (String) propertyValue; } break; case DISABLE_SOCKS_PROXY: // note: if any session has this parameter, it will be used for all // sessions on the current JVM. if (propertyValue != null) { HttpUtil.setSocksProxyDisabled((Boolean) propertyValue); } break; case VALIDATE_DEFAULT_PARAMETERS: if (propertyValue != null) { setValidateDefaultParameters(getBooleanValue(propertyValue)); } break; case PRIVATE_KEY_FILE: if (propertyValue != null) { privateKeyFileLocation = (String) propertyValue; } break; case PRIVATE_KEY_BASE64: if (propertyValue != null) { privateKeyBase64 = (String) propertyValue; } break; case PRIVATE_KEY_FILE_PWD: case PRIVATE_KEY_PWD: if (propertyValue != null) { privateKeyPassword = (String) propertyValue; } break; case MAX_HTTP_RETRIES: if (propertyValue != null) { maxHttpRetries = (Integer) propertyValue; } break; case ENABLE_PUT_GET: if (propertyValue != null) { setEnablePutGet(getBooleanValue(propertyValue)); } break; case ENABLE_COPY_RESULT_SET: if (propertyValue != null) { setEnableCopyResultSet(getBooleanValue(propertyValue)); } break; case RETRY_TIMEOUT: if (propertyValue != null) { int timeoutValue = (Integer) propertyValue; if (timeoutValue >= 300 || timeoutValue == 0) { retryTimeout = timeoutValue; } } break; case ENABLE_PATTERN_SEARCH: if (propertyValue != null) { setEnablePatternSearch(getBooleanValue(propertyValue)); } break; case ENABLE_EXACT_SCHEMA_SEARCH_ENABLED: if (propertyValue != null) { setEnableExactSchemaSearch(getBooleanValue(propertyValue)); } break; case ENABLE_WILDCARDS_IN_SHOW_METADATA_COMMANDS: if (propertyValue != null) { setEnableWildcardsInShowMetadataCommands(getBooleanValue(propertyValue)); } break; case DISABLE_GCS_DEFAULT_CREDENTIALS: if (propertyValue != null) { setDisableGcsDefaultCredentials(getBooleanValue(propertyValue)); } break; case JDBC_ARROW_TREAT_DECIMAL_AS_INT: if (propertyValue != null) { setJdbcArrowTreatDecimalAsInt(getBooleanValue(propertyValue)); } break; case BROWSER_RESPONSE_TIMEOUT: if 
(propertyValue != null) { browserResponseTimeout = Duration.ofSeconds((Integer) propertyValue); } break; case JDBC_DEFAULT_FORMAT_DATE_WITH_TIMEZONE: if (propertyValue != null) { setDefaultFormatDateWithTimezone(getBooleanValue(propertyValue)); } break; case JDBC_GET_DATE_USE_NULL_TIMEZONE: if (propertyValue != null) { setGetDateUseNullTimezone(getBooleanValue(propertyValue)); } break; case ENABLE_CLIENT_STORE_TEMPORARY_CREDENTIAL: if (propertyValue != null) { enableClientStoreTemporaryCredential = getBooleanValue(propertyValue); } break; case ENABLE_CLIENT_REQUEST_MFA_TOKEN: if (propertyValue != null) { enableClientRequestMfaToken = getBooleanValue(propertyValue); } break; case IMPLICIT_SERVER_SIDE_QUERY_TIMEOUT: if (propertyValue != null) { setImplicitServerSideQueryTimeout(getBooleanValue(propertyValue)); } break; case CLEAR_BATCH_ONLY_AFTER_SUCCESSFUL_EXECUTION: if (propertyValue != null) { setClearBatchOnlyAfterSuccessfulExecution(getBooleanValue(propertyValue)); } break; case CLIENT_TREAT_TIME_AS_WALL_CLOCK_TIME: if (propertyValue != null) { setTreatTimeAsWallClockTime(getBooleanValue(propertyValue)); } break; case OWNER_ONLY_STAGE_FILE_PERMISSIONS_ENABLED: if (propertyValue != null) { setOwnerOnlyStageFilePermissionsEnabled(getBooleanValue(propertyValue)); } break; case MIN_TLS_VERSION: if (propertyValue != null) { SFSSLConnectionSocketFactory.setMinTlsVersion((String) propertyValue); } break; case MAX_TLS_VERSION: if (propertyValue != null) { SFSSLConnectionSocketFactory.setMaxTlsVersion((String) propertyValue); } break; case ALLOW_CERTIFICATES_WITHOUT_CRL_URL: if (propertyValue != null) { setAllowCertificatesWithoutCrlUrl(getBooleanValue(propertyValue)); } break; default: break; } } else { // this property does not match any predefined property, treat it as // session parameter if (sessionParametersMap.containsKey(propertyName)) { throw new SFException(ErrorCode.DUPLICATE_CONNECTION_PROPERTY_SPECIFIED, propertyName); } else { sessionParametersMap.put(propertyName, propertyValue); } // check if the number of session properties exceed limit if (sessionParametersMap.size() > MAX_SESSION_PARAMETERS) { throw new SFException(ErrorCode.TOO_MANY_SESSION_PARAMETERS, MAX_SESSION_PARAMETERS); } } } public void overrideConsoleHandlerWhenNecessary() { if (javaUtilLoggingConsoleOut) { JDK14Logger.useStdOutConsoleHandler(javaUtilLoggingConsoleOutThreshold); } } public boolean containProperty(String key) { return sessionParametersMap.containsKey(key); } /** * Open a new database session * * @throws SFException this is a runtime exception * @throws SnowflakeSQLException exception raised from Snowflake components */ @VisibleForTesting static void checkAwsExternalIdEnabled(Map props) throws SFException { if (!AuthenticatorType.WORKLOAD_IDENTITY .name() .equalsIgnoreCase((String) props.get(SFSessionProperty.AUTHENTICATOR)) || !"aws" .equalsIgnoreCase((String) props.get(SFSessionProperty.WORKLOAD_IDENTITY_PROVIDER))) { return; } String awsExternalId = (String) props.get(SFSessionProperty.WORKLOAD_IDENTITY_AWS_EXTERNAL_ID); if (!SnowflakeUtil.convertSystemGetEnvToBooleanValue(SF_ENABLE_WIF_AWS_EXTERNAL_ID, false) && awsExternalId != null && !awsExternalId.isEmpty()) { throw new SFException( ErrorCode.WORKLOAD_IDENTITY_FLOW_ERROR, "Connection property workloadIdentityAwsExternalId is not enabled"); } } public synchronized void open() throws SFException, SnowflakeSQLException { open(null); } public synchronized void open(InternalCallMarker internalCallMarker) throws SFException, SnowflakeSQLException { 
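// Illustrative sketch (added note, not part of the original source): a hypothetical
// caller normally reaches open() through the connection handler after registering
// properties; a hedged sketch of that flow, using only members shown in this file:
//
//   SFSession session = new SFSession(connectionHandler); // a DefaultSFConnectionHandler
//   session.addSFSessionProperty("serverURL", "https://account.snowflakecomputing.com");
//   session.addSFSessionProperty("account", "account");
//   session.open(); // performs the login round trip and starts the heartbeat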
recordIfExternal("SFSession", "open", internalCallMarker); Stopwatch stopwatch = new Stopwatch(); stopwatch.start(); performSanityCheckOnProperties(); Map connectionPropertiesMap = getConnectionPropertiesMap(); logger.info( "Opening session with server: {}, account: {}, user: {}, password is {}, role: {}, database: {}, schema: {}," + " warehouse: {}, validate default parameters: {}, authenticator: {}, ocsp mode: {}," + " passcode in password: {}, passcode is {}, private key is {}, disable socks proxy: {}," + " application: {}, app id: {}, app version: {}, login timeout: {}, retry timeout: {}, network timeout: {}," + " query timeout: {}, connection timeout: {}, socket timeout: {}, tracing: {}," + " private key file: {}, private key base 64: {}, private key pwd is {}," + " enable_diagnostics: {}, diagnostics_allowlist_path: {}," + " session parameters: client store temporary credential: {}, gzip disabled: {}, browser response timeout: {}", connectionPropertiesMap.get(SFSessionProperty.SERVER_URL), connectionPropertiesMap.get(SFSessionProperty.ACCOUNT), connectionPropertiesMap.get(SFSessionProperty.USER), SFLoggerUtil.isVariableProvided( (String) connectionPropertiesMap.get(SFSessionProperty.PASSWORD)), connectionPropertiesMap.get(SFSessionProperty.ROLE), connectionPropertiesMap.get(SFSessionProperty.DATABASE), connectionPropertiesMap.get(SFSessionProperty.SCHEMA), connectionPropertiesMap.get(SFSessionProperty.WAREHOUSE), connectionPropertiesMap.get(SFSessionProperty.VALIDATE_DEFAULT_PARAMETERS), connectionPropertiesMap.get(SFSessionProperty.AUTHENTICATOR), getOCSPMode().name(), connectionPropertiesMap.get(SFSessionProperty.PASSCODE_IN_PASSWORD), SFLoggerUtil.isVariableProvided( (String) connectionPropertiesMap.get(SFSessionProperty.PASSCODE)), SFLoggerUtil.isVariableProvided(connectionPropertiesMap.get(SFSessionProperty.PRIVATE_KEY)), connectionPropertiesMap.get(SFSessionProperty.DISABLE_SOCKS_PROXY), connectionPropertiesMap.get(SFSessionProperty.APPLICATION), connectionPropertiesMap.get(SFSessionProperty.APP_ID), connectionPropertiesMap.get(SFSessionProperty.APP_VERSION), connectionPropertiesMap.get(SFSessionProperty.LOGIN_TIMEOUT), connectionPropertiesMap.get(SFSessionProperty.RETRY_TIMEOUT), connectionPropertiesMap.get(SFSessionProperty.NETWORK_TIMEOUT), connectionPropertiesMap.get(SFSessionProperty.QUERY_TIMEOUT), connectionPropertiesMap.get(SFSessionProperty.HTTP_CLIENT_CONNECTION_TIMEOUT), connectionPropertiesMap.get(SFSessionProperty.HTTP_CLIENT_SOCKET_TIMEOUT), connectionPropertiesMap.get(SFSessionProperty.TRACING), connectionPropertiesMap.get(SFSessionProperty.PRIVATE_KEY_FILE), SFLoggerUtil.isVariableProvided( (String) connectionPropertiesMap.get(SFSessionProperty.PRIVATE_KEY_BASE64)), SFLoggerUtil.isVariableProvided( (String) connectionPropertiesMap.getOrDefault( SFSessionProperty.PRIVATE_KEY_PWD, connectionPropertiesMap.get(SFSessionProperty.PRIVATE_KEY_FILE_PWD))), connectionPropertiesMap.get(SFSessionProperty.ENABLE_DIAGNOSTICS), connectionPropertiesMap.get(SFSessionProperty.DIAGNOSTICS_ALLOWLIST_FILE), sessionParametersMap.get(SessionUtil.CLIENT_STORE_TEMPORARY_CREDENTIAL), connectionPropertiesMap.get(SFSessionProperty.GZIP_DISABLED), connectionPropertiesMap.get(SFSessionProperty.BROWSER_RESPONSE_TIMEOUT)); HttpClientSettingsKey httpClientSettingsKey = getHttpClientKey(); logger.debug( "Connection proxy parameters: use proxy: {}, proxy host: {}, proxy port: {}, proxy user: {}," + " proxy password is {}, non proxy hosts: {}, proxy protocol: {}", 
httpClientSettingsKey.usesProxy(), httpClientSettingsKey.getProxyHost(), httpClientSettingsKey.getProxyPort(), httpClientSettingsKey.getProxyUser(), SFLoggerUtil.isVariableProvided(httpClientSettingsKey.getProxyPassword()), httpClientSettingsKey.getNonProxyHosts(), httpClientSettingsKey.getProxyHttpProtocol()); checkAwsExternalIdEnabled(connectionPropertiesMap); // TODO: temporarily hardcode sessionParameter debug info. will be changed in the future SFLoginInput loginInput = new SFLoginInput(); SFOauthLoginInput oauthLoginInput = new SFOauthLoginInput( (String) connectionPropertiesMap.get(SFSessionProperty.OAUTH_CLIENT_ID), (String) connectionPropertiesMap.get(SFSessionProperty.OAUTH_CLIENT_SECRET), (String) connectionPropertiesMap.get(SFSessionProperty.OAUTH_REDIRECT_URI), (String) connectionPropertiesMap.get(SFSessionProperty.OAUTH_AUTHORIZATION_URL), (String) connectionPropertiesMap.get(SFSessionProperty.OAUTH_TOKEN_REQUEST_URL), (String) connectionPropertiesMap.get(SFSessionProperty.OAUTH_SCOPE), getBooleanValue( connectionPropertiesMap.get( SFSessionProperty.OAUTH_ENABLE_SINGLE_USE_REFRESH_TOKENS))); loginInput .setServerUrl((String) connectionPropertiesMap.get(SFSessionProperty.SERVER_URL)) .setDatabaseName((String) connectionPropertiesMap.get(SFSessionProperty.DATABASE)) .setSchemaName((String) connectionPropertiesMap.get(SFSessionProperty.SCHEMA)) .setWarehouse((String) connectionPropertiesMap.get(SFSessionProperty.WAREHOUSE)) .setRole((String) connectionPropertiesMap.get(SFSessionProperty.ROLE)) .setValidateDefaultParameters( connectionPropertiesMap.get(SFSessionProperty.VALIDATE_DEFAULT_PARAMETERS)) .setAuthenticator((String) connectionPropertiesMap.get(SFSessionProperty.AUTHENTICATOR)) .setOriginalAuthenticator( (String) connectionPropertiesMap.get(SFSessionProperty.AUTHENTICATOR)) .setOKTAUserName((String) connectionPropertiesMap.get(SFSessionProperty.OKTA_USERNAME)) .setAccountName((String) connectionPropertiesMap.get(SFSessionProperty.ACCOUNT)) .setLoginTimeout(loginTimeout) .setRetryTimeout(retryTimeout) .setAuthTimeout(authTimeout) .setUserName((String) connectionPropertiesMap.get(SFSessionProperty.USER)) .setPassword((String) connectionPropertiesMap.get(SFSessionProperty.PASSWORD)) .setToken((String) connectionPropertiesMap.get(SFSessionProperty.TOKEN)) .setPasscodeInPassword(passcodeInPassword) .setPasscode((String) connectionPropertiesMap.get(SFSessionProperty.PASSCODE)) .setConnectionTimeout( connectionPropertiesMap.get(SFSessionProperty.HTTP_CLIENT_CONNECTION_TIMEOUT) != null ? Duration.ofMillis( (int) connectionPropertiesMap.get( SFSessionProperty.HTTP_CLIENT_CONNECTION_TIMEOUT)) : httpClientConnectionTimeout) .setSocketTimeout( connectionPropertiesMap.get(SFSessionProperty.HTTP_CLIENT_SOCKET_TIMEOUT) != null ? 
Duration.ofMillis( (int) connectionPropertiesMap.get(SFSessionProperty.HTTP_CLIENT_SOCKET_TIMEOUT)) : httpClientSocketTimeout) .setAppId((String) connectionPropertiesMap.get(SFSessionProperty.APP_ID)) .setAppVersion((String) connectionPropertiesMap.get(SFSessionProperty.APP_VERSION)) .setSessionParameters(sessionParametersMap) .setPrivateKey((PrivateKey) connectionPropertiesMap.get(SFSessionProperty.PRIVATE_KEY)) .setPrivateKeyFile((String) connectionPropertiesMap.get(SFSessionProperty.PRIVATE_KEY_FILE)) .setOauthLoginInput(oauthLoginInput) .setWorkloadIdentityProvider( (String) connectionPropertiesMap.get(SFSessionProperty.WORKLOAD_IDENTITY_PROVIDER)) .setWorkloadIdentityEntraResource( (String) connectionPropertiesMap.get(SFSessionProperty.WORKLOAD_IDENTITY_ENTRA_RESOURCE)) .setWorkloadIdentityImpersonationPath( (String) connectionPropertiesMap.get(SFSessionProperty.WORKLOAD_IDENTITY_IMPERSONATION_PATH)) .setWorkloadIdentityAwsExternalId( (String) connectionPropertiesMap.get(SFSessionProperty.WORKLOAD_IDENTITY_AWS_EXTERNAL_ID)) .setPrivateKeyBase64( (String) connectionPropertiesMap.get(SFSessionProperty.PRIVATE_KEY_BASE64)) .setPrivateKeyPwd( (String) connectionPropertiesMap.getOrDefault( SFSessionProperty.PRIVATE_KEY_PWD, connectionPropertiesMap.get(SFSessionProperty.PRIVATE_KEY_FILE_PWD))) .setApplication((String) connectionPropertiesMap.get(SFSessionProperty.APPLICATION)) .setServiceName(getServiceName()) .setOCSPMode(getOCSPMode()) .setHttpClientSettingsKey(httpClientSettingsKey) .setDisableConsoleLogin( connectionPropertiesMap.get(SFSessionProperty.DISABLE_CONSOLE_LOGIN) != null ? getBooleanValue( connectionPropertiesMap.get(SFSessionProperty.DISABLE_CONSOLE_LOGIN)) : true) .setDisableSamlURLCheck( connectionPropertiesMap.get(SFSessionProperty.DISABLE_SAML_URL_CHECK) != null ? getBooleanValue( connectionPropertiesMap.get(SFSessionProperty.DISABLE_SAML_URL_CHECK)) : false) .setEnableClientStoreTemporaryCredential(enableClientStoreTemporaryCredential) .setEnableClientRequestMfaToken(enableClientRequestMfaToken) .setBrowserResponseTimeout(browserResponseTimeout) .setPlatformDetectionTimeoutMs( connectionPropertiesMap.get(SFSessionProperty.PLATFORM_DETECTION_TIMEOUT_MS) != null ? (int) connectionPropertiesMap.get(SFSessionProperty.PLATFORM_DETECTION_TIMEOUT_MS) : defaultPlatformDetectionTimeoutMs) .setDisablePlatformDetection( connectionPropertiesMap.get(SFSessionProperty.DISABLE_PLATFORM_DETECTION) != null ? getBooleanValue( connectionPropertiesMap.get(SFSessionProperty.DISABLE_PLATFORM_DETECTION)) : false) // Default to false (platform detection enabled) .setMaxRetryCount(maxHttpRetries); logger.info( "Connecting to {} Snowflake domain", loginInput.getHostFromServerUrl().toLowerCase().endsWith(".cn") ? "CHINA" : "GLOBAL"); // we ignore the parameters CLIENT_OUT_OF_BAND_TELEMETRY_ENABLED and htapOOBTelemetryEnabled // OOB telemetry is disabled TelemetryService.disableOOBTelemetry(); // propagate OCSP mode to SFTrustManager. Note OCSP setting is global on JVM. 
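// Illustrative note (added, not part of the original source): like the OCSP mode
// mentioned above, several knobs applied during open() mutate static, JVM-wide
// state rather than per-session state (the HttpUtil timeouts set just below, SOCKS
// proxy disabling, and the TLS bounds on SFSSLConnectionSocketFactory). A hedged
// sketch, with the property keys for the TLS bounds assumed here, not confirmed by
// the visible enum:
//
//   props.put("disableSocksProxy", "true"); // -> HttpUtil.setSocksProxyDisabled, all sessions
//   props.put("minTlsVersion", "TLSv1.2");  // assumed key -> setMinTlsVersion (static)
//   props.put("maxTlsVersion", "TLSv1.3");  // assumed key -> setMaxTlsVersion (static)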
HttpUtil.setConnectionTimeout(loginInput.getConnectionTimeoutInMillis()); HttpUtil.setSocketTimeout(loginInput.getSocketTimeoutInMillis()); HttpUtil.initHttpClient(httpClientSettingsKey, null, httpHeadersCustomizers); runDiagnosticsIfEnabled(); SFLoginOutput loginOutput = SessionUtil.openSession(loginInput, connectionPropertiesMap, tracingLevel.toString()); isClosed = false; authTimeout = loginInput.getAuthTimeout(); sessionToken = loginOutput.getSessionToken(); masterToken = loginOutput.getMasterToken(); idToken = loginOutput.getIdToken(); mfaToken = loginOutput.getMfaToken(); oauthAccessToken = loginOutput.getOauthAccessToken(); oauthRefreshToken = loginOutput.getOauthRefreshToken(); setDatabaseVersion(loginOutput.getDatabaseVersion()); setDatabaseMajorVersion(loginOutput.getDatabaseMajorVersion()); setDatabaseMinorVersion(loginOutput.getDatabaseMinorVersion()); httpClientSocketTimeout = loginOutput.getHttpClientSocketTimeout(); httpClientConnectionTimeout = loginOutput.getHttpClientConnectionTimeout(); masterTokenValidityInSeconds = loginOutput.getMasterTokenValidityInSeconds(); setDatabase(loginOutput.getSessionDatabase()); setSchema(loginOutput.getSessionSchema()); setRole(loginOutput.getSessionRole()); setWarehouse(loginOutput.getSessionWarehouse()); setSessionId(loginOutput.getSessionId()); setAutoCommit(loginOutput.getAutoCommit()); extractAndUpdateStickyHttpHeaders(loginOutput.getLoginResponseHeaders()); // Update common parameter values for this session SessionUtil.updateSfDriverParamValues(loginOutput.getCommonParams(), this); String loginDatabaseName = (String) connectionPropertiesMap.get(SFSessionProperty.DATABASE); String loginSchemaName = (String) connectionPropertiesMap.get(SFSessionProperty.SCHEMA); String loginRole = (String) connectionPropertiesMap.get(SFSessionProperty.ROLE); String loginWarehouse = (String) connectionPropertiesMap.get(SFSessionProperty.WAREHOUSE); if (loginDatabaseName != null && !loginDatabaseName.equalsIgnoreCase(getDatabase())) { sqlWarnings.add( new SFException( ErrorCode.CONNECTION_ESTABLISHED_WITH_DIFFERENT_PROP, "Database", loginDatabaseName, getDatabase())); } if (loginSchemaName != null && !loginSchemaName.equalsIgnoreCase(getSchema())) { sqlWarnings.add( new SFException( ErrorCode.CONNECTION_ESTABLISHED_WITH_DIFFERENT_PROP, "Schema", loginSchemaName, getSchema())); } if (loginRole != null && !loginRole.equalsIgnoreCase(getRole())) { sqlWarnings.add( new SFException( ErrorCode.CONNECTION_ESTABLISHED_WITH_DIFFERENT_PROP, "Role", loginRole, getRole())); } if (loginWarehouse != null && !loginWarehouse.equalsIgnoreCase(getWarehouse())) { sqlWarnings.add( new SFException( ErrorCode.CONNECTION_ESTABLISHED_WITH_DIFFERENT_PROP, "Warehouse", loginWarehouse, getWarehouse())); } boolean disableQueryContextCache = getDisableQueryContextCacheOption(); logger.debug( "Query context cache is {}", ((disableQueryContextCache) ? 
"disabled" : "enabled")); // Initialize QCC if (!disableQueryContextCache) { qcc = new QueryContextCache(this.getQueryContextCacheSize()); } else { qcc = null; } // start heartbeat for this session so that the master token will not expire startHeartbeatForThisSession(); this.getTelemetryClient(internalCallMarker()); // Flush any internal API usage telemetry that accumulated before session login InternalApiTelemetryTracker.flush(getTelemetryClient(internalCallMarker())); // Send minicore telemetry after session is established sendMinicoreTelemetry(); stopwatch.stop(); logger.debug("Session {} opened in {} ms.", getSessionId(), stopwatch.elapsedMillis()); } private void sendMinicoreTelemetry() { try { Telemetry telemetry = getTelemetryClient(internalCallMarker()); if (!(telemetry instanceof TelemetryClient)) { logger.trace("Telemetry client not available, skipping minicore telemetry"); return; } TelemetryClient telemetryClient = (TelemetryClient) telemetry; MinicoreTelemetry minicoreTelemetry = MinicoreTelemetry.create(); telemetryClient.addLogToBatch( minicoreTelemetry.toInBandTelemetryNode(), System.currentTimeMillis()); logger.trace("Queued minicore telemetry for sending"); } catch (Exception e) { // Never fail the session due to telemetry logger.trace("Failed to send minicore telemetry: {}", e.getMessage()); } } /** * If authenticator is null and private key is specified, jdbc will assume key pair authentication * * @return true if authenticator type is SNOWFLAKE (meaning password) */ private boolean isSnowflakeAuthenticator() { Map connectionPropertiesMap = getConnectionPropertiesMap(); String authenticator = (String) connectionPropertiesMap.get(SFSessionProperty.AUTHENTICATOR); PrivateKey privateKey = (PrivateKey) connectionPropertiesMap.get(SFSessionProperty.PRIVATE_KEY); return (authenticator == null && privateKey == null && privateKeyFileLocation == null && privateKeyBase64 == null) || AuthenticatorType.SNOWFLAKE.name().equalsIgnoreCase(authenticator); } boolean isExternalbrowserOrOAuthFullFlowAuthenticator() { Map connectionPropertiesMap = getConnectionPropertiesMap(); String authenticator = (String) connectionPropertiesMap.get(SFSessionProperty.AUTHENTICATOR); return AuthenticatorType.EXTERNALBROWSER.name().equalsIgnoreCase(authenticator) || AuthenticatorType.OAUTH_AUTHORIZATION_CODE.name().equalsIgnoreCase(authenticator) || AuthenticatorType.OAUTH_CLIENT_CREDENTIALS.name().equalsIgnoreCase(authenticator); } /** * Returns true if authenticator is OKTA native * * @return true or false */ boolean isOKTAAuthenticator() { Map connectionPropertiesMap = getConnectionPropertiesMap(); String authenticator = (String) connectionPropertiesMap.get(SFSessionProperty.AUTHENTICATOR); return !isNullOrEmpty(authenticator) && authenticator.startsWith("https://"); } /** * Returns true if authenticator is UsernamePasswordMFA native * * @return true or false */ boolean isUsernamePasswordMFAAuthenticator() { Map connectionPropertiesMap = getConnectionPropertiesMap(); String authenticator = (String) connectionPropertiesMap.get(SFSessionProperty.AUTHENTICATOR); return AuthenticatorType.USERNAME_PASSWORD_MFA.name().equalsIgnoreCase(authenticator); } /** * A helper function to call global service and renew session. 
* * @param prevSessionToken the session token that has expired * @throws SnowflakeSQLException if failed to renew the session * @throws SFException if failed to renew the session */ synchronized void renewSession(String prevSessionToken) throws SFException, SnowflakeSQLException { if (sessionToken != null && !sessionToken.equals(prevSessionToken)) { logger.debug( "Not renewing session {} because session token has not been updated.", getSessionId()); return; } Stopwatch stopwatch = new Stopwatch(); stopwatch.start(); logger.debug("Renewing session {}", getSessionId()); SFLoginInput loginInput = new SFLoginInput(); loginInput .setServerUrl(getServerUrl()) .setSessionToken(sessionToken) .setMasterToken(masterToken) .setIdToken(idToken) .setMfaToken(mfaToken) .setOauthAccessToken(oauthAccessToken) .setOauthRefreshToken(oauthRefreshToken) .setLoginTimeout(loginTimeout) .setRetryTimeout(retryTimeout) .setDatabaseName(getDatabase()) .setSchemaName(getSchema()) .setRole(getRole()) .setWarehouse(getWarehouse()) .setOCSPMode(getOCSPMode()) .setHttpClientSettingsKey(getHttpClientKey()); SFLoginOutput loginOutput = SessionUtil.renewSession(loginInput, this); sessionToken = loginOutput.getSessionToken(); masterToken = loginOutput.getMasterToken(); stopwatch.stop(); logger.debug( "Session {} renewed successfully in {} ms", getSessionId(), stopwatch.elapsedMillis()); } /** * get session token * * @return session token */ public String getSessionToken() { return getSessionToken(null); } public String getSessionToken(InternalCallMarker internalCallMarker) { recordIfExternal("SFSession", "getSessionToken", internalCallMarker); return sessionToken; } public void close(InternalCallMarker internalCallMarker) throws SFException, SnowflakeSQLException { recordIfExternal("SFSession", "close", internalCallMarker); logger.debug("Closing session {}", getSessionId()); // stop heartbeat for this session stopHeartbeatForThisSession(); if (isClosed) { logger.debug("Session {} is already closed", getSessionId()); return; } Stopwatch stopwatch = new Stopwatch(); stopwatch.start(); SFLoginInput loginInput = new SFLoginInput(); loginInput .setServerUrl(getServerUrl()) .setSessionToken(sessionToken) .setLoginTimeout(loginTimeout) .setRetryTimeout(retryTimeout) .setOCSPMode(getOCSPMode()) .setHttpClientSettingsKey(getHttpClientKey()); SessionUtil.closeSession(loginInput, this); InternalApiTelemetryTracker.flush(getTelemetryClient(internalCallMarker())); closeTelemetryClient(); getClientInfo().clear(); // qcc can be null, if disabled. if (qcc != null) { qcc.clearCache(); } stopwatch.stop(); logger.debug( "Session {} has been successfully closed in {} ms", getSessionId(), stopwatch.elapsedMillis()); isClosed = true; } /** * Makes a heartbeat call to check for session validity. * * @param timeout the query timeout * @throws Exception if an error occurs * @throws SFException exception raised from Snowflake */ public void callHeartBeat(int timeout) throws Exception, SFException { if (timeout > 0) { callHeartBeatWithQueryTimeout(timeout); } else { heartbeat(); } } /** * Makes a heartbeat call with query timeout to check for session validity. 
* * @param timeout the query timeout * @throws Exception if an error occurs * @throws SFException exception raised from Snowflake */ private void callHeartBeatWithQueryTimeout(int timeout) throws Exception, SFException { class HeartbeatTask implements Callable { @Override public Void call() throws SQLException { try { heartbeat(); } catch (SFException e) { throw new SnowflakeSQLException(e, e.getSqlState(), e.getVendorCode(), e.getParams()); } return null; } } ExecutorService executor = Executors.newSingleThreadExecutor(); Future future = executor.submit(new HeartbeatTask()); // Cancel the heartbeat call when timeout is reached try { future.get(timeout, TimeUnit.SECONDS); } catch (TimeoutException e) { future.cancel(true); throw new SFException(ErrorCode.QUERY_CANCELED); } finally { executor.shutdownNow(); } } /** Start heartbeat for this session */ protected void startHeartbeatForThisSession() { if (getEnableHeartbeat() && !isNullOrEmpty(masterToken)) { logger.debug( "Session {} start heartbeat, master token validity: {} s", getSessionId(), masterTokenValidityInSeconds); HeartbeatRegistry.getInstance() .addSession(this, masterTokenValidityInSeconds, heartbeatFrequency); } else { logger.debug("Heartbeat not enabled for the session {}", getSessionId()); } } /** Stop heartbeat for this session */ protected void stopHeartbeatForThisSession() { if (getEnableHeartbeat() && !isNullOrEmpty(masterToken)) { logger.debug("Session {} stop heartbeat", getSessionId()); HeartbeatRegistry.getInstance().removeSession(this); } else { logger.debug("Heartbeat not enabled for the session {}", getSessionId()); } } /** * Send heartbeat for the session * * @throws SFException exception raised from Snowflake * @throws SQLException exception raised from SQL generic layers */ protected void heartbeat() throws SFException, SQLException { logger.debug("Session {} heartbeat", getSessionId()); if (isClosed) { return; } Stopwatch stopwatch = new Stopwatch(); stopwatch.start(); HttpPost postRequest = null; String requestId = UUIDUtils.getUUID().toString(); boolean retry = false; // the loop for retrying if it runs into session expiration do { try { URIBuilder uriBuilder; uriBuilder = new URIBuilder(getServerUrl()); uriBuilder.addParameter(SFSession.SF_QUERY_REQUEST_ID, requestId); uriBuilder.setPath(SF_PATH_SESSION_HEARTBEAT); postRequest = new HttpPost(uriBuilder.build()); // remember the session token in case it expires we need to renew // the session only when no other thread has renewed it String prevSessionToken = sessionToken; postRequest.setHeader( SF_HEADER_AUTHORIZATION, SF_HEADER_SNOWFLAKE_AUTHTYPE + " " + SF_HEADER_TOKEN_TAG + "=\"" + prevSessionToken + "\""); logger.debug("Executing heartbeat request: {}", postRequest.toString()); // the following will retry transient network issues // increase heartbeat timeout from 60 sec to 300 sec // per https://support-snowflake.zendesk.com/agent/tickets/6629 int SF_HEARTBEAT_TIMEOUT = 300; String theResponse = HttpUtil.executeGeneralRequest( postRequest, SF_HEARTBEAT_TIMEOUT, 0, (int) httpClientSocketTimeout.toMillis(), 0, getHttpClientKey(), this); JsonNode rootNode; logger.debug("Connection heartbeat response: {}", theResponse); rootNode = OBJECT_MAPPER.readTree(theResponse); // check the response to see if it is session expiration response if (rootNode != null && (Constants.SESSION_EXPIRED_GS_CODE == rootNode.path("code").asInt())) { logger.debug("Renew session and retry", false); this.renewSession(prevSessionToken); retry = true; continue; } 
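// Illustrative sketch (added note, not part of the original source): the block
// above is a renew-and-retry pattern; on a SESSION_EXPIRED response the token is
// refreshed and the heartbeat replayed. A minimal standalone shape of the pattern,
// with Response, send, and renew as hypothetical helpers:
//
//   boolean retry;
//   do {
//     retry = false;
//     Response r = send(request, token);  // hypothetical HTTP call
//     if (r.code() == SESSION_EXPIRED) {
//       token = renew(token);             // guarded so only one thread renews
//       retry = true;
//     }
//   } while (retry);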
SnowflakeUtil.checkErrorAndThrowException(rootNode); // success retry = false; } catch (Throwable ex) { // for snowflake exception, just rethrow it if (ex instanceof SnowflakeSQLException) { throw (SnowflakeSQLException) ex; } logger.error("Unexpected exception", ex); throw new SFException( ErrorCode.INTERNAL_ERROR, SFException.oneLiner("unexpected exception", ex)); } } while (retry); stopwatch.stop(); logger.debug( "Session {} heartbeat successful in {} ms", getSessionId(), stopwatch.elapsedMillis()); } void injectedDelay() { AtomicInteger injectedDelay = getInjectedDelay(); int d = injectedDelay.get(); if (d != 0) { injectedDelay.set(0); try { logger.trace("delayed for {}", d); Thread.sleep(d); } catch (InterruptedException ex) { } } } public int getInjectSocketTimeout() { return injectSocketTimeout; } public void setInjectSocketTimeout(int injectSocketTimeout) { this.injectSocketTimeout = injectSocketTimeout; } public int getNetworkTimeoutInMilli() { return networkTimeoutInMilli; } public int getAuthTimeout() { return authTimeout; } public int getHttpClientSocketTimeout() { return (int) httpClientSocketTimeout.toMillis(); } public int getHttpClientConnectionTimeout() { return (int) httpClientConnectionTimeout.toMillis(); } public boolean isClosed() { return isClosed; } public int getInjectClientPause() { return injectClientPause; } public int getMaxHttpRetries() { return maxHttpRetries; } public void setInjectClientPause(int injectClientPause) { this.injectClientPause = injectClientPause; } protected int getAndIncrementSequenceId() { return sequenceId.getAndIncrement(); } public boolean getEnableCombineDescribe() { return this.enableCombineDescribe; } public void setEnableCombineDescribe(boolean enable) { this.enableCombineDescribe = enable; } public synchronized Telemetry getTelemetryClient(InternalCallMarker internalCallMarker) { recordIfExternal("SFSession", "getTelemetryClient", internalCallMarker); // initialize for the first time. this should only be done after session // properties have been set, else the client won't properly resolve the URL. 
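// Illustrative sketch (added note, not part of the original source): the block
// below is lazy initialization with a safe fallback; a no-op client is returned
// until the session URL is known. Minimal shape of the pattern, with createClient()
// as a hypothetical factory and NoOpTelemetryClient as shown in this file:
//
//   if (client == null) {
//     if (getUrl() == null) {
//       return new NoOpTelemetryClient(); // safe default before login completes
//     }
//     client = createClient();
//   }
//   return client;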
if (telemetryClient == null) { if (getUrl() == null) { logger.debug( "Telemetry client requested before session properties set; returning no-op client"); return new NoOpTelemetryClient(); } telemetryClient = TelemetryClient.createTelemetry(this); // Provide the real telemetry client to the CRL validator for this session's // HttpClientSettingsKey try { CRLValidator.setTelemetryClientForKey(getHttpClientKey(), telemetryClient); } catch (Exception e) { logger.warn("Failed to provide telemetry client to CRL trust manager: {}", e.getMessage()); } } return telemetryClient; } public void closeTelemetryClient() { if (telemetryClient != null) { telemetryClient.close(); } } public String getIdToken() { return getIdToken(null); } public String getIdToken(InternalCallMarker internalCallMarker) { recordIfExternal("SFSession", "getIdToken", internalCallMarker); return idToken; } public String getAccessToken() { return getAccessToken(null); } public String getAccessToken(InternalCallMarker internalCallMarker) { recordIfExternal("SFSession", "getAccessToken", internalCallMarker); return oauthAccessToken; } public String getMfaToken() { return getMfaToken(null); } public String getMfaToken(InternalCallMarker internalCallMarker) { recordIfExternal("SFSession", "getMfaToken", internalCallMarker); return mfaToken; } public SnowflakeConnectString getSnowflakeConnectionString() { return sfConnStr; } public void setSnowflakeConnectionString(SnowflakeConnectString connStr) { sfConnStr = connStr; } /** * Performs a sanity check on properties. Sanity checking includes: - verifying that a server url * is present - verifying various combinations of properties given the authenticator * * @throws SFException Will be thrown if any of the necessary properties are missing */ private void performSanityCheckOnProperties() throws SFException { Map connectionPropertiesMap = getConnectionPropertiesMap(); for (SFSessionProperty property : SFSessionProperty.values()) { if (property.isRequired() && !connectionPropertiesMap.containsKey(property)) { switch (property) { case SERVER_URL: throw new SFException(ErrorCode.MISSING_SERVER_URL); default: throw new SFException(ErrorCode.MISSING_CONNECTION_PROPERTY, property.getPropertyKey()); } } } if (isSnowflakeAuthenticator() || isOKTAAuthenticator() || isUsernamePasswordMFAAuthenticator()) { // userName and password are expected for both Snowflake and Okta. 
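// Illustrative sketch (added note, not part of the original source): with these
// authenticators the checks below fail fast, before any network call. A hedged
// usage sketch of the failure mode, using only members shown in this file:
//
//   session.addSFSessionProperty("serverURL", url);
//   session.addSFSessionProperty("account", "acct");  // user/password omitted
//   session.open(); // -> SFException(ErrorCode.MISSING_USERNAME) from this check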
String userName = (String) connectionPropertiesMap.get(SFSessionProperty.USER); if (isNullOrEmpty(userName)) { throw new SFException(ErrorCode.MISSING_USERNAME); } String password = (String) connectionPropertiesMap.get(SFSessionProperty.PASSWORD); if (isNullOrEmpty(password)) { throw new SFException(ErrorCode.MISSING_PASSWORD); } } // perform sanity check on proxy settings boolean useProxy = (boolean) connectionPropertiesMap.getOrDefault(SFSessionProperty.USE_PROXY, false); if (useProxy) { if (!connectionPropertiesMap.containsKey(SFSessionProperty.PROXY_HOST) || connectionPropertiesMap.get(SFSessionProperty.PROXY_HOST) == null || ((String) connectionPropertiesMap.get(SFSessionProperty.PROXY_HOST)).isEmpty() || !connectionPropertiesMap.containsKey(SFSessionProperty.PROXY_PORT) || connectionPropertiesMap.get(SFSessionProperty.PROXY_HOST) == null) { throw new SFException( ErrorCode.INVALID_PROXY_PROPERTIES, "Both proxy host and port values are needed."); } } } @Override public List checkProperties() { Map connectionPropertiesMap = getConnectionPropertiesMap(); for (SFSessionProperty property : SFSessionProperty.values()) { if (property.isRequired() && !connectionPropertiesMap.containsKey(property)) { missingProperties.add(addNewDriverProperty(property.getPropertyKey(), null)); } } if (isSnowflakeAuthenticator() || isOKTAAuthenticator()) { // userName and password are expected for both Snowflake and Okta. String userName = (String) connectionPropertiesMap.get(SFSessionProperty.USER); if (isNullOrEmpty(userName)) { missingProperties.add( addNewDriverProperty(SFSessionProperty.USER.getPropertyKey(), "username for account")); } String password = (String) connectionPropertiesMap.get(SFSessionProperty.PASSWORD); if (isNullOrEmpty(password)) { missingProperties.add( addNewDriverProperty( SFSessionProperty.PASSWORD.getPropertyKey(), "password for " + "account")); } } boolean useProxy = (boolean) connectionPropertiesMap.getOrDefault(SFSessionProperty.USE_PROXY, false); if (useProxy) { if (!connectionPropertiesMap.containsKey(SFSessionProperty.PROXY_HOST)) { missingProperties.add( addNewDriverProperty(SFSessionProperty.PROXY_HOST.getPropertyKey(), "proxy host name")); } if (!connectionPropertiesMap.containsKey(SFSessionProperty.PROXY_PORT)) { missingProperties.add( addNewDriverProperty( SFSessionProperty.PROXY_PORT.getPropertyKey(), "proxy port; " + "should be an integer")); } } return missingProperties; } private DriverPropertyInfo addNewDriverProperty(String name, String description) { DriverPropertyInfo info = new DriverPropertyInfo(name, null); info.description = description; return info; } /** * @return whether this session uses async queries */ public boolean isAsyncSession() { return !activeAsyncQueries.isEmpty(); } private boolean getDisableQueryContextCacheOption() { Boolean disableQueryContextCache = false; Map connectionPropertiesMap = getConnectionPropertiesMap(); if (connectionPropertiesMap.containsKey(SFSessionProperty.DISABLE_QUERY_CONTEXT_CACHE)) { disableQueryContextCache = (Boolean) connectionPropertiesMap.get(SFSessionProperty.DISABLE_QUERY_CONTEXT_CACHE); } return disableQueryContextCache; } @Override public void setQueryContext(String queryContext) { boolean disableQueryContextCache = getDisableQueryContextCacheOption(); if (!disableQueryContextCache) { qcc.deserializeQueryContextJson(queryContext); } } @Override public QueryContextDTO getQueryContextDTO() { boolean disableQueryContextCache = getDisableQueryContextCacheOption(); if (!disableQueryContextCache) { QueryContextDTO res 
= qcc.serializeQueryContextDTO(); return res; } else { return null; } } public SFClientConfig getSfClientConfig() { return sfClientConfig; } public void setSfClientConfig(SFClientConfig sfClientConfig) { this.sfClientConfig = sfClientConfig; } /** * If the JDBC driver starts in diagnostics mode then the method prints results of the * connectivity tests it performs in the logs. A SQLException is thrown with a message indicating * that the driver is in diagnostics mode, and that a connection was not created. */ private void runDiagnosticsIfEnabled() throws SnowflakeSQLException { Map connectionPropertiesMap = getConnectionPropertiesMap(); boolean isDiagnosticsEnabled = Optional.ofNullable(connectionPropertiesMap.get(SFSessionProperty.ENABLE_DIAGNOSTICS)) .map(b -> (Boolean) b) .orElse(false); if (!isDiagnosticsEnabled) { return; } logger.info("Running diagnostics tests"); String allowListFile = (String) connectionPropertiesMap.get(SFSessionProperty.DIAGNOSTICS_ALLOWLIST_FILE); if (allowListFile == null || allowListFile.isEmpty()) { logger.error( "Diagnostics was enabled but an allowlist file was not provided." + " Please provide an allowlist JSON file using the connection parameter {}", SFSessionProperty.DIAGNOSTICS_ALLOWLIST_FILE); throw new SnowflakeSQLException( "Diagnostics was enabled but an allowlist file was not provided. " + "Please provide an allowlist JSON file using the connection parameter " + SFSessionProperty.DIAGNOSTICS_ALLOWLIST_FILE); } else { DiagnosticContext diagnosticContext = new DiagnosticContext(allowListFile, connectionPropertiesMap); diagnosticContext.runDiagnostics(); } throw new SnowflakeSQLException( "A connection was not created because the driver is running in diagnostics mode." + " If this is unintended then disable diagnostics check by removing the " + SFSessionProperty.ENABLE_DIAGNOSTICS + " connection parameter"); } public void setHttpHeadersCustomizers(List httpHeadersCustomizers) { this.httpHeadersCustomizers = httpHeadersCustomizers; } public List getHttpHeadersCustomizers() { return httpHeadersCustomizers; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SFSessionProperty.java ================================================ package net.snowflake.client.internal.core; import java.security.PrivateKey; import java.util.List; import java.util.regex.Pattern; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.http.HttpHeadersCustomizer; /** session properties accepted for opening a new session. 
*/ public enum SFSessionProperty { SERVER_URL("serverURL", true, String.class), USER("user", false, String.class), PASSWORD("password", false, String.class), ACCOUNT("account", true, String.class), DATABASE("database", false, String.class, "db"), SCHEMA("schema", false, String.class), PASSCODE_IN_PASSWORD("passcodeInPassword", false, Boolean.class), PASSCODE("passcode", false, String.class), TOKEN("token", false, String.class), ID_TOKEN_PASSWORD("id_token_password", false, String.class), ROLE("role", false, String.class), AUTHENTICATOR("authenticator", false, String.class), OKTA_USERNAME("oktausername", false, String.class), PRIVATE_KEY("privateKey", false, PrivateKey.class), OAUTH_REDIRECT_URI("oauthRedirectUri", false, String.class), OAUTH_CLIENT_ID("oauthClientID", false, String.class), OAUTH_CLIENT_SECRET("oauthClientSecret", false, String.class), OAUTH_SCOPE("oauthScope", false, String.class), OAUTH_AUTHORIZATION_URL("oauthAuthorizationUrl", false, String.class), OAUTH_TOKEN_REQUEST_URL("oauthTokenRequestUrl", false, String.class), OAUTH_ENABLE_SINGLE_USE_REFRESH_TOKENS("oauthEnableSingleUseRefreshTokens", false, Boolean.class), WORKLOAD_IDENTITY_PROVIDER("workloadIdentityProvider", false, String.class), WORKLOAD_IDENTITY_ENTRA_RESOURCE("workloadIdentityEntraResource", false, String.class), WORKLOAD_IDENTITY_IMPERSONATION_PATH("workloadIdentityImpersonationPath", false, String.class), WORKLOAD_IDENTITY_AWS_EXTERNAL_ID("workloadIdentityAwsExternalId", false, String.class), WAREHOUSE("warehouse", false, String.class), LOGIN_TIMEOUT("loginTimeout", false, Integer.class), NETWORK_TIMEOUT("networkTimeout", false, Integer.class), INJECT_SOCKET_TIMEOUT("injectSocketTimeout", false, Integer.class), INJECT_CLIENT_PAUSE("injectClientPause", false, Integer.class), APP_ID("appId", false, String.class), APP_VERSION("appVersion", false, String.class), OCSP_FAIL_OPEN("ocspFailOpen", false, Boolean.class), /** * @deprecated Use {@link #DISABLE_OCSP_CHECKS} for clarity. This configuration option is used to * disable OCSP verification. */ @Deprecated INSECURE_MODE("insecureMode", false, Boolean.class), DISABLE_OCSP_CHECKS("disableOCSPChecks", false, Boolean.class), QUERY_TIMEOUT("queryTimeout", false, Integer.class), STRINGS_QUOTED("stringsQuotedForColumnDef", false, Boolean.class), APPLICATION("application", false, String.class), TRACING("tracing", false, String.class), DISABLE_SOCKS_PROXY("disableSocksProxy", false, Boolean.class), // connection proxy USE_PROXY("useProxy", false, Boolean.class), PROXY_HOST("proxyHost", false, String.class), PROXY_PORT("proxyPort", false, String.class), PROXY_USER("proxyUser", false, String.class), PROXY_PASSWORD("proxyPassword", false, String.class), NON_PROXY_HOSTS("nonProxyHosts", false, String.class), PROXY_PROTOCOL("proxyProtocol", false, String.class), VALIDATE_DEFAULT_PARAMETERS("validateDefaultParameters", false, Boolean.class), INJECT_WAIT_IN_PUT("inject_wait_in_put", false, Integer.class), PRIVATE_KEY_FILE("private_key_file", false, String.class), PRIVATE_KEY_BASE64("private_key_base64", false, String.class), /** * @deprecated Use {@link #PRIVATE_KEY_PWD} for clarity. 
The given password will be used to * decrypt the private key value independent of whether that value is supplied as a file or * base64 string */ @Deprecated PRIVATE_KEY_FILE_PWD("private_key_file_pwd", false, String.class), PRIVATE_KEY_PWD("private_key_pwd", false, String.class), CLIENT_INFO("snowflakeClientInfo", false, String.class), ALLOW_UNDERSCORES_IN_HOST("allowUnderscoresInHost", false, Boolean.class), // Adds a suffix to the user agent header in the http requests made by the jdbc driver USER_AGENT_SUFFIX("user_agent_suffix", false, String.class), CLIENT_OUT_OF_BAND_TELEMETRY_ENABLED( "CLIENT_OUT_OF_BAND_TELEMETRY_ENABLED", false, Boolean.class), GZIP_DISABLED("gzipDisabled", false, Boolean.class), DISABLE_QUERY_CONTEXT_CACHE("disableQueryContextCache", false, Boolean.class), HTAP_OOB_TELEMETRY_ENABLED("htapOOBTelemetryEnabled", false, Boolean.class), CLIENT_CONFIG_FILE("client_config_file", false, String.class), MAX_HTTP_RETRIES("maxHttpRetries", false, Integer.class), ENABLE_PUT_GET("enablePutGet", false, Boolean.class), ENABLE_COPY_RESULT_SET("enableCopyResultSet", false, Boolean.class), DISABLE_CONSOLE_LOGIN("disableConsoleLogin", false, Boolean.class), PUT_GET_MAX_RETRIES("putGetMaxRetries", false, Integer.class), RETRY_TIMEOUT("retryTimeout", false, Integer.class), ENABLE_DIAGNOSTICS("ENABLE_DIAGNOSTICS", false, Boolean.class), DIAGNOSTICS_ALLOWLIST_FILE("DIAGNOSTICS_ALLOWLIST_FILE", false, String.class), ENABLE_PATTERN_SEARCH("enablePatternSearch", false, Boolean.class), ENABLE_EXACT_SCHEMA_SEARCH_ENABLED("ENABLE_EXACT_SCHEMA_SEARCH_ENABLED", false, Boolean.class), DISABLE_GCS_DEFAULT_CREDENTIALS("disableGcsDefaultCredentials", false, Boolean.class), JDBC_ARROW_TREAT_DECIMAL_AS_INT("JDBC_ARROW_TREAT_DECIMAL_AS_INT", false, Boolean.class), DISABLE_SAML_URL_CHECK("disableSamlURLCheck", false, Boolean.class), // Used to determine whether to use the previously hardcoded value for the formatter (for // backwards compatibility) or use the value of JDBC_FORMAT_DATE_WITH_TIMEZONE JDBC_DEFAULT_FORMAT_DATE_WITH_TIMEZONE( "JDBC_DEFAULT_FORMAT_DATE_WITH_TIMEZONE", false, Boolean.class), // Used as a fix for issue SNOW-354859. Remove with snowflake-jdbc version 4.x with BCR changes. JDBC_GET_DATE_USE_NULL_TIMEZONE("JDBC_GET_DATE_USE_NULL_TIMEZONE", false, Boolean.class), BROWSER_RESPONSE_TIMEOUT("BROWSER_RESPONSE_TIMEOUT", false, Integer.class), ENABLE_CLIENT_STORE_TEMPORARY_CREDENTIAL("clientStoreTemporaryCredential", false, Boolean.class), ENABLE_CLIENT_REQUEST_MFA_TOKEN("clientRequestMfaToken", false, Boolean.class), HTTP_CLIENT_CONNECTION_TIMEOUT("HTTP_CLIENT_CONNECTION_TIMEOUT", false, Integer.class), HTTP_CLIENT_SOCKET_TIMEOUT("HTTP_CLIENT_SOCKET_TIMEOUT", false, Integer.class), JAVA_LOGGING_CONSOLE_STD_OUT("JAVA_LOGGING_CONSOLE_STD_OUT", false, Boolean.class), JAVA_LOGGING_CONSOLE_STD_OUT_THRESHOLD( "JAVA_LOGGING_CONSOLE_STD_OUT_THRESHOLD", false, String.class), IMPLICIT_SERVER_SIDE_QUERY_TIMEOUT("IMPLICIT_SERVER_SIDE_QUERY_TIMEOUT", false, Boolean.class), CLEAR_BATCH_ONLY_AFTER_SUCCESSFUL_EXECUTION( "CLEAR_BATCH_ONLY_AFTER_SUCCESSFUL_EXECUTION", false, Boolean.class), CLIENT_TREAT_TIME_AS_WALL_CLOCK_TIME( "CLIENT_TREAT_TIME_AS_WALL_CLOCK_TIME", false, Boolean.class), HTTP_HEADER_CUSTOMIZERS( HttpHeadersCustomizer.HTTP_HEADER_CUSTOMIZERS_PROPERTY_KEY, false, List.class), // Used to enable the owner-only stage file permissions feature. This feature will be enabled by // default in the next major release. 
OWNER_ONLY_STAGE_FILE_PERMISSIONS_ENABLED( "ownerOnlyStageFilePermissionsEnabled", false, Boolean.class), ENABLE_WILDCARDS_IN_SHOW_METADATA_COMMANDS( "ENABLE_WILDCARDS_IN_SHOW_METADATA_COMMANDS", false, Boolean.class), MIN_TLS_VERSION("MIN_TLS_VERSION", false, String.class), MAX_TLS_VERSION("MAX_TLS_VERSION", false, String.class), CERT_REVOCATION_CHECK_MODE("CERT_REVOCATION_CHECK_MODE", false, String.class), ALLOW_CERTIFICATES_WITHOUT_CRL_URL("ALLOW_CERTIFICATES_WITHOUT_CRL_URL", false, Boolean.class), PLATFORM_DETECTION_TIMEOUT_MS("platformDetectionTimeoutMs", false, Integer.class), DISABLE_PLATFORM_DETECTION("disablePlatformDetection", false, Boolean.class); // property key in string private String propertyKey; // if required when establishing connection private boolean required; // value type private Class valueType; // alias to property key private String[] aliases; // application name matcher public static Pattern APPLICATION_REGEX = Pattern.compile("^[A-Za-z][A-Za-z0-9\\.\\-_]{1,50}$"); public boolean isRequired() { return required; } public String getPropertyKey() { return propertyKey; } public Class getValueType() { return valueType; } SFSessionProperty(String propertyKey, boolean required, Class valueType, String... aliases) { this.propertyKey = propertyKey; this.required = required; this.valueType = valueType; this.aliases = aliases; } static SFSessionProperty lookupByKey(String propertyKey) { for (SFSessionProperty property : SFSessionProperty.values()) { if (property.propertyKey.equalsIgnoreCase(propertyKey)) { return property; } else { for (String alias : property.aliases) { if (alias.equalsIgnoreCase(propertyKey)) { return property; } } } } return null; } /** * Check if property value is desired class. Convert if possible * * @param property The session property to check * @param propertyValue The property value to check * @return The checked property value * @throws SFException Will be thrown if an invalid property value is passed in */ static Object checkPropertyValue(SFSessionProperty property, Object propertyValue) throws SFException { if (propertyValue == null) { return null; } if (property.getValueType().isAssignableFrom(propertyValue.getClass())) { switch (property) { case APPLICATION: if (APPLICATION_REGEX.matcher((String) propertyValue).find()) { return propertyValue; } else { throw new SFException(ErrorCode.INVALID_PARAMETER_VALUE, propertyValue, property); } default: return propertyValue; } } else { if (property.getValueType() == Boolean.class && propertyValue instanceof String) { return SFLoginInput.getBooleanValue(propertyValue); } else if (property.getValueType() == Integer.class && propertyValue instanceof String) { try { return Integer.valueOf((String) propertyValue); } catch (NumberFormatException e) { throw new SFException( ErrorCode.INVALID_PARAMETER_VALUE, propertyValue.getClass().getName(), property.getValueType().getName()); } } } throw new SFException( ErrorCode.INVALID_PARAMETER_TYPE, propertyValue.getClass().getName(), property.getValueType().getName()); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SFSqlInput.java ================================================ package net.snowflake.client.internal.core; import java.sql.SQLException; import java.sql.SQLInput; import java.util.List; import java.util.Map; import java.util.TimeZone; /** This interface extends the standard {@link SQLInput} interface to provide additional methods. 
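 *
 * <p>A minimal usage sketch (it assumes the {@link SQLInput} at hand really is driver-provided,
 * since {@link #unwrap(SQLInput)} simply casts):
 *
 * <pre>{@code
 * SFSqlInput sfInput = SFSqlInput.unwrap(sqlInput);
 * java.sql.Timestamp ts = sfInput.readTimestamp(TimeZone.getTimeZone("UTC"));
 * }</pre>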
 */
public interface SFSqlInput extends SQLInput {
  /**
   * Method unwrapping an object of class SQLInput to an object of class SFSqlInput.
   *
   * @param sqlInput SQLInput to consider.
   * @return Object unwrapped to SFSqlInput class.
   */
  static SFSqlInput unwrap(SQLInput sqlInput) {
    return (SFSqlInput) sqlInput;
  }

  /**
   * Reads the next attribute in the stream and returns it as a {@code java.sql.Timestamp} object.
   *
   * @param tz timezone to consider.
   * @return the attribute; if the value is SQL {@code NULL}, returns {@code null}
   * @exception SQLException if a database access error occurs
   */
  java.sql.Timestamp readTimestamp(TimeZone tz) throws SQLException;

  /**
   * Reads the next attribute in the stream and returns it as an {@code Object}.
   *
   * @param <T> the type of the class modeled by this Class object
   * @param type Class representing the Java data type to convert the attribute to.
   * @param tz timezone to consider.
   * @return the attribute at the head of the stream as an {@code Object} in the Java programming
   *     language; {@code null} if the attribute is SQL {@code NULL}
   * @exception SQLException if a database access error occurs
   */
  <T> T readObject(Class<T> type, TimeZone tz) throws SQLException;

  /**
   * Reads the next attribute in the stream and returns it as a {@code List}.
   *
   * @param <T> the type of the class modeled by this Class object
   * @param type Class representing the Java data type to convert the attribute to.
   * @return the attribute at the head of the stream as a {@code List} in the Java programming
   *     language; {@code null} if the attribute is SQL {@code NULL}
   * @exception SQLException if a database access error occurs
   */
  <T> List<T> readList(Class<T> type) throws SQLException;

  /**
   * Reads the next attribute in the stream and returns it as a {@code Map}.
   *
   * @param <T> the type of the class modeled by this Class object
   * @param type Class representing the Java data type to convert the attribute to.
   * @return the attribute at the head of the stream as a {@code Map} in the Java programming
   *     language; {@code null} if the attribute is SQL {@code NULL}
   * @exception SQLException if a database access error occurs
   */
  <T> Map<String, T> readMap(Class<T> type) throws SQLException;

  /**
   * Reads the next attribute in the stream and returns it as an {@code Array}.
   *
   * @param <T> the type of the class modeled by this Class object
   * @param type Class representing the Java data type to convert the attribute to.
   * @return the attribute at the head of the stream as an {@code Array} in the Java programming
   *     language; {@code null} if the attribute is SQL {@code NULL}
   * @exception SQLException if a database access error occurs
   */
  <T> T[] readArray(Class<T> type) throws SQLException;
}

================================================
FILE: src/main/java/net/snowflake/client/internal/core/SFStatement.java
================================================
package net.snowflake.client.internal.core;

import static net.snowflake.client.internal.core.SessionUtil.DEFAULT_CLIENT_MEMORY_LIMIT;
import static net.snowflake.client.internal.core.SessionUtil.DEFAULT_CLIENT_PREFETCH_THREADS;
import static net.snowflake.client.internal.core.SessionUtil.MAX_CLIENT_CHUNK_SIZE;
import static net.snowflake.client.internal.core.SessionUtil.MIN_CLIENT_CHUNK_SIZE;
import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty;
import static net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.internalCallMarker;
import static net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.recordIfExternal;

import com.fasterxml.jackson.databind.JsonNode;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import net.snowflake.client.api.exception.ErrorCode;
import net.snowflake.client.api.exception.SnowflakeSQLException;
import net.snowflake.client.api.resultset.QueryStatus;
import net.snowflake.client.internal.core.BasicEvent.QueryState;
import net.snowflake.client.internal.core.bind.BindException;
import net.snowflake.client.internal.core.bind.BindUploader;
import net.snowflake.client.internal.driver.DriverInitializer;
import net.snowflake.client.internal.exception.SnowflakeSQLLoggedException;
import net.snowflake.client.internal.jdbc.SnowflakeFileTransferAgent;
import net.snowflake.client.internal.jdbc.SnowflakeReauthenticationRequest;
import net.snowflake.client.internal.jdbc.telemetry.ExecTimeTelemetryData;
import net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.InternalCallMarker;
import net.snowflake.client.internal.jdbc.telemetry.TelemetryData;
import net.snowflake.client.internal.jdbc.telemetry.TelemetryField;
import net.snowflake.client.internal.jdbc.telemetry.TelemetryUtil;
import net.snowflake.client.internal.jdbc.telemetryOOB.TelemetryService;
import net.snowflake.client.internal.log.SFLogger;
import net.snowflake.client.internal.log.SFLoggerFactory;
import net.snowflake.common.core.SqlState;
import org.apache.http.client.methods.HttpRequestBase;

/** Snowflake statement */
public class SFStatement extends SFBaseStatement {

  private static final SFLogger logger = SFLoggerFactory.getLogger(SFStatement.class);

  private SFSession session;
  private SFBaseResultSet resultSet = null;
  private HttpRequestBase httpRequest;
  private boolean isClosed = false;
  private int sequenceId = -1;
  private String requestId = null;
  private String sqlText = null;
  private final AtomicBoolean canceling = new AtomicBoolean(false);
  private boolean isFileTransfer = false;
  private SnowflakeFileTransferAgent transferAgent = null;
  private static final int MAX_BINDING_PARAMS_FOR_LOGGING = 1000;

  /** id used in combine describe and execute */
  private String
describeJobUUID; // list of child result objects for queries called by the current query, if any private List childResults = null; // Three parameters adjusted in conservative memory usage mode private int conservativePrefetchThreads; private int conservativeResultChunkSize; private long conservativeMemoryLimit; // in bytes public SFStatement(SFSession session) { logger.trace("SFStatement(SFSession session)", false); this.session = session; Integer queryTimeout = session == null ? null : session.getQueryTimeout(); this.queryTimeout = queryTimeout != null ? queryTimeout : this.queryTimeout; verifyArrowSupport(); } private void verifyArrowSupport() { if (!DriverInitializer.isArrowEnabled()) { logger.debug("Disable arrow support: {}", DriverInitializer.getArrowDisableReason()); statementParametersMap.put("JDBC_QUERY_RESULT_FORMAT", "JSON"); } } /** * Sanity check query text * * @param sql The SQL statement to check * @throws SQLException Will be thrown if sql is null or empty */ private void sanityCheckQuery(String sql) throws SQLException { if (sql == null || sql.isEmpty()) { throw new SnowflakeSQLException( SqlState.SQL_STATEMENT_NOT_YET_COMPLETE, ErrorCode.INVALID_SQL.getMessageCode(), sql); } } /** * Execute SQL query with an option for describe only * * @param sql sql statement * @param describeOnly true if describe only * @return query result set * @throws SQLException if connection is already closed * @throws SFException if result set is null */ private SFBaseResultSet executeQuery( String sql, Map parametersBinding, boolean describeOnly, boolean asyncExec, CallingMethod caller, ExecTimeTelemetryData execTimeData) throws SQLException, SFException { sanityCheckQuery(sql); String trimmedSql = sql.trim(); // snowflake specific client side commands if (isFileTransfer(trimmedSql)) { // Server side value or Connection string value is false then disable the PUT/GET command if ((session != null && !(session.getJdbcEnablePutGet() && session.getEnablePutGet()))) { // PUT/GET command disabled either on server side or in the client connection string logger.debug("Executing file transfer locally is disabled: {}", sql); throw new SnowflakeSQLException("File transfers have been disabled."); } // PUT/GET command logger.debug("Executing file transfer locally: {}", sql); return executeFileTransfer(sql); } // NOTE: It is intentional two describeOnly parameters are specified. return executeQueryInternal( sql, parametersBinding, describeOnly, describeOnly, // internal query if describeOnly is true asyncExec, caller, execTimeData); } /** * Describe a statement * * @param sql statement * @return metadata of statement including result set metadata and binding information * @throws SQLException if connection is already closed * @throws SFException if result set is null */ @Override public SFPreparedStatementMetaData describe(String sql) throws SFException, SQLException { SFBaseResultSet baseResultSet = executeQuery(sql, null, true, false, null, new ExecTimeTelemetryData()); describeJobUUID = baseResultSet.getQueryId(); return new SFPreparedStatementMetaData( baseResultSet.getMetaData(), baseResultSet.getStatementType(), baseResultSet.getNumberOfBinds(), baseResultSet.isArrayBindSupported(), baseResultSet.getMetaDataOfBinds(), true); // valid metadata } /** * Internal method for executing a query with bindings accepted. * *
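   * <p>For multi-statement queries the server returns child result descriptors: this method
   * constructs the first result set for its side effects and, when children exist, advances to
   * the first child via {@code getMoreResults()}. A consuming sketch ({@code stmt}, {@code sql},
   * {@code binds}, {@code caller} and {@code execTimeData} are placeholders):
   *
   * <pre>{@code
   * SFBaseResultSet rs =
   *     stmt.executeQueryInternal(sql, binds, false, false, false, caller, execTimeData);
   * while (rs != null) {
   *   // ... consume rs ...
   *   rs = stmt.getMoreResults() ? stmt.getResultSet() : null;
   * }
   * }</pre>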

* * @param sql sql statement * @param parameterBindings binding information * @param describeOnly true if just showing result set metadata * @param internal true if internal command not showing up in the history * @param caller the JDBC method that called this function, null if none * @return snowflake query result set * @throws SQLException if connection is already closed * @throws SFException if result set is null */ SFBaseResultSet executeQueryInternal( String sql, Map parameterBindings, boolean describeOnly, boolean internal, boolean asyncExec, CallingMethod caller, ExecTimeTelemetryData execTimeData) throws SQLException, SFException { resetState(); logger.debug("ExecuteQuery: {}", sql); if (session == null || session.isClosed()) { throw new SQLException("connection is closed"); } Object result = executeHelper( sql, StmtUtil.SF_MEDIA_TYPE, parameterBindings, describeOnly, internal, asyncExec, execTimeData); if (result == null) { throw new SnowflakeSQLLoggedException( session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "got null result"); } /* * we sort the result if the connection is in sorting mode */ Object sortProperty = session.getSessionPropertyByKey("sort"); boolean sortResult = sortProperty != null && (Boolean) sortProperty; logger.debug("Creating result set", false); try { JsonNode jsonResult = (JsonNode) result; resultSet = SFResultSetFactory.getResultSet(jsonResult, this, sortResult, execTimeData); childResults = ResultUtil.getChildResults(session, requestId, jsonResult); // if child results are available, skip over this result set and set the // current result to the first child's result. // we still construct the first result set for its side effects. if (!childResults.isEmpty()) { SFStatementType type = childResults.get(0).getType(); // ensure first query type matches the calling JDBC method, if exists if (caller == CallingMethod.EXECUTE_QUERY && !type.isGenerateResultSet()) { throw new SnowflakeSQLLoggedException( session, ErrorCode.QUERY_FIRST_RESULT_NOT_RESULT_SET); } else if (caller == CallingMethod.EXECUTE_UPDATE && type.isGenerateResultSet()) { throw new SnowflakeSQLLoggedException( session, ErrorCode.UPDATE_FIRST_RESULT_NOT_UPDATE_COUNT); } // this will update resultSet to point to the first child result before we return it getMoreResults(); } } catch (SnowflakeSQLException | OutOfMemoryError ex) { // snow-24428: no need to generate incident for exceptions we generate // snow-29403: or client OOM throw ex; } catch (Throwable ex) { // SNOW-22813 log exception logger.error("Exception creating result", ex); throw new SFException( ErrorCode.INTERNAL_ERROR, SFException.oneLiner("exception creating result", ex)); } logger.debug("Done creating result set", false); if (asyncExec) { session.addQueryToActiveQueryList(resultSet.getQueryId()); } execTimeData.setQueryId(resultSet.getQueryId()); return resultSet; } /** * Set a time bomb to cancel the outstanding query when timeout is reached. 
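   *
   * <p>In essence the scheduled task does the following (a sketch; the real task below also
   * converts a thrown {@link SFException} into a logged SQL exception):
   *
   * <pre>{@code
   * executor.schedule(
   *     () -> {
   *       statement.cancel(CancellationReason.TIMEOUT);
   *       return null;
   *     },
   *     queryTimeout,
   *     TimeUnit.SECONDS);
   * }</pre>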
* * @param executor object to execute statement cancel request */ private void setTimeBomb(ScheduledExecutorService executor) { class TimeBombTask implements Callable { private final SFStatement statement; private TimeBombTask(SFStatement statement) { this.statement = statement; } @Override public Void call() throws SQLException { try { statement.cancel(CancellationReason.TIMEOUT); } catch (SFException ex) { throw new SnowflakeSQLLoggedException( session, ex.getSqlState(), ex.getVendorCode(), ex, ex.getParams()); } return null; } } executor.schedule(new TimeBombTask(this), this.queryTimeout, TimeUnit.SECONDS); } /** * A helper method to build URL and submit the SQL to snowflake for exec * * @param sql sql statement * @param mediaType media type * @param bindValues map of binding values * @param describeOnly whether only show the result set metadata * @param internal run internal query not showing up in history * @param asyncExec is async execute * @param execTimeData ExecTimeTelemetryData * @return raw json response * @throws SFException if query is canceled * @throws SnowflakeSQLException if query is already running */ public Object executeHelper( String sql, String mediaType, Map bindValues, boolean describeOnly, boolean internal, boolean asyncExec, ExecTimeTelemetryData execTimeData) throws SnowflakeSQLException, SFException { ScheduledExecutorService executor = null; try { synchronized (this) { if (isClosed) { throw new SFException(ErrorCode.STATEMENT_CLOSED); } // initialize a sequence id if not closed or not for aborting if (canceling.get()) { // nothing to do if canceled throw new SFException(ErrorCode.QUERY_CANCELED); } if (this.requestId != null) { throw new SnowflakeSQLLoggedException( session, ErrorCode.STATEMENT_ALREADY_RUNNING_QUERY.getMessageCode(), SqlState.FEATURE_NOT_SUPPORTED); } this.requestId = UUIDUtils.getUUID().toString(); execTimeData.setRequestId(requestId); this.sequenceId = session.getAndIncrementSequenceId(); this.sqlText = sql; } EventUtil.triggerStateTransition( BasicEvent.QueryState.QUERY_STARTED, String.format(QueryState.QUERY_STARTED.getArgString(), requestId)); // if there are a large number of bind values, we should upload them to stage // instead of passing them in the payload (if enabled) execTimeData.setBindStart(); int numBinds = BindUploader.arrayBindValueCount(bindValues); String bindStagePath = null; if (0 < session.getArrayBindStageThreshold() && session.getArrayBindStageThreshold() <= numBinds && !describeOnly && BindUploader.isArrayBind(bindValues)) { try (BindUploader uploader = BindUploader.newInstance(session, requestId)) { uploader.upload(bindValues); bindStagePath = uploader.getStagePath(); } catch (BindException ex) { logger.debug( "Exception encountered trying to upload binds to stage with input stream. Attaching" + " binds in payload instead. ", ex); TelemetryData errorLog = TelemetryUtil.buildJobData(this.requestId, ex.type.field, 1); this.session.getTelemetryClient(internalCallMarker()).addLogToBatch(errorLog); } catch (SQLException ex) { logger.debug( "Exception encountered trying to upload binds to stage with input stream. Attaching" + " binds in payload instead. 
", ex); TelemetryData errorLog = TelemetryUtil.buildJobData(this.requestId, TelemetryField.FAILED_BIND_UPLOAD, 1); this.session.getTelemetryClient(internalCallMarker()).addLogToBatch(errorLog); } } if (session.isConservativeMemoryUsageEnabled()) { logger.debug("JDBC conservative memory usage is enabled.", false); calculateConservativeMemoryUsage(); } StmtUtil.StmtInput stmtInput = new StmtUtil.StmtInput(); stmtInput .setSql(sql) .setMediaType(mediaType) .setInternal(internal) .setDescribeOnly(describeOnly) .setAsync(asyncExec) .setServerUrl(session.getServerUrl()) .setRequestId(requestId) .setSequenceId(sequenceId) .setParametersMap(statementParametersMap) .setSessionToken(session.getSessionToken(internalCallMarker())) .setNetworkTimeoutInMillis(session.getNetworkTimeoutInMilli()) .setInjectSocketTimeout(session.getInjectSocketTimeout()) .setInjectClientPause(session.getInjectClientPause()) .setCanceling(canceling) .setRetry(false) .setDescribedJobId(describeJobUUID) .setCombineDescribe(session.getEnableCombineDescribe()) .setQuerySubmissionTime(System.currentTimeMillis()) .setServiceName(session.getServiceName()) .setOCSPMode(session.getOCSPMode()) .setHttpClientSettingsKey(session.getHttpClientKey()) .setMaxRetries(session.getMaxHttpRetries()) .setQueryContextDTO(session.isAsyncSession() ? null : session.getQueryContextDTO()); if (bindStagePath != null) { stmtInput.setBindValues(null).setBindStage(bindStagePath); // use the new SQL format for this query so dates/timestamps are parsed correctly setUseNewSqlFormat(true); statementParametersMap.put("TIMESTAMP_INPUT_FORMAT", "AUTO"); } else { stmtInput.setBindValues(bindValues).setBindStage(null); } if (numBinds > 0 && session.getPreparedStatementLogging()) { if (numBinds > MAX_BINDING_PARAMS_FOR_LOGGING) { logger.debug( "Number of binds exceeds logging limit. Printing off {} binding parameters.", MAX_BINDING_PARAMS_FOR_LOGGING); } else { logger.debug("Printing off {} binding parameters.", numBinds); } int counter = 0; // if it's an array bind, print off the first few rows from each column. if (BindUploader.isArrayBind(bindValues)) { int numRowsPrinted = MAX_BINDING_PARAMS_FOR_LOGGING / bindValues.size(); if (numRowsPrinted <= 0) { numRowsPrinted = 1; } for (Map.Entry entry : bindValues.entrySet()) { List bindRows = (List) entry.getValue().getValue(); if (numRowsPrinted >= bindRows.size()) { numRowsPrinted = bindRows.size(); } String rows = "["; for (int i = 0; i < numRowsPrinted; i++) { rows += bindRows.get(i) + ", "; } rows += "]"; logger.debug("Column {}: {}", entry.getKey(), rows); counter += numRowsPrinted; if (counter >= MAX_BINDING_PARAMS_FOR_LOGGING) { break; } } } // not an array, just a bunch of columns else { for (Map.Entry entry : bindValues.entrySet()) { if (counter >= MAX_BINDING_PARAMS_FOR_LOGGING) { break; } counter++; logger.debug("Column {}: {}", entry.getKey(), entry.getValue().getValue()); } } } execTimeData.setBindEnd(); if (canceling.get()) { logger.debug("Query cancelled", false); throw new SFException(ErrorCode.QUERY_CANCELED); } // if timeout is set, start a thread to cancel the request after timeout // reached. 
if (this.queryTimeout > 0) { if (session.getImplicitServerSideQueryTimeout()) { // Server side only query timeout statementParametersMap.put("STATEMENT_TIMEOUT_IN_SECONDS", this.queryTimeout); } else { // client side only query timeout executor = Executors.newScheduledThreadPool(1); setTimeBomb(executor); } } StmtUtil.StmtOutput stmtOutput = null; boolean sessionRenewed; do { sessionRenewed = false; try { stmtOutput = StmtUtil.execute(stmtInput, execTimeData, session); break; } catch (SnowflakeSQLException ex) { renewSessionOnExpiry(ex, stmtInput.sessionToken); // SNOW-18822: reset session token for the statement stmtInput.setSessionToken(session.getSessionToken(internalCallMarker())); stmtInput.setRetry(true); sessionRenewed = true; execTimeData.incrementRetryCount(); execTimeData.addRetryLocation("renewSession"); logger.debug("Session got renewed, will retry", false); } } while (sessionRenewed && !canceling.get()); // Debugging/Testing for incidents if (Boolean.TRUE .toString() .equalsIgnoreCase(systemGetProperty("snowflake.enable_incident_test1"))) { throw new SFException(ErrorCode.STATEMENT_CLOSED); } synchronized (this) { /* * done with the remote execution of the query. set sequenceId to -1 * and request id to null so that we don't try to abort it upon canceling. */ this.sequenceId = -1; this.requestId = null; } if (canceling.get()) { // If we are here, this is the context for the initial query that // is being canceled. Raise an exception anyway here even if // the server fails to abort it. throw new SFException(ErrorCode.QUERY_CANCELED); } logger.debug("Returning from executeHelper", false); if (stmtOutput != null) { return stmtOutput.getResult(); } throw new SFException(ErrorCode.INTERNAL_ERROR); } catch (SFException | SnowflakeSQLException ex) { isClosed = true; throw ex; } finally { if (executor != null) { executor.shutdownNow(); } // if this query enabled the new SQL format, re-disable it now setUseNewSqlFormat(false); } } /** * calculate conservative memory limit and the number of prefetch threads before query execution */ private void calculateConservativeMemoryUsage() { int clientMemoryLimit = session.getClientMemoryLimit(); int clientPrefetchThread = session.getClientPrefetchThreads(); int clientChunkSize = session.getClientResultChunkSize(); long memoryLimitInBytes; if (clientMemoryLimit == DEFAULT_CLIENT_MEMORY_LIMIT) { // this is all default scenario // only allows JDBC use at most 80% of free memory long freeMemoryToUse = Runtime.getRuntime().freeMemory() * 8 / 10; memoryLimitInBytes = Math.min( (long) 2 * clientPrefetchThread * clientChunkSize * 1024 * 1024, freeMemoryToUse); } else { memoryLimitInBytes = (long) clientMemoryLimit * 1024 * 1024; } conservativeMemoryLimit = memoryLimitInBytes; reducePrefetchThreadsAndChunkSizeToFitMemoryLimit( conservativeMemoryLimit, clientPrefetchThread, clientChunkSize); } private void updateConservativeResultChunkSize(int clientChunkSize) { if (clientChunkSize != conservativeResultChunkSize) { logger.debug( "conservativeResultChunkSize changed from {} to {}", conservativeResultChunkSize, clientChunkSize); conservativeResultChunkSize = clientChunkSize; statementParametersMap.put("CLIENT_RESULT_CHUNK_SIZE", conservativeResultChunkSize); } } private void reducePrefetchThreadsAndChunkSizeToFitMemoryLimit( long clientMemoryLimit, int clientPrefetchThread, int clientChunkSize) { if (clientPrefetchThread != DEFAULT_CLIENT_PREFETCH_THREADS) { // prefetch threads are configured so only reduce chunk size conservativePrefetchThreads = 
clientPrefetchThread; for (; clientChunkSize >= MIN_CLIENT_CHUNK_SIZE; clientChunkSize -= session.getConservativeMemoryAdjustStep()) { if (clientMemoryLimit >= (long) 2 * clientPrefetchThread * clientChunkSize * 1024 * 1024) { updateConservativeResultChunkSize(clientChunkSize); return; } } updateConservativeResultChunkSize(MIN_CLIENT_CHUNK_SIZE); } else { // reduce both prefetch threads and chunk size while (clientPrefetchThread > 1) { for (clientChunkSize = MAX_CLIENT_CHUNK_SIZE; clientChunkSize >= MIN_CLIENT_CHUNK_SIZE; clientChunkSize -= session.getConservativeMemoryAdjustStep()) { if (clientMemoryLimit >= (long) 2 * clientPrefetchThread * clientChunkSize * 1024 * 1024) { conservativePrefetchThreads = clientPrefetchThread; updateConservativeResultChunkSize(clientChunkSize); return; } } clientPrefetchThread--; } conservativePrefetchThreads = clientPrefetchThread; updateConservativeResultChunkSize(MIN_CLIENT_CHUNK_SIZE); } } /** * @return conservative prefetch threads before fetching results */ public int getConservativePrefetchThreads() { return conservativePrefetchThreads; } /** * @return conservative memory limit before fetching results */ public long getConservativeMemoryLimit() { return conservativeMemoryLimit; } /** * Return an array of child query ID for the given query ID. * *

If the given query ID is for a multiple statements query, it returns an array of its child * statements, otherwise, it returns an array to include the given query ID. * * @param queryID The given query ID * @return An array of child query IDs * @throws SQLException If the query is running or the corresponding query is FAILED. */ @Override public String[] getChildQueryIds(String queryID) throws SQLException { QueryStatus queryStatus = session.getQueryStatus(queryID); if (queryStatus.isStillRunning()) { throw new SQLException( "Status of query associated with resultSet is " + queryStatus.getDescription() + ". Results not generated."); } try { JsonNode jsonResult; try { jsonResult = StmtUtil.getQueryResultJSON(queryID, session); } catch (SnowflakeSQLException ex) { renewSessionOnExpiry(ex, session.getSessionToken(internalCallMarker())); logger.debug("Session renewed during getChildQueryIds, retrying", false); jsonResult = StmtUtil.getQueryResultJSON(queryID, session); } List childResults = ResultUtil.getChildResults(session, requestId, jsonResult); List resultList = new ArrayList<>(); for (int i = 0; i < childResults.size(); i++) { resultList.add(childResults.get(i).getId()); } if (resultList.isEmpty()) { resultList.add(queryID); } String[] result = new String[resultList.size()]; resultList.toArray(result); return result; } catch (SFException ex) { throw new SnowflakeSQLException(ex); } } @Override public SFBaseResultSet execute( String sql, Map parametersBinding, CallingMethod caller, ExecTimeTelemetryData execTimeData) throws SQLException, SFException { return execute(sql, false, parametersBinding, caller, execTimeData); } /** * A helper method to build URL and cancel the SQL for exec * * @param sql sql statement * @param mediaType media type * @param cancellationReason reason for the cancellation * @throws SnowflakeSQLException if failed to cancel the statement * @throws SFException if statement is already closed */ private void cancelHelper(String sql, String mediaType, CancellationReason cancellationReason) throws SnowflakeSQLException, SFException { synchronized (this) { if (isClosed) { throw new SFException(ErrorCode.INTERNAL_ERROR, "statement already closed"); } } StmtUtil.StmtInput stmtInput = new StmtUtil.StmtInput(); stmtInput .setServerUrl(session.getServerUrl()) .setSql(sql) .setMediaType(mediaType) .setRequestId(requestId) .setSessionToken(session.getSessionToken(internalCallMarker())) .setServiceName(session.getServiceName()) .setOCSPMode(session.getOCSPMode()) .setMaxRetries(session.getMaxHttpRetries()) .setHttpClientSettingsKey(session.getHttpClientKey()); StmtUtil.cancel(stmtInput, cancellationReason, session); synchronized (this) { /* * done with the remote execution of the query. set sequenceId to -1 * and request id to null so that we don't try to abort it again upon * canceling. */ this.sequenceId = -1; this.requestId = null; } } /** * Execute sql * * @param sql sql statement. 
* @param asyncExec is async exec * @param parametersBinding parameters to bind * @param caller the JDBC interface method that called this method, if any * @param execTimeData ExecTimeTelemetryData * @return whether there is result set or not * @throws SQLException if failed to execute sql * @throws SFException exception raised from Snowflake components * @throws SQLException if SQL error occurs */ public SFBaseResultSet execute( String sql, boolean asyncExec, Map parametersBinding, CallingMethod caller, ExecTimeTelemetryData execTimeData) throws SQLException, SFException { TelemetryService.getInstance().updateContext(session.getSnowflakeConnectionString()); sanityCheckQuery(sql); session.injectedDelay(); if (session.getPreparedStatementLogging()) { logger.info("Execute: {}", sql); } else { logger.debug("Execute: {}", sql); } String trimmedSql = sql.trim(); if (trimmedSql.length() >= 20 && trimmedSql.toLowerCase().startsWith("set-sf-property")) { executeSetProperty(sql); return null; } return executeQuery(sql, parametersBinding, false, asyncExec, caller, execTimeData); } private SFBaseResultSet executeFileTransfer(String sql) throws SQLException, SFException { session.injectedDelay(); resetState(); logger.debug("Entering executeFileTransfer", false); isFileTransfer = true; transferAgent = new SnowflakeFileTransferAgent(sql, session, this, internalCallMarker()); try { transferAgent.execute(); logger.debug("Setting result set", false); resultSet = (SFFixedViewResultSet) transferAgent.getResultSet(); childResults = Collections.emptyList(); logger.debug("Number of cols: {}", resultSet.getMetaData().getColumnCount()); logger.debug("Completed transferring data", false); return resultSet; } catch (SQLException ex) { logger.debug("Exception: {}", ex.getMessage()); throw ex; } } @Override public void close() { logger.trace("void close()", false); if (requestId != null) { EventUtil.triggerStateTransition( BasicEvent.QueryState.QUERY_ENDED, String.format(QueryState.QUERY_ENDED.getArgString(), requestId)); } resultSet = null; childResults = null; isClosed = true; if (httpRequest != null) { logger.debug("Releasing connection for the http request", false); httpRequest.releaseConnection(); httpRequest = null; } session.getTelemetryClient(internalCallMarker()).sendBatchAsync(); isFileTransfer = false; transferAgent = null; } @Override public void cancel() throws SFException, SQLException { logger.trace("void cancel()", false); cancel(CancellationReason.UNKNOWN); } @Override public void cancel(CancellationReason cancellationReason) throws SFException, SQLException { logger.trace("void cancel(CancellationReason)", false); if (canceling.get()) { logger.debug("Query is already cancelled", false); return; } canceling.set(true); if (isFileTransfer) { if (transferAgent != null) { logger.debug("Cancel file transferring ... 
", false); transferAgent.cancel(); } } else { synchronized (this) { // the query hasn't been sent to GS yet, just mark the stmt closed if (requestId == null) { logger.debug("No remote query outstanding", false); return; } } // cancel the query on the server side if it has been issued cancelHelper(this.sqlText, StmtUtil.SF_MEDIA_TYPE, cancellationReason); } } private void resetState() { resultSet = null; childResults = null; if (httpRequest != null) { httpRequest.releaseConnection(); httpRequest = null; } isClosed = false; sequenceId = -1; requestId = null; sqlText = null; canceling.set(false); isFileTransfer = false; transferAgent = null; } @Override public SFBaseSession getSFBaseSession() { return getSFBaseSession(null); } public SFBaseSession getSFBaseSession(InternalCallMarker internalCallMarker) { recordIfExternal("SFStatement", "getSFBaseSession", internalCallMarker); return session; } // *NOTE* this new SQL format is incomplete. It should only be used under certain circumstances. private void setUseNewSqlFormat(boolean useNewSqlFormat) throws SFException { this.addProperty("NEW_SQL_FORMAT", useNewSqlFormat); } /** * Renew the session if the given exception indicates session token expiry (error code 390112). If * the error is not session expiry, the original exception is re-thrown. */ void renewSessionOnExpiry(SnowflakeSQLException ex, String prevSessionToken) throws SFException, SnowflakeSQLException { if (ex.getErrorCode() != Constants.SESSION_EXPIRED_GS_CODE) { throw ex; } try { session.renewSession(prevSessionToken); } catch (SnowflakeReauthenticationRequest ex0) { if (session.isExternalbrowserOrOAuthFullFlowAuthenticator()) { session.open(internalCallMarker()); } else { throw ex0; } } } public boolean getMoreResults() throws SQLException { return getMoreResults(Statement.CLOSE_CURRENT_RESULT); } @Override public boolean getMoreResults(int current) throws SQLException { // clean up current result, if exists if (resultSet != null && (current == Statement.CLOSE_CURRENT_RESULT || current == Statement.CLOSE_ALL_RESULTS)) { resultSet.close(); } resultSet = null; // verify if more results exist if (childResults == null || childResults.isEmpty()) { return false; } // fetch next result using the query id SFChildResult nextResult = childResults.remove(0); try { JsonNode result; try { result = StmtUtil.getQueryResultJSON(nextResult.getId(), session); } catch (SnowflakeSQLException ex) { renewSessionOnExpiry(ex, session.getSessionToken(internalCallMarker())); logger.debug("Session renewed during getMoreResults, retrying child result fetch", false); result = StmtUtil.getQueryResultJSON(nextResult.getId(), session); } Object sortProperty = session.getSessionPropertyByKey("sort"); boolean sortResult = sortProperty != null && (Boolean) sortProperty; resultSet = SFResultSetFactory.getResultSet(result, this, sortResult, new ExecTimeTelemetryData()); // override statement type so we can treat the result set like a result of // the original statement called (and not the result scan) resultSet.setStatementType(nextResult.getType()); return nextResult.getType().isGenerateResultSet(); } catch (SFException ex) { throw new SnowflakeSQLException(ex); } } @Override public SFBaseResultSet getResultSet() { return resultSet; } @Override public boolean hasChildren() { return !childResults.isEmpty(); } @Override public SFBaseResultSet asyncExecute( String sql, Map parametersBinding, CallingMethod caller, ExecTimeTelemetryData execTimeData) throws SQLException, SFException { return execute(sql, true, 
        parametersBinding, caller, execTimeData);
  }
}

================================================
FILE: src/main/java/net/snowflake/client/internal/core/SFStatementType.java
================================================
package net.snowflake.client.internal.core;

/**
 * Used to check whether a statement type belongs to DDL or DML. The enum value of each statement
 * type is defined in com.snowflake.core.Statement.java.
 */
public enum SFStatementType {
  /**
   * By default we assume a query will generate a result set, which means executeUpdate will throw
   * an exception for it. That way we keep clear control over which statements can be executed by
   * executeUpdate.
   */
  UNKNOWN(0x0000, true),

  SELECT(0x1000, true),

  /** Data Manipulation Language */
  DML(0x3000, false),
  INSERT(0x3000 + 0x100, false),
  UPDATE(0x3000 + 0x200, false),
  DELETE(0x3000 + 0x300, false),
  MERGE(0x3000 + 0x400, false),
  MULTI_INSERT(0x3000 + 0x500, false),
  COPY(0x3000 + 0x600, false),
  UNLOAD(0x3000 + 0x700, false),
  RECLUSTER(0x3000 + 0x800, false),

  /** System Command Language (USE, DESCRIBE, etc.) */
  SCL(0x4000, false),
  ALTER_SESSION(0x4000 + 0x100, false),
  USE(0x4000 + 0x300, false),
  USE_DATABASE(0x4000 + 0x300 + 0x01, false),
  USE_SCHEMA(0x4000 + 0x300 + 0x02, false),
  USE_WAREHOUSE(0x4000 + 0x300 + 0x03, false),
  SHOW(0x4000 + 0x400, true),
  DESCRIBE(0x4000 + 0x500, true),
  LIST(0x4000 + 0x700 + 0x01, true),

  /** Transaction Command Language (COMMIT, ROLLBACK) */
  TCL(0x5000, false),

  /** Data Definition Language */
  DDL(0x6000, false),
  ALTER_USER_MANAGE_PATS(0x6000 + 0x200 + 0x44, true),

  /** Stage-related commands (other than LIST) */
  GET(0x7000 + 0x100 + 0x01, true),
  PUT(0x7000 + 0x100 + 0x02, true),
  REMOVE(0x7000 + 0x100 + 0x03, true),
  ;

  private final long statementTypeId;

  /**
   * Used by Statement.executeUpdate to determine whether we should return update counts or throw
   * an exception. In general, JDBC should only throw an exception if a result set object is
   * required.
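   *
   * <p>For example, {@code SELECT} and {@code SHOW} generate result sets (so {@code
   * executeUpdate} must reject them), while DML such as {@code INSERT} only reports update counts:
   *
   * <pre>{@code
   * SFStatementType.SELECT.isGenerateResultSet(); // true
   * SFStatementType.INSERT.isGenerateResultSet(); // false
   * }</pre>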
*/ private final boolean generateResultSet; private static final long LEVEL_3_RANGE = 0x1000; SFStatementType(long id, boolean generateResultSet) { this.statementTypeId = id; this.generateResultSet = generateResultSet; } public static SFStatementType lookUpTypeById(long id) { for (SFStatementType type : SFStatementType.values()) { if (type.getStatementTypeId() == id) { return type; } } // if not specific type is found, then return category of statement if (id >= SCL.getStatementTypeId() && id < SCL.getStatementTypeId() + LEVEL_3_RANGE) { return SCL; } else if (id >= TCL.getStatementTypeId() && id < TCL.getStatementTypeId() + LEVEL_3_RANGE) { return TCL; } else if (id >= DDL.getStatementTypeId() && id < DDL.getStatementTypeId() + LEVEL_3_RANGE) { return DDL; } else { return UNKNOWN; } } public long getStatementTypeId() { return statementTypeId; } public boolean isDDL() { return this == DDL; } public boolean isDML() { return statementTypeId >= DML.getStatementTypeId() && statementTypeId < DML.getStatementTypeId() + LEVEL_3_RANGE; } public boolean isTCL() { return this == TCL; } public boolean isSCL() { return this == SCL; } public boolean isGenerateResultSet() { return this.generateResultSet; } public boolean isSelect() { return this.statementTypeId == SELECT.statementTypeId; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SFTrustManager.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetEnv; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.JsonNodeType; import com.fasterxml.jackson.databind.node.ObjectNode; import java.io.ByteArrayOutputStream; import java.io.File; import java.io.IOException; import java.io.OutputStream; import java.math.BigInteger; import java.net.URI; import java.net.URISyntaxException; import java.net.URL; import java.security.InvalidKeyException; import java.security.KeyStore; import java.security.KeyStoreException; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.security.Signature; import java.security.SignatureException; import java.security.cert.CertificateEncodingException; import java.security.cert.CertificateException; import java.security.cert.X509Certificate; import java.text.MessageFormat; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Arrays; import java.util.Date; import java.util.Enumeration; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import java.util.TimeZone; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicBoolean; import javax.net.ssl.SSLEngine; import javax.net.ssl.TrustManager; import javax.net.ssl.TrustManagerFactory; import javax.net.ssl.X509ExtendedTrustManager; import javax.net.ssl.X509TrustManager; import net.snowflake.client.internal.jdbc.OCSPErrorCode; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import 
net.snowflake.client.internal.util.DecorrelatedJitterBackoff; import net.snowflake.client.internal.util.SFPair; import org.apache.commons.codec.binary.Base64; import org.apache.commons.io.IOUtils; import org.apache.http.HttpStatus; import org.apache.http.client.methods.CloseableHttpResponse; import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.apache.http.entity.StringEntity; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.ssl.SSLInitializationException; import org.bouncycastle.asn1.ASN1Encodable; import org.bouncycastle.asn1.ASN1Integer; import org.bouncycastle.asn1.ASN1ObjectIdentifier; import org.bouncycastle.asn1.ASN1OctetString; import org.bouncycastle.asn1.DEROctetString; import org.bouncycastle.asn1.DLSequence; import org.bouncycastle.asn1.ocsp.CertID; import org.bouncycastle.asn1.oiw.OIWObjectIdentifiers; import org.bouncycastle.asn1.x509.AlgorithmIdentifier; import org.bouncycastle.asn1.x509.Certificate; import org.bouncycastle.asn1.x509.Extension; import org.bouncycastle.asn1.x509.Extensions; import org.bouncycastle.asn1.x509.GeneralName; import org.bouncycastle.asn1.x509.TBSCertificate; import org.bouncycastle.cert.X509CertificateHolder; import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter; import org.bouncycastle.cert.ocsp.BasicOCSPResp; import org.bouncycastle.cert.ocsp.CertificateID; import org.bouncycastle.cert.ocsp.CertificateStatus; import org.bouncycastle.cert.ocsp.OCSPException; import org.bouncycastle.cert.ocsp.OCSPReq; import org.bouncycastle.cert.ocsp.OCSPReqBuilder; import org.bouncycastle.cert.ocsp.OCSPResp; import org.bouncycastle.cert.ocsp.RevokedStatus; import org.bouncycastle.cert.ocsp.SingleResp; import org.bouncycastle.operator.DigestCalculator; /** * SFTrustManager is a composite of TrustManager of the default JVM TrustManager and Snowflake OCSP * revocation status checker. Use this when initializing SSLContext object. * *

 * <pre>{@code
 * TrustManager[] trustManagers = {new SFTrustManager()};
 * SSLContext sslContext = SSLContext.getInstance("TLS");
 * sslContext.init(null, trustManagers, null);
 * }</pre>
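 *
 * <p>An {@code SSLContext} initialized this way applies Snowflake's OCSP revocation checking on
 * top of the default JVM certificate chain validation.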
*/ public class SFTrustManager extends X509ExtendedTrustManager { /** Test System Parameters. Not used in the production */ public static final String SF_OCSP_RESPONSE_CACHE_SERVER_URL = "SF_OCSP_RESPONSE_CACHE_SERVER_URL"; public static final String SF_OCSP_RESPONSE_CACHE_SERVER_ENABLED = "SF_OCSP_RESPONSE_CACHE_SERVER_ENABLED"; public static final String SF_OCSP_TEST_INJECT_VALIDITY_ERROR = "SF_OCSP_TEST_INJECT_VALIDITY_ERROR"; public static final String SF_OCSP_TEST_INJECT_UNKNOWN_STATUS = "SF_OCSP_TEST_INJECT_UNKNOWN_STATUS"; public static final String SF_OCSP_TEST_RESPONDER_URL = "SF_OCSP_TEST_RESPONDER_URL"; public static final String SF_OCSP_TEST_OCSP_RESPONSE_CACHE_SERVER_TIMEOUT = "SF_OCSP_TEST_OCSP_RESPONSE_CACHE_SERVER_TIMEOUT"; public static final String SF_OCSP_TEST_OCSP_RESPONDER_TIMEOUT = "SF_OCSP_TEST_OCSP_RESPONDER_TIMEOUT"; public static final String SF_OCSP_TEST_INVALID_SIGNING_CERT = "SF_OCSP_TEST_INVALID_SIGNING_CERT"; public static final String SF_OCSP_TEST_NO_OCSP_RESPONDER_URL = "SF_OCSP_TEST_NO_OCSP_RESPONDER_URL"; /** OCSP response cache file name. Should be identical to other driver's cache file name. */ static final String CACHE_FILE_NAME = "ocsp_response_cache.json"; private static final SFLogger logger = SFLoggerFactory.getLogger(SFTrustManager.class); private static final ASN1ObjectIdentifier OIDocsp = new ASN1ObjectIdentifier("1.3.6.1.5.5.7.48.1").intern(); private static final ASN1ObjectIdentifier SHA1RSA = new ASN1ObjectIdentifier("1.2.840.113549.1.1.5").intern(); private static final ASN1ObjectIdentifier SHA256RSA = new ASN1ObjectIdentifier("1.2.840.113549.1.1.11").intern(); private static final ASN1ObjectIdentifier SHA384RSA = new ASN1ObjectIdentifier("1.2.840.113549.1.1.12").intern(); private static final ASN1ObjectIdentifier SHA512RSA = new ASN1ObjectIdentifier("1.2.840.113549.1.1.13").intern(); private static final String ALGORITHM_SHA1_NAME = "SHA-1"; /** Object mapper for JSON encoding and decoding */ private static final ObjectMapper OBJECT_MAPPER = ObjectMapperFactory.getObjectMapper(); /** System property name to specify cache directory. */ private static final String CACHE_DIR_PROP = "net.snowflake.jdbc.ocspResponseCacheDir"; /** Environment name to specify the cache directory. Used if system property not set. */ private static final String CACHE_DIR_ENV = "SF_OCSP_RESPONSE_CACHE_DIR"; /** OCSP response cache entry expiration time (s) */ private static final long CACHE_EXPIRATION_IN_SECONDS = 432000L; /** OCSP response cache lock file expiration time (s) */ private static final long CACHE_FILE_LOCK_EXPIRATION_IN_SECONDS = 60L; /** Default OCSP Cache server connection timeout */ private static final int DEFAULT_OCSP_CACHE_SERVER_CONNECTION_TIMEOUT = 5000; /** Default OCSP responder connection timeout */ private static final int DEFAULT_OCSP_RESPONDER_CONNECTION_TIMEOUT = 10000; /** Default OCSP Cache server host name prefix */ private static final String DEFAULT_OCSP_CACHE_HOST_PREFIX = "http://ocsp.snowflakecomputing."; /** Default domain for OCSP cache host */ private static final String DEFAULT_OCSP_CACHE_HOST_DOMAIN = "com"; /** OCSP response file cache directory */ private static final FileCacheManager fileCacheManager; /** Tolerable validity date range ratio. 
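   *
   * <p>Illustrative arithmetic (an assumption about how the ratio is applied, not taken from this
   * file): with a ratio of 0.01, an OCSP response whose validity window spans 10 days would be
   * allowed roughly 0.01 * 10 days = 2.4 hours of slack around that window.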
*/ private static final float TOLERABLE_VALIDITY_RANGE_RATIO = 0.01f; /** Maximum clocktime skew (ms) */ private static final long MAX_CLOCK_SKEW_IN_MILLISECONDS = 900000L; /** Minimum cache warm up time (ms) */ private static final long MIN_CACHE_WARMUP_TIME_IN_MILLISECONDS = 18000000L; /** Initial sleeping time in retry (ms) */ private static final long INITIAL_SLEEPING_TIME_IN_MILLISECONDS = 1000L; /** Maximum sleeping time in retry (ms) */ private static final long MAX_SLEEPING_TIME_IN_MILLISECONDS = 16000L; /** Map from signature algorithm ASN1 object to the name. */ private static final Map SIGNATURE_OID_TO_STRING = new ConcurrentHashMap<>(); /** Map from OCSP response code to a string representation. */ private static final Map OCSP_RESPONSE_CODE_TO_STRING = new ConcurrentHashMap<>(); private static final Object ROOT_CA_LOCK = new Object(); /** OCSP Response cache */ private static final Map> OCSP_RESPONSE_CACHE = new ConcurrentHashMap<>(); /** Date and timestamp format */ private static final SimpleDateFormat DATE_FORMAT_UTC = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); /** OCSP Response Cache server Retry URL pattern */ static String SF_OCSP_RESPONSE_CACHE_SERVER_RETRY_URL_PATTERN; /** OCSP response cache server URL. */ static String SF_OCSP_RESPONSE_CACHE_SERVER_URL_VALUE; private static JcaX509CertificateConverter CONVERTER_X509 = new JcaX509CertificateConverter(); /** RootCA cache */ private static Map ROOT_CA = new ConcurrentHashMap<>(); private static final AtomicBoolean WAS_CACHE_UPDATED = new AtomicBoolean(); private static final AtomicBoolean WAS_CACHE_READ = new AtomicBoolean(); /** OCSP HTTP client */ private static final Map ocspCacheServerClient = new ConcurrentHashMap<>(); /** OCSP event types */ public static String SF_OCSP_EVENT_TYPE_REVOKED_CERTIFICATE_ERROR = "RevokedCertificateError"; public static String SF_OCSP_EVENT_TYPE_VALIDATION_ERROR = "OCSPValidationError"; private final HttpClientSettingsKey proxySettingsKey; static { // init OCSP response cache file manager fileCacheManager = new FileCacheManagerBuilder() .setCacheDirectorySystemProperty(CACHE_DIR_PROP) .setCacheDirectoryEnvironmentVariable(CACHE_DIR_ENV) .setBaseCacheFileName(CACHE_FILE_NAME) .setCacheFileLockExpirationInSeconds(CACHE_FILE_LOCK_EXPIRATION_IN_SECONDS) .setOnlyOwnerPermissions(false) .build(); } static { SIGNATURE_OID_TO_STRING.put(SHA1RSA, "SHA1withRSA"); SIGNATURE_OID_TO_STRING.put(SHA256RSA, "SHA256withRSA"); SIGNATURE_OID_TO_STRING.put(SHA384RSA, "SHA384withRSA"); SIGNATURE_OID_TO_STRING.put(SHA512RSA, "SHA512withRSA"); } static { OCSP_RESPONSE_CODE_TO_STRING.put(OCSPResp.SUCCESSFUL, "successful"); OCSP_RESPONSE_CODE_TO_STRING.put(OCSPResp.MALFORMED_REQUEST, "malformedRequest"); OCSP_RESPONSE_CODE_TO_STRING.put(OCSPResp.INTERNAL_ERROR, "internalError"); OCSP_RESPONSE_CODE_TO_STRING.put(OCSPResp.TRY_LATER, "tryLater"); OCSP_RESPONSE_CODE_TO_STRING.put(OCSPResp.SIG_REQUIRED, "sigRequired"); OCSP_RESPONSE_CODE_TO_STRING.put(OCSPResp.UNAUTHORIZED, "unauthorized"); } static { DATE_FORMAT_UTC.setTimeZone(TimeZone.getTimeZone("UTC")); } /** The default JVM Trust manager. */ private final X509TrustManager trustManager; /** The default JVM Extended Trust Manager */ private final X509ExtendedTrustManager exTrustManager; OCSPCacheServer ocspCacheServer = new OCSPCacheServer(); /** OCSP mode */ private OCSPMode ocspMode; /** * Constructor with the cache file. If not specified, the default cachefile is used. * * @param key HttpClientSettingsKey * @param cacheFile cache file. 
*/ SFTrustManager(HttpClientSettingsKey key, File cacheFile) { this.ocspMode = key.getOcspMode(); this.proxySettingsKey = key; this.trustManager = getTrustManager(TrustManagerFactory.getDefaultAlgorithm()); if (trustManager instanceof X509ExtendedTrustManager) { this.exTrustManager = (X509ExtendedTrustManager) trustManager; } else { logger.debug("Standard X509TrustManager is used instead of X509ExtendedTrustManager."); this.exTrustManager = null; } checkNewOCSPEndpointAvailability(); if (cacheFile != null) { fileCacheManager.overrideCacheFile(cacheFile); } if (!WAS_CACHE_READ.getAndSet(true)) { // read cache file once JsonNode res = fileCacheManager.readCacheFile(); readJsonStoreCache(res); } logger.debug( "Initializing trust manager with OCSP mode: {}, cache file: {}", ocspMode, cacheFile); } /** Deletes OCSP response cache file from disk. */ public static void deleteCache() { fileCacheManager.deleteCacheFile(); } public static void cleanTestSystemParameters() { System.clearProperty(SF_OCSP_RESPONSE_CACHE_SERVER_URL); System.clearProperty(SF_OCSP_RESPONSE_CACHE_SERVER_ENABLED); System.clearProperty(SF_OCSP_TEST_INJECT_VALIDITY_ERROR); System.clearProperty(SF_OCSP_TEST_INJECT_UNKNOWN_STATUS); System.clearProperty(SF_OCSP_TEST_RESPONDER_URL); System.clearProperty(SF_OCSP_TEST_OCSP_RESPONDER_TIMEOUT); System.clearProperty(SF_OCSP_TEST_OCSP_RESPONSE_CACHE_SERVER_TIMEOUT); System.clearProperty(SF_OCSP_TEST_INVALID_SIGNING_CERT); System.clearProperty(SF_OCSP_TEST_NO_OCSP_RESPONDER_URL); } /** * Reset OCSP Cache server URL * * @param ocspCacheServerUrl OCSP Cache server URL */ static void resetOCSPResponseCacherServerURL(String ocspCacheServerUrl) throws IOException { if (ocspCacheServerUrl == null || SF_OCSP_RESPONSE_CACHE_SERVER_RETRY_URL_PATTERN != null) { return; } SF_OCSP_RESPONSE_CACHE_SERVER_URL_VALUE = ocspCacheServerUrl; if (!SF_OCSP_RESPONSE_CACHE_SERVER_URL_VALUE.startsWith(DEFAULT_OCSP_CACHE_HOST_PREFIX)) { URL url = new URL(SF_OCSP_RESPONSE_CACHE_SERVER_URL_VALUE); if (url.getPort() > 0) { SF_OCSP_RESPONSE_CACHE_SERVER_RETRY_URL_PATTERN = String.format( "%s://%s:%d/retry/%s", url.getProtocol(), url.getHost(), url.getPort(), "%s/%s"); } else { SF_OCSP_RESPONSE_CACHE_SERVER_RETRY_URL_PATTERN = String.format("%s://%s/retry/%s", url.getProtocol(), url.getHost(), "%s/%s"); } logger.debug( "Reset OCSP response cache server URL to: {}", SF_OCSP_RESPONSE_CACHE_SERVER_RETRY_URL_PATTERN); } } static void setOCSPResponseCacheServerURL(String serverURL) { String ocspCacheUrl = systemGetProperty(SF_OCSP_RESPONSE_CACHE_SERVER_URL); if (ocspCacheUrl != null) { SF_OCSP_RESPONSE_CACHE_SERVER_URL_VALUE = ocspCacheUrl; } try { ocspCacheUrl = systemGetEnv(SF_OCSP_RESPONSE_CACHE_SERVER_URL); if (ocspCacheUrl != null) { SF_OCSP_RESPONSE_CACHE_SERVER_URL_VALUE = ocspCacheUrl; } } catch (Throwable ex) { logger.debug( "Failed to get environment variable " + SF_OCSP_RESPONSE_CACHE_SERVER_URL + ". 
Ignored", true); } if (SF_OCSP_RESPONSE_CACHE_SERVER_URL_VALUE == null) { String topLevelDomain = DEFAULT_OCSP_CACHE_HOST_DOMAIN; try { URL url = new URL(serverURL); int domainIndex = url.getHost().lastIndexOf(".") + 1; topLevelDomain = url.getHost().substring(domainIndex); } catch (Exception e) { logger.debug("Exception while setting top level domain (for OCSP)", e); } SF_OCSP_RESPONSE_CACHE_SERVER_URL_VALUE = String.format("%s%s/%s", DEFAULT_OCSP_CACHE_HOST_PREFIX, topLevelDomain, CACHE_FILE_NAME); } logger.debug("Set OCSP response cache server to: {}", SF_OCSP_RESPONSE_CACHE_SERVER_URL_VALUE); } private static boolean useOCSPResponseCacheServer() { String ocspCacheServerEnabled = systemGetProperty(SF_OCSP_RESPONSE_CACHE_SERVER_ENABLED); if (Boolean.FALSE.toString().equalsIgnoreCase(ocspCacheServerEnabled)) { logger.debug("No OCSP Response Cache Server is used.", false); return false; } try { ocspCacheServerEnabled = systemGetEnv(SF_OCSP_RESPONSE_CACHE_SERVER_ENABLED); if (Boolean.FALSE.toString().equalsIgnoreCase(ocspCacheServerEnabled)) { logger.debug("No OCSP Response Cache Server is used.", false); return false; } } catch (Throwable ex) { logger.debug( "Failed to get environment variable " + SF_OCSP_RESPONSE_CACHE_SERVER_ENABLED + ". Ignored", false); } return true; } /** * Convert cache key to base64 encoded cert id * * @param ocsp_cache_key Cache key to encode */ private static String encodeCacheKey(OcspResponseCacheKey ocsp_cache_key) { try { DigestCalculator digest = new SHA1DigestCalculator(); AlgorithmIdentifier algo = digest.getAlgorithmIdentifier(); ASN1OctetString nameHash = ASN1OctetString.getInstance(ocsp_cache_key.nameHash); ASN1OctetString keyHash = ASN1OctetString.getInstance(ocsp_cache_key.keyHash); ASN1Integer snumber = new ASN1Integer(ocsp_cache_key.serialNumber); CertID cid = new CertID(algo, nameHash, keyHash, snumber); return Base64.encodeBase64String(cid.toASN1Primitive().getEncoded()); } catch (Exception ex) { logger.debug("Failed to encode cache key to base64 encoded cert id", false); } return null; } /** * CertificateID to string * * @param certificateID CertificateID * @return a string representation of CertificateID */ private static String CertificateIDToString(CertificateID certificateID) { return String.format( "CertID. NameHash: %s, KeyHash: %s, Serial Number: %s", SnowflakeUtil.byteToHexString(certificateID.getIssuerNameHash()), SnowflakeUtil.byteToHexString(certificateID.getIssuerKeyHash()), MessageFormat.format("{0,number,#}", certificateID.getSerialNumber())); } /** * Decodes OCSP Response Cache key from JSON * * @param elem A JSON element * @return OcspResponseCacheKey object */ private static SFPair> decodeCacheFromJSON( Map.Entry elem) throws IOException { long currentTimeSecond = new Date().getTime() / 1000; byte[] certIdDer = Base64.decodeBase64(elem.getKey()); DLSequence rawCertId = (DLSequence) ASN1ObjectIdentifier.fromByteArray(certIdDer); ASN1Encodable[] rawCertIdArray = rawCertId.toArray(); byte[] issuerNameHashDer = ((DEROctetString) rawCertIdArray[1]).getEncoded(); byte[] issuerKeyHashDer = ((DEROctetString) rawCertIdArray[2]).getEncoded(); BigInteger serialNumber = ((ASN1Integer) rawCertIdArray[3]).getValue(); OcspResponseCacheKey k = new OcspResponseCacheKey(issuerNameHashDer, issuerKeyHashDer, serialNumber); JsonNode ocspRespBase64 = elem.getValue(); if (!ocspRespBase64.isArray() || ocspRespBase64.size() != 2) { logger.debug("Invalid cache file format. 
Ignored", false); return null; } long producedAt = ocspRespBase64.get(0).asLong(); String ocspResp = ocspRespBase64.get(1).asText(); if (currentTimeSecond - CACHE_EXPIRATION_IN_SECONDS <= producedAt) { // add cache return SFPair.of(k, SFPair.of(producedAt, ocspResp)); } else { // delete cache return SFPair.of(k, SFPair.of(producedAt, null)); } } /** * Encode OCSP Response Cache to JSON * * @return JSON object */ private static ObjectNode encodeCacheToJSON() { try { ObjectNode out = OBJECT_MAPPER.createObjectNode(); for (Map.Entry> elem : OCSP_RESPONSE_CACHE.entrySet()) { OcspResponseCacheKey key = elem.getKey(); SFPair value0 = elem.getValue(); long currentTimeSecond = value0.left; DigestCalculator digest = new SHA1DigestCalculator(); AlgorithmIdentifier algo = digest.getAlgorithmIdentifier(); ASN1OctetString nameHash = ASN1OctetString.getInstance(key.nameHash); ASN1OctetString keyHash = ASN1OctetString.getInstance(key.keyHash); ASN1Integer serialNumber = new ASN1Integer(key.serialNumber); CertID cid = new CertID(algo, nameHash, keyHash, serialNumber); ArrayNode vout = OBJECT_MAPPER.createArrayNode(); vout.add(currentTimeSecond); vout.add(value0.right); out.set(Base64.encodeBase64String(cid.toASN1Primitive().getEncoded()), vout); } return out; } catch (IOException ex) { logger.debug("Failed to encode ASN1 object.", false); } return null; } private static synchronized void readJsonStoreCache(JsonNode m) { if (m == null || !m.getNodeType().equals(JsonNodeType.OBJECT)) { logger.debug("Invalid cache file format.", false); return; } try { for (Iterator> itr = m.fields(); itr.hasNext(); ) { SFPair> ky = decodeCacheFromJSON(itr.next()); if (ky != null && ky.right != null && ky.right.right != null) { // valid range. cache the result in memory OCSP_RESPONSE_CACHE.put(ky.left, ky.right); WAS_CACHE_UPDATED.set(true); } else if (ky != null && OCSP_RESPONSE_CACHE.containsKey(ky.left)) { // delete it from the cache if no OCSP response is back. OCSP_RESPONSE_CACHE.remove(ky.left); WAS_CACHE_UPDATED.set(true); } } } catch (IOException ex) { logger.debug("Failed to decode the cache file", false); } } /** * Verifies the signature of the data * * @param cert a certificate for public key. * @param sig signature in a byte array. * @param data data in a byte array. * @param idf algorithm identifier object. * @throws CertificateException raises if the verification fails. */ private static void verifySignature( X509CertificateHolder cert, byte[] sig, byte[] data, AlgorithmIdentifier idf) throws CertificateException { try { String algorithm = SIGNATURE_OID_TO_STRING.get(idf.getAlgorithm()); if (algorithm == null) { throw new NoSuchAlgorithmException( String.format("Unsupported signature OID. OID: %s", idf)); } Signature signer = Signature.getInstance(algorithm); X509Certificate c = CONVERTER_X509.getCertificate(cert); signer.initVerify(c.getPublicKey()); signer.update(data); if (!signer.verify(sig)) { throw new CertificateEncodingException( String.format( "Failed to verify the signature. Potentially the " + "data was not generated by by the cert, %s", cert.getSubject())); } } catch (NoSuchAlgorithmException | InvalidKeyException | SignatureException ex) { throw new CertificateEncodingException("Failed to verify the signature.", ex); } } private static long maxLong(long v1, long v2) { return Math.max(v1, v2); } /** * Calculates the tolerable validity time beyond the next update. * *

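* <p>Worked example using the constants defined above: for a typical 7-day window between
* {@code thisUpdate} and {@code nextUpdate}, 1% is roughly 1.7 hours, which is below the
* 5-hour {@code MIN_CACHE_WARMUP_TIME_IN_MILLISECONDS} floor, so the response stays
* acceptable for 5 hours past {@code nextUpdate}.
*
* <p>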
Sometimes CA's OCSP response update is delayed beyond the clock skew as the update is not * populated to all OCSP servers for certain period. * * @param thisUpdate the last update * @param nextUpdate the next update * @return the tolerable validity beyond the next update. */ private static long calculateTolerableValidity(Date thisUpdate, Date nextUpdate) { return maxLong( (long) ((float) (nextUpdate.getTime() - thisUpdate.getTime()) * TOLERABLE_VALIDITY_RANGE_RATIO), MIN_CACHE_WARMUP_TIME_IN_MILLISECONDS); } /** * Checks the validity * * @param currentTime the current time * @param thisUpdate the last update timestamp * @param nextUpdate the next update timestamp * @return true if valid or false */ private static boolean isValidityRange(Date currentTime, Date thisUpdate, Date nextUpdate) { if (checkOCSPResponseValidityErrorParameter()) { return false; // test } long tolerableValidity = calculateTolerableValidity(thisUpdate, nextUpdate); return thisUpdate.getTime() - MAX_CLOCK_SKEW_IN_MILLISECONDS <= currentTime.getTime() && currentTime.getTime() <= nextUpdate.getTime() + tolerableValidity; } private static boolean checkOCSPResponseValidityErrorParameter() { String injectValidityError = systemGetProperty(SF_OCSP_TEST_INJECT_VALIDITY_ERROR); return Boolean.TRUE.toString().equalsIgnoreCase(injectValidityError); } /** * Is the test parameter enabled? * * @param key the test parameter * @return true if enabled otherwise false */ private boolean isEnabledSystemTestParameter(String key) { return Boolean.TRUE.toString().equalsIgnoreCase(systemGetProperty(key)); } /** fail open mode current state */ private boolean isOCSPFailOpen() { return ocspMode == OCSPMode.FAIL_OPEN; } private void checkNewOCSPEndpointAvailability() { String new_ocsp_ept; try { new_ocsp_ept = systemGetEnv("SF_OCSP_ACTIVATE_NEW_ENDPOINT"); } catch (Throwable ex) { logger.debug( "Could not get environment variable to check for New OCSP Endpoint Availability", false); new_ocsp_ept = systemGetProperty("net.snowflake.jdbc.ocsp_activate_new_endpoint"); } ocspCacheServer.new_endpoint_enabled = new_ocsp_ept != null; } /** * Get TrustManager for the algorithm. This is mainly used to get the JVM default trust manager * and cache all of the root CA. * * @param algorithm algorithm. * @return TrustManager object. */ private X509TrustManager getTrustManager(String algorithm) { try { TrustManagerFactory factory = TrustManagerFactory.getInstance(algorithm); factory.init((KeyStore) null); X509TrustManager ret = null; for (TrustManager tm : factory.getTrustManagers()) { // Multiple TrustManager may be attached. We just need X509 Trust // Manager here. if (tm instanceof X509TrustManager) { ret = (X509TrustManager) tm; break; } } if (ret == null) { return null; } synchronized (ROOT_CA_LOCK) { // cache root CA certificates for later use. 
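// ROOT_CA is keyed by the subject X500Name's hashCode so that getPairIssuerSubject()
// can later resolve an issuer via bcCert.getIssuer().hashCode().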
if (ROOT_CA.isEmpty()) { for (X509Certificate cert : ret.getAcceptedIssuers()) { Certificate bcCert = Certificate.getInstance(cert.getEncoded()); ROOT_CA.put(bcCert.getSubject().hashCode(), bcCert); } } } return ret; } catch (NoSuchAlgorithmException | KeyStoreException | CertificateEncodingException ex) { throw new SSLInitializationException(ex.getMessage(), ex); } } @Override public void checkClientTrusted(X509Certificate[] chain, String authType) throws CertificateException { // default behavior trustManager.checkClientTrusted(chain, authType); } @Override public void checkServerTrusted(X509Certificate[] chain, String authType) throws CertificateException { trustManager.checkServerTrusted(chain, authType); } @Override public void checkClientTrusted(X509Certificate[] chain, String authType, java.net.Socket socket) throws CertificateException { if (exTrustManager != null) { exTrustManager.checkClientTrusted(chain, authType, socket); } else { trustManager.checkClientTrusted(chain, authType); } } @Override public void checkClientTrusted(X509Certificate[] chain, String authType, SSLEngine sslEngine) throws CertificateException { if (exTrustManager != null) { exTrustManager.checkClientTrusted(chain, authType, sslEngine); } else { trustManager.checkClientTrusted(chain, authType); } } @Override public void checkServerTrusted(X509Certificate[] chain, String authType, java.net.Socket socket) throws CertificateException { if (exTrustManager != null) { exTrustManager.checkServerTrusted(chain, authType, socket); } else { trustManager.checkServerTrusted(chain, authType); } String host = socket.getInetAddress().getHostName(); this.validateRevocationStatus(chain, host); } @Override public void checkServerTrusted(X509Certificate[] chain, String authType, SSLEngine sslEngine) throws CertificateException { if (exTrustManager != null) { exTrustManager.checkServerTrusted(chain, authType, sslEngine); } else { trustManager.checkServerTrusted(chain, authType); } this.validateRevocationStatus(chain, sslEngine.getPeerHost()); } @Override public X509Certificate[] getAcceptedIssuers() { return trustManager.getAcceptedIssuers(); } /** * Certificate Revocation checks * * @param chain chain of certificates attached. * @param peerHost Hostname of the server * @throws CertificateException if any certificate validation fails */ void validateRevocationStatus(X509Certificate[] chain, String peerHost) throws CertificateException { final List bcChain = convertToBouncyCastleCertificate(chain); final List> pairIssuerSubjectList = getPairIssuerSubject(bcChain); if (peerHost.startsWith("ocspssd")) { return; } if (ocspCacheServer.new_endpoint_enabled) { ocspCacheServer.resetOCSPResponseCacheServer(peerHost); } boolean isCached = isCached(pairIssuerSubjectList); if (useOCSPResponseCacheServer() && !isCached) { if (!ocspCacheServer.new_endpoint_enabled) { logger.debug( "Downloading OCSP response cache from the server. URL: {}", SF_OCSP_RESPONSE_CACHE_SERVER_URL_VALUE); } else { logger.debug( "Downloading OCSP response cache from the server. URL: {}", ocspCacheServer.SF_OCSP_RESPONSE_CACHE_SERVER); } try { readOcspResponseCacheServer(); } catch (SFOCSPException ex) { logger.debug( "Error downloading OCSP Response from cache server : {}." + "OCSP Responses will be fetched directly from the CA OCSP" + "Responder ", ex.getMessage()); } // if the cache is downloaded from the server, it should be written // to the file cache at all times. 
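// readJsonStoreCache() sets WAS_CACHE_UPDATED when it merges new entries; that flag is
// consumed right after the revocation checks below to persist the merged cache to disk.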
} executeRevocationStatusChecks(pairIssuerSubjectList, peerHost); if (WAS_CACHE_UPDATED.getAndSet(false)) { JsonNode input = encodeCacheToJSON(); fileCacheManager.writeCacheFile(input); } } /** * Executes the revocation status checks for all chained certificates * * @param pairIssuerSubjectList a list of pair of issuer and subject certificates. * @throws CertificateException raises if any error occurs. */ private void executeRevocationStatusChecks( List> pairIssuerSubjectList, String peerHost) throws CertificateException { long currentTimeSecond = new Date().getTime() / 1000L; for (SFPair pairIssuerSubject : pairIssuerSubjectList) { executeOneRevocationStatusCheck(pairIssuerSubject, currentTimeSecond, peerHost); } } private String generateFailOpenLog(String logData) { return "OCSP responder didn't respond correctly. Assuming certificate is " + "not revoked. Details: " + logData; } /** * Executes a single revocation status check * * @param pairIssuerSubject a pair of issuer and subject certificate * @param currentTimeSecond the current timestamp * @throws CertificateException if certificate exception is raised. */ private void executeOneRevocationStatusCheck( SFPair pairIssuerSubject, long currentTimeSecond, String peerHost) throws CertificateException { OCSPReq req; OcspResponseCacheKey keyOcspResponse; try { req = createRequest(pairIssuerSubject); CertID cid = req.getRequestList()[0].getCertID().toASN1Primitive(); keyOcspResponse = new OcspResponseCacheKey( cid.getIssuerNameHash().getEncoded(), cid.getIssuerKeyHash().getEncoded(), cid.getSerialNumber().getValue()); } catch (IOException ex) { throw new CertificateException(ex.getMessage(), ex); } long sleepTime = INITIAL_SLEEPING_TIME_IN_MILLISECONDS; DecorrelatedJitterBackoff backoff = new DecorrelatedJitterBackoff(sleepTime, MAX_SLEEPING_TIME_IN_MILLISECONDS); CertificateException error; boolean success = false; String ocspLog; OCSPTelemetryData telemetryData = new OCSPTelemetryData(); telemetryData.setSfcPeerHost(peerHost); telemetryData.setCertId(encodeCacheKey(keyOcspResponse)); telemetryData.setCacheEnabled(useOCSPResponseCacheServer()); telemetryData.setOCSPMode(ocspMode); Throwable cause = null; try { final int maxRetryCounter = isOCSPFailOpen() ? 1 : 2; for (int retry = 0; retry < maxRetryCounter; ++retry) { try { SFPair value0 = OCSP_RESPONSE_CACHE.get(keyOcspResponse); OCSPResp ocspResp; try { try { if (value0 == null) { telemetryData.setCacheHit(false); ocspResp = fetchOcspResponse( pairIssuerSubject, req, encodeCacheKey(keyOcspResponse), peerHost, telemetryData); OCSP_RESPONSE_CACHE.put( keyOcspResponse, SFPair.of(currentTimeSecond, ocspResponseToB64(ocspResp))); WAS_CACHE_UPDATED.set(true); value0 = SFPair.of(currentTimeSecond, ocspResponseToB64(ocspResp)); } else { telemetryData.setCacheHit(true); } } catch (Throwable ex) { logger.debug( "Exception occurred while trying to fetch OCSP Response - {}", ex.getMessage()); throw new SFOCSPException( OCSPErrorCode.OCSP_RESPONSE_FETCH_FAILURE, "Exception occurred while trying to fetch OCSP Response", ex); } logger.debug( "Validating. 
{}", CertificateIDToString(req.getRequestList()[0].getCertID())); try { validateRevocationStatusMain(pairIssuerSubject, value0.right); success = true; break; } catch (SFOCSPException ex) { if (ex.getErrorCode() != OCSPErrorCode.REVOCATION_CHECK_FAILURE) { throw ex; } throw new CertificateException(ex.getMessage(), ex); } } catch (SFOCSPException ex) { if (ex.getErrorCode() == OCSPErrorCode.CERTIFICATE_STATUS_REVOKED) { throw ex; } else { throw new CertificateException(ex.getMessage(), ex); } } } catch (CertificateException ex) { WAS_CACHE_UPDATED.set(OCSP_RESPONSE_CACHE.remove(keyOcspResponse) != null); if (WAS_CACHE_UPDATED.get()) { logger.debug("Deleting the invalid OCSP cache.", false); } cause = ex; logger.debug( "Retrying {}/{} after sleeping {} ms", retry + 1, maxRetryCounter, sleepTime); try { if (retry + 1 < maxRetryCounter) { Thread.sleep(sleepTime); sleepTime = backoff.nextSleepTime(sleepTime); } } catch (InterruptedException ex0) { // nop } } } } catch (SFOCSPException ex) { // Revoked Certificate error = new CertificateException(ex); ocspLog = telemetryData.generateTelemetry(SF_OCSP_EVENT_TYPE_REVOKED_CERTIFICATE_ERROR, error); logger.error(ocspLog, false); throw error; } if (!success) { if (cause != null) // cause is set in the above catch block { error = new CertificateException( "Certificate Revocation check failed. Could not retrieve OCSP Response.", cause); logger.debug(cause.getMessage(), false); } else { error = new CertificateException( "Certificate Revocation check failed. Could not retrieve OCSP Response."); logger.debug(error.getMessage(), false); } ocspLog = telemetryData.generateTelemetry(SF_OCSP_EVENT_TYPE_VALIDATION_ERROR, error); if (isOCSPFailOpen()) { // Log includes fail-open warning. logger.debug(generateFailOpenLog(ocspLog), false); } else { // still not success, raise an error. logger.debug(ocspLog, false); throw error; } } } /** * Is OCSP Response cached? * * @param pairIssuerSubjectList a list of pair of issuer and subject certificates * @return true if all of OCSP response are cached else false */ private boolean isCached(List> pairIssuerSubjectList) { long currentTimeSecond = new Date().getTime() / 1000L; boolean isCached = true; try { for (SFPair pairIssuerSubject : pairIssuerSubjectList) { OCSPReq req = createRequest(pairIssuerSubject); CertificateID certificateId = req.getRequestList()[0].getCertID(); logger.debug(CertificateIDToString(certificateId), false); CertID cid = certificateId.toASN1Primitive(); OcspResponseCacheKey k = new OcspResponseCacheKey( cid.getIssuerNameHash().getEncoded(), cid.getIssuerKeyHash().getEncoded(), cid.getSerialNumber().getValue()); SFPair res = OCSP_RESPONSE_CACHE.get(k); if (res == null) { logger.debug("Not all OCSP responses for the certificate is in the cache.", false); isCached = false; break; } else if (currentTimeSecond - CACHE_EXPIRATION_IN_SECONDS > res.left) { logger.debug("Cache for CertID expired.", false); isCached = false; break; } else { try { validateRevocationStatusMain(pairIssuerSubject, res.right); } catch (SFOCSPException ex) { logger.debug( "Cache includes invalid OCSPResponse. " + "Will download the OCSP cache from Snowflake OCSP server", false); isCached = false; } } } } catch (IOException ex) { logger.debug("Failed to encode CertID.", false); } return isCached; } /** Reads the OCSP response cache from the server. 
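* <p>HTTP download failures are logged and swallowed so that the caller can fall back to
* contacting the CA's OCSP responder directly; only an unparseable cache-server URL is
* surfaced as an SFOCSPException.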
*/ private void readOcspResponseCacheServer() throws SFOCSPException { String ocspCacheServerInUse; if (ocspCacheServer.new_endpoint_enabled) { ocspCacheServerInUse = ocspCacheServer.SF_OCSP_RESPONSE_CACHE_SERVER; } else { ocspCacheServerInUse = SF_OCSP_RESPONSE_CACHE_SERVER_URL_VALUE; } CloseableHttpResponse response = null; CloseableHttpClient httpClient = ocspCacheServerClient.computeIfAbsent(proxySettingsKey, HttpUtil::getHttpClientForOcsp); try { URI uri = new URI(ocspCacheServerInUse); HttpGet get = new HttpGet(uri); response = httpClient.execute(get); if (response == null || response.getStatusLine().getStatusCode() != HttpStatus.SC_OK) { throw new IOException( String.format( "Failed to get the OCSP response from the OCSP " + "cache server: HTTP: %d", response != null ? response.getStatusLine().getStatusCode() : -1)); } ByteArrayOutputStream out = new ByteArrayOutputStream(); IOUtils.copy(response.getEntity().getContent(), out); JsonNode m = OBJECT_MAPPER.readTree(out.toByteArray()); out.close(); readJsonStoreCache(m); logger.debug("Successfully downloaded OCSP cache from the server.", false); } catch (IOException ex) { logger.debug( "Failed to read the OCSP response cache from the server. " + "Server: {}, Err: {}", ocspCacheServerInUse, ex); } catch (URISyntaxException ex) { logger.debug("Indicate that a string could not be parsed as a URI reference.", false); throw new SFOCSPException( OCSPErrorCode.INVALID_CACHE_SERVER_URL, "Invalid OCSP Cache Server URL used", ex); } finally { IOUtils.closeQuietly(response); } } private int getOCSPCacheServerConnectionTimeout() { int timeout = DEFAULT_OCSP_CACHE_SERVER_CONNECTION_TIMEOUT; if (systemGetProperty(SF_OCSP_TEST_OCSP_RESPONSE_CACHE_SERVER_TIMEOUT) != null) { try { timeout = Integer.parseInt(systemGetProperty(SF_OCSP_TEST_OCSP_RESPONSE_CACHE_SERVER_TIMEOUT)); } catch (Exception ex) { // nop } } return timeout; } /** * Fetches OCSP response from OCSP server * * @param pairIssuerSubject a pair of issuer and subject certificates * @param req OCSP Request object * @return OCSP Response object * @throws CertificateEncodingException if any other error occurs */ private OCSPResp fetchOcspResponse( SFPair pairIssuerSubject, OCSPReq req, String cid_enc, String hname, OCSPTelemetryData telemetryData) throws CertificateEncodingException { CloseableHttpResponse response = null; try { byte[] ocspReqDer = req.getEncoded(); String ocspReqDerBase64 = Base64.encodeBase64String(ocspReqDer); Set ocspUrls = getOcspUrls(pairIssuerSubject.right); checkExistOCSPURL(ocspUrls); String ocspUrlStr = ocspUrls.iterator().next(); // first one ocspUrlStr = overrideOCSPURL(ocspUrlStr); telemetryData.setOcspUrl(ocspUrlStr); telemetryData.setOcspReq(ocspReqDerBase64); URL url; String path = ""; if (!ocspCacheServer.new_endpoint_enabled) { String urlEncodedOCSPReq = URLUtil.urlEncode(ocspReqDerBase64); if (SF_OCSP_RESPONSE_CACHE_SERVER_RETRY_URL_PATTERN != null) { URL ocspUrl = new URL(ocspUrlStr); if (!isNullOrEmpty(ocspUrl.getPath())) { path = ocspUrl.getPath(); } if (ocspUrl.getPort() > 0) { url = new URL( String.format( SF_OCSP_RESPONSE_CACHE_SERVER_RETRY_URL_PATTERN, ocspUrl.getHost() + ":" + ocspUrl.getPort() + path, urlEncodedOCSPReq)); } else { url = new URL( String.format( SF_OCSP_RESPONSE_CACHE_SERVER_RETRY_URL_PATTERN, ocspUrl.getHost() + path, urlEncodedOCSPReq)); } } else { url = new URL(String.format("%s/%s", ocspUrlStr, urlEncodedOCSPReq)); } logger.debug("Not hit cache. Fetching OCSP response from CA OCSP server. 
{}", url); } else { url = new URL(ocspCacheServer.SF_OCSP_RESPONSE_RETRY_URL); logger.debug( "Not hit cache. Fetching OCSP response from Snowflake OCSP Response Fetcher. {}", url); } long sleepTime = INITIAL_SLEEPING_TIME_IN_MILLISECONDS; DecorrelatedJitterBackoff backoff = new DecorrelatedJitterBackoff(sleepTime, MAX_SLEEPING_TIME_IN_MILLISECONDS); boolean success = false; final int maxRetryCounter = isOCSPFailOpen() ? 1 : 2; Exception savedEx = null; CloseableHttpClient httpClient = ocspCacheServerClient.computeIfAbsent(proxySettingsKey, HttpUtil::getHttpClientForOcsp); for (int retry = 0; retry < maxRetryCounter; ++retry) { try { if (!ocspCacheServer.new_endpoint_enabled) { HttpGet get = new HttpGet(url.toString()); response = httpClient.execute(get); } else { HttpPost post = new HttpPost(url.toString()); post.setHeader("Content-Type", "application/json"); OCSPPostReqData postReqData = new OCSPPostReqData(ocspUrlStr, ocspReqDerBase64, cid_enc, hname); String json_payload = OBJECT_MAPPER.writeValueAsString(postReqData); post.setEntity(new StringEntity(json_payload, "utf-8")); response = httpClient.execute(post); } success = response != null && response.getStatusLine().getStatusCode() == HttpStatus.SC_OK; if (success) { break; } } catch (IOException ex) { logger.debug("Failed to reach out OCSP responder: {}", ex.getMessage()); savedEx = ex; } IOUtils.closeQuietly(response); logger.debug("Retrying {}/{} after sleeping {} ms", retry + 1, maxRetryCounter, sleepTime); try { if (retry + 1 < maxRetryCounter) { Thread.sleep(sleepTime); sleepTime = backoff.nextSleepTime(sleepTime); } } catch (InterruptedException ex0) { // nop } } if (!success) { throw new CertificateEncodingException( String.format( "Failed to get OCSP response. StatusCode: %d, URL: %s", response == null ? null : response.getStatusLine().getStatusCode(), ocspUrlStr), savedEx); } ByteArrayOutputStream out = new ByteArrayOutputStream(); IOUtils.copy(response.getEntity().getContent(), out); OCSPResp ocspResp = new OCSPResp(out.toByteArray()); out.close(); if (ocspResp.getStatus() != OCSPResp.SUCCESSFUL) { throw new CertificateEncodingException( String.format( "Failed to get OCSP response. Status: %s", OCSP_RESPONSE_CODE_TO_STRING.get(ocspResp.getStatus()))); } return ocspResp; } catch (IOException ex) { throw new CertificateEncodingException("Failed to encode object.", ex); } finally { IOUtils.closeQuietly(response); } } private void checkExistOCSPURL(Set ocspUrls) throws CertificateEncodingException { if (ocspUrls.size() == 0 || isEnabledSystemTestParameter(SF_OCSP_TEST_NO_OCSP_RESPONDER_URL)) { throw new CertificateEncodingException( "No OCSP Responder URL is attached to the certificate.", new SFOCSPException( OCSPErrorCode.NO_OCSP_URL_ATTACHED, "No OCSP Responder URL is attached to the certificate.")); } } private String overrideOCSPURL(String ocspURL) { String ocspURLInput = systemGetProperty(SF_OCSP_TEST_RESPONDER_URL); if (ocspURLInput != null) { logger.debug("Overriding OCSP url to: {}", ocspURLInput); return ocspURLInput; } logger.debug("Overriding OCSP url to: {}", ocspURL); return ocspURL; } /** * Validates the certificate revocation status * * @param pairIssuerSubject a pair of issuer and subject certificates * @param ocspRespB64 Base64 encoded OCSP Response object * @throws SFOCSPException raises if any other error occurs */ // Package-private for testing (see SFTrustManagerOcspCachePoisoningTest). 
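// It accepts the Base64 form of the response so that freshly fetched responses and
// disk-cached entries go through the same validation path.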
void validateRevocationStatusMain( SFPair pairIssuerSubject, String ocspRespB64) throws SFOCSPException { try { OCSPResp ocspResp = b64ToOCSPResp(ocspRespB64); if (ocspResp == null) { throw new SFOCSPException( OCSPErrorCode.INVALID_OCSP_RESPONSE, "OCSP response is null. The content is invalid."); } Date currentTime = new Date(); // getResponseObject() returns null for non-SUCCESSFUL OCSP responses (e.g. unauthorized(6)). // Surface as SFOCSPException so cache eviction and fail-open run instead of NPEing. BasicOCSPResp basicOcspResp = (BasicOCSPResp) (ocspResp.getResponseObject()); if (basicOcspResp == null) { throw new SFOCSPException( OCSPErrorCode.INVALID_OCSP_RESPONSE, "OCSP response body is null (non-SUCCESSFUL or malformed response). The content is invalid."); } X509CertificateHolder[] attachedCerts = basicOcspResp.getCerts(); X509CertificateHolder signVerifyCert; checkInvalidSigningCertTestParameter(); if (attachedCerts.length > 0) { logger.debug( "Certificate is attached for verification. " + "Verifying it by the issuer certificate.", false); signVerifyCert = attachedCerts[0]; if (currentTime.after(signVerifyCert.getNotAfter()) || currentTime.before(signVerifyCert.getNotBefore())) { throw new SFOCSPException( OCSPErrorCode.EXPIRED_OCSP_SIGNING_CERTIFICATE, String.format( "Cert attached to " + "OCSP Response is invalid." + "Current time - %s" + "Certificate not before time - %s" + "Certificate not after time - %s", currentTime, signVerifyCert.getNotBefore(), signVerifyCert.getNotAfter())); } try { verifySignature( new X509CertificateHolder(pairIssuerSubject.left.getEncoded()), signVerifyCert.getSignature(), CONVERTER_X509.getCertificate(signVerifyCert).getTBSCertificate(), signVerifyCert.getSignatureAlgorithm()); } catch (CertificateException ex) { logger.debug("OCSP Signing Certificate signature verification failed", false); throw new SFOCSPException( OCSPErrorCode.INVALID_CERTIFICATE_SIGNATURE, "OCSP Signing Certificate signature verification failed", ex); } logger.debug("Verifying OCSP signature by the attached certificate public key.", false); } else { logger.debug( "Certificate is NOT attached for verification. " + "Verifying OCSP signature by the issuer public key.", false); signVerifyCert = new X509CertificateHolder(pairIssuerSubject.left.getEncoded()); } try { verifySignature( signVerifyCert, basicOcspResp.getSignature(), basicOcspResp.getTBSResponseData(), basicOcspResp.getSignatureAlgorithmID()); } catch (CertificateException ex) { logger.debug("OCSP signature verification failed", false); throw new SFOCSPException( OCSPErrorCode.INVALID_OCSP_RESPONSE_SIGNATURE, "OCSP signature verification failed", ex); } validateBasicOcspResponse(currentTime, basicOcspResp); } catch (IOException | OCSPException ex) { throw new SFOCSPException( OCSPErrorCode.REVOCATION_CHECK_FAILURE, "Failed to check revocation status.", ex); } catch (RuntimeException ex) { // Convert unexpected unchecked failures so the caller can evict the bad cache entry and // fail open instead of letting an unchecked exception escape into the SSL handshake. throw new SFOCSPException( OCSPErrorCode.REVOCATION_CHECK_FAILURE, "Failed to check revocation status due to an unexpected error.", ex); } } private void checkInvalidSigningCertTestParameter() throws SFOCSPException { if (isEnabledSystemTestParameter(SF_OCSP_TEST_INVALID_SIGNING_CERT)) { throw new SFOCSPException( OCSPErrorCode.EXPIRED_OCSP_SIGNING_CERTIFICATE, "Cert attached to OCSP Response is invalid"); } } /** * Validates OCSP Basic OCSP response. 
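* <p>For each SingleResp this checks the certificate status (good, revoked, or unknown)
* and then verifies that currentTime falls inside the tolerated thisUpdate/nextUpdate
* window computed via isValidityRange().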
* * @param currentTime the current timestamp. * @param basicOcspResp BasicOcspResponse data. * @throws SFOCSPException raises if any failure occurs. */ private void validateBasicOcspResponse(Date currentTime, BasicOCSPResp basicOcspResp) throws SFOCSPException { for (SingleResp singleResps : basicOcspResp.getResponses()) { checkCertUnknownTestParameter(); CertificateStatus certStatus = singleResps.getCertStatus(); if (certStatus != CertificateStatus.GOOD) { if (certStatus instanceof RevokedStatus) { RevokedStatus status = (RevokedStatus) certStatus; int reason; try { reason = status.getRevocationReason(); } catch (IllegalStateException ex) { reason = -1; } Date revocationTime = status.getRevocationTime(); throw new SFOCSPException( OCSPErrorCode.CERTIFICATE_STATUS_REVOKED, String.format( "The certificate has been revoked. Reason: %d, Time: %s", reason, DATE_FORMAT_UTC.format(revocationTime))); } else { // Unknown status throw new SFOCSPException( OCSPErrorCode.CERTIFICATE_STATUS_UNKNOWN, "Failed to validate the certificate for UNKNOWN reason."); } } Date thisUpdate = singleResps.getThisUpdate(); Date nextUpdate = singleResps.getNextUpdate(); logger.debug( "Current Time: {}, This Update: {}, Next Update: {}", currentTime, thisUpdate, nextUpdate); if (!isValidityRange(currentTime, thisUpdate, nextUpdate)) { throw new SFOCSPException( OCSPErrorCode.INVALID_OCSP_RESPONSE_VALIDITY, String.format( "The OCSP response validity is out of range: " + "Current Time: %s, This Update: %s, Next Update: %s", DATE_FORMAT_UTC.format(currentTime), DATE_FORMAT_UTC.format(thisUpdate), DATE_FORMAT_UTC.format(nextUpdate))); } } logger.debug("OK. Verified the certificate revocation status.", false); } private void checkCertUnknownTestParameter() throws SFOCSPException { if (isEnabledSystemTestParameter(SF_OCSP_TEST_INJECT_UNKNOWN_STATUS)) { throw new SFOCSPException( OCSPErrorCode.CERTIFICATE_STATUS_UNKNOWN, "Failed to validate the certificate for UNKNOWN reason."); } } /** * Creates a OCSP Request * * @param pairIssuerSubject a pair of issuer and subject certificates * @return OCSPReq object */ private OCSPReq createRequest(SFPair pairIssuerSubject) throws IOException { Certificate issuer = pairIssuerSubject.left; Certificate subject = pairIssuerSubject.right; OCSPReqBuilder gen = new OCSPReqBuilder(); try { DigestCalculator digest = new SHA1DigestCalculator(); X509CertificateHolder certHolder = new X509CertificateHolder(issuer.getEncoded()); CertificateID certId = new CertificateID(digest, certHolder, subject.getSerialNumber().getValue()); gen.addRequest(certId); return gen.build(); } catch (OCSPException ex) { throw new IOException("Failed to build a OCSPReq.", ex); } } /** * Converts X509Certificate to Bouncy Castle Certificate * * @param chain an array of X509Certificate * @return a list of Bouncy Castle Certificate */ private List convertToBouncyCastleCertificate(X509Certificate[] chain) throws CertificateEncodingException { final List bcChain = new ArrayList<>(); for (X509Certificate cert : chain) { bcChain.add(Certificate.getInstance(cert.getEncoded())); } return bcChain; } /** * Creates a pair of Issuer and Subject certificates * * @param bcChain a list of bouncy castle Certificate * @return a list of pair of Issuer and Subject certificates */ private List> getPairIssuerSubject(List bcChain) throws CertificateException { List> pairIssuerSubject = new ArrayList<>(); for (int i = 0, len = bcChain.size(); i < len; ++i) { Certificate bcCert = bcChain.get(i); if 
(bcCert.getIssuer().equals(bcCert.getSubject())) { continue; // skipping ROOT CA } if (i < len - 1) { // Check if the root certificate has been found and stop going down the chain. Certificate issuer = ROOT_CA.get(bcCert.getIssuer().hashCode()); if (issuer != null) { logger.debug( "A trusted root certificate found: %s, stopping chain traversal here", bcCert.getIssuer().toString()); pairIssuerSubject.add(SFPair.of(issuer, bcChain.get(i))); break; } pairIssuerSubject.add(SFPair.of(bcChain.get(i + 1), bcChain.get(i))); } else { // no root CA certificate is attached in the certificate chain, so // getting one from the root CA from JVM. Certificate issuer = ROOT_CA.get(bcCert.getIssuer().hashCode()); if (issuer == null) { throw new CertificateException( "Failed to find the root CA.", new SFOCSPException(OCSPErrorCode.NO_ROOTCA_FOUND, "Failed to find the root CA.")); } pairIssuerSubject.add(SFPair.of(issuer, bcChain.get(i))); } } return pairIssuerSubject; } /** * Gets OCSP URLs associated with the certificate. * * @param bcCert Bouncy Castle Certificate * @return a set of OCSP URLs */ private Set getOcspUrls(Certificate bcCert) throws IOException { TBSCertificate bcTbsCert = bcCert.getTBSCertificate(); Extensions bcExts = bcTbsCert.getExtensions(); if (bcExts == null) { throw new IOException("Failed to get Tbs Certificate."); } Set ocsp = new HashSet<>(); for (Enumeration en = bcExts.oids(); en.hasMoreElements(); ) { ASN1ObjectIdentifier oid = (ASN1ObjectIdentifier) en.nextElement(); Extension bcExt = bcExts.getExtension(oid); if (Extension.authorityInfoAccess.equals(bcExt.getExtnId())) { // OCSP URLS are included in authorityInfoAccess DLSequence seq = (DLSequence) bcExt.getParsedValue(); for (ASN1Encodable asn : seq) { ASN1Encodable[] pairOfAsn = ((DLSequence) asn).toArray(); if (pairOfAsn.length == 2) { ASN1ObjectIdentifier key = (ASN1ObjectIdentifier) pairOfAsn[0]; if (OIDocsp.equals(key)) { // ensure OCSP and not CRL GeneralName gn = GeneralName.getInstance(pairOfAsn[1]); ocsp.add(gn.getName().toString()); } } } } } return ocsp; } /** OCSP Response Utils */ private String ocspResponseToB64(OCSPResp ocspResp) { if (ocspResp == null) { return null; } try { return Base64.encodeBase64String(ocspResp.getEncoded()); } catch (Throwable ex) { logger.debug("Could not convert OCSP Response to Base64", false); return null; } } private OCSPResp b64ToOCSPResp(String ocspRespB64) { try { return new OCSPResp(Base64.decodeBase64(ocspRespB64)); } catch (Throwable ex) { logger.debug("Could not cover OCSP Response from Base64 to OCSPResp object", false); return null; } } static class OCSPCacheServer { String SF_OCSP_RESPONSE_CACHE_SERVER; String SF_OCSP_RESPONSE_RETRY_URL; boolean new_endpoint_enabled; void resetOCSPResponseCacheServer(String host) { String ocspCacheServerUrl; if (host.toLowerCase().contains(".global.snowflakecomputing.")) { ocspCacheServerUrl = String.format("https://ocspssd%s/%s", host.substring(host.indexOf('-')), "ocsp"); } else if (host.toLowerCase().contains(".snowflakecomputing.")) { ocspCacheServerUrl = String.format("https://ocspssd%s/%s", host.substring(host.indexOf('.')), "ocsp"); } else { String topLevelDomain = host.substring(host.lastIndexOf(".") + 1); ocspCacheServerUrl = String.format("https://ocspssd.snowflakecomputing.%s/ocsp", topLevelDomain); } SF_OCSP_RESPONSE_CACHE_SERVER = String.format("%s/%s", ocspCacheServerUrl, "fetch"); SF_OCSP_RESPONSE_RETRY_URL = String.format("%s/%s", ocspCacheServerUrl, "retry"); } } private static class OCSPPostReqData { private String 
ocsp_url; private String ocsp_req; private String cert_id_enc; private String hostname; OCSPPostReqData(String ocsp_url, String ocsp_req, String cert_id_enc, String hname) { this.ocsp_url = ocsp_url; this.ocsp_req = ocsp_req; this.cert_id_enc = cert_id_enc; this.hostname = hname; } } /** OCSP response cache key object */ static class OcspResponseCacheKey { final byte[] nameHash; final byte[] keyHash; final BigInteger serialNumber; OcspResponseCacheKey(byte[] nameHash, byte[] keyHash, BigInteger serialNumber) { this.nameHash = nameHash; this.keyHash = keyHash; this.serialNumber = serialNumber; } public int hashCode() { int ret = Arrays.hashCode(this.nameHash) * 37; ret = ret * 10 + Arrays.hashCode(this.keyHash) * 37; ret = ret * 10 + this.serialNumber.hashCode(); return ret; } public boolean equals(Object obj) { if (!(obj instanceof OcspResponseCacheKey)) { return false; } OcspResponseCacheKey target = (OcspResponseCacheKey) obj; return Arrays.equals(this.nameHash, target.nameHash) && Arrays.equals(this.keyHash, target.keyHash) && this.serialNumber.equals(target.serialNumber); } public String toString() { return String.format( "OcspResponseCacheKey: NameHash: %s, KeyHash: %s, SerialNumber: %s", SnowflakeUtil.byteToHexString(nameHash), SnowflakeUtil.byteToHexString(keyHash), serialNumber.toString()); } } /** SHA1 Digest Calculator used in OCSP Req. */ static class SHA1DigestCalculator implements DigestCalculator { private ByteArrayOutputStream bOut = new ByteArrayOutputStream(); public AlgorithmIdentifier getAlgorithmIdentifier() { return new AlgorithmIdentifier(OIWObjectIdentifiers.idSHA1); } public OutputStream getOutputStream() { return bOut; } public byte[] getDigest() { byte[] bytes = bOut.toByteArray(); bOut.reset(); try { MessageDigest messageDigest = MessageDigest.getInstance(ALGORITHM_SHA1_NAME); return messageDigest.digest(bytes); } catch (NoSuchAlgorithmException ex) { String errMsg = String.format( "Failed to instantiate the algorithm: %s. err=%s", ALGORITHM_SHA1_NAME, ex.getMessage()); logger.error(errMsg, false); throw new RuntimeException(errMsg); } } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SdkProxyRoutePlanner.java ================================================ package net.snowflake.client.internal.core; import java.util.Arrays; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import org.apache.http.HttpException; import org.apache.http.HttpHost; import org.apache.http.HttpRequest; import org.apache.http.impl.conn.DefaultRoutePlanner; import org.apache.http.impl.conn.DefaultSchemePortResolver; import org.apache.http.protocol.HttpContext; /** * SdkProxyRoutePlanner delegates a Proxy Route Planner from the settings instead of the system * properties. It will use the proxy created from proxyHost, proxyPort, and proxyProtocol and filter * the hosts who matches nonProxyHosts pattern. 
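* <p>Illustrative sketch only; the host names and the HttpProtocol constant below are
* assumptions for the example, not values taken from this repository:
* <pre>{@code
* // Send everything through proxy.example.com:8080 except hosts matching the patterns.
* SdkProxyRoutePlanner planner =
*     new SdkProxyRoutePlanner(
*         "proxy.example.com", 8080, HttpProtocol.HTTP, "*.internal.example.com|localhost");
* }</pre>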
*/ public class SdkProxyRoutePlanner extends DefaultRoutePlanner { private final HttpHost proxy; private final String[] hostPatterns; public SdkProxyRoutePlanner( String proxyHost, int proxyPort, HttpProtocol proxyProtocol, String nonProxyHosts) { super(DefaultSchemePortResolver.INSTANCE); proxy = new HttpHost(proxyHost, proxyPort, proxyProtocol.toString()); if (!SnowflakeUtil.isNullOrEmpty(nonProxyHosts)) { hostPatterns = nonProxyHosts.split("\\|"); } else { hostPatterns = null; } } private boolean doesTargetMatchNonProxyHosts(HttpHost target) { if (hostPatterns == null) { return false; } String targetHost = target.getHostName().toLowerCase(); return Arrays.stream(hostPatterns) .anyMatch(pattern -> SnowflakeUtil.hostnameMatchesGlob(targetHost, pattern)); } @Override protected HttpHost determineProxy( final HttpHost target, final HttpRequest request, final HttpContext context) throws HttpException { return doesTargetMatchNonProxyHosts(target) ? null : proxy; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SecureStorageAppleManager.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty; import com.sun.jna.Library; import com.sun.jna.Native; import com.sun.jna.Pointer; import java.nio.charset.StandardCharsets; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; public class SecureStorageAppleManager implements SecureStorageManager { private static final SFLogger logger = SFLoggerFactory.getLogger(SecureStorageAppleManager.class); private final SecurityLib securityLib; private SecureStorageAppleManager() { securityLib = SecurityLibManager.getInstance(); } public static SecureStorageAppleManager builder() { logger.debug("Using Apple Keychain as a token cache storage"); return new SecureStorageAppleManager(); } public SecureStorageStatus setCredential(String host, String user, String type, String cred) { if (isNullOrEmpty(cred)) { logger.debug("No credential provided", false); return SecureStorageStatus.SUCCESS; } String target = SecureStorageManager.buildCredentialsKey(host, user, type); byte[] targetBytes = target.getBytes(StandardCharsets.UTF_8); byte[] userBytes = user.toUpperCase().getBytes(StandardCharsets.UTF_8); byte[] credBytes = cred.getBytes(StandardCharsets.UTF_8); Pointer[] itemRef = new Pointer[1]; int errCode = 0; synchronized (securityLib) { errCode = securityLib.SecKeychainFindGenericPassword( null, targetBytes.length, targetBytes, userBytes.length, userBytes, null, null, itemRef); } if (errCode != SecurityLib.ERR_SEC_SUCCESS && errCode != SecurityLib.ERR_SEC_ITEM_NOT_FOUND) { logger.warn( String.format( "Failed to check the existence of the item in keychain. Error code = %d", Native.getLastError())); return SecureStorageStatus.FAILURE; } if (itemRef[0] != null) { synchronized (securityLib) { errCode = securityLib.SecKeychainItemModifyContent(itemRef[0], null, credBytes.length, credBytes); } } else { synchronized (securityLib) { errCode = securityLib.SecKeychainAddGenericPassword( Pointer.NULL, targetBytes.length, targetBytes, userBytes.length, userBytes, credBytes.length, credBytes, null); } } if (errCode != SecurityLib.ERR_SEC_SUCCESS) { logger.warn( String.format( "Failed to set/modify the item in keychain. 
Error code = %d", Native.getLastError())); return SecureStorageStatus.FAILURE; } logger.debug("Set the item in keychain successfully"); return SecureStorageStatus.SUCCESS; } public String getCredential(String host, String user, String type) { String target = SecureStorageManager.buildCredentialsKey(host, user, type); byte[] targetBytes = target.getBytes(StandardCharsets.UTF_8); byte[] userBytes = user.toUpperCase().getBytes(StandardCharsets.UTF_8); int[] dataLength = new int[1]; Pointer[] data = new Pointer[1]; try { int errCode = 0; synchronized (securityLib) { errCode = securityLib.SecKeychainFindGenericPassword( null, targetBytes.length, targetBytes, userBytes.length, userBytes, dataLength, data, null); } if (errCode != SecurityLib.ERR_SEC_SUCCESS) { logger.warn( String.format( "Failed to find the item in keychain or item not exists. Error code = %d", Native.getLastError())); return null; } if (dataLength[0] == 0 || data[0] == null) { logger.warn("Found empty item or no item is found", false); return null; } byte[] credBytes = data[0].getByteArray(0, dataLength[0]); String res = new String(credBytes, StandardCharsets.UTF_8); logger.debug("Successfully read the credential. Will return it as String now"); return res; } finally { if (data[0] != null) { synchronized (securityLib) { securityLib.SecKeychainItemFreeContent(null, data[0]); } } } } public SecureStorageStatus deleteCredential(String host, String user, String type) { String target = SecureStorageManager.buildCredentialsKey(host, user, type); byte[] targetBytes = target.getBytes(StandardCharsets.UTF_8); byte[] userBytes = user.toUpperCase().getBytes(StandardCharsets.UTF_8); Pointer[] itemRef = new Pointer[1]; int errCode = 0; synchronized (securityLib) { errCode = securityLib.SecKeychainFindGenericPassword( null, targetBytes.length, targetBytes, userBytes.length, userBytes, null, null, itemRef); } if (errCode != SecurityLib.ERR_SEC_SUCCESS && errCode != SecurityLib.ERR_SEC_ITEM_NOT_FOUND) { logger.warn( String.format( "Failed to delete the item in keychain. Error code = %d", Native.getLastError())); return SecureStorageStatus.FAILURE; } if (itemRef[0] != null) { synchronized (securityLib) { errCode = securityLib.SecKeychainItemDelete(itemRef[0]); } if (errCode != SecurityLib.ERR_SEC_SUCCESS) { logger.warn( String.format( "Failed to delete the item in keychain. Error code = %d", Native.getLastError())); return SecureStorageStatus.FAILURE; } } return SecureStorageStatus.SUCCESS; } static class SecurityLibManager { private static SecurityLib INSTANCE = null; private static class ResourceHolder { private static final SecurityLib INSTANCE = (SecurityLib) Native.loadLibrary("Security", SecurityLib.class); } public static SecurityLib getInstance() { if (INSTANCE == null) { INSTANCE = ResourceHolder.INSTANCE; } return INSTANCE; } /** This function is used only for unit test */ public static void setInstance(SecurityLib instance) { INSTANCE = instance; } /** This function is a helper function for testing */ public static void resetInstance() { if (Constants.getOS() == Constants.OS.MAC) { INSTANCE = ResourceHolder.INSTANCE; } } } /** the java mapping of OS X Security Library */ interface SecurityLib extends Library { // SecurityLib INSTANCE = (SecurityLib) Native.loadLibrary("Security", SecurityLib.class); int ERR_SEC_SUCCESS = 0; int ERR_SEC_ITEM_NOT_FOUND = -25300; /** * https://developer.apple.com/documentation/security/1397301-seckeychainfindgenericpassword * *
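* <p>These are JNA mappings of the C SecKeychain* functions; every call made through this
* interface is serialized on the securityLib monitor in the manager above.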
* <p>
func SecKeychainFindGenericPassword(_ keychainOrArray: CFTypeRef?, _ serviceNameLength: * UInt32, _ serviceName: UnsafePointer?, const char* _ accountNameLength: UInt32, _ * accountName: UnsafePointer?, const char* _ passwordLength: * UnsafeMutablePointer?, UInt32* _ passwordData: * UnsafeMutablePointer?, void** _ itemRef: * UnsafeMutablePointer?), SecKeychainItemRef* -> OSStatus */ public int SecKeychainFindGenericPassword( Pointer keychainOrArray, int serviceNameLength, byte[] serviceName, int accountNameLength, byte[] accountName, int[] passwordLength, Pointer[] passwordData, Pointer[] itemRef); /** * func SecKeychainAddGenericPassword(_ keychain: SecKeychain?, SecKeychainRef _ * serviceNameLength: UInt32, _ serviceName: UnsafePointer?, const char* _ * accountNameLength: UInt32, _ accountName: UnsafePointer?, const char* _ passwordLength: * UInt32, _ passwordData: UnsafeRawPointer, const void* _ itemRef: * UnsafeMutablePointer?) SecKeychainItemRef* -> OSStatus */ public int SecKeychainAddGenericPassword( Pointer keychain, int serviceNameLength, byte[] serviceName, int accountNameLength, byte[] accountName, int passwordLength, byte[] passwordData, Pointer[] itemRef); /** * OSStatus SecKeychainItemModifyContent(SecKeychainItemRef itemRef, const * SecKeychainAttributeList *attrList, UInt32 length, const void *data); */ public int SecKeychainItemModifyContent( Pointer itemRef, Pointer attrList, int length, byte[] data); /** OSStatus SecKeychainItemDelete(SecKeychainItemRef itemRef); */ public int SecKeychainItemDelete(Pointer itemRef); /** OSStatus SecKeychainItemFreeContent(SecKeychainAttributeList *attrList, void *data); */ public int SecKeychainItemFreeContent(Pointer[] attrList, Pointer data); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SecureStorageLinuxManager.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.config.SFConnectionConfigParser.SKIP_TOKEN_FILE_PERMISSIONS_VERIFICATION; import static net.snowflake.client.internal.core.StmtUtil.mapper; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.convertSystemGetEnvToBooleanValue; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.JsonNodeType; import com.fasterxml.jackson.databind.node.ObjectNode; import java.util.HashMap; import java.util.Iterator; import java.util.Map; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; /** * Linux currently doesn't have a local secure storage like Keychain/Credential Manager in * Mac/Windows. This class just wraps the local file cache logic to keep Linux platform api * consistent Mac/Windows platform. 
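* <p>A minimal sketch of the resulting cache file layout (the key below is an
* illustrative SHA-256 hex digest as produced by
* SecureStorageManager.buildCredentialsKey, and the token value is a placeholder):
* <pre>{@code
* {
*   "tokens": {
*     "3f2b9c...e1": "<cached token>"
*   }
* }
* }</pre>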
*/ public class SecureStorageLinuxManager implements SecureStorageManager { private static final SFLogger logger = SFLoggerFactory.getLogger(SecureStorageLinuxManager.class); private static final String CACHE_FILE_NAME = "credential_cache_v1.json"; private static final String CACHE_DIR_PROP = "net.snowflake.jdbc.temporaryCredentialCacheDir"; private static final String CACHE_DIR_ENV = "SF_TEMPORARY_CREDENTIAL_CACHE_DIR"; private static final String CACHE_FILE_TOKENS_OBJECT_NAME = "tokens"; private static final long CACHE_FILE_LOCK_EXPIRATION_IN_SECONDS = 60L; private final FileCacheManager fileCacheManager; private SecureStorageLinuxManager() { boolean shouldSkipTokenFilePermissionsVerification = convertSystemGetEnvToBooleanValue(SKIP_TOKEN_FILE_PERMISSIONS_VERIFICATION, false); if (shouldSkipTokenFilePermissionsVerification) { logger.debug( "Skip credential cache file permissions verification because {} is enabled", SKIP_TOKEN_FILE_PERMISSIONS_VERIFICATION); } fileCacheManager = new FileCacheManagerBuilder() .setCacheDirectorySystemProperty(CACHE_DIR_PROP) .setCacheDirectoryEnvironmentVariable(CACHE_DIR_ENV) .setBaseCacheFileName(CACHE_FILE_NAME) .setOnlyOwnerPermissions(!shouldSkipTokenFilePermissionsVerification) .setCacheFileLockExpirationInSeconds(CACHE_FILE_LOCK_EXPIRATION_IN_SECONDS) .build(); } private static class SecureStorageLinuxManagerHolder { private static final SecureStorageLinuxManager INSTANCE = new SecureStorageLinuxManager(); } public static SecureStorageLinuxManager getInstance() { return SecureStorageLinuxManagerHolder.INSTANCE; } @Override public synchronized SecureStorageStatus setCredential( String host, String user, String type, String token) { if (isNullOrEmpty(token)) { logger.warn("No token provided", false); return SecureStorageStatus.SUCCESS; } fileCacheManager.withLock( () -> { Map> cachedCredentials = readJsonStoreCache(fileCacheManager.readCacheFile()); cachedCredentials.computeIfAbsent( CACHE_FILE_TOKENS_OBJECT_NAME, tokensMap -> new HashMap<>()); Map credentialsMap = cachedCredentials.get(CACHE_FILE_TOKENS_OBJECT_NAME); credentialsMap.put(SecureStorageManager.buildCredentialsKey(host, user, type), token); fileCacheManager.writeCacheFile( SecureStorageLinuxManager.this.localCacheToJson(cachedCredentials)); return null; }); return SecureStorageStatus.SUCCESS; } @Override public synchronized String getCredential(String host, String user, String type) { return fileCacheManager.withLock( () -> { JsonNode res = fileCacheManager.readCacheFile(); Map> cache = readJsonStoreCache(res); Map credentialsMap = cache.get(CACHE_FILE_TOKENS_OBJECT_NAME); if (credentialsMap == null) { return null; } return credentialsMap.get(SecureStorageManager.buildCredentialsKey(host, user, type)); }); } @Override public synchronized SecureStorageStatus deleteCredential(String host, String user, String type) { fileCacheManager.withLock( () -> { JsonNode res = fileCacheManager.readCacheFile(); Map> cache = readJsonStoreCache(res); Map credentialsMap = cache.get(CACHE_FILE_TOKENS_OBJECT_NAME); if (credentialsMap != null) { credentialsMap.remove(SecureStorageManager.buildCredentialsKey(host, user, type)); if (credentialsMap.isEmpty()) { cache.remove(CACHE_FILE_TOKENS_OBJECT_NAME); } } fileCacheManager.writeCacheFile(localCacheToJson(cache)); return null; }); return SecureStorageStatus.SUCCESS; } private ObjectNode localCacheToJson(Map> cache) { ObjectNode jsonNode = mapper.createObjectNode(); Map tokensMap = cache.get(CACHE_FILE_TOKENS_OBJECT_NAME); if (tokensMap != null) { ObjectNode 
tokensNode = mapper.createObjectNode(); for (Map.Entry credential : tokensMap.entrySet()) { tokensNode.put(credential.getKey(), credential.getValue()); } jsonNode.set(CACHE_FILE_TOKENS_OBJECT_NAME, tokensNode); } return jsonNode; } private Map> readJsonStoreCache(JsonNode node) { Map> cache = new HashMap<>(); if (node == null || !node.getNodeType().equals(JsonNodeType.OBJECT)) { logger.debug("Invalid cache file format."); return cache; } cache.put(CACHE_FILE_TOKENS_OBJECT_NAME, new HashMap<>()); JsonNode credentialsNode = node.get(CACHE_FILE_TOKENS_OBJECT_NAME); Map credentialsCache = cache.get(CACHE_FILE_TOKENS_OBJECT_NAME); if (credentialsNode != null && node.getNodeType().equals(JsonNodeType.OBJECT)) { for (Iterator> itr = credentialsNode.fields(); itr.hasNext(); ) { Map.Entry credential = itr.next(); credentialsCache.put(credential.getKey(), credential.getValue().asText()); } } return cache; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SecureStorageManager.java ================================================ package net.snowflake.client.internal.core; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import net.snowflake.client.internal.jdbc.SnowflakeUtil; /** * Interface for accessing Platform specific Local Secure Storage E.g. keychain on Mac credential * manager on Windows */ interface SecureStorageManager { int COLON_CHAR_LENGTH = 1; SecureStorageStatus setCredential(String host, String user, String type, String token); String getCredential(String host, String user, String type); SecureStorageStatus deleteCredential(String host, String user, String type); static String buildCredentialsKey(String host, String user, String type) { StringBuilder target = new StringBuilder(host.length() + user.length() + type.length() + 3 * COLON_CHAR_LENGTH); target.append(host.toUpperCase()); target.append(":"); target.append(user.toUpperCase()); target.append(":"); target.append(type.toUpperCase()); try { MessageDigest md = MessageDigest.getInstance("SHA-256"); byte[] hash = md.digest(target.toString().getBytes()); return SnowflakeUtil.byteToHexString(hash); } catch (NoSuchAlgorithmException e) { throw new RuntimeException(e); } } enum SecureStorageStatus { NOT_FOUND, FAILURE, SUCCESS, UNSUPPORTED } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SecureStorageWindowsManager.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty; import com.sun.jna.Memory; import com.sun.jna.Native; import com.sun.jna.Pointer; import com.sun.jna.Structure; import com.sun.jna.WString; import com.sun.jna.platform.win32.WinBase.FILETIME; import com.sun.jna.ptr.PointerByReference; import com.sun.jna.win32.StdCallLibrary; import com.sun.jna.win32.W32APIOptions; import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; public class SecureStorageWindowsManager implements SecureStorageManager { private static final SFLogger logger = SFLoggerFactory.getLogger(SecureStorageWindowsManager.class); private final Advapi32Lib advapi32Lib; private SecureStorageWindowsManager() { advapi32Lib = Advapi32LibManager.getInstance(); } public static SecureStorageWindowsManager 
builder() { logger.debug("Using Windows Credential Manager as a token cache storage"); return new SecureStorageWindowsManager(); } public SecureStorageStatus setCredential(String host, String user, String type, String token) { if (isNullOrEmpty(token)) { logger.warn("No token provided", false); return SecureStorageStatus.SUCCESS; } byte[] credBlob = token.getBytes(StandardCharsets.UTF_16LE); Memory credBlobMem = new Memory(credBlob.length); credBlobMem.write(0, credBlob, 0, credBlob.length); String target = SecureStorageManager.buildCredentialsKey(host, user, type); SecureStorageWindowsCredential cred = new SecureStorageWindowsCredential(); cred.Type = SecureStorageWindowsCredentialType.CRED_TYPE_GENERIC.getType(); cred.TargetName = new WString(target); cred.CredentialBlobSize = (int) credBlobMem.size(); cred.CredentialBlob = credBlobMem; cred.Persist = SecureStorageWindowsCredentialPersistType.CRED_PERSIST_LOCAL_MACHINE.getType(); cred.UserName = new WString(user.toUpperCase()); boolean ret = false; synchronized (advapi32Lib) { ret = advapi32Lib.CredWriteW(cred, 0); } if (!ret) { logger.warn( String.format( "Failed to write to Windows Credential Manager. Error code = %d", Native.getLastError())); return SecureStorageStatus.FAILURE; } logger.debug("Wrote to Windows Credential Manager successfully", false); return SecureStorageStatus.SUCCESS; } public String getCredential(String host, String user, String type) { PointerByReference pCredential = new PointerByReference(); String target = SecureStorageManager.buildCredentialsKey(host, user, type); try { boolean ret = false; synchronized (advapi32Lib) { ret = advapi32Lib.CredReadW( target, SecureStorageWindowsCredentialType.CRED_TYPE_GENERIC.getType(), 0, pCredential); } if (!ret) { logger.warn( String.format( "Failed to read target or could not find it in Windows Credential Manager. Error code = %d", Native.getLastError())); return null; } logger.debug("Found the token from Windows Credential Manager and now copying it", false); SecureStorageWindowsCredential cred = new SecureStorageWindowsCredential(pCredential.getValue()); if (SecureStorageWindowsCredentialType.typeOf(cred.Type) != SecureStorageWindowsCredentialType.CRED_TYPE_GENERIC) { logger.warn("Wrong type of credential. Expected: CRED_TYPE_GENERIC", false); return null; } if (cred.CredentialBlobSize == 0) { logger.debug("Returned credential is empty", false); return null; } byte[] credBytes = cred.CredentialBlob.getByteArray(0, cred.CredentialBlobSize); String res = new String(credBytes, StandardCharsets.UTF_16LE); logger.debug("Successfully read the token. Will return it as String now", false); return res; } finally { if (pCredential.getValue() != null) { synchronized (advapi32Lib) { advapi32Lib.CredFree(pCredential.getValue()); } } } } public SecureStorageStatus deleteCredential(String host, String user, String type) { String target = SecureStorageManager.buildCredentialsKey(host, user, type); boolean ret = false; synchronized (advapi32Lib) { ret = advapi32Lib.CredDeleteW( target, SecureStorageWindowsCredentialType.CRED_TYPE_GENERIC.getType(), 0); } if (!ret) { logger.warn( String.format( "Failed to delete target in Windows Credential Manager. 
Error code = %d",
              Native.getLastError()));
      return SecureStorageStatus.FAILURE;
    }
    logger.debug("Deleted target in Windows Credential Manager successfully", false);
    return SecureStorageStatus.SUCCESS;
  }

  public static class SecureStorageWindowsCredential extends Structure {
    /**
     * typedef struct _CREDENTIAL { DWORD Flags; DWORD Type; LPTSTR TargetName; LPTSTR Comment;
     * FILETIME LastWritten; DWORD CredentialBlobSize; LPBYTE CredentialBlob; DWORD Persist; DWORD
     * AttributeCount; PCREDENTIAL_ATTRIBUTE Attributes; LPTSTR TargetAlias; LPTSTR UserName; }
     * CREDENTIAL, *PCREDENTIAL;
     */
    public int Flags;
    public int Type;
    public WString TargetName;
    public WString Comment;
    public FILETIME LastWritten = new FILETIME();
    public int CredentialBlobSize;
    public Pointer CredentialBlob;
    public int Persist;
    public int AttributeCount;
    public Pointer Attributes;
    public WString TargetAlias;
    public WString UserName;

    @Override
    protected List<String> getFieldOrder() {
      return Arrays.asList(
          "Flags",
          "Type",
          "TargetName",
          "Comment",
          "LastWritten",
          "CredentialBlobSize",
          "CredentialBlob",
          "Persist",
          "AttributeCount",
          "Attributes",
          "TargetAlias",
          "UserName");
    }

    public SecureStorageWindowsCredential() {
      super();
    }

    public SecureStorageWindowsCredential(Pointer p) {
      super(p);
      read();
    }
  }

  /** Windows credential types */
  enum SecureStorageWindowsCredentialType {
    CRED_TYPE_GENERIC(1),
    CRED_TYPE_DOMAIN_PASSWORD(2),
    CRED_TYPE_DOMAIN_CERTIFICATE(3),
    CRED_TYPE_DOMAIN_VISIBLE_PASSWORD(4),
    CRED_TYPE_GENERIC_CERTIFICATE(5),
    CRED_TYPE_DOMAIN_EXTENDED(6),
    CRED_TYPE_MAXIMUM(7);

    private int type;
    private static Map<Integer, SecureStorageWindowsCredentialType> map = new HashMap<>();

    SecureStorageWindowsCredentialType(int type) {
      this.type = type;
    }

    static {
      for (SecureStorageWindowsCredentialType credType :
          SecureStorageWindowsCredentialType.values()) {
        map.put(credType.type, credType);
      }
    }

    public static SecureStorageWindowsCredentialType typeOf(int type) {
      return map.get(type);
    }

    public int getType() {
      return type;
    }
  }

  enum SecureStorageWindowsCredentialPersistType {
    CRED_PERSIST_NONE(0),
    CRED_PERSIST_SESSION(1),
    CRED_PERSIST_LOCAL_MACHINE(2),
    CRED_PERSIST_ENTERPRISE(3);

    private int type;
    private static Map<Integer, SecureStorageWindowsCredentialPersistType> map = new HashMap<>();

    SecureStorageWindowsCredentialPersistType(int type) {
      this.type = type;
    }

    static {
      for (SecureStorageWindowsCredentialPersistType credPersistType :
          SecureStorageWindowsCredentialPersistType.values()) {
        map.put(credPersistType.type, credPersistType);
      }
    }

    public int getType() {
      return type;
    }
  }

  static class Advapi32LibManager {
    private static Advapi32Lib INSTANCE = null;

    private static class ResourceHolder {
      // map Windows advapi32.dll to Interface Advapi32Lib
      private static final Advapi32Lib INSTANCE =
          (Advapi32Lib)
              Native.loadLibrary("advapi32", Advapi32Lib.class, W32APIOptions.UNICODE_OPTIONS);
    }

    public static Advapi32Lib getInstance() {
      if (INSTANCE == null) {
        INSTANCE = ResourceHolder.INSTANCE;
      }
      return INSTANCE;
    }

    /** This function is used only for unit tests */
    public static void setInstance(Advapi32Lib instance) {
      INSTANCE = instance;
    }

    /** This function is a helper function for testing */
    public static void resetInstance() {
      if (Constants.getOS() == Constants.OS.WINDOWS) {
        INSTANCE = ResourceHolder.INSTANCE;
      }
    }
  }

  interface Advapi32Lib extends StdCallLibrary {
    /** BOOL CredReadW( LPCWSTR TargetName, DWORD Type, DWORD Flags, PCREDENTIALW *Credential ); */
    boolean CredReadW(String targetName, int type, int flags, PointerByReference pcred);

    /** BOOL CredWriteW( PCREDENTIALW Credential, DWORD Flags ); */
    boolean
CredWriteW(SecureStorageWindowsManager.SecureStorageWindowsCredential cred, int flags); /** BOOL CredDeleteW( LPCWSTR TargetName, DWORD Type, DWORD Flags ); */ boolean CredDeleteW(String targetName, int type, int flags); /** void CredFree( PVOID Buffer ); */ void CredFree(Pointer cred); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SecurityUtil.java ================================================ package net.snowflake.client.internal.core; import java.lang.reflect.InvocationTargetException; import java.security.Provider; import java.security.Security; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; public class SecurityUtil { private static final SFLogger LOGGER = SFLoggerFactory.getLogger(SecurityUtil.class); /** provider name for FIPS */ public static final String BOUNCY_CASTLE_FIPS_PROVIDER = "BCFIPS"; public static final String BOUNCY_CASTLE_PROVIDER = "BC"; private static final String DEFAULT_SECURITY_PROVIDER_NAME = "org.bouncycastle.jce.provider.BouncyCastleProvider"; public static final String USE_BUNDLED_BOUNCY_CASTLE_FOR_PRIVATE_KEY_DECRYPTION_JVM = "net.snowflake.jdbc.useBundledBouncyCastleForPrivateKeyDecryption"; public static void addBouncyCastleProvider() { // Add Bouncy Castle to the list of security providers. This is required to // verify the signature on OCSP response and attached certificates. // It is also required to decrypt password protected private keys. // Check to see if the BouncyCastleFipsProvider has already been added. // If so, then we don't want to add the provider BouncyCastleProvider. // The addProvider() method won't add the provider if it already exists. try { if (Security.getProvider(BOUNCY_CASTLE_FIPS_PROVIDER) == null) { Security.addProvider(instantiateSecurityProvider()); } } catch (SecurityException ex) { LOGGER.warn( "SecurityManager denied access to security providers. " + "BouncyCastle provider was not added: {}", ex.getMessage()); } } private static Provider instantiateSecurityProvider() { try { Class klass = Class.forName(DEFAULT_SECURITY_PROVIDER_NAME); return (Provider) klass.getDeclaredConstructor().newInstance(); } catch (ExceptionInInitializerError | ClassNotFoundException | NoSuchMethodException | InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException | SecurityException ex) { String errMsg = String.format( "Failed to load %s, err=%s. 
If you use Snowflake JDBC for FIPS jar, " + "import BouncyCastleFipsProvider in the application.", DEFAULT_SECURITY_PROVIDER_NAME, ex.getMessage()); LOGGER.error(errMsg, true); throw new RuntimeException(errMsg, ex); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SessionUtil.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.core.SFTrustManager.resetOCSPResponseCacherServerURL; import static net.snowflake.client.internal.core.SFTrustManager.setOCSPResponseCacheServerURL; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import static net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.recordIfExternal; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import java.io.File; import java.io.IOException; import java.net.MalformedURLException; import java.net.URI; import java.net.URISyntaxException; import java.net.URL; import java.nio.charset.StandardCharsets; import java.security.PrivateKey; import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; import java.util.stream.Stream; import net.snowflake.client.api.auth.AuthenticatorType; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.core.auth.ClientAuthnDTO; import net.snowflake.client.internal.core.auth.ClientAuthnParameter; import net.snowflake.client.internal.core.auth.oauth.AccessTokenProvider; import net.snowflake.client.internal.core.auth.oauth.DPoPUtil; import net.snowflake.client.internal.core.auth.oauth.OAuthAccessTokenForRefreshTokenProvider; import net.snowflake.client.internal.core.auth.oauth.OAuthAccessTokenProviderFactory; import net.snowflake.client.internal.core.auth.oauth.TokenResponseDTO; import net.snowflake.client.internal.core.auth.wif.AwsAttestationService; import net.snowflake.client.internal.core.auth.wif.AwsIdentityAttestationCreator; import net.snowflake.client.internal.core.auth.wif.AzureAttestationService; import net.snowflake.client.internal.core.auth.wif.AzureIdentityAttestationCreator; import net.snowflake.client.internal.core.auth.wif.GcpIdentityAttestationCreator; import net.snowflake.client.internal.core.auth.wif.OidcIdentityAttestationCreator; import net.snowflake.client.internal.core.auth.wif.WorkloadIdentityAttestation; import net.snowflake.client.internal.core.auth.wif.WorkloadIdentityAttestationProvider; import net.snowflake.client.internal.core.crl.CertRevocationCheckMode; import net.snowflake.client.internal.core.minicore.MinicoreTelemetry; import net.snowflake.client.internal.exception.SnowflakeSQLLoggedException; import net.snowflake.client.internal.jdbc.RetryContext; import net.snowflake.client.internal.jdbc.RetryContextManager; import net.snowflake.client.internal.jdbc.SnowflakeReauthenticationRequest; import net.snowflake.client.internal.jdbc.SnowflakeSQLExceptionWithRetryContext; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import 
net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.InternalCallMarker; import net.snowflake.client.internal.jdbc.telemetryOOB.TelemetryService; import net.snowflake.client.internal.jdbc.util.DriverUtil; import net.snowflake.client.internal.log.ArgSupplier; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.client.internal.util.LibcDetails; import net.snowflake.client.internal.util.LibcInfo; import net.snowflake.client.internal.util.OsReleaseDetails; import net.snowflake.client.internal.util.PlatformDetector; import net.snowflake.client.internal.util.SecretDetector; import net.snowflake.client.internal.util.Stopwatch; import net.snowflake.client.internal.util.ThrowingFunction; import net.snowflake.common.core.SqlState; import org.apache.http.HttpHeaders; import org.apache.http.client.config.RequestConfig; import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpRequestBase; import org.apache.http.client.utils.URIBuilder; import org.apache.http.entity.StringEntity; import org.apache.http.message.BasicHeader; import org.apache.http.message.HeaderGroup; import org.jsoup.Jsoup; import org.jsoup.nodes.Document; import org.jsoup.select.Elements; /** Low level session util */ public class SessionUtil { private static final SFLogger logger = SFLoggerFactory.getLogger(SessionUtil.class); // Response Field Name private static final String SF_QUERY_DATABASE = "databaseName"; private static final String SF_QUERY_SCHEMA = "schemaName"; private static final String SF_QUERY_WAREHOUSE = "warehouse"; private static final String SF_QUERY_ROLE = "roleName"; // Request path private static final String SF_PATH_LOGIN_REQUEST = "/session/v1/login-request"; private static final String SF_PATH_TOKEN_REQUEST = "/session/token-request"; private static final String SF_PATH_OKTA_TOKEN_REQUEST_SUFFIX = "/api/v1/authn"; private static final String SF_PATH_OKTA_SSO_REQUEST_SUFFIX = "/sso/saml"; public static final String SF_PATH_AUTHENTICATOR_REQUEST = "/session/authenticator-request"; public static final String SF_PATH_CONSOLE_LOGIN_REQUEST = "/console/login"; public static final String SF_QUERY_SESSION_DELETE = "delete"; static final String CLIENT_STORE_TEMPORARY_CREDENTIAL = "CLIENT_STORE_TEMPORARY_CREDENTIAL"; // Headers @Deprecated public static final String SF_HEADER_AUTHORIZATION = SFSession.SF_HEADER_AUTHORIZATION; // Authentication type private static final String SF_HEADER_BASIC_AUTHTYPE = "Basic"; private static final String CLIENT_REQUEST_MFA_TOKEN = "CLIENT_REQUEST_MFA_TOKEN"; private static final String SERVICE_NAME = "SERVICE_NAME"; private static final String CLIENT_IN_BAND_TELEMETRY_ENABLED = "CLIENT_TELEMETRY_ENABLED"; private static final String CLIENT_OUT_OF_BAND_TELEMETRY_ENABLED = "CLIENT_OUT_OF_BAND_TELEMETRY_ENABLED"; private static final String CLIENT_RESULT_COLUMN_CASE_INSENSITIVE = "CLIENT_RESULT_COLUMN_CASE_INSENSITIVE"; private static final String JDBC_RS_COLUMN_CASE_INSENSITIVE = "JDBC_RS_COLUMN_CASE_INSENSITIVE"; private static final String JDBC_TREAT_TIMESTAMP_NTZ_AS_UTC = "JDBC_TREAT_TIMESTAMP_NTZ_AS_UTC"; private static final String JDBC_FORMAT_DATE_WITH_TIMEZONE = "JDBC_FORMAT_DATE_WITH_TIMEZONE"; private static final String JDBC_USE_SESSION_TIMEZONE = "JDBC_USE_SESSION_TIMEZONE"; public static final String JDBC_CHUNK_DOWNLOADER_MAX_RETRY = "JDBC_CHUNK_DOWNLOADER_MAX_RETRY"; private static final String 
CLIENT_RESULT_CHUNK_SIZE_JVM = "net.snowflake.jdbc.clientResultChunkSize";
  public static final String CLIENT_RESULT_CHUNK_SIZE = "CLIENT_RESULT_CHUNK_SIZE";
  public static final String CLIENT_MEMORY_LIMIT_JVM = "net.snowflake.jdbc.clientMemoryLimit";
  public static final String CLIENT_MEMORY_LIMIT = "CLIENT_MEMORY_LIMIT";
  public static final String QUERY_CONTEXT_CACHE_SIZE = "QUERY_CONTEXT_CACHE_SIZE";
  public static final String JDBC_ENABLE_PUT_GET = "JDBC_ENABLE_PUT_GET";
  public static final String CLIENT_PREFETCH_THREADS_JVM =
      "net.snowflake.jdbc.clientPrefetchThreads";
  public static final String CLIENT_PREFETCH_THREADS = "CLIENT_PREFETCH_THREADS";
  public static final String CLIENT_ENABLE_CONSERVATIVE_MEMORY_USAGE_JVM =
      "net.snowflake.jdbc.clientEnableConservativeMemoryUsage";
  public static final String CLIENT_ENABLE_CONSERVATIVE_MEMORY_USAGE =
      "CLIENT_ENABLE_CONSERVATIVE_MEMORY_USAGE";
  public static final String CLIENT_CONSERVATIVE_MEMORY_ADJUST_STEP =
      "CLIENT_CONSERVATIVE_MEMORY_ADJUST_STEP";
  public static final String OCSP_FAIL_OPEN_JVM = "net.snowflake.jdbc.ocspFailOpen";
  private static final String OCSP_FAIL_OPEN = "ocspFailOpen";
  public static final String CLIENT_SESSION_KEEP_ALIVE_HEARTBEAT_FREQUENCY =
      "CLIENT_SESSION_KEEP_ALIVE_HEARTBEAT_FREQUENCY";
  public static final String CLIENT_SFSQL = "CLIENT_SFSQL";
  public static final String CLIENT_VALIDATE_DEFAULT_PARAMETERS =
      "CLIENT_VALIDATE_DEFAULT_PARAMETERS";
  public static final String CLIENT_ENABLE_LOG_INFO_STATEMENT_PARAMETERS =
      "CLIENT_ENABLE_LOG_INFO_STATEMENT_PARAMETERS";
  public static final String CLIENT_METADATA_REQUEST_USE_CONNECTION_CTX =
      "CLIENT_METADATA_REQUEST_USE_CONNECTION_CTX";
  public static final String CLIENT_METADATA_USE_SESSION_DATABASE =
      "CLIENT_METADATA_USE_SESSION_DATABASE";
  public static final String ENABLE_STAGE_S3_PRIVATELINK_FOR_US_EAST_1 =
      "ENABLE_STAGE_S3_PRIVATELINK_FOR_US_EAST_1";
  static final String SF_HEADER_SERVICE_NAME = "X-Snowflake-Service";
  public static final String SF_HEADER_CLIENT_APP_ID = "CLIENT_APP_ID";
  public static final String SF_HEADER_CLIENT_APP_VERSION = "CLIENT_APP_VERSION";
  private static final String ID_TOKEN_AUTHENTICATOR = "ID_TOKEN";
  private static final String NO_QUERY_ID = "";
  private static final String SF_PATH_SESSION = "/session";
  public static long DEFAULT_CLIENT_MEMORY_LIMIT = 1536; // MB
  public static int DEFAULT_CLIENT_PREFETCH_THREADS = 4;
  public static int MIN_CLIENT_CHUNK_SIZE = 48;
  public static int MAX_CLIENT_CHUNK_SIZE = 160;

  public static Map<String, String> JVM_PARAMS_TO_PARAMS =
      Stream.of(
              new String[][] {
                {CLIENT_RESULT_CHUNK_SIZE_JVM, CLIENT_RESULT_CHUNK_SIZE},
                {CLIENT_MEMORY_LIMIT_JVM, CLIENT_MEMORY_LIMIT},
                {CLIENT_PREFETCH_THREADS_JVM, CLIENT_PREFETCH_THREADS},
                {OCSP_FAIL_OPEN_JVM, OCSP_FAIL_OPEN},
                {
                  CLIENT_ENABLE_CONSERVATIVE_MEMORY_USAGE_JVM,
                  CLIENT_ENABLE_CONSERVATIVE_MEMORY_USAGE
                }
              })
          .collect(Collectors.toMap(data -> data[0], data -> data[1]));

  private static ObjectMapper mapper = ObjectMapperFactory.getObjectMapper();
  private static int DEFAULT_HEALTH_CHECK_INTERVAL = 45; // sec

  private static Set<String> STRING_PARAMS =
      new HashSet<>(
          Arrays.asList(
              "TIMEZONE",
              "TIMESTAMP_OUTPUT_FORMAT",
              "TIMESTAMP_NTZ_OUTPUT_FORMAT",
              "TIMESTAMP_LTZ_OUTPUT_FORMAT",
              "TIMESTAMP_TZ_OUTPUT_FORMAT",
              "DATE_OUTPUT_FORMAT",
              "TIME_OUTPUT_FORMAT",
              "BINARY_OUTPUT_FORMAT",
              "CLIENT_TIMESTAMP_TYPE_MAPPING",
              SERVICE_NAME,
              "GEOGRAPHY_OUTPUT_FORMAT"));
  private static final Set<String> INT_PARAMS =
      new HashSet<>(
          Arrays.asList(
              CLIENT_PREFETCH_THREADS,
              CLIENT_MEMORY_LIMIT,
              CLIENT_RESULT_CHUNK_SIZE,
              "CLIENT_STAGE_ARRAY_BINDING_THRESHOLD",
              "CLIENT_SESSION_KEEP_ALIVE_HEARTBEAT_FREQUENCY"));
  private static final Set<String> BOOLEAN_PARAMS =
      new HashSet<>(
          Arrays.asList(
              CLIENT_SESSION_KEEP_ALIVE_HEARTBEAT_FREQUENCY,
              "CLIENT_HONOR_CLIENT_TZ_FOR_TIMESTAMP_NTZ",
              "CLIENT_DISABLE_INCIDENTS",
              "CLIENT_SESSION_KEEP_ALIVE",
              CLIENT_ENABLE_LOG_INFO_STATEMENT_PARAMETERS,
              CLIENT_IN_BAND_TELEMETRY_ENABLED,
              CLIENT_OUT_OF_BAND_TELEMETRY_ENABLED,
              CLIENT_STORE_TEMPORARY_CREDENTIAL,
              CLIENT_REQUEST_MFA_TOKEN,
              "JDBC_USE_JSON_PARSER",
              "AUTOCOMMIT",
              "JDBC_EFFICIENT_CHUNK_STORAGE",
              JDBC_RS_COLUMN_CASE_INSENSITIVE,
              JDBC_TREAT_TIMESTAMP_NTZ_AS_UTC,
              JDBC_FORMAT_DATE_WITH_TIMEZONE,
              JDBC_USE_SESSION_TIMEZONE,
              CLIENT_RESULT_COLUMN_CASE_INSENSITIVE,
              CLIENT_METADATA_REQUEST_USE_CONNECTION_CTX,
              CLIENT_METADATA_USE_SESSION_DATABASE,
              "JDBC_TREAT_DECIMAL_AS_INT",
              "JDBC_ENABLE_COMBINED_DESCRIBE",
              CLIENT_ENABLE_CONSERVATIVE_MEMORY_USAGE,
              CLIENT_VALIDATE_DEFAULT_PARAMETERS,
              ENABLE_STAGE_S3_PRIVATELINK_FOR_US_EAST_1,
              "SNOWPARK_LAZY_ANALYSIS"));

  /**
   * Returns Authenticator type
   *
   * @param loginInput login information
   * @return Authenticator type
   */
  private static AuthenticatorType getAuthenticator(SFLoginInput loginInput) {
    if (loginInput.getAuthenticator() != null) {
      if (loginInput
          .getAuthenticator()
          .equalsIgnoreCase(AuthenticatorType.EXTERNALBROWSER.name())) {
        // SAML 2.0 compliant service/application
        return AuthenticatorType.EXTERNALBROWSER;
      } else if (loginInput
          .getAuthenticator()
          .equalsIgnoreCase(AuthenticatorType.OAUTH_AUTHORIZATION_CODE.name())) {
        return AuthenticatorType.OAUTH_AUTHORIZATION_CODE;
      } else if (loginInput
          .getAuthenticator()
          .equalsIgnoreCase(AuthenticatorType.OAUTH_CLIENT_CREDENTIALS.name())) {
        return AuthenticatorType.OAUTH_CLIENT_CREDENTIALS;
      } else if (loginInput.getAuthenticator().equalsIgnoreCase(AuthenticatorType.OAUTH.name())) {
        // OAuth access code Authentication
        return AuthenticatorType.OAUTH;
      } else if (loginInput
          .getAuthenticator()
          .equalsIgnoreCase(AuthenticatorType.PROGRAMMATIC_ACCESS_TOKEN.name())) {
        return AuthenticatorType.PROGRAMMATIC_ACCESS_TOKEN;
      } else if (loginInput
          .getAuthenticator()
          .equalsIgnoreCase(AuthenticatorType.WORKLOAD_IDENTITY.name())) {
        return AuthenticatorType.WORKLOAD_IDENTITY;
      } else if (loginInput
          .getAuthenticator()
          .equalsIgnoreCase(AuthenticatorType.SNOWFLAKE_JWT.name())) {
        return AuthenticatorType.SNOWFLAKE_JWT;
      } else if (loginInput
          .getAuthenticator()
          .equalsIgnoreCase(AuthenticatorType.USERNAME_PASSWORD_MFA.name())) {
        return AuthenticatorType.USERNAME_PASSWORD_MFA;
      } else if (!loginInput
          .getAuthenticator()
          .equalsIgnoreCase(AuthenticatorType.SNOWFLAKE.name())) {
        // OKTA authenticator v1.
        return AuthenticatorType.OKTA;
      }
    }

    // If no authenticator was specified, the driver decides based on whether a private key
    // was provided: SNOWFLAKE_JWT if so, SNOWFLAKE otherwise.
    return loginInput.isPrivateKeyProvided() ?
AuthenticatorType.SNOWFLAKE_JWT : AuthenticatorType.SNOWFLAKE; } /** * Open a new session * * @param loginInput login information * @return information get after login such as token information * @throws SFException if unexpected uri syntax * @throws SnowflakeSQLException if failed to establish connection with snowflake */ static SFLoginOutput openSession( SFLoginInput loginInput, Map connectionPropertiesMap, String tracingLevel) throws SFException, SnowflakeSQLException { AssertUtil.assertTrue( loginInput.getServerUrl() != null, "missing server URL for opening session"); AssertUtil.assertTrue(loginInput.getAppId() != null, "missing app id for opening session"); AssertUtil.assertTrue( loginInput.getLoginTimeout() >= 0, "negative login timeout for opening session"); final AuthenticatorType authenticator = getAuthenticator(loginInput); if (isTokenOrPasswordRequired(authenticator)) { AssertUtil.assertTrue( loginInput.getToken() != null || loginInput.getPassword() != null, "missing token or password for opening session"); } if (isUsernameRequired(authenticator)) { AssertUtil.assertTrue( loginInput.getUserName() != null, "missing user name for opening session"); } if (isEligibleForTokenCaching(authenticator)) { if ((Constants.getOS() == Constants.OS.MAC || Constants.getOS() == Constants.OS.WINDOWS) && loginInput.isEnableClientStoreTemporaryCredential()) { // force to set the flag for Mac/Windows users loginInput.getSessionParameters().put(CLIENT_STORE_TEMPORARY_CREDENTIAL, true); } else { // Linux should read from JDBC configuration. For other unsupported OS, we set it to false // as default value if (!loginInput.getSessionParameters().containsKey(CLIENT_STORE_TEMPORARY_CREDENTIAL)) { loginInput.getSessionParameters().put(CLIENT_STORE_TEMPORARY_CREDENTIAL, false); } } } else { // TODO: patch for now. We should update mergeProperties // to normalize all parameters using STRING_PARAMS, INT_PARAMS and // BOOLEAN_PARAMS. Object value = loginInput.getSessionParameters().get(CLIENT_STORE_TEMPORARY_CREDENTIAL); if (value != null) { loginInput.getSessionParameters().put(CLIENT_STORE_TEMPORARY_CREDENTIAL, asBoolean(value)); } } if (authenticator.equals(AuthenticatorType.USERNAME_PASSWORD_MFA) && loginInput.isEnableClientRequestMfaToken()) { loginInput.getSessionParameters().put(CLIENT_REQUEST_MFA_TOKEN, true); } if (authenticator.equals(AuthenticatorType.WORKLOAD_IDENTITY)) { WorkloadIdentityAttestation attestation = getWorkloadIdentityAttestation(loginInput); if (attestation != null) { loginInput.setWorkloadIdentityAttestation(attestation); } else { throw new SFException( ErrorCode.WORKLOAD_IDENTITY_FLOW_ERROR, "Unable to obtain workload identity attestation. 
Make sure that correct workload identity provider has been set and that Snowflake-JDBC driver runs on supported environment."); } } convertSessionParameterStringValueToBooleanIfGiven(loginInput, CLIENT_REQUEST_MFA_TOKEN); readCachedCredentialsIfPossible(loginInput); try { resetOCSPUrlIfNecessary(loginInput.getServerUrl()); } catch (IOException ex) { throw new SFException(ex, ErrorCode.IO_ERROR, "unexpected URL syntax exception"); } if (OAuthAccessTokenProviderFactory.isEligible(getAuthenticator(loginInput))) { obtainAuthAccessTokenAndUpdateInput(loginInput); } try { return newSession(loginInput, connectionPropertiesMap, tracingLevel); } catch (SnowflakeReauthenticationRequest ex) { if (ex.getErrorCode() == Constants.OAUTH_ACCESS_TOKEN_EXPIRED_GS_CODE && isNativeOAuthOriginalAuthenticator(loginInput)) { if (loginInput.getOauthRefreshToken() != null && AuthenticatorType.OAUTH_AUTHORIZATION_CODE .name() .equals(loginInput.getOriginalAuthenticator())) { refreshOAuthAccessTokenAndUpdateInput(loginInput); } else { loginInput.restoreOriginalAuthenticator(); fetchOAuthAccessTokenAndUpdateInput(loginInput); } } return newSession(loginInput, connectionPropertiesMap, tracingLevel); } } private static boolean isNativeOAuthOriginalAuthenticator(SFLoginInput loginInput) { return AuthenticatorType.OAUTH_AUTHORIZATION_CODE .name() .equals(loginInput.getOriginalAuthenticator()) || AuthenticatorType.OAUTH_CLIENT_CREDENTIALS .name() .equals(loginInput.getOriginalAuthenticator()); } private static WorkloadIdentityAttestation getWorkloadIdentityAttestation(SFLoginInput loginInput) throws SFException { WorkloadIdentityAttestationProvider attestationProvider = new WorkloadIdentityAttestationProvider( new AwsIdentityAttestationCreator(new AwsAttestationService(), loginInput), new GcpIdentityAttestationCreator(loginInput), new AzureIdentityAttestationCreator(new AzureAttestationService(), loginInput), new OidcIdentityAttestationCreator(loginInput.getToken())); return attestationProvider.getAttestation(loginInput.getWorkloadIdentityProvider()); } private static boolean isEligibleForTokenCaching(AuthenticatorType authenticator) { return authenticator.equals(AuthenticatorType.EXTERNALBROWSER) || authenticator.equals(AuthenticatorType.OAUTH_AUTHORIZATION_CODE) || authenticator.equals(AuthenticatorType.OAUTH_CLIENT_CREDENTIALS); } private static boolean isTokenOrPasswordRequired(AuthenticatorType authenticator) { return authenticator.equals(AuthenticatorType.OAUTH) || authenticator.equals(AuthenticatorType.PROGRAMMATIC_ACCESS_TOKEN); } private static boolean isUsernameRequired(AuthenticatorType authenticator) { return !authenticator.equals(AuthenticatorType.OAUTH) && !authenticator.equals(AuthenticatorType.PROGRAMMATIC_ACCESS_TOKEN) && !authenticator.equals(AuthenticatorType.OAUTH_AUTHORIZATION_CODE) && !authenticator.equals(AuthenticatorType.OAUTH_CLIENT_CREDENTIALS) && !authenticator.equals(AuthenticatorType.WORKLOAD_IDENTITY); } private static void obtainAuthAccessTokenAndUpdateInput(SFLoginInput loginInput) throws SFException { if (loginInput.getOauthAccessToken() != null) { // Access Token was cached loginInput.setAuthenticator(AuthenticatorType.OAUTH.name()); loginInput.setToken(loginInput.getOauthAccessToken()); } else { // Access Token not cached fetchOAuthAccessTokenAndUpdateInput(loginInput); } } private static void fetchOAuthAccessTokenAndUpdateInput(SFLoginInput loginInput) throws SFException { OAuthAccessTokenProviderFactory accessTokenProviderFactory = new OAuthAccessTokenProviderFactory(); 
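// The factory below picks the provider that matches the configured authenticator
// (authorization-code vs. client-credentials flow). The token it returns is then replayed
// through the regular OAUTH login path: the authenticator is rewritten to OAUTH, the access
// token becomes the login token, and the access/refresh tokens are kept on the login input
// so they can be cached for later sessions.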
AccessTokenProvider accessTokenProvider = accessTokenProviderFactory.createAccessTokenProvider( getAuthenticator(loginInput), loginInput); TokenResponseDTO tokenResponse = accessTokenProvider.getAccessToken(loginInput); loginInput.setAuthenticator(AuthenticatorType.OAUTH.name()); loginInput.setToken(tokenResponse.getAccessToken()); loginInput.setOauthAccessToken(tokenResponse.getAccessToken()); loginInput.setOauthRefreshToken(tokenResponse.getRefreshToken()); if (loginInput.isDPoPEnabled() && accessTokenProvider.getDPoPPublicKey() != null) { loginInput.setDPoPPublicKey(accessTokenProvider.getDPoPPublicKey()); } } private static void refreshOAuthAccessTokenAndUpdateInput(SFLoginInput loginInput) throws SFException { try { OAuthAccessTokenForRefreshTokenProvider tokenRefresher = new OAuthAccessTokenForRefreshTokenProvider(); TokenResponseDTO tokenResponse = tokenRefresher.getAccessToken(loginInput); loginInput.setToken(tokenResponse.getAccessToken()); loginInput.setOauthAccessToken(tokenResponse.getAccessToken()); loginInput.setAuthenticator(AuthenticatorType.OAUTH.name()); if (loginInput.isDPoPEnabled() && tokenRefresher.getDPoPPublicKey() != null) { loginInput.setDPoPPublicKey(tokenRefresher.getDPoPPublicKey()); } if (tokenResponse.getRefreshToken() != null) { loginInput.setOauthRefreshToken(tokenResponse.getRefreshToken()); } } catch (SFException | Exception e) { logger.debug( "Refreshing OAuth access token failed. Removing OAuth refresh token from cache and restarting OAuth flow...", e); if (asBoolean(loginInput.getSessionParameters().get(CLIENT_STORE_TEMPORARY_CREDENTIAL))) { CredentialManager.deleteOAuthRefreshTokenCacheEntry(loginInput); } loginInput.restoreOriginalAuthenticator(); fetchOAuthAccessTokenAndUpdateInput(loginInput); } } private static void convertSessionParameterStringValueToBooleanIfGiven( SFLoginInput loginInput, String parameterName) { Object currentClientRequestMfaToken = loginInput.getSessionParameters().get(parameterName); if (currentClientRequestMfaToken instanceof String) { loginInput .getSessionParameters() .put(parameterName, Boolean.parseBoolean((String) currentClientRequestMfaToken)); } } private static void readCachedCredentialsIfPossible(SFLoginInput loginInput) throws SFException { if (!isNullOrEmpty(loginInput.getUserName())) { if (asBoolean(loginInput.getSessionParameters().get(CLIENT_STORE_TEMPORARY_CREDENTIAL))) { CredentialManager.fillCachedIdToken(loginInput); if (AuthenticatorType.OAUTH_AUTHORIZATION_CODE.equals(getAuthenticator(loginInput))) { CredentialManager.fillCachedOAuthRefreshToken(loginInput); if (loginInput.isDPoPEnabled()) { CredentialManager.fillCachedDPoPBundledAccessToken(loginInput); } if (loginInput.getOauthAccessToken() == null && loginInput.getDPoPPublicKey() == null) { CredentialManager.fillCachedOAuthAccessToken(loginInput); } } } if (asBoolean(loginInput.getSessionParameters().get(CLIENT_REQUEST_MFA_TOKEN))) { CredentialManager.fillCachedMfaToken(loginInput); } } } private static boolean asBoolean(Object value) { if (value == null) { return false; } switch (value.getClass().getName()) { case "java.lang.Boolean": return (Boolean) value; case "java.lang.String": return Boolean.valueOf((String) value); } return false; } static SFLoginOutput newSession( SFLoginInput loginInput, Map connectionPropertiesMap, String tracingLevel) throws SFException, SnowflakeSQLException { Stopwatch stopwatch = new Stopwatch(); stopwatch.start(); // build URL for login request URIBuilder uriBuilder; URI loginURI; String tokenOrSamlResponse = null; 
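// tokenOrSamlResponse holds either a SAML assertion (Okta, external browser) or an
// IdP-issued token, depending on the authenticator; samlProofKey is set only by the
// external-browser flow and is sent back with the SAML response as the PROOF_KEY
// login parameter.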
String samlProofKey = null; boolean consentCacheIdToken = true; String sessionToken; String masterToken; String sessionDatabase; String sessionSchema; String sessionRole; String sessionWarehouse; String sessionId; long masterTokenValidityInSeconds; String idToken; String mfaToken; String databaseVersion = null; int databaseMajorVersion = 0; int databaseMinorVersion = 0; String newClientForUpgrade; int healthCheckInterval = DEFAULT_HEALTH_CHECK_INTERVAL; int httpClientSocketTimeout = loginInput.getSocketTimeoutInMillis(); int httpClientConnectionTimeout = loginInput.getConnectionTimeoutInMillis(); final AuthenticatorType authenticatorType = getAuthenticator(loginInput); Map commonParams; String oktaUsername = loginInput.getOKTAUserName(); logger.debug( "Authenticating user: {}, host: {} with authentication method: {}." + " Login timeout: {} s, auth timeout: {} s, OCSP mode: {}{}", loginInput.getUserName(), loginInput.getHostFromServerUrl(), authenticatorType, loginInput.getLoginTimeout(), loginInput.getAuthTimeout(), loginInput.getOCSPMode(), isNullOrEmpty(oktaUsername) ? "" : ", okta username: " + oktaUsername); try { uriBuilder = new URIBuilder(loginInput.getServerUrl()); // add database name and schema name as query parameters if (loginInput.getDatabaseName() != null) { uriBuilder.addParameter(SF_QUERY_DATABASE, loginInput.getDatabaseName()); } if (loginInput.getSchemaName() != null) { uriBuilder.addParameter(SF_QUERY_SCHEMA, loginInput.getSchemaName()); } if (loginInput.getWarehouse() != null) { uriBuilder.addParameter(SF_QUERY_WAREHOUSE, loginInput.getWarehouse()); } if (loginInput.getRole() != null) { uriBuilder.addParameter(SF_QUERY_ROLE, loginInput.getRole()); } if (authenticatorType == AuthenticatorType.EXTERNALBROWSER) { // try to reuse id_token if exists if (loginInput.getIdToken() == null) { // SAML 2.0 compliant service/application SessionUtilExternalBrowser s = SessionUtilExternalBrowser.createInstance(loginInput); s.authenticate(); tokenOrSamlResponse = s.getToken(); samlProofKey = s.getProofKey(); consentCacheIdToken = s.isConsentCacheIdToken(); } } else if (authenticatorType == AuthenticatorType.OKTA) { // okta authenticator v1 tokenOrSamlResponse = getSamlResponseUsingOkta(loginInput); } else if (authenticatorType == AuthenticatorType.SNOWFLAKE_JWT) { SessionUtilKeyPair s = new SessionUtilKeyPair( loginInput.getPrivateKey(), loginInput.getPrivateKeyFile(), loginInput.getPrivateKeyBase64(), loginInput.getPrivateKeyPwd(), loginInput.getAccountName(), loginInput.getUserName()); loginInput.setToken(s.issueJwtToken()); loginInput.setAuthTimeout(SessionUtilKeyPair.getTimeout()); } uriBuilder.addParameter(SFSession.SF_QUERY_REQUEST_ID, UUIDUtils.getUUID().toString()); uriBuilder.setPath(SF_PATH_LOGIN_REQUEST); loginURI = uriBuilder.build(); } catch (URISyntaxException ex) { logger.error("Exception when building URL", ex); throw new SFException(ex, ErrorCode.INTERNAL_ERROR, "unexpected URI syntax exception:1"); } HttpPost postRequest = null; HttpResponseWithHeaders response = null; try { Map data = new HashMap<>(); data.put(ClientAuthnParameter.CLIENT_APP_ID.name(), loginInput.getAppId()); /* * username is always included regardless of authenticator to identify * the user. */ data.put(ClientAuthnParameter.LOGIN_NAME.name(), loginInput.getUserName()); /* * only include password information in the request to GS if federated * authentication method is not specified. 
* When specified, this password information is really to be used to * authenticate with the IDP provider only, and GS should not have any * trace for this information. */ if (authenticatorType == AuthenticatorType.SNOWFLAKE) { data.put(ClientAuthnParameter.PASSWORD.name(), loginInput.getPassword()); } else if (authenticatorType == AuthenticatorType.EXTERNALBROWSER) { if (loginInput.getIdToken() != null) { data.put(ClientAuthnParameter.AUTHENTICATOR.name(), ID_TOKEN_AUTHENTICATOR); data.put(ClientAuthnParameter.TOKEN.name(), loginInput.getIdToken()); } else { data.put( ClientAuthnParameter.AUTHENTICATOR.name(), AuthenticatorType.EXTERNALBROWSER.name()); data.put(ClientAuthnParameter.PROOF_KEY.name(), samlProofKey); data.put(ClientAuthnParameter.TOKEN.name(), tokenOrSamlResponse); } } else if (authenticatorType == AuthenticatorType.OKTA) { data.put(ClientAuthnParameter.RAW_SAML_RESPONSE.name(), tokenOrSamlResponse); } else if (authenticatorType == AuthenticatorType.OAUTH) { data.put(ClientAuthnParameter.AUTHENTICATOR.name(), authenticatorType.name()); // Fix for HikariCP refresh token issue:SNOW-533673. // If token value is not set but password field is set then // the driver treats password as token. if (loginInput.getToken() != null) { data.put(ClientAuthnParameter.TOKEN.name(), loginInput.getToken()); } else { data.put(ClientAuthnParameter.TOKEN.name(), loginInput.getPassword()); } } else if (authenticatorType == AuthenticatorType.PROGRAMMATIC_ACCESS_TOKEN) { data.put(ClientAuthnParameter.AUTHENTICATOR.name(), authenticatorType.name()); data.put(ClientAuthnParameter.TOKEN.name(), loginInput.getToken()); } else if (authenticatorType == AuthenticatorType.SNOWFLAKE_JWT) { data.put(ClientAuthnParameter.AUTHENTICATOR.name(), authenticatorType.name()); data.put(ClientAuthnParameter.TOKEN.name(), loginInput.getToken()); } else if (authenticatorType == AuthenticatorType.USERNAME_PASSWORD_MFA) { // No authenticator name should be added here, since this will be treated as snowflake // default authenticator by backend data.put(ClientAuthnParameter.PASSWORD.name(), loginInput.getPassword()); if (loginInput.getMfaToken() != null) { data.put(ClientAuthnParameter.TOKEN.name(), loginInput.getMfaToken()); } } if (authenticatorType == AuthenticatorType.WORKLOAD_IDENTITY) { data.put(ClientAuthnParameter.AUTHENTICATOR.name(), authenticatorType.name()); data.put( ClientAuthnParameter.TOKEN.name(), loginInput.getWorkloadIdentityAttestation().getCredential()); data.put( ClientAuthnParameter.PROVIDER.name(), loginInput.getWorkloadIdentityAttestation().getProvider()); } Map clientEnv = createClientEnvironmentInfo( loginInput, connectionPropertiesMap, tracingLevel, authenticatorType); data.put(ClientAuthnParameter.CLIENT_ENVIRONMENT.name(), clientEnv); // Initialize the session parameters Map sessionParameter = loginInput.getSessionParameters(); if (loginInput.isValidateDefaultParameters()) { sessionParameter.put(CLIENT_VALIDATE_DEFAULT_PARAMETERS, true); } if (sessionParameter != null) { data.put(ClientAuthnParameter.SESSION_PARAMETERS.name(), loginInput.getSessionParameters()); } if (loginInput.getAccountName() != null) { data.put(ClientAuthnParameter.ACCOUNT_NAME.name(), loginInput.getAccountName()); } // Second Factor Authentication if (loginInput.isPasscodeInPassword()) { data.put(ClientAuthnParameter.EXT_AUTHN_DUO_METHOD.name(), "passcode"); } else if (loginInput.getPasscode() != null) { data.put(ClientAuthnParameter.EXT_AUTHN_DUO_METHOD.name(), "passcode"); data.put(ClientAuthnParameter.PASSCODE.name(), 
loginInput.getPasscode()); } else { data.put(ClientAuthnParameter.EXT_AUTHN_DUO_METHOD.name(), "push"); } data.put(ClientAuthnParameter.CLIENT_APP_VERSION.name(), loginInput.getAppVersion()); // SPCS service-identifier token - when the driver is // running inside an SPCS container, the runtime-issued token is attached to every login // request so the backend can identify the originating service. The token is rotated by SPCS, // so it is re-read from disk on every login and never cached. String spcsToken = new SpcsTokenReader().readSpcsToken(); if (spcsToken != null) { data.put(ClientAuthnParameter.SPCS_TOKEN.name(), spcsToken); } ClientAuthnDTO authnData = new ClientAuthnDTO(data, loginInput.getInFlightCtx()); String json = mapper.writeValueAsString(authnData); postRequest = new HttpPost(loginURI); // Add custom headers before adding common headers HttpUtil.applyAdditionalHeadersForSnowsight( postRequest, loginInput.getAdditionalHttpHeadersForSnowsight()); // Add headers for driver name and version postRequest.addHeader(SF_HEADER_CLIENT_APP_ID, loginInput.getAppId()); postRequest.addHeader(SF_HEADER_CLIENT_APP_VERSION, loginInput.getAppVersion()); // attach the login info json body to the post request StringEntity input = new StringEntity(json, StandardCharsets.UTF_8); input.setContentType("application/json"); postRequest.setEntity(input); postRequest.addHeader("accept", "application/json"); postRequest.addHeader("Accept-Encoding", ""); if (loginInput.isDPoPEnabled()) { new DPoPUtil(loginInput.getDPoPPublicKey()).addDPoPProofHeaderToRequest(postRequest, null); } /* * HttpClient should take authorization header from char[] instead of * String. */ postRequest.setHeader(SFSession.SF_HEADER_AUTHORIZATION, SF_HEADER_BASIC_AUTHTYPE); setServiceNameHeader(loginInput, postRequest); String theString = null; int leftRetryTimeout = loginInput.getLoginTimeout(); int leftsocketTimeout = loginInput.getSocketTimeoutInMillis(); int maxRetryCount = loginInput.getMaxRetryCount(); int retryedCount = 0; Exception lastRestException = null; while (true) { try { response = HttpUtil.executeGeneralRequestWithContext( postRequest, leftRetryTimeout, loginInput.getAuthTimeout(), leftsocketTimeout, maxRetryCount, retryedCount, loginInput.getHttpClientSettingsKey(), null); theString = response.getResponseBody(); } catch (SnowflakeSQLException ex) { lastRestException = ex; if (ex.getErrorCode() == ErrorCode.AUTHENTICATOR_REQUEST_TIMEOUT.getMessageCode()) { if (authenticatorType == AuthenticatorType.SNOWFLAKE_JWT || authenticatorType == AuthenticatorType.OKTA) { if (authenticatorType == AuthenticatorType.SNOWFLAKE_JWT) { SessionUtilKeyPair s = new SessionUtilKeyPair( loginInput.getPrivateKey(), loginInput.getPrivateKeyFile(), loginInput.getPrivateKeyBase64(), loginInput.getPrivateKeyPwd(), loginInput.getAccountName(), loginInput.getUserName()); data.put(ClientAuthnParameter.TOKEN.name(), s.issueJwtToken()); } else if (authenticatorType == AuthenticatorType.OKTA) { // TODO: there is no retry manager passed here for now - we still raise the // exception to retry in the old way logger.debug("Retrieve new token for Okta authentication."); // If we need to retry, we need to get a new Okta token tokenOrSamlResponse = getSamlResponseUsingOkta(loginInput); data.put(ClientAuthnParameter.RAW_SAML_RESPONSE.name(), tokenOrSamlResponse); ClientAuthnDTO updatedAuthnData = new ClientAuthnDTO(data, loginInput.getInFlightCtx()); String updatedJson = mapper.writeValueAsString(updatedAuthnData); StringEntity updatedInput = new 
StringEntity(updatedJson, StandardCharsets.UTF_8); updatedInput.setContentType("application/json"); postRequest.setEntity(updatedInput); } // Extract retry context information if available long elapsedSeconds = 0; long elapsedMiliSeconds = 0; boolean isSocketTimeoutNoBackoff = false; if (ex instanceof SnowflakeSQLExceptionWithRetryContext) { SnowflakeSQLExceptionWithRetryContext retryEx = (SnowflakeSQLExceptionWithRetryContext) ex; elapsedSeconds = retryEx.getElapsedSeconds(); isSocketTimeoutNoBackoff = retryEx.isSocketTimeoutNoBackoff(); elapsedMiliSeconds = retryEx.getElapsedSeconds() * 1000; retryedCount = retryEx.getRetryCount(); } if (loginInput.getLoginTimeout() > 0) { if (leftRetryTimeout > elapsedSeconds) { leftRetryTimeout -= elapsedSeconds; } else { leftRetryTimeout = 1; } logger.debug("The remaining Retry timeout is {}", leftRetryTimeout); } // In RestRequest.execute(), socket timeout is replaced with auth timeout // so we can renew the request within auth timeout. // auth timeout within socket timeout is thrown without backoff, // and we need to update time remained in socket timeout here to control // the actual socket timeout from customer setting. if (loginInput.getSocketTimeoutInMillis() > 0) { if (isSocketTimeoutNoBackoff) { if (leftsocketTimeout > elapsedMiliSeconds) { leftsocketTimeout -= elapsedMiliSeconds; } else { leftsocketTimeout = 1; } } else { // reset curl timeout for retry with backoff. leftsocketTimeout = loginInput.getSocketTimeoutInMillis(); } logger.debug("The remaining socket timeout is {}", leftsocketTimeout); } // JWT or Okta renew should not count as a retry, so we pass back the current retry // count from the exception context. continue; } } else { throw ex; } } catch (Exception ex) { lastRestException = ex; } break; } handleEmptyAuthResponse(theString, loginInput, lastRestException); // general method, same as with data binding JsonNode jsonNode = mapper.readTree(theString); // check the success field first if (!jsonNode.path("success").asBoolean()) { logger.debug("Response: {}", theString); int errorCode = jsonNode.path("code").asInt(); if (errorCode == Constants.ID_TOKEN_INVALID_LOGIN_REQUEST_GS_CODE) { // clean id_token first loginInput.setIdToken(null); if (asBoolean(loginInput.getSessionParameters().get(CLIENT_STORE_TEMPORARY_CREDENTIAL))) { deleteIdTokenCache(loginInput.getHostFromServerUrl(), loginInput.getUserName()); } logger.debug( "ID Token Expired / Not Applicable. Reauthenticating with ID Token cleared...: {}", errorCode); SnowflakeUtil.checkErrorAndThrowExceptionIncludingReauth(jsonNode); } if (errorCode == Constants.OAUTH_ACCESS_TOKEN_INVALID_GS_CODE) { logger.debug("OAuth Access Token Invalid: {}", errorCode); clearAccessTokenCache(loginInput); } if (errorCode == Constants.OAUTH_ACCESS_TOKEN_EXPIRED_GS_CODE) { clearAccessTokenCache(loginInput); logger.debug("OAuth Access Token Expired: {}", errorCode); if (AuthenticatorType.OAUTH.name().equals(loginInput.getOriginalAuthenticator())) { SnowflakeUtil.checkErrorAndThrowException(jsonNode); } else { SnowflakeUtil.checkErrorAndThrowExceptionIncludingReauth(jsonNode); } } if (authenticatorType == AuthenticatorType.USERNAME_PASSWORD_MFA && asBoolean(loginInput.getSessionParameters().get(CLIENT_REQUEST_MFA_TOKEN))) { deleteMfaTokenCache(loginInput.getHostFromServerUrl(), loginInput.getUserName()); } String errorMessage = jsonNode.path("message").asText(); logger.error( "Failed to open new session for user: {}, host: {}. 
Error: {}", loginInput.getUserName(), loginInput.getHostFromServerUrl(), errorMessage); throw new SnowflakeSQLException( NO_QUERY_ID, errorMessage, SqlState.SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION, errorCode); } // session token is in the data field of the returned json response sessionToken = jsonNode.path("data").path("token").asText(); masterToken = jsonNode.path("data").path("masterToken").asText(); idToken = nullStringAsEmptyString(jsonNode.path("data").path("idToken").asText()); mfaToken = nullStringAsEmptyString(jsonNode.path("data").path("mfaToken").asText()); masterTokenValidityInSeconds = jsonNode.path("data").path("masterValidityInSeconds").asLong(); String serverVersion = jsonNode.path("data").path("serverVersion").asText(); sessionId = jsonNode.path("data").path("sessionId").asText(); JsonNode dbNode = jsonNode.path("data").path("sessionInfo").path("databaseName"); sessionDatabase = dbNode.isNull() ? null : dbNode.asText(); JsonNode schemaNode = jsonNode.path("data").path("sessionInfo").path("schemaName"); sessionSchema = schemaNode.isNull() ? null : schemaNode.asText(); JsonNode roleNode = jsonNode.path("data").path("sessionInfo").path("roleName"); sessionRole = roleNode.isNull() ? null : roleNode.asText(); JsonNode warehouseNode = jsonNode.path("data").path("sessionInfo").path("warehouseName"); sessionWarehouse = warehouseNode.isNull() ? null : warehouseNode.asText(); commonParams = SessionUtil.getCommonParams(jsonNode.path("data").path("parameters")); if (serverVersion != null) { logger.debug("Server version: {}", serverVersion); if (serverVersion.indexOf(" ") > 0) { databaseVersion = serverVersion.substring(0, serverVersion.indexOf(" ")); } else { databaseVersion = serverVersion; } } else { logger.debug("Server version is null", false); } if (databaseVersion != null) { String[] components = databaseVersion.split("\\."); if (components.length >= 2) { try { databaseMajorVersion = Integer.parseInt(components[0]); databaseMinorVersion = Integer.parseInt(components[1]); } catch (Exception ex) { logger.error( "Exception encountered when parsing server " + "version: {} Exception: {}", databaseVersion, ex.getMessage()); } } } else { logger.debug("database version is null", false); } if (!jsonNode.path("data").path("newClientForUpgrade").isNull()) { newClientForUpgrade = jsonNode.path("data").path("newClientForUpgrade").asText(); logger.debug("New client: {}", newClientForUpgrade); } // get health check interval and adjust network timeouts if different int healthCheckIntervalFromGS = jsonNode.path("data").path("healthCheckInterval").asInt(); logger.debug("Health check interval: {}", healthCheckIntervalFromGS); if (healthCheckIntervalFromGS > 0 && healthCheckIntervalFromGS != healthCheckInterval) { // add health check interval to socket timeout httpClientSocketTimeout = loginInput.getSocketTimeoutInMillis() + (healthCheckIntervalFromGS * 1000); final RequestConfig requestConfig = RequestConfig.copy(HttpUtil.getRequestConfigWithoutCookies()) .setConnectTimeout(httpClientConnectionTimeout) .setSocketTimeout(httpClientSocketTimeout) .build(); HttpUtil.setRequestConfig(requestConfig); logger.debug("Adjusted connection timeout to: {}", httpClientConnectionTimeout); logger.debug("Adjusted socket timeout to: {}", httpClientSocketTimeout); } } catch (SnowflakeSQLException ex) { throw ex; // must catch here to avoid Throwable to get the exception } catch (IOException ex) { logger.error("IOException when creating session: " + postRequest, ex); throw new SnowflakeSQLException( ex, 
SqlState.IO_ERROR,
          ErrorCode.NETWORK_ERROR.getMessageCode(),
          "Exception encountered when opening connection: " + ex.getMessage());
    } catch (Throwable ex) {
      logger.error("Exception when creating session: " + postRequest, ex);
      throw new SnowflakeSQLException(
          ex,
          SqlState.SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION,
          ErrorCode.CONNECTION_ERROR.getMessageCode(),
          ErrorCode.CONNECTION_ERROR.getMessageCode(),
          ex.getMessage());
    }

    SFLoginOutput ret =
        new SFLoginOutput(
            sessionToken,
            masterToken,
            masterTokenValidityInSeconds,
            idToken,
            mfaToken,
            loginInput.getOauthAccessToken(),
            loginInput.getOauthRefreshToken(),
            databaseVersion,
            databaseMajorVersion,
            databaseMinorVersion,
            httpClientSocketTimeout,
            httpClientConnectionTimeout,
            sessionDatabase,
            sessionSchema,
            sessionRole,
            sessionWarehouse,
            sessionId,
            commonParams,
            response != null ? response.getHeaders() : new HashMap<>());

    if (asBoolean(loginInput.getSessionParameters().get(CLIENT_STORE_TEMPORARY_CREDENTIAL))) {
      if (consentCacheIdToken) {
        CredentialManager.writeIdToken(loginInput, ret.getIdToken());
      }
      if (AuthenticatorType.OAUTH_AUTHORIZATION_CODE
          .name()
          .equalsIgnoreCase(loginInput.getOriginalAuthenticator())) {
        if (loginInput.getOauthRefreshToken() != null) {
          CredentialManager.writeOAuthRefreshToken(loginInput);
        }
        if (loginInput.getDPoPPublicKey() != null
            && loginInput.getOauthAccessToken() != null
            && loginInput.isDPoPEnabled()) {
          CredentialManager.writeDPoPBundledAccessToken(loginInput);
        } else if (loginInput.getOauthAccessToken() != null) {
          CredentialManager.writeOAuthAccessToken(loginInput);
        }
      }
    }
    if (asBoolean(loginInput.getSessionParameters().get(CLIENT_REQUEST_MFA_TOKEN))) {
      CredentialManager.writeMfaToken(loginInput, ret.getMfaToken());
    }

    stopwatch.stop();
    logger.debug(
        "User: {}, host: {} with authentication method: {} authenticated successfully in {} ms",
        loginInput.getUserName(),
        loginInput.getHostFromServerUrl(),
        authenticatorType,
        stopwatch.elapsedMillis());
    return ret;
  }

  static Map<String, Object> createClientEnvironmentInfo(
      SFLoginInput loginInput,
      Map<SFSessionProperty, Object> connectionPropertiesMap,
      String tracingLevel,
      AuthenticatorType authenticatorType) {
    // map of client environment parameters, including connection parameters
    // and environment properties like OS version, etc.
    Map<String, Object> clientEnv = new HashMap<>();
    clientEnv.put("OS", systemGetProperty("os.name"));
    clientEnv.put("OS_VERSION", systemGetProperty("os.version"));

    // Add Linux distribution details from /etc/os-release (only on Linux)
    Map<?, ?> osDetails = OsReleaseDetails.load();
    if (!osDetails.isEmpty()) {
      clientEnv.put("OS_DETAILS", osDetails);
    }

    // Add libc family and version when detectable on Linux. Fields are omitted on non-Linux
    // systems and on Linux when detection is unavailable or inconclusive. See LibcDetails for
    // detection strategies.
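    // Illustrative (assumed) values on a glibc-based distro:
    //   LIBC_FAMILY=glibc, LIBC_VERSION=2.35
    // Detection failures below are logged at debug level and the fields are simply omitted,
    // so libc probing does not fail the login.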
    try {
      LibcInfo libcInfo = LibcDetails.load();
      if (libcInfo.getFamily() != null) {
        clientEnv.put("LIBC_FAMILY", libcInfo.getFamily());
      }
      if (libcInfo.getVersion() != null) {
        clientEnv.put("LIBC_VERSION", libcInfo.getVersion());
      }
    } catch (Exception | LinkageError e) {
      logger.debug("Failed to detect libc details: {}", e.getMessage());
    }

    clientEnv.put("JAVA_VERSION", systemGetProperty("java.version"));
    clientEnv.put("JAVA_RUNTIME", systemGetProperty("java.runtime.name"));
    clientEnv.put("JAVA_VM", systemGetProperty("java.vm.name"));
    clientEnv.put("OCSP_MODE", loginInput.getOCSPMode().name());
    clientEnv.put("CERT_REVOCATION_CHECK_MODE", getCertRevocationMode(loginInput));

    if (loginInput.getApplication() != null) {
      clientEnv.put("APPLICATION", loginInput.getApplication());
    } else {
      // When you add new client environment info, please add new keys to
      // messages_en_US.src.json so that they can be displayed properly in UI
      // detect app name
      String appName = systemGetProperty("sun.java.command");
      // remove the arguments
      if (appName != null) {
        if (appName.indexOf(" ") > 0) {
          appName = appName.substring(0, appName.indexOf(" "));
        }
        clientEnv.put("APPLICATION", appName);
      }
    }

    // SNOW-20103: track additional client info in session
    String clientInfoJSONStr;
    if (connectionPropertiesMap.containsKey(SFSessionProperty.CLIENT_INFO)) {
      clientInfoJSONStr = (String) connectionPropertiesMap.get(SFSessionProperty.CLIENT_INFO);
    }
    // if connection property is not set, check session property
    else {
      clientInfoJSONStr = systemGetProperty("snowflake.client.info");
    }
    if (clientInfoJSONStr != null) {
      JsonNode clientInfoJSON = null;
      try {
        clientInfoJSON = mapper.readTree(clientInfoJSONStr);
      } catch (Throwable ex) {
        logger.debug(
            "failed to process snowflake.client.info property as JSON: {}", clientInfoJSONStr, ex);
      }
      if (clientInfoJSON != null) {
        Iterator<Map.Entry<String, JsonNode>> fields = clientInfoJSON.fields();
        while (fields.hasNext()) {
          Map.Entry<String, JsonNode> field = fields.next();
          clientEnv.put(field.getKey(), field.getValue().asText());
        }
      }
    }

    /* Add all connection parameters and their values that have been set for this
     * current session into clientEnv. These are the params set via the Properties map or in the
     * connection string. Includes username, password, serverUrl, timeout values, etc */
    for (Map.Entry<SFSessionProperty, Object> entry : connectionPropertiesMap.entrySet()) {
      // exclude client parameters already covered by other runtime parameters that have been
      // added to clientEnv
      if (entry.getKey().equals(SFSessionProperty.APP_ID)
          || entry.getKey().equals(SFSessionProperty.APP_VERSION)) {
        continue;
      }
      String propKey = entry.getKey().getPropertyKey();
      // mask sensitive values like passwords, tokens, etc
      String propVal = SecretDetector.maskParameterValue(propKey, entry.getValue().toString());
      clientEnv.put(propKey, propVal);
    }

    // if map does not contain the tracing property, the default is set. Add
    // this default value to the map.
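    // Everything collected in clientEnv is sent to the server as the CLIENT_ENVIRONMENT
    // login parameter, which is why sensitive connection values are masked above before
    // they leave the client.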
if (!connectionPropertiesMap.containsKey(SFSessionProperty.TRACING)) { clientEnv.put(SFSessionProperty.TRACING.getPropertyKey(), tracingLevel); } clientEnv.put("JDBC_JAR_NAME", DriverUtil.getJdbcJarname()); clientEnv.put("LOGGING_IMPLEMENTATION", SFLoggerFactory.getLoggerImplementationName()); // Add platform detection (if not disabled) if (!loginInput.isDisablePlatformDetection()) { try { // Use cached platform detection results (initialized once on first use) List detectedPlatforms = PlatformDetector.getCachedPlatformDetection(); clientEnv.put("PLATFORM", detectedPlatforms); } catch (Exception e) { logger.debug("Platform detection failed: {}", e.getMessage()); // Continue without platform information } } else { logger.debug("Platform detection is disabled"); } // OAuth metrics data if (authenticatorType == AuthenticatorType.OAUTH && loginInput.getOriginalAuthenticator() != null) { clientEnv.put(ClientAuthnParameter.OAUTH_TYPE.name(), loginInput.getOriginalAuthenticator()); } // Application path try { String applicationPath = new File(SessionUtil.class.getProtectionDomain().getCodeSource().getLocation().toURI()) .getPath(); clientEnv.put(ClientAuthnParameter.APPLICATION_PATH.name(), applicationPath); } catch (Exception e) { logger.debug("Exception in retrieving application path for client environment", e); clientEnv.put(ClientAuthnParameter.APPLICATION_PATH.name(), "UNKNOWN"); } try { addMinicoreTelemetry(clientEnv); } catch (Throwable t) { logger.debug("Failed to add minicore telemetry: {}", t.getMessage()); } return clientEnv; } private static void addMinicoreTelemetry(Map clientEnv) { MinicoreTelemetry telemetry = MinicoreTelemetry.create(); clientEnv.putAll(telemetry.toClientEnvironmentTelemetryMap()); } private static String getCertRevocationMode(SFLoginInput loginInput) { HttpClientSettingsKey httpClientSettings = loginInput.getHttpClientSettingsKey(); if (httpClientSettings == null) { return null; } CertRevocationCheckMode revocationCheckMode = httpClientSettings.getRevocationCheckMode(); if (revocationCheckMode == null) { return null; } return revocationCheckMode.name(); } private static void clearAccessTokenCache(SFLoginInput loginInput) throws SFException { loginInput.setOauthAccessToken(null); loginInput.setDPoPPublicKey(null); if (asBoolean(loginInput.getSessionParameters().get(CLIENT_STORE_TEMPORARY_CREDENTIAL))) { CredentialManager.deleteOAuthAccessTokenCacheEntry(loginInput); CredentialManager.deleteDPoPBundledAccessTokenCacheEntry(loginInput); } } private static void setServiceNameHeader(SFLoginInput loginInput, HttpPost postRequest) { if (!isNullOrEmpty(loginInput.getServiceName())) { // service name is used to route a request to appropriate cluster. 
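// i.e. it sends "X-Snowflake-Service: <service name>"; SF_HEADER_SERVICE_NAME is defined above as "X-Snowflake-Service".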
postRequest.setHeader(SF_HEADER_SERVICE_NAME, loginInput.getServiceName()); } } private static String nullStringAsEmptyString(String value) { if (isNullOrEmpty(value) || "null".equals(value)) { return ""; } return value; } /** * Delete the id token cache * * @param host The host string * @param user The user */ public static void deleteIdTokenCache(String host, String user) { CredentialManager.deleteIdTokenCacheEntry(host, user); } /** * Delete the Oauth access token cache * * @param host The host string * @param user The user */ public static void deleteOAuthAccessTokenCache(String host, String user) { CredentialManager.deleteOAuthAccessTokenCacheEntry(host, user); } /** * Delete the Oauth refresh token cache * * @param host The host string * @param user The user */ public static void deleteOAuthRefreshTokenCache(String host, String user) { CredentialManager.deleteOAuthRefreshTokenCacheEntry(host, user); } /** * Delete the mfa token cache * * @param host The host string * @param user The user */ public static void deleteMfaTokenCache(String host, String user) { CredentialManager.deleteMfaTokenCacheEntry(host, user); } /** * Renew a session. * * @param loginInput login information * @param session the session associated with the request * @return login output * @throws SFException if unexpected uri information * @throws SnowflakeSQLException if failed to renew the session */ static SFLoginOutput renewSession(SFLoginInput loginInput, SFBaseSession session) throws SFException, SnowflakeSQLException { return renewTokenRequest(loginInput, session); } private static SFLoginOutput renewTokenRequest(SFLoginInput loginInput, SFBaseSession session) throws SFException, SnowflakeSQLException { AssertUtil.assertTrue(loginInput.getServerUrl() != null, "missing server URL for tokenRequest"); AssertUtil.assertTrue( loginInput.getMasterToken() != null, "missing master token for tokenRequest"); AssertUtil.assertTrue( loginInput.getSessionToken() != null, "missing session token for tokenRequest"); AssertUtil.assertTrue( loginInput.getLoginTimeout() >= 0, "negative login timeout for tokenRequest"); // build URL for login request URIBuilder uriBuilder; HttpPost postRequest; String sessionToken; String masterToken; try { uriBuilder = new URIBuilder(loginInput.getServerUrl()); uriBuilder.setPath(SF_PATH_TOKEN_REQUEST); uriBuilder.addParameter(SFSession.SF_QUERY_REQUEST_ID, UUIDUtils.getUUID().toString()); postRequest = new HttpPost(uriBuilder.build()); // Add headers for driver name and version postRequest.addHeader(SF_HEADER_CLIENT_APP_ID, loginInput.getAppId()); postRequest.addHeader(SF_HEADER_CLIENT_APP_VERSION, loginInput.getAppVersion()); // Add custom headers before adding common headers HttpUtil.applyAdditionalHeadersForSnowsight( postRequest, loginInput.getAdditionalHttpHeadersForSnowsight()); } catch (URISyntaxException ex) { logger.error("Exception when creating http request", ex); throw new SFException(ex, ErrorCode.INTERNAL_ERROR, "unexpected URI syntax exception:3"); } try { // input json with old session token and request type, notice the // session token needs to be quoted. 
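// The resulting body has the shape: {"oldSessionToken": "<current session token>", "requestType": "RENEW"}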
Map<String, String> payload = new HashMap<>(); String headerToken = loginInput.getMasterToken(); payload.put("oldSessionToken", loginInput.getSessionToken()); payload.put("requestType", TokenRequestType.RENEW.value); String json = mapper.writeValueAsString(payload); // attach the login info json body to the post request StringEntity input = new StringEntity(json, StandardCharsets.UTF_8); input.setContentType("application/json"); postRequest.setEntity(input); postRequest.addHeader("accept", "application/json"); postRequest.setHeader( SFSession.SF_HEADER_AUTHORIZATION, SFSession.SF_HEADER_SNOWFLAKE_AUTHTYPE + " " + SFSession.SF_HEADER_TOKEN_TAG + "=\"" + headerToken + "\""); setServiceNameHeader(loginInput, postRequest); logger.debug( "Request type: {}, old session token: {}, " + "master token: {}", TokenRequestType.RENEW.value, (ArgSupplier) () -> loginInput.getSessionToken() != null ? "******" : null, (ArgSupplier) () -> loginInput.getMasterToken() != null ? "******" : null); String theString = HttpUtil.executeGeneralRequest( postRequest, loginInput.getLoginTimeout(), loginInput.getAuthTimeout(), loginInput.getSocketTimeoutInMillis(), 0, loginInput.getHttpClientSettingsKey(), session); // general method, same as with data binding JsonNode jsonNode = mapper.readTree(theString); // check the success field first if (!jsonNode.path("success").asBoolean()) { logger.debug("Response: {}", theString); String errorCode = jsonNode.path("code").asText(); String message = jsonNode.path("message").asText(); EventUtil.triggerBasicEvent( Event.EventType.NETWORK_ERROR, "SessionUtil:renewSession failure, error code=" + errorCode + ", message=" + message, true); SnowflakeUtil.checkErrorAndThrowExceptionIncludingReauth(jsonNode); } // session token is in the data field of the returned json response sessionToken = jsonNode.path("data").path("sessionToken").asText(); masterToken = jsonNode.path("data").path("masterToken").asText(); } catch (IOException ex) { logger.error("IOException when renewing session: " + postRequest, ex); // Any EventType.NETWORK_ERRORs should have been triggered before // the exception was thrown.
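// (the NETWORK_ERROR event for a failed renewal is emitted in the success-field check above; this catch only wraps transport-level failures for the caller)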
throw new SFException(ex, ErrorCode.NETWORK_ERROR, ex.getMessage()); } SFLoginOutput loginOutput = new SFLoginOutput(); loginOutput.setSessionToken(sessionToken).setMasterToken(masterToken); return loginOutput; } /** * Close a session * * @param loginInput login information * @param session the session associated with the request * @throws SnowflakeSQLException if failed to close session * @throws SFException if failed to close session */ static void closeSession(SFLoginInput loginInput, SFBaseSession session) throws SFException, SnowflakeSQLException { logger.trace("void close() throws SFException"); // assert the following inputs are valid AssertUtil.assertTrue( loginInput.getServerUrl() != null, "missing server URL for closing session"); AssertUtil.assertTrue( loginInput.getSessionToken() != null, "missing session token for closing session"); AssertUtil.assertTrue( loginInput.getLoginTimeout() >= 0, "negative login timeout for closing session"); HttpPost postRequest = null; try { URIBuilder uriBuilder; uriBuilder = new URIBuilder(loginInput.getServerUrl()); uriBuilder.addParameter(SF_QUERY_SESSION_DELETE, Boolean.TRUE.toString()); uriBuilder.addParameter(SFSession.SF_QUERY_REQUEST_ID, UUIDUtils.getUUID().toString()); uriBuilder.setPath(SF_PATH_SESSION); postRequest = new HttpPost(uriBuilder.build()); // Add custom headers before adding common headers HttpUtil.applyAdditionalHeadersForSnowsight( postRequest, loginInput.getAdditionalHttpHeadersForSnowsight()); postRequest.setHeader( SFSession.SF_HEADER_AUTHORIZATION, SFSession.SF_HEADER_SNOWFLAKE_AUTHTYPE + " " + SFSession.SF_HEADER_TOKEN_TAG + "=\"" + loginInput.getSessionToken() + "\""); setServiceNameHeader(loginInput, postRequest); String theString = HttpUtil.executeGeneralRequestWithContext( postRequest, loginInput.getLoginTimeout(), 0, loginInput.getSocketTimeoutInMillis(), 0, 0, loginInput.getHttpClientSettingsKey(), session) .getResponseBody(); JsonNode rootNode; logger.debug("Connection close response: {}", theString); rootNode = mapper.readTree(theString); SnowflakeUtil.checkErrorAndThrowException(rootNode); } catch (URISyntaxException ex) { throw new RuntimeException("Unexpected URI syntax exception", ex); } catch (IOException ex) { logger.error("Unexpected IO exception for: " + postRequest, ex); } catch (SnowflakeSQLException ex) { // ignore session expiration exceptions and exceptions for // sessions that no longer exist if (ex.getErrorCode() != Constants.SESSION_EXPIRED_GS_CODE && ex.getErrorCode() != Constants.SESSION_GONE) { throw ex; } } } /** * Given an access token, query the IdP URL Snowflake app to get the SAML response. We also need * to perform important client-side validation: validate that the post-back URL returned with the * SAML response has the same prefix as Snowflake's server URL, which is the intended destination * URL for Snowflake. Explanation: this emulates the behavior of the IdP-initiated login flow in * the user's browser, where the IdP instructs the browser to POST the SAML assertion to the * specific SP endpoint. This is critical in preventing a SAML assertion issued to one SP from * being sent to another SP.
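* The check is implemented in validateSAML below, which compares the form action URL extracted from the returned HTML against loginInput.getServerUrl().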
* * @param loginInput Login Info for the request * @param ssoUrl URL to use for SSO * @param oneTimeTokenSupplier The function returning the token used for SSO * @return The response in HTML form * @throws SnowflakeSQLException Will be thrown if the destination URL in the SAML assertion does * not match */ private static String federatedFlowStep4( SFLoginInput loginInput, String ssoUrl, ThrowingFunction<RetryContext, String, SnowflakeSQLException> oneTimeTokenSupplier) throws SnowflakeSQLException { // This call of the oneTimeTokenSupplier is a part of the basic federated flow (before any // retries). It is distinguished by retrieving a token without any RetryContext (passing // 'null'). We pass a RetryContext instance only while we are in the retry process - // and we want to exchange information between the injected logic and the outer scope. String oneTimeToken = oneTimeTokenSupplier.apply(null); String responseHtml = ""; try { RetryContextManager retryWithNewOTTManager = createFederatedFlowStep4RetryContext(ssoUrl, oneTimeTokenSupplier, loginInput); HttpGet httpGet = new HttpGet(); prepareFederatedFlowStep4Request(httpGet, ssoUrl, oneTimeToken); responseHtml = HttpUtil.executeGeneralRequest( httpGet, loginInput.getLoginTimeout(), loginInput.getAuthTimeout(), loginInput.getSocketTimeoutInMillis(), 0, loginInput.getHttpClientSettingsKey(), retryWithNewOTTManager, null); // step 5 validateSAML(responseHtml, loginInput); } catch (IOException | URISyntaxException ex) { handleFederatedFlowError(loginInput, ex); } return responseHtml; } private static RetryContextManager createFederatedFlowStep4RetryContext( String ssoUrl, ThrowingFunction<RetryContext, String, SnowflakeSQLException> oneTimeTokenSupplier, SFLoginInput loginInput) { RetryContextManager retryWithNewOTTManager = new RetryContextManager(RetryContextManager.RetryHook.ALWAYS_BEFORE_RETRY); retryWithNewOTTManager.registerRetryCallback( (HttpRequestBase retrieveSamlRequest, RetryContext retryContext) -> { try { String newOneTimeToken = oneTimeTokenSupplier.apply(retryContext); prepareFederatedFlowStep4Request(retrieveSamlRequest, ssoUrl, newOneTimeToken); } catch (MalformedURLException | URISyntaxException ex) { handleFederatedFlowError(loginInput, ex); } return retryContext; }); return retryWithNewOTTManager; } private static void validateSAML(String responseHtml, SFLoginInput loginInput) throws SnowflakeSQLException, MalformedURLException { if (!loginInput.getDisableSamlURLCheck()) { String postBackUrl = getPostBackUrlFromHTML(responseHtml); if (!isPrefixEqual(postBackUrl, loginInput.getServerUrl())) { URL idpDestinationUrl = new URL(postBackUrl); URL clientDestinationUrl = new URL(loginInput.getServerUrl()); String idpDestinationHostName = idpDestinationUrl.getHost(); String clientDestinationHostName = clientDestinationUrl.getHost(); logger.error( "The Snowflake hostname specified in the client connection {} does not match " + "the destination hostname in the SAML response returned by the IdP: {}", clientDestinationHostName, idpDestinationHostName); // Session is in process of getting created, so exception constructor takes in null throw new SnowflakeSQLLoggedException( null, ErrorCode.IDP_INCORRECT_DESTINATION.getMessageCode(), SqlState.SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION); } } } /** * Query the IdP token URL to authenticate and retrieve the access token * * @param loginInput The login info for the request * @param tokenUrl The URL used to retrieve the access token * @return Returns the one time token * @throws SnowflakeSQLException Will be thrown if the execute request fails */ private static String
federatedFlowStep3( SFLoginInput loginInput, String tokenUrl, RetryContext retryContext) throws SnowflakeSQLException { String oneTimeToken = ""; try { URL url = new URL(tokenUrl); URI tokenUri = url.toURI(); final HttpPost postRequest = new HttpPost(tokenUri); setFederatedFlowStep3PostRequestAuthData(postRequest, loginInput); int retryTimeout; if (retryContext != null) { // This casting could be avoided if all execution methods from SessionUtil to RestRequest // shared the same data type (either long or int) for the retryTimeout parameter. Now they // are all cast to long at the end (in RestRequest's methods). retryTimeout = (int) retryContext.getRemainingRetryTimeoutInSeconds(); } else { retryTimeout = loginInput.getLoginTimeout(); } final String idpResponse = HttpUtil.executeRequestWithoutCookies( postRequest, retryTimeout, loginInput.getAuthTimeout(), loginInput.getSocketTimeoutInMillis(), 0, 0, null, loginInput.getHttpClientSettingsKey(), null); // session token is in the data field of the returned json response final JsonNode jsonNode = mapper.readTree(idpResponse); boolean isMfaEnabledInOkta = "MFA_REQUIRED".equals(jsonNode.get("status").asText()); if (isMfaEnabledInOkta) { throw new SnowflakeSQLLoggedException( null, ErrorCode.OKTA_MFA_NOT_SUPPORTED.getMessageCode(), SqlState.FEATURE_NOT_SUPPORTED); } oneTimeToken = jsonNode.get("sessionToken") != null ? jsonNode.get("sessionToken").asText() : jsonNode.get("cookieToken").asText(); } catch (IOException | URISyntaxException ex) { handleFederatedFlowError(loginInput, ex); } logger.debug("User is authenticated against {}.", loginInput.getAuthenticator()); return oneTimeToken; } /** * Perform important client-side validation: validate that both the token URL and the SSO URL * contain the same prefix (protocol + host + port) as the given authenticator URL. Explanation: * this provides a way for the user to 'authenticate' the IdP they are sending their credentials * to. Without such a check, the user could be coerced to provide credentials to an IDP * impersonator.
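* For example, with an authenticator of https://example.okta.com (an illustrative hostname), both the token URL and the SSO URL must also start with https://example.okta.com - same protocol, host, and port, as verified by isPrefixEqual.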
* * @param loginInput The login info for the request * @param tokenUrl The token URL * @param ssoUrl The SSO URL * @throws SnowflakeSQLException Will be thrown if the prefix for the tokenUrl and ssoUrl do not * match */ private static void federatedFlowStep2(SFLoginInput loginInput, String tokenUrl, String ssoUrl) throws SnowflakeSQLException { try { if (!isPrefixEqual(loginInput.getAuthenticator(), tokenUrl) || !isPrefixEqual(loginInput.getAuthenticator(), ssoUrl)) { logger.debug( "The specified authenticator {} is not supported.", loginInput.getAuthenticator()); // Session is in process of getting created, so exception constructor takes in null session // value throw new SnowflakeSQLLoggedException( null, ErrorCode.IDP_CONNECTION_ERROR.getMessageCode(), SqlState.SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION /* session= */ ); } } catch (MalformedURLException ex) { handleFederatedFlowError(loginInput, ex); } } /** * Query Snowflake to obtain IDP token url and IDP SSO url * * @param loginInput The login info for the request * @throws SnowflakeSQLException Will be thrown if the execute request step fails */ private static JsonNode federatedFlowStep1(SFLoginInput loginInput) throws SnowflakeSQLException { JsonNode dataNode = null; try { StringEntity requestInput = prepareFederatedFlowStep1RequestInput(loginInput); HttpPost postRequest = new HttpPost(); prepareFederatedFlowStep1PostRequest(postRequest, loginInput, requestInput); final String gsResponse = HttpUtil.executeGeneralRequest( postRequest, loginInput.getLoginTimeout(), loginInput.getAuthTimeout(), loginInput.getSocketTimeoutInMillis(), 0, loginInput.getHttpClientSettingsKey(), null); logger.debug("Authenticator-request response: {}", gsResponse); JsonNode jsonNode = mapper.readTree(gsResponse); // check the success field first if (!jsonNode.path("success").asBoolean()) { logger.debug("Response: {}", gsResponse); int errorCode = jsonNode.path("code").asInt(); throw new SnowflakeSQLException( NO_QUERY_ID, jsonNode.path("message").asText(), SqlState.SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION, errorCode); } // session token is in the data field of the returned json response dataNode = jsonNode.path("data"); } catch (IOException | URISyntaxException ex) { handleFederatedFlowError(loginInput, ex); } return dataNode; } /** * Logs an error generated during the federated authentication flow and re-throws it as a * SnowflakeSQLException. Note that we separate IOExceptions since those tend to be network * related. * * @param loginInput The login info from the request * @param ex The exception to process * @throws SnowflakeSQLException Will be thrown for all calls to this method */ private static void handleFederatedFlowError(SFLoginInput loginInput, Exception ex) throws SnowflakeSQLException { if (ex instanceof IOException) { logger.error("IOException when authenticating with " + loginInput.getAuthenticator(), ex); throw new SnowflakeSQLException( ex, SqlState.IO_ERROR, ErrorCode.NETWORK_ERROR.getMessageCode(), "Exception encountered when opening connection: " + ex.getMessage()); } logger.error("Exception when authenticating with " + loginInput.getAuthenticator(), ex); throw new SnowflakeSQLException( ex, SqlState.SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION, ErrorCode.CONNECTION_ERROR.getMessageCode(), ErrorCode.CONNECTION_ERROR.getMessageCode(), ex.getMessage()); } /** * FEDERATED FLOW See SNOW-27798 for additional details. 
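* The flow in short: step 1 asks GS for the IdP token/SSO URLs, step 2 validates those URLs against the configured authenticator, step 3 exchanges the user's credentials for a one-time token, and step 4 uses that token to fetch the SAML response from the IdP.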
* * @param loginInput The login info from the request * @return saml response * @throws SnowflakeSQLException Will be thrown if any of the federated steps fail */ private static String getSamlResponseUsingOkta(SFLoginInput loginInput) throws SnowflakeSQLException { while (true) { try { JsonNode dataNode = federatedFlowStep1(loginInput); String tokenUrl = dataNode.path("tokenUrl").asText(); String ssoUrl = dataNode.path("ssoUrl").asText(); federatedFlowStep2(loginInput, tokenUrl, ssoUrl); ThrowingFunction<RetryContext, String, SnowflakeSQLException> oneTimeTokenSupplier = (RetryContext retryContext) -> federatedFlowStep3(loginInput, tokenUrl, retryContext); return federatedFlowStep4(loginInput, ssoUrl, oneTimeTokenSupplier); } catch (SnowflakeSQLException ex) { // This error gets thrown if the okta request encountered a retry-able error that // requires getting a new one-time token. if (ex.getErrorCode() == ErrorCode.AUTHENTICATOR_REQUEST_TIMEOUT.getMessageCode()) { logger.debug("Failed to get Okta SAML response. Retrying without changing retry count."); } else { throw ex; } } } } /** * Verify if two input URLs have the same protocol, host, and port. * * @param aUrlStr a source URL string * @param bUrlStr a target URL string * @return true if matched, otherwise false * @throws MalformedURLException raised if a URL string is not valid. */ static boolean isPrefixEqual(String aUrlStr, String bUrlStr) throws MalformedURLException { URL aUrl = new URL(aUrlStr); URL bUrl = new URL(bUrlStr); int aPort = aUrl.getPort(); int bPort = bUrl.getPort(); if (aPort == -1 && "https".equals(aUrl.getProtocol())) { // default port number for HTTPS aPort = 443; } if (bPort == -1 && "https".equals(bUrl.getProtocol())) { // default port number for HTTPS bPort = 443; } // no default port number for HTTP is supported. return aUrl.getHost().equalsIgnoreCase(bUrl.getHost()) && aUrl.getProtocol().equalsIgnoreCase(bUrl.getProtocol()) && aPort == bPort; } /** * Extracts the post-back URL from the HTML returned by the IdP * * @param html The HTML that we are parsing to find the post-back URL * @return The post-back URL */ private static String getPostBackUrlFromHTML(String html) { Document doc = Jsoup.parse(html); Elements e1 = doc.getElementsByTag("body"); Elements e2 = e1.get(0).getElementsByTag("form"); return e2.first().attr("action"); } /** * Helper function to parse a JsonNode from a GS response containing CommonParameters, emitting a * map of parameters * * @param paramsNode parameters in JSON form * @return map object including key and value pairs */ public static Map<String, Object> getCommonParams(JsonNode paramsNode) { Map<String, Object> parameters = new HashMap<>(); for (JsonNode child : paramsNode) { // If there isn't a name then the response from GS must be erroneous. if (!child.hasNonNull("name")) { logger.error("Common Parameter JsonNode encountered with " + "no parameter name!", false); continue; } // Look up the parameter based on the "name" attribute of the node. String paramName = child.path("name").asText(); // What type of value is it and what's the value?
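// STRING_PARAMS / INT_PARAMS / BOOLEAN_PARAMS select the Java type for known parameter names; unknown names fall through to a generic JSON-to-Object conversion below.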
if (!child.hasNonNull("value")) { logger.debug("No value found for Common Parameter: {}", child.path("name").asText()); continue; } if (STRING_PARAMS.contains(paramName.toUpperCase())) { parameters.put(paramName, child.path("value").asText()); } else if (INT_PARAMS.contains(paramName.toUpperCase())) { parameters.put(paramName, child.path("value").asInt()); } else if (BOOLEAN_PARAMS.contains(paramName.toUpperCase())) { parameters.put(paramName, child.path("value").asBoolean()); } else { try { // Value should only be boolean, int or string so we don't expect exceptions here. parameters.put(paramName, mapper.treeToValue(child.path("value"), Object.class)); } catch (Exception e) { logger.debug( "Unknown Common Parameter Failed to Parse: {} -> {}. Exception: {}", paramName, child.path("value"), e.getMessage()); } logger.debug("Unknown Common Parameter: {}", paramName); } logger.debug("Parameter {}: {}", paramName, child.path("value").asText()); } return parameters; } static void updateSfDriverParamValues(Map parameters, SFBaseSession session) { if (parameters != null && !parameters.isEmpty()) { session.setCommonParameters(parameters); } for (Map.Entry entry : parameters.entrySet()) { logger.debug("Processing parameter {}", entry.getKey()); if ("CLIENT_SESSION_KEEP_ALIVE".equalsIgnoreCase(entry.getKey())) { if (session != null) { session.setEnableHeartbeat((Boolean) entry.getValue()); } } else if (CLIENT_SESSION_KEEP_ALIVE_HEARTBEAT_FREQUENCY.equalsIgnoreCase(entry.getKey())) { if (session != null) { session.setHeartbeatFrequency((int) entry.getValue()); } } else if ("CLIENT_ENABLE_LOG_INFO_STATEMENT_PARAMETERS".equalsIgnoreCase(entry.getKey())) { boolean enableLogging = (Boolean) entry.getValue(); if (session != null && session.getPreparedStatementLogging() != enableLogging) { session.setPreparedStatementLogging(enableLogging); } } else if ("AUTOCOMMIT".equalsIgnoreCase(entry.getKey())) { boolean autoCommit = (Boolean) entry.getValue(); if (session != null && session.getAutoCommit() != autoCommit) { session.setAutoCommit(autoCommit); } } else if (JDBC_RS_COLUMN_CASE_INSENSITIVE.equalsIgnoreCase(entry.getKey()) || CLIENT_RESULT_COLUMN_CASE_INSENSITIVE.equalsIgnoreCase(entry.getKey())) { if (session != null && !session.isResultColumnCaseInsensitive()) { session.setResultColumnCaseInsensitive((boolean) entry.getValue()); } } else if (CLIENT_METADATA_REQUEST_USE_CONNECTION_CTX.equalsIgnoreCase(entry.getKey())) { if (session != null) { session.setMetadataRequestUseConnectionCtx((boolean) entry.getValue()); } } else if (CLIENT_METADATA_USE_SESSION_DATABASE.equalsIgnoreCase(entry.getKey())) { if (session != null) { session.setMetadataRequestUseSessionDatabase((boolean) entry.getValue()); } } else if (JDBC_TREAT_TIMESTAMP_NTZ_AS_UTC.equalsIgnoreCase(entry.getKey())) { if (session != null) { session.setTreatNTZAsUTC((boolean) entry.getValue()); } } else if (JDBC_FORMAT_DATE_WITH_TIMEZONE.equalsIgnoreCase(entry.getKey())) { if (session != null) { session.setFormatDateWithTimezone((boolean) entry.getValue()); } } else if (JDBC_USE_SESSION_TIMEZONE.equalsIgnoreCase(entry.getKey())) { if (session != null) { session.setUseSessionTimezone((boolean) entry.getValue()); } } else if ("CLIENT_TIMESTAMP_TYPE_MAPPING".equalsIgnoreCase(entry.getKey())) { if (session != null) { session.setTimestampMappedType( SnowflakeType.valueOf(((String) entry.getValue()).toUpperCase())); } } else if ("JDBC_TREAT_DECIMAL_AS_INT".equalsIgnoreCase(entry.getKey())) { if (session != null) { session.setJdbcTreatDecimalAsInt((boolean) 
entry.getValue()); } } else if ("JDBC_ENABLE_COMBINED_DESCRIBE".equalsIgnoreCase(entry.getKey())) { if (session != null) { session.setEnableCombineDescribe((boolean) entry.getValue()); } } else if (CLIENT_IN_BAND_TELEMETRY_ENABLED.equalsIgnoreCase(entry.getKey())) { if (session != null) { session.setClientTelemetryEnabled((boolean) entry.getValue()); } } else if ("CLIENT_STAGE_ARRAY_BINDING_THRESHOLD".equalsIgnoreCase(entry.getKey())) { if (session != null) { session.setArrayBindStageThreshold((int) entry.getValue()); } } else if (CLIENT_STORE_TEMPORARY_CREDENTIAL.equalsIgnoreCase(entry.getKey())) { if (session != null) { session.setStoreTemporaryCredential((boolean) entry.getValue()); } } else if (SERVICE_NAME.equalsIgnoreCase(entry.getKey())) { if (session != null) { session.setServiceName((String) entry.getValue()); } } else if (CLIENT_ENABLE_CONSERVATIVE_MEMORY_USAGE.equalsIgnoreCase(entry.getKey())) { if (session != null) { session.setEnableConservativeMemoryUsage((boolean) entry.getValue()); } } else if (CLIENT_CONSERVATIVE_MEMORY_ADJUST_STEP.equalsIgnoreCase(entry.getKey())) { if (session != null) { session.setConservativeMemoryAdjustStep((int) entry.getValue()); } } else if (CLIENT_MEMORY_LIMIT.equalsIgnoreCase(entry.getKey())) { if (session != null) { session.setClientMemoryLimit((int) entry.getValue()); } } else if (CLIENT_RESULT_CHUNK_SIZE.equalsIgnoreCase(entry.getKey())) { if (session != null) { session.setClientResultChunkSize((int) entry.getValue()); } } else if (CLIENT_PREFETCH_THREADS.equalsIgnoreCase(entry.getKey())) { if (session != null) { session.setClientPrefetchThreads((int) entry.getValue()); } } else if (CLIENT_OUT_OF_BAND_TELEMETRY_ENABLED.equalsIgnoreCase(entry.getKey())) { // we ignore the parameter CLIENT_OUT_OF_BAND_TELEMETRY_ENABLED // OOB telemetry is always disabled TelemetryService.disableOOBTelemetry(); } else if (CLIENT_VALIDATE_DEFAULT_PARAMETERS.equalsIgnoreCase(entry.getKey())) { if (session != null) { session.setValidateDefaultParameters(SFLoginInput.getBooleanValue(entry.getValue())); } } else if (ENABLE_STAGE_S3_PRIVATELINK_FOR_US_EAST_1.equalsIgnoreCase((entry.getKey()))) { if (session != null) { session.setUseRegionalS3EndpointsForPresignedURL( SFLoginInput.getBooleanValue(entry.getValue())); } } else if (QUERY_CONTEXT_CACHE_SIZE.equalsIgnoreCase(entry.getKey())) { if (session != null) { session.setQueryContextCacheSize((int) entry.getValue()); } } else if (JDBC_ENABLE_PUT_GET.equalsIgnoreCase(entry.getKey())) { if (session != null) { session.setJdbcEnablePutGet(SFLoginInput.getBooleanValue(entry.getValue())); } } else { if (session != null) { session.setOtherParameter(entry.getKey(), entry.getValue()); } } } } enum TokenRequestType { RENEW("RENEW"), CLONE("CLONE"), ISSUE("ISSUE"); private String value; TokenRequestType(String value) { this.value = value; } } /** * Set OCSP cache server. If the URL is for private link sets it to special cache server. 
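* For private link deployments the cache server is derived from the account host, e.g. http://ocsp.{account-host}/{cache-file-name}, as constructed below.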
* * @param serverUrl The Snowflake URL includes protocol such as "https://" * @throws IOException If exception encountered */ public static void resetOCSPUrlIfNecessary(String serverUrl) throws IOException { setOCSPResponseCacheServerURL(serverUrl); if (PrivateLinkDetector.isPrivateLink(serverUrl)) { // Privatelink uses special OCSP Cache server URL url = new URL(serverUrl); String host = url.getHost(); logger.debug("HOST: {}", host); String ocspCacheServerUrl = String.format("http://ocsp.%s/%s", host, SFTrustManager.CACHE_FILE_NAME); logger.debug("OCSP Cache Server for Privatelink: {}", ocspCacheServerUrl); resetOCSPResponseCacherServerURL(ocspCacheServerUrl); } } /** * Helper function to generate a JWT token * * @param privateKey private key * @param privateKeyFile path to private key file * @param privateKeyBase64 base64 encoded content of the private key file * @param privateKeyPwd password for private key file or base64 encoded private key * @param accountName account name * @param userName user name * @return JWT token * @throws SFException if Snowflake error occurs */ public static String generateJWTToken( PrivateKey privateKey, String privateKeyFile, String privateKeyBase64, String privateKeyPwd, String accountName, String userName) throws SFException { return generateJWTToken( privateKey, privateKeyFile, privateKeyBase64, privateKeyPwd, accountName, userName, null); } public static String generateJWTToken( PrivateKey privateKey, String privateKeyFile, String privateKeyBase64, String privateKeyPwd, String accountName, String userName, InternalCallMarker internalCallMarker) throws SFException { recordIfExternal("SessionUtil", "generateJWTToken", internalCallMarker); SessionUtilKeyPair s = new SessionUtilKeyPair( privateKey, privateKeyFile, privateKeyBase64, privateKeyPwd, accountName, userName); return s.issueJwtToken(); } /** * Helper function to generate a JWT token. Use {@link #generateJWTToken(PrivateKey, String, * String, String, String, String)} * * @param privateKey private key * @param privateKeyFile path to private key file * @param privateKeyFilePwd password for private key file * @param accountName account name * @param userName user name * @return JWT token * @throws SFException if Snowflake error occurs */ @Deprecated public static String generateJWTToken( PrivateKey privateKey, String privateKeyFile, String privateKeyFilePwd, String accountName, String userName) throws SFException { return generateJWTToken( privateKey, privateKeyFile, null, privateKeyFilePwd, accountName, userName, null); } /** * Helper method to check if the request path is a login/auth request to use for retry strategy. * * @param request the post request * @return true if this is a login/auth request, false otherwise */ public static boolean isNewRetryStrategyRequest(HttpRequestBase request) { URI requestURI = request.getURI(); String requestPath = requestURI.getPath(); if (requestPath != null) { return requestPath.equals(SF_PATH_LOGIN_REQUEST) || requestPath.equals(SF_PATH_AUTHENTICATOR_REQUEST) || requestPath.equals(SF_PATH_TOKEN_REQUEST) || requestPath.contains(SF_PATH_OKTA_TOKEN_REQUEST_SUFFIX) || requestPath.contains(SF_PATH_OKTA_SSO_REQUEST_SUFFIX); } return false; } /** * Prepares an HTTP POST request for the first step of the federated authentication flow. * * @param loginInput The login information for the request. * @param inputData The JSON input data to include in the request. * @throws URISyntaxException If the constructed URI is invalid. 
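* Note: the request path is forced to SF_PATH_AUTHENTICATOR_REQUEST; any port or extra path segments in loginInput.serverUrl are currently overwritten (see the TODO below, SNOW-1922872).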
*/ private static void prepareFederatedFlowStep1PostRequest( HttpPost postRequest, SFLoginInput loginInput, StringEntity inputData) throws URISyntaxException { URIBuilder fedUriBuilder = new URIBuilder(loginInput.getServerUrl()); // TODO: if loginInput.serverUrl contains port or additional segments - it will be ignored and // overwritten here - to be fixed in SNOW-1922872 fedUriBuilder.setPath(SF_PATH_AUTHENTICATOR_REQUEST); URI fedUrlUri = fedUriBuilder.build(); postRequest.setURI(fedUrlUri); postRequest.setEntity(inputData); postRequest.addHeader("accept", "application/json"); postRequest.addHeader(SF_HEADER_CLIENT_APP_ID, loginInput.getAppId()); postRequest.addHeader(SF_HEADER_CLIENT_APP_VERSION, loginInput.getAppVersion()); } /** * Prepares the JSON input for the first step of the federated authentication flow. * * @param loginInput The login information for the request. * @return A {@link StringEntity} containing the JSON input for the request. * @throws JsonProcessingException If there is an error generating the JSON input. */ private static StringEntity prepareFederatedFlowStep1RequestInput(SFLoginInput loginInput) throws JsonProcessingException { Map<String, Object> data = new HashMap<>(); data.put(ClientAuthnParameter.ACCOUNT_NAME.name(), loginInput.getAccountName()); data.put(ClientAuthnParameter.AUTHENTICATOR.name(), loginInput.getAuthenticator()); data.put(ClientAuthnParameter.CLIENT_APP_ID.name(), loginInput.getAppId()); data.put(ClientAuthnParameter.CLIENT_APP_VERSION.name(), loginInput.getAppVersion()); ClientAuthnDTO authnData = new ClientAuthnDTO(data, null); String json = mapper.writeValueAsString(authnData); StringEntity input = new StringEntity(json, StandardCharsets.UTF_8); input.setContentType("application/json"); return input; } /** * Sets the authentication data for the third step of the federated authentication flow. * * @param postRequest The {@link HttpPost} request to update with authentication data. * @param loginInput The login information for the request. * @throws SnowflakeSQLException If an error occurs while preparing the request. */ private static void setFederatedFlowStep3PostRequestAuthData( HttpPost postRequest, SFLoginInput loginInput) throws SnowflakeSQLException { String userName = isNullOrEmpty(loginInput.getOKTAUserName()) ? loginInput.getUserName() : loginInput.getOKTAUserName(); try { StringEntity params = new StringEntity( "{\"username\":\"" + userName + "\",\"password\":\"" + loginInput.getPassword() + "\"}"); postRequest.setEntity(params); HeaderGroup headers = new HeaderGroup(); headers.addHeader(new BasicHeader(HttpHeaders.ACCEPT, "application/json")); headers.addHeader(new BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json")); postRequest.setHeaders(headers.getAllHeaders()); } catch (IOException ex) { handleFederatedFlowError(loginInput, ex); } } /** * Prepares an HTTP GET request for the fourth step of the federated authentication flow. * * @param retrieveSamlRequest The {@link HttpRequestBase} to update with the SAML request details. * @param ssoUrl The SSO URL to use for the request. * @param oneTimeToken The one-time token to include in the request. * @throws MalformedURLException If the SSO URL is malformed. * @throws URISyntaxException If the URI for the request cannot be built.
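* Note: RelayState is set to a fixed placeholder value; only the onetimetoken query parameter carries per-request state.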
*/ private static void prepareFederatedFlowStep4Request( HttpRequestBase retrieveSamlRequest, String ssoUrl, String oneTimeToken) throws MalformedURLException, URISyntaxException { final URL url = new URL(ssoUrl); URI oktaGetUri = new URIBuilder() .setScheme(url.getProtocol()) .setHost(url.getHost()) .setPort(url.getPort()) .setPath(url.getPath()) .setParameter("RelayState", "%2Fsome%2Fdeep%2Flink") .setParameter("onetimetoken", oneTimeToken) .build(); retrieveSamlRequest.setURI(oktaGetUri); HeaderGroup headers = new HeaderGroup(); headers.addHeader(new BasicHeader(HttpHeaders.ACCEPT, "*/*")); retrieveSamlRequest.setHeaders(headers.getAllHeaders()); } private static void handleEmptyAuthResponse( String theString, SFLoginInput loginInput, Exception lastRestException) throws Exception, SFException { if (theString == null) { if (lastRestException != null) { logger.error( "Failed to open new session for user: {}, host: {}. Error: {}", loginInput.getUserName(), loginInput.getHostFromServerUrl(), lastRestException); throw lastRestException; } else { SnowflakeSQLException exception = new SnowflakeSQLException( NO_QUERY_ID, "empty authentication response", SqlState.CONNECTION_EXCEPTION, ErrorCode.CONNECTION_ERROR.getMessageCode()); logger.error( "Failed to open new session for user: {}, host: {}. Error: {}", loginInput.getUserName(), loginInput.getHostFromServerUrl(), exception); throw exception; } } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SessionUtilExternalBrowser.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import java.awt.Desktop; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStreamReader; import java.io.PrintWriter; import java.net.InetAddress; import java.net.ServerSocket; import java.net.Socket; import java.net.SocketTimeoutException; import java.net.URI; import java.net.URISyntaxException; import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; import java.security.SecureRandom; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Base64; import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.TimeZone; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.core.auth.ClientAuthnDTO; import net.snowflake.client.internal.core.auth.ClientAuthnParameter; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.common.core.SqlState; import org.apache.http.NameValuePair; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.utils.URIBuilder; import org.apache.http.client.utils.URLEncodedUtils; import org.apache.http.entity.StringEntity; /** * SAML 2.0 Compliant service/application federated authentication 1. Query GS to obtain IDP SSO url * 2. Listen a localhost port to accept Saml response 3. Open a browser in the backend so that the * user can type IdP username and password. 4. Return token and proof key to the GS to gain access. 
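* The localhost listener must also answer a CORS preflight (OPTIONS) request before the browser will POST the token cross-origin; see processOptions and processSamlToken below.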
*/ public class SessionUtilExternalBrowser { private static final SFLogger logger = SFLoggerFactory.getLogger(SessionUtilExternalBrowser.class); public interface AuthExternalBrowserHandlers { // build a HTTP post object HttpPost build(URI uri); // open a browser void openBrowser(String ssoUrl) throws SFException; // output void output(String msg); } public static class DefaultAuthExternalBrowserHandlers implements AuthExternalBrowserHandlers { @Override public HttpPost build(URI uri) { return new HttpPost(uri); } @Override public void openBrowser(String ssoUrl) throws SFException { if (!URLUtil.isValidURL(ssoUrl)) { throw new SFException(ErrorCode.INVALID_CONNECTION_URL, "Invalid SSOUrl found - " + ssoUrl); } try { // start web browser Runtime runtime = Runtime.getRuntime(); Constants.OS os = Constants.getOS(); if (Desktop.isDesktopSupported() && Desktop.getDesktop().isSupported(Desktop.Action.BROWSE)) { Desktop.getDesktop().browse(new URI(ssoUrl)); } else if (os == Constants.OS.MAC) { runtime.exec(new String[] {"open", ssoUrl}); } else if (os == Constants.OS.WINDOWS) { runtime.exec(new String[] {"rundll32", "url.dll,FileProtocolHandler", ssoUrl}); } else { runtime.exec(new String[] {"xdg-open", ssoUrl}); } } catch (URISyntaxException | IOException ex) { throw new SFException(ex, ErrorCode.NETWORK_ERROR, ex.getMessage()); } } @Override public void output(String msg) { System.out.println(msg); } } private final ObjectMapper mapper; private final SFLoginInput loginInput; String token; private boolean consentCacheIdToken; private String proofKey; private String origin; private AuthExternalBrowserHandlers handlers; private static final String PREFIX_GET = "GET "; private static final String PREFIX_POST = "POST "; private static final String PREFIX_OPTIONS = "OPTIONS "; private static final String PREFIX_USER_AGENT = "USER-AGENT: "; private static Charset UTF8_CHARSET; static { UTF8_CHARSET = Charset.forName("UTF-8"); } public static SessionUtilExternalBrowser createInstance(SFLoginInput loginInput) { return new SessionUtilExternalBrowser(loginInput, new DefaultAuthExternalBrowserHandlers()); } public SessionUtilExternalBrowser(SFLoginInput loginInput, AuthExternalBrowserHandlers handlers) { this.mapper = ObjectMapperFactory.getObjectMapper(); this.loginInput = loginInput; this.handlers = handlers; this.consentCacheIdToken = true; // true by default this.origin = null; } /** * Gets a free port on localhost * * @return port number * @throws SFException raised if an error occurs. */ protected ServerSocket getServerSocket() throws SFException { try { return new ServerSocket( 0, // free port 0, // default number of connections InetAddress.getByName("localhost")); } catch (IOException ex) { throw new SFException(ex, ErrorCode.NETWORK_ERROR, ex.getMessage()); } } /** * Get a port listening * * @param ssocket server socket * @return port number */ protected int getLocalPort(ServerSocket ssocket) { return ssocket.getLocalPort(); } /** * Gets SSO URL and proof key * * @return SSO URL. 
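* @param port the localhost port the driver listens on for the browser redirect (sent to GS as BROWSER_MODE_REDIRECT_PORT)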
* @throws SFException if Snowflake error occurs * @throws SnowflakeSQLException if Snowflake SQL error occurs */ private String getSSOUrl(int port) throws SFException, SnowflakeSQLException { try { String serverUrl = loginInput.getServerUrl(); String authenticator = loginInput.getAuthenticator(); URIBuilder fedUriBuilder = new URIBuilder(serverUrl); fedUriBuilder.setPath(SessionUtil.SF_PATH_AUTHENTICATOR_REQUEST); URI fedUrlUri = fedUriBuilder.build(); HttpPost postRequest = this.handlers.build(fedUrlUri); Map<String, Object> data = new HashMap<>(); data.put(ClientAuthnParameter.AUTHENTICATOR.name(), authenticator); data.put(ClientAuthnParameter.ACCOUNT_NAME.name(), loginInput.getAccountName()); data.put(ClientAuthnParameter.LOGIN_NAME.name(), loginInput.getUserName()); data.put(ClientAuthnParameter.BROWSER_MODE_REDIRECT_PORT.name(), Integer.toString(port)); data.put(ClientAuthnParameter.CLIENT_APP_ID.name(), loginInput.getAppId()); data.put(ClientAuthnParameter.CLIENT_APP_VERSION.name(), loginInput.getAppVersion()); ClientAuthnDTO authnData = new ClientAuthnDTO(data, null); String json = mapper.writeValueAsString(authnData); // attach the login info json body to the post request StringEntity input = new StringEntity(json, StandardCharsets.UTF_8); input.setContentType("application/json"); postRequest.setEntity(input); postRequest.addHeader("accept", "application/json"); String theString = HttpUtil.executeGeneralRequestWithContext( postRequest, loginInput.getLoginTimeout(), loginInput.getAuthTimeout(), loginInput.getSocketTimeoutInMillis(), 0, 0, loginInput.getHttpClientSettingsKey(), null) .getResponseBody(); logger.debug("Authenticator-request response: {}", theString); // general method, same as with data binding JsonNode jsonNode = mapper.readTree(theString); // check the success field first if (!jsonNode.path("success").asBoolean()) { logger.debug("Response: {}", theString); String errorCode = jsonNode.path("code").asText(); throw new SnowflakeSQLException( SqlState.SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION, Integer.valueOf(errorCode), jsonNode.path("message").asText()); } JsonNode dataNode = jsonNode.path("data"); // session token is in the data field of the returned json response this.proofKey = dataNode.path("proofKey").asText(); return dataNode.path("ssoUrl").asText(); } catch (IOException | URISyntaxException ex) { throw new SFException(ex, ErrorCode.NETWORK_ERROR, ex.getMessage()); } } private String getConsoleLoginUrl(int port) throws SFException { try { proofKey = generateProofKey(); String serverUrl = loginInput.getServerUrl(); URIBuilder consoleLoginUriBuilder = new URIBuilder(serverUrl); consoleLoginUriBuilder.setPath(SessionUtil.SF_PATH_CONSOLE_LOGIN_REQUEST); consoleLoginUriBuilder.addParameter("login_name", loginInput.getUserName()); consoleLoginUriBuilder.addParameter("browser_mode_redirect_port", Integer.toString(port)); consoleLoginUriBuilder.addParameter("proof_key", proofKey); String consoleLoginUrl = consoleLoginUriBuilder.build().toURL().toString(); logger.debug("Console login url: {}", consoleLoginUrl); return consoleLoginUrl; } catch (Exception ex) { throw new SFException(ex, ErrorCode.INTERNAL_ERROR, ex.getMessage()); } } private String generateProofKey() { SecureRandom secureRandom = new SecureRandom(); byte[] randomness = new byte[32]; secureRandom.nextBytes(randomness); return Base64.getEncoder().encodeToString(randomness); } private int getBrowserResponseTimeout() { return (int) loginInput.getBrowserResponseTimeout().toMillis(); } /** * Authenticate * * @throws
SFException if any error occurs * @throws SnowflakeSQLException if any error occurs */ void authenticate() throws SFException, SnowflakeSQLException { ServerSocket ssocket = this.getServerSocket(); try { ssocket.setSoTimeout(getBrowserResponseTimeout()); // main procedure int port = this.getLocalPort(ssocket); logger.debug("Listening localhost: {}", port); if (loginInput.getDisableConsoleLogin()) { // Access GS to get SSO URL String ssoUrl = getSSOUrl(port); this.handlers.output( "Initiating login request with your identity provider. A " + "browser window should have opened for you to complete the " + "login. If you can't see it, check existing browser windows, " + "or your OS settings. Press CTRL+C to abort and try again..."); this.handlers.openBrowser(ssoUrl); } else { // Console login flow: supports multiple SAML identity providers String consoleLoginUrl = getConsoleLoginUrl(port); this.handlers.output( "Initiating login request with your identity provider(s). A " + "browser window should have opened for you to complete the " + "login. If you can't see it, check existing browser windows, " + "or your OS settings. Press CTRL+C to abort and try again..."); this.handlers.openBrowser(consoleLoginUrl); } while (true) { Socket socket = ssocket.accept(); // start accepting the request try { BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream(), UTF8_CHARSET)); char[] buf = new char[16384]; int strLen = in.read(buf); String[] rets = new String(buf, 0, strLen).split("\r\n"); if (!processOptions(rets, socket)) { processSamlToken(rets, socket); break; } } finally { socket.close(); } } } catch (SocketTimeoutException e) { throw new SFException( e, ErrorCode.NETWORK_ERROR, "External browser authentication failed within timeout of " + getBrowserResponseTimeout() + " milliseconds"); } catch (IOException ex) { throw new SFException(ex, ErrorCode.NETWORK_ERROR, ex.getMessage()); } finally { try { ssocket.close(); } catch (IOException ex) { throw new SFException(ex, ErrorCode.NETWORK_ERROR, ex.getMessage()); } } } private boolean processOptions(String[] rets, Socket socket) throws IOException { String targetLine = null; String userAgent = null; String requestedHeaderLine = null; for (String line : rets) { if (line.length() > PREFIX_OPTIONS.length() && line.substring(0, PREFIX_OPTIONS.length()).equalsIgnoreCase(PREFIX_OPTIONS)) { targetLine = line; } else if (line.length() > PREFIX_USER_AGENT.length() && line.substring(0, PREFIX_USER_AGENT.length()).equalsIgnoreCase(PREFIX_USER_AGENT)) { userAgent = line; } else if (line.startsWith("Access-Control-Request-Method")) { String[] kv = line.split(":"); if (kv.length != 2) { logger.error("no value for HTTP header: Access-Control-Request-Method. line={}", line); return false; } if (!kv[1].trim().contains("POST")) { return false; } } else if (line.startsWith("Access-Control-Request-Headers")) { String[] kv = line.split(":"); if (kv.length != 2) { logger.error("no value for HTTP header: Access-Control-Request-Headers. line={}", line); return false; } requestedHeaderLine = kv[1].trim(); } else if (line.startsWith("Origin")) { String[] kv = line.split(":"); if (kv.length < 2) { logger.error("no value for HTTP header: Origin.
line={}", line); return false; } this.origin = line.substring(line.indexOf(':') + 1).trim(); } } if (userAgent != null) { logger.debug("{}", userAgent); } if (isNullOrEmpty(targetLine) || isNullOrEmpty(requestedHeaderLine) || isNullOrEmpty(this.origin)) { return false; } returnToBrowserForOptions(requestedHeaderLine, socket); return true; } private void returnToBrowserForOptions(String requestedHeader, Socket socket) throws IOException { PrintWriter out = new PrintWriter(socket.getOutputStream(), true); SimpleDateFormat fmt = new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss"); fmt.setTimeZone(TimeZone.getTimeZone("UTC")); String[] content = { "HTTP/1.1 200 OK", String.format("Date: %s", fmt.format(new Date()) + " GMT"), "Access-Control-Allow-Methods: POST, GET", String.format("Access-Control-Allow-Headers: %s", requestedHeader), "Access-Control-Max-Age: 86400", String.format("Access-Control-Allow-Origin: %s", this.origin), "", "" }; for (int i = 0; i < content.length; ++i) { if (i > 0) { out.print("\r\n"); } out.print(content[i]); } out.flush(); } /** * Receives SAML token from Snowflake via web browser * * @param socket socket * @throws IOException if any IO error occurs * @throws SFException if a HTTP request from browser is invalid */ private void processSamlToken(String[] rets, Socket socket) throws IOException, SFException { String targetLine = null; String userAgent = null; boolean isPost = false; for (String line : rets) { if (line.length() > PREFIX_GET.length() && line.substring(0, PREFIX_GET.length()).equalsIgnoreCase(PREFIX_GET)) { targetLine = line; } else if (line.length() > PREFIX_POST.length() && line.substring(0, PREFIX_POST.length()).equalsIgnoreCase(PREFIX_POST)) { targetLine = rets[rets.length - 1]; isPost = true; } else if (line.length() > PREFIX_USER_AGENT.length() && line.substring(0, PREFIX_USER_AGENT.length()).equalsIgnoreCase(PREFIX_USER_AGENT)) { userAgent = line; } } if (targetLine == null) { throw new SFException( ErrorCode.NETWORK_ERROR, "Invalid HTTP request. No token is given from the browser."); } if (userAgent != null) { logger.debug("{}", userAgent); } try { // attempt to get JSON response extractJsonTokenFromPostRequest(targetLine); } catch (IOException ex) { String parameters = isPost ? extractTokenFromPostRequest(targetLine) : extractTokenFromGetRequest(targetLine); try { URI inputParameter = new URI(parameters); for (NameValuePair urlParam : URLEncodedUtils.parse(inputParameter, UTF8_CHARSET)) { if ("token".equals(urlParam.getName())) { this.token = urlParam.getValue(); break; } } } catch (URISyntaxException ex0) { throw new SFException( ErrorCode.NETWORK_ERROR, String.format( "Invalid HTTP request. No token is given from the browser. %s, err: %s", targetLine, ex0)); } } if (this.token == null) { throw new SFException( ErrorCode.NETWORK_ERROR, String.format( "Invalid HTTP request. No token is given from the browser: %s", targetLine)); } returnToBrowser(socket); } private void extractJsonTokenFromPostRequest(String targetLine) throws IOException { JsonNode jsonNode = mapper.readTree(targetLine); this.token = jsonNode.get("token").asText(); this.consentCacheIdToken = jsonNode.get("consent").asBoolean(); } private String extractTokenFromPostRequest(String targetLine) { return "/?" 
+ targetLine; } private String extractTokenFromGetRequest(String targetLine) throws SFException { String[] elems = targetLine.split("\\s"); if (elems.length != 3 || !elems[0].toLowerCase(Locale.US).equalsIgnoreCase("GET") || !elems[2].startsWith("HTTP/1.")) { throw new SFException( ErrorCode.NETWORK_ERROR, String.format( "Invalid HTTP request. No token is given from the browser: %s", targetLine)); } return elems[1]; } /** * Output the message to the browser * * @param socket client socket * @throws IOException if any IO error occurs */ private void returnToBrowser(Socket socket) throws IOException { PrintWriter out = new PrintWriter(socket.getOutputStream(), true); List<String> content = new ArrayList<>(); content.add("HTTP/1.0 200 OK"); content.add("Content-Type: text/html"); String responseText; if (this.origin != null) { content.add(String.format("Access-Control-Allow-Origin: %s", this.origin)); content.add("Vary: Accept-Encoding, Origin"); Map<String, Object> data = new HashMap<>(); data.put("consent", this.consentCacheIdToken); responseText = mapper.writeValueAsString(data); } else { responseText = "<html><head><title>" + "SAML Response for Snowflake</title></head><body>" + "Your identity was confirmed and propagated to " + "Snowflake JDBC driver. You can close this window now and go back " + "where you started from.</body></html>"; } content.add(String.format("Content-Length: %s", responseText.length())); content.add(""); content.add(responseText); for (int i = 0; i < content.size(); ++i) { if (i > 0) { out.print("\r\n"); } out.print(content.get(i)); } out.flush(); } /** * Returns encoded SAML token * * @return SAML token */ String getToken() { return this.token; } /** * Returns the proof key provided by GS in the first roundtrip and sent back to GS in the final * login-request authentication. * * @return proofkey */ String getProofKey() { return this.proofKey; } /** * True if the user consented to cache the id token * * @return true or false */ boolean isConsentCacheIdToken() { return this.consentCacheIdToken; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SessionUtilKeyPair.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetEnv; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import com.nimbusds.jose.JOSEException; import com.nimbusds.jose.JWSAlgorithm; import com.nimbusds.jose.JWSHeader; import com.nimbusds.jose.JWSSigner; import com.nimbusds.jose.crypto.RSASSASigner; import com.nimbusds.jwt.JWTClaimsSet; import com.nimbusds.jwt.SignedJWT; import java.io.IOException; import java.io.StringReader; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.security.InvalidKeyException; import java.security.KeyFactory; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.security.PrivateKey; import java.security.Provider; import java.security.PublicKey; import java.security.Security; import java.security.interfaces.RSAPrivateCrtKey; import java.security.spec.InvalidKeySpecException; import java.security.spec.PKCS8EncodedKeySpec; import java.security.spec.RSAPublicKeySpec; import java.util.Date; import javax.crypto.EncryptedPrivateKeyInfo; import javax.crypto.SecretKeyFactory; import javax.crypto.spec.PBEKeySpec; import net.snowflake.client.api.exception.ErrorCode;
import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import org.apache.commons.codec.binary.Base64; import org.bouncycastle.asn1.pkcs.PrivateKeyInfo; import org.bouncycastle.openssl.PEMKeyPair; import org.bouncycastle.openssl.PEMParser; import org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter; import org.bouncycastle.openssl.jcajce.JceOpenSSLPKCS8DecryptorProviderBuilder; import org.bouncycastle.operator.InputDecryptorProvider; import org.bouncycastle.operator.OperatorCreationException; import org.bouncycastle.pkcs.PKCS8EncryptedPrivateKeyInfo; import org.bouncycastle.pkcs.PKCSException; import org.bouncycastle.util.io.pem.PemReader; /** Class used to compute jwt token for key pair authentication. */ class SessionUtilKeyPair { private static final SFLogger logger = SFLoggerFactory.getLogger(SessionUtilKeyPair.class); // user name in upper case private final String userName; // account name in upper case private final String accountName; private final PrivateKey privateKey; private PublicKey publicKey = null; private boolean isFipsMode = false; private Provider SecurityProvider = null; private static final String ISSUER_FMT = "%s.%s.%s"; private static final String SUBJECT_FMT = "%s.%s"; private static final int JWT_DEFAULT_AUTH_TIMEOUT = 0; private boolean useBundledBouncyCastleForPrivateKeyDecryption = true; SessionUtilKeyPair( PrivateKey privateKey, String privateKeyFile, String privateKeyBase64, String privateKeyPwd, String accountName, String userName) throws SFException { this.userName = userName.toUpperCase(); this.accountName = excludeRegionInformation(accountName).toUpperCase(); String useBundledBouncyCastleJvm = systemGetProperty(SecurityUtil.USE_BUNDLED_BOUNCY_CASTLE_FOR_PRIVATE_KEY_DECRYPTION_JVM); if (useBundledBouncyCastleJvm != null) { useBundledBouncyCastleForPrivateKeyDecryption = useBundledBouncyCastleJvm.equalsIgnoreCase("true"); } // check if in FIPS mode for (Provider p : Security.getProviders()) { if (SecurityUtil.BOUNCY_CASTLE_FIPS_PROVIDER.equals(p.getName())) { this.isFipsMode = true; this.SecurityProvider = p; break; } } ensurePrivateKeyProvidedInOnlyOneProperty(privateKey, privateKeyFile, privateKeyBase64); this.privateKey = buildPrivateKey(privateKey, privateKeyFile, privateKeyBase64, privateKeyPwd); // construct public key from raw bytes if (this.privateKey instanceof RSAPrivateCrtKey) { RSAPrivateCrtKey rsaPrivateCrtKey = (RSAPrivateCrtKey) this.privateKey; RSAPublicKeySpec rsaPublicKeySpec = new RSAPublicKeySpec(rsaPrivateCrtKey.getModulus(), rsaPrivateCrtKey.getPublicExponent()); try { this.publicKey = getKeyFactoryInstance().generatePublic(rsaPublicKeySpec); } catch (NoSuchAlgorithmException | InvalidKeySpecException e) { throw new SFException(e, ErrorCode.INTERNAL_ERROR, "Error retrieving public key"); } } else { throw new SFException( ErrorCode.INVALID_OR_UNSUPPORTED_PRIVATE_KEY, "Use java.security.interfaces.RSAPrivateCrtKey.class for the private key"); } } private static void ensurePrivateKeyProvidedInOnlyOneProperty( PrivateKey privateKey, String privateKeyFile, String privateKeyBase64) throws SFException { if (!isNullOrEmpty(privateKeyFile) && privateKey != null) { throw new SFException( ErrorCode.INVALID_OR_UNSUPPORTED_PRIVATE_KEY, "Cannot have both private key object and private key file."); } if (!isNullOrEmpty(privateKeyBase64) && !isNullOrEmpty(privateKeyFile)) { throw new SFException( ErrorCode.INVALID_OR_UNSUPPORTED_PRIVATE_KEY, "Cannot have both private key file and private key base64 
string value."); } if (!isNullOrEmpty(privateKeyBase64) && privateKey != null) { throw new SFException( ErrorCode.INVALID_OR_UNSUPPORTED_PRIVATE_KEY, "Cannot have both private key object and private key base64 string value."); } } private PrivateKey buildPrivateKey( PrivateKey privateKey, String privateKeyFile, String privateKeyBase64, String privateKeyPwd) throws SFException { if (!isNullOrEmpty(privateKeyBase64)) { logger.trace("Reading private key from base64 string"); return extractPrivateKeyFromBase64(privateKeyBase64, privateKeyPwd); } if (!isNullOrEmpty(privateKeyFile)) { logger.trace("Reading private key from file"); return extractPrivateKeyFromFile(privateKeyFile, privateKeyPwd); } return privateKey; } private KeyFactory getKeyFactoryInstance() throws NoSuchAlgorithmException { if (isFipsMode) { return KeyFactory.getInstance("RSA", this.SecurityProvider); } else { return KeyFactory.getInstance("RSA"); } } private SecretKeyFactory getSecretKeyFactory(String algorithm) throws NoSuchAlgorithmException { if (isFipsMode) { return SecretKeyFactory.getInstance(algorithm, this.SecurityProvider); } else { return SecretKeyFactory.getInstance(algorithm); } } private PrivateKey extractPrivateKeyFromFile(String privateKeyFile, String privateKeyPwd) throws SFException { try { Path privKeyPath = Paths.get(privateKeyFile); FileUtil.logFileUsage(privKeyPath, "Extract private key from file", true); byte[] bytes = Files.readAllBytes(privKeyPath); return extractPrivateKeyFromBytes(bytes, privateKeyPwd); } catch (IOException ie) { logger.error("Could not read private key from file", ie); throw new SFException(ie, ErrorCode.INVALID_PARAMETER_VALUE, ie.getCause()); } } private PrivateKey extractPrivateKeyFromBytes(byte[] privateKeyBytes, String privateKeyPwd) throws SFException { if (useBundledBouncyCastleForPrivateKeyDecryption) { try { return extractPrivateKeyWithBouncyCastle(privateKeyBytes, privateKeyPwd); } catch (IOException | PKCSException | OperatorCreationException e) { logger.error("Could not extract private key using Bouncy Castle provider", e); throw new SFException(e, ErrorCode.INVALID_OR_UNSUPPORTED_PRIVATE_KEY, e.getCause()); } } else { try { return extractPrivateKeyWithJdk(privateKeyBytes, privateKeyPwd); } catch (NoSuchAlgorithmException | InvalidKeySpecException | IOException | IllegalArgumentException | NullPointerException | InvalidKeyException e) { logger.error( "Could not extract private key using standard JDK. 
Try setting the JVM argument: " + "-D{}" + "=TRUE", SecurityUtil.USE_BUNDLED_BOUNCY_CASTLE_FOR_PRIVATE_KEY_DECRYPTION_JVM); throw new SFException(e, ErrorCode.INVALID_OR_UNSUPPORTED_PRIVATE_KEY, e.getMessage()); } } } private PrivateKey extractPrivateKeyFromBase64(String privateKeyBase64, String privateKeyPwd) throws SFException { byte[] decodedKey = Base64.decodeBase64(privateKeyBase64); return extractPrivateKeyFromBytes(decodedKey, privateKeyPwd); } public String issueJwtToken() throws SFException { JWTClaimsSet.Builder builder = new JWTClaimsSet.Builder(); String sub = String.format(SUBJECT_FMT, this.accountName, this.userName); String iss = String.format( ISSUER_FMT, this.accountName, this.userName, this.calculatePublicKeyFingerprint(this.publicKey)); // iat is now Date iat = new Date(System.currentTimeMillis()); // expiration is 60 seconds later Date exp = new Date(iat.getTime() + 60L * 1000); JWTClaimsSet claimsSet = builder.issuer(iss).subject(sub).issueTime(iat).expirationTime(exp).build(); SignedJWT signedJWT = new SignedJWT(new JWSHeader(JWSAlgorithm.RS256), claimsSet); JWSSigner signer = new RSASSASigner(this.privateKey); try { signedJWT.sign(signer); } catch (JOSEException e) { throw new SFException(e, ErrorCode.FAILED_TO_GENERATE_JWT); } // Log the contents of the token, displaying expiration and issue time in epoch time logger.debug( "JWT:\n'{'\niss: {}\nsub: {}\niat: {}\nexp: {}\n'}'", iss, sub, String.valueOf(iat.getTime() / 1000), String.valueOf(exp.getTime() / 1000)); return signedJWT.serialize(); } private String calculatePublicKeyFingerprint(PublicKey publicKey) throws SFException { try { MessageDigest md = MessageDigest.getInstance("SHA-256"); byte[] sha256Hash = md.digest(publicKey.getEncoded()); return "SHA256:" + Base64.encodeBase64String(sha256Hash); } catch (NoSuchAlgorithmException e) { throw new SFException(e, ErrorCode.INTERNAL_ERROR, "Error when calculating fingerprint"); } } public static int getTimeout() { String jwtAuthTimeoutStr = systemGetEnv("JWT_AUTH_TIMEOUT"); int jwtAuthTimeout = JWT_DEFAULT_AUTH_TIMEOUT; if (jwtAuthTimeoutStr != null) { jwtAuthTimeout = Integer.parseInt(jwtAuthTimeoutStr); } return jwtAuthTimeout; } private PrivateKey extractPrivateKeyWithBouncyCastle(byte[] privateKeyBytes, String privateKeyPwd) throws IOException, PKCSException, OperatorCreationException { logger.trace("Extracting private key using Bouncy Castle provider"); PrivateKeyInfo privateKeyInfo = null; PEMParser pemParser = new PEMParser(new StringReader(new String(privateKeyBytes, StandardCharsets.UTF_8))); Object pemObject = pemParser.readObject(); if (pemObject instanceof PKCS8EncryptedPrivateKeyInfo) { // Handle the case where the private key is encrypted. PKCS8EncryptedPrivateKeyInfo encryptedPrivateKeyInfo = (PKCS8EncryptedPrivateKeyInfo) pemObject; InputDecryptorProvider pkcs8Prov = new JceOpenSSLPKCS8DecryptorProviderBuilder().build(privateKeyPwd.toCharArray()); privateKeyInfo = encryptedPrivateKeyInfo.decryptPrivateKeyInfo(pkcs8Prov); } else if (pemObject instanceof PEMKeyPair) { // PKCS#1 private key privateKeyInfo = ((PEMKeyPair) pemObject).getPrivateKeyInfo(); } else if (pemObject instanceof PrivateKeyInfo) { // Handle the case where the private key is unencrypted. privateKeyInfo = (PrivateKeyInfo) pemObject; } pemParser.close(); JcaPEMKeyConverter converter = new JcaPEMKeyConverter() .setProvider( isFipsMode ? 
SecurityUtil.BOUNCY_CASTLE_FIPS_PROVIDER : SecurityUtil.BOUNCY_CASTLE_PROVIDER); return converter.getPrivateKey(privateKeyInfo); } private PrivateKey extractPrivateKeyWithJdk(byte[] privateKeyFileBytes, String privateKeyPwd) throws IOException, NoSuchAlgorithmException, InvalidKeySpecException, InvalidKeyException { logger.trace("Extracting private key using JDK"); String privateKeyContent = new String(privateKeyFileBytes, StandardCharsets.UTF_8); if (isNullOrEmpty(privateKeyPwd)) { // unencrypted private key file return generatePrivateKey(false, privateKeyContent, privateKeyPwd); } else { // encrypted private key file return generatePrivateKey(true, privateKeyContent, privateKeyPwd); } } private PrivateKey generatePrivateKey( boolean isEncrypted, String privateKeyContent, String privateKeyPwd) throws IOException, NoSuchAlgorithmException, InvalidKeySpecException, InvalidKeyException { if (isEncrypted) { try (PemReader pr = new PemReader(new StringReader(privateKeyContent))) { byte[] decoded = pr.readPemObject().getContent(); pr.close(); EncryptedPrivateKeyInfo pkInfo = new EncryptedPrivateKeyInfo(decoded); PBEKeySpec keySpec = new PBEKeySpec(privateKeyPwd.toCharArray()); SecretKeyFactory pbeKeyFactory = this.getSecretKeyFactory(pkInfo.getAlgName()); PKCS8EncodedKeySpec encodedKeySpec = pkInfo.getKeySpec(pbeKeyFactory.generateSecret(keySpec)); KeyFactory keyFactory = getKeyFactoryInstance(); return keyFactory.generatePrivate(encodedKeySpec); } } else { try (PemReader pr = new PemReader(new StringReader(privateKeyContent))) { byte[] decoded = pr.readPemObject().getContent(); pr.close(); PKCS8EncodedKeySpec encodedKeySpec = new PKCS8EncodedKeySpec(decoded); KeyFactory keyFactory = getKeyFactoryInstance(); return keyFactory.generatePrivate(encodedKeySpec); } } } private static String excludeRegionInformation(String accountName) { int dotIndex = accountName.indexOf('.'); return dotIndex > 0 ? 
accountName.substring(0, dotIndex) : accountName; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SfSqlArray.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.core.FieldSchemaCreator.buildBindingSchemaForType; import static net.snowflake.client.internal.core.FieldSchemaCreator.logger; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import java.sql.Array; import java.sql.JDBCType; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.SQLFeatureNotSupportedException; import java.util.Arrays; import java.util.Map; import net.snowflake.client.internal.jdbc.BindingParameterMetadata; public class SfSqlArray implements Array { private final String text; private final int baseType; private final Object elements; private String jsonStringFromElements; private final ObjectMapper objectMapper; public SfSqlArray( String text, int baseType, Object elements, SFBaseSession session, ObjectMapper objectMapper) { this.text = text; this.baseType = baseType; this.elements = elements; this.objectMapper = objectMapper; } public SfSqlArray( int baseType, Object elements, SFBaseSession session, ObjectMapper objectMapper) { this(null, baseType, elements, session, objectMapper); } @Override public String getBaseTypeName() throws SQLException { return JDBCType.valueOf(baseType).getName(); } @Override public int getBaseType() throws SQLException { return baseType; } @Override public Object getArray() throws SQLException { return elements; } @Override public Object getArray(Map> map) throws SQLException { throw new SQLFeatureNotSupportedException("getArray(Map> map)"); } @Override public Object getArray(long index, int count) throws SQLException { throw new SQLFeatureNotSupportedException("getArray(long index, int count)"); } @Override public Object getArray(long index, int count, Map> map) throws SQLException { throw new SQLFeatureNotSupportedException( "getArray(long index, int count, Map> map)"); } @Override public ResultSet getResultSet() throws SQLException { throw new SQLFeatureNotSupportedException( "getArray(long index, int count, Map> map)"); } @Override public ResultSet getResultSet(Map> map) throws SQLException { throw new SQLFeatureNotSupportedException("getResultSet(Map> map)"); } @Override public ResultSet getResultSet(long index, int count) throws SQLException { throw new SQLFeatureNotSupportedException("getResultSet(long index, int count)"); } @Override public ResultSet getResultSet(long index, int count, Map> map) throws SQLException { throw new SQLFeatureNotSupportedException( "getResultSet(long index, int count, Map> map)"); } @Override public void free() throws SQLException {} public String getText() { if (text == null) { logger.warn("Text field wasn't initialized. 
Should never happen."); } return text; } public String getJsonString() throws SQLException { if (jsonStringFromElements == null) { jsonStringFromElements = buildJsonStringFromElements(elements); } return jsonStringFromElements; } private String buildJsonStringFromElements(Object elements) throws SQLException { try { return objectMapper.writeValueAsString(elements); } catch (JsonProcessingException e) { throw new SQLException("There is exception during array to json string.", e); } } public BindingParameterMetadata getSchema() throws SQLException { return BindingParameterMetadata.BindingParameterMetadataBuilder.bindingParameterMetadata() .withType("array") .withFields(Arrays.asList(buildBindingSchemaForType(getBaseType(), false))) .build(); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SfTimestampUtil.java ================================================ package net.snowflake.client.internal.core; import java.sql.Time; import java.sql.Timestamp; import java.sql.Types; import java.util.TimeZone; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.common.core.SnowflakeDateTimeFormat; public class SfTimestampUtil { static final long MS_IN_DAY = 86400 * 1000; public static Timestamp getTimestampFromType( int columnSubType, String value, SFBaseSession session, TimeZone sessionTimeZone, TimeZone tz) { if (columnSubType == SnowflakeType.EXTRA_TYPES_TIMESTAMP_LTZ) { return getTimestampFromFormat( "TIMESTAMP_LTZ_OUTPUT_FORMAT", value, session, sessionTimeZone, tz); } else if (columnSubType == SnowflakeType.EXTRA_TYPES_TIMESTAMP_NTZ || columnSubType == Types.TIMESTAMP) { return getTimestampFromFormat( "TIMESTAMP_NTZ_OUTPUT_FORMAT", value, session, sessionTimeZone, TimeZone.getDefault()); } else if (columnSubType == SnowflakeType.EXTRA_TYPES_TIMESTAMP_TZ) { return getTimestampFromFormat( "TIMESTAMP_TZ_OUTPUT_FORMAT", value, session, sessionTimeZone, tz); } else { return null; } } private static Timestamp getTimestampFromFormat( String format, String value, SFBaseSession session, TimeZone sessionTimeZone, TimeZone tz) { String rawFormat = (String) session.getCommonParameters().get(format); if (rawFormat == null || rawFormat.isEmpty()) { rawFormat = (String) session.getCommonParameters().get("TIMESTAMP_OUTPUT_FORMAT"); } if (tz == null) { tz = sessionTimeZone; } SnowflakeDateTimeFormat formatter = SnowflakeDateTimeFormat.fromSqlFormat(rawFormat); return formatter.parse(value, tz, 0, false).getTimestamp(); } public static long getTimeInNanoseconds(Time x) { long msSinceEpoch = x.getTime(); // Use % + % instead of just % to get the nonnegative remainder. // TODO(mkember): Change to use Math.floorMod when Client is on Java 8. 
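// Worked example (illustrative, not part of the original source): a Time one
// hour before the epoch (23:00:00 UTC on 1969-12-31) has getTime() =
// -3_600_000. A plain % would yield -3_600_000, whereas
// (-3_600_000 % MS_IN_DAY + MS_IN_DAY) % MS_IN_DAY = 82_800_000 ms, i.e.
// 23:00 after midnight, as intended. On Java 8+ this is equivalent to
// Math.floorMod(msSinceEpoch, MS_IN_DAY).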
long msSinceMidnight = (msSinceEpoch % MS_IN_DAY + MS_IN_DAY) % MS_IN_DAY; long nanosSinceMidnight = msSinceMidnight * 1000 * 1000; return nanosSinceMidnight; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SnowflakeMutableProxyRoutePlanner.java ================================================ package net.snowflake.client.internal.core; import java.io.Serializable; import org.apache.http.HttpException; import org.apache.http.HttpHost; import org.apache.http.HttpRequest; import org.apache.http.conn.routing.HttpRoute; import org.apache.http.conn.routing.HttpRoutePlanner; import org.apache.http.protocol.HttpContext; /** * This class defines a ProxyRoutePlanner (used for creating HttpClients) that has the ability to * change the nonProxyHosts setting. */ public class SnowflakeMutableProxyRoutePlanner implements HttpRoutePlanner, Serializable { private SdkProxyRoutePlanner proxyRoutePlanner = null; private String host; private int proxyPort; private String nonProxyHosts; private HttpProtocol protocol; /** * @param host host * @param proxyPort proxy port * @param proxyProtocol proxy protocol * @param nonProxyHosts non-proxy hosts */ public SnowflakeMutableProxyRoutePlanner( String host, int proxyPort, HttpProtocol proxyProtocol, String nonProxyHosts) { proxyRoutePlanner = new SdkProxyRoutePlanner(host, proxyPort, proxyProtocol, nonProxyHosts); this.host = host; this.proxyPort = proxyPort; this.nonProxyHosts = nonProxyHosts; this.protocol = proxyProtocol; } /** * Set non-proxy hosts * * @param nonProxyHosts non-proxy hosts */ public void setNonProxyHosts(String nonProxyHosts) { this.nonProxyHosts = nonProxyHosts; proxyRoutePlanner = new SdkProxyRoutePlanner(host, proxyPort, protocol, nonProxyHosts); } /** * @return non-proxy hosts string */ public String getNonProxyHosts() { return nonProxyHosts; } @Override public HttpRoute determineRoute(HttpHost target, HttpRequest request, HttpContext context) throws HttpException { return proxyRoutePlanner.determineRoute(target, request, context); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SpcsTokenReader.java ================================================ package net.snowflake.client.internal.core; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Paths; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; /** Reads the SPCS service-identifier token that the SPCS runtime writes into the container. */ public class SpcsTokenReader { private static final SFLogger logger = SFLoggerFactory.getLogger(SpcsTokenReader.class); static final String SPCS_RUNNING_INSIDE_ENV_VAR = "SNOWFLAKE_RUNNING_INSIDE_SPCS"; static final String SPCS_TOKEN_FILE_PATH = "/snowflake/session/spcs_token"; public String readSpcsToken() { if (!isRunningInsideSpcs()) { return null; } try { String token = new String(readTokenFileBytes(), StandardCharsets.UTF_8).trim(); if (token.isEmpty()) { logger.warn("SPCS token file at {} is empty", SPCS_TOKEN_FILE_PATH); return null; } return token; } catch (Exception ex) { logger.warn("Failed to read SPCS token from {}: {}", SPCS_TOKEN_FILE_PATH, ex.getMessage()); return null; } } // Overridable in tests via Mockito.spy(...). Production reads the real env / filesystem. 
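// ---------------------------------------------------------------------------
// Illustrative test sketch (not part of the original source): the two hooks
// below can be stubbed with a Mockito spy so that no real environment variable
// or token file is needed. "my-token" is a placeholder value.
//
//   SpcsTokenReader reader = Mockito.spy(new SpcsTokenReader());
//   Mockito.doReturn(true).when(reader).isRunningInsideSpcs();
//   Mockito.doReturn("my-token\n".getBytes(StandardCharsets.UTF_8))
//       .when(reader)
//       .readTokenFileBytes();
//   assert "my-token".equals(reader.readSpcsToken()); // trailing newline is trimmed
// ---------------------------------------------------------------------------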
boolean isRunningInsideSpcs() { String env = SnowflakeUtil.systemGetEnv(SPCS_RUNNING_INSIDE_ENV_VAR); return env != null && !env.isEmpty(); } byte[] readTokenFileBytes() throws IOException { return Files.readAllBytes(Paths.get(SPCS_TOKEN_FILE_PATH)); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/StmtUtil.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty; import static net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.internalCallMarker; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.net.URISyntaxException; import java.nio.charset.StandardCharsets; import java.util.HashMap; import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; import java.util.zip.GZIPOutputStream; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.core.BasicEvent.QueryState; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import net.snowflake.client.internal.jdbc.telemetry.ExecTimeTelemetryData; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.client.internal.util.SecretDetector; import net.snowflake.common.api.QueryInProgressResponse; import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpRequestBase; import org.apache.http.client.utils.URIBuilder; import org.apache.http.entity.ByteArrayEntity; import org.apache.http.entity.StringEntity; /** Statement Util */ public class StmtUtil { static final EventHandler eventHandler = EventUtil.getEventHandlerInstance(); static final ObjectMapper mapper = ObjectMapperFactory.getObjectMapper(); static final String SF_PATH_QUERY_V1 = "/queries/v1/query-request"; private static final String SF_PATH_ABORT_REQUEST_V1 = "/queries/v1/abort-request"; private static final String SF_PATH_QUERY_RESULT = "/queries/%s/result"; private static final String SF_QUERY_COMBINE_DESCRIBE_EXECUTE = "combinedDescribe"; static final String SF_MEDIA_TYPE = "application/snowflake"; // we don't want to retry canceling forever so put a limit which is // twice as much as our default socket timeout static final int SF_CANCELING_RETRY_TIMEOUT_IN_MILLIS = 600000; // 10 min private static final SFLogger logger = SFLoggerFactory.getLogger(StmtUtil.class); /** Input for executing a statement on server */ static class StmtInput { String sql; // default to snowflake (a special json format for snowflake query result String mediaType = SF_MEDIA_TYPE; Map bindValues; String bindStage; boolean describeOnly; String serverUrl; String requestId; int sequenceId = -1; boolean internal = false; boolean asyncExec = false; Map parametersMap; String sessionToken; int networkTimeoutInMillis; int socketTimeout; int injectSocketTimeout; // seconds int injectClientPause; // seconds int maxRetries; AtomicBoolean canceling = null; // canceling flag boolean retry; String prevGetResultURL = null; // previous get result URL from ping pong boolean combineDescribe = false; String describedJobId; long querySubmissionTime; // millis since epoch String serviceName; OCSPMode ocspMode; HttpClientSettingsKey 
httpClientSettingsKey; QueryContextDTO queryContextDTO; Map additionalHttpHeadersForSnowsight; StmtInput() {} public StmtInput setSql(String sql) { this.sql = sql; return this; } public StmtInput setMediaType(String mediaType) { this.mediaType = mediaType; return this; } public StmtInput setParametersMap(Map parametersMap) { this.parametersMap = parametersMap; return this; } public StmtInput setBindValues(Map bindValues) { this.bindValues = bindValues; return this; } public StmtInput setBindStage(String bindStage) { this.bindStage = bindStage; return this; } public StmtInput setDescribeOnly(boolean describeOnly) { this.describeOnly = describeOnly; return this; } public StmtInput setInternal(boolean internal) { this.internal = internal; return this; } public StmtInput setServerUrl(String serverUrl) { this.serverUrl = serverUrl; return this; } public StmtInput setRequestId(String requestId) { this.requestId = requestId; return this; } public StmtInput setSequenceId(int sequenceId) { this.sequenceId = sequenceId; return this; } public StmtInput setSessionToken(String sessionToken) { this.sessionToken = sessionToken; return this; } public StmtInput setNetworkTimeoutInMillis(int networkTimeoutInMillis) { this.networkTimeoutInMillis = networkTimeoutInMillis; return this; } public StmtInput setSocketTimeout(int socketTimeout) { this.socketTimeout = socketTimeout; return this; } public StmtInput setInjectSocketTimeout(int injectSocketTimeout) { this.injectSocketTimeout = injectSocketTimeout; return this; } public StmtInput setInjectClientPause(int injectClientPause) { this.injectClientPause = injectClientPause; return this; } public StmtInput setCanceling(AtomicBoolean canceling) { this.canceling = canceling; return this; } public StmtInput setRetry(boolean retry) { this.retry = retry; return this; } public StmtInput setCombineDescribe(boolean combineDescribe) { this.combineDescribe = combineDescribe; return this; } public StmtInput setDescribedJobId(String describedJobId) { this.describedJobId = describedJobId; return this; } public StmtInput setQuerySubmissionTime(long querySubmissionTime) { this.querySubmissionTime = querySubmissionTime; return this; } public StmtInput setServiceName(String serviceName) { this.serviceName = serviceName; return this; } public StmtInput setOCSPMode(OCSPMode ocspMode) { this.ocspMode = ocspMode; return this; } public StmtInput setHttpClientSettingsKey(HttpClientSettingsKey key) { this.httpClientSettingsKey = key; return this; } public StmtInput setAsync(boolean async) { this.asyncExec = async; return this; } public StmtInput setQueryContextDTO(QueryContextDTO queryContext) { this.queryContextDTO = queryContext; return this; } public StmtInput setMaxRetries(int maxRetries) { this.maxRetries = maxRetries; return this; } /** * Set additional http headers to apply to the outgoing request. The additional headers cannot * be used to replace or overwrite a header in use by the driver. These will be applied to the * outgoing request. 
Primarily used by Snowsight, as described in {@link * HttpUtil#applyAdditionalHeadersForSnowsight(org.apache.http.client.methods.HttpRequestBase, * Map)} * * @param additionalHttpHeaders The new headers to add * @return The input object, for chaining * @see * HttpUtil#applyAdditionalHeadersForSnowsight(org.apache.http.client.methods.HttpRequestBase, * Map) */ @SuppressWarnings("unchecked") public StmtInput setAdditionalHttpHeadersForSnowsight( Map additionalHttpHeaders) { this.additionalHttpHeadersForSnowsight = additionalHttpHeaders; return this; } } /** Output for running a statement on server */ public static class StmtOutput { JsonNode result; public StmtOutput(JsonNode result) { this.result = result; } public JsonNode getResult() { return result; } } /** * Execute a statement * *
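* <p>A minimal call sketch (illustrative; values are placeholders, and real callers also set the
* session token, timeouts, media type, and HTTP client settings via the other setters on {@code
* StmtInput}):
*
* <pre>{@code
* StmtInput input =
*     new StmtInput()
*         .setSql("select 1")
*         .setServerUrl(serverUrl)
*         .setRequestId(UUIDUtils.getUUID().toString())
*         .setSequenceId(0);
* StmtOutput output = StmtUtil.execute(input, new ExecTimeTelemetryData());
* }</pre>
*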

<p>Side effect: stmtInput.prevGetResultURL is set if we have started the ping-pong exchange and
 * receive an exception from session token expiration, so that during retry we do not resubmit the
 * query but continue the ping-pong process.
 *
 * @param stmtInput input statement
 * @param execTimeData ExecTimeTelemetryData
 * @return StmtOutput output statement
 * @throws SFException exception raised from Snowflake components
 * @throws SnowflakeSQLException exception raised from Snowflake components
 */
public static StmtOutput execute(StmtInput stmtInput, ExecTimeTelemetryData execTimeData)
    throws SFException, SnowflakeSQLException {
  return execute(stmtInput, execTimeData, null);
}

/**
 * Execute a statement
 *
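 * <p>Protocol sketch (a summary of the method body below, not new behavior): the query is POSTed
 * once to {@code /queries/v1/query-request}; for as long as the server answers with a
 * query-in-progress code, the driver follows each response's {@code getResultUrl} (the "ping
 * pong") until the final result document arrives.
 *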

side effect: stmtInput.prevGetResultURL is set if we have started ping pong and receives an * exception from session token expiration so that during retry we don't retry the query * submission, but continue the ping pong process. * * @param stmtInput input statement * @param execTimeData ExecTimeTelemetryData * @param sfSession the session associated with the request * @return StmtOutput output statement * @throws SFException exception raised from Snowflake components * @throws SnowflakeSQLException exception raised from Snowflake components */ public static StmtOutput execute( StmtInput stmtInput, ExecTimeTelemetryData execTimeData, SFBaseSession sfSession) throws SFException, SnowflakeSQLException { HttpPost httpRequest = null; AssertUtil.assertTrue( stmtInput.serverUrl != null, "Missing server url for statement execution"); AssertUtil.assertTrue(stmtInput.sql != null, "Missing sql for statement execution"); AssertUtil.assertTrue( stmtInput.requestId != null, "Missing request id for statement execution"); AssertUtil.assertTrue( stmtInput.sequenceId >= 0, "Negative sequence id for statement execution"); AssertUtil.assertTrue( stmtInput.mediaType != null, "Missing media type for statement execution"); try { String resultAsString = null; // SNOW-20443: if we are retrying and there is get result URL, we // don't need to execute the query again if (stmtInput.retry && stmtInput.prevGetResultURL != null) { logger.debug( "Retrying statement execution with get result URL: {}", stmtInput.prevGetResultURL); } else { URIBuilder uriBuilder = new URIBuilder(stmtInput.serverUrl); uriBuilder.setPath(SF_PATH_QUERY_V1); uriBuilder.addParameter(SFSession.SF_QUERY_REQUEST_ID, stmtInput.requestId); if (stmtInput.combineDescribe) { uriBuilder.addParameter(SF_QUERY_COMBINE_DESCRIBE_EXECUTE, Boolean.TRUE.toString()); } httpRequest = new HttpPost(uriBuilder.build()); // Add custom headers before adding common headers HttpUtil.applyAdditionalHeadersForSnowsight( httpRequest, stmtInput.additionalHttpHeadersForSnowsight); /* * sequence id is only needed for old query API, when old query API * is deprecated, we can remove sequence id. 
*/ QueryExecDTO sqlJsonBody = new QueryExecDTO( stmtInput.sql, stmtInput.describeOnly, stmtInput.sequenceId, stmtInput.bindValues, stmtInput.bindStage, stmtInput.parametersMap, stmtInput.queryContextDTO, stmtInput.querySubmissionTime, stmtInput.describeOnly || stmtInput.internal, stmtInput.asyncExec); if (!stmtInput.describeOnly) { sqlJsonBody.setDescribedJobId(stmtInput.describedJobId); } String queryContextDTO = mapper.writeValueAsString(stmtInput.queryContextDTO); logger.debug("queryContextDTO: {}", queryContextDTO); String json = mapper.writeValueAsString(sqlJsonBody); logger.debug("JSON: {}", json); ByteArrayEntity input; if (!stmtInput.httpClientSettingsKey.getGzipDisabled()) { execTimeData.setGzipStart(); // SNOW-18057: compress the post body in gzip ByteArrayOutputStream baos = new ByteArrayOutputStream(); try (GZIPOutputStream gzos = new GZIPOutputStream(baos)) { byte[] bytes = json.getBytes("UTF-8"); gzos.write(bytes); gzos.finish(); input = new ByteArrayEntity(baos.toByteArray()); httpRequest.addHeader("content-encoding", "gzip"); execTimeData.setGzipEnd(); } } else { input = new ByteArrayEntity(json.getBytes("UTF-8")); } input.setContentType("application/json"); httpRequest.setEntity(input); httpRequest.addHeader("accept", stmtInput.mediaType); httpRequest.setHeader( SFSession.SF_HEADER_AUTHORIZATION, SFSession.SF_HEADER_SNOWFLAKE_AUTHTYPE + " " + SFSession.SF_HEADER_TOKEN_TAG + "=\"" + stmtInput.sessionToken + "\""); setServiceNameHeader(stmtInput, httpRequest); eventHandler.triggerStateTransition( BasicEvent.QueryState.SENDING_QUERY, String.format(QueryState.SENDING_QUERY.getArgString(), stmtInput.requestId)); resultAsString = HttpUtil.executeRequest( httpRequest, stmtInput.networkTimeoutInMillis / 1000, 0, stmtInput.socketTimeout, stmtInput.maxRetries, stmtInput.injectSocketTimeout, stmtInput.canceling, true, // include retry parameters false, // no retry on HTTP 403 stmtInput.httpClientSettingsKey, execTimeData, sfSession); } return pollForOutput(resultAsString, stmtInput, httpRequest, execTimeData, sfSession); } catch (Exception ex) { if (!(ex instanceof SnowflakeSQLException)) { if (ex instanceof IOException) { logger.error("IOException encountered", ex); // network error throw new SFException( ex, ErrorCode.NETWORK_ERROR, "Exception encountered when executing statement: " + ex.getLocalizedMessage()); } else { logger.error("Exception encountered", ex); // raise internal exception if this is not a snowflake exception throw new SFException(ex, ErrorCode.INTERNAL_ERROR, ex.getLocalizedMessage()); } } else { throw (SnowflakeSQLException) ex; } } finally { // we can release the http connection now if (httpRequest != null) { httpRequest.releaseConnection(); } } } static void updateQueryContextFromResponse(JsonNode responseJson, SFBaseSession session) { if (session == null) { return; } JsonNode queryContextNode = responseJson.path("data").path("queryContext"); if (SnowflakeUtil.isJsonNodePresent(queryContextNode)) { session.setQueryContext(queryContextNode.toString()); } } private static void setServiceNameHeader(StmtInput stmtInput, HttpRequestBase httpRequest) { if (!isNullOrEmpty(stmtInput.serviceName)) { httpRequest.setHeader(SessionUtil.SF_HEADER_SERVICE_NAME, stmtInput.serviceName); } } private static StmtOutput pollForOutput( String resultAsString, StmtInput stmtInput, HttpPost httpRequest, ExecTimeTelemetryData execTimeData, SFBaseSession session) throws SFException, SnowflakeSQLException { /* * Check response for error or for ping pong response * * For ping-pong: want 
to make sure our connection is not silently dropped * by middle players (e.g load balancer/VPN timeout) between client and GS */ JsonNode pingPongResponseJson; boolean queryInProgress; boolean firstResponse = !stmtInput.retry; String previousGetResultPath = (stmtInput.retry ? stmtInput.prevGetResultURL : null); int retries = 0; final int MAX_RETRIES = 3; do { pingPongResponseJson = null; if (resultAsString != null) { try { pingPongResponseJson = mapper.readTree(resultAsString); } catch (Exception ex) { logger.error( "Bad result json: {}, " + "JSON parsing exception: {}, http request: {}", resultAsString, ex.getLocalizedMessage(), httpRequest); logger.error("Exception stack trace", ex); } } eventHandler.triggerStateTransition( BasicEvent.QueryState.WAITING_FOR_RESULT, "{requestId: " + stmtInput.requestId + "," + "pingNumber: " + retries + "}"); if (pingPongResponseJson == null) { /* * Retry for bad response for server. * But we don't want to retry too many times */ if (retries >= MAX_RETRIES) { throw new SFException(ErrorCode.BAD_RESPONSE, resultAsString); } else { logger.debug("Will retry get result. Retry count: {}", retries); execTimeData.incrementRetryCount(); execTimeData.addRetryLocation("StmtUtil null response"); retries++; } } else { retries = 0; // reset retry counter after a successful response // Merge QueryContext before error checking so that QCC is updated // even for failed queries (e.g. DPO changes from aborted transactions). updateQueryContextFromResponse(pingPongResponseJson, session); // raise server side error as an exception if any SnowflakeUtil.checkErrorAndThrowException(pingPongResponseJson); } // check the response code to see if it is a progress report response if (pingPongResponseJson != null && !QueryInProgressResponse.QUERY_IN_PROGRESS_CODE.equals( pingPongResponseJson.path("code").asText()) && !QueryInProgressResponse.QUERY_IN_PROGRESS_ASYNC_CODE.equals( pingPongResponseJson.path("code").asText())) { queryInProgress = false; } // for the purposes of this function, return false instead of true else if (stmtInput.asyncExec && QueryInProgressResponse.QUERY_IN_PROGRESS_ASYNC_CODE.equals( pingPongResponseJson.path("code").asText())) { queryInProgress = false; } else { queryInProgress = true; if (firstResponse) { // sleep some time to simulate client pause. 
The purpose is to // simulate client pause before trying to fetch result so that // we can test query behavior related to disconnected client if (stmtInput.injectClientPause != 0) { logger.debug("Inject client pause for {} seconds", stmtInput.injectClientPause); try { Thread.sleep(stmtInput.injectClientPause * 1000); } catch (InterruptedException ex) { logger.debug("Exception encountered while injecting pause", false); } } } execTimeData.incrementRetryCount(); execTimeData.addRetryLocation("StmtUtil queryInProgress"); resultAsString = getQueryResult(pingPongResponseJson, previousGetResultPath, stmtInput, session); // save the previous get result path in case we run into session // expiration if (pingPongResponseJson != null) { previousGetResultPath = pingPongResponseJson.path("data").path("getResultUrl").asText(); stmtInput.prevGetResultURL = previousGetResultPath; } } // not first response any more if (firstResponse) { firstResponse = false; } } while (queryInProgress); logger.debug("Returning result", false); eventHandler.triggerStateTransition( BasicEvent.QueryState.PROCESSING_RESULT, String.format(QueryState.PROCESSING_RESULT.getArgString(), stmtInput.requestId)); return new StmtOutput(pingPongResponseJson); } /** * Issue get-result call to get query result given an in-progress response. * * @param inProgressResponse In progress response in JSON form * @param previousGetResultPath previous get results path * @param stmtInput input statement * @return results in string form * @throws SFException exception raised from Snowflake components * @throws SnowflakeSQLException exception raised from Snowflake components */ @Deprecated protected static String getQueryResult( JsonNode inProgressResponse, String previousGetResultPath, StmtInput stmtInput) throws SFException, SnowflakeSQLException { return getQueryResult(inProgressResponse, previousGetResultPath, stmtInput, null); } /** * Issue get-result call to get query result given an in-progress response. * * @param inProgressResponse In progress response in JSON form * @param previousGetResultPath previous get results path * @param stmtInput input statement * @param sfSession the session associated with the request * @return results in string form * @throws SFException exception raised from Snowflake components * @throws SnowflakeSQLException exception raised from Snowflake components */ protected static String getQueryResult( JsonNode inProgressResponse, String previousGetResultPath, StmtInput stmtInput, SFBaseSession sfSession) throws SFException, SnowflakeSQLException { String getResultPath = null; // get result url better not be empty if (inProgressResponse == null || inProgressResponse.path("data").path("getResultUrl").isMissingNode()) { if (previousGetResultPath == null) { throw new SFException( ErrorCode.INTERNAL_ERROR, "No query response or missing get result URL"); } else { logger.debug( "No query response or missing get result URL, " + "use previous get result URL: {}", previousGetResultPath); getResultPath = previousGetResultPath; } } else { getResultPath = inProgressResponse.path("data").path("getResultUrl").asText(); } return getQueryResult(getResultPath, stmtInput, sfSession); } /** * Issue get-result call to get query result given an in-progress response. 
* * @param getResultPath path to results * @param stmtInput object with context information * @return results in string form * @throws SFException exception raised from Snowflake components * @throws SnowflakeSQLException exception raised from Snowflake components */ @Deprecated protected static String getQueryResult(String getResultPath, StmtInput stmtInput) throws SFException, SnowflakeSQLException { return getQueryResult(getResultPath, stmtInput, null); } /** * Issue get-result call to get query result given an in-progress response. * * @param getResultPath path to results * @param stmtInput object with context information * @param sfSession the session associated with the request * @return results in string form * @throws SFException exception raised from Snowflake components * @throws SnowflakeSQLException exception raised from Snowflake components */ protected static String getQueryResult( String getResultPath, StmtInput stmtInput, SFBaseSession sfSession) throws SFException, SnowflakeSQLException { HttpGet httpRequest = null; logger.debug("Get query result: {}", getResultPath); try { URIBuilder uriBuilder = new URIBuilder(stmtInput.serverUrl); uriBuilder.setPath(getResultPath); uriBuilder.addParameter(SFSession.SF_QUERY_REQUEST_ID, UUIDUtils.getUUID().toString()); httpRequest = new HttpGet(uriBuilder.build()); // Add custom headers before adding common headers HttpUtil.applyAdditionalHeadersForSnowsight( httpRequest, stmtInput.additionalHttpHeadersForSnowsight); httpRequest.addHeader("accept", stmtInput.mediaType); httpRequest.setHeader( SFSession.SF_HEADER_AUTHORIZATION, SFSession.SF_HEADER_SNOWFLAKE_AUTHTYPE + " " + SFSession.SF_HEADER_TOKEN_TAG + "=\"" + stmtInput.sessionToken + "\""); setServiceNameHeader(stmtInput, httpRequest); return HttpUtil.executeRequest( httpRequest, stmtInput.networkTimeoutInMillis / 1000, 0, stmtInput.socketTimeout, stmtInput.maxRetries, 0, stmtInput.canceling, false, // no retry parameter false, // no retry on HTTP 403 stmtInput.httpClientSettingsKey, new ExecTimeTelemetryData(), sfSession); } catch (URISyntaxException | IOException ex) { logger.error("Exception encountered when getting result for " + httpRequest, ex); // raise internal exception if this is not a snowflake exception throw new SFException(ex, ErrorCode.INTERNAL_ERROR, ex.getLocalizedMessage()); } } /** * Issue get-result call to get query result given an in progress response. 
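* <p>For example (illustrative query id): a queryId of {@code "01aa2bb3-0000-1111-2222-333344445555"}
* yields the result path {@code /queries/01aa2bb3-0000-1111-2222-333344445555/result}, i.e. {@code
* SF_PATH_QUERY_RESULT} with the id substituted.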
* * @param queryId id of query to get results for * @param session the current session * @return results in JSON * @throws SFException exception raised from Snowflake components * @throws SnowflakeSQLException exception raised from Snowflake components */ protected static JsonNode getQueryResultJSON(String queryId, SFSession session) throws SFException, SnowflakeSQLException { String getResultPath = String.format(SF_PATH_QUERY_RESULT, queryId); StmtInput stmtInput = new StmtInput() .setServerUrl(session.getServerUrl()) .setSessionToken(session.getSessionToken(internalCallMarker())) .setNetworkTimeoutInMillis(session.getNetworkTimeoutInMilli()) .setSocketTimeout(session.getHttpClientSocketTimeout()) .setMediaType(SF_MEDIA_TYPE) .setServiceName(session.getServiceName()) .setOCSPMode(session.getOCSPMode()) .setHttpClientSettingsKey(session.getHttpClientKey()) .setMaxRetries(session.getMaxHttpRetries()); String resultAsString = getQueryResult(getResultPath, stmtInput, session); StmtOutput stmtOutput = pollForOutput(resultAsString, stmtInput, null, new ExecTimeTelemetryData(), session); return stmtOutput.getResult(); } /** * Cancel a statement identifiable by a request id * * @param stmtInput input statement * @throws SFException if there is an internal exception * @throws SnowflakeSQLException if failed to cancel the statement * @deprecated use {@link #cancel(StmtInput, CancellationReason, SFBaseSession)} instead */ @Deprecated public static void cancel(StmtInput stmtInput) throws SFException, SnowflakeSQLException { cancel(stmtInput, CancellationReason.UNKNOWN, null); } /** * Cancel a statement identifiable by a request id * * @param stmtInput input statement * @param cancellationReason reason for the cancellation * @throws SFException if there is an internal exception * @throws SnowflakeSQLException if failed to cancel the statement */ public static void cancel(StmtInput stmtInput, CancellationReason cancellationReason) throws SFException, SnowflakeSQLException { cancel(stmtInput, cancellationReason, null); } /** * Cancel a statement identifiable by a request id * * @param stmtInput input statement * @param cancellationReason reason for the cancellation * @param sfSession the session associated with the request * @throws SFException if there is an internal exception * @throws SnowflakeSQLException if failed to cancel the statement */ public static void cancel( StmtInput stmtInput, CancellationReason cancellationReason, SFBaseSession sfSession) throws SFException, SnowflakeSQLException { HttpPost httpRequest = null; AssertUtil.assertTrue( stmtInput.serverUrl != null, "Missing server url for statement execution"); AssertUtil.assertTrue(stmtInput.sql != null, "Missing sql for statement execution"); AssertUtil.assertTrue( stmtInput.mediaType != null, "Missing media type for statement execution"); AssertUtil.assertTrue( stmtInput.requestId != null, "Missing request id for statement execution"); AssertUtil.assertTrue( stmtInput.sessionToken != null, "Missing session token for statement execution"); try { URIBuilder uriBuilder = new URIBuilder(stmtInput.serverUrl); logger.warn("Cancelling query {} with reason {}", stmtInput.requestId, cancellationReason); logger.debug("Aborting query: {}", stmtInput.sql); uriBuilder.setPath(SF_PATH_ABORT_REQUEST_V1); uriBuilder.addParameter(SFSession.SF_QUERY_REQUEST_ID, UUIDUtils.getUUID().toString()); httpRequest = new HttpPost(uriBuilder.build()); // Add custom headers before adding common headers HttpUtil.applyAdditionalHeadersForSnowsight( httpRequest, 
stmtInput.additionalHttpHeadersForSnowsight); /* * The JSON input has two fields: sqlText and requestId */ Map sqlJsonBody = new HashMap(); sqlJsonBody.put("sqlText", stmtInput.sql); sqlJsonBody.put("requestId", stmtInput.requestId); String json = mapper.writeValueAsString(sqlJsonBody); logger.debug("JSON for cancel request: {}", json); StringEntity input = new StringEntity(json, StandardCharsets.UTF_8); input.setContentType("application/json"); httpRequest.setEntity(input); httpRequest.addHeader("accept", stmtInput.mediaType); httpRequest.setHeader( SFSession.SF_HEADER_AUTHORIZATION, SFSession.SF_HEADER_SNOWFLAKE_AUTHTYPE + " " + SFSession.SF_HEADER_TOKEN_TAG + "=\"" + stmtInput.sessionToken + "\""); setServiceNameHeader(stmtInput, httpRequest); String jsonString = HttpUtil.executeRequest( httpRequest, SF_CANCELING_RETRY_TIMEOUT_IN_MILLIS, 0, stmtInput.socketTimeout, 0, 0, null, false, // no retry parameter false, // no retry on HTTP 403 stmtInput.httpClientSettingsKey, new ExecTimeTelemetryData(), sfSession); // trace the response if requested logger.debug("Json response: {}", jsonString); JsonNode rootNode = null; rootNode = mapper.readTree(jsonString); // raise server side error as an exception if any SnowflakeUtil.checkErrorAndThrowException(rootNode); } catch (URISyntaxException | IOException ex) { logger.error("Exception encountered when canceling " + httpRequest, ex); // raise internal exception if this is not a snowflake exception throw new SFException(ex, ErrorCode.INTERNAL_ERROR, ex.getLocalizedMessage()); } } /** * A simple function to check if the statement is related to manipulate stage. * * @param sql a SQL statement/command * @return PUT/GET/LIST/RM if statement belongs to one of them, otherwise return NULL */ public static SFStatementType checkStageManageCommand(String sql) { if (sql == null) { return null; } String trimmedSql = sql.trim(); // skip commenting prefixed with // while (trimmedSql.startsWith("//")) { if (logger.isDebugEnabled()) { logger.debug("Skipping // comments in: \n{}", trimmedSql); } if (trimmedSql.indexOf('\n') > 0) { trimmedSql = trimmedSql.substring(trimmedSql.indexOf('\n')); trimmedSql = trimmedSql.trim(); } else { break; } if (logger.isDebugEnabled()) { logger.debug("New sql after skipping // comments: \n{}", trimmedSql); } } // skip commenting enclosed with /* */ while (trimmedSql.startsWith("/*")) { if (logger.isDebugEnabled()) { logger.debug("skipping /* */ comments in: \n{}", trimmedSql); } if (trimmedSql.indexOf("*/") > 0) { trimmedSql = trimmedSql.substring(trimmedSql.indexOf("*/") + 2); trimmedSql = trimmedSql.trim(); } else { break; } if (logger.isDebugEnabled()) { logger.debug( "New sql after skipping /* */ comments: \n{}", SecretDetector.maskSecrets(trimmedSql)); } } trimmedSql = trimmedSql.toLowerCase(); if (trimmedSql.startsWith("put ")) { return SFStatementType.PUT; } else if (trimmedSql.startsWith("get ")) { return SFStatementType.GET; } else if (trimmedSql.startsWith("ls ") || trimmedSql.startsWith("list ")) { return SFStatementType.LIST; } else if (trimmedSql.startsWith("rm ") || trimmedSql.startsWith("remove ")) { return SFStatementType.REMOVE; } else { return null; } } /** * Truncate a SQL text for logging * * @param sql original SQL * @return truncated SQL command */ public static String truncateSQL(String sql) { return sql.length() > 20 ? sql.substring(0, 20) + "..." 
: sql; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/SystemUtil.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; class SystemUtil { private static final SFLogger logger = SFLoggerFactory.getLogger(SystemUtil.class); /** * Helper function to convert system properties to integers * * @param systemProperty name of the system property * @param defaultValue default value used * @return the value of the system property, else the default value */ static int convertSystemPropertyToIntValue(String systemProperty, int defaultValue) { String systemPropertyValue = systemGetProperty(systemProperty); int returnVal = defaultValue; if (systemPropertyValue != null) { try { returnVal = Integer.parseInt(systemPropertyValue); } catch (NumberFormatException ex) { logger.warn( "Failed to parse the system parameter {} with value {}", systemProperty, systemPropertyValue); } } return returnVal; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/URLUtil.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.core.SFSession.SF_QUERY_REQUEST_ID; import java.io.UnsupportedEncodingException; import java.net.MalformedURLException; import java.net.URI; import java.net.URISyntaxException; import java.net.URL; import java.net.URLEncoder; import java.nio.charset.StandardCharsets; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.regex.PatternSyntaxException; import javax.annotation.Nullable; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import org.apache.http.NameValuePair; import org.apache.http.client.utils.URLEncodedUtils; public class URLUtil { private static final SFLogger logger = SFLoggerFactory.getLogger(URLUtil.class); static final String validURLPattern = "^http(s?)\\:\\/\\/[0-9a-zA-Z]([-.\\w]*[0-9a-zA-Z@:])*(:(0-9)*)*(\\/?)([a-zA-Z0-9\\-\\.\\?\\,\\&\\(\\)\\/\\\\\\+&%\\$#_=@]*)?$"; static final Pattern pattern = Pattern.compile(validURLPattern); public static boolean isValidURL(String url) { try { Matcher matcher = pattern.matcher(url); return matcher.find(); } catch (PatternSyntaxException pex) { logger.debug("The URL REGEX is invalid. 
Falling back to basic sanity test"); try { new URL(url).toURI(); return true; } catch (MalformedURLException mex) { logger.debug("The URL " + url + ", is invalid"); return false; } catch (URISyntaxException uex) { logger.debug("The URL " + url + ", is invalid"); return false; } } } @Nullable public static String urlEncode(String target) throws UnsupportedEncodingException { String encodedTarget; try { encodedTarget = URLEncoder.encode(target, StandardCharsets.UTF_8.toString()); } catch (UnsupportedEncodingException uex) { logger.debug("The string to be encoded- " + target + ", is invalid"); return null; } return encodedTarget; } public static String getRequestId(URI uri) { return URLEncodedUtils.parse(uri, StandardCharsets.UTF_8).stream() .filter(p -> p.getName().equals(SF_QUERY_REQUEST_ID)) .findFirst() .map(NameValuePair::getValue) .orElse(null); } public static String getRequestIdLogStr(URI uri) { String requestId = getRequestId(uri); return requestId == null ? "" : "[requestId=" + requestId + "] "; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/UUIDUtils.java ================================================ package net.snowflake.client.internal.core; import java.util.UUID; import java.util.concurrent.ThreadLocalRandom; public class UUIDUtils { static Boolean newUUIDEnabled = true; public static UUID getUUID() { if (UUIDUtils.newUUIDEnabled) { return UUIDUtils.UUIDImpl(); } else { return UUID.randomUUID(); } } private static UUID UUIDImpl() { final byte[] randomBytes = new byte[16]; ThreadLocalRandom.current().nextBytes(randomBytes); randomBytes[6] &= 0x0f; randomBytes[6] |= 0x40; randomBytes[8] &= 0x3f; randomBytes[8] |= 0x80; long msb = 0; long lsb = 0; for (int i = 0; i < 8; i++) { msb = (msb << 8) | (randomBytes[i] & 0xff); } for (int i = 8; i < 16; i++) { lsb = (lsb << 8) | (randomBytes[i] & 0xff); } return new UUID(msb, lsb); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/arrow/AbstractArrowVectorConverter.java ================================================ package net.snowflake.client.internal.core.arrow; import java.math.BigDecimal; import java.sql.Date; import java.sql.Time; import java.sql.Timestamp; import java.time.Duration; import java.time.Period; import java.util.TimeZone; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.internal.core.DataConversionContext; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import org.apache.arrow.vector.ValueVector; /** * Abstract class of arrow vector converter. For most types, throw invalid convert error. It depends * child class to override conversion logic * *
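* <p>Illustrative subclass sketch (not a converter from this package; it assumes the subclass
* keeps a typed reference to its vector, as e.g. ArrayConverter does): a converter over an Arrow
* {@code IntVector} would override just the conversions it supports,
*
* <pre>{@code
* @Override
* public int toInt(int rowIndex) throws SFException {
*   return isNull(rowIndex) ? 0 : intVector.get(rowIndex);
* }
* }</pre>
*
* and inherit the invalid-conversion SFException from this base class for everything else.
*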

Note: toObject and toString are abstract methods because every converter
 * implementation needs to implement them.
 */
abstract class AbstractArrowVectorConverter implements ArrowVectorConverter {
  /** snowflake logical type of the target arrow vector */
  protected String logicalTypeStr;

  /** value vector */
  private ValueVector valueVector;

  protected DataConversionContext context;
  protected int columnIndex;
  protected boolean treatNTZasUTC;
  protected boolean useSessionTimezone;
  protected TimeZone sessionTimeZone;
  private boolean shouldTreatDecimalAsInt;

  /** Field names of the struct vectors used by timestamp */
  public static final String FIELD_NAME_EPOCH = "epoch"; // seconds since epoch

  /** Timezone index */
  public static final String FIELD_NAME_TIME_ZONE_INDEX = "timezone"; // time zone index

  /** Fraction in nanoseconds */
  public static final String FIELD_NAME_FRACTION = "fraction"; // fraction in nanoseconds

  /**
   * @param logicalTypeStr snowflake logical type of the target arrow vector.
   * @param valueVector value vector
   * @param vectorIndex value index
   * @param context DataConversionContext
   */
  AbstractArrowVectorConverter(
      String logicalTypeStr,
      ValueVector valueVector,
      int vectorIndex,
      DataConversionContext context) {
    this.logicalTypeStr = logicalTypeStr;
    this.valueVector = valueVector;
    this.columnIndex = vectorIndex + 1;
    this.context = context;
    this.shouldTreatDecimalAsInt =
        context == null
            || context.getSession() == null
            || context.getSession().isJdbcArrowTreatDecimalAsInt()
            || context.getSession().isJdbcTreatDecimalAsInt();
  }

  @Override
  public boolean toBoolean(int rowIndex) throws SFException {
    throw new SFException(
        ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.BOOLEAN_STR, "");
  }

  @Override
  public byte toByte(int rowIndex) throws SFException {
    throw new SFException(
        ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.BYTE_STR, "");
  }

  @Override
  public short toShort(int rowIndex) throws SFException {
    if (isNull(rowIndex)) {
      return 0;
    }
    throw new SFException(
        ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.SHORT_STR, "");
  }

  @Override
  public int toInt(int rowIndex) throws SFException {
    if (isNull(rowIndex)) {
      return 0;
    }
    throw new SFException(
        ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.INT_STR, "");
  }

  @Override
  public long toLong(int rowIndex) throws SFException {
    if (isNull(rowIndex)) {
      return 0;
    }
    throw new SFException(
        ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.LONG_STR, "");
  }

  @Override
  public double toDouble(int rowIndex) throws SFException {
    if (isNull(rowIndex)) {
      return 0;
    }
    throw new SFException(
        ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.DOUBLE_STR, "");
  }

  @Override
  public float toFloat(int rowIndex) throws SFException {
    if (isNull(rowIndex)) {
      return 0;
    }
    throw new SFException(
        ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.FLOAT_STR, "");
  }

  @Override
  public byte[] toBytes(int index) throws SFException {
    if (isNull(index)) {
      return null;
    }
    throw new SFException(ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "byteArray", "");
  }

  @Override
  public Date toDate(int index, TimeZone jvmTz, boolean useDateFormat) throws SFException {
    throw new SFException(
        ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.DATE_STR, "");
  }

  @Override
  public Time toTime(int index) throws SFException {
    throw new SFException(
        ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.TIME_STR, "");
  }

  @Override
  public Timestamp toTimestamp(int index, TimeZone tz) throws SFException {
    throw new SFException(
        ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.TIMESTAMP_STR, "");
  }

  @Override
  public BigDecimal toBigDecimal(int index) throws SFException {
    if (isNull(index)) {
      return null;
    }
    throw new SFException(
        ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.BIG_DECIMAL_STR, "");
  }

  @Override
  public Period toPeriod(int index) throws SFException {
    if (isNull(index)) {
      return null;
    }
    throw new SFException(
        ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.PERIOD_STR, "");
  }

  @Override
  public Duration toDuration(int index) throws SFException {
    if (isNull(index)) {
      return null;
    }
    throw new SFException(
        ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.DURATION_STR, "");
  }

  /**
   * True if decimal should be treated as int type.
   *
   * @return true if decimal should be treated as int type.
   */
  boolean shouldTreatDecimalAsInt() {
    return shouldTreatDecimalAsInt;
  }

  @Override
  public void setTreatNTZAsUTC(boolean isUTC) {
    this.treatNTZasUTC = isUTC;
  }

  @Override
  public void setUseSessionTimezone(boolean useSessionTimezone) {
    this.useSessionTimezone = useSessionTimezone;
  }

  @Override
  public void setSessionTimeZone(TimeZone tz) {
    this.sessionTimeZone = tz;
  }

  @Override
  public boolean isNull(int index) {
    return valueVector.isNull(index);
  }

  @Override
  public abstract Object toObject(int index) throws SFException;

  @Override
  public abstract String toString(int index) throws SFException;

  /**
   * Thrown when a Snowflake timestamp cannot be manipulated in Java due to size limitations.
   * Snowflake can use up to a full SB16 to represent a timestamp. Java, on the other hand,
   * requires that the number of millis since epoch fit into a long. For timestamps whose millis
   * since epoch don't fit into a long, certain operations, such as conversion to
   * java.sql.Timestamp, are not available.
   */
  public static class TimestampOperationNotAvailableException extends RuntimeException {
    private BigDecimal secsSinceEpoch;

    TimestampOperationNotAvailableException(long secsSinceEpoch, int fraction) {
      super("seconds=" + secsSinceEpoch + " nanos=" + fraction);
      this.secsSinceEpoch =
          new BigDecimal(secsSinceEpoch).add(new BigDecimal(fraction).scaleByPowerOfTen(-9));
    }

    public BigDecimal getSecsSinceEpoch() {
      return secsSinceEpoch;
    }
  }
}

================================================
FILE: src/main/java/net/snowflake/client/internal/core/arrow/ArrayConverter.java
================================================
package net.snowflake.client.internal.core.arrow;

import net.snowflake.client.api.exception.SnowflakeSQLException;
import net.snowflake.client.api.resultset.SnowflakeType;
import net.snowflake.client.internal.core.DataConversionContext;
import net.snowflake.client.internal.core.SFException;
import net.snowflake.client.internal.core.arrow.tostringhelpers.ArrowArrayStringRepresentationBuilder;
import org.apache.arrow.vector.FieldVector;
import org.apache.arrow.vector.complex.ListVector;

/** Array type converter. */
public class ArrayConverter extends AbstractArrowVectorConverter {

  private final ListVector vector;

  /**
   * @param valueVector ListVector
   * @param vectorIndex vector index
   * @param context DataConversionContext
   */
  public ArrayConverter(ListVector valueVector, int vectorIndex, DataConversionContext context) {
    super(SnowflakeType.ARRAY.name(), valueVector, vectorIndex, context);
    this.vector = valueVector;
  }

  @Override
  public Object toObject(int index) throws SFException {
    return isNull(index) ? null : vector.getObject(index);
  }

  @Override
  public byte[] toBytes(int index) throws SFException {
    return isNull(index) ? null : toString(index).getBytes();
  }

  @Override
  public String toString(int index) throws SFException {
    FieldVector vectorUnpacked = vector.getChildrenFromFields().get(0);
    SnowflakeType logicalType =
        ArrowVectorConverterUtil.getSnowflakeTypeFromFieldMetadata(vectorUnpacked.getField());
    ArrowArrayStringRepresentationBuilder builder =
        new ArrowArrayStringRepresentationBuilder(logicalType);
    final ArrowVectorConverter converter;
    try {
      converter = ArrowVectorConverterUtil.initConverter(vectorUnpacked, context, columnIndex);
    } catch (SnowflakeSQLException e) {
      return vector.getObject(index).toString();
    }
    for (int i = vector.getElementStartIndex(index); i < vector.getElementEndIndex(index); i++) {
      builder.appendValue(converter.toString(i));
    }
    return builder.toString();
  }
}

================================================
FILE: src/main/java/net/snowflake/client/internal/core/arrow/ArrowResultChunkIndexSorter.java
================================================
package net.snowflake.client.internal.core.arrow;

import java.util.List;
import java.util.stream.IntStream;
import net.snowflake.client.internal.core.SFException;
import org.apache.arrow.memory.BufferAllocator;
import org.apache.arrow.vector.IntVector;
import org.apache.arrow.vector.ValueVector;
import org.apache.arrow.vector.types.Types;
import org.apache.arrow.vector.types.pojo.FieldType;

/**
 * Use quick sort to sort an Arrow result chunk. The sorted order is represented in the indices
 * vector.
 */
public class ArrowResultChunkIndexSorter {
  private List<ValueVector> resultChunk;
  private List<ArrowVectorConverter> converters;

  /** Vector indices to sort. */
  private IntVector indices;

  public ArrowResultChunkIndexSorter(
      List<ValueVector> resultChunk, List<ArrowVectorConverter> converters) {
    this.resultChunk = resultChunk;
    this.converters = converters;
    initIndices();
  }

  /** initialize original indices */
  private void initIndices() {
    BufferAllocator rootAllocator = resultChunk.get(0).getAllocator();
    FieldType fieldType = new FieldType(true, Types.MinorType.INT.getType(), null, null);
    indices = new IntVector("indices", fieldType, rootAllocator);
    IntStream.range(0, resultChunk.get(0).getValueCount()).forEach(i -> indices.setSafe(i, i));
  }

  /**
   * This method is only used when sf-property sort is on.
   *
   * @return sorted indices
   * @throws SFException when exception encountered
   */
  public IntVector sort() throws SFException {
    quickSort(0, resultChunk.get(0).getValueCount() - 1);
    return indices;
  }

  private void quickSort(int low, int high) throws SFException {
    if (low < high) {
      int mid = partition(low, high);
      quickSort(low, mid - 1);
      quickSort(mid + 1, high);
    }
  }

  private int partition(int low, int high) throws SFException {
    int pivotIndex = indices.get(low);
    while (low < high) {
      while (low < high && compare(indices.get(high), pivotIndex) >= 0) {
        high -= 1;
      }
      indices.set(low, indices.get(high));
      while (low < high && compare(indices.get(low), pivotIndex) <= 0) {
        low += 1;
      }
      indices.set(high, indices.get(low));
    }
    indices.setSafe(low, pivotIndex);
    return low;
  }

  /**
   * Implement the same compare method as JSON result.
   *
   * @throws SFException when conversion to String fails
   */
  private int compare(int index1, int index2) throws SFException {
    int numCols = converters.size();
    for (int colIdx = 0; colIdx < numCols; colIdx++) {
      if (converters.get(colIdx).isNull(index1) && converters.get(colIdx).isNull(index2)) {
        continue;
      }
      // null is considered bigger than all values
      if (converters.get(colIdx).isNull(index1)) {
        return 1;
      }
      if
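// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of this file), assuming a caller that
// already holds the chunk's vectors and converters; `vectors`, `converters`
// and `readRow` are hypothetical names:
//
//   ArrowResultChunkIndexSorter sorter =
//       new ArrowResultChunkIndexSorter(vectors, converters);
//   IntVector sorted = sorter.sort(); // permutation of row positions
//   for (int i = 0; i < sorted.getValueCount(); i++) {
//     readRow(sorted.get(i)); // visit rows in sorted order via the converters
//   }
// ---------------------------------------------------------------------------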
(converters.get(colIdx).isNull(index2)) { return -1; } int res = converters .get(colIdx) .toString(index1) .compareTo(converters.get(colIdx).toString(index2)); // continue to next column if no difference if (res == 0) { continue; } return res; } // all columns are the same return 0; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/arrow/ArrowResultUtil.java ================================================ package net.snowflake.client.internal.core.arrow; import java.sql.Date; import java.sql.Timestamp; import java.time.LocalDate; import java.util.Calendar; import java.util.TimeZone; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.internal.core.ResultUtil; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.jdbc.SnowflakeTimestampWithTimezone; import net.snowflake.client.internal.log.ArgSupplier; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.common.core.CalendarCache; /** Result utility methods specifically for Arrow format */ public class ArrowResultUtil { private static final SFLogger logger = SFLoggerFactory.getLogger(ArrowResultUtil.class); private static final int[] POWERS_OF_10 = { 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000 }; public static final int MAX_SCALE_POWERS_OF_10 = 9; public static long powerOfTen(int pow) { long val = 1; while (pow > MAX_SCALE_POWERS_OF_10) { val *= POWERS_OF_10[MAX_SCALE_POWERS_OF_10]; pow -= MAX_SCALE_POWERS_OF_10; } return val * POWERS_OF_10[pow]; } public static String getStringFormat(int scale) { StringBuilder sb = new StringBuilder(); return sb.append("%.").append(scale).append('f').toString(); } /** * new method to get Date from integer * * @param day The day to convert. * @return Date */ public static Date getDate(int day) { LocalDate localDate = LocalDate.ofEpochDay(day); return Date.valueOf(localDate); } /** * Method to get Date from integer using timezone offsets * * @param day The day to convert. * @param oldTz The old timezone. * @param newTz The new timezone. 
 * @return Date
   * @throws SFException if date value is invalid
   */
  public static Date getDate(int day, TimeZone oldTz, TimeZone newTz) throws SFException {
    try {
      // return the date adjusted to the JVM default time zone
      long milliSecsSinceEpoch = (long) day * ResultUtil.MILLIS_IN_ONE_DAY;
      long milliSecsSinceEpochNew =
          milliSecsSinceEpoch + moveToTimeZoneOffset(milliSecsSinceEpoch, oldTz, newTz);
      Date preDate = new Date(milliSecsSinceEpochNew);
      // if date is on or before 1582-10-04, apply the difference
      // by (H-H/4-2) where H is the hundreds digit of the year according to:
      // http://en.wikipedia.org/wiki/Gregorian_calendar
      Date newDate = ResultUtil.adjustDate(preDate);
      logger.debug(
          "Adjust date from {} to {}",
          (ArgSupplier) preDate::toString,
          (ArgSupplier) newDate::toString);
      return newDate;
    } catch (NumberFormatException ex) {
      throw new SFException(ErrorCode.INTERNAL_ERROR, "Invalid date value: " + day);
    }
  }

  /**
   * simplified moveToTimeZone method
   *
   * @param milliSecsSinceEpoch milliseconds since Epoch
   * @param oldTZ old timezone
   * @param newTZ new timezone
   * @return offset in milliseconds
   */
  private static long moveToTimeZoneOffset(
      long milliSecsSinceEpoch, TimeZone oldTZ, TimeZone newTZ) {
    if (oldTZ.hasSameRules(newTZ)) {
      // same time zone
      return 0;
    }
    int offsetMillisInOldTZ = oldTZ.getOffset(milliSecsSinceEpoch);

    Calendar calendar = CalendarCache.get(oldTZ);
    calendar.setTimeInMillis(milliSecsSinceEpoch);

    int millisecondWithinDay =
        ((calendar.get(Calendar.HOUR_OF_DAY) * 60 + calendar.get(Calendar.MINUTE)) * 60
                    + calendar.get(Calendar.SECOND))
                * 1000
            + calendar.get(Calendar.MILLISECOND);

    int era = calendar.get(Calendar.ERA);
    int year = calendar.get(Calendar.YEAR);
    int month = calendar.get(Calendar.MONTH);
    int dayOfMonth = calendar.get(Calendar.DAY_OF_MONTH);
    int dayOfWeek = calendar.get(Calendar.DAY_OF_WEEK);

    int offsetMillisInNewTZ =
        newTZ.getOffset(era, year, month, dayOfMonth, dayOfWeek, millisecondWithinDay);

    int offsetMillis = offsetMillisInOldTZ - offsetMillisInNewTZ;
    return offsetMillis;
  }

  /**
   * move the input timestamp from oldTZ to newTZ
   *
   * @param ts Timestamp
   * @param oldTZ Old timezone
   * @param newTZ New timezone
   * @return timestamp in newTZ
   */
  public static Timestamp moveToTimeZone(Timestamp ts, TimeZone oldTZ, TimeZone newTZ) {
    long offset = moveToTimeZoneOffset(ts.getTime(), oldTZ, newTZ);
    if (offset == 0) {
      return ts;
    }
    int nanos = ts.getNanos();
    ts = new Timestamp(ts.getTime() + offset);
    ts.setNanos(nanos);
    return ts;
  }

  /**
   * generate Java Timestamp object
   *
   * @param epoch the value since epoch time
   * @param scale the scale of the value
   * @return Timestamp
   */
  public static Timestamp toJavaTimestamp(long epoch, int scale) {
    return toJavaTimestamp(epoch, scale, TimeZone.getDefault(), false);
  }

  /**
   * generate Java Timestamp object
   *
   * @param epoch the value since epoch time
   * @param scale the scale of the value
   * @param sessionTimezone the session timezone
   * @param useSessionTimezone should the session timezone be used
   * @return Timestamp
   */
  public static Timestamp toJavaTimestamp(
      long epoch, int scale, TimeZone sessionTimezone, boolean useSessionTimezone) {
    long seconds = epoch / powerOfTen(scale);
    int fraction = (int) ((epoch % powerOfTen(scale)) * powerOfTen(9 - scale));
    if (fraction < 0) {
      // handle negative case here
      seconds--;
      fraction += 1000000000;
    }
    return createTimestamp(seconds, fraction, sessionTimezone, useSessionTimezone);
  }

  /**
   * check whether the input seconds value is out of the scope of Java timestamp
   *
   * @param seconds long value to check
   * @return true if value is
out of the scope of Java timestamp. */ public static boolean isTimestampOverflow(long seconds) { return seconds < Long.MIN_VALUE / powerOfTen(3) || seconds > Long.MAX_VALUE / powerOfTen(3); } /** * create Java timestamp using seconds since epoch and fraction in nanoseconds For example, * 1232.234 represents as epoch = 1232 and fraction = 234,000,000 For example, -1232.234 * represents as epoch = -1233 and fraction = 766,000,000 For example, -0.13 represents as epoch = * -1 and fraction = 870,000,000 * * @param seconds seconds value * @param fraction fraction * @param timezone The timezone being used for the toString() formatting * @param useSessionTz boolean useSessionTz * @return java timestamp object */ public static Timestamp createTimestamp( long seconds, int fraction, TimeZone timezone, boolean useSessionTz) { // If JDBC_TREAT_TIMESTAMP_NTZ_AS_UTC=true, set timezone to UTC to get // timestamp object. This will avoid moving the timezone and creating // daylight savings offset errors. if (useSessionTz) { return new SnowflakeTimestampWithTimezone( seconds * ArrowResultUtil.powerOfTen(3), fraction, timezone); } Timestamp ts = new Timestamp(seconds * ArrowResultUtil.powerOfTen(3)); ts.setNanos(fraction); return ts; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/arrow/ArrowVectorConverter.java ================================================ package net.snowflake.client.internal.core.arrow; import java.math.BigDecimal; import java.sql.Date; import java.sql.Time; import java.sql.Timestamp; import java.time.Duration; import java.time.Period; import java.util.TimeZone; import net.snowflake.client.internal.core.SFException; /** Interface to convert from arrow vector values into java data types. */ public interface ArrowVectorConverter { /** * Set to true when time value should be displayed in wallclock time (no timezone offset) * * @param useSessionTimezone boolean value indicating if there is a timezone offset. 
*/ void setUseSessionTimezone(boolean useSessionTimezone); void setSessionTimeZone(TimeZone tz); /** * Determine whether source value in arrow vector is null value or not * * @param index index of value to be checked * @return true if null value otherwise false */ boolean isNull(int index); /** * Convert value in arrow vector to boolean data * * @param index index of the value to be converted in the vector * @return boolean data converted from arrow vector * @throws SFException invalid data conversion */ boolean toBoolean(int index) throws SFException; /** * Convert value in arrow vector to byte data * * @param index index of the value to be converted in the vector * @return byte data converted from arrow vector * @throws SFException invalid data conversion */ byte toByte(int index) throws SFException; /** * Convert value in arrow vector to short data * * @param index index of the value to be converted in the vector * @return short data converted from arrow vector * @throws SFException invalid data conversion */ short toShort(int index) throws SFException; /** * Convert value in arrow vector to int data * * @param index index of the value to be converted in the vector * @return int data converted from arrow vector * @throws SFException invalid data conversion */ int toInt(int index) throws SFException; /** * Convert value in arrow vector to long data * * @param index index of the value to be converted in the vector * @return long data converted from arrow vector * @throws SFException invalid data conversion */ long toLong(int index) throws SFException; /** * Convert value in arrow vector to double data * * @param index index of the value to be converted in the vector * @return double data converted from arrow vector * @throws SFException invalid data conversion */ double toDouble(int index) throws SFException; /** * Convert value in arrow vector to float data * * @param index index of the value to be converted in the vector * @return float data converted from arrow vector * @throws SFException invalid data conversion */ float toFloat(int index) throws SFException; /** * Convert value in arrow vector to byte array * * @param index index of the value to be converted in the vector * @return byte array converted from arrow vector * @throws SFException invalid data conversion */ byte[] toBytes(int index) throws SFException; /** * Convert value in arrow vector to string * * @param index index of the value to be converted in the vector * @return string converted from arrow vector * @throws SFException invalid data conversion */ String toString(int index) throws SFException; /** * Convert value in arrow vector to Date * * @param index index of the value to be converted in the vector * @param jvmTz JVM timezone * @param useDateFormat boolean value to check whether to change timezone or not * @return Date converted from arrow vector * @throws SFException invalid data conversion */ Date toDate(int index, TimeZone jvmTz, boolean useDateFormat) throws SFException; /** * Convert value in arrow vector to Time * * @param index index of the value to be converted in the vector * @return Time converted from arrow vector * @throws SFException invalid data conversion */ Time toTime(int index) throws SFException; /** * Convert value in arrow vector to Timestamp * * @param index index of the value to be converted in the vector * @param tz time zone * @return Timestamp converted from arrow vector * @throws SFException invalid data conversion */ Timestamp toTimestamp(int index, TimeZone tz) throws SFException; /** 
 * Convert value in arrow vector to BigDecimal
   *
   * @param index index of the value to be converted in the vector
   * @return BigDecimal converted from arrow vector
   * @throws SFException invalid data conversion
   */
  BigDecimal toBigDecimal(int index) throws SFException;

  /**
   * Convert value in arrow vector to Period
   *
   * @param index index of the value to be converted in the vector
   * @return Period converted from arrow vector
   * @throws SFException invalid data conversion
   */
  Period toPeriod(int index) throws SFException;

  /**
   * Convert value in arrow vector to Duration
   *
   * @param index index of the value to be converted in the vector
   * @return Duration converted from arrow vector
   * @throws SFException invalid data conversion
   */
  Duration toDuration(int index) throws SFException;

  /**
   * Convert value in arrow vector to Object
   *
   * @param index index of the value to be converted in the vector
   * @return Object converted from arrow vector
   * @throws SFException invalid data conversion
   */
  Object toObject(int index) throws SFException;

  /**
   * Set to true if NTZ timestamp should be set to UTC
   *
   * @param isUTC true or false value of whether NTZ timestamp should be set to UTC
   */
  void setTreatNTZAsUTC(boolean isUTC);
}

================================================
FILE: src/main/java/net/snowflake/client/internal/core/arrow/ArrowVectorConverterUtil.java
================================================
package net.snowflake.client.internal.core.arrow;

import java.math.BigDecimal;
import java.math.RoundingMode;
import java.time.Duration;
import java.util.Map;
import net.snowflake.client.api.exception.ErrorCode;
import net.snowflake.client.api.exception.SnowflakeSQLException;
import net.snowflake.client.api.resultset.SnowflakeType;
import net.snowflake.client.internal.core.DataConversionContext;
import net.snowflake.client.internal.core.SFBaseSession;
import net.snowflake.client.internal.exception.SnowflakeSQLLoggedException;
import net.snowflake.common.core.SqlState;
import org.apache.arrow.vector.FieldVector;
import org.apache.arrow.vector.ValueVector;
import org.apache.arrow.vector.complex.FixedSizeListVector;
import org.apache.arrow.vector.complex.ListVector;
import org.apache.arrow.vector.complex.MapVector;
import org.apache.arrow.vector.complex.StructVector;
import org.apache.arrow.vector.types.Types;
import org.apache.arrow.vector.types.pojo.Field;

public final class ArrowVectorConverterUtil {
  private ArrowVectorConverterUtil() {}

  public static SnowflakeType getSnowflakeTypeFromFieldMetadata(Field field) {
    Map<String, String> customMeta = field.getMetadata();
    if (customMeta != null && customMeta.containsKey("logicalType")) {
      return SnowflakeType.valueOf(customMeta.get("logicalType"));
    }
    return null;
  }

  /**
   * Given an arrow vector (a single column in a single record batch), return an arrow vector
   * converter. Note: the converter is built on top of the arrow vector, so that arrow data can be
   * converted back to java data.
   *
   * <p>Arrow converter mappings for Snowflake fixed-point numbers:
   *
   * <pre>
   * -----------------------------------------------------------------------------------------
   * Max precision and scale    Converter
   * -----------------------------------------------------------------------------------------
   * number(3,0)                {@link TinyIntToFixedConverter}
   * number(3,2)                {@link TinyIntToScaledFixedConverter}
   * number(5,0)                {@link SmallIntToFixedConverter}
   * number(5,4)                {@link SmallIntToScaledFixedConverter}
   * number(10,0)               {@link IntToFixedConverter}
   * number(10,9)               {@link IntToScaledFixedConverter}
   * number(19,0)               {@link BigIntToFixedConverter}
   * number(19,18)              {@link BigIntToScaledFixedConverter}
   * number(38,37)              {@link DecimalToScaledFixedConverter}
   * -----------------------------------------------------------------------------------------
   * </pre>
   *
   * @param vector an arrow vector
   * @param context data conversion context
   * @param session SFBaseSession for purposes of logging
   * @param idx the index of the vector in its batch
   * @return A converter on top of the vector
   * @throws SnowflakeSQLException if error encountered
   */
  public static ArrowVectorConverter initConverter(
      ValueVector vector, DataConversionContext context, SFBaseSession session, int idx)
      throws SnowflakeSQLException {
    // arrow minor type
    Types.MinorType type = Types.getMinorTypeForArrowType(vector.getField().getType());

    // each column's metadata
    SnowflakeType st = getSnowflakeTypeFromFieldMetadata(vector.getField());
    if (type == Types.MinorType.DECIMAL) {
      // Note: Decimal vector is different from others
      return new DecimalToScaledFixedConverter(vector, idx, context);
    } else if (st != null) {
      switch (st) {
        case ANY:
        case CHAR:
        case TEXT:
        case VARIANT:
          return new VarCharConverter(vector, idx, context);

        case MAP:
          if (vector instanceof MapVector) {
            return new MapConverter((MapVector) vector, idx, context);
          } else {
            return new VarCharConverter(vector, idx, context);
          }

        case VECTOR:
          return new VectorTypeConverter((FixedSizeListVector) vector, idx, context);

        case ARRAY:
          if (vector instanceof ListVector) {
            return new ArrayConverter((ListVector) vector, idx, context);
          } else {
            return new VarCharConverter(vector, idx, context);
          }

        case OBJECT:
          if (vector instanceof StructVector) {
            return new StructConverter((StructVector) vector, idx, context);
          } else {
            return new VarCharConverter(vector, idx, context);
          }

        case BINARY:
          return new VarBinaryToBinaryConverter(vector, idx, context);

        case BOOLEAN:
          return new BitToBooleanConverter(vector, idx, context);

        case DATE:
          boolean getFormatDateWithTimeZone = false;
          if (context.getSession() != null) {
            getFormatDateWithTimeZone = context.getSession().getFormatDateWithTimezone();
          }
          return new DateConverter(vector, idx, context, getFormatDateWithTimeZone);

        case FIXED:
          String scaleStr = vector.getField().getMetadata().get("scale");
          int sfScale = Integer.parseInt(scaleStr);
          switch (type) {
            case TINYINT:
              if (sfScale == 0) {
                return new TinyIntToFixedConverter(vector, idx, context);
              } else {
                return new TinyIntToScaledFixedConverter(vector, idx, context, sfScale);
              }
            case SMALLINT:
              if (sfScale == 0) {
                return new SmallIntToFixedConverter(vector, idx, context);
              } else {
                return new SmallIntToScaledFixedConverter(vector, idx, context, sfScale);
              }
            case INT:
              if (sfScale == 0) {
                return new IntToFixedConverter(vector, idx, context);
              } else {
                return new IntToScaledFixedConverter(vector, idx, context, sfScale);
              }
            case BIGINT:
              if (sfScale == 0) {
                return new BigIntToFixedConverter(vector, idx, context);
              } else {
                return new BigIntToScaledFixedConverter(vector, idx, context, sfScale);
              }
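// ---------------------------------------------------------------------------
// Illustrative example of the FIXED dispatch above (a sketch, not normative):
// a NUMBER(10,2) column arrives as an Arrow INT vector whose field metadata
// carries scale=2, so the INT branch picks IntToScaledFixedConverter, while
// NUMBER(10,0) with the same physical type picks IntToFixedConverter. A
// hypothetical call site (`vector`, `rowIdx` are made-up locals):
//
//   ArrowVectorConverter c =
//       ArrowVectorConverterUtil.initConverter(vector, context, session, idx);
//   BigDecimal v = c.toBigDecimal(rowIdx);
// ---------------------------------------------------------------------------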
} break; case DECFLOAT: return new DecfloatToDecimalConverter(vector, idx, context); case INTERVAL_YEAR_MONTH: return new IntervalYearMonthToPeriodConverter(vector, idx, context); case INTERVAL_DAY_TIME: return new IntervalDayTimeToDurationConverter(vector, idx, context); case REAL: return new DoubleToRealConverter(vector, idx, context); case TIME: switch (type) { case INT: return new IntToTimeConverter(vector, idx, context); case BIGINT: return new BigIntToTimeConverter(vector, idx, context); default: throw new SnowflakeSQLLoggedException( session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "Unexpected Arrow Field for ", st.name()); } case TIMESTAMP_LTZ: if (vector.getField().getChildren().isEmpty()) { // case when the scale of the timestamp is equal or smaller than millisecs since epoch return new BigIntToTimestampLTZConverter(vector, idx, context); } else if (vector.getField().getChildren().size() == 2) { // case when the scale of the timestamp is larger than millisecs since epoch, e.g., // nanosecs return new TwoFieldStructToTimestampLTZConverter(vector, idx, context); } else { throw new SnowflakeSQLLoggedException( session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "Unexpected Arrow Field for ", st.name()); } case TIMESTAMP_NTZ: if (vector.getField().getChildren().isEmpty()) { // case when the scale of the timestamp is equal or smaller than 7 return new BigIntToTimestampNTZConverter(vector, idx, context); } else if (vector.getField().getChildren().size() == 2) { // when the timestamp is represent in two-field struct return new TwoFieldStructToTimestampNTZConverter(vector, idx, context); } else { throw new SnowflakeSQLLoggedException( session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "Unexpected Arrow Field for ", st.name()); } case TIMESTAMP_TZ: if (vector.getField().getChildren().size() == 2) { // case when the scale of the timestamp is equal or smaller than millisecs since epoch return new TwoFieldStructToTimestampTZConverter(vector, idx, context); } else if (vector.getField().getChildren().size() == 3) { // case when the scale of the timestamp is larger than millisecs since epoch, e.g., // nanosecs return new ThreeFieldStructToTimestampTZConverter(vector, idx, context); } else { throw new SnowflakeSQLLoggedException( session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "Unexpected SnowflakeType ", st.name()); } default: throw new SnowflakeSQLLoggedException( session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "Unexpected Arrow Field for ", st.name()); } } throw new SnowflakeSQLLoggedException( session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "Unexpected Arrow Field for ", type.toString()); } public static ArrowVectorConverter initConverter( FieldVector vector, DataConversionContext context, int columnIndex) throws SnowflakeSQLException { return initConverter(vector, context, context.getSession(), columnIndex); } public static Duration getDurationFromNanos(BigDecimal numNanos) { final BigDecimal nanoInSecond = BigDecimal.valueOf(1_000_000_000); int sign = numNanos.signum(); numNanos = numNanos.abs(); // Duration.ofSeconds() with passed in negative second value results in overflow // so instead we identify the sign of numNanos and use Duration.negated() accordingly Duration duration = Duration.ofSeconds( numNanos.divide(nanoInSecond, RoundingMode.FLOOR).longValueExact(), numNanos.remainder(nanoInSecond).longValueExact()); if (sign 
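// ---------------------------------------------------------------------------
// Worked example (illustrative) of the sign handling in getDurationFromNanos:
// for numNanos = -1_500_000_000, the absolute value splits into 1 second plus
// 500_000_000 nanos, and the sign is restored afterwards via negated():
//
//   Duration d =
//       ArrowVectorConverterUtil.getDurationFromNanos(
//           BigDecimal.valueOf(-1_500_000_000L));
//   // d equals Duration.ofSeconds(1, 500_000_000).negated(), i.e. PT-1.5S
// ---------------------------------------------------------------------------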
>= 0) { return duration; } else { return duration.negated(); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/arrow/BigIntToFixedConverter.java ================================================ package net.snowflake.client.internal.core.arrow; import java.math.BigDecimal; import java.nio.ByteBuffer; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.core.DataConversionContext; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import org.apache.arrow.vector.BigIntVector; import org.apache.arrow.vector.ValueVector; /** Data vector whose snowflake logical type is fixed while represented as a long value vector */ public class BigIntToFixedConverter extends AbstractArrowVectorConverter { /** Underlying vector that this converter will convert from */ protected BigIntVector bigIntVector; /** scale of the fixed value */ protected int sfScale; protected ByteBuffer byteBuf = ByteBuffer.allocate(BigIntVector.TYPE_WIDTH); /** * @param fieldVector ValueVector * @param columnIndex column index * @param context DataConversionContext */ public BigIntToFixedConverter( ValueVector fieldVector, int columnIndex, DataConversionContext context) { super( String.format( "%s(%s,%s)", SnowflakeType.FIXED, fieldVector.getField().getMetadata().get("precision"), fieldVector.getField().getMetadata().get("scale")), fieldVector, columnIndex, context); this.bigIntVector = (BigIntVector) fieldVector; } @Override public byte[] toBytes(int index) { if (isNull(index)) { return null; } else { byteBuf.putLong(0, getLong(index)); return byteBuf.array(); } } @Override public boolean toBoolean(int index) throws SFException { long longVal = toLong(index); if (longVal == 0) { return false; } else if (longVal == 1) { return true; } else { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.BOOLEAN_STR, longVal); } } @Override public byte toByte(int index) throws SFException { long longVal = toLong(index); byte byteVal = (byte) longVal; if (byteVal == longVal) { return byteVal; } else { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.BYTE_STR, longVal); } } @Override public short toShort(int index) throws SFException { long longVal = toLong(index); short shortVal = (short) longVal; if (shortVal == longVal) { return shortVal; } else { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.SHORT_STR, longVal); } } @Override public int toInt(int index) throws SFException { long longVal = toLong(index); int intVal = (int) longVal; if (intVal == longVal) { return intVal; } else { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.INT_STR, longVal); } } protected long getLong(int index) { return bigIntVector.getDataBuffer().getLong(index * BigIntVector.TYPE_WIDTH); } @Override public long toLong(int index) throws SFException { if (bigIntVector.isNull(index)) { return 0; } else { return getLong(index); } } @Override public float toFloat(int index) throws SFException { return toLong(index); } @Override public double toDouble(int index) throws SFException { return toLong(index); } @Override public BigDecimal toBigDecimal(int index) { if (bigIntVector.isNull(index)) { return null; } else { return BigDecimal.valueOf(getLong(index), sfScale); } } @Override public Object toObject(int index) throws 
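// ---------------------------------------------------------------------------
// Illustrative note on the exact-narrowing pattern used by this converter: a
// stored long of 300 narrows to the byte 44 (300 != 44), so toByte(...)
// throws INVALID_VALUE_CONVERT rather than silently truncating; toShort and
// toInt apply the same round-trip check at their respective widths.
// ---------------------------------------------------------------------------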
SFException { if (bigIntVector.isNull(index)) { return null; } else if (!shouldTreatDecimalAsInt()) { return BigDecimal.valueOf(getLong(index), sfScale); } return getLong(index); } @Override public String toString(int index) { return isNull(index) ? null : Long.toString(getLong(index)); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/arrow/BigIntToScaledFixedConverter.java ================================================ package net.snowflake.client.internal.core.arrow; import java.math.BigDecimal; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.core.DataConversionContext; import net.snowflake.client.internal.core.SFException; import org.apache.arrow.vector.ValueVector; /** * Data vector whose snowflake logical type is fixed while represented as a long value vector with * scale */ public class BigIntToScaledFixedConverter extends BigIntToFixedConverter { public BigIntToScaledFixedConverter( ValueVector fieldVector, int columnIndex, DataConversionContext context, int scale) { super(fieldVector, columnIndex, context); logicalTypeStr = String.format( "%s(%s,%s)", SnowflakeType.FIXED, fieldVector.getField().getMetadata().get("precision"), fieldVector.getField().getMetadata().get("scale")); sfScale = scale; } @Override public float toFloat(int index) throws SFException { return (float) toDouble(index); } @Override public double toDouble(int index) throws SFException { if (isNull(index)) { return 0; } if (sfScale > 9) { return toBigDecimal(index).doubleValue(); } int scale = sfScale; double res = getLong(index); res = res / ArrowResultUtil.powerOfTen(scale); return res; } @Override public short toShort(int index) throws SFException { if (isNull(index)) { return 0; } BigDecimal val = toBigDecimal(index); throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Short", val.toPlainString()); } @Override public int toInt(int index) throws SFException { if (isNull(index)) { return 0; } BigDecimal val = toBigDecimal(index); throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Int", val.toPlainString()); } @Override public long toLong(int index) throws SFException { if (isNull(index)) { return 0; } BigDecimal val = toBigDecimal(index); throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Long", val.toPlainString()); } @Override public Object toObject(int index) { return toBigDecimal(index); } @Override public String toString(int index) { return isNull(index) ? 
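// ---------------------------------------------------------------------------
// Worked example (illustrative) for this scaled converter: a NUMBER(10,2)
// value stored as the long 12345 with sfScale = 2 renders through
// BigDecimal.valueOf(12345, 2) as "123.45"; toDouble(...) above divides by
// 10^2 directly for scales <= 9 and falls back to BigDecimal beyond that.
// ---------------------------------------------------------------------------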
null : BigDecimal.valueOf(getLong(index), sfScale).toPlainString(); } @Override public boolean toBoolean(int index) throws SFException { if (isNull(index)) { return false; } BigDecimal val = toBigDecimal(index); if (val.compareTo(BigDecimal.ZERO) == 0) { return false; } else if (val.compareTo(BigDecimal.ONE) == 0) { return true; } else { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Boolean", val.toPlainString()); } } @Override public byte[] toBytes(int index) { if (isNull(index)) { return null; } else { byteBuf.putLong(0, getLong(index)); return byteBuf.array(); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/arrow/BigIntToTimeConverter.java ================================================ package net.snowflake.client.internal.core.arrow; import java.nio.ByteBuffer; import java.sql.Time; import java.sql.Timestamp; import java.util.TimeZone; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.core.DataConversionContext; import net.snowflake.client.internal.core.ResultUtil; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.jdbc.SnowflakeTimestampWithTimezone; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import net.snowflake.common.core.SFTime; import org.apache.arrow.vector.BigIntVector; import org.apache.arrow.vector.ValueVector; /** BigInt to Time type converter. */ public class BigIntToTimeConverter extends AbstractArrowVectorConverter { private BigIntVector bigIntVector; protected ByteBuffer byteBuf = ByteBuffer.allocate(BigIntVector.TYPE_WIDTH); /** * @param fieldVector ValueVector * @param columnIndex column index * @param context DataConversionContext */ public BigIntToTimeConverter( ValueVector fieldVector, int columnIndex, DataConversionContext context) { super(SnowflakeType.TIME.name(), fieldVector, columnIndex, context); this.bigIntVector = (BigIntVector) fieldVector; } /** * parse long into SFTime * * @param index * @return */ private SFTime toSFTime(int index) { long val = bigIntVector.getDataBuffer().getLong(index * BigIntVector.TYPE_WIDTH); return SFTime.fromFractionalSeconds(val, context.getScale(columnIndex)); } @Override public Time toTime(int index) throws SFException { if (isNull(index)) { return null; } else { long val = bigIntVector.getDataBuffer().getLong(index * BigIntVector.TYPE_WIDTH); return getTime(val, context.getScale(columnIndex), useSessionTimezone); } } /** * Return the long value as a Time object. * * @param value long value to represent as Time * @param scale the scale * @param useSessionTimezone boolean indicating use of session timezone * @return Time object representing the value * @throws SFException invalid data conversion */ public static Time getTime(long value, int scale, boolean useSessionTimezone) throws SFException { SFTime sfTime = SFTime.fromFractionalSeconds(value, scale); Time ts = new Time(sfTime.getFractionalSeconds(ResultUtil.DEFAULT_SCALE_OF_SFTIME_FRACTION_SECONDS)); if (useSessionTimezone) { ts = SnowflakeUtil.getTimeInSessionTimezone( SnowflakeUtil.getSecondsFromMillis(ts.getTime()), sfTime.getNanosecondsWithinSecond()); } return ts; } @Override public String toString(int index) throws SFException { if (context.getTimeFormatter() == null) { throw new SFException(ErrorCode.INTERNAL_ERROR, "missing time formatter"); } return isNull(index) ? 
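// ---------------------------------------------------------------------------
// Worked example (illustrative) of the TIME encoding handled here: TIME
// '12:34:56.123' at scale 3 arrives as the long 45_296_123 in fractional
// seconds; SFTime.fromFractionalSeconds(45_296_123, 3) yields 45_296 whole
// seconds (12:34:56) plus 123 ms, which toTime(...) and the formatter render.
// ---------------------------------------------------------------------------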
null : ResultUtil.getSFTimeAsString( toSFTime(index), context.getScale(columnIndex), context.getTimeFormatter()); } @Override public Object toObject(int index) throws SFException { return isNull(index) ? null : toTime(index); } @Override public Timestamp toTimestamp(int index, TimeZone tz) throws SFException { if (isNull(index)) { return null; } if (useSessionTimezone) { SFTime sfTime = toSFTime(index); return new SnowflakeTimestampWithTimezone( sfTime.getFractionalSeconds(ResultUtil.DEFAULT_SCALE_OF_SFTIME_FRACTION_SECONDS), sfTime.getNanosecondsWithinSecond(), TimeZone.getTimeZone("UTC")); } return new Timestamp(toTime(index).getTime()); } @Override public boolean toBoolean(int index) throws SFException { if (isNull(index)) { return false; } Time val = toTime(index); throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.BOOLEAN_STR, val); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/arrow/BigIntToTimestampLTZConverter.java ================================================ package net.snowflake.client.internal.core.arrow; import java.nio.ByteBuffer; import java.sql.Date; import java.sql.Time; import java.sql.Timestamp; import java.util.TimeZone; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.core.DataConversionContext; import net.snowflake.client.internal.core.ResultUtil; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import org.apache.arrow.vector.BigIntVector; import org.apache.arrow.vector.ValueVector; /** converter from BigInt (Long) to Timestamp_LTZ */ public class BigIntToTimestampLTZConverter extends AbstractArrowVectorConverter { private BigIntVector bigIntVector; private ByteBuffer byteBuf = ByteBuffer.allocate(BigIntVector.TYPE_WIDTH); /** * @param fieldVector ValueVector * @param columnIndex column index * @param context DataConversionContext */ public BigIntToTimestampLTZConverter( ValueVector fieldVector, int columnIndex, DataConversionContext context) { super(SnowflakeType.TIMESTAMP_LTZ.name(), fieldVector, columnIndex, context); this.bigIntVector = (BigIntVector) fieldVector; } @Override public String toString(int index) throws SFException { if (context.getTimestampLTZFormatter() == null) { throw new SFException(ErrorCode.INTERNAL_ERROR, "missing timestamp LTZ formatter"); } Timestamp ts = toTimestamp(index, TimeZone.getDefault()); return ts == null ? null : context .getTimestampLTZFormatter() .format(ts, context.getTimeZone(), context.getScale(columnIndex)); } @Override public byte[] toBytes(int index) { if (isNull(index)) { return null; } else { byteBuf.putLong(0, bigIntVector.getDataBuffer().getLong(index * BigIntVector.TYPE_WIDTH)); return byteBuf.array(); } } @Override public Object toObject(int index) throws SFException { return toTimestamp(index, TimeZone.getDefault()); } @Override public Timestamp toTimestamp(int index, TimeZone tz) throws SFException { return isNull(index) ? null : getTimestamp(index, tz); } private Timestamp getTimestamp(int index, TimeZone tz) throws SFException { long val = bigIntVector.getDataBuffer().getLong(index * BigIntVector.TYPE_WIDTH); int scale = context.getScale(columnIndex); return getTimestamp(val, scale, sessionTimeZone, useSessionTimezone); } @Override public Date toDate(int index, TimeZone tz, boolean useDateFormat) throws SFException { return isNull(index) ? 
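// ---------------------------------------------------------------------------
// Worked example (illustrative) of the epoch/scale decoding used by this
// converter: a TIMESTAMP_LTZ(3) value arrives as epoch = 1_700_000_000_123
// with scale 3; ArrowResultUtil.toJavaTimestamp splits it into
// seconds = 1_700_000_000 and fraction = 123_000_000 ns before building the
// java.sql.Timestamp.
// ---------------------------------------------------------------------------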
null : new Date(getTimestamp(index, TimeZone.getDefault()).getTime()); } @Override public Time toTime(int index) throws SFException { Timestamp ts = toTimestamp(index, TimeZone.getDefault()); return ts == null ? null : new Time(ts.getTime()); } @Override public boolean toBoolean(int index) throws SFException { if (isNull(index)) { return false; } Timestamp val = toTimestamp(index, TimeZone.getDefault()); throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.BOOLEAN_STR, val); } /** * Use {@link #getTimestamp(long, int, TimeZone, boolean)} * * @param val epoch * @param scale scale * @return Timestamp value without timezone take into account * @throws SFException if exception encountered */ @Deprecated public static Timestamp getTimestamp(long val, int scale) throws SFException { Timestamp ts = ArrowResultUtil.toJavaTimestamp(val, scale); return ResultUtil.adjustTimestamp(ts); } public static Timestamp getTimestamp( long epoch, int scale, TimeZone sessionTimeZone, boolean useSessionTimezone) throws SFException { return ResultUtil.adjustTimestamp( ArrowResultUtil.toJavaTimestamp(epoch, scale, sessionTimeZone, useSessionTimezone)); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/arrow/BigIntToTimestampNTZConverter.java ================================================ package net.snowflake.client.internal.core.arrow; import java.nio.ByteBuffer; import java.sql.Date; import java.sql.Time; import java.sql.Timestamp; import java.util.TimeZone; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.core.DataConversionContext; import net.snowflake.client.internal.core.ResultUtil; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.jdbc.SnowflakeTimeWithTimezone; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import org.apache.arrow.vector.BigIntVector; import org.apache.arrow.vector.ValueVector; /** converter from BigInt (Long) to Timestamp_NTZ */ public class BigIntToTimestampNTZConverter extends AbstractArrowVectorConverter { private BigIntVector bigIntVector; private static final TimeZone NTZ = TimeZone.getTimeZone("UTC"); private ByteBuffer byteBuf = ByteBuffer.allocate(BigIntVector.TYPE_WIDTH); /** * @param fieldVector ValueVector * @param columnIndex column index * @param context DataConversionContext */ public BigIntToTimestampNTZConverter( ValueVector fieldVector, int columnIndex, DataConversionContext context) { super(SnowflakeType.TIMESTAMP_NTZ.name(), fieldVector, columnIndex, context); this.bigIntVector = (BigIntVector) fieldVector; } @Override public String toString(int index) throws SFException { if (context.getTimestampNTZFormatter() == null) { throw new SFException(ErrorCode.INTERNAL_ERROR, "missing timestamp NTZ formatter"); } Timestamp ts = isNull(index) ? null : getTimestamp(index, TimeZone.getDefault(), true); return ts == null ? 
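// ---------------------------------------------------------------------------
// Illustrative sketch of the NTZ wall-clock handling below: when the session
// honors the client timezone for TIMESTAMP_NTZ, getTimestamp(...) shifts the
// UTC-based value into the target zone via ArrowResultUtil.moveToTimeZone.
// Hypothetical call (the argument values are made up):
//
//   Timestamp ts = BigIntToTimestampNTZConverter.getTimestamp(
//       1_700_000_000_000L, TimeZone.getTimeZone("America/Los_Angeles"),
//       3, true, false);
// ---------------------------------------------------------------------------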
null : context .getTimestampNTZFormatter() .format(ts, TimeZone.getTimeZone("UTC"), context.getScale(columnIndex)); } @Override public byte[] toBytes(int index) { if (isNull(index)) { return null; } else { byteBuf.putLong(0, bigIntVector.getDataBuffer().getLong(index * BigIntVector.TYPE_WIDTH)); return byteBuf.array(); } } @Override public Object toObject(int index) throws SFException { return toTimestamp(index, TimeZone.getDefault()); } @Override public Timestamp toTimestamp(int index, TimeZone tz) throws SFException { return isNull(index) ? null : getTimestamp(index, tz, false); } private Timestamp getTimestamp(int index, TimeZone tz, boolean fromToString) throws SFException { if (tz == null) { tz = TimeZone.getDefault(); } long val = bigIntVector.getDataBuffer().getLong(index * BigIntVector.TYPE_WIDTH); int scale = context.getScale(columnIndex); return getTimestamp(val, tz, scale, context.getHonorClientTZForTimestampNTZ(), fromToString); } @Override public Date toDate(int index, TimeZone tz, boolean dateFormat) throws SFException { return isNull(index) ? null : new Date(getTimestamp(index, TimeZone.getDefault(), false).getTime()); } @Override public Time toTime(int index) throws SFException { Timestamp ts = toTimestamp(index, TimeZone.getDefault()); return ts == null ? null : new SnowflakeTimeWithTimezone(ts.getTime(), ts.getNanos(), useSessionTimezone); } @Override public boolean toBoolean(int index) throws SFException { if (isNull(index)) { return false; } Timestamp val = toTimestamp(index, TimeZone.getDefault()); throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.BOOLEAN_STR, val); } public static Timestamp getTimestamp( long val, TimeZone tz, int scale, boolean honorClientTZForTimestampNTZ, boolean fromToString) throws SFException { if (tz == null) { tz = TimeZone.getDefault(); } Timestamp ts = ArrowResultUtil.toJavaTimestamp(val, scale); // Note: honorClientTZForTimestampNTZ is not enabled for toString method if (!fromToString && honorClientTZForTimestampNTZ) { ts = ArrowResultUtil.moveToTimeZone(ts, NTZ, tz); } return ResultUtil.adjustTimestamp(ts); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/arrow/BitToBooleanConverter.java ================================================ package net.snowflake.client.internal.core.arrow; import java.math.BigDecimal; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.core.DataConversionContext; import net.snowflake.client.internal.core.SFException; import org.apache.arrow.vector.BitVector; import org.apache.arrow.vector.ValueVector; /** Convert Arrow BitVector to Boolean */ public class BitToBooleanConverter extends AbstractArrowVectorConverter { private BitVector bitVector; /** * @param fieldVector ValueVector * @param columnIndex column index * @param context DataConversionContext */ public BitToBooleanConverter( ValueVector fieldVector, int columnIndex, DataConversionContext context) { super(SnowflakeType.BOOLEAN.name(), fieldVector, columnIndex, context); this.bitVector = (BitVector) fieldVector; } private int getBit(int index) { // read a bit from the bitVector // first find the byte value final int byteIndex = index >> 3; final byte b = bitVector.getDataBuffer().getByte(byteIndex); // then get the bit value final int bitIndex = index & 7; return (b >> bitIndex) & 0x01; } @Override public boolean toBoolean(int index) { if (isNull(index)) { return false; } else { return getBit(index) != 0; } } @Override 
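// ---------------------------------------------------------------------------
// Worked example (illustrative) of getBit(...) above: for index 10, the byte
// offset is 10 >> 3 = 1 and the bit offset is 10 & 7 = 2, so the boolean is
// bit 2 of the second byte in the bit-packed data buffer.
// ---------------------------------------------------------------------------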
public byte[] toBytes(int index) { if (isNull(index)) { return null; } else if (toBoolean(index)) { return new byte[] {1}; } else { return new byte[] {0}; } } @Override public Object toObject(int index) { return isNull(index) ? null : toBoolean(index); } @Override public String toString(int index) { return isNull(index) ? null : toBoolean(index) ? "TRUE" : "FALSE"; } @Override public short toShort(int rowIndex) throws SFException { return (short) (toBoolean(rowIndex) ? 1 : 0); } @Override public int toInt(int rowIndex) throws SFException { return toBoolean(rowIndex) ? 1 : 0; } @Override public long toLong(int rowIndex) throws SFException { return toBoolean(rowIndex) ? 1 : 0; } @Override public float toFloat(int rowIndex) throws SFException { return toBoolean(rowIndex) ? 1 : 0; } @Override public double toDouble(int rowIndex) throws SFException { return toBoolean(rowIndex) ? 1 : 0; } @Override public BigDecimal toBigDecimal(int rowIndex) throws SFException { return isNull(rowIndex) ? null : toBoolean(rowIndex) ? BigDecimal.ONE : BigDecimal.ZERO; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/arrow/DateConverter.java ================================================ package net.snowflake.client.internal.core.arrow; import java.math.BigDecimal; import java.sql.Date; import java.sql.Timestamp; import java.util.TimeZone; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.core.DataConversionContext; import net.snowflake.client.internal.core.ResultUtil; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import org.apache.arrow.vector.DateDayVector; import org.apache.arrow.vector.IntVector; import org.apache.arrow.vector.ValueVector; /** Convert Arrow DateDayVector to Date */ public class DateConverter extends AbstractArrowVectorConverter { private DateDayVector dateVector; private static TimeZone timeZoneUTC = TimeZone.getTimeZone("UTC"); private boolean useDateFormat; @Deprecated public DateConverter(ValueVector fieldVector, int columnIndex, DataConversionContext context) { super(SnowflakeType.DATE.name(), fieldVector, columnIndex, context); this.dateVector = (DateDayVector) fieldVector; this.useDateFormat = false; } /** * @param fieldVector ValueVector * @param columnIndex column index * @param context DataConversionContext * @param useDateFormat boolean indicates whether to use session timezone */ public DateConverter( ValueVector fieldVector, int columnIndex, DataConversionContext context, boolean useDateFormat) { super(SnowflakeType.DATE.name(), fieldVector, columnIndex, context); this.dateVector = (DateDayVector) fieldVector; this.useDateFormat = useDateFormat; } private Date getDate(int index, TimeZone jvmTz, boolean useDateFormat) throws SFException { if (isNull(index)) { return null; } else { int val = dateVector.getDataBuffer().getInt(index * IntVector.TYPE_WIDTH); return getDate(val, jvmTz, sessionTimeZone, useDateFormat); } } @Override public Date toDate(int index, TimeZone jvmTz, boolean useDateFormat) throws SFException { return getDate(index, jvmTz, useDateFormat); } @Override public int toInt(int index) { if (isNull(index)) { return 0; } else { int val = dateVector.getDataBuffer().getInt(index * IntVector.TYPE_WIDTH); return val; } } @Override public short toShort(int index) throws SFException { try { return (short) toInt(index); } catch (ClassCastException ex) { 
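// ---------------------------------------------------------------------------
// Worked example (illustrative) of the DATE encoding this class decodes: the
// vector stores days since the Unix epoch, so a stored int of 19_000 means
// LocalDate.ofEpochDay(19_000), i.e. 2022-01-08, and toDate(...) returns the
// matching java.sql.Date.
// ---------------------------------------------------------------------------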
throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.SHORT_STR, toInt(index)); } } @Override public long toLong(int index) { return toInt(index); } @Override public float toFloat(int index) { return toInt(index); } @Override public double toDouble(int index) { return toInt(index); } @Override public BigDecimal toBigDecimal(int index) { if (isNull(index)) { return null; } return BigDecimal.valueOf(toInt(index)); } @Override public Timestamp toTimestamp(int index, TimeZone tz) throws SFException { boolean useDateFormat = true; if (this.context.getSession() != null) { useDateFormat = getUseDateFormat(true); } Date date = toDate(index, tz, useDateFormat); if (date == null) { return null; } else { return new Timestamp(date.getTime()); } } @Override public String toString(int index) throws SFException { if (context.getDateFormatter() == null) { throw new SFException(ErrorCode.INTERNAL_ERROR, "missing date formatter"); } Date date = getDate(index, timeZoneUTC, getUseDateFormat(false)); return date == null ? null : ResultUtil.getDateAsString(date, context.getDateFormatter()); } @Override public Object toObject(int index) throws SFException { return toDate(index, TimeZone.getDefault(), getUseDateFormat(false)); } @Override public boolean toBoolean(int index) throws SFException { if (isNull(index)) { return false; } Date val = toDate(index, TimeZone.getDefault(), getUseDateFormat(false)); throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.BOOLEAN_STR, val); } public static Date getDate( int value, TimeZone jvmTz, TimeZone sessionTimeZone, boolean useDateFormat) throws SFException { if (jvmTz == null || sessionTimeZone == null || !useDateFormat) { return ArrowResultUtil.getDate(value); } // Note: use default time zone to match with current getDate() behavior return ArrowResultUtil.getDate(value, jvmTz, sessionTimeZone); } private Boolean getUseDateFormat(Boolean defaultValue) { return this.context.getSession() == null ? defaultValue : (this.context.getSession().getDefaultFormatDateWithTimezone() ? 
defaultValue : this.useDateFormat); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/arrow/DecfloatToDecimalConverter.java ================================================ package net.snowflake.client.internal.core.arrow; import java.math.BigDecimal; import java.math.BigInteger; import java.util.Map; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.core.DataConversionContext; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import org.apache.arrow.vector.ValueVector; import org.apache.arrow.vector.complex.StructVector; class DecfloatToDecimalConverter extends AbstractArrowVectorConverter { private StructVector vector; public DecfloatToDecimalConverter(ValueVector vector, int idx, DataConversionContext context) { super(SnowflakeType.DECFLOAT.name(), vector, idx, context); this.vector = (StructVector) vector; } @Override public BigDecimal toBigDecimal(int index) { if (isNull(index)) { return null; } Map value = (Map) vector.getObject(index); byte[] significandBytes = (byte[]) value.get("significand"); short exponent = (short) value.get("exponent"); BigInteger significand = new BigInteger(significandBytes); return new BigDecimal(significand, -exponent); } @Override public double toDouble(int rowIndex) throws SFException { if (isNull(rowIndex)) { return 0; } return toBigDecimal(rowIndex).doubleValue(); } @Override public float toFloat(int rowIndex) throws SFException { if (isNull(rowIndex)) { return 0; } return toBigDecimal(rowIndex).floatValue(); } @Override public short toShort(int rowIndex) throws SFException { if (isNull(rowIndex)) { return 0; } BigDecimal bigDecimal = toBigDecimal(rowIndex); if (bigDecimal.scale() == 0) { short shortVal = bigDecimal.shortValue(); if (shortVal == bigDecimal.longValue()) { return shortVal; } else { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Short", bigDecimal.toPlainString()); } } else { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Short", bigDecimal.toPlainString()); } } @Override public int toInt(int rowIndex) throws SFException { if (isNull(rowIndex)) { return 0; } BigDecimal bigDecimal = toBigDecimal(rowIndex); if (bigDecimal.scale() == 0) { int intVal = bigDecimal.intValue(); if (intVal == bigDecimal.longValue()) { return intVal; } else { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Integer", bigDecimal.toPlainString()); } } else { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Integer", bigDecimal.toPlainString()); } } @Override public long toLong(int rowIndex) throws SFException { if (isNull(rowIndex)) { return 0; } BigDecimal bigDecimal = toBigDecimal(rowIndex); if (bigDecimal.scale() == 0) { BigInteger intVal = bigDecimal.toBigIntegerExact(); if (intVal.bitLength() <= 63) { return intVal.longValue(); } else { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Long", bigDecimal.toPlainString()); } } else { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Long", bigDecimal.toPlainString()); } } @Override public Object toObject(int index) throws SFException { return toBigDecimal(index); } @Override public String toString(int index) throws SFException { if (isNull(index)) { return null; } return toBigDecimal(index).toEngineeringString(); } @Override public byte[] toBytes(int index) 
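// ---------------------------------------------------------------------------
// Worked example (illustrative) of the DECFLOAT struct decoding above: the
// value 123.45 arrives as significand bytes encoding 12345 with exponent -2,
// and new BigDecimal(new BigInteger("12345"), 2) reconstructs 123.45 (the
// BigDecimal scale is the negated exponent).
// ---------------------------------------------------------------------------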
throws SFException { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.BYTES_STR, null); } @Override public boolean toBoolean(int rowIndex) throws SFException { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.BOOLEAN_STR, null); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/arrow/DecimalToScaledFixedConverter.java ================================================ package net.snowflake.client.internal.core.arrow; import java.math.BigDecimal; import java.time.Duration; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.core.DataConversionContext; import net.snowflake.client.internal.core.SFException; import org.apache.arrow.vector.DecimalVector; import org.apache.arrow.vector.ValueVector; /** * Data vector whose snowflake logical type is fixed while represented as a BigDecimal value vector */ public class DecimalToScaledFixedConverter extends AbstractArrowVectorConverter { protected DecimalVector decimalVector; /** * @param fieldVector ValueVector * @param vectorIndex vector index * @param context DataConversionContext */ public DecimalToScaledFixedConverter( ValueVector fieldVector, int vectorIndex, DataConversionContext context) { super( String.format( "%s(%s,%s)", SnowflakeType.FIXED, fieldVector.getField().getMetadata().get("precision"), fieldVector.getField().getMetadata().get("scale")), fieldVector, vectorIndex, context); decimalVector = (DecimalVector) fieldVector; } @Override public byte[] toBytes(int index) { if (isNull(index)) { return null; } else { return toBigDecimal(index).toBigInteger().toByteArray(); } } @Override public byte toByte(int index) throws SFException { if (isNull(index)) { return 0; } BigDecimal bigDecimal = toBigDecimal(index); if (bigDecimal.scale() == 0) { byte byteVal = bigDecimal.byteValue(); if (byteVal == bigDecimal.longValue()) { return byteVal; } else { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Byte", bigDecimal.toPlainString()); } } else { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Byte", bigDecimal.toPlainString()); } } @Override public short toShort(int index) throws SFException { if (isNull(index)) { return 0; } BigDecimal bigDecimal = toBigDecimal(index); if (bigDecimal.scale() == 0) { short shortValue = bigDecimal.shortValue(); if (bigDecimal.compareTo(BigDecimal.valueOf((long) shortValue)) == 0) { return shortValue; } else { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Short", bigDecimal.toPlainString()); } } else { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Short", bigDecimal.toPlainString()); } } @Override public int toInt(int index) throws SFException { if (isNull(index)) { return 0; } BigDecimal bigDecimal = toBigDecimal(index); if (bigDecimal.scale() == 0) { int intValue = bigDecimal.intValue(); if (bigDecimal.compareTo(BigDecimal.valueOf((long) intValue)) == 0) { return intValue; } else { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Int", bigDecimal.toPlainString()); } } else { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Int", bigDecimal.toPlainString()); } } @Override public long toLong(int index) throws SFException { if (isNull(index)) { return 0; } BigDecimal bigDecimal = toBigDecimal(index); if (bigDecimal.scale() == 0) { long longValue = 
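// ---------------------------------------------------------------------------
// Illustrative example of the exactness check in this toLong(...): for the
// value 9_223_372_036_854_775_808 (2^63, one past Long.MAX_VALUE),
// longValue() wraps around, the compareTo round-trip check fails, and the
// converter throws INVALID_VALUE_CONVERT instead of returning a wrapped long.
// ---------------------------------------------------------------------------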
bigDecimal.longValue(); if (bigDecimal.compareTo(BigDecimal.valueOf(longValue)) == 0) { return longValue; } else { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Long", bigDecimal.toPlainString()); } } else { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Long", bigDecimal.toPlainString()); } } @Override public float toFloat(int index) throws SFException { if (isNull(index)) { return 0; } BigDecimal bigDecimal = toBigDecimal(index); return bigDecimal.floatValue(); } @Override public double toDouble(int index) throws SFException { if (isNull(index)) { return 0; } BigDecimal bigDecimal = toBigDecimal(index); return bigDecimal.doubleValue(); } @Override public BigDecimal toBigDecimal(int index) { return decimalVector.getObject(index); } @Override public Object toObject(int index) throws SFException { return toBigDecimal(index); } @Override public String toString(int index) { BigDecimal bigDecimal = toBigDecimal(index); return bigDecimal == null ? null : bigDecimal.toPlainString(); } @Override public boolean toBoolean(int index) throws SFException { if (isNull(index)) { return false; } BigDecimal val = toBigDecimal(index); if (val.compareTo(BigDecimal.ZERO) == 0) { return false; } else if (val.compareTo(BigDecimal.ONE) == 0) { return true; } else { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Boolean", val.toPlainString()); } } @Override public Duration toDuration(int index) throws SFException { if (isNull(index)) { return null; } BigDecimal numNanos = toBigDecimal(index); try { return ArrowVectorConverterUtil.getDurationFromNanos(numNanos); } catch (ArithmeticException e) { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Duration", numNanos.toPlainString()); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/arrow/DoubleToRealConverter.java ================================================ package net.snowflake.client.internal.core.arrow; import java.math.BigDecimal; import java.nio.ByteBuffer; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.core.DataConversionContext; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import org.apache.arrow.vector.Float8Vector; import org.apache.arrow.vector.ValueVector; /** Convert from Arrow Float8Vector to Real. */ public class DoubleToRealConverter extends AbstractArrowVectorConverter { private Float8Vector float8Vector; private ByteBuffer byteBuf = ByteBuffer.allocate(Float8Vector.TYPE_WIDTH); /** * @param fieldVector ValueVector * @param columnIndex column index * @param context DataConversionContext */ public DoubleToRealConverter( ValueVector fieldVector, int columnIndex, DataConversionContext context) { super(SnowflakeType.REAL.name(), fieldVector, columnIndex, context); this.float8Vector = (Float8Vector) fieldVector; } @Override public double toDouble(int index) { if (float8Vector.isNull(index)) { return 0; } else { return float8Vector.getDataBuffer().getDouble(index * Float8Vector.TYPE_WIDTH); } } @Override public byte[] toBytes(int index) { if (isNull(index)) { return null; } else { byteBuf.putDouble(0, toDouble(index)); return byteBuf.array(); } } @Override public float toFloat(int index) { return (float) toDouble(index); } @Override public Object toObject(int index) { return isNull(index) ? 
null : toDouble(index); } @Override public String toString(int index) { return isNull(index) ? null : String.valueOf(toDouble(index)); } @Override public boolean toBoolean(int index) throws SFException { if (isNull(index)) { return false; } double val = toDouble(index); throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.BOOLEAN_STR, val); } @Override public short toShort(int rowIndex) throws SFException { try { if (isNull(rowIndex)) { return 0; } else { return (short) toDouble(rowIndex); } } catch (ClassCastException ex) { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.SHORT_STR, toObject(rowIndex)); } } @Override public int toInt(int rowIndex) throws SFException { try { if (isNull(rowIndex)) { return 0; } else { return (int) toDouble(rowIndex); } } catch (ClassCastException ex) { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.INT_STR, toObject(rowIndex)); } } @Override public long toLong(int rowIndex) throws SFException { try { if (isNull(rowIndex)) { return 0; } else { return (long) toDouble(rowIndex); } } catch (ClassCastException ex) { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.LONG_STR, toObject(rowIndex)); } } @Override public BigDecimal toBigDecimal(int rowIndex) throws SFException { if (isNull(rowIndex)) { return null; } return BigDecimal.valueOf(toDouble(rowIndex)); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/arrow/IntToFixedConverter.java ================================================ package net.snowflake.client.internal.core.arrow; import java.math.BigDecimal; import java.nio.ByteBuffer; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.core.DataConversionContext; import net.snowflake.client.internal.core.SFException; import org.apache.arrow.vector.IntVector; import org.apache.arrow.vector.ValueVector; /** Data vector whose snowflake logical type is fixed while represented as a int value vector */ public class IntToFixedConverter extends AbstractArrowVectorConverter { protected IntVector intVector; protected int sfScale; protected ByteBuffer byteBuf = ByteBuffer.allocate(IntVector.TYPE_WIDTH); /** * @param fieldVector ValueVector * @param columnIndex column index * @param context DataConversionContext */ public IntToFixedConverter( ValueVector fieldVector, int columnIndex, DataConversionContext context) { super( String.format( "%s(%s,%s)", SnowflakeType.FIXED, fieldVector.getField().getMetadata().get("precision"), fieldVector.getField().getMetadata().get("scale")), fieldVector, columnIndex, context); this.intVector = (IntVector) fieldVector; } @Override public byte[] toBytes(int index) throws SFException { if (isNull(index)) { return null; } else { byteBuf.putInt(0, getInt(index)); return byteBuf.array(); } } @Override public byte toByte(int index) throws SFException { int intVal = toInt(index); byte byteVal = (byte) intVal; if (byteVal == intVal) { return byteVal; } else { throw new SFException(ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "byte", intVal); } } @Override public short toShort(int index) throws SFException { int intVal = toInt(index); short shortVal = (short) intVal; if (shortVal == intVal) { return shortVal; } else { throw new SFException(ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "short", intVal); } } protected int getInt(int index) throws 
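/*
 * Aside (editor's sketch, not driver code): DoubleToRealConverter above reads the
 * value directly from the Arrow data buffer rather than calling Float8Vector.get().
 * Fixed-width vectors lay values out back to back, so element i starts at byte
 * offset i * TYPE_WIDTH. A self-contained illustration of the same access pattern:
 *
 *   import org.apache.arrow.memory.RootAllocator;
 *   import org.apache.arrow.vector.Float8Vector;
 *
 *   try (RootAllocator allocator = new RootAllocator();
 *       Float8Vector vec = new Float8Vector("real", allocator)) {
 *     vec.allocateNew(2);
 *     vec.setSafe(0, 3.25);
 *     vec.setSafe(1, -1.5);
 *     vec.setValueCount(2);
 *     // Raw read of element 1 (TYPE_WIDTH is 8 bytes for Float8Vector):
 *     double second = vec.getDataBuffer().getDouble(1 * Float8Vector.TYPE_WIDTH); // -1.5
 *   }
 *
 * The raw read skips the vector's own null check, which is safe here only because
 * the converter calls isNull(index) first.
 */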
SFException { return intVector.getDataBuffer().getInt(index * IntVector.TYPE_WIDTH); } @Override public int toInt(int index) throws SFException { if (intVector.isNull(index)) { return 0; } else { return getInt(index); } } @Override public long toLong(int index) throws SFException { return (long) toInt(index); } @Override public float toFloat(int index) throws SFException { return toInt(index); } @Override public double toDouble(int index) throws SFException { return toInt(index); } @Override public BigDecimal toBigDecimal(int index) throws SFException { if (intVector.isNull(index)) { return null; } else { return BigDecimal.valueOf((long) getInt(index), sfScale); } } @Override public Object toObject(int index) throws SFException { if (isNull(index)) { return null; } else if (!shouldTreatDecimalAsInt()) { return BigDecimal.valueOf((long) getInt(index), sfScale); } return (long) getInt(index); } @Override public String toString(int index) throws SFException { return isNull(index) ? null : Integer.toString(getInt(index)); } @Override public boolean toBoolean(int index) throws SFException { int val = toInt(index); if (val == 0) { return false; } else if (val == 1) { return true; } else { throw new SFException(ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Boolean", val); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/arrow/IntToScaledFixedConverter.java ================================================ package net.snowflake.client.internal.core.arrow; import java.math.BigDecimal; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.core.DataConversionContext; import net.snowflake.client.internal.core.SFException; import org.apache.arrow.vector.ValueVector; /** * Data vector whose snowflake logical type is fixed while represented as a int value vector with * scale */ public class IntToScaledFixedConverter extends IntToFixedConverter { private String format; public IntToScaledFixedConverter( ValueVector fieldVector, int columnIndex, DataConversionContext context, int sfScale) { super(fieldVector, columnIndex, context); logicalTypeStr = String.format( "%s(%s,%s)", SnowflakeType.FIXED, fieldVector.getField().getMetadata().get("precision"), fieldVector.getField().getMetadata().get("scale")); format = ArrowResultUtil.getStringFormat(sfScale); this.sfScale = sfScale; } @Override public float toFloat(int index) throws SFException { if (isNull(index)) { return 0; } return ((float) getInt(index)) / ArrowResultUtil.powerOfTen(sfScale); } @Override public double toDouble(int index) throws SFException { if (isNull(index)) { return 0; } return ((double) getInt(index)) / ArrowResultUtil.powerOfTen(sfScale); } @Override public short toShort(int index) throws SFException { if (isNull(index)) { return 0; } float val = toFloat(index); throw new SFException(ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "short", val); } @Override public int toInt(int index) throws SFException { if (isNull(index)) { return 0; } float val = toFloat(index); throw new SFException(ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "int", val); } @Override public long toLong(int index) throws SFException { if (isNull(index)) { return 0; } float val = toFloat(index); throw new SFException(ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "long", val); } @Override public Object toObject(int index) throws SFException { return toBigDecimal(index); } @Override public String toString(int 
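/*
 * Aside (editor's sketch, not driver code): a FIXED(p,s) column arrives as an
 * unscaled integer plus the column scale carried in the Arrow field metadata, and
 * BigDecimal.valueOf(unscaled, scale) reassembles the exact value:
 *
 *   java.math.BigDecimal d = java.math.BigDecimal.valueOf(12345L, 2);
 *   // d.toPlainString() -> "123.45"
 *
 * This is why toBigDecimal above is lossless, while the toFloat/toDouble overrides
 * in the scaled subclasses divide by powerOfTen(scale) and may round.
 */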
index) throws SFException { return isNull(index) ? null : String.format(format, (double) getInt(index) / ArrowResultUtil.powerOfTen(sfScale)); } @Override public boolean toBoolean(int index) throws SFException { if (isNull(index)) { return false; } BigDecimal val = toBigDecimal(index); if (val.compareTo(BigDecimal.ZERO) == 0) { return false; } else if (val.compareTo(BigDecimal.ONE) == 0) { return true; } else { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Boolean", val.toPlainString()); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/arrow/IntToTimeConverter.java ================================================ package net.snowflake.client.internal.core.arrow; import java.nio.ByteBuffer; import java.sql.Time; import java.sql.Timestamp; import java.util.TimeZone; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.core.DataConversionContext; import net.snowflake.client.internal.core.ResultUtil; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.jdbc.SnowflakeTimestampWithTimezone; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import net.snowflake.common.core.SFTime; import org.apache.arrow.vector.IntVector; import org.apache.arrow.vector.ValueVector; /** Convert from Arrow IntVector to Time. */ public class IntToTimeConverter extends AbstractArrowVectorConverter { private IntVector intVector; private ByteBuffer byteBuf = ByteBuffer.allocate(IntVector.TYPE_WIDTH); /** * @param fieldVector ValueVector * @param columnIndex column index * @param context DataConversionContext */ public IntToTimeConverter( ValueVector fieldVector, int columnIndex, DataConversionContext context) { super(SnowflakeType.TIME.name(), fieldVector, columnIndex, context); this.intVector = (IntVector) fieldVector; } /** * parse long into SFTime * * @param index * @return */ private SFTime toSFTime(int index) { long val = intVector.getDataBuffer().getInt(index * IntVector.TYPE_WIDTH); return SFTime.fromFractionalSeconds(val, context.getScale(columnIndex)); } @Override public byte[] toBytes(int index) throws SFException { if (isNull(index)) { return null; } else { byteBuf.putInt(0, intVector.getDataBuffer().getInt(index * IntVector.TYPE_WIDTH)); return byteBuf.array(); } } @Override public Time toTime(int index) throws SFException { if (isNull(index)) { return null; } else { SFTime sfTime = toSFTime(index); if (sfTime == null) { return null; } Time ts = new Time( sfTime.getFractionalSeconds(ResultUtil.DEFAULT_SCALE_OF_SFTIME_FRACTION_SECONDS)); if (useSessionTimezone) { ts = SnowflakeUtil.getTimeInSessionTimezone( SnowflakeUtil.getSecondsFromMillis(ts.getTime()), sfTime.getNanosecondsWithinSecond()); } return ts; } } @Override public String toString(int index) throws SFException { if (context.getTimeFormatter() == null) { throw new SFException(ErrorCode.INTERNAL_ERROR, "missing time formatter"); } return isNull(index) ? null : ResultUtil.getSFTimeAsString( toSFTime(index), context.getScale(columnIndex), context.getTimeFormatter()); } @Override public Object toObject(int index) throws SFException { return isNull(index) ? 
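/*
 * Aside (editor's sketch, not driver code): a TIME value arrives as an integer
 * count of fractional seconds since midnight, with the column scale fixing the
 * unit (scale 3 means milliseconds). Decoding such a value by hand with plain JDK
 * types:
 *
 *   long raw = 45_296_789L; // scale 3, i.e. 45296.789 seconds after midnight
 *   java.time.LocalTime t = java.time.LocalTime.ofNanoOfDay(raw * 1_000_000L);
 *   // t -> 12:34:56.789
 *
 * SFTime.fromFractionalSeconds above performs the equivalent normalization for any
 * scale from 0 to 9.
 */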
null : toTime(index);
  }

  @Override
  public Timestamp toTimestamp(int index, TimeZone tz) throws SFException {
    if (isNull(index)) {
      return null;
    }
    if (useSessionTimezone) {
      SFTime sfTime = toSFTime(index);
      return new SnowflakeTimestampWithTimezone(
          sfTime.getFractionalSeconds(ResultUtil.DEFAULT_SCALE_OF_SFTIME_FRACTION_SECONDS),
          sfTime.getNanosecondsWithinSecond(),
          TimeZone.getTimeZone("UTC"));
    }
    return new Timestamp(toTime(index).getTime());
  }

  @Override
  public boolean toBoolean(int index) throws SFException {
    if (isNull(index)) {
      return false;
    }
    Time val = toTime(index);
    throw new SFException(ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Boolean", val);
  }
}

================================================
FILE: src/main/java/net/snowflake/client/internal/core/arrow/IntervalDayTimeToDurationConverter.java
================================================
package net.snowflake.client.internal.core.arrow;

import java.time.Duration;
import net.snowflake.client.api.exception.ErrorCode;
import net.snowflake.client.api.resultset.SnowflakeType;
import net.snowflake.client.internal.core.DataConversionContext;
import net.snowflake.client.internal.core.SFException;
import org.apache.arrow.vector.BigIntVector;
import org.apache.arrow.vector.ValueVector;

class IntervalDayTimeToDurationConverter extends AbstractArrowVectorConverter {
  private BigIntVector vector;
  private static final long nanoInSecond = 1_000_000_000;

  public IntervalDayTimeToDurationConverter(
      ValueVector vector, int idx, DataConversionContext context) {
    super(SnowflakeType.INTERVAL_DAY_TIME.name(), vector, idx, context);
    this.vector = (BigIntVector) vector;
  }

  @Override
  public Duration toDuration(int index) throws SFException {
    if (isNull(index)) {
      return null;
    }
    long numNanos = vector.getObject(index);
    try {
      int sign = Long.signum(numNanos);
      numNanos = Math.abs(numNanos);
      // Duration.ofSeconds() with passed in negative second value results in overflow
      // so instead we identify the sign of numNanos and use Duration.negated() accordingly
      Duration duration = Duration.ofSeconds(numNanos / nanoInSecond, numNanos % nanoInSecond);
      if (sign >= 0) {
        return duration;
      } else {
        return duration.negated();
      }
    } catch (ArithmeticException e) {
      throw new SFException(ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Duration", numNanos);
    }
  }

  @Override
  public String toString(int index) throws SFException {
    if (isNull(index)) {
      return null;
    }
    return toDuration(index).toString();
  }

  @Override
  public Object toObject(int index) throws SFException {
    return toDuration(index);
  }
}

================================================
FILE: src/main/java/net/snowflake/client/internal/core/arrow/IntervalYearMonthToPeriodConverter.java
================================================
package net.snowflake.client.internal.core.arrow;

import java.time.Period;
import net.snowflake.client.api.resultset.SnowflakeType;
import net.snowflake.client.internal.core.DataConversionContext;
import net.snowflake.client.internal.core.SFException;
import org.apache.arrow.vector.BigIntVector;
import org.apache.arrow.vector.IntVector;
import org.apache.arrow.vector.SmallIntVector;
import org.apache.arrow.vector.ValueVector;

class IntervalYearMonthToPeriodConverter extends AbstractArrowVectorConverter {
  private SmallIntVector smallIntVector;
  private IntVector intVector;
  private BigIntVector bigIntVector;
  private static final int monthsInYear = 12;

  public IntervalYearMonthToPeriodConverter(
      ValueVector vector, int idx, DataConversionContext context) {
    super(SnowflakeType.INTERVAL_YEAR_MONTH.name(), vector, idx, context);
    if (vector instanceof SmallIntVector) {
      // Underlying Interval Year-Month type is SB2
      this.smallIntVector = (SmallIntVector) vector;
    } else if (vector instanceof IntVector) {
      // Underlying Interval Year-Month type is SB4
      this.intVector = (IntVector) vector;
    } else if (vector instanceof BigIntVector) {
      // Underlying Interval Year-Month type is SB8
      this.bigIntVector = (BigIntVector) vector;
    }
  }

  @Override
  public Period toPeriod(int index) {
    if (isNull(index)) {
      return null;
    }
    if (smallIntVector != null) {
      short value = smallIntVector.get(index);
      return Period.of(value / monthsInYear, value % monthsInYear, 0);
    } else if (intVector != null) {
      int value = intVector.get(index);
      return Period.of(value / monthsInYear, value % monthsInYear, 0);
    } else {
      long value = bigIntVector.get(index);
      return Period.of((int) (value / monthsInYear), (int) (value % monthsInYear), 0);
    }
  }

  @Override
  public String toString(int index) throws SFException {
    if (isNull(index)) {
      return null;
    }
    return toPeriod(index).toString();
  }

  @Override
  public Object toObject(int index) throws SFException {
    return toPeriod(index);
  }
}

================================================
FILE: src/main/java/net/snowflake/client/internal/core/arrow/MapConverter.java
================================================
package net.snowflake.client.internal.core.arrow;

import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.stream.Collectors;
import net.snowflake.client.api.exception.SnowflakeSQLException;
import net.snowflake.client.api.resultset.SnowflakeType;
import net.snowflake.client.internal.core.DataConversionContext;
import net.snowflake.client.internal.core.SFException;
import net.snowflake.client.internal.core.arrow.tostringhelpers.ArrowObjectStringRepresentationBuilder;
import org.apache.arrow.vector.FieldVector;
import org.apache.arrow.vector.complex.MapVector;
import org.apache.arrow.vector.util.JsonStringHashMap;

/** Arrow MapVector converter. */
public class MapConverter extends AbstractArrowVectorConverter {
  private final MapVector vector;

  /**
   * @param valueVector ValueVector
   * @param columnIndex column index
   * @param context DataConversionContext
   */
  public MapConverter(MapVector valueVector, int columnIndex, DataConversionContext context) {
    super(SnowflakeType.MAP.name(), valueVector, columnIndex, context);
    this.vector = valueVector;
  }

  @Override
  public Object toObject(int index) throws SFException {
    if (isNull(index)) {
      return null;
    }
    List<JsonStringHashMap<String, Object>> entriesList =
        (List<JsonStringHashMap<String, Object>>) vector.getObject(index);
    return entriesList.stream()
        .collect(
            Collectors.toMap(entry -> entry.get("key").toString(), entry -> entry.get("value")));
  }

  @Override
  public byte[] toBytes(int index) throws SFException {
    return isNull(index) ?
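/*
 * Aside (editor's sketch, not driver code): the two interval converters above map
 * onto java.time types with simple arithmetic. Day-time intervals are a nanosecond
 * count, split and negated to avoid handing Duration.ofSeconds() negative parts;
 * year-month intervals are a total-months count split by division and remainder:
 *
 *   import java.time.Duration;
 *   import java.time.Period;
 *
 *   long numNanos = -1_500_000_000L; // -1.5 seconds
 *   long abs = Math.abs(numNanos);
 *   Duration d = Duration.ofSeconds(abs / 1_000_000_000L, abs % 1_000_000_000L);
 *   if (Long.signum(numNanos) < 0) {
 *     d = d.negated(); // d.toString() -> "PT-1.5S"
 *   }
 *
 *   Period p = Period.of(26 / 12, 26 % 12, 0);
 *   // p.toString() -> "P2Y2M"; Java division truncates toward zero, so -26 months
 *   // gives Period.of(-2, -2, 0), printed as "P-2Y-2M".
 */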
null : toString(index).getBytes(StandardCharsets.UTF_8); } @Override public String toString(int index) throws SFException { ArrowObjectStringRepresentationBuilder builder = new ArrowObjectStringRepresentationBuilder(); FieldVector vectorUnpacked = vector.getChildrenFromFields().get(0); FieldVector keys = vectorUnpacked.getChildrenFromFields().get(0); FieldVector values = vectorUnpacked.getChildrenFromFields().get(1); final ArrowVectorConverter keyConverter; final ArrowVectorConverter valueConverter; SnowflakeType valueLogicalType = ArrowVectorConverterUtil.getSnowflakeTypeFromFieldMetadata(values.getField()); try { keyConverter = ArrowVectorConverterUtil.initConverter(keys, context, columnIndex); valueConverter = ArrowVectorConverterUtil.initConverter(values, context, columnIndex); } catch (SnowflakeSQLException e) { return vector.getObject(index).toString(); } for (int i = vector.getElementStartIndex(index); i < vector.getElementEndIndex(index); i++) { builder.appendKeyValue( keyConverter.toString(i), valueConverter.toString(i), valueLogicalType); } return builder.toString(); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/arrow/SmallIntToFixedConverter.java ================================================ package net.snowflake.client.internal.core.arrow; import java.math.BigDecimal; import java.nio.ByteBuffer; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.core.DataConversionContext; import net.snowflake.client.internal.core.SFException; import org.apache.arrow.vector.SmallIntVector; import org.apache.arrow.vector.ValueVector; /** Data vector whose snowflake logical type is fixed while represented as a short value vector */ public class SmallIntToFixedConverter extends AbstractArrowVectorConverter { protected int sfScale; protected SmallIntVector smallIntVector; ByteBuffer byteBuf = ByteBuffer.allocate(SmallIntVector.TYPE_WIDTH); /** * @param fieldVector ValueVector * @param columnIndex column index * @param context DataConversionContext */ public SmallIntToFixedConverter( ValueVector fieldVector, int columnIndex, DataConversionContext context) { super( String.format( "%s(%s,%s)", SnowflakeType.FIXED, fieldVector.getField().getMetadata().get("precision"), fieldVector.getField().getMetadata().get("scale")), fieldVector, columnIndex, context); this.smallIntVector = (SmallIntVector) fieldVector; } protected short getShort(int index) throws SFException { return smallIntVector.getDataBuffer().getShort(index * SmallIntVector.TYPE_WIDTH); } @Override public byte[] toBytes(int index) throws SFException { if (isNull(index)) { return null; } else { byteBuf.putShort(0, getShort(index)); return byteBuf.array(); } } @Override public byte toByte(int index) throws SFException { short shortVal = toShort(index); byte byteVal = (byte) shortVal; if (byteVal == shortVal) { return byteVal; } throw new SFException(ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "byte", shortVal); } @Override public short toShort(int index) throws SFException { if (smallIntVector.isNull(index)) { return 0; } else { return getShort(index); } } @Override public int toInt(int index) throws SFException { return (int) toShort(index); } @Override public long toLong(int index) throws SFException { return (long) toShort(index); } @Override public BigDecimal toBigDecimal(int index) throws SFException { if (smallIntVector.isNull(index)) { return null; } else { return 
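/*
 * Aside (editor's sketch, not driver code): MapVector.getObject(index), used by
 * MapConverter.toObject above, yields a list of {key, value} structs, which
 * Collectors.toMap flattens into a java.util.Map. The same reshaping on plain
 * collections:
 *
 *   import java.util.List;
 *   import java.util.Map;
 *   import java.util.stream.Collectors;
 *
 *   List<Map<String, Object>> entries =
 *       List.of(Map.of("key", "a", "value", 1), Map.of("key", "b", "value", 2));
 *   Map<String, Object> flat =
 *       entries.stream()
 *           .collect(Collectors.toMap(e -> e.get("key").toString(), e -> e.get("value")));
 *   // flat -> {a=1, b=2}
 *
 * Collectors.toMap rejects duplicate keys, which cannot occur here since map keys
 * are unique by definition.
 */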
BigDecimal.valueOf((long) getShort(index), sfScale); } } @Override public float toFloat(int index) throws SFException { return toShort(index); } @Override public double toDouble(int index) throws SFException { return toFloat(index); } @Override public Object toObject(int index) throws SFException { if (isNull(index)) { return null; } else if (!shouldTreatDecimalAsInt()) { return BigDecimal.valueOf((long) getShort(index), sfScale); } return (long) getShort(index); } @Override public String toString(int index) throws SFException { return isNull(index) ? null : Short.toString(getShort(index)); } @Override public boolean toBoolean(int index) throws SFException { short val = toShort(index); if (val == 0) { return false; } else if (val == 1) { return true; } else { throw new SFException(ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Boolean", val); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/arrow/SmallIntToScaledFixedConverter.java ================================================ package net.snowflake.client.internal.core.arrow; import java.math.BigDecimal; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.core.DataConversionContext; import net.snowflake.client.internal.core.SFException; import org.apache.arrow.vector.ValueVector; /** * Data vector whose snowflake logical type is fixed while represented as a scaled short value * vector */ public class SmallIntToScaledFixedConverter extends SmallIntToFixedConverter { private String format; public SmallIntToScaledFixedConverter( ValueVector fieldVector, int columnIndex, DataConversionContext context, int sfScale) { super(fieldVector, columnIndex, context); logicalTypeStr = String.format( "%s(%s,%s)", SnowflakeType.FIXED, fieldVector.getField().getMetadata().get("precision"), fieldVector.getField().getMetadata().get("scale")); format = ArrowResultUtil.getStringFormat(sfScale); this.sfScale = sfScale; } @Override public float toFloat(int index) throws SFException { if (isNull(index)) { return 0; } return ((float) getShort(index)) / ArrowResultUtil.powerOfTen(sfScale); } @Override public short toShort(int index) throws SFException { if (isNull(index)) { return 0; } float val = toFloat(index); throw new SFException(ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Short", val); } @Override public int toInt(int index) throws SFException { if (isNull(index)) { return 0; } float val = toFloat(index); throw new SFException(ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Int", val); } @Override public long toLong(int index) throws SFException { if (isNull(index)) { return 0; } float val = toFloat(index); throw new SFException(ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Long", val); } @Override public Object toObject(int index) throws SFException { return toBigDecimal(index); } @Override public String toString(int index) throws SFException { return isNull(index) ? 
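/*
 * Aside (editor's sketch, not driver code): the scaled subclasses above override
 * toShort/toInt/toLong to throw even though the parent class could narrow, because
 * a FIXED value with scale > 0 has no faithful integral form:
 *
 *   short unscaled = 12345;            // FIXED(5,2) stores 123.45 this way
 *   float asFloat = unscaled / 100.0f; // 123.45f -- allowed, possibly rounded
 *   // (short) asFloat would yield 123 and silently drop .45, hence the SFException.
 */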
null : String.format(format, (float) getShort(index) / ArrowResultUtil.powerOfTen(sfScale)); } @Override public boolean toBoolean(int index) throws SFException { if (isNull(index)) { return false; } BigDecimal val = toBigDecimal(index); if (val.compareTo(BigDecimal.ZERO) == 0) { return false; } else if (val.compareTo(BigDecimal.ONE) == 0) { return true; } else { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Boolean", val.toPlainString()); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/arrow/StructConverter.java ================================================ package net.snowflake.client.internal.core.arrow; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.core.DataConversionContext; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.core.arrow.tostringhelpers.ArrowObjectStringRepresentationBuilder; import org.apache.arrow.vector.FieldVector; import org.apache.arrow.vector.complex.StructVector; public class StructConverter extends AbstractArrowVectorConverter { private final StructVector structVector; public StructConverter(StructVector vector, int columnIndex, DataConversionContext context) { super(SnowflakeType.OBJECT.name(), vector, columnIndex, context); structVector = vector; } @Override public Object toObject(int index) throws SFException { return isNull(index) ? null : structVector.getObject(index); } @Override public byte[] toBytes(int index) throws SFException { return isNull(index) ? null : toString(index).getBytes(); } @Override public String toString(int index) throws SFException { ArrowObjectStringRepresentationBuilder builder = new ArrowObjectStringRepresentationBuilder(); for (String childName : structVector.getChildFieldNames()) { FieldVector fieldVector = structVector.getChild(childName); SnowflakeType logicalType = ArrowVectorConverterUtil.getSnowflakeTypeFromFieldMetadata(fieldVector.getField()); try { if (fieldVector.isNull(index)) { builder.appendKeyValue(childName, null, logicalType); } else { ArrowVectorConverter converter = ArrowVectorConverterUtil.initConverter(fieldVector, context, columnIndex); builder.appendKeyValue(childName, converter.toString(index), logicalType); } } catch (SnowflakeSQLException e) { return structVector.getObject(index).toString(); } } return builder.toString(); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/arrow/StructObjectWrapper.java ================================================ package net.snowflake.client.internal.core.arrow; public class StructObjectWrapper { private final String jsonString; private final Object object; public StructObjectWrapper(String jsonString, Object object) { this.jsonString = jsonString; this.object = object; } public String getJsonString() { return jsonString; } public Object getObject() { return object; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/arrow/StructuredTypeDateTimeConverter.java ================================================ package net.snowflake.client.internal.core.arrow; import static net.snowflake.client.api.resultset.SnowflakeType.TIMESTAMP_LTZ; import static net.snowflake.client.api.resultset.SnowflakeType.TIMESTAMP_NTZ; import static net.snowflake.client.api.resultset.SnowflakeType.TIMESTAMP_TZ; import java.sql.Date; import 
java.sql.Time;
import java.sql.Timestamp;
import java.sql.Types;
import java.util.Map;
import java.util.TimeZone;
import net.snowflake.client.api.exception.ErrorCode;
import net.snowflake.client.api.resultset.SnowflakeType;
import net.snowflake.client.internal.core.SFException;
import org.apache.arrow.vector.util.JsonStringHashMap;

public class StructuredTypeDateTimeConverter {

  private final TimeZone sessionTimeZone;
  private final long resultVersion;
  private final boolean honorClientTZForTimestampNTZ;
  private final boolean treatNTZAsUTC;
  private final boolean useSessionTimezone;
  private final boolean formatDateWithTimeZone;

  public StructuredTypeDateTimeConverter(
      TimeZone sessionTimeZone,
      long resultVersion,
      boolean honorClientTZForTimestampNTZ,
      boolean treatNTZAsUTC,
      boolean useSessionTimezone,
      boolean formatDateWithTimeZone) {
    this.sessionTimeZone = sessionTimeZone;
    this.resultVersion = resultVersion;
    this.honorClientTZForTimestampNTZ = honorClientTZForTimestampNTZ;
    this.treatNTZAsUTC = treatNTZAsUTC;
    this.useSessionTimezone = useSessionTimezone;
    this.formatDateWithTimeZone = formatDateWithTimeZone;
  }

  public Timestamp getTimestamp(
      Map<String, Object> obj, int columnType, int columnSubType, TimeZone tz, int scale)
      throws SFException {
    if (tz == null) {
      tz = TimeZone.getDefault();
    }
    if (Types.TIMESTAMP == columnType) {
      if (SnowflakeType.EXTRA_TYPES_TIMESTAMP_LTZ == columnSubType) {
        return convertTimestampLtz(obj, scale);
      } else {
        return convertTimestampNtz(obj, tz, scale);
      }
    } else if (Types.TIMESTAMP_WITH_TIMEZONE == columnType
        && SnowflakeType.EXTRA_TYPES_TIMESTAMP_TZ == columnSubType) {
      return convertTimestampTz(obj, scale);
    }
    throw new SFException(
        ErrorCode.INVALID_VALUE_CONVERT,
        "Unexpected Arrow Field for columnType "
            + columnType
            + " , column subtype "
            + columnSubType
            + " , and object type "
            + obj.getClass());
  }

  public Date getDate(int value, TimeZone tz) throws SFException {
    return DateConverter.getDate(value, tz, sessionTimeZone, formatDateWithTimeZone);
  }

  public Time getTime(long value, int scale) throws SFException {
    return BigIntToTimeConverter.getTime(value, scale, useSessionTimezone);
  }

  private Timestamp convertTimestampLtz(Object obj, int scale) throws SFException {
    if (obj instanceof JsonStringHashMap) {
      JsonStringHashMap<String, Object> map = (JsonStringHashMap<String, Object>) obj;
      if (map.values().size() == 2) {
        return TwoFieldStructToTimestampLTZConverter.getTimestamp(
            (long) map.get("epoch"),
            (int) map.get("fraction"),
            sessionTimeZone,
            useSessionTimezone,
            false);
      }
    } else if (obj instanceof Long) {
      return BigIntToTimestampLTZConverter.getTimestamp(
          (long) obj, scale, sessionTimeZone, useSessionTimezone);
    }
    throw new SFException(
        ErrorCode.INVALID_VALUE_CONVERT,
        "Unexpected Arrow Field for " + TIMESTAMP_LTZ + " and object type " + obj.getClass());
  }

  private Timestamp convertTimestampNtz(Object obj, TimeZone tz, int scale) throws SFException {
    if (obj instanceof JsonStringHashMap) {
      JsonStringHashMap<String, Object> map = (JsonStringHashMap<String, Object>) obj;
      if (map.values().size() == 2) {
        return TwoFieldStructToTimestampNTZConverter.getTimestamp(
            (long) map.get("epoch"),
            (int) map.get("fraction"),
            tz,
            sessionTimeZone,
            treatNTZAsUTC,
            useSessionTimezone,
            honorClientTZForTimestampNTZ,
            false);
      }
    } else if (obj instanceof Long) {
      return BigIntToTimestampNTZConverter.getTimestamp(
          (long) obj, tz, scale, honorClientTZForTimestampNTZ, false);
    }
    throw new SFException(
        ErrorCode.INVALID_VALUE_CONVERT,
        "Unexpected Arrow Field for " + TIMESTAMP_NTZ + " and object type " + obj.getClass());
  }

  private Timestamp convertTimestampTz(Object obj, int scale) throws SFException {
    if (obj instanceof JsonStringHashMap) {
      JsonStringHashMap<String, Object> map = (JsonStringHashMap<String, Object>) obj;
      if (map.values().size() == 2) {
        return TwoFieldStructToTimestampTZConverter.getTimestamp(
            (long) map.get("epoch"), (int) map.get("timezone"), scale);
      } else if (map.values().size() == 3) {
        return ThreeFieldStructToTimestampTZConverter.getTimestamp(
            (long) map.get("epoch"),
            (int) map.get("fraction"),
            (int) map.get("timezone"),
            resultVersion,
            useSessionTimezone,
            false);
      }
    }
    throw new SFException(
        ErrorCode.INVALID_VALUE_CONVERT,
        "Unexpected Arrow Field for " + TIMESTAMP_TZ + " and object type " + obj.getClass());
  }
}

================================================
FILE: src/main/java/net/snowflake/client/internal/core/arrow/ThreeFieldStructToTimestampTZConverter.java
================================================
package net.snowflake.client.internal.core.arrow;

import java.sql.Date;
import java.sql.Time;
import java.sql.Timestamp;
import java.util.TimeZone;
import net.snowflake.client.api.exception.ErrorCode;
import net.snowflake.client.api.resultset.SnowflakeType;
import net.snowflake.client.internal.core.DataConversionContext;
import net.snowflake.client.internal.core.ResultUtil;
import net.snowflake.client.internal.core.SFException;
import net.snowflake.client.internal.jdbc.SnowflakeDateWithTimezone;
import net.snowflake.client.internal.jdbc.SnowflakeTimeWithTimezone;
import net.snowflake.client.internal.jdbc.SnowflakeUtil;
import net.snowflake.common.core.SFTimestamp;
import org.apache.arrow.vector.BigIntVector;
import org.apache.arrow.vector.IntVector;
import org.apache.arrow.vector.ValueVector;
import org.apache.arrow.vector.complex.StructVector;

/** converter from three-field struct (including epoch, fraction, and timezone) to Timestamp_TZ */
public class ThreeFieldStructToTimestampTZConverter extends AbstractArrowVectorConverter {
  private StructVector structVector;
  private BigIntVector epochs;
  private IntVector fractions;
  private IntVector timeZoneIndices;
  private TimeZone timeZone = TimeZone.getTimeZone("UTC");

  /**
   * @param fieldVector ValueVector
   * @param columnIndex column index
   * @param context DataConversionContext
   */
  public ThreeFieldStructToTimestampTZConverter(
      ValueVector fieldVector, int columnIndex, DataConversionContext context) {
    super(SnowflakeType.TIMESTAMP_LTZ.name(), fieldVector, columnIndex, context);
    structVector = (StructVector) fieldVector;
    epochs = structVector.getChild(FIELD_NAME_EPOCH, BigIntVector.class);
    fractions = structVector.getChild(FIELD_NAME_FRACTION, IntVector.class);
    timeZoneIndices = structVector.getChild(FIELD_NAME_TIME_ZONE_INDEX, IntVector.class);
  }

  @Override
  public boolean isNull(int index) {
    return structVector.isNull(index)
        || epochs.isNull(index)
        || fractions.isNull(index)
        || timeZoneIndices.isNull(index);
  }

  @Override
  public String toString(int index) throws SFException {
    if (context.getTimestampTZFormatter() == null) {
      throw new SFException(ErrorCode.INTERNAL_ERROR, "missing timestamp TZ formatter");
    }
    try {
      Timestamp ts = isNull(index) ? null : getTimestamp(index, TimeZone.getDefault(), true);
      return ts == null
          ? null
          : context.getTimestampTZFormatter().format(ts, timeZone, context.getScale(columnIndex));
    } catch (TimestampOperationNotAvailableException e) {
      return e.getSecsSinceEpoch().toPlainString();
    }
  }

  @Override
  public byte[] toBytes(int index) throws SFException {
    if (isNull(index)) {
      return null;
    }
    throw new SFException(
        ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "byteArray", toString(index));
  }

  @Override
  public Object toObject(int index) throws SFException {
    return toTimestamp(index, TimeZone.getDefault());
  }

  @Override
  public Timestamp toTimestamp(int index, TimeZone tz) throws SFException {
    return isNull(index) ? null : getTimestamp(index, tz, false);
  }

  private Timestamp getTimestamp(int index, TimeZone tz, boolean fromToString) throws SFException {
    long epoch = epochs.getDataBuffer().getLong(index * BigIntVector.TYPE_WIDTH);
    int fraction = fractions.getDataBuffer().getInt(index * IntVector.TYPE_WIDTH);
    int timeZoneIndex = timeZoneIndices.getDataBuffer().getInt(index * IntVector.TYPE_WIDTH);
    timeZone = convertFromTimeZoneIndex(timeZoneIndex, context.getResultVersion());
    return getTimestamp(
        epoch,
        fraction,
        timeZoneIndex,
        context.getResultVersion(),
        useSessionTimezone,
        fromToString);
  }

  @Override
  public Date toDate(int index, TimeZone tz, boolean dateFormat) throws SFException {
    if (isNull(index)) {
      return null;
    }
    Timestamp ts = getTimestamp(index, TimeZone.getDefault(), false);
    // ts can be null when Java's timestamp is overflow.
    return ts == null
        ? null
        : new SnowflakeDateWithTimezone(ts.getTime(), timeZone, useSessionTimezone);
  }

  @Override
  public Time toTime(int index) throws SFException {
    Timestamp ts = toTimestamp(index, TimeZone.getDefault());
    return ts == null ? null : new SnowflakeTimeWithTimezone(ts, timeZone, useSessionTimezone);
  }

  @Override
  public boolean toBoolean(int index) throws SFException {
    if (isNull(index)) {
      return false;
    }
    Timestamp val = toTimestamp(index, TimeZone.getDefault());
    throw new SFException(
        ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.BOOLEAN_STR, val);
  }

  @Override
  public short toShort(int rowIndex) throws SFException {
    if (isNull(rowIndex)) {
      return 0;
    }
    throw new SFException(
        ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.SHORT_STR, "");
  }

  public static Timestamp getTimestamp(
      long epoch,
      int fraction,
      int timeZoneIndex,
      long resultVersion,
      boolean useSessionTimezone,
      boolean fromToString)
      throws SFException {
    if (ArrowResultUtil.isTimestampOverflow(epoch)) {
      if (fromToString) {
        throw new TimestampOperationNotAvailableException(epoch, fraction);
      } else {
        return null;
      }
    }
    TimeZone timeZone = convertFromTimeZoneIndex(timeZoneIndex, resultVersion);
    Timestamp ts = ArrowResultUtil.createTimestamp(epoch, fraction, timeZone, useSessionTimezone);
    return ResultUtil.adjustTimestamp(ts);
  }

  private static TimeZone convertFromTimeZoneIndex(int timeZoneIndex, long resultVersion) {
    if (resultVersion > 0) {
      return SFTimestamp.convertTimezoneIndexToTimeZone(timeZoneIndex);
    } else {
      return TimeZone.getTimeZone("UTC");
    }
  }
}

================================================
FILE: src/main/java/net/snowflake/client/internal/core/arrow/TinyIntToFixedConverter.java
================================================
package net.snowflake.client.internal.core.arrow;

import java.math.BigDecimal;
import java.nio.ByteBuffer;
import net.snowflake.client.api.exception.ErrorCode;
import net.snowflake.client.api.resultset.SnowflakeType;
import net.snowflake.client.internal.core.DataConversionContext;
import
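/*
 * Aside (editor's sketch, not driver code): StructuredTypeDateTimeConverter's
 * getTimestamp above has to dispatch on a JDBC type code plus Snowflake's extra
 * subtype, because the LTZ and NTZ flavors share java.sql.Types.TIMESTAMP. Its
 * decision table, condensed (the helper name timestampFlavor is invented for this
 * illustration):
 *
 *   static String timestampFlavor(int columnType, int columnSubType) {
 *     if (columnType == java.sql.Types.TIMESTAMP) {
 *       return columnSubType == SnowflakeType.EXTRA_TYPES_TIMESTAMP_LTZ ? "LTZ" : "NTZ";
 *     }
 *     if (columnType == java.sql.Types.TIMESTAMP_WITH_TIMEZONE
 *         && columnSubType == SnowflakeType.EXTRA_TYPES_TIMESTAMP_TZ) {
 *       return "TZ";
 *     }
 *     throw new IllegalArgumentException("unexpected column type " + columnType);
 *   }
 */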
net.snowflake.client.internal.core.SFException; import org.apache.arrow.vector.TinyIntVector; import org.apache.arrow.vector.ValueVector; /** A converter from arrow tinyint to Snowflake Fixed type converter */ public class TinyIntToFixedConverter extends AbstractArrowVectorConverter { protected TinyIntVector tinyIntVector; protected int sfScale = 0; /** * @param fieldVector ValueVector * @param columnIndex column index * @param context DataConversionContext */ public TinyIntToFixedConverter( ValueVector fieldVector, int columnIndex, DataConversionContext context) { super( String.format( "%s(%s,%s)", SnowflakeType.FIXED, fieldVector.getField().getMetadata().get("precision"), fieldVector.getField().getMetadata().get("scale")), fieldVector, columnIndex, context); this.tinyIntVector = (TinyIntVector) fieldVector; } @Override public byte toByte(int index) throws SFException { if (tinyIntVector.isNull(index)) { return 0; } else { return getByte(index); } } @Override public byte[] toBytes(int index) throws SFException { if (tinyIntVector.isNull(index)) { return null; } ByteBuffer bytes = ByteBuffer.allocate(TinyIntVector.TYPE_WIDTH); tinyIntVector.getDataBuffer().getBytes(index, bytes); return bytes.array(); } protected byte getByte(int index) throws SFException { return tinyIntVector.getDataBuffer().getByte(index * TinyIntVector.TYPE_WIDTH); } @Override public short toShort(int index) throws SFException { return (short) toByte(index); } @Override public int toInt(int index) throws SFException { return (int) toByte(index); } @Override public float toFloat(int index) throws SFException { return toByte(index); } @Override public double toDouble(int index) throws SFException { return toFloat(index); } @Override public long toLong(int index) throws SFException { return (long) toByte(index); } @Override public BigDecimal toBigDecimal(int index) throws SFException { if (tinyIntVector.isNull(index)) { return null; } else { return BigDecimal.valueOf((long) getByte(index), sfScale); } } @Override public Object toObject(int index) throws SFException { if (isNull(index)) { return null; } else if (!shouldTreatDecimalAsInt()) { return BigDecimal.valueOf((long) getByte(index), sfScale); } return (long) toByte(index); } @Override public String toString(int index) throws SFException { return isNull(index) ? 
null : Short.toString(getByte(index)); } @Override public boolean toBoolean(int index) throws SFException { short val = toShort(index); if (val == 0) { return false; } else if (val == 1) { return true; } else { throw new SFException(ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Boolean", val); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/arrow/TinyIntToScaledFixedConverter.java ================================================ package net.snowflake.client.internal.core.arrow; import java.math.BigDecimal; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.core.DataConversionContext; import net.snowflake.client.internal.core.SFException; import org.apache.arrow.vector.ValueVector; /** A converter from scaled arrow tinyint to Snowflake Fixed type converter */ public class TinyIntToScaledFixedConverter extends TinyIntToFixedConverter { private String format; public TinyIntToScaledFixedConverter( ValueVector fieldVector, int columnIndex, DataConversionContext context, int sfScale) { super(fieldVector, columnIndex, context); logicalTypeStr = String.format( "%s(%s,%s)", SnowflakeType.FIXED, fieldVector.getField().getMetadata().get("precision"), fieldVector.getField().getMetadata().get("scale")); format = ArrowResultUtil.getStringFormat(sfScale); this.sfScale = sfScale; } @Override public short toShort(int index) throws SFException { if (isNull(index)) { return 0; } float val = toFloat(index); throw new SFException(ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Short", val); } @Override public int toInt(int index) throws SFException { if (isNull(index)) { return 0; } float val = toFloat(index); throw new SFException(ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Int", val); } @Override public float toFloat(int index) throws SFException { if (isNull(index)) { return 0; } return ((float) getByte(index)) / ArrowResultUtil.powerOfTen(sfScale); } @Override public long toLong(int index) throws SFException { if (isNull(index)) { return 0; } float val = toFloat(index); throw new SFException(ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Long", val); } @Override public Object toObject(int index) throws SFException { return toBigDecimal(index); } @Override public String toString(int index) throws SFException { if (isNull(index)) { return null; } float f = ((float) getByte(index)) / ArrowResultUtil.powerOfTen(sfScale); return String.format(format, f); } @Override public boolean toBoolean(int index) throws SFException { if (isNull(index)) { return false; } BigDecimal val = toBigDecimal(index); if (val.compareTo(BigDecimal.ZERO) == 0) { return false; } else if (val.compareTo(BigDecimal.ONE) == 0) { return true; } else { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Boolean", val.toPlainString()); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/arrow/TwoFieldStructToTimestampLTZConverter.java ================================================ package net.snowflake.client.internal.core.arrow; import java.sql.Date; import java.sql.Time; import java.sql.Timestamp; import java.util.TimeZone; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.core.DataConversionContext; import net.snowflake.client.internal.core.ResultUtil; import 
net.snowflake.client.internal.core.SFException;
import net.snowflake.client.internal.jdbc.SnowflakeDateWithTimezone;
import net.snowflake.client.internal.jdbc.SnowflakeTimeWithTimezone;
import net.snowflake.client.internal.jdbc.SnowflakeUtil;
import org.apache.arrow.vector.BigIntVector;
import org.apache.arrow.vector.IntVector;
import org.apache.arrow.vector.ValueVector;
import org.apache.arrow.vector.complex.StructVector;

/** converter from two-field struct (epochs and fraction) to Timestamp_LTZ */
public class TwoFieldStructToTimestampLTZConverter extends AbstractArrowVectorConverter {
  private StructVector structVector;
  private BigIntVector epochs;
  private IntVector fractions;

  /**
   * @param fieldVector ValueVector
   * @param columnIndex column index
   * @param context DataConversionContext
   */
  public TwoFieldStructToTimestampLTZConverter(
      ValueVector fieldVector, int columnIndex, DataConversionContext context) {
    super(SnowflakeType.TIMESTAMP_LTZ.name(), fieldVector, columnIndex, context);
    structVector = (StructVector) fieldVector;
    epochs = structVector.getChild(FIELD_NAME_EPOCH, BigIntVector.class);
    fractions = structVector.getChild(FIELD_NAME_FRACTION, IntVector.class);
  }

  @Override
  public boolean isNull(int index) {
    return structVector.isNull(index) || epochs.isNull(index) || fractions.isNull(index);
  }

  @Override
  public String toString(int index) throws SFException {
    if (context.getTimestampLTZFormatter() == null) {
      throw new SFException(ErrorCode.INTERNAL_ERROR, "missing timestamp LTZ formatter");
    }
    try {
      Timestamp ts = isNull(index) ? null : getTimestamp(index, TimeZone.getDefault(), true);
      return ts == null
          ? null
          : context
              .getTimestampLTZFormatter()
              .format(ts, context.getTimeZone(), context.getScale(columnIndex));
    } catch (TimestampOperationNotAvailableException e) {
      return e.getSecsSinceEpoch().toPlainString();
    }
  }

  @Override
  public Object toObject(int index) throws SFException {
    return toTimestamp(index, TimeZone.getDefault());
  }

  @Override
  public Timestamp toTimestamp(int index, TimeZone tz) throws SFException {
    return isNull(index) ? null : getTimestamp(index, tz, false);
  }

  private Timestamp getTimestamp(int index, TimeZone tz, boolean fromToString) throws SFException {
    long epoch = epochs.getDataBuffer().getLong(index * BigIntVector.TYPE_WIDTH);
    int fraction = fractions.getDataBuffer().getInt(index * IntVector.TYPE_WIDTH);
    return getTimestamp(epoch, fraction, sessionTimeZone, useSessionTimezone, fromToString);
  }

  @Override
  public byte[] toBytes(int index) throws SFException {
    if (isNull(index)) {
      return null;
    }
    throw new SFException(
        ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "byteArray", toString(index));
  }

  @Override
  public Date toDate(int index, TimeZone tz, boolean dateFormat) throws SFException {
    if (isNull(index)) {
      return null;
    }
    Timestamp ts = getTimestamp(index, TimeZone.getDefault(), false);
    // ts can be null when Java's timestamp is overflow.
    return ts == null
        ? null
        : new SnowflakeDateWithTimezone(ts.getTime(), sessionTimeZone, useSessionTimezone);
  }

  @Override
  public Time toTime(int index) throws SFException {
    Timestamp ts = toTimestamp(index, TimeZone.getDefault());
    return ts == null ?
null : new SnowflakeTimeWithTimezone(ts, sessionTimeZone, useSessionTimezone); } @Override public boolean toBoolean(int index) throws SFException { if (isNull(index)) { return false; } Timestamp val = toTimestamp(index, TimeZone.getDefault()); throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.BOOLEAN_STR, val); } public static Timestamp getTimestamp( long epoch, int fraction, TimeZone sessionTimeZone, boolean useSessionTimezone, boolean fromToString) throws SFException { if (ArrowResultUtil.isTimestampOverflow(epoch)) { if (fromToString) { throw new TimestampOperationNotAvailableException(epoch, fraction); } else { return null; } } Timestamp ts = ArrowResultUtil.createTimestamp(epoch, fraction, sessionTimeZone, useSessionTimezone); return ResultUtil.adjustTimestamp(ts); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/arrow/TwoFieldStructToTimestampNTZConverter.java ================================================ package net.snowflake.client.internal.core.arrow; import java.sql.Date; import java.sql.Time; import java.sql.Timestamp; import java.util.TimeZone; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.core.DataConversionContext; import net.snowflake.client.internal.core.ResultUtil; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.jdbc.SnowflakeTimeWithTimezone; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import org.apache.arrow.vector.BigIntVector; import org.apache.arrow.vector.IntVector; import org.apache.arrow.vector.ValueVector; import org.apache.arrow.vector.complex.StructVector; /** converter from two-field struct (epochs and fraction) to Timestamp_NTZ */ public class TwoFieldStructToTimestampNTZConverter extends AbstractArrowVectorConverter { private StructVector structVector; private BigIntVector epochs; private IntVector fractions; private static final TimeZone NTZ = TimeZone.getTimeZone("UTC"); /** * @param fieldVector ValueVector * @param columnIndex column index * @param context DataConversionContext */ public TwoFieldStructToTimestampNTZConverter( ValueVector fieldVector, int columnIndex, DataConversionContext context) { super(SnowflakeType.TIMESTAMP_NTZ.name(), fieldVector, columnIndex, context); structVector = (StructVector) fieldVector; epochs = structVector.getChild(FIELD_NAME_EPOCH, BigIntVector.class); fractions = structVector.getChild(FIELD_NAME_FRACTION, IntVector.class); } @Override public boolean isNull(int index) { return structVector.isNull(index) || epochs.isNull(index) || fractions.isNull(index); } @Override public String toString(int index) throws SFException { if (context.getTimestampNTZFormatter() == null) { throw new SFException(ErrorCode.INTERNAL_ERROR, "missing timestamp NTZ formatter"); } try { Timestamp ts = isNull(index) ? null : getTimestamp(index, TimeZone.getDefault(), true); return ts == null ? null : context .getTimestampNTZFormatter() .format(ts, TimeZone.getTimeZone("UTC"), context.getScale(columnIndex)); } catch (TimestampOperationNotAvailableException e) { return e.getSecsSinceEpoch().toPlainString(); } } @Override public Object toObject(int index) throws SFException { return toTimestamp(index, TimeZone.getDefault()); } @Override public Timestamp toTimestamp(int index, TimeZone tz) throws SFException { if (tz == null) { tz = TimeZone.getDefault(); } return isNull(index) ? 
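/*
 * Aside (editor's sketch, not driver code): the two-field struct consumed by the
 * TIMESTAMP converters splits an instant into whole epoch seconds plus nanoseconds
 * within the second. Reassembling one by hand with plain JDK types (the real path
 * goes through ArrowResultUtil.createTimestamp):
 *
 *   long epoch = 1_700_000_000L;   // seconds since 1970-01-01T00:00:00Z
 *   int fraction = 123_456_789;    // nanoseconds within that second
 *   java.sql.Timestamp ts = new java.sql.Timestamp(epoch * 1000L);
 *   ts.setNanos(fraction);
 *   // ts -> 2023-11-14 22:13:20.123456789 UTC
 *
 * Keeping the fraction in its own field preserves the full nanosecond precision
 * that a single long of epoch milliseconds would lose.
 */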
null : getTimestamp(index, tz, false); } private Timestamp getTimestamp(int index, TimeZone tz, boolean fromToString) throws SFException { long epoch = epochs.getDataBuffer().getLong(index * BigIntVector.TYPE_WIDTH); int fraction = fractions.getDataBuffer().getInt(index * IntVector.TYPE_WIDTH); return getTimestamp( epoch, fraction, tz, sessionTimeZone, treatNTZasUTC, useSessionTimezone, context.getHonorClientTZForTimestampNTZ(), fromToString); } @Override public byte[] toBytes(int index) throws SFException { if (isNull(index)) { return null; } throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "byteArray", toString(index)); } @Override public Date toDate(int index, TimeZone tz, boolean dateFormat) throws SFException { return isNull(index) ? null : new Date(getTimestamp(index, TimeZone.getDefault(), false).getTime()); } @Override public Time toTime(int index) throws SFException { Timestamp ts = toTimestamp(index, null); if (useSessionTimezone) { ts = toTimestamp(index, sessionTimeZone); } return ts == null ? null : new SnowflakeTimeWithTimezone(ts, sessionTimeZone, useSessionTimezone); } @Override public boolean toBoolean(int index) throws SFException { if (isNull(index)) { return false; } Timestamp val = toTimestamp(index, TimeZone.getDefault()); throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.BOOLEAN_STR, val); } public static Timestamp getTimestamp( long epoch, int fraction, TimeZone tz, TimeZone sessionTimeZone, boolean treatNTZasUTC, boolean useSessionTimezone, boolean honorClientTZForTimestampNTZ, boolean fromToString) throws SFException { if (ArrowResultUtil.isTimestampOverflow(epoch)) { if (fromToString) { throw new TimestampOperationNotAvailableException(epoch, fraction); } else { return null; } } Timestamp ts; if (treatNTZasUTC || !useSessionTimezone) { ts = ArrowResultUtil.createTimestamp(epoch, fraction, TimeZone.getTimeZone("UTC"), true); } else { ts = ArrowResultUtil.createTimestamp(epoch, fraction, sessionTimeZone, false); } // Note: honorClientTZForTimestampNTZ is not enabled for toString method. // If JDBC_TREAT_TIMESTAMP_NTZ_AS_UTC=false, default behavior is to honor // client timezone for NTZ time. Move NTZ timestamp offset to correspond to // client's timezone. UseSessionTimezone overrides treatNTZasUTC. 
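/*
 * Aside (editor's sketch, not driver code): "moving" an NTZ timestamp between
 * zones, as ArrowResultUtil.moveToTimeZone does just below, means keeping the
 * wall-clock reading while changing the underlying instant. With plain JDK types:
 *
 *   import java.time.LocalDateTime;
 *   import java.time.ZoneId;
 *   import java.time.ZonedDateTime;
 *
 *   LocalDateTime wallClock = LocalDateTime.of(2024, 1, 15, 12, 0);
 *   ZonedDateTime asUtc = wallClock.atZone(ZoneId.of("UTC"));
 *   ZonedDateTime asTokyo = wallClock.atZone(ZoneId.of("Asia/Tokyo"));
 *   // Same wall clock; the instants differ by Tokyo's +09:00 offset:
 *   long diff = asUtc.toEpochSecond() - asTokyo.toEpochSecond(); // 32400 seconds
 */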
    if (!fromToString && (honorClientTZForTimestampNTZ && !treatNTZasUTC) || useSessionTimezone) {
      ts = ArrowResultUtil.moveToTimeZone(ts, NTZ, tz);
    }
    return ResultUtil.adjustTimestamp(ts);
  }
}

================================================
FILE: src/main/java/net/snowflake/client/internal/core/arrow/TwoFieldStructToTimestampTZConverter.java
================================================
package net.snowflake.client.internal.core.arrow;

import java.sql.Date;
import java.sql.Time;
import java.sql.Timestamp;
import java.util.TimeZone;
import net.snowflake.client.api.exception.ErrorCode;
import net.snowflake.client.api.resultset.SnowflakeType;
import net.snowflake.client.internal.core.DataConversionContext;
import net.snowflake.client.internal.core.ResultUtil;
import net.snowflake.client.internal.core.SFException;
import net.snowflake.client.internal.jdbc.SnowflakeTimeWithTimezone;
import net.snowflake.client.internal.jdbc.SnowflakeUtil;
import net.snowflake.common.core.SFTimestamp;
import org.apache.arrow.vector.BigIntVector;
import org.apache.arrow.vector.IntVector;
import org.apache.arrow.vector.ValueVector;
import org.apache.arrow.vector.complex.StructVector;

/** converter from two-field struct (epoch and time zone) to Timestamp_TZ */
public class TwoFieldStructToTimestampTZConverter extends AbstractArrowVectorConverter {
  private StructVector structVector;
  private BigIntVector epochs;
  private IntVector timeZoneIndices;
  private TimeZone timeZone = TimeZone.getTimeZone("UTC");

  public TwoFieldStructToTimestampTZConverter(
      ValueVector fieldVector, int columnIndex, DataConversionContext context) {
    super(SnowflakeType.TIMESTAMP_LTZ.name(), fieldVector, columnIndex, context);
    structVector = (StructVector) fieldVector;
    epochs = structVector.getChild(FIELD_NAME_EPOCH, BigIntVector.class);
    timeZoneIndices = structVector.getChild(FIELD_NAME_TIME_ZONE_INDEX, IntVector.class);
  }

  @Override
  public boolean isNull(int index) {
    return structVector.isNull(index) || epochs.isNull(index) || timeZoneIndices.isNull(index);
  }

  @Override
  public String toString(int index) throws SFException {
    if (context.getTimestampTZFormatter() == null) {
      throw new SFException(ErrorCode.INTERNAL_ERROR, "missing timestamp LTZ formatter");
    }
    Timestamp ts = toTimestamp(index, TimeZone.getDefault());
    return ts == null
        ? null
        : context.getTimestampTZFormatter().format(ts, timeZone, context.getScale(columnIndex));
  }

  @Override
  public Object toObject(int index) throws SFException {
    return toTimestamp(index, TimeZone.getDefault());
  }

  @Override
  public Timestamp toTimestamp(int index, TimeZone tz) throws SFException {
    return isNull(index) ? null : getTimestamp(index, tz);
  }

  private Timestamp getTimestamp(int index, TimeZone tz) throws SFException {
    long epoch = epochs.getDataBuffer().getLong(index * BigIntVector.TYPE_WIDTH);
    int timeZoneIndex = timeZoneIndices.getDataBuffer().getInt(index * IntVector.TYPE_WIDTH);
    if (context.getResultVersion() > 0) {
      timeZone = SFTimestamp.convertTimezoneIndexToTimeZone(timeZoneIndex);
    } else {
      timeZone = TimeZone.getTimeZone("UTC");
    }
    return getTimestamp(epoch, timeZoneIndex, context.getScale(columnIndex));
  }

  @Override
  public Date toDate(int index, TimeZone tz, boolean dateFormat) throws SFException {
    if (isNull(index)) {
      return null;
    }
    Timestamp ts = getTimestamp(index, TimeZone.getDefault());
    // ts can be null when Java's timestamp is overflow.
    return ts == null ?
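/*
 * Aside (editor's sketch, not driver code): the isTimestampOverflow(epoch) guard
 * used by the getTimestamp helpers above exists because turning epoch seconds into
 * a java.sql.Timestamp involves multiplying by 1000, which a sufficiently large
 * long cannot survive. The failure mode, shown with the JDK's checked multiply
 * (the driver's exact cutoff lives in ArrowResultUtil and may differ from this
 * sketch):
 *
 *   long epoch = Long.MAX_VALUE / 500; // too large to express as epoch millis
 *   try {
 *     long millis = Math.multiplyExact(epoch, 1000L);
 *   } catch (ArithmeticException overflow) {
 *     // the converters return null here instead, or raise
 *     // TimestampOperationNotAvailableException when called from toString()
 *   }
 */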
null : new Date(ts.getTime()); } @Override public Time toTime(int index) throws SFException { Timestamp ts = toTimestamp(index, TimeZone.getDefault()); return ts == null ? null : new SnowflakeTimeWithTimezone(ts.getTime(), ts.getNanos(), useSessionTimezone); } @Override public boolean toBoolean(int index) throws SFException { if (isNull(index)) { return false; } Timestamp val = toTimestamp(index, TimeZone.getDefault()); throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.BOOLEAN_STR, val); } @Override public byte[] toBytes(int index) throws SFException { if (isNull(index)) { return null; } throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "byteArray", toString(index)); } @Override public short toShort(int rowIndex) throws SFException { if (isNull(rowIndex)) { return 0; } throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.SHORT_STR, ""); } public static Timestamp getTimestamp(long epoch, int timeZoneIndex, int scale) throws SFException { Timestamp ts = ArrowResultUtil.toJavaTimestamp(epoch, scale); return ResultUtil.adjustTimestamp(ts); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/arrow/VarBinaryToBinaryConverter.java ================================================ package net.snowflake.client.internal.core.arrow; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.common.core.SFBinary; import net.snowflake.client.internal.core.DataConversionContext; import net.snowflake.client.internal.core.SFException; import org.apache.arrow.vector.ValueVector; import org.apache.arrow.vector.VarBinaryVector; /** Converter from Arrow VarBinaryVector to Binary. */ public class VarBinaryToBinaryConverter extends AbstractArrowVectorConverter { private VarBinaryVector varBinaryVector; /** * @param valueVector ValueVector * @param columnIndex column index * @param context DataConversionContext */ public VarBinaryToBinaryConverter( ValueVector valueVector, int columnIndex, DataConversionContext context) { super(SnowflakeType.BINARY.name(), valueVector, columnIndex, context); this.varBinaryVector = (VarBinaryVector) valueVector; } @Override public String toString(int index) { byte[] bytes = toBytes(index); SFBinary binary = new SFBinary(bytes); return bytes == null ? 
null : context.getBinaryFormatter().format(binary); } @Override public byte[] toBytes(int index) { return varBinaryVector.getObject(index); } @Override public Object toObject(int index) { return toBytes(index); } @Override public boolean toBoolean(int index) throws SFException { String str = toString(index); if (str == null) { return false; } else { throw new SFException(ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, "Boolean", str); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/arrow/VarCharConverter.java ================================================ package net.snowflake.client.internal.core.arrow; import java.math.BigDecimal; import java.nio.charset.StandardCharsets; import java.sql.Date; import java.text.DateFormat; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.TimeZone; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.core.DataConversionContext; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import org.apache.arrow.vector.ValueVector; import org.apache.arrow.vector.VarCharVector; /** Convert Arrow VarCharVector to Java types */ public class VarCharConverter extends AbstractArrowVectorConverter { private VarCharVector varCharVector; /** * @param valueVector ValueVector * @param columnIndex column index * @param context DataConversionContext */ public VarCharConverter(ValueVector valueVector, int columnIndex, DataConversionContext context) { super(SnowflakeType.TEXT.name(), valueVector, columnIndex, context); this.varCharVector = (VarCharVector) valueVector; } @Override public String toString(int index) { byte[] bytes = toBytes(index); return bytes == null ? null : new String(bytes, StandardCharsets.UTF_8); } @Override public byte[] toBytes(int index) { return isNull(index) ? 
null : varCharVector.get(index); } @Override public Object toObject(int index) { return toString(index); } @Override public short toShort(int index) throws SFException { String str = toString(index); try { if (str == null) { return 0; } else { return Short.parseShort(str); } } catch (NumberFormatException ex) { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.SHORT_STR, str); } } @Override public int toInt(int index) throws SFException { String str = toString(index); try { if (str == null) { return 0; } else { return Integer.parseInt(str); } } catch (NumberFormatException ex) { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.INT_STR, str); } } @Override public long toLong(int index) throws SFException { String str = toString(index); try { if (str == null) { return 0; } else { return Long.parseLong(str); } } catch (NumberFormatException ex) { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.LONG_STR, str); } } @Override public float toFloat(int index) throws SFException { String str = toString(index); try { if (str == null) { return 0; } else { return Float.parseFloat(str); } } catch (NumberFormatException ex) { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.FLOAT_STR, str); } } @Override public double toDouble(int index) throws SFException { String str = toString(index); try { if (str == null) { return 0; } else { return Double.parseDouble(str); } } catch (NumberFormatException ex) { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.DOUBLE_STR, str); } } @Override public BigDecimal toBigDecimal(int index) throws SFException { String str = toString(index); try { if (str == null) { return null; } else { return new BigDecimal(str); } } catch (Exception ex) { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.BIG_DECIMAL_STR, str); } } @Override public boolean toBoolean(int index) throws SFException { String str = toString(index); if (str == null) { return false; } else if ("0".equals(str) || Boolean.FALSE.toString().equalsIgnoreCase(str)) { return false; } else if ("1".equals(str) || Boolean.TRUE.toString().equalsIgnoreCase(str)) { return true; } else { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.BOOLEAN_STR, str); } } @Override public Date toDate(int index, TimeZone jvmTz, boolean useDateFormat) throws SFException { if (isNull(index)) { return null; } try { DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd"); Date date = new Date(dateFormat.parse(toString(index)).getTime()); return date; } catch (ParseException e) { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, logicalTypeStr, SnowflakeUtil.DATE_STR, ""); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/arrow/VectorTypeConverter.java ================================================ package net.snowflake.client.internal.core.arrow; import java.util.List; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.core.DataConversionContext; import net.snowflake.client.internal.core.SFException; import org.apache.arrow.vector.complex.FixedSizeListVector; /** Arrow FixedSizeListVector converter. 
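* The {@code toObject(int)} implementation returns the backing Java list, and {@code toString(int)} delegates to the list's own {@code toString()}.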
*/ public class VectorTypeConverter extends AbstractArrowVectorConverter { private final FixedSizeListVector vector; /** * @param valueVector ValueVector * @param vectorIndex vector index * @param context DataConversionContext */ public VectorTypeConverter( FixedSizeListVector valueVector, int vectorIndex, DataConversionContext context) { super(SnowflakeType.ARRAY.name(), valueVector, vectorIndex, context); this.vector = valueVector; } @Override public Object toObject(int index) throws SFException { if (isNull(index)) { return null; } return vector.getObject(index); } @Override public byte[] toBytes(int index) throws SFException { return isNull(index) ? null : toString(index).getBytes(); } @Override public String toString(int index) throws SFException { List<?> object = vector.getObject(index); if (object == null) { return null; } return object.toString(); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/arrow/tostringhelpers/ArrowArrayStringRepresentationBuilder.java ================================================ package net.snowflake.client.internal.core.arrow.tostringhelpers; import net.snowflake.client.api.resultset.SnowflakeType; public class ArrowArrayStringRepresentationBuilder extends ArrowStringRepresentationBuilderBase { private final SnowflakeType valueType; public ArrowArrayStringRepresentationBuilder(SnowflakeType valueType) { super(",", "[", "]"); this.valueType = valueType; } public ArrowStringRepresentationBuilderBase appendValue(String value) { return add(quoteIfNeeded(value, valueType)); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/arrow/tostringhelpers/ArrowObjectStringRepresentationBuilder.java ================================================ package net.snowflake.client.internal.core.arrow.tostringhelpers; import java.util.StringJoiner; import net.snowflake.client.api.resultset.SnowflakeType; public class ArrowObjectStringRepresentationBuilder extends ArrowStringRepresentationBuilderBase { public ArrowObjectStringRepresentationBuilder() { super(",", "{", "}"); } public ArrowStringRepresentationBuilderBase appendKeyValue( String key, String value, SnowflakeType valueType) { StringJoiner joiner = new StringJoiner(": "); joiner.add('"' + key + '"'); joiner.add(quoteIfNeeded(value, valueType)); return add(joiner.toString()); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/arrow/tostringhelpers/ArrowStringRepresentationBuilderBase.java ================================================ package net.snowflake.client.internal.core.arrow.tostringhelpers; import java.util.HashSet; import java.util.Set; import java.util.StringJoiner; import net.snowflake.client.api.resultset.SnowflakeType; /** * StringBuilder-like class to aggregate the string representation of Snowflake native ARROW * structured types as JSON one-liners. Provides some additional Snowflake-specific logic in order * to determine whether the value should be quoted or the case should be changed.
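* Concrete subclasses configure the delimiter and the enclosing brackets ("[" / "]" for arrays, "{" / "}" for objects) and append values that have already been converted to strings.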
*/ public abstract class ArrowStringRepresentationBuilderBase { private final StringJoiner joiner; private static final Set<SnowflakeType> quotableTypes; static { quotableTypes = new HashSet<>(); quotableTypes.add(SnowflakeType.ANY); quotableTypes.add(SnowflakeType.CHAR); quotableTypes.add(SnowflakeType.TEXT); quotableTypes.add(SnowflakeType.BINARY); quotableTypes.add(SnowflakeType.DATE); quotableTypes.add(SnowflakeType.TIME); quotableTypes.add(SnowflakeType.TIMESTAMP_LTZ); quotableTypes.add(SnowflakeType.TIMESTAMP_NTZ); quotableTypes.add(SnowflakeType.TIMESTAMP_TZ); } public ArrowStringRepresentationBuilderBase(String delimiter, String prefix, String suffix) { joiner = new StringJoiner(delimiter, prefix, suffix); } protected ArrowStringRepresentationBuilderBase add(String string) { joiner.add(string); return this; } private boolean shouldQuoteValue(SnowflakeType type) { return quotableTypes.contains(type); } protected String quoteIfNeeded(String string, SnowflakeType type) { if (string == null) { return null; } // Turn Boolean string representations lowercase to make the output JSON-compatible // this should be changed on the converter level, but it would be a breaking change thus // for now only structured types will be valid JSONs while in NATIVE ARROW mode if (type == SnowflakeType.BOOLEAN) { string = string.toLowerCase(); } if (shouldQuoteValue(type)) { return '"' + string + '"'; } return string; } @Override public String toString() { return joiner.toString(); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/auth/ClientAuthnDTO.java ================================================ package net.snowflake.client.internal.core.auth; import java.util.Map; import javax.annotation.Nullable; public class ClientAuthnDTO { // contains all the required data for current authn step private final Map<String, Object> data; /* * current state * tokenized string with all current parameters and the authn step */ private final String inFlightCtx; public ClientAuthnDTO(Map<String, Object> data, @Nullable String inFlightCtx) { this.data = data; this.inFlightCtx = inFlightCtx; } /** Required by Jackson */ public Map<String, Object> getData() { return data; } /** Required by Jackson */ public String getInFlightCtx() { return inFlightCtx; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/auth/ClientAuthnParameter.java ================================================ package net.snowflake.client.internal.core.auth; public enum ClientAuthnParameter { LOGIN_NAME, PASSWORD, RAW_SAML_RESPONSE, ACCOUNT_NAME, CLIENT_APP_ID, CLIENT_APP_VERSION, EXT_AUTHN_DUO_METHOD, PASSCODE, CLIENT_ENVIRONMENT, AUTHENTICATOR, BROWSER_MODE_REDIRECT_PORT, SESSION_PARAMETERS, PROOF_KEY, TOKEN, OAUTH_TYPE, PROVIDER, APPLICATION_PATH, SPCS_TOKEN } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/auth/oauth/AccessTokenProvider.java ================================================ package net.snowflake.client.internal.core.auth.oauth; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.core.SFLoginInput; public interface AccessTokenProvider { TokenResponseDTO getAccessToken(SFLoginInput loginInput) throws SFException; String getDPoPPublicKey(); } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/auth/oauth/AuthorizationCodeRedirectRequestHandler.java ================================================ package
net.snowflake.client.internal.core.auth.oauth; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty; import com.google.common.html.HtmlEscapers; import com.nimbusds.oauth2.sdk.id.State; import java.util.Map; import java.util.concurrent.CompletableFuture; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; class AuthorizationCodeRedirectRequestHandler { private static final SFLogger logger = SFLoggerFactory.getLogger(AuthorizationCodeRedirectRequestHandler.class); static String handleRedirectRequest( Map<String, String> urlParams, CompletableFuture<String> authorizationCodeFuture, State expectedState) { String response; if (urlParams.containsKey("error")) { response = "Authorization error: " + urlParams.get("error"); authorizationCodeFuture.completeExceptionally( new SFException( ErrorCode.OAUTH_AUTHORIZATION_CODE_FLOW_ERROR, String.format( "Error during authorization: %s, %s", urlParams.get("error"), urlParams.get("error_description")))); } else if (!expectedState.getValue().equals(urlParams.get("state"))) { authorizationCodeFuture.completeExceptionally( new SFException( ErrorCode.OAUTH_AUTHORIZATION_CODE_FLOW_ERROR, String.format( "Invalid authorization request redirection state: %s, expected: %s", urlParams.get("state"), expectedState.getValue()))); response = "Authorization error: invalid authorization request redirection state"; } else { String authorizationCode = urlParams.get("code"); if (!isNullOrEmpty(authorizationCode)) { logger.debug("Received authorization code on redirect URI"); response = "Authorization completed successfully."; authorizationCodeFuture.complete(authorizationCode); } else { authorizationCodeFuture.completeExceptionally( new SFException( ErrorCode.OAUTH_AUTHORIZATION_CODE_FLOW_ERROR, String.format( "Authorization code redirect URI server received request without authorization code; queryParams: %s", urlParams))); response = "Authorization error: authorization code has not been returned to the driver."; } } return HtmlEscapers.htmlEscaper().escape(response); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/auth/oauth/DPoPUtil.java ================================================ package net.snowflake.client.internal.core.auth.oauth; import com.nimbusds.jose.JOSEException; import com.nimbusds.jose.JWSAlgorithm; import com.nimbusds.jose.jwk.Curve; import com.nimbusds.jose.jwk.ECKey; import com.nimbusds.jose.jwk.gen.ECKeyGenerator; import com.nimbusds.jwt.SignedJWT; import com.nimbusds.oauth2.sdk.dpop.DPoPProofFactory; import com.nimbusds.oauth2.sdk.dpop.DefaultDPoPProofFactory; import com.nimbusds.oauth2.sdk.dpop.JWKThumbprintConfirmation; import com.nimbusds.openid.connect.sdk.Nonce; import java.net.URI; import java.net.URISyntaxException; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.internal.core.SFException; import org.apache.http.client.methods.HttpRequestBase; public class DPoPUtil { private final ECKey jwk; DPoPUtil() throws SFException { try { jwk = new ECKeyGenerator(Curve.P_256).generate(); } catch (JOSEException e) { throw new SFException( ErrorCode.INTERNAL_ERROR, "Error during DPoP JWK initialization: " + e.getMessage()); } } public DPoPUtil(String jsonKey) throws SFException { try { jwk = ECKey.parse(jsonKey); } catch (Exception e) { throw new SFException( ErrorCode.INTERNAL_ERROR, "Error during
DPoP JWK initialization: " + e.getMessage()); } } String getPublicKey() { return jwk.toJSONString(); } JWKThumbprintConfirmation getThumbprint() throws SFException { try { return JWKThumbprintConfirmation.of(this.jwk); } catch (JOSEException e) { throw new SFException( ErrorCode.INTERNAL_ERROR, "Error during JWK thumbprint generation: " + e.getMessage()); } } public void addDPoPProofHeaderToRequest(HttpRequestBase httpRequest, String nonce) throws SFException { SignedJWT signedJWT = generateDPoPProof(httpRequest, nonce); httpRequest.setHeader("DPoP", signedJWT.serialize()); } private SignedJWT generateDPoPProof(HttpRequestBase httpRequest, String nonce) throws SFException { try { DPoPProofFactory proofFactory = new DefaultDPoPProofFactory(jwk, JWSAlgorithm.ES256); if (nonce != null) { return proofFactory.createDPoPJWT( httpRequest.getMethod(), httpRequest.getURI(), new Nonce(nonce)); } else { return proofFactory.createDPoPJWT( httpRequest.getMethod(), getUriWithoutQuery(httpRequest.getURI())); } } catch (Exception e) { throw new SFException( ErrorCode.INTERNAL_ERROR, "Error during DPoP proof generation: " + e.getMessage()); } } /** * Method needed for the sake of DPoP proof JWT creation. The URI claim (htu) does not support * query parameters. */ private URI getUriWithoutQuery(URI uri) throws URISyntaxException { return new URI(uri.getScheme(), uri.getAuthority(), uri.getPath(), null, null); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/auth/oauth/OAuthAccessTokenForRefreshTokenProvider.java ================================================ package net.snowflake.client.internal.core.auth.oauth; import com.nimbusds.oauth2.sdk.RefreshTokenGrant; import com.nimbusds.oauth2.sdk.Scope; import com.nimbusds.oauth2.sdk.TokenRequest; import com.nimbusds.oauth2.sdk.auth.ClientAuthentication; import com.nimbusds.oauth2.sdk.auth.ClientSecretBasic; import com.nimbusds.oauth2.sdk.auth.Secret; import com.nimbusds.oauth2.sdk.http.HTTPRequest; import com.nimbusds.oauth2.sdk.id.ClientID; import com.nimbusds.oauth2.sdk.token.RefreshToken; import java.net.URI; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.core.SFLoginInput; import net.snowflake.client.internal.jdbc.SnowflakeUseDPoPNonceException; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import org.apache.http.client.methods.HttpRequestBase; public class OAuthAccessTokenForRefreshTokenProvider implements AccessTokenProvider { private static final SFLogger logger = SFLoggerFactory.getLogger(OAuthAccessTokenForRefreshTokenProvider.class); private final DPoPUtil dPoPUtil; public OAuthAccessTokenForRefreshTokenProvider() throws SFException { this.dPoPUtil = new DPoPUtil(); } @Override public TokenResponseDTO getAccessToken(SFLoginInput loginInput) throws SFException { return exchangeRefreshTokenForAccessToken(loginInput, null, false); } @Override public String getDPoPPublicKey() { return dPoPUtil.getPublicKey(); } private TokenResponseDTO exchangeRefreshTokenForAccessToken( SFLoginInput loginInput, String dpopNonce, boolean retried) throws SFException { try { logger.info("Obtaining new OAuth access token using refresh token..."); HttpRequestBase tokenRequest = buildTokenRequest(loginInput, dpopNonce); return OAuthUtil.sendTokenRequest(tokenRequest, loginInput); } catch (SnowflakeUseDPoPNonceException e) { logger.debug("Received
\"use_dpop_nonce\" error from IdP while performing token request"); if (!retried) { logger.debug("Retrying token request with DPoP nonce included..."); return exchangeRefreshTokenForAccessToken(loginInput, e.getNonce(), true); } else { logger.debug("Skipping DPoP nonce retry as it has been already retried"); throw e; } } catch (Exception e) { logger.error("Error during OAuth refresh token flow.", e); throw new SFException(e, ErrorCode.OAUTH_REFRESH_TOKEN_FLOW_ERROR, e.getMessage()); } } private HttpRequestBase buildTokenRequest(SFLoginInput loginInput, String dpopNonce) throws SFException { URI tokenRequestUrl = OAuthUtil.getTokenRequestUrl(loginInput.getOauthLoginInput(), loginInput.getServerUrl()); ClientAuthentication clientAuthentication = new ClientSecretBasic( new ClientID(loginInput.getOauthLoginInput().getClientId()), new Secret(loginInput.getOauthLoginInput().getClientSecret())); Scope scope = new Scope(OAuthUtil.getScope(loginInput.getOauthLoginInput(), loginInput.getRole())); RefreshToken refreshToken = new RefreshToken(loginInput.getOauthRefreshToken()); TokenRequest tokenRequest = new TokenRequest( tokenRequestUrl, clientAuthentication, new RefreshTokenGrant(refreshToken), scope); HTTPRequest tokenHttpRequest = tokenRequest.toHTTPRequest(); HttpRequestBase convertedTokenRequest = OAuthUtil.convertToBaseRequest(tokenHttpRequest); if (loginInput.isDPoPEnabled()) { dPoPUtil.addDPoPProofHeaderToRequest(convertedTokenRequest, dpopNonce); } return convertedTokenRequest; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/auth/oauth/OAuthAccessTokenProviderFactory.java ================================================ package net.snowflake.client.internal.core.auth.oauth; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty; import java.net.URI; import java.util.Arrays; import java.util.HashSet; import java.util.Set; import net.snowflake.client.api.auth.AuthenticatorType; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.internal.core.AssertUtil; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.core.SFLoginInput; import net.snowflake.client.internal.core.SFOauthLoginInput; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; public class OAuthAccessTokenProviderFactory { private static final String SNOWFLAKE_DOMAIN = "snowflakecomputing.com"; private final SFLogger logger = SFLoggerFactory.getLogger(OAuthAccessTokenProviderFactory.class); private static final Set ELIGIBLE_AUTH_TYPES = new HashSet<>( Arrays.asList( AuthenticatorType.OAUTH_AUTHORIZATION_CODE, AuthenticatorType.OAUTH_CLIENT_CREDENTIALS)); public AccessTokenProvider createAccessTokenProvider( AuthenticatorType authenticatorType, SFLoginInput loginInput) throws SFException { switch (authenticatorType) { case OAUTH_AUTHORIZATION_CODE: if (isEligibleForDefaultClientCredentials(loginInput.getOauthLoginInput())) { loginInput.getOauthLoginInput().setLocalApplicationClientCredential(); } assertContainsClientCredentials(loginInput, authenticatorType); validateHttpRedirectUriIfSpecified(loginInput); validateAuthorizationAndTokenEndpointsIfSpecified(loginInput); return new OAuthAuthorizationCodeAccessTokenProvider( loginInput.getBrowserHandler(), new RandomStateProvider(), loginInput.getBrowserResponseTimeout().getSeconds()); case OAUTH_CLIENT_CREDENTIALS: assertContainsClientCredentials(loginInput, authenticatorType); 
AssertUtil.assertTrue( loginInput.getOauthLoginInput().getTokenRequestUrl() != null, "passing oauthTokenRequestUrl is required for OAUTH_CLIENT_CREDENTIALS authentication"); return new OAuthClientCredentialsAccessTokenProvider(); default: String message = "Unsupported authenticator type: " + authenticatorType; logger.error(message); throw new SFException(ErrorCode.INTERNAL_ERROR, message); } } static boolean isEligibleForDefaultClientCredentials(SFOauthLoginInput oauthLoginInput) { return areClientCredentialsNotSupplied(oauthLoginInput) && isSnowflakeAsIdP(oauthLoginInput); } private static boolean areClientCredentialsNotSupplied(SFOauthLoginInput oauthLoginInput) { return (oauthLoginInput.getClientId() == null && oauthLoginInput.getClientSecret() == null); } private static boolean isSnowflakeAsIdP(SFOauthLoginInput oauthLoginInput) { return ((oauthLoginInput.getAuthorizationUrl() == null || oauthLoginInput.getAuthorizationUrl().contains(SNOWFLAKE_DOMAIN)) && (oauthLoginInput.getTokenRequestUrl() == null || oauthLoginInput.getTokenRequestUrl().contains(SNOWFLAKE_DOMAIN))); } private void validateAuthorizationAndTokenEndpointsIfSpecified(SFLoginInput loginInput) throws SFException { String authorizationEndpoint = loginInput.getOauthLoginInput().getAuthorizationUrl(); String tokenEndpoint = loginInput.getOauthLoginInput().getTokenRequestUrl(); if ((!isNullOrEmpty(authorizationEndpoint) && isNullOrEmpty(tokenEndpoint)) || (isNullOrEmpty(authorizationEndpoint) && !isNullOrEmpty(tokenEndpoint))) { throw new SFException( ErrorCode.OAUTH_AUTHORIZATION_CODE_FLOW_ERROR, "For OAUTH_AUTHORIZATION_CODE authentication with external IdP, both oauthAuthorizationUrl and oauthTokenRequestUrl must be specified"); } else if (!isNullOrEmpty(authorizationEndpoint) && !isNullOrEmpty(tokenEndpoint)) { URI authorizationUrl = URI.create(authorizationEndpoint); URI tokenUrl = URI.create(tokenEndpoint); if (isNullOrEmpty(authorizationUrl.getHost()) || isNullOrEmpty(tokenUrl.getHost())) { throw new SFException( ErrorCode.OAUTH_AUTHORIZATION_CODE_FLOW_ERROR, String.format( "OAuth authorization URL and token URL must be specified in proper format; oauthAuthorizationUrl=%s oauthTokenRequestUrl=%s", authorizationUrl, tokenUrl)); } if (!authorizationUrl.getHost().equals(tokenUrl.getHost())) { logger.warn( String.format( "Both oauthAuthorizationUrl and oauthTokenRequestUrl should belong to the same host; oauthAuthorizationUrl=%s oauthTokenRequestUrl=%s", authorizationUrl, tokenUrl)); } } } private static void validateHttpRedirectUriIfSpecified(SFLoginInput loginInput) throws SFException { String redirectUri = loginInput.getOauthLoginInput().getRedirectUri(); if (redirectUri != null) { AssertUtil.assertTrue( !redirectUri.startsWith("https"), "provided redirect URI should start with \"http\", not \"https\""); } } public static boolean isEligible(AuthenticatorType authenticatorType) { return getEligible().contains(authenticatorType); } private static Set<AuthenticatorType> getEligible() { return ELIGIBLE_AUTH_TYPES; } private static void assertContainsClientCredentials( SFLoginInput loginInput, AuthenticatorType authenticatorType) throws SFException { AssertUtil.assertTrue( loginInput.getOauthLoginInput().getClientId() != null, String.format( "passing oauthClientId is required for %s authentication", authenticatorType.name())); AssertUtil.assertTrue( loginInput.getOauthLoginInput().getClientSecret() != null, String.format( "passing oauthClientSecret is required for %s authentication", authenticatorType.name())); } }
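A minimal caller-side sketch (not part of this repository) of exercising the OAUTH_CLIENT_CREDENTIALS path that the factory above validates. The property names oauthClientId, oauthClientSecret and oauthTokenRequestUrl are taken from the factory's own error messages; the user, account URL and IdP endpoint are placeholders, and the exact property spellings accepted by the driver should be checked against its documentation.

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

/** Hypothetical usage sketch; all connection values below are placeholders. */
public class OAuthClientCredentialsExample {
  public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.put("user", "SVC_USER"); // placeholder service user
    props.put("authenticator", "OAUTH_CLIENT_CREDENTIALS");
    props.put("oauthClientId", "my-client-id"); // required by the factory above
    props.put("oauthClientSecret", "my-client-secret"); // required by the factory above
    props.put("oauthTokenRequestUrl", "https://idp.example.com/oauth/token"); // required for this flow
    try (Connection con =
        DriverManager.getConnection("jdbc:snowflake://myaccount.snowflakecomputing.com", props)) {
      System.out.println("Connected: " + !con.isClosed());
    }
  }
}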
================================================ FILE: src/main/java/net/snowflake/client/internal/core/auth/oauth/OAuthAuthorizationCodeAccessTokenProvider.java ================================================ package net.snowflake.client.internal.core.auth.oauth; import static net.snowflake.client.internal.core.SessionUtilExternalBrowser.AuthExternalBrowserHandlers; import com.nimbusds.oauth2.sdk.AuthorizationCode; import com.nimbusds.oauth2.sdk.AuthorizationCodeGrant; import com.nimbusds.oauth2.sdk.AuthorizationGrant; import com.nimbusds.oauth2.sdk.AuthorizationRequest; import com.nimbusds.oauth2.sdk.ResponseType; import com.nimbusds.oauth2.sdk.Scope; import com.nimbusds.oauth2.sdk.TokenRequest; import com.nimbusds.oauth2.sdk.auth.ClientAuthentication; import com.nimbusds.oauth2.sdk.auth.ClientSecretBasic; import com.nimbusds.oauth2.sdk.auth.Secret; import com.nimbusds.oauth2.sdk.http.HTTPRequest; import com.nimbusds.oauth2.sdk.id.ClientID; import com.nimbusds.oauth2.sdk.id.State; import com.nimbusds.oauth2.sdk.pkce.CodeChallengeMethod; import com.nimbusds.oauth2.sdk.pkce.CodeVerifier; import com.sun.net.httpserver.HttpServer; import java.io.IOException; import java.net.InetSocketAddress; import java.net.URI; import java.nio.charset.StandardCharsets; import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.core.SFLoginInput; import net.snowflake.client.internal.core.SFOauthLoginInput; import net.snowflake.client.internal.jdbc.SnowflakeUseDPoPNonceException; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import org.apache.http.NameValuePair; import org.apache.http.client.methods.HttpRequestBase; import org.apache.http.client.utils.URLEncodedUtils; public class OAuthAuthorizationCodeAccessTokenProvider implements AccessTokenProvider { private static final SFLogger logger = SFLoggerFactory.getLogger(OAuthAuthorizationCodeAccessTokenProvider.class); static final String DEFAULT_REDIRECT_HOST = "http://127.0.0.1"; static final String DEFAULT_REDIRECT_URI_ENDPOINT = "/"; private final AuthExternalBrowserHandlers browserHandler; private final StateProvider<String> stateProvider; private final DPoPUtil dpopUtil; private final long browserAuthorizationTimeoutSeconds; public OAuthAuthorizationCodeAccessTokenProvider( AuthExternalBrowserHandlers browserHandler, StateProvider<String> stateProvider, long browserAuthorizationTimeoutSeconds) throws SFException { this.browserHandler = browserHandler; this.stateProvider = stateProvider; this.dpopUtil = new DPoPUtil(); this.browserAuthorizationTimeoutSeconds = browserAuthorizationTimeoutSeconds; } @Override public TokenResponseDTO getAccessToken(SFLoginInput loginInput) throws SFException { try { logger.info("Starting OAuth authorization code authentication flow..."); CodeVerifier pkceVerifier = new CodeVerifier(); URI redirectUri = OAuthUtil.buildRedirectUri(loginInput.getOauthLoginInput()); AuthorizationCode authorizationCode = requestAuthorizationCode(loginInput, pkceVerifier, redirectUri); return exchangeAuthorizationCodeForAccessToken( loginInput, authorizationCode, pkceVerifier, redirectUri, null, false); } catch (Exception e) { logger.error( "Error during OAuth authorization code flow.
Verify configuration passed to driver and IdP (URLs, grant types, scope, etc.)", e); throw new SFException(e, ErrorCode.OAUTH_AUTHORIZATION_CODE_FLOW_ERROR, e.getMessage()); } } @Override public String getDPoPPublicKey() { return dpopUtil.getPublicKey(); } private AuthorizationCode requestAuthorizationCode( SFLoginInput loginInput, CodeVerifier pkceVerifier, URI redirectUri) throws SFException, IOException { State state = new State(stateProvider.getState()); AuthorizationRequest request = buildAuthorizationRequest(loginInput, pkceVerifier, state, redirectUri); URI authorizeRequestURI = request.toURI(); HttpServer httpServer = createHttpServer(redirectUri); CompletableFuture<String> codeFuture = setupRedirectURIServerForAuthorizationCode(httpServer, state); logger.debug("Waiting for authorization code redirection to {}...", redirectUri); return letUserAuthorize(authorizeRequestURI, codeFuture, httpServer); } private TokenResponseDTO exchangeAuthorizationCodeForAccessToken( SFLoginInput loginInput, AuthorizationCode authorizationCode, CodeVerifier pkceVerifier, URI redirectUri, String dpopNonce, boolean retried) throws SFException { try { HttpRequestBase request = buildTokenRequest(loginInput, authorizationCode, pkceVerifier, redirectUri, dpopNonce); return OAuthUtil.sendTokenRequest(request, loginInput); } catch (SnowflakeUseDPoPNonceException e) { logger.debug("Received \"use_dpop_nonce\" error from IdP while performing token request"); if (!retried) { logger.debug("Retrying token request with DPoP nonce included..."); return exchangeAuthorizationCodeForAccessToken( loginInput, authorizationCode, pkceVerifier, redirectUri, e.getNonce(), true); } else { logger.debug("Skipping DPoP nonce retry as it has been already retried"); throw e; } } catch (Exception e) { logger.error("Error during making OAuth access token request", e); throw new SFException(e, ErrorCode.OAUTH_AUTHORIZATION_CODE_FLOW_ERROR, e.getMessage()); } } private AuthorizationCode letUserAuthorize( URI authorizeRequestURI, CompletableFuture<String> codeFuture, HttpServer httpServer) throws SFException { try { logger.debug( "Opening browser for authorization code request to: {}", authorizeRequestURI.getAuthority() + authorizeRequestURI.getPath()); browserHandler.openBrowser(authorizeRequestURI.toString()); String code = codeFuture.get(this.browserAuthorizationTimeoutSeconds, TimeUnit.SECONDS); return new AuthorizationCode(code); } catch (TimeoutException e) { throw new SFException( e, ErrorCode.OAUTH_AUTHORIZATION_CODE_FLOW_ERROR, "Authorization request timed out. Snowflake driver did not receive authorization code back to the redirect URI. Verify your security integration and driver configuration."); } catch (Exception e) { throw new SFException(e, ErrorCode.OAUTH_AUTHORIZATION_CODE_FLOW_ERROR, e.getMessage()); } finally { logger.debug("Stopping OAuth redirect URI server @ {}", httpServer.getAddress()); httpServer.stop(1); } } private static CompletableFuture<String> setupRedirectURIServerForAuthorizationCode( HttpServer httpServer, State expectedState) { CompletableFuture<String> authorizationCodeFuture = new CompletableFuture<>(); httpServer.createContext( DEFAULT_REDIRECT_URI_ENDPOINT, exchange -> { Map<String, String> urlParams = URLEncodedUtils.parse(exchange.getRequestURI(), StandardCharsets.UTF_8).stream() .collect(Collectors.toMap(NameValuePair::getName, NameValuePair::getValue)); String response = AuthorizationCodeRedirectRequestHandler.handleRedirectRequest( urlParams, authorizationCodeFuture, expectedState); exchange.sendResponseHeaders(200, response.length()); exchange.getResponseBody().write(response.getBytes(StandardCharsets.UTF_8)); exchange.getResponseBody().close(); }); logger.debug("Starting OAuth redirect URI server @ {}", httpServer.getAddress()); httpServer.start(); return authorizationCodeFuture; } private static HttpServer createHttpServer(URI redirectUri) throws IOException { return HttpServer.create( new InetSocketAddress(redirectUri.getHost(), redirectUri.getPort()), 0); } private AuthorizationRequest buildAuthorizationRequest( SFLoginInput loginInput, CodeVerifier pkceVerifier, State state, URI redirectUri) throws SFException { SFOauthLoginInput oauthLoginInput = loginInput.getOauthLoginInput(); ClientID clientID = new ClientID(oauthLoginInput.getClientId()); String scope = OAuthUtil.getScope(loginInput.getOauthLoginInput(), loginInput.getRole()); AuthorizationRequest.Builder builder = new AuthorizationRequest.Builder(new ResponseType(ResponseType.Value.CODE), clientID) .scope(new Scope(scope)) .state(state) .redirectionURI(redirectUri) .codeChallenge(pkceVerifier, CodeChallengeMethod.S256) .endpointURI( OAuthUtil.getAuthorizationUrl( loginInput.getOauthLoginInput(), loginInput.getServerUrl())); if (loginInput.isDPoPEnabled()) { builder.dPoPJWKThumbprintConfirmation(new DPoPUtil().getThumbprint()); } return builder.build(); } private HttpRequestBase buildTokenRequest( SFLoginInput loginInput, AuthorizationCode authorizationCode, CodeVerifier pkceVerifier, URI redirectUri, String dpopNonce) throws SFException { AuthorizationGrant codeGrant = new AuthorizationCodeGrant(authorizationCode, redirectUri, pkceVerifier); ClientAuthentication clientAuthentication = new ClientSecretBasic( new ClientID(loginInput.getOauthLoginInput().getClientId()), new Secret(loginInput.getOauthLoginInput().getClientSecret())); Scope scope = new Scope(OAuthUtil.getScope(loginInput.getOauthLoginInput(), loginInput.getRole())); TokenRequest.Builder tokenRequestBuilder = new TokenRequest.Builder( OAuthUtil.getTokenRequestUrl( loginInput.getOauthLoginInput(), loginInput.getServerUrl()), clientAuthentication, codeGrant) .scope(scope); if (loginInput.getOauthLoginInput().getEnableSingleUseRefreshTokens()) { tokenRequestBuilder.customParameter("enable_single_use_refresh_tokens", "true"); } TokenRequest tokenRequest = tokenRequestBuilder.build(); HTTPRequest tokenHttpRequest = tokenRequest.toHTTPRequest(); HttpRequestBase convertedTokenRequest = OAuthUtil.convertToBaseRequest(tokenHttpRequest); if (loginInput.isDPoPEnabled()) { dpopUtil.addDPoPProofHeaderToRequest(convertedTokenRequest, dpopNonce); } return convertedTokenRequest; } }
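To make the redirect-capture step above concrete, here is a self-contained sketch of the same loopback-server pattern built on the JDK's com.sun.net.httpserver, which the provider itself imports. It is an illustration under simplified assumptions (a fixed port 8001 and naive query-string parsing), not the driver's implementation.

import com.sun.net.httpserver.HttpServer;
import java.net.InetSocketAddress;
import java.net.URLDecoder;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;

public class LoopbackRedirectSketch {
  public static void main(String[] args) throws Exception {
    CompletableFuture<String> code = new CompletableFuture<>();
    HttpServer server = HttpServer.create(new InetSocketAddress("127.0.0.1", 8001), 0);
    server.createContext("/", exchange -> {
      // Naive parsing: complete the future with the first "code=" query parameter, if present.
      String query = exchange.getRequestURI().getRawQuery();
      for (String kv : (query == null ? "" : query).split("&")) {
        if (kv.startsWith("code=")) {
          code.complete(URLDecoder.decode(kv.substring(5), StandardCharsets.UTF_8.name()));
        }
      }
      byte[] body = "You may close this window.".getBytes(StandardCharsets.UTF_8);
      exchange.sendResponseHeaders(200, body.length);
      exchange.getResponseBody().write(body);
      exchange.getResponseBody().close();
    });
    server.start();
    try {
      System.out.println("Waiting for redirect on http://127.0.0.1:8001 ...");
      System.out.println("Got code: " + code.get(120, TimeUnit.SECONDS)); // time out like the driver does
    } finally {
      server.stop(1); // always tear the loopback server down
    }
  }
}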
================================================ FILE: src/main/java/net/snowflake/client/internal/core/auth/oauth/OAuthClientCredentialsAccessTokenProvider.java ================================================ package net.snowflake.client.internal.core.auth.oauth; import com.nimbusds.oauth2.sdk.ClientCredentialsGrant; import com.nimbusds.oauth2.sdk.Scope; import com.nimbusds.oauth2.sdk.TokenRequest; import com.nimbusds.oauth2.sdk.auth.ClientAuthentication; import com.nimbusds.oauth2.sdk.auth.ClientSecretBasic; import com.nimbusds.oauth2.sdk.auth.Secret; import com.nimbusds.oauth2.sdk.http.HTTPRequest; import com.nimbusds.oauth2.sdk.id.ClientID; import java.net.URI; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.core.SFLoginInput; import net.snowflake.client.internal.jdbc.SnowflakeUseDPoPNonceException; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import org.apache.http.client.methods.HttpRequestBase; public class OAuthClientCredentialsAccessTokenProvider implements AccessTokenProvider { private static final SFLogger logger = SFLoggerFactory.getLogger(OAuthClientCredentialsAccessTokenProvider.class); private final DPoPUtil dpopUtil; public OAuthClientCredentialsAccessTokenProvider() throws SFException { this.dpopUtil = new DPoPUtil(); } @Override public TokenResponseDTO getAccessToken(SFLoginInput loginInput) throws SFException { return exchangeClientCredentialsForAccessToken(loginInput, null, false); } @Override public String getDPoPPublicKey() { return dpopUtil.getPublicKey(); } private TokenResponseDTO exchangeClientCredentialsForAccessToken( SFLoginInput loginInput, String dpopNonce, boolean retried) throws SFException { try { logger.info("Starting OAuth client credentials authentication flow..."); HttpRequestBase tokenRequest = buildTokenRequest(loginInput, dpopNonce); return OAuthUtil.sendTokenRequest(tokenRequest, loginInput); } catch (SnowflakeUseDPoPNonceException e) { logger.debug("Received \"use_dpop_nonce\" error from IdP while performing token request"); if (!retried) { logger.debug("Retrying token request with DPoP nonce included..."); return exchangeClientCredentialsForAccessToken(loginInput, e.getNonce(), true); } else { logger.debug("Skipping DPoP nonce retry as it has been already retried"); throw e; } } catch (Exception e) { logger.error( "Error during OAuth client credentials flow.
Verify configuration passed to driver and IdP (URLs, grant types, scope, etc.)", e); throw new SFException(e, ErrorCode.OAUTH_CLIENT_CREDENTIALS_FLOW_ERROR, e.getMessage()); } } private HttpRequestBase buildTokenRequest(SFLoginInput loginInput, String dpopNonce) throws SFException { URI tokenRequestUrl = OAuthUtil.getTokenRequestUrl(loginInput.getOauthLoginInput(), loginInput.getServerUrl()); ClientAuthentication clientAuthentication = new ClientSecretBasic( new ClientID(loginInput.getOauthLoginInput().getClientId()), new Secret(loginInput.getOauthLoginInput().getClientSecret())); Scope scope = new Scope(OAuthUtil.getScope(loginInput.getOauthLoginInput(), loginInput.getRole())); TokenRequest tokenRequest = new TokenRequest( tokenRequestUrl, clientAuthentication, new ClientCredentialsGrant(), scope); HTTPRequest tokenHttpRequest = tokenRequest.toHTTPRequest(); HttpRequestBase convertedTokenRequest = OAuthUtil.convertToBaseRequest(tokenHttpRequest); if (loginInput.isDPoPEnabled()) { dpopUtil.addDPoPProofHeaderToRequest(convertedTokenRequest, dpopNonce); } return convertedTokenRequest; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/auth/oauth/OAuthUtil.java ================================================ package net.snowflake.client.internal.core.auth.oauth; import static net.snowflake.client.internal.core.auth.oauth.OAuthAuthorizationCodeAccessTokenProvider.DEFAULT_REDIRECT_HOST; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty; import com.fasterxml.jackson.databind.ObjectMapper; import com.nimbusds.oauth2.sdk.http.HTTPRequest; import java.io.IOException; import java.net.ServerSocket; import java.net.URI; import java.nio.charset.StandardCharsets; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.core.HttpUtil; import net.snowflake.client.internal.core.SFLoginInput; import net.snowflake.client.internal.core.SFOauthLoginInput; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpRequestBase; import org.apache.http.entity.StringEntity; class OAuthUtil { private static final SFLogger logger = SFLoggerFactory.getLogger(OAuthUtil.class); private static final String SNOWFLAKE_AUTHORIZE_ENDPOINT = "/oauth/authorize"; private static final String SNOWFLAKE_TOKEN_REQUEST_ENDPOINT = "/oauth/token-request"; private static final String DEFAULT_SESSION_ROLE_SCOPE_PREFIX = "session:role:"; static URI getTokenRequestUrl(SFOauthLoginInput oauthLoginInput, String serverUrl) { URI uri = !isNullOrEmpty(oauthLoginInput.getTokenRequestUrl()) ? 
URI.create(oauthLoginInput.getTokenRequestUrl()) : URI.create(serverUrl + SNOWFLAKE_TOKEN_REQUEST_ENDPOINT); logIfHttpInUse(uri); return uri.normalize(); } private static void logIfHttpInUse(URI uri) { if (uri.getScheme().equalsIgnoreCase("http")) { logger.warn("OAuth URL uses insecure HTTP protocol: {}", uri); } } static HttpRequestBase convertToBaseRequest(HTTPRequest request) { HttpPost baseRequest = new HttpPost(request.getURI()); baseRequest.setEntity(new StringEntity(request.getBody(), StandardCharsets.UTF_8)); request .getHeaderMap() .forEach((key, values) -> values.forEach(value -> baseRequest.addHeader(key, value))); return baseRequest; } static URI getAuthorizationUrl(SFOauthLoginInput oauthLoginInput, String serverUrl) { URI uri = !isNullOrEmpty(oauthLoginInput.getAuthorizationUrl()) ? URI.create(oauthLoginInput.getAuthorizationUrl()) : URI.create(serverUrl + SNOWFLAKE_AUTHORIZE_ENDPOINT); logIfHttpInUse(uri); return uri.normalize(); } static String getScope(SFOauthLoginInput oauthLoginInput, String role) { return (!isNullOrEmpty(oauthLoginInput.getScope())) ? oauthLoginInput.getScope() : DEFAULT_SESSION_ROLE_SCOPE_PREFIX + role; } static URI buildRedirectUri(SFOauthLoginInput oauthLoginInput) throws IOException { String redirectUri = !isNullOrEmpty(oauthLoginInput.getRedirectUri()) ? oauthLoginInput.getRedirectUri() : createDefaultRedirectUri(); return URI.create(redirectUri); } static TokenResponseDTO sendTokenRequest(HttpRequestBase request, SFLoginInput loginInput) throws SnowflakeSQLException, IOException { URI requestUri = request.getURI(); logger.debug( "Requesting OAuth access token from: {}", requestUri.getAuthority() + requestUri.getPath()); String tokenResponse = HttpUtil.executeGeneralRequest( request, loginInput.getLoginTimeout(), loginInput.getAuthTimeout(), loginInput.getSocketTimeoutInMillis(), 0, loginInput.getHttpClientSettingsKey(), null); ObjectMapper objectMapper = new ObjectMapper(); TokenResponseDTO tokenResponseDTO = objectMapper.readValue(tokenResponse, TokenResponseDTO.class); logger.debug( "Received OAuth access token of type \"{}\" from: {}{}", tokenResponseDTO.getTokenType(), requestUri.getAuthority(), requestUri.getPath()); return tokenResponseDTO; } private static String createDefaultRedirectUri() throws IOException { try (ServerSocket socket = new ServerSocket(0)) { return String.format("%s:%s", DEFAULT_REDIRECT_HOST, socket.getLocalPort()); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/auth/oauth/RandomStateProvider.java ================================================ package net.snowflake.client.internal.core.auth.oauth; import com.nimbusds.oauth2.sdk.id.State; public class RandomStateProvider implements StateProvider<String> { private static final int STATE_BYTE_SIZE = 256; @Override public String getState() { return new State(STATE_BYTE_SIZE).getValue(); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/auth/oauth/StateProvider.java ================================================ package net.snowflake.client.internal.core.auth.oauth; public interface StateProvider<T> { T getState(); } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/auth/oauth/TokenResponseDTO.java ================================================ package net.snowflake.client.internal.core.auth.oauth; import com.fasterxml.jackson.annotation.JsonCreator; import
com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonProperty; @JsonIgnoreProperties(ignoreUnknown = true) public class TokenResponseDTO { private final String accessToken; private final String refreshToken; private final String tokenType; private final String scope; private final String username; private final boolean idpInitiated; private final long expiresIn; private final long refreshTokenExpiresIn; @JsonCreator(mode = JsonCreator.Mode.PROPERTIES) public TokenResponseDTO( @JsonProperty(value = "access_token", required = true) String accessToken, @JsonProperty("refresh_token") String refreshToken, @JsonProperty("token_type") String tokenType, @JsonProperty("scope") String scope, @JsonProperty("username") String username, @JsonProperty("idp_initiated") boolean idpInitiated, @JsonProperty("expires_in") long expiresIn, @JsonProperty("refresh_token_expires_in") long refreshTokenExpiresIn) { this.accessToken = accessToken; this.tokenType = tokenType; this.refreshToken = refreshToken; this.scope = scope; this.username = username; this.idpInitiated = idpInitiated; this.expiresIn = expiresIn; this.refreshTokenExpiresIn = refreshTokenExpiresIn; } public String getAccessToken() { return accessToken; } public String getTokenType() { return tokenType; } public String getRefreshToken() { return refreshToken; } public String getScope() { return scope; } public long getExpiresIn() { return expiresIn; } public String getUsername() { return username; } public long getRefreshTokenExpiresIn() { return refreshTokenExpiresIn; } public boolean isIdpInitiated() { return idpInitiated; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/auth/wif/AwsAttestationService.java ================================================ package net.snowflake.client.internal.core.auth.wif; import java.time.Duration; import java.util.List; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.core.SFLoginInput; import net.snowflake.client.internal.jdbc.EnvironmentVariables; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import software.amazon.awssdk.auth.credentials.AwsCredentials; import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; import software.amazon.awssdk.http.SdkHttpRequest; import software.amazon.awssdk.http.auth.aws.signer.AwsV4HttpSigner; import software.amazon.awssdk.http.auth.spi.signer.SignRequest; import software.amazon.awssdk.identity.spi.AwsCredentialsIdentity; import software.amazon.awssdk.identity.spi.AwsSessionCredentialsIdentity; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.regions.providers.DefaultAwsRegionProviderChain; import software.amazon.awssdk.services.sts.StsClient; import software.amazon.awssdk.services.sts.model.AssumeRoleRequest; import software.amazon.awssdk.services.sts.model.AssumeRoleResponse; import software.amazon.awssdk.services.sts.model.Credentials; import software.amazon.awssdk.services.sts.model.GetCallerIdentityResponse; public class AwsAttestationService { public static final SFLogger logger = 
SFLoggerFactory.getLogger(AwsAttestationService.class); public static final int TIMEOUT_MS = 10_000; private static Region region; private final AwsV4HttpSigner aws4Signer; public AwsAttestationService() { aws4Signer = AwsV4HttpSigner.create(); } void initializeSignerRegion() { logger.debug("Getting AWS region from environment variable"); String envRegion = SnowflakeUtil.systemGetEnv(EnvironmentVariables.AWS_REGION.getName()); if (envRegion != null) { region = Region.of(envRegion); } else { logger.debug("Getting AWS region from default region provider chain"); region = DefaultAwsRegionProviderChain.builder().build().getRegion(); } } public AwsCredentials getAWSCredentials() { try { AwsCredentialsProvider credentialsProvider = DefaultCredentialsProvider.create(); return credentialsProvider.resolveCredentials(); } catch (Exception e) { logger.debug("Failed to retrieve AWS credentials: {}", e.getMessage()); return null; } } Region getAWSRegion() { return region; } SdkHttpRequest signRequestWithSigV4( SdkHttpRequest signableRequest, AwsCredentials awsCredentials) { AwsCredentialsIdentity credentialsIdentity; if (awsCredentials instanceof AwsSessionCredentials) { AwsSessionCredentials sessionCredentials = (AwsSessionCredentials) awsCredentials; // Create AwsSessionCredentialsIdentity that properly includes the session token credentialsIdentity = AwsSessionCredentialsIdentity.create( sessionCredentials.accessKeyId(), sessionCredentials.secretAccessKey(), sessionCredentials.sessionToken()); } else { // For basic credentials, use AwsCredentialsIdentity credentialsIdentity = AwsCredentialsIdentity.create( awsCredentials.accessKeyId(), awsCredentials.secretAccessKey()); } SignRequest<AwsCredentialsIdentity> signRequest = SignRequest.builder(credentialsIdentity) .request(signableRequest) .putProperty(AwsV4HttpSigner.SERVICE_SIGNING_NAME, "sts") .putProperty(AwsV4HttpSigner.REGION_NAME, getAWSRegion().toString()) .build(); return aws4Signer.sign(signRequest).request(); } AwsCredentials assumeRole(AwsCredentials currentCredentials, String roleArn, String externalId) throws SFException { try (StsClient stsClient = createStsClient(currentCredentials, TIMEOUT_MS)) { AssumeRoleRequest.Builder assumeRoleRequestBuilder = AssumeRoleRequest.builder() .roleArn(roleArn) .roleSessionName("identity-federation-session"); if (externalId != null && !externalId.isEmpty()) { assumeRoleRequestBuilder.externalId(externalId); } AssumeRoleRequest assumeRoleRequest = assumeRoleRequestBuilder.build(); AssumeRoleResponse assumeRoleResponse = stsClient.assumeRole(assumeRoleRequest); Credentials credentials = assumeRoleResponse.credentials(); logger.debug("Successfully assumed role: {}", roleArn); return AwsSessionCredentials.create( credentials.accessKeyId(), credentials.secretAccessKey(), credentials.sessionToken()); } catch (Exception e) { logger.error("Failed to assume role: {} - {}", roleArn, e.getMessage()); throw new SFException( ErrorCode.WORKLOAD_IDENTITY_FLOW_ERROR, "Failed to assume AWS role " + roleArn + ": " + e.getMessage()); } } AwsCredentials getCredentialsViaRoleChaining(SFLoginInput loginInput) throws SFException { AwsCredentials currentCredentials = getAWSCredentials(); if (currentCredentials == null) { throw new SFException( ErrorCode.WORKLOAD_IDENTITY_FLOW_ERROR, "No initial AWS credentials found for role chaining"); } List<String> impersonationPath = loginInput.getWorkloadIdentityImpersonationPath(); for (int i = 0; i < impersonationPath.size(); i++) { String roleArn = impersonationPath.get(i); logger.debug("Assuming role: {}", roleArn);
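// The configured external ID, if any, is applied only to the final role of the
// impersonation path; intermediate hops are assumed without one (see below).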
String externalId = (i == impersonationPath.size() - 1) ? loginInput.getWorkloadIdentityAwsExternalId() : null; currentCredentials = assumeRole(currentCredentials, roleArn, externalId); if (currentCredentials == null) { throw new SFException( ErrorCode.WORKLOAD_IDENTITY_FLOW_ERROR, "Failed to assume role: " + roleArn); } } return currentCredentials; } public String getCallerIdentityArn(AwsCredentials credentials, int timeoutMs) { if (credentials == null) { logger.debug("Cannot get caller identity with null credentials"); return null; } Region region = getAWSRegion(); if (region == null) { logger.debug("Cannot get caller identity without AWS region"); return null; } try (StsClient stsClient = createStsClient(credentials, timeoutMs)) { GetCallerIdentityResponse callerIdentity = stsClient.getCallerIdentity(); if (callerIdentity == null || callerIdentity.arn() == null) { logger.debug("GetCallerIdentity returned null or missing ARN"); return null; } return callerIdentity.arn(); } catch (Exception e) { logger.debug("Failed to get caller identity ARN: {}", e.getMessage()); return null; } } private StsClient createStsClient(AwsCredentials credentials, int timeoutMs) { return StsClient.builder() .credentialsProvider(StaticCredentialsProvider.create(credentials)) .overrideConfiguration(config -> config.apiCallTimeout(Duration.ofMillis(timeoutMs))) .region(getAWSRegion()) .build(); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/auth/wif/AwsIdentityAttestationCreator.java ================================================ package net.snowflake.client.internal.core.auth.wif; import java.io.ByteArrayInputStream; import java.net.URI; import java.nio.charset.StandardCharsets; import java.util.Base64; import java.util.Collections; import net.minidev.json.JSONObject; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.core.SFLoginInput; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import software.amazon.awssdk.auth.credentials.AwsCredentials; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.http.SdkHttpMethod; import software.amazon.awssdk.http.SdkHttpRequest; import software.amazon.awssdk.regions.Region; public class AwsIdentityAttestationCreator implements WorkloadIdentityAttestationCreator { private static final SFLogger logger = SFLoggerFactory.getLogger(AwsIdentityAttestationCreator.class); public static final String API_VERSION = "2011-06-15"; public static final String GET_CALLER_IDENTITY_ACTION = "GetCallerIdentity"; private final AwsAttestationService attestationService; private final SFLoginInput loginInput; public AwsIdentityAttestationCreator( AwsAttestationService attestationService, SFLoginInput loginInput) { this.attestationService = attestationService; this.loginInput = loginInput; } @Override public WorkloadIdentityAttestation createAttestation() throws SFException { attestationService.initializeSignerRegion(); AwsCredentials awsCredentials; if (loginInput.getWorkloadIdentityImpersonationPath().isEmpty()) { logger.debug("Creating AWS identity attestation..."); awsCredentials = attestationService.getAWSCredentials(); } else { logger.debug("Creating AWS identity attestation with impersonation..."); awsCredentials = attestationService.getCredentialsViaRoleChaining(loginInput); } if (awsCredentials == null) { throw new SFException( 
ErrorCode.WORKLOAD_IDENTITY_FLOW_ERROR, "No AWS credentials were found"); } Region region = attestationService.getAWSRegion(); if (region == null) { throw new SFException(ErrorCode.WORKLOAD_IDENTITY_FLOW_ERROR, "No AWS region was found"); } String stsHostname = getStsHostname(region.id()); SdkHttpRequest request = createStsRequest(stsHostname); SdkHttpRequest signedRequest = attestationService.signRequestWithSigV4(request, awsCredentials); String credential = createBase64EncodedRequestCredential(signedRequest); return new WorkloadIdentityAttestation( WorkloadIdentityProviderType.AWS, credential, Collections.emptyMap()); } private String getStsHostname(String region) { String domain = region.startsWith("cn-") ? "amazonaws.com.cn" : "amazonaws.com"; return String.format("sts.%s.%s", region, domain); } private SdkHttpRequest createStsRequest(String hostname) { String url = String.format( "https://%s?Action=%s&Version=%s", hostname, GET_CALLER_IDENTITY_ACTION, API_VERSION); SdkHttpFullRequest.Builder requestBuilder = SdkHttpFullRequest.builder() .method(SdkHttpMethod.POST) .uri(URI.create(url)) .putHeader("Host", hostname) .putHeader( WorkloadIdentityUtil.SNOWFLAKE_AUDIENCE_HEADER_NAME, WorkloadIdentityUtil.SNOWFLAKE_AUDIENCE) .contentStreamProvider( () -> new ByteArrayInputStream(new byte[0])); // needed to properly sign the request return requestBuilder.build(); } private String createBase64EncodedRequestCredential(SdkHttpRequest request) { JSONObject assertionJson = new JSONObject(); JSONObject headers = new JSONObject(); // AWS SDK 2 headers are Map<String, List<String>>, but backend expects Map<String, String> request.headers().entrySet().stream() .filter(entry -> entry.getValue() != null && !entry.getValue().isEmpty()) .forEach(entry -> headers.put(entry.getKey(), entry.getValue().get(0))); assertionJson.put("url", request.getUri().toString()); assertionJson.put("method", request.method().toString()); assertionJson.put("headers", headers); String assertionJsonString = assertionJson.toString(); return Base64.getEncoder().encodeToString(assertionJsonString.getBytes(StandardCharsets.UTF_8)); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/auth/wif/AzureAttestationService.java ================================================ package net.snowflake.client.internal.core.auth.wif; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.core.SFLoginInput; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import org.apache.http.client.methods.HttpRequestBase; public class AzureAttestationService { private static final SFLogger logger = SFLoggerFactory.getLogger(AzureAttestationService.class); // Expected to be set in Azure Functions environment String getIdentityEndpoint() { return SnowflakeUtil.systemGetEnv("IDENTITY_ENDPOINT"); } // Expected to be set in Azure Functions environment String getIdentityHeader() { return SnowflakeUtil.systemGetEnv("IDENTITY_HEADER"); } // Expected to be set in Azure Functions environment String getClientId() { return SnowflakeUtil.systemGetEnv("MANAGED_IDENTITY_CLIENT_ID"); } String fetchTokenFromMetadataService(HttpRequestBase tokenRequest, SFLoginInput loginInput) throws SFException { try { return WorkloadIdentityUtil.performIdentityRequest(tokenRequest, loginInput); } catch (Exception e) { logger.error("Azure metadata server request
failed", e); throw new SFException( ErrorCode.WORKLOAD_IDENTITY_FLOW_ERROR, "Azure metadata server request was not successful: " + e.getMessage()); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/auth/wif/AzureIdentityAttestationCreator.java ================================================ package net.snowflake.client.internal.core.auth.wif; import static net.snowflake.client.internal.core.auth.wif.WorkloadIdentityUtil.DEFAULT_AZURE_METADATA_SERVICE_BASE_URL; import static net.snowflake.client.internal.core.auth.wif.WorkloadIdentityUtil.SubjectAndIssuer; import static net.snowflake.client.internal.core.auth.wif.WorkloadIdentityUtil.extractClaimsWithoutVerifyingSignature; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Strings; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.core.SFLoginInput; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import org.apache.http.client.methods.HttpGet; public class AzureIdentityAttestationCreator implements WorkloadIdentityAttestationCreator { private static final SFLogger logger = SFLoggerFactory.getLogger(AzureIdentityAttestationCreator.class); public static final ObjectMapper objectMapper = new ObjectMapper(); private static final String DEFAULT_WORKLOAD_IDENTITY_ENTRA_RESOURCE = "api://fd3f753b-eed3-462c-b6a7-a4b5bb650aad"; private final AzureAttestationService azureAttestationService; private final SFLoginInput loginInput; private final String workloadIdentityEntraResource; private final String azureMetadataServiceBaseUrl; public AzureIdentityAttestationCreator( AzureAttestationService azureAttestationService, SFLoginInput loginInput) { this.azureAttestationService = azureAttestationService; this.azureMetadataServiceBaseUrl = DEFAULT_AZURE_METADATA_SERVICE_BASE_URL; this.loginInput = loginInput; this.workloadIdentityEntraResource = getEntraResource(loginInput); } /** Only for testing purpose */ public AzureIdentityAttestationCreator( AzureAttestationService azureAttestationService, SFLoginInput loginInput, String azureMetadataServiceBaseUrl) { this.azureAttestationService = azureAttestationService; this.azureMetadataServiceBaseUrl = azureMetadataServiceBaseUrl; this.loginInput = loginInput; this.workloadIdentityEntraResource = getEntraResource(loginInput); } @Override public WorkloadIdentityAttestation createAttestation() throws SFException { logger.debug("Creating Azure identity attestation..."); String identityEndpoint = azureAttestationService.getIdentityEndpoint(); HttpGet request; if (Strings.isNullOrEmpty(identityEndpoint)) { request = createAzureVMIdentityRequest(); } else { String identityHeader = azureAttestationService.getIdentityHeader(); if (Strings.isNullOrEmpty(identityHeader)) { throw new SFException( ErrorCode.WORKLOAD_IDENTITY_FLOW_ERROR, "Managed identity is not enabled on this Azure function."); } request = createAzureFunctionsIdentityRequest( identityEndpoint, identityHeader, azureAttestationService.getClientId()); } if (!loginInput.getWorkloadIdentityImpersonationPath().isEmpty()) { throw new SFException( ErrorCode.WORKLOAD_IDENTITY_FLOW_ERROR, "Property 'workloadIdentityImpersonationPath' is not empty. 
Identity impersonation is not available on Azure."); } String tokenJson = azureAttestationService.fetchTokenFromMetadataService(request, loginInput); if (tokenJson == null) { throw new SFException(ErrorCode.WORKLOAD_IDENTITY_FLOW_ERROR, "Could not fetch Azure token."); } String token = extractTokenFromJson(tokenJson); if (token == null) { throw new SFException( ErrorCode.WORKLOAD_IDENTITY_FLOW_ERROR, "No access token found in Azure response."); } SubjectAndIssuer claims = extractClaimsWithoutVerifyingSignature(token); return new WorkloadIdentityAttestation( WorkloadIdentityProviderType.AZURE, token, claims.toMap()); } private String getEntraResource(SFLoginInput loginInput) { if (!Strings.isNullOrEmpty(loginInput.getWorkloadIdentityEntraResource())) { return loginInput.getWorkloadIdentityEntraResource(); } else { return DEFAULT_WORKLOAD_IDENTITY_ENTRA_RESOURCE; } } private String extractTokenFromJson(String tokenJson) throws SFException { try { JsonNode jsonNode = objectMapper.readTree(tokenJson); return jsonNode.get("access_token").asText(); } catch (Exception e) { logger.error("Unable to extract token from Azure metadata response", e); throw new SFException( ErrorCode.WORKLOAD_IDENTITY_FLOW_ERROR, "Unable to extract token from Azure metadata response: " + e.getMessage()); } } private HttpGet createAzureFunctionsIdentityRequest( String identityEndpoint, String identityHeader, String managedIdentityClientId) { String queryParams = "api-version=2019-08-01&resource=" + workloadIdentityEntraResource; if (managedIdentityClientId != null) { queryParams += "&client_id=" + managedIdentityClientId; } HttpGet request = new HttpGet(String.format("%s?%s", identityEndpoint, queryParams)); request.addHeader("X-IDENTITY-HEADER", identityHeader); return request; } private HttpGet createAzureVMIdentityRequest() { String urlWithoutQueryString = azureMetadataServiceBaseUrl + "/metadata/identity/oauth2/token?"; String queryParams = "api-version=2018-02-01&resource=" + workloadIdentityEntraResource; HttpGet request = new HttpGet(urlWithoutQueryString + queryParams); request.setHeader("Metadata", "True"); return request; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/auth/wif/GcpIdentityAttestationCreator.java ================================================ package net.snowflake.client.internal.core.auth.wif; import static net.snowflake.client.internal.core.auth.wif.WorkloadIdentityUtil.DEFAULT_GCP_METADATA_SERVICE_BASE_URL; import static net.snowflake.client.internal.core.auth.wif.WorkloadIdentityUtil.performIdentityRequest; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import java.nio.charset.StandardCharsets; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.stream.Collectors; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.core.SFLoginInput; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.apache.http.entity.StringEntity; public class GcpIdentityAttestationCreator implements WorkloadIdentityAttestationCreator { private static final String METADATA_FLAVOR_HEADER_NAME = "Metadata-Flavor"; private static final 
String METADATA_FLAVOR = "Google"; private static final ObjectMapper objectMapper = new ObjectMapper(); private static final String DEFAULT_GCP_IAM_CREDENTIALS_URL = "https://iamcredentials.googleapis.com"; private final String gcpMetadataServiceBaseUrl; private final String gcpIamCredentialsBaseUrl; private static final SFLogger logger = SFLoggerFactory.getLogger(GcpIdentityAttestationCreator.class); private final SFLoginInput loginInput; public GcpIdentityAttestationCreator(SFLoginInput loginInput) { this.loginInput = loginInput; this.gcpMetadataServiceBaseUrl = DEFAULT_GCP_METADATA_SERVICE_BASE_URL; this.gcpIamCredentialsBaseUrl = DEFAULT_GCP_IAM_CREDENTIALS_URL; } /** Only for testing purpose */ GcpIdentityAttestationCreator(SFLoginInput loginInput, String gcpBaseUrl, String gcpIamUrl) { this.loginInput = loginInput; this.gcpMetadataServiceBaseUrl = gcpBaseUrl; gcpIamCredentialsBaseUrl = gcpIamUrl; } @Override public WorkloadIdentityAttestation createAttestation() throws SFException { String token; if (loginInput.getWorkloadIdentityImpersonationPath().isEmpty()) { logger.debug("Creating GCP identity attestation..."); token = getGcpIdentityTokenFromMetadataService(); } else { logger.debug("Creating GCP identity attestation with impersonation..."); token = getGcpIdentityTokenViaImpersonation(); } if (token == null) { throw new SFException(ErrorCode.WORKLOAD_IDENTITY_FLOW_ERROR, "No GCP token was found."); } // if the token has been returned, we can assume that we're on GCP environment WorkloadIdentityUtil.SubjectAndIssuer claims = WorkloadIdentityUtil.extractClaimsWithoutVerifyingSignature(token); return new WorkloadIdentityAttestation( WorkloadIdentityProviderType.GCP, token, Collections.singletonMap("sub", claims.getSubject())); } private String getGcpIdentityTokenFromMetadataService() throws SFException { String uri = gcpMetadataServiceBaseUrl + "/computeMetadata/v1/instance/service-accounts/default/identity?audience=" + WorkloadIdentityUtil.SNOWFLAKE_AUDIENCE; return executeGcpTokenRequest(uri); } private String fetchTokenFromMetadataService() throws SFException { String uri = gcpMetadataServiceBaseUrl + "/computeMetadata/v1/instance/service-accounts/default/token"; String response = executeGcpTokenRequest(uri); try { JsonNode responseBody = objectMapper.readTree(response); return responseBody.get("access_token").asText(); } catch (JsonProcessingException e) { throw new SFException( ErrorCode.WORKLOAD_IDENTITY_FLOW_ERROR, "GCP metadata server request for token was not successful: " + e.getMessage()); } } private String executeGcpTokenRequest(String uri) throws SFException { HttpGet tokenRequest = new HttpGet(uri); tokenRequest.setHeader(METADATA_FLAVOR_HEADER_NAME, METADATA_FLAVOR); try { return performIdentityRequest(tokenRequest, loginInput); } catch (Exception e) { logger.error("GCP metadata server request was not successful", e); throw new SFException( ErrorCode.WORKLOAD_IDENTITY_FLOW_ERROR, "GCP metadata server request was not successful: " + e.getMessage()); } } private String getGcpIdentityTokenViaImpersonation() throws SFException { String accessToken = fetchTokenFromMetadataService(); List fullServiceAccountPaths = loginInput.getWorkloadIdentityImpersonationPath().stream() .map(sa -> "projects/-/serviceAccounts/" + sa) .collect(Collectors.toList()); String targetServiceAccount = fullServiceAccountPaths.get(fullServiceAccountPaths.size() - 1); List delegates = fullServiceAccountPaths.subList(0, fullServiceAccountPaths.size() - 1); String url = 
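// The non-impersonation path above is a single GET against the GCE metadata service. A rough
// standalone equivalent, runnable only on GCP; the endpoint path and Metadata-Flavor header are
// taken from this file, the audience value from WorkloadIdentityUtil, and exception handling is
// elided for brevity:
//
//   java.net.URL metadataUrl = new java.net.URL(
//       "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/identity"
//           + "?audience=snowflakecomputing.com");
//   java.net.HttpURLConnection conn = (java.net.HttpURLConnection) metadataUrl.openConnection();
//   conn.setRequestProperty("Metadata-Flavor", "Google"); // required by the GCP metadata server
//   try (java.io.BufferedReader reader = new java.io.BufferedReader(
//       new java.io.InputStreamReader(conn.getInputStream(), java.nio.charset.StandardCharsets.UTF_8))) {
//     String idToken = reader.readLine(); // the response body is the raw JWT identity token
//   }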
String.format(gcpIamCredentialsBaseUrl + "/v1/%s:generateIdToken", targetServiceAccount); HttpPost request = new HttpPost(url); request.setHeader("Authorization", "Bearer " + accessToken); request.setHeader("Content-Type", "application/json"); Map<String, Object> requestBody = new HashMap<>(); requestBody.put("delegates", delegates); requestBody.put("audience", WorkloadIdentityUtil.SNOWFLAKE_AUDIENCE); try { String json = objectMapper.writeValueAsString(requestBody); request.setEntity(new StringEntity(json, StandardCharsets.UTF_8)); String response = performIdentityRequest(request, loginInput); JsonNode responseBody = objectMapper.readTree(response); return responseBody.get("token").asText(); } catch (Exception e) { logger.error("Error fetching GCP impersonated identity token", e); throw new SFException( ErrorCode.WORKLOAD_IDENTITY_FLOW_ERROR, "Error fetching GCP impersonated identity token: " + e.getMessage()); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/auth/wif/OidcIdentityAttestationCreator.java ================================================ package net.snowflake.client.internal.core.auth.wif; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; public class OidcIdentityAttestationCreator implements WorkloadIdentityAttestationCreator { private static final SFLogger logger = SFLoggerFactory.getLogger(OidcIdentityAttestationCreator.class); private final String token; public OidcIdentityAttestationCreator(String token) { this.token = token; } @Override public WorkloadIdentityAttestation createAttestation() throws SFException { logger.debug("Creating OIDC identity attestation..."); if (token == null) { throw new SFException( ErrorCode.WORKLOAD_IDENTITY_FLOW_ERROR, "No OIDC token was specified.
Please provide it in `token` property."); } WorkloadIdentityUtil.SubjectAndIssuer claims = WorkloadIdentityUtil.extractClaimsWithoutVerifyingSignature(token); return new WorkloadIdentityAttestation( WorkloadIdentityProviderType.OIDC, token, claims.toMap()); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/auth/wif/PlatformDetectionUtil.java ================================================ package net.snowflake.client.internal.core.auth.wif; import java.io.IOException; import java.util.regex.Pattern; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.core.HttpClientSettingsKey; import net.snowflake.client.internal.core.HttpUtil; import net.snowflake.client.internal.core.OCSPMode; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import org.apache.http.client.config.RequestConfig; import org.apache.http.client.methods.HttpRequestBase; import software.amazon.awssdk.auth.credentials.AwsCredentials; public class PlatformDetectionUtil { private static final SFLogger logger = SFLoggerFactory.getLogger(PlatformDetectionUtil.class); // Regex patterns for validating AWS ARN for WIF private static final Pattern IAM_USER_ARN_PATTERN = Pattern.compile("^arn:[^:]+:iam::[^:]+:user/.+$"); private static final Pattern ASSUMED_ROLE_ARN_PATTERN = Pattern.compile("^arn:[^:]+:sts::[^:]+:assumed-role/.+$"); public static String performPlatformDetectionRequest(HttpRequestBase httpRequest, int timeoutMs) throws SnowflakeSQLException, IOException { httpRequest.setConfig( RequestConfig.custom() .setConnectTimeout(timeoutMs) .setSocketTimeout(timeoutMs) .setConnectionRequestTimeout(timeoutMs) .build()); return HttpUtil.executeGeneralRequestOmitSnowflakeHeaders( httpRequest, 1, timeoutMs / 1000, timeoutMs, 0, new HttpClientSettingsKey(OCSPMode.DISABLE_OCSP_CHECKS), null); } public static boolean hasValidAwsIdentityForWif( AwsAttestationService attestationService, int timeoutMs) { try { AwsCredentials credentials = attestationService.getAWSCredentials(); if (!hasValidAwsCredentials(credentials)) { logger.debug("No valid AWS credentials available for identity validation"); return false; } String arn = attestationService.getCallerIdentityArn(credentials, timeoutMs); if (arn == null) { logger.debug("Failed to retrieve caller identity ARN"); return false; } boolean isValid = isValidArnForWif(arn); if (isValid) { logger.debug("Valid AWS identity found with ARN: {}", arn); } else { logger.debug("ARN is not valid for WIF: {}", arn); } return isValid; } catch (Exception e) { logger.debug("Failed to validate AWS identity: {}", e.getMessage()); return false; } } public static boolean isValidArnForWif(String arn) { if (arn == null || arn.trim().isEmpty()) { return false; } return IAM_USER_ARN_PATTERN.matcher(arn).matches() || ASSUMED_ROLE_ARN_PATTERN.matcher(arn).matches(); } private static boolean hasValidAwsCredentials(AwsCredentials awsCredentials) { if (awsCredentials == null) { logger.debug("No AWS credentials found"); return false; } String accessKey = awsCredentials.accessKeyId(); String secretKey = awsCredentials.secretAccessKey(); if (SnowflakeUtil.isNullOrEmpty(accessKey) || SnowflakeUtil.isNullOrEmpty(secretKey)) { logger.debug("AWS credentials are incomplete"); return false; } return true; } } ================================================ FILE: 
src/main/java/net/snowflake/client/internal/core/auth/wif/WorkloadIdentityAttestation.java ================================================ package net.snowflake.client.internal.core.auth.wif; import java.util.Map; public class WorkloadIdentityAttestation { private final WorkloadIdentityProviderType provider; private final String credential; private final Map<String, String> userIdentifierComponents; WorkloadIdentityAttestation( WorkloadIdentityProviderType provider, String credential, Map<String, String> userIdentifierComponents) { this.provider = provider; this.credential = credential; this.userIdentifierComponents = userIdentifierComponents; } public WorkloadIdentityProviderType getProvider() { return provider; } public String getCredential() { return credential; } public Map<String, String> getUserIdentifierComponents() { return userIdentifierComponents; } @Override public String toString() { return "WorkloadIdentityAttestation{" + "provider=" + provider + ", credential='" + credential + '\'' + ", userIdentifierComponents=" + userIdentifierComponents + '}'; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/auth/wif/WorkloadIdentityAttestationCreator.java ================================================ package net.snowflake.client.internal.core.auth.wif; import net.snowflake.client.internal.core.SFException; interface WorkloadIdentityAttestationCreator { /** * @return corresponding attestation or null if it couldn't be loaded */ WorkloadIdentityAttestation createAttestation() throws SFException; } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/auth/wif/WorkloadIdentityAttestationProvider.java ================================================ package net.snowflake.client.internal.core.auth.wif; import java.util.Arrays; import java.util.stream.Collectors; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; public class WorkloadIdentityAttestationProvider { private static final SFLogger logger = SFLoggerFactory.getLogger(WorkloadIdentityAttestationProvider.class); private final AwsIdentityAttestationCreator awsAttestationCreator; private final GcpIdentityAttestationCreator gcpAttestationCreator; private final AzureIdentityAttestationCreator azureAttestationCreator; private final OidcIdentityAttestationCreator oidcAttestationCreator; public WorkloadIdentityAttestationProvider( AwsIdentityAttestationCreator awsAttestationCreator, GcpIdentityAttestationCreator gcpAttestationCreator, AzureIdentityAttestationCreator azureAttestationCreator, OidcIdentityAttestationCreator oidcAttestationCreator) { this.awsAttestationCreator = awsAttestationCreator; this.gcpAttestationCreator = gcpAttestationCreator; this.azureAttestationCreator = azureAttestationCreator; this.oidcAttestationCreator = oidcAttestationCreator; } public WorkloadIdentityAttestation getAttestation(String identityProvider) throws SFException { return getCreator(identityProvider).createAttestation(); } WorkloadIdentityAttestationCreator getCreator(String identityProvider) throws SFException { if (WorkloadIdentityProviderType.AWS.name().equalsIgnoreCase(identityProvider)) { return awsAttestationCreator; } else if (WorkloadIdentityProviderType.GCP.name().equalsIgnoreCase(identityProvider)) { return gcpAttestationCreator; } else if (WorkloadIdentityProviderType.AZURE.name().equalsIgnoreCase(identityProvider)) {
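// A sketch of how a caller resolves an attestation through this provider, assuming the four
// creators were built earlier from the login input (hypothetical variable names); matching in
// getCreator is case-insensitive, so "aws" and "AWS" select the same creator:
//
//   WorkloadIdentityAttestationProvider provider = new WorkloadIdentityAttestationProvider(
//       awsCreator, gcpCreator, azureCreator, oidcCreator); // pre-built creators (assumed)
//   WorkloadIdentityAttestation attestation = provider.getAttestation("AWS");
//   // attestation.getCredential() then carries the provider-specific proof of identity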
return azureAttestationCreator; } else if (WorkloadIdentityProviderType.OIDC.name().equalsIgnoreCase(identityProvider)) { return oidcAttestationCreator; } else { String validValues = Arrays.stream(WorkloadIdentityProviderType.values()) .map(Enum::name) .collect(Collectors.joining(", ")); throw new SFException( ErrorCode.WORKLOAD_IDENTITY_FLOW_ERROR, "Unknown Workload Identity provider specified: " + identityProvider + ", valid values are: " + validValues); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/auth/wif/WorkloadIdentityProviderType.java ================================================ package net.snowflake.client.internal.core.auth.wif; enum WorkloadIdentityProviderType { AWS, // Provider that builds an encoded pre-signed GetCallerIdentity request using the current // workload's IAM role. AZURE, // Provider that requests an OAuth access token for the workload's managed identity. GCP, // Provider that requests an ID token for the workload's attached service account. OIDC // Provider that looks for an OIDC ID token. } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/auth/wif/WorkloadIdentityUtil.java ================================================ package net.snowflake.client.internal.core.auth.wif; import com.nimbusds.jwt.JWT; import com.nimbusds.jwt.JWTParser; import java.io.IOException; import java.util.HashMap; import java.util.Map; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.core.HttpUtil; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.core.SFLoginInput; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import org.apache.http.client.methods.HttpRequestBase; /** * Utility class for Workload Identity Federation (WIF) specific operations. This class contains * functions that are used exclusively within the WIF package. */ public class WorkloadIdentityUtil { private static final SFLogger logger = SFLoggerFactory.getLogger(WorkloadIdentityUtil.class); // GCP metadata service uses a hostname that resolves to both IPv4 and IPv6 via DNS public static final String DEFAULT_GCP_METADATA_SERVICE_BASE_URL = "http://metadata.google.internal"; // Azure instance metadata service (IPv4 only; Azure does not expose an IPv6 IMDS endpoint) public static final String DEFAULT_AZURE_METADATA_SERVICE_BASE_URL = "http://169.254.169.254"; public static final String SNOWFLAKE_AUDIENCE_HEADER_NAME = "X-Snowflake-Audience"; public static final String SNOWFLAKE_AUDIENCE = "snowflakecomputing.com"; /** * Performs an HTTP request for WIF identity token retrieval. This method is used by WIF * authentication flows to communicate with cloud metadata services. */ public static String performIdentityRequest(HttpRequestBase tokenRequest, SFLoginInput loginInput) throws SnowflakeSQLException, IOException { return HttpUtil.executeGeneralRequestOmitSnowflakeHeaders( tokenRequest, loginInput.getLoginTimeout(), 3, // 3s timeout loginInput.getSocketTimeoutInMillis(), 0, loginInput.getHttpClientSettingsKey(), null); } /** * Extracts claims (subject and issuer) from a JWT token without verifying the signature. This is * used in WIF flows where signature verification is handled elsewhere. 
*/ public static SubjectAndIssuer extractClaimsWithoutVerifyingSignature(String token) throws SFException { Map<String, Object> claims = extractClaimsMap(token); if (claims == null) { throw new SFException( ErrorCode.WORKLOAD_IDENTITY_FLOW_ERROR, "Failed to parse JWT and extract claims"); } String issuer = (String) claims.get("iss"); if (issuer == null) { throw new SFException( ErrorCode.WORKLOAD_IDENTITY_FLOW_ERROR, "Missing issuer claim in JWT token"); } String subject = (String) claims.get("sub"); if (subject == null) { throw new SFException( ErrorCode.WORKLOAD_IDENTITY_FLOW_ERROR, "Missing sub claim in JWT token"); } return new SubjectAndIssuer(subject, issuer); } private static Map<String, Object> extractClaimsMap(String token) { try { JWT jwt = JWTParser.parse(token); return jwt.getJWTClaimsSet().getClaims(); } catch (Exception e) { logger.error("Unable to extract JWT claims from token", e); return null; } } /** * Container class for JWT subject and issuer claims. Used in WIF authentication flows to pass * extracted token claims. */ public static class SubjectAndIssuer { private final String subject; private final String issuer; public SubjectAndIssuer(String subject, String issuer) { this.issuer = issuer; this.subject = subject; } public String getIssuer() { return issuer; } public String getSubject() { return subject; } public Map<String, String> toMap() { Map<String, String> claims = new HashMap<>(); claims.put("iss", issuer); claims.put("sub", subject); return claims; } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/bind/BindException.java ================================================ package net.snowflake.client.internal.core.bind; import net.snowflake.client.internal.jdbc.telemetry.TelemetryField; public class BindException extends Exception { private static final long serialVersionUID = 1L; public enum Type { SERIALIZATION(TelemetryField.FAILED_BIND_SERIALIZATION), UPLOAD(TelemetryField.FAILED_BIND_UPLOAD), OTHER(TelemetryField.FAILED_BIND_OTHER); public final TelemetryField field; Type(TelemetryField field) { this.field = field; } } public final Type type; public BindException(String msg, Type type) { super(msg); this.type = type; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/bind/BindUploader.java ================================================ package net.snowflake.client.internal.core.bind; import static java.nio.charset.StandardCharsets.UTF_8; import java.io.ByteArrayInputStream; import java.io.Closeable; import java.io.IOException; import java.io.InputStream; import java.nio.ByteBuffer; import java.sql.SQLException; import java.time.Instant; import java.time.LocalDate; import java.time.LocalTime; import java.time.ZoneId; import java.time.ZoneOffset; import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; import java.time.format.DateTimeFormatterBuilder; import java.util.ArrayList; import java.util.List; import java.util.Map; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.internal.core.ParameterBindingDTO; import net.snowflake.client.internal.core.SFBaseSession; import net.snowflake.client.internal.core.SFBaseStatement; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.exception.SnowflakeSQLLoggedException; import net.snowflake.client.internal.jdbc.SFBaseFileTransferAgent; import net.snowflake.client.internal.jdbc.telemetry.ExecTimeTelemetryData; import net.snowflake.client.internal.jdbc.util.SnowflakeTypeUtil;
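// extractClaimsMap above leans on Nimbus JOSE+JWT to read claims without verifying the
// signature (verification is deferred to the server side in the WIF flows). A standalone
// sketch of the same parsing, assuming a compact-serialized JWT in a 'token' variable:
//
//   com.nimbusds.jwt.JWT jwt = com.nimbusds.jwt.JWTParser.parse(token); // throws ParseException on malformed input
//   java.util.Map<String, Object> claims = jwt.getJWTClaimsSet().getClaims();
//   String issuer = (String) claims.get("iss");  // e.g. the cloud identity provider
//   String subject = (String) claims.get("sub"); // the workload's identity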
import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.client.internal.util.SFPair; import net.snowflake.common.core.SqlState; public class BindUploader implements Closeable { private static final SFLogger logger = SFLoggerFactory.getLogger(BindUploader.class); // session of the uploader private final SFBaseSession session; // fully-qualified stage path to upload binds to private final String stagePath; // whether the uploader has completed private boolean closed = false; // size (bytes) of max input stream (10MB default) private long inputStreamBufferSize = 1024 * 1024 * 10; private int fileCount = 0; private final DateTimeFormatter timestampFormatter = new DateTimeFormatterBuilder() .append(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSSSSSSSS ")) .appendOffset("+HH:MM", "Z") .toFormatter(); private final DateTimeFormatter dateFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd"); private final DateTimeFormatter timeFormatter = DateTimeFormatter.ofPattern("HH:mm:ss.SSSSSSSSS"); private final String createStageSQL; static class ColumnTypeDataPair { public String type; public List data; ColumnTypeDataPair(String type, List data) { this.type = type; this.data = data; } } /** * Create a new BindUploader which will write binds to the *existing* bindDir and upload them to * the given stageDir * * @param session the session to use for uploading binds * @param stageDir the stage path to upload to */ private BindUploader(SFBaseSession session, String stageDir) { this.session = session; this.stagePath = "@" + session.getSfConnectionHandler().getBindStageName() + "/" + stageDir; this.createStageSQL = "CREATE TEMPORARY STAGE IF NOT EXISTS " + session.getSfConnectionHandler().getBindStageName() + " file_format=(" + " type=csv" + " field_optionally_enclosed_by='\"'" + ")"; } private synchronized String synchronizedDateFormat(String o) { if (o == null) { return null; } long millis = Long.parseLong(o); Instant instant = Instant.ofEpochMilli(millis); LocalDate localDate = instant.atZone(ZoneOffset.UTC).toLocalDate(); return localDate.format(this.dateFormatter); } private synchronized String synchronizedTimeFormat(String o) { if (o == null) { return null; } SFPair times = getNanosAndSecs(o, false); long sec = times.left; int nano = times.right; LocalTime time = Instant.ofEpochSecond(sec, nano).atZone(ZoneOffset.UTC).toLocalTime(); return time.format(timeFormatter); } private SFPair getNanosAndSecs(String o, boolean isNegative) { String inpString = o; if (isNegative) { inpString = o.substring(1); } long sec; int nano; if (inpString.length() < 10) { sec = 0; nano = Integer.parseInt(inpString); } else { sec = Long.parseLong(inpString.substring(0, inpString.length() - 9)); nano = Integer.parseInt(inpString.substring(inpString.length() - 9)); } if (isNegative) { // adjust the timestamp sec = -1 * sec; if (nano > 0) { nano = 1000000000 - nano; sec--; } } return SFPair.of(sec, nano); } private synchronized String synchronizedTimestampFormat(String o, String type) { if (o == null) { return null; } boolean isNegative = o.length() > 0 && o.charAt(0) == '-'; SFPair times = getNanosAndSecs(o, isNegative); long sec = times.left; int nano = times.right; Instant instant = Instant.ofEpochSecond(sec, nano); // For timestamp_ntz, use UTC timezone. For timestamp_ltz, use the local timezone to minimise // the gap. 
if ("TIMESTAMP_LTZ".equals(type)) { ZonedDateTime zdt = instant.atZone(ZoneId.systemDefault()); return zdt.format(timestampFormatter); } else { ZonedDateTime zdt = ZonedDateTime.ofInstant(instant, ZoneOffset.UTC); return zdt.format(timestampFormatter); } } /** * Create a new BindUploader which will upload to the given stage path. Note that no temporary * file or directory is created anymore. Instead, streaming uploading is used. * * @param session the session to use for uploading binds * @param stageDir the stage path to upload to * @return BindUploader instance */ public static synchronized BindUploader newInstance(SFBaseSession session, String stageDir) { return new BindUploader(session, stageDir); } /** * Wrapper around upload() with default compression to true. * * @param bindValues the bind map to upload * @throws BindException if there is an error when uploading bind values * @throws SQLException if any error occurs */ public void upload(Map bindValues) throws BindException, SQLException { upload(bindValues, true); } /** * Upload bind parameters via streaming. This replaces previous function upload function where * binds were written to a file which was then uploaded with a PUT statement. * * @param bindValues the bind map to upload * @param compressData whether or not to compress data * @throws BindException if there is an error when uploading bind values * @throws SQLException if any error occurs */ public void upload(Map bindValues, boolean compressData) throws BindException, SQLException { if (!closed) { List columns = getColumnValues(bindValues); List bindingRows = buildRowsAsBytes(columns); int startIndex = 0; int numBytes = 0; int rowNum = 0; fileCount = 0; while (rowNum < bindingRows.size()) { // create a list of byte arrays while (numBytes < inputStreamBufferSize && rowNum < bindingRows.size()) { numBytes += bindingRows.get(rowNum).length; rowNum++; } // concatenate all byte arrays into 1 and put into input stream ByteBuffer bb = ByteBuffer.allocate(numBytes); for (int i = startIndex; i < rowNum; i++) { bb.put(bindingRows.get(i)); } byte[] finalBytearray = bb.array(); try (ByteArrayInputStream inputStream = new ByteArrayInputStream(finalBytearray)) { // do the upload String fileName = Integer.toString(++fileCount); uploadStreamInternal(inputStream, fileName, compressData); startIndex = rowNum; numBytes = 0; } catch (IOException ex) { throw new BindException( String.format( "Failure using inputstream to upload bind data. Message: %s", ex.getMessage()), BindException.Type.SERIALIZATION); } } } } /** * Method to put data from a stream at a stage location. The data will be uploaded as one file. No * splitting is done in this method. Similar to uploadStreamInternal() in SnowflakeConnectionImpl. * *

Stream size must match the total size of data in the input stream unless compressData * parameter is set to true. * *

caller is responsible for passing the correct size for the data in the stream and releasing * the inputStream after the method is called. * * @param inputStream input stream from which the data will be uploaded * @param destFileName destination file name to use * @param compressData whether compression is requested fore uploading data * @throws SQLException raises if any error occurs * @throws BindException if there is an error when uploading bind values */ private void uploadStreamInternal( InputStream inputStream, String destFileName, boolean compressData) throws SQLException, BindException { createStageIfNeeded(); String stageName = stagePath; logger.debug( "upload data from stream: stageName={}" + ", destFileName={}", stageName, destFileName); if (stageName == null) { throw new SnowflakeSQLLoggedException( session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "stage name is null"); } if (destFileName == null) { throw new SnowflakeSQLLoggedException( session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "stage name is null"); } SFBaseStatement stmt = session.getSfConnectionHandler().getSFStatement(); StringBuilder putCommand = new StringBuilder(); // use a placeholder for source file putCommand.append("put file:///tmp/placeholder "); // add stage name surrounded by quotations in case special chars are used in directory name putCommand.append("'"); putCommand.append(stageName); putCommand.append("'"); putCommand.append(" overwrite=true"); SFBaseFileTransferAgent transferAgent = session.getSfConnectionHandler().getFileTransferAgent(putCommand.toString(), stmt); transferAgent.setDestStagePath(stagePath); transferAgent.setSourceStream(inputStream); transferAgent.setDestFileNameForStreamSource(destFileName); transferAgent.setCompressSourceFromStream(compressData); transferAgent.execute(); stmt.close(); } /** * Convert bind map to a list of values for each column Perform necessary type casts and invariant * checks * * @param bindValues the bind map to convert * @return list of values for each column * @throws BindException if bind map is improperly formed */ private List getColumnValues(Map bindValues) throws BindException { List columns = new ArrayList<>(bindValues.size()); for (int i = 1; i <= bindValues.size(); i++) { // bindValues should have n entries with string keys 1 ... 
n and list values String key = Integer.toString(i); if (!bindValues.containsKey(key)) { throw new BindException( String.format( "Bind map with %d columns should contain key \"%d\"", bindValues.size(), i), BindException.Type.SERIALIZATION); } ParameterBindingDTO value = bindValues.get(key); try { String type = value.getType(); List list = (List) value.getValue(); List convertedList = new ArrayList<>(list.size()); if ("TIMESTAMP_LTZ".equals(type) || "TIMESTAMP_NTZ".equals(type)) { for (Object e : list) { convertedList.add(synchronizedTimestampFormat((String) e, type)); } } else if ("DATE".equals(type)) { for (Object e : list) { convertedList.add(synchronizedDateFormat((String) e)); } } else if ("TIME".equals(type)) { for (Object e : list) { convertedList.add(synchronizedTimeFormat((String) e)); } } else { for (Object e : list) { convertedList.add((String) e); } } columns.add(i - 1, new ColumnTypeDataPair(type, convertedList)); } catch (ClassCastException ex) { throw new BindException( "Value in binding DTO could not be cast to a list", BindException.Type.SERIALIZATION); } } return columns; } /** * Transpose a list of columns and their values to a list of rows in bytes instead of strings * * @param columns the list of columns to transpose * @return list of rows * @throws BindException if columns improperly formed */ private List buildRowsAsBytes(List columns) throws BindException { List rows = new ArrayList<>(); int numColumns = columns.size(); // columns should have binds if (columns.get(0).data.isEmpty()) { throw new BindException("No binds found in first column", BindException.Type.SERIALIZATION); } int numRows = columns.get(0).data.size(); // every column should have the same number of binds for (int i = 0; i < numColumns; i++) { int iNumRows = columns.get(i).data.size(); if (columns.get(i).data.size() != numRows) { throw new BindException( String.format( "Column %d has a different number of binds (%d) than column 1 (%d)", i, iNumRows, numRows), BindException.Type.SERIALIZATION); } } for (int rowIdx = 0; rowIdx < numRows; rowIdx++) { String[] row = new String[numColumns]; for (int colIdx = 0; colIdx < numColumns; colIdx++) { row[colIdx] = columns.get(colIdx).data.get(rowIdx); } rows.add(createCSVRecord(row)); } return rows; } /** * Serialize row to a csv Duplicated from StreamLoader class * * @param data the row to create a csv record from * @return serialized csv for row */ private byte[] createCSVRecord(String[] data) { StringBuilder sb = new StringBuilder(1024); for (int i = 0; i < data.length; ++i) { if (i > 0) { sb.append(','); } sb.append(SnowflakeTypeUtil.escapeForCSV(data[i])); } sb.append('\n'); return sb.toString().getBytes(UTF_8); } /** * Check whether the session's temporary stage has been created, and create it if not. 
* * @throws BindException if creating the stage fails */ private void createStageIfNeeded() throws BindException { if (session.getArrayBindStage() != null) { return; } synchronized (session) { // another thread may have created the session by the time we enter this block if (session.getArrayBindStage() == null) { try { SFBaseStatement statement = session.getSfConnectionHandler().getSFStatement(); statement.execute(createStageSQL, null, null, new ExecTimeTelemetryData()); session.setArrayBindStage(session.getSfConnectionHandler().getBindStageName()); } catch (SFException | SQLException ex) { // to avoid repeated failures to create stage, disable array bind stage // optimization if we fail to create stage for some reason session.setArrayBindStageThreshold(0); throw new BindException( String.format( "Failed to create temporary stage for array binds. %s", ex.getMessage()), BindException.Type.UPLOAD); } } } } /** * Close uploader, deleting the local temporary directory * *

This class can be used in a try-with-resources statement, which ensures that the temporary * directory is cleaned up even when exceptions occur */ @Override public void close() { if (!closed) { closed = true; } } /** * Set the approximate maximum size in bytes for a single bind file * * @param bufferSize size in bytes */ public void setInputStreamBufferSize(int bufferSize) { this.inputStreamBufferSize = bufferSize; } /** * Return the number of files that binding data is split into on internal stage. Used for testing * purposes. * * @return number of files that binding data is split into on internal stage */ public int getFileCount() { return this.fileCount; } /** * Return the stage path to which binds are uploaded * * @return the stage path */ public String getStagePath() { return this.stagePath; } /** * Compute the number of array bind values in the given bind map * * @param bindValues the bind map * @return 0 if bindValues is null, has no binds, or is not an array bind n otherwise, where n is * the number of binds in the array bind */ public static int arrayBindValueCount(Map bindValues) { if (!isArrayBind(bindValues)) { return 0; } else { ParameterBindingDTO bindSample = bindValues.values().iterator().next(); List bindSampleValues = (List) bindSample.getValue(); return bindValues.size() * bindSampleValues.size(); } } /** * Return whether the bind map uses array binds * * @param bindValues the bind map * @return whether the bind map uses array binds */ public static boolean isArrayBind(Map bindValues) { if (bindValues == null || bindValues.size() == 0) { return false; } ParameterBindingDTO bindSample = bindValues.values().iterator().next(); return bindSample.getValue() instanceof List; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/crl/CRLCache.java ================================================ package net.snowflake.client.internal.core.crl; interface CRLCache { CRLCacheEntry get(String crlUrl); void put(String crlUrl, CRLCacheEntry entry); void cleanup(); } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/crl/CRLCacheConfig.java ================================================ package net.snowflake.client.internal.core.crl; import java.io.File; import java.nio.file.Path; import java.nio.file.Paths; import java.time.Duration; import net.snowflake.client.internal.core.FileCacheUtil; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; public class CRLCacheConfig { private static final SFLogger logger = SFLoggerFactory.getLogger(CRLCacheConfig.class); public static final String ENABLE_CRL_IN_MEMORY_CACHING = "ENABLE_CRL_IN_MEMORY_CACHING"; public static final String ENABLE_CRL_DISK_CACHING = "ENABLE_CRL_DISK_CACHING"; public static final String CRL_CACHE_VALIDITY_TIME = "CRL_CACHE_VALIDITY_TIME"; public static final String CRL_RESPONSE_CACHE_DIR = "CRL_RESPONSE_CACHE_DIR"; public static final String CRL_ON_DISK_CACHE_REMOVAL_DELAY = "CRL_ON_DISK_CACHE_REMOVAL_DELAY"; public static final String CRL_DOWNLOAD_MAX_SIZE_BYTES = "CRL_DOWNLOAD_MAX_SIZE_BYTES"; private static final long DEFAULT_CRL_DOWNLOAD_MAX_SIZE_BYTES = 20L * 1024 * 1024; // 20 MB public static boolean getInMemoryCacheEnabled() { return SnowflakeUtil.convertSystemPropertyToBooleanValue(ENABLE_CRL_IN_MEMORY_CACHING, true); } public static boolean getOnDiskCacheEnabled() { return 
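// CRLCacheConfig below reads ordinary JVM system properties, so the CRL caches can be tuned at
// process startup (equivalently via -D flags on the java command line). A sketch with
// illustrative values; the path is hypothetical:
//
//   System.setProperty(CRLCacheConfig.ENABLE_CRL_IN_MEMORY_CACHING, "true");
//   System.setProperty(CRLCacheConfig.ENABLE_CRL_DISK_CACHING, "true");
//   System.setProperty(CRLCacheConfig.CRL_CACHE_VALIDITY_TIME, "86400"); // seconds; must be positive
//   System.setProperty(CRLCacheConfig.CRL_RESPONSE_CACHE_DIR, "/var/cache/snowflake/crls");
//   System.setProperty(CRLCacheConfig.CRL_DOWNLOAD_MAX_SIZE_BYTES, "20971520"); // 20 MB, the default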
SnowflakeUtil.convertSystemPropertyToBooleanValue(ENABLE_CRL_DISK_CACHING, true); } public static Duration getCacheValidityTime() { String validityTime = SnowflakeUtil.systemGetProperty(CRL_CACHE_VALIDITY_TIME); if (!SnowflakeUtil.isNullOrEmpty(validityTime)) { try { long seconds = Long.parseLong(validityTime); if (seconds <= 0) { throw new IllegalArgumentException("Cache validity time must be positive"); } return Duration.ofSeconds(seconds); } catch (NumberFormatException e) { throw new IllegalArgumentException("Invalid cache validity time: " + validityTime, e); } } else { return Duration.ofDays(1); } } public static Path getOnDiskCacheDir() { String cacheDir = SnowflakeUtil.systemGetProperty(CRL_RESPONSE_CACHE_DIR); if (SnowflakeUtil.isNullOrEmpty(cacheDir)) { File defaultCacheDir = FileCacheUtil.getDefaultCacheDir(); if (defaultCacheDir != null) { return Paths.get(defaultCacheDir.getAbsolutePath(), "crls"); } else { throw new IllegalStateException( "Default cache dir not set but CRL file cache is enabled. Either fix the environment so that a cache directory can be determined, or disable the CRL file cache in the configuration."); } } else { return Paths.get(cacheDir); } } public static long getCrlDownloadMaxSizeBytes() { String value = SnowflakeUtil.systemGetProperty(CRL_DOWNLOAD_MAX_SIZE_BYTES); if (!SnowflakeUtil.isNullOrEmpty(value)) { try { long bytes = Long.parseLong(value); if (bytes <= 0) { throw new IllegalArgumentException("CRL download max size must be positive"); } if (bytes > Integer.MAX_VALUE - 1) { throw new IllegalArgumentException( "CRL download max size must not exceed " + (Integer.MAX_VALUE - 1) + " bytes"); } return bytes; } catch (NumberFormatException e) { throw new IllegalArgumentException("Invalid CRL download max size: " + value, e); } } return DEFAULT_CRL_DOWNLOAD_MAX_SIZE_BYTES; } public static Duration getCrlOnDiskCacheRemovalDelay() { String removalDelay = SnowflakeUtil.systemGetProperty(CRL_ON_DISK_CACHE_REMOVAL_DELAY); if (!SnowflakeUtil.isNullOrEmpty(removalDelay)) { try { long seconds = Long.parseLong(removalDelay); if (seconds <= 0) { throw new IllegalArgumentException("Cache removal delay time must be positive"); } return Duration.ofSeconds(seconds); } catch (NumberFormatException e) { throw new IllegalArgumentException("Invalid cache removal delay: " + removalDelay, e); } } else { logger.debug("Using default on-disk cache removal delay of 7 days"); return Duration.ofDays(7); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/crl/CRLCacheEntry.java ================================================ package net.snowflake.client.internal.core.crl; import java.security.cert.X509CRL; import java.time.Duration; import java.time.Instant; class CRLCacheEntry { private final X509CRL crl; private final Instant downloadTime; CRLCacheEntry(X509CRL crl, Instant downloadTime) { if (crl == null) { throw new IllegalArgumentException("CRL cannot be null"); } if (downloadTime == null) { throw new IllegalArgumentException("Download time cannot be null"); } this.crl = crl; this.downloadTime = downloadTime; } X509CRL getCrl() { return crl; } Instant getDownloadTime() { return downloadTime; } boolean isCrlExpired(Instant time) { return crl.getNextUpdate() != null && crl.getNextUpdate().toInstant().isBefore(time); } boolean isEvicted(Instant time, Duration cacheValidityTime) { return downloadTime.plus(cacheValidityTime).isBefore(time); } } ================================================ FILE: 
src/main/java/net/snowflake/client/internal/core/crl/CRLCacheManager.java ================================================ package net.snowflake.client.internal.core.crl; import java.nio.file.Path; import java.security.cert.X509CRL; import java.time.Duration; import java.time.Instant; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; import net.snowflake.client.internal.exception.SnowflakeSQLLoggedException; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; /** * Cache manager that coordinates between in-memory and file-based CRL caches. Provides automatic * cleanup of expired entries and proper lifecycle management. */ public class CRLCacheManager { private static final SFLogger logger = SFLoggerFactory.getLogger(CRLCacheManager.class); private final CRLCache memoryCache; private final CRLCache fileCache; private final ScheduledExecutorService cleanupScheduler; private final Runnable cleanupTask; private final long cleanupIntervalInMs; private final Duration cacheValidityTime; CRLCacheManager( CRLCache memoryCache, CRLCache fileCache, Duration cleanupInterval, Duration cacheValidityTime) { this.memoryCache = memoryCache; this.fileCache = fileCache; this.cleanupIntervalInMs = cleanupInterval.toMillis(); this.cacheValidityTime = cacheValidityTime; this.cleanupTask = () -> { try { logger.debug( "Running periodic CRL cache cleanup with interval {} seconds", cleanupIntervalInMs / 1000.0); memoryCache.cleanup(); fileCache.cleanup(); } catch (Exception e) { logger.error("An error occurred during scheduled CRL cache cleanup.", e); } }; ThreadFactory threadFactory = r -> { Thread t = new Thread(r, "crl-cache-cleanup"); t.setDaemon(true); // Don't prevent JVM shutdown return t; }; this.cleanupScheduler = Executors.newSingleThreadScheduledExecutor(threadFactory); } public static CRLCacheManager build( boolean inMemoryCacheEnabled, boolean onDiskCacheEnabled, Path onDiskCacheDir, Duration onDiskCacheRemovalDelay, Duration cacheValidityTime) throws SnowflakeSQLLoggedException { CRLCache memoryCache; if (inMemoryCacheEnabled) { logger.debug("Enabling in-memory CRL cache"); memoryCache = new CRLInMemoryCache(cacheValidityTime); } else { logger.debug("In-memory CRL cache disabled"); memoryCache = NoopCRLCache.INSTANCE; } CRLCache fileCache; if (onDiskCacheEnabled) { logger.debug("Enabling file based CRL cache"); fileCache = new CRLFileCache(onDiskCacheDir, onDiskCacheRemovalDelay); } else { logger.debug("File based CRL cache disabled"); fileCache = NoopCRLCache.INSTANCE; } CRLCacheManager manager = new CRLCacheManager(memoryCache, fileCache, onDiskCacheRemovalDelay, cacheValidityTime); if (inMemoryCacheEnabled || onDiskCacheEnabled) { manager.startPeriodicCleanup(); } return manager; } CRLCacheEntry get(String crlUrl) { CRLCacheEntry entry = memoryCache.get(crlUrl); if (entry != null) { return entry; } entry = fileCache.get(crlUrl); if (entry != null) { // Promote to memory cache memoryCache.put(crlUrl, entry); return entry; } logger.debug("CRL not found in cache for {}", crlUrl); return null; } void put(String crlUrl, X509CRL crl, Instant downloadTime) { CRLCacheEntry entry = new CRLCacheEntry(crl, downloadTime); memoryCache.put(crlUrl, entry); fileCache.put(crlUrl, entry); } private void startPeriodicCleanup() { cleanupScheduler.scheduleAtFixedRate( cleanupTask, cleanupIntervalInMs, cleanupIntervalInMs, 
TimeUnit.MILLISECONDS); logger.debug( "Scheduled CRL cache cleanup task to run every {} seconds.", cleanupIntervalInMs / 1000.0); } public Duration getCacheValidityTime() { return cacheValidityTime; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/crl/CRLFileCache.java ================================================ package net.snowflake.client.internal.core.crl; import java.io.IOException; import java.io.InputStream; import java.io.UnsupportedEncodingException; import java.net.URLEncoder; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.attribute.BasicFileAttributes; import java.nio.file.attribute.FileTime; import java.nio.file.attribute.PosixFilePermissions; import java.security.cert.CRLException; import java.security.cert.CertificateException; import java.security.cert.CertificateFactory; import java.security.cert.X509CRL; import java.time.Duration; import java.time.Instant; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import java.util.stream.Collectors; import java.util.stream.Stream; import net.snowflake.client.internal.core.Constants; import net.snowflake.client.internal.exception.SnowflakeSQLLoggedException; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.common.core.SqlState; class CRLFileCache implements CRLCache { private static final SFLogger logger = SFLoggerFactory.getLogger(CRLFileCache.class); private final Path cacheDir; private final Duration removalDelay; private final Lock cacheLock = new ReentrantLock(); CRLFileCache(Path cacheDir, Duration removalDelay) throws SnowflakeSQLLoggedException { this.cacheDir = cacheDir; this.removalDelay = removalDelay; ensureCacheDirectoryExists(cacheDir); } public CRLCacheEntry get(String crlUrl) { try { cacheLock.lock(); Path crlFilePath = getCrlFilePath(crlUrl); if (Files.exists(crlFilePath)) { logger.debug("Found CRL on disk for {}", crlFilePath); BasicFileAttributes attrs = Files.readAttributes(crlFilePath, BasicFileAttributes.class); Instant downloadTime = attrs.lastModifiedTime().toInstant(); CertificateFactory certFactory = CertificateFactory.getInstance("X.509"); try (InputStream crlBytes = Files.newInputStream(crlFilePath)) { X509CRL crl = (X509CRL) certFactory.generateCRL(crlBytes); return new CRLCacheEntry(crl, downloadTime); } } } catch (Exception e) { logger.warn("Failed to read CRL from disk cache for {}: {}", crlUrl, e.getMessage()); } finally { cacheLock.unlock(); } return null; } public void put(String crlUrl, CRLCacheEntry entry) { try { cacheLock.lock(); Path crlFilePath = getCrlFilePath(crlUrl); if (Constants.getOS().isPosix()) { Files.deleteIfExists(crlFilePath); Files.createFile( crlFilePath, PosixFilePermissions.asFileAttribute(PosixFilePermissions.fromString("rw-------"))); Files.write(crlFilePath, entry.getCrl().getEncoded()); } else { Files.write(crlFilePath, entry.getCrl().getEncoded()); } Files.setLastModifiedTime(crlFilePath, FileTime.from(entry.getDownloadTime())); logger.debug("Updated disk cache for {}", crlUrl); } catch (Exception e) { logger.warn("Failed to write CRL to disk cache for {}: {}", crlUrl, e.getMessage()); } finally { cacheLock.unlock(); } } public void cleanup() { Instant now = Instant.now(); logger.debug("Cleaning up on-disk CRL cache at {}", now); try { if (!Files.exists(cacheDir)) { return; } int removedCount = 0; try (Stream files = 
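// Each cached CRL is stored as a single file whose name is the percent-encoded CRL URL (see
// getCrlFilePath below), which keeps an arbitrary URL safe as one path component. A worked
// sketch with an illustrative URL; URLEncoder.encode here throws UnsupportedEncodingException:
//
//   String crlUrl = "http://crl.example.com/ca.crl"; // illustrative URL
//   String fileName = java.net.URLEncoder.encode(crlUrl, java.nio.charset.StandardCharsets.UTF_8.toString());
//   // fileName -> "http%3A%2F%2Fcrl.example.com%2Fca.crl"
//   java.nio.file.Path cached = cacheDir.resolve(fileName); // cacheDir: the configured CRL cache directory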
Files.list(cacheDir)) { cacheLock.lock(); for (Path filePath : files.filter(Files::isRegularFile).collect(Collectors.toList())) { try { try (InputStream crlBytes = Files.newInputStream(filePath)) { CertificateFactory certFactory = CertificateFactory.getInstance("X.509"); X509CRL crl = (X509CRL) certFactory.generateCRL(crlBytes); CRLCacheEntry entry = new CRLCacheEntry(crl, Files.getLastModifiedTime(filePath).toInstant()); boolean expired = entry.isCrlExpired(now); boolean evicted = entry.isEvicted(now, removalDelay); if (expired || evicted) { Files.delete(filePath); removedCount++; logger.debug( "Removing file based CRL cache entry for {}: expired={}, evicted={}", filePath, expired, evicted); } } } catch (IOException | CRLException | CertificateException e) { // If we can't parse the file, it's probably corrupted - remove it try { Files.delete(filePath); removedCount++; } catch (IOException deleteError) { logger.warn( "Failed to delete corrupted CRL file {}: {}", filePath, deleteError.getMessage()); } } } } finally { cacheLock.unlock(); } if (removedCount > 0) { logger.debug("Removed {} expired/corrupted files from disk CRL cache", removedCount); } } catch (Exception e) { logger.warn("Failed to cleanup disk CRL cache: {}", e.getMessage()); } } private Path getCrlFilePath(String crlUrl) throws UnsupportedEncodingException { String encodedUrl = URLEncoder.encode(crlUrl, StandardCharsets.UTF_8.toString()); return cacheDir.resolve(encodedUrl); } private static boolean ownerOnlyPermissions(Path cacheDir) throws IOException { return Files.getPosixFilePermissions(cacheDir) .equals(PosixFilePermissions.fromString("rwx------")); } private static void ensureCacheDirectoryExists(Path cacheDir) throws SnowflakeSQLLoggedException { try { boolean exists = Files.exists(cacheDir); if (!exists) { if (Constants.getOS().isPosix()) { Files.createDirectories( cacheDir, PosixFilePermissions.asFileAttribute(PosixFilePermissions.fromString("rwx------"))); logger.debug("Initialized CRL cache directory: {}", cacheDir); } else { Files.createDirectories(cacheDir); } } if (Constants.getOS().isPosix() && !ownerOnlyPermissions(cacheDir)) { Files.setPosixFilePermissions(cacheDir, PosixFilePermissions.fromString("rwx------")); logger.debug("Set CRL cache directory permissions to 'rwx------'"); } } catch (Exception e) { throw new SnowflakeSQLLoggedException( null, null, SqlState.INTERNAL_ERROR, "Failed to create CRL cache directory", e); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/crl/CRLInMemoryCache.java ================================================ package net.snowflake.client.internal.core.crl; import java.time.Duration; import java.time.Instant; import java.util.concurrent.ConcurrentHashMap; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; class CRLInMemoryCache implements CRLCache { private static final SFLogger logger = SFLoggerFactory.getLogger(CRLInMemoryCache.class); private final ConcurrentHashMap<String, CRLCacheEntry> cache = new ConcurrentHashMap<>(); private final Duration cacheValidityTime; CRLInMemoryCache(Duration cacheValidityTime) { this.cacheValidityTime = cacheValidityTime; } public CRLCacheEntry get(String crlUrl) { CRLCacheEntry entry = cache.get(crlUrl); if (entry != null) { logger.debug("Found CRL in memory cache for {}", crlUrl); } return entry; } public void put(String crlUrl, CRLCacheEntry entry) { cache.put(crlUrl, entry); } public void cleanup() { Instant now = Instant.now();
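// Entries leave this cache for two independent reasons: the CRL itself expired (its nextUpdate
// time passed) or the cached copy aged out of the configured validity window. A small worked
// example of the eviction arithmetic, with illustrative instants:
//
//   java.time.Instant clock = java.time.Instant.parse("2025-01-10T00:00:00Z");
//   java.time.Instant downloaded = java.time.Instant.parse("2025-01-08T00:00:00Z"); // fetched two days earlier
//   java.time.Duration validity = java.time.Duration.ofDays(1);
//   boolean evicted = downloaded.plus(validity).isBefore(clock); // true: the 1-day window lapsed a day ago
//   // isCrlExpired(), by contrast, compares the clock against the CRL's own nextUpdate field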
logger.debug("Cleaning up in-memory CRL cache at {}", now); int initialSize = cache.size(); cache .entrySet() .removeIf( entry -> { CRLCacheEntry cacheEntry = entry.getValue(); boolean expired = cacheEntry.isCrlExpired(now); boolean evicted = cacheEntry.isEvicted(now, cacheValidityTime); logger.debug( "Removing in-memory CRL cache entry for {}: expired={}, evicted={}", entry.getKey(), expired, evicted); return expired || evicted; }); int removedCount = initialSize - cache.size(); if (removedCount > 0) { logger.debug("Removed {} expired/evicted entries from in-memory CRL cache", removedCount); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/crl/CRLValidationResult.java ================================================ package net.snowflake.client.internal.core.crl; enum CRLValidationResult { CHAIN_UNREVOKED, CHAIN_REVOKED, CHAIN_ERROR } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/crl/CRLValidationUtils.java ================================================ package net.snowflake.client.internal.core.crl; import java.security.cert.X509CRL; import java.security.cert.X509Certificate; import java.time.LocalDate; import java.time.ZoneId; import java.util.ArrayList; import java.util.Arrays; import java.util.Date; import java.util.List; import java.util.stream.Collectors; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import org.bouncycastle.asn1.ASN1OctetString; import org.bouncycastle.asn1.ASN1Primitive; import org.bouncycastle.asn1.DERIA5String; import org.bouncycastle.asn1.x509.CRLDistPoint; import org.bouncycastle.asn1.x509.DistributionPoint; import org.bouncycastle.asn1.x509.DistributionPointName; import org.bouncycastle.asn1.x509.GeneralName; import org.bouncycastle.asn1.x509.GeneralNames; import org.bouncycastle.asn1.x509.IssuingDistributionPoint; class CRLValidationUtils { private static final SFLogger logger = SFLoggerFactory.getLogger(CRLValidationUtils.class); // CA/Browser Forum Baseline Requirements date thresholds (using UTC for consistency) private static final Date MARCH_15_2024 = Date.from(LocalDate.of(2024, 3, 15).atStartOfDay(ZoneId.of("UTC")).toInstant()); private static final Date MARCH_15_2026 = Date.from(LocalDate.of(2026, 3, 15).atStartOfDay(ZoneId.of("UTC")).toInstant()); static List extractCRLDistributionPoints(X509Certificate cert) { List crlUrls = new ArrayList<>(); try { byte[] extensionBytes = cert.getExtensionValue("2.5.29.31"); if (extensionBytes == null) { logger.debug( "No CRL Distribution Points extension found for certificate: {}", cert.getSubjectX500Principal()); return crlUrls; } ASN1OctetString octetString = (ASN1OctetString) ASN1Primitive.fromByteArray(extensionBytes); CRLDistPoint crlDistPoint = CRLDistPoint.getInstance(ASN1Primitive.fromByteArray(octetString.getOctets())); DistributionPoint[] distributionPoints = crlDistPoint.getDistributionPoints(); if (distributionPoints != null) { for (DistributionPoint dp : distributionPoints) { DistributionPointName dpName = dp.getDistributionPoint(); if (dpName != null && dpName.getType() == DistributionPointName.FULL_NAME) { GeneralNames generalNames = (GeneralNames) dpName.getName(); for (GeneralName generalName : generalNames.getNames()) { if (generalName.getTagNo() == GeneralName.uniformResourceIdentifier) { String url = ((DERIA5String) generalName.getName()).getString(); if (url.toLowerCase().startsWith("http://") || 
================================================
FILE: src/main/java/net/snowflake/client/internal/core/crl/CRLValidationUtils.java
================================================
package net.snowflake.client.internal.core.crl;

import java.security.cert.X509CRL;
import java.security.cert.X509Certificate;
import java.time.LocalDate;
import java.time.ZoneId;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
import java.util.stream.Collectors;
import net.snowflake.client.internal.log.SFLogger;
import net.snowflake.client.internal.log.SFLoggerFactory;
import org.bouncycastle.asn1.ASN1OctetString;
import org.bouncycastle.asn1.ASN1Primitive;
import org.bouncycastle.asn1.DERIA5String;
import org.bouncycastle.asn1.x509.CRLDistPoint;
import org.bouncycastle.asn1.x509.DistributionPoint;
import org.bouncycastle.asn1.x509.DistributionPointName;
import org.bouncycastle.asn1.x509.GeneralName;
import org.bouncycastle.asn1.x509.GeneralNames;
import org.bouncycastle.asn1.x509.IssuingDistributionPoint;

class CRLValidationUtils {
  private static final SFLogger logger = SFLoggerFactory.getLogger(CRLValidationUtils.class);

  // CA/Browser Forum Baseline Requirements date thresholds (using UTC for consistency)
  private static final Date MARCH_15_2024 =
      Date.from(LocalDate.of(2024, 3, 15).atStartOfDay(ZoneId.of("UTC")).toInstant());
  private static final Date MARCH_15_2026 =
      Date.from(LocalDate.of(2026, 3, 15).atStartOfDay(ZoneId.of("UTC")).toInstant());

  static List<String> extractCRLDistributionPoints(X509Certificate cert) {
    List<String> crlUrls = new ArrayList<>();
    try {
      byte[] extensionBytes = cert.getExtensionValue("2.5.29.31");
      if (extensionBytes == null) {
        logger.debug(
            "No CRL Distribution Points extension found for certificate: {}",
            cert.getSubjectX500Principal());
        return crlUrls;
      }
      ASN1OctetString octetString = (ASN1OctetString) ASN1Primitive.fromByteArray(extensionBytes);
      CRLDistPoint crlDistPoint =
          CRLDistPoint.getInstance(ASN1Primitive.fromByteArray(octetString.getOctets()));
      DistributionPoint[] distributionPoints = crlDistPoint.getDistributionPoints();
      if (distributionPoints != null) {
        for (DistributionPoint dp : distributionPoints) {
          DistributionPointName dpName = dp.getDistributionPoint();
          if (dpName != null && dpName.getType() == DistributionPointName.FULL_NAME) {
            GeneralNames generalNames = (GeneralNames) dpName.getName();
            for (GeneralName generalName : generalNames.getNames()) {
              if (generalName.getTagNo() == GeneralName.uniformResourceIdentifier) {
                String url = ((DERIA5String) generalName.getName()).getString();
                if (url.toLowerCase().startsWith("http://")
                    || url.toLowerCase().startsWith("https://")) {
                  logger.debug("Found CRL URL: {}", url);
                  crlUrls.add(url);
                }
              }
            }
          }
        }
      }
    } catch (Exception e) {
      logger.debug(
          "Failed to extract CRL distribution points from certificate {}: {}",
          cert.getSubjectX500Principal(),
          e.getMessage());
    }
    logger.debug(
        "Extracted {} CRL URLs for certificate: {}",
        crlUrls.size(),
        cert.getSubjectX500Principal());
    return crlUrls;
  }

  /**
   * Determines if a certificate is short-lived according to CA/Browser Forum Baseline
   * Requirements.
   */
  static boolean isShortLived(X509Certificate cert) {
    Date notBefore = cert.getNotBefore();
    Date notAfter = cert.getNotAfter();

    // Certificates issued before March 15, 2024 are not considered short-lived
    if (notBefore.before(MARCH_15_2024)) {
      return false;
    }

    // Determine the maximum validity period based on issuance date
    long maxValidityPeriodMs;
    if (notBefore.before(MARCH_15_2026)) {
      maxValidityPeriodMs =
          10L * 24 * 60 * 60 * 1000; // 10 days for certificates issued before March 15, 2026
    } else {
      maxValidityPeriodMs =
          7L * 24 * 60 * 60 * 1000; // 7 days for certificates issued on or after March 15, 2026
    }

    // Add 1 minute margin to account for clock differences and inclusive time boundaries
    maxValidityPeriodMs += 60 * 1000;

    long actualValidityPeriodMs = notAfter.getTime() - notBefore.getTime();
    return actualValidityPeriodMs <= maxValidityPeriodMs;
  }

  static boolean verifyIssuingDistributionPoint(X509CRL crl, X509Certificate cert, String crlUrl) {
    try {
      byte[] extensionBytes = crl.getExtensionValue("2.5.29.28");
      if (extensionBytes == null) {
        logger.debug("No IDP extension found - CRL covers all certificates");
        return true;
      }
      ASN1OctetString octetString = (ASN1OctetString) ASN1Primitive.fromByteArray(extensionBytes);
      IssuingDistributionPoint idp =
          IssuingDistributionPoint.getInstance(
              ASN1Primitive.fromByteArray(octetString.getOctets()));

      // Check if this CRL only covers user certificates
      if (idp.onlyContainsUserCerts() && cert.getBasicConstraints() != -1) {
        logger.debug("CRL only covers user certificates, but certificate is a CA certificate");
        return false;
      }

      // Check if this CRL only covers CA certificates
      if (idp.onlyContainsCACerts() && cert.getBasicConstraints() == -1) {
        logger.debug("CRL only covers CA certificates, but certificate is not a CA certificate");
        return false;
      }

      // Check if this CRL only covers specific revocation reasons
      if (idp.getOnlySomeReasons() != null) {
        logger.debug(
            "CRL only covers specific revocation reasons (onlySomeReasons is set) - "
                + "treating as not authoritative for full revocation checking");
        return false;
      }

      DistributionPointName dpName = idp.getDistributionPoint();
      if (dpName != null) {
        if (dpName.getType() == DistributionPointName.FULL_NAME) {
          GeneralNames generalNames = (GeneralNames) dpName.getName();
          boolean foundMatch = false;
          for (GeneralName generalName : generalNames.getNames()) {
            if (generalName.getTagNo() == GeneralName.uniformResourceIdentifier) {
              String idpUrl = ((DERIA5String) generalName.getName()).getString();
              if (idpUrl.equals(crlUrl)) {
                foundMatch = true;
                break;
              }
            }
          }
          if (!foundMatch) {
            logger.debug(
                "CRL URL {} not found in IDP distribution points - this CRL is not authorized for this certificate",
                crlUrl);
            return false;
          }
        }
      }

      logger.debug("IDP extension verification passed");
      return true;
    } catch (Exception e) {
      logger.debug("Failed to verify IDP extension: {}", e.getMessage());
      return false;
    }
  }

  static String getCertChainSubjects(List<X509Certificate[]> certificateChains) {
    return certificateChains.stream()
        .flatMap(Arrays::stream)
        .map(cert ->
cert.getSubjectX500Principal().getName()) .collect(Collectors.joining(", ")); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/crl/CRLValidator.java ================================================ package net.snowflake.client.internal.core.crl; import static net.snowflake.client.internal.core.crl.CRLValidationUtils.extractCRLDistributionPoints; import static net.snowflake.client.internal.core.crl.CRLValidationUtils.getCertChainSubjects; import static net.snowflake.client.internal.core.crl.CRLValidationUtils.isShortLived; import static net.snowflake.client.internal.core.crl.CRLValidationUtils.verifyIssuingDistributionPoint; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; import java.net.URL; import java.security.InvalidKeyException; import java.security.NoSuchAlgorithmException; import java.security.NoSuchProviderException; import java.security.PublicKey; import java.security.SignatureException; import java.security.cert.CRLException; import java.security.cert.CertificateException; import java.security.cert.CertificateFactory; import java.security.cert.X509CRL; import java.security.cert.X509CRLEntry; import java.security.cert.X509Certificate; import java.time.Instant; import java.util.ArrayList; import java.util.Date; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import net.snowflake.client.internal.core.HttpClientSettingsKey; import net.snowflake.client.internal.jdbc.telemetry.PreSessionTelemetryClient; import net.snowflake.client.internal.jdbc.telemetry.RevocationCheckTelemetryData; import net.snowflake.client.internal.jdbc.telemetry.Telemetry; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import org.apache.commons.io.IOUtils; import org.apache.commons.io.input.BoundedInputStream; import org.apache.http.client.methods.CloseableHttpResponse; import org.apache.http.client.methods.HttpGet; import org.apache.http.impl.client.CloseableHttpClient; public class CRLValidator { private static final SFLogger logger = SFLoggerFactory.getLogger(CRLValidator.class); private static final Map validatorRegistryForTelemetry = new ConcurrentHashMap<>(); private final Map urlLocks = new ConcurrentHashMap<>(); private final CloseableHttpClient httpClient; private final CRLCacheManager cacheManager; private final CertRevocationCheckMode certRevocationCheckMode; private final boolean allowCertificatesWithoutCrlUrl; private final Telemetry telemetryClient; public CRLValidator( CertRevocationCheckMode revocationCheckMode, boolean allowCertificatesWithoutCrlUrl, CloseableHttpClient httpClient, CRLCacheManager cacheManager, Telemetry telemetryClient) { this.httpClient = httpClient; this.cacheManager = cacheManager; this.certRevocationCheckMode = revocationCheckMode; this.allowCertificatesWithoutCrlUrl = allowCertificatesWithoutCrlUrl; this.telemetryClient = telemetryClient; } /** * Validates certificate chains against CRLs. 
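* In DISABLED mode the check is skipped entirely; in ENABLED mode validation fails unless some chain is fully unrevoked; in ADVISORY mode transient errors and incomplete checks fail open and the connection is still allowed.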
* * @param certificateChains the verified certificate chains to validate * @return true if validation passes, false otherwise */ public boolean validateCertificateChains(List certificateChains) { if (this.certRevocationCheckMode == CertRevocationCheckMode.DISABLED) { logger.debug("CRL validation is disabled"); return true; // OPEN } if (certificateChains == null || certificateChains.isEmpty()) { throw new IllegalArgumentException("Certificate chains cannot be null or empty"); } logger.debug( "Validating {} certificate chains with subjects: {}", certificateChains.size(), getCertChainSubjects(certificateChains)); List crlValidationResults = validateChains(certificateChains); if (crlValidationResults.get(crlValidationResults.size() - 1) == CRLValidationResult.CHAIN_UNREVOKED) { logger.debug("Found certificate chain with all certificates unrevoked"); return true; // OPEN } if (containsOnlyRevokedChains(crlValidationResults)) { logger.debug("Every verified certificate chain contained revoked certificates"); return false; } logger.debug("Some certificate chains didn't pass or driver wasn't able to perform the checks"); if (this.certRevocationCheckMode == CertRevocationCheckMode.ADVISORY) { logger.debug("Advisory mode: allowing connection despite validation issues"); return true; // FAIL OPEN } return false; } private List validateChains(List certChains) { List chainsValidationResults = new ArrayList<>(); for (X509Certificate[] certChain : certChains) { CRLValidationResult chainResult = CRLValidationResult.CHAIN_UNREVOKED; // Validate each certificate in the chain against CRL, skip the root certificate for (int i = 0; i < certChain.length; i++) { X509Certificate cert = certChain[i]; boolean isRoot = (i == certChain.length - 1); if (isRoot) { break; } X509Certificate parentCert = certChain[i + 1]; if (isShortLived(cert)) { logger.debug("Skipping short-lived certificate: {}", cert.getSubjectX500Principal()); continue; } List crlUrls = extractCRLDistributionPoints(cert); if (crlUrls.isEmpty()) { if (this.allowCertificatesWithoutCrlUrl) { logger.debug( "Certificate has missing CRL Distribution Point URLs: {}", cert.getSubjectX500Principal()); continue; } chainResult = CRLValidationResult.CHAIN_ERROR; continue; } CertificateValidationResult certStatus = validateCert(cert, parentCert); if (certStatus == CertificateValidationResult.CERT_REVOKED) { chainResult = CRLValidationResult.CHAIN_REVOKED; break; } if (certStatus == CertificateValidationResult.CERT_ERROR) { chainResult = CRLValidationResult.CHAIN_ERROR; } } chainsValidationResults.add(chainResult); if (chainResult == CRLValidationResult.CHAIN_UNREVOKED) { logger.debug("Found valid certificate chain, stopping validation of remaining chains"); break; } } return chainsValidationResults; } private CertificateValidationResult validateCert( X509Certificate cert, X509Certificate parentCert) { List crlUrls = extractCRLDistributionPoints(cert); Set results = new HashSet<>(); for (String url : crlUrls) { CertificateValidationResult result = validateCert(cert, url, parentCert); if (result == CertificateValidationResult.CERT_REVOKED) { return result; } results.add(result); } if (results.contains(CertificateValidationResult.CERT_ERROR)) { return CertificateValidationResult.CERT_ERROR; } else { return CertificateValidationResult.CERT_UNREVOKED; } } private CertificateValidationResult validateCert( X509Certificate cert, String crlUrl, X509Certificate parentCert) { // Thread-safe processing of CRL for given crlUrl Lock lock = urlLocks.computeIfAbsent(crlUrl, k -> 
new ReentrantLock()); lock.lock(); try { Instant now = Instant.now(); RevocationCheckTelemetryData revocationTelemetry = new RevocationCheckTelemetryData(); revocationTelemetry.setCrlUrl(crlUrl); CRLCacheEntry cacheEntry = cacheManager.get(crlUrl); X509CRL crl = cacheEntry != null ? cacheEntry.getCrl() : null; Instant downloadTime = cacheEntry != null ? cacheEntry.getDownloadTime() : null; boolean needsFreshCrl = crl == null || (crl.getNextUpdate() != null && crl.getNextUpdate().toInstant().isBefore(now)) || cacheEntry.isEvicted(now, cacheManager.getCacheValidityTime()); boolean shouldUpdateCache = false; if (needsFreshCrl) { X509CRL newCrl = fetchCrl(crlUrl, revocationTelemetry); if (newCrl != null) { shouldUpdateCache = crl == null || newCrl.getThisUpdate().after(crl.getThisUpdate()); if (shouldUpdateCache) { logger.debug("Found updated CRL for {}", crlUrl); crl = newCrl; downloadTime = now; } else { // New CRL isn't newer, check if old one is still valid if (crl.getNextUpdate() != null && crl.getNextUpdate().toInstant().isAfter(now)) { logger.debug("CRL for {} is up-to-date, using cached version", crlUrl); } else { logger.warn("CRL for {} is not available or outdated", crlUrl); return CertificateValidationResult.CERT_ERROR; } } } else { if (crl != null && crl.getNextUpdate() != null && crl.getNextUpdate().toInstant().isAfter(now)) { logger.debug( "Using cached CRL for {} (fetch failed but cached version still valid)", crlUrl); } else { logger.error( "Unable to fetch fresh CRL from {} and no valid cached version available", crlUrl); return CertificateValidationResult.CERT_ERROR; } } } int numberOfRevokedCertificates = crl.getRevokedCertificates() != null ? crl.getRevokedCertificates().size() : 0; logger.debug( "CRL has {} revoked entries, next update at {}", numberOfRevokedCertificates, crl.getNextUpdate()); revocationTelemetry.setNumberOfRevokedCertificates(numberOfRevokedCertificates); if (!isCrlSignatureAndIssuerValid(crl, cert, parentCert, crlUrl)) { logger.debug("Unable to verify CRL for {}", crlUrl); return CertificateValidationResult.CERT_ERROR; } // Update cache if we have a new/updated CRL if (shouldUpdateCache) { logger.debug("CRL for {} is valid, updating cache", crlUrl); cacheManager.put(crlUrl, crl, downloadTime); } if (isCertificateRevoked(crl, cert)) { logger.debug( "Certificate {} is revoked according to CRL {}", cert.getSerialNumber(), crlUrl); return CertificateValidationResult.CERT_REVOKED; } telemetryClient.addLogToBatch(revocationTelemetry.buildTelemetry()); return CertificateValidationResult.CERT_UNREVOKED; } finally { lock.unlock(); } } private boolean isCrlSignatureAndIssuerValid( X509CRL crl, X509Certificate cert, X509Certificate parentCert, String crlUrl) { try { if (!crl.getIssuerX500Principal().equals(parentCert.getSubjectX500Principal())) { logger.debug( "CRL issuer {} does not match parent certificate subject {} for {}", crl.getIssuerX500Principal(), parentCert.getSubjectX500Principal(), "validation"); return false; } Date now = new Date(); if (crl.getNextUpdate() != null && now.after(crl.getNextUpdate())) { logger.debug("CRL has expired: nextUpdate={}, now={}", crl.getNextUpdate(), now); return false; } PublicKey parentPublicKey = parentCert.getPublicKey(); try { crl.verify(parentPublicKey); logger.debug("CRL signature verified successfully using parent certificate"); } catch (InvalidKeyException | NoSuchAlgorithmException | NoSuchProviderException | SignatureException e) { logger.debug("CRL signature verification failed: {}", e.getMessage()); return false; 
} if (!verifyIssuingDistributionPoint(crl, cert, crlUrl)) { logger.debug("IDP extension verification failed"); return false; } return true; } catch (Exception e) { logger.debug("CRL validation failed: {}", e.getMessage()); return false; } } private boolean isCertificateRevoked(X509CRL crl, X509Certificate cert) { X509CRLEntry entry = crl.getRevokedCertificate(cert.getSerialNumber()); return entry != null; } private X509CRL fetchCrl(String crlUrl, RevocationCheckTelemetryData revocationTelemetry) { try { logger.debug("Fetching CRL from {}", crlUrl); URL url = new URL(crlUrl); HttpGet get = new HttpGet(url.toString()); CertificateFactory cf = CertificateFactory.getInstance("X.509"); long start = System.currentTimeMillis(); try (CloseableHttpResponse response = this.httpClient.execute(get)) { try (InputStream inputStream = response.getEntity().getContent()) { long maxSize = CRLCacheConfig.getCrlDownloadMaxSizeBytes(); InputStream bounded = BoundedInputStream.builder() .setInputStream(inputStream) .setMaxCount(maxSize + 1) .get(); byte[] crlData = IOUtils.toByteArray(bounded); if (crlData.length > maxSize) { logger.warn( "CRL from {} exceeds max download size of {} bytes, aborting", crlUrl, maxSize); return null; } revocationTelemetry.setTimeDownloadingCrl(System.currentTimeMillis() - start); start = System.currentTimeMillis(); X509CRL crl = (X509CRL) cf.generateCRL(new ByteArrayInputStream(crlData)); long crlBytes = crl.getEncoded().length; revocationTelemetry.setTimeParsingCrl(System.currentTimeMillis() - start); revocationTelemetry.setCrlBytes(crlBytes); return crl; } } } catch (IOException | CRLException | CertificateException e) { logger.debug("Failed to fetch CRL from {}: {}", crlUrl, e.getMessage()); return null; } } private boolean containsOnlyRevokedChains(List results) { return !results.isEmpty() && results.stream().allMatch(result -> result == CRLValidationResult.CHAIN_REVOKED); } /** * Multiple sessions may share the same HttpClientSettingsKey thus CRL telemetry might be sent for * wrong session. We accept this limitation. 
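* Validators are added to the registry by CrlRevocationManager via registerValidator; this method later injects the session's real telemetry client into the matching validator's PreSessionTelemetryClient.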
*/ public static void setTelemetryClientForKey( HttpClientSettingsKey key, Telemetry telemetryClient) { CRLValidator result = validatorRegistryForTelemetry.computeIfPresent( key, (k, validator) -> { validator.provideTelemetryClient(telemetryClient); return validator; }); if (result == null) { logger.debug("No CRL validator found for key: {}", key); } } public static void registerValidator(HttpClientSettingsKey key, CRLValidator validator) { validatorRegistryForTelemetry.put(key, validator); } private void provideTelemetryClient(Telemetry telemetryClient) { try { PreSessionTelemetryClient preSessionTelemetryClient = (PreSessionTelemetryClient) this.telemetryClient; if (!preSessionTelemetryClient.hasRealTelemetryClient()) { preSessionTelemetryClient.setRealTelemetryClient(telemetryClient); } } catch (Exception e) { logger.warn("Failed to set real telemetry client for trust manager: {}", e.getMessage()); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/crl/CertRevocationCheckMode.java ================================================ package net.snowflake.client.internal.core.crl; public enum CertRevocationCheckMode { DISABLED, ENABLED, ADVISORY } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/crl/CertificateValidationResult.java ================================================ package net.snowflake.client.internal.core.crl; enum CertificateValidationResult { CERT_UNREVOKED, CERT_REVOKED, CERT_ERROR } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/crl/CrlRevocationManager.java ================================================ package net.snowflake.client.internal.core.crl; import java.security.cert.CertPathBuilderException; import java.security.cert.CertificateException; import java.security.cert.X509Certificate; import java.util.List; import javax.net.ssl.X509TrustManager; import net.snowflake.client.internal.core.HttpClientSettingsKey; import net.snowflake.client.internal.core.HttpUtil; import net.snowflake.client.internal.exception.SnowflakeSQLLoggedException; import net.snowflake.client.internal.jdbc.telemetry.PreSessionTelemetryClient; import org.apache.http.impl.client.CloseableHttpClient; public class CrlRevocationManager { private static final CRLCacheManager crlCacheManager; private final VerifiedCertPathBuilder certPathBuilder; private final CRLValidator crlValidator; static { try { crlCacheManager = CRLCacheManager.build( CRLCacheConfig.getInMemoryCacheEnabled(), CRLCacheConfig.getOnDiskCacheEnabled(), CRLCacheConfig.getOnDiskCacheDir(), CRLCacheConfig.getCrlOnDiskCacheRemovalDelay(), CRLCacheConfig.getCacheValidityTime()); } catch (SnowflakeSQLLoggedException e) { throw new ExceptionInInitializerError(e); } } public CrlRevocationManager(HttpClientSettingsKey key, X509TrustManager trustManager) throws CertificateException { CloseableHttpClient httpClient = HttpUtil.getHttpClientForCrl(key); this.certPathBuilder = new VerifiedCertPathBuilder(trustManager); this.crlValidator = new CRLValidator( key.getRevocationCheckMode(), key.isAllowCertificatesWithoutCrlUrl(), httpClient, crlCacheManager, new PreSessionTelemetryClient()); CRLValidator.registerValidator(key, this.crlValidator); } public void validateRevocationStatus(X509Certificate[] chain, String authType) throws CertificateException { try { List certificates = this.certPathBuilder.buildAllVerifiedPaths(chain, authType); boolean validationResult = 
this.crlValidator.validateCertificateChains(certificates);
      if (!validationResult) {
        throw new CertificateException(
            "No unrevoked certificate chain was found during the CRL revocation check, or a transient error occurred while not in advisory mode");
      }
    } catch (CertPathBuilderException e) {
      throw new CertificateException("Certificate revocation check failed", e);
    }
  }
}

================================================
FILE: src/main/java/net/snowflake/client/internal/core/crl/NoopCRLCache.java
================================================
package net.snowflake.client.internal.core.crl;

class NoopCRLCache implements CRLCache {
  static final CRLCache INSTANCE = new NoopCRLCache();

  @Override
  public CRLCacheEntry get(String crlUrl) {
    return null;
  }

  @Override
  public void put(String crlUrl, CRLCacheEntry entry) {}

  @Override
  public void cleanup() {}
}

================================================
FILE: src/main/java/net/snowflake/client/internal/core/crl/VerifiedCertPathBuilder.java
================================================
package net.snowflake.client.internal.core.crl;

import java.security.InvalidAlgorithmParameterException;
import java.security.NoSuchAlgorithmException;
import java.security.cert.CertPath;
import java.security.cert.CertPathBuilder;
import java.security.cert.CertPathBuilderException;
import java.security.cert.CertPathBuilderResult;
import java.security.cert.CertStore;
import java.security.cert.Certificate;
import java.security.cert.CertificateException;
import java.security.cert.CollectionCertStoreParameters;
import java.security.cert.PKIXBuilderParameters;
import java.security.cert.PKIXCertPathBuilderResult;
import java.security.cert.TrustAnchor;
import java.security.cert.X509CertSelector;
import java.security.cert.X509Certificate;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import javax.net.ssl.X509TrustManager;
import net.snowflake.client.internal.log.SFLogger;
import net.snowflake.client.internal.log.SFLoggerFactory;

/**
 * Builds and verifies certificate paths using a truststore and CertPathBuilder. This class takes a
 * certificate chain presented by a server and returns verified paths that include trust anchors
 * for CRL validation support.
 */
public class VerifiedCertPathBuilder {
  private static final SFLogger logger = SFLoggerFactory.getLogger(VerifiedCertPathBuilder.class);
  private final X509TrustManager trustManager;
  private final Set<TrustAnchor> trustAnchors;

  /**
   * Constructor that initializes the VerifiedCertPathBuilder with the provided trust manager.
   *
   * @param trustManager the X509TrustManager to use for certificate validation
   * @throws IllegalArgumentException if trustManager is null
   * @throws CertificateException if trust anchors cannot be created from the trust manager
   */
  public VerifiedCertPathBuilder(X509TrustManager trustManager) throws CertificateException {
    if (trustManager == null) {
      throw new IllegalArgumentException("Trust manager cannot be null");
    }
    this.trustManager = trustManager;
    this.trustAnchors = createTrustAnchors(trustManager);
  }

  /**
   * Builds and verifies all possible certificate paths from leaf certificates to trust anchors.
   * Unlike standard PKIX path building, this method includes trust anchor certificates at the end
   * of each path for CRL validation support.
* * @param certificateChain the certificate chain presented by the server * @param authType the authentication type used for the connection * @return a list of all verified certificate paths with trust anchors included * @throws CertificateException if certificate validation fails * @throws CertPathBuilderException if no valid certificate paths could be built */ public List buildAllVerifiedPaths( X509Certificate[] certificateChain, String authType) throws CertificateException, CertPathBuilderException { if (certificateChain == null || certificateChain.length == 0) { throw new IllegalArgumentException("Certificate chain cannot be null or empty"); } if (authType == null || authType.trim().isEmpty()) { throw new IllegalArgumentException("Authentication type cannot be null or empty"); } logger.debug( "Building verified paths for chain length: {} with authType: {}", certificateChain.length, authType); List allVerifiedPaths = new ArrayList<>(); try { List certCollection = Arrays.asList(certificateChain); CertStore certStore = CertStore.getInstance("Collection", new CollectionCertStoreParameters(certCollection)); X509Certificate leafCertificate = identifyLeafCertificate(certificateChain); logger.debug("Identified leaf certificate: {}", leafCertificate.getSubjectX500Principal()); allVerifiedPaths.addAll( findAllPathsForTarget(leafCertificate, trustAnchors, certStore, authType)); } catch (NoSuchAlgorithmException | InvalidAlgorithmParameterException e) { throw new CertificateException("Failed to build certificate paths", e); } if (allVerifiedPaths.isEmpty()) { throw new CertPathBuilderException("No valid certificate paths could be built"); } logger.debug("Successfully built {} verified certificate paths", allVerifiedPaths.size()); return allVerifiedPaths; } /** Finds all possible valid paths from a leaf certificate to trust anchors. 
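* Each trust anchor is tried in isolation with its own PKIX build, so alternative chains to different roots are all discovered rather than only the first path PKIX would choose.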
*/ private List findAllPathsForTarget( X509Certificate targetCert, Set trustAnchors, CertStore certStore, String authType) { List pathsForTarget = new ArrayList<>(); for (TrustAnchor trustAnchor : trustAnchors) { try { Set singleTrustAnchor = Collections.singleton(trustAnchor); PKIXBuilderParameters singleAnchorParams = new PKIXBuilderParameters(singleTrustAnchor, null); singleAnchorParams.addCertStore(certStore); singleAnchorParams.setRevocationEnabled(false); X509CertSelector selector = new X509CertSelector(); selector.setCertificate(targetCert); singleAnchorParams.setTargetCertConstraints(selector); CertPathBuilder builder = CertPathBuilder.getInstance("PKIX"); CertPathBuilderResult result = builder.build(singleAnchorParams); if (result instanceof PKIXCertPathBuilderResult) { PKIXCertPathBuilderResult pkixResult = (PKIXCertPathBuilderResult) result; CertPath certPath = pkixResult.getCertPath(); try { X509Certificate[] certArray = convertCertPathToArray(certPath); trustManager.checkServerTrusted(certArray, authType); // Create path with trust anchor included for CRL validation X509Certificate[] pathWithTrustAnchor = new X509Certificate[certArray.length + 1]; System.arraycopy(certArray, 0, pathWithTrustAnchor, 0, certArray.length); pathWithTrustAnchor[certArray.length] = trustAnchor.getTrustedCert(); pathsForTarget.add(pathWithTrustAnchor); logger.trace( "Found valid path via trust anchor {}: length {}", trustAnchor.getTrustedCert().getSubjectX500Principal(), pathWithTrustAnchor.length); } catch (CertificateException e) { logger.trace( "Path validation failed via trust anchor {}: {}", trustAnchor.getTrustedCert().getSubjectX500Principal(), e.getMessage()); } } } catch (CertPathBuilderException | NoSuchAlgorithmException | InvalidAlgorithmParameterException e) { logger.trace( "Failed to build path via trust anchor {}: {}", trustAnchor.getTrustedCert().getSubjectX500Principal(), e.getMessage()); } } return pathsForTarget; } /** * Identifies the leaf certificate (end-entity certificate) in the certificate chain. * * @param certificateChain the certificate chain to analyze * @return the leaf certificate found in the chain * @throws CertificateException if no leaf certificate is found in the chain */ private X509Certificate identifyLeafCertificate(X509Certificate[] certificateChain) throws CertificateException { Set leafCerts = Arrays.stream(certificateChain) .filter( cert -> cert != null && cert.getBasicConstraints() == -1) // Basic constraints -1 indicates a leaf certificate .collect(Collectors.toSet()); if (leafCerts.isEmpty()) { throw new CertificateException("No leaf certificate found in the chain"); } if (leafCerts.size() > 1) { throw new CertificateException("Multiple leaf certificates found"); } return leafCerts.iterator().next(); } /** Creates trust anchors from the truststore. */ private Set createTrustAnchors(X509TrustManager trustManager) throws CertificateException { Set trustAnchors = new HashSet<>(); try { X509Certificate[] trustedCerts = trustManager.getAcceptedIssuers(); for (X509Certificate cert : trustedCerts) { trustAnchors.add(new TrustAnchor(cert, null)); } logger.debug("Created {} trust anchors from truststore", trustAnchors.size()); } catch (Exception e) { throw new CertificateException("Failed to create trust anchors", e); } return trustAnchors; } /** Converts a CertPath to an X509Certificate array. 
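* Rejects empty paths and any non-X509 entries so that callers always receive a well-typed, non-empty array.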
*/ private X509Certificate[] convertCertPathToArray(CertPath certPath) throws CertificateException { List certificates = certPath.getCertificates(); if (certificates == null || certificates.isEmpty()) { throw new CertificateException("Certificate path is empty"); } X509Certificate[] certArray = new X509Certificate[certificates.size()]; for (int i = 0; i < certificates.size(); i++) { Certificate cert = certificates.get(i); if (!(cert instanceof X509Certificate)) { throw new CertificateException( "Certificate path contains non-X509 certificate: " + cert.getClass().getCanonicalName()); } certArray[i] = (X509Certificate) cert; } return certArray; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/json/BooleanConverter.java ================================================ package net.snowflake.client.internal.core.json; import java.sql.Types; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.jdbc.SnowflakeUtil; public class BooleanConverter { public Boolean getBoolean(Object obj, int columnType) throws SFException { if (obj == null) { return false; } if (obj instanceof Boolean) { return (Boolean) obj; } // if type is an approved type that can be converted to Boolean, do this if (columnType == Types.BOOLEAN || columnType == Types.INTEGER || columnType == Types.SMALLINT || columnType == Types.TINYINT || columnType == Types.BIGINT || columnType == Types.BIT || columnType == Types.VARCHAR || columnType == Types.CHAR || columnType == Types.DECIMAL) { String type = obj.toString(); if ("1".equals(type) || Boolean.TRUE.toString().equalsIgnoreCase(type)) { return true; } if ("0".equals(type) || Boolean.FALSE.toString().equalsIgnoreCase(type)) { return false; } } throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, columnType, SnowflakeUtil.BOOLEAN_STR, obj); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/json/BytesConverter.java ================================================ package net.snowflake.client.internal.core.json; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.sql.Types; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.common.core.SFBinary; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import org.apache.arrow.vector.Float8Vector; public class BytesConverter { private final Converters converters; public BytesConverter(Converters converters) { this.converters = converters; } public byte[] getBytes(Object obj, int columnType, int columnSubType, Integer scale) throws SFException { if (obj == null) { return null; } if (obj instanceof byte[]) { return (byte[]) obj; } try { // For all types except time/date/timestamp data, convert data into byte array. Different // methods are needed // for different types. 
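// - FLOAT/DOUBLE: 8-byte IEEE 754 big-endian encoding via ByteBuffer
      // - integer family (NUMERIC/INTEGER/SMALLINT/TINYINT/BIGINT): two's-complement bytes of the BigInteger value
      // - VARCHAR/CHAR/STRUCT/ARRAY/VECTOR: UTF-8 bytes of the string form
      // - BOOLEAN: a single 0 or 1 byte
      // - TIMESTAMP/TIME/DATE/DECIMAL: unsupported, raises INVALID_VALUE_CONVERT
      // - everything else: treated as a hex string and decoded via SFBinary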
switch (columnType) { case Types.FLOAT: case Types.DOUBLE: return ByteBuffer.allocate(Float8Vector.TYPE_WIDTH) .putDouble(0, converters.getNumberConverter().getDouble(obj, columnType)) .array(); case Types.NUMERIC: case Types.INTEGER: case Types.SMALLINT: case Types.TINYINT: case Types.BIGINT: return converters .getNumberConverter() .getBigDecimal(obj, columnType, scale) .toBigInteger() .toByteArray(); case Types.VARCHAR: case Types.CHAR: case Types.STRUCT: case Types.ARRAY: case SnowflakeType.EXTRA_TYPES_VECTOR: return converters .getStringConverter() .getString(obj, columnType, columnSubType, scale) .getBytes(StandardCharsets.UTF_8); case Types.BOOLEAN: return converters.getBooleanConverter().getBoolean(obj, columnType) ? new byte[] {1} : new byte[] {0}; case Types.TIMESTAMP: case Types.TIME: case Types.DATE: case Types.DECIMAL: throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, columnType, SnowflakeUtil.BYTES_STR, obj); default: return SFBinary.fromHex(obj.toString()).getBytes(); } } catch (IllegalArgumentException ex) { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, columnType, SnowflakeUtil.BYTES_STR, obj); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/json/Converters.java ================================================ package net.snowflake.client.internal.core.json; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import java.sql.Date; import java.sql.Time; import java.sql.Timestamp; import java.sql.Types; import java.time.Instant; import java.time.ZoneOffset; import java.util.Arrays; import java.util.Map; import java.util.TimeZone; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.internal.common.core.SFBinaryFormat; import net.snowflake.client.internal.core.SFBaseSession; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.core.SfTimestampUtil; import net.snowflake.client.internal.core.arrow.StructuredTypeDateTimeConverter; import net.snowflake.client.internal.jdbc.SnowflakeResultSetSerializableV1; import net.snowflake.client.internal.util.Converter; import net.snowflake.common.core.SFTimestamp; import net.snowflake.common.core.SnowflakeDateTimeFormat; public class Converters { private final BooleanConverter booleanConverter; private final NumberConverter numberConverter; private final DateTimeConverter dateTimeConverter; private final BytesConverter bytesConverter; private final StringConverter stringConverter; private final StructuredTypeDateTimeConverter structuredTypeDateTimeConverter; public Converters( TimeZone sessionTimeZone, SFBaseSession session, long resultVersion, boolean honorClientTZForTimestampNTZ, boolean treatNTZAsUTC, boolean useSessionTimezone, boolean formatDateWithTimeZone, SFBinaryFormat binaryFormatter, SnowflakeDateTimeFormat dateFormatter, SnowflakeDateTimeFormat timeFormatter, SnowflakeDateTimeFormat timestampNTZFormatter, SnowflakeDateTimeFormat timestampLTZFormatter, SnowflakeDateTimeFormat timestampTZFormatter) { booleanConverter = new BooleanConverter(); numberConverter = new NumberConverter(); dateTimeConverter = new DateTimeConverter( sessionTimeZone, session, resultVersion, honorClientTZForTimestampNTZ, treatNTZAsUTC, useSessionTimezone, formatDateWithTimeZone); bytesConverter = new BytesConverter(this); stringConverter = new StringConverter( sessionTimeZone, binaryFormatter, dateFormatter, timeFormatter, timestampNTZFormatter, 
timestampLTZFormatter, timestampTZFormatter, resultVersion, session, this); structuredTypeDateTimeConverter = new StructuredTypeDateTimeConverter( sessionTimeZone, resultVersion, honorClientTZForTimestampNTZ, treatNTZAsUTC, useSessionTimezone, formatDateWithTimeZone); } public Converters(SFBaseSession session, SnowflakeResultSetSerializableV1 resultSetSerializable) { this( resultSetSerializable.getTimeZone(), session, resultSetSerializable.getResultVersion(), resultSetSerializable.isHonorClientTZForTimestampNTZ(), resultSetSerializable.getTreatNTZAsUTC(), resultSetSerializable.getUseSessionTimezone(), resultSetSerializable.getFormatDateWithTimeZone(), resultSetSerializable.getBinaryFormatter(), resultSetSerializable.getDateFormatter(), resultSetSerializable.getTimeFormatter(), resultSetSerializable.getTimestampNTZFormatter(), resultSetSerializable.getTimestampLTZFormatter(), resultSetSerializable.getTimestampTZFormatter()); } public BooleanConverter getBooleanConverter() { return booleanConverter; } public NumberConverter getNumberConverter() { return numberConverter; } public DateTimeConverter getDateTimeConverter() { return dateTimeConverter; } public BytesConverter getBytesConverter() { return bytesConverter; } public StringConverter getStringConverter() { return stringConverter; } public StructuredTypeDateTimeConverter getStructuredTypeDateTimeConverter() { return structuredTypeDateTimeConverter; } public Converter integerConverter(int columnType) { return value -> getNumberConverter().getInt(value, columnType); } public Converter smallIntConverter(int columnType) { return value -> getNumberConverter().getShort(value, columnType); } public Converter tinyIntConverter(int columnType) { return value -> getNumberConverter().getByte(value); } public Converter bigIntConverter(int columnType) { return value -> getNumberConverter().getBigInt(value, columnType); } public Converter longConverter(int columnType) { return value -> getNumberConverter().getLong(value, columnType); } public Converter bigDecimalConverter(int columnType) { return value -> getNumberConverter().getBigDecimal(value, columnType); } public Converter floatConverter(int columnType) { return value -> getNumberConverter().getFloat(value, columnType); } public Converter doubleConverter(int columnType) { return value -> getNumberConverter().getDouble(value, columnType); } public Converter bytesConverter(int columnType, int scale) { return value -> { byte[] primitiveArray = getBytesConverter().getBytes(value, columnType, Types.BINARY, scale); Byte[] newByteArray = new Byte[primitiveArray.length]; Arrays.setAll(newByteArray, n -> primitiveArray[n]); return newByteArray; }; } public Converter varcharConverter(int columnType, int columnSubType, int scale) { return value -> getStringConverter().getString(value, columnType, columnSubType, scale); } public Converter booleanConverter(int columnType) { return value -> getBooleanConverter().getBoolean(value, columnType); } public Converter dateStringConverter(SFBaseSession session) { return value -> { SnowflakeDateTimeFormat formatter = SnowflakeDateTimeFormat.fromSqlFormat( (String) session.getCommonParameters().get("DATE_OUTPUT_FORMAT")); SFTimestamp timestamp = formatter.parse((String) value); return Date.valueOf( Instant.ofEpochMilli(timestamp.getTime()).atZone(ZoneOffset.UTC).toLocalDate()); }; } public Converter dateFromIntConverter(TimeZone tz) { return value -> structuredTypeDateTimeConverter.getDate((Integer) value, tz); } public Converter timeFromStringConverter(SFBaseSession 
session) { return value -> { SnowflakeDateTimeFormat formatter = SnowflakeDateTimeFormat.fromSqlFormat( (String) session.getCommonParameters().get("TIME_OUTPUT_FORMAT")); SFTimestamp timestamp = formatter.parse((String) value); return Time.valueOf( Instant.ofEpochMilli(timestamp.getTime()).atZone(ZoneOffset.UTC).toLocalTime()); }; } public Converter timeFromIntConverter(int scale) { return value -> structuredTypeDateTimeConverter.getTime((Long) value, scale); } public Converter timestampFromStringConverter( int columnSubType, int columnType, int scale, SFBaseSession session, TimeZone tz, TimeZone sessionTimezone) { return value -> { Timestamp result = SfTimestampUtil.getTimestampFromType( columnSubType, (String) value, session, sessionTimezone, tz); if (result != null) { return result; } return getDateTimeConverter() .getTimestamp(value, columnType, columnSubType, TimeZone.getDefault(), scale); }; } public Converter timestampFromStructConverter( int columnType, int columnSubType, TimeZone tz, int scale) { return value -> structuredTypeDateTimeConverter.getTimestamp( (Map) value, columnType, columnSubType, tz, scale); } public Converter structConverter(ObjectMapper objectMapper) { return value -> { try { return objectMapper.readValue((String) value, Map.class); } catch (JsonProcessingException e) { throw new SFException(e, ErrorCode.INVALID_STRUCT_DATA); } }; } public Converter arrayConverter(ObjectMapper objectMapper) { return value -> { try { return objectMapper.readValue((String) value, Map[].class); } catch (JsonProcessingException e) { throw new SFException(e, ErrorCode.INVALID_STRUCT_DATA); } }; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/json/DateTimeConverter.java ================================================ package net.snowflake.client.internal.core.json; import java.sql.Date; import java.sql.Time; import java.sql.Timestamp; import java.sql.Types; import java.util.TimeZone; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.core.ResultUtil; import net.snowflake.client.internal.core.SFBaseSession; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.core.arrow.ArrowResultUtil; import net.snowflake.client.internal.jdbc.SnowflakeDateWithTimezone; import net.snowflake.client.internal.jdbc.SnowflakeTimeWithTimezone; import net.snowflake.client.internal.jdbc.SnowflakeTimestampWithTimezone; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import net.snowflake.common.core.SFTime; import net.snowflake.common.core.SFTimestamp; public class DateTimeConverter { private final TimeZone sessionTimeZone; private final long resultVersion; private final boolean honorClientTZForTimestampNTZ; private final boolean treatNTZAsUTC; private final boolean useSessionTimezone; private final boolean formatDateWithTimeZone; private final SFBaseSession session; public DateTimeConverter( TimeZone sessionTimeZone, SFBaseSession session, long resultVersion, boolean honorClientTZForTimestampNTZ, boolean treatNTZAsUTC, boolean useSessionTimezone, boolean formatDateWithTimeZone) { this.sessionTimeZone = sessionTimeZone; this.session = session; this.resultVersion = resultVersion; this.honorClientTZForTimestampNTZ = honorClientTZForTimestampNTZ; this.treatNTZAsUTC = treatNTZAsUTC; this.useSessionTimezone = useSessionTimezone; this.formatDateWithTimeZone = formatDateWithTimeZone; } public Timestamp getTimestamp( 
Object obj, int columnType, int columnSubType, TimeZone tz, int scale) throws SFException { if (obj == null) { return null; } if (Types.TIMESTAMP == columnType || Types.TIMESTAMP_WITH_TIMEZONE == columnType) { if (tz == null) { tz = TimeZone.getDefault(); } SFTimestamp sfTS = ResultUtil.getSFTimestamp( obj.toString(), scale, columnSubType, resultVersion, sessionTimeZone, session); Timestamp res = sfTS.getTimestamp(); if (res == null) { return null; } // If we want to display format with no session offset, we have to use session timezone for // ltz and tz types but UTC timezone for ntz type. if (useSessionTimezone) { if (columnSubType == SnowflakeType.EXTRA_TYPES_TIMESTAMP_LTZ || columnSubType == SnowflakeType.EXTRA_TYPES_TIMESTAMP_TZ) { TimeZone specificSessionTimezone = adjustTimezoneForTimestampTZ(obj, columnSubType); res = new SnowflakeTimestampWithTimezone(res, specificSessionTimezone); } else { res = new SnowflakeTimestampWithTimezone(res); } } // If timestamp type is NTZ and JDBC_TREAT_TIMESTAMP_NTZ_AS_UTC=true, keep // timezone in UTC to avoid daylight savings errors else if (treatNTZAsUTC && columnSubType == Types.TIMESTAMP) { res = new SnowflakeTimestampWithTimezone(res); } // If JDBC_TREAT_TIMESTAMP_NTZ_AS_UTC=false, default behavior is to honor // client timezone for NTZ time. Move NTZ timestamp offset to correspond to // client's timezone. JDBC_USE_SESSION_TIMEZONE overrides other params. if (columnSubType == Types.TIMESTAMP && ((!treatNTZAsUTC && honorClientTZForTimestampNTZ) || useSessionTimezone)) { res = sfTS.moveToTimeZone(tz).getTimestamp(); } // Adjust time if date happens before year 1582 for difference between // Julian and Gregorian calendars return ResultUtil.adjustTimestamp(res); } else if (Types.DATE == columnType) { Date d = getDate(obj, columnType, columnSubType, tz, scale); if (d == null) { return null; } return new Timestamp(d.getTime()); } else if (Types.TIME == columnType) { Time t = getTime(obj, columnType, columnSubType, tz, scale); if (t == null) { return null; } if (useSessionTimezone) { SFTime sfTime = ResultUtil.getSFTime(obj.toString(), scale, session); return new SnowflakeTimestampWithTimezone( sfTime.getFractionalSeconds(ResultUtil.DEFAULT_SCALE_OF_SFTIME_FRACTION_SECONDS), sfTime.getNanosecondsWithinSecond(), TimeZone.getTimeZone("UTC")); } return new Timestamp(t.getTime()); } else { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, columnType, SnowflakeUtil.TIMESTAMP_STR, obj); } } public Time getTime(Object obj, int columnType, int columnSubType, TimeZone tz, int scale) throws SFException { if (obj == null) { return null; } if (Types.TIME == columnType) { SFTime sfTime = ResultUtil.getSFTime(obj.toString(), scale, session); Time ts = new Time( sfTime.getFractionalSeconds(ResultUtil.DEFAULT_SCALE_OF_SFTIME_FRACTION_SECONDS)); if (useSessionTimezone) { ts = SnowflakeUtil.getTimeInSessionTimezone( SnowflakeUtil.getSecondsFromMillis(ts.getTime()), sfTime.getNanosecondsWithinSecond()); } return ts; } else if (Types.TIMESTAMP == columnType || Types.TIMESTAMP_WITH_TIMEZONE == columnType) { Timestamp ts = getTimestamp(obj, columnType, columnSubType, tz, scale); if (ts == null) { return null; } if (useSessionTimezone) { ts = getTimestamp(obj, columnType, columnSubType, sessionTimeZone, scale); TimeZone sessionTimeZone = adjustTimezoneForTimestampTZ(obj, columnSubType); return new SnowflakeTimeWithTimezone(ts, sessionTimeZone, useSessionTimezone); } return new Time(ts.getTime()); } else { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, 
columnType, SnowflakeUtil.TIME_STR, obj); } } public Date getDate(Object obj, int columnType, int columnSubType, TimeZone tz, int scale) throws SFException { if (obj == null) { return null; } if (Types.TIMESTAMP == columnType || Types.TIMESTAMP_WITH_TIMEZONE == columnType) { if (tz == null) { tz = TimeZone.getDefault(); } if (columnSubType == SnowflakeType.EXTRA_TYPES_TIMESTAMP_TZ || columnSubType == SnowflakeType.EXTRA_TYPES_TIMESTAMP_LTZ) { TimeZone specificSessionTimeZone = adjustTimezoneForTimestampTZ(obj, columnSubType); return new SnowflakeDateWithTimezone( getTimestamp(obj, columnType, columnSubType, tz, scale).getTime(), specificSessionTimeZone, useSessionTimezone); } return new Date(getTimestamp(obj, columnType, columnSubType, tz, scale).getTime()); } else if (Types.DATE == columnType) { if (tz == null || !formatDateWithTimeZone) { return ArrowResultUtil.getDate(Integer.parseInt((String) obj)); } return ArrowResultUtil.getDate(Integer.parseInt((String) obj), tz, sessionTimeZone); } // for Types.TIME and all other type, throw user error else { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, columnType, SnowflakeUtil.DATE_STR, obj); } } private TimeZone adjustTimezoneForTimestampTZ(Object obj, int columnSubType) { // If the timestamp is of type timestamp_tz, use the associated offset timezone instead of the // session timezone for formatting if (obj != null && columnSubType == SnowflakeType.EXTRA_TYPES_TIMESTAMP_TZ && resultVersion > 0) { String timestampStr = obj.toString(); int indexForSeparator = timestampStr.indexOf(' '); String timezoneIndexStr = timestampStr.substring(indexForSeparator + 1); return SFTimestamp.convertTimezoneIndexToTimeZone(Integer.parseInt(timezoneIndexStr)); } // By default, return session timezone return sessionTimeZone; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/json/NumberConverter.java ================================================ package net.snowflake.client.internal.core.json; import java.math.BigDecimal; import java.math.RoundingMode; import java.sql.Types; import java.time.Duration; import java.time.Period; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.core.arrow.ArrowVectorConverterUtil; import net.snowflake.client.internal.jdbc.SnowflakeUtil; public class NumberConverter { // Precision of maximum long value in Java (2^63-1). 
Precision is 19 private static final int LONG_PRECISION = 19; private static final BigDecimal MAX_LONG_VAL = new BigDecimal(Long.MAX_VALUE); private static final BigDecimal MIN_LONG_VAL = new BigDecimal(Long.MIN_VALUE); private static final int monthsInYear = 12; public byte getByte(Object obj) { if (obj == null) { return 0; } if (obj instanceof String) { return Byte.parseByte((String) obj); } else { return ((Number) obj).byteValue(); } } public Period getPeriod(Object obj, int columnType) throws SFException { if (obj == null) { return null; } try { long value; if (obj instanceof String) { String objString = (String) obj; value = Long.parseLong(objString); } else { value = ((Number) obj).longValue(); } return Period.of((int) (value / monthsInYear), (int) (value % monthsInYear), 0); } catch (NumberFormatException ex) { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, columnType, SnowflakeUtil.PERIOD_STR, obj); } } public Duration getDuration(Object obj, int columnType) throws SFException { if (obj == null) { return null; } try { BigDecimal numNanos; if (obj instanceof String) { String objString = (String) obj; numNanos = new BigDecimal(objString); } else { numNanos = getBigDecimal(obj, columnType); } try { return ArrowVectorConverterUtil.getDurationFromNanos(numNanos); } catch (ArithmeticException e) { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, columnType, "Duration", numNanos.toPlainString()); } } catch (NumberFormatException nfe) { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, columnType, SnowflakeUtil.DURATION_STR, obj); } } public short getShort(Object obj, int columnType) throws SFException { if (obj == null) { return 0; } try { if (obj instanceof String) { String objString = (String) obj; if (objString.contains(".") && (columnType == Types.FLOAT || columnType == Types.DOUBLE)) { objString = objString.substring(0, objString.indexOf(".")); } return Short.parseShort(objString); } else { return ((Number) obj).shortValue(); } } catch (NumberFormatException ex) { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, columnType, SnowflakeUtil.SHORT_STR, obj); } } public int getInt(Object obj, int columnType) throws SFException { if (obj == null) { return 0; } try { if (obj instanceof String) { String objString = (String) obj; if (objString.contains(".") && (columnType == Types.FLOAT || columnType == Types.DOUBLE)) { objString = objString.substring(0, objString.indexOf(".")); } return Integer.parseInt(objString); } else { return ((Number) obj).intValue(); } } catch (NumberFormatException ex) { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, columnType, SnowflakeUtil.INT_STR, obj); } } public long getLong(Object obj, int columnType) throws SFException { if (obj == null) { return 0; } try { if (obj instanceof String) { String objString = (String) obj; if (objString.contains(".") && (columnType == Types.FLOAT || columnType == Types.DOUBLE)) { objString = objString.substring(0, objString.indexOf(".")); } return Long.parseLong(objString); } else { return ((Number) obj).longValue(); } } catch (NumberFormatException nfe) { if (Types.INTEGER == columnType || Types.SMALLINT == columnType) { throw new SFException( ErrorCode.INTERNAL_ERROR, SnowflakeUtil.LONG_STR + ": " + obj.toString()); } else { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, columnType, SnowflakeUtil.LONG_STR, obj); } } } public BigDecimal getBigDecimal(Object obj, int columnType) throws SFException { if (obj == null) { return null; } try { if (columnType != Types.TIME && columnType != 
Types.TIMESTAMP && columnType != Types.TIMESTAMP_WITH_TIMEZONE) { return new BigDecimal(obj.toString()); } throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, columnType, SnowflakeUtil.BIG_DECIMAL_STR, obj); } catch (NumberFormatException ex) { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, columnType, SnowflakeUtil.BIG_DECIMAL_STR, obj); } } public BigDecimal getBigDecimal(Object obj, int columnType, Integer scale) throws SFException { if (obj == null) { return null; } BigDecimal value = getBigDecimal(obj.toString(), columnType); value = value.setScale(scale, RoundingMode.HALF_UP); return value; } public float getFloat(Object obj, int columnType) throws SFException { if (obj == null) { return 0; } try { if (obj instanceof String) { if (columnType != Types.TIME && columnType != Types.TIMESTAMP && columnType != Types.TIMESTAMP_WITH_TIMEZONE) { if ("inf".equals(obj)) { return Float.POSITIVE_INFINITY; } else if ("-inf".equals(obj)) { return Float.NEGATIVE_INFINITY; } else { return Float.parseFloat((String) obj); } } throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, columnType, SnowflakeUtil.FLOAT_STR, obj); } else { return ((Number) obj).floatValue(); } } catch (NumberFormatException ex) { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, columnType, SnowflakeUtil.FLOAT_STR, obj); } } public double getDouble(Object obj, int columnType) throws SFException { if (obj == null) { return 0; } try { if (obj instanceof String) { if (columnType != Types.TIME && columnType != Types.TIMESTAMP && columnType != Types.TIMESTAMP_WITH_TIMEZONE) { if ("inf".equals(obj)) { return Double.POSITIVE_INFINITY; } else if ("-inf".equals(obj)) { return Double.NEGATIVE_INFINITY; } else { return Double.parseDouble((String) obj); } } throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, columnType, SnowflakeUtil.DOUBLE_STR, obj); } else { return ((Number) obj).doubleValue(); } } catch (NumberFormatException ex) { throw new SFException( ErrorCode.INVALID_VALUE_CONVERT, columnType, SnowflakeUtil.DOUBLE_STR, obj); } } public Object getBigInt(Object obj, int columnType) throws SFException { // If precision is < precision of max long precision, we can automatically convert to long. // Otherwise, do a check to ensure it doesn't overflow max long value. 
if (obj == null) { return null; } String numberAsString = obj.toString(); if (numberAsString.length() >= LONG_PRECISION) { BigDecimal bigNum = getBigDecimal(obj, columnType); if (bigNum.compareTo(MAX_LONG_VAL) == 1 || bigNum.compareTo(MIN_LONG_VAL) == -1) { return bigNum; } } return getLong(obj, columnType); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/json/StringConverter.java ================================================ package net.snowflake.client.internal.core.json; import java.sql.Date; import java.sql.Types; import java.util.TimeZone; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.common.core.SFBinary; import net.snowflake.client.internal.common.core.SFBinaryFormat; import net.snowflake.client.internal.core.ResultUtil; import net.snowflake.client.internal.core.SFBaseSession; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.log.ArgSupplier; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.common.core.SFTime; import net.snowflake.common.core.SFTimestamp; import net.snowflake.common.core.SnowflakeDateTimeFormat; public class StringConverter { private static final SFLogger logger = SFLoggerFactory.getLogger(StringConverter.class); private final TimeZone sessionTimeZone; private final SFBinaryFormat binaryFormatter; private final SnowflakeDateTimeFormat dateFormatter; private final SnowflakeDateTimeFormat timeFormatter; private final SnowflakeDateTimeFormat timestampNTZFormatter; private final SnowflakeDateTimeFormat timestampLTZFormatter; private final SnowflakeDateTimeFormat timestampTZFormatter; private final long resultVersion; private final SFBaseSession session; private final Converters converters; public StringConverter( TimeZone sessionTimeZone, SFBinaryFormat binaryFormatter, SnowflakeDateTimeFormat dateFormatter, SnowflakeDateTimeFormat timeFormatter, SnowflakeDateTimeFormat timestampNTZFormatter, SnowflakeDateTimeFormat timestampLTZFormatter, SnowflakeDateTimeFormat timestampTZFormatter, long resultVersion, SFBaseSession session, Converters converters) { this.sessionTimeZone = sessionTimeZone; this.binaryFormatter = binaryFormatter; this.dateFormatter = dateFormatter; this.timeFormatter = timeFormatter; this.timestampNTZFormatter = timestampNTZFormatter; this.timestampLTZFormatter = timestampLTZFormatter; this.timestampTZFormatter = timestampTZFormatter; this.resultVersion = resultVersion; this.session = session; this.converters = converters; } public String getString(Object obj, int columnType, int columnSubType, int scale) throws SFException { if (obj == null) { return null; } switch (columnType) { case Types.BOOLEAN: return ResultUtil.getBooleanAsString(ResultUtil.getBoolean(obj.toString())); case Types.TIMESTAMP: case SnowflakeType.EXTRA_TYPES_TIMESTAMP_LTZ: case SnowflakeType.EXTRA_TYPES_TIMESTAMP_TZ: return timestampToString(obj, columnType, columnSubType, scale); case Types.DATE: return dateToString(obj, columnType, columnSubType, scale); case Types.TIME: return timeToString(obj, scale); case Types.BINARY: return binaryToString(obj, columnType, columnSubType, scale); default: break; } return obj.toString(); } private String timestampToString(Object obj, int columnType, int columnSubType, int scale) throws SFException { SFTimestamp sfTS = ResultUtil.getSFTimestamp( obj.toString(), 
scale, columnSubType, resultVersion, sessionTimeZone, session); String timestampStr = ResultUtil.getSFTimestampAsString( sfTS, columnType, scale, timestampNTZFormatter, timestampLTZFormatter, timestampTZFormatter, session); logger.debug( "Converting timestamp to string from: {} to: {}", (ArgSupplier) obj::toString, timestampStr); return timestampStr; } private String dateToString(Object obj, int columnType, int columnSubType, int scale) throws SFException { Date date = converters .getDateTimeConverter() .getDate(obj, columnType, columnSubType, TimeZone.getDefault(), scale); if (dateFormatter == null) { throw new SFException(ErrorCode.INTERNAL_ERROR, "missing date formatter"); } String dateStr = ResultUtil.getDateAsString(date, dateFormatter); logger.debug("Converting date to string from: {} to: {}", (ArgSupplier) obj::toString, dateStr); return dateStr; } private String timeToString(Object obj, int scale) throws SFException { SFTime sfTime = ResultUtil.getSFTime(obj.toString(), scale, session); if (timeFormatter == null) { throw new SFException(ErrorCode.INTERNAL_ERROR, "missing time formatter"); } String timeStr = ResultUtil.getSFTimeAsString(sfTime, scale, timeFormatter); logger.debug("Converting time to string from: {} to: {}", (ArgSupplier) obj::toString, timeStr); return timeStr; } private String binaryToString(Object obj, int columnType, int columnSubType, int scale) throws SFException { if (binaryFormatter == null) { throw new SFException(ErrorCode.INTERNAL_ERROR, "missing binary formatter"); } if (binaryFormatter == SFBinaryFormat.HEX) { // Shortcut: the values are already passed with hex encoding, so just // return the string unchanged rather than constructing an SFBinary. return obj.toString(); } SFBinary sfb = new SFBinary( converters.getBytesConverter().getBytes(obj, columnType, columnSubType, scale)); return binaryFormatter.format(sfb); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/minicore/Minicore.java ================================================ package net.snowflake.client.internal.core.minicore; import java.util.Collections; import java.util.concurrent.CompletableFuture; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; public class Minicore { private static final SFLogger logger = SFLoggerFactory.getLogger(Minicore.class); public static final String DISABLE_MINICORE_ENV_VAR = "SNOWFLAKE_DISABLE_MINICORE"; public static final String LIBRARY_BASE_NAME = "libsf_mini_core"; private static volatile Minicore INSTANCE; private static volatile CompletableFuture INITIALIZATION_FUTURE; private static boolean DISABLED_VIA_ENV_VAR = false; private final MinicoreLoadResult loadResult; private final MinicoreLibrary library; private Minicore(MinicoreLoadResult loadResult, MinicoreLibrary library) { this.loadResult = loadResult; this.library = library; } public static synchronized void initializeAsync() { if (INITIALIZATION_FUTURE != null) { return; // Already started } // Check if minicore is disabled via environment variable if (isMinicoreDisabled()) { logger.debug( "Minicore initialization disabled via {} environment variable", DISABLE_MINICORE_ENV_VAR); DISABLED_VIA_ENV_VAR = true; INITIALIZATION_FUTURE = CompletableFuture.completedFuture(null); return; } INITIALIZATION_FUTURE = CompletableFuture.runAsync( () -> { try { logger.trace("Starting async minicore initialization"); MinicoreLoader loader = new 
MinicoreLoader(); MinicoreLoadResult result = loader.loadLibrary(); INSTANCE = new Minicore(result, result.getLibrary()); } catch (Exception e) { logger.debug("Unexpected error during minicore initialization", e); MinicoreLoadResult failedResult = MinicoreLoadResult.failure( "Unexpected initialization error: " + e.getMessage(), null, e, Collections.emptyList()); INSTANCE = new Minicore(failedResult, null); } }); } private static boolean isMinicoreDisabled() { String envValue = SnowflakeUtil.systemGetEnv(DISABLE_MINICORE_ENV_VAR); return envValue != null && envValue.equalsIgnoreCase("true"); } public static synchronized void initialize() { if (INSTANCE != null) { return; } if (INITIALIZATION_FUTURE == null) { initializeAsync(); } try { INITIALIZATION_FUTURE.join(); } catch (Exception e) { logger.error("Failed to initialize minicore", e); } } public static Minicore getInstance() { return INSTANCE; } public MinicoreLibrary getLibrary() { return library; } public MinicoreLoadResult getLoadResult() { return loadResult; } public static synchronized boolean hasInitializationStarted() { return INITIALIZATION_FUTURE != null; } public static boolean isDisabledViaEnvVar() { return DISABLED_VIA_ENV_VAR; } // This method is for testing only. Do not use in production code public static synchronized void resetForTesting() { INSTANCE = null; INITIALIZATION_FUTURE = null; DISABLED_VIA_ENV_VAR = false; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/minicore/MinicoreLibrary.java ================================================ package net.snowflake.client.internal.core.minicore; import com.sun.jna.Library; /** JNA interface for the Snowflake minicore native library. */ public interface MinicoreLibrary extends Library { /** * Get the full version string from the minicore library. 
This method maps to the C function: * const char* sf_core_full_version(); */ String sf_core_full_version(); } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/minicore/MinicoreLoadError.java ================================================ package net.snowflake.client.internal.core.minicore; public enum MinicoreLoadError { DISABLED("Minicore is disabled with SNOWFLAKE_DISABLE_MINICORE env variable"), FAILED_TO_LOAD("Failed to load binary"), STILL_LOADING("Minicore is still loading"); private final String message; MinicoreLoadError(String message) { this.message = message; } public String getMessage() { return message; } @Override public String toString() { return message; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/minicore/MinicoreLoadLogger.java ================================================ package net.snowflake.client.internal.core.minicore; import java.util.ArrayList; import java.util.Collections; import java.util.List; public class MinicoreLoadLogger { private final long startTimeNanos; private final List logs; public MinicoreLoadLogger() { this.startTimeNanos = System.nanoTime(); this.logs = new ArrayList<>(); } public void log(String message) { long elapsedNanos = System.nanoTime() - startTimeNanos; double elapsedMs = elapsedNanos / 1_000_000.0; String timestampedMessage = String.format("[%.6fms] %s", elapsedMs, message); logs.add(timestampedMessage); } public List getLogs() { return Collections.unmodifiableList(logs); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/minicore/MinicoreLoadResult.java ================================================ package net.snowflake.client.internal.core.minicore; import java.util.ArrayList; import java.util.Collections; import java.util.List; public class MinicoreLoadResult { private final boolean success; private final String errorMessage; private final String libraryFileName; private final MinicoreLibrary library; private final String coreVersion; private final Throwable exception; private final List logs; private MinicoreLoadResult( boolean success, String errorMessage, String libraryFileName, MinicoreLibrary library, String coreVersion, Throwable exception, List logs) { this.success = success; this.errorMessage = errorMessage; this.libraryFileName = libraryFileName; this.library = library; this.coreVersion = coreVersion; this.exception = exception; this.logs = logs != null ? 
logs : new ArrayList<>(); } public static MinicoreLoadResult success( String libraryFileName, MinicoreLibrary library, String coreVersion, List logs) { return new MinicoreLoadResult(true, null, libraryFileName, library, coreVersion, null, logs); } public static MinicoreLoadResult failure( String errorMessage, String libraryFileName, Throwable exception, List logs) { return new MinicoreLoadResult( false, errorMessage, libraryFileName, null, null, exception, logs); } public boolean isSuccess() { return success; } public String getErrorMessage() { return errorMessage; } public Throwable getException() { return exception; } public String getLibraryFileName() { return libraryFileName; } public MinicoreLibrary getLibrary() { return library; } public String getCoreVersion() { return coreVersion; } public List getLogs() { return Collections.unmodifiableList(logs); } @Override public String toString() { if (success) { return String.format( "MinicoreLoadResult{success=true, libraryFileName='%s', version='%s'}", libraryFileName, coreVersion); } else { return String.format( "MinicoreLoadResult{success=false, error='%s', exception=%s}", errorMessage, exception != null ? exception.getClass().getSimpleName() : "none"); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/minicore/MinicoreLoader.java ================================================ package net.snowflake.client.internal.core.minicore; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import com.sun.jna.Native; import java.io.IOException; import java.io.InputStream; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.nio.file.attribute.PosixFilePermission; import java.util.EnumSet; import net.snowflake.client.internal.core.Constants; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import org.apache.commons.io.IOUtils; public class MinicoreLoader { private enum DirectoryType { TEMP("temp"), HOME("home cache"), CWD("working"); private String name; DirectoryType(String name) { this.name = name; } } private static final SFLogger logger = SFLoggerFactory.getLogger(MinicoreLoader.class); private static final String TEMP_DIR_PREFIX = "snowflake-minicore-"; private MinicoreLoadResult loadResult; private final MinicoreLoadLogger loadLogger = new MinicoreLoadLogger(); private final MinicorePlatform platform = new MinicorePlatform(); public synchronized MinicoreLoadResult loadLibrary() { if (loadResult != null) { return loadResult; } loadLogger.log("Starting minicore loading"); loadLogger.log( "Detected platform: OS=" + platform.getOsName() + ", Arch=" + platform.getOsArch()); if (!platform.isSupported()) { loadLogger.log("Platform not supported"); loadResult = failure( "Unsupported platform: OS=" + platform.getOsName() + ", Arch=" + platform.getOsArch(), null); return loadResult; } loadLogger.log("Platform supported: " + platform.getPlatformIdentifier()); loadResult = loadFromJar(); return loadResult; } private MinicoreLoadResult loadFromJar() { String resourcePath = platform.getLibraryPath(); loadLogger.log("Library resource path: " + resourcePath); byte[] libraryBytes = readLibraryFromJar(resourcePath); if (libraryBytes == null) { return failure("Library resource not found in JAR: " + resourcePath, null); } MinicoreLoadResult result = tryDirectory(DirectoryType.TEMP, libraryBytes, this::createTempDirectory); if (result != null) { return result; } result = 
tryDirectory(DirectoryType.HOME, libraryBytes, this::getOrCreateHomeCacheDirectory); if (result != null) { return result; } result = tryDirectory(DirectoryType.CWD, libraryBytes, this::getWorkingDirectory); if (result != null) { return result; } loadLogger.log("No writable directory found"); return failure("No writable directory found (tried: temp, home cache, working dir)", null); } private byte[] readLibraryFromJar(String resourcePath) { try (InputStream stream = MinicoreLoader.class.getResourceAsStream(resourcePath)) { if (stream == null) { loadLogger.log("Library resource not found in JAR"); return null; } loadLogger.log("Library resource found in JAR"); return IOUtils.toByteArray(stream); } catch (IOException e) { loadLogger.log("Failed to read library from JAR: " + e.getMessage()); return null; } } private MinicoreLoadResult tryDirectory( DirectoryType directoryType, byte[] libraryBytes, DirectorySupplier supplier) { Path targetPath = null; Path createdTempDir = null; try { Path directory = supplier.get(); if (directory == null) { return null; } // Track if this is a temp directory we created (so we can clean it up on failure) if (directoryType == DirectoryType.TEMP) { createdTempDir = directory; } targetPath = directory.resolve(platform.getLibraryFileName()); loadLogger.log("Trying " + directoryType.name + " directory: " + directory); return writeLoadAndCleanup(targetPath, libraryBytes); } catch (Exception e) { loadLogger.log("Failed to use " + directoryType.name + " directory: " + e.getMessage()); cleanup(targetPath, createdTempDir); return null; } } private Path createTempDirectory() throws IOException { Path tempDir = Files.createTempDirectory(TEMP_DIR_PREFIX); setDirectoryPermissions(tempDir); return tempDir; } private Path getOrCreateHomeCacheDirectory() throws IOException { Path cacheDir = getHomeCacheDirectory(); if (cacheDir == null) { return null; } if (!Files.exists(cacheDir)) { Files.createDirectories(cacheDir); } // Always ensure correct permissions (may have been created by another process) setDirectoryPermissions(cacheDir); return cacheDir; } private Path getWorkingDirectory() { String cwd = systemGetProperty("user.dir"); return (cwd != null && !cwd.isEmpty()) ? Paths.get(cwd) : null; } /** * Returns the OS-specific cache directory path: * *
* <ul>
*   <li>Windows: %USERPROFILE%/AppData/Local/Snowflake/Caches/minicore/
*   <li>MacOS: $HOME/Library/Caches/Snowflake/minicore/
*   <li>Other: $HOME/.cache/Snowflake/minicore/
* </ul>
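*
* <p>For example, on Linux with {@code user.home=/home/jane} (a hypothetical value), this resolves
* to {@code /home/jane/.cache/Snowflake/minicore}.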
*/ Path getHomeCacheDirectory() { String home = systemGetProperty("user.home"); if (home == null || home.isEmpty()) { return null; } switch (platform.getOs()) { case WINDOWS: return Paths.get(home, "AppData", "Local", "Snowflake", "Caches", "minicore"); case MAC: return Paths.get(home, "Library", "Caches", "Snowflake", "minicore"); default: return Paths.get(home, ".cache", "Snowflake", "minicore"); } } private MinicoreLoadResult writeLoadAndCleanup(Path targetPath, byte[] libraryBytes) throws IOException { Files.write(targetPath, libraryBytes); loadLogger.log("Wrote library to: " + targetPath); setFilePermissions(targetPath); try { loadLogger.log("Loading library"); MinicoreLibrary library = Native.load(targetPath.toAbsolutePath().toString(), MinicoreLibrary.class); loadLogger.log("Library loaded successfully"); return getVersionAndCreateResult(library); } finally { deleteQuietly(targetPath); loadLogger.log("Deleted library file"); } } private MinicoreLoadResult getVersionAndCreateResult(MinicoreLibrary library) { try { String version = library.sf_core_full_version(); loadLogger.log("Library version: " + version); return MinicoreLoadResult.success( platform.getLibraryFileName(), library, version, loadLogger.getLogs()); } catch (UnsatisfiedLinkError e) { loadLogger.log("Library missing sf_core_full_version symbol: " + e.getMessage()); return failure("Library missing required symbol: sf_core_full_version", e); } catch (Exception e) { loadLogger.log("Failed to get library version: " + e.getMessage()); return failure("Failed to get library version: " + e.getMessage(), e); } } private void cleanup(Path filePath, Path tempDirectory) { deleteQuietly(filePath); deleteQuietly(tempDirectory); } private void deleteQuietly(Path path) { try { Files.deleteIfExists(path); } catch (IOException e) { logger.trace("Failed to delete: {}", e.getMessage()); } } private MinicoreLoadResult failure(String message, Throwable cause) { return MinicoreLoadResult.failure( message, platform.getLibraryFileName(), cause, loadLogger.getLogs()); } private void setDirectoryPermissions(Path path) throws IOException { setPermissions(path, true); } private void setFilePermissions(Path path) throws IOException { setPermissions(path, false); } private void setPermissions(Path path, boolean executable) throws IOException { if (platform.getOs() == Constants.OS.WINDOWS) { path.toFile().setReadable(true, true); path.toFile().setWritable(true, true); path.toFile().setExecutable(executable, true); } else { EnumSet perms = EnumSet.of(PosixFilePermission.OWNER_READ, PosixFilePermission.OWNER_WRITE); if (executable) { perms.add(PosixFilePermission.OWNER_EXECUTE); } Files.setPosixFilePermissions(path, perms); } } @FunctionalInterface private interface DirectorySupplier { Path get() throws IOException; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/minicore/MinicorePlatform.java ================================================ package net.snowflake.client.internal.core.minicore; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import net.snowflake.client.internal.core.Constants; import net.snowflake.client.internal.core.Constants.Architecture; import net.snowflake.client.internal.core.Constants.OS; import net.snowflake.client.internal.util.LibcDetails; public class MinicorePlatform { private final OS os; private final Architecture architecture; private final String osName; private final String osArch; private final String resourcePath; private 
final boolean supported; public MinicorePlatform() { this.osName = systemGetProperty("os.name"); this.osArch = systemGetProperty("os.arch"); this.os = Constants.getOS(); this.architecture = Constants.getArchitecture(); this.resourcePath = buildResourcePath(); this.supported = resourcePath != null && MinicorePlatform.class.getResource(resourcePath) != null; } public boolean isSupported() { return supported; } public String getLibraryPath() { if (!supported) { throw new UnsupportedOperationException( String.format( "Minicore library not available for platform: OS=%s (%s), Arch=%s (%s)", os, osName, architecture, osArch)); } return resourcePath; } private String buildResourcePath() { String fileName = getLibraryFileName(); if (fileName == null) { return null; } // Flat structure: /minicore/{filename} return "/minicore/" + fileName; } public String getPlatformIdentifier() { String osId = getOsIdentifier(); if (osId == null || architecture == Architecture.UNKNOWN) { return null; } String archId = architecture.getIdentifier(); // For Linux, add libc family: linux-x86_64-glibc or linux-aarch64-musl String libcFamily = LibcDetails.load().getFamily(); if (libcFamily != null) { return osId + "-" + archId + "-" + libcFamily; } // For non-Linux platforms (libcFamily is null), just os-arch return osId + "-" + archId; } /** Get OS identifier (used in filenames). Returns: macos, linux, windows, aix. */ private String getOsIdentifier() { if (os == null) { return null; } switch (os) { case LINUX: if (Constants.isAix()) { return "aix"; } return "linux"; case MAC: return "macos"; case WINDOWS: return "windows"; default: return null; } } private String getLibraryExtension() { if (os == null) { return ""; } if (Constants.isAix()) { return ".so"; } switch (os) { case WINDOWS: return ".dll"; case MAC: return ".dylib"; default: // Linux included. return ".so"; } } /** * Get the library filename with platform encoding. * *
* <p>Format: {base_name}_{os}_{arch}[_{libc}]{extension}
*
* <p>Examples:
*
* <ul>
*   <li>Linux x86_64 glibc: {@code libsf_mini_core_linux_x86_64_glibc.so}
*   <li>Linux aarch64 musl: {@code libsf_mini_core_linux_aarch64_musl.so}
*   <li>macOS x86_64: {@code libsf_mini_core_macos_x86_64.dylib}
*   <li>macOS aarch64: {@code libsf_mini_core_macos_aarch64.dylib}
*   <li>Windows x86_64: {@code libsf_mini_core_windows_x86_64.dll}
*   <li>AIX ppc64: {@code libsf_mini_core_aix_ppc64.so}
* </ul>
*/ public String getLibraryFileName() { String osId = getOsIdentifier(); if (osId == null || architecture == Architecture.UNKNOWN) { return null; } String archId = architecture.getIdentifier(); StringBuilder fileName = new StringBuilder(); fileName.append(Minicore.LIBRARY_BASE_NAME); fileName.append("_").append(osId); fileName.append("_").append(archId); // For Linux, add libc family String libcFamily = LibcDetails.load().getFamily(); if (libcFamily != null) { fileName.append("_").append(libcFamily); } fileName.append(getLibraryExtension()); return fileName.toString(); } public OS getOs() { return os; } public String getOsName() { return osName; } public String getOsArch() { return osArch; } @Override public String toString() { return String.format( "MinicorePlatform{os=%s, arch=%s, osName='%s', osArch='%s', supported=%s}", os, architecture, osName, osArch, isSupported()); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/minicore/MinicoreTelemetry.java ================================================ package net.snowflake.client.internal.core.minicore; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.ObjectNode; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import net.snowflake.client.internal.core.Constants; import net.snowflake.client.internal.core.ObjectMapperFactory; import net.snowflake.client.internal.util.SecretDetector; /** * Telemetry data for minicore library loading and platform information. * *
* <p>This class encapsulates all telemetry information related to minicore that should be included
* in the CLIENT_ENVIRONMENT section of the login-request.
*
* <p>Fields:
*
* <ul>
*   <li>ISA: Instruction Set Architecture (e.g., "amd64", "arm64")
*   <li>CORE_VERSION: Result of sf_core_full_version() if successful
*   <li>CORE_FILE_NAME: Binary library file name that the driver tried to load
*   <li>CORE_LOAD_ERROR: One of three error states from {@link MinicoreLoadError}
*   <li>loadLogs: List of log messages with detailed error info from the loading process
* </ul>
*
* <p>
Note: OS, OS_VERSION, and OS_DETAILS are set by SessionUtil.createClientEnvironmentInfo() */ public class MinicoreTelemetry { private final String isa; private final String coreVersion; private final String coreFileName; private final MinicoreLoadError coreLoadError; private final List coreLoadLogs; private MinicoreTelemetry( String isa, String coreVersion, String coreFileName, MinicoreLoadError coreLoadError, List coreLoadLogs) { this.isa = isa; this.coreVersion = coreVersion; this.coreFileName = coreFileName; this.coreLoadError = coreLoadError; this.coreLoadLogs = coreLoadLogs != null ? coreLoadLogs : new ArrayList<>(); } /** * Create telemetry based on current Minicore state. * *
* <p>Handles three error cases:
*
* <ul>
*   <li>Disabled via env var: {@link MinicoreLoadError#DISABLED}
*   <li>Still loading: {@link MinicoreLoadError#STILL_LOADING}
*   <li>Failed to load: {@link MinicoreLoadError#FAILED_TO_LOAD}
* </ul>
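*
* <p>A minimal usage sketch (illustrative; the map's generic types are assumed, since they are
* elided in this extract):
*
* <pre>{@code
* MinicoreTelemetry telemetry = MinicoreTelemetry.create();
* // Merged into the CLIENT_ENVIRONMENT section of the login request
* Map<String, String> env = telemetry.toClientEnvironmentTelemetryMap();
* }</pre>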
*/ public static MinicoreTelemetry create() { String isa = Constants.getArchitecture().getIdentifier(); // Check if disabled via environment variable if (Minicore.isDisabledViaEnvVar()) { return new MinicoreTelemetry(isa, null, null, MinicoreLoadError.DISABLED, null); } // Check if initialization hasn't started or instance not yet available Minicore minicore = Minicore.getInstance(); if (minicore == null) { return new MinicoreTelemetry(isa, null, null, MinicoreLoadError.STILL_LOADING, null); } MinicoreLoadResult result = minicore.getLoadResult(); if (result == null) { return new MinicoreTelemetry(isa, null, null, MinicoreLoadError.STILL_LOADING, null); } return fromLoadResult(result); } /** Create telemetry from a successful or failed load result. */ public static MinicoreTelemetry fromLoadResult(MinicoreLoadResult loadResult) { String isa = Constants.getArchitecture().getIdentifier(); String coreVersion = loadResult.getCoreVersion(); String coreFileName = loadResult.getLibraryFileName(); List logs = new ArrayList<>(loadResult.getLogs()); if (loadResult.isSuccess()) { return new MinicoreTelemetry(isa, coreVersion, coreFileName, null, logs); } // For failures, add the detailed error message to logs for visibility String detailedError = loadResult.getErrorMessage(); if (detailedError != null && !detailedError.isEmpty()) { logs.add("Error: " + detailedError); } Throwable exception = loadResult.getException(); if (exception != null) { logs.add("Exception: " + exception.getClass().getName() + ": " + exception.getMessage()); } return new MinicoreTelemetry( isa, coreVersion, coreFileName, MinicoreLoadError.FAILED_TO_LOAD, logs); } // Convert telemetry data to Map for client environment telemetry. Load logs are not included. public Map toClientEnvironmentTelemetryMap() { Map map = new HashMap<>(); if (isa != null) { map.put("ISA", SecretDetector.maskSecrets(isa)); } if (coreVersion != null) { map.put("CORE_VERSION", SecretDetector.maskSecrets(coreVersion)); } if (coreFileName != null) { map.put("CORE_FILE_NAME", SecretDetector.maskSecrets(coreFileName)); } if (coreLoadError != null) { map.put("CORE_LOAD_ERROR", SecretDetector.maskSecrets(coreLoadError.getMessage())); } return map; } // Convert telemetry data to ObjectNode for in-band telemetry. public ObjectNode toInBandTelemetryNode() { ObjectMapper mapper = ObjectMapperFactory.getObjectMapper(); ObjectNode message = mapper.createObjectNode(); message.put("type", "client_minicore_load"); message.put("source", "JDBC"); message.put("success", coreLoadError == null); if (coreFileName != null) { message.put("libraryFileName", SecretDetector.maskSecrets(coreFileName)); } if (coreVersion != null) { message.put("coreVersion", SecretDetector.maskSecrets(coreVersion)); } if (coreLoadError != null) { message.put("error", SecretDetector.maskSecrets(coreLoadError.getMessage())); } if (!coreLoadLogs.isEmpty()) { ArrayNode logsArray = message.putArray("loadLogs"); coreLoadLogs.stream().map(SecretDetector::maskSecrets).forEach(logsArray::add); } return message; } @Override public String toString() { return String.format( "MinicoreTelemetry{isa='%s', coreVersion='%s', coreFileName='%s', coreLoadError='%s', logs=%d entries}", isa, coreVersion, coreFileName, coreLoadError != null ? 
coreLoadError.getMessage() : null, coreLoadLogs.size()); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/structs/SQLDataCreationHelper.java ================================================ package net.snowflake.client.internal.core.structs; import java.sql.SQLData; import java.sql.SQLException; import java.util.Optional; import java.util.function.Supplier; public class SQLDataCreationHelper { public static T create(Class type) throws SQLException { Optional> typeFactory = SnowflakeObjectTypeFactories.get(type); return (T) typeFactory .map(Supplier::get) .orElseGet(() -> createUsingReflection((Class) type)); } private static SQLData createUsingReflection(Class type) { try { return type.newInstance(); } catch (InstantiationException | IllegalAccessException e) { throw new RuntimeException(e); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/core/structs/SnowflakeObjectTypeFactories.java ================================================ package net.snowflake.client.internal.core.structs; import java.sql.SQLData; import java.util.Map; import java.util.Objects; import java.util.Optional; import java.util.concurrent.ConcurrentHashMap; import java.util.function.Supplier; public class SnowflakeObjectTypeFactories { private static final Map, Supplier> factories = new ConcurrentHashMap<>(); public static void register(Class type, Supplier factory) { Objects.requireNonNull((Object) type, "type cannot be null"); Objects.requireNonNull((Object) factory, "factory cannot be null"); factories.put(type, factory); } public static void unregister(Class type) { Objects.requireNonNull((Object) type, "type cannot be null"); factories.remove(type); } public static Optional> get(Class type) { Objects.requireNonNull((Object) type, "type cannot be null"); return Optional.ofNullable(factories.get(type)); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/driver/AutoConfigurationHelper.java ================================================ package net.snowflake.client.internal.driver; import java.util.Properties; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.config.ConnectionParameters; import net.snowflake.client.internal.config.SFConnectionConfigParser; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; /** * Helper for handling JDBC auto-configuration via connections.toml file. * *
* <p>Auto-configuration allows users to specify "jdbc:snowflake:auto" as the connection URL and
* have connection parameters loaded from a configuration file. This provides a convenient way to
* manage connection settings without hardcoding them in application code.
*
* <p>Configuration files are typically located in:
*
* <ul>
*   <li>~/.snowflake/connections.toml (user-level)
*   <li>Project-specific locations as configured
* </ul>
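*
* <p>A minimal usage sketch, assuming a default connection is defined in connections.toml
* (illustrative only):
*
* <pre>{@code
* // Parameters (account, user, authenticator, ...) are resolved from the configuration file.
* Connection conn = DriverManager.getConnection("jdbc:snowflake:auto");
* }</pre>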
*/ public final class AutoConfigurationHelper { private static final SFLogger logger = SFLoggerFactory.getLogger(AutoConfigurationHelper.class); /** * The URL prefix that indicates auto-configuration should be used. * *
When a connection URL starts with or equals this prefix, the driver will attempt to load * connection parameters from a configuration file. */ public static final String AUTO_CONNECTION_PREFIX = "jdbc:snowflake:auto"; private AutoConfigurationHelper() { // Utility class - prevent instantiation } /** * Check if the URL indicates auto-configuration should be used. * * @param url the JDBC connection URL * @return true if auto-configuration is enabled */ public static boolean isAutoConfigurationUrl(String url) { return url != null && url.contains(AUTO_CONNECTION_PREFIX); } /** * Load connection parameters from configuration file or use provided parameters. * *
If the URL contains the auto-configuration prefix, this method attempts to load parameters * from the configuration file. Otherwise, it creates ConnectionParameters from the provided URL * and info Properties. * * @param url JDBC connection URL * @param info connection properties * @return ConnectionParameters with resolved configuration * @throws SnowflakeSQLException if auto-configuration is requested but fails */ public static ConnectionParameters resolveConnectionParameters(String url, Properties info) throws SnowflakeSQLException { if (isAutoConfigurationUrl(url)) { logger.debug( "JDBC connection initializing with URL '{}'. Autoconfiguration is enabled.", AUTO_CONNECTION_PREFIX); ConnectionParameters params = SFConnectionConfigParser.buildConnectionParameters(url); if (params == null) { throw new SnowflakeSQLException( "Unavailable connection configuration parameters expected for " + "auto configuration using file"); } return params; } else { return new ConnectionParameters(url, info); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/driver/ConnectionFactory.java ================================================ package net.snowflake.client.internal.driver; import java.sql.Connection; import java.sql.SQLException; import java.util.Properties; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.api.implementation.connection.SnowflakeConnectionImpl; import net.snowflake.client.internal.config.ConnectionParameters; import net.snowflake.client.internal.jdbc.SnowflakeConnectString; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; /** * Factory for creating Snowflake JDBC connections. * *
* <p>This class handles the validation and creation of connections from JDBC URLs and properties.
* It supports both standard connection URLs and auto-configuration.
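*
* <p>A minimal usage sketch (the account URL and credentials below are illustrative):
*
* <pre>{@code
* Properties info = new Properties();
* info.put("user", "jane");
* info.put("password", "secret");
* Connection conn =
*     ConnectionFactory.createConnection("jdbc:snowflake://myaccount.snowflakecomputing.com", info);
* }</pre>
*
* <p>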
This class is thread-safe and stateless. */ public final class ConnectionFactory { private static final SFLogger logger = SFLoggerFactory.getLogger(ConnectionFactory.class); private ConnectionFactory() { // Utility class - prevent instantiation } /** * Creates a connection to the Snowflake database. * *
This method validates the URL, resolves connection parameters (including auto-configuration * if applicable), parses the connection string, and creates a new connection instance. * * @param url the database URL * @param info additional connection properties * @return a Connection object, or null if the URL is not accepted by this driver (per JDBC spec) * @throws SQLException if a database access error occurs or the connection parameters are invalid */ public static Connection createConnection(String url, Properties info) throws SQLException { // Resolve connection parameters (handles auto-configuration if needed) ConnectionParameters params = AutoConfigurationHelper.resolveConnectionParameters(url, info); // Validate URL is not null if (params.getUrl() == null) { throw new SnowflakeSQLException("Unable to connect to url of 'null'."); } // Check if URL has supported prefix (return null if not, per JDBC spec) if (!SnowflakeConnectString.hasSupportedPrefix(params.getUrl())) { // Per JDBC spec: Driver.connect() should return null if URL is not recognized return null; } // Parse and validate the connection string SnowflakeConnectString connectString = SnowflakeConnectString.parse(params.getUrl(), params.getParams()); if (!connectString.isValid()) { throw new SnowflakeSQLException("Connection string is invalid. Unable to parse."); } // Create and return the connection implementation return new SnowflakeConnectionImpl(params.getUrl(), params.getParams()); } /** * Creates a connection using auto-configuration. * *
This is a convenience method that uses the auto-configuration URL prefix to load connection * parameters from the connections.toml file. * * @return a Connection object * @throws SQLException if a database access error occurs or configuration cannot be loaded */ public static Connection createConnectionWithAutoConfig() throws SQLException { logger.debug("Creating connection with auto-configuration"); return createConnection(AutoConfigurationHelper.AUTO_CONNECTION_PREFIX, null); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/driver/DriverInitializer.java ================================================ package net.snowflake.client.internal.driver; import java.lang.reflect.Field; import java.lang.reflect.Method; import net.snowflake.client.internal.core.SecurityUtil; import net.snowflake.client.internal.core.minicore.Minicore; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import net.snowflake.client.internal.jdbc.telemetryOOB.TelemetryService; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; /** * Handles all one-time initialization for the Snowflake JDBC driver. * *
* <p>This includes:
*
* <ul>
*   <li>Arrow result format support
*   <li>BouncyCastle security provider registration
*   <li>Out-of-band telemetry configuration
*   <li>Suppression of illegal reflective access warnings
* </ul>
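*
* <p>A minimal usage sketch (illustrative only; initialization normally happens in the driver's
* static block):
*
* <pre>{@code
* DriverInitializer.initialize(); // idempotent
* if (!DriverInitializer.isArrowEnabled()) {
*   System.err.println("Arrow disabled: " + DriverInitializer.getArrowDisableReason());
* }
* }</pre>
*
* <p>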
All initialization is performed once in the static block of {@link * net.snowflake.client.api.driver.SnowflakeDriver}. This class is thread-safe and ensures * initialization happens exactly once. */ public final class DriverInitializer { private static final SFLogger logger = SFLoggerFactory.getLogger(DriverInitializer.class); private static volatile boolean initialized = false; private static volatile boolean arrowEnabled = true; private static volatile String arrowDisableReason = null; private DriverInitializer() { // Utility class - prevent instantiation } /** * Perform all driver initialization. This method is idempotent and thread-safe. * *
If called multiple times, subsequent calls are no-ops. */ public static synchronized void initialize() { if (initialized) { logger.debug("Driver already initialized, skipping"); return; } logger.debug("Initializing Snowflake JDBC Driver..."); initializeArrowSupport(); initializeSecurityProvider(); initializeTelemetry(); initializeMinicore(); initialized = true; logger.debug("Snowflake JDBC Driver initialization complete"); } /** * Initialize Apache Arrow support for high-performance result sets. * *
* <p>This method attempts to suppress illegal reflective access warnings from Netty/Arrow. If
* initialization fails, Arrow is disabled and the driver will fall back to JSON result format.
*
* <p>
Note: Prior to version 4.x, this method set the system property * io.netty.tryReflectionSetAccessible=true. Testing has shown this is no longer necessary with * Arrow 17.0.0+ and Netty 4.1.130+, as modern versions handle Java module system restrictions * without requiring this property. */ private static void initializeArrowSupport() { try { // Suppress reflective access warnings from Netty/Arrow suppressIllegalReflectiveAccessWarnings(); arrowEnabled = true; logger.debug("Arrow result format enabled successfully"); } catch (Throwable t) { arrowEnabled = false; arrowDisableReason = t.getLocalizedMessage(); logger.warn("Failed to enable Arrow result format: {}", arrowDisableReason); } } /** * Suppress illegal reflective access warnings caused by Netty/Arrow dependencies. * *
* <p>Only suppresses warnings if not explicitly disabled via the
* {@code snowflake.jdbc.enable.illegalAccessWarning} system property.
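*
* <p>For example, to keep the warnings enabled, launch with the flag below (the application jar
* name is illustrative):
*
* <pre>{@code
* java -Dsnowflake.jdbc.enable.illegalAccessWarning=true -jar app.jar
* }</pre>
*
* <p>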
This uses sun.misc.Unsafe to set jdk.internal.module.IllegalAccessLogger's logger to null, * effectively disabling the warnings. This is necessary because the Netty dependency of Apache * Arrow causes warnings on Java 9+. Failures are non-fatal and the driver will continue to * function normally. */ private static void suppressIllegalReflectiveAccessWarnings() { if ("true" .equals(SnowflakeUtil.systemGetProperty("snowflake.jdbc.enable.illegalAccessWarning"))) { logger.debug("Keeping illegal access warnings enabled (user requested)"); return; } try { // Get sun.misc.Unsafe class and instance Class unsafeClass = Class.forName("sun.misc.Unsafe"); Field field = unsafeClass.getDeclaredField("theUnsafe"); field.setAccessible(true); Object unsafe = field.get(null); // Get Unsafe methods for manipulating static fields Method putObjectVolatile = unsafeClass.getDeclaredMethod( "putObjectVolatile", Object.class, long.class, Object.class); Method staticFieldOffset = unsafeClass.getDeclaredMethod("staticFieldOffset", Field.class); Method staticFieldBase = unsafeClass.getDeclaredMethod("staticFieldBase", Field.class); // Get the IllegalAccessLogger class and its logger field Class loggerClass = Class.forName("jdk.internal.module.IllegalAccessLogger"); Field loggerField = loggerClass.getDeclaredField("logger"); // Use Unsafe to set the logger to null, effectively disabling warnings Long loggerOffset = (Long) staticFieldOffset.invoke(unsafe, loggerField); Object loggerBase = staticFieldBase.invoke(unsafe, loggerField); putObjectVolatile.invoke(unsafe, loggerBase, loggerOffset, null); logger.debug("Illegal reflective access warnings suppressed"); } catch (Throwable ex) { // Non-fatal - just log and continue logger.debug("Failed to suppress reflective access warnings: {}", ex.getMessage()); } } /** * Register BouncyCastle security provider for cryptographic operations. * *
BouncyCastle is used for various security operations in the JDBC driver. */ private static void initializeSecurityProvider() { try { SecurityUtil.addBouncyCastleProvider(); logger.debug("BouncyCastle security provider registered"); } catch (Throwable t) { logger.warn("Failed to register BouncyCastle provider: {}", t.getMessage()); } } /** * Configure telemetry settings for the driver. * *
By default, out-of-band telemetry is disabled. */ private static void initializeTelemetry() { try { TelemetryService.disableOOBTelemetry(); logger.debug("Out-of-band telemetry disabled"); } catch (Throwable t) { logger.warn("Failed to configure telemetry: {}", t.getMessage()); } } /** * Start asynchronous minicore native library loading. * *
Minicore is loaded in the background so it can overlap with connection setup. The loading * result is reported via telemetry during session establishment. */ private static void initializeMinicore() { try { Minicore.initializeAsync(); logger.debug("Minicore async initialization started"); } catch (Throwable t) { logger.trace("Failed to start minicore initialization", t); } } // Public accessors for Arrow status /** * Check if Arrow result format is enabled. * * @return true if Arrow is enabled, false otherwise */ public static boolean isArrowEnabled() { return arrowEnabled; } /** * Get the reason why Arrow was disabled (if applicable). * * @return error message if Arrow is disabled, null otherwise */ public static String getArrowDisableReason() { return arrowDisableReason; } /** * Check if driver has been initialized. * * @return true if initialized, false otherwise */ public static boolean isInitialized() { return initialized; } static synchronized void resetForTesting() { initialized = false; arrowEnabled = true; arrowDisableReason = null; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/driver/DriverVersion.java ================================================ package net.snowflake.client.internal.driver; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.common.core.SqlState; /** * Manages Snowflake JDBC driver version information. * *
* <p>This class is responsible for parsing and providing access to the driver's version numbers
* (major, minor, and patch). The version is read from {@code version.properties} on the classpath,
* which is populated by Maven resource filtering at build time.
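*
* <p>A minimal usage sketch (the version string is illustrative):
*
* <pre>{@code
* DriverVersion v = DriverVersion.parse("3.13.30");
* int major = v.getMajor(); // 3
* long patch = v.getPatch(); // 30
* }</pre>
*
* <p>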
This class is thread-safe and immutable. */ public final class DriverVersion { private static final String implementVersion = readAndNormalizeVersion(); private final int major; private final int minor; private final long patch; private final String fullVersion; private static final DriverVersion INSTANCE = parseFromStaticVersion(); private DriverVersion(int major, int minor, long patch, String fullVersion) { this.major = major; this.minor = minor; this.patch = patch; this.fullVersion = fullVersion; } /** * Gets the singleton instance of DriverVersion. * * @return the driver version instance */ public static DriverVersion getInstance() { return INSTANCE; } private static String readAndNormalizeVersion() { String version = DriverVersionProperties.get("version"); if (version == null) { return null; } if (version.endsWith("-SNAPSHOT")) { version = version.substring(0, version.length() - "-SNAPSHOT".length()); } return version; } /** * Parses a version string in the format "major.minor.patch". * * @param versionString the version string to parse * @return a DriverVersion instance * @throws IllegalArgumentException if the version string is invalid */ public static DriverVersion parse(String versionString) throws IllegalArgumentException { if (versionString == null || versionString.isEmpty()) { throw new IllegalArgumentException("Version string cannot be null or empty"); } String[] parts = versionString.split("\\."); if (parts.length != 3) { throw new IllegalArgumentException( "Invalid version format: " + versionString + ". Expected: major.minor.patch"); } try { int major = Integer.parseInt(parts[0]); int minor = Integer.parseInt(parts[1]); long patch = Long.parseLong(parts[2]); return new DriverVersion(major, minor, patch, versionString); } catch (NumberFormatException e) { throw new IllegalArgumentException("Invalid version numbers in: " + versionString, e); } } /** * Parses the version from the static version string defined in the driver. This is called during * static initialization. * * @return a DriverVersion instance */ private static DriverVersion parseFromStaticVersion() { try { if (implementVersion != null && !implementVersion.isEmpty()) { return parse(implementVersion); } else { throw new SnowflakeSQLException( SqlState.INTERNAL_ERROR, ErrorCode.INTERNAL_ERROR.getMessageCode(), /*session = */ null, "Snowflake JDBC Version is not set. " + "Ensure static version string was initialized."); } } catch (IllegalArgumentException ex) { // Re-throw as SnowflakeSQLException for consistency with original code throw new RuntimeException( new SnowflakeSQLException( SqlState.INTERNAL_ERROR, ErrorCode.INTERNAL_ERROR.getMessageCode(), /*session = */ null, "Invalid Snowflake JDBC Version: " + implementVersion)); } catch (SnowflakeSQLException ex) { throw new RuntimeException(ex); } } /** * Gets the major version number. * * @return the major version */ public int getMajor() { return major; } /** * Gets the minor version number. * * @return the minor version */ public int getMinor() { return minor; } /** * Gets the patch version number. * * @return the patch version */ public long getPatch() { return patch; } /** * Gets the full version string in the format "major.minor.patch". 
* * @return the full version string */ public String getFullVersion() { return fullVersion; } @Override public String toString() { return fullVersion; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/driver/DriverVersionProperties.java ================================================ package net.snowflake.client.internal.driver; import java.io.IOException; import java.io.InputStream; import java.util.Properties; /** * Provides access to build-time properties from {@code version.properties}, which is populated by * Maven resource filtering. */ public final class DriverVersionProperties { private static final String RESOURCE = "/net/snowflake/client/jdbc/version.properties"; private static final Properties PROPERTIES = loadProperties(); private DriverVersionProperties() {} public static String get(String key) { return PROPERTIES.getProperty(key); } private static Properties loadProperties() { Properties props = new Properties(); try (InputStream is = DriverVersionProperties.class.getResourceAsStream(RESOURCE)) { if (is != null) { props.load(is); } } catch (IOException e) { // Fall through with empty properties — callers will get null and fail with clear messages } return props; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/exception/SnowflakeSQLLoggedException.java ================================================ package net.snowflake.client.internal.exception; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.core.SFBaseSession; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.core.SFSession; import net.snowflake.client.internal.jdbc.telemetry.SqlExceptionTelemetryHandler; import net.snowflake.client.internal.jdbc.telemetry.TelemetryUtil; /** * This SnowflakeSQLLoggedException class extends the SnowflakeSQLException class to add OOB * telemetry data for sql exceptions. Not all sql exceptions require OOB telemetry logging so the * exceptions in this class should only be thrown if there is a need for logging the exception with * OOB telemetry. 
*/ public class SnowflakeSQLLoggedException extends SnowflakeSQLException { public SnowflakeSQLLoggedException( String queryID, SFSession session, String sqlState, String message, Exception cause) { super(queryID, cause, sqlState, TelemetryUtil.NO_VENDOR_CODE, message); SqlExceptionTelemetryHandler.sendTelemetry( queryID, sqlState, TelemetryUtil.NO_VENDOR_CODE, session, this); } /** * @param session SFBaseSession * @param reason exception reason * @param SQLState the SQL state * @param vendorCode the vendor code * @param queryId the query ID */ public SnowflakeSQLLoggedException( SFBaseSession session, String reason, String SQLState, int vendorCode, String queryId) { super(queryId, reason, SQLState, vendorCode); SqlExceptionTelemetryHandler.sendTelemetry(queryId, SQLState, vendorCode, session, this); } /** * @param session SFBaseSession * @param vendorCode the vendor code * @param SQLState the SQL state */ public SnowflakeSQLLoggedException(SFBaseSession session, int vendorCode, String SQLState) { super((String) null, SQLState, vendorCode); SqlExceptionTelemetryHandler.sendTelemetry(null, SQLState, vendorCode, session, this); } /** * @param queryId the query ID * @param session SFBaseSession * @param vendorCode the vendor code * @param SQLState the SQL state */ public SnowflakeSQLLoggedException( String queryId, SFBaseSession session, int vendorCode, String SQLState) { super(queryId, SQLState, vendorCode); SqlExceptionTelemetryHandler.sendTelemetry(queryId, SQLState, vendorCode, session, this); } /** * @param session SFBaseSession * @param SQLState the SQL state * @param reason the exception reason */ public SnowflakeSQLLoggedException(SFBaseSession session, String SQLState, String reason) { super(null, reason, SQLState); SqlExceptionTelemetryHandler.sendTelemetry( null, SQLState, TelemetryUtil.NO_VENDOR_CODE, session, this); } /** * @param queryId the query ID * @param session SFBaseSession * @param SQLState the SQL state * @param reason the exception reason */ public SnowflakeSQLLoggedException( String queryId, SFBaseSession session, String SQLState, String reason) { super(null, reason, SQLState); SqlExceptionTelemetryHandler.sendTelemetry( queryId, SQLState, TelemetryUtil.NO_VENDOR_CODE, session, this); } /** * @param session SFBaseSession * @param vendorCode the vendor code * @param SQLState the SQL state * @param params additional parameters */ public SnowflakeSQLLoggedException( SFBaseSession session, int vendorCode, String SQLState, Object... params) { this(null, session, vendorCode, SQLState, params); } /** * @param queryId the query ID * @param session SFBaseSession * @param vendorCode the vendor code * @param SQLState the SQL state * @param params additional parameters */ public SnowflakeSQLLoggedException( String queryId, SFBaseSession session, int vendorCode, String SQLState, Object... params) { super(queryId, SQLState, vendorCode, params); SqlExceptionTelemetryHandler.sendTelemetry(queryId, SQLState, vendorCode, session, this); } /** * @param session SFBaseSession * @param errorCode the error code * @param params additional parameters */ public SnowflakeSQLLoggedException(SFBaseSession session, ErrorCode errorCode, Object... 
params) { super(errorCode, params); SqlExceptionTelemetryHandler.sendTelemetry( null, errorCode.getSqlState(), errorCode.getMessageCode(), session, this); } /** * @param session SFBaseSession * @param errorCode the error code * @param ex Throwable exception * @param params additional parameters */ public SnowflakeSQLLoggedException( SFBaseSession session, ErrorCode errorCode, Throwable ex, Object... params) { super(ex, errorCode, params); SqlExceptionTelemetryHandler.sendTelemetry( null, errorCode.getSqlState(), errorCode.getMessageCode(), session, this); } /** * @param session SFBaseSession * @param SQLState the SQL state * @param vendorCode the vendor code * @param ex Throwable exception * @param params additional parameters */ public SnowflakeSQLLoggedException( SFBaseSession session, String SQLState, int vendorCode, Throwable ex, Object... params) { super(ex, SQLState, vendorCode, params); SqlExceptionTelemetryHandler.sendTelemetry(null, SQLState, vendorCode, session, this); } /** * @param queryId the query ID * @param session SFBaseSession * @param SQLState the SQL state * @param vendorCode the vendor code * @param ex Throwable exception * @param params additional parameters */ public SnowflakeSQLLoggedException( String queryId, SFBaseSession session, String SQLState, int vendorCode, Throwable ex, Object... params) { super(queryId, ex, SQLState, vendorCode, params); SqlExceptionTelemetryHandler.sendTelemetry(queryId, SQLState, vendorCode, session, this); } /** * @param queryId the query ID * @param session SFBaseSession * @param errorCode the error code * @param params additional parameters */ public SnowflakeSQLLoggedException( String queryId, SFBaseSession session, ErrorCode errorCode, Object... params) { super(queryId, errorCode, params); SqlExceptionTelemetryHandler.sendTelemetry( queryId, null, TelemetryUtil.NO_VENDOR_CODE, session, this); } /** * @param session SFBaseSession * @param e throwable exception */ public SnowflakeSQLLoggedException(SFBaseSession session, SFException e) { super(e); SqlExceptionTelemetryHandler.sendTelemetry( null, null, TelemetryUtil.NO_VENDOR_CODE, session, this); } /** * @param queryId the query ID * @param session SFBaseSession * @param reason exception reason */ public SnowflakeSQLLoggedException(String queryId, SFBaseSession session, String reason) { super(queryId, reason, null); SqlExceptionTelemetryHandler.sendTelemetry( queryId, null, TelemetryUtil.NO_VENDOR_CODE, session, this); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/ArrowResultChunk.java ================================================ package net.snowflake.client.internal.jdbc; import static net.snowflake.client.internal.core.arrow.ArrowVectorConverterUtil.initConverter; import java.io.IOException; import java.io.InputStream; import java.nio.channels.ClosedByInterruptException; import java.util.ArrayList; import java.util.List; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.core.DataConversionContext; import net.snowflake.client.internal.core.SFBaseSession; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.core.arrow.ArrowResultChunkIndexSorter; import net.snowflake.client.internal.core.arrow.ArrowVectorConverter; import net.snowflake.client.internal.exception.SnowflakeSQLLoggedException; import net.snowflake.client.internal.log.SFLogger; import 
net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.common.core.SqlState; import org.apache.arrow.memory.RootAllocator; import org.apache.arrow.vector.BigIntVector; import org.apache.arrow.vector.BitVector; import org.apache.arrow.vector.DateDayVector; import org.apache.arrow.vector.DecimalVector; import org.apache.arrow.vector.FieldVector; import org.apache.arrow.vector.Float8Vector; import org.apache.arrow.vector.IntVector; import org.apache.arrow.vector.SmallIntVector; import org.apache.arrow.vector.TinyIntVector; import org.apache.arrow.vector.ValueVector; import org.apache.arrow.vector.VarBinaryVector; import org.apache.arrow.vector.VarCharVector; import org.apache.arrow.vector.VectorSchemaRoot; import org.apache.arrow.vector.complex.StructVector; import org.apache.arrow.vector.ipc.ArrowStreamReader; import org.apache.arrow.vector.util.TransferPair; public class ArrowResultChunk extends SnowflakeResultChunk { /** * A 2-D array of arrow ValueVectors, this list represents data in the whole chunk. Since each * chunk is divided into record batches and each record batch is composed of list of column * vectors. * *

So the outer list is the list of record batches, and each inner list holds the column vectors of one record batch. */ private final ArrayList<List<ValueVector>> batchOfVectors; private static final SFLogger logger = SFLoggerFactory.getLogger(ArrowResultChunk.class); /** arrow root allocator used by this resultSet */ private final RootAllocator rootAllocator; private boolean enableSortFirstResultChunk; private IntVector firstResultChunkSortedIndices; private VectorSchemaRoot root; private SFBaseSession session; public ArrowResultChunk( String url, int rowCount, int colCount, int uncompressedSize, RootAllocator rootAllocator, SFBaseSession session) { super(url, rowCount, colCount, uncompressedSize); this.batchOfVectors = new ArrayList<>(); this.rootAllocator = rootAllocator; this.session = session; } private void addBatchData(List<ValueVector> batch) { batchOfVectors.add(batch); } /** * Read an input stream of arrow data bytes and load it into java value vectors. Note that no * copy of the data is made once it has been loaded into memory: ArrowStreamReader originally * allocates the memory that holds the vectors, but ownership of that memory is transferred to * the ArrowResultChunk class, so the memory is not released when the ArrowStreamReader is * garbage collected. * * @param is inputStream which contains the arrow data file in bytes * @throws IOException if failed to read data as arrow file */ public void readArrowStream(InputStream is) throws IOException { ArrayList<ValueVector> valueVectors = new ArrayList<>(); try (ArrowStreamReader reader = new ArrowStreamReader(is, rootAllocator)) { root = reader.getVectorSchemaRoot(); while (reader.loadNextBatch()) { valueVectors = new ArrayList<>(); for (FieldVector f : root.getFieldVectors()) { // transfer will not copy data but transfer ownership of memory // from streamReader to resultChunk TransferPair t = f.getTransferPair(rootAllocator); t.transfer(); valueVectors.add(t.getTo()); } addBatchData(valueVectors); root.clear(); } } catch (ClosedByInterruptException cbie) { // happens when the statement is closed before parsing finishes logger.debug("Interrupted when loading Arrow result", cbie); valueVectors.forEach(ValueVector::close); freeData(); } catch (Exception ex) { valueVectors.forEach(ValueVector::close); freeData(); throw ex; } } @Override public void reset() { freeData(); this.batchOfVectors.clear(); } @Override public long computeNeededChunkMemory() { return getUncompressedSize(); } @Override public void freeData() { batchOfVectors.forEach(list -> list.forEach(ValueVector::close)); this.batchOfVectors.clear(); if (firstResultChunkSortedIndices != null) { firstResultChunkSortedIndices.close(); } if (root != null) { root.clear(); root = null; } } /** * @param dataConversionContext DataConversionContext * @return an iterator to iterate over the current chunk */ public ArrowChunkIterator getIterator(DataConversionContext dataConversionContext) { return new ArrowChunkIterator(dataConversionContext); } /** * @return an empty iterator to iterate over the current chunk */ public static ArrowChunkIterator getEmptyChunkIterator() { return new EmptyArrowResultChunk().new ArrowChunkIterator(null); } public void enableSortFirstResultChunk() { enableSortFirstResultChunk = true; } /** Iterator class used to go through the arrow chunk row by row */ public class ArrowChunkIterator { /** index of the record batch that the iterator currently points to */ private int currentRecordBatchIndex; /** total number of record batches */ private int totalRecordBatch; /** index of the row inside the current record batch that the iterator points to */
private int currentRowInRecordBatch; /** number of rows inside the current record batch */ private int rowCountInCurrentRecordBatch; /** * list of converters attached to the current record batch. Note: this list is updated every * time the iterator points to a new record batch */ private List<ArrowVectorConverter> currentConverters; /** formatters for each data type */ private DataConversionContext dataConversionContext; ArrowChunkIterator(DataConversionContext dataConversionContext) { this.currentRecordBatchIndex = -1; this.totalRecordBatch = batchOfVectors.size(); this.currentRowInRecordBatch = -1; this.rowCountInCurrentRecordBatch = 0; this.dataConversionContext = dataConversionContext; } /** * Given a list of arrow vectors (all columns in a single record batch), return a list of arrow * vector converters. Note: each converter is built on top of an arrow vector, so that arrow * data can be converted back to java data * * @param vectors list of arrow vectors * @return a list of converters, one per vector */ private List<ArrowVectorConverter> initConverters(List<ValueVector> vectors) throws SnowflakeSQLException { List<ArrowVectorConverter> converters = new ArrayList<>(); for (int i = 0; i < vectors.size(); i++) { converters.add(initConverter(vectors.get(i), dataConversionContext, session, i)); } return converters; } /** * Advance to next row. * * @return true if there is a next row * @throws SnowflakeSQLException if an error is encountered. */ public boolean next() throws SnowflakeSQLException { currentRowInRecordBatch++; if (currentRowInRecordBatch < rowCountInCurrentRecordBatch) { // still in current recordbatch return true; } else { currentRecordBatchIndex++; if (currentRecordBatchIndex < totalRecordBatch) { this.currentRowInRecordBatch = 0; if (currentRecordBatchIndex == 0 && sortFirstResultChunkEnabled()) { // perform client-side sorting for the first chunk (only used in Snowflake internal // regression tests) // if first chunk has multiple record batches, merge them into one and sort it if (batchOfVectors.size() > 1) { mergeBatchesIntoOne(); totalRecordBatch = 1; } this.rowCountInCurrentRecordBatch = batchOfVectors.get(currentRecordBatchIndex).get(0).getValueCount(); currentConverters = initConverters(batchOfVectors.get(currentRecordBatchIndex)); sortFirstResultChunk(currentConverters); } else { this.rowCountInCurrentRecordBatch = batchOfVectors.get(currentRecordBatchIndex).get(0).getValueCount(); currentConverters = initConverters(batchOfVectors.get(currentRecordBatchIndex)); } return true; } } return false; } public boolean isLast() { return currentRecordBatchIndex + 1 == totalRecordBatch && currentRowInRecordBatch + 1 == rowCountInCurrentRecordBatch; } public boolean isAfterLast() { return currentRecordBatchIndex >= totalRecordBatch && currentRowInRecordBatch >= rowCountInCurrentRecordBatch; } public ArrowResultChunk getChunk() { return ArrowResultChunk.this; } public ArrowVectorConverter getCurrentConverter(int columnIdx) throws SFException { if (columnIdx < 0 || columnIdx >= currentConverters.size()) { throw new SFException(ErrorCode.COLUMN_DOES_NOT_EXIST, columnIdx + 1); } return currentConverters.get(columnIdx); } /** * @return index of the row in the current record batch */ public int getCurrentRowInRecordBatch() { if (sortFirstResultChunkEnabled() && currentRecordBatchIndex == 0) { return firstResultChunkSortedIndices.get(currentRowInRecordBatch); } else { return currentRowInRecordBatch; } } } }
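// Editor's illustrative sketch, not part of the original source: how a caller is expected to
// drive the ArrowChunkIterator above. The DataConversionContext is assumed to be supplied by
// the enclosing result set, and ArrowVectorConverter.toObject(int) is assumed available.
private static void iterateChunkExample(ArrowResultChunk chunk, DataConversionContext ctx)
    throws SnowflakeSQLException, SFException {
  ArrowChunkIterator it = chunk.getIterator(ctx);
  while (it.next()) {
    // column 0 of the current row; getCurrentRowInRecordBatch() resolves the sorted index
    ArrowVectorConverter converter = it.getCurrentConverter(0);
    Object value = converter.toObject(it.getCurrentRowInRecordBatch());
  }
}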
/** * Merge an arrow result chunk with more than one record batch into a single record batch * (only used for the first chunk when client-side sorting is required) * * @throws SnowflakeSQLException if failed to merge the first result chunk */ public void mergeBatchesIntoOne() throws SnowflakeSQLException { try { List<ValueVector> first = batchOfVectors.get(0); for (int i = 1; i < batchOfVectors.size(); i++) { List<ValueVector> batch = batchOfVectors.get(i); mergeBatch(first, batch); batch.forEach(ValueVector::close); } batchOfVectors.clear(); batchOfVectors.add(first); } catch (SFException ex) { throw new SnowflakeSQLLoggedException( session, SqlState.INTERNAL_ERROR, ErrorCode.INTERNAL_ERROR.getMessageCode(), ex, "Failed to merge first result chunk: " + ex.getLocalizedMessage()); } } /** * Merge the right batch into the left batch * * @param left target batch * @param right source batch */ private void mergeBatch(List<ValueVector> left, List<ValueVector> right) throws SFException { for (int i = 0; i < left.size(); i++) { mergeVector(left.get(i), right.get(i)); } } /** * Append values from the right vector to the left vector * * @param left target vector * @param right source vector */ private void mergeVector(ValueVector left, ValueVector right) throws SFException { if (left instanceof StructVector) { mergeStructVector((StructVector) left, (StructVector) right); } else { mergeNonStructVector(left, right); } } /** * Merge StructVectors, used by Snowflake timestamp types * * @param left target vector * @param right source vector */ private void mergeStructVector(StructVector left, StructVector right) throws SFException { int numOfChildren = left.getChildrenFromFields().size(); for (int i = 0; i < numOfChildren; i++) { mergeNonStructVector( left.getChildrenFromFields().get(i), right.getChildrenFromFields().get(i)); } int offset = left.getValueCount(); for (int i = 0; i < right.getValueCount(); i++) { if (right.isNull(i)) { left.setNull(offset + i); } } left.setValueCount(offset + right.getValueCount()); } /** * Merge non-struct vectors * * @param left target vector * @param right source vector */ private void mergeNonStructVector(ValueVector left, ValueVector right) throws SFException { if (left instanceof BigIntVector) { BigIntVector bigIntVectorLeft = (BigIntVector) left; BigIntVector bigIntVectorRight = (BigIntVector) right; int offset = bigIntVectorLeft.getValueCount(); for (int i = 0; i < bigIntVectorRight.getValueCount(); i++) { if (bigIntVectorRight.isNull(i)) { bigIntVectorLeft.setNull(offset + i); } else { bigIntVectorLeft.setSafe(offset + i, bigIntVectorRight.get(i)); } } bigIntVectorLeft.setValueCount(offset + bigIntVectorRight.getValueCount()); } else if (left instanceof BitVector) { BitVector bitVectorLeft = (BitVector) left; BitVector bitVectorRight = (BitVector) right; int offset = bitVectorLeft.getValueCount(); for (int i = 0; i < bitVectorRight.getValueCount(); i++) { if (bitVectorRight.isNull(i)) { bitVectorLeft.setNull(offset + i); } else { try { bitVectorLeft.setSafe(offset + i, bitVectorRight.get(i)); } catch (IndexOutOfBoundsException e) { // this can be a bug in arrow that doesn't safely set value for // BitVector so we have to reAlloc manually bitVectorLeft.reAlloc(); bitVectorLeft.setSafe(offset + i, bitVectorRight.get(i)); } } } bitVectorLeft.setValueCount(offset + bitVectorRight.getValueCount()); } else if (left instanceof DateDayVector) { DateDayVector dateDayVectorLeft = (DateDayVector) left; DateDayVector dateDayVectorRight = (DateDayVector) right; int offset = dateDayVectorLeft.getValueCount(); for (int i = 0; i < dateDayVectorRight.getValueCount(); i++) { if (dateDayVectorRight.isNull(i)) { dateDayVectorLeft.setNull(offset + i); } else { dateDayVectorLeft.setSafe(offset + i, dateDayVectorRight.get(i)); } } dateDayVectorLeft.setValueCount(offset + dateDayVectorRight.getValueCount()); } else if (left instanceof DecimalVector)
{ DecimalVector decimalVectorLeft = (DecimalVector) left; DecimalVector decimalVectorRight = (DecimalVector) right; int offset = decimalVectorLeft.getValueCount(); for (int i = 0; i < decimalVectorRight.getValueCount(); i++) { if (decimalVectorRight.isNull(i)) { decimalVectorLeft.setNull(offset + i); } else { decimalVectorLeft.setSafe(offset + i, decimalVectorRight.get(i)); } } decimalVectorLeft.setValueCount(offset + decimalVectorRight.getValueCount()); } else if (left instanceof Float8Vector) { Float8Vector float8VectorLeft = (Float8Vector) left; Float8Vector float8VectorRight = (Float8Vector) right; int offset = float8VectorLeft.getValueCount(); for (int i = 0; i < float8VectorRight.getValueCount(); i++) { if (float8VectorRight.isNull(i)) { float8VectorLeft.setNull(offset + i); } else { float8VectorLeft.setSafe(offset + i, float8VectorRight.get(i)); } } float8VectorLeft.setValueCount(offset + float8VectorRight.getValueCount()); } else if (left instanceof IntVector) { IntVector intVectorLeft = (IntVector) left; IntVector intVectorRight = (IntVector) right; int offset = intVectorLeft.getValueCount(); for (int i = 0; i < intVectorRight.getValueCount(); i++) { if (intVectorRight.isNull(i)) { intVectorLeft.setNull(offset + i); } else { intVectorLeft.setSafe(offset + i, intVectorRight.get(i)); } } intVectorLeft.setValueCount(offset + intVectorRight.getValueCount()); } else if (left instanceof SmallIntVector) { SmallIntVector smallIntVectorLeft = (SmallIntVector) left; SmallIntVector smallIntVectorRight = (SmallIntVector) right; int offset = smallIntVectorLeft.getValueCount(); for (int i = 0; i < smallIntVectorRight.getValueCount(); i++) { if (smallIntVectorRight.isNull(i)) { smallIntVectorLeft.setNull(offset + i); } else { smallIntVectorLeft.setSafe(offset + i, smallIntVectorRight.get(i)); } } smallIntVectorLeft.setValueCount(offset + smallIntVectorRight.getValueCount()); } else if (left instanceof TinyIntVector) { TinyIntVector tinyIntVectorLeft = (TinyIntVector) left; TinyIntVector tinyIntVectorRight = (TinyIntVector) right; int offset = tinyIntVectorLeft.getValueCount(); for (int i = 0; i < tinyIntVectorRight.getValueCount(); i++) { if (tinyIntVectorRight.isNull(i)) { tinyIntVectorLeft.setNull(offset + i); } else { tinyIntVectorLeft.setSafe(offset + i, tinyIntVectorRight.get(i)); } } tinyIntVectorLeft.setValueCount(offset + tinyIntVectorRight.getValueCount()); } else if (left instanceof VarBinaryVector) { VarBinaryVector varBinaryVectorLeft = (VarBinaryVector) left; VarBinaryVector varBinaryVectorRight = (VarBinaryVector) right; int offset = varBinaryVectorLeft.getValueCount(); for (int i = 0; i < varBinaryVectorRight.getValueCount(); i++) { if (varBinaryVectorRight.isNull(i)) { varBinaryVectorLeft.setNull(offset + i); } else { varBinaryVectorLeft.setSafe(offset + i, varBinaryVectorRight.get(i)); } } varBinaryVectorLeft.setValueCount(offset + varBinaryVectorRight.getValueCount()); } else if (left instanceof VarCharVector) { VarCharVector varCharVectorLeft = (VarCharVector) left; VarCharVector varCharVectorRight = (VarCharVector) right; int offset = varCharVectorLeft.getValueCount(); for (int i = 0; i < varCharVectorRight.getValueCount(); i++) { if (varCharVectorRight.isNull(i)) { varCharVectorLeft.setNull(offset + i); } else { varCharVectorLeft.setSafe(offset + i, varCharVectorRight.get(i)); } } varCharVectorLeft.setValueCount(offset + varCharVectorRight.getValueCount()); } else { throw new SFException( ErrorCode.INTERNAL_ERROR, "Failed to merge vector due to unknown vector type"); } } 
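// Editor's illustrative sketch, not part of the original source: the append-merge pattern
// repeated in every fixed-width branch above, isolated for one vector type. setSafe() grows
// the destination buffer as needed, and setValueCount() is called once after appending.
private static void appendIntVectorExample(IntVector left, IntVector right) {
  int offset = left.getValueCount();
  for (int i = 0; i < right.getValueCount(); i++) {
    if (right.isNull(i)) {
      left.setNull(offset + i);
    } else {
      left.setSafe(offset + i, right.get(i));
    }
  }
  left.setValueCount(offset + right.getValueCount());
}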
private void sortFirstResultChunk(List<ArrowVectorConverter> converters) throws SnowflakeSQLException { try { List<ValueVector> firstResultChunk = this.batchOfVectors.get(0); ArrowResultChunkIndexSorter sorter = new ArrowResultChunkIndexSorter(firstResultChunk, converters); firstResultChunkSortedIndices = sorter.sort(); } catch (SFException ex) { throw new SnowflakeSQLException( ex, SqlState.INTERNAL_ERROR, ErrorCode.INTERNAL_ERROR.getMessageCode(), "Failed to sort first result chunk: " + ex.getLocalizedMessage()); } } private boolean sortFirstResultChunkEnabled() { return enableSortFirstResultChunk; } /** * Empty arrow result chunk implementation. Used when rowset from server is null or empty or in * testing */ private static class EmptyArrowResultChunk extends ArrowResultChunk { EmptyArrowResultChunk() { super("", 0, 0, 0, null, null); } @Override public final long computeNeededChunkMemory() { return 0; } @Override public final void freeData() { // do nothing } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/BindingParameterMetadata.java ================================================ package net.snowflake.client.internal.jdbc; import com.fasterxml.jackson.annotation.JsonInclude; import java.util.List; @JsonInclude(JsonInclude.Include.NON_NULL) public class BindingParameterMetadata { private String type; private String name; private Integer length; private Integer byteLength; private Integer precision; private Integer scale; private boolean nullable = true; private List<BindingParameterMetadata> fields; public BindingParameterMetadata(String type) { this.type = type; } public BindingParameterMetadata(String type, String name) { this.type = type; this.name = name; } public BindingParameterMetadata( String type, String name, Integer length, Integer byteLength, Integer precision, Integer scale, Boolean nullable) { this.type = type; this.name = name; this.length = length; this.byteLength = byteLength; this.precision = precision; this.scale = scale; this.nullable = nullable; } public BindingParameterMetadata() {} public String getType() { return type; } public void setType(String type) { this.type = type; } public String getName() { return name; } public void setName(String name) { this.name = name; } public Integer getLength() { return length; } public void setLength(Integer length) { this.length = length; } public Integer getByteLength() { return byteLength; } public void setByteLength(Integer byteLength) { this.byteLength = byteLength; } public Integer getPrecision() { return precision; } public void setPrecision(Integer precision) { this.precision = precision; } public Integer getScale() { return scale; } public void setScale(Integer scale) { this.scale = scale; } public Boolean isNullable() { return nullable; } public void setNullable(Boolean nullable) { this.nullable = nullable; } public List<BindingParameterMetadata> getFields() { return fields; } public void setFields(List<BindingParameterMetadata> fields) { this.fields = fields; } public static class BindingParameterMetadataBuilder { private BindingParameterMetadata bindingParameterMetadata; private BindingParameterMetadataBuilder() { bindingParameterMetadata = new BindingParameterMetadata(); } public BindingParameterMetadataBuilder withType(String type) { bindingParameterMetadata.type = type; return this; } public BindingParameterMetadataBuilder withName(String name) { bindingParameterMetadata.name = name; return this; } public BindingParameterMetadataBuilder withLength(Integer length) { bindingParameterMetadata.length = length; return this; } public BindingParameterMetadataBuilder withByteLength(Integer byteLength) { bindingParameterMetadata.byteLength = byteLength; return this; } public BindingParameterMetadataBuilder withPrecision(Integer precision) { bindingParameterMetadata.precision = precision; return this; } public BindingParameterMetadataBuilder withScale(Integer scale) { bindingParameterMetadata.scale = scale; return this; } public BindingParameterMetadataBuilder withNullable(Boolean nullable) { bindingParameterMetadata.nullable = nullable; return this; } public BindingParameterMetadataBuilder withFields(List<BindingParameterMetadata> fields) { bindingParameterMetadata.fields = fields; return this; } public static BindingParameterMetadataBuilder bindingParameterMetadata() { return new BindingParameterMetadataBuilder(); } public BindingParameterMetadata build() { return bindingParameterMetadata; } } }
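// Editor's illustrative sketch, not part of the original source: building metadata for a
// nullable text bind parameter with the builder above. All field values are hypothetical.
class BindingParameterMetadataUsageExample {
  static BindingParameterMetadata textParam() {
    return BindingParameterMetadata.BindingParameterMetadataBuilder.bindingParameterMetadata()
        .withType("text")
        .withName("C1")
        .withLength(16777216)
        .withNullable(true)
        .build();
  }
}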
================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/ChunkDownloadContext.java ================================================ package net.snowflake.client.internal.jdbc; import java.util.Map; import net.snowflake.client.internal.core.SFBaseSession; /** * Simple struct to contain download context for a chunk. This is useful to organize the collection * of properties that may be used for containing download information, and allows for the * getInputStream() method to be overridden. */ public class ChunkDownloadContext { private final SnowflakeChunkDownloader chunkDownloader; public SnowflakeChunkDownloader getChunkDownloader() { return chunkDownloader; } public SnowflakeResultChunk getResultChunk() { return resultChunk; } public String getQrmk() { return qrmk; } public int getChunkIndex() { return chunkIndex; } public Map<String, String> getChunkHeadersMap() { return chunkHeadersMap; } public int getNetworkTimeoutInMilli() { return networkTimeoutInMilli; } public int getAuthTimeout() { return authTimeout; } public int getSocketTimeout() { return socketTimeout; } public SFBaseSession getSession() { return session; } private final SnowflakeResultChunk resultChunk; private final String qrmk; private final int chunkIndex; private final Map<String, String> chunkHeadersMap; private final int networkTimeoutInMilli; private final int authTimeout; private final int socketTimeout; private final int maxHttpRetries; private final SFBaseSession session; public ChunkDownloadContext( SnowflakeChunkDownloader chunkDownloader, SnowflakeResultChunk resultChunk, String qrmk, int chunkIndex, Map<String, String> chunkHeadersMap, int networkTimeoutInMilli, int authTimeout, int socketTimeout, int maxHttpRetries, SFBaseSession session) { this.chunkDownloader = chunkDownloader; this.resultChunk = resultChunk; this.qrmk = qrmk; this.chunkIndex = chunkIndex; this.chunkHeadersMap = chunkHeadersMap; this.networkTimeoutInMilli = networkTimeoutInMilli; this.authTimeout = authTimeout; this.socketTimeout = socketTimeout; this.maxHttpRetries = maxHttpRetries; this.session = session; } }
================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/ColumnTypeInfo.java ================================================ package net.snowflake.client.internal.jdbc; import net.snowflake.client.api.resultset.SnowflakeType; public class ColumnTypeInfo { private int columnType; private String extColTypeName; private SnowflakeType snowflakeType; public ColumnTypeInfo(int columnType, String extColTypeName, SnowflakeType snowflakeType) { this.columnType = columnType; this.extColTypeName = extColTypeName; this.snowflakeType = snowflakeType; } public int getColumnType() { return columnType; } public String getExtColTypeName() { return extColTypeName; } public SnowflakeType getSnowflakeType() { return snowflakeType; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/CompressedStreamFactory.java ================================================ package net.snowflake.client.internal.jdbc; import static net.snowflake.client.internal.core.Constants.MB; import static net.snowflake.common.core.FileCompressionType.GZIP; import static net.snowflake.common.core.FileCompressionType.ZSTD; import com.github.luben.zstd.ZstdInputStream; import java.io.IOException; import java.io.InputStream; import java.util.zip.GZIPInputStream; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.common.core.SqlState; import org.apache.http.Header; class CompressedStreamFactory { private static final int STREAM_BUFFER_SIZE = MB; /** * Determine the format of the response from the Content-Encoding header; if it is neither gzip * nor zstd, raise an error. When no header is present, fall back to gzip magic-byte detection. */ public InputStream createBasedOnEncodingHeader(InputStream is, Header encoding) throws IOException, SnowflakeSQLException { if (encoding != null) { if (GZIP.name().equalsIgnoreCase(encoding.getValue())) { return new GZIPInputStream(is, STREAM_BUFFER_SIZE); } else if (ZSTD.name().equalsIgnoreCase(encoding.getValue())) { return new ZstdInputStream(is); } else { throw new SnowflakeSQLException( SqlState.INTERNAL_ERROR, ErrorCode.INTERNAL_ERROR.getMessageCode(), "Exception: unexpected compression got " + encoding.getValue()); } } else { return DefaultResultStreamProvider.detectGzipAndGetStream(is); } } }
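// Editor's illustrative sketch, not part of the original source: wrapping a response body
// according to its Content-Encoding header. The header value and stream are placeholders;
// passing a null header makes the factory fall back to gzip magic-byte sniffing.
class CompressedStreamFactoryUsageExample {
  static InputStream decode(InputStream rawBody) throws IOException, SnowflakeSQLException {
    Header encoding = new org.apache.http.message.BasicHeader("Content-Encoding", "GZIP");
    return new CompressedStreamFactory().createBasedOnEncodingHeader(rawBody, encoding);
  }
}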
================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/DBMetadataResultSetMetadata.java ================================================ package net.snowflake.client.internal.jdbc; import java.sql.Types; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; /** * For metadata calls such as getTables/getSchemas we return result sets; the result set metadata * for those calls is stored here */ public enum DBMetadataResultSetMetadata { GET_CATALOGS( Collections.singletonList("TABLE_CAT"), Collections.singletonList("TEXT"), Collections.singletonList(Types.VARCHAR)), GET_SCHEMAS( Arrays.asList("TABLE_SCHEM", "TABLE_CATALOG"), Arrays.asList("TEXT", "TEXT"), Arrays.asList(Types.VARCHAR, Types.VARCHAR)), GET_TABLES( Arrays.asList( "TABLE_CAT", "TABLE_SCHEM", "TABLE_NAME", "TABLE_TYPE", "REMARKS", "TYPE_CAT", "TYPE_SCHEM", "TYPE_NAME", "SELF_REFERENCING_COL_NAME", "REF_GENERATION"), Arrays.asList("TEXT", "TEXT", "TEXT", "TEXT", "TEXT", "TEXT", "TEXT", "TEXT", "TEXT", "TEXT"), Arrays.asList( Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR)), GET_COLUMNS( Arrays.asList( "TABLE_CAT", "TABLE_SCHEM", "TABLE_NAME", "COLUMN_NAME", "DATA_TYPE", "TYPE_NAME", "COLUMN_SIZE", "BUFFER_LENGTH", "DECIMAL_DIGITS", "NUM_PREC_RADIX", "NULLABLE", "REMARKS", "COLUMN_DEF", "SQL_DATA_TYPE", "SQL_DATETIME_SUB", "CHAR_OCTET_LENGTH", "ORDINAL_POSITION", "IS_NULLABLE", "SCOPE_CATALOG", "SCOPE_SCHEMA", "SCOPE_TABLE", "SOURCE_DATA_TYPE", "IS_AUTOINCREMENT", "IS_GENERATEDCOLUMN"), Arrays.asList( "TEXT", "TEXT", "TEXT", "TEXT", "INTEGER", "TEXT", "INTEGER", "INTEGER", "INTEGER", "INTEGER", "INTEGER", "TEXT", "TEXT", "INTEGER", "INTEGER", "INTEGER", "INTEGER", "TEXT", "TEXT", "TEXT", "TEXT", "SHORT", "TEXT", "TEXT"), Arrays.asList( Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.INTEGER, Types.VARCHAR, Types.INTEGER, Types.INTEGER, Types.INTEGER, Types.INTEGER, Types.INTEGER, Types.VARCHAR, Types.VARCHAR, Types.INTEGER, Types.INTEGER, Types.INTEGER, Types.INTEGER, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.SMALLINT, Types.VARCHAR, Types.VARCHAR)), GET_COLUMNS_EXTENDED_SET( GET_COLUMNS, Collections.singletonList("BASE_TYPE"), Collections.singletonList("TEXT"), Collections.singletonList(Types.VARCHAR)), GET_PRIMARY_KEYS( Arrays.asList("TABLE_CAT", "TABLE_SCHEM", "TABLE_NAME", "COLUMN_NAME", "KEY_SEQ", "PK_NAME"), Arrays.asList("TEXT", "TEXT", "TEXT", "TEXT", "INTEGER", "TEXT"), Arrays.asList( Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.SMALLINT, Types.VARCHAR)), GET_FOREIGN_KEYS( Arrays.asList( "PKTABLE_CAT", "PKTABLE_SCHEM", "PKTABLE_NAME", "PKCOLUMN_NAME", "FKTABLE_CAT", "FKTABLE_SCHEM", "FKTABLE_NAME", "FKCOLUMN_NAME", "KEY_SEQ", "UPDATE_RULE", "DELETE_RULE", "FK_NAME", "PK_NAME", "DEFERRABILITY"), Arrays.asList( "TEXT", "TEXT", "TEXT", "TEXT", "TEXT", "TEXT", "TEXT", "TEXT", "SHORT", "SHORT", "SHORT", "TEXT", "TEXT", "SHORT"), Arrays.asList( Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.SMALLINT, Types.SMALLINT, Types.SMALLINT, Types.VARCHAR, Types.VARCHAR, Types.SMALLINT)), GET_FUNCTIONS( Arrays.asList( "FUNCTION_CAT", "FUNCTION_SCHEM", "FUNCTION_NAME", "REMARKS", "FUNCTION_TYPE", "SPECIFIC_NAME"), Arrays.asList("TEXT", "TEXT", "TEXT", "TEXT", "SHORT", "TEXT"), Arrays.asList( Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.SMALLINT, Types.VARCHAR)), GET_FUNCTION_COLUMNS( Arrays.asList( "FUNCTION_CAT", "FUNCTION_SCHEM", "FUNCTION_NAME", "COLUMN_NAME", "COLUMN_TYPE", "DATA_TYPE", "TYPE_NAME", "PRECISION", "LENGTH", "SCALE", "RADIX", "NULLABLE", "REMARKS", "CHAR_OCTET_LENGTH", "ORDINAL_POSITION", "IS_NULLABLE", "SPECIFIC_NAME"), Arrays.asList( "TEXT", "TEXT", "TEXT", "TEXT", "SHORT", "INTEGER", "TEXT", "INTEGER", "INTEGER", "SHORT", "SHORT", "SHORT", "TEXT", "INTEGER", "INTEGER", "TEXT", "TEXT"), Arrays.asList( Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.SMALLINT, Types.INTEGER, Types.VARCHAR, Types.INTEGER, Types.INTEGER, Types.SMALLINT, Types.SMALLINT, Types.SMALLINT, Types.VARCHAR, Types.INTEGER, Types.INTEGER, Types.VARCHAR, Types.VARCHAR)), GET_PROCEDURES( Arrays.asList( "PROCEDURE_CAT", "PROCEDURE_SCHEM", "PROCEDURE_NAME", "REMARKS", "PROCEDURE_TYPE", "SPECIFIC_NAME"), Arrays.asList("TEXT", "TEXT", "TEXT", "TEXT", "SHORT", "TEXT"), Arrays.asList( Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.SMALLINT, Types.VARCHAR)), GET_PROCEDURE_COLUMNS( Arrays.asList( "PROCEDURE_CAT", "PROCEDURE_SCHEM", "PROCEDURE_NAME", "COLUMN_NAME", "COLUMN_TYPE", "DATA_TYPE", "TYPE_NAME", "PRECISION", "LENGTH", "SCALE", "RADIX", "NULLABLE", "REMARKS", "COLUMN_DEF", "SQL_DATA_TYPE", "SQL_DATETIME_SUB", "CHAR_OCTET_LENGTH", "ORDINAL_POSITION", "IS_NULLABLE", "SPECIFIC_NAME"), Arrays.asList( "TEXT", "TEXT", "TEXT", "TEXT", "SHORT", "INTEGER", "TEXT", "INTEGER", "INTEGER", "SHORT", "SHORT", "SHORT", "TEXT", "TEXT", "INTEGER", "INTEGER", "INTEGER", "INTEGER", "TEXT", "TEXT"), Arrays.asList( Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.SMALLINT, Types.INTEGER, Types.VARCHAR, Types.INTEGER, Types.INTEGER, Types.SMALLINT, Types.SMALLINT, Types.SMALLINT, Types.VARCHAR, Types.VARCHAR, Types.INTEGER, Types.INTEGER, Types.INTEGER, Types.INTEGER, Types.VARCHAR, Types.VARCHAR)), GET_TABLE_PRIVILEGES( Arrays.asList( "TABLE_CAT", "TABLE_SCHEM", "TABLE_NAME", "GRANTOR", "GRANTEE", "PRIVILEGE", "IS_GRANTABLE"), Arrays.asList("TEXT", "TEXT", "TEXT", "TEXT", "TEXT", "TEXT", "TEXT"), Arrays.asList( Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR)), GET_STREAMS( Arrays.asList( "STREAM_NAME", "DATABASE_NAME", "SCHEMA_NAME", "OWNER", "COMMENT", "TABLE_NAME", "SOURCE_TYPE", "BASE_TABLES", "TYPE", "STALE", "MODE"), Arrays.asList( "TEXT", "TEXT", "TEXT", "TEXT", "TEXT", "TEXT", "TEXT", "TEXT", "TEXT", "TEXT", "TEXT"), Arrays.asList( Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR)), ; private List<String> columnNames; private List<String> columnTypeNames; private List<Integer> columnTypes; DBMetadataResultSetMetadata( List<String> columnNames, List<String> columnTypeNames, List<Integer> columnTypes) { this.columnNames = columnNames; this.columnTypeNames = columnTypeNames; this.columnTypes = columnTypes; } DBMetadataResultSetMetadata( DBMetadataResultSetMetadata base, List<String> additionalColumnNames, List<String> additionalColumnTypeNames, List<Integer> additionalColumnTypes) { this.columnNames = new ArrayList<>(base.getColumnNames()); this.columnTypeNames = new ArrayList<>(base.getColumnTypeNames()); this.columnTypes = new ArrayList<>(base.getColumnTypes()); columnNames.addAll(additionalColumnNames); columnTypeNames.addAll(additionalColumnTypeNames); columnTypes.addAll(additionalColumnTypes); } public List<String> getColumnNames() { return columnNames; } public List<String> getColumnTypeNames() { return columnTypeNames; } public List<Integer> getColumnTypes() { return columnTypes; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/DefaultResultStreamProvider.java ================================================ package net.snowflake.client.internal.jdbc; import java.io.IOException; import java.io.InputStream; import java.io.PushbackInputStream; import
java.net.URISyntaxException; import java.util.List; import java.util.Map; import java.util.zip.GZIPInputStream; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.api.http.HttpHeadersCustomizer; import net.snowflake.client.internal.core.HttpUtil; import net.snowflake.client.internal.core.SFBaseSession; import net.snowflake.client.internal.core.SFSession; import net.snowflake.client.internal.exception.SnowflakeSQLLoggedException; import net.snowflake.client.internal.jdbc.telemetry.ExecTimeTelemetryData; import net.snowflake.client.internal.log.ArgSupplier; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.client.internal.util.SecretDetector; import net.snowflake.common.core.SqlState; import org.apache.http.Header; import org.apache.http.HttpEntity; import org.apache.http.HttpResponse; import org.apache.http.client.methods.HttpGet; import org.apache.http.client.utils.URIBuilder; import org.apache.http.impl.client.CloseableHttpClient; public class DefaultResultStreamProvider implements ResultStreamProvider { private static final SFLogger logger = SFLoggerFactory.getLogger(DefaultResultStreamProvider.class); // SSE-C algorithm header private static final String SSE_C_ALGORITHM = "x-amz-server-side-encryption-customer-algorithm"; // SSE-C customer key header private static final String SSE_C_KEY = "x-amz-server-side-encryption-customer-key"; // SSE-C algorithm value private static final String SSE_C_AES = "AES256"; private CompressedStreamFactory compressedStreamFactory; public DefaultResultStreamProvider() { this.compressedStreamFactory = new CompressedStreamFactory(); } @Override public InputStream getInputStream(ChunkDownloadContext context) throws Exception { HttpResponse response; try { response = getResultChunk(context); } catch (URISyntaxException | IOException ex) { throw new SnowflakeSQLLoggedException( context.getSession(), ErrorCode.NETWORK_ERROR.getMessageCode(), SqlState.IO_ERROR, "Error encountered when request a result chunk URL: " + context.getResultChunk().getUrl() + " " + ex.getLocalizedMessage()); } /* * return error if we don't get a response or the response code * means failure. */ if (response == null || response.getStatusLine().getStatusCode() != 200) { logger.error("Error fetching chunk from: {}", context.getResultChunk().getScrubbedUrl()); SnowflakeUtil.logResponseDetails(response, logger); throw new SnowflakeSQLException( SqlState.IO_ERROR, ErrorCode.NETWORK_ERROR.getMessageCode(), "Error encountered when downloading a result chunk: HTTP " + "status: " + ((response != null) ? 
response.getStatusLine().getStatusCode() : "null response")); } InputStream inputStream; final HttpEntity entity = response.getEntity(); Header encoding = response.getFirstHeader("Content-Encoding"); try { // create stream based on compression type inputStream = compressedStreamFactory.createBasedOnEncodingHeader(entity.getContent(), encoding); } catch (Exception ex) { logger.error("Failed to decompress data: {}", response); throw new SnowflakeSQLLoggedException( context.getSession(), ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "Failed to decompress data: " + response.toString()); } // trace the response if requested logger.debug("Json response: {}", response); return inputStream; } private HttpResponse getResultChunk(ChunkDownloadContext context) throws Exception { URIBuilder uriBuilder = new URIBuilder(context.getResultChunk().getUrl()); HttpGet httpRequest = new HttpGet(uriBuilder.build()); if (context.getChunkHeadersMap() != null && context.getChunkHeadersMap().size() != 0) { for (Map.Entry<String, String> entry : context.getChunkHeadersMap().entrySet()) { logger.debug("Adding header key: {}", entry.getKey()); httpRequest.addHeader(entry.getKey(), entry.getValue()); } } // Add SSE-C headers else if (context.getQrmk() != null) { httpRequest.addHeader(SSE_C_ALGORITHM, SSE_C_AES); httpRequest.addHeader(SSE_C_KEY, context.getQrmk()); logger.debug("Adding SSE-C headers", false); } logger.debug( "Thread {} Fetching result chunk#{}: {}", Thread.currentThread().getId(), context.getChunkIndex(), context.getResultChunk().getScrubbedUrl()); SFBaseSession session = context.getSession(); List<HttpHeadersCustomizer> headersCustomizers = null; if (session instanceof SFSession) { headersCustomizers = ((SFSession) session).getHttpHeadersCustomizers(); } CloseableHttpClient httpClient = HttpUtil.getHttpClient( context.getChunkDownloader().getHttpClientSettingsKey(), headersCustomizers); // fetch the result chunk HttpResponse response = RestRequest.executeWithRetries( httpClient, httpRequest, context.getNetworkTimeoutInMilli() / 1000, // retry timeout 0, context.getSocketTimeout(), 0, 0, // no socket timeout injection null, // no canceling false, // no cookie false, // no retry parameters in url false, // no request_guid true, // retry on HTTP403 for AWS S3 true, // no retry on http request false, new ExecTimeTelemetryData(), session, context.getChunkDownloader().getHttpClientSettingsKey(), headersCustomizers, false) .getHttpResponse(); logger.debug( "Thread {} Call chunk#{} returned for URL: {}, response: {}", Thread.currentThread().getId(), context.getChunkIndex(), (ArgSupplier) () -> SecretDetector.maskSASToken(context.getResultChunk().getUrl()), response); return response; } public static InputStream detectGzipAndGetStream(InputStream is) throws IOException { PushbackInputStream pb = new PushbackInputStream(is, 2); byte[] signature = new byte[2]; int len = pb.read(signature); pb.unread(signature, 0, len); // https://tools.ietf.org/html/rfc1952 if (signature[0] == (byte) 0x1f && signature[1] == (byte) 0x8b) { return new GZIPInputStream(pb); } else { return pb; } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/DefaultSFConnectionHandler.java ================================================ package net.snowflake.client.internal.jdbc; import static net.snowflake.client.internal.config.SFClientConfigParser.checkConfigFilePermissions; import static net.snowflake.client.internal.core.SessionUtil.CLIENT_SFSQL; import static
net.snowflake.client.internal.core.SessionUtil.JVM_PARAMS_TO_PARAMS; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isWindows; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import static net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.internalCallMarker; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.nio.file.attribute.PosixFilePermission; import java.nio.file.attribute.PosixFilePermissions; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.SQLNonTransientConnectionException; import java.sql.Statement; import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.logging.Level; import net.snowflake.client.api.driver.SnowflakeDriver; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.api.http.HttpHeadersCustomizer; import net.snowflake.client.internal.api.implementation.resultset.SnowflakeBaseResultSet; import net.snowflake.client.internal.config.SFClientConfig; import net.snowflake.client.internal.config.SFClientConfigParser; import net.snowflake.client.internal.core.Constants; import net.snowflake.client.internal.core.SFBaseResultSet; import net.snowflake.client.internal.core.SFBaseSession; import net.snowflake.client.internal.core.SFBaseStatement; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.core.SFSession; import net.snowflake.client.internal.core.SFSessionProperty; import net.snowflake.client.internal.core.SFStatement; import net.snowflake.client.internal.exception.SnowflakeSQLLoggedException; import net.snowflake.client.internal.jdbc.telemetryOOB.TelemetryService; import net.snowflake.client.internal.jdbc.util.DriverUtil; import net.snowflake.client.internal.log.JDK14Logger; import net.snowflake.client.internal.log.SFLogLevel; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.client.internal.log.SFToJavaLogMapper; import net.snowflake.common.core.LoginInfoDTO; /** * The default ConnectionHandler used by SnowflakeConnectionV(x). Unless a separate implementation * is provided, a DefaultConnectionHandler will be constructed automatically by the Connection * class. */ public class DefaultSFConnectionHandler implements SFConnectionHandler { private static final SFLogger logger = SFLoggerFactory.getLogger(DefaultSFConnectionHandler.class); private final SFSession sfSession; private final SnowflakeConnectString conStr; private final boolean skipOpen; /** * Constructs a DefaultConnectionHandler using a SnowflakeConnectString. This can be done by using * SnowflakeConnectString.parse(url, info), where url is a connection url and info is a * java.util.Properties * * @param conStr A SnowflakeConnectString object */ public DefaultSFConnectionHandler(SnowflakeConnectString conStr) { this(conStr, false); } /** * Constructs a DefaultConnectionHandler using a SnowflakeConnectString. 
This can be done by using * SnowflakeConnectString.parse(url, info), where url is a connection url and info is a * java.util.Properties * * @param conStr A SnowflakeConnectString object * @param skipOpen Skip calling open() on the session (for test-use only) */ public DefaultSFConnectionHandler(SnowflakeConnectString conStr, boolean skipOpen) { this.sfSession = new SFSession(this); this.conStr = conStr; this.skipOpen = skipOpen; sfSession.setSnowflakeConnectionString(conStr); } /** * Processes parameters given in the connection string. This extracts accountName, databaseName, * schemaName from the URL if it is specified there. * * @param conStr Connection string object * @return a map containing accountName, databaseName and schemaName if specified */ public static Map<String, Object> mergeProperties(SnowflakeConnectString conStr) { conStr.getParameters().remove("SSL"); conStr .getParameters() .put( "SERVERURL", conStr.getScheme() + "://" + conStr.getHost() + ":" + conStr.getPort() + "/"); return conStr.getParameters(); } @Override public boolean supportsAsyncQuery() { return true; } @Override public void initializeConnection(String url, Properties info) throws SQLException { initialize( conStr, LoginInfoDTO.SF_JDBC_APP_ID, SnowflakeDriver.getImplementationVersion(), info); } /** Returns the default SFSession client implementation. */ @Override public SFBaseSession getSFSession() { return sfSession; } /** Returns the default SFStatement client implementation. */ @Override public SFBaseStatement getSFStatement() { return new SFStatement(sfSession); } protected void initialize(SnowflakeConnectString conStr, String appID, String appVersion) throws SQLException { this.initialize(conStr, appID, appVersion, null); } protected void initialize( SnowflakeConnectString conStr, String appID, String appVersion, Properties properties) throws SQLException { TelemetryService.getInstance().updateContext(conStr); try { // pass the parameters to sfSession initSessionProperties(conStr, appID, appVersion); setClientConfig(); initLogger(); initHttpHeaderCustomizers(properties); logger.debug("Trying to establish session, JDBC driver: {}", DriverUtil.getJdbcJarname()); if (!skipOpen) { sfSession.open(internalCallMarker()); } } catch (SFException ex) { throw new SnowflakeSQLLoggedException( sfSession, ex.getSqlState(), ex.getVendorCode(), ex.getCause(), ex.getParams()); } } private void setClientConfig() throws SnowflakeSQLLoggedException { Map<SFSessionProperty, Object> connectionPropertiesMap = sfSession.getConnectionPropertiesMap(); String clientConfigFilePath = (String) connectionPropertiesMap.getOrDefault(SFSessionProperty.CLIENT_CONFIG_FILE, null); SFClientConfig sfClientConfig = sfSession.getSfClientConfig(); if (sfClientConfig == null) { try { sfClientConfig = SFClientConfigParser.loadSFClientConfig(clientConfigFilePath); } catch (IOException e) { throw new SnowflakeSQLLoggedException( sfSession, ErrorCode.INTERNAL_ERROR, e.getMessage(), e.getCause()); } sfSession.setSfClientConfig(sfClientConfig); } }
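// Editor's illustrative sketch, not part of the original source: setClientConfig() above hands
// CLIENT_CONFIG_FILE to SFClientConfigParser.loadSFClientConfig. Assuming the documented
// "common" section, the JSON config file looks roughly like:
// {
//   "common": {
//     "log_level": "info",
//     "log_path": "/tmp/snowflake-logs"
//   }
// }
// initLogger() below maps log_level onto java.util.logging levels and appends a "jdbc"
// subdirectory to log_path.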
/** * This method instantiates a JDK14Logger. This will be used if the java.util.logging.config.file * properties file is missing. The method performs the following actions: 1. Check if the * CLIENT_CONFIG_FILE is present. If it is, the method loads the logLevel and logPath from the * client config. 2. Check if the Tracing parameter is present in the URL or connection * properties. If it is, the method will overwrite the logLevel obtained from step 1. 3. * Instantiate java.util.logging with the specified logLevel and logPath. 4. If both the logLevel * and logPath are null, this method doesn't do anything. */ private void initLogger() throws SnowflakeSQLLoggedException { if (logger instanceof JDK14Logger && systemGetProperty("java.util.logging.config.file") == null) { Map<SFSessionProperty, Object> connectionPropertiesMap = sfSession.getConnectionPropertiesMap(); String tracingLevelFromConnectionProp = (String) connectionPropertiesMap.getOrDefault(SFSessionProperty.TRACING, null); Level logLevel = null; String logPattern = "%h/snowflake_jdbc%u.log"; // default pattern. SFClientConfig sfClientConfig = sfSession.getSfClientConfig(); Path logPath = null; if (sfClientConfig != null) { String logPathFromConfig = sfClientConfig.getCommonProps().getLogPath(); logPath = getLogPath(logPathFromConfig); logPattern = constructLogPattern(logPath, logPathFromConfig); String levelStr = sfClientConfig.getCommonProps().getLogLevel(); SFLogLevel sfLogLevel = SFLogLevel.getLogLevel(levelStr); logLevel = SFToJavaLogMapper.toJavaUtilLoggingLevel(sfLogLevel); } if (tracingLevelFromConnectionProp != null) { // Log level from connection param will overwrite the log level from sf config file. logLevel = Level.parse(tracingLevelFromConnectionProp.toUpperCase()); } if (logLevel != null && logPattern != null) { try { logger.debug("Setting logger with log level {} and log pattern {}", logLevel, logPattern); JDK14Logger.instantiateLogger(logLevel, logPattern); if (sfClientConfig != null) { logger.debug( "SF Client config found at location: {}.", sfClientConfig.getConfigFilePath()); checkConfigFilePermissions(sfClientConfig.getConfigFilePath()); } if (logPath != null) { checkLogFolderPermissions(logPath); } } catch (IOException ex) { throw new SnowflakeSQLLoggedException( sfSession, ErrorCode.INTERNAL_ERROR, ex.getMessage()); } logger.debug( "Instantiating JDK14Logger with level: {}, output path: {}", logLevel, logPattern); } } } private Path getLogPath(String logPathFromConfig) throws SnowflakeSQLLoggedException { if (JDK14Logger.STDOUT.equalsIgnoreCase(logPathFromConfig)) { return null; } Path logPath; if (logPathFromConfig != null && !logPathFromConfig.isEmpty()) { // Get log path from configuration logPath = Paths.get(logPathFromConfig); if (!Files.exists(logPath)) { try { Files.createDirectories(logPath); } catch (IOException ex) { throw new SnowflakeSQLLoggedException( sfSession, ErrorCode.INTERNAL_ERROR, String.format( "Unable to create log path mentioned in configfile %s ,%s", logPathFromConfig, ex.getMessage())); } } } else { // Get log path from home directory String homePath = systemGetProperty("user.home"); if (homePath == null || homePath.isEmpty()) { throw new SnowflakeSQLLoggedException( sfSession, ErrorCode.INTERNAL_ERROR, String.format( "Log path not set in configfile %s and home directory not set.", logPathFromConfig)); } logPath = Paths.get(homePath); } return createLogPathSubDirectory(logPath); } private String constructLogPattern(Path logPath, String logPathFromConfig) { if (JDK14Logger.STDOUT.equalsIgnoreCase(logPathFromConfig)) { return JDK14Logger.STDOUT; } String logPattern = "%t/snowflake_jdbc%u.log"; // java.tmpdir logPattern = Paths.get(logPath.toString(), "snowflake_jdbc%u.log").toString(); return logPattern; } private Path createLogPathSubDirectory(Path logPath) throws SnowflakeSQLLoggedException { Path path = Paths.get(logPath.toString(), "jdbc"); if (!Files.exists(path)) { createLogFolder(path); } return path; } private void createLogFolder(Path path) throws SnowflakeSQLLoggedException { try { if (Constants.getOS() == Constants.OS.WINDOWS)
{ Files.createDirectories(path); } else { Files.createDirectories( path, PosixFilePermissions.asFileAttribute(PosixFilePermissions.fromString("rwx------"))); } } catch (IOException ex) { throw new SnowflakeSQLLoggedException( sfSession, ErrorCode.INTERNAL_ERROR, String.format( "Unable to create jdbc subfolder in configfile %s, %s, %s", path.toString(), ex.getMessage(), ex.getCause())); } } private void checkLogFolderPermissions(Path path) throws SnowflakeSQLLoggedException { if (!isWindows()) { try { Set<PosixFilePermission> folderPermissions = Files.getPosixFilePermissions(path); if (folderPermissions.contains(PosixFilePermission.GROUP_WRITE) || folderPermissions.contains(PosixFilePermission.GROUP_READ) || folderPermissions.contains(PosixFilePermission.GROUP_EXECUTE) || folderPermissions.contains(PosixFilePermission.OTHERS_WRITE) || folderPermissions.contains(PosixFilePermission.OTHERS_READ) || folderPermissions.contains(PosixFilePermission.OTHERS_EXECUTE)) { logger.warn( "Access permission for the logs directory {} is currently {} and is potentially " + "accessible to users other than the owner of the logs directory.", path.toString(), folderPermissions.toString()); } } catch (IOException ex) { throw new SnowflakeSQLLoggedException( sfSession, ErrorCode.INTERNAL_ERROR, String.format( "Unable to get permissions of log directory %s, %s, %s", path.toString(), ex.getMessage(), ex.getCause())); } } } private void initSessionProperties(SnowflakeConnectString conStr, String appID, String appVersion) throws SFException { Map<String, Object> properties = mergeProperties(conStr); for (Map.Entry<String, Object> property : properties.entrySet()) { if ("CLIENT_SESSION_KEEP_ALIVE_HEARTBEAT_FREQUENCY".equals(property.getKey())) { try { Object v0 = property.getValue(); int intV; if (v0 instanceof Integer) { intV = (Integer) v0; } else { intV = Integer.parseInt((String) v0); } if (intV > 3600) { properties.replace(property.getKey(), "3600"); } if (intV < 900) { properties.replace(property.getKey(), "900"); } } catch (NumberFormatException ex) { logger.warn( "Invalid data type for CLIENT_SESSION_KEEP_ALIVE_HEARTBEAT_FREQUENCY: {}", property.getValue()); continue; } } else if (CLIENT_SFSQL.equals(property.getKey())) { Object v0 = property.getValue(); boolean booleanV = v0 instanceof Boolean ? (Boolean) v0 : Boolean.parseBoolean((String) v0); sfSession.setSfSQLMode(booleanV); } sfSession.addSFSessionProperty(property.getKey(), property.getValue()); } sfSession.overrideConsoleHandlerWhenNecessary(); // populate app id and version sfSession.addProperty(SFSessionProperty.APP_ID, appID); sfSession.addProperty(SFSessionProperty.APP_VERSION, appVersion); // Set the corresponding session parameters to the JVM properties for (Map.Entry<String, SFSessionProperty> entry : JVM_PARAMS_TO_PARAMS.entrySet()) { String value = systemGetProperty(entry.getKey()); if (value != null && !sfSession.containProperty(entry.getValue())) { sfSession.addSFSessionProperty(entry.getValue(), value); } } }
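// Editor's illustrative sketch, not part of the original source: the effective clamp that
// initSessionProperties above applies to CLIENT_SESSION_KEEP_ALIVE_HEARTBEAT_FREQUENCY,
// expressed as a single function. Values always land in the range [900, 3600] seconds;
// for example, 60 is raised to 900 and 7200 is lowered to 3600.
private static int clampHeartbeatFrequencyExample(int requestedSeconds) {
  return Math.max(900, Math.min(3600, requestedSeconds));
}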
@Override public ResultSet createResultSet(String queryID, Statement statement) throws SQLException { SFAsyncResultSet rs = new SFAsyncResultSet(queryID, statement); rs.setSession(sfSession); rs.setStatement(statement); return rs; } @Override public SnowflakeBaseResultSet createResultSet(SFBaseResultSet resultSet, Statement statement) throws SQLException { return new SnowflakeResultSetV1(resultSet, statement); } @Override public SnowflakeBaseResultSet createAsyncResultSet(SFBaseResultSet resultSet, Statement statement) throws SQLException { return new SFAsyncResultSet(resultSet, statement); } @Override public SFBaseFileTransferAgent getFileTransferAgent(String command, SFBaseStatement statement) throws SQLNonTransientConnectionException, SnowflakeSQLException { if (!(statement instanceof SFStatement)) { throw new SnowflakeSQLException( "getFileTransferAgent() called with an incompatible SFBaseStatement type. Requires an" + " SFStatement."); } return new SnowflakeFileTransferAgent( command, sfSession, (SFStatement) statement, internalCallMarker()); } private void initHttpHeaderCustomizers(Properties properties) { if (properties == null) { return; } Object httpHeadersCustomizers = properties.get(HttpHeadersCustomizer.HTTP_HEADER_CUSTOMIZERS_PROPERTY_KEY); if (httpHeadersCustomizers instanceof List) { List<HttpHeadersCustomizer> typedCustomizers = new ArrayList<>(); for (Object customizer : (List<?>) httpHeadersCustomizers) { if (customizer instanceof HttpHeadersCustomizer) { typedCustomizers.add((HttpHeadersCustomizer) customizer); } else if (customizer != null) { logger.warn( "Invalid object type found in HttpHeadersCustomizer list: {}", customizer.getClass().getName()); } } logger.debug("Registering {} HttpHeadersCustomizer", typedCustomizers.size()); this.sfSession.setHttpHeadersCustomizers(typedCustomizers); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/EnvironmentVariables.java ================================================ package net.snowflake.client.internal.jdbc; public enum EnvironmentVariables { AWS_REGION("AWS_REGION"); private final String name; EnvironmentVariables(String name) { this.name = name; } public String getName() { return name; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/FileBackedOutputStream.java ================================================ /* * Copyright (C) 2008 The Guava Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package net.snowflake.client.internal.jdbc; import com.google.common.annotations.Beta; import com.google.common.io.ByteSource; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import net.snowflake.client.internal.core.FileUtil; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; /** * An {@link OutputStream} that starts buffering to a byte array, but switches to file buffering * once the data reaches a configurable size. * *

This class is thread-safe. * *

Adapted by Snowflake to return File object when file is spilled to disk. * * @author Chris Nokleberg * @since 1.0 */ @Beta public final class FileBackedOutputStream extends OutputStream { private static final SFLogger logger = SFLoggerFactory.getLogger(FileBackedOutputStream.class); private final int fileThreshold; private final boolean resetOnFinalize; private final ByteSource source; private OutputStream out; private MemoryOutput memory; private File file; /** ByteArrayOutputStream that exposes its internals. */ private static class MemoryOutput extends ByteArrayOutputStream { byte[] getBuffer() { return buf; } int getCount() { return count; } } /** * @return the file holding the data (possibly null). */ public synchronized File getFile() { return file; } /** * Creates a new instance that uses the given file threshold, and does not reset the data when the * {@link ByteSource} returned by {@link #asByteSource} is finalized. * * @param fileThreshold the number of bytes before the stream should switch to buffering to a file */ public FileBackedOutputStream(int fileThreshold) { this(fileThreshold, false); } /** * Creates a new instance that uses the given file threshold, and optionally resets the data when * the {@link ByteSource} returned by {@link #asByteSource} is finalized. * * @param fileThreshold the number of bytes before the stream should switch to buffering to a file * @param resetOnFinalize if true, the {@link #reset} method will be called when the {@link * ByteSource} returned by {@link #asByteSource} is finalized */ public FileBackedOutputStream(int fileThreshold, boolean resetOnFinalize) { this.fileThreshold = fileThreshold; this.resetOnFinalize = resetOnFinalize; memory = new MemoryOutput(); out = memory; if (resetOnFinalize) { source = new ByteSource() { @Override public InputStream openStream() throws IOException { return openInputStream(); } @Override protected void finalize() { try { reset(); } catch (Throwable t) { logger.error("Exception occurred on finalize", t); } } }; } else { source = new ByteSource() { @Override public InputStream openStream() throws IOException { return openInputStream(); } }; } } /** * @return a readable {@link ByteSource} view of the data that has been written to this stream. * @since 15.0 */ public ByteSource asByteSource() { return source; } private synchronized InputStream openInputStream() throws IOException { if (file != null) { FileUtil.logFileUsage(file, "Data buffering stream", false); return new FileInputStream(file); } else { return new ByteArrayInputStream(memory.getBuffer(), 0, memory.getCount()); } } /** * Calls {@link #close} if not already closed, and then resets this object back to its initial * state, for reuse. If data was buffered to a file, it will be deleted. 
* * @throws IOException if an I/O error occurred while deleting the file buffer */ public synchronized void reset() throws IOException { try { close(); } finally { if (memory == null) { memory = new MemoryOutput(); } else { memory.reset(); } out = memory; if (file != null) { File deleteMe = file; file = null; if (!deleteMe.delete()) { throw new IOException("Could not delete: " + deleteMe); } } } } @Override public synchronized void write(int b) throws IOException { update(1); out.write(b); } @Override public synchronized void write(byte[] b) throws IOException { write(b, 0, b.length); } @Override public synchronized void write(byte[] b, int off, int len) throws IOException { update(len); out.write(b, off, len); } @Override public synchronized void close() throws IOException { out.close(); } @Override public synchronized void flush() throws IOException { out.flush(); } /** * Checks if writing {@code len} bytes would go over the threshold, and switches to file buffering if * so. */ private void update(int len) throws IOException { if (file == null && (memory.getCount() + len > fileThreshold)) { File temp = File.createTempFile("FileBackedOutputStream", null); if (resetOnFinalize) { // Finalizers are not guaranteed to be called on system shutdown; // this is insurance. temp.deleteOnExit(); } FileOutputStream transfer = new FileOutputStream(temp); transfer.write(memory.getBuffer(), 0, memory.getCount()); transfer.flush(); // We've successfully transferred the data; switch to writing to file out = transfer; file = temp; memory = null; } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/JsonResultChunk.java ================================================ package net.snowflake.client.internal.jdbc; import com.fasterxml.jackson.databind.JsonNode; import java.lang.ref.SoftReference; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.BitSet; import java.util.LinkedList; import java.util.List; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.core.SFBaseSession; import net.snowflake.client.internal.exception.SnowflakeSQLLoggedException; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.common.core.SqlState; public class JsonResultChunk extends SnowflakeResultChunk { private static final SFLogger logger = SFLoggerFactory.getLogger(JsonResultChunk.class); private ResultChunkData data; private int currentRow; private SFBaseSession session; public JsonResultChunk( String url, int rowCount, int colCount, int uncompressedSize, SFBaseSession session) { super(url, rowCount, colCount, uncompressedSize); data = new BlockResultChunkDataV2(computeCharactersNeeded(), rowCount, colCount, session); this.session = session; } public static Object extractCell(JsonNode resultData, int rowIdx, int colIdx) { JsonNode currentRow = resultData.get(rowIdx); JsonNode colNode = currentRow.get(colIdx); if (colNode.isTextual()) { return colNode.asText(); } else if (colNode.isNumber()) { return colNode.numberValue(); } else if (colNode.isNull()) { return null; } throw new RuntimeException("Unknown json type"); } public void tryReuse(ResultChunkDataCache cache) { // Allocate chunk data, double necessary amount for later reuse cache.reuseOrCreateResultData(data); } /** * Returns the value of the given cell. * * @param rowIdx zero based row * @param colIdx
zero based column * @return the cell value (a String, or null for SQL NULL) */ public final Object getCell(int rowIdx, int colIdx) { return data.get(colCount * rowIdx + colIdx); } public final void addRow(Object[] row) throws SnowflakeSQLException { if (row.length != colCount) { throw new SnowflakeSQLLoggedException( this.session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "Exception: expected " + colCount + " columns and received " + row.length); } for (Object cell : row) { if (cell == null) { data.add(null); } else { if (cell instanceof String) { data.add((String) cell); } else if (cell instanceof Boolean) { data.add((boolean) cell ? "1" : "0"); } else { throw new SnowflakeSQLLoggedException( this.session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "unknown data type in JSON row " + cell.getClass().toString()); } } } currentRow++; } /** * Checks that all data has been added after parsing. * * @throws SnowflakeSQLException when rows are not all downloaded */ public final void ensureRowsComplete() throws SnowflakeSQLException { // Check that all the rows have been decoded, raise an error if not if (rowCount != currentRow) { throw new SnowflakeSQLException( SqlState.INTERNAL_ERROR, ErrorCode.INTERNAL_ERROR.getMessageCode(), "Exception: expected " + rowCount + " rows and received " + currentRow); } } @Override public void reset() { this.currentRow = 0; this.data.reset(); } /** * Compute the memory necessary to store the data of this chunk * * @return necessary memory in bytes */ @Override public final long computeNeededChunkMemory() { if (data != null) { return data.computeNeededChunkMemory(); } return 0; } @Override public final void freeData() { if (data != null) { data.freeData(); } } public int computeCharactersNeeded() { // remove [ , ] characters, they won't be stored return uncompressedSize - (rowCount * 2) // opening [ and comma separating rows - (rowCount * colCount); // comma separating cells and closing ] } public void addOffset(int offset) throws SnowflakeSQLException { data.addOffset(offset); } public void setIsNull() throws SnowflakeSQLException { data.setIsNull(); } public void setLastLength(int len) throws SnowflakeSQLException { data.setLastLength(len); } public void nextIndex() throws SnowflakeSQLException { data.nextIndex(); } public byte get(int offset) throws SnowflakeSQLException { return data.getByte(offset); } public void addByte(byte b, int pos) throws SnowflakeSQLException { data.addByte(b, pos); } public void addBytes(byte[] src, int offset, int pos, int length) throws SnowflakeSQLException { data.addBytes(src, offset, pos, length); } /** * This class abstracts the storage of the strings in one chunk. To the user the class behaves * similarly to an ArrayList.
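 * <p>Conceptually (an illustrative sketch; the flat index is {@code row * colCount + col}):
 *
 * <pre>{@code
 * chunkData.add("42");          // append the next cell value
 * String v = chunkData.get(0);  // read it back by flat index
 * }</pre>
 *
 * <p>Note that {@code BlockResultChunkDataV2} below does not support {@code add(String)} (it
 * throws); that implementation is populated through the byte-oriented methods instead.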
*/ private interface ResultChunkData { /** * Add the string to the data list * * @param string value to add */ void add(String string) throws SnowflakeSQLException; /** * Access an element by an index * * @param index determines the element * @return String containing the same data as the one passed to add() */ String get(int index); /** * Compute the necessary memory to store this chunk * * @return memory in bytes */ long computeNeededChunkMemory(); /** Let GC collect the memory */ void freeData(); /** * add offset into offset buffer * * @param offset * @throws SnowflakeSQLException */ void addOffset(int offset) throws SnowflakeSQLException; /** * label current value as null * * @throws SnowflakeSQLException */ void setIsNull() throws SnowflakeSQLException; /** * increase index * * @throws SnowflakeSQLException */ void nextIndex() throws SnowflakeSQLException; /** * set the last length * * @param len * @throws SnowflakeSQLException */ void setLastLength(int len) throws SnowflakeSQLException; /** * get one byte from the byte array * * @param offset * @return * @throws SnowflakeSQLException */ byte getByte(int offset) throws SnowflakeSQLException; /** * add one byte to the byte array at nextIndex * * @param b * @param pos * @throws SnowflakeSQLException */ void addByte(byte b, int pos) throws SnowflakeSQLException; /** * add bytes to the byte array * * @param src * @param src_offset * @param pos * @param length * @throws SnowflakeSQLException */ void addBytes(byte[] src, int src_offset, int pos, int length) throws SnowflakeSQLException; void reset(); } /** * BlockResultChunkDataV2: This implementation copies the strings to byte arrays and stores the * offsets and bitmaps. This design can save half of the memory usage compared to the original one */ private static class BlockResultChunkDataV2 implements ResultChunkData { BlockResultChunkDataV2(int totalLength, int rowCount, int colCount, SFBaseSession session) { this.blockCount = getBlock(totalLength - 1) + 1; this.rowCount = rowCount; this.colCount = colCount; this.metaBlockCount = getMetaBlock(this.rowCount * this.colCount - 1) + 1; this.session = session; } @Override public void reset() { freeData(); this.lastLength = 0; this.nextIndex = 0; } @Override public void addOffset(int offset) { if (data.size() < blockCount || offsets.size() < metaBlockCount) { allocateArrays(); } offsets.get(getMetaBlock(nextIndex))[getMetaBlockIndex(nextIndex)] = offset; } @Override public void setIsNull() { isNulls.get(getMetaBlock(nextIndex)).set(getMetaBlockIndex(nextIndex)); } @Override public void setLastLength(int len) { lastLength = len; } @Override public byte getByte(int offset) { return data.get(getBlock(offset))[getBlockOffset(offset)]; } @Override public void addByte(byte b, int pos) { if (data.size() < blockCount || offsets.size() < metaBlockCount) { allocateArrays(); } data.get(getBlock(pos))[getBlockOffset(pos)] = b; } @Override public void addBytes(byte[] src, int src_offset, int pos, int length) { if (data.size() < blockCount || offsets.size() < metaBlockCount) { allocateArrays(); } final int offset = pos; // copy string to the char array int copied = 0; if (spaceLeftOnBlock(offset) < length) { while (copied < length) { final int copySize = Math.min(length - copied, spaceLeftOnBlock(offset + copied)); System.arraycopy( src, src_offset + copied, data.get(getBlock(offset + copied)), getBlockOffset(offset + copied), copySize); copied += copySize; } } else { System.arraycopy( src, src_offset, data.get(getBlock(offset)), getBlockOffset(offset), 
length); } } @Override public void nextIndex() { nextIndex++; } @Override public void add(String string) throws SnowflakeSQLException { throw new SnowflakeSQLLoggedException( this.session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "Unimplemented"); } private int getLength(int index, int offset) { if (index == rowCount * colCount - 1) { // last one return lastLength; } else { int nextOffset = offsets.get(getMetaBlock(index + 1))[getMetaBlockIndex(index + 1)]; return nextOffset - offset; } } @Override public String get(int index) { final boolean isNull = isNulls.get(getMetaBlock(index)).get(getMetaBlockIndex(index)); if (isNull) { return null; } else { final int offset = offsets.get(getMetaBlock(index))[getMetaBlockIndex(index)]; final int length = getLength(index, offset); // Create string from the char arrays if (spaceLeftOnBlock(offset) < length) { int copied = 0; byte[] cell = new byte[length]; while (copied < length) { final int copySize = Math.min(length - copied, spaceLeftOnBlock(offset + copied)); System.arraycopy( data.get(getBlock(offset + copied)), getBlockOffset(offset + copied), cell, copied, copySize); copied += copySize; } return new String(cell, StandardCharsets.UTF_8); } else { return new String( data.get(getBlock(offset)), getBlockOffset(offset), length, StandardCharsets.UTF_8); } } } @Override public long computeNeededChunkMemory() { long dataRequirement = blockCount * blockLength * 1L; long metadataRequirement = metaBlockCount * metaBlockLength * 4L // offsets + metaBlockCount * metaBlockLength / 8L // isNulls + 1L; // lastLength return dataRequirement + metadataRequirement; } @Override public void freeData() { data.clear(); offsets.clear(); isNulls.clear(); } private static int getBlock(int offset) { return offset >> blockLengthBits; } private static int getBlockOffset(int offset) { return offset & (blockLength - 1); } private static int spaceLeftOnBlock(int offset) { return blockLength - getBlockOffset(offset); } private static int getMetaBlock(int index) { return index >> metaBlockLengthBits; } private static int getMetaBlockIndex(int index) { return index & (metaBlockLength - 1); } private void allocateArrays() { logger.debug("allocating {} B for ResultChunk", computeNeededChunkMemory()); while (data.size() < blockCount) { data.add(new byte[1 << blockLengthBits]); } while (offsets.size() < metaBlockCount) { offsets.add(new int[1 << metaBlockLengthBits]); isNulls.add(new BitSet(1 << metaBlockLengthBits)); } logger.debug("allocated {} B for ResultChunk", computeNeededChunkMemory()); } // blocks for storing the string data int blockCount; private static final int blockLengthBits = 23; private static int blockLength = 1 << blockLengthBits; private final ArrayList<byte[]> data = new ArrayList<>(); SFBaseSession session; // blocks for storing offsets and lengths int metaBlockCount; private static int metaBlockLengthBits = 15; private static int metaBlockLength = 1 << metaBlockLengthBits; private final ArrayList<int[]> offsets = new ArrayList<>(); private final ArrayList<BitSet> isNulls = new ArrayList<>(); private int lastLength; private int rowCount, colCount; private int nextIndex = 0; } /** Cache the data, offset and length blocks */ static class ResultChunkDataCache { /** * Add the data to the cache. 
CAUTION: The result chunk is not usable afterward * * @param chunk add this to the cache */ void add(JsonResultChunk chunk) { cache.add(new SoftReference<>(chunk.data)); chunk.data = null; } /** * Creates a new ResultChunkData which reuses as many blocks as possible * * @param data fill this with reused blocks */ void reuseOrCreateResultData(ResultChunkData data) { List<SoftReference<ResultChunkData>> remove = new ArrayList<>(); try { for (SoftReference<ResultChunkData> ref : cache) { ResultChunkData dat = ref.get(); if (dat == null) { remove.add(ref); continue; } if (dat instanceof BlockResultChunkDataV2) { BlockResultChunkDataV2 bTargetData = (BlockResultChunkDataV2) data; BlockResultChunkDataV2 bCachedDat = (BlockResultChunkDataV2) dat; if (bCachedDat.data.size() == 0 && bCachedDat.offsets.size() == 0) { remove.add(ref); continue; } while (bTargetData.data.size() < bTargetData.blockCount && bCachedDat.data.size() > 0) { bTargetData.data.add(bCachedDat.data.remove(bCachedDat.data.size() - 1)); } while (bTargetData.offsets.size() < bTargetData.metaBlockCount && bCachedDat.offsets.size() > 0) { bTargetData.offsets.add(bCachedDat.offsets.remove(bCachedDat.offsets.size() - 1)); BitSet isNulls = bCachedDat.isNulls.remove(bCachedDat.isNulls.size() - 1); isNulls.clear(); // SNOW-80208 have to clear isNulls explicitly bTargetData.isNulls.add(isNulls); } if (bTargetData.data.size() == bTargetData.blockCount && bTargetData.offsets.size() == bTargetData.metaBlockCount) { return; } } else { remove.add(ref); } } } finally { cache.removeAll(remove); } } /** Let GC collect all data held by the cache */ void clear() { for (SoftReference<ResultChunkData> ref : cache) { ResultChunkData dat = ref.get(); if (dat != null) { dat.freeData(); } } cache.clear(); } List<SoftReference<ResultChunkData>> cache = new LinkedList<>(); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/MatDesc.java ================================================ package net.snowflake.client.internal.jdbc; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ObjectNode; import net.snowflake.client.internal.core.ObjectMapperFactory; /** A class to handle S3 material descriptor metadata entries (matdesc). */ public class MatDesc { /** MatDesc key for query ID */ public static String QUERY_ID = "queryId"; /** MatDesc key for stage master key ID */ public static String SMK_ID = "smkId"; /** MatDesc key for the length of the key in bits */ public static String KEY_SIZE = "keySize"; /** If key size is not explicitly specified, assume DEFAULT_KEY_SIZE */ public static int DEFAULT_KEY_SIZE = 256; /** The JSON parser for matdesc entries */ private static final ObjectMapper mapper = ObjectMapperFactory.getObjectMapper(); /** The Stage Master Key ID */ private final long smkId; /** The query ID */ private final String queryId; /** The key length in bits */ private final int keySize; public MatDesc(long smkId, String queryId, int keySize) { this.smkId = smkId; this.queryId = queryId; this.keySize = keySize; } public MatDesc(long smkId, String queryId) { this(smkId, queryId, DEFAULT_KEY_SIZE); } public long getSmkId() { return this.smkId; } public String getQueryId() { return this.queryId; } public int getKeySize() { return this.keySize; } /** * Try to parse the material descriptor string. 
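 * <p>For example (illustrative values):
 *
 * <pre>{@code
 * MatDesc md = MatDesc.parse("{\"queryId\":\"abc\",\"smkId\":\"1234\",\"keySize\":\"256\"}");
 * // md.getSmkId() == 1234L, "abc".equals(md.getQueryId()), md.getKeySize() == 256
 * MatDesc bad = MatDesc.parse("{\"queryId\":\"abc\"}"); // null: smkId is required
 * }</pre>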
* * @param matdesc string * @return The material description or null */ public static MatDesc parse(String matdesc) { if (matdesc == null) { return null; } try { JsonNode jsonNode = mapper.readTree(matdesc); JsonNode queryIdNode = jsonNode.path(QUERY_ID); if (queryIdNode.isMissingNode() || queryIdNode.isNull()) { return null; } JsonNode smkIdNode = jsonNode.path(SMK_ID); if (smkIdNode.isMissingNode() || smkIdNode.isNull()) { return null; } String queryId = queryIdNode.asText(); long smkId = smkIdNode.asLong(); JsonNode keySizeNode = jsonNode.path(KEY_SIZE); if (!keySizeNode.isMissingNode() && !keySizeNode.isNull()) { return new MatDesc(smkId, queryId, keySizeNode.asInt()); } return new MatDesc(smkId, queryId); } catch (Exception ex) { return null; } } @Override public String toString() { ObjectNode obj = mapper.createObjectNode(); obj.put(QUERY_ID, this.queryId); obj.put(SMK_ID, Long.toString(this.smkId)); obj.put(KEY_SIZE, Integer.toString(this.keySize)); return obj.toString(); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/OCSPErrorCode.java ================================================ package net.snowflake.client.internal.jdbc; public enum OCSPErrorCode { CERTIFICATE_STATUS_GOOD, CERTIFICATE_STATUS_REVOKED, CERTIFICATE_STATUS_UNKNOWN, OCSP_CACHE_DOWNLOAD_TIMEOUT, OCSP_RESPONSE_FETCH_TIMEOUT, OCSP_RESPONSE_FETCH_FAILURE, INVALID_CACHE_SERVER_URL, EXPIRED_OCSP_SIGNING_CERTIFICATE, INVALID_CERTIFICATE_SIGNATURE, INVALID_OCSP_RESPONSE_SIGNATURE, INVALID_OCSP_RESPONSE_VALIDITY, INVALID_OCSP_RESPONSE, REVOCATION_CHECK_FAILURE, INVALID_SSD, NO_OCSP_URL_ATTACHED, NO_ROOTCA_FOUND } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/QueryIdValidator.java ================================================ package net.snowflake.client.internal.jdbc; import java.util.regex.Pattern; public class QueryIdValidator { private static final Pattern QUERY_ID_REGEX = Pattern.compile("[a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12}"); public static boolean isValid(String queryId) { return queryId != null && QUERY_ID_REGEX.matcher(queryId).matches(); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/RestRequest.java ================================================ package net.snowflake.client.internal.jdbc; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetEnv; import static net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.internalCallMarker; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ObjectNode; import java.io.EOFException; import java.io.IOException; import java.io.InputStream; import java.io.PrintWriter; import java.io.StringWriter; import java.net.URISyntaxException; import java.util.Arrays; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Function; import javax.net.ssl.SSLHandshakeException; import javax.net.ssl.SSLKeyException; import javax.net.ssl.SSLPeerUnverifiedException; import javax.net.ssl.SSLProtocolException; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import 
net.snowflake.client.api.http.HttpHeadersCustomizer; import net.snowflake.client.internal.core.Event; import net.snowflake.client.internal.core.EventUtil; import net.snowflake.client.internal.core.HttpClientSettingsKey; import net.snowflake.client.internal.core.HttpExecutingContext; import net.snowflake.client.internal.core.HttpExecutingContextBuilder; import net.snowflake.client.internal.core.HttpResponseContextDto; import net.snowflake.client.internal.core.HttpUtil; import net.snowflake.client.internal.core.ObjectMapperFactory; import net.snowflake.client.internal.core.SFBaseSession; import net.snowflake.client.internal.core.SFOCSPException; import net.snowflake.client.internal.core.SessionUtil; import net.snowflake.client.internal.core.URLUtil; import net.snowflake.client.internal.core.UUIDUtils; import net.snowflake.client.internal.exception.SnowflakeSQLLoggedException; import net.snowflake.client.internal.jdbc.telemetry.ExecTimeTelemetryData; import net.snowflake.client.internal.jdbc.telemetry.TelemetryData; import net.snowflake.client.internal.jdbc.telemetry.TelemetryField; import net.snowflake.client.internal.jdbc.telemetry.TelemetryUtil; import net.snowflake.client.internal.jdbc.telemetryOOB.TelemetryService; import net.snowflake.client.internal.log.ArgSupplier; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.client.internal.util.DecorrelatedJitterBackoff; import net.snowflake.client.internal.util.SecretDetector; import net.snowflake.client.internal.util.Stopwatch; import net.snowflake.common.core.SqlState; import org.apache.commons.io.IOUtils; import org.apache.http.StatusLine; import org.apache.http.client.methods.CloseableHttpResponse; import org.apache.http.client.methods.HttpRequestBase; import org.apache.http.client.utils.URIBuilder; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.util.EntityUtils; /** * This is an abstraction on top of http client. * *
<p>
Currently it only has one method for retrying http request execution so that the same logic * doesn't have to be replicated at difference places where retry is needed. */ public class RestRequest { private static final SFLogger logger = SFLoggerFactory.getLogger(RestRequest.class); // Request guid per HTTP request private static final String SF_REQUEST_GUID = "request_guid"; // min backoff in milli before we retry due to transient issues private static final long minBackoffInMilli = 1000; // max backoff in milli before we retry due to transient issues // we double the backoff after each retry till we reach the max backoff private static final long maxBackoffInMilli = 16000; // retry at least once even if timeout limit has been reached private static final int MIN_RETRY_COUNT = 1; static final String ERROR_FIELD_NAME = "error"; static final String ERROR_USE_DPOP_NONCE = "use_dpop_nonce"; static final String DPOP_NONCE_HEADER_NAME = "dpop-nonce"; static final Set> sslExceptions = new HashSet<>( Arrays.asList( SSLHandshakeException.class, SSLKeyException.class, SSLPeerUnverifiedException.class, SSLProtocolException.class)); /** * Execute an HTTP request with retry logic. * * @param httpClient client object used to communicate with other machine * @param httpRequest request object contains all the request information * @param retryTimeout : retry timeout (in seconds) * @param authTimeout : authenticator specific timeout (in seconds) * @param socketTimeout : curl timeout (in ms) * @param maxRetries : max retry count for the request * @param injectSocketTimeout : simulate socket timeout * @param canceling canceling flag * @param withoutCookies whether the cookie spec should be set to IGNORE or not * @param includeRetryParameters whether to include retry parameters in retried requests. Only * needs to be true for JDBC statement execution (query requests to Snowflake server). * @param includeSnowflakeHeaders whether to include Snowflake headers (incl. request_guid) * @param retryHTTP403 whether to retry on HTTP 403 or not should be executed before and/or after * the retry * @return HttpResponse Object get from server * @throws net.snowflake.client.api.exception.SnowflakeSQLException Request timeout Exception or * Illegal State Exception i.e. connection is already shutdown etc */ @Deprecated public static CloseableHttpResponse execute( CloseableHttpClient httpClient, HttpRequestBase httpRequest, long retryTimeout, long authTimeout, int socketTimeout, int maxRetries, int injectSocketTimeout, AtomicBoolean canceling, boolean withoutCookies, boolean includeRetryParameters, boolean includeSnowflakeHeaders, boolean retryHTTP403, ExecTimeTelemetryData execTimeTelemetryData) throws SnowflakeSQLException { return execute( httpClient, httpRequest, retryTimeout, authTimeout, socketTimeout, maxRetries, injectSocketTimeout, canceling, withoutCookies, includeRetryParameters, includeSnowflakeHeaders, retryHTTP403, execTimeTelemetryData, (SFBaseSession) null); } /** * Execute an HTTP request with retry logic. 
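 * <p>With the constants above, the backoff starts at the 1000 ms minimum and is intended to
 * double after each retry until it reaches the 16000 ms cap, so the nominal sequence is
 * roughly 1 s, 2 s, 4 s, 8 s, 16 s, 16 s, ...; the actual delay is jittered (see {@code
 * DecorrelatedJitterBackoff} and {@code getNewBackoffInMilli}) and the loop stops once the
 * retry timeout or the max retry count is exhausted.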
* * @param httpClient client object used to communicate with other machine * @param httpRequest request object contains all the request information * @param retryTimeout : retry timeout (in seconds) * @param authTimeout : authenticator specific timeout (in seconds) * @param socketTimeout : curl timeout (in ms) * @param maxRetries : max retry count for the request * @param injectSocketTimeout : simulate socket timeout * @param canceling canceling flag * @param withoutCookies whether the cookie spec should be set to IGNORE or not * @param includeRetryParameters whether to include retry parameters in retried requests. Only * needs to be true for JDBC statement execution (query requests to Snowflake server). * @param includeSnowflakeHeaders whether to include Snowflake headers (incl. request_guid) * @param retryHTTP403 whether to retry on HTTP 403 or not should be executed before and/or after * the retry * @param sfSession the session associated with the request * @return HttpResponse Object get from server * @throws net.snowflake.client.api.exception.SnowflakeSQLException Request timeout Exception or * Illegal State Exception i.e. connection is already shutdown etc */ public static CloseableHttpResponse execute( CloseableHttpClient httpClient, HttpRequestBase httpRequest, long retryTimeout, long authTimeout, int socketTimeout, int maxRetries, int injectSocketTimeout, AtomicBoolean canceling, boolean withoutCookies, boolean includeRetryParameters, boolean includeSnowflakeHeaders, boolean retryHTTP403, ExecTimeTelemetryData execTimeTelemetryData, SFBaseSession sfSession) throws SnowflakeSQLException { return execute( httpClient, httpRequest, retryTimeout, authTimeout, socketTimeout, maxRetries, injectSocketTimeout, canceling, withoutCookies, includeRetryParameters, includeSnowflakeHeaders, retryHTTP403, false, // noRetry execTimeTelemetryData, null, sfSession, null, null, false); } /** * Execute an HTTP request with retry logic. * * @param httpClient client object used to communicate with other machine * @param httpRequest request object contains all the request information * @param retryTimeout : retry timeout (in seconds) * @param authTimeout : authenticator specific timeout (in seconds) * @param socketTimeout : curl timeout (in ms) * @param maxRetries : max retry count for the request * @param injectSocketTimeout : simulate socket timeout * @param canceling canceling flag * @param withoutCookies whether the cookie spec should be set to IGNORE or not * @param includeRetryParameters whether to include retry parameters in retried requests. Only * needs to be true for JDBC statement execution (query requests to Snowflake server). * @param includeSnowflakeHeaders whether to include Snowflake headers (incl. request_guid) * @param retryHTTP403 whether to retry on HTTP 403 or not should be executed before and/or after * the retry * @return HttpResponse Object get from server * @throws net.snowflake.client.api.exception.SnowflakeSQLException Request timeout Exception or * Illegal State Exception i.e. 
connection is already shutdown etc */ @Deprecated public static CloseableHttpResponse execute( CloseableHttpClient httpClient, HttpRequestBase httpRequest, long retryTimeout, long authTimeout, int socketTimeout, int maxRetries, int injectSocketTimeout, AtomicBoolean canceling, boolean withoutCookies, boolean includeRetryParameters, boolean includeSnowflakeHeaders, boolean retryHTTP403, boolean noRetry, ExecTimeTelemetryData execTimeData) throws SnowflakeSQLException { return execute( httpClient, httpRequest, retryTimeout, authTimeout, socketTimeout, maxRetries, injectSocketTimeout, canceling, withoutCookies, includeRetryParameters, includeSnowflakeHeaders, retryHTTP403, noRetry, execTimeData, (SFBaseSession) null); } /** * Execute an HTTP request with retry logic. * * @param httpClient client object used to communicate with other machine * @param httpRequest request object contains all the request information * @param retryTimeout : retry timeout (in seconds) * @param authTimeout : authenticator specific timeout (in seconds) * @param socketTimeout : curl timeout (in ms) * @param maxRetries : max retry count for the request * @param injectSocketTimeout : simulate socket timeout * @param canceling canceling flag * @param withoutCookies whether the cookie spec should be set to IGNORE or not * @param includeRetryParameters whether to include retry parameters in retried requests. Only * needs to be true for JDBC statement execution (query requests to Snowflake server). * @param includeSnowflakeHeaders whether to include Snowflake headers (incl. request_guid) * @param retryHTTP403 whether to retry on HTTP 403 or not should be executed before and/or after * the retry * @param sfSession the session associated with the request * @return HttpResponse Object get from server * @throws net.snowflake.client.api.exception.SnowflakeSQLException Request timeout Exception or * Illegal State Exception i.e. connection is already shutdown etc */ public static CloseableHttpResponse execute( CloseableHttpClient httpClient, HttpRequestBase httpRequest, long retryTimeout, long authTimeout, int socketTimeout, int maxRetries, int injectSocketTimeout, AtomicBoolean canceling, boolean withoutCookies, boolean includeRetryParameters, boolean includeSnowflakeHeaders, boolean retryHTTP403, boolean noRetry, ExecTimeTelemetryData execTimeData, SFBaseSession sfSession) throws SnowflakeSQLException { return execute( httpClient, httpRequest, retryTimeout, authTimeout, socketTimeout, maxRetries, injectSocketTimeout, canceling, withoutCookies, includeRetryParameters, includeSnowflakeHeaders, retryHTTP403, noRetry, execTimeData, null, sfSession, null, null, false); } /** * Execute an HTTP request with retry logic. * * @param httpClient client object used to communicate with other machine * @param httpRequest request object contains all the request information * @param retryTimeout : retry timeout (in seconds) * @param authTimeout : authenticator specific timeout (in seconds) * @param socketTimeout : curl timeout (in ms) * @param maxRetries : max retry count for the request * @param injectSocketTimeout : simulate socket timeout * @param canceling canceling flag * @param withoutCookies whether the cookie spec should be set to IGNORE or not * @param includeRetryParameters whether to include retry parameters in retried requests. Only * needs to be true for JDBC statement execution (query requests to Snowflake server). * @param includeSnowflakeHeaders whether to include Snowflake headers (incl. 
request_guid) * @param retryHTTP403 whether to retry on HTTP 403 or not * @param execTimeData ExecTimeTelemetryData should be executed before and/or after the retry * @return HttpResponse Object get from server * @throws net.snowflake.client.api.exception.SnowflakeSQLException Request timeout Exception or * Illegal State Exception i.e. connection is already shutdown etc */ @Deprecated public static CloseableHttpResponse execute( CloseableHttpClient httpClient, HttpRequestBase httpRequest, long retryTimeout, long authTimeout, int socketTimeout, int maxRetries, int injectSocketTimeout, AtomicBoolean canceling, boolean withoutCookies, boolean includeRetryParameters, boolean includeSnowflakeHeaders, boolean retryHTTP403, ExecTimeTelemetryData execTimeData, RetryContextManager retryContextManager) throws SnowflakeSQLException { return execute( httpClient, httpRequest, retryTimeout, authTimeout, socketTimeout, maxRetries, injectSocketTimeout, canceling, withoutCookies, includeRetryParameters, includeSnowflakeHeaders, retryHTTP403, execTimeData, retryContextManager, null); } /** * Execute an HTTP request with retry logic. * * @param httpClient client object used to communicate with other machine * @param httpRequest request object contains all the request information * @param retryTimeout : retry timeout (in seconds) * @param authTimeout : authenticator specific timeout (in seconds) * @param socketTimeout : curl timeout (in ms) * @param maxRetries : max retry count for the request * @param injectSocketTimeout : simulate socket timeout * @param canceling canceling flag * @param withoutCookies whether the cookie spec should be set to IGNORE or not * @param includeRetryParameters whether to include retry parameters in retried requests. Only * needs to be true for JDBC statement execution (query requests to Snowflake server). * @param includeSnowflakeHeaders whether to include Snowflake headers (incl. request_guid) * @param retryHTTP403 whether to retry on HTTP 403 or not * @param execTimeData ExecTimeTelemetryData should be executed before and/or after the retry * @param sfSession the session associated with the request * @return HttpResponse Object get from server * @throws net.snowflake.client.api.exception.SnowflakeSQLException Request timeout Exception or * Illegal State Exception i.e. connection is already shutdown etc */ public static CloseableHttpResponse execute( CloseableHttpClient httpClient, HttpRequestBase httpRequest, long retryTimeout, long authTimeout, int socketTimeout, int maxRetries, int injectSocketTimeout, AtomicBoolean canceling, boolean withoutCookies, boolean includeRetryParameters, boolean includeSnowflakeHeaders, boolean retryHTTP403, ExecTimeTelemetryData execTimeData, RetryContextManager retryContextManager, SFBaseSession sfSession) throws SnowflakeSQLException { return execute( httpClient, httpRequest, retryTimeout, authTimeout, socketTimeout, maxRetries, injectSocketTimeout, canceling, withoutCookies, includeRetryParameters, includeSnowflakeHeaders, retryHTTP403, false, // noRetry execTimeData, retryContextManager, sfSession, null, null, false); } /** * Execute an HTTP request with retry logic. 
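 * <p>This deprecated variant forwards to the fullest {@code execute} overload, passing no
 * session, no client settings key and no header customizers.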
* * @param httpClient client object used to communicate with other machine * @param httpRequest request object contains all the request information * @param retryTimeout : retry timeout (in seconds) * @param authTimeout : authenticator specific timeout (in seconds) * @param socketTimeout : curl timeout (in ms) * @param maxRetries : max retry count for the request * @param injectSocketTimeout : simulate socket timeout * @param canceling canceling flag * @param withoutCookies whether the cookie spec should be set to IGNORE or not * @param includeRetryParameters whether to include retry parameters in retried requests. Only * needs to be true for JDBC statement execution (query requests to Snowflake server). * @param includeSnowflakeHeaders whether to include Snowflake headers (incl. request_guid) * @param retryHTTP403 whether to retry on HTTP 403 or not * @param noRetry should we disable retry on non-successful http resp code * @param execTimeData ExecTimeTelemetryData * @param retryManager RetryContextManager - object allowing to optionally pass custom logic that * should be executed before and/or after the retry * @return HttpResponse Object get from server * @throws net.snowflake.client.api.exception.SnowflakeSQLException Request timeout Exception or * Illegal State Exception i.e. connection is already shutdown etc */ @Deprecated public static CloseableHttpResponse execute( CloseableHttpClient httpClient, HttpRequestBase httpRequest, long retryTimeout, long authTimeout, int socketTimeout, int maxRetries, int injectSocketTimeout, AtomicBoolean canceling, boolean withoutCookies, boolean includeRetryParameters, boolean includeSnowflakeHeaders, boolean retryHTTP403, boolean noRetry, ExecTimeTelemetryData execTimeData, RetryContextManager retryManager) throws SnowflakeSQLException { return execute( httpClient, httpRequest, retryTimeout, authTimeout, socketTimeout, maxRetries, injectSocketTimeout, canceling, withoutCookies, includeRetryParameters, includeSnowflakeHeaders, retryHTTP403, noRetry, execTimeData, retryManager, null, null, null, false); } /** * Execute an HTTP request with retry logic. * * @param httpClient client object used to communicate with other machine * @param httpRequest request object contains all the request information * @param retryTimeout : retry timeout (in seconds) * @param authTimeout : authenticator specific timeout (in seconds) * @param socketTimeout : curl timeout (in ms) * @param maxRetries : max retry count for the request * @param injectSocketTimeout : simulate socket timeout * @param canceling canceling flag * @param withoutCookies whether the cookie spec should be set to IGNORE or not * @param includeRetryParameters whether to include retry parameters in retried requests. Only * needs to be true for JDBC statement execution (query requests to Snowflake server). * @param includeSnowflakeHeaders whether to include Snowflake headers (incl. request_guid) * @param retryHTTP403 whether to retry on HTTP 403 or not * @param noRetry should we disable retry on non-successful http resp code * @param execTimeData ExecTimeTelemetryData * @param retryManager RetryContextManager - object allowing to optionally pass custom logic that * should be executed before and/or after the retry * @param sfSession the session associated with the request * @param key HttpClientSettingsKey object * @param httpHeaderCustomizer HttpHeadersCustomizer object for customization of HTTP headers for * requests sent by the Snowflake JDBC driver. 
* @param isHttpClientWithoutDecompression flag for create client without Decompression * @return HttpResponse Object get from server * @throws net.snowflake.client.api.exception.SnowflakeSQLException Request timeout Exception or * Illegal State Exception i.e. connection is already shutdown etc */ public static CloseableHttpResponse execute( CloseableHttpClient httpClient, HttpRequestBase httpRequest, long retryTimeout, long authTimeout, int socketTimeout, int maxRetries, int injectSocketTimeout, AtomicBoolean canceling, boolean withoutCookies, boolean includeRetryParameters, boolean includeSnowflakeHeaders, boolean retryHTTP403, boolean noRetry, ExecTimeTelemetryData execTimeData, RetryContextManager retryManager, SFBaseSession sfSession, HttpClientSettingsKey key, List httpHeaderCustomizer, boolean isHttpClientWithoutDecompression) throws SnowflakeSQLException { return executeWithRetries( httpClient, httpRequest, retryTimeout, authTimeout, socketTimeout, maxRetries, injectSocketTimeout, canceling, // no canceling withoutCookies, // no cookie includeRetryParameters, // no retry includeSnowflakeHeaders, retryHTTP403, // retry on HTTP 403 noRetry, new ExecTimeTelemetryData(), sfSession, key, httpHeaderCustomizer, isHttpClientWithoutDecompression) .getHttpResponse(); } static long getNewBackoffInMilli( long previousBackoffInMilli, boolean isLoginRequest, DecorrelatedJitterBackoff decorrelatedJitterBackoff, int retryCount, long retryTimeoutInMilliseconds, long elapsedMilliForTransientIssues) { long backoffInMilli; if (isLoginRequest) { long jitteredBackoffInMilli = decorrelatedJitterBackoff.getJitterForLogin(previousBackoffInMilli); backoffInMilli = (long) decorrelatedJitterBackoff.chooseRandom( jitteredBackoffInMilli + previousBackoffInMilli, Math.pow(2, retryCount) + jitteredBackoffInMilli); } else { backoffInMilli = decorrelatedJitterBackoff.nextSleepTime(previousBackoffInMilli); } backoffInMilli = Math.min(maxBackoffInMilli, Math.max(previousBackoffInMilli, backoffInMilli)); if (retryTimeoutInMilliseconds > 0 && (elapsedMilliForTransientIssues + backoffInMilli) > retryTimeoutInMilliseconds) { // If the timeout will be reached before the next backoff, just use the remaining // time (but cannot be negative) - this is the only place when backoff is not in range // min-max. 
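      // Worked example (illustrative numbers): with retryTimeoutInMilliseconds = 30000 and
      // elapsedMilliForTransientIssues = 29500, a computed backoff of 4000 ms is clamped to
      // min(4000, 30000 - 29500) = 500 ms, so the final attempt still fits within the timeout.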
backoffInMilli = Math.max( 0, Math.min( backoffInMilli, retryTimeoutInMilliseconds - elapsedMilliForTransientIssues)); logger.debug( "We are approaching retry timeout {}ms, setting backoff to {}ms", retryTimeoutInMilliseconds, backoffInMilli); } return backoffInMilli; } static boolean isNonRetryableHTTPCode(CloseableHttpResponse response, boolean retryHTTP403) { return (response != null) && (response.getStatusLine().getStatusCode() < 500 || // service unavailable response.getStatusLine().getStatusCode() >= 600) && // gateway timeout response.getStatusLine().getStatusCode() != 408 && // retry response.getStatusLine().getStatusCode() != 429 && // request timeout (!retryHTTP403 || response.getStatusLine().getStatusCode() != 403); } private static boolean isCertificateRevoked(Exception ex) { if (ex == null) { return false; } Throwable ex0 = getRootCause(ex); if (!(ex0 instanceof SFOCSPException)) { return false; } SFOCSPException cause = (SFOCSPException) ex0; return cause.getErrorCode() == OCSPErrorCode.CERTIFICATE_STATUS_REVOKED; } private static Throwable getRootCause(Throwable ex) { Throwable ex0 = ex; while (ex0.getCause() != null) { ex0 = ex0.getCause(); } return ex0; } private static boolean isTransientHandshakeEOF(Exception ex) { Throwable root = getRootCause(ex); return root instanceof EOFException; } private static void setRequestConfig( HttpRequestBase httpRequest, boolean withoutCookies, int injectSocketTimeout, String requestIdStr, long authTimeoutInMilli) { if (withoutCookies) { httpRequest.setConfig(HttpUtil.getRequestConfigWithoutCookies()); } // For first call, simulate a socket timeout by setting socket timeout // to the injected socket timeout value if (injectSocketTimeout != 0) { // test code path logger.debug( "{}Injecting socket timeout by setting socket timeout to {} ms", requestIdStr, injectSocketTimeout); httpRequest.setConfig( HttpUtil.getDefaultRequestConfigWithSocketTimeout(injectSocketTimeout, withoutCookies)); } // When the auth timeout is set, set the socket timeout as the authTimeout // so that it can be renewed in time and pass it to the http request configuration. if (authTimeoutInMilli > 0) { int requestSocketAndConnectTimeout = (int) authTimeoutInMilli; logger.debug( "{}Setting auth timeout as the socket timeout: {} ms", requestIdStr, authTimeoutInMilli); httpRequest.setConfig( HttpUtil.getDefaultRequestConfigWithSocketAndConnectTimeout( requestSocketAndConnectTimeout, withoutCookies)); } } private static void setRequestURI( HttpRequestBase httpRequest, String requestIdStr, boolean includeRetryParameters, boolean includeSnowflakeHeaders, int retryCount, String lastStatusCodeForRetry, long startTime, String requestInfoScrubbed) throws URISyntaxException { /* * Add retryCount if the first request failed * GS can use the parameter for optimization. Specifically GS * will only check metadata database to see if a query has been running * for a retry request. This way for the majority of query requests * which are not part of retry we don't have to pay the performance * overhead of looking up in metadata database. 
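   * For example (illustrative values), the second retry of a query request would carry query
   * parameters such as retryCount=2, retryReason=503 and clientStartTime set to the client
   * start time in milliseconds, as added by updateRetryParameters below.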
*/ URIBuilder builder = new URIBuilder(httpRequest.getURI()); // If HTAP if ("true".equalsIgnoreCase(systemGetEnv("HTAP_SIMULATION")) && builder.getPathSegments().contains("query-request")) { logger.debug("{}Setting htap simulation", requestIdStr); builder.setParameter("target", "htap_simulation"); } if (includeRetryParameters && retryCount > 0) { updateRetryParameters(builder, retryCount, lastStatusCodeForRetry, startTime); } if (includeSnowflakeHeaders) { UUID guid = UUIDUtils.getUUID(); logger.debug("{}Request {} guid: {}", requestIdStr, requestInfoScrubbed, guid.toString()); // Add request_guid for better tracing builder.setParameter(SF_REQUEST_GUID, guid.toString()); } httpRequest.setURI(builder.build()); } /** * Execute an HTTP request with retry logic. * * @param httpClient client object used to communicate with other machine * @param httpRequest request object contains all the request information * @param retryTimeout : retry timeout (in seconds) * @param authTimeout : authenticator specific timeout (in seconds) * @param socketTimeout : curl timeout (in ms) * @param maxRetries : max retry count for the request * @param injectSocketTimeout : simulate socket timeout * @param canceling canceling flag * @param withoutCookies whether the cookie spec should be set to IGNORE or not * @param includeRetryParameters whether to include retry parameters in retried requests. Only * needs to be true for JDBC statement execution (query requests to Snowflake server). * @param includeSnowflakeHeaders whether to include Snowflake headers (incl. request_guid) * @param retryHTTP403 whether to retry on HTTP 403 or not * @param sfSession the session associated with the request * @return HttpResponseContextDto Object get from server or exception * @throws net.snowflake.client.api.exception.SnowflakeSQLException Request timeout Exception or * Illegal State Exception i.e. connection is already shutdown etc */ public static HttpResponseContextDto executeWithRetries( CloseableHttpClient httpClient, HttpRequestBase httpRequest, long retryTimeout, long authTimeout, int socketTimeout, int maxRetries, int injectSocketTimeout, AtomicBoolean canceling, boolean withoutCookies, boolean includeRetryParameters, boolean includeSnowflakeHeaders, boolean retryHTTP403, boolean unpackResponse, ExecTimeTelemetryData execTimeTelemetryData, SFBaseSession sfSession, HttpClientSettingsKey key, List httpHeaderCustomizer, boolean isHttpClientWithoutDecompression) throws SnowflakeSQLException { return executeWithRetries( httpClient, httpRequest, retryTimeout, authTimeout, socketTimeout, maxRetries, injectSocketTimeout, canceling, withoutCookies, includeRetryParameters, includeSnowflakeHeaders, retryHTTP403, false, unpackResponse, execTimeTelemetryData, sfSession, key, httpHeaderCustomizer, isHttpClientWithoutDecompression); } /** * Execute an HTTP request with retry logic. * * @param httpClient client object used to communicate with other machine * @param httpRequest request object contains all the request information * @param retryTimeout : retry timeout (in seconds) * @param authTimeout : authenticator specific timeout (in seconds) * @param socketTimeout : curl timeout (in ms) * @param maxRetries : max retry count for the request * @param injectSocketTimeout : simulate socket timeout * @param canceling canceling flag * @param withoutCookies whether the cookie spec should be set to IGNORE or not * @param includeRetryParameters whether to include retry parameters in retried requests. 
Only * needs to be true for JDBC statement execution (query requests to Snowflake server). * @param includeSnowflakeHeaders whether to include Snowflake headers (incl. request_guid) * @param retryHTTP403 whether to retry on HTTP 403 or not * @param execTimeTelemetryData ExecTimeTelemetryData should be executed before and/or after the * retry * @param sfSession the session associated with the request * @return HttpResponseContextDto Object get from server or exception * @throws net.snowflake.client.api.exception.SnowflakeSQLException Request timeout Exception or * Illegal State Exception i.e. connection is already shutdown etc */ public static HttpResponseContextDto executeWithRetries( CloseableHttpClient httpClient, HttpRequestBase httpRequest, long retryTimeout, long authTimeout, int socketTimeout, int maxRetries, int injectSocketTimeout, AtomicBoolean canceling, boolean withoutCookies, boolean includeRetryParameters, boolean includeSnowflakeHeaders, boolean retryHTTP403, boolean noRetry, boolean unpackResponse, ExecTimeTelemetryData execTimeTelemetryData, SFBaseSession sfSession, HttpClientSettingsKey key, List httpHeaderCustomizer, boolean isHttpClientWithoutDecompression) throws SnowflakeSQLException { String requestIdStr = URLUtil.getRequestIdLogStr(httpRequest.getURI()); String requestInfoScrubbed = SecretDetector.maskSASToken(httpRequest.toString()); HttpExecutingContext context = HttpExecutingContextBuilder.withRequest(requestIdStr, requestInfoScrubbed) .retryTimeout(retryTimeout) .authTimeout(authTimeout) .origSocketTimeout(socketTimeout) .maxRetries(maxRetries) .injectSocketTimeout(injectSocketTimeout) .canceling(canceling) .withoutCookies(withoutCookies) .includeRetryParameters(includeRetryParameters) .includeSnowflakeHeaders(includeSnowflakeHeaders) .retryHTTP403(retryHTTP403) .noRetry(noRetry) .unpackResponse(unpackResponse) .loginRequest(SessionUtil.isNewRetryStrategyRequest(httpRequest)) .withSfSession(sfSession) .build(); return executeWithRetries( httpClient, httpRequest, context, execTimeTelemetryData, null, key, httpHeaderCustomizer, isHttpClientWithoutDecompression); } /** * Execute an HTTP request with retry logic. * * @param httpClient client object used to communicate with other machine * @param httpRequest request object contains all the request information * @param execTimeData ExecTimeTelemetryData should be executed before and/or after the retry * @param retryManager RetryManager containing extra actions used during retries * @return HttpResponseContextDto Object get from server or exception * @throws net.snowflake.client.api.exception.SnowflakeSQLException Request timeout Exception or * Illegal State Exception i.e. 
connection is already shutdown etc */ public static HttpResponseContextDto executeWithRetries( CloseableHttpClient httpClient, HttpRequestBase httpRequest, HttpExecutingContext httpExecutingContext, ExecTimeTelemetryData execTimeData, RetryContextManager retryManager, HttpClientSettingsKey key, List httpHeaderCustomizer, boolean isHttpClientWithoutDecompression) throws SnowflakeSQLException { Stopwatch networkComunnicationStapwatch = null; Stopwatch requestReponseStopWatch = null; HttpResponseContextDto responseDto = new HttpResponseContextDto(); if (logger.isDebugEnabled()) { networkComunnicationStapwatch = new Stopwatch(); networkComunnicationStapwatch.start(); logger.debug( "{}Executing rest request: {}, retry timeout: {}, socket timeout: {}, max retries: {}," + " inject socket timeout: {}, canceling: {}, without cookies: {}, include retry parameters: {}," + " include request guid: {}, retry http 403: {}, no retry: {}", httpExecutingContext.getRequestId(), httpExecutingContext.getRequestInfoScrubbed(), httpExecutingContext.getRetryTimeoutInMilliseconds(), httpExecutingContext.getOrigSocketTimeout(), httpExecutingContext.getMaxRetries(), httpExecutingContext.isInjectSocketTimeout(), httpExecutingContext.getCanceling(), httpExecutingContext.isWithoutCookies(), httpExecutingContext.isIncludeRetryParameters(), httpExecutingContext.isIncludeSnowflakeHeaders(), httpExecutingContext.isRetryHTTP403(), httpExecutingContext.isNoRetry()); } if (httpExecutingContext.isLoginRequest()) { logger.debug( "{}Request is a login/auth request. Using new retry strategy", httpExecutingContext.getRequestId()); } RestRequest.setRequestConfig( httpRequest, httpExecutingContext.isWithoutCookies(), httpExecutingContext.getInjectSocketTimeout(), httpExecutingContext.getRequestId(), httpExecutingContext.getAuthTimeoutInMilliseconds()); execTimeData.setExecuteToSendQueryEnd(); // try request till we get a good response or retry timeout while (true) { logger.debug( "{}Retry count: {}, max retries: {}, retry timeout: {} s, backoff: {} ms. Attempting request: {}", httpExecutingContext.getRequestId(), httpExecutingContext.getRetryCount(), httpExecutingContext.getMaxRetries(), httpExecutingContext.getRetryTimeout(), httpExecutingContext.getMinBackoffInMillis(), httpExecutingContext.getRequestInfoScrubbed()); try { // update start time httpExecutingContext.setStartTimePerRequest(System.currentTimeMillis()); RestRequest.setRequestURI( httpRequest, httpExecutingContext.getRequestId(), httpExecutingContext.isIncludeRetryParameters(), httpExecutingContext.isIncludeSnowflakeHeaders(), httpExecutingContext.getRetryCount(), httpExecutingContext.getLastStatusCodeForRetry(), httpExecutingContext.getStartTime(), httpExecutingContext.getRequestInfoScrubbed()); SFBaseSession session = httpExecutingContext.getSfSession(); if (httpExecutingContext.isIncludeSnowflakeHeaders() && session != null) { if (session.getStickyHttpHeaders() != null) { for (Map.Entry entry : session.getStickyHttpHeaders().entrySet()) { httpRequest.setHeader(entry.getKey(), entry.getValue()); } } } execTimeData.setHttpClientStart(); CloseableHttpResponse response = httpClient.execute(httpRequest); responseDto.setHttpResponse(response); execTimeData.setHttpClientEnd(); } catch (Exception ex) { if (ex instanceof IllegalStateException) { // if exception is caused by illegal state, e.g shutdown of http client // because of closing of connection, then recreate the http client and remove existing // from the cache. 
logger.warn( "IllegalStateException encountered while processing the HTTP request." + " The HttpClient was shut down due to connection closure. " + "Attempting to rebuild the HttpClient and retry the request."); // Clear the httpClient cache. try { httpClient.close(); } catch (IOException e) { logger.warn("Cannot close http client", e); } HttpUtil.httpClient.remove(key); // rebuild the http client. if (isHttpClientWithoutDecompression) { httpClient = HttpUtil.getHttpClientWithoutDecompression(key, httpHeaderCustomizer); } else { httpClient = HttpUtil.getHttpClient(key, httpHeaderCustomizer); } continue; } else if (ex instanceof SSLHandshakeException && isTransientHandshakeEOF(ex)) { // Treat transient EOF during TLS handshake as retryable: // set saved exception for logging/telemetry and allow the retry loop to proceed. responseDto.setSavedEx(ex); } else { responseDto.setSavedEx(handlingNotRetryableException(ex, httpExecutingContext)); } } finally { // Reset the socket timeout to its original value if it is not the // very first iteration. if (httpExecutingContext.getInjectSocketTimeout() != 0 && httpExecutingContext.getRetryCount() == 0) { // test code path httpRequest.setConfig( HttpUtil.getDefaultRequestConfigWithSocketTimeout( httpExecutingContext.getOrigSocketTimeout(), httpExecutingContext.isWithoutCookies())); } } boolean shouldSkipRetry = shouldSkipRetryWithLoggedReason(httpRequest, responseDto, httpExecutingContext); httpExecutingContext.setShouldRetry(!shouldSkipRetry); if (httpExecutingContext.isUnpackResponse() && responseDto.getHttpResponse() != null && responseDto.getHttpResponse().getStatusLine().getStatusCode() == 200) { // todo extract getter for statusCode processHttpResponse(httpExecutingContext, execTimeData, responseDto); } if (!httpExecutingContext.isShouldRetry()) { if (responseDto.getHttpResponse() == null) { if (responseDto.getSavedEx() != null) { logger.error( "{}Returning null response. Cause: {}, request: {}", httpExecutingContext.getRequestId(), getRootCause(responseDto.getSavedEx()), httpExecutingContext.getRequestInfoScrubbed()); } else { logger.error( "{}Returning null response for request: {}", httpExecutingContext.getRequestId(), httpExecutingContext.getRequestInfoScrubbed()); } } else if (responseDto.getHttpResponse().getStatusLine().getStatusCode() != 200) { logger.error( "{}Error response: HTTP Response code: {}, request: {}", httpExecutingContext.getRequestId(), responseDto.getHttpResponse().getStatusLine().getStatusCode(), httpExecutingContext.getRequestInfoScrubbed()); responseDto.setSavedEx( new SnowflakeSQLException( SqlState.IO_ERROR, ErrorCode.NETWORK_ERROR.getMessageCode(), "HTTP status=" + ((responseDto.getHttpResponse() != null) ? responseDto.getHttpResponse().getStatusLine().getStatusCode() : "null response"))); } else if ((responseDto.getHttpResponse() == null || responseDto.getHttpResponse().getStatusLine().getStatusCode() != 200)) { sendTelemetryEvent( httpRequest, httpExecutingContext, responseDto.getHttpResponse(), responseDto.getSavedEx()); } break; } else { prepareRetry(httpRequest, httpExecutingContext, retryManager, responseDto); } } logger.debug( "{}Execution of request {} took {} ms with total of {} retries", httpExecutingContext.getRequestId(), httpExecutingContext.getRequestInfoScrubbed(), networkComunnicationStapwatch == null ? 
"n/a" : networkComunnicationStapwatch.elapsedMillis(), httpExecutingContext.getRetryCount()); httpExecutingContext.resetRetryCount(); if (logger.isDebugEnabled() && networkComunnicationStapwatch != null) { networkComunnicationStapwatch.stop(); } if (responseDto.getSavedEx() != null) { Exception savedEx = responseDto.getSavedEx(); sendIBHttpErrorEvent(httpRequest, responseDto.getHttpResponse(), httpExecutingContext); if (savedEx instanceof SnowflakeSQLException) { throw (SnowflakeSQLException) savedEx; } else { throw new SnowflakeSQLException( savedEx, ErrorCode.NETWORK_ERROR, "Exception encountered for HTTP request: " + savedEx.getMessage()); } } return responseDto; } private static void processHttpResponse( HttpExecutingContext httpExecutingContext, ExecTimeTelemetryData execTimeData, HttpResponseContextDto responseDto) { CloseableHttpResponse response = responseDto.getHttpResponse(); try { String responseText; responseText = verifyAndUnpackResponse(response, execTimeData); updateSessionWithStickyHeaders(httpExecutingContext.getSfSession(), response); httpExecutingContext.setShouldRetry(false); responseDto.setUnpackedCloseableHttpResponse(responseText); } catch (IOException ex) { boolean skipRetriesBecauseOf200 = httpExecutingContext.isSkipRetriesBecauseOf200(); boolean retryReasonDifferentThan200 = !httpExecutingContext.isShouldRetry() && skipRetriesBecauseOf200; httpExecutingContext.setShouldRetry(retryReasonDifferentThan200); responseDto.setSavedEx(ex); } } private static void updateSessionWithStickyHeaders( SFBaseSession sfSession, CloseableHttpResponse response) { if (sfSession != null && response != null) { Map responseHeaders = HttpUtil.extractHeadersAsMap(response); sfSession.extractAndUpdateStickyHttpHeaders(responseHeaders); } } private static void updateRetryParameters( URIBuilder builder, int retryCount, String lastStatusCodeForRetry, long startTime) { builder.setParameter("retryCount", String.valueOf(retryCount)); builder.setParameter("retryReason", lastStatusCodeForRetry); builder.setParameter("clientStartTime", String.valueOf(startTime)); } private static void prepareRetry( HttpRequestBase httpRequest, HttpExecutingContext httpExecutingContext, RetryContextManager retryManager, HttpResponseContextDto dto) throws SnowflakeSQLException { // Potentially retryable error logRequestResult( dto.getHttpResponse(), httpExecutingContext.getRequestId(), httpExecutingContext.getRequestInfoScrubbed(), dto.getSavedEx()); // get the elapsed time for the last request // elapsed in millisecond for last call, used for calculating the // remaining amount of time to sleep: // (backoffInMilli - elapsedMilliForLastCall) long elapsedMilliForLastCall = System.currentTimeMillis() - httpExecutingContext.getStartTimePerRequest(); if (httpExecutingContext.socketOrConnectTimeoutReached()) /* socket timeout not reached */ { /* connect timeout not reached */ // check if this is a login-request if (String.valueOf(httpRequest.getURI()).contains("login-request")) { throw new SnowflakeSQLExceptionWithRetryContext( ErrorCode.AUTHENTICATOR_REQUEST_TIMEOUT, httpExecutingContext.getRetryCount(), true, httpExecutingContext.getElapsedMilliForTransientIssues() / 1000); } } // sleep for backoff - elapsed amount of time sleepForBackoffAndPrepareNext(elapsedMilliForLastCall, httpExecutingContext); httpExecutingContext.incrementRetryCount(); httpExecutingContext.setLastStatusCodeForRetry( dto.getHttpResponse() == null ? 
"0" : String.valueOf(dto.getHttpResponse().getStatusLine().getStatusCode())); // If the request failed with any other retry-able error and auth timeout is reached // increase the retry count and throw special exception to renew the token before retrying. RetryContextManager.RetryHook retryManagerHook = null; if (retryManager != null) { retryManagerHook = retryManager.getRetryHook(); retryManager .getRetryContext() .setElapsedTimeInMillis(httpExecutingContext.getElapsedMilliForTransientIssues()) .setRetryTimeoutInMillis(httpExecutingContext.getRetryTimeoutInMilliseconds()); } // Make sure that any authenticator specific info that needs to be // updated gets updated before the next retry. Ex - OKTA OTT, JWT token // Aim is to achieve this using RetryContextManager, but raising // AUTHENTICATOR_REQUEST_TIMEOUT Exception is still supported as well. In both cases the // retried request must be aware of the elapsed time not to exceed the timeout limit. if (retryManagerHook == RetryContextManager.RetryHook.ALWAYS_BEFORE_RETRY) { retryManager.executeRetryCallbacks(httpRequest); } if (httpExecutingContext.getAuthTimeout() > 0 && httpExecutingContext.getElapsedMilliForTransientIssues() >= httpExecutingContext.getAuthTimeout()) { throw new SnowflakeSQLExceptionWithRetryContext( ErrorCode.AUTHENTICATOR_REQUEST_TIMEOUT, httpExecutingContext.getRetryCount(), false, httpExecutingContext.getElapsedMilliForTransientIssues() / 1000); } int numOfRetryToTriggerTelemetry = TelemetryService.getInstance().getNumOfRetryToTriggerTelemetry(); if (httpExecutingContext.getRetryCount() == numOfRetryToTriggerTelemetry) { TelemetryService.getInstance() .logHttpRequestTelemetryEvent( String.format("HttpRequestRetry%dTimes", numOfRetryToTriggerTelemetry), httpRequest, httpExecutingContext.getInjectSocketTimeout(), httpExecutingContext.getCanceling(), httpExecutingContext.isWithoutCookies(), httpExecutingContext.isIncludeRetryParameters(), httpExecutingContext.isIncludeSnowflakeHeaders(), dto.getHttpResponse(), dto.getSavedEx(), httpExecutingContext.getBreakRetryReason(), httpExecutingContext.getRetryTimeout(), httpExecutingContext.getRetryCount(), SqlState.IO_ERROR, ErrorCode.NETWORK_ERROR.getMessageCode()); } dto.setSavedEx(null); httpExecutingContext.setSkipRetriesBecauseOf200(false); // release connection before retry httpRequest.releaseConnection(); } private static void sendTelemetryEvent( HttpRequestBase httpRequest, HttpExecutingContext httpExecutingContext, CloseableHttpResponse response, Exception savedEx) { String eventName; if (response == null) { eventName = "NullResponseHttpError"; } else { if (response.getStatusLine() == null) { eventName = "NullResponseStatusLine"; } else { eventName = String.format("HttpError%d", response.getStatusLine().getStatusCode()); } } TelemetryService.getInstance() .logHttpRequestTelemetryEvent( eventName, httpRequest, httpExecutingContext.getInjectSocketTimeout(), httpExecutingContext.getCanceling(), httpExecutingContext.isWithoutCookies(), httpExecutingContext.isIncludeRetryParameters(), httpExecutingContext.isIncludeSnowflakeHeaders(), response, savedEx, httpExecutingContext.getBreakRetryReason(), httpExecutingContext.getRetryTimeout(), httpExecutingContext.getRetryCount(), null, 0); } private static void sleepForBackoffAndPrepareNext( long elapsedMilliForLastCall, HttpExecutingContext context) { if (context.getMinBackoffInMillis() > elapsedMilliForLastCall) { try { logger.debug( "{}Retry request {}: sleeping for {} ms", context.getRequestId(), context.getRequestInfoScrubbed(), 
context.getBackoffInMillis()); Thread.sleep(context.getBackoffInMillis()); } catch (InterruptedException ex1) { logger.debug( "{}Backoff sleep before retrying login got interrupted", context.getRequestId()); } context.increaseElapsedMilliForTransientIssues(context.getBackoffInMillis()); context.setBackoffInMillis( getNewBackoffInMilli( context.getBackoffInMillis(), context.isLoginRequest(), context.getBackoff(), context.getRetryCount(), context.getRetryTimeoutInMilliseconds(), context.getElapsedMilliForTransientIssues())); } } private static void logRequestResult( CloseableHttpResponse response, String requestIdStr, String requestInfoScrubbed, Exception savedEx) { if (response != null) { logger.debug( "{}HTTP response not ok: status code: {}, request: {}", requestIdStr, response.getStatusLine().getStatusCode(), requestInfoScrubbed); } else if (savedEx != null) { logger.debug( "{}Null response for cause: {}, request: {}", requestIdStr, getRootCause(savedEx).getMessage(), requestInfoScrubbed); } else { logger.debug("{}Null response for request: {}", requestIdStr, requestInfoScrubbed); } } private static void checkForDPoPNonceError(CloseableHttpResponse response) throws IOException { String errorResponse = EntityUtils.toString(response.getEntity()); if (!isNullOrEmpty(errorResponse)) { ObjectMapper objectMapper = ObjectMapperFactory.getObjectMapper(); JsonNode rootNode = objectMapper.readTree(errorResponse); JsonNode errorNode = rootNode.get(ERROR_FIELD_NAME); if (errorNode != null && errorNode.isValueNode() && errorNode.isTextual() && errorNode.textValue().equals(ERROR_USE_DPOP_NONCE)) { throw new SnowflakeUseDPoPNonceException( response.getFirstHeader(DPOP_NONCE_HEADER_NAME).getValue()); } } } static Exception handlingNotRetryableException( Exception ex, HttpExecutingContext httpExecutingContext) throws SnowflakeSQLLoggedException { Exception savedEx = null; if (ex instanceof IllegalStateException) { throw new SnowflakeSQLLoggedException( /* session = */ null, ErrorCode.INVALID_STATE, ex, ex.getMessage()); } else if (isExceptionInGroup(ex, sslExceptions) && !isProtocolVersionError(ex)) { String formattedMsg = ex.getMessage() + "\n" + "Verify that the hostnames and port numbers in SYSTEM$ALLOWLIST are added to your firewall's allowed list.\n" + "To troubleshoot your connection further, you can refer to this article:\n" + "https://docs.snowflake.com/en/user-guide/client-connectivity-troubleshooting/overview"; Throwable rootCause = getRootCause(ex); if (rootCause instanceof SFOCSPException) { sendIBOCSPErrorEvent(httpExecutingContext, (SFOCSPException) rootCause); } throw new SnowflakeSQLLoggedException(null, ErrorCode.NETWORK_ERROR, ex, formattedMsg); } else { savedEx = ex; // if the request took more than socket timeout log a warning long currentMillis = System.currentTimeMillis(); if ((currentMillis - httpExecutingContext.getStartTimePerRequest()) > HttpUtil.getSocketTimeout().toMillis()) { logger.warn( "{}HTTP request took longer than socket timeout {} ms: {} ms", httpExecutingContext.getRequestId(), HttpUtil.getSocketTimeout().toMillis(), (currentMillis - httpExecutingContext.getStartTimePerRequest())); } StringWriter sw = new StringWriter(); savedEx.printStackTrace(new PrintWriter(sw)); logger.debug( "{}Exception encountered for: {}, {}, {}", httpExecutingContext.getRequestId(), httpExecutingContext.getRequestInfoScrubbed(), ex.getLocalizedMessage(), (ArgSupplier) sw::toString); } return ex; } static boolean isExceptionInGroup(Exception e, Set<Class<?>> group) { for (Class<?> clazz : group) { if (clazz.isInstance(e)) { return true; } } return false; } static boolean isProtocolVersionError(Exception e) { return e.getMessage() != null && e.getMessage().contains("Received fatal alert: protocol_version"); } private static boolean handleCertificateRevoked( Exception savedEx, HttpExecutingContext httpExecutingContext, boolean skipRetrying) { if (!skipRetrying && RestRequest.isCertificateRevoked(savedEx)) { String msg = "Unknown reason"; Throwable rootCause = RestRequest.getRootCause(savedEx); msg = rootCause.getMessage() != null && !rootCause.getMessage().isEmpty() ? rootCause.getMessage() : msg; logger.debug( "{}Error response not retryable, " + msg + ", request: {}", httpExecutingContext.getRequestId(), httpExecutingContext.getRequestInfoScrubbed()); EventUtil.triggerBasicEvent( Event.EventType.NETWORK_ERROR, msg + ", Request: " + httpExecutingContext.getRequestInfoScrubbed(), false); httpExecutingContext.setBreakRetryReason("certificate revoked error"); httpExecutingContext.setBreakRetryEventName("HttpRequestRetryCertificateRevoked"); httpExecutingContext.setShouldRetry(false); return true; } return skipRetrying; } private static boolean handleNonRetryableHttpCode( HttpResponseContextDto dto, HttpExecutingContext httpExecutingContext, boolean skipRetrying) { CloseableHttpResponse response = dto.getHttpResponse(); if (!skipRetrying && isNonRetryableHTTPCode(response, httpExecutingContext.isRetryHTTP403())) { String msg = "Unknown reason"; if (response != null) { logger.debug( "{}HTTP response code for request {}: {}", httpExecutingContext.getRequestId(), httpExecutingContext.getRequestInfoScrubbed(), response.getStatusLine().getStatusCode()); msg = "StatusCode: " + response.getStatusLine().getStatusCode() + ", Reason: " + response.getStatusLine().getReasonPhrase(); } else if (dto.getSavedEx() != null) // may be null. { Throwable rootCause = RestRequest.getRootCause(dto.getSavedEx()); msg = rootCause.getMessage(); } if (response == null || response.getStatusLine().getStatusCode() != 200) { logger.debug( "{}Error response not retryable, " + msg + ", request: {}", httpExecutingContext.getRequestId(), httpExecutingContext.getRequestInfoScrubbed()); EventUtil.triggerBasicEvent( Event.EventType.NETWORK_ERROR, msg + ", Request: " + httpExecutingContext.getRequestInfoScrubbed(), false); } httpExecutingContext.setBreakRetryReason("status code does not need retry"); httpExecutingContext.setShouldRetry(false); httpExecutingContext.setSkipRetriesBecauseOf200( response != null && response.getStatusLine().getStatusCode() == 200); try { if (response == null || response.getStatusLine().getStatusCode() != 200) { logger.error( "Error executing request: {}", httpExecutingContext.getRequestInfoScrubbed()); if (response != null && response.getStatusLine().getStatusCode() == 400 && response.getEntity() != null) { checkForDPoPNonceError(response); } SnowflakeUtil.logResponseDetails(response, logger); if (response != null) { EntityUtils.consume(response.getEntity()); } // For login requests this is also where a reached timeout surfaces as an exception dto.setSavedEx( new SnowflakeSQLException( SqlState.IO_ERROR, ErrorCode.NETWORK_ERROR.getMessageCode(), "HTTP status=" + ((response != null) ?
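// Editor's aside on the DPoP branch above (checkForDPoPNonceError(), RFC 9449), kept as a
// comment sketch with hypothetical names: a server that requires a nonce rejects the request
// with HTTP 400, an error body of {"error":"use_dpop_nonce"}, and the nonce itself in a
// "DPoP-Nonce" response header; the caller is then expected to resend the request with a DPoP
// proof that embeds that nonce, which is what SnowflakeUseDPoPNonceException signals:
//   JsonNode error = mapper.readTree(body).get("error");
//   if (error != null && error.isTextual() && "use_dpop_nonce".equals(error.textValue())) {
//     String nonce = response.getFirstHeader("DPoP-Nonce").getValue();
//     // retry with a fresh DPoP proof containing this nonce
//   }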
response.getStatusLine().getStatusCode() : "null response"))); } } catch (IOException e) { dto.setSavedEx( new SnowflakeSQLException( SqlState.IO_ERROR, ErrorCode.NETWORK_ERROR.getMessageCode(), "Exception details: " + e.getMessage())); } return true; } return skipRetrying; } private static void logTelemetryEvent( HttpRequestBase request, CloseableHttpResponse response, Exception savedEx, HttpExecutingContext httpExecutingContext) { TelemetryService.getInstance() .logHttpRequestTelemetryEvent( httpExecutingContext.getBreakRetryEventName(), request, httpExecutingContext.getInjectSocketTimeout(), httpExecutingContext.getCanceling(), httpExecutingContext.isWithoutCookies(), httpExecutingContext.isIncludeRetryParameters(), httpExecutingContext.isIncludeSnowflakeHeaders(), response, savedEx, httpExecutingContext.getBreakRetryReason(), httpExecutingContext.getRetryTimeout(), httpExecutingContext.getRetryCount(), SqlState.IO_ERROR, ErrorCode.NETWORK_ERROR.getMessageCode()); } private static void sendIBHttpErrorEvent( HttpRequestBase request, CloseableHttpResponse response, HttpExecutingContext httpExecutingContext) { SFBaseSession session = httpExecutingContext.getSfSession(); if (session == null) { logger.debug("Not sending telemetry event as the request is sessionless (session is null)"); return; } if (response == null) { logger.debug( "Not sending telemetry event as the response is null (request failed before receiving response)"); return; } StatusLine statusLine = response.getStatusLine(); logger.debug( "Preparing telemetry event for HTTP error: {} {}", statusLine.getStatusCode(), statusLine.getReasonPhrase()); int calculatedErrorNumber = ErrorCode.HTTP_GENERAL_ERROR.getMessageCode() + statusLine.getStatusCode(); String errorMessage = "HTTP " + statusLine.getStatusCode() + " " + statusLine.getReasonPhrase() + ": " + request.getMethod() + " " + request.getURI().getHost() + request.getURI().getPath(); ObjectNode ibValue = TelemetryUtil.createIBValue( null, SqlState.INTERNAL_ERROR, calculatedErrorNumber, TelemetryField.HTTP_EXCEPTION, errorMessage, null); TelemetryData td = TelemetryUtil.buildJobData(ibValue); session.getTelemetryClient(internalCallMarker()).addLogToBatch(td); } private static void sendIBOCSPErrorEvent( HttpExecutingContext httpExecutingContext, SFOCSPException ex) { SFBaseSession session = httpExecutingContext.getSfSession(); if (session == null) { return; } String errorMessage = ex.toString(); ObjectNode ibValue = TelemetryUtil.createIBValue( null, SqlState.INTERNAL_ERROR, ErrorCode.OCSP_GENERAL_ERROR.getMessageCode(), TelemetryField.OCSP_EXCEPTION, errorMessage, ex.toString()); TelemetryData td = TelemetryUtil.buildJobData(ibValue); session.getTelemetryClient(internalCallMarker()).addLogToBatch(td); } private static boolean handleMaxRetriesExceeded( HttpExecutingContext httpExecutingContext, boolean skipRetrying) { if (!skipRetrying && httpExecutingContext.maxRetriesExceeded()) { logger.error( "{}Stop retrying as max retries have been reached for request: {}! 
Max retry count: {}", httpExecutingContext.getRequestId(), httpExecutingContext.getRequestInfoScrubbed(), httpExecutingContext.getMaxRetries()); httpExecutingContext.setBreakRetryReason("max retries reached"); httpExecutingContext.setBreakRetryEventName("HttpRequestRetryLimitExceeded"); httpExecutingContext.setShouldRetry(false); return true; } return skipRetrying; } private static boolean handleElapsedTimeoutExceeded( HttpExecutingContext httpExecutingContext, boolean skipRetrying) { if (!skipRetrying && httpExecutingContext.getRetryTimeoutInMilliseconds() > 0) { // Check for retry time-out. // increment total elapsed due to transient issues long elapsedMilliForLastCall = System.currentTimeMillis() - httpExecutingContext.getStartTimePerRequest(); httpExecutingContext.increaseElapsedMilliForTransientIssues(elapsedMilliForLastCall); // check if the total elapsed time for transient issues has exceeded // the retry timeout and we retry at least the min, if so, we will not // retry if (httpExecutingContext.elapsedTimeExceeded() && httpExecutingContext.moreThanMinRetries()) { logger.error( "{}Stop retrying since elapsed time due to network " + "issues has reached timeout. " + "Elapsed: {} ms, timeout: {} ms", httpExecutingContext.getRequestId(), httpExecutingContext.getElapsedMilliForTransientIssues(), httpExecutingContext.getRetryTimeoutInMilliseconds()); httpExecutingContext.setBreakRetryReason("retry timeout"); httpExecutingContext.setBreakRetryEventName("HttpRequestRetryTimeout"); httpExecutingContext.setShouldRetry(false); return true; } } return skipRetrying; } private static boolean handleCancelingSignal( HttpExecutingContext httpExecutingContext, boolean skipRetrying) { if (!skipRetrying && httpExecutingContext.getCanceling() != null && httpExecutingContext.getCanceling().get()) { logger.debug( "{}Stop retrying since canceling is requested", httpExecutingContext.getRequestId()); httpExecutingContext.setBreakRetryReason("canceling is requested"); httpExecutingContext.setShouldRetry(false); return true; } return skipRetrying; } private static boolean handleNoRetryFlag( HttpExecutingContext httpExecutingContext, boolean skipRetrying) { if (!skipRetrying && httpExecutingContext.isNoRetry()) { logger.debug( "{}HTTP retry disabled for this request. 
noRetry: {}", httpExecutingContext.getRequestId(), httpExecutingContext.isNoRetry()); httpExecutingContext.setBreakRetryReason("retry is disabled"); httpExecutingContext.resetRetryCount(); httpExecutingContext.setShouldRetry(false); return true; } return skipRetrying; } private static boolean shouldSkipRetryWithLoggedReason( HttpRequestBase request, HttpResponseContextDto responseDto, HttpExecutingContext httpExecutingContext) { CloseableHttpResponse response = responseDto.getHttpResponse(); Exception savedEx = responseDto.getSavedEx(); List<Function<Boolean, Boolean>> conditions = Arrays.asList( skipRetrying -> handleNoRetryFlag(httpExecutingContext, skipRetrying), skipRetrying -> handleCancelingSignal(httpExecutingContext, skipRetrying), skipRetrying -> handleElapsedTimeoutExceeded(httpExecutingContext, skipRetrying), skipRetrying -> handleMaxRetriesExceeded(httpExecutingContext, skipRetrying), skipRetrying -> handleCertificateRevoked(savedEx, httpExecutingContext, skipRetrying), skipRetrying -> handleNonRetryableHttpCode(responseDto, httpExecutingContext, skipRetrying)); // Process each condition using Stream boolean skipRetrying = conditions.stream().reduce(Function::andThen).orElse(Function.identity()).apply(false); // Log telemetry logTelemetryEvent(request, response, savedEx, httpExecutingContext); return skipRetrying; } private static String verifyAndUnpackResponse( CloseableHttpResponse response, ExecTimeTelemetryData execTimeData) throws IOException { try (StringWriter writer = new StringWriter()) { execTimeData.setResponseIOStreamStart(); try (InputStream ins = response.getEntity().getContent()) { IOUtils.copy(ins, writer, "UTF-8"); } execTimeData.setResponseIOStreamEnd(); return writer.toString(); } finally { IOUtils.closeQuietly(response); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/ResultJsonParserV2.java ================================================ package net.snowflake.client.internal.jdbc; import java.nio.Buffer; import java.nio.ByteBuffer; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.core.SFBaseSession; import net.snowflake.client.internal.exception.SnowflakeSQLLoggedException; import net.snowflake.common.core.SqlState; /** This is the Java version of the ODBC's ResultJsonParserV2 class */ public class ResultJsonParserV2 { private enum State { UNINITIALIZED, // no parsing in progress NEXT_ROW, // Waiting for [ to start the next row ROW_FINISHED, // Waiting for , to separate the next row WAIT_FOR_VALUE, // Waiting for the next value to start IN_VALUE, // Copy the value and wait for its end IN_STRING, // Copy the string and wait for its end ESCAPE, // Expect escaped character next WAIT_FOR_NEXT // Wait for , to separate next column } private static final byte[] BNULL = {0x6e, 0x75, 0x6c, 0x6c}; private State state = State.UNINITIALIZED; private int currentColumn; private int outputCurValuePosition; private int outputPosition; // Temporarily store unicode escape sequence when buffer is empty // contains \\u as well private ByteBuffer partialEscapedUnicode; private int outputDataLength; // private int currentRow; private JsonResultChunk resultChunk; public void startParsing(JsonResultChunk resultChunk, SFBaseSession session) throws SnowflakeSQLException { this.resultChunk = resultChunk; if (state != State.UNINITIALIZED) { throw new SnowflakeSQLLoggedException( session, ErrorCode.INTERNAL_ERROR.getMessageCode(),
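// Editor's aside on the handler chain in shouldSkipRetryWithLoggedReason() above (previous
// file), kept as a comment sketch: each Function<Boolean, Boolean> receives the skip-retry
// decision accumulated so far and may only strengthen it, so reducing with Function::andThen
// threads the flag through every handler in registration order, starting from false:
//   List<Function<Boolean, Boolean>> handlers =
//       Arrays.asList(skip -> skip || canceled, skip -> skip || statusCode == 200);
//   boolean skip =
//       handlers.stream().reduce(Function::andThen).orElse(Function.identity()).apply(false);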
SqlState.INTERNAL_ERROR, "Json parser is already used!"); } state = State.NEXT_ROW; outputPosition = 0; outputCurValuePosition = 0; currentColumn = 0; // outputDataLength can be smaller as no ',' and '[' are stored outputDataLength = resultChunk.computeCharactersNeeded(); } /** * Check if the chunk has been parsed correctly. After calling this it is safe to acquire the * output data * * @param in byte buffer * @param session SFBaseSession * @throws SnowflakeSQLException if parsing fails */ public void endParsing(ByteBuffer in, SFBaseSession session) throws SnowflakeSQLException { continueParsingInternal(in, true, session); if (state != State.ROW_FINISHED) { throw new SnowflakeSQLLoggedException( session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "SFResultJsonParser2Failed: Chunk is truncated!"); } currentColumn = 0; state = State.UNINITIALIZED; } /** * Continue parsing with the given data * * @param in readOnly byteBuffer backed by an array (the data to be read is from position to * limit) * @param session SFBaseSession * @return int remaining number of elements in byteBuffer * @throws SnowflakeSQLException if an error is encountered during parsing */ public int continueParsing(ByteBuffer in, SFBaseSession session) throws SnowflakeSQLException { if (state == State.UNINITIALIZED) { throw new SnowflakeSQLLoggedException( session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "Json parser hasn't been initialized!"); } continueParsingInternal(in, false, session); return in.remaining(); } /** * @param in readOnly byteBuffer backed by an array (the data is from position to limit) * @param lastData If true, this signifies this is the last data in parsing * @param session SFBaseSession * @throws SnowflakeSQLException Will be thrown if parsing the chunk data fails */ private void continueParsingInternal(ByteBuffer in, boolean lastData, SFBaseSession session) throws SnowflakeSQLException { /* * This function parses a Snowflake result chunk json, copies the data * to one block of memory and creates a vector of vectors with the offsets * and lengths. There's one vector for each column that contains all its * rows. * * Result json looks like this [ "text", null, "text2" ], [... * The parser keeps state at which element it currently is.
* */ while (in.hasRemaining()) { if (outputPosition >= outputDataLength) { throw new SnowflakeSQLLoggedException( session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "column chunk longer than expected"); } switch (state) { case UNINITIALIZED: throw new SnowflakeSQLLoggedException( session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "parser is in inconsistent state"); case NEXT_ROW: switch (in.get()) { case 0x20: // ' ' case 0x9: // '\t' case 0xa: // '\n' case 0xd: // '\r' // skip the whitespaces break; case 0x5b: // '[' // beginning of the next row state = State.WAIT_FOR_VALUE; break; default: { throw new SnowflakeSQLLoggedException( session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, String.format( "encountered unexpected character 0x%x between rows", in.get(((Buffer) in).position() - 1))); } } break; case ROW_FINISHED: switch (in.get()) { case 0x2c: // ',' state = State.NEXT_ROW; break; case 0x20: // ' ' case 0x9: // '\t' case 0xa: // '\n' case 0xd: // '\r' // skip the whitespaces break; default: { throw new SnowflakeSQLLoggedException( session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, String.format( "encountered unexpected character 0x%x after array", in.get(((Buffer) in).position() - 1))); } } break; case WAIT_FOR_VALUE: switch (in.get()) { case 0x20: // ' ' case 0x9: // '\t' case 0xa: // '\n' case 0xd: // '\r' // skip the whitespaces break; case 0x2c: // ',' // null value addNullValue(); state = State.WAIT_FOR_NEXT; // reread the comma in the WAIT_FOR_NEXT state ((Buffer) in).position(((Buffer) in).position() - 1); continue; case 0x5d: // ']' // null value (only saw spaces) addNullValue(); currentColumn = 0; state = State.ROW_FINISHED; break; case 0x22: // '"' outputCurValuePosition = outputPosition; // String starts; we do not copy the opening quote resultChunk.addOffset(outputPosition); state = State.IN_STRING; break; default: outputCurValuePosition = outputPosition; // write resultChunk.addOffset(outputPosition); addByteToOutput(in.get(((Buffer) in).position() - 1)); state = State.IN_VALUE; break; } break; case IN_VALUE: switch (in.get()) { case 0x20: // ' ' case 0x9: // '\t' case 0xa: // '\n' case 0xd: // '\r' case 0x2c: // ',' case 0x5d: // ']' { // value ended int length = outputPosition - outputCurValuePosition; // Check if value is null if (length == 4 && isNull()) { resultChunk.setIsNull(); outputPosition = outputCurValuePosition; } else { resultChunk.setLastLength(length); } state = State.WAIT_FOR_NEXT; ((Buffer) in).position(((Buffer) in).position() - 1); continue; // reread this char in WAIT_FOR_NEXT } default: addByteToOutput(in.get(((Buffer) in).position() - 1)); break; } break; case IN_STRING: switch (in.get()) { case 0x22: // '"' resultChunk.setLastLength(outputPosition - outputCurValuePosition); state = State.WAIT_FOR_NEXT; break; case 0x5c: // '\\' state = State.ESCAPE; break; default: // Check how many characters don't have escape characters // copy those with one memcpy int inputPositionStart = ((Buffer) in).position() - 1; while (in.hasRemaining()) { byte cur = in.get(); if (cur == 0x22 /* '"' */ || cur == 0x5c /* '\\' */) { // end of string chunk ((Buffer) in).position(((Buffer) in).position() - 1); break; } } addByteArrayToOutput( in.array(), in.arrayOffset() + inputPositionStart, ((Buffer) in).position() - inputPositionStart); if (in.hasRemaining() && (in.get(((Buffer) in).position()) == 0x22 /* '"' */ || in.get(((Buffer) in).position()) == 0x5c /* '\\' */)) { //
Those need special parsing continue; } } break; case ESCAPE: switch (in.get()) { case 0x22 /* '"' */: addByteToOutput((byte) 0x22); state = State.IN_STRING; break; case 0x5c /* '\\' */: addByteToOutput((byte) 0x5c /* '\\' */); state = State.IN_STRING; break; case 0x2f: // '/' addByteToOutput((byte) 0x2f); state = State.IN_STRING; break; case 0x62: // 'b' addByteToOutput((byte) 0x08 /*'\b'*/); state = State.IN_STRING; break; case 0x66: // 'f' addByteToOutput((byte) 0x0c /*'\f'*/); state = State.IN_STRING; break; case 0x6e: // 'n' addByteToOutput((byte) 0xa /* '\n' */); state = State.IN_STRING; break; case 0x72: // 'r' addByteToOutput((byte) 0xd /*'\r'*/); state = State.IN_STRING; break; case 0x74: // 't' addByteToOutput((byte) 0x9 /*'\t'*/); state = State.IN_STRING; break; case 0x75: // 'u' // UTF-16 hex encoded, can be up to 12 bytes // when in doesn't have that many left, cache them and parse at the // next invocation of continueParsing() // have to have at least 4+2+4=10 chars left to read // already saw "\\u", now missing "AAAA\\uAAAA" if (in.remaining() >= 9 || (lastData && in.remaining() >= 3)) { if (!parseCodepoint(in)) { throw new SnowflakeSQLLoggedException( session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "SFResultJsonParser2Failed: invalid escaped unicode character"); } state = State.IN_STRING; } else { // if the number of bytes left un-parsed in the buffer is less than 9 (unless it is // the last remaining data in the buffer), // there is not enough bytes to parse the codepoint. Move the position back 1, // so we can re-enter parsing at this position with the ESCAPE state. ((Buffer) in).position(((Buffer) in).position() - 1); state = State.ESCAPE; return; } break; default: { throw new SnowflakeSQLLoggedException( session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, String.format( "SFResultJsonParser2Failed: encountered unexpected escape character 0x%x", in.get(((Buffer) in).position() - 1))); } } break; case WAIT_FOR_NEXT: switch (in.get()) { case 0x2c: // ',' ++currentColumn; resultChunk.nextIndex(); if (currentColumn >= resultChunk.getColCount()) { throw new SnowflakeSQLLoggedException( session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "SFResultJsonParser2Failed: Too many columns!"); } state = State.WAIT_FOR_VALUE; break; case 0x5d: // ']' currentColumn = 0; resultChunk.nextIndex(); state = State.ROW_FINISHED; break; case 0x20: // ' ' case 0x9: // '\t' case 0xa: // '\n' case 0xd: // '\r' // skip whitespace break; default: { throw new SnowflakeSQLLoggedException( session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, String.format( "encountered unexpected character 0x%x between columns", in.get(((Buffer) in).position() - 1))); } } break; } } } private boolean isNull() throws SnowflakeSQLException { int pos = outputPosition; if (resultChunk.get(--pos) == BNULL[3] && resultChunk.get(--pos) == BNULL[2] && resultChunk.get(--pos) == BNULL[1] && resultChunk.get(--pos) == BNULL[0]) { return true; } return false; } private int parseQuadhex(ByteBuffer s) { // function from picojson int uni_ch = 0, hex; for (int i = 0; i < 4; i++) { if ((hex = s.get()) == -1) { return -1; } if (0x30 /*0*/ <= hex && hex <= 0x39 /*'9'*/) { hex -= 0x30 /*0*/; } else if (0x41 /*'A'*/ <= hex && hex <= 0x46 /*'F'*/) { hex -= 0x41 /*'A'*/ - 0xa; } else if (0x61 /*'a'*/ <= hex && hex <= 0x66 /*'f'*/) { hex -= 0x61 /*'a'*/ - 0xa; } else { return -1; } uni_ch = uni_ch * 16 + hex; } return uni_ch; } private void
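// Editor's aside on the escape decoding above, kept as a comment sketch: parseQuadhex()
// reads the four hex digits of a \\uXXXX escape, and parseCodepoint() (below) combines
// UTF-16 surrogate pairs before re-encoding the code point as UTF-8. Worked example for
// \\uD83D\\uDE00 (U+1F600):
//   uniCh = (((0xD83D - 0xD800) << 10) | ((0xDE00 - 0xDC00) & 0x3FF)) + 0x10000; // 0x1F600
//   emitted UTF-8 bytes: 0xF0, 0x9F, 0x98, 0x80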
addNullValue() throws SnowflakeSQLException { resultChunk.addOffset(outputPosition); } private void addByteToOutput(byte c) throws SnowflakeSQLException { resultChunk.addByte(c, outputPosition); outputPosition++; } private void addByteArrayToOutput(byte[] src, int offset, int length) throws SnowflakeSQLException { resultChunk.addBytes(src, offset, outputPosition, length); outputPosition += length; } private boolean parseCodepoint(ByteBuffer s) throws SnowflakeSQLException { int uni_ch; if ((uni_ch = parseQuadhex(s)) == -1) { return false; } if (0xd800 <= uni_ch && uni_ch <= 0xdfff) { if (0xdc00 <= uni_ch) { // a second 16-bit of a surrogate pair appeared return false; } // first 16-bit of surrogate pair, get the next one if (2 >= s.remaining()) { // not long enough for \\u return false; } if (s.get() != 0x5c /* '\\' */ || s.get() != 0x75 /* 'u' */) { return false; } if (4 > s.remaining()) { // not long enough for the next four hex chars return false; } int second = parseQuadhex(s); if (!(0xdc00 <= second && second <= 0xdfff)) { return false; } uni_ch = ((uni_ch - 0xd800) << 10) | ((second - 0xdc00) & 0x3ff); uni_ch += 0x10000; } if (uni_ch < 0x80) { addByteToOutput((byte) uni_ch); } else { if (uni_ch < 0x800) { addByteToOutput((byte) (0xc0 | (uni_ch >> 6))); } else { if (uni_ch < 0x10000) { addByteToOutput((byte) (0xe0 | (uni_ch >> 12))); } else { addByteToOutput((byte) (0xf0 | (uni_ch >> 18))); addByteToOutput((byte) (0x80 | ((uni_ch >> 12) & 0x3f))); } addByteToOutput((byte) (0x80 | ((uni_ch >> 6) & 0x3f))); } addByteToOutput((byte) (0x80 | (uni_ch & 0x3f))); } return true; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/ResultStreamProvider.java ================================================ package net.snowflake.client.internal.jdbc; import java.io.InputStream; // Defines how the underlying data stream is to be fetched; i.e. // allows large resultset data to come from a different source public interface ResultStreamProvider { InputStream getInputStream(ChunkDownloadContext context) throws Exception; } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/RetryContext.java ================================================ package net.snowflake.client.internal.jdbc; /** RetryContext stores information about an ongoing request's retrying process. 
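*
* <p>Editor's sketch (hypothetical numbers): with a 300 s retry timeout of which 45 s have
* already elapsed, the remaining budget reported to callers is (300000 - 45000) / 1000 = 255
* seconds:
*
* <pre>{@code
* RetryContext ctx =
*     new RetryContext().setElapsedTimeInMillis(45_000).setRetryTimeoutInMillis(300_000);
* long remainingSeconds = ctx.getRemainingRetryTimeoutInSeconds(); // 255
* }</pre>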
*/ public class RetryContext { static final int SECONDS_TO_MILLIS_FACTOR = 1000; private long elapsedTimeInMillis; private long retryTimeoutInMillis; private long retryCount; public RetryContext() {} public RetryContext setElapsedTimeInMillis(long elapsedTimeInMillis) { this.elapsedTimeInMillis = elapsedTimeInMillis; return this; } public RetryContext setRetryTimeoutInMillis(long retryTimeoutInMillis) { this.retryTimeoutInMillis = retryTimeoutInMillis; return this; } public RetryContext setRetryCount(long retryCount) { this.retryCount = retryCount; return this; } private long getRemainingRetryTimeoutInMillis() { return retryTimeoutInMillis - elapsedTimeInMillis; } public long getRemainingRetryTimeoutInSeconds() { return (getRemainingRetryTimeoutInMillis()) / SECONDS_TO_MILLIS_FACTOR; } public long getRetryCount() { return retryCount; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/RetryContextManager.java ================================================ package net.snowflake.client.internal.jdbc; import java.util.ArrayList; import java.util.List; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.util.ThrowingBiFunction; import org.apache.http.client.methods.HttpRequestBase; /** * RetryContextManager lets you register logic (as callbacks) that will be re-executed during a * retry of a request. */ public class RetryContextManager { // List of retry callbacks that will be executed in the order they were registered. private final List<ThrowingBiFunction<HttpRequestBase, RetryContext, RetryContext>> retryCallbacks = new ArrayList<>(); // A RetryHook flag that can be used by client code to decide when (or if) callbacks should be // executed. private final RetryHook retryHook; private RetryContext retryContext; /** Enumeration for different retry hook strategies. */ public enum RetryHook { /** Always execute the registered retry callbacks on every retry. */ ALWAYS_BEFORE_RETRY, } /** Default constructor using ALWAYS_BEFORE_RETRY as the default retry hook. */ public RetryContextManager() { this(RetryHook.ALWAYS_BEFORE_RETRY); } /** * Constructor that accepts a specific RetryHook. * * @param retryHook the retry hook strategy. */ public RetryContextManager(RetryHook retryHook) { this.retryHook = retryHook; this.retryContext = new RetryContext(); } /** * Registers a retry callback that will be executed on each retry. * * @param callback A RetryCallback encapsulating the logic to be replayed on retry. * @return the current instance for fluent chaining. */ public RetryContextManager registerRetryCallback( ThrowingBiFunction<HttpRequestBase, RetryContext, RetryContext> callback) { retryCallbacks.add(callback); return this; } /** * Executes all registered retry callbacks in the order they were added, before reattempting the * operation. * * @param requestToRetry the HTTP request to retry. * @throws SnowflakeSQLException if an error occurs during callback execution. */ public void executeRetryCallbacks(HttpRequestBase requestToRetry) throws SnowflakeSQLException { for (ThrowingBiFunction<HttpRequestBase, RetryContext, RetryContext> callback : retryCallbacks) { retryContext = callback.apply(requestToRetry, retryContext); } } /** * Returns the configured RetryHook. * * @return the retry hook.
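* <p>Editor's usage sketch for this manager (the {@code fetchFreshToken()} helper is
* hypothetical): callbacks registered this way are replayed by
* {@code executeRetryCallbacks(HttpRequestBase)} before each retry attempt.
* <pre>{@code
* RetryContextManager retryManager = new RetryContextManager();
* retryManager.registerRetryCallback(
*     (request, retryContext) -> {
*       request.setHeader("Authorization", "Bearer " + fetchFreshToken());
*       return retryContext;
*     });
* }</pre>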
*/ public RetryHook getRetryHook() { return retryHook; } public RetryContext getRetryContext() { return retryContext; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/SFAsyncResultSet.java ================================================ package net.snowflake.client.internal.jdbc; import static net.snowflake.client.api.resultset.QueryStatus.Status.NO_DATA; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty; import java.math.BigDecimal; import java.sql.Date; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.SQLException; import java.sql.Statement; import java.sql.Time; import java.sql.Timestamp; import java.util.List; import java.util.TimeZone; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.api.resultset.QueryStatus; import net.snowflake.client.api.resultset.SnowflakeAsyncResultSet; import net.snowflake.client.api.resultset.SnowflakeResultSet; import net.snowflake.client.api.resultset.SnowflakeResultSetSerializable; import net.snowflake.client.internal.api.implementation.resultset.SnowflakeBaseResultSet; import net.snowflake.client.internal.api.implementation.statement.SnowflakeStatementImpl; import net.snowflake.client.internal.core.SFBaseResultSet; import net.snowflake.client.internal.core.SFBaseSession; import net.snowflake.client.internal.core.SFSession; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.common.core.SqlState; /** SFAsyncResultSet implementation. Note: For Snowflake internal use */ public class SFAsyncResultSet extends SnowflakeBaseResultSet implements SnowflakeAsyncResultSet, ResultSet { private static final SFLogger logger = SFLoggerFactory.getLogger(SFAsyncResultSet.class); private ResultSet resultSetForNext = new SnowflakeResultSetV1.EmptyResultSet(); private boolean resultSetForNextInitialized = false; private String queryID; private SFBaseSession session; private Statement extraStatement; private QueryStatus lastQueriedStatus = QueryStatus.empty(); /** * Constructor takes an inputstream from the API response that we get from executing a SQL * statement. * *
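<p>Editor's usage sketch of the asynchronous flow this class backs (hypothetical
* {@code conn} variable; the calls shown are part of the public driver interfaces):
*
* <pre>{@code
* ResultSet rs =
*     conn.createStatement().unwrap(SnowflakeStatement.class).executeAsyncQuery("select 1");
* String queryId = rs.unwrap(SnowflakeResultSet.class).getQueryID();
* // later, possibly from a different connection:
* ResultSet again = conn.unwrap(SnowflakeConnection.class).createResultSet(queryId);
* }</pre>
*
*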
<p>
The constructor will fetch the first row (if any) so that it can initialize the * ResultSetMetaData. * * @param sfBaseResultSet snowflake core base result rest object * @param statement query statement that generates this result set * @throws SQLException if failed to construct snowflake result set metadata */ SFAsyncResultSet(SFBaseResultSet sfBaseResultSet, Statement statement) throws SQLException { super(statement); this.queryID = sfBaseResultSet.getQueryId(); this.session = sfBaseResultSet.getSession(); this.extraStatement = statement; this.resultSetMetaData = new SnowflakeResultSetMetaDataV1(sfBaseResultSet.getMetaData()); this.resultSetMetaData.setQueryIdForAsyncResults(this.queryID); this.resultSetMetaData.setQueryType(SnowflakeResultSetMetaDataV1.QueryType.ASYNC); } /** * Constructor takes a result set serializable object to create a sessionless result set. * * @param sfBaseResultSet snowflake core base result rest object * @param resultSetSerializable The result set serializable object which includes all metadata to * create the result set * @throws SQLException if fails to create the result set object */ public SFAsyncResultSet( SFBaseResultSet sfBaseResultSet, SnowflakeResultSetSerializableV1 resultSetSerializable) throws SQLException { super(resultSetSerializable); this.queryID = sfBaseResultSet.getQueryId(); this.resultSetMetaData = new SnowflakeResultSetMetaDataV1(sfBaseResultSet.getMetaData()); this.resultSetMetaData.setQueryIdForAsyncResults(this.queryID); this.resultSetMetaData.setQueryType(SnowflakeResultSetMetaDataV1.QueryType.ASYNC); } public SFAsyncResultSet(String queryID, Statement statement) throws SQLException { super(statement); queryID = queryID.trim(); if (!QueryIdValidator.isValid(queryID)) { throw new SQLException( "The provided query ID " + queryID + " is invalid.", SqlState.INVALID_PARAMETER_VALUE); } this.queryID = queryID; } @Override protected void raiseSQLExceptionIfResultSetIsClosed() throws SQLException { if (isClosed()) { throw new SnowflakeSQLException(ErrorCode.RESULTSET_ALREADY_CLOSED); } } @Override public QueryStatus getStatus() throws SQLException { if (session == null) { throw new SQLException("Session not set"); } if (this.queryID == null) { throw new SQLException("QueryID unknown"); } if (this.lastQueriedStatus.isSuccess()) { return this.lastQueriedStatus; } this.lastQueriedStatus = session.getQueryStatus(this.queryID); // if query has completed successfully, cache its metadata to avoid unnecessary future server // calls return this.lastQueriedStatus; } /** * Helper function for next() and getMetaData(). Calls result_scan to get the resultSet after an * asynchronous query call * * @throws SQLException if the query failed or its status could not be retrieved */ private void getRealResults() throws SQLException { if (!resultSetForNextInitialized) { // If query has already succeeded, go straight to result scan to get results if (!this.lastQueriedStatus.isSuccess()) { QueryStatus queryStatus = this.lastQueriedStatus; int noDataRetry = 0; final int noDataMaxRetries = 30; final int[] retryPattern = {1, 1, 2, 3, 4, 8, 10}; final int maxIndex = retryPattern.length - 1; int retry = 0; while (!queryStatus.isSuccess()) { // if query is not running due to a failure (Aborted, failed with error, etc), generate // exception if (!queryStatus.isStillRunning()) { String errorMessage = this.lastQueriedStatus.getErrorMessage(); if (isNullOrEmpty(errorMessage)) { errorMessage = "No error message available"; } throw new SQLException( "Status of query associated with resultSet is " + queryStatus.getDescription() + ". " + errorMessage + " Results not generated."); } // if no data about the query is returned after about 2 minutes, give up if (queryStatus.getStatus() == NO_DATA) { noDataRetry++; if (noDataRetry >= noDataMaxRetries) { throw new SQLException( "Cannot retrieve data on the status of this query. No information returned from server for queryID=" + this.queryID); } } try { // Sleep for an amount before trying again. Exponential backoff up to 5 seconds // implemented. Thread.sleep(500 * retryPattern[retry]); } catch (InterruptedException e) { e.printStackTrace(); } if (retry < maxIndex) { retry++; } queryStatus = this.getStatus(); } } resultSetForNext = extraStatement.executeQuery("select * from table(result_scan('" + this.queryID + "'))"); resultSetForNextInitialized = true; } } /** * Advance to next row * * @return true if next row exists, false otherwise * @throws SQLException if failed to move to the next row */ @Override public boolean next() throws SQLException { getMetaData(); return resultSetForNext.next(); } @Override public void close() throws SQLException { close(true); } public void close(boolean removeClosedResultSetFromStatement) throws SQLException { // no SQLException is raised. resultSetForNext.close(); if (sfBaseResultSet != null) { sfBaseResultSet.close(); } if (removeClosedResultSetFromStatement && statement.isWrapperFor(SnowflakeStatementImpl.class)) { statement.unwrap(SnowflakeStatementImpl.class).removeClosedResultSet(this); } } public String getQueryID() { return this.queryID; } public boolean wasNull() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return resultSetForNext.wasNull(); } public String getString(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return resultSetForNext.getString(columnIndex); } public void setSession(SFSession session) { this.session = session; } public void setStatement(Statement statement) { this.extraStatement = statement; } public boolean getBoolean(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return resultSetForNext.getBoolean(columnIndex); } @Override public byte getByte(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return resultSetForNext.getByte(columnIndex); } public short getShort(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return resultSetForNext.getShort(columnIndex); } public int getInt(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return resultSetForNext.getInt(columnIndex); } public long getLong(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return resultSetForNext.getLong(columnIndex); } public float getFloat(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return resultSetForNext.getFloat(columnIndex); } public double getDouble(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return resultSetForNext.getDouble(columnIndex); } public Date getDate(int columnIndex, TimeZone tz) throws SQLException { // Note: currently we provide this API but it does not use TimeZone tz.
// TODO: use the time zone passed from the arguments raiseSQLExceptionIfResultSetIsClosed(); return resultSetForNext.getDate(columnIndex); } public Time getTime(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return resultSetForNext.getTime(columnIndex); } public Timestamp getTimestamp(int columnIndex, TimeZone tz) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return resultSetForNext.unwrap(SnowflakeResultSetV1.class).getTimestamp(columnIndex, tz); } public ResultSetMetaData getMetaData() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); getRealResults(); this.resultSetMetaData = (SnowflakeResultSetMetaDataV1) resultSetForNext.unwrap(SnowflakeResultSetV1.class).getMetaData(); this.resultSetMetaData.setQueryIdForAsyncResults(this.queryID); this.resultSetMetaData.setQueryType(SnowflakeResultSetMetaDataV1.QueryType.ASYNC); return resultSetMetaData; } public Object getObject(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return resultSetForNext.getObject(columnIndex); } public BigDecimal getBigDecimal(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return resultSetForNext.getBigDecimal(columnIndex); } @Deprecated public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return resultSetForNext.getBigDecimal(columnIndex, scale); } public byte[] getBytes(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return resultSetForNext.getBytes(columnIndex); } public int getRow() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return resultSetForNext.getRow(); } public boolean isFirst() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return resultSetForNext.isFirst(); } public boolean isClosed() throws SQLException { // no exception is raised. if (sfBaseResultSet != null) { return (resultSetForNext.isClosed() && sfBaseResultSet.isClosed()); } return resultSetForNext.isClosed(); } @Override public boolean isLast() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return resultSetForNext.isLast(); } @Override public boolean isAfterLast() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return resultSetForNext.isAfterLast(); } @Override public boolean isBeforeFirst() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); // if ResultSet is not initialized yet, this means neither next() nor getMetaData() has been // called. // If next() hasn't been called, we are at the beginning of the ResultSet so should return true. return !resultSetForNextInitialized || resultSetForNext.isBeforeFirst(); } @Override public boolean isWrapperFor(Class<?> iface) throws SQLException { logger.trace("boolean isWrapperFor(Class<?> iface)", false); return iface.isInstance(this); } @SuppressWarnings("unchecked") @Override public <T> T unwrap(Class<T> iface) throws SQLException { logger.trace("<T> T unwrap(Class<T> iface)", false); if (!iface.isInstance(this)) { throw new SQLException( this.getClass().getName() + " not unwrappable from " + iface.getName()); } return (T) this; } /** * Get a list of ResultSetSerializables for the ResultSet so that the result can be processed in * parallel * *
<p>
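Editor's sketch of the general pattern for consuming the serialized chunks in parallel
* (hypothetical {@code rs} and {@code executor} variables):
*
* <pre>{@code
* List<SnowflakeResultSetSerializable> parts =
*     rs.unwrap(SnowflakeResultSet.class).getResultSetSerializables(100 * 1024 * 1024);
* for (SnowflakeResultSetSerializable part : parts) {
*   executor.submit(() -> {
*     try (ResultSet chunk = part.getResultSet()) {
*       while (chunk.next()) {
*         // consume rows
*       }
*     }
*     return null;
*   });
* }
* }</pre>
*
* <p>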
Not currently supported for asynchronous result sets. */ @Override public List<SnowflakeResultSetSerializable> getResultSetSerializables(long maxSizeInBytes) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); getRealResults(); return resultSetForNext .unwrap(SnowflakeResultSet.class) .getResultSetSerializables(maxSizeInBytes); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/SFBaseFileTransferAgent.java ================================================ package net.snowflake.client.internal.jdbc; import java.io.InputStream; import java.sql.SQLException; import java.util.ArrayList; import java.util.List; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.core.SFBaseSession; import net.snowflake.common.util.ClassUtil; import net.snowflake.common.util.FixedViewColumn; /** * Base class for file transfers: given a SnowflakeConnection, files may be uploaded or downloaded * from/to an InputStream. * *
<p>
Note that while SnowflakeFileTransferAgent is used liberally throughout the codebase for * performing uploads, the "alternative" implementations may have quite different ways of uploading * to cloud storage, so this is a rather "thin" abstract class that leaves much of the * implementation up to the implementing class. * *
<p>
It is also expected that a command (GET/PUT) is parsed by the FileTransferAgent before either * execute() or downloadStream() is called. This is not enforced by the abstract class's interface, * but will be passed to the SFConnectionHandler's getFileTransferAgent(). * *
<p>
In general, besides the abstract methods execute() and downloadStream(), an implementing class * needs to also populate the statusRows List with the file-metadata rows forming the fixed view, as * well as set the showEncryptionParameter boolean (usually returned by the session parameter; * default is false). */ public abstract class SFBaseFileTransferAgent implements SnowflakeFixedView { /** A class for encapsulating the columns to return for the upload command */ public enum UploadColumns { source, target, source_size, target_size, source_compression, target_compression, status, encryption, message }; public class UploadCommandFacade { @FixedViewColumn(name = "source", ordinal = 10) private String srcFile; @FixedViewColumn(name = "target", ordinal = 20) private String destFile; @FixedViewColumn(name = "source_size", ordinal = 30) private long srcSize; @FixedViewColumn(name = "target_size", ordinal = 40) private long destSize = -1; @FixedViewColumn(name = "source_compression", ordinal = 50) private String srcCompressionType; @FixedViewColumn(name = "target_compression", ordinal = 60) private String destCompressionType; @FixedViewColumn(name = "status", ordinal = 70) private String resultStatus; @FixedViewColumn(name = "message", ordinal = 80) private String errorDetails; public UploadCommandFacade( String srcFile, String destFile, String resultStatus, String errorDetails, long srcSize, long destSize, String srcCompressionType, String destCompressionType) { this.srcFile = srcFile; this.destFile = destFile; this.resultStatus = resultStatus; this.errorDetails = errorDetails; this.srcSize = srcSize; this.destSize = destSize; this.srcCompressionType = srcCompressionType; this.destCompressionType = destCompressionType; } public String getSrcFile() { return srcFile; } } public class UploadCommandEncryptionFacade extends UploadCommandFacade { @FixedViewColumn(name = "encryption", ordinal = 75) private String encryption; public UploadCommandEncryptionFacade( String srcFile, String destFile, String resultStatus, String errorDetails, long srcSize, long destSize, String srcCompressionType, String destCompressionType, boolean isEncrypted) { super( srcFile, destFile, resultStatus, errorDetails, srcSize, destSize, srcCompressionType, destCompressionType); this.encryption = isEncrypted ? "ENCRYPTED" : ""; } } /** A class for encapsulating the columns to return for the download command */ public class DownloadCommandFacade { @FixedViewColumn(name = "file", ordinal = 10) private String file; @FixedViewColumn(name = "size", ordinal = 20) private long size; @FixedViewColumn(name = "status", ordinal = 30) private String resultStatus; @FixedViewColumn(name = "message", ordinal = 40) private String errorDetails; public DownloadCommandFacade(String file, String resultStatus, String errorDetails, long size) { this.file = file; this.resultStatus = resultStatus; this.errorDetails = errorDetails; this.size = size; } public String getFile() { return file; } } public class DownloadCommandEncryptionFacade extends DownloadCommandFacade { @FixedViewColumn(name = "encryption", ordinal = 35) private String encryption; public DownloadCommandEncryptionFacade( String file, String resultStatus, String errorDetails, long size, boolean isEncrypted) { super(file, resultStatus, errorDetails, size); this.encryption = isEncrypted ? 
"DECRYPTED" : ""; } } protected boolean compressSourceFromStream; protected String destStagePath; protected String destFileNameForStreamSource; protected InputStream sourceStream; protected boolean sourceFromStream; protected boolean showEncryptionParameter; protected List statusRows = new ArrayList<>(); protected CommandType commandType = CommandType.UPLOAD; private int currentRowIndex; /** * Gets the total number of rows that this GET/PUT command's output returns in the fixed-view * result. The statusRows list must be populated with the FileMetadata. * * @return The number of rows that this fixed-view represents. */ @Override public int getTotalRows() { return statusRows.size(); } /** * Move on to the next row of file metadata. The statusRows list must be populated with the * file-metadata output of the GET/PUT command. * * @return The row, represented as a list of Object. */ @Override public List getNextRow() throws Exception { if (currentRowIndex < statusRows.size()) { return ClassUtil.getFixedViewObjectAsRow( commandType == CommandType.UPLOAD ? (showEncryptionParameter ? UploadCommandEncryptionFacade.class : UploadCommandFacade.class) : (showEncryptionParameter ? DownloadCommandEncryptionFacade.class : DownloadCommandFacade.class), statusRows.get(currentRowIndex++)); } else { return null; } } /** * Describe the metadata of a fixed view. * * @return list of column meta data * @throws Exception failed to construct list */ @Override public List describeColumns(SFBaseSession session) throws Exception { return SnowflakeUtil.describeFixedViewColumns( commandType == CommandType.UPLOAD ? (showEncryptionParameter ? UploadCommandEncryptionFacade.class : UploadCommandFacade.class) : (showEncryptionParameter ? DownloadCommandEncryptionFacade.class : DownloadCommandFacade.class), session); } /** * Sets the source data stream to be uploaded. * * @param sourceStream The source data to upload. */ public void setSourceStream(InputStream sourceStream) { this.sourceStream = sourceStream; this.sourceFromStream = true; } /** * Sets the destination stage path * * @param destStagePath The target destination stage path. */ public void setDestStagePath(String destStagePath) { this.destStagePath = destStagePath; } /** * Sets the target filename for uploading * * @param destFileNameForStreamSource The target destination filename once the file is uploaded. */ public void setDestFileNameForStreamSource(String destFileNameForStreamSource) { this.destFileNameForStreamSource = destFileNameForStreamSource; } /** * Whether to compress the source stream before upload. * * @param compressSourceFromStream boolean for whether to compress the data stream before upload. */ public void setCompressSourceFromStream(boolean compressSourceFromStream) { this.compressSourceFromStream = compressSourceFromStream; } /** * Run the PUT/GET command, if a command has been set. * * @return Whether the operation was completed successfully, and completely. * @throws SQLException for SQL or upload errors */ public abstract boolean execute() throws SQLException; /** * Download data from a stage. * * @param fileName A file on a stage to download. * @return An InputStream for the requested file. * @throws SnowflakeSQLException If the file does not exist, or if an error occurred during * transport. */ public abstract InputStream downloadStream(String fileName) throws SnowflakeSQLException; /** The types of file transfer: upload and download. 
*/ public enum CommandType { UPLOAD, DOWNLOAD } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/SFConnectionHandler.java ================================================ package net.snowflake.client.internal.jdbc; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.SQLNonTransientConnectionException; import java.sql.Statement; import java.util.Properties; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.api.implementation.resultset.SnowflakeBaseResultSet; import net.snowflake.client.internal.core.SFBaseResultSet; import net.snowflake.client.internal.core.SFBaseSession; import net.snowflake.client.internal.core.SFBaseStatement; /** * Class that presents the implementation of a Snowflake Connection. This allows for alternate * definitions of SFSession, SFStatement, and SFResultSet, (representing the 'physical' * implementation layer) that can share high-level code. */ public interface SFConnectionHandler { /** * @return Whether this Connection supports asynchronous queries. If yes, createAsyncResultSet may * be called. */ boolean supportsAsyncQuery(); /** * Initializes the SnowflakeConnection * * @param url url string * @param info connection parameters * @throws SQLException if any error is encountered */ void initializeConnection(String url, Properties info) throws SQLException; /** * @return Gets the SFBaseSession implementation for this connection implementation */ SFBaseSession getSFSession(); /** * @return Returns the SFStatementInterface implementation for this connection implementation * @throws SQLException if any error occurs */ SFBaseStatement getSFStatement() throws SQLException; /** * Creates a result set from a query id. * * @param queryID the query ID * @param statement Statement object * @return ResultSet * @throws SQLException if any error occurs */ ResultSet createResultSet(String queryID, Statement statement) throws SQLException; /** * @param resultSet SFBaseResultSet * @param statement Statement * @return Creates a SnowflakeResultSet from a base SFBaseResultSet for this connection * implementation. * @throws SQLException if an error occurs */ SnowflakeBaseResultSet createResultSet(SFBaseResultSet resultSet, Statement statement) throws SQLException; /** * Creates an asynchronous result set from a base SFBaseResultSet for this connection * implementation. * * @param resultSet SFBaseResultSet * @param statement Statement * @return An asynchronous result set from SFBaseResultSet * @throws SQLException if an error occurs */ SnowflakeBaseResultSet createAsyncResultSet(SFBaseResultSet resultSet, Statement statement) throws SQLException; /** * @param command The command to parse for this file transfer (e.g., PUT/GET) * @param statement The statement to use for this file transfer * @return SFBaseFileTransferAgent * @throws SQLNonTransientConnectionException if a connection error occurs * @throws SnowflakeSQLException if any other exception occurs */ SFBaseFileTransferAgent getFileTransferAgent(String command, SFBaseStatement statement) throws SQLNonTransientConnectionException, SnowflakeSQLException; /** * Overridable method that allows for different connection implementations to use different stage * names for binds uploads. By default, it uses SYSTEM$BIND * * @return The name of the identifier with which a temporary stage is created in the Session for * uploading array bind values. 
*/ default String getBindStageName() { return "SYSTEM$BIND"; } ; } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/SnowflakeChunkDownloader.java ================================================ package net.snowflake.client.internal.jdbc; import static net.snowflake.client.internal.core.Constants.MB; import static net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.internalCallMarker; import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.databind.MappingJsonFactory; import com.fasterxml.jackson.databind.ObjectMapper; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.PrintWriter; import java.io.StringWriter; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Random; import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.core.ChunkDownloader; import net.snowflake.client.internal.core.DownloaderMetrics; import net.snowflake.client.internal.core.HttpClientSettingsKey; import net.snowflake.client.internal.core.HttpUtil; import net.snowflake.client.internal.core.OCSPMode; import net.snowflake.client.internal.core.ObjectMapperFactory; import net.snowflake.client.internal.core.QueryResultFormat; import net.snowflake.client.internal.core.SFArrowResultSet; import net.snowflake.client.internal.core.SFBaseSession; import net.snowflake.client.internal.core.SessionUtil; import net.snowflake.client.internal.exception.SnowflakeSQLLoggedException; import net.snowflake.client.internal.jdbc.SnowflakeResultChunk.DownloadState; import net.snowflake.client.internal.jdbc.telemetryOOB.TelemetryService; import net.snowflake.client.internal.log.ArgSupplier; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.common.core.SqlState; import org.apache.arrow.memory.RootAllocator; /** Class for managing async download of offline result chunks */ public class SnowflakeChunkDownloader implements ChunkDownloader { // object mapper for deserialize JSON private static final ObjectMapper mapper = ObjectMapperFactory.getObjectMapper(); /** a shared JSON parser factory. */ private static final JsonFactory jsonFactory = new MappingJsonFactory(); private static final SFLogger logger = SFLoggerFactory.getLogger(SnowflakeChunkDownloader.class); private static final int STREAM_BUFFER_SIZE = MB; private static final long SHUTDOWN_TIME = 3; private final SnowflakeConnectString snowflakeConnectionString; private final OCSPMode ocspMode; private final HttpClientSettingsKey ocspModeAndProxyKey; // Session object, used solely for throwing exceptions. CAUTION: MAY BE NULL! 
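// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of this file): applications never touch
// SnowflakeChunkDownloader directly; it prefetches result chunks behind a
// plain JDBC ResultSet. Account and credentials below are placeholders. The
// session parameters are the ones referenced by logOutOfMemoryError() further
// down, which advises keeping
// 2 * CLIENT_PREFETCH_THREADS * CLIENT_RESULT_CHUNK_SIZE < CLIENT_MEMORY_LIMIT.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

class ChunkDownloadUsageSketch {
  public static void main(String[] args) throws Exception {
    String url = "jdbc:snowflake://myaccount.snowflakecomputing.com/"; // placeholder
    try (Connection conn = DriverManager.getConnection(url, "user", "password");
        Statement stmt = conn.createStatement()) {
      stmt.execute("ALTER SESSION SET CLIENT_PREFETCH_THREADS = 4");
      stmt.execute("ALTER SESSION SET CLIENT_RESULT_CHUNK_SIZE = 64"); // MB
      stmt.execute("ALTER SESSION SET CLIENT_MEMORY_LIMIT = 1024"); // MB; 2 * 4 * 64 = 512 < 1024
      try (ResultSet rs = stmt.executeQuery("SELECT * FROM large_table")) {
        while (rs.next()) {
          // Rows past the first chunk are served from chunks this class
          // downloaded and parsed on background threads.
        }
      }
    }
  }
}
// ---------------------------------------------------------------------------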
private SFBaseSession session; private JsonResultChunk.ResultChunkDataCache chunkDataCache = new JsonResultChunk.ResultChunkDataCache(); private List chunks; // index of next chunk to be consumed (it may not be ready yet) private int nextChunkToConsume = 0; // index of next chunk to be downloaded private int nextChunkToDownload = 0; // number of prefetch slots private final int prefetchSlots; // thread pool private final ThreadPoolExecutor executor; // number of millis main thread waiting for chunks from downloader private long numberMillisWaitingForChunks = 0; // is the downloader terminated private final AtomicBoolean terminated = new AtomicBoolean(false); // number of millis spent on downloading result chunks private final AtomicLong totalMillisDownloadingChunks = new AtomicLong(0); // number of millis spent on parsing result chunks private final AtomicLong totalMillisParsingChunks = new AtomicLong(0); // The query result master key private final String qrmk; private Map chunkHeadersMap; private final int networkTimeoutInMilli; private final int authTimeout; private final int socketTimeout; private final int maxHttpRetries; private long memoryLimit; // the current memory usage across JVM private static final AtomicLong currentMemoryUsage = new AtomicLong(); // used to track the downloading threads private Map downloaderFutures = new ConcurrentHashMap<>(); /** query result format */ private QueryResultFormat queryResultFormat; /** Arrow memory allocator for the current resultSet */ private RootAllocator rootAllocator; private final String queryId; private final int firstChunkRowCount; static long getCurrentMemoryUsage() { synchronized (currentMemoryUsage) { return currentMemoryUsage.longValue(); } } // The parameters used to wait for available memory: // starting waiting time will be BASE_WAITING_MS * WAITING_SECS_MULTIPLIER = 100 ms private long BASE_WAITING_MS = 50; private long WAITING_SECS_MULTIPLIER = 2; // the maximum waiting time private long MAX_WAITING_MS = 30 * 1000; // the default jitter ratio 10% private long WAITING_JITTER_RATIO = 10; private final ResultStreamProvider resultStreamProvider; /** Timeout that the main thread waits for downloading the current chunk */ private static final long downloadedConditionTimeoutInSeconds = HttpUtil.getDownloadedConditionTimeoutInSeconds(); private static final int MAX_RETRY_JITTER = 1000; // milliseconds // Only controls the max retry number when prefetch runs out of memory // Will wait a while then retry to see if we can allocate the required memory // Default value is 1 private int prefetchMaxRetry = 1; private static Throwable injectedDownloaderException = null; // for testing purpose // This function should only be used for testing purpose static void setInjectedDownloaderException(Throwable th) { injectedDownloaderException = th; } public OCSPMode getOCSPMode() { return ocspMode; } public HttpClientSettingsKey getHttpClientSettingsKey() { return ocspModeAndProxyKey; } public ResultStreamProvider getResultStreamProvider() { return resultStreamProvider; } /** * Create a pool of downloader threads. 
* * @param threadNamePrefix name of threads in pool * @param parallel number of thread in pool * @return new thread pool */ private static ThreadPoolExecutor createChunkDownloaderExecutorService( final String threadNamePrefix, final int parallel) { ThreadFactory threadFactory = new ThreadFactory() { private int threadCount = 1; public Thread newThread(final Runnable r) { final Thread thread = new Thread(r); thread.setName(threadNamePrefix + threadCount++); thread.setUncaughtExceptionHandler( new Thread.UncaughtExceptionHandler() { public void uncaughtException(Thread t, Throwable e) { logger.error("Uncaught Exception in thread {}: {}", t, e); } }); thread.setDaemon(true); return thread; } }; return (ThreadPoolExecutor) Executors.newFixedThreadPool(parallel, threadFactory); } /** * Constructor to initialize downloader, which uses the default stream provider * * @param resultSetSerializable the result set serializable object which includes required * metadata to start chunk downloader * @throws SnowflakeSQLException if an error is encountered */ public SnowflakeChunkDownloader(SnowflakeResultSetSerializableV1 resultSetSerializable) throws SnowflakeSQLException { this.queryId = resultSetSerializable.getQueryId(); this.firstChunkRowCount = resultSetSerializable.getFirstChunkRowCount(); this.snowflakeConnectionString = resultSetSerializable.getSnowflakeConnectString(); this.ocspMode = resultSetSerializable.getOCSPMode(); this.ocspModeAndProxyKey = resultSetSerializable.getHttpClientKey(); this.qrmk = resultSetSerializable.getQrmk(); this.networkTimeoutInMilli = resultSetSerializable.getNetworkTimeoutInMilli(); this.authTimeout = resultSetSerializable.getAuthTimeout(); this.socketTimeout = resultSetSerializable.getSocketTimeout(); this.maxHttpRetries = resultSetSerializable.getMaxHttpRetries(); this.prefetchSlots = resultSetSerializable.getResultPrefetchThreads() * 2; this.queryResultFormat = resultSetSerializable.getQueryResultFormat(); logger.debug("qrmk: {}", this.qrmk); this.chunkHeadersMap = resultSetSerializable.getChunkHeadersMap(); // session may be null. Its only use is for in-band telemetry in this class this.session = (resultSetSerializable.getSession(internalCallMarker()) != null) ? 
resultSetSerializable.getSession(internalCallMarker()).orElse(null) : null; if (this.session != null) { Object prefetchMaxRetry = this.session.getOtherParameter(SessionUtil.JDBC_CHUNK_DOWNLOADER_MAX_RETRY); if (prefetchMaxRetry != null) { this.prefetchMaxRetry = (int) prefetchMaxRetry; } } if (resultSetSerializable.getServerURL() != null) { try { SessionUtil.resetOCSPUrlIfNecessary(resultSetSerializable.getServerURL()); } catch (IOException e) { logger.debug("Exception while resetting OCSP URL", e); } } this.memoryLimit = resultSetSerializable.getMemoryLimit(); if (this.session != null && session.getMemoryLimitForTesting() != SFBaseSession.MEMORY_LIMIT_UNSET) { this.memoryLimit = session.getMemoryLimitForTesting(); } // create the chunks array this.chunks = new ArrayList<>(resultSetSerializable.getChunkFileCount()); this.resultStreamProvider = resultSetSerializable.getResultStreamProvider(internalCallMarker()); if (resultSetSerializable.getChunkFileCount() < 1) { throw new SnowflakeSQLLoggedException( this.session, ErrorCode.INTERNAL_ERROR, "Incorrect chunk count: " + resultSetSerializable.getChunkFileCount()); } // initialize chunks with url and row count for (SnowflakeResultSetSerializableV1.ChunkFileMetadata chunkFileMetadata : resultSetSerializable.getChunkFileMetadatas()) { SnowflakeResultChunk chunk; switch (this.queryResultFormat) { case ARROW: this.rootAllocator = resultSetSerializable.getRootAllocator(); chunk = new ArrowResultChunk( chunkFileMetadata.getFileURL(), chunkFileMetadata.getRowCount(), resultSetSerializable.getColumnCount(), chunkFileMetadata.getUncompressedByteSize(), this.rootAllocator, this.session); break; case JSON: chunk = new JsonResultChunk( chunkFileMetadata.getFileURL(), chunkFileMetadata.getRowCount(), resultSetSerializable.getColumnCount(), chunkFileMetadata.getUncompressedByteSize(), this.session); break; default: throw new SnowflakeSQLLoggedException( this.session, ErrorCode.INTERNAL_ERROR, "Invalid result format: " + queryResultFormat.name()); } logger.debug( "Add chunk: url: {} rowCount: {} uncompressedSize: {} neededChunkMemory: {}, chunkResultFormat: {}", chunk.getScrubbedUrl(), chunk.getRowCount(), chunk.getUncompressedSize(), chunk.computeNeededChunkMemory(), queryResultFormat.name()); chunks.add(chunk); } // prefetch threads and slots from parameter settings int effectiveThreads = Math.min( resultSetSerializable.getResultPrefetchThreads(), resultSetSerializable.getChunkFileCount()); logger.debug( "#chunks: {} #threads: {} #slots: {} -> pool: {}", resultSetSerializable.getChunkFileCount(), resultSetSerializable.getResultPrefetchThreads(), prefetchSlots, effectiveThreads); // create thread pool executor = createChunkDownloaderExecutorService("result-chunk-downloader-", effectiveThreads); try { startNextDownloaders(); } catch (OutOfMemoryError outOfMemoryError) { logOutOfMemoryError(); StringWriter errors = new StringWriter(); outOfMemoryError.printStackTrace(new PrintWriter(errors)); throw new SnowflakeSQLLoggedException( this.session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, errors); } } /** Submit download chunk tasks to executor. 
Number depends on thread and memory limit */ private void startNextDownloaders() throws SnowflakeSQLException { long waitingTime = BASE_WAITING_MS; long getPrefetchMemRetry = 0; // submit the chunks to be downloaded up to the prefetch slot capacity // and limited by memory while (nextChunkToDownload - nextChunkToConsume < prefetchSlots && nextChunkToDownload < chunks.size()) { // check if memory limit allows more prefetching final SnowflakeResultChunk nextChunk = chunks.get(nextChunkToDownload); final long neededChunkMemory = nextChunk.computeNeededChunkMemory(); // make sure memoryLimit > neededChunkMemory; otherwise, the thread hangs if (neededChunkMemory > memoryLimit) { logger.debug( "Thread {}: reset memoryLimit from {} MB to current chunk size {} MB", (ArgSupplier) () -> Thread.currentThread().getId(), (ArgSupplier) () -> memoryLimit / 1024 / 1024, (ArgSupplier) () -> neededChunkMemory / 1024 / 1024); memoryLimit = neededChunkMemory; } // try to reserve the needed memory long curMem = currentMemoryUsage.addAndGet(neededChunkMemory); // If there is not enough memory available for prefetch, cancel the memory allocation. It's // cancelled when: // 1. We haven't consumed enough chunks to download next chunk (nextChunkToDownload > // nextChunkToConsume) // 2. There is not enough memory for prefetching to begin with (nextChunkToDownload=0 && // nextChunkToConsume=0) // In all other cases, don't cancel but wait until memory frees up. if (curMem > memoryLimit && (nextChunkToDownload - nextChunkToConsume > 0 || (nextChunkToDownload == 0 && nextChunkToConsume == 0))) { // cancel the reserved memory and this downloader too logger.debug( "Not enough memory available for prefetch. Cancel reserved memory. MemoryLimit: {}," + " curMem: {}, nextChunkToDownload: {}, nextChunkToConsume: {}, retry: {}", memoryLimit, curMem, nextChunkToDownload, nextChunkToConsume, getPrefetchMemRetry); currentMemoryUsage.addAndGet(-neededChunkMemory); nextChunk.getLock().lock(); try { nextChunk.setDownloadState(DownloadState.FAILURE); } finally { nextChunk.getLock().unlock(); } break; } // only allocate memory when the future usage is less than the limit if (curMem <= memoryLimit) { if (queryResultFormat == QueryResultFormat.JSON) { ((JsonResultChunk) nextChunk).tryReuse(chunkDataCache); } logger.debug( "Thread {}: currentMemoryUsage in MB: {}, nextChunkToDownload: {}, " + "nextChunkToConsume: {}, newReservedMemory in B: {} ", (ArgSupplier) () -> Thread.currentThread().getId(), curMem / MB, nextChunkToDownload, nextChunkToConsume, neededChunkMemory); logger.debug( "Submit chunk #{} for downloading, url: {}", this.nextChunkToDownload, nextChunk.getScrubbedUrl()); // SNOW-615824 Imagine this scenario to understand the root cause of this issue: // When consuming chunk N, we try to prefetch chunk N+1. The prefetching failed due to // hitting memoryLimit. We will mark the chunk N+1 as FAILED. // After we are done with chunk N, we try to consume chunk N+1. // In getNextChunkToConsume, we first call startNextDownloaders then call waitForChunkReady. // startNextDownloaders sees that the next chunk to download is N+1. With enough memory at // this time, it will try to download the chunk. waitForChunkReady sees that chunk N+1 is // marked as FAILED, it will also try to download the chunk because it thinks that no // prefetching will download the chunk. // Thus we will submit two download jobs, causing chunk N+1 appears to be lost. // Therefore the fix is to only prefetch chunks that are marked as NOT_STARTED here. 
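// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this file): the guard that follows checks
// the chunk state under its lock so that only one of the two submitting call
// sites (this prefetch loop vs. the retry path in waitForChunkReady) can own
// a given chunk. Minimal standalone version of the same pattern; the names
// are invented for the sketch, and unlike the driver it flips the state at
// submit time rather than inside the download callable.
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.locks.ReentrantLock;

class GuardedSubmitSketch {
  enum State { NOT_STARTED, IN_PROGRESS, SUCCESS, FAILURE }

  private State state = State.NOT_STARTED;
  private final ReentrantLock lock = new ReentrantLock();

  boolean submitIfNotStarted(ExecutorService executor, Runnable download) {
    lock.lock();
    try {
      if (state != State.NOT_STARTED) {
        return false; // another call site already owns this chunk
      }
      state = State.IN_PROGRESS;
    } finally {
      lock.unlock();
    }
    executor.submit(download);
    return true;
  }

  public static void main(String[] args) {
    ExecutorService pool = Executors.newFixedThreadPool(1);
    GuardedSubmitSketch chunk = new GuardedSubmitSketch();
    System.out.println(chunk.submitIfNotStarted(pool, () -> {})); // true
    System.out.println(chunk.submitIfNotStarted(pool, () -> {})); // false
    pool.shutdown();
  }
}
// ---------------------------------------------------------------------------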
nextChunk.getLock().lock(); try { if (nextChunk.getDownloadState() != DownloadState.NOT_STARTED) { break; } } finally { nextChunk.getLock().unlock(); } Future downloaderFuture = executor.submit( getDownloadChunkCallable( this, nextChunk, qrmk, nextChunkToDownload, chunkHeadersMap, networkTimeoutInMilli, authTimeout, socketTimeout, maxHttpRetries, this.session, chunks.size(), queryId)); downloaderFutures.put(nextChunkToDownload, downloaderFuture); // increment next chunk to download nextChunkToDownload++; // make sure reset waiting time waitingTime = BASE_WAITING_MS; // go to next chunk continue; } else { // cancel the reserved memory logger.debug("Cancel the reserved memory.", false); curMem = currentMemoryUsage.addAndGet(-neededChunkMemory); if (getPrefetchMemRetry > prefetchMaxRetry) { logger.debug( "Retry limit for prefetch has been reached. Cancel reserved memory and prefetch" + " attempt.", false); nextChunk.getLock().lock(); try { nextChunk.setDownloadState(DownloadState.FAILURE); } finally { nextChunk.getLock().unlock(); } break; } } // waiting when nextChunkToDownload is equal to nextChunkToConsume but reach memory limit try { waitingTime *= WAITING_SECS_MULTIPLIER; waitingTime = waitingTime > MAX_WAITING_MS ? MAX_WAITING_MS : waitingTime; long jitter = ThreadLocalRandom.current().nextLong(0, waitingTime / WAITING_JITTER_RATIO); waitingTime += jitter; getPrefetchMemRetry++; if (logger.isDebugEnabled()) { logger.debug( "Thread {} waiting for {} s: currentMemoryUsage in MB: {}, neededChunkMemory in MB:" + " {}, nextChunkToDownload: {}, nextChunkToConsume: {}, retry: {}", (ArgSupplier) () -> Thread.currentThread().getId(), waitingTime / 1000.0, curMem / MB, neededChunkMemory / MB, nextChunkToDownload, nextChunkToConsume, getPrefetchMemRetry); } Thread.sleep(waitingTime); } catch (InterruptedException ie) { throw new SnowflakeSQLException( SqlState.INTERNAL_ERROR, ErrorCode.INTERNAL_ERROR.getMessageCode(), "Waiting SnowflakeChunkDownloader has been interrupted."); } } // clear the cache, we can't download more at the moment // so we won't need them in the near future chunkDataCache.clear(); } /** * release the memory usage from currentMemoryUsage * * @param chunkId chunk ID * @param optionalReleaseSize if present, then release the specified size */ private void releaseCurrentMemoryUsage(int chunkId, Optional optionalReleaseSize) { long releaseSize = optionalReleaseSize.isPresent() ? optionalReleaseSize.get() : chunks.get(chunkId).computeNeededChunkMemory(); if (releaseSize > 0 && !chunks.get(chunkId).isReleased()) { // has to be before reusing the memory long curMem = currentMemoryUsage.addAndGet(-releaseSize); logger.debug( "Thread {} - currentMemoryUsage in MB: {}, released in MB: {}, " + "chunk: {}, optionalReleaseSize: {}, JVMFreeMem: {}", (ArgSupplier) () -> Thread.currentThread().getId(), (ArgSupplier) () -> curMem / MB, releaseSize, chunkId, optionalReleaseSize.isPresent(), Runtime.getRuntime().freeMemory()); chunks.get(chunkId).setReleased(); } } /** release all existing chunk memory usage before close */ private void releaseAllChunkMemoryUsage() { if (chunks == null || chunks.size() == 0) { return; } // only release the chunks has been downloading or downloaded for (int i = 0; i < nextChunkToDownload; i++) { releaseCurrentMemoryUsage(i, Optional.empty()); } } /** * The method does the following: * *
1. free the previous chunk data and submit a new chunk to be downloaded * *
2. get next chunk to consume, if it is not ready for consumption, it waits until it is ready * * @return next SnowflakeResultChunk to be consumed * @throws InterruptedException if downloading thread was interrupted * @throws SnowflakeSQLException if downloader encountered an error */ public SnowflakeResultChunk getNextChunkToConsume() throws InterruptedException, SnowflakeSQLException { // free previous chunk data and submit a new chunk for downloading if (this.nextChunkToConsume > 0) { int prevChunk = this.nextChunkToConsume - 1; // free the chunk data for previous chunk logger.debug("Free chunk data for chunk #{}", prevChunk); long chunkMemUsage = chunks.get(prevChunk).computeNeededChunkMemory(); // reuse chunkcache if json result if (this.queryResultFormat == QueryResultFormat.JSON) { if (this.nextChunkToDownload < this.chunks.size()) { // Reuse the set of object to avoid reallocation // It is important to do this BEFORE starting the next download chunkDataCache.add((JsonResultChunk) this.chunks.get(prevChunk)); } else { // clear the cache if we don't need it anymore chunkDataCache.clear(); } } // Free any memory the previous chunk might hang on this.chunks.get(prevChunk).freeData(); releaseCurrentMemoryUsage(prevChunk, Optional.of(chunkMemUsage)); } // if no more chunks, return null if (this.nextChunkToConsume >= this.chunks.size()) { logger.debug("No more chunk", false); return null; } // prefetch next chunks try { startNextDownloaders(); } catch (OutOfMemoryError outOfMemoryError) { logOutOfMemoryError(); StringWriter errors = new StringWriter(); outOfMemoryError.printStackTrace(new PrintWriter(errors)); throw new SnowflakeSQLLoggedException( this.session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, errors); } SnowflakeResultChunk currentChunk = this.chunks.get(nextChunkToConsume); if (currentChunk.getDownloadState() == DownloadState.SUCCESS) { logger.debug("Chunk #{} is ready to consume", nextChunkToConsume); nextChunkToConsume++; if (nextChunkToConsume == this.chunks.size()) { // make sure to release the last chunk releaseCurrentMemoryUsage(nextChunkToConsume - 1, Optional.empty()); } return currentChunk; } else { // the chunk we want to consume is not ready yet, wait for it currentChunk.getLock().lock(); try { logger.debug("Chunk#{} is not ready to consume", nextChunkToConsume); logger.debug("Consumer get lock to check chunk state", false); waitForChunkReady(currentChunk); // downloader thread encountered an error if (currentChunk.getDownloadState() == DownloadState.FAILURE) { releaseAllChunkMemoryUsage(); logger.error("Downloader encountered error: {}", currentChunk.getDownloadError()); if (currentChunk .getDownloadError() .contains("java.lang.OutOfMemoryError: Java heap space")) { logOutOfMemoryError(); } throw new SnowflakeSQLLoggedException( this.session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, currentChunk.getDownloadError()); } logger.debug("Chunk#{} is ready to consume", nextChunkToConsume); nextChunkToConsume++; // next chunk to consume is ready for consumption return currentChunk; } finally { logger.debug("Consumer free lock", false); boolean terminateDownloader = (currentChunk.getDownloadState() == DownloadState.FAILURE); // release the unlock always currentChunk.getLock().unlock(); if (nextChunkToConsume == this.chunks.size()) { // make sure to release the last chunk releaseCurrentMemoryUsage(nextChunkToConsume - 1, Optional.empty()); } if (terminateDownloader) { logger.debug("Download result fail. 
Shut down the chunk downloader", false); terminate(); } } } } /** * wait for the current chunk to be ready to consume if the downloader fails then let it retry for * at most 10 times if the downloader is in progress for at most one hour or the downloader has * already retried more than 10 times, then throw an exception. * * @param currentChunk * @throws InterruptedException */ private void waitForChunkReady(SnowflakeResultChunk currentChunk) throws InterruptedException { int retry = 0; long startTime = System.currentTimeMillis(); while (true) { logger.debug( "Thread {} is waiting for chunk#{} to be ready, current chunk state is: {}, retry: {}", Thread.currentThread().getId(), nextChunkToConsume, currentChunk.getDownloadState(), retry); if (currentChunk.getDownloadState() != DownloadState.FAILURE && currentChunk.getDownloadState() != DownloadState.SUCCESS) { // if the state is not failure, we should keep waiting; otherwise, we skip // waiting if (!currentChunk .getDownloadCondition() .await(downloadedConditionTimeoutInSeconds, TimeUnit.SECONDS)) { // if the current chunk has not condition change over the timeout (which is rare) logger.debug( "Thread {} is timeout for waiting chunk#{} to be ready, current" + " chunk state is: {}, retry: {}, scrubbedUrl: {}", Thread.currentThread().getId(), nextChunkToConsume, currentChunk.getDownloadState(), retry, currentChunk.getScrubbedUrl()); currentChunk.setDownloadState(DownloadState.FAILURE); currentChunk.setDownloadError( String.format( "Timeout waiting for the download of chunk#%d(Total chunks: %d) retry: %d scrubbedUrl: %s", nextChunkToConsume, this.chunks.size(), retry, currentChunk.getScrubbedUrl())); break; } } // retry if chunk is not successfully downloaded if (currentChunk.getDownloadState() != DownloadState.SUCCESS) { retry++; // timeout or failed logger.debug( "Since downloadState is {} Thread {} decides to retry {} time(s) for chunk#{}", currentChunk.getDownloadState(), Thread.currentThread().getId(), retry, nextChunkToConsume); Future downloaderFuture = downloaderFutures.get(nextChunkToConsume); if (downloaderFuture != null) { downloaderFuture.cancel(true); } chunks.get(nextChunkToConsume).getLock().lock(); try { chunks.get(nextChunkToConsume).setDownloadState(DownloadState.IN_PROGRESS); chunks.get(nextChunkToConsume).reset(); } finally { chunks.get(nextChunkToConsume).getLock().unlock(); } // random jitter before start next retry Thread.sleep(new Random().nextInt(MAX_RETRY_JITTER)); downloaderFuture = executor.submit( getDownloadChunkCallable( this, chunks.get(nextChunkToConsume), qrmk, nextChunkToConsume, chunkHeadersMap, networkTimeoutInMilli, authTimeout, socketTimeout, maxHttpRetries, session, chunks.size(), queryId)); downloaderFutures.put(nextChunkToConsume, downloaderFuture); // Only when prefetch fails due to internal memory limitation, nextChunkToDownload // equals nextChunkToConsume. 
In that case we need to increment nextChunkToDownload if (nextChunkToDownload == nextChunkToConsume) { nextChunkToDownload = nextChunkToConsume + 1; } } // exit if chunk has downloaded or we have hit max retry // maxHttpRetries = 0 will retry indefinitely if (currentChunk.getDownloadState() == DownloadState.SUCCESS || (maxHttpRetries > 0 && retry >= maxHttpRetries)) { break; } } if (currentChunk.getDownloadState() == DownloadState.SUCCESS) { logger.debug("Ready to consume chunk#{}, succeed retry={}", nextChunkToConsume, retry); } else if (retry >= maxHttpRetries) { // stop retrying and report failure currentChunk.setDownloadState(DownloadState.FAILURE); currentChunk.setDownloadError( String.format( "Max retry reached for the download of chunk#%d " + "(Total chunks: %d) retry: %d, error: %s", nextChunkToConsume, this.chunks.size(), retry, chunks.get(nextChunkToConsume).getDownloadError())); } this.numberMillisWaitingForChunks += (System.currentTimeMillis() - startTime); } /** log out of memory error and provide the suggestion to avoid this error */ private void logOutOfMemoryError() { logger.error( "Dump some crucial information below:\n" + "Total milliseconds waiting for chunks: {},\n" + "Total memory used: {}, Max heap size: {}, total download time: {} millisec,\n" + "total parsing time: {} milliseconds, total chunks: {},\n" + "currentMemoryUsage in Byte: {}, currentMemoryLimit in Bytes: {} \n" + "nextChunkToDownload: {}, nextChunkToConsume: {}\n" + "Several suggestions to try to resolve the OOM issue:\n" + "1. increase the JVM heap size if you have more space; or \n" + "2. use CLIENT_MEMORY_LIMIT to reduce the memory usage by the JDBC driver " + "(https://docs.snowflake.net/manuals/sql-reference/parameters.html#client-memory-limit)3." + " please make sure 2 * CLIENT_PREFETCH_THREADS * CLIENT_RESULT_CHUNK_SIZE <" + " CLIENT_MEMORY_LIMIT. If not, please reduce CLIENT_PREFETCH_THREADS and" + " CLIENT_RESULT_CHUNK_SIZE too.", numberMillisWaitingForChunks, Runtime.getRuntime().totalMemory(), Runtime.getRuntime().maxMemory(), totalMillisDownloadingChunks.get(), totalMillisParsingChunks.get(), chunks.size(), currentMemoryUsage, memoryLimit, nextChunkToDownload, nextChunkToConsume); } /** * terminate the downloader * * @return chunk downloader metrics collected over instance lifetime */ @Override public DownloaderMetrics terminate() throws InterruptedException { if (!terminated.getAndSet(true)) { try { if (executor != null) { if (!executor.isShutdown()) { // cancel running downloaders downloaderFutures.forEach((k, v) -> v.cancel(true)); // shutdown executor executor.shutdown(); if (!executor.awaitTermination(SHUTDOWN_TIME, TimeUnit.SECONDS)) { logger.debug("Executor did not terminate in the specified time.", false); List droppedTasks = executor.shutdownNow(); // optional ** logger.debug( "Executor was abruptly shut down. {} tasks will not be executed.", droppedTasks.size()); // optional ** } } // Normal flow will never hit here. 
This is only for testing purposes if (SnowflakeChunkDownloader.injectedDownloaderException != null && injectedDownloaderException instanceof InterruptedException) { throw (InterruptedException) SnowflakeChunkDownloader.injectedDownloaderException; } } long totalUncompressedSize = chunks.stream() .reduce(0L, (acc, chunk) -> acc + chunk.getUncompressedSize(), Long::sum); long rowsInChunks = chunks.stream().reduce(0L, (acc, chunk) -> acc + chunk.getRowCount(), Long::sum); long chunksSize = chunks.size(); logger.debug( "Completed processing {} {} chunks for query {} in {} ms. Download took {} ms (average: {} ms)," + " parsing took {} ms (average: {} ms). Chunks uncompressed size: {} MB (average: {} MB)," + " rows in chunks: {} (total: {}, average in chunk: {}), total memory used: {} MB", chunksSize, queryResultFormat == QueryResultFormat.ARROW ? "ARROW" : "JSON", queryId, totalMillisParsingChunks.get() + totalMillisDownloadingChunks.get(), totalMillisDownloadingChunks.get(), totalMillisDownloadingChunks.get() / chunksSize, totalMillisParsingChunks, totalMillisParsingChunks.get() / chunksSize, totalUncompressedSize / MB, totalUncompressedSize / MB / chunksSize, rowsInChunks, firstChunkRowCount + rowsInChunks, rowsInChunks / chunksSize, Runtime.getRuntime().totalMemory() / MB); return new DownloaderMetrics( numberMillisWaitingForChunks, totalMillisDownloadingChunks.get(), totalMillisParsingChunks.get()); } finally { for (SnowflakeResultChunk chunk : chunks) { // explicitly free each chunk since Arrow chunk may hold direct memory chunk.freeData(); } if (queryResultFormat == QueryResultFormat.ARROW) { SFArrowResultSet.closeRootAllocator(rootAllocator); } else { chunkDataCache.clear(); } releaseAllChunkMemoryUsage(); chunks = null; } } return null; } /** * add download time * * @param downloadTime Time for downloading a single chunk */ private void addDownloadTime(long downloadTime) { this.totalMillisDownloadingChunks.addAndGet(downloadTime); } /** * add parsing time * * @param parsingTime Time for parsing a single chunk */ private void addParsingTime(long parsingTime) { this.totalMillisParsingChunks.addAndGet(parsingTime); } /** * Create a download callable that will be run in download thread * * @param downloader object to download the chunk * @param resultChunk object contains information about the chunk will be downloaded * @param qrmk Query Result Master Key * @param chunkIndex the index of the chunk which will be downloaded in array chunks. 
This is * mainly for logging purpose * @param chunkHeadersMap contains headers needed to be added when downloading from s3 * @param networkTimeoutInMilli network timeout * @param totalChunks used to log the information of total chunks * @param queryId used to log the queryId to which the chunk belongs to * @return A callable responsible for downloading chunk */ private static Callable getDownloadChunkCallable( final SnowflakeChunkDownloader downloader, final SnowflakeResultChunk resultChunk, final String qrmk, final int chunkIndex, final Map chunkHeadersMap, final int networkTimeoutInMilli, final int authTimeout, final int socketTimeout, final int maxHttpRetries, final SFBaseSession session, final int totalChunks, final String queryId) { ChunkDownloadContext downloadContext = new ChunkDownloadContext( downloader, resultChunk, qrmk, chunkIndex, chunkHeadersMap, networkTimeoutInMilli, authTimeout, socketTimeout, maxHttpRetries, session); return new Callable() { /** * Read the input stream and parse chunk data into memory * * @param inputStream * @throws SnowflakeSQLException */ private void downloadAndParseChunk(InputStream inputStream) throws SnowflakeSQLException { // remember the download time resultChunk.setDownloadTime(System.currentTimeMillis() - startTime); downloader.addDownloadTime(resultChunk.getDownloadTime()); startTime = System.currentTimeMillis(); // parse the result json try { if (downloader.queryResultFormat == QueryResultFormat.ARROW) { ((ArrowResultChunk) resultChunk).readArrowStream(inputStream); } else { parseJsonToChunkV2(inputStream, resultChunk); } } catch (Exception ex) { logger.debug( "Thread {} Exception when parsing result chunk#{}: {}", Thread.currentThread().getId(), chunkIndex, ex.getLocalizedMessage()); throw new SnowflakeSQLLoggedException( session, SqlState.INTERNAL_ERROR, ErrorCode.INTERNAL_ERROR.getMessageCode(), ex, "Exception: " + ex.getLocalizedMessage()); } finally { // close the buffer reader will close underlying stream logger.debug( "Thread {} close input stream for chunk#{}", Thread.currentThread().getId(), chunkIndex); try { inputStream.close(); } catch (IOException ex) { throw new SnowflakeSQLLoggedException( session, SqlState.INTERNAL_ERROR, ErrorCode.INTERNAL_ERROR.getMessageCode(), ex, "Exception: " + ex.getLocalizedMessage()); } } // add parsing time resultChunk.setParseTime(System.currentTimeMillis() - startTime); downloader.addParsingTime(resultChunk.getParseTime()); } private long startTime; public Void call() { resultChunk.getLock().lock(); try { resultChunk.setDownloadState(DownloadState.IN_PROGRESS); } finally { resultChunk.getLock().unlock(); } logger.debug( "Downloading chunk#{}, url: {}, Thread {}", chunkIndex, resultChunk.getUrl(), Thread.currentThread().getId()); startTime = System.currentTimeMillis(); // initialize the telemetry service for this downloader thread using the main telemetry // service TelemetryService.getInstance().updateContext(downloader.snowflakeConnectionString); try { if (SnowflakeChunkDownloader.injectedDownloaderException != null) { // Normal flow will never hit here. 
This is only for testing purpose throw SnowflakeChunkDownloader.injectedDownloaderException; } InputStream is = downloader.getResultStreamProvider().getInputStream(downloadContext); logger.debug( "Thread {} start downloading chunk#{}", Thread.currentThread().getId(), chunkIndex); downloadAndParseChunk(is); logger.debug( "Thread {} finish downloading chunk#{}", Thread.currentThread().getId(), chunkIndex); downloader.downloaderFutures.remove(chunkIndex); if (chunkIndex % 5 == 0) { logger.debug( "Processed {} chunk#{} in {} ms ({} out of {}) for query {}. Download took {} ms, " + "parsing took {} ms. Chunk uncompressed size: {} kB, cols: {}, rows: {}, scrubbed URL: {}", downloader.queryResultFormat == QueryResultFormat.ARROW ? "ARROW" : "JSON", chunkIndex, resultChunk.getTotalTime(), chunkIndex + 1, totalChunks, queryId, resultChunk.getDownloadTime(), resultChunk.getParseTime(), resultChunk.getUncompressedSize() / 1024, resultChunk.colCount, resultChunk.rowCount, resultChunk.getScrubbedUrl()); } else { logger.trace( "Processed {} chunk#{} in {} ms ({} out of {}) for query {}. Download took {} ms, " + "parsing took {} ms. Chunk uncompressed size: {} kB, cols: {}, rows: {}, scrubbed URL: {}", downloader.queryResultFormat == QueryResultFormat.ARROW ? "ARROW" : "JSON", chunkIndex, resultChunk.getTotalTime(), chunkIndex + 1, totalChunks, queryId, resultChunk.getDownloadTime(), resultChunk.getParseTime(), resultChunk.getUncompressedSize() / 1024, resultChunk.colCount, resultChunk.rowCount, resultChunk.getScrubbedUrl()); } resultChunk.getLock().lock(); try { logger.debug("Get lock to change the chunk to be ready to consume", false); logger.debug("Wake up consumer if it is waiting for a chunk to be ready", false); resultChunk.setDownloadState(DownloadState.SUCCESS); resultChunk.getDownloadCondition().signal(); } finally { logger.debug("Downloaded chunk#{}, free lock", chunkIndex); resultChunk.getLock().unlock(); } } catch (Throwable th) { resultChunk.getLock().lock(); try { logger.debug("Get lock to set chunk download error", false); resultChunk.setDownloadState(DownloadState.FAILURE); downloader.releaseCurrentMemoryUsage(chunkIndex, Optional.empty()); StringWriter errors = new StringWriter(); th.printStackTrace(new PrintWriter(errors)); resultChunk.setDownloadError(errors.toString()); logger.debug("Wake up consumer if it is waiting for a chunk to be ready", false); resultChunk.getDownloadCondition().signal(); } finally { logger.debug("Failed to download chunk#{}, free lock", chunkIndex); resultChunk.getLock().unlock(); } logger.debug( "Thread {} Exception encountered ({}:{}) fetching chunk#{} from: {}, Error {}", Thread.currentThread().getId(), th.getClass().getName(), th.getLocalizedMessage(), chunkIndex, resultChunk.getScrubbedUrl(), resultChunk.getDownloadError()); } return null; } private void parseJsonToChunkV2(InputStream jsonInputStream, SnowflakeResultChunk resultChunk) throws IOException, SnowflakeSQLException { /* * This is a hand-written binary parser that * handle. * [ "c1", "c2", null, ... ], * [ null, "c2", "c3", ... ], * ... * [ "c1", "c2", "c3", ... ], * in UTF-8 * The number of rows is known and the number of expected columns * is also known. */ ResultJsonParserV2 jp = new ResultJsonParserV2(); jp.startParsing((JsonResultChunk) resultChunk, session); byte[] buf = new byte[STREAM_BUFFER_SIZE]; // To be used to copy the leftover buffer data in the case of buffer ending in escape state // during parsing. 
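// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this file): the read loop below prepends
// any bytes the parser left unconsumed (e.g., a buffer ending mid escape
// sequence) to the front of the next read. Standalone version of that
// carry-over pattern with a toy "parser" that only consumes complete
// '\n'-terminated lines:
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

class CarryOverSketch {
  public static void main(String[] args) throws IOException {
    InputStream in =
        new ByteArrayInputStream("ab\ncdef\ng".getBytes(StandardCharsets.UTF_8));
    byte[] buf = new byte[4];
    byte[] leftover = null;
    int len;
    while ((len = in.read(buf)) != -1) {
      byte[] chunk;
      if (leftover != null) { // prepend what the last round could not parse
        chunk = new byte[leftover.length + len];
        System.arraycopy(leftover, 0, chunk, 0, leftover.length);
        System.arraycopy(buf, 0, chunk, leftover.length, len);
      } else {
        chunk = Arrays.copyOf(buf, len);
      }
      ByteBuffer bb = ByteBuffer.wrap(chunk);
      consumeFullLines(bb);
      leftover = null;
      if (bb.hasRemaining()) { // keep the partial trailing line for next round
        leftover = new byte[bb.remaining()];
        bb.get(leftover);
      }
    }
    // A real parser flushes the final partial input at end of stream, as
    // jp.endParsing(...) does below.
    System.out.println("trailing bytes: " + (leftover == null ? 0 : leftover.length));
  }

  // Advance the position past the last complete line; partial data remains.
  private static void consumeFullLines(ByteBuffer bb) {
    int lastNewline = -1;
    for (int i = 0; i < bb.limit(); i++) {
      if (bb.get(i) == '\n') {
        lastNewline = i;
      }
    }
    bb.position(lastNewline + 1);
  }
}
// ---------------------------------------------------------------------------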
byte[] prevBuffer = null; ByteBuffer bBuf = null; int len; logger.debug( "Thread {} start to read inputstream for chunk#{}", Thread.currentThread().getId(), chunkIndex); while ((len = jsonInputStream.read(buf)) != -1) { if (prevBuffer != null) { // if parsing stopped during an escape sequence in jp.continueParsing() and there is // leftover data in the buffer, // prepend the copied data to the next buffer read from the output stream. ByteArrayOutputStream os = new ByteArrayOutputStream(); os.write(prevBuffer); os.write(buf); buf = os.toByteArray(); len += prevBuffer.length; } bBuf = ByteBuffer.wrap(buf, 0, len); jp.continueParsing(bBuf, session); if (bBuf.remaining() > 0) { // if there is any data left un-parsed, it will be prepended to the next buffer read. prevBuffer = new byte[bBuf.remaining()]; bBuf.get(prevBuffer); } else { prevBuffer = null; } } logger.debug( "Thread {} finish reading inputstream for chunk#{}", Thread.currentThread().getId(), chunkIndex); if (prevBuffer != null) { bBuf = ByteBuffer.wrap(prevBuffer); } else { bBuf = ByteBuffer.wrap(new byte[0]); } jp.endParsing(bBuf, session); } }; } /** This is a No Operation chunk downloader to avoid potential null pointer exception */ public static class NoOpChunkDownloader implements ChunkDownloader { @Override public SnowflakeResultChunk getNextChunkToConsume() throws SnowflakeSQLException { return null; } @Override public DownloaderMetrics terminate() { return null; } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/SnowflakeClob.java ================================================ package net.snowflake.client.internal.jdbc; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.io.Reader; import java.io.StringReader; import java.io.Writer; import java.sql.Clob; import java.sql.SQLException; /** A simple Clob implementation using String */ public class SnowflakeClob implements Clob { private StringBuffer buffer; private class StringBufferWriter extends Writer { private StringBuffer main; private StringBuffer current; /** */ public StringBufferWriter(StringBuffer buffer, int pos) { super(); this.main = buffer; this.current = new StringBuffer(); } @Override public void write(final char[] cbuf, final int off, final int len) throws IOException { for (int i = 0; i < len; i++) { this.current.append(cbuf[off + i]); } } @Override public void flush() throws IOException { this.main.append(this.current); this.current.delete(0, this.current.length()); } @Override public void close() throws IOException { if (this.current == null) { throw new IOException(); } flush(); this.current = null; } } private class StringBufferOutputStream extends OutputStream { private StringBuffer buffer; private int offset; /** */ public StringBufferOutputStream(StringBuffer buffer, int pos) { super(); this.buffer = buffer; this.offset = pos - 1; } /* * @see java.io.OutputStream#write(int) */ public void write(int c) throws IOException { if (this.offset >= this.buffer.length()) { buffer.append((char) c); } else { buffer.replace(this.offset, this.offset + 1, Integer.toString(c)); } } public String toString() { return buffer.toString(); } public void clear() { buffer.delete(0, buffer.length()); } } public SnowflakeClob() { buffer = new StringBuffer(); } public SnowflakeClob(String content) { buffer = new StringBuffer(content); } @Override public long length() throws SQLException { return buffer.length(); } @Override public String 
getSubString(final long pos, final int length) throws SQLException { if (pos < 1 || length < 0) { throw new SQLException(); } return buffer.substring((int) pos - 0, (int) pos - 0 + length); } @Override public Reader getCharacterStream() throws SQLException { return new StringReader(buffer.toString()); } @Override public InputStream getAsciiStream() throws SQLException { return new ByteArrayInputStream(buffer.toString().getBytes()); } @Override public long position(final String searchstr, final long start) throws SQLException { if (start < 1) { throw new SQLException(); } return (long) buffer.lastIndexOf(searchstr, (int) start - 1); } @Override public long position(final Clob searchstr, final long start) throws SQLException { if (start < 1) { throw new SQLException(); } return (long) buffer.lastIndexOf(searchstr.toString(), (int) start - 1); } @Override public int setString(final long pos, final String str) throws SQLException { if (pos < 1) { throw new SQLException(); } buffer.insert((int) pos - 1, str); return str.length(); } @Override public int setString(final long pos, final String str, final int offset, final int len) throws SQLException { if (pos < 1) { throw new SQLException(); } String substring = str.substring(offset, len); buffer.insert((int) pos - 1, substring); return substring.length(); } @Override public OutputStream setAsciiStream(final long pos) throws SQLException { return new StringBufferOutputStream(buffer, (int) pos); } @Override public Writer setCharacterStream(final long pos) throws SQLException { return new StringBufferWriter(buffer, (int) pos); } @Override public void truncate(final long len) throws SQLException { if (buffer.length() > len) { buffer.delete((int) len, buffer.length()); } } @Override public void free() throws SQLException { buffer = new StringBuffer(); } @Override public Reader getCharacterStream(final long pos, final long length) throws SQLException { return new StringReader(buffer.substring((int) pos - 1, (int) pos - 1 + (int) length)); } @Override public String toString() { return buffer.toString(); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/SnowflakeColumn.java ================================================ package net.snowflake.client.internal.jdbc; import static java.lang.annotation.ElementType.FIELD; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; @Target({FIELD}) @Retention(RetentionPolicy.RUNTIME) public @interface SnowflakeColumn { /** * (Optional) The name for a column in database, * * @return The default value is empty string. Provided name can override SqlData field name. */ String name() default ""; /** * (Optional) The snowflake type for a column * * @return The default value is empty string Provided type can override default type. */ String type() default ""; /** * (Optional) The snowflake nullable flag for a column * * @return The default value is true Provided value can override default nullable value. */ boolean nullable() default true; /** * (Optional) The length for a column of SQL type {@code varchar} or {@code binary}, or of similar * database-native type. * *
Applies only to columns of exact varchar and binary type. * * @return The default value {@code -1} indicates that a provider-determined length should be * inferred. */ int length() default -1; /** * (Optional) The byte length for a column of SQL type {@code binary}, or of similar database-native * type. * *
Applies only to columns of exact varchar and binary type. * * @return The default value {@code -1} indicates that a provider-determined byteLength should be * inferred. */ int byteLength() default -1; /** * (Optional) The precision for a column of SQL type {@code decimal} or {@code numeric}, or of * similar database-native type. * *
Applies only to columns of exact numeric type. * * @return The default value {@code -1} indicates that a provider-determined precision should be * inferred. */ int precision() default -1; /** * (Optional) The scale for a column of SQL type {@code decimal} or {@code numeric}, or of similar * database-native type. * *
Applies only to columns of exact numeric type. * * @return The default value {@code 0} indicates that a provider-determined scale should be * inferred. */ int scale() default -1; } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/SnowflakeColumnMetadata.java ================================================ package net.snowflake.client.internal.jdbc; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.getFieldMetadata; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.getSnowflakeType; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isVectorType; import com.fasterxml.jackson.databind.JsonNode; import java.io.Serializable; import java.sql.Types; import java.util.List; import net.snowflake.client.api.resultset.FieldMetadata; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.core.SFBaseSession; import net.snowflake.client.internal.exception.SnowflakeSQLLoggedException; public class SnowflakeColumnMetadata implements Serializable { private static final long serialVersionUID = 1L; private String name; private String typeName; private int type; private boolean nullable; private int length; private int precision; private int scale; private boolean fixed; private SnowflakeType base; private List fields; private String columnSrcTable; private String columnSrcSchema; private String columnSrcDatabase; private boolean isAutoIncrement; private int dimension; // vector type contains dimension public SnowflakeColumnMetadata( String name, int type, boolean nullable, int length, int precision, int scale, String typeName, boolean fixed, SnowflakeType base, List fields, String columnSrcDatabase, String columnSrcSchema, String columnSrcTable, boolean isAutoIncrement, int dimension) { this.name = name; this.type = type; this.nullable = nullable; this.length = length; this.precision = precision; this.scale = scale; this.typeName = typeName; this.fixed = fixed; this.base = base; this.fields = fields; this.columnSrcDatabase = columnSrcDatabase; this.columnSrcSchema = columnSrcSchema; this.columnSrcTable = columnSrcTable; this.isAutoIncrement = isAutoIncrement; this.dimension = dimension; } /** * @deprecated Use {@link SnowflakeColumnMetadata#SnowflakeColumnMetadata(String, int, boolean, * int, int, int, String, boolean, SnowflakeType, List, String, String, String, boolean, int)} * instead * @param name name * @param type type * @param nullable is nullable * @param length length * @param precision precision * @param scale scale * @param typeName type name * @param fixed is fixed * @param base SnowflakeType * @param columnSrcDatabase column source database * @param columnSrcSchema column source schema * @param columnSrcTable column source table * @param isAutoIncrement is auto-increment */ @Deprecated public SnowflakeColumnMetadata( String name, int type, boolean nullable, int length, int precision, int scale, String typeName, boolean fixed, SnowflakeType base, String columnSrcDatabase, String columnSrcSchema, String columnSrcTable, boolean isAutoIncrement) { this.name = name; this.type = type; this.nullable = nullable; this.length = length; this.precision = precision; this.scale = scale; this.typeName = typeName; this.fixed = fixed; this.base = base; this.columnSrcDatabase = columnSrcDatabase; this.columnSrcSchema = columnSrcSchema; this.columnSrcTable = columnSrcTable; this.isAutoIncrement = 
isAutoIncrement; } public SnowflakeColumnMetadata( JsonNode colNode, boolean jdbcTreatDecimalAsInt, SFBaseSession session) throws SnowflakeSQLLoggedException { this.name = colNode.path("name").asText(); this.nullable = colNode.path("nullable").asBoolean(); this.precision = colNode.path("precision").asInt(); this.scale = colNode.path("scale").asInt(); this.length = colNode.path("length").asInt(); int dimension = colNode .path("dimension") .asInt(); // vector dimension when checking columns via connection.getMetadata int vectorDimension = colNode .path("vectorDimension") .asInt(); // dimension when checking columns via resultSet.getMetadata this.dimension = dimension > 0 ? dimension : vectorDimension; this.fixed = colNode.path("fixed").asBoolean(); JsonNode udtOutputType = colNode.path("outputType"); JsonNode extColTypeNameNode = colNode.path("extTypeName"); String extColTypeName = null; if (!extColTypeNameNode.isMissingNode() && !isNullOrEmpty(extColTypeNameNode.asText())) { extColTypeName = extColTypeNameNode.asText(); } String internalColTypeName = colNode.path("type").asText(); List fieldsMetadata = getFieldMetadata(jdbcTreatDecimalAsInt, internalColTypeName, colNode); int fixedColType = jdbcTreatDecimalAsInt && scale == 0 ? Types.BIGINT : Types.DECIMAL; ColumnTypeInfo columnTypeInfo = getSnowflakeType( internalColTypeName, extColTypeName, udtOutputType, session, fixedColType, !fieldsMetadata.isEmpty(), isVectorType(internalColTypeName)); this.typeName = columnTypeInfo.getExtColTypeName(); this.type = columnTypeInfo.getColumnType(); this.base = columnTypeInfo.getSnowflakeType(); this.fields = fieldsMetadata; this.columnSrcDatabase = colNode.path("database").asText(); this.columnSrcSchema = colNode.path("schema").asText(); this.columnSrcTable = colNode.path("table").asText(); this.isAutoIncrement = colNode.path("isAutoIncrement").asBoolean(); } public String getName() { return name; } public void setName(String name) { this.name = name; } public int getType() { return type; } public void setType(int type) { this.type = type; } public boolean isNullable() { return nullable; } public void setNullable(boolean nullable) { this.nullable = nullable; } public int getLength() { return length; } public void setLength(int length) { this.length = length; } public int getPrecision() { return precision; } public void setPrecision(int precision) { this.precision = precision; } public int getScale() { return scale; } public void setScale(int scale) { this.scale = scale; } public String getTypeName() { return typeName; } public void setTypeName(String typeName) { this.typeName = typeName; } public boolean isFixed() { return fixed; } public void setFixed(boolean fixed) { this.fixed = fixed; } public SnowflakeType getBase() { return this.base; } public List getFields() { return fields; } public void setFields(List fields) { this.fields = fields; } public String getColumnSrcTable() { return this.columnSrcTable; } public String getColumnSrcSchema() { return this.columnSrcSchema; } public String getColumnSrcDatabase() { return this.columnSrcDatabase; } public boolean isAutoIncrement() { return isAutoIncrement; } public void setAutoIncrement(boolean autoIncrement) { isAutoIncrement = autoIncrement; } public int getDimension() { return dimension; } public String toString() { StringBuilder sBuilder = new StringBuilder(); sBuilder.append("name=").append(name); sBuilder.append(",typeName=").append(typeName); sBuilder.append(",type=").append(type); sBuilder.append(",nullable=").append(nullable); 
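// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this file): the JsonNode constructor above
// chooses the JDBC type for Snowflake FIXED columns via
// "jdbcTreatDecimalAsInt && scale == 0 ? Types.BIGINT : Types.DECIMAL".
// Standalone version of that mapping; the flag is typically wired from the
// JDBC_TREAT_DECIMAL_AS_INT connection property (exact name assumed here).
import java.sql.Types;

class FixedTypeMappingSketch {
  // NUMBER(p, 0) maps to BIGINT when the flag is on; anything else stays DECIMAL.
  static int fixedColType(boolean jdbcTreatDecimalAsInt, int scale) {
    return jdbcTreatDecimalAsInt && scale == 0 ? Types.BIGINT : Types.DECIMAL;
  }

  public static void main(String[] args) {
    System.out.println(fixedColType(true, 0) == Types.BIGINT); // true
    System.out.println(fixedColType(true, 2) == Types.DECIMAL); // true
    System.out.println(fixedColType(false, 0) == Types.DECIMAL); // true
  }
}
// ---------------------------------------------------------------------------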
sBuilder.append(",length=").append(length); sBuilder.append(",precision=").append(precision); sBuilder.append(",scale=").append(scale); sBuilder.append(",fixed=").append(fixed); sBuilder.append(",database=").append(columnSrcDatabase); sBuilder.append(",schema=").append(columnSrcSchema); sBuilder.append(",table=").append(columnSrcTable); sBuilder.append((",isAutoIncrement=")).append(isAutoIncrement); sBuilder.append((",dimension=")).append(dimension); return sBuilder.toString(); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/SnowflakeConnectString.java ================================================ package net.snowflake.client.internal.jdbc; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty; import java.io.Serializable; import java.io.UnsupportedEncodingException; import java.net.URI; import java.net.URISyntaxException; import java.net.URLDecoder; import java.net.URLEncoder; import java.util.Collections; import java.util.HashMap; import java.util.Locale; import java.util.Map; import java.util.Properties; import net.snowflake.client.internal.core.SFSessionProperty; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.client.internal.util.SecretDetector; public class SnowflakeConnectString implements Serializable { private static final long serialVersionUID = 1L; private static final SFLogger logger = SFLoggerFactory.getLogger(SnowflakeConnectString.class); private final String scheme; private final String host; private final int port; private final Map parameters; private final String account; private static SnowflakeConnectString INVALID_CONNECT_STRING = new SnowflakeConnectString("", "", -1, Collections.emptyMap(), ""); private static final String PREFIX = "jdbc:snowflake://"; public static boolean hasSupportedPrefix(String url) { return url.startsWith(PREFIX); } public static SnowflakeConnectString parse(String url, Properties info) { if (url == null) { logger.debug("Connect strings must be non-null"); return INVALID_CONNECT_STRING; } int pos = url.indexOf(PREFIX); if (pos != 0) { logger.debug("Connect strings must start with jdbc:snowflake://"); return INVALID_CONNECT_STRING; // not start with jdbc:snowflake:// } String afterPrefix = url.substring(pos + PREFIX.length()); String scheme; String host = null; int port = -1; Map parameters = new HashMap<>(); try { URI uri; if (!afterPrefix.startsWith("http://") && !afterPrefix.startsWith("https://")) { // not explicitly specified afterPrefix = url.substring(url.indexOf("snowflake:")); } uri = new URI(afterPrefix); scheme = uri.getScheme(); String authority = uri.getRawAuthority(); String[] hostAndPort = authority.split(":"); if (hostAndPort.length == 2) { host = hostAndPort[0]; port = Integer.parseInt(hostAndPort[1]); } else if (hostAndPort.length == 1) { host = hostAndPort[0]; } String queryData = uri.getRawQuery(); if (!scheme.equals("snowflake") && !scheme.equals("http") && !scheme.equals("https")) { logger.debug("Connect strings must have a valid scheme: 'snowflake' or 'http' or 'https'"); return INVALID_CONNECT_STRING; } if (isNullOrEmpty(host)) { logger.debug("Connect strings must have a valid host: found null or empty host"); return INVALID_CONNECT_STRING; } if (port == -1) { port = 443; } String path = uri.getPath(); if (!isNullOrEmpty(path) && !"/".equals(path)) { logger.debug("Connect strings must have no path: expecting empty or null or '/'"); return 
INVALID_CONNECT_STRING; } String account = null; if (!isNullOrEmpty(queryData)) { String[] params = queryData.split("&"); for (String p : params) { String[] keyVals = p.split("="); if (keyVals.length != 2) { continue; // ignore invalid pair of parameters. } try { String k = URLDecoder.decode(keyVals[0], "UTF-8"); String v = URLDecoder.decode(keyVals[1], "UTF-8"); if ("ssl".equalsIgnoreCase(k) && !getBooleanTrueByDefault(v)) { scheme = "http"; } else if ("account".equalsIgnoreCase(k)) { account = v; } parameters.put(k.toUpperCase(Locale.US), v); } catch (UnsupportedEncodingException ex0) { logger.warn("Failed to decode a parameter {}. Ignored.", p); } } } if ("snowflake".equals(scheme)) { scheme = "https"; // by default } if (info.size() > 0) { // NOTE: value in info could be any data type. // overwrite the properties for (Map.Entry entry : info.entrySet()) { String k = entry.getKey().toString(); Object v = entry.getValue(); if ("ssl".equalsIgnoreCase(k) && !getBooleanTrueByDefault(v)) { scheme = "http"; } else if ("account".equalsIgnoreCase(k)) { account = (String) v; } parameters.put(k.toUpperCase(Locale.US), v); } } if (parameters.get("ACCOUNT") == null && account == null && host.indexOf(".") > 0) { account = host.substring(0, host.indexOf(".")); // If this is a global URL, then extract out the external ID part if (host.contains(".global.")) { account = account.substring(0, account.lastIndexOf('-')); } // Account names should not be altered. Set it to a value without org name // if it's a global url parameters.put("ACCOUNT", account); } if (isNullOrEmpty(account)) { logger.debug("Connect strings must contain account identifier"); return INVALID_CONNECT_STRING; } // By default, don't allow underscores in host name unless the property is set to true via // connection properties. boolean allowUnderscoresInHost = false; if ("true" .equalsIgnoreCase( (String) parameters.get( SFSessionProperty.ALLOW_UNDERSCORES_IN_HOST .getPropertyKey() .toUpperCase()))) { allowUnderscoresInHost = true; } if (account.contains("_") && !allowUnderscoresInHost && host.startsWith(account)) { // The account needs to have underscores in it and the host URL needs to start // with the account name. There are cases where the host URL might not have the // the account name in it, ex - ip address instead of host name. // The property allowUnderscoresInHost needs to be set to false. // Update the Host URL to remove underscores if there are any String account_wo_uscores = account.replaceAll("_", "-"); host = host.replaceFirst(account, account_wo_uscores); } return new SnowflakeConnectString(scheme, host, port, parameters, account); } catch (URISyntaxException uriEx) { logger.warn( "Exception thrown while parsing Snowflake connect string. Illegal character in url."); return INVALID_CONNECT_STRING; } catch (Exception ex) { logger.warn("Exception thrown while parsing Snowflake connect string", ex); return INVALID_CONNECT_STRING; } } private SnowflakeConnectString( String scheme, String host, int port, Map parameters, String account) { this.scheme = scheme; this.host = host; this.port = port; this.parameters = parameters; this.account = account; } public String toString() { return toString(true); } public String toString(boolean maskSensitiveValue) { StringBuilder urlStr = new StringBuilder(); urlStr.append(scheme); urlStr.append("://"); urlStr.append(host); urlStr.append(":"); urlStr.append(port); urlStr.append(parameters.size() > 0 ? "?" 
: ""); int cnt = 0; for (Map.Entry entry : parameters.entrySet()) { if (cnt > 0) { urlStr.append('&'); } try { String k = URLEncoder.encode(entry.getKey(), "UTF-8"); String v = URLEncoder.encode(entry.getValue().toString(), "UTF-8"); urlStr.append(k).append('='); if (maskSensitiveValue) { urlStr.append(SecretDetector.maskParameterValue(k, v)); } else { urlStr.append(v); } } catch (UnsupportedEncodingException ex) { logger.warn("Failed to encode a parameter {}. Ignored.", entry.getKey()); } ++cnt; } return urlStr.toString(); } public boolean isValid() { // invalid if host name is null or empty return !isNullOrEmpty(host); } public String getScheme() { return scheme; } public String getHost() { return host; } public int getPort() { return port; } public Map getParameters() { return parameters; } public String getAccount() { return account; } private static boolean getBooleanTrueByDefault(Object value) { if (value instanceof Boolean) { return (Boolean) value; } String vs = value.toString(); return !"off".equalsIgnoreCase(vs) && !Boolean.FALSE.toString().equalsIgnoreCase(vs); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/SnowflakeDatabaseMetaDataQueryResultSet.java ================================================ package net.snowflake.client.internal.jdbc; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; /** Database Metadata query based result set. */ public class SnowflakeDatabaseMetaDataQueryResultSet extends SnowflakeDatabaseMetaDataResultSet { public SnowflakeDatabaseMetaDataQueryResultSet( DBMetadataResultSetMetadata metadataType, ResultSet resultSet, Statement statement) throws SQLException { super( metadataType.getColumnNames(), metadataType.getColumnTypeNames(), metadataType.getColumnTypes(), resultSet, statement); } /** * Query result set cannot tell the last row. * * @return n/a * @throws SQLException if the result set is closed or SQLFeatureNotSupportedException */ @Override public boolean isLast() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); throw new SnowflakeLoggedFeatureNotSupportedException(session); } /** * Query result set cannot tell after the last row. 
* * @return n/a * @throws SQLException if the result set is closed or SQLFeatureNotSupportedException */ @Override public boolean isAfterLast() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); throw new SnowflakeLoggedFeatureNotSupportedException(session); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/SnowflakeDatabaseMetaDataResultSet.java ================================================ package net.snowflake.client.internal.jdbc; import static net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.internalCallMarker; import java.math.BigDecimal; import java.math.RoundingMode; import java.nio.charset.StandardCharsets; import java.sql.Date; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.sql.Time; import java.sql.Timestamp; import java.sql.Types; import java.util.List; import java.util.Map; import java.util.TimeZone; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.api.resultset.SnowflakeResultSetSerializable; import net.snowflake.client.internal.api.implementation.connection.SnowflakeConnectionImpl; import net.snowflake.client.internal.api.implementation.resultset.SnowflakeBaseResultSet; import net.snowflake.client.internal.core.SFBaseSession; import net.snowflake.client.internal.core.SFResultSetMetaData; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.common.core.SqlState; public class SnowflakeDatabaseMetaDataResultSet extends SnowflakeBaseResultSet { protected ResultSet showObjectResultSet; protected Object[] nextRow; private boolean wasNull = false; protected Object[][] rows; protected int row = -1; private String queryId; private static final SFLogger logger = SFLoggerFactory.getLogger(SnowflakeDatabaseMetaDataResultSet.class); /** * DatabaseMetadataResultSet based on result from show command * * @param columnNames column names * @param columnTypeNames column type names * @param columnTypes column types * @param showObjectResultSet result set after issuing a show command * @param statement show command statement * @throws SQLException if failed to construct snowflake database metadata result set */ public SnowflakeDatabaseMetaDataResultSet( final List columnNames, final List columnTypeNames, final List columnTypes, final ResultSet showObjectResultSet, final Statement statement) throws SQLException { super(statement); this.showObjectResultSet = showObjectResultSet; SFBaseSession session = statement .getConnection() .unwrap(SnowflakeConnectionImpl.class) .getSFBaseSession(internalCallMarker()); SFResultSetMetaData sfset = new SFResultSetMetaData( columnNames.size(), columnNames, columnTypeNames, columnTypes, session); this.resultSetMetaData = new SnowflakeResultSetMetaDataV1(sfset); this.nextRow = new Object[columnNames.size()]; } /** * DatabaseMetadataResultSet based on a constant rowset. 
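* <p>For example, {@link #getEmptyResultSet} feeds an empty {@code Object[][]} through this constructor to build a zero-row metadata result.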
* * @param columnNames column name * @param columnTypeNames column types name * @param columnTypes column type * @param rows returned value of database metadata * @param statement show command statement * @throws SQLException if failed to construct snowflake database metadata result set */ public SnowflakeDatabaseMetaDataResultSet( final List columnNames, final List columnTypeNames, final List columnTypes, final Object[][] rows, final Statement statement) throws SQLException { super(statement); this.rows = rows; SFBaseSession session = statement .getConnection() .unwrap(SnowflakeConnectionImpl.class) .getSFBaseSession(internalCallMarker()); SFResultSetMetaData sfset = new SFResultSetMetaData( columnNames.size(), columnNames, columnTypeNames, columnTypes, session); this.resultSetMetaData = new SnowflakeResultSetMetaDataV1(sfset); this.nextRow = new Object[columnNames.size()]; } public SnowflakeDatabaseMetaDataResultSet( DBMetadataResultSetMetadata metadataType, Object[][] rows, Statement statement) throws SQLException { this( metadataType.getColumnNames(), metadataType.getColumnTypeNames(), metadataType.getColumnTypes(), rows, statement); } public SnowflakeDatabaseMetaDataResultSet( DBMetadataResultSetMetadata metadataType, Object[][] rows, Statement statement, String queryId) throws SQLException { this( metadataType.getColumnNames(), metadataType.getColumnTypeNames(), metadataType.getColumnTypes(), rows, statement); this.queryId = queryId; } @Override public boolean isClosed() throws SQLException { // no exception is raised. return statement.isClosed(); } @Override public boolean next() throws SQLException { logger.trace("boolean next()", false); incrementRow(); // no exception is raised even after the result set is closed. if (row < rows.length) { nextRow = rows[row]; return true; } return false; } /** * Increments result set row pointer. Mainly used to check the result set isBeforeFirst or * isFirst. */ protected void incrementRow() { ++row; } @Override public void close() throws SQLException { // no exception try { getStatement().close(); // should close both result set and statement. 
} catch (SQLException ex) { logger.debug("Failed to close", ex); } } @Override public boolean isFirst() throws SQLException { logger.trace("boolean isFirst()", false); raiseSQLExceptionIfResultSetIsClosed(); return row == 0; } @Override public boolean isBeforeFirst() throws SQLException { logger.trace("boolean isBeforeFirst()", false); raiseSQLExceptionIfResultSetIsClosed(); return row == -1; } @Override public boolean isLast() throws SQLException { logger.trace("boolean isLast()", false); raiseSQLExceptionIfResultSetIsClosed(); return !isBeforeFirst() && row == rows.length - 1; } @Override public boolean isAfterLast() throws SQLException { logger.trace("boolean isAfterLast()", false); raiseSQLExceptionIfResultSetIsClosed(); return row == rows.length; } @Override public int getRow() throws SQLException { logger.trace("int getRow()", false); raiseSQLExceptionIfResultSetIsClosed(); return row; } @Override public byte[] getBytes(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); String str = this.getString(columnIndex); if (str != null) { return str.getBytes(StandardCharsets.UTF_8); } else { throw new SQLException("Cannot get bytes on null column"); } } @Override public Time getTime(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); Object obj = getObjectInternal(columnIndex); if (obj instanceof Time) { return (Time) obj; } else { throw new SnowflakeSQLException( ErrorCode.INVALID_VALUE_CONVERT, obj.getClass().getName(), "TIME", obj); } } @Override public Timestamp getTimestamp(int columnIndex, TimeZone tz) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); Object obj = getObjectInternal(columnIndex); if (obj instanceof Timestamp) { return (Timestamp) obj; } else { throw new SnowflakeSQLException( ErrorCode.INVALID_VALUE_CONVERT, obj.getClass().getName(), "TIMESTAMP", obj); } } @Override public Date getDate(int columnIndex, TimeZone tz) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); Object obj = getObjectInternal(columnIndex); if (obj instanceof Date) { return (Date) obj; } else { throw new SnowflakeSQLException( ErrorCode.INVALID_VALUE_CONVERT, obj.getClass().getName(), "DATE", obj); } } public static ResultSet getEmptyResult( DBMetadataResultSetMetadata metadataType, Statement statement, String queryId) throws SQLException { return new SnowflakeDatabaseMetaDataResultSet( metadataType, new Object[][] {}, statement, queryId); } public static ResultSet getEmptyResultSet( DBMetadataResultSetMetadata metadataType, Statement statement) throws SQLException { return new SnowflakeDatabaseMetaDataResultSet(metadataType, new Object[][] {}, statement); } Object getObjectInternal(int columnIndex) throws SQLException { logger.trace("Object getObjectInternal(int columnIndex)", false); raiseSQLExceptionIfResultSetIsClosed(); if (nextRow == null) { throw new SQLException("No row found."); } if (columnIndex > nextRow.length) { throw new SQLException("Invalid column index: " + columnIndex); } wasNull = nextRow[columnIndex - 1] == null; logger.debug("Returning column: " + columnIndex + ": " + nextRow[columnIndex - 1]); return nextRow[columnIndex - 1]; } @Override public boolean wasNull() throws SQLException { logger.trace("boolean wasNull() returning {}", wasNull); raiseSQLExceptionIfResultSetIsClosed(); return wasNull; } @Override public String getString(int columnIndex) throws SQLException { logger.trace("String getString(int columnIndex)", false); // Column index starts from 1, not 0. 
Object obj = getObjectInternal(columnIndex); return obj == null ? null : obj.toString(); } @Override public boolean getBoolean(int columnIndex) throws SQLException { logger.trace("boolean getBoolean(int columnIndex)", false); // Column index starts from 1, not 0. Object obj = getObjectInternal(columnIndex); if (obj == null) { return false; } if (obj instanceof String) { if (obj.toString().equals("1")) { return Boolean.TRUE; } return Boolean.FALSE; } else if (obj instanceof Integer) { int i = (Integer) obj; if (i > 0) { return Boolean.TRUE; } return Boolean.FALSE; } else { return ((Boolean) obj).booleanValue(); } } @Override public byte getByte(int columnIndex) throws SQLException { logger.trace("byte getByte(int columnIndex)", false); // Column index starts from 1, not 0. Object obj = getObjectInternal(columnIndex); if (obj == null) { return 0; } if (obj instanceof String) { return Byte.valueOf((String) obj); } else { return (Byte) obj; } } @Override public short getShort(int columnIndex) throws SQLException { logger.trace("short getShort(int columnIndex)", false); // Column index starts from 1, not 0. Object obj = getObjectInternal(columnIndex); if (obj == null) { return 0; } if (obj instanceof String) { return (Short.valueOf((String) obj)).shortValue(); } else { return ((Number) obj).shortValue(); } } @Override public int getInt(int columnIndex) throws SQLException { logger.trace("int getInt(int columnIndex)", false); // Column index starts from 1, not 0. Object obj = getObjectInternal(columnIndex); if (obj == null) { return 0; } if (obj instanceof String) { return (Integer.valueOf((String) obj)).intValue(); } else { return ((Number) obj).intValue(); } } @Override public long getLong(int columnIndex) throws SQLException { logger.trace("long getLong(int columnIndex)", false); // Column index starts from 1, not 0. Object obj = getObjectInternal(columnIndex); if (obj == null) { return 0; } try { if (obj instanceof String) { return (Long.valueOf((String) obj)).longValue(); } else { return ((Number) obj).longValue(); } } catch (NumberFormatException nfe) { throw new SnowflakeSQLException( SqlState.INTERNAL_ERROR, ErrorCode.INTERNAL_ERROR.getMessageCode(), "Invalid long: " + (String) obj); } } @Override public float getFloat(int columnIndex) throws SQLException { logger.trace("float getFloat(int columnIndex)", false); // Column index starts from 1, not 0. Object obj = getObjectInternal(columnIndex); if (obj == null) { return 0; } if (obj instanceof String) { return (Float.valueOf((String) obj)).floatValue(); } else { return ((Number) obj).floatValue(); } } @Override public double getDouble(int columnIndex) throws SQLException { logger.trace("double getDouble(int columnIndex)", false); // Column index starts from 1, not 0. Object obj = getObjectInternal(columnIndex); // snow-11974: null for getDouble should return 0 if (obj == null) { return 0; } if (obj instanceof String) { return (Double.valueOf((String) obj)).doubleValue(); } else { return ((Number) obj).doubleValue(); } } public String getQueryID() { return queryId; } /** * @deprecated */ @Deprecated @Override public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { logger.trace("BigDecimal getBigDecimal(int columnIndex, int scale)", false); BigDecimal value; // Column index starts from 1, not 0. 
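// Illustrative: with scale = 2, a stored "3.14159" is returned as 3.14 (RoundingMode.HALF_UP).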
Object obj = getObjectInternal(columnIndex); if (obj == null) { return null; } if (obj instanceof String) { value = new BigDecimal((String) obj); } else { value = new BigDecimal(obj.toString()); } value = value.setScale(scale, RoundingMode.HALF_UP); return value; } @Override public BigDecimal getBigDecimal(int columnIndex) throws SQLException { logger.trace("BigDecimal getBigDecimal(int columnIndex)", false); BigDecimal value = null; // Column index starts from 1, not 0. Object obj = getObjectInternal(columnIndex); if (obj == null) { return null; } if (obj instanceof String) { value = new BigDecimal((String) obj); } else { value = new BigDecimal(obj.toString()); } return value; } @Override public Object getObject(int columnIndex) throws SQLException { logger.trace("Object getObject(int columnIndex)", false); int type = resultSetMetaData.getColumnType(columnIndex); Object internalObj = getObjectInternal(columnIndex); if (internalObj == null) { return null; } switch (type) { case Types.VARCHAR: case Types.CHAR: return getString(columnIndex); case Types.BINARY: return getBytes(columnIndex); case Types.INTEGER: case Types.SMALLINT: return Integer.valueOf(getInt(columnIndex)); case Types.DECIMAL: return getBigDecimal(columnIndex); case Types.BIGINT: return getLong(columnIndex); case Types.DOUBLE: return Double.valueOf(getDouble(columnIndex)); case Types.TIMESTAMP: return getTimestamp(columnIndex); case Types.DATE: return getDate(columnIndex); case Types.TIME: return getTime(columnIndex); case Types.BOOLEAN: return getBoolean(columnIndex); default: throw new SnowflakeLoggedFeatureNotSupportedException(session); } } @Override public List getResultSetSerializables(long maxSizeInBytes) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public T[] getArray(int columnIndex, Class type) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public List getList(int columnIndex, Class type) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public Map getMap(int columnIndex, Class type) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(session); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/SnowflakeDateWithTimezone.java ================================================ package net.snowflake.client.internal.jdbc; import java.sql.Date; import java.text.DateFormat; import java.text.SimpleDateFormat; import java.util.TimeZone; /** * Date with toString() overridden to display date values in session timezone. Only relevant for * timestamp objects fetched as dates. Normal date objects do not have a timezone associated with * them. 
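* <p>A minimal sketch (epoch millis chosen for illustration): 23:00 UTC on Jan 1, 1970 is already Jan 2 in Asia/Tokyo, so rendering in the session timezone shifts the printed date.
*
* <pre>{@code
* Date d = new SnowflakeDateWithTimezone(
*     82800000L, TimeZone.getTimeZone("Asia/Tokyo"), true); // 1970-01-01T23:00:00Z
* d.toString(); // "1970-01-02" -- formatted against the session timezone
* }</pre>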
*/ public class SnowflakeDateWithTimezone extends Date { TimeZone timezone = TimeZone.getDefault(); boolean useSessionTimezone = false; public SnowflakeDateWithTimezone(long date, TimeZone timezone, boolean useSessionTimezone) { super(date); this.timezone = timezone; this.useSessionTimezone = useSessionTimezone; } /** * Returns a string representation in UTC so as to display "wallclock time" * * @return a string representation of the object */ public synchronized String toString() { if (!useSessionTimezone) { return super.toString(); } String baseFormat = "yyyy-MM-dd"; DateFormat formatter = new SimpleDateFormat(baseFormat); formatter.setTimeZone(this.timezone); return formatter.format(this); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/SnowflakeFileTransferAgent.java ================================================ package net.snowflake.client.internal.jdbc; import static net.snowflake.client.internal.core.Constants.NO_SPACE_LEFT_ON_DEVICE_ERR; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.createOwnerOnlyPermissionDir; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import static net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.recordIfExternal; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.io.ByteStreams; import com.google.common.io.CountingOutputStream; import java.io.File; import java.io.FileFilter; import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.nio.file.Files; import java.nio.file.Paths; import java.security.DigestOutputStream; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.sql.SQLException; import java.util.ArrayList; import java.util.Arrays; import java.util.Base64; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Map.Entry; import java.util.Optional; import java.util.Properties; import java.util.Set; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.regex.Pattern; import java.util.stream.Collectors; import java.util.zip.GZIPOutputStream; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.core.FileUtil; import net.snowflake.client.internal.core.HttpClientSettingsKey; import net.snowflake.client.internal.core.OCSPMode; import net.snowflake.client.internal.core.ObjectMapperFactory; import net.snowflake.client.internal.core.SFBaseSession; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.core.SFFixedViewResultSet; import net.snowflake.client.internal.core.SFSession; import net.snowflake.client.internal.core.SFStatement; import net.snowflake.client.internal.exception.SnowflakeSQLLoggedException; import 
net.snowflake.client.internal.jdbc.cloud.storage.SnowflakeStorageClient; import net.snowflake.client.internal.jdbc.cloud.storage.StageInfo; import net.snowflake.client.internal.jdbc.cloud.storage.StorageClientFactory; import net.snowflake.client.internal.jdbc.cloud.storage.StorageObjectMetadata; import net.snowflake.client.internal.jdbc.cloud.storage.StorageObjectSummary; import net.snowflake.client.internal.jdbc.cloud.storage.StorageObjectSummaryCollection; import net.snowflake.client.internal.jdbc.cloud.storage.StorageProviderException; import net.snowflake.client.internal.jdbc.telemetry.ExecTimeTelemetryData; import net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.InternalCallMarker; import net.snowflake.client.internal.jdbc.telemetryOOB.TelemetryService; import net.snowflake.client.internal.log.ArgSupplier; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.client.internal.util.SecretDetector; import net.snowflake.common.core.FileCompressionType; import net.snowflake.common.core.RemoteStoreFileEncryptionMaterial; import net.snowflake.common.core.SqlState; import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; import org.apache.commons.io.filefilter.WildcardFileFilter; /** Class for uploading/downloading files */ public class SnowflakeFileTransferAgent extends SFBaseFileTransferAgent { private static final SFLogger logger = SFLoggerFactory.getLogger(SnowflakeFileTransferAgent.class); private static final ObjectMapper mapper = ObjectMapperFactory.getObjectMapper(); // We will allow buffering of upto 128M data before spilling to disk during // compression and digest computation static final int MAX_BUFFER_SIZE = 1 << 27; public static final String SRC_FILE_NAME_FOR_STREAM = "stream"; private static final String FILE_PROTOCOL = "file://"; private static final String localFSFileSep = systemGetProperty("file.separator"); private static final int DEFAULT_PARALLEL = 10; private final String command; // list of files specified. Wildcard should be expanded already for uploading // For downloading, it the list of stage file names private Set sourceFiles; // big source files >=16MB, for which we will not upload them in serial mode // since TransferManager will parallelize upload private Set bigSourceFiles; // big source files < 16MB, for which we will upload them in parallel mode // with 4 threads by default private Set smallSourceFiles; // Threshold for splitting a file to upload multiple parts in parallel private int bigFileThreshold = 200 * 1024 * 1024; private Map fileMetadataMap; // stage related info private StageInfo stageInfo; // local location for where to download files to private String localLocation; // Query ID of PUT or GET statement private String queryID = null; // default parallelism private int parallel = DEFAULT_PARALLEL; private SFSession session; private SFStatement statement; private static Throwable injectedFileTransferException = null; // for testing purpose // This function should only be used for testing purpose static void setInjectedFileTransferException(Throwable th) { injectedFileTransferException = th; } static boolean isInjectedFileTransferExceptionEnabled() { return injectedFileTransferException != null; } public StageInfo getStageInfo() { return this.stageInfo; } /** * Get value of big file threshold. For testing purposes. 
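* <p>Defaults to 200 MB (209715200 bytes); {@code parseCommand} overrides it only when the server response carries a positive {@code threshold} value.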
* * @return integer value in bytes of threshold */ int getBigFileThreshold() { return this.bigFileThreshold; } // Encryption material private List encryptionMaterial; // Presigned URLs private List presignedUrls; // Index: Source file to encryption material HashMap srcFileToEncMat; // Index: Source file to presigned URL HashMap srcFileToPresignedUrl; public Map getStageCredentials() { return new HashMap<>(stageInfo.getCredentials()); } public List getEncryptionMaterial() { return new ArrayList<>(encryptionMaterial); } public Map getSrcToMaterialsMap() { return new HashMap<>(srcFileToEncMat); } public Map getSrcToPresignedUrlMap() { return new HashMap<>(srcFileToPresignedUrl); } public String getStageLocation() { return stageInfo.getLocation(); } private void initEncryptionMaterial(CommandType commandType, JsonNode jsonNode) throws SnowflakeSQLException, JsonProcessingException { encryptionMaterial = getEncryptionMaterial(commandType, jsonNode); } /** * Get the encryption information for an UPLOAD or DOWNLOAD given a PUT command response JsonNode * * @param commandType CommandType of action (e.g UPLOAD or DOWNLOAD) * @param jsonNode JsonNod of PUT call response * @return List of RemoteStoreFileEncryptionMaterial objects */ static List getEncryptionMaterial( CommandType commandType, JsonNode jsonNode) throws SnowflakeSQLException, JsonProcessingException { List encryptionMaterial = new ArrayList<>(); JsonNode rootNode = jsonNode.path("data").path("encryptionMaterial"); if (commandType == CommandType.UPLOAD) { logger.debug("InitEncryptionMaterial: UPLOAD", false); RemoteStoreFileEncryptionMaterial encMat = null; if (!rootNode.isMissingNode() && !rootNode.isNull()) { encMat = mapper.treeToValue(rootNode, RemoteStoreFileEncryptionMaterial.class); } encryptionMaterial.add(encMat); } else { logger.debug("InitEncryptionMaterial: DOWNLOAD", false); if (!rootNode.isMissingNode() && !rootNode.isNull()) { encryptionMaterial = Arrays.asList(mapper.treeToValue(rootNode, RemoteStoreFileEncryptionMaterial[].class)); } } return encryptionMaterial; } private void initPresignedUrls(CommandType commandType, JsonNode jsonNode) throws SnowflakeSQLException, JsonProcessingException, IOException { presignedUrls = getPresignedUrls(commandType, jsonNode); } private static List getPresignedUrls(CommandType commandType, JsonNode jsonNode) throws SnowflakeSQLException, JsonProcessingException, IOException { List presignedUrls = new ArrayList<>(); JsonNode rootNode = jsonNode.path("data").path("presignedUrls"); if (commandType == CommandType.DOWNLOAD) { logger.debug("InitEncryptionMaterial: DOWNLOAD", false); if (!rootNode.isMissingNode() && !rootNode.isNull()) { presignedUrls = Arrays.asList(mapper.readValue(rootNode.toString(), String[].class)); } } return presignedUrls; } private boolean autoCompress = true; private boolean overwrite = false; private SnowflakeStorageClient storageClient = null; private static final String SOURCE_COMPRESSION_AUTO_DETECT = "auto_detect"; private static final String SOURCE_COMPRESSION_NONE = "none"; private String sourceCompression = SOURCE_COMPRESSION_AUTO_DETECT; private ExecutorService threadExecutor = null; private Boolean canceled = false; /** Result status enum */ public enum ResultStatus { UNKNOWN("Unknown status"), UPLOADED("File uploaded"), UNSUPPORTED("File type not supported"), ERROR("Error encountered"), SKIPPED("Skipped since file exists"), NONEXIST("File does not exist"), COLLISION("File name collides with another file"), DIRECTORY("Not a file, but directory"), 
DOWNLOADED("File downloaded"); private String desc; public String getDesc() { return desc; } private ResultStatus(String desc) { this.desc = desc; } } /** Remote object location location: "bucket" for S3, "container" for Azure BLOB */ private static class remoteLocation { String location; String path; public remoteLocation(String remoteStorageLocation, String remotePath) { location = remoteStorageLocation; path = remotePath; } } /** * File metadata with everything we care so we don't need to repeat same processing to get these * info. */ private class FileMetadata { public String srcFileName; public long srcFileSize; public String destFileName; public long destFileSize; public boolean requireCompress; public ResultStatus resultStatus = ResultStatus.UNKNOWN; public String errorDetails = ""; public FileCompressionType srcCompressionType; public FileCompressionType destCompressionType; public boolean isEncrypted = false; } static class InputStreamWithMetadata { long size; String digest; // FileBackedOutputStream that should be destroyed when // the input stream has been consumed entirely FileBackedOutputStream fileBackedOutputStream; InputStreamWithMetadata( long size, String digest, FileBackedOutputStream fileBackedOutputStream) { this.size = size; this.digest = digest; this.fileBackedOutputStream = fileBackedOutputStream; } } /** * Compress an input stream with GZIP and return the result size, digest and compressed stream. * * @param inputStream data input * @param session the session * @return result size, digest and compressed stream * @throws SnowflakeSQLException if encountered exception when compressing */ private static InputStreamWithMetadata compressStreamWithGZIP( InputStream inputStream, SFBaseSession session, String queryId) throws SnowflakeSQLException { FileBackedOutputStream tempStream = new FileBackedOutputStream(MAX_BUFFER_SIZE, true); try { DigestOutputStream digestStream = new DigestOutputStream(tempStream, MessageDigest.getInstance("SHA-256")); CountingOutputStream countingStream = new CountingOutputStream(digestStream); // construct a gzip stream with sync_flush mode GZIPOutputStream gzipStream; gzipStream = new GZIPOutputStream(countingStream, true); IOUtils.copy(inputStream, gzipStream); inputStream.close(); gzipStream.finish(); gzipStream.flush(); countingStream.flush(); // Normal flow will never hit here. This is only for testing purposes if (isInjectedFileTransferExceptionEnabled() && SnowflakeFileTransferAgent.injectedFileTransferException instanceof NoSuchAlgorithmException) { throw (NoSuchAlgorithmException) SnowflakeFileTransferAgent.injectedFileTransferException; } return new InputStreamWithMetadata( countingStream.getCount(), Base64.getEncoder().encodeToString(digestStream.getMessageDigest().digest()), tempStream); } catch (IOException | NoSuchAlgorithmException ex) { logger.error("Exception compressing input stream", ex); throw new SnowflakeSQLLoggedException( queryId, session, SqlState.INTERNAL_ERROR, ErrorCode.INTERNAL_ERROR.getMessageCode(), ex, "error encountered for compression"); } } /** * Compress an input stream with GZIP and return the result size, digest and compressed stream. 
* * @param inputStream The input stream to compress * @return the compressed stream * @throws SnowflakeSQLException Will be thrown if there is a problem with compression * @deprecated Can be removed when all accounts are encrypted */ @Deprecated private static InputStreamWithMetadata compressStreamWithGZIPNoDigest( InputStream inputStream, SFBaseSession session, String queryId) throws SnowflakeSQLException { try { FileBackedOutputStream tempStream = new FileBackedOutputStream(MAX_BUFFER_SIZE, true); CountingOutputStream countingStream = new CountingOutputStream(tempStream); // construct a gzip stream with sync_flush mode GZIPOutputStream gzipStream; gzipStream = new GZIPOutputStream(countingStream, true); IOUtils.copy(inputStream, gzipStream); inputStream.close(); gzipStream.finish(); gzipStream.flush(); countingStream.flush(); // Normal flow will never hit here. This is only for testing purposes if (isInjectedFileTransferExceptionEnabled()) { throw (IOException) SnowflakeFileTransferAgent.injectedFileTransferException; } return new InputStreamWithMetadata(countingStream.getCount(), null, tempStream); } catch (IOException ex) { logger.error("Exception compressing input stream", ex); throw new SnowflakeSQLLoggedException( queryId, session, SqlState.INTERNAL_ERROR, ErrorCode.INTERNAL_ERROR.getMessageCode(), ex, "error encountered for compression"); } } private static InputStreamWithMetadata computeDigest(InputStream is, boolean resetStream) throws NoSuchAlgorithmException, IOException { MessageDigest md = MessageDigest.getInstance("SHA-256"); if (resetStream) { FileBackedOutputStream tempStream = new FileBackedOutputStream(MAX_BUFFER_SIZE, true); CountingOutputStream countingOutputStream = new CountingOutputStream(tempStream); DigestOutputStream digestStream = new DigestOutputStream(countingOutputStream, md); IOUtils.copy(is, digestStream); return new InputStreamWithMetadata( countingOutputStream.getCount(), Base64.getEncoder().encodeToString(digestStream.getMessageDigest().digest()), tempStream); } else { CountingOutputStream countingOutputStream = new CountingOutputStream(ByteStreams.nullOutputStream()); DigestOutputStream digestStream = new DigestOutputStream(countingOutputStream, md); IOUtils.copy(is, digestStream); return new InputStreamWithMetadata( countingOutputStream.getCount(), Base64.getEncoder().encodeToString(digestStream.getMessageDigest().digest()), null); } } /** * A callable that can be executed in a separate thread using executor service. * *

The callable does compression if needed and upload the result to the table's staging area. * * @deprecated use {@link #getUploadFileCallable(StageInfo, String, FileMetadata, * SnowflakeStorageClient, SFSession, String, InputStream, boolean, int, File, * RemoteStoreFileEncryptionMaterial, String)} * @param stage information about the stage * @param srcFilePath source file path * @param metadata file metadata * @param client client object used to communicate with c3 * @param session session object * @param command command string * @param inputStream null if upload source is file * @param sourceFromStream whether upload source is file or stream * @param parallel number of threads for parallel uploading * @param srcFile source file name * @param encMat not null if encryption is required * @return a callable that uploading file to the remote store */ @Deprecated public static Callable getUploadFileCallable( final StageInfo stage, final String srcFilePath, final FileMetadata metadata, final SnowflakeStorageClient client, final SFSession session, final String command, final InputStream inputStream, final boolean sourceFromStream, final int parallel, final File srcFile, final RemoteStoreFileEncryptionMaterial encMat) { return getUploadFileCallable( stage, srcFilePath, metadata, client, session, command, inputStream, sourceFromStream, parallel, srcFile, encMat, null); } /** * A callable that can be executed in a separate thread using executor service. * *

The callable does compression if needed and upload the result to the table's staging area. * * @param stage information about the stage * @param srcFilePath source file path * @param metadata file metadata * @param client client object used to communicate with c3 * @param session session object * @param command command string * @param inputStream null if upload source is file * @param sourceFromStream whether upload source is file or stream * @param parallel number of threads for parallel uploading * @param srcFile source file name * @param encMat not null if encryption is required * @param queryId last executed query id (for forwarding in possible exceptions) * @return a callable that uploading file to the remote store */ public static Callable getUploadFileCallable( final StageInfo stage, final String srcFilePath, final FileMetadata metadata, final SnowflakeStorageClient client, final SFSession session, final String command, final InputStream inputStream, final boolean sourceFromStream, final int parallel, final File srcFile, final RemoteStoreFileEncryptionMaterial encMat, final String queryId) { return new Callable() { public Void call() throws Exception { logger.trace("Entering getUploadFileCallable...", false); // make sure initialize context for the telemetry service for this thread TelemetryService.getInstance().updateContext(session.getSnowflakeConnectionString()); InputStream uploadStream = inputStream; File fileToUpload = null; if (uploadStream == null) { try { // Normal flow will never hit here. This is only for testing purposes if (isInjectedFileTransferExceptionEnabled() && SnowflakeFileTransferAgent.injectedFileTransferException instanceof FileNotFoundException) { throw (FileNotFoundException) SnowflakeFileTransferAgent.injectedFileTransferException; } FileUtil.logFileUsage(srcFilePath, "Get file to upload", false); uploadStream = new FileInputStream(srcFilePath); } catch (FileNotFoundException ex) { metadata.resultStatus = ResultStatus.ERROR; metadata.errorDetails = ex.getMessage(); throw ex; } } // this shouldn't happen if (metadata == null) { throw new SnowflakeSQLLoggedException( queryId, session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "missing file metadata for: " + srcFilePath); } String destFileName = metadata.destFileName; long uploadSize; String digest = null; logger.debug("Dest file name: {}", false); // Temp file that needs to be cleaned up when upload was successful FileBackedOutputStream fileBackedOutputStream = null; // SNOW-16082: we should capture exception if we fail to compress or // calculate digest. try { if (metadata.requireCompress) { InputStreamWithMetadata compressedSizeAndStream = (encMat == null ? compressStreamWithGZIPNoDigest(uploadStream, session, queryId) : compressStreamWithGZIP(uploadStream, session, queryId)); fileBackedOutputStream = compressedSizeAndStream.fileBackedOutputStream; // update the size uploadSize = compressedSizeAndStream.size; digest = compressedSizeAndStream.digest; if (compressedSizeAndStream.fileBackedOutputStream.getFile() != null) { fileToUpload = compressedSizeAndStream.fileBackedOutputStream.getFile(); } logger.debug("New size after compression: {}", uploadSize); } else if (stage.getStageType() != StageInfo.StageType.LOCAL_FS) { // If it's not local_fs, we store our digest in the metadata // In local_fs, we don't need digest, and if we turn it on, we will consume whole // uploadStream, which local_fs uses. 
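// When the source is a stream, computeDigest(..., resetStream=true) also re-buffers the
// bytes into a FileBackedOutputStream, because the original stream cannot be rewound for
// the actual upload.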
InputStreamWithMetadata result = computeDigest(uploadStream, sourceFromStream); digest = result.digest; fileBackedOutputStream = result.fileBackedOutputStream; uploadSize = result.size; if (!sourceFromStream) { fileToUpload = srcFile; } else if (result.fileBackedOutputStream.getFile() != null) { fileToUpload = result.fileBackedOutputStream.getFile(); } } else { if (!sourceFromStream && (srcFile != null)) { fileToUpload = srcFile; } // if stage is local_fs and upload source is stream, upload size // does not matter since 1) transfer did not require size 2) no // output from uploadStream api is required uploadSize = sourceFromStream ? 0 : srcFile.length(); } logger.debug( "Started copying file from: {} to {}:{} destName: {} " + "auto compressed? {} size: {}", srcFilePath, stage.getStageType().name(), stage.getLocation(), destFileName, (metadata.requireCompress ? "yes" : "no"), uploadSize); // Simulated failure code. if (session.getInjectFileUploadFailure() != null && srcFilePath.endsWith((session).getInjectFileUploadFailure())) { throw new SnowflakeSimulatedUploadFailure( srcFile != null ? srcFile.getName() : "Unknown"); } // upload it switch (stage.getStageType()) { case LOCAL_FS: pushFileToLocal( stage.getLocation(), srcFilePath, destFileName, uploadStream, fileBackedOutputStream, session, queryId); break; case S3: case AZURE: case GCS: pushFileToRemoteStore( stage, destFileName, uploadStream, fileBackedOutputStream, uploadSize, digest, metadata.destCompressionType, client, session, command, parallel, fileToUpload, (fileToUpload == null), encMat, null, null, queryId); metadata.isEncrypted = encMat != null; break; } } catch (SnowflakeSimulatedUploadFailure ex) { // This code path is used for Simulated failure code in tests. // Never happen in production metadata.resultStatus = ResultStatus.ERROR; metadata.errorDetails = ex.getMessage(); throw ex; } catch (Throwable ex) { logger.error("Exception encountered during file upload", ex); metadata.resultStatus = ResultStatus.ERROR; metadata.errorDetails = ex.getMessage(); throw ex; } finally { if (fileBackedOutputStream != null) { try { fileBackedOutputStream.reset(); } catch (IOException ex) { logger.debug("Failed to clean up temp file: {}", ex); } } if (inputStream == null) { IOUtils.closeQuietly(uploadStream); } if (client != null) { client.shutdown(); } } logger.debug("FilePath: {}", srcFilePath); // set dest size metadata.destFileSize = uploadSize; // mark the file as being uploaded metadata.resultStatus = ResultStatus.UPLOADED; return null; } }; } /** * A callable that can be executed in a separate thread using executor service. * *

The callable download files from a stage location to a local location * * @deprecated use {@link #getDownloadFileCallable(StageInfo, String, String, Map, * SnowflakeStorageClient, SFSession, String, int, RemoteStoreFileEncryptionMaterial, String, * String)} * @param stage stage information * @param srcFilePath path that stores the downloaded file * @param localLocation local location * @param fileMetadataMap file metadata map * @param client remote store client * @param session session object * @param command command string * @param encMat remote store encryption material * @param parallel number of parallel threads for downloading * @param presignedUrl Presigned URL for file download * @return a callable responsible for downloading files */ @Deprecated public static Callable getDownloadFileCallable( final StageInfo stage, final String srcFilePath, final String localLocation, final Map fileMetadataMap, final SnowflakeStorageClient client, final SFSession session, final String command, final int parallel, final RemoteStoreFileEncryptionMaterial encMat, final String presignedUrl) { return getDownloadFileCallable( stage, srcFilePath, localLocation, fileMetadataMap, client, session, command, parallel, encMat, presignedUrl, null); } /** * A callable that can be executed in a separate thread using executor service. * *

The callable download files from a stage location to a local location * * @param stage stage information * @param srcFilePath path that stores the downloaded file * @param localLocation local location * @param fileMetadataMap file metadata map * @param client remote store client * @param session session object * @param command command string * @param encMat remote store encryption material * @param parallel number of parallel threads for downloading * @param presignedUrl Presigned URL for file download * @param queryId the query ID * @return a callable responsible for downloading files */ public static Callable getDownloadFileCallable( final StageInfo stage, final String srcFilePath, final String localLocation, final Map fileMetadataMap, final SnowflakeStorageClient client, final SFSession session, final String command, final int parallel, final RemoteStoreFileEncryptionMaterial encMat, final String presignedUrl, final String queryId) { return new Callable() { public Void call() throws Exception { logger.debug("Entering getDownloadFileCallable...", false); // make sure initialize context for the telemetry service for this thread TelemetryService.getInstance().updateContext(session.getSnowflakeConnectionString()); FileMetadata metadata = fileMetadataMap.get(srcFilePath); // this shouldn't happen if (metadata == null) { throw new SnowflakeSQLLoggedException( queryId, session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "missing file metadata for: " + srcFilePath); } String destFileName = metadata.destFileName; logger.debug( "Started copying file from: {}:{} file path:{} to {} destName:{}", stage.getStageType().name(), stage.getLocation(), srcFilePath, localLocation, destFileName); try { switch (stage.getStageType()) { case LOCAL_FS: pullFileFromLocal( stage.getLocation(), srcFilePath, localLocation, destFileName, session, queryId); break; case AZURE: case S3: case GCS: pullFileFromRemoteStore( stage, srcFilePath, destFileName, localLocation, client, session, command, parallel, encMat, presignedUrl, queryId); metadata.isEncrypted = encMat != null; break; } } catch (Throwable ex) { logger.error("Exception encountered during file download", ex); metadata.resultStatus = ResultStatus.ERROR; metadata.errorDetails = ex.getMessage(); throw ex; } finally { if (client != null) { client.shutdown(); } } logger.debug("FilePath: {}", srcFilePath); File destFile = new File(localLocation + localFSFileSep + destFileName); long downloadSize = destFile.length(); // set dest size metadata.destFileSize = downloadSize; // mark the file as being uploaded metadata.resultStatus = ResultStatus.DOWNLOADED; return null; } }; } public SnowflakeFileTransferAgent(String command, SFSession session, SFStatement statement) throws SnowflakeSQLException { this(command, session, statement, null); } public SnowflakeFileTransferAgent( String command, SFSession session, SFStatement statement, InternalCallMarker internalCallMarker) throws SnowflakeSQLException { recordIfExternal("SnowflakeFileTransferAgent", "", internalCallMarker); this.command = command; this.session = session; this.statement = statement; // parse the command logger.debug("Start parsing", false); parseCommand(); if (stageInfo.getStageType() != StageInfo.StageType.LOCAL_FS) { storageClient = StorageClientFactory.getFactory().createClient(stageInfo, parallel, null, session); } } /** * Parse the put/get command. * *

We send the command to the GS to do the parsing. In the future, we will delegate more work * to GS such as copying files from HTTP to the remote store. * * @throws SnowflakeSQLException failure to parse the PUT/GET command */ private void parseCommand() throws SnowflakeSQLException { // For AWS and Azure, this command returns enough info for us to get creds // we can use for each of the GETs/PUTs. For GCS, we need to issue a separate // call to GS to get creds (in the form of a presigned URL) for each file // we're uploading or downloading. This call gets our src_location and // encryption material, which we'll use for all the subsequent calls to GS // for creds for each file. Those calls are made from pushFileToRemoteStore // and pullFileFromRemoteStore if the storage client requires a presigned // URL. JsonNode jsonNode = parseCommandInGS(statement, command); // get command type if (!jsonNode.path("data").path("command").isMissingNode()) { commandType = CommandType.valueOf(jsonNode.path("data").path("command").asText()); } // get source file locations as array (apply to both upload and download) JsonNode locationsNode = jsonNode.path("data").path("src_locations"); if (!locationsNode.isArray()) { throw new SnowflakeSQLException( queryID, ErrorCode.INTERNAL_ERROR, "src_locations must be an array"); } queryID = jsonNode.path("data").path("queryId").asText(); String[] src_locations; try { // Normal flow will never hit here. This is only for testing purposes if (isInjectedFileTransferExceptionEnabled() && injectedFileTransferException instanceof SnowflakeSQLException) { throw (SnowflakeSQLException) SnowflakeFileTransferAgent.injectedFileTransferException; } src_locations = mapper.readValue(locationsNode.toString(), String[].class); initEncryptionMaterial(commandType, jsonNode); initPresignedUrls(commandType, jsonNode); } catch (Exception ex) { throw new SnowflakeSQLException( queryID, ex, SqlState.INTERNAL_ERROR, ErrorCode.INTERNAL_ERROR.getMessageCode(), "Failed to parse the locations due to: " + ex.getMessage()); } showEncryptionParameter = jsonNode.path("data").path("clientShowEncryptionParameter").asBoolean(); JsonNode thresholdNode = jsonNode.path("data").path("threshold"); int threshold = thresholdNode.asInt(); // if value is <= 0, this means an error was made in parsing the threshold or the threshold is // invalid. // Only use the threshold value if it is valid. 
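// Illustrative: {"data": {"threshold": 209715200}} keeps the 200 MB default split size,
// while a missing node parses to 0 and is ignored.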
if (threshold > 0) { bigFileThreshold = threshold; } String localFilePathFromGS = null; // do upload command specific parsing if (commandType == CommandType.UPLOAD) { if (src_locations.length > 0) { localFilePathFromGS = src_locations[0]; } sourceFiles = expandFileNames(src_locations, queryID); autoCompress = jsonNode.path("data").path("autoCompress").asBoolean(true); if (!jsonNode.path("data").path("sourceCompression").isMissingNode()) { sourceCompression = jsonNode.path("data").path("sourceCompression").asText(); } } else { // do download command specific parsing srcFileToEncMat = new HashMap<>(); // create mapping from source file to encryption materials if (src_locations.length == encryptionMaterial.size()) { for (int srcFileIdx = 0; srcFileIdx < src_locations.length; srcFileIdx++) { srcFileToEncMat.put(src_locations[srcFileIdx], encryptionMaterial.get(srcFileIdx)); } } // create mapping from source file to presigned URLs srcFileToPresignedUrl = new HashMap<>(); if (src_locations.length == presignedUrls.size()) { for (int srcFileIdx = 0; srcFileIdx < src_locations.length; srcFileIdx++) { srcFileToPresignedUrl.put(src_locations[srcFileIdx], presignedUrls.get(srcFileIdx)); } } sourceFiles = new HashSet(Arrays.asList(src_locations)); localLocation = jsonNode.path("data").path("localLocation").asText(); localFilePathFromGS = localLocation; if (localLocation.startsWith("~")) { // replace ~ with user home localLocation = systemGetProperty("user.home") + localLocation.substring(1); } // it should not start with any ~ after the above replacement if (localLocation.startsWith("~")) { throw new SnowflakeSQLLoggedException( queryID, session, ErrorCode.PATH_NOT_DIRECTORY.getMessageCode(), SqlState.IO_ERROR, localLocation); } // todo: replace ~userid with the home directory of a given userid // one idea is to get the home directory for current user and replace // the last user id with the given user id. // user may also specify files relative to current directory // add the current path if that is the case if (!(new File(localLocation)).isAbsolute()) { String cwd = systemGetProperty("user.dir"); logger.debug("Adding current working dir to relative file path.", false); localLocation = cwd + localFSFileSep + localLocation; } // local location should be a directory if ((new File(localLocation)).isFile()) { throw new SnowflakeSQLLoggedException( queryID, session, ErrorCode.PATH_NOT_DIRECTORY.getMessageCode(), SqlState.IO_ERROR, localLocation); } } // SNOW-15153: verify that the value after file:// is not changed by GS verifyLocalFilePath(localFilePathFromGS); parallel = jsonNode.path("data").path("parallel").asInt(); overwrite = jsonNode.path("data").path("overwrite").asBoolean(false); stageInfo = getStageInfo(jsonNode, this.session); if (logger.isDebugEnabled()) { logger.debug("Command type: {}", commandType); if (commandType == CommandType.UPLOAD) { logger.debug("Auto compress: {}, source compression: {}", autoCompress, sourceCompression); } else { logger.debug("Local download location: {}", localLocation); } logger.debug("Source files: {}", String.join(",", sourceFiles)); logger.debug( "stageLocation: {}, parallel: {}, overwrite: {}, destLocationType: {}, stageRegion: {}," + " endPoint: {}, storageAccount: {}", stageInfo.getLocation(), parallel, overwrite, stageInfo.getStageType(), stageInfo.getRegion(), stageInfo.getEndPoint(), stageInfo.getStorageAccount()); } } /** * Construct Stage Info object from JsonNode. 
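* <p>Sketch of the JSON subtree this method reads (field values are illustrative only):
*
* <pre>{@code
* { "data": { "queryId": "...",
*             "stageInfo": { "locationType": "S3", "location": "bucket/path",
*                            "region": "us-west-2", "isClientSideEncrypted": true,
*                            "creds": { } } } }
* }</pre>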
* * @param jsonNode JsonNode to use serialize into StageInfo Object * @param session can be null. * @return StageInfo constructed from JsonNode and session params. * @throws SnowflakeSQLException */ static StageInfo getStageInfo(JsonNode jsonNode, SFSession session) throws SnowflakeSQLException { String queryId = jsonNode.path("data").path("queryId").asText(); // more parameters common to upload/download String stageLocation = jsonNode.path("data").path("stageInfo").path("location").asText(); String stageLocationType = jsonNode.path("data").path("stageInfo").path("locationType").asText(); String stageRegion = null; if (!jsonNode.path("data").path("stageInfo").path("region").isMissingNode()) { stageRegion = jsonNode.path("data").path("stageInfo").path("region").asText(); } boolean isClientSideEncrypted = true; if (!jsonNode.path("data").path("stageInfo").path("isClientSideEncrypted").isMissingNode()) { isClientSideEncrypted = jsonNode.path("data").path("stageInfo").path("isClientSideEncrypted").asBoolean(true); } // endPoint is currently known to be set for Azure stages or S3. For S3 it will be set // specifically // for FIPS or VPCE S3 endpoint. SNOW-652696 String endPoint = null; if ("AZURE".equalsIgnoreCase(stageLocationType) || "S3".equalsIgnoreCase(stageLocationType) || "GCS".equalsIgnoreCase(stageLocationType)) { endPoint = jsonNode.path("data").path("stageInfo").findValue("endPoint").asText(); if ("GCS".equalsIgnoreCase(stageLocationType) && endPoint != null && (endPoint.trim().isEmpty() || "null".equals(endPoint))) { // setting to null to preserve previous behaviour for GCS endPoint = null; } } String stgAcct = null; // storageAccount are only available in Azure stages. Value // will be present but null in other platforms. if ("AZURE".equalsIgnoreCase(stageLocationType)) { // Jackson is doing some very strange things trying to pull the value of // the storageAccount node after adding the GCP library dependencies. // If we try to pull the value by name, we get back null, but clearly the // node is there. This code works around the issue by enumerating through // all the nodes and getting the one that starts with "sto". The value // then comes back with double quotes around it, so we're stripping them // off. As long as our JSON doc doesn't add another node that starts with // "sto", this should work fine. Iterator> fields = jsonNode.path("data").path("stageInfo").fields(); while (fields.hasNext()) { Entry jsonField = fields.next(); if (jsonField.getKey().startsWith("sto")) { stgAcct = jsonField .getValue() .toString() .trim() .substring(1, jsonField.getValue().toString().trim().lastIndexOf("\"")); } } } if ("LOCAL_FS".equalsIgnoreCase(stageLocationType)) { if (stageLocation.startsWith("~")) { // replace ~ with user home stageLocation = systemGetProperty("user.home") + stageLocation.substring(1); } if (!(new File(stageLocation)).isAbsolute()) { String cwd = systemGetProperty("user.dir"); logger.debug("Adding current working dir to stage file path."); stageLocation = cwd + localFSFileSep + stageLocation; } } Map stageCredentials = extractStageCreds(jsonNode, queryId); StageInfo stageInfo = StageInfo.createStageInfo( stageLocationType, stageLocation, stageCredentials, stageRegion, endPoint, stgAcct, isClientSideEncrypted); // Setup pre-signed URL into stage info if pre-signed URL is returned. 
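// Only GCS stages carry a stage-level presigned URL in this response; S3 and Azure
// credentials arrive through the "creds" node parsed above.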
if (stageInfo.getStageType() == StageInfo.StageType.GCS) { JsonNode presignedUrlNode = jsonNode.path("data").path("stageInfo").path("presignedUrl"); if (!presignedUrlNode.isMissingNode()) { String presignedUrl = presignedUrlNode.asText(); if (!isNullOrEmpty(presignedUrl)) { stageInfo.setPresignedUrl(presignedUrl); } } } setupUseRegionalUrl(jsonNode, stageInfo); setupUseVirtualUrl(jsonNode, stageInfo); if (stageInfo.getStageType() == StageInfo.StageType.S3) { if (session == null) { // This node's value is set if PUT is used without Session. (For Snowpipe Streaming, we rely // on a response from a server to have this field set to use S3RegionalURL) JsonNode useS3RegionalURLNode = jsonNode.path("data").path("stageInfo").path("useS3RegionalUrl"); if (!useS3RegionalURLNode.isMissingNode()) { boolean useS3RegionalUrl = useS3RegionalURLNode.asBoolean(false); stageInfo.setUseS3RegionalUrl(useS3RegionalUrl); } } else { // Update StageInfo to reflect use of S3 regional URL. // This is required for connecting to S3 over privatelink when the // target stage is in us-east-1 stageInfo.setUseS3RegionalUrl(session.getUseRegionalS3EndpointsForPresignedURL()); } } return stageInfo; } private static void setupUseRegionalUrl(JsonNode jsonNode, StageInfo stageInfo) { if (stageInfo.getStageType() != StageInfo.StageType.GCS && stageInfo.getStageType() != StageInfo.StageType.S3) { return; } JsonNode useRegionalURLNode = jsonNode.path("data").path("stageInfo").path("useRegionalUrl"); if (!useRegionalURLNode.isMissingNode()) { boolean useRegionalURL = useRegionalURLNode.asBoolean(false); stageInfo.setUseRegionalUrl(useRegionalURL); } } private static void setupUseVirtualUrl(JsonNode jsonNode, StageInfo stageInfo) { if (stageInfo.getStageType() != StageInfo.StageType.GCS) { return; } JsonNode useVirtualURLNode = jsonNode.path("data").path("stageInfo").path("useVirtualUrl"); if (!useVirtualURLNode.isMissingNode()) { boolean useVirtualURL = useVirtualURLNode.asBoolean(false); stageInfo.setUseVirtualUrl(useVirtualURL); } else { logger.debug("useVirtualUrl property missing from stage info"); } } /** * A helper method to verify if the local file path from GS matches what's parsed locally. This is * for security purpose as documented in SNOW-15153. * * @param localFilePathFromGS the local file path to verify * @throws SnowflakeSQLException Will be thrown if the log path if empty or if it doesn't match * what comes back from GS */ private void verifyLocalFilePath(String localFilePathFromGS) throws SnowflakeSQLException { String localFilePath = getLocalFilePathFromCommand(command, true); if (!localFilePath.isEmpty() && !localFilePath.equals(localFilePathFromGS)) { throw new SnowflakeSQLLoggedException( queryID, session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "Unexpected local file path from GS. From GS: " + localFilePathFromGS + ", expected: " + localFilePath); } else if (localFilePath.isEmpty()) { logger.debug("Fail to parse local file path from command: {}", command); } else { logger.trace("local file path from GS matches local parsing: {}", localFilePath); } } /** * Parses out the local file path from the command. 
   * We need this to get the file paths to expand wildcards and make sure the paths GS returns
   * are correct.
   *
   * @param command The GET/PUT command we send to GS
   * @param unescape True to unescape backslashes coming from GS
   * @return Path to the local file
   */
  private static String getLocalFilePathFromCommand(String command, boolean unescape) {
    if (command == null) {
      logger.error("null command", false);
      return null;
    }

    if (command.indexOf(FILE_PROTOCOL) < 0) {
      logger.error("file:// prefix not found in command: {}", command);
      return null;
    }

    int localFilePathBeginIdx = command.indexOf(FILE_PROTOCOL) + FILE_PROTOCOL.length();
    boolean isLocalFilePathQuoted =
        (localFilePathBeginIdx > FILE_PROTOCOL.length())
            && (command.charAt(localFilePathBeginIdx - 1 - FILE_PROTOCOL.length()) == '\'');

    // the ending index is exclusive
    int localFilePathEndIdx = 0;
    String localFilePath = "";

    if (isLocalFilePathQuoted) {
      // look for the matching quote
      localFilePathEndIdx = command.indexOf("'", localFilePathBeginIdx);
      if (localFilePathEndIdx > localFilePathBeginIdx) {
        localFilePath = command.substring(localFilePathBeginIdx, localFilePathEndIdx);
      }
      // unescape backslashes to match the file name from GS
      if (unescape) {
        localFilePath = localFilePath.replaceAll("\\\\\\\\", "\\\\");
      }
    } else {
      // look for the first space, newline, or semicolon
      List<Integer> indexList = new ArrayList<>();
      char[] delimiterChars = {' ', '\n', ';'};
      for (int i = 0; i < delimiterChars.length; i++) {
        int charIndex = command.indexOf(delimiterChars[i], localFilePathBeginIdx);
        if (charIndex != -1) {
          indexList.add(charIndex);
        }
      }

      localFilePathEndIdx = indexList.isEmpty() ? -1 : Collections.min(indexList);

      if (localFilePathEndIdx > localFilePathBeginIdx) {
        localFilePath = command.substring(localFilePathBeginIdx, localFilePathEndIdx);
      } else if (localFilePathEndIdx == -1) {
        localFilePath = command.substring(localFilePathBeginIdx);
      }
    }

    return localFilePath;
  }

  /**
   * @return JSON doc containing the command options returned by GS
   * @throws SnowflakeSQLException Will be thrown if parsing the command by GS fails
   */
  private static JsonNode parseCommandInGS(SFStatement statement, String command)
      throws SnowflakeSQLException {
    Object result = null;
    // send the command to GS
    try {
      result =
          statement.executeHelper(
              command,
              "application/json",
              null, // bindValues
              false, // describeOnly
              false, // internal
              false, // async
              new ExecTimeTelemetryData()); // OOB telemetry timing queries
    } catch (SFException ex) {
      throw new SnowflakeSQLException(
          ex.getQueryId(), ex, ex.getSqlState(), ex.getVendorCode(), ex.getParams());
    }

    JsonNode jsonNode = (JsonNode) result;
    logger.debug("Response: {}", SecretDetector.maskSecrets(jsonNode.toString()));
    SnowflakeUtil.checkErrorAndThrowException(jsonNode);
    return jsonNode;
  }

  /**
   * @param rootNode JSON doc returned by GS
   * @throws SnowflakeSQLException Will be thrown if we fail to parse the stage credentials
   */
  private static Map<?, ?> extractStageCreds(JsonNode rootNode, String queryId)
      throws SnowflakeSQLException {
    JsonNode credsNode = rootNode.path("data").path("stageInfo").path("creds");
    Map<?, ?> stageCredentials = null;

    try {
      TypeReference<HashMap<String, String>> typeRef =
          new TypeReference<HashMap<String, String>>() {};
      stageCredentials = mapper.readValue(credsNode.toString(), typeRef);
    } catch (Exception ex) {
      throw new SnowflakeSQLException(
          queryId,
          ex,
          SqlState.INTERNAL_ERROR,
          ErrorCode.INTERNAL_ERROR.getMessageCode(),
          "Failed to parse the credentials ("
              + (credsNode != null ?
                  credsNode.toString() : "null")
              + ") due to exception: "
              + ex.getMessage());
    }

    return stageCredentials;
  }

  /**
   * This is an API function to retrieve the File Transfer Metadatas.
   *
   * <p>NOTE: It only supports PUT on S3/AZURE/GCS
   *
   * @return The file transfer metadatas for to-be-transferred files.
   * @throws SnowflakeSQLException if any error occurs
   */
  public List<SnowflakeFileTransferMetadata> getFileTransferMetadatas()
      throws SnowflakeSQLException {
    return getFileTransferMetadatas((InternalCallMarker) null);
  }

  public List<SnowflakeFileTransferMetadata> getFileTransferMetadatas(
      InternalCallMarker internalCallMarker) throws SnowflakeSQLException {
    recordIfExternal("SnowflakeFileTransferAgent", "getFileTransferMetadatas", internalCallMarker);
    List<SnowflakeFileTransferMetadata> result = new ArrayList<>();
    if (stageInfo.getStageType() != StageInfo.StageType.GCS
        && stageInfo.getStageType() != StageInfo.StageType.AZURE
        && stageInfo.getStageType() != StageInfo.StageType.S3) {
      throw new SnowflakeSQLLoggedException(
          queryID,
          session,
          ErrorCode.INTERNAL_ERROR.getMessageCode(),
          SqlState.INTERNAL_ERROR,
          "This API only supports S3/AZURE/GCS");
    }

    if (commandType != CommandType.UPLOAD) {
      throw new SnowflakeSQLLoggedException(
          queryID,
          session,
          ErrorCode.INTERNAL_ERROR.getMessageCode(),
          SqlState.INTERNAL_ERROR,
          "This API only supports PUT command");
    }

    for (String sourceFilePath : sourceFiles) {
      String sourceFileName = sourceFilePath.substring(sourceFilePath.lastIndexOf("/") + 1);
      result.add(
          new SnowflakeFileTransferMetadataV1(
              stageInfo.getPresignedUrl(),
              sourceFileName,
              encryptionMaterial.get(0).getQueryStageMasterKey(),
              encryptionMaterial.get(0).getQueryId(),
              encryptionMaterial.get(0).getSmkId(),
              commandType,
              stageInfo));
    }

    return result;
  }

  /**
   * This is an API function to parse the File Transfer Metadatas from a supplied PUT call
   * response.
   *
   * <p>NOTE: It only supports PUT on S3/AZURE/GCS (i.e. NOT LOCAL_FS)
   *
   * <p>It also assumes there is no active SFSession
   *
   * @param jsonNode JSON doc returned by GS from PUT call
   * @return The file transfer metadatas for to-be-transferred files.
   * @throws SnowflakeSQLException if any error occurs
   */
  public static List<SnowflakeFileTransferMetadata> getFileTransferMetadatas(JsonNode jsonNode)
      throws SnowflakeSQLException {
    return getFileTransferMetadatas(jsonNode, null, null);
  }

  /**
   * This is an API function to parse the File Transfer Metadatas from a supplied PUT call
   * response.
   *
   * <p>NOTE: It only supports PUT on S3/AZURE/GCS (i.e. NOT LOCAL_FS)
   *
   * <p>It also assumes there is no active SFSession
   *
   * @param jsonNode JSON doc returned by GS from PUT call
   * @param queryId String last executed query id if available
   * @return The file transfer metadatas for to-be-transferred files.
   * @throws SnowflakeSQLException if any error occurs
   */
  public static List<SnowflakeFileTransferMetadata> getFileTransferMetadatas(
      JsonNode jsonNode, String queryId) throws SnowflakeSQLException {
    return getFileTransferMetadatas(jsonNode, queryId, null);
  }

  public static List<SnowflakeFileTransferMetadata> getFileTransferMetadatas(
      JsonNode jsonNode, String queryId, InternalCallMarker internalCallMarker)
      throws SnowflakeSQLException {
    recordIfExternal("SnowflakeFileTransferAgent", "getFileTransferMetadatas", internalCallMarker);
    CommandType commandType =
        !jsonNode.path("data").path("command").isMissingNode()
            ? CommandType.valueOf(jsonNode.path("data").path("command").asText())
            : CommandType.UPLOAD;
    if (commandType != CommandType.UPLOAD) {
      throw new SnowflakeSQLException(
          queryId, ErrorCode.INTERNAL_ERROR, "This API only supports PUT commands");
    }

    JsonNode locationsNode = jsonNode.path("data").path("src_locations");
    if (!locationsNode.isArray()) {
      throw new SnowflakeSQLException(
          queryId, ErrorCode.INTERNAL_ERROR, "src_locations must be an array");
    }

    final String[] srcLocations;
    final List<RemoteStoreFileEncryptionMaterial> encryptionMaterial;
    try {
      srcLocations = mapper.readValue(locationsNode.toString(), String[].class);
    } catch (Exception ex) {
      throw new SnowflakeSQLException(
          queryId,
          ErrorCode.INTERNAL_ERROR,
          "Failed to parse the locations due to: " + ex.getMessage());
    }

    try {
      encryptionMaterial = getEncryptionMaterial(commandType, jsonNode);
    } catch (Exception ex) {
      throw new SnowflakeSQLException(
          queryId,
          ErrorCode.INTERNAL_ERROR,
          "Failed to parse encryptionMaterial due to: " + ex.getMessage());
    }

    // For UPLOAD we expect encryptionMaterial to have length 1
    if (encryptionMaterial.size() != 1) {
      throw new SnowflakeSQLException(
          queryId,
          ErrorCode.INTERNAL_ERROR,
          "Encryption material for UPLOAD should have size 1 but has "
              + encryptionMaterial.size());
    }

    final Set<String> sourceFiles = expandFileNames(srcLocations, queryId);

    StageInfo stageInfo = getStageInfo(jsonNode, null /* SFSession */);

    List<SnowflakeFileTransferMetadata> result = new ArrayList<>();
    if (stageInfo.getStageType() != StageInfo.StageType.GCS
        && stageInfo.getStageType() != StageInfo.StageType.AZURE
        && stageInfo.getStageType() != StageInfo.StageType.S3) {
      throw new SnowflakeSQLException(
          queryId,
          ErrorCode.INTERNAL_ERROR,
          "This API only supports S3/AZURE/GCS, received=" + stageInfo.getStageType());
    }

    for (String sourceFilePath : sourceFiles) {
      String sourceFileName = sourceFilePath.substring(sourceFilePath.lastIndexOf("/") + 1);
      result.add(
          new SnowflakeFileTransferMetadataV1(
              stageInfo.getPresignedUrl(),
              sourceFileName,
              encryptionMaterial.get(0) != null
                  ? encryptionMaterial.get(0).getQueryStageMasterKey()
                  : null,
              encryptionMaterial.get(0) != null ? encryptionMaterial.get(0).getQueryId() : null,
              encryptionMaterial.get(0) != null ? encryptionMaterial.get(0).getSmkId() : null,
              commandType,
              stageInfo));
    }

    return result;
  }

  @Override
  public boolean execute() throws SQLException {
    try {
      logger.debug("Start init metadata");

      // initialize file metadata map
      initFileMetadata();

      logger.debug("Start checking file types");

      // check file compression type
      if (commandType == CommandType.UPLOAD) {
        processFileCompressionTypes();
      }

      // Filter out files that already exist in the destination.
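      // Sketch of the skip rule below, as implemented here: when OVERWRITE is not set,
      // remote objects are listed first and files whose size and digest already match a
      // remote object are skipped; GCS stages that serve uploads via presigned URLs
      // (storageClient.requirePresignedUrl()) bypass this filtering.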
      // GCS may or may not use presigned URL
      if (!overwrite
          && (stageInfo.getStageType() != StageInfo.StageType.GCS
              || !storageClient.requirePresignedUrl())) {
        logger.debug("Start filtering");
        filterExistingFiles();
        logger.debug("Filtering done");
      }

      synchronized (canceled) {
        if (canceled) {
          logger.debug("File transfer canceled by user");
          threadExecutor = null;
          return false;
        }
      }

      // create target directory for download command
      if (commandType == CommandType.DOWNLOAD) {
        File dir = new File(localLocation);
        if (!dir.exists()) {
          boolean created;
          if (session.isOwnerOnlyStageFilePermissionsEnabled()) {
            created = createOwnerOnlyPermissionDir(localLocation);
          } else {
            created = dir.mkdirs();
          }

          if (created) {
            logger.debug("Directory created: {}", localLocation);
          } else {
            logger.debug("Directory not created: {}", localLocation);
          }
        }

        downloadFiles();
      } else if (sourceFromStream) {
        uploadStream();
      } else {
        // separate files into a big files list and a small files list;
        // big files will be uploaded in serial, while small files will be
        // uploaded concurrently.
        logger.debug("Start segregate files by size");
        segregateFilesBySize();

        if (bigSourceFiles != null) {
          logger.debug("Start uploading big files");
          uploadFiles(bigSourceFiles, 1);
          logger.debug("End uploading big files");
        }

        if (smallSourceFiles != null) {
          logger.debug("Start uploading small files");
          uploadFiles(smallSourceFiles, parallel);
          logger.debug("End uploading small files");
        }
      }

      // populate status rows to be returned to the client
      populateStatusRows();

      return true;
    } finally {
      if (storageClient != null) {
        storageClient.shutdown();
      }
    }
  }

  /** Helper to upload data from a stream */
  private void uploadStream() throws SnowflakeSQLException {
    try {
      FileMetadata fileMetadata = fileMetadataMap.get(SRC_FILE_NAME_FOR_STREAM);

      if (fileMetadata.resultStatus == ResultStatus.SKIPPED) {
        logger.debug(
            "Skipping {}, status: {}, details: {}",
            SRC_FILE_NAME_FOR_STREAM,
            fileMetadata.resultStatus,
            fileMetadata.errorDetails);
        return;
      }

      threadExecutor = SnowflakeUtil.createDefaultExecutorService("sf-stream-upload-worker-", 1);

      RemoteStoreFileEncryptionMaterial encMat = encryptionMaterial.get(0);
      Future<?> uploadTask = null;
      if (commandType == CommandType.UPLOAD) {
        uploadTask =
            threadExecutor.submit(
                getUploadFileCallable(
                    stageInfo,
                    SRC_FILE_NAME_FOR_STREAM,
                    fileMetadata,
                    (stageInfo.getStageType() == StageInfo.StageType.LOCAL_FS)
                        ? null
                        : StorageClientFactory.getFactory()
                            .createClient(stageInfo, parallel, encMat, session),
                    session,
                    command,
                    sourceStream,
                    true,
                    parallel,
                    null,
                    encMat,
                    queryID));
      } else if (commandType == CommandType.DOWNLOAD) {
        throw new SnowflakeSQLLoggedException(
            queryID,
            session,
            ErrorCode.INTERNAL_ERROR.getMessageCode(),
            SqlState.INTERNAL_ERROR);
      }

      threadExecutor.shutdown();

      try {
        // Normal flow will never hit here. This is only for testing purposes
        if (isInjectedFileTransferExceptionEnabled()
            && SnowflakeFileTransferAgent.injectedFileTransferException
                instanceof InterruptedException) {
          throw (InterruptedException) SnowflakeFileTransferAgent.injectedFileTransferException;
        }

        // wait for the task to complete
        uploadTask.get();
      } catch (InterruptedException ex) {
        throw new SnowflakeSQLLoggedException(
            queryID, session, ErrorCode.INTERRUPTED.getMessageCode(), SqlState.QUERY_CANCELED);
      } catch (ExecutionException ex) {
        throw new SnowflakeSQLException(
            queryID,
            ex.getCause(),
            SqlState.INTERNAL_ERROR,
            ErrorCode.FILE_OPERATION_UPLOAD_ERROR.getMessageCode());
      }
      logger.debug("Done with uploading from a stream");
    } finally {
      if (threadExecutor != null) {
        threadExecutor.shutdownNow();
        threadExecutor = null;
      }
    }
  }

  /** Download a file from remote, and return an input stream */
  @Override
  public InputStream downloadStream(String fileName) throws SnowflakeSQLException {
    logger.debug("Downloading file as stream: {}", fileName);
    if (stageInfo.getStageType() == StageInfo.StageType.LOCAL_FS) {
      logger.error("downloadStream function doesn't support local file system", false);

      throw new SnowflakeSQLException(
          queryID,
          SqlState.INTERNAL_ERROR,
          ErrorCode.INTERNAL_ERROR.getMessageCode(),
          session,
          "downloadStream function only supported in remote stages");
    }

    remoteLocation remoteLocation = extractLocationAndPath(stageInfo.getLocation());

    // when downloading a file as a stream there should be only one file in source files;
    // let's fail fast when more than one file matches instead of fetching a random one
    Set<String> matchedFiles =
        sourceFiles.stream()
            .filter(
                sourceFileName -> {
                  // We cannot match the whole sourceFileName since it may be different, e.g. for
                  // git repositories, so we match only the raw filename
                  String[] split = sourceFileName.split("/");
                  String fileNamePattern = ".*" + Pattern.quote(split[split.length - 1]) + "$";
                  return fileName.matches(fileNamePattern);
                })
            .collect(Collectors.toSet());

    if (matchedFiles.size() > 1) {
      throw new SnowflakeSQLException(
          queryID,
          SqlState.NO_DATA,
          ErrorCode.TOO_MANY_FILES_TO_DOWNLOAD_AS_STREAM.getMessageCode(),
          session,
          "There is more than one file matching "
              + fileName
              + ": "
              + String.join(",", matchedFiles));
    }

    String sourceLocation =
        matchedFiles.stream()
            .findFirst()
            .orElseThrow(
                () ->
                    new SnowflakeSQLException(
                        queryID,
                        SqlState.NO_DATA,
                        ErrorCode.FILE_NOT_FOUND.getMessageCode(),
                        session,
                        "File not found: " + fileName));

    if (!fileName.equals(sourceLocation)) {
      // filename may be different from source location, e.g. in git repositories
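      // Illustrative: a request for "data.csv" matches the stage path
      // "repos/main/data.csv" because only the trailing filename is compared.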
      logger.debug("Changing file to download location from {} to {}", fileName, sourceLocation);
    }

    String stageFilePath = sourceLocation;
    if (!remoteLocation.path.isEmpty()) {
      stageFilePath = SnowflakeUtil.concatFilePathNames(remoteLocation.path, sourceLocation, "/");
    }
    logger.debug("Stage file path for {} is {}", sourceLocation, stageFilePath);

    RemoteStoreFileEncryptionMaterial encMat = srcFileToEncMat.get(sourceLocation);
    String presignedUrl = srcFileToPresignedUrl.get(sourceLocation);
    return StorageClientFactory.getFactory()
        .createClient(stageInfo, parallel, encMat, session)
        .downloadToStream(
            session,
            command,
            parallel,
            remoteLocation.location,
            stageFilePath,
            stageInfo.getRegion(),
            presignedUrl,
            queryID);
  }

  /** Helper to download files from remote */
  private void downloadFiles() throws SnowflakeSQLException {
    try {
      threadExecutor = SnowflakeUtil.createDefaultExecutorService("sf-file-download-worker-", 1);

      List<Future<?>> downloadFileFutures = new LinkedList<>();
      for (String srcFile : sourceFiles) {
        FileMetadata fileMetadata = fileMetadataMap.get(srcFile);

        // Skip the file if its result status is already set
        if (fileMetadata.resultStatus != ResultStatus.UNKNOWN) {
          logger.debug(
              "Skipping {}, status: {}, details: {}",
              srcFile,
              fileMetadata.resultStatus,
              fileMetadata.errorDetails);

          continue;
        }

        RemoteStoreFileEncryptionMaterial encMat = srcFileToEncMat.get(srcFile);
        String presignedUrl = srcFileToPresignedUrl.get(srcFile);
        downloadFileFutures.add(
            threadExecutor.submit(
                getDownloadFileCallable(
                    stageInfo,
                    srcFile,
                    localLocation,
                    fileMetadataMap,
                    (stageInfo.getStageType() == StageInfo.StageType.LOCAL_FS)
                        ? null
                        : StorageClientFactory.getFactory()
                            .createClient(stageInfo, parallel, encMat, session),
                    session,
                    command,
                    parallel,
                    encMat,
                    presignedUrl,
                    queryID)));

        logger.debug("Submitted download job for: {}", srcFile);
      }

      threadExecutor.shutdown();

      try {
        // wait for all threads to complete without timeout
        threadExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);
        for (Future<?> downloadFileFuture : downloadFileFutures) {
          if (downloadFileFuture.isDone()) {
            downloadFileFuture.get();
          }
        }
      } catch (InterruptedException ex) {
        throw new SnowflakeSQLLoggedException(
            queryID, session, ErrorCode.INTERRUPTED.getMessageCode(), SqlState.QUERY_CANCELED);
      } catch (ExecutionException ex) {
        throw new SnowflakeSQLException(
            queryID,
            ex.getCause(),
            SqlState.INTERNAL_ERROR,
            ErrorCode.FILE_OPERATION_DOWNLOAD_ERROR.getMessageCode());
      }
      logger.debug("Done with downloading");
    } finally {
      if (threadExecutor != null) {
        threadExecutor.shutdownNow();
        threadExecutor = null;
      }
    }
  }

  /**
   * This method is used in uploadFiles to delay the file upload for the given time, which is set
   * as a session parameter called "inject_wait_in_put". Normally this value is 0, but it is used
   * in testing.
   *
   * @param delayTime the number of seconds to sleep before uploading the file
   */
  private void setUploadDelay(int delayTime) {
    if (delayTime > 0) {
      try {
        TimeUnit.SECONDS.sleep(delayTime);
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
      }
    }
  }

  /**
   * This method creates a thread pool based on the requested number of threads and uploads the
   * files using the thread pool.
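   * <p>Illustrative behavior: {@code uploadFiles(bigSourceFiles, 1)} uploads big files one at a
   * time (each upload may use multi-part parallelism), while {@code uploadFiles(smallSourceFiles,
   * parallel)} uploads small files concurrently, one part per file.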
   *
   * @param fileList The set of files to upload
   * @param parallel degree of parallelism for the upload
   * @throws SnowflakeSQLException Will be thrown if uploading the files fails
   */
  private void uploadFiles(Set<String> fileList, int parallel) throws SnowflakeSQLException {
    try {
      threadExecutor =
          SnowflakeUtil.createDefaultExecutorService("sf-file-upload-worker-", parallel);

      List<Future<?>> uploadFileFutures = new LinkedList<>();
      for (String srcFile : fileList) {
        FileMetadata fileMetadata = fileMetadataMap.get(srcFile);

        // Check if the result status is already set so that we don't need to
        // upload it
        if (fileMetadata.resultStatus != ResultStatus.UNKNOWN) {
          logger.debug(
              "Skipping {}, status: {}, details: {}",
              srcFile,
              fileMetadata.resultStatus,
              fileMetadata.errorDetails);
          continue;
        }

        /*
         * For small files, we upload files in parallel, so we don't
         * want the remote store uploader to upload parts in parallel for each file.
         * For large files, we upload them in serial, and we want the remote store uploader
         * to upload parts in parallel for each file. This is the reason
         * for the parallel value.
         */
        File srcFileObj = new File(srcFile);

        // PUT delay goes here!!
        int delay = session.getInjectWaitInPut();
        setUploadDelay(delay);

        uploadFileFutures.add(
            threadExecutor.submit(
                getUploadFileCallable(
                    stageInfo,
                    srcFileObj.getPath(),
                    fileMetadata,
                    (stageInfo.getStageType() == StageInfo.StageType.LOCAL_FS)
                        ? null
                        : StorageClientFactory.getFactory()
                            .createClient(stageInfo, parallel, encryptionMaterial.get(0), session),
                    session,
                    command,
                    null,
                    false,
                    (parallel > 1 ? 1 : this.parallel),
                    srcFileObj,
                    encryptionMaterial.get(0),
                    queryID)));

        logger.debug("Submitted copy job for: {}", srcFile);
      }

      // shut down the thread executor
      threadExecutor.shutdown();

      try {
        // wait for all threads to complete without timeout
        threadExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);
        for (Future<?> uploadFileFuture : uploadFileFutures) {
          if (uploadFileFuture.isDone()) {
            uploadFileFuture.get();
          }
        }
      } catch (InterruptedException ex) {
        throw new SnowflakeSQLLoggedException(
            queryID, session, ErrorCode.INTERRUPTED.getMessageCode(), SqlState.QUERY_CANCELED);
      } catch (ExecutionException ex) {
        throw new SnowflakeSQLException(
            queryID,
            ex.getCause(),
            SqlState.INTERNAL_ERROR,
            ErrorCode.FILE_OPERATION_UPLOAD_ERROR.getMessageCode());
      }
      logger.debug("Done with uploading");
    } finally {
      // shut down the thread pool in any case
      if (threadExecutor != null) {
        threadExecutor.shutdownNow();
        threadExecutor = null;
      }
    }
  }

  private void segregateFilesBySize() {
    for (String srcFile : sourceFiles) {
      if ((new File(srcFile)).length() > bigFileThreshold) {
        if (bigSourceFiles == null) {
          bigSourceFiles = new HashSet<String>(sourceFiles.size());
        }

        bigSourceFiles.add(srcFile);
      } else {
        if (smallSourceFiles == null) {
          smallSourceFiles = new HashSet<String>(sourceFiles.size());
        }

        smallSourceFiles.add(srcFile);
      }
    }
  }

  public void cancel() {
    synchronized (canceled) {
      if (threadExecutor != null) {
        threadExecutor.shutdownNow();
        threadExecutor = null;
      }
      canceled = true;
    }
  }

  /**
   * Process a list of file paths separated by "," and expand any wildcards to generate the list
   * of paths for all files matched by the wildcards.
   *
   * @param filePathList file path list
   * @param queryId the query ID
   * @return a set of file names that is matched
   * @throws SnowflakeSQLException if cannot find the file
   */
  static Set<String> expandFileNames(String[] filePathList, String queryId)
      throws SnowflakeSQLException {
    Set<String> result = new HashSet<String>();

    // a location-to-file-pattern map so that we only need to list the
    // same directory once when it appears multiple times.
    Map<String, List<String>> locationToFilePatterns = new HashMap<String, List<String>>();

    String cwd = systemGetProperty("user.dir");

    for (String path : filePathList) {
      // replace ~ with user home
      if (path.startsWith("~")) {
        path = systemGetProperty("user.home") + path.substring(1);
      }

      // user may also specify files relative to current directory
      // add the current path if that is the case
      if (!(new File(path)).isAbsolute()) {
        logger.debug("Adding current working dir to relative file path.");
        path = cwd + localFSFileSep + path;
      }

      // check if the path contains any wildcards
      if (!path.contains("*")
          && !path.contains("?")
          && !(path.contains("[") && path.contains("]"))) {
        /* this file path doesn't have any wildcard, so we don't need to
         * expand it
         */
        result.add(path);
      } else {
        // get the directory path
        int lastFileSepIndex = path.lastIndexOf(localFSFileSep);

        // SNOW-15203: if we don't find a default file sep, try "/" if it is not
        // the default file sep.
        if (lastFileSepIndex < 0 && !"/".equals(localFSFileSep)) {
          lastFileSepIndex = path.lastIndexOf("/");
        }

        String loc = path.substring(0, lastFileSepIndex + 1);

        String filePattern = path.substring(lastFileSepIndex + 1);

        List<String> filePatterns = locationToFilePatterns.get(loc);

        if (filePatterns == null) {
          filePatterns = new ArrayList<String>();
          locationToFilePatterns.put(loc, filePatterns);
        }

        filePatterns.add(filePattern);
      }
    }

    // For each location, list files and match against the patterns
    for (Map.Entry<String, List<String>> entry : locationToFilePatterns.entrySet()) {
      try {
        File dir = new File(entry.getKey());

        logger.debug(
            "Listing files under: {} with patterns: {}",
            entry.getKey(),
            entry.getValue().toString());

        // Normal flow will never hit here. This is only for testing purposes
        if (isInjectedFileTransferExceptionEnabled()
            && injectedFileTransferException instanceof Exception) {
          throw (Exception) SnowflakeFileTransferAgent.injectedFileTransferException;
        }

        // The following currently ignores sub directories
        File[] filesMatchingPattern =
            dir.listFiles((FileFilter) new WildcardFileFilter(entry.getValue()));
        if (filesMatchingPattern != null) {
          for (File file : filesMatchingPattern) {
            result.add(file.getCanonicalPath());
          }
        } else {
          logger.debug("No files under {} matching pattern {}", entry.getKey(), entry.getValue());
        }
      } catch (Exception ex) {
        throw new SnowflakeSQLException(
            queryId,
            ex,
            SqlState.DATA_EXCEPTION,
            ErrorCode.FAIL_LIST_FILES.getMessageCode(),
            "Exception: "
                + ex.getMessage()
                + ", Dir="
                + entry.getKey()
                + ", Patterns="
                + entry.getValue().toString());
      }
    }

    logger.debug("Expanded file paths: ");

    for (String filePath : result) {
      logger.debug("File: {}", filePath);
    }

    return result;
  }

  private static boolean pushFileToLocal(
      String stageLocation,
      String filePath,
      String destFileName,
      InputStream inputStream,
      FileBackedOutputStream fileBackedOutStr,
      SFBaseSession session,
      String queryId)
      throws SQLException {

    // replace ~ with user home
    stageLocation = stageLocation.replace("~", systemGetProperty("user.home"));
    try {
      logger.debug(
          "Copy file. 
srcFile: {}, destination: {}, destFileName: {}", filePath, stageLocation, destFileName); File destFile = new File(SnowflakeUtil.concatFilePathNames(stageLocation, destFileName, localFSFileSep)); if (fileBackedOutStr != null) { inputStream = fileBackedOutStr.asByteSource().openStream(); } FileUtils.copyInputStreamToFile(inputStream, destFile); } catch (Exception ex) { throw new SnowflakeSQLLoggedException( queryId, session, SqlState.INTERNAL_ERROR, ErrorCode.INTERNAL_ERROR.getMessageCode(), ex, ex.getMessage()); } return true; } private static boolean pullFileFromLocal( String sourceLocation, String filePath, String destLocation, String destFileName, SFBaseSession session, String queryId) throws SQLException { try { logger.debug( "Copy file. srcFile: {}, destination: {}, destFileName: {}", sourceLocation + localFSFileSep + filePath, destLocation, destFileName); File srcFile = new File(SnowflakeUtil.concatFilePathNames(sourceLocation, filePath, localFSFileSep)); FileUtils.copyFileToDirectory(srcFile, new File(destLocation)); } catch (Exception ex) { throw new SnowflakeSQLLoggedException( queryId, session, SqlState.INTERNAL_ERROR, ErrorCode.INTERNAL_ERROR.getMessageCode(), ex, ex.getMessage()); } return true; } private static void pushFileToRemoteStore( StageInfo stage, String destFileName, InputStream inputStream, FileBackedOutputStream fileBackedOutStr, long uploadSize, String digest, FileCompressionType compressionType, SnowflakeStorageClient initialClient, SFSession session, String command, int parallel, File srcFile, boolean uploadFromStream, RemoteStoreFileEncryptionMaterial encMat, String streamingIngestClientName, String streamingIngestClientKey, String queryId) throws SQLException, IOException { remoteLocation remoteLocation = extractLocationAndPath(stage.getLocation()); String origDestFileName = destFileName; if (remoteLocation.path != null && !remoteLocation.path.isEmpty()) { destFileName = remoteLocation.path + (!remoteLocation.path.endsWith("/") ? "/" : "") + destFileName; } logger.debug( "Upload object. Location: {}, key: {}, srcFile: {}, encryption: {}", remoteLocation.location, destFileName, srcFile, (ArgSupplier) () -> (encMat == null ? 
"NULL" : encMat.getSmkId() + "|" + encMat.getQueryId())); StorageObjectMetadata meta = StorageClientFactory.getFactory().createStorageMetadataObj(stage.getStageType()); meta.setContentLength(uploadSize); if (digest != null) { initialClient.addDigestMetadata(meta, digest); } if (compressionType != null && compressionType.isSupported()) { meta.setContentEncoding(compressionType.name().toLowerCase()); } if (streamingIngestClientName != null && streamingIngestClientKey != null) { initialClient.addStreamingIngestMetadata( meta, streamingIngestClientName, streamingIngestClientKey); } try { String presignedUrl = ""; if (initialClient.requirePresignedUrl()) { // need to replace file://mypath/myfile?.csv with file://mypath/myfile1.csv.gz String localFilePath = getLocalFilePathFromCommand(command, false); String commandWithExactPath = command.replace(localFilePath, origDestFileName); // then hand that to GS to get the actual presigned URL we'll use SFStatement statement = new SFStatement(session); JsonNode jsonNode = parseCommandInGS(statement, commandWithExactPath); if (!jsonNode.path("data").path("stageInfo").path("presignedUrl").isMissingNode()) { presignedUrl = jsonNode.path("data").path("stageInfo").path("presignedUrl").asText(); } } initialClient.upload( session, command, parallel, uploadFromStream, remoteLocation.location, srcFile, destFileName, inputStream, fileBackedOutStr, meta, stage.getRegion(), presignedUrl, queryId); } finally { if (uploadFromStream && inputStream != null) { inputStream.close(); } } } /** * Static API function to upload data without JDBC session. * *
   * <p>
NOTE: This function is developed based on getUploadFileCallable(). * * @param config Configuration to upload a file to cloud storage * @throws Exception if error occurs while data upload. */ public static void uploadWithoutConnection(SnowflakeFileTransferConfig config) throws Exception { logger.trace("Entering uploadWithoutConnection..."); SnowflakeFileTransferMetadataV1 metadata = (SnowflakeFileTransferMetadataV1) config.getSnowflakeFileTransferMetadata(); InputStream uploadStream = config.getUploadStream(); boolean requireCompress = config.getRequireCompress(); int networkTimeoutInMilli = config.getNetworkTimeoutInMilli(); OCSPMode ocspMode = config.getOcspMode(); Properties proxyProperties = config.getProxyProperties(); String streamingIngestClientName = config.getStreamingIngestClientName(); String streamingIngestClientKey = config.getStreamingIngestClientKey(); // Create HttpClient key HttpClientSettingsKey key = SnowflakeUtil.convertProxyPropertiesToHttpClientKey(ocspMode, proxyProperties); StageInfo stageInfo = metadata.getStageInfo(); stageInfo.setProxyProperties(proxyProperties); String destFileName = metadata.getPresignedUrlFileName(); logger.debug("Begin upload data for " + destFileName); long uploadSize; File fileToUpload = null; String digest = null; // Temp file that needs to be cleaned up when upload was successful FileBackedOutputStream fileBackedOutputStream = null; RemoteStoreFileEncryptionMaterial encMat = metadata.getEncryptionMaterial(); if (encMat.getQueryId() == null && encMat.getQueryStageMasterKey() == null && encMat.getSmkId() == null) { encMat = null; } // SNOW-16082: we should capture exception if we fail to compress or // calculate digest. try { if (requireCompress) { InputStreamWithMetadata compressedSizeAndStream = (encMat == null ? compressStreamWithGZIPNoDigest(uploadStream, /* session= */ null, null) : compressStreamWithGZIP(uploadStream, /* session= */ null, encMat.getQueryId())); fileBackedOutputStream = compressedSizeAndStream.fileBackedOutputStream; // update the size uploadSize = compressedSizeAndStream.size; digest = compressedSizeAndStream.digest; if (compressedSizeAndStream.fileBackedOutputStream.getFile() != null) { fileToUpload = compressedSizeAndStream.fileBackedOutputStream.getFile(); } logger.debug("New size after compression: {}", uploadSize); } else { // If it's not local_fs, we store our digest in the metadata // In local_fs, we don't need digest, and if we turn it on, // we will consume whole uploadStream, which local_fs uses. InputStreamWithMetadata result = computeDigest(uploadStream, true); digest = result.digest; fileBackedOutputStream = result.fileBackedOutputStream; uploadSize = result.size; if (result.fileBackedOutputStream.getFile() != null) { fileToUpload = result.fileBackedOutputStream.getFile(); } } logger.debug( "Started copying file to {}:{} destName: {} compressed ? {} size={}", stageInfo.getStageType().name(), stageInfo.getLocation(), destFileName, (requireCompress ? "yes" : "no"), uploadSize); SnowflakeStorageClient initialClient = StorageClientFactory.getFactory().createClient(stageInfo, 1, encMat, /* session= */ null); // Normal flow will never hit here. This is only for testing purposes if (isInjectedFileTransferExceptionEnabled()) { throw (Exception) SnowflakeFileTransferAgent.injectedFileTransferException; } String queryId = encMat != null && encMat.getQueryId() != null ? 
              encMat.getQueryId() : null;
      switch (stageInfo.getStageType()) {
        case S3:
        case AZURE:
          pushFileToRemoteStore(
              metadata.getStageInfo(),
              destFileName,
              uploadStream,
              fileBackedOutputStream,
              uploadSize,
              digest,
              (requireCompress ? FileCompressionType.GZIP : null),
              initialClient,
              config.getSession(),
              config.getCommand(),
              1,
              fileToUpload,
              (fileToUpload == null),
              encMat,
              streamingIngestClientName,
              streamingIngestClientKey,
              queryId);
          break;
        case GCS:
          // If the down-scoped token is used to upload file, one metadata may be used to upload
          // multiple files, so use the dest file name in config.
          destFileName =
              metadata.isForOneFile()
                  ? metadata.getPresignedUrlFileName()
                  : config.getDestFileName();
          pushFileToRemoteStoreWithPresignedUrl(
              metadata.getStageInfo(),
              destFileName,
              uploadStream,
              fileBackedOutputStream,
              uploadSize,
              digest,
              (requireCompress ? FileCompressionType.GZIP : null),
              initialClient,
              networkTimeoutInMilli,
              key,
              1,
              null,
              true,
              encMat,
              metadata.getPresignedUrl(),
              streamingIngestClientName,
              streamingIngestClientKey,
              queryId);
          break;
      }
    } catch (Exception ex) {
      if (!config.isSilentException()) {
        logger.error("Exception encountered during file upload in uploadWithoutConnection", ex);
      }
      throw ex;
    } finally {
      if (fileBackedOutputStream != null) {
        try {
          fileBackedOutputStream.reset();
        } catch (IOException ex) {
          logger.debug("Failed to clean up temp file: {}", ex);
        }
      }
    }
  }

  /**
   * Push a file (or stream) to remote store with a pre-signed URL without a JDBC session.
   *
   * <p>NOTE: This function is developed based on pushFileToRemoteStore(). The main difference is
   * that the caller needs to provide the pre-signed URL and the upload doesn't need a JDBC
   * session.
   */
  private static void pushFileToRemoteStoreWithPresignedUrl(
      StageInfo stage,
      String destFileName,
      InputStream inputStream,
      FileBackedOutputStream fileBackedOutStr,
      long uploadSize,
      String digest,
      FileCompressionType compressionType,
      SnowflakeStorageClient initialClient,
      int networkTimeoutInMilli,
      HttpClientSettingsKey ocspModeAndProxyKey,
      int parallel,
      File srcFile,
      boolean uploadFromStream,
      RemoteStoreFileEncryptionMaterial encMat,
      String presignedUrl,
      String streamingIngestClientName,
      String streamingIngestClientKey,
      String queryId)
      throws SQLException, IOException {
    remoteLocation remoteLocation = extractLocationAndPath(stage.getLocation());

    if (remoteLocation.path != null && !remoteLocation.path.isEmpty()) {
      destFileName =
          remoteLocation.path + (!remoteLocation.path.endsWith("/") ? "/" : "") + destFileName;
    }

    logger.debug(
        "Upload object. Location: {}, key: {}, srcFile: {}, encryption: {}",
        remoteLocation.location,
        destFileName,
        srcFile,
        (ArgSupplier)
            () -> (encMat == null ? "NULL" : encMat.getSmkId() + "|" + encMat.getQueryId()));

    StorageObjectMetadata meta =
        StorageClientFactory.getFactory().createStorageMetadataObj(stage.getStageType());
    meta.setContentLength(uploadSize);
    if (digest != null) {
      initialClient.addDigestMetadata(meta, digest);
    }

    if (compressionType != null && compressionType.isSupported()) {
      meta.setContentEncoding(compressionType.name().toLowerCase());
    }

    if (streamingIngestClientName != null && streamingIngestClientKey != null) {
      initialClient.addStreamingIngestMetadata(
          meta, streamingIngestClientName, streamingIngestClientKey);
    }

    try {
      initialClient.uploadWithPresignedUrlWithoutConnection(
          networkTimeoutInMilli,
          ocspModeAndProxyKey,
          parallel,
          uploadFromStream,
          remoteLocation.location,
          srcFile,
          destFileName,
          inputStream,
          fileBackedOutStr,
          meta,
          stage.getRegion(),
          presignedUrl,
          queryId);
    } finally {
      if (uploadFromStream && inputStream != null) {
        inputStream.close();
      }
    }
  }

  /**
   * This static method is called when we are handling an expired token exception. It retrieves a
   * fresh token from GS and then calls .renew() on the storage client to refresh itself with the
   * new token.
   *
   * @param session a session object
   * @param command a command to be retried
   * @param client a Snowflake Storage client object
   * @throws SnowflakeSQLException if any error occurs
   */
  public static void renewExpiredToken(
      SFSession session, String command, SnowflakeStorageClient client)
      throws SnowflakeSQLException {
    SFStatement statement = new SFStatement(session);
    JsonNode jsonNode = parseCommandInGS(statement, command);
    String queryId = jsonNode.path("data").path("queryId").asText();
    Map<?, ?> stageCredentials = extractStageCreds(jsonNode, queryId);

    // renew client with the fresh token
    logger.debug("Renewing expired access token");
    client.renew(stageCredentials);
  }

  private static void pullFileFromRemoteStore(
      StageInfo stage,
      String filePath,
      String destFileName,
      String localLocation,
      SnowflakeStorageClient initialClient,
      SFSession session,
      String command,
      int parallel,
      RemoteStoreFileEncryptionMaterial encMat,
      String presignedUrl,
      String queryId)
      throws SQLException, IOException {
    remoteLocation remoteLocation = extractLocationAndPath(stage.getLocation());

    String stageFilePath = filePath;

    if (!remoteLocation.path.isEmpty()) {
      stageFilePath = SnowflakeUtil.concatFilePathNames(remoteLocation.path, filePath, "/");
    }
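    // Illustrative (names hypothetical): for a stage location "mybucket/prefix" and
    // filePath "a.csv.gz", extractLocationAndPath yields location "mybucket" and path
    // "prefix", so the resulting stageFilePath is "prefix/a.csv.gz".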
logger.debug( "Download object. Location: {}, key: {}, srcFile: {}, encryption: {}", remoteLocation.location, stageFilePath, filePath, (ArgSupplier) () -> (encMat == null ? "NULL" : encMat.getSmkId() + "|" + encMat.getQueryId())); initialClient.download( session, command, localLocation, destFileName, parallel, remoteLocation.location, stageFilePath, stage.getRegion(), presignedUrl, queryId); } /** * From the set of files intended to be uploaded/downloaded, derive a common prefix and use the * listObjects API to get the object summary for each object that has the common prefix. * *
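 * <p>Illustrative: a PUT of {@code report_jan.csv.gz} and {@code report_feb.csv.gz} lists
 * remote objects under the common prefix {@code report_} and skips any transfer whose size and
 * digest already match a listed object.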

   * <p>For each returned object, we compare the size and digest with the local file; if they are
   * the same, we will not upload/download the file.
   *
   * @throws SnowflakeSQLException if any error occurs
   */
  private void filterExistingFiles() throws SnowflakeSQLException {
    /*
     * Build a reverse map from destination file name to source file path
     * The map will be used for looking up the source file for destination
     * files that already exist in destination location and mark them to be
     * skipped for uploading/downloading
     */
    Map<String, String> destFileNameToSrcFileMap =
        new HashMap<String, String>(fileMetadataMap.size());

    logger.debug("Build reverse map from destination file name to source file");

    for (Map.Entry<String, FileMetadata> entry : fileMetadataMap.entrySet()) {
      if (entry.getValue().destFileName != null) {
        String prevSrcFile =
            destFileNameToSrcFileMap.put(entry.getValue().destFileName, entry.getKey());

        if (prevSrcFile != null) {
          FileMetadata prevFileMetadata = fileMetadataMap.get(prevSrcFile);
          prevFileMetadata.resultStatus = ResultStatus.COLLISION;
          prevFileMetadata.errorDetails = prevSrcFile + " has same name as " + entry.getKey();
        }
      } else {
        logger.debug("No dest file name found for: {}", entry.getKey());
        logger.debug("Status: {}", entry.getValue().resultStatus);
      }
    }

    // no files to be processed
    if (destFileNameToSrcFileMap.size() == 0) {
      return;
    }

    // determine greatest common prefix for all stage file names so that
    // we can call remote store API to list the objects and get their digest to compare
    // with local files
    String[] stageFileNames;

    if (commandType == CommandType.UPLOAD) {
      stageFileNames = destFileNameToSrcFileMap.keySet().toArray(new String[0]);
    } else {
      stageFileNames = destFileNameToSrcFileMap.values().toArray(new String[0]);
    }

    // find greatest common prefix for all stage file names
    Arrays.sort(stageFileNames);
    String greatestCommonPrefix =
        SnowflakeUtil.greatestCommonPrefix(
            stageFileNames[0], stageFileNames[stageFileNames.length - 1]);

    logger.debug("Greatest common prefix: {}", greatestCommonPrefix);

    // use the greatest common prefix to list objects under stage location
    if (stageInfo.getStageType() == StageInfo.StageType.S3
        || stageInfo.getStageType() == StageInfo.StageType.AZURE
        || stageInfo.getStageType() == StageInfo.StageType.GCS) {
      logger.debug("Check existing files on remote storage for the common prefix");

      remoteLocation storeLocation = extractLocationAndPath(stageInfo.getLocation());

      StorageObjectSummaryCollection objectSummaries = null;

      int retryCount = 0;
      logger.debug("Start dragging object summaries from remote storage");
      do {
        try {
          // Normal flow will never hit here. This is only for testing purposes
          if (isInjectedFileTransferExceptionEnabled()
              && SnowflakeFileTransferAgent.injectedFileTransferException
                  instanceof StorageProviderException) {
            throw (StorageProviderException)
                SnowflakeFileTransferAgent.injectedFileTransferException;
          }
          objectSummaries =
              storageClient.listObjects(
                  storeLocation.location,
                  SnowflakeUtil.concatFilePathNames(storeLocation.path, greatestCommonPrefix, "/"));
          logger.debug("Received object summaries from remote storage");
        } catch (Exception ex) {
          logger.debug("Listing objects for filtering encountered exception: {}", ex.getMessage());
          // Need to unwrap StorageProviderException since handleStorageException only handles the
          // base cause.
          if (ex instanceof StorageProviderException) {
            // Cause of StorageProviderException is always an Exception
            ex = (Exception) ex.getCause();
          }
          storageClient.handleStorageException(
              ex, ++retryCount, "listObjects", session, command, queryID);
          continue;
        }

        try {
          compareAndSkipRemoteFiles(objectSummaries, destFileNameToSrcFileMap);
          break; // exit retry loop
        } catch (Exception ex) {
          // This exception retry logic is mainly for the Azure iterator. Since the Azure iterator
          // is a lazy iterator, it can throw exceptions during the for-each calls. To be more
          // specific, iterator APIs, e.g. hasNext(), may throw a Storage service error.
          logger.debug(
              "Comparison with existing files in remote storage encountered exception.", ex);
          if (ex instanceof StorageProviderException) {
            // Cause of StorageProviderException is always an Exception
            ex = (Exception) ex.getCause();
          }
          storageClient.handleStorageException(
              ex, ++retryCount, "compareRemoteFiles", session, command, queryID);
        }
      } while (retryCount <= storageClient.getMaxRetries());
    } else if (stageInfo.getStageType() == StageInfo.StageType.LOCAL_FS) {
      for (String stageFileName : stageFileNames) {
        String stageFilePath =
            SnowflakeUtil.concatFilePathNames(
                stageInfo.getLocation(), stageFileName, localFSFileSep);

        File stageFile = new File(stageFilePath);

        // if stage file doesn't exist, no need to skip whether for
        // upload/download
        if (!stageFile.exists()) {
          continue;
        }

        String mappedSrcFile =
            (commandType == CommandType.UPLOAD)
                ? destFileNameToSrcFileMap.get(stageFileName)
                : stageFileName;

        String localFile =
            (commandType == CommandType.UPLOAD)
                ? mappedSrcFile
                : (localLocation + fileMetadataMap.get(mappedSrcFile).destFileName);

        if (commandType == CommandType.UPLOAD
            && stageFileName.equals(fileMetadataMap.get(mappedSrcFile).destFileName)) {
          skipFile(mappedSrcFile, stageFileName);
          continue;
        }

        // Check file size first; if they are different, we don't need
        // to check the digest
        if (!fileMetadataMap.get(mappedSrcFile).requireCompress
            && stageFile.length() != (new File(localFile)).length()) {
          logger.debug(
              "Size diff between stage and local, will {} {}",
              commandType.name().toLowerCase(),
              mappedSrcFile);
          continue;
        }

        // The stage file exists, and either we will be compressing or
        // the dest file has the same size as the source file, so we will
        // compare digest values below
        String localFileHashText = null;
        String stageFileHashText = null;

        List<FileBackedOutputStream> fileBackedOutputStreams = new ArrayList<>();
        InputStream localFileStream = null;
        try {
          // calculate the digest hash of the local file
          localFileStream = new FileInputStream(localFile);
          if (fileMetadataMap.get(mappedSrcFile).requireCompress) {
            logger.debug("Compressing stream for digest check");

            InputStreamWithMetadata res =
                compressStreamWithGZIP(localFileStream, session, queryID);
            fileBackedOutputStreams.add(res.fileBackedOutputStream);

            localFileStream = res.fileBackedOutputStream.asByteSource().openStream();
          }

          InputStreamWithMetadata res = computeDigest(localFileStream, false);
          localFileHashText = res.digest;
          fileBackedOutputStreams.add(res.fileBackedOutputStream);
        } catch (IOException | NoSuchAlgorithmException ex) {
          throw new SnowflakeSQLLoggedException(
              queryID,
              session,
              SqlState.INTERNAL_ERROR,
              ErrorCode.INTERNAL_ERROR.getMessageCode(),
              ex,
              "Error reading local file: " + localFile);
        } finally {
          for (FileBackedOutputStream stream : fileBackedOutputStreams) {
            if (stream != null) {
              try {
                stream.reset();
              } catch (IOException ex) {
                logger.debug("Failed to clean up temp file: {}", ex);
              }
            }
          }
          IOUtils.closeQuietly(localFileStream);
        }

        FileBackedOutputStream fileBackedOutputStream = null;
        InputStream stageFileStream = null;
        try {
          // calculate digest for stage file
          stageFileStream = new FileInputStream(stageFilePath);

          InputStreamWithMetadata res = computeDigest(stageFileStream, false);
          stageFileHashText = res.digest;
          fileBackedOutputStream = res.fileBackedOutputStream;
        } catch (IOException | NoSuchAlgorithmException ex) {
          throw new SnowflakeSQLLoggedException(
              queryID,
              session,
              SqlState.INTERNAL_ERROR,
              ErrorCode.INTERNAL_ERROR.getMessageCode(),
              ex,
              "Error reading stage file: " + stageFilePath);
        } finally {
          try {
            if (fileBackedOutputStream != null) {
              fileBackedOutputStream.reset();
            }
          } catch (IOException ex) {
            logger.debug("Failed to clean up temp file: {}", ex);
          }
          IOUtils.closeQuietly(stageFileStream);
        }

        // continue if the digest is different so that we will process the file
        if (!stageFileHashText.equals(localFileHashText)) {
          logger.debug(
              "Digest diff between local and stage, will {} {}",
              commandType.name().toLowerCase(),
              mappedSrcFile);
          continue;
        } else {
          logger.debug("Digest matches between local and stage, will skip {}", mappedSrcFile);
          // skip the file given that the checksum is the same between source
          // and destination
          skipFile(mappedSrcFile, stageFileName);
        }
      }
    }
  }

  /**
   * For input objects, we compare the size and digest with the local files; if they are the same,
   * we will not upload/download the file.
   *
   * @param objectSummaries input objects collection
   * @param destFileNameToSrcFileMap map between dest file name and src file
   * @throws SnowflakeSQLException if any error occurs
   */
  private void compareAndSkipRemoteFiles(
      StorageObjectSummaryCollection objectSummaries, Map<String, String> destFileNameToSrcFileMap)
      throws SnowflakeSQLException {
    for (StorageObjectSummary obj : objectSummaries) {
      logger.debug(
          "Existing object: key: {} size: {} md5: {}", obj.getKey(), obj.getSize(), obj.getMD5());

      int idxOfLastFileSep = obj.getKey().lastIndexOf("/");
      String objFileName = obj.getKey().substring(idxOfLastFileSep + 1);

      // get the path to the local file so that we can calculate digest
      String mappedSrcFile = destFileNameToSrcFileMap.get(objFileName);

      // skip objects that don't have a corresponding file to be uploaded
      if (mappedSrcFile == null) {
        continue;
      }

      logger.debug(
          "Next compare digest for {} against {} on the remote store", mappedSrcFile, objFileName);

      String localFile = null;
      final boolean remoteEncrypted;

      try {
        // Normal flow will never hit here. This is only for testing purposes
        if (isInjectedFileTransferExceptionEnabled()) {
          throw (NoSuchAlgorithmException) SnowflakeFileTransferAgent.injectedFileTransferException;
        }

        localFile =
            (commandType == CommandType.UPLOAD) ? mappedSrcFile : (localLocation + objFileName);

        if (commandType == CommandType.DOWNLOAD && !(new File(localFile)).exists()) {
          logger.debug("File does not exist locally, will download {}", mappedSrcFile);
          continue;
        }

        // if it's an upload and there's already a file existing remotely with the same name, skip
        // uploading it
        if (commandType == CommandType.UPLOAD
            && objFileName.equals(fileMetadataMap.get(mappedSrcFile).destFileName)) {
          skipFile(mappedSrcFile, objFileName);
          continue;
        }

        // Check file size first; if the difference is bigger than the block
        // size, we don't need to compare digests
        if (!fileMetadataMap.get(mappedSrcFile).requireCompress
            && Math.abs(obj.getSize() - (new File(localFile)).length()) > 16) {
          logger.debug(
              "Size diff between remote and local, will {} {}",
              commandType.name().toLowerCase(),
              mappedSrcFile);
          continue;
        }

        // Get object metadata from remote storage
        StorageObjectMetadata meta;

        try {
          meta = storageClient.getObjectMetadata(obj.getLocation(), obj.getKey());
        } catch (StorageProviderException spEx) {
          // SNOW-14521: when file is not found, ok to upload
          if (spEx.isServiceException404()) {
            // log it
            logger.debug(
                "File returned from listing but found missing when getting its"
                    + " metadata. Location: {}, key: {}",
                obj.getLocation(),
                obj.getKey());

            // the file is not found, ok to upload
            continue;
          }

          // for any other exception, log an error
          logger.error("Fetching object metadata encountered exception: {}", spEx.getMessage());

          throw spEx;
        }

        String objDigest = storageClient.getDigestMetadata(meta);

        remoteEncrypted =
            MatDesc.parse(meta.getUserMetadata().get(storageClient.getMatdescKey())) != null;

        // calculate the digest hash of the local file
        InputStream fileStream = null;
        String hashText = null;

        // Streams (potentially with temp files) to clean up
        final List<FileBackedOutputStream> fileBackedOutputStreams = new ArrayList<>();
        try {
          fileStream = new FileInputStream(localFile);
          if (fileMetadataMap.get(mappedSrcFile).requireCompress) {
            logger.debug("Compressing stream for digest check");
            InputStreamWithMetadata res = compressStreamWithGZIP(fileStream, session, queryID);
            fileStream = res.fileBackedOutputStream.asByteSource().openStream();
            fileBackedOutputStreams.add(res.fileBackedOutputStream);
          }

          // If the remote file has our digest, compute the SHA-256
          // for the local file.
          // If the remote file does not have our digest but is unencrypted,
          // we compare the MD5 of the unencrypted local file to the ETag
          // of the S3 file.
          // Otherwise (remote file is encrypted, but has no sfc-digest),
          // no comparison is performed
          if (objDigest != null) {
            InputStreamWithMetadata res = computeDigest(fileStream, false);
            hashText = res.digest;
            fileBackedOutputStreams.add(res.fileBackedOutputStream);
          } else if (!remoteEncrypted) {
            hashText = DigestUtils.md5Hex(fileStream);
          }
        } finally {
          if (fileStream != null) {
            fileStream.close();
          }

          for (FileBackedOutputStream stream : fileBackedOutputStreams) {
            if (stream != null) {
              try {
                stream.reset();
              } catch (IOException ex) {
                logger.debug("Failed to clean up temp file: {}", ex);
              }
            }
          }
        }

        // continue so that we will upload the file
        if (hashText == null // remote is encrypted & has no digest
            || (objDigest != null && !hashText.equals(objDigest)) // digest mismatch
            || (objDigest == null && !hashText.equals(obj.getMD5()))) // ETag/MD5 mismatch
        {
          logger.debug(
              "Digest diff between remote store and local, will {} {}, "
                  + "local digest: {}, remote store md5: {}",
              commandType.name().toLowerCase(),
              mappedSrcFile,
              hashText,
              obj.getMD5());
          continue;
        }
      } catch (IOException | NoSuchAlgorithmException ex) {
        throw new SnowflakeSQLLoggedException(
            queryID,
            session,
            SqlState.INTERNAL_ERROR,
            ErrorCode.INTERNAL_ERROR.getMessageCode(),
            ex,
            "Error reading: " + localFile);
      }

      logger.debug(
          "Digest same between remote store and local, will not {} {}",
          commandType.name().toLowerCase(),
          mappedSrcFile);

      skipFile(mappedSrcFile, objFileName);
    }
  }

  private void skipFile(String srcFilePath, String destFileName) {
    FileMetadata fileMetadata = fileMetadataMap.get(srcFilePath);
    if (fileMetadata != null) {
      if (fileMetadata.resultStatus == null || fileMetadata.resultStatus == ResultStatus.UNKNOWN) {
        logger.debug("Mark {} as skipped", srcFilePath);
        fileMetadata.resultStatus = ResultStatus.SKIPPED;
        fileMetadata.errorDetails =
            "File with same destination name and checksum already exists: " + destFileName;
      } else {
        logger.debug(
            "No need to mark as skipped for: {} status was already marked as: {}",
            srcFilePath,
            fileMetadata.resultStatus);
      }
    }
  }

  private void initFileMetadata() throws SnowflakeSQLException {
    // file metadata is keyed on source file names (which are local file names
    // for upload command and stage file names for download command)
    fileMetadataMap = new HashMap<String, FileMetadata>(sourceFiles.size());
    if (commandType == CommandType.UPLOAD) {
      if (sourceFromStream) {
        FileMetadata fileMetadata = new FileMetadata();
        fileMetadataMap.put(SRC_FILE_NAME_FOR_STREAM, fileMetadata);
        fileMetadata.srcFileName = SRC_FILE_NAME_FOR_STREAM;
      } else {
        for (String sourceFile : sourceFiles) {
          FileMetadata fileMetadata = new FileMetadata();
          fileMetadataMap.put(sourceFile, fileMetadata);
          File file = new File(sourceFile);
          fileMetadata.srcFileName = file.getName();
          fileMetadata.srcFileSize = file.length();

          if (!file.exists()) {
            logger.debug("File doesn't exist: {}", sourceFile);

            throw new SnowflakeSQLLoggedException(
                queryID,
                session,
                ErrorCode.FILE_NOT_FOUND.getMessageCode(),
                SqlState.DATA_EXCEPTION,
                sourceFile);
          } else if (file.isDirectory()) {
            logger.debug("Not a file, but directory: {}", sourceFile);

            throw new SnowflakeSQLLoggedException(
                queryID,
                session,
                ErrorCode.FILE_IS_DIRECTORY.getMessageCode(),
                SqlState.DATA_EXCEPTION,
                sourceFile);
          }
        }
      }
    } else if (commandType == CommandType.DOWNLOAD) {
      for (String sourceFile : sourceFiles) {
        FileMetadata fileMetadata = new FileMetadata();
        fileMetadataMap.put(sourceFile, fileMetadata);
        fileMetadata.srcFileName = sourceFile;

        fileMetadata.destFileName = extractSafeDestFileName(sourceFile, queryID);
      }
    }
  }

  static String extractSafeDestFileName(String sourceFile, String queryId)
      throws SnowflakeSQLException {
    if (sourceFile == null) {
      throw new SnowflakeSQLException(
          queryId, ErrorCode.INTERNAL_ERROR, "Source file path from server is null");
    }
    int lastSeparator = Math.max(sourceFile.lastIndexOf('/'), sourceFile.lastIndexOf('\\'));
    String name = sourceFile.substring(lastSeparator + 1);
    if (name.isEmpty()
        || ".".equals(name)
        || "..".equals(name)
        || name.indexOf('\0') >= 0
        || name.indexOf('/') >= 0
        || name.indexOf('\\') >= 0
        || name.indexOf(':') >= 0) {
      throw new SnowflakeSQLException(
          queryId,
          ErrorCode.INTERNAL_ERROR,
          "Invalid destination file name received from server: " + sourceFile);
    }
    return name;
  }

  /**
   * Derive compression type from mime type
   *
   * @param mimeTypeStr The mime type passed to us
   * @return the Optional for the compression type or Optional.empty()
   */
  static Optional<FileCompressionType> mimeTypeToCompressionType(String mimeTypeStr) {
    if (mimeTypeStr == null) {
      return Optional.empty();
    }
    int slashIndex = mimeTypeStr.indexOf('/');
    if (slashIndex < 0) {
      return Optional.empty(); // unable to find sub type
    }
    int semiColonIndex = mimeTypeStr.indexOf(';');
    String subType;
    if (semiColonIndex < 0) {
      subType = mimeTypeStr.substring(slashIndex + 1).trim().toLowerCase(Locale.ENGLISH);
    } else {
      subType = mimeTypeStr.substring(slashIndex + 1, semiColonIndex);
    }
    if (isNullOrEmpty(subType)) {
      return Optional.empty();
    }
    return FileCompressionType.lookupByMimeSubType(subType);
  }

  /**
   * Detect file compression type for all files to be uploaded
   *
   * @throws SnowflakeSQLException Will be thrown if the compression type is unknown or unsupported
   */
  private void processFileCompressionTypes() throws SnowflakeSQLException {
    // see what the user has told us about the source file compression types
    boolean autoDetect = true;
    FileCompressionType userSpecifiedSourceCompression = null;
    if (SOURCE_COMPRESSION_AUTO_DETECT.equalsIgnoreCase(sourceCompression)) {
      autoDetect = true;
    } else if (SOURCE_COMPRESSION_NONE.equalsIgnoreCase(sourceCompression)) {
      autoDetect = false;
    } else {
      Optional<FileCompressionType> foundCompType =
          FileCompressionType.lookupByMimeSubType(sourceCompression.toLowerCase());
      if (!foundCompType.isPresent()) {
        throw new SnowflakeSQLLoggedException(
            queryID,
            session,
            ErrorCode.COMPRESSION_TYPE_NOT_KNOWN.getMessageCode(),
            SqlState.FEATURE_NOT_SUPPORTED,
            sourceCompression);
      }
      userSpecifiedSourceCompression = foundCompType.get();

      if (!userSpecifiedSourceCompression.isSupported()) {
        throw new SnowflakeSQLLoggedException(
            queryID,
            session,
            ErrorCode.COMPRESSION_TYPE_NOT_SUPPORTED.getMessageCode(),
            SqlState.FEATURE_NOT_SUPPORTED,
            sourceCompression);
      }

      autoDetect = false;
    }

    if (!sourceFromStream) {
      for (String srcFile : sourceFiles) {
        FileMetadata fileMetadata = fileMetadataMap.get(srcFile);

        if (fileMetadata.resultStatus == ResultStatus.NONEXIST
            || fileMetadata.resultStatus == ResultStatus.DIRECTORY) {
          continue;
        }

        File file = new File(srcFile);
        String srcFileName = file.getName();

        String mimeTypeStr = null;
        FileCompressionType currentFileCompressionType = null;

        try {
          if (autoDetect) {
            // probe the file for compression type using the tika file type detector
            mimeTypeStr = Files.probeContentType(file.toPath());

            if (mimeTypeStr == null) {
              try (FileInputStream f = new FileInputStream(file)) {
                byte[] magic = new byte[4];
                if (f.read(magic, 0, 4) == 4) {
                  if (Arrays.equals(magic, new byte[] {'P', 'A', 'R', '1'})) {
                    mimeTypeStr = "snowflake/parquet";
                  } else if (Arrays.equals(
                      Arrays.copyOfRange(magic, 0, 3), new byte[] {'O', 'R', 'C'})) {
                    mimeTypeStr = "snowflake/orc";
                  }
                }
              }
            }
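            // Magic-byte fallback above: Parquet files begin with the 4-byte marker "PAR1"
            // and ORC files with "ORC"; Files.probeContentType does not recognize either
            // format, so the stream is probed directly. The "snowflake/..." mime strings
            // are internal labels that mimeTypeToCompressionType maps to compression types.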
"snowflake/orc"; } } } } if (mimeTypeStr != null) { logger.debug("Mime type for {} is: {}", srcFile, mimeTypeStr); Optional foundCompType = mimeTypeToCompressionType(mimeTypeStr); if (foundCompType.isPresent()) { currentFileCompressionType = foundCompType.get(); } } // fallback: use file extension if (currentFileCompressionType == null) { mimeTypeStr = getMimeTypeFromFileExtension(srcFile); if (mimeTypeStr != null) { logger.debug("Mime type for {} is: {}", srcFile, mimeTypeStr); Optional foundCompType = mimeTypeToCompressionType(mimeTypeStr); if (foundCompType.isPresent()) { currentFileCompressionType = foundCompType.get(); } } } } else { currentFileCompressionType = userSpecifiedSourceCompression; } // check if the compression type is supported by us if (currentFileCompressionType != null) { fileMetadata.srcCompressionType = currentFileCompressionType; if (currentFileCompressionType.isSupported()) { // remember the compression type if supported fileMetadata.destCompressionType = currentFileCompressionType; fileMetadata.requireCompress = false; fileMetadata.destFileName = srcFileName; logger.debug( "File compression detected as {} for: {}", currentFileCompressionType.name(), srcFile); } else { // error if not supported throw new SnowflakeSQLLoggedException( queryID, session, ErrorCode.COMPRESSION_TYPE_NOT_SUPPORTED.getMessageCode(), SqlState.FEATURE_NOT_SUPPORTED, currentFileCompressionType.name()); } } else { // we want to auto compress the files unless the user has disabled it logger.debug("Compression not found for file: {}", srcFile); // Set compress flag fileMetadata.requireCompress = autoCompress; fileMetadata.srcCompressionType = null; if (autoCompress) { // We only support gzip auto compression fileMetadata.destFileName = srcFileName + FileCompressionType.GZIP.getFileExtension(); fileMetadata.destCompressionType = FileCompressionType.GZIP; } else { fileMetadata.destFileName = srcFileName; fileMetadata.destCompressionType = null; } } } catch (Exception ex) { // SNOW-13146: don't log severe message for user error if (ex instanceof SnowflakeSQLException) { logger.debug("Exception encountered when processing file compression types", ex); } else { logger.debug("Exception encountered when processing file compression types", ex); } fileMetadata.resultStatus = ResultStatus.ERROR; fileMetadata.errorDetails = ex.getMessage(); } } } else { // source from stream case FileMetadata fileMetadata = fileMetadataMap.get(SRC_FILE_NAME_FOR_STREAM); fileMetadata.srcCompressionType = userSpecifiedSourceCompression; if (compressSourceFromStream) { fileMetadata.destCompressionType = FileCompressionType.GZIP; fileMetadata.requireCompress = true; } else { fileMetadata.destCompressionType = userSpecifiedSourceCompression; fileMetadata.requireCompress = false; } // add gz extension if file name doesn't have it if (compressSourceFromStream && !destFileNameForStreamSource.endsWith(FileCompressionType.GZIP.getFileExtension())) { fileMetadata.destFileName = destFileNameForStreamSource + FileCompressionType.GZIP.getFileExtension(); } else { fileMetadata.destFileName = destFileNameForStreamSource; } } } /** * Derive mime type from file extension * * @param srcFile The source file name * @return the mime type derived from the file extension */ private String getMimeTypeFromFileExtension(String srcFile) { String srcFileLowCase = srcFile.toLowerCase(); for (FileCompressionType compressionType : FileCompressionType.values()) { if (srcFileLowCase.endsWith(compressionType.getFileExtension())) { return 
compressionType.getMimeType() + "/" + compressionType.getMimeSubTypes().get(0); } } return null; } /** * A small helper for extracting location name and path from full location path * * @param stageLocationPath stage location * @return remoteLocation object */ public static remoteLocation extractLocationAndPath(String stageLocationPath) { String location = stageLocationPath; String path = ""; // split stage location as location name and path if (stageLocationPath.contains("/")) { location = stageLocationPath.substring(0, stageLocationPath.indexOf("/")); path = stageLocationPath.substring(stageLocationPath.indexOf("/") + 1); } return new remoteLocation(location, path); } /** Generate status rows for each file */ private void populateStatusRows() { for (Map.Entry entry : fileMetadataMap.entrySet()) { FileMetadata fileMetadata = entry.getValue(); if (commandType == CommandType.UPLOAD) { statusRows.add( showEncryptionParameter ? new UploadCommandEncryptionFacade( fileMetadata.srcFileName, fileMetadata.destFileName, fileMetadata.resultStatus.name(), fileMetadata.errorDetails, fileMetadata.srcFileSize, fileMetadata.destFileSize, (fileMetadata.srcCompressionType == null) ? "NONE" : fileMetadata.srcCompressionType.name(), (fileMetadata.destCompressionType == null) ? "NONE" : fileMetadata.destCompressionType.name(), fileMetadata.isEncrypted) : new UploadCommandFacade( fileMetadata.srcFileName, fileMetadata.destFileName, fileMetadata.resultStatus.name(), fileMetadata.errorDetails, fileMetadata.srcFileSize, fileMetadata.destFileSize, (fileMetadata.srcCompressionType == null) ? "NONE" : fileMetadata.srcCompressionType.name(), (fileMetadata.destCompressionType == null) ? "NONE" : fileMetadata.destCompressionType.name())); } else if (commandType == CommandType.DOWNLOAD) { statusRows.add( showEncryptionParameter ? new DownloadCommandEncryptionFacade( fileMetadata.srcFileName.startsWith("/") ? fileMetadata.srcFileName.substring(1) : fileMetadata.srcFileName, fileMetadata.resultStatus.name(), fileMetadata.errorDetails, fileMetadata.destFileSize, fileMetadata.isEncrypted) : new DownloadCommandFacade( fileMetadata.srcFileName.startsWith("/") ? fileMetadata.srcFileName.substring(1) : fileMetadata.srcFileName, fileMetadata.resultStatus.name(), fileMetadata.errorDetails, fileMetadata.destFileSize)); } } /* we sort the result if the connection is in sorting mode */ Object sortProperty = null; sortProperty = session.getSessionPropertyByKey("sort"); boolean sortResult = sortProperty != null && (Boolean) sortProperty; if (sortResult) { Comparator comparator = (commandType == CommandType.UPLOAD) ? 
new Comparator() { public int compare(Object a, Object b) { String srcFileNameA = ((UploadCommandFacade) a).getSrcFile(); String srcFileNameB = ((UploadCommandFacade) b).getSrcFile(); return srcFileNameA.compareTo(srcFileNameB); } } : new Comparator() { public int compare(Object a, Object b) { String srcFileNameA = ((DownloadCommandFacade) a).getFile(); String srcFileNameB = ((DownloadCommandFacade) b).getFile(); return srcFileNameA.compareTo(srcFileNameB); } }; // sort the rows by source file names Collections.sort(statusRows, comparator); } } public Object getResultSet() throws SnowflakeSQLException { return new SFFixedViewResultSet(this, this.commandType, this.queryID); } public CommandType getCommandType() { return commandType; } /** * Handles an InvalidKeyException which indicates that the JCE component is not installed properly * * @deprecated use {@link #throwJCEMissingError(String, Exception, String)} * @param operation a string indicating the operation type, e.g. upload/download * @param ex The exception to be handled * @throws SnowflakeSQLException throws the error as a SnowflakeSQLException */ @Deprecated public static void throwJCEMissingError(String operation, Exception ex) throws SnowflakeSQLException { throwJCEMissingError(operation, ex, null); } /** * Handles an InvalidKeyException which indicates that the JCE component is not installed properly * * @param operation a string indicating the operation type, e.g. upload/download * @param ex The exception to be handled * @param queryId last query id if available * @throws SnowflakeSQLException throws the error as a SnowflakeSQLException */ public static void throwJCEMissingError(String operation, Exception ex, String queryId) throws SnowflakeSQLException { // Most likely cause: Unlimited strength policy files not installed String msg = "Strong encryption with Java JRE requires JCE " + "Unlimited Strength Jurisdiction Policy files. " + "Follow JDBC client installation instructions " + "provided by Snowflake or contact Snowflake Support."; logger.error( "JCE Unlimited Strength policy files missing: {}. {}.", ex.getMessage(), ex.getCause().getMessage()); String bootLib = systemGetProperty("sun.boot.library.path"); if (bootLib != null) { msg += " The target directory on your system is: " + Paths.get(bootLib, "security").toString(); logger.error(msg); } throw new SnowflakeSQLException( queryId, ex, SqlState.SYSTEM_ERROR, ErrorCode.AWS_CLIENT_ERROR.getMessageCode(), operation, msg); } /** * For handling IOException: No space left on device when attempting to download a file to a * location where there is not enough space. We don't want to retry on this exception. * * @deprecated use {@link #throwNoSpaceLeftError(SFSession, String, Exception, String)} * @param session the current session * @param operation the operation i.e. GET * @param ex the exception caught * @throws SnowflakeSQLLoggedException if not enough space left on device to download file. */ @Deprecated public static void throwNoSpaceLeftError(SFSession session, String operation, Exception ex) throws SnowflakeSQLLoggedException { throwNoSpaceLeftError(session, operation, ex, null); } /** * For handling IOException: No space left on device when attempting to download a file to a * location where there is not enough space. We don't want to retry on this exception. * * @param session the current session * @param operation the operation i.e. 
GET * @param ex the exception caught * @param queryId the query ID * @throws SnowflakeSQLLoggedException if not enough space left on device to download file. */ public static void throwNoSpaceLeftError( SFSession session, String operation, Exception ex, String queryId) throws SnowflakeSQLLoggedException { String exMessage = SnowflakeUtil.getRootCause(ex).getMessage(); if (exMessage != null && exMessage.equals(NO_SPACE_LEFT_ON_DEVICE_ERR)) { throw new SnowflakeSQLLoggedException( queryId, session, SqlState.SYSTEM_ERROR, ErrorCode.IO_ERROR.getMessageCode(), ex, "Encountered exception during " + operation + ": " + ex.getMessage()); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/SnowflakeFileTransferConfig.java ================================================ package net.snowflake.client.internal.jdbc; import java.io.InputStream; import java.util.Properties; import net.snowflake.client.internal.core.OCSPMode; import net.snowflake.client.internal.core.SFSession; /** * This class manages the parameters to call SnowflakeFileTransferAgent.uploadWithoutConnection() */ public class SnowflakeFileTransferConfig { private SnowflakeFileTransferMetadata metadata; private InputStream uploadStream; private boolean requireCompress; private int networkTimeoutInMilli; private OCSPMode ocspMode; private Properties proxyProperties; private String prefix; private String destFileName; private SFSession session; // Optional, added for S3 and Azure private String command; // Optional, added for S3 and Azure private boolean useS3RegionalUrl; // only for S3 us-east-1 private link deployments private String streamingIngestClientName; private String streamingIngestClientKey; private boolean silentException; public SnowflakeFileTransferConfig(Builder builder) { this.metadata = builder.metadata; this.uploadStream = builder.uploadStream; this.requireCompress = builder.requireCompress; this.networkTimeoutInMilli = builder.networkTimeoutInMilli; this.ocspMode = builder.ocspMode; this.proxyProperties = builder.proxyProperties; this.prefix = builder.prefix; this.destFileName = builder.destFileName; this.session = builder.session; this.command = builder.command; this.useS3RegionalUrl = builder.useS3RegionalUrl; this.streamingIngestClientKey = builder.streamingIngestClientKey; this.streamingIngestClientName = builder.streamingIngestClientName; this.silentException = builder.silentException; } public SnowflakeFileTransferMetadata getSnowflakeFileTransferMetadata() { return metadata; } public InputStream getUploadStream() { return uploadStream; } public boolean getRequireCompress() { return requireCompress; } public int getNetworkTimeoutInMilli() { return networkTimeoutInMilli; } public OCSPMode getOcspMode() { return ocspMode; } public Properties getProxyProperties() { return proxyProperties; } public String getPrefix() { return prefix; } public String getDestFileName() { return destFileName; } public SFSession getSession() { return session; } public String getCommand() { return command; } public boolean getUseS3RegionalUrl() { return useS3RegionalUrl; } public String getStreamingIngestClientName() { return this.streamingIngestClientName; } public String getStreamingIngestClientKey() { return this.streamingIngestClientKey; } public boolean isSilentException() { return silentException; } // Builder class public static class Builder { private SnowflakeFileTransferMetadata metadata = null; private InputStream uploadStream = null; private boolean requireCompress = true; 
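// Illustrative usage sketch (commentary only, not part of this class; "metadata" and "stream"
// are caller-supplied placeholders, and the OCSPMode constant shown is an assumption). build()
// below validates that metadata, uploadStream, and ocspMode were set, so a minimal construction
// looks roughly like:
//
//   SnowflakeFileTransferConfig config =
//       SnowflakeFileTransferConfig.Builder.newInstance()
//           .setSnowflakeFileTransferMetadata(metadata) // required
//           .setUploadStream(stream)                    // required
//           .setOcspMode(OCSPMode.FAIL_OPEN)            // required
//           .setRequireCompress(false)
//           .build();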
private int networkTimeoutInMilli = 0; private OCSPMode ocspMode = null; private Properties proxyProperties = null; private String prefix = null; private String destFileName = null; private SFSession session = null; private String command = null; private boolean useS3RegionalUrl = false; // only for S3 us-east-1 private link deployments private String streamingIngestClientName; private String streamingIngestClientKey; private boolean silentException = false; public static Builder newInstance() { return new Builder(); } private Builder() {} // Build method to deal with outer class // to return outer instance public SnowflakeFileTransferConfig build() throws IllegalArgumentException { // Validate required parameters if (metadata == null) { throw new IllegalArgumentException("Snowflake File Transfer metadata is needed."); } if (uploadStream == null) { throw new IllegalArgumentException("Upload data stream is needed."); } if (ocspMode == null) { throw new IllegalArgumentException("Upload OCSP mode is needed."); } // Create the object return new SnowflakeFileTransferConfig(this); } public Builder setSnowflakeFileTransferMetadata(SnowflakeFileTransferMetadata metadata) { this.metadata = metadata; return this; } public Builder setUploadStream(InputStream uploadStream) { this.uploadStream = uploadStream; return this; } public Builder setRequireCompress(boolean requireCompress) { this.requireCompress = requireCompress; return this; } public Builder setNetworkTimeoutInMilli(int networkTimeoutInMilli) { this.networkTimeoutInMilli = networkTimeoutInMilli; return this; } public Builder setOcspMode(OCSPMode ocspMode) { this.ocspMode = ocspMode; return this; } public Builder setProxyProperties(Properties proxyProperties) { this.proxyProperties = proxyProperties; return this; } public Builder setPrefix(String prefix) { this.prefix = prefix; return this; } public Builder setDestFileName(String destFileName) { this.destFileName = destFileName; return this; } public Builder setSFSession(SFSession session) { this.session = session; return this; } public Builder setCommand(String command) { this.command = command; return this; } public Builder setUseS3RegionalUrl(boolean useS3RegUrl) { this.useS3RegionalUrl = useS3RegUrl; return this; } /** * Streaming ingest client name, used to calculate streaming ingest billing per client * * @param streamingIngestClientName streaming ingest client name * @return Builder */ public Builder setStreamingIngestClientName(String streamingIngestClientName) { this.streamingIngestClientName = streamingIngestClientName; return this; } /** * Streaming ingest client key provided by Snowflake, used to calculate streaming ingest billing * per client * * @param streamingIngestClientKey streaming ingest client key * @return Builder */ public Builder setStreamingIngestClientKey(String streamingIngestClientKey) { this.streamingIngestClientKey = streamingIngestClientKey; return this; } /** * Do not log exception when occurred, default: false * * @param silentException should not log exception when occurred * @return Builder */ public Builder setSilentException(boolean silentException) { this.silentException = silentException; return this; } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/SnowflakeFileTransferMetadata.java ================================================ package net.snowflake.client.internal.jdbc; public interface SnowflakeFileTransferMetadata { /** * Determine this metadata is for transferring one or multiple files. 
 *
 * @return true if it is for transferring one file.
 */
boolean isForOneFile();
}

================================================
FILE: src/main/java/net/snowflake/client/internal/jdbc/SnowflakeFileTransferMetadataV1.java
================================================

package net.snowflake.client.internal.jdbc;

import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty;

import com.fasterxml.jackson.annotation.JsonProperty;
import java.io.Serializable;
import net.snowflake.client.internal.jdbc.SFBaseFileTransferAgent.CommandType;
import net.snowflake.client.internal.jdbc.cloud.storage.StageInfo;
import net.snowflake.common.core.RemoteStoreFileEncryptionMaterial;

/**
 * A class to manage metadata for upload or download files. It is introduced for distributed data
 * processing. The typical use case is: 1. The cluster master has a JDBC connection to Snowflake
 * and can generate this object for the file transfer. 2. The cluster master node can transfer the
 * object to a cluster worker. 3. The cluster worker can upload or download data with the object
 * without a JDBC connection.
 *
 * <p>
NOTE: When this class is created, it only supports UPLOAD for GCS. It is created for Snowflake * Spark Connector. */ public class SnowflakeFileTransferMetadataV1 implements SnowflakeFileTransferMetadata, Serializable { private static final long serialVersionUID = 1L; private String presignedUrl; private String presignedUrlFileName; private String encryptionMaterial_queryStageMasterKey; private String encryptionMaterial_queryId; private Long encryptionMaterial_smkId; private CommandType commandType; private StageInfo stageInfo; public SnowflakeFileTransferMetadataV1( String presignedUrl, String presignedUrlFileName, String encryptionMaterial_queryStageMasterKey, String encryptionMaterial_queryId, Long encryptionMaterial_smkId, CommandType commandType, StageInfo stageInfo) { this.presignedUrl = presignedUrl; this.presignedUrlFileName = presignedUrlFileName; this.encryptionMaterial_queryStageMasterKey = encryptionMaterial_queryStageMasterKey; this.encryptionMaterial_queryId = encryptionMaterial_queryId; this.encryptionMaterial_smkId = encryptionMaterial_smkId; this.commandType = commandType; this.stageInfo = stageInfo; } @Override public boolean isForOneFile() { // The presigned url is for one file and the down-scoped token can be used for multiple files. return stageInfo.getStageType() == StageInfo.StageType.GCS && !isNullOrEmpty(presignedUrl) && !"null".equalsIgnoreCase(presignedUrl); } @JsonProperty("presignedUrl") public String getPresignedUrl() { return presignedUrl; } public void setPresignedUrl(String presignedUrl) { this.presignedUrl = presignedUrl; } @JsonProperty("presignedUrlFileName") public String getPresignedUrlFileName() { return presignedUrlFileName; } public void setPresignedUrlFileName(String presignedUrlFileName) { this.presignedUrlFileName = presignedUrlFileName; } @JsonProperty("encryptionMaterial") public RemoteStoreFileEncryptionMaterial getEncryptionMaterial() { return new RemoteStoreFileEncryptionMaterial( encryptionMaterial_queryStageMasterKey, encryptionMaterial_queryId, encryptionMaterial_smkId); } public void setEncryptionMaterial( String encryptionMaterial_queryStageMasterKey, String encryptionMaterial_queryId, Long encryptionMaterial_smkId) { this.encryptionMaterial_queryStageMasterKey = encryptionMaterial_queryStageMasterKey; this.encryptionMaterial_queryId = encryptionMaterial_queryId; this.encryptionMaterial_smkId = encryptionMaterial_smkId; } @JsonProperty("commandType") public CommandType getCommandType() { return commandType; } public void setCommandType(CommandType commandType) { this.commandType = commandType; } @JsonProperty("stageInfo") public StageInfo getStageInfo() { return this.stageInfo; } public void setStageInfo(StageInfo stageInfo) { this.stageInfo = stageInfo; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/SnowflakeFixedView.java ================================================ package net.snowflake.client.internal.jdbc; import java.util.List; import net.snowflake.client.internal.core.SFBaseSession; /** An interface to use for returning query results from any java class */ public interface SnowflakeFixedView { List describeColumns(SFBaseSession session) throws Exception; List getNextRow() throws Exception; int getTotalRows(); } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/SnowflakeLoggedFeatureNotSupportedException.java ================================================ package net.snowflake.client.internal.jdbc; import 
java.sql.SQLFeatureNotSupportedException;
import net.snowflake.client.internal.core.SFBaseSession;
import net.snowflake.client.internal.jdbc.telemetry.SqlExceptionTelemetryHandler;
import net.snowflake.common.core.SqlState;

public class SnowflakeLoggedFeatureNotSupportedException extends SQLFeatureNotSupportedException {
  public SnowflakeLoggedFeatureNotSupportedException(SFBaseSession session) {
    super();
    SqlExceptionTelemetryHandler.sendTelemetry(
        null, SqlState.FEATURE_NOT_SUPPORTED, -1, session, this);
  }

  public SnowflakeLoggedFeatureNotSupportedException(SFBaseSession session, String message) {
    super(message);
    SqlExceptionTelemetryHandler.sendTelemetry(
        null, SqlState.FEATURE_NOT_SUPPORTED, -1, session, this);
  }
}

================================================
FILE: src/main/java/net/snowflake/client/internal/jdbc/SnowflakeParameterMetadata.java
================================================

package net.snowflake.client.internal.jdbc;

import static net.snowflake.client.internal.jdbc.util.SnowflakeTypeHelper.convertStringToType;

import java.sql.ParameterMetaData;
import java.sql.SQLException;
import net.snowflake.client.internal.core.MetaDataOfBinds;
import net.snowflake.client.internal.core.SFBaseSession;
import net.snowflake.client.internal.core.SFPreparedStatementMetaData;

/**
 * Naive implementation of ParameterMetadata class.
 *
 * <p>
This class is backed by SFStatementMetadata class, where metadata information is stored as * describe sql response. */ public class SnowflakeParameterMetadata implements ParameterMetaData { private SFPreparedStatementMetaData sfPreparedStatementMetaData; private SFBaseSession session; public SnowflakeParameterMetadata( SFPreparedStatementMetaData sfStatementMetaData, SFBaseSession session) { this.sfPreparedStatementMetaData = sfStatementMetaData; this.session = session; } @Override public int getParameterCount() throws SQLException { return sfPreparedStatementMetaData.getNumberOfBinds(); } @Override public int isNullable(int param) throws SQLException { MetaDataOfBinds paramInfo = sfPreparedStatementMetaData.getMetaDataForBindParam(param); if (paramInfo.isNullable()) { return ParameterMetaData.parameterNullable; } return ParameterMetaData.parameterNoNulls; } @Override public boolean isSigned(int param) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public int getPrecision(int param) throws SQLException { MetaDataOfBinds paramInfo = sfPreparedStatementMetaData.getMetaDataForBindParam(param); return paramInfo.getPrecision(); } @Override public int getScale(int param) throws SQLException { MetaDataOfBinds paramInfo = sfPreparedStatementMetaData.getMetaDataForBindParam(param); return paramInfo.getScale(); } @Override public int getParameterType(int param) throws SQLException { MetaDataOfBinds paramInfo = sfPreparedStatementMetaData.getMetaDataForBindParam(param); return convertStringToType(paramInfo.getTypeName()); } @Override public String getParameterTypeName(int param) throws SQLException { MetaDataOfBinds paramInfo = sfPreparedStatementMetaData.getMetaDataForBindParam(param); return paramInfo.getTypeName(); } @Override public String getParameterClassName(int param) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(session); } @Override public int getParameterMode(int param) throws SQLException { throw new SnowflakeLoggedFeatureNotSupportedException(session); } @SuppressWarnings("unchecked") @Override public T unwrap(java.lang.Class iface) throws SQLException { if (!isWrapperFor(iface)) { throw new SQLException( this.getClass().getName() + " not unwrappable from " + iface.getName()); } return (T) this; } @Override public boolean isWrapperFor(java.lang.Class iface) throws SQLException { return iface.isInstance(this); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/SnowflakeReauthenticationRequest.java ================================================ package net.snowflake.client.internal.jdbc; import net.snowflake.client.api.exception.SnowflakeSQLException; /** SnowflakeReauthenticationRequest signals the reauthentication used for SSO */ public class SnowflakeReauthenticationRequest extends SnowflakeSQLException { private static final long serialVersionUID = 1L; public SnowflakeReauthenticationRequest( String queryId, String reason, String sqlState, int vendorCode) { super(queryId, reason, sqlState, vendorCode); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/SnowflakeResultChunk.java ================================================ package net.snowflake.client.internal.jdbc; import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.ReentrantLock; import net.snowflake.client.internal.util.SecretDetector; /** Class for result chunk */ public abstract class SnowflakeResultChunk { 
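// Coordination sketch (commentary only; InterruptedException handling elided): the downloader
// thread updates downloadState and signals downloadCondition under the lock, while a consumer
// typically waits along these lines:
//
//   chunk.getLock().lock();
//   try {
//     while (chunk.getDownloadState() == DownloadState.IN_PROGRESS) {
//       chunk.getDownloadCondition().await(); // woken by the downloader's signal
//     }
//   } finally {
//     chunk.getLock().unlock();
//   }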
public boolean isReleased() { return released; } public void setReleased() { released = true; } public enum DownloadState { NOT_STARTED, IN_PROGRESS, SUCCESS, FAILURE } // url for result chunk private final String url; // url for result chunk, with any credentials present (e.g. SAS tokens) // masked private final String scrubbedUrl; // number of columns to expect final int colCount; // uncompressed size in bytes of this chunk int uncompressedSize; // row count final int rowCount; // download time for the chunk private long downloadTime; // parse time for the chunk private long parseTime; private DownloadState downloadState = DownloadState.NOT_STARTED; // lock for guarding shared chunk state between consumer and downloader private ReentrantLock lock = new ReentrantLock(); // a condition to signal from downloader to consumer private Condition downloadCondition = lock.newCondition(); // download error if any for the chunk private String downloadError; private boolean released = false; /** * Compute the memory necessary to store the data of this chunk * * @return necessary memory in bytes */ abstract long computeNeededChunkMemory(); /** Free the data stored in this chunk. Called when finish consuming the chunk */ abstract void freeData(); /** Reset all data structure used in this result chunk */ abstract void reset(); public SnowflakeResultChunk(String url, int rowCount, int colCount, int uncompressedSize) { this.url = url; this.scrubbedUrl = SecretDetector.maskSASToken(this.url); this.rowCount = rowCount; this.colCount = colCount; this.uncompressedSize = uncompressedSize; } public final String getUrl() { return url; } public final String getScrubbedUrl() { return this.scrubbedUrl; } public final int getRowCount() { return rowCount; } public final int getUncompressedSize() { return uncompressedSize; } public final int getColCount() { return this.colCount; } public long getDownloadTime() { return downloadTime; } public void setDownloadTime(long downloadTime) { this.downloadTime = downloadTime; } public long getParseTime() { return parseTime; } public void setParseTime(long parseTime) { this.parseTime = parseTime; } public ReentrantLock getLock() { return lock; } public Condition getDownloadCondition() { return downloadCondition; } public String getDownloadError() { return downloadError; } public void setDownloadError(String downloadError) { this.downloadError = downloadError; } public DownloadState getDownloadState() { return downloadState; } public void setDownloadState(DownloadState downloadState) { this.downloadState = downloadState; } long getTotalTime() { return downloadTime + parseTime; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/SnowflakeResultSetMetaDataV1.java ================================================ package net.snowflake.client.internal.jdbc; import java.sql.ResultSetMetaData; import java.sql.SQLException; import java.sql.Types; import java.util.List; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.api.resultset.FieldMetadata; import net.snowflake.client.api.resultset.SnowflakeResultSetMetaData; import net.snowflake.client.internal.core.SFBaseSession; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.core.SFResultSetMetaData; import net.snowflake.client.internal.exception.SnowflakeSQLLoggedException; import net.snowflake.client.internal.jdbc.util.SnowflakeTypeUtil; import net.snowflake.client.internal.log.SFLogger; import 
net.snowflake.client.internal.log.SFLoggerFactory; /** Snowflake ResultSetMetaData */ public class SnowflakeResultSetMetaDataV1 implements ResultSetMetaData, SnowflakeResultSetMetaData { public enum QueryType { ASYNC, SYNC }; private static final SFLogger logger = SFLoggerFactory.getLogger(SnowflakeResultSetMetaDataV1.class); private SFResultSetMetaData resultSetMetaData; private String queryId; private QueryType queryType = QueryType.SYNC; private SFBaseSession session; public SnowflakeResultSetMetaDataV1(SFResultSetMetaData resultSetMetaData) throws SnowflakeSQLException { this.resultSetMetaData = resultSetMetaData; this.queryId = resultSetMetaData.getQueryId(); this.session = resultSetMetaData.getSession(); } public void setQueryType(QueryType type) { this.queryType = type; } /** * @return query id */ public String getQueryID() throws SQLException { return this.queryId; } /** * Override the original query ID to provide the accurate query ID for metadata produced from an * SFAsyncResultSet. The original query ID is from the result_scan query. The user expects to * retrieve the query ID from the original query instead. */ public void setQueryIdForAsyncResults(String queryId) { this.queryId = queryId; } /** * @return list of column names */ public List getColumnNames() throws SQLException { return resultSetMetaData.getColumnNames(); } /** * @return index of the column by name, index starts from zero */ public int getColumnIndex(String columnName) throws SQLException { return resultSetMetaData.getColumnIndex(columnName); } public int getInternalColumnType(int column) throws SQLException { try { return resultSetMetaData.getInternalColumnType(column); } catch (SFException ex) { throw new SnowflakeSQLLoggedException( session, ex.getSqlState(), ex.getVendorCode(), ex.getCause(), ex.getParams()); } } @Override public List getColumnFields(int column) throws SQLException { return SnowflakeUtil.mapSFExceptionToSQLException( () -> resultSetMetaData.getColumnFields(column)); } @Override public int getVectorDimension(int column) throws SQLException { return resultSetMetaData.getDimension(column); } @Override public int getVectorDimension(String columnName) throws SQLException { return resultSetMetaData.getDimension(getColumnIndex(columnName) + 1); } @Override public T unwrap(Class iface) throws SQLException { logger.trace(" T unwrap(Class iface)", false); if (!iface.isInstance(this)) { throw new SQLException( this.getClass().getName() + " not unwrappable from " + iface.getName()); } return (T) this; } @Override public boolean isWrapperFor(Class iface) throws SQLException { logger.trace("boolean isWrapperFor(Class iface)", false); return iface.isInstance(this); } @Override public boolean isAutoIncrement(int column) throws SQLException { return resultSetMetaData.getIsAutoIncrement(column); } @Override public boolean isCaseSensitive(int column) throws SQLException { int colType = getColumnType(column); switch (colType) { // Note: SF types GEOGRAPHY, GEOMETRY are also represented as VARCHAR. 
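// (Illustrative consequence of the note above: a GEOGRAPHY column reports Types.VARCHAR from
// getColumnType, so it lands in the case-sensitive group below.)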
case Types.VARCHAR: case Types.CHAR: case Types.STRUCT: case Types.ARRAY: return true; case Types.INTEGER: case Types.BIGINT: case Types.DECIMAL: case Types.DOUBLE: case Types.BOOLEAN: case Types.TIMESTAMP: case Types.TIMESTAMP_WITH_TIMEZONE: case Types.DATE: case Types.TIME: case Types.BINARY: default: return false; } } @Override public boolean isSearchable(int column) throws SQLException { return true; } @Override public boolean isCurrency(int column) throws SQLException { return false; } @Override public boolean isReadOnly(int column) throws SQLException { return true; // metadata column is always readonly } @Override public boolean isWritable(int column) throws SQLException { return false; // never writable } @Override public boolean isDefinitelyWritable(int column) throws SQLException { return false; // never writable } @Override public String getColumnClassName(int column) throws SQLException { logger.trace("String getColumnClassName(int column)", false); int type = this.getColumnType(column); return SnowflakeTypeUtil.javaTypeToClassName(type); } /** * @return column count * @throws java.sql.SQLException if failed to get column count */ @Override public int getColumnCount() throws SQLException { return resultSetMetaData.getColumnCount(); } @Override public boolean isSigned(int column) throws SQLException { return resultSetMetaData.isSigned(column); } @Override public String getColumnLabel(int column) throws SQLException { return resultSetMetaData.getColumnLabel(column); } @Override public String getColumnName(int column) throws SQLException { return resultSetMetaData.getColumnName(column); } @Override public int getPrecision(int column) throws SQLException { return resultSetMetaData.getPrecision(column); } @Override public int getScale(int column) throws SQLException { return resultSetMetaData.getScale(column); } @Override public int getColumnType(int column) throws SQLException { try { return resultSetMetaData.getColumnType(column); } catch (SFException ex) { throw new SnowflakeSQLLoggedException( session, ex.getSqlState(), ex.getVendorCode(), ex.getCause(), ex.getParams()); } } @Override public String getColumnTypeName(int column) throws SQLException { try { return resultSetMetaData.getColumnTypeName(column); } catch (SFException ex) { throw new SnowflakeSQLLoggedException( session, ex.getSqlState(), ex.getVendorCode(), ex.getCause(), ex.getParams()); } } @Override public int isNullable(int column) throws SQLException { return resultSetMetaData.isNullable(column); } @Override public String getCatalogName(int column) throws SQLException { if (this.queryType == QueryType.SYNC) { return resultSetMetaData.getCatalogName(column); } return ""; } @Override public String getSchemaName(int column) throws SQLException { if (this.queryType == QueryType.SYNC) { return resultSetMetaData.getSchemaName(column); } return ""; } @Override public String getTableName(int column) throws SQLException { if (this.queryType == QueryType.SYNC) { return resultSetMetaData.getTableName(column); } return ""; } @Override public int getColumnDisplaySize(int column) throws SQLException { return resultSetMetaData.getColumnDisplaySize(column); } public boolean isStructuredTypeColumn(int column) { return resultSetMetaData.isStructuredTypeColumn(column); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/SnowflakeResultSetSerializableV1.java ================================================ package net.snowflake.client.internal.jdbc; import static 
net.snowflake.client.internal.core.Constants.GB; import static net.snowflake.client.internal.core.Constants.MB; import static net.snowflake.client.internal.core.SessionUtil.CLIENT_ENABLE_CONSERVATIVE_MEMORY_USAGE; import static net.snowflake.client.internal.core.SessionUtil.CLIENT_MEMORY_LIMIT; import static net.snowflake.client.internal.core.SessionUtil.CLIENT_PREFETCH_THREADS; import static net.snowflake.client.internal.core.SessionUtil.CLIENT_RESULT_CHUNK_SIZE; import static net.snowflake.client.internal.core.SessionUtil.DEFAULT_CLIENT_MEMORY_LIMIT; import static net.snowflake.client.internal.core.SessionUtil.DEFAULT_CLIENT_PREFETCH_THREADS; import static net.snowflake.client.internal.jdbc.SnowflakeChunkDownloader.NoOpChunkDownloader; import static net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.internalCallMarker; import static net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.recordIfExternal; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.Serializable; import java.nio.channels.ClosedByInterruptException; import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; import java.util.Base64; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Properties; import java.util.TimeZone; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.api.resultset.SnowflakeResultSetSerializable; import net.snowflake.client.internal.common.core.SFBinaryFormat; import net.snowflake.client.internal.core.ChunkDownloader; import net.snowflake.client.internal.core.HttpClientSettingsKey; import net.snowflake.client.internal.core.MetaDataOfBinds; import net.snowflake.client.internal.core.OCSPMode; import net.snowflake.client.internal.core.ObjectMapperFactory; import net.snowflake.client.internal.core.QueryResultFormat; import net.snowflake.client.internal.core.ResultUtil; import net.snowflake.client.internal.core.SFArrowResultSet; import net.snowflake.client.internal.core.SFBaseResultSet; import net.snowflake.client.internal.core.SFBaseSession; import net.snowflake.client.internal.core.SFBaseStatement; import net.snowflake.client.internal.core.SFResultSet; import net.snowflake.client.internal.core.SFResultSetMetaData; import net.snowflake.client.internal.core.SFSession; import net.snowflake.client.internal.core.SFStatementType; import net.snowflake.client.internal.core.SessionUtil; import net.snowflake.client.internal.exception.SnowflakeSQLLoggedException; import net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.InternalCallMarker; import net.snowflake.client.internal.jdbc.telemetry.NoOpTelemetryClient; import net.snowflake.client.internal.jdbc.telemetry.Telemetry; import net.snowflake.client.internal.log.ArgSupplier; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.common.core.SnowflakeDateTimeFormat; import org.apache.arrow.memory.RootAllocator; import org.apache.arrow.vector.VectorSchemaRoot; import org.apache.arrow.vector.ipc.ArrowStreamReader; /** * This object is an intermediate object between result JSON from GS and ResultSet. Originally, it * is created from result JSON. And it can also be serializable. 
Logically, it stands for a part of
 * ResultSet.
 *
 * <p>A typical result JSON data section consists of the content of the first chunk file and file
 * metadata for the rest of the chunk files, e.g. URL, chunk size, etc. So this object consists of
 * one chunk of data and a list of chunk file entries. In actual cases, it may include only chunk
 * data or only chunk file entries.
 *
 * <p>
This object is serializable, so it can be distributed to other threads or worker nodes for * distributed processing. */ public class SnowflakeResultSetSerializableV1 implements SnowflakeResultSetSerializable, Serializable { private static final long serialVersionUID = 1L; private static final SFLogger logger = SFLoggerFactory.getLogger(SnowflakeResultSetSerializableV1.class); static final ObjectMapper mapper = ObjectMapperFactory.getObjectMapper(); private static final long LOW_MAX_MEMORY = GB; /** An Entity class to represent a chunk file metadata. */ public static class ChunkFileMetadata implements Serializable { private static final long serialVersionUID = 1L; String fileURL; int rowCount; int compressedByteSize; int uncompressedByteSize; public ChunkFileMetadata( String fileURL, int rowCount, int compressedByteSize, int uncompressedByteSize) { this.fileURL = fileURL; this.rowCount = rowCount; this.compressedByteSize = compressedByteSize; this.uncompressedByteSize = uncompressedByteSize; } public void setFileURL(String fileURL) { this.fileURL = fileURL; } public String getFileURL() { return fileURL; } public int getRowCount() { return rowCount; } public int getCompressedByteSize() { return compressedByteSize; } public int getUncompressedByteSize() { return uncompressedByteSize; } public String toString() { StringBuilder builder = new StringBuilder(1024); builder.append("RowCount: ").append(rowCount).append(", "); builder.append("CompressedSize: ").append(compressedByteSize).append(", "); builder.append("UnCompressedSize: ").append(uncompressedByteSize); return builder.toString(); } } // Below fields are for the data fields that this object wraps // For ARROW, firstChunkStringData is BASE64-encoded arrow file. // For JSON, it's string data for the json. 
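// Illustrative decode sketch for the ARROW case (commentary only; the types used here are
// already imported by this file, but this snippet is not a method of this class):
//   byte[] arrowBytes = Base64.getDecoder().decode(firstChunkStringData);
//   ArrowStreamReader reader =
//       new ArrowStreamReader(new ByteArrayInputStream(arrowBytes), rootAllocator);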
String firstChunkStringData;
int firstChunkRowCount;
int chunkFileCount;
List<ChunkFileMetadata> chunkFileMetadatas = new ArrayList<>();
byte[] firstChunkByteData;

// below fields are used for building a ChunkDownloader which
// uses http client to download chunk files
int resultPrefetchThreads;
String qrmk;
Map<String, String> chunkHeadersMap = new HashMap<>();

// Below fields are from session or statement
SnowflakeConnectString snowflakeConnectionString;
OCSPMode ocspMode;
String serverUrl;
HttpClientSettingsKey httpClientKey;
int networkTimeoutInMilli;
int authTimeout;
int socketTimeout;
int maxHttpRetries;
boolean isResultColumnCaseInsensitive;
int resultSetType;
int resultSetConcurrency;
int resultSetHoldability;
boolean treatNTZAsUTC;
boolean formatDateWithTimezone;
boolean useSessionTimezone;
boolean getDateUseNullTimezone;

// Below are some metadata fields parsed from the result JSON node
String queryId;
String finalDatabaseName;
String finalSchemaName;
String finalRoleName;
String finalWarehouseName;
SFStatementType statementType;
boolean totalRowCountTruncated;
Map<String, Object> parameters = new HashMap<>();
int columnCount;
private List<SnowflakeColumnMetadata> resultColumnMetadata = new ArrayList<>();
long resultVersion;
int numberOfBinds;
boolean arrayBindSupported;
long sendResultTime;
List<MetaDataOfBinds> metaDataOfBinds = new ArrayList<>();
QueryResultFormat queryResultFormat;
int sessionClientMemoryLimit;

// Below fields are transient, they are generated from parameters
transient TimeZone timeZone;
transient Optional<SFBaseSession> possibleSession = Optional.empty();
transient boolean honorClientTZForTimestampNTZ;
transient SnowflakeDateTimeFormat timestampNTZFormatter;
transient SnowflakeDateTimeFormat timestampLTZFormatter;
transient SnowflakeDateTimeFormat timestampTZFormatter;
transient SnowflakeDateTimeFormat dateFormatter;
transient SnowflakeDateTimeFormat timeFormatter;
transient SFBinaryFormat binaryFormatter;
transient long memoryLimit;

// Below fields are transient, they are generated on the fly.
transient JsonNode firstChunkRowset = null; // only used for JSON result
transient ChunkDownloader chunkDownloader = null;
transient RootAllocator rootAllocator = null; // only used for ARROW result
transient SFResultSetMetaData resultSetMetaData = null;
transient ResultStreamProvider resultStreamProvider = new DefaultResultStreamProvider();

/** Default constructor. */
public SnowflakeResultSetSerializableV1() {}

/**
 * This is a copy constructor.
 *
 * <p>
NOTE: The copy is NOT deep copy. * * @param toCopy the source object to be copied. */ private SnowflakeResultSetSerializableV1(SnowflakeResultSetSerializableV1 toCopy) { // Below fields are for the data fields that this object wraps this.firstChunkStringData = toCopy.firstChunkStringData; this.firstChunkRowCount = toCopy.firstChunkRowCount; this.chunkFileCount = toCopy.chunkFileCount; this.chunkFileMetadatas = toCopy.chunkFileMetadatas; this.firstChunkByteData = toCopy.firstChunkByteData; // below fields are used for building a ChunkDownloader this.resultPrefetchThreads = toCopy.resultPrefetchThreads; this.qrmk = toCopy.qrmk; this.chunkHeadersMap = toCopy.chunkHeadersMap; // Below fields are from session or statement this.snowflakeConnectionString = toCopy.snowflakeConnectionString; this.ocspMode = toCopy.ocspMode; this.serverUrl = toCopy.serverUrl; this.httpClientKey = toCopy.httpClientKey; this.networkTimeoutInMilli = toCopy.networkTimeoutInMilli; this.authTimeout = toCopy.authTimeout; this.socketTimeout = toCopy.socketTimeout; this.maxHttpRetries = toCopy.maxHttpRetries; this.isResultColumnCaseInsensitive = toCopy.isResultColumnCaseInsensitive; this.resultSetType = toCopy.resultSetType; this.resultSetConcurrency = toCopy.resultSetConcurrency; this.resultSetHoldability = toCopy.resultSetHoldability; this.treatNTZAsUTC = toCopy.treatNTZAsUTC; this.formatDateWithTimezone = toCopy.formatDateWithTimezone; this.useSessionTimezone = toCopy.useSessionTimezone; this.getDateUseNullTimezone = toCopy.getDateUseNullTimezone; // Below are some metadata fields parsed from the result JSON node this.queryId = toCopy.queryId; this.finalDatabaseName = toCopy.finalDatabaseName; this.finalSchemaName = toCopy.finalSchemaName; this.finalRoleName = toCopy.finalRoleName; this.finalWarehouseName = toCopy.finalWarehouseName; this.statementType = toCopy.statementType; this.totalRowCountTruncated = toCopy.totalRowCountTruncated; this.parameters = toCopy.parameters; this.columnCount = toCopy.columnCount; this.resultColumnMetadata = toCopy.resultColumnMetadata; this.resultVersion = toCopy.resultVersion; this.numberOfBinds = toCopy.numberOfBinds; this.arrayBindSupported = toCopy.arrayBindSupported; this.sendResultTime = toCopy.sendResultTime; this.metaDataOfBinds = toCopy.metaDataOfBinds; this.queryResultFormat = toCopy.queryResultFormat; this.possibleSession = toCopy.possibleSession; // Below fields are transient, they are generated from parameters this.timeZone = toCopy.timeZone; this.honorClientTZForTimestampNTZ = toCopy.honorClientTZForTimestampNTZ; this.timestampNTZFormatter = toCopy.timestampNTZFormatter; this.timestampLTZFormatter = toCopy.timestampLTZFormatter; this.timestampTZFormatter = toCopy.timestampTZFormatter; this.dateFormatter = toCopy.dateFormatter; this.timeFormatter = toCopy.timeFormatter; this.binaryFormatter = toCopy.binaryFormatter; this.memoryLimit = toCopy.memoryLimit; // Below fields are transient, they are generated on the fly. 
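// Note: the assignments below copy references, so the copy and the original share the same
// downloader, allocator, and metadata instances (consistent with the "NOT deep copy" note).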
this.firstChunkRowset = toCopy.firstChunkRowset; this.chunkDownloader = toCopy.chunkDownloader; this.rootAllocator = toCopy.rootAllocator; this.resultSetMetaData = toCopy.resultSetMetaData; this.resultStreamProvider = toCopy.resultStreamProvider; } /** * @param rootNode result JSON node received from GS * @param sfSession the Snowflake session * @param sfStatement the Snowflake statement * @param resultStreamProvider a ResultStreamProvider for computing a custom data source for * result-file streams * @param disableChunksPrefetch is prefetch disabled * @throws SnowflakeSQLException if failed to parse the result JSON node */ protected SnowflakeResultSetSerializableV1( JsonNode rootNode, SFBaseSession sfSession, SFBaseStatement sfStatement, ResultStreamProvider resultStreamProvider, boolean disableChunksPrefetch) throws SnowflakeSQLException { SnowflakeUtil.checkErrorAndThrowException(rootNode); // get the query id this.queryId = rootNode.path("data").path("queryId").asText(); JsonNode databaseNode = rootNode.path("data").path("finalDatabaseName"); this.finalDatabaseName = databaseNode.isNull() ? (sfSession != null ? sfSession.getDatabase() : null) : databaseNode.asText(); JsonNode schemaNode = rootNode.path("data").path("finalSchemaName"); this.finalSchemaName = schemaNode.isNull() ? (sfSession != null ? sfSession.getSchema() : null) : schemaNode.asText(); JsonNode roleNode = rootNode.path("data").path("finalRoleName"); this.finalRoleName = roleNode.isNull() ? (sfSession != null ? sfSession.getRole() : null) : roleNode.asText(); JsonNode warehouseNode = rootNode.path("data").path("finalWarehouseName"); this.finalWarehouseName = warehouseNode.isNull() ? (sfSession != null ? sfSession.getWarehouse() : null) : warehouseNode.asText(); this.statementType = SFStatementType.lookUpTypeById(rootNode.path("data").path("statementTypeId").asLong()); this.totalRowCountTruncated = rootNode.path("data").path("totalTruncated").asBoolean(); this.possibleSession = Optional.ofNullable(sfSession); logger.debug("Query id: {}", this.queryId); Optional queryResultFormat = QueryResultFormat.lookupByName(rootNode.path("data").path("queryResultFormat").asText()); this.queryResultFormat = queryResultFormat.orElse(QueryResultFormat.JSON); // extract query context and save it in current session JsonNode queryContextNode = rootNode.path("data").path("queryContext"); String queryContext = queryContextNode.isNull() ? null : queryContextNode.toString(); if (!sfSession.isAsyncSession()) { sfSession.setQueryContext(queryContext); } // extract parameters this.parameters = SessionUtil.getCommonParams(rootNode.path("data").path("parameters")); if (this.parameters.isEmpty()) { this.parameters = new HashMap<>(sfSession.getCommonParameters()); this.setStatemementLevelParameters(sfStatement.getStatementParameters()); } // initialize column metadata this.columnCount = rootNode.path("data").path("rowtype").size(); for (int i = 0; i < this.columnCount; i++) { JsonNode colNode = rootNode.path("data").path("rowtype").path(i); SnowflakeColumnMetadata columnMetadata = new SnowflakeColumnMetadata(colNode, sfSession.isJdbcTreatDecimalAsInt(), sfSession); this.resultColumnMetadata.add(columnMetadata); logger.debug("Get column metadata: {}", (ArgSupplier) columnMetadata::toString); } this.resultStreamProvider = resultStreamProvider; // process the content of first chunk. 
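// ARROW results arrive as a BASE64-encoded Arrow stream under "rowsetBase64"; JSON results
// arrive as a "rowset" node kept both as a JsonNode and as its string form.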
if (this.queryResultFormat == QueryResultFormat.ARROW) { this.firstChunkStringData = rootNode.path("data").path("rowsetBase64").asText(); this.rootAllocator = new RootAllocator(Long.MAX_VALUE); // Set first chunk row count from firstChunkStringData this.setFirstChunkRowCountForArrow(); } else { this.firstChunkRowset = rootNode.path("data").path("rowset"); if (this.firstChunkRowset == null || this.firstChunkRowset.isMissingNode()) { this.firstChunkRowCount = 0; this.firstChunkStringData = null; this.firstChunkByteData = new byte[0]; } else { this.firstChunkRowCount = this.firstChunkRowset.size(); this.firstChunkStringData = this.firstChunkRowset.toString(); } } logger.debug("First chunk row count: {}", this.firstChunkRowCount); // parse file chunks this.parseChunkFiles(rootNode, sfStatement); // result version JsonNode versionNode = rootNode.path("data").path("version"); if (!versionNode.isMissingNode()) { this.resultVersion = versionNode.longValue(); } // number of binds JsonNode numberOfBindsNode = rootNode.path("data").path("numberOfBinds"); if (!numberOfBindsNode.isMissingNode()) { this.numberOfBinds = numberOfBindsNode.intValue(); } JsonNode arrayBindSupported = rootNode.path("data").path("arrayBindSupported"); this.arrayBindSupported = !arrayBindSupported.isMissingNode() && arrayBindSupported.asBoolean(); // time result sent by GS (epoch time in millis) JsonNode sendResultTimeNode = rootNode.path("data").path("sendResultTime"); if (!sendResultTimeNode.isMissingNode()) { this.sendResultTime = sendResultTimeNode.longValue(); } logger.debug("Result version: {}", this.resultVersion); // Bind parameter metadata JsonNode bindData = rootNode.path("data").path("metaDataOfBinds"); if (!bindData.isMissingNode()) { List returnVal = new ArrayList<>(); for (JsonNode child : bindData) { int precision = child.path("precision").asInt(); boolean nullable = child.path("nullable").asBoolean(); int scale = child.path("scale").asInt(); int byteLength = child.path("byteLength").asInt(); int length = child.path("length").asInt(); String name = child.path("name").asText(); String type = child.path("type").asText(); MetaDataOfBinds param = new MetaDataOfBinds(precision, nullable, scale, byteLength, length, name, type); returnVal.add(param); } this.metaDataOfBinds = returnVal; } // setup fields from sessions. this.ocspMode = sfSession.getOCSPMode(); this.serverUrl = sfSession.getServerUrl(); this.httpClientKey = sfSession.getHttpClientKey(); this.snowflakeConnectionString = sfSession.getSnowflakeConnectionString(); this.networkTimeoutInMilli = sfSession.getNetworkTimeoutInMilli(); this.authTimeout = 0; this.maxHttpRetries = sfSession.getMaxHttpRetries(); this.isResultColumnCaseInsensitive = sfSession.isResultColumnCaseInsensitive(); this.treatNTZAsUTC = sfSession.getTreatNTZAsUTC(); this.formatDateWithTimezone = sfSession.getFormatDateWithTimezone(); this.useSessionTimezone = sfSession.getUseSessionTimezone(); this.getDateUseNullTimezone = sfSession.getGetDateUseNullTimezone(); // setup transient fields from parameter this.setupFieldsFromParameters(); if (disableChunksPrefetch) { this.chunkDownloader = new NoOpChunkDownloader(); } else { this.chunkDownloader = (this.chunkFileCount > 0) // The chunk downloader will start prefetching // first few chunk files in background thread(s) ? 
new SnowflakeChunkDownloader(this) : new NoOpChunkDownloader(); } // Setup ResultSet metadata this.resultSetMetaData = new SFResultSetMetaData( this.getResultColumnMetadata(), this.queryId, sfSession, this.isResultColumnCaseInsensitive, this.timestampNTZFormatter, this.timestampLTZFormatter, this.timestampTZFormatter, this.dateFormatter, this.timeFormatter); } public void setRootAllocator(RootAllocator rootAllocator) { this.rootAllocator = rootAllocator; } public void setQueryResultFormat(QueryResultFormat queryResultFormat) { this.queryResultFormat = queryResultFormat; } public void setChunkFileCount(int chunkFileCount) { this.chunkFileCount = chunkFileCount; } public void setFirstChunkStringData(String firstChunkStringData) { this.firstChunkStringData = firstChunkStringData; } public void setFirstChunkByteData(byte[] firstChunkByteData) { this.firstChunkByteData = firstChunkByteData; } public void setChunkDownloader(ChunkDownloader chunkDownloader) { this.chunkDownloader = chunkDownloader; } public void setResultStreamProvider(ResultStreamProvider resultStreamProvider) { this.resultStreamProvider = resultStreamProvider; } public ResultStreamProvider getResultStreamProvider() { return getResultStreamProvider(null); } public ResultStreamProvider getResultStreamProvider(InternalCallMarker internalCallMarker) { recordIfExternal( "SnowflakeResultSetSerializableV1", "getResultStreamProvider", internalCallMarker); return resultStreamProvider; } public SFResultSetMetaData getSFResultSetMetaData() { return getSFResultSetMetaData(null); } public SFResultSetMetaData getSFResultSetMetaData(InternalCallMarker internalCallMarker) { recordIfExternal( "SnowflakeResultSetSerializableV1", "getSFResultSetMetaData", internalCallMarker); return resultSetMetaData; } public int getResultSetType() { return resultSetType; } public int getResultSetConcurrency() { return resultSetConcurrency; } public int getResultSetHoldability() { return resultSetHoldability; } public SnowflakeConnectString getSnowflakeConnectString() { return snowflakeConnectionString; } public OCSPMode getOCSPMode() { return ocspMode; } public String getServerURL() { return serverUrl; } public HttpClientSettingsKey getHttpClientKey() { return httpClientKey; } public String getQrmk() { return qrmk; } public int getNetworkTimeoutInMilli() { return networkTimeoutInMilli; } public int getAuthTimeout() { return authTimeout; } public int getSocketTimeout() { return socketTimeout; } public int getMaxHttpRetries() { return maxHttpRetries; } public int getResultPrefetchThreads() { return resultPrefetchThreads; } public long getMemoryLimit() { return memoryLimit; } public Map getChunkHeadersMap() { return chunkHeadersMap; } public List getChunkFileMetadatas() { return chunkFileMetadatas; } public RootAllocator getRootAllocator() { return rootAllocator; } public QueryResultFormat getQueryResultFormat() { return queryResultFormat; } public int getChunkFileCount() { return chunkFileCount; } public boolean isArrayBindSupported() { return arrayBindSupported; } public String getQueryId() { return queryId; } public String getFinalDatabaseName() { return finalDatabaseName; } public String getFinalSchemaName() { return finalSchemaName; } public String getFinalRoleName() { return finalRoleName; } public String getFinalWarehouseName() { return finalWarehouseName; } public SFStatementType getStatementType() { return statementType; } public boolean isTotalRowCountTruncated() { return totalRowCountTruncated; } public Map getParameters() { return parameters; } public 
int getColumnCount() { return columnCount; } public List getResultColumnMetadata() { return resultColumnMetadata; } public JsonNode getAndClearFirstChunkRowset() { JsonNode firstChunkRowset = this.firstChunkRowset; this.firstChunkRowset = null; return firstChunkRowset; } public int getFirstChunkRowCount() { return firstChunkRowCount; } public long getResultVersion() { return resultVersion; } public int getNumberOfBinds() { return numberOfBinds; } public ChunkDownloader getChunkDownloader() { return chunkDownloader; } public SnowflakeDateTimeFormat getTimestampNTZFormatter() { return timestampNTZFormatter; } public SnowflakeDateTimeFormat getTimestampLTZFormatter() { return timestampLTZFormatter; } public SnowflakeDateTimeFormat getTimestampTZFormatter() { return timestampTZFormatter; } public SnowflakeDateTimeFormat getDateFormatter() { return dateFormatter; } public SnowflakeDateTimeFormat getTimeFormatter() { return timeFormatter; } public TimeZone getTimeZone() { return timeZone; } public boolean isHonorClientTZForTimestampNTZ() { return honorClientTZForTimestampNTZ; } public SFBinaryFormat getBinaryFormatter() { return binaryFormatter; } public long getSendResultTime() { return sendResultTime; } public List getMetaDataOfBinds() { return metaDataOfBinds; } public String getFirstChunkStringData() { return firstChunkStringData; } public byte[] getFirstChunkByteData() { return firstChunkByteData; } public boolean getTreatNTZAsUTC() { return treatNTZAsUTC; } public boolean getFormatDateWithTimeZone() { return formatDateWithTimezone; } public boolean getUseSessionTimezone() { return useSessionTimezone; } public boolean getGetDateUseNullTimezone() { return getDateUseNullTimezone; } public Optional getSession() { return getSession(null); } public Optional getSession(InternalCallMarker internalCallMarker) { recordIfExternal("SnowflakeResultSetSerializableV1", "getSession", internalCallMarker); return possibleSession; } /** * A factory function to create SnowflakeResultSetSerializable object from result JSON node, using * the DefaultResultStreamProvider. * * @param rootNode result JSON node received from GS * @param sfSession the Snowflake session * @param sfStatement the Snowflake statement * @return processed ResultSetSerializable object * @throws SnowflakeSQLException if failed to parse the result JSON node */ public static SnowflakeResultSetSerializableV1 create( JsonNode rootNode, SFBaseSession sfSession, SFBaseStatement sfStatement) throws SnowflakeSQLException { return create(rootNode, sfSession, sfStatement, (InternalCallMarker) null); } public static SnowflakeResultSetSerializableV1 create( JsonNode rootNode, SFBaseSession sfSession, SFBaseStatement sfStatement, InternalCallMarker internalCallMarker) throws SnowflakeSQLException { recordIfExternal("SnowflakeResultSetSerializableV1", "create", internalCallMarker); return create( rootNode, sfSession, sfStatement, new DefaultResultStreamProvider(), internalCallMarker); } /** * A factory function to create SnowflakeResultSetSerializable object from result JSON node, with * an overridable ResultStreamProvider. 
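 *
 * <p>(Illustrative note: a custom ResultStreamProvider lets a caller source chunk-file streams
 * through its own transport; the overloads above fall back to DefaultResultStreamProvider.)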
   *
   * @param rootNode result JSON node received from GS
   * @param sfSession the Snowflake session
   * @param sfStatement the Snowflake statement
   * @param resultStreamProvider a ResultStreamProvider for computing a custom data source for
   *     result-file streams
   * @return processed ResultSetSerializable object
   * @throws SnowflakeSQLException if failed to parse the result JSON node
   */
  public static SnowflakeResultSetSerializableV1 create(
      JsonNode rootNode,
      SFBaseSession sfSession,
      SFBaseStatement sfStatement,
      ResultStreamProvider resultStreamProvider)
      throws SnowflakeSQLException {
    return create(rootNode, sfSession, sfStatement, resultStreamProvider, null);
  }

  public static SnowflakeResultSetSerializableV1 create(
      JsonNode rootNode,
      SFBaseSession sfSession,
      SFBaseStatement sfStatement,
      ResultStreamProvider resultStreamProvider,
      InternalCallMarker internalCallMarker)
      throws SnowflakeSQLException {
    recordIfExternal("SnowflakeResultSetSerializableV1", "create", internalCallMarker);
    logger.trace("Entering create()", false);
    return new SnowflakeResultSetSerializableV1(
        rootNode, sfSession, sfStatement, resultStreamProvider, false);
  }

  /**
   * A factory function for internal usage only. It creates SnowflakeResultSetSerializableV1 with
   * NoOpChunkDownloader, which disables chunk prefetch.
   *
   * @param rootNode JSON root node
   * @param sfSession SFBaseSession
   * @param sfStatement SFBaseStatement
   * @return SnowflakeResultSetSerializableV1 with NoOpChunkDownloader
   * @throws SnowflakeSQLException if an error occurs
   */
  public static SnowflakeResultSetSerializableV1 createWithChunksPrefetchDisabled(
      JsonNode rootNode, SFBaseSession sfSession, SFBaseStatement sfStatement)
      throws SnowflakeSQLException {
    logger.trace("Entering createWithChunksPrefetchDisabled()", false);
    return new SnowflakeResultSetSerializableV1(
        rootNode, sfSession, sfStatement, new DefaultResultStreamProvider(), true);
  }

  /**
   * Some fields are derived from this.parameters, so regenerate them from this.parameters instead
   * of serializing them.
*/ private void setupFieldsFromParameters() { String sqlTimestampFormat = (String) ResultUtil.effectiveParamValue(this.parameters, "TIMESTAMP_OUTPUT_FORMAT"); // Special handling of specialized formatters, use a helper function this.timestampNTZFormatter = ResultUtil.specializedFormatter( this.parameters, "timestamp_ntz", "TIMESTAMP_NTZ_OUTPUT_FORMAT", sqlTimestampFormat); this.timestampLTZFormatter = ResultUtil.specializedFormatter( this.parameters, "timestamp_ltz", "TIMESTAMP_LTZ_OUTPUT_FORMAT", sqlTimestampFormat); this.timestampTZFormatter = ResultUtil.specializedFormatter( this.parameters, "timestamp_tz", "TIMESTAMP_TZ_OUTPUT_FORMAT", sqlTimestampFormat); String sqlDateFormat = (String) ResultUtil.effectiveParamValue(this.parameters, "DATE_OUTPUT_FORMAT"); this.dateFormatter = SnowflakeDateTimeFormat.fromSqlFormat(sqlDateFormat); logger.debug( "Sql date format: {}, java date format: {}", sqlDateFormat, (ArgSupplier) () -> this.dateFormatter.toSimpleDateTimePattern()); String sqlTimeFormat = (String) ResultUtil.effectiveParamValue(this.parameters, "TIME_OUTPUT_FORMAT"); this.timeFormatter = SnowflakeDateTimeFormat.fromSqlFormat(sqlTimeFormat); logger.debug( "Sql time format: {}, java time format: {}", sqlTimeFormat, (ArgSupplier) () -> this.timeFormatter.toSimpleDateTimePattern()); String timeZoneName = (String) ResultUtil.effectiveParamValue(this.parameters, "TIMEZONE"); this.timeZone = TimeZone.getTimeZone(timeZoneName); this.honorClientTZForTimestampNTZ = (boolean) ResultUtil.effectiveParamValue( this.parameters, "CLIENT_HONOR_CLIENT_TZ_FOR_TIMESTAMP_NTZ"); logger.debug("Honoring client TZ for timestamp_ntz? {}", this.honorClientTZForTimestampNTZ); String binaryFmt = (String) ResultUtil.effectiveParamValue(this.parameters, "BINARY_OUTPUT_FORMAT"); this.binaryFormatter = SFBinaryFormat.getSafeOutputFormat(binaryFmt); } /** * Parse the chunk file nodes from result JSON node * * @param rootNode result JSON node received from GS * @param sfStatement the snowflake statement */ private void parseChunkFiles(JsonNode rootNode, SFBaseStatement sfStatement) { JsonNode chunksNode = rootNode.path("data").path("chunks"); if (!chunksNode.isMissingNode()) { this.chunkFileCount = chunksNode.size(); // Try to get the Query Result Master Key JsonNode qrmkNode = rootNode.path("data").path("qrmk"); this.qrmk = qrmkNode.isMissingNode() ? null : qrmkNode.textValue(); // Determine the prefetch thread count and memoryLimit if (this.chunkFileCount > 0) { logger.debug("#chunks: {}, initialize chunk downloader", this.chunkFileCount); adjustMemorySettings(sfStatement); // Parse chunk header JsonNode chunkHeaders = rootNode.path("data").path("chunkHeaders"); if (chunkHeaders != null && !chunkHeaders.isMissingNode()) { Iterator> chunkHeadersIter = chunkHeaders.fields(); while (chunkHeadersIter.hasNext()) { Map.Entry chunkHeader = chunkHeadersIter.next(); logger.debug( "Add header key: {}, value: {}", chunkHeader.getKey(), chunkHeader.getValue().asText()); this.chunkHeadersMap.put(chunkHeader.getKey(), chunkHeader.getValue().asText()); } } // parse chunk files metadata e.g. 
url and row count for (int idx = 0; idx < this.chunkFileCount; idx++) { JsonNode chunkNode = chunksNode.get(idx); String url = chunkNode.path("url").asText(); int rowCount = chunkNode.path("rowCount").asInt(); int compressedSize = chunkNode.path("compressedSize").asInt(); int uncompressedSize = chunkNode.path("uncompressedSize").asInt(); this.chunkFileMetadatas.add( new ChunkFileMetadata(url, rowCount, compressedSize, uncompressedSize)); logger.debug( "Add chunk, url: {} rowCount: {} " + "compressedSize: {} uncompressedSize: {}", url, rowCount, compressedSize, uncompressedSize); } } } } private void adjustMemorySettings(SFBaseStatement sfStatement) { this.resultPrefetchThreads = DEFAULT_CLIENT_PREFETCH_THREADS; if (this.statementType.isSelect() && this.parameters.containsKey(CLIENT_ENABLE_CONSERVATIVE_MEMORY_USAGE) && (boolean) this.parameters.get(CLIENT_ENABLE_CONSERVATIVE_MEMORY_USAGE)) { // use conservative memory settings this.resultPrefetchThreads = sfStatement.getConservativePrefetchThreads(); this.memoryLimit = sfStatement.getConservativeMemoryLimit(); int chunkSize = (int) this.parameters.get(CLIENT_RESULT_CHUNK_SIZE); logger.debug( "Enable conservative memory usage with prefetchThreads: {} and memoryLimit: {} and " + "resultChunkSize: {}", this.resultPrefetchThreads, this.memoryLimit, chunkSize); } else { // prefetch threads if (this.parameters.get(CLIENT_PREFETCH_THREADS) != null) { this.resultPrefetchThreads = (int) this.parameters.get(CLIENT_PREFETCH_THREADS); } this.memoryLimit = initMemoryLimit(this.parameters); } long maxChunkSize = (int) this.parameters.get(CLIENT_RESULT_CHUNK_SIZE) * MB; if (queryResultFormat == QueryResultFormat.ARROW && Runtime.getRuntime().maxMemory() < LOW_MAX_MEMORY && memoryLimit * 2 + maxChunkSize > Runtime.getRuntime().maxMemory()) { memoryLimit = Runtime.getRuntime().maxMemory() / 2 - maxChunkSize; logger.debug( "To avoid OOM for arrow buffer allocation, " + "memoryLimit {} should be less than half of the " + "maxMemory {} + maxChunkSize {}", memoryLimit, Runtime.getRuntime().maxMemory(), maxChunkSize); } if (sfStatement.getSFBaseSession(internalCallMarker()).getMemoryLimitForTesting() != SFBaseSession.MEMORY_LIMIT_UNSET) { memoryLimit = sfStatement.getSFBaseSession(internalCallMarker()).getMemoryLimitForTesting(); logger.debug("memoryLimit changed for testing purposes to {}", memoryLimit); } } /** * Calculate memory limit in bytes * * @param parameters The parameters for result JSON node * @return memory limit in bytes */ private static long initMemoryLimit(Map parameters) { // default setting long memoryLimit = DEFAULT_CLIENT_MEMORY_LIMIT * 1024 * 1024; long maxMemoryToUse = Runtime.getRuntime().maxMemory() * 8 / 10; if (parameters.get(CLIENT_MEMORY_LIMIT) != null) { // use the settings from the customer memoryLimit = (int) parameters.get(CLIENT_MEMORY_LIMIT) * 1024L * 1024L; if (DEFAULT_CLIENT_MEMORY_LIMIT == (int) parameters.get(CLIENT_MEMORY_LIMIT)) { // if the memory limit is the default value and best effort memory is enabled // set the memory limit to 80% of the maximum as the best effort memoryLimit = Math.max(memoryLimit, maxMemoryToUse); } } // always make sure memoryLimit <= 80% of the maximum memoryLimit = Math.min(memoryLimit, maxMemoryToUse); logger.debug("Set allowed memory usage to {} bytes", memoryLimit); return memoryLimit; } /** * If statement parameter values are available, set those values in the resultset list of * parameters so they overwrite the session-level cached parameter values. 
   *
   * @param stmtParamsMap statement-level parameter name/value pairs
   */
  private void setStatemementLevelParameters(Map<String, Object> stmtParamsMap) {
    for (Map.Entry<String, Object> entry : stmtParamsMap.entrySet()) {
      this.parameters.put(entry.getKey(), entry.getValue());
    }
  }

  /**
   * Set up all transient fields based on serialized fields and System Runtime.
   *
   * @throws SQLException if it fails to set up any transient fields
   */
  private void setupTransientFields() throws SQLException {
    // Setup transient fields from serialized fields
    setupFieldsFromParameters();

    // Setup memory limitation from parameters and System Runtime.
    this.memoryLimit = initMemoryLimit(this.parameters);

    this.resultStreamProvider = new DefaultResultStreamProvider();

    // Create below transient fields on the fly.
    if (QueryResultFormat.ARROW.equals(this.queryResultFormat)) {
      this.rootAllocator = new RootAllocator(Long.MAX_VALUE);
      this.firstChunkRowset = null;
    } else {
      this.rootAllocator = null;
      try {
        this.firstChunkRowset =
            (this.firstChunkStringData != null) ? mapper.readTree(this.firstChunkStringData) : null;
      } catch (IOException ex) {
        throw new SnowflakeSQLLoggedException(
            queryId,
            possibleSession.orElse(/* session = */ null),
            "The JSON data is invalid. The error is: " + ex.getMessage());
      }
    }

    // Setup ResultSet metadata
    this.resultSetMetaData =
        new SFResultSetMetaData(
            this.getResultColumnMetadata(),
            this.queryId,
            null, // This is session less
            this.isResultColumnCaseInsensitive,
            this.timestampNTZFormatter,
            this.timestampLTZFormatter,
            this.timestampTZFormatter,
            this.dateFormatter,
            this.timeFormatter);

    // Allocate chunk downloader if necessary
    chunkDownloader =
        (this.chunkFileCount > 0) ? new SnowflakeChunkDownloader(this) : new NoOpChunkDownloader();

    this.possibleSession = Optional.empty(); // we don't have session object during deserializing
  }

  /**
   * Split this object into small pieces based on the user specified data size.
   *
   * @param maxSizeInBytes the expected max data size wrapped in the result ResultSetSerializables
   *     object. NOTE: if a result chunk size is greater than this value, the ResultSetSerializable
   *     object will include one result chunk.
   * @return a list of SnowflakeResultSetSerializable
   * @throws SQLException if it fails to split objects.
   */
  public List<SnowflakeResultSetSerializable> splitBySize(long maxSizeInBytes) throws SQLException {
    List<SnowflakeResultSetSerializable> resultSetSerializables = new ArrayList<>();

    if (this.chunkFileMetadatas.isEmpty() && this.firstChunkStringData == null) {
      throw new SnowflakeSQLLoggedException(
          queryId,
          this.possibleSession.orElse(/* session = */ null),
          "The Result Set serializable is invalid.");
    }

    // In the beginning, only the first data chunk is included in the result
    // serializable, so the chunk files are removed from the copy.
    // NOTE: make sure to handle the case that the first data chunk doesn't
    // exist.
    SnowflakeResultSetSerializableV1 curResultSetSerializable =
        new SnowflakeResultSetSerializableV1(this);
    curResultSetSerializable.chunkFileMetadatas = new ArrayList<>();
    curResultSetSerializable.chunkFileCount = 0;

    for (int idx = 0; idx < this.chunkFileCount; idx++) {
      ChunkFileMetadata curChunkFileMetadata = this.getChunkFileMetadatas().get(idx);

      // If the serializable object has reached the max size,
      // save the current one and create a new one.
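      // For example (illustrative figures, not from the source): with maxSizeInBytes =
      // 100 MB, no first-chunk data, and chunk files of 30 MB, 40 MB, and 120 MB
      // (uncompressed), this loop yields two serializables: one holding the 30 MB and
      // 40 MB chunks (70 MB total) and one holding the 120 MB chunk alone. A chunk file
      // is never split, so a chunk larger than maxSizeInBytes still travels whole in
      // its own serializable.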
if ((curResultSetSerializable.getUncompressedDataSizeInBytes() > 0) && (maxSizeInBytes < (curResultSetSerializable.getUncompressedDataSizeInBytes() + curChunkFileMetadata.getUncompressedByteSize()))) { resultSetSerializables.add(curResultSetSerializable); // Create new result serializable and reset it as empty curResultSetSerializable = new SnowflakeResultSetSerializableV1(this); curResultSetSerializable.chunkFileMetadatas = new ArrayList<>(); curResultSetSerializable.chunkFileCount = 0; curResultSetSerializable.firstChunkStringData = null; curResultSetSerializable.firstChunkRowCount = 0; curResultSetSerializable.firstChunkRowset = null; curResultSetSerializable.firstChunkByteData = new byte[0]; } // Append this chunk file to result serializable object curResultSetSerializable.getChunkFileMetadatas().add(curChunkFileMetadata); curResultSetSerializable.chunkFileCount++; } // Add the last result serializable object into result. resultSetSerializables.add(curResultSetSerializable); return resultSetSerializables; } /** * Get ResultSet from the ResultSet Serializable object so that the user can access the data. * * @param resultSetRetrieveConfig The extra info to retrieve the result set. * @return a ResultSet which represents for the data wrapped in the object */ public ResultSet getResultSet(ResultSetRetrieveConfig resultSetRetrieveConfig) throws SQLException { return getResultSet(resultSetRetrieveConfig, null); } public ResultSet getResultSet( ResultSetRetrieveConfig resultSetRetrieveConfig, InternalCallMarker internalCallMarker) throws SQLException { recordIfExternal("SnowflakeResultSetSerializableV1", "getResultSet", internalCallMarker); // Adjust OCSP cache server if necessary. try { SessionUtil.resetOCSPUrlIfNecessary(resultSetRetrieveConfig.getSfFullURL()); } catch (IOException e) { throw new SnowflakeSQLLoggedException( queryId, /*session = */ null, // There is no connection ErrorCode.INTERNAL_ERROR, "Hit exception when adjusting OCSP cache server. The original message is: " + e.getMessage()); } return getResultSetInternal(resultSetRetrieveConfig.getProxyProperties(), internalCallMarker); } /** * Get ResultSet from the ResultSet Serializable object so that the user can access the data. * * @param info The proxy sever information if proxy is necessary. * @return a ResultSet which represents for the data wrapped in the object */ private ResultSet getResultSetInternal(Properties info, InternalCallMarker internalCallMarker) throws SQLException { // Setup proxy info if necessary this.httpClientKey = SnowflakeUtil.convertProxyPropertiesToHttpClientKey(ocspMode, info); // Setup transient fields setupTransientFields(); // This result set is sessionless, so it doesn't support telemetry. Telemetry telemetryClient = new NoOpTelemetryClient(); // The use case is distributed processing, so sortResult is not necessary. boolean sortResult = false; // Setup base result set. 
SFBaseResultSet sfBaseResultSet = null; switch (getQueryResultFormat()) { case ARROW: { sfBaseResultSet = new SFArrowResultSet(this, telemetryClient, sortResult); break; } case JSON: { sfBaseResultSet = new SFResultSet( this, getSession(internalCallMarker).orElse(new SFSession()), telemetryClient, sortResult); break; } default: throw new SnowflakeSQLLoggedException( queryId, this.possibleSession.orElse(/*session = */ null), ErrorCode.INTERNAL_ERROR, "Unsupported query result format: " + getQueryResultFormat().name()); } // Create result set SnowflakeResultSetV1 resultSetV1 = new SnowflakeResultSetV1(sfBaseResultSet, this); return resultSetV1; } // Set the row count for first result chunk by parsing the chunk data. private void setFirstChunkRowCountForArrow() throws SnowflakeSQLException { firstChunkRowCount = 0; firstChunkByteData = new byte[0]; // If the first chunk doesn't exist or empty, set it as 0 if (firstChunkStringData == null || firstChunkStringData.isEmpty()) { firstChunkRowCount = 0; firstChunkByteData = new byte[0]; } // Parse the Arrow result chunk else if (getQueryResultFormat().equals(QueryResultFormat.ARROW)) { // Below code is developed based on SFArrowResultSet.buildFirstChunk // and ArrowResultChunk.readArrowStream() byte[] bytes = Base64.getDecoder().decode(firstChunkStringData); firstChunkByteData = bytes; VectorSchemaRoot root = null; RootAllocator localRootAllocator = (rootAllocator != null) ? rootAllocator : new RootAllocator(Long.MAX_VALUE); try (ByteArrayInputStream is = new ByteArrayInputStream(bytes); ArrowStreamReader reader = new ArrowStreamReader(is, localRootAllocator)) { root = reader.getVectorSchemaRoot(); while (reader.loadNextBatch()) { firstChunkRowCount += root.getRowCount(); root.clear(); } } catch (ClosedByInterruptException cbie) { // SNOW-755756: sometimes while reading from arrow stream, this exception can occur with // null message. // Log an interrupted message instead of throwing this exception. logger.debug("Interrupted when loading Arrow first chunk row count.", cbie); } catch (Exception ex) { throw new SnowflakeSQLLoggedException( queryId, possibleSession.orElse(/* session = */ null), ErrorCode.INTERNAL_ERROR, "Fail to retrieve row count for first arrow chunk: " + ex.getMessage()); } finally { if (root != null) { root.clear(); } } } else { // This shouldn't happen throw new SnowflakeSQLLoggedException( queryId, this.possibleSession.orElse(/*session = */ null), ErrorCode.INTERNAL_ERROR, "setFirstChunkRowCountForArrow() should only be called for Arrow."); } } /** * Retrieve total row count included in the ResultSet Serializable object. * *
   * <p>GS sends the data of the first chunk and the metadata of the other chunks, if they exist,
   * to the client, so this function calculates the row count for all of them.
   *
   * @return the total row count from metadata
   */
  public long getRowCount() throws SQLException {
    // Get row count for first chunk if it exists.
    long totalRowCount = firstChunkRowCount;

    // Get row count from chunk file metadata
    for (ChunkFileMetadata chunkFileMetadata : chunkFileMetadatas) {
      totalRowCount += chunkFileMetadata.rowCount;
    }
    return totalRowCount;
  }
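  /*
   * Usage sketch (illustrative, not part of this file): splitting a large result for
   * distributed processing through the public API. The names `conn`, `sql`, and
   * `sfFullURL` are assumed to exist, and the ResultSetRetrieveConfig builder calls are
   * assumptions based on the public SnowflakeResultSetSerializable surface.
   *
   *   ResultSet rs = conn.createStatement().executeQuery(sql);
   *   List<SnowflakeResultSetSerializable> parts =
   *       rs.unwrap(SnowflakeResultSet.class).getResultSetSerializables(100 * 1024 * 1024);
   *
   *   // Ship each `part` to a worker node; there, materialize and read the rows:
   *   ResultSet workerRs =
   *       part.getResultSet(
   *           SnowflakeResultSetSerializable.ResultSetRetrieveConfig.Builder.newInstance()
   *               .setProxyProperties(new java.util.Properties())
   *               .setSfFullURL(sfFullURL)
   *               .build());
   *   while (workerRs.next()) {
   *     // consume the row
   *   }
   */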
  /**
   * Retrieve compressed data size in the ResultSet Serializable object.
   *
   * <p>GS sends the data of the first chunk and the metadata of the other chunks, if they exist,
   * to the client, so this function calculates the data size for all of them. NOTE: if the first
   * chunk exists, this function uses its uncompressed data size as its compressed data size in
   * this calculation, though it is not compressed.
   *
   * @return the total compressed data size in bytes from metadata
   */
  public long getCompressedDataSizeInBytes() throws SQLException {
    long totalCompressedDataSize = 0;

    // Count the data size for the first chunk if it exists.
    if (firstChunkStringData != null) {
      totalCompressedDataSize += firstChunkStringData.length();
    }

    for (ChunkFileMetadata chunkFileMetadata : chunkFileMetadatas) {
      totalCompressedDataSize += chunkFileMetadata.compressedByteSize;
    }
    return totalCompressedDataSize;
  }
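  // For example (illustrative figures, not from the source): with 2 MB of inline
  // first-chunk JSON text and two chunk files reporting compressedByteSize of 10 MB and
  // 12 MB, getCompressedDataSizeInBytes() returns 2 + 10 + 12 = 24 MB. The first chunk
  // is counted at its uncompressed size because it is delivered inline and never
  // compressed.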
  /**
   * Retrieve uncompressed data size in the ResultSet Serializable object.
   *
   * <p>GS sends the data of the first chunk and the metadata of the other chunks, if they exist,
   * to the client, so this function calculates the data size for all of them.
   *
   * @return the total uncompressed data size in bytes from metadata
   */
  public long getUncompressedDataSizeInBytes() throws SQLException {
    long totalUncompressedDataSize = 0;

    // Count the data size for the first chunk if it exists.
    if (firstChunkStringData != null) {
      totalUncompressedDataSize += firstChunkStringData.length();
    }

    for (ChunkFileMetadata chunkFileMetadata : chunkFileMetadatas) {
      totalUncompressedDataSize += chunkFileMetadata.uncompressedByteSize;
    }
    return totalUncompressedDataSize;
  }

  public String toString() {
    StringBuilder builder = new StringBuilder(16 * 1024);
    builder.append("hasFirstChunk: ").append(this.firstChunkStringData != null).append("\n");
    builder.append("RowCountInFirstChunk: ").append(this.firstChunkRowCount).append("\n");
    builder.append("queryResultFormat: ").append(this.queryResultFormat).append("\n");
    builder.append("chunkFileCount: ").append(this.chunkFileCount).append("\n");
    for (ChunkFileMetadata chunkFileMetadata : chunkFileMetadatas) {
      builder.append("\t").append(chunkFileMetadata.toString()).append("\n");
    }
    return builder.toString();
  }
}

================================================
FILE: src/main/java/net/snowflake/client/internal/jdbc/SnowflakeResultSetV1.java
================================================
package net.snowflake.client.internal.jdbc;

import java.io.InputStream;
import java.io.Reader;
import java.math.BigDecimal;
import java.net.URL;
import java.sql.Array;
import java.sql.Blob;
import java.sql.Clob;
import java.sql.Date;
import java.sql.NClob;
import java.sql.Ref;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.RowId;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.sql.SQLXML;
import java.sql.Statement;
import java.sql.Time;
import java.sql.Timestamp;
import java.util.Calendar;
import java.util.List;
import java.util.Map;
import java.util.TimeZone;
import net.snowflake.client.api.exception.ErrorCode;
import net.snowflake.client.api.exception.SnowflakeSQLException;
import net.snowflake.client.api.resultset.SnowflakeResultSet;
import net.snowflake.client.api.resultset.SnowflakeResultSetSerializable;
import net.snowflake.client.internal.api.implementation.resultset.SnowflakeBaseResultSet;
import net.snowflake.client.internal.api.implementation.statement.SnowflakeStatementImpl;
import net.snowflake.client.internal.core.SFBaseResultSet;
import net.snowflake.client.internal.core.SFException;
import net.snowflake.client.internal.core.arrow.StructObjectWrapper;
import net.snowflake.client.internal.log.SFLogger;
import net.snowflake.client.internal.log.SFLoggerFactory;

/** Snowflake ResultSet implementation */
public class SnowflakeResultSetV1 extends SnowflakeBaseResultSet
    implements SnowflakeResultSet, ResultSet {
  private static final SFLogger logger = SFLoggerFactory.getLogger(SnowflakeResultSetV1.class);
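  // Usage sketch (illustrative; `stmt` is an assumed java.sql.Statement): callers reach
  // the Snowflake-specific surface of this class by unwrapping the standard JDBC
  // interface.
  //
  //   ResultSet rs = stmt.executeQuery("select 1");
  //   if (rs.isWrapperFor(SnowflakeResultSet.class)) {
  //     String queryId = rs.unwrap(SnowflakeResultSet.class).getQueryID();
  //   }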
  /**
   * Constructor takes an input stream from the API response that we get from executing a SQL
   * statement.
   *
   * <p>
The constructor will fetch the first row (if any) so that it can initialize the * ResultSetMetaData. * * @param sfBaseResultSet snowflake core base result rest object * @param statement query statement that generates this result set * @throws SQLException if failed to construct snowflake result set metadata */ public SnowflakeResultSetV1(SFBaseResultSet sfBaseResultSet, Statement statement) throws SQLException { super(statement); this.sfBaseResultSet = sfBaseResultSet; this.resultSetMetaData = new SnowflakeResultSetMetaDataV1(sfBaseResultSet.getMetaData()); } /** * Constructor takes a result set serializable object to create a sessionless result set. * * @param sfBaseResultSet snowflake core base result rest object * @param resultSetSerializable The result set serializable object which includes all metadata to * create the result set * @throws SQLException if fails to create the result set object */ public SnowflakeResultSetV1( SFBaseResultSet sfBaseResultSet, SnowflakeResultSetSerializableV1 resultSetSerializable) throws SQLException { super(resultSetSerializable); this.sfBaseResultSet = sfBaseResultSet; this.resultSetMetaData = new SnowflakeResultSetMetaDataV1(sfBaseResultSet.getMetaData()); } /** * Advance to next row * * @return true if next row exists, false otherwise * @throws SQLException if failed to move to the next row */ @Override public boolean next() throws SQLException { // exception try { return sfBaseResultSet.next(); } catch (SFException ex) { throw new SnowflakeSQLException( ex.getCause(), ex.getSqlState(), ex.getVendorCode(), ex.getParams()); } } @Override public void close() throws SQLException { close(true); } public void close(boolean removeClosedResultSetFromStatement) throws SQLException { // no SQLException is raised. sfBaseResultSet.close(); if (removeClosedResultSetFromStatement && statement.isWrapperFor(SnowflakeStatementImpl.class)) { statement.unwrap(SnowflakeStatementImpl.class).removeClosedResultSet(this); } } public String getQueryID() { return sfBaseResultSet.getQueryId(); } public boolean wasNull() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return sfBaseResultSet.wasNull(); } public String getString(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); try { return sfBaseResultSet.getString(columnIndex); } catch (SFException ex) { throw new SnowflakeSQLException( ex.getCause(), ex.getSqlState(), ex.getVendorCode(), ex.getParams()); } } public boolean getBoolean(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); try { return sfBaseResultSet.getBoolean(columnIndex); } catch (SFException ex) { throw new SnowflakeSQLException( ex.getCause(), ex.getSqlState(), ex.getVendorCode(), ex.getParams()); } } @Override public byte getByte(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); try { return sfBaseResultSet.getByte(columnIndex); } catch (SFException ex) { throw new SnowflakeSQLException( ex.getCause(), ex.getSqlState(), ex.getVendorCode(), ex.getParams()); } } public short getShort(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); try { return sfBaseResultSet.getShort(columnIndex); } catch (SFException ex) { throw new SnowflakeSQLException( ex.getCause(), ex.getSqlState(), ex.getVendorCode(), ex.getParams()); } } public int getInt(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); try { return sfBaseResultSet.getInt(columnIndex); } catch (SFException ex) { throw new SnowflakeSQLException( ex.getCause(), 
ex.getSqlState(), ex.getVendorCode(), ex.getParams()); } } public long getLong(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); try { return sfBaseResultSet.getLong(columnIndex); } catch (SFException ex) { throw new SnowflakeSQLException( ex.getCause(), ex.getSqlState(), ex.getVendorCode(), ex.getParams()); } } public float getFloat(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); try { return sfBaseResultSet.getFloat(columnIndex); } catch (SFException ex) { throw new SnowflakeSQLException( ex.getCause(), ex.getSqlState(), ex.getVendorCode(), ex.getParams()); } } public double getDouble(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); try { return sfBaseResultSet.getDouble(columnIndex); } catch (SFException ex) { throw new SnowflakeSQLException( ex.getCause(), ex.getSqlState(), ex.getVendorCode(), ex.getParams()); } } public Date getDate(int columnIndex, TimeZone tz) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); try { return sfBaseResultSet.getDate(columnIndex, tz); } catch (SFException ex) { throw new SnowflakeSQLException( ex.getCause(), ex.getSqlState(), ex.getVendorCode(), ex.getParams()); } } public Time getTime(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); try { return sfBaseResultSet.getTime(columnIndex); } catch (SFException ex) { throw new SnowflakeSQLException( ex.getCause(), ex.getSqlState(), ex.getVendorCode(), ex.getParams()); } } public Timestamp getTimestamp(int columnIndex, TimeZone tz) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); try { return sfBaseResultSet.getTimestamp(columnIndex, tz); } catch (SFException ex) { throw new SnowflakeSQLException( ex.getCause(), ex.getSqlState(), ex.getVendorCode(), ex.getParams()); } } public ResultSetMetaData getMetaData() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return resultSetMetaData; } public Object getObject(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); Object object = SnowflakeUtil.mapSFExceptionToSQLException(() -> sfBaseResultSet.getObject(columnIndex)); if (object == null) { return null; } if (object instanceof StructObjectWrapper) { StructObjectWrapper structObjectWrapper = (StructObjectWrapper) object; if (resultSetMetaData.isStructuredTypeColumn(columnIndex) && structObjectWrapper.getJsonString() != null) { return structObjectWrapper.getJsonString(); } return structObjectWrapper.getObject(); } return object; } public Array getArray(int columnIndex) throws SQLException { if (!resultSetMetaData.isStructuredTypeColumn(columnIndex)) { throw new SnowflakeLoggedFeatureNotSupportedException(session); } raiseSQLExceptionIfResultSetIsClosed(); try { return sfBaseResultSet.getArray(columnIndex); } catch (SFException ex) { throw new SnowflakeSQLException( ex.getCause(), ex.getSqlState(), ex.getVendorCode(), ex.getParams()); } } public BigDecimal getBigDecimal(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); try { return sfBaseResultSet.getBigDecimal(columnIndex); } catch (SFException ex) { throw new SnowflakeSQLException( ex.getCause(), ex.getSqlState(), ex.getVendorCode(), ex.getParams()); } } @Deprecated public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); try { return sfBaseResultSet.getBigDecimal(columnIndex, scale); } catch (SFException ex) { throw new SnowflakeSQLException( ex.getCause(), ex.getSqlState(), ex.getVendorCode(), 
ex.getParams()); } } public byte[] getBytes(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); try { return sfBaseResultSet.getBytes(columnIndex); } catch (SFException ex) { throw new SnowflakeSQLException( ex.getCause(), ex.getSqlState(), ex.getVendorCode(), ex.getParams()); } } public int getRow() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return sfBaseResultSet.getRow(); } public boolean isFirst() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return sfBaseResultSet.isFirst(); } public boolean isClosed() throws SQLException { // no exception is raised. return sfBaseResultSet.isClosed(); } @Override public boolean isLast() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return sfBaseResultSet.isLast(); } @Override public boolean isAfterLast() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return sfBaseResultSet.isAfterLast(); } @Override public boolean isBeforeFirst() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return sfBaseResultSet.isBeforeFirst(); } @Override public boolean isWrapperFor(Class iface) throws SQLException { logger.trace("boolean isWrapperFor(Class iface)", false); return iface.isInstance(this); } @SuppressWarnings("unchecked") @Override public T unwrap(Class iface) throws SQLException { logger.trace(" T unwrap(Class iface)", false); if (!iface.isInstance(this)) { throw new SQLException( this.getClass().getName() + " not unwrappable from " + iface.getName()); } return (T) this; } /** * Get a list of ResultSetSerializables for the ResultSet in order to parallel processing * * @param maxSizeInBytes The expected max data size wrapped in the ResultSetSerializables object. * NOTE: this parameter is intended to make the data size in each serializable object to be * less than it. But if user specifies a small value which may be smaller than the data size * of one result chunk. So the definition can't be guaranteed completely. For this special * case, one serializable object is used to wrap the data chunk. * @return a list of ResultSetSerializables. * @throws SQLException If it fails to get the ResultSetSerializable objects. 
*/ @Override public List getResultSetSerializables(long maxSizeInBytes) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return sfBaseResultSet.getResultSetSerializables(maxSizeInBytes); } /** Empty result set */ public static class EmptyResultSet implements ResultSet { private boolean isClosed; public EmptyResultSet() { isClosed = false; } private void raiseSQLExceptionIfResultSetIsClosed() throws SQLException { if (isClosed()) { throw new SnowflakeSQLException(ErrorCode.RESULTSET_ALREADY_CLOSED); } } @Override public boolean wasNull() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return false; } @Override public boolean next() throws SQLException { return false; // no exception } @Override public void close() throws SQLException { isClosed = true; } @Override public boolean getBoolean(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return false; } @Override public int getInt(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return 0; } @Override public long getLong(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return 0L; } @Override public float getFloat(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return (float) 0; } @Override public double getDouble(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return (double) 0; } @Override public short getShort(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return (short) 0; } @Override public byte getByte(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return (byte) 0; } @Override public String getString(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return ""; } @Override public byte[] getBytes(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return new byte[0]; } @Override public Date getDate(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public Time getTime(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public Timestamp getTimestamp(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public InputStream getAsciiStream(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Deprecated @Override public InputStream getUnicodeStream(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public InputStream getBinaryStream(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public String getString(String columnLabel) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return ""; } @Override public boolean getBoolean(String columnLabel) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return false; } @Override public byte getByte(String columnLabel) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return 0; } @Override public short getShort(String columnLabel) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return 0; } @Override public int getInt(String columnLabel) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return 0; } @Override public long getLong(String columnLabel) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return 0; } @Override public float 
getFloat(String columnLabel) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return 0; } @Override public double getDouble(String columnLabel) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return 0; } @Deprecated @Override public BigDecimal getBigDecimal(String columnLabel, int scale) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public byte[] getBytes(String columnLabel) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return new byte[0]; } @Override public Date getDate(String columnLabel) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public Time getTime(String columnLabel) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public Timestamp getTimestamp(String columnLabel) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public BigDecimal getBigDecimal(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public BigDecimal getBigDecimal(String columnLabel) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public boolean isBeforeFirst() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return false; } @Override public boolean isAfterLast() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return false; } @Override public boolean isFirst() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return false; } @Override public boolean isLast() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return false; } @Override public void beforeFirst() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void afterLast() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public boolean first() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return false; } @Override public boolean last() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return false; } @Override public int getRow() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return 0; } @Override public boolean absolute(int row) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return false; } @Override public boolean relative(int rows) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return false; } @Override public boolean previous() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return false; } @Override public void setFetchDirection(int direction) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public int getFetchDirection() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return 0; } @Override public void setFetchSize(int rows) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public int getFetchSize() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return 0; } @Override public int getType() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return 0; } @Override public int getConcurrency() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return 0; } @Override public boolean rowUpdated() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return false; } @Override public boolean rowInserted() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return false; } @Override public boolean rowDeleted() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return false; 
} @Override public void updateNull(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateBoolean(int columnIndex, boolean x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateByte(int columnIndex, byte x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateShort(int columnIndex, short x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateInt(int columnIndex, int x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateLong(int columnIndex, long x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateFloat(int columnIndex, float x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateDouble(int columnIndex, double x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateString(int columnIndex, String x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateBytes(int columnIndex, byte[] x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateDate(int columnIndex, Date x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateTime(int columnIndex, Time x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateObject(int columnIndex, Object x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateNull(String columnLabel) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateBoolean(String columnLabel, boolean x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateByte(String columnLabel, byte x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateShort(String columnLabel, short x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateInt(String columnLabel, int x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateLong(String columnLabel, long x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateFloat(String columnLabel, float x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateDouble(String columnLabel, double x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException 
{ raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateString(String columnLabel, String x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateBytes(String columnLabel, byte[] x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateDate(String columnLabel, Date x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateTime(String columnLabel, Time x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateBinaryStream(String columnLabel, InputStream x, int length) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateCharacterStream(String columnLabel, Reader reader, int length) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateObject(String columnLabel, Object x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void insertRow() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateRow() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void deleteRow() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void refreshRow() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void cancelRowUpdates() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void moveToInsertRow() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void moveToCurrentRow() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public Statement getStatement() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public Object getObject(int columnIndex, Map> map) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public Ref getRef(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public Blob getBlob(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public Clob getClob(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public Array getArray(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public Object getObject(String columnLabel, Map> map) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public Ref getRef(String columnLabel) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public Blob getBlob(String columnLabel) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public Clob getClob(String columnLabel) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public Array getArray(String columnLabel) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public Date 
getDate(int columnIndex, Calendar cal) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public Date getDate(String columnLabel, Calendar cal) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public Time getTime(int columnIndex, Calendar cal) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public Time getTime(String columnLabel, Calendar cal) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public Timestamp getTimestamp(String columnLabel, Calendar cal) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public URL getURL(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public URL getURL(String columnLabel) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public void updateRef(int columnIndex, Ref x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateRef(String columnLabel, Ref x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateBlob(int columnIndex, Blob x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateBlob(String columnLabel, Blob x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateClob(int columnIndex, Clob x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateClob(String columnLabel, Clob x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateArray(int columnIndex, Array x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateArray(String columnLabel, Array x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public RowId getRowId(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public RowId getRowId(String columnLabel) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public void updateRowId(int columnIndex, RowId x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateRowId(String columnLabel, RowId x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public int getHoldability() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return 0; } @Override public boolean isClosed() throws SQLException { return isClosed; // no exception } @Override public void updateNString(int columnIndex, String nString) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateNString(String columnLabel, String nString) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateNClob(int columnIndex, NClob nClob) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateNClob(String columnLabel, NClob nClob) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public NClob getNClob(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public NClob getNClob(String columnLabel) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); 
return null; } @Override public SQLXML getSQLXML(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public SQLXML getSQLXML(String columnLabel) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateSQLXML(String columnLabel, SQLXML xmlObject) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public String getNString(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public String getNString(String columnLabel) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public Reader getNCharacterStream(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public Reader getNCharacterStream(String columnLabel) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateNCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateAsciiStream(String columnLabel, InputStream x, long length) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateBinaryStream(String columnLabel, InputStream x, long length) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateBlob(int columnIndex, InputStream inputStream, long length) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateBlob(String columnLabel, InputStream inputStream, long length) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateClob(int columnIndex, Reader reader, long length) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateClob(String columnLabel, Reader reader, long length) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateNClob(String columnLabel, Reader reader, long length) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateNCharacterStream(String columnLabel, Reader reader) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException 
{ raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateCharacterStream(int columnIndex, Reader x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateCharacterStream(String columnLabel, Reader reader) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateBlob(String columnLabel, InputStream inputStream) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateClob(int columnIndex, Reader reader) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateClob(String columnLabel, Reader reader) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateNClob(int columnIndex, Reader reader) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public void updateNClob(String columnLabel, Reader reader) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public T getObject(int columnIndex, Class type) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public T getObject(String columnLabel, Class type) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Deprecated @Override public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public InputStream getAsciiStream(String columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Deprecated @Override public InputStream getUnicodeStream(String columnLabel) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public InputStream getBinaryStream(String columnLabel) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public SQLWarning getWarnings() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public void clearWarnings() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); } @Override public String getCursorName() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public ResultSetMetaData getMetaData() throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public Object getObject(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public Object getObject(String columnLabel) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public int findColumn(String columnLabel) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return 0; } @Override public Reader getCharacterStream(int columnIndex) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public Reader getCharacterStream(String columnLabel) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override 
public T unwrap(Class iface) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return null; } @Override public boolean isWrapperFor(Class iface) throws SQLException { raiseSQLExceptionIfResultSetIsClosed(); return false; } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/SnowflakeRichResultSetSerializableV1.java ================================================ package net.snowflake.client.internal.jdbc; import com.fasterxml.jackson.databind.JsonNode; import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Optional; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.core.QueryResultFormat; import net.snowflake.client.internal.core.SFBaseSession; import net.snowflake.client.internal.core.SFBaseStatement; import net.snowflake.client.internal.exception.SnowflakeSQLLoggedException; import net.snowflake.client.internal.log.ArgSupplier; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; public class SnowflakeRichResultSetSerializableV1 extends SnowflakeResultSetSerializableV1 { private static final SFLogger logger = SFLoggerFactory.getLogger(SnowflakeRichResultSetSerializableV1.class); private String richResultsFirstChunkStringData; private int richResultsFirstChunkRowCount; private int richResultsChunkFileCount; private int richResultsColumnCount; private final List richResultsChunkFilesMetadata = new ArrayList<>(); private byte[] richResultsFirstChunkByteData; private String richResultsQrmk; private final Map richResultsChunkHeadersMap = new HashMap<>(); private final List richResultsColumnMetadata = new ArrayList<>(); private QueryResultFormat richResultsQueryResultFormat; transient JsonNode richResultFirstChunkRowset = null; /** * A factory function for internal usage only. It creates SnowflakeRichResultSetSerializableV1 * with NoOpChunksDownloader which disables chunks prefetch. 
* * @param rootNode JSON root node * @param sfSession SFBaseSession * @param sfStatement SFBaseStatement * @return SnowflakeRichResultSetSerializableV1 with NoOpChunksDownloader * @throws SnowflakeSQLException if an error occurs */ public static SnowflakeRichResultSetSerializableV1 createWithChunksPrefetchDisabled( JsonNode rootNode, SFBaseSession sfSession, SFBaseStatement sfStatement) throws SnowflakeSQLException { return new SnowflakeRichResultSetSerializableV1( rootNode, sfSession, sfStatement, new DefaultResultStreamProvider(), true); } private SnowflakeRichResultSetSerializableV1( JsonNode rootNode, SFBaseSession sfSession, SFBaseStatement sfStatement, ResultStreamProvider resultStreamProvider, boolean disableChunksPrefetch) throws SnowflakeSQLException { super(rootNode, sfSession, sfStatement, resultStreamProvider, disableChunksPrefetch); if (!rootNode.at("/richResult").isMissingNode()) { JsonNode richResultsNode = rootNode.path("richResult"); Optional queryResultFormat = QueryResultFormat.lookupByName(richResultsNode.path("queryResultFormat").asText()); this.richResultsQueryResultFormat = queryResultFormat.orElse(QueryResultFormat.JSON); initializeColumnMetadata(richResultsNode, sfSession); initializeFirstChunkData(richResultsNode, this.queryResultFormat); initializeChunkFiles(richResultsNode); } else { logger.debug("Unable to initialize rich results metadata, no \"richResult\" node"); } } private void initializeColumnMetadata(JsonNode richResultsNode, SFBaseSession sfSession) throws SnowflakeSQLException { this.richResultsColumnCount = richResultsNode.path("rowtype").size(); for (int i = 0; i < this.richResultsColumnCount; i++) { JsonNode colNode = richResultsNode.path("rowtype").path(i); SnowflakeRichResultsColumnMetadata columnMetadata = new SnowflakeRichResultsColumnMetadata( colNode, sfSession.isJdbcTreatDecimalAsInt(), sfSession); this.richResultsColumnMetadata.add(columnMetadata); logger.debug("Get column metadata: {}", (ArgSupplier) columnMetadata::toString); } } private void initializeFirstChunkData( JsonNode richResultsNode, QueryResultFormat queryResultFormat) { if (queryResultFormat == QueryResultFormat.ARROW) { this.richResultsFirstChunkStringData = richResultsNode.path("rowsetBase64").asText(); } else { this.richResultFirstChunkRowset = richResultsNode.path("rowset"); if (this.richResultFirstChunkRowset == null || this.richResultFirstChunkRowset.isMissingNode()) { this.richResultsFirstChunkRowCount = 0; this.richResultsFirstChunkStringData = null; this.richResultsFirstChunkByteData = new byte[0]; } else { this.richResultsFirstChunkRowCount = this.richResultFirstChunkRowset.size(); this.richResultsFirstChunkStringData = this.richResultFirstChunkRowset.toString(); } logger.debug("First rich results chunk row count: {}", this.richResultsFirstChunkRowCount); } } private void initializeChunkFiles(JsonNode richResultsNode) { JsonNode chunksNode = richResultsNode.path("chunks"); if (!chunksNode.isMissingNode()) { this.richResultsChunkFileCount = chunksNode.size(); JsonNode qrmkNode = richResultsNode.path("qrmk"); this.richResultsQrmk = qrmkNode.isMissingNode() ? 
null : qrmkNode.textValue(); if (this.richResultsChunkFileCount > 0) { logger.debug("Number of rich results metadata chunks: {}", this.richResultsChunkFileCount); initializeChunkHeaders(richResultsNode); initializeChunkFilesMetadata(chunksNode); } } } private void initializeChunkHeaders(JsonNode richResultsNode) { JsonNode chunkHeaders = richResultsNode.path("chunkHeaders"); if (chunkHeaders != null && !chunkHeaders.isMissingNode()) { Iterator> chunkHeadersIter = chunkHeaders.fields(); while (chunkHeadersIter.hasNext()) { Map.Entry chunkHeader = chunkHeadersIter.next(); logger.debug( "Add header key: {}, value: {}", chunkHeader.getKey(), chunkHeader.getValue().asText()); this.richResultsChunkHeadersMap.put(chunkHeader.getKey(), chunkHeader.getValue().asText()); } } } private void initializeChunkFilesMetadata(JsonNode chunksNode) { for (int idx = 0; idx < this.richResultsChunkFileCount; idx++) { JsonNode chunkNode = chunksNode.get(idx); String url = chunkNode.path("url").asText(); int rowCount = chunkNode.path("rowCount").asInt(); int compressedSize = chunkNode.path("compressedSize").asInt(); int uncompressedSize = chunkNode.path("uncompressedSize").asInt(); this.richResultsChunkFilesMetadata.add( new ChunkFileMetadata(url, rowCount, compressedSize, uncompressedSize)); logger.debug( "Add rich results metadata chunk, url: {} rowCount: {} " + "compressedSize: {} uncompressedSize: {}", url, rowCount, compressedSize, uncompressedSize); } } public String getRichResultsFirstChunkStringData() { return richResultsFirstChunkStringData; } public int getRichResultsFirstChunkRowCount() { return richResultsFirstChunkRowCount; } public int getRichResultsChunkFileCount() { return richResultsChunkFileCount; } public int getRichResultsColumnCount() { return richResultsColumnCount; } public List getRichResultsChunkFilesMetadata() { return richResultsChunkFilesMetadata; } public byte[] getRichResultsFirstChunkByteData() { return richResultsFirstChunkByteData; } public String getRichResultsQrmk() { return richResultsQrmk; } public Map getRichResultsChunkHeadersMap() { return richResultsChunkHeadersMap; } public List getRichResultsColumnMetadata() { return richResultsColumnMetadata; } public QueryResultFormat getRichResultsQueryResultFormat() { return richResultsQueryResultFormat; } public JsonNode getRichResultFirstChunkRowset() { return richResultFirstChunkRowset; } public static class SnowflakeRichResultsColumnMetadata extends SnowflakeColumnMetadata { private final int columnIndex; public SnowflakeRichResultsColumnMetadata( JsonNode colNode, boolean jdbcTreatDecimalAsInt, SFBaseSession session) throws SnowflakeSQLLoggedException { super(colNode, jdbcTreatDecimalAsInt, session); this.columnIndex = colNode.path("columnIndexing").asInt(); } public int getColumnIndex() { return columnIndex; } @Override public String toString() { return super.toString() + ",columnIndex=" + columnIndex; } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/SnowflakeSQLExceptionWithRetryContext.java ================================================ package net.snowflake.client.internal.jdbc; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; /** * Internal exception class that extends SnowflakeSQLException with additional retry context * information. This class is used internally by the JDBC driver for retry logic and should not be * exposed to customers. * *
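* Illustrative construction (a sketch; {@code errorCode}, {@code retryCount} and {@code elapsedSeconds} stand for values the retry loop already tracks): {@code throw new SnowflakeSQLExceptionWithRetryContext(errorCode, retryCount, false, elapsedSeconds);} * *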
This exception carries metadata about retry attempts, timeouts, and elapsed time that is used * by internal components (RestRequest, SessionUtil) to manage connection and authentication retry * logic. */ public class SnowflakeSQLExceptionWithRetryContext extends SnowflakeSQLException { private static final long serialVersionUID = 1L; private final int retryCount; private final boolean isSocketTimeoutNoBackoff; private final long elapsedSeconds; /** * Constructs a new exception with retry context information. * * @param errorCode the error code * @param retryCount the number of retry attempts made * @param isSocketTimeoutNoBackoff whether the socket timeout occurred without backoff * @param elapsedSeconds the elapsed time in seconds */ public SnowflakeSQLExceptionWithRetryContext( ErrorCode errorCode, int retryCount, boolean isSocketTimeoutNoBackoff, long elapsedSeconds) { super(errorCode); this.retryCount = retryCount; this.isSocketTimeoutNoBackoff = isSocketTimeoutNoBackoff; this.elapsedSeconds = elapsedSeconds; } /** * Gets the retry count for this exception. * * @return the number of retry attempts */ public int getRetryCount() { return retryCount; } /** * Checks if the socket timeout occurred without backoff. * * @return true if socket timeout had no backoff, false otherwise */ public boolean isSocketTimeoutNoBackoff() { return isSocketTimeoutNoBackoff; } /** * Gets the elapsed time in seconds. * * @return elapsed seconds */ public long getElapsedSeconds() { return elapsedSeconds; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/SnowflakeSimulatedUploadFailure.java ================================================ package net.snowflake.client.internal.jdbc; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; /** Snowflake Loader exception for Test. This should only be valid in tests. */ public class SnowflakeSimulatedUploadFailure extends RuntimeException { private static final long serialVersionUID = 1L; private static final SFLogger logger = SFLoggerFactory.getLogger(SnowflakeSimulatedUploadFailure.class); public SnowflakeSimulatedUploadFailure() { super(); logger.error("This constructor should not be used.", false); } public SnowflakeSimulatedUploadFailure(String filename) { super("Simulated upload failure for " + filename); logger.debug("{}. This should show up only in tests.", this.getMessage()); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/SnowflakeTimeWithTimezone.java ================================================ package net.snowflake.client.internal.jdbc; import java.sql.Time; import java.sql.Timestamp; import java.time.LocalDateTime; import java.time.ZoneId; import java.time.ZoneOffset; import java.time.format.DateTimeFormatter; import java.util.TimeZone; /** * Time with toString() overridden to display time values in session timezone. Only relevant for * timestamp objects fetched as times. Normal time objects do not have a timezone associated with * them. 
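* * For illustration (a hedged example, not from the original source): a value of 01:30:00 UTC read with session timezone Europe/Warsaw (UTC+02:00 in summer) prints as 03:30:00 from {@code toString()} when {@code useSessionTimeZone} is true, and falls back to the default {@code java.sql.Time} rendering otherwise.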
*/ public class SnowflakeTimeWithTimezone extends Time { int nanos = 0; boolean useSessionTimeZone = false; ZoneOffset offset = ZoneOffset.UTC; public SnowflakeTimeWithTimezone(long time, int nanos, boolean useSessionTimeZone) { super(time); this.nanos = nanos; this.useSessionTimeZone = useSessionTimeZone; } public SnowflakeTimeWithTimezone( Timestamp ts, TimeZone sessionTimeZone, boolean useSessionTimeZone) { super(ts.getTime()); this.nanos = ts.getNanos(); this.useSessionTimeZone = useSessionTimeZone; if (sessionTimeZone != null) { this.offset = ZoneId.of(sessionTimeZone.getID()).getRules().getOffset(ts.toInstant()); } } public int getNano() { return nanos; } public ZoneOffset getOffset() { return offset; } /** * Returns a string representation in session's timezone so as to display "wallclock time" * * @return a string representation of the object */ public synchronized String toString() { if (!useSessionTimeZone) { return super.toString(); } DateTimeFormatter formatter = DateTimeFormatter.ofPattern("HH:mm:ss"); LocalDateTime ldt = LocalDateTime.ofEpochSecond( SnowflakeUtil.getSecondsFromMillis(this.getTime()), this.nanos, this.offset); return ldt.format(formatter); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/SnowflakeTimestampWithTimezone.java ================================================ package net.snowflake.client.internal.jdbc; import java.sql.Timestamp; import java.time.LocalDateTime; import java.time.ZoneId; import java.time.ZoneOffset; import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; import java.util.TimeZone; /** * Timestamp with toString() overridden to display timestamp in session timezone. The default * timezone is UTC if no timezone is specified. */ public class SnowflakeTimestampWithTimezone extends Timestamp { private static final long serialVersionUID = 1L; private TimeZone timezone = TimeZone.getTimeZone("UTC"); public SnowflakeTimestampWithTimezone(long seconds, int nanoseconds, TimeZone timezone) { super(seconds); this.setNanos(nanoseconds); this.timezone = timezone; } public SnowflakeTimestampWithTimezone(Timestamp ts, TimeZone timezone) { this(ts.getTime(), ts.getNanos(), timezone); } public SnowflakeTimestampWithTimezone(Timestamp ts) { this(ts.getTime(), ts.getNanos(), TimeZone.getTimeZone("UTC")); } /** * Gets the timezone. * * @return the timezone. */ public TimeZone getTimezone() { return this.timezone; } /** * Converts this timestamp to a zoned date time. * * @return the zoned date time corresponding to this timestamp. 
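* * For example (illustrative values): a timestamp holding the instant 2024-01-01T00:00:00Z with timezone America/Los_Angeles converts to 2023-12-31T16:00-08:00[America/Los_Angeles].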
*/ public ZonedDateTime toZonedDateTime() { return ZonedDateTime.ofInstant(toInstant(), this.timezone.toZoneId()); } /** * Returns a string representation in UTC * * @return a string representation of the object */ public synchronized String toString() { int trailingZeros = 0; int tmpNanos = this.getNanos(); if (tmpNanos == 0) { trailingZeros = 8; } else { while (tmpNanos % 10 == 0) { tmpNanos /= 10; trailingZeros++; } } final String baseFormat = "uuuu-MM-dd HH:mm:ss."; StringBuilder buf = new StringBuilder(baseFormat.length() + 9 - trailingZeros); buf.append(baseFormat); for (int i = 0; i < 9 - trailingZeros; ++i) { buf.append("S"); } DateTimeFormatter formatter = DateTimeFormatter.ofPattern(buf.toString()); ZoneOffset offset = ZoneId.of(timezone.getID()).getRules().getOffset(this.toInstant()); LocalDateTime ldt = LocalDateTime.ofEpochSecond( SnowflakeUtil.getSecondsFromMillis(this.getTime()), this.getNanos(), offset); return ldt.format(formatter); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/SnowflakeUseDPoPNonceException.java ================================================ package net.snowflake.client.internal.jdbc; public class SnowflakeUseDPoPNonceException extends RuntimeException { private final String nonce; public SnowflakeUseDPoPNonceException(String nonce) { this.nonce = nonce; } public String getNonce() { return nonce; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/SnowflakeUtil.java ================================================ package net.snowflake.client.internal.jdbc; import static java.util.Arrays.stream; import static net.snowflake.client.api.resultset.SnowflakeType.GEOGRAPHY; import static net.snowflake.client.internal.core.Constants.OAUTH_ACCESS_TOKEN_EXPIRED_GS_CODE; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ArrayNode; import java.io.BufferedReader; import java.io.File; import java.io.IOException; import java.io.InputStreamReader; import java.io.PrintWriter; import java.io.StringWriter; import java.lang.reflect.Field; import java.lang.reflect.Method; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.nio.file.attribute.PosixFilePermission; import java.nio.file.attribute.PosixFilePermissions; import java.sql.SQLException; import java.sql.Time; import java.sql.Types; import java.time.Instant; import java.time.LocalDateTime; import java.time.ZoneOffset; import java.util.ArrayList; import java.util.Arrays; import java.util.Calendar; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Optional; import java.util.Properties; import java.util.Random; import java.util.Set; import java.util.TreeMap; import java.util.concurrent.Executors; import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.regex.Pattern; import java.util.stream.Collectors; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.api.resultset.FieldMetadata; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.api.implementation.resultset.FieldMetadataImpl; import net.snowflake.client.internal.core.Constants; import 
net.snowflake.client.internal.core.HttpClientSettingsKey; import net.snowflake.client.internal.core.OCSPMode; import net.snowflake.client.internal.core.ObjectMapperFactory; import net.snowflake.client.internal.core.SFBaseSession; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.core.SFSessionProperty; import net.snowflake.client.internal.exception.SnowflakeSQLLoggedException; import net.snowflake.client.internal.jdbc.util.SnowflakeTypeUtil; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.client.internal.util.ThrowingCallable; import net.snowflake.common.core.SqlState; import net.snowflake.common.util.ClassUtil; import net.snowflake.common.util.FixedViewColumn; import org.apache.commons.io.IOUtils; import org.apache.http.Header; import org.apache.http.HttpResponse; import org.apache.http.NameValuePair; public class SnowflakeUtil { private static final SFLogger logger = SFLoggerFactory.getLogger(SnowflakeUtil.class); private static final ObjectMapper OBJECT_MAPPER = ObjectMapperFactory.getObjectMapper(); private static final Set directoryOwnerOnlyPermission = PosixFilePermissions.fromString("rwx------"); // reauthenticate private static final int ID_TOKEN_EXPIRED_GS_CODE = 390110; private static final int SESSION_NOT_EXIST_GS_CODE = 390111; private static final int MASTER_TOKEN_NOTFOUND = 390113; private static final int MASTER_EXPIRED_GS_CODE = 390114; private static final int MASTER_TOKEN_INVALID_GS_CODE = 390115; private static final int ID_TOKEN_INVALID_LOGIN_REQUEST_GS_CODE = 390195; public static final String BIG_DECIMAL_STR = "big decimal"; public static final String FLOAT_STR = "float"; public static final String DOUBLE_STR = "double"; public static final String DURATION_STR = "duration"; public static final String BOOLEAN_STR = "boolean"; public static final String SHORT_STR = "short"; public static final String INT_STR = "int"; public static final String LONG_STR = "long"; public static final String PERIOD_STR = "period"; public static final String TIME_STR = "time"; public static final String TIMESTAMP_STR = "timestamp"; public static final String DATE_STR = "date"; public static final String BYTE_STR = "byte"; public static final String BYTES_STR = "byte array"; public static String mapJson(Object ob) throws JsonProcessingException { return OBJECT_MAPPER.writeValueAsString(ob); } public static void checkErrorAndThrowExceptionIncludingReauth(JsonNode rootNode) throws SnowflakeSQLException { checkErrorAndThrowExceptionSub(rootNode, true); } public static void checkErrorAndThrowException(JsonNode rootNode) throws SnowflakeSQLException { checkErrorAndThrowExceptionSub(rootNode, false); } public static long getEpochTimeInMicroSeconds() { Instant timestamp = Instant.now(); long micros = TimeUnit.SECONDS.toMicros(timestamp.getEpochSecond()) + TimeUnit.NANOSECONDS.toMicros(timestamp.getNano()); return micros; } /** * Check the error in the JSON node and generate an exception based on information extracted from * the node. 
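* * A failing response has roughly this shape (an illustrative sketch, not an exact server payload): {"success": false, "message": "...", "code": ..., "data": {"sqlState": "...", "errorCode": ..., "queryId": "..."}}; when data.sqlState is missing, the envelope-level code and message are used instead.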
* * @param rootNode json object contains error information * @param raiseReauthenticateError raises SnowflakeReauthenticationRequest if true * @throws SnowflakeSQLException the exception get from the error in the json */ private static void checkErrorAndThrowExceptionSub( JsonNode rootNode, boolean raiseReauthenticateError) throws SnowflakeSQLException { // no need to throw exception if success if (rootNode.path("success").asBoolean()) { return; } String errorMessage; String sqlState; int errorCode; String queryId = "unknown"; // if we have sqlstate in data, it's a sql error if (!rootNode.path("data").path("sqlState").isMissingNode()) { sqlState = rootNode.path("data").path("sqlState").asText(); errorCode = rootNode.path("data").path("errorCode").asInt(); queryId = rootNode.path("data").path("queryId").asText(); errorMessage = rootNode.path("message").asText(); } else { sqlState = SqlState.INTERNAL_ERROR; // use internal error sql state // check if there is an error code in the envelope if (!rootNode.path("code").isMissingNode()) { errorCode = rootNode.path("code").asInt(); errorMessage = rootNode.path("message").asText(); } else { errorCode = ErrorCode.INTERNAL_ERROR.getMessageCode(); errorMessage = "no_error_code_from_server"; try (PrintWriter writer = new PrintWriter("output.json", "UTF-8")) { writer.print(rootNode.toString()); } catch (Exception ex) { logger.debug("{}", ex); } } } if (raiseReauthenticateError) { switch (errorCode) { case ID_TOKEN_EXPIRED_GS_CODE: case SESSION_NOT_EXIST_GS_CODE: case MASTER_TOKEN_NOTFOUND: case MASTER_EXPIRED_GS_CODE: case MASTER_TOKEN_INVALID_GS_CODE: case ID_TOKEN_INVALID_LOGIN_REQUEST_GS_CODE: case OAUTH_ACCESS_TOKEN_EXPIRED_GS_CODE: throw new SnowflakeReauthenticationRequest(queryId, errorMessage, sqlState, errorCode); } } throw new SnowflakeSQLException(queryId, errorMessage, sqlState, errorCode); } /** * This method should only be used internally * * @param colNode JsonNode * @param jdbcTreatDecimalAsInt true if should treat Decimal as Int * @param session SFBaseSession * @return SnowflakeColumnMetadata * @throws SnowflakeSQLException if an error occurs */ @Deprecated public static SnowflakeColumnMetadata extractColumnMetadata( JsonNode colNode, boolean jdbcTreatDecimalAsInt, SFBaseSession session) throws SnowflakeSQLException { return new SnowflakeColumnMetadata(colNode, jdbcTreatDecimalAsInt, session); } static ColumnTypeInfo getSnowflakeType( String internalColTypeName, String extColTypeName, JsonNode udtOutputType, SFBaseSession session, int fixedColType, boolean isStructuredType, boolean isVectorType) throws SnowflakeSQLLoggedException { SnowflakeType baseType = SnowflakeTypeUtil.fromStringOrNull(internalColTypeName); if (baseType == null) { // Unknown Snowflake type (e.g. 
UUID) — report as OTHER with the actual type name return new ColumnTypeInfo( Types.OTHER, defaultIfNull(extColTypeName, internalColTypeName.toUpperCase(Locale.ROOT)), SnowflakeType.ANY); } ColumnTypeInfo columnTypeInfo; switch (baseType) { case TEXT: columnTypeInfo = new ColumnTypeInfo(Types.VARCHAR, defaultIfNull(extColTypeName, "VARCHAR"), baseType); break; case CHAR: columnTypeInfo = new ColumnTypeInfo(Types.CHAR, defaultIfNull(extColTypeName, "CHAR"), baseType); break; case INTEGER: columnTypeInfo = new ColumnTypeInfo(Types.INTEGER, defaultIfNull(extColTypeName, "INTEGER"), baseType); break; case DECFLOAT: columnTypeInfo = new ColumnTypeInfo(Types.DECIMAL, "DECFLOAT", baseType); break; case FIXED: if (isVectorType) { columnTypeInfo = new ColumnTypeInfo(Types.INTEGER, defaultIfNull(extColTypeName, "INTEGER"), baseType); } else { columnTypeInfo = new ColumnTypeInfo(fixedColType, defaultIfNull(extColTypeName, "NUMBER"), baseType); } break; case REAL: if (isVectorType) { columnTypeInfo = new ColumnTypeInfo(Types.FLOAT, defaultIfNull(extColTypeName, "FLOAT"), baseType); } else { columnTypeInfo = new ColumnTypeInfo(Types.DOUBLE, defaultIfNull(extColTypeName, "DOUBLE"), baseType); } break; case TIMESTAMP: case TIMESTAMP_LTZ: columnTypeInfo = new ColumnTypeInfo( SnowflakeType.EXTRA_TYPES_TIMESTAMP_LTZ, defaultIfNull(extColTypeName, "TIMESTAMPLTZ"), baseType); break; case INTERVAL_YEAR_MONTH: columnTypeInfo = new ColumnTypeInfo( SnowflakeType.EXTRA_TYPES_YEAR_MONTH_INTERVAL, defaultIfNull(extColTypeName, "INTERVAL_YEAR_MONTH"), baseType); break; case INTERVAL_DAY_TIME: columnTypeInfo = new ColumnTypeInfo( SnowflakeType.EXTRA_TYPES_DAY_TIME_INTERVAL, defaultIfNull(extColTypeName, "INTERVAL_DAY_TIME"), baseType); break; case TIMESTAMP_NTZ: // if the column type is changed to EXTRA_TYPES_TIMESTAMP_NTZ, update also JsonSqlInput columnTypeInfo = new ColumnTypeInfo( Types.TIMESTAMP, defaultIfNull(extColTypeName, "TIMESTAMPNTZ"), baseType); break; case TIMESTAMP_TZ: columnTypeInfo = new ColumnTypeInfo( SnowflakeType.EXTRA_TYPES_TIMESTAMP_TZ, defaultIfNull(extColTypeName, "TIMESTAMPTZ"), baseType); break; case DATE: columnTypeInfo = new ColumnTypeInfo(Types.DATE, defaultIfNull(extColTypeName, "DATE"), baseType); break; case TIME: columnTypeInfo = new ColumnTypeInfo(Types.TIME, defaultIfNull(extColTypeName, "TIME"), baseType); break; case BOOLEAN: columnTypeInfo = new ColumnTypeInfo(Types.BOOLEAN, defaultIfNull(extColTypeName, "BOOLEAN"), baseType); break; case VECTOR: columnTypeInfo = new ColumnTypeInfo( SnowflakeType.EXTRA_TYPES_VECTOR, defaultIfNull(extColTypeName, "VECTOR"), baseType); break; case ARRAY: int columnType = isStructuredType ? Types.ARRAY : Types.VARCHAR; columnTypeInfo = new ColumnTypeInfo(columnType, defaultIfNull(extColTypeName, "ARRAY"), baseType); break; case MAP: columnTypeInfo = new ColumnTypeInfo(Types.STRUCT, defaultIfNull(extColTypeName, "OBJECT"), baseType); break; case OBJECT: if (isStructuredType) { boolean isGeoType = "GEOMETRY".equals(extColTypeName) || "GEOGRAPHY".equals(extColTypeName); int type = isGeoType ? 
Types.VARCHAR : Types.STRUCT; columnTypeInfo = new ColumnTypeInfo(type, defaultIfNull(extColTypeName, "OBJECT"), baseType); } else { columnTypeInfo = new ColumnTypeInfo(Types.VARCHAR, defaultIfNull(extColTypeName, "OBJECT"), baseType); } break; case VARIANT: columnTypeInfo = new ColumnTypeInfo(Types.VARCHAR, defaultIfNull(extColTypeName, "VARIANT"), baseType); break; case BINARY: columnTypeInfo = new ColumnTypeInfo(Types.BINARY, defaultIfNull(extColTypeName, "BINARY"), baseType); break; case GEOGRAPHY: case GEOMETRY: int colType = Types.VARCHAR; extColTypeName = (baseType == GEOGRAPHY) ? "GEOGRAPHY" : "GEOMETRY"; if (!udtOutputType.isMissingNode()) { SnowflakeType outputType = SnowflakeTypeUtil.fromStringOrNull(udtOutputType.asText()); if (outputType != null) { switch (outputType) { case OBJECT: case TEXT: colType = Types.VARCHAR; break; case BINARY: colType = Types.BINARY; } } } columnTypeInfo = new ColumnTypeInfo(colType, extColTypeName, baseType); break; default: throw new SnowflakeSQLLoggedException( session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "Unknown column type: " + internalColTypeName); } return columnTypeInfo; } private static String defaultIfNull(String extColTypeName, String defaultValue) { return Optional.ofNullable(extColTypeName).orElse(defaultValue); } static List createFieldsMetadata( ArrayNode fieldsJson, boolean jdbcTreatDecimalAsInt, String parentInternalColumnTypeName) throws SnowflakeSQLLoggedException { List fields = new ArrayList<>(); for (JsonNode node : fieldsJson) { String colName; if (!node.path("fieldType").isEmpty()) { colName = node.path("fieldName").asText(); node = node.path("fieldType"); } else { colName = node.path("name").asText(); } int scale = node.path("scale").asInt(); int precision = node.path("precision").asInt(); String internalColTypeName = node.path("type").asText(); boolean nullable = node.path("nullable").asBoolean(); int length = node.path("length").asInt(); boolean fixed = node.path("fixed").asBoolean(); int fixedColType = jdbcTreatDecimalAsInt && scale == 0 ? 
Types.BIGINT : Types.DECIMAL; List internalFields = getFieldMetadata(jdbcTreatDecimalAsInt, parentInternalColumnTypeName, node); JsonNode outputType = node.path("outputType"); JsonNode extColTypeNameNode = node.path("extTypeName"); String extColTypeName = null; if (!extColTypeNameNode.isMissingNode() && !isNullOrEmpty(extColTypeNameNode.asText())) { extColTypeName = extColTypeNameNode.asText(); } ColumnTypeInfo columnTypeInfo = getSnowflakeType( internalColTypeName, extColTypeName, outputType, null, fixedColType, internalFields.size() > 0, isVectorType(parentInternalColumnTypeName)); fields.add( new FieldMetadataImpl( colName, columnTypeInfo.getExtColTypeName(), columnTypeInfo.getColumnType(), nullable, length, precision, scale, fixed, columnTypeInfo.getSnowflakeType(), internalFields)); } return fields; } static boolean isVectorType(String internalColumnTypeName) { return internalColumnTypeName.equalsIgnoreCase("vector"); } static List getFieldMetadata( boolean jdbcTreatDecimalAsInt, String internalColumnTypeName, JsonNode node) throws SnowflakeSQLLoggedException { if (!node.path("fields").isEmpty()) { ArrayNode internalFieldsJson = (ArrayNode) node.path("fields"); return createFieldsMetadata( internalFieldsJson, jdbcTreatDecimalAsInt, internalColumnTypeName); } else { return new ArrayList<>(); } } public static String javaTypeToSFTypeString(int javaType, SFBaseSession session) throws SnowflakeSQLException { return SnowflakeTypeUtil.javaTypeToSFType(javaType, session).name(); } public static SnowflakeType javaTypeToSFType(int javaType, SFBaseSession session) throws SnowflakeSQLException { return SnowflakeTypeUtil.javaTypeToSFType(javaType, session); } /** * A small function for concatenating two file paths by making sure one and only one path * separator is placed between the two paths. * *
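* For instance (illustrative values): {@code concatFilePathNames("stage/path/", "/file.csv", "/")} and {@code concatFilePathNames("stage/path", "file.csv", "/")} both yield "stage/path/file.csv". * *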
This is necessary since for S3 file name, having different number of file separators in a * path will mean different files. * *
Typical use case is to concatenate a file name to a directory. * * @param leftPath left path * @param rightPath right path * @param fileSep file separator * @return concatenated file path */ static String concatFilePathNames(String leftPath, String rightPath, String fileSep) { String leftPathTrimmed = leftPath.trim(); String rightPathTrimmed = rightPath.trim(); if (leftPathTrimmed.isEmpty()) { return rightPath; } if (leftPathTrimmed.endsWith(fileSep) && rightPathTrimmed.startsWith(fileSep)) { return leftPathTrimmed + rightPathTrimmed.substring(1); } else if (!leftPathTrimmed.endsWith(fileSep) && !rightPathTrimmed.startsWith(fileSep)) { return leftPathTrimmed + fileSep + rightPathTrimmed; } else { return leftPathTrimmed + rightPathTrimmed; } } static String greatestCommonPrefix(String val1, String val2) { if (val1 == null || val2 == null) { return null; } StringBuilder greatestCommonPrefix = new StringBuilder(); int len = Math.min(val1.length(), val2.length()); for (int idx = 0; idx < len; idx++) { if (val1.charAt(idx) == val2.charAt(idx)) { greatestCommonPrefix.append(val1.charAt(idx)); } else { break; } } return greatestCommonPrefix.toString(); } static List describeFixedViewColumns( Class clazz, SFBaseSession session) throws SnowflakeSQLException { Field[] columns = ClassUtil.getAnnotatedDeclaredFields(clazz, FixedViewColumn.class, true); Arrays.sort(columns, new FixedViewColumn.OrdinalComparatorForFields()); List rowType = new ArrayList(); for (Field column : columns) { FixedViewColumn columnAnnotation = column.getAnnotation(FixedViewColumn.class); String typeName; int colType; Class type = column.getType(); SnowflakeType stype = SnowflakeType.TEXT; if (type == Integer.TYPE) { colType = Types.INTEGER; typeName = "INTEGER"; stype = SnowflakeType.INTEGER; } else if (type == Long.TYPE) { colType = Types.DECIMAL; typeName = "DECIMAL"; stype = SnowflakeType.INTEGER; } else if (type == String.class) { colType = Types.VARCHAR; typeName = "VARCHAR"; stype = SnowflakeType.TEXT; } else { throw new SnowflakeSQLLoggedException( session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "Unsupported column type: " + type.getName()); } // TODO: we hard code some of the values below but can change them // later to derive from annotation as well. rowType.add( new SnowflakeColumnMetadata( columnAnnotation.name(), // column name colType, // column type false, // nullable 20480, // length 10, // precision 0, // scale typeName, // type name true, // fixed stype, new ArrayList<>(), "", // database "", // schema "", // table false, // isAutoincrement 0 // dimension )); } return rowType; } /** * A utility to log response details. * *
Used when there is an error in http response * * @param response http response get from server * @param logger logger object */ public static void logResponseDetails(HttpResponse response, SFLogger logger) { if (response == null) { logger.error("null response", false); return; } // log the response if (response.getStatusLine() != null) { logger.error("Response status line reason: {}", response.getStatusLine().getReasonPhrase()); } // log each header from response Header[] headers = response.getAllHeaders(); if (headers != null) { for (Header header : headers) { logger.debug("Header name: {}, value: {}", header.getName(), header.getValue()); } } // log response if (response.getEntity() != null) { try { StringWriter writer = new StringWriter(); BufferedReader bufferedReader = new BufferedReader(new InputStreamReader((response.getEntity().getContent()))); IOUtils.copy(bufferedReader, writer); logger.error("Response content: {}", writer.toString()); } catch (IOException ex) { logger.error("Failed to read content due to exception: " + "{}", ex.getMessage()); } } } /** * Returns a new thread pool configured with the default settings. * * @param threadNamePrefix prefix of the thread name * @param parallel the number of concurrency * @return A new thread pool configured with the default settings. */ public static ThreadPoolExecutor createDefaultExecutorService( final String threadNamePrefix, final int parallel) { ThreadFactory threadFactory = new ThreadFactory() { private int threadCount = 1; public Thread newThread(Runnable r) { Thread thread = new Thread(r); thread.setName(threadNamePrefix + threadCount++); return thread; } }; return (ThreadPoolExecutor) Executors.newFixedThreadPool(parallel, threadFactory); } public static Throwable getRootCause(Exception ex) { Throwable cause = ex; while (cause.getCause() != null) { cause = cause.getCause(); } return cause; } public static boolean isBlank(String input) { if ("".equals(input) || input == null) { return true; } for (char c : input.toCharArray()) { if (!Character.isWhitespace(c)) { return false; } } return true; } private static final String ALPHA_NUMERIC_STRING = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"; public static String randomAlphaNumeric(int count) { StringBuilder builder = new StringBuilder(); Random random = new Random(); while (count-- != 0) { int character = random.nextInt(ALPHA_NUMERIC_STRING.length()); builder.append(ALPHA_NUMERIC_STRING.charAt(character)); } return builder.toString(); } /** * System.getProperty wrapper. If System.getProperty raises a SecurityException, it is ignored and * returns null. * * @param property the property name * @return the property value if set, otherwise null. */ public static String systemGetProperty(String property) { try { return System.getProperty(property); } catch (SecurityException ex) { // logger may be null during SnowflakeUtil. (circular init via SFLoggerFactory) if (logger != null) { logger.debug("Security exception raised: {}", ex.getMessage()); } return null; } } /** * System.setProperty wrapper. If System.setProperty raises a SecurityException, it is ignored. * * @param property the property name * @param value the property value */ public static void systemSetProperty(String property, String value) { try { System.setProperty(property, value); } catch (SecurityException ex) { // logger may be null during SnowflakeUtil. (circular init via SFLoggerFactory) if (logger != null) { logger.debug("Security exception raised: {}", ex.getMessage()); } } } /** * System.getenv wrapper. 
If System.getenv raises a SecurityException, it is ignored and returns * null. * * @param env the environment variable name. * @return the environment variable value if set, otherwise null. */ public static String systemGetEnv(String env) { try { return System.getenv(env); } catch (SecurityException ex) { // logger may be null during SnowflakeUtil. (circular init via SFLoggerFactory) if (logger != null) { logger.debug( "Failed to get environment variable {}. Security exception raised: {}", env, ex.getMessage()); } } return null; } /** * System.setEnv function. Can be used for unit tests. * * @param key key * @param value value */ public static void systemSetEnv(String key, String value) { try { Map env = System.getenv(); Class cl = env.getClass(); Field field = cl.getDeclaredField("m"); field.setAccessible(true); Map writableEnv = (Map) field.get(env); writableEnv.put(key, value); // When an environment variable is set on Windows, it is stored in an additional // case-insensitive map; when System.getenv(VAR_NAME) is used, that map must be updated too. if (Constants.getOS() == Constants.OS.WINDOWS) { Class pe = Class.forName("java.lang.ProcessEnvironment"); Method getenv = pe.getDeclaredMethod("getenv", String.class); getenv.setAccessible(true); Field props = pe.getDeclaredField("theCaseInsensitiveEnvironment"); props.setAccessible(true); Map writableEnvForGet = (Map) props.get(null); writableEnvForGet.put(key, value); } } catch (Exception e) { logger.error( "Failed to set environment variable {}. Exception raised: {}", key, e.getMessage()); } } /** * System.unsetEnv function to remove a system environment parameter in the map * * @param key key value */ public static void systemUnsetEnv(String key) { try { Map env = System.getenv(); Class cl = env.getClass(); Field field = cl.getDeclaredField("m"); field.setAccessible(true); Map writableEnv = (Map) field.get(env); writableEnv.remove(key); } catch (Exception e) { logger.error( "Failed to remove environment variable {}. Exception raised: {}", key, e.getMessage()); } } /** * Setup JDBC proxy properties if necessary. * * @param mode OCSP mode * @param info proxy server properties. * @return HttpClientSettingsKey * @throws SnowflakeSQLException if an error occurs */ public static HttpClientSettingsKey convertProxyPropertiesToHttpClientKey( OCSPMode mode, Properties info) throws SnowflakeSQLException { // Setup proxy properties. if (info != null && info.size() > 0 && info.getProperty(SFSessionProperty.USE_PROXY.getPropertyKey()) != null) { Boolean useProxy = Boolean.valueOf(info.getProperty(SFSessionProperty.USE_PROXY.getPropertyKey())); if (useProxy) { // set up other proxy related values.
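// For illustration (hypothetical values, not defaults): proxyHost=proxy.example.com, proxyPort=8080, // nonProxyHosts=*.internal.example|localhost, proxyProtocol=http would yield an HTTP proxy key with // those settings and, if proxyUser/proxyPassword are set, basic credentials.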
String proxyHost = info.getProperty(SFSessionProperty.PROXY_HOST.getPropertyKey()); int proxyPort; try { proxyPort = Integer.parseInt(info.getProperty(SFSessionProperty.PROXY_PORT.getPropertyKey())); } catch (NumberFormatException | NullPointerException e) { throw new SnowflakeSQLException( ErrorCode.INVALID_PROXY_PROPERTIES, "Could not parse port number"); } String proxyUser = info.getProperty(SFSessionProperty.PROXY_USER.getPropertyKey()); String proxyPassword = info.getProperty(SFSessionProperty.PROXY_PASSWORD.getPropertyKey()); String nonProxyHosts = info.getProperty(SFSessionProperty.NON_PROXY_HOSTS.getPropertyKey()); String proxyProtocol = info.getProperty(SFSessionProperty.PROXY_PROTOCOL.getPropertyKey()); String userAgentSuffix = info.getProperty(SFSessionProperty.USER_AGENT_SUFFIX.getPropertyKey()); Boolean gzipDisabled = isNullOrEmpty(info.getProperty(SFSessionProperty.GZIP_DISABLED.getPropertyKey())) ? false : Boolean.valueOf( info.getProperty(SFSessionProperty.GZIP_DISABLED.getPropertyKey())); // create key for proxy properties return new HttpClientSettingsKey( mode, proxyHost, proxyPort, nonProxyHosts, proxyUser, proxyPassword, proxyProtocol, userAgentSuffix, gzipDisabled); } } // if no proxy properties, return key with only OCSP mode return new HttpClientSettingsKey(mode); } /** * Round the time value from milliseconds to seconds so the seconds can be used to create * SimpleDateFormatter. Negative values have to be rounded to the next negative value, while * positive values should be cut off with no rounding. * * @param millis milliseconds * @return seconds as long value */ public static long getSecondsFromMillis(long millis) { long returnVal; if (millis < 0) { returnVal = (long) Math.ceil((double) Math.abs(millis) / 1000); returnVal *= -1; } else { returnVal = millis / 1000; } return returnVal; } /** * Get the time value in session timezone instead of UTC calculation done by java.sql.Time. * * @param time time in seconds * @param nanos nanoseconds * @return time in session timezone */ public static Time getTimeInSessionTimezone(Long time, int nanos) { LocalDateTime lcd = LocalDateTime.ofEpochSecond(time, nanos, ZoneOffset.UTC); Time ts = Time.valueOf(lcd.toLocalTime()); // Time.valueOf() will create the time without the nanoseconds i.e. 
only hh:mm:ss // Using calendar to add the nanoseconds back to time Calendar c = Calendar.getInstance(); c.setTimeInMillis(ts.getTime()); c.add(Calendar.MILLISECOND, nanos / 1000000); ts.setTime(c.getTimeInMillis()); return ts; } /** * Helper function to convert system properties to boolean * * @param systemProperty name of the system property * @param defaultValue default value used * @return the value of the system property as boolean, else the default value */ public static boolean convertSystemPropertyToBooleanValue( String systemProperty, boolean defaultValue) { String systemPropertyValue = systemGetProperty(systemProperty); if (systemPropertyValue != null) { return Boolean.parseBoolean(systemPropertyValue); } return defaultValue; } /** * Helper function to convert environment variable to boolean * * @param envVariableKey property name of the environment variable * @param defaultValue default value used * @return the value of the environment variable as boolean, else the default value */ public static boolean convertSystemGetEnvToBooleanValue( String envVariableKey, boolean defaultValue) { String environmentVariableValue = systemGetEnv(envVariableKey); if (environmentVariableValue != null) { return Boolean.parseBoolean(environmentVariableValue); } return defaultValue; } public static T mapSFExceptionToSQLException(ThrowingCallable action) throws SQLException { try { return action.call(); } catch (SFException e) { throw new SQLException(e); } } public static String getJsonNodeStringValue(JsonNode node) throws SFException { if (node.isNull()) { return null; } return node.isValueNode() ? node.asText() : node.toString(); } /** * Method introduced to avoid inconsistencies in custom headers handling, since these are defined * on drivers side e.g. some drivers might internally convert headers to canonical form. * * @param input map input * @return case insensitive map */ public static Map createCaseInsensitiveMap(Map input) { Map caseInsensitiveMap = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); if (input != null) { caseInsensitiveMap.putAll(input); } return caseInsensitiveMap; } /** * toCaseInsensitiveMap, but adjusted to Headers[] argument type * * @param headers array of headers * @return case insensitive map */ public static Map createCaseInsensitiveMap(Header[] headers) { if (headers != null) { return createCaseInsensitiveMap( stream(headers) .collect(Collectors.toMap(NameValuePair::getName, NameValuePair::getValue))); } else { return new TreeMap<>(String.CASE_INSENSITIVE_ORDER); } } /** * create a directory with Owner only permission (0600) * * @param location the directory location * @return true if directory was created successfully, false otherwise */ public static boolean createOwnerOnlyPermissionDir(String location) { if (isWindows()) { File dir = new File(location); return dir.mkdirs(); } boolean isDirCreated = true; Path dir = Paths.get(location); try { Files.createDirectory( dir, PosixFilePermissions.asFileAttribute(directoryOwnerOnlyPermission)); } catch (IOException e) { logger.error( "Failed to set OwnerOnly permission for {}. This may cause the file download to fail ", location); isDirCreated = false; } return isDirCreated; } public static void assureOnlyUserAccessibleFilePermissions( File file, boolean isOwnerOnlyStageFilePermissionsEnabled) throws IOException { if (isWindows()) { return; } if (!isOwnerOnlyStageFilePermissionsEnabled) { // If the owner only stage file permissions are not enabled, we do not need to set the file // permissions. 
return; } boolean disableUserPermissions = file.setReadable(false, false) && file.setWritable(false, false) && file.setExecutable(false, false); boolean setOwnerPermissionsOnly = file.setReadable(true, true) && file.setWritable(true, true); if (disableUserPermissions && setOwnerPermissionsOnly) { logger.info("Successfully set OwnerOnly permission for {}. ", file.getAbsolutePath()); } else { file.delete(); logger.error( "Failed to set OwnerOnly permission for {}. Failed to download", file.getAbsolutePath()); throw new IOException( String.format( "Failed to set OwnerOnly permission for %s. Failed to download", file.getAbsolutePath())); } } /** * Check whether the OS is Windows * * @return boolean */ public static boolean isWindows() { return Constants.getOS() == Constants.OS.WINDOWS; } public static boolean isNullOrEmpty(String str) { return str == null || str.isEmpty(); } /** Returns {@code true} when the node exists and carries a non-null value. */ public static boolean isJsonNodePresent(JsonNode node) { return !node.isMissingNode() && !node.isNull(); } /** * Converts byte array to hex string * * @param bytes a byte array * @return a string in hexadecimal code */ public static String byteToHexString(byte[] bytes) { final char[] hexArray = "0123456789ABCDEF".toCharArray(); char[] hexChars = new char[bytes.length * 2]; for (int j = 0; j < bytes.length; j++) { int v = bytes[j] & 0xFF; hexChars[j * 2] = hexArray[v >>> 4]; hexChars[j * 2 + 1] = hexArray[v & 0x0F]; } return new String(hexChars); } /** * Converts a simple wildcard pattern (where only '*' is special) into a safe regex string by * quoting all literal parts via Pattern.quote(), preventing any regex metacharacters from being * interpreted. This avoids ReDoS vulnerabilities when patterns originate from user input.
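* * For example (illustrative): the glob "*.example.com" is compiled to the regex \Q\E.*\Q.example.com\E, so "db.example.com" matches case-insensitively while every non-wildcard character, including the dot, stays literal.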
*/ public static Pattern globToSafePattern(String glob) { String safeRegex = Arrays.stream(glob.split("\\*", -1)).map(Pattern::quote).collect(Collectors.joining(".*")); return Pattern.compile(safeRegex, Pattern.CASE_INSENSITIVE); } public static boolean hostnameMatchesGlob(String hostname, String pattern) { if (hostname == null || pattern == null) { return false; } return globToSafePattern(pattern.trim()).matcher(hostname).matches(); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/cloud/storage/AwsSdkGCPSigner.java ================================================ package net.snowflake.client.internal.jdbc.cloud.storage; import java.util.HashMap; import java.util.List; import java.util.Map; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.signer.Signer; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.http.SdkHttpMethod; public class AwsSdkGCPSigner implements Signer { private final String bearerToken; public AwsSdkGCPSigner(String bearerToken) { this.bearerToken = bearerToken; } private static final Map headerMap = new HashMap() { { put("x-amz-storage-class", "x-goog-storage-class"); put("x-amz-acl", "x-goog-acl"); put("x-amz-date", "x-goog-date"); put("x-amz-copy-source", "x-goog-copy-source"); put("x-amz-metadata-directive", "x-goog-metadata-directive"); put("x-amz-copy-source-if-match", "x-goog-copy-source-if-match"); put("x-amz-copy-source-if-none-match", "x-goog-copy-source-if-none-match"); put("x-amz-copy-source-if-unmodified-since", "x-goog-copy-source-if-unmodified-since"); put("x-amz-copy-source-if-modified-since", "x-goog-copy-source-if-modified-since"); } }; @Override public SdkHttpFullRequest sign( SdkHttpFullRequest request, ExecutionAttributes executionAttributes) { SdkHttpFullRequest.Builder requestBuilder = request.toBuilder(); // Remove any existing Authorization header (from AWS signing) requestBuilder.removeHeader("Authorization"); // Add the Bearer token for GCP authentication if (bearerToken != null && !bearerToken.isEmpty()) { requestBuilder.putHeader("Authorization", "Bearer " + bearerToken); } if (request.method() == SdkHttpMethod.GET) { requestBuilder.putHeader("Accept-Encoding", "gzip,deflate"); } // Create a copy of headers for iteration to avoid concurrent modification Map> headersCopy = new HashMap<>(request.headers()); for (Map.Entry> entry : headersCopy.entrySet()) { String entryKey = entry.getKey().toLowerCase(); if (headerMap.containsKey(entryKey)) { // Add the mapped Google Cloud header for (String value : entry.getValue()) { requestBuilder.putHeader(headerMap.get(entryKey), value); } } else if (entryKey.startsWith("x-amz-meta-")) { // Transform x-amz-meta- headers to x-goog-meta- String googleMetaHeader = entryKey.replace("x-amz-meta-", "x-goog-meta-"); for (String value : entry.getValue()) { requestBuilder.putHeader(googleMetaHeader, value); } } } return requestBuilder.build(); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/cloud/storage/AzureObjectSummariesIterator.java ================================================ package net.snowflake.client.internal.jdbc.cloud.storage; import com.azure.storage.blob.models.BlobItem; import java.util.Iterator; import java.util.NoSuchElementException; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; /** * Iterator class for ObjectSummary objects on 
Azure. Returns platform-independent instances * (StorageObjectSummary) */ public class AzureObjectSummariesIterator implements Iterator { private static final SFLogger logger = SFLoggerFactory.getLogger(AzureObjectSummariesIterator.class); private final Iterator itemIterator; private final String location; /* * Constructs a summaries iterator object from an iterable derived by a * listBlobs method * @param azCloudBlobIterable an iterable set of ListBlobItems */ public AzureObjectSummariesIterator(Iterable azCloudBlobIterable, String location) { itemIterator = azCloudBlobIterable.iterator(); this.location = location; } public boolean hasNext() { // SNOW-442579 azure itemIterator.hasNext() is a lazy operation, which may cause // StorageException. And it seems Azure wraps the StorageException within the // NoSuchElementException. try { return itemIterator.hasNext(); } catch (NoSuchElementException ex) { logger.debug("Failed to run azure iterator.hasNext().", ex); throw new StorageProviderException( (Exception) ex.getCause()); // ex.getCause() should be StorageException } } public StorageObjectSummary next() { BlobItem blobItem = itemIterator.next(); // In the new Azure SDK, BlobItem is the standard type for blob listings // No need to check for CloudBlob vs CloudDirectory as in the old SDK return StorageObjectSummary.createFromAzureBlobItem(blobItem, location); } public void remove() { throw new UnsupportedOperationException("remove() method not supported"); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/cloud/storage/CloudStorageProxyFactory.java ================================================ package net.snowflake.client.internal.jdbc.cloud.storage; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty; import com.azure.core.http.ProxyOptions; import com.google.api.client.http.apache.v2.ApacheHttpTransport; import com.google.auth.http.HttpTransportFactory; import java.net.InetSocketAddress; import java.util.Arrays; import java.util.Properties; import java.util.Set; import java.util.stream.Collectors; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.core.HttpClientSettingsKey; import net.snowflake.client.internal.core.HttpProtocol; import net.snowflake.client.internal.core.SFSessionProperty; import net.snowflake.client.internal.core.SdkProxyRoutePlanner; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.client.internal.log.SFLoggerUtil; import org.apache.http.HttpHost; import org.apache.http.auth.AuthScope; import org.apache.http.auth.UsernamePasswordCredentials; import org.apache.http.impl.client.BasicCredentialsProvider; import org.apache.http.impl.client.HttpClientBuilder; import software.amazon.awssdk.http.nio.netty.ProxyConfiguration; public class CloudStorageProxyFactory { private static final SFLogger logger = SFLoggerFactory.getLogger(CloudStorageProxyFactory.class); // ── Extraction (shared logic) ────────────────────────────────────────────── /** * Extracts proxy settings from a session's HttpClientSettingsKey.
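* * Typical flow (a sketch mirroring the convenience methods below): extract once, then convert per provider, e.g. {@code ProxySettings s = extractFromKey(key); return s == null ? null : toS3ProxyConfiguration(s);} where a {@code null} result means no proxy is configured.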
* * @return ProxySettings or null if no proxy is configured */ static ProxySettings extractFromKey(HttpClientSettingsKey key) { if (key != null && key.usesProxy()) { return new ProxySettings( key.getProxyHost(), key.getProxyPort(), key.getProxyHttpProtocol(), key.getProxyUser(), key.getProxyPassword(), key.getNonProxyHosts()); } return null; } /** * Extracts proxy settings from sessionless proxy properties. * * @return ProxySettings or null if no proxy is configured * @throws SnowflakeSQLException on invalid port number */ static ProxySettings extractFromProperties(Properties proxyProperties) throws SnowflakeSQLException { if (proxyProperties != null && proxyProperties.size() > 0 && proxyProperties.getProperty(SFSessionProperty.USE_PROXY.getPropertyKey()) != null) { Boolean useProxy = Boolean.valueOf( proxyProperties.getProperty(SFSessionProperty.USE_PROXY.getPropertyKey())); if (useProxy) { String proxyHost = proxyProperties.getProperty(SFSessionProperty.PROXY_HOST.getPropertyKey()); int proxyPort; try { proxyPort = Integer.parseInt( proxyProperties.getProperty(SFSessionProperty.PROXY_PORT.getPropertyKey())); } catch (NumberFormatException | NullPointerException e) { throw new SnowflakeSQLException( ErrorCode.INVALID_PROXY_PROPERTIES, "Could not parse port number"); } String proxyUser = proxyProperties.getProperty(SFSessionProperty.PROXY_USER.getPropertyKey()); String proxyPassword = proxyProperties.getProperty(SFSessionProperty.PROXY_PASSWORD.getPropertyKey()); String nonProxyHosts = proxyProperties.getProperty(SFSessionProperty.NON_PROXY_HOSTS.getPropertyKey()); String proxyProtocol = proxyProperties.getProperty(SFSessionProperty.PROXY_PROTOCOL.getPropertyKey()); HttpProtocol protocol = (!isNullOrEmpty(proxyProtocol) && proxyProtocol.equalsIgnoreCase("https")) ? 
HttpProtocol.HTTPS : HttpProtocol.HTTP; return new ProxySettings( proxyHost, proxyPort, protocol, proxyUser, proxyPassword, nonProxyHosts); } } return null; } // ── CSP conversion methods ───────────────────────────────────────────────── static ProxyConfiguration toS3ProxyConfiguration(ProxySettings s) { ProxyConfiguration.Builder proxyBuilder = ProxyConfiguration.builder() .scheme(s.getProtocol().getScheme()) .host(s.getHost()) .port(s.getPort()) .useEnvironmentVariableValues(false) .useSystemPropertyValues(false); if (s.hasNonProxyHosts()) { proxyBuilder.nonProxyHosts(prepareNonProxyHostsForS3(s.getNonProxyHosts())); } if (s.hasCredentials()) { proxyBuilder.username(s.getUser()); proxyBuilder.password(s.getPassword()); } return proxyBuilder.build(); } static ProxyOptions toAzureProxyOptions(ProxySettings s) { ProxyOptions proxyOptions = new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(s.getHost(), s.getPort())); if (s.hasCredentials()) { proxyOptions.setCredentials(s.getUser(), s.getPassword()); } proxyOptions.setNonProxyHosts(s.getNonProxyHosts()); return proxyOptions; } static HttpTransportFactory toGCSHttpTransportFactory(ProxySettings s) { HttpClientBuilder clientBuilder = HttpClientBuilder.create(); clientBuilder.setProxy(new HttpHost(s.getHost(), s.getPort(), s.getProtocol().getScheme())); SdkProxyRoutePlanner routePlanner = new SdkProxyRoutePlanner(s.getHost(), s.getPort(), s.getProtocol(), s.getNonProxyHosts()); clientBuilder.setRoutePlanner(routePlanner); if (s.hasCredentials()) { BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); credentialsProvider.setCredentials( new AuthScope(s.getHost(), s.getPort()), new UsernamePasswordCredentials(s.getUser(), s.getPassword())); clientBuilder.setDefaultCredentialsProvider(credentialsProvider); } final ApacheHttpTransport transport = new ApacheHttpTransport(clientBuilder.build()); return () -> transport; } // ── S3 convenience methods ───────────────────────────────────────────────── public static ProxyConfiguration createProxyConfigurationForS3(HttpClientSettingsKey key) { ProxySettings s = extractFromKey(key); if (s == null) { logger.debug("Omitting S3 proxy setup"); return null; } logProxySettings("S3", s); return toS3ProxyConfiguration(s); } public static ProxyConfiguration createSessionlessProxyConfigurationForS3( Properties proxyProperties) throws SnowflakeSQLException { ProxySettings s = extractFromProperties(proxyProperties); if (s == null) { logger.debug("Omitting sessionless S3 proxy setup"); return null; } logProxySettings("sessionless S3", s); return toS3ProxyConfiguration(s); } // ── Azure convenience methods ────────────────────────────────────────────── public static ProxyOptions createProxyOptionsForAzure(HttpClientSettingsKey key) { ProxySettings s = extractFromKey(key); if (s == null) { logger.debug("Omitting Azure proxy setup"); return null; } logProxySettings("Azure", s); return toAzureProxyOptions(s); } public static ProxyOptions createSessionlessProxyOptionsForAzure(Properties proxyProperties) throws SnowflakeSQLException { ProxySettings s = extractFromProperties(proxyProperties); if (s == null) { logger.debug("Omitting sessionless Azure proxy setup"); return null; } logProxySettings("sessionless Azure", s); return toAzureProxyOptions(s); } // ── GCS convenience methods ──────────────────────────────────────────────── public static HttpTransportFactory createHttpTransportForGCS(HttpClientSettingsKey key) { ProxySettings s = extractFromKey(key); if (s == null) { 
logger.debug("Omitting GCS proxy setup"); return null; } logProxySettings("GCS", s); return toGCSHttpTransportFactory(s); } public static HttpTransportFactory createSessionlessHttpTransportForGCS( Properties proxyProperties) throws SnowflakeSQLException { ProxySettings s = extractFromProperties(proxyProperties); if (s == null) { logger.debug("Omitting sessionless GCS proxy setup"); return null; } logProxySettings("sessionless GCS", s); return toGCSHttpTransportFactory(s); } // ── Helpers ──────────────────────────────────────────────────────────────── static Set prepareNonProxyHostsForS3(String nonProxyHosts) { return Arrays.stream(nonProxyHosts.split("\\|")) .map(String::trim) .map(host -> SnowflakeUtil.globToSafePattern(host).pattern()) .collect(Collectors.toSet()); } private static void logProxySettings(String label, ProxySettings s) { logger.debug( "Setting {}, proxy. Host: {}, port: {}, protocol: {}, non-proxy hosts: {}, user: {}, password is {}", label, s.getHost(), s.getPort(), s.getProtocol(), s.getNonProxyHosts(), s.getUser(), SFLoggerUtil.isVariableProvided(s.getPassword())); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/cloud/storage/CommonObjectMetadata.java ================================================ package net.snowflake.client.internal.jdbc.cloud.storage; import java.util.Map; import java.util.TreeMap; import net.snowflake.client.internal.jdbc.SnowflakeUtil; /** * Implements platform-independent interface Azure BLOB and GCS object metadata * *
<p>
Only the metadata accessors and mutators used by the JDBC client currently are supported, * additional methods should be added as needed */ public class CommonObjectMetadata implements StorageObjectMetadata { private long contentLength; private final Map userDefinedMetadata; private String contentEncoding; CommonObjectMetadata() { userDefinedMetadata = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); } /* * Constructs a common metadata object * from the set of parameters that the JDBC client is using */ CommonObjectMetadata( long contentLength, String contentEncoding, Map userDefinedMetadata) { this.contentEncoding = contentEncoding; this.contentLength = contentLength; this.userDefinedMetadata = SnowflakeUtil.createCaseInsensitiveMap(userDefinedMetadata); } /** * @return returns a Map/key-value pairs of metadata properties */ @Override public Map getUserMetadata() { return userDefinedMetadata; } /** * @return returns the size of object in bytes */ @Override public long getContentLength() { return contentLength; } /** Sets size of the associated object in bytes */ @Override public void setContentLength(long contentLength) { this.contentLength = contentLength; } /** Adds the key value pair of custom user-metadata for the associated object. */ @Override public void addUserMetadata(String key, String value) { userDefinedMetadata.put(key, value); } /** * Sets the optional Content-Encoding HTTP header specifying what content encodings, have been * applied to the object and what decoding mechanisms must be applied, in order to obtain the * media-type referenced by the Content-Type field. */ @Override public void setContentEncoding(String encoding) { contentEncoding = encoding; } /* * @return returns the content encoding type */ @Override public String getContentEncoding() { return contentEncoding; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/cloud/storage/EncryptionProvider.java ================================================ package net.snowflake.client.internal.jdbc.cloud.storage; import static java.nio.file.StandardOpenOption.CREATE; import static java.nio.file.StandardOpenOption.READ; import java.io.File; import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.nio.channels.FileChannel; import java.nio.file.Files; import java.security.InvalidAlgorithmParameterException; import java.security.InvalidKeyException; import java.security.NoSuchAlgorithmException; import java.security.NoSuchProviderException; import java.security.SecureRandom; import java.util.Base64; import javax.crypto.BadPaddingException; import javax.crypto.Cipher; import javax.crypto.CipherInputStream; import javax.crypto.IllegalBlockSizeException; import javax.crypto.NoSuchPaddingException; import javax.crypto.SecretKey; import javax.crypto.spec.IvParameterSpec; import javax.crypto.spec.SecretKeySpec; import net.snowflake.client.internal.jdbc.MatDesc; import net.snowflake.common.core.RemoteStoreFileEncryptionMaterial; /** Handles encryption and decryption using AES CBC (for files) and ECB (for keys). 
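*
* <p>Illustrative flow (added commentary, not part of the original javadoc): the stage's
* base64-encoded "query stage master key" (QSMK) unwraps the per-file key with AES/ECB, and
* that per-file key plus the base64 IV decrypt the payload with AES/CBC. A hedged usage
* sketch using the method defined below; stream and material names are placeholders:
*
* <pre>
* InputStream plain =
*     EncryptionProvider.decryptStream(encryptedStream, keyBase64, ivBase64, encMat);
* </pre>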
*/
public class EncryptionProvider {
  private static final String AES = "AES";
  private static final String FILE_CIPHER = "AES/CBC/PKCS5Padding";
  private static final String KEY_CIPHER = "AES/ECB/PKCS5Padding";
  private static final int BUFFER_SIZE = 2 * 1024 * 1024; // 2 MB
  private static final ThreadLocal<SecureRandom> secRnd =
      ThreadLocal.withInitial(SecureRandom::new);

  /**
   * Decrypts an InputStream
   *
   * @param inputStream input stream
   * @param keyBase64 keyBase64
   * @param ivBase64 ivBase64
   * @param encMat RemoteStoreFileEncryptionMaterial
   * @return InputStream
   * @throws NoSuchPaddingException when padding mechanism is not available for this environment
   * @throws NoSuchAlgorithmException when the requested algorithm is not available for this
   *     environment
   * @throws InvalidKeyException when there is an issue with the key value
   * @throws BadPaddingException when the data is not padded as expected
   * @throws IllegalBlockSizeException when the length of data is incorrect
   * @throws InvalidAlgorithmParameterException when the cipher parameters (e.g. the IV) are
   *     invalid
   */
  public static InputStream decryptStream(
      InputStream inputStream,
      String keyBase64,
      String ivBase64,
      RemoteStoreFileEncryptionMaterial encMat)
      throws NoSuchPaddingException, NoSuchAlgorithmException, InvalidKeyException,
          BadPaddingException, IllegalBlockSizeException, InvalidAlgorithmParameterException {
    byte[] kekBytes = Base64.getDecoder().decode(encMat.getQueryStageMasterKey());
    byte[] keyBytes = Base64.getDecoder().decode(keyBase64);
    byte[] ivBytes = Base64.getDecoder().decode(ivBase64);
    // Unwrap the per-file key with the query stage master key (AES/ECB)
    SecretKey kek = new SecretKeySpec(kekBytes, 0, kekBytes.length, AES);
    Cipher keyCipher = Cipher.getInstance(KEY_CIPHER);
    keyCipher.init(Cipher.DECRYPT_MODE, kek);
    byte[] fileKeyBytes = keyCipher.doFinal(keyBytes);
    SecretKey fileKey = new SecretKeySpec(fileKeyBytes, AES);
    // Decrypt the payload with the per-file key and IV (AES/CBC)
    Cipher dataCipher = Cipher.getInstance(FILE_CIPHER);
    IvParameterSpec iv = new IvParameterSpec(ivBytes);
    dataCipher.init(Cipher.DECRYPT_MODE, fileKey, iv);
    return new CipherInputStream(inputStream, dataCipher);
  }

  /*
   * decrypt
   * Decrypts a file given the key and iv. Uses AES decryption.
   */
  public static void decrypt(
      File file, String keyBase64, String ivBase64, RemoteStoreFileEncryptionMaterial encMat)
      throws NoSuchAlgorithmException, NoSuchPaddingException, InvalidKeyException,
          IllegalBlockSizeException, BadPaddingException, InvalidAlgorithmParameterException,
          IOException {
    byte[] keyBytes = Base64.getDecoder().decode(keyBase64);
    byte[] ivBytes = Base64.getDecoder().decode(ivBase64);
    byte[] kekBytes = Base64.getDecoder().decode(encMat.getQueryStageMasterKey());
    final SecretKey fileKey;
    // Decrypt file key
    {
      final Cipher keyCipher = Cipher.getInstance(KEY_CIPHER);
      SecretKey kek = new SecretKeySpec(kekBytes, 0, kekBytes.length, AES);
      keyCipher.init(Cipher.DECRYPT_MODE, kek);
      byte[] fileKeyBytes = keyCipher.doFinal(keyBytes);
      // previous version: fileKey = new SecretKeySpec(fileKeyBytes, offset = 0, len = qsmk.length,
      // AES);
      // This incorrectly assumes fileKey is always same length as qsmk. If we perform put from
      // jdbc, fileKey and qsmk are same length,
      // but in the case of AwsStorageClient.putObjectInternal() in GS code, they are not. This
      // leads to some decryption bugs.
// See: SnowflakeDriverLatestIt.testS3PutInGs fileKey = new SecretKeySpec(fileKeyBytes, AES); } // Decrypt file { final Cipher fileCipher = Cipher.getInstance(FILE_CIPHER); final IvParameterSpec iv = new IvParameterSpec(ivBytes); final byte[] buffer = new byte[BUFFER_SIZE]; fileCipher.init(Cipher.DECRYPT_MODE, fileKey, iv); long totalBytesRead = 0; // Overwrite file contents buffer-wise with decrypted data try (InputStream is = Files.newInputStream(file.toPath(), READ); InputStream cis = new CipherInputStream(is, fileCipher); OutputStream os = Files.newOutputStream(file.toPath(), CREATE); ) { int bytesRead; while ((bytesRead = cis.read(buffer)) > -1) { os.write(buffer, 0, bytesRead); totalBytesRead += bytesRead; } } // Discard any padding that the encrypted file had try (FileChannel fc = new FileOutputStream(file, true).getChannel()) { fc.truncate(totalBytesRead); } } } /** * encrypt Encrypts a file using AES encryption. The key and iv are generated. The matdesc field * is added to the metadata object. The key and iv are added to the JSON block in the * encryptionData metadata object. */ public static CipherInputStream encrypt( StorageObjectMetadata meta, long originalContentLength, InputStream src, RemoteStoreFileEncryptionMaterial encMat, SnowflakeStorageClient client) throws InvalidKeyException, InvalidAlgorithmParameterException, NoSuchAlgorithmException, NoSuchProviderException, NoSuchPaddingException, FileNotFoundException, IllegalBlockSizeException, BadPaddingException { final byte[] decodedKey = Base64.getDecoder().decode(encMat.getQueryStageMasterKey()); final int keySize = decodedKey.length; final byte[] fileKeyBytes = new byte[keySize]; final byte[] ivData; final CipherInputStream cis; final int blockSize; { final Cipher fileCipher = Cipher.getInstance(FILE_CIPHER); blockSize = fileCipher.getBlockSize(); // Create IV ivData = new byte[blockSize]; secRnd.get().nextBytes(ivData); final IvParameterSpec iv = new IvParameterSpec(ivData); // Create file key secRnd.get().nextBytes(fileKeyBytes); SecretKey fileKey = new SecretKeySpec(fileKeyBytes, 0, keySize, AES); // Init cipher fileCipher.init(Cipher.ENCRYPT_MODE, fileKey, iv); // Create encrypting input stream cis = new CipherInputStream(src, fileCipher); } // Encrypt the file key with the QSMK { final Cipher keyCipher = Cipher.getInstance(KEY_CIPHER); SecretKey queryStageMasterKey = new SecretKeySpec(decodedKey, 0, keySize, AES); // Init cipher keyCipher.init(Cipher.ENCRYPT_MODE, queryStageMasterKey); byte[] encryptedKey = keyCipher.doFinal(fileKeyBytes); // Store metadata MatDesc matDesc = new MatDesc(encMat.getSmkId(), encMat.getQueryId(), keySize * 8); // Round up length to next multiple of the block size // Sizes that are multiples of the block size need to be padded to next // multiple long contentLength = ((originalContentLength + blockSize) / blockSize) * blockSize; // THIS MUTATES METADATA TO ADD ENCRYPTION DATA client.addEncryptionMetadata(meta, matDesc, ivData, encryptedKey, contentLength); } return cis; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/cloud/storage/GCSAccessStrategy.java ================================================ package net.snowflake.client.internal.jdbc.cloud.storage; import java.io.File; import java.io.InputStream; import java.util.Map; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.core.SFSession; import net.snowflake.client.internal.util.SFPair; interface GCSAccessStrategy { 
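// Note: two implementations of this interface live in this package:
// GCSDefaultAccessStrategy, built on the google-cloud-storage client, and
// GCSAccessStrategyAwsSdk, which drives the AWS SDK v2 against GCS's
// S3-compatible endpoint (storage.googleapis.com). Which one a given stage
// uses is decided by the caller and is not visible in this file.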
StorageObjectSummaryCollection listObjects(String remoteStorageLocation, String prefix); StorageObjectMetadata getObjectMetadata(String remoteStorageLocation, String prefix); Map download( int parallelism, String remoteStorageLocation, String stageFilePath, File localFile) throws InterruptedException; SFPair> downloadToStream( String remoteStorageLocation, String stageFilePath, boolean isEncrypting); void uploadWithDownScopedToken( int parallelism, String remoteStorageLocation, String destFileName, String contentEncoding, Map metadata, long contentLength, InputStream content, String queryId) throws InterruptedException; boolean handleStorageException( Exception ex, int retryCount, String operation, SFSession session, String command, String queryId, SnowflakeGCSClient gcsClient) throws SnowflakeSQLException; void shutdown(); } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/cloud/storage/GCSAccessStrategyAwsSdk.java ================================================ package net.snowflake.client.internal.jdbc.cloud.storage; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.createDefaultExecutorService; import static net.snowflake.client.internal.jdbc.cloud.storage.S3ErrorHandler.retryRequestWithExponentialBackoff; import static net.snowflake.client.internal.jdbc.cloud.storage.S3ErrorHandler.throwIfClientExceptionOrMaxRetryReached; import java.io.BufferedInputStream; import java.io.File; import java.io.InputStream; import java.net.URI; import java.net.URISyntaxException; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ThreadPoolExecutor; import java.util.stream.Collectors; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.api.http.HttpHeadersCustomizer; import net.snowflake.client.internal.core.HeaderCustomizerHttpRequestInterceptor; import net.snowflake.client.internal.core.SFBaseSession; import net.snowflake.client.internal.core.SFSession; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.client.internal.util.SFPair; import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider; import software.amazon.awssdk.core.ResponseInputStream; import software.amazon.awssdk.core.async.AsyncRequestBody; import software.amazon.awssdk.core.async.AsyncResponseTransformer; import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; import software.amazon.awssdk.core.client.config.SdkAdvancedClientOption; import software.amazon.awssdk.core.exception.SdkException; import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; import software.amazon.awssdk.http.nio.netty.ProxyConfiguration; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.services.s3.S3AsyncClientBuilder; import software.amazon.awssdk.services.s3.model.GetObjectRequest; import software.amazon.awssdk.services.s3.model.GetObjectResponse; import software.amazon.awssdk.services.s3.model.HeadObjectRequest; import software.amazon.awssdk.services.s3.model.HeadObjectResponse; import software.amazon.awssdk.services.s3.model.ListObjectsRequest; import software.amazon.awssdk.services.s3.model.ListObjectsResponse; import 
software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.transfer.s3.S3TransferManager; import software.amazon.awssdk.transfer.s3.model.DownloadFileRequest; import software.amazon.awssdk.transfer.s3.model.FileDownload; import software.amazon.awssdk.transfer.s3.model.Upload; import software.amazon.awssdk.transfer.s3.model.UploadRequest; class GCSAccessStrategyAwsSdk implements GCSAccessStrategy { private static final SFLogger logger = SFLoggerFactory.getLogger(GCSAccessStrategyAwsSdk.class); private final S3AsyncClient amazonClient; GCSAccessStrategyAwsSdk(StageInfo stage, SFBaseSession session) throws SnowflakeSQLException { String accessToken = (String) stage.getCredentials().get("GCS_ACCESS_TOKEN"); Optional oEndpoint = stage.gcsCustomEndpoint(); String endpoint = "https://storage.googleapis.com"; if (oEndpoint.isPresent()) { endpoint = oEndpoint.get(); } if (stage.getStorageAccount() != null && endpoint.startsWith(stage.getStorageAccount())) { endpoint = endpoint.replaceFirst(stage.getStorageAccount() + ".", ""); } S3AsyncClientBuilder clientBuilder; try { clientBuilder = S3AsyncClient.builder() .region(Region.US_WEST_2) // dummy region, just to satisfy the builder .forcePathStyle(false) .endpointOverride(new URI(endpoint)); } catch (URISyntaxException e) { throw new SnowflakeSQLException( ErrorCode.FILE_TRANSFER_ERROR, "Could not parse Google storage endpoint: " + endpoint); } ClientOverrideConfiguration.Builder overrideConfiguration = ClientOverrideConfiguration.builder(); // Add signer interceptor for bearer token auth and header mapping overrideConfiguration.putAdvancedOption( SdkAdvancedClientOption.SIGNER, new AwsSdkGCPSigner(accessToken)); ProxyConfiguration proxyConfiguration; if (session != null) { proxyConfiguration = CloudStorageProxyFactory.createProxyConfigurationForS3(session.getHttpClientKey()); } else { proxyConfiguration = CloudStorageProxyFactory.createSessionlessProxyConfigurationForS3( stage.getProxyProperties()); } if (session instanceof SFSession) { List headersCustomizers = ((SFSession) session).getHttpHeadersCustomizers(); if (headersCustomizers != null && !headersCustomizers.isEmpty()) { overrideConfiguration.addExecutionInterceptor( new HeaderCustomizerHttpRequestInterceptor(headersCustomizers)); } } clientBuilder.overrideConfiguration(overrideConfiguration.build()); // Use anonymous credentials to minimize AWS signing clientBuilder.credentialsProvider(AnonymousCredentialsProvider.create()); clientBuilder.httpClientBuilder( NettyNioAsyncHttpClient.builder().proxyConfiguration(proxyConfiguration)); amazonClient = clientBuilder.build(); } @Override public StorageObjectSummaryCollection listObjects(String remoteStorageLocation, String prefix) throws StorageProviderException { ListObjectsResponse objListing = amazonClient .listObjects( ListObjectsRequest.builder().bucket(remoteStorageLocation).prefix(prefix).build()) .join(); return new StorageObjectSummaryCollection(objListing.contents(), remoteStorageLocation); } @Override public StorageObjectMetadata getObjectMetadata(String remoteStorageLocation, String prefix) { HeadObjectResponse response = amazonClient .headObject( HeadObjectRequest.builder().bucket(remoteStorageLocation).key(prefix).build()) .join(); S3ObjectMetadata metadata = new S3ObjectMetadata(response); Map userMetadata = response.metadata().entrySet().stream() .filter(entry -> entry.getKey().startsWith("x-goog-meta-")) .collect( Collectors.toMap( e -> e.getKey().replaceFirst("x-goog-meta-", ""), Map.Entry::getValue)); 
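// Example of the mapping above (illustrative): a response header
// "x-goog-meta-sfc-digest" surfaces here under the user-metadata key
// "sfc-digest". GCS's S3-compatible XML API prefixes user metadata with
// "x-goog-meta-", which downstream consumers do not expect.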
metadata.setUserMetadata(userMetadata); return metadata; } @Override public Map download( int parallelism, String remoteStorageLocation, String stageFilePath, File localFile) throws InterruptedException { logger.debug( "Starting download of file from S3 stage path: {} to {}", stageFilePath, localFile.getAbsolutePath()); logger.debug("Creating executor service for transfer manager with {} threads", parallelism); try (S3TransferManager tx = S3TransferManager.builder() .s3Client(amazonClient) .executor(createDefaultExecutorService("s3-transfer-manager-downloader-", parallelism)) .build()) { // download files from s3 FileDownload fileDownload = tx.downloadFile( DownloadFileRequest.builder() .getObjectRequest( GetObjectRequest.builder() .bucket(remoteStorageLocation) .key(stageFilePath) .build()) .destination(localFile.toPath()) .build()); // Pull object metadata from S3 StorageObjectMetadata meta = this.getObjectMetadata(remoteStorageLocation, stageFilePath); Map metaMap = SnowflakeUtil.createCaseInsensitiveMap(meta.getUserMetadata()); fileDownload.completionFuture().join(); return metaMap; } } @Override public SFPair> downloadToStream( String remoteStorageLocation, String stageFilePath, boolean isEncrypting) { CompletableFuture> streamFuture = amazonClient.getObject( GetObjectRequest.builder().bucket(remoteStorageLocation).key(stageFilePath).build(), AsyncResponseTransformer.toBlockingInputStream()); CompletableFuture metaFuture = amazonClient.headObject( HeadObjectRequest.builder().bucket(remoteStorageLocation).key(stageFilePath).build()); HeadObjectResponse meta = metaFuture.join(); InputStream stream = streamFuture.join(); Map metaMap = SnowflakeUtil.createCaseInsensitiveMap(meta.metadata()); return SFPair.of(stream, metaMap); } @Override public void uploadWithDownScopedToken( int parallelism, String remoteStorageLocation, String destFileName, String contentEncoding, Map metadata, long contentLength, InputStream content, String queryId) { S3ObjectMetadata s3ObjectMetadata = new S3ObjectMetadata(); s3ObjectMetadata.setContentEncoding(contentEncoding); s3ObjectMetadata.setContentLength(contentLength); s3ObjectMetadata.setUserMetadata(metadata); PutObjectRequest request = (s3ObjectMetadata) .getS3PutObjectRequest().toBuilder() .bucket(remoteStorageLocation) .key(destFileName) .build(); logger.debug("Creating executor service for transfer manager with {} threads", parallelism); ThreadPoolExecutor executorService = createDefaultExecutorService("s3-transfer-manager-uploader-", parallelism); try (S3TransferManager tx = S3TransferManager.builder().s3Client(amazonClient).executor(executorService).build()) { // upload files to s3 final Upload upload = tx.upload( UploadRequest.builder() .putObjectRequest(request) .requestBody( AsyncRequestBody.fromInputStream( // wrapping with BufferedInputStream to mitigate // https://github.com/aws/aws-sdk-java-v2/issues/6174 new BufferedInputStream(content), request.contentLength(), executorService)) .build()); upload.completionFuture().join(); logger.info("Uploaded data from input stream to S3 location: {}.", destFileName); } } @Override public boolean handleStorageException( Exception ex, int retryCount, String operation, SFSession session, String command, String queryId, SnowflakeGCSClient gcsClient) throws SnowflakeSQLException { Throwable cause = ex.getCause(); if (cause instanceof SdkException) { logger.debug("GCSAccessStrategyAwsSdk: " + cause.getMessage()); if (retryCount > gcsClient.getMaxRetries() || S3ErrorHandler.isClientException400Or404(cause)) { 
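// 400/404 are client-side errors that retrying cannot fix, so they are
// surfaced immediately; everything else gets exponential-backoff retries.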
throwIfClientExceptionOrMaxRetryReached( operation, session, command, queryId, gcsClient, cause); } else { retryRequestWithExponentialBackoff( ex, retryCount, operation, session, command, gcsClient, queryId, cause); } return true; } else { return false; } } @Override public void shutdown() { if (this.amazonClient != null) { this.amazonClient.close(); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/cloud/storage/GCSDefaultAccessStrategy.java ================================================ package net.snowflake.client.internal.jdbc.cloud.storage; import static net.snowflake.client.internal.core.Constants.CLOUD_STORAGE_CREDENTIALS_EXPIRED; import com.google.api.gax.paging.Page; import com.google.api.gax.rpc.FixedHeaderProvider; import com.google.auth.http.HttpTransportFactory; import com.google.auth.oauth2.AccessToken; import com.google.auth.oauth2.GoogleCredentials; import com.google.cloud.NoCredentials; import com.google.cloud.http.HttpTransportOptions; import com.google.cloud.storage.Blob; import com.google.cloud.storage.BlobId; import com.google.cloud.storage.BlobInfo; import com.google.cloud.storage.HttpStorageOptions; import com.google.cloud.storage.Storage; import com.google.cloud.storage.StorageException; import com.google.cloud.storage.StorageOptions; import java.io.File; import java.io.InputStream; import java.nio.channels.Channels; import java.util.Map; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.core.SFSession; import net.snowflake.client.internal.exception.SnowflakeSQLLoggedException; import net.snowflake.client.internal.jdbc.SnowflakeFileTransferAgent; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.client.internal.util.SFPair; import net.snowflake.common.core.SqlState; class GCSDefaultAccessStrategy implements GCSAccessStrategy { private static final SFLogger logger = SFLoggerFactory.getLogger(GCSDefaultAccessStrategy.class); private Storage gcsClient = null; GCSDefaultAccessStrategy(StageInfo stage, SFSession session) throws SnowflakeSQLException { HttpTransportFactory transportFactory; if (session != null) { transportFactory = CloudStorageProxyFactory.createHttpTransportForGCS(session.getHttpClientKey()); } else { transportFactory = CloudStorageProxyFactory.createSessionlessHttpTransportForGCS(stage.getProxyProperties()); } String accessToken = (String) stage.getCredentials().get("GCS_ACCESS_TOKEN"); if (accessToken != null) { // We are authenticated with an oauth access token. StorageOptions.Builder builder = StorageOptions.newBuilder(); overrideHost(stage, builder); if (SnowflakeGCSClient.areDisabledGcsDefaultCredentials(session)) { logger.debug( "Adding explicit credentials to avoid default credential lookup by the GCS client"); builder.setCredentials(GoogleCredentials.create(new AccessToken(accessToken, null))); } if (transportFactory != null) { builder.setTransportOptions( HttpTransportOptions.newBuilder().setHttpTransportFactory(transportFactory).build()); } // Using GoogleCredential with access token will cause IllegalStateException when the token // is expired and trying to refresh, which cause error cannot be caught. Instead, set a // header so we can caught the error code. 
this.gcsClient = builder .setHeaderProvider( FixedHeaderProvider.create("Authorization", "Bearer " + accessToken)) .build() .getService(); } else { // Use anonymous authentication. HttpStorageOptions.Builder builder = HttpStorageOptions.newBuilder().setCredentials(NoCredentials.getInstance()); overrideHost(stage, builder); if (transportFactory != null) { builder.setTransportOptions( HttpTransportOptions.newBuilder().setHttpTransportFactory(transportFactory).build()); } this.gcsClient = builder.build().getService(); } } private static void overrideHost(StageInfo stage, StorageOptions.Builder builder) { stage .gcsCustomEndpoint() .ifPresent( host -> { if (host.startsWith("https://")) { builder.setHost(host); } else { builder.setHost("https://" + host); } }); } @Override public StorageObjectSummaryCollection listObjects(String remoteStorageLocation, String prefix) { try { logger.debug( "Listing objects in the bucket {} with prefix {}", remoteStorageLocation, prefix); Page blobs = this.gcsClient.list(remoteStorageLocation, Storage.BlobListOption.prefix(prefix)); return new StorageObjectSummaryCollection(blobs); } catch (Exception e) { logger.debug("Failed to list objects", false); throw new StorageProviderException(e); } } @Override public StorageObjectMetadata getObjectMetadata(String remoteStorageLocation, String prefix) { try { BlobId blobId = BlobId.of(remoteStorageLocation, prefix); Blob blob = gcsClient.get(blobId); // GCS returns null if the blob was not found // By design, our storage platform expects to see a "blob not found" situation // as a RemoteStorageProviderException // Hence, we throw a RemoteStorageProviderException if (blob == null) { throw new StorageProviderException( new StorageException( 404, // because blob not found "Blob" + blobId.getName() + " not found in bucket " + blobId.getBucket())); } return new CommonObjectMetadata( blob.getSize(), blob.getContentEncoding(), blob.getMetadata()); } catch (StorageException ex) { throw new StorageProviderException(ex); } } @Override public Map download( int parallelism, String remoteStorageLocation, String stageFilePath, File localFile) { BlobId blobId = BlobId.of(remoteStorageLocation, stageFilePath); Blob blob = gcsClient.get(blobId); if (blob == null) { throw new StorageProviderException( new StorageException( 404, // because blob not found "Blob" + blobId.getName() + " not found in bucket " + blobId.getBucket())); } logger.debug("Starting download without presigned URL", false); blob.downloadTo(localFile.toPath(), Blob.BlobSourceOption.shouldReturnRawInputStream(true)); // Get the user-defined BLOB metadata return SnowflakeUtil.createCaseInsensitiveMap(blob.getMetadata()); } @Override public SFPair> downloadToStream( String remoteStorageLocation, String stageFilePath, boolean isEncrypting) { BlobId blobId = BlobId.of(remoteStorageLocation, stageFilePath); Blob blob = gcsClient.get(blobId); if (blob == null) { throw new StorageProviderException( new StorageException( 404, // because blob not found "Blob" + blobId.getName() + " not found in bucket " + blobId.getBucket())); } InputStream inputStream = Channels.newInputStream(blob.reader()); Map userDefinedMetadata = null; if (isEncrypting) { // Get the user-defined BLOB metadata userDefinedMetadata = SnowflakeUtil.createCaseInsensitiveMap(blob.getMetadata()); } return SFPair.of(inputStream, userDefinedMetadata); } @Override public void uploadWithDownScopedToken( int parallelism, String remoteStorageLocation, String destFileName, String contentEncoding, Map metadata, long 
contentLength, InputStream content, String queryId) { BlobId blobId = BlobId.of(remoteStorageLocation, destFileName); BlobInfo blobInfo = BlobInfo.newBuilder(blobId) .setContentEncoding(contentEncoding) .setMetadata(metadata) .build(); gcsClient.create(blobInfo, content); } @Override public boolean handleStorageException( Exception ex, int retryCount, String operation, SFSession session, String command, String queryId, SnowflakeGCSClient gcsClient) throws SnowflakeSQLException { if (ex instanceof StorageException) { // NOTE: this code path only handle Access token based operation, // presigned URL is not covered. Presigned Url do not raise // StorageException StorageException se = (StorageException) ex; // If we have exceeded the max number of retries, propagate the error if (retryCount > gcsClient.getMaxRetries()) { throw new SnowflakeSQLLoggedException( queryId, session, SqlState.SYSTEM_ERROR, StorageHelper.getOperationException(operation).getMessageCode(), se, operation, se.getCode(), se.getMessage(), se.getReason()); } else { logger.debug( "Encountered exception ({}) during {}, retry count: {}", ex.getMessage(), operation, retryCount); logger.debug("Stack trace: ", ex); // exponential backoff up to a limit int backoffInMillis = gcsClient.getRetryBackoffMin(); if (retryCount > 1) { backoffInMillis <<= (Math.min(retryCount - 1, gcsClient.getRetryBackoffMaxExponent())); } try { logger.debug("Sleep for {} milliseconds before retry", backoffInMillis); Thread.sleep(backoffInMillis); } catch (InterruptedException ex1) { // ignore } if (se.getCode() == 401 && command != null) { if (session != null) { // A 401 indicates that the access token has expired, // we need to refresh the GCS client with the new token SnowflakeFileTransferAgent.renewExpiredToken(session, command, gcsClient); } else { throw new SnowflakeSQLException( queryId, se.getMessage(), CLOUD_STORAGE_CREDENTIALS_EXPIRED, "GCS credentials have expired"); } } } return true; } else { return false; } } @Override public void shutdown() { // nothing to do here } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/cloud/storage/GcmEncryptionProvider.java ================================================ package net.snowflake.client.internal.jdbc.cloud.storage; import static java.nio.file.StandardOpenOption.CREATE; import static java.nio.file.StandardOpenOption.READ; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.nio.channels.FileChannel; import java.nio.file.Files; import java.security.InvalidAlgorithmParameterException; import java.security.InvalidKeyException; import java.security.NoSuchAlgorithmException; import java.security.SecureRandom; import java.util.Base64; import javax.crypto.BadPaddingException; import javax.crypto.Cipher; import javax.crypto.CipherInputStream; import javax.crypto.IllegalBlockSizeException; import javax.crypto.NoSuchPaddingException; import javax.crypto.SecretKey; import javax.crypto.spec.GCMParameterSpec; import javax.crypto.spec.SecretKeySpec; import net.snowflake.client.internal.jdbc.MatDesc; import net.snowflake.common.core.RemoteStoreFileEncryptionMaterial; class GcmEncryptionProvider { private static final int TAG_LENGTH_IN_BITS = 128; private static final int IV_LENGTH_IN_BYTES = 12; private static final String AES = "AES"; private static final String FILE_CIPHER = "AES/GCM/NoPadding"; private static final String KEY_CIPHER = "AES/GCM/NoPadding"; 
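// Illustrative sketch (not part of the original file): both the payload and the
// per-file key are sealed with AES/GCM, each under its own 12-byte IV with
// optional AAD; tampering with either ciphertext makes doFinal() fail with an
// AEADBadTagException (a BadPaddingException subclass). The same primitive in
// isolation, with a hypothetical demo key and plaintext:
//
//   SecureRandom rnd = new SecureRandom();
//   byte[] iv = new byte[12];                                // IV_LENGTH_IN_BYTES
//   rnd.nextBytes(iv);
//   SecretKey key = new SecretKeySpec(new byte[16], "AES");  // demo key only
//   Cipher c = Cipher.getInstance("AES/GCM/NoPadding");
//   c.init(Cipher.ENCRYPT_MODE, key, new GCMParameterSpec(128, iv));
//   c.updateAAD("aad".getBytes(StandardCharsets.UTF_8));
//   byte[] sealed = c.doFinal("hello".getBytes(StandardCharsets.UTF_8));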
private static final int BUFFER_SIZE = 8 * 1024 * 1024; // 8 MB
  private static final ThreadLocal<SecureRandom> random =
      ThreadLocal.withInitial(SecureRandom::new);
  private static final Base64.Decoder base64Decoder = Base64.getDecoder();

  static InputStream encrypt(
      StorageObjectMetadata meta,
      long originalContentLength,
      InputStream src,
      RemoteStoreFileEncryptionMaterial encMat,
      SnowflakeStorageClient client,
      byte[] dataAad,
      byte[] keyAad)
      throws InvalidKeyException, InvalidAlgorithmParameterException, IllegalBlockSizeException,
          BadPaddingException, NoSuchPaddingException, NoSuchAlgorithmException {
    byte[] kek = base64Decoder.decode(encMat.getQueryStageMasterKey());
    int keySize = kek.length;
    byte[] keyBytes = new byte[keySize];
    byte[] dataIvBytes = new byte[IV_LENGTH_IN_BYTES];
    byte[] keyIvBytes = new byte[IV_LENGTH_IN_BYTES];
    initRandomIvsAndFileKey(dataIvBytes, keyIvBytes, keyBytes);
    byte[] encryptedKey = encryptKey(kek, keyBytes, keyIvBytes, keyAad);
    CipherInputStream cis = encryptContent(src, keyBytes, dataIvBytes, dataAad);
    addEncryptionMetadataToStorageClient(
        meta,
        originalContentLength,
        encMat,
        client,
        keySize,
        encryptedKey,
        dataIvBytes,
        keyIvBytes,
        keyAad,
        dataAad);
    return cis;
  }

  private static void initRandomIvsAndFileKey(
      byte[] dataIvData, byte[] fileKeyIvData, byte[] fileKeyBytes) {
    random.get().nextBytes(dataIvData);
    random.get().nextBytes(fileKeyIvData);
    random.get().nextBytes(fileKeyBytes);
  }

  private static byte[] encryptKey(byte[] kekBytes, byte[] keyBytes, byte[] keyIvData, byte[] aad)
      throws InvalidKeyException, InvalidAlgorithmParameterException, IllegalBlockSizeException,
          BadPaddingException, NoSuchPaddingException, NoSuchAlgorithmException {
    SecretKey kek = new SecretKeySpec(kekBytes, 0, kekBytes.length, AES);
    GCMParameterSpec gcmParameterSpec = new GCMParameterSpec(TAG_LENGTH_IN_BITS, keyIvData);
    Cipher keyCipher = Cipher.getInstance(KEY_CIPHER);
    keyCipher.init(Cipher.ENCRYPT_MODE, kek, gcmParameterSpec);
    if (aad != null) {
      keyCipher.updateAAD(aad);
    }
    return keyCipher.doFinal(keyBytes);
  }

  private static CipherInputStream encryptContent(
      InputStream src, byte[] keyBytes, byte[] dataIvBytes, byte[] aad)
      throws InvalidKeyException, InvalidAlgorithmParameterException, NoSuchPaddingException,
          NoSuchAlgorithmException {
    SecretKey fileKey = new SecretKeySpec(keyBytes, 0, keyBytes.length, AES);
    GCMParameterSpec gcmParameterSpec = new GCMParameterSpec(TAG_LENGTH_IN_BITS, dataIvBytes);
    Cipher fileCipher = Cipher.getInstance(FILE_CIPHER);
    fileCipher.init(Cipher.ENCRYPT_MODE, fileKey, gcmParameterSpec);
    if (aad != null) {
      fileCipher.updateAAD(aad);
    }
    return new CipherInputStream(src, fileCipher);
  }

  private static void addEncryptionMetadataToStorageClient(
      StorageObjectMetadata meta,
      long contentLength,
      RemoteStoreFileEncryptionMaterial encMat,
      SnowflakeStorageClient client,
      int keySize,
      byte[] encryptedKey,
      byte[] dataIvData,
      byte[] keyIvData,
      byte[] keyAad,
      byte[] dataAad) {
    MatDesc matDesc = new MatDesc(encMat.getSmkId(), encMat.getQueryId(), keySize * 8);
    client.addEncryptionMetadataForGcm(
        meta, matDesc, encryptedKey, dataIvData, keyIvData, keyAad, dataAad, contentLength);
  }

  static void decryptFile(
      File file,
      String encryptedFileKeyBase64,
      String dataIvBase64,
      String keyIvBase64,
      RemoteStoreFileEncryptionMaterial encMat,
      String dataAadBase64,
      String keyAadBase64)
      throws InvalidKeyException, IllegalBlockSizeException, BadPaddingException,
          InvalidAlgorithmParameterException, IOException, NoSuchPaddingException,
          NoSuchAlgorithmException {
    byte[] encryptedKeyBytes =
base64Decoder.decode(encryptedFileKeyBase64); byte[] dataIvBytes = base64Decoder.decode(dataIvBase64); byte[] keyIvBytes = base64Decoder.decode(keyIvBase64); byte[] kekBytes = base64Decoder.decode(encMat.getQueryStageMasterKey()); byte[] keyAad = base64Decoder.decode(keyAadBase64); byte[] dataAad = base64Decoder.decode(dataAadBase64); byte[] keyBytes = decryptKey(kekBytes, keyIvBytes, encryptedKeyBytes, keyAad); decryptContentFromFile(file, keyBytes, dataIvBytes, dataAad); } static InputStream decryptStream( InputStream inputStream, String encryptedKeyBase64, String dataIvBase64, String keyIvBase64, RemoteStoreFileEncryptionMaterial encMat, String dataAad, String keyAad) throws InvalidKeyException, BadPaddingException, IllegalBlockSizeException, InvalidAlgorithmParameterException, NoSuchPaddingException, NoSuchAlgorithmException { byte[] encryptedKeyBytes = base64Decoder.decode(encryptedKeyBase64); byte[] ivBytes = base64Decoder.decode(dataIvBase64); byte[] kekIvBytes = base64Decoder.decode(keyIvBase64); byte[] dataAadBytes = base64Decoder.decode(dataAad); byte[] keyAadBytes = base64Decoder.decode(keyAad); byte[] kekBytes = base64Decoder.decode(encMat.getQueryStageMasterKey()); byte[] fileKeyBytes = decryptKey(kekBytes, kekIvBytes, encryptedKeyBytes, keyAadBytes); return decryptContentFromStream(inputStream, ivBytes, fileKeyBytes, dataAadBytes); } private static CipherInputStream decryptContentFromStream( InputStream inputStream, byte[] ivBytes, byte[] fileKeyBytes, byte[] aad) throws InvalidKeyException, InvalidAlgorithmParameterException, NoSuchPaddingException, NoSuchAlgorithmException { GCMParameterSpec gcmParameterSpec = new GCMParameterSpec(TAG_LENGTH_IN_BITS, ivBytes); SecretKey fileKey = new SecretKeySpec(fileKeyBytes, AES); Cipher fileCipher = Cipher.getInstance(FILE_CIPHER); fileCipher.init(Cipher.DECRYPT_MODE, fileKey, gcmParameterSpec); if (aad != null) { fileCipher.updateAAD(aad); } return new CipherInputStream(inputStream, fileCipher); } private static void decryptContentFromFile( File file, byte[] fileKeyBytes, byte[] cekIvBytes, byte[] aad) throws InvalidKeyException, InvalidAlgorithmParameterException, IOException, NoSuchPaddingException, NoSuchAlgorithmException { SecretKey fileKey = new SecretKeySpec(fileKeyBytes, AES); GCMParameterSpec gcmParameterSpec = new GCMParameterSpec(TAG_LENGTH_IN_BITS, cekIvBytes); byte[] buffer = new byte[BUFFER_SIZE]; Cipher fileCipher = Cipher.getInstance(FILE_CIPHER); fileCipher.init(Cipher.DECRYPT_MODE, fileKey, gcmParameterSpec); if (aad != null) { fileCipher.updateAAD(aad); } long totalBytesRead = 0; try (InputStream is = Files.newInputStream(file.toPath(), READ); InputStream cis = new CipherInputStream(is, fileCipher); OutputStream os = Files.newOutputStream(file.toPath(), CREATE)) { int bytesRead; while ((bytesRead = cis.read(buffer)) > -1) { os.write(buffer, 0, bytesRead); totalBytesRead += bytesRead; } } try (FileOutputStream fos = new FileOutputStream(file, true); FileChannel fc = fos.getChannel()) { fc.truncate(totalBytesRead); } } private static byte[] decryptKey(byte[] kekBytes, byte[] ivBytes, byte[] keyBytes, byte[] aad) throws InvalidKeyException, InvalidAlgorithmParameterException, IllegalBlockSizeException, BadPaddingException, NoSuchPaddingException, NoSuchAlgorithmException { SecretKey kek = new SecretKeySpec(kekBytes, 0, kekBytes.length, AES); GCMParameterSpec gcmParameterSpec = new GCMParameterSpec(TAG_LENGTH_IN_BITS, ivBytes); Cipher keyCipher = Cipher.getInstance(KEY_CIPHER); keyCipher.init(Cipher.DECRYPT_MODE, kek, 
gcmParameterSpec); if (aad != null) { keyCipher.updateAAD(aad); } return keyCipher.doFinal(keyBytes); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/cloud/storage/GcsObjectSummariesIterator.java ================================================ package net.snowflake.client.internal.jdbc.cloud.storage; import com.google.api.gax.paging.Page; import com.google.cloud.storage.Blob; import java.util.Iterator; /** * Iterator class for ObjectSummary objects on GCS objects. Returns platform-independent instances * (StorageObjectSummary) */ public class GcsObjectSummariesIterator implements Iterator { private final Iterator blobIterator; public GcsObjectSummariesIterator(Page blobs) { this.blobIterator = blobs.iterateAll().iterator(); } @Override public boolean hasNext() { return this.blobIterator.hasNext(); } @Override public StorageObjectSummary next() { Blob blob = this.blobIterator.next(); return StorageObjectSummary.createFromGcsBlob(blob); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/cloud/storage/ProxySettings.java ================================================ package net.snowflake.client.internal.jdbc.cloud.storage; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty; import net.snowflake.client.internal.core.HttpProtocol; /** Immutable POJO holding extracted proxy parameters, shared across all CSP proxy builders. */ class ProxySettings { private final String host; private final int port; private final HttpProtocol protocol; private final String user; private final String password; private final String nonProxyHosts; ProxySettings( String host, int port, HttpProtocol protocol, String user, String password, String nonProxyHosts) { this.host = host; this.port = port; this.protocol = protocol; this.user = user; this.password = password; this.nonProxyHosts = nonProxyHosts; } String getHost() { return host; } int getPort() { return port; } HttpProtocol getProtocol() { return protocol; } String getUser() { return user; } String getPassword() { return password; } String getNonProxyHosts() { return nonProxyHosts; } boolean hasCredentials() { return !isNullOrEmpty(user) && !isNullOrEmpty(password); } boolean hasNonProxyHosts() { return !isNullOrEmpty(nonProxyHosts); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/cloud/storage/QueryIdHelper.java ================================================ package net.snowflake.client.internal.jdbc.cloud.storage; import net.snowflake.common.core.RemoteStoreFileEncryptionMaterial; class QueryIdHelper { static String queryIdFromEncMatOr(RemoteStoreFileEncryptionMaterial encMat, String queryId) { return encMat != null && encMat.getQueryId() != null ? 
encMat.getQueryId() : queryId; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/cloud/storage/S3ErrorHandler.java ================================================ package net.snowflake.client.internal.jdbc.cloud.storage; import static net.snowflake.client.internal.core.Constants.CLOUD_STORAGE_CREDENTIALS_EXPIRED; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.core.SFSession; import net.snowflake.client.internal.exception.SnowflakeSQLLoggedException; import net.snowflake.client.internal.jdbc.SnowflakeFileTransferAgent; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.common.core.SqlState; import org.apache.http.HttpStatus; import software.amazon.awssdk.core.exception.SdkServiceException; import software.amazon.awssdk.services.s3.model.S3Exception; public class S3ErrorHandler { private static final SFLogger logger = SFLoggerFactory.getLogger(S3ErrorHandler.class); /** Checks the status code of the exception to see if it's a 400 or 404. */ static boolean isClientException400Or404(Throwable ex) { if (ex instanceof SdkServiceException) { SdkServiceException sdkEx = (SdkServiceException) ex; return sdkEx.statusCode() == HttpStatus.SC_NOT_FOUND || sdkEx.statusCode() == HttpStatus.SC_BAD_REQUEST; } return false; } static void retryRequestWithExponentialBackoff( Exception ex, int retryCount, String operation, SFSession session, String command, SnowflakeStorageClient s3Client, String queryId, Throwable cause) throws SnowflakeSQLException { logger.debug( "Encountered exception ({}) during {}, retry count: {}", ex.getMessage(), operation, retryCount); logger.debug("Stack trace: ", ex); // exponential backoff up to a limit int backoffInMillis = s3Client.getRetryBackoffMin(); if (retryCount > 1) { backoffInMillis <<= (Math.min(retryCount - 1, s3Client.getRetryBackoffMaxExponent())); } try { logger.debug("Sleep for {} milliseconds before retry", backoffInMillis); Thread.sleep(backoffInMillis); } catch (InterruptedException ex1) { // ignore } // If the exception indicates that the AWS token has expired, // we need to refresh our S3 client with the new token if (cause instanceof S3Exception) { S3Exception e = (S3Exception) cause; if (e.awsErrorDetails() != null && SnowflakeS3Client.EXPIRED_AWS_TOKEN_ERROR_CODE.equalsIgnoreCase( e.awsErrorDetails().errorCode())) { // If session is null we cannot renew the token so throw the ExpiredToken exception if (session != null) { SnowflakeFileTransferAgent.renewExpiredToken(session, command, s3Client); } else { throw new SnowflakeSQLException( queryId, e.awsErrorDetails().errorCode(), CLOUD_STORAGE_CREDENTIALS_EXPIRED, "S3 credentials have expired"); } } } } static void throwIfClientExceptionOrMaxRetryReached( String operation, SFSession session, String command, String queryId, SnowflakeStorageClient s3Client, Throwable cause) throws SnowflakeSQLException { String extendedRequestId = "none"; if (cause instanceof S3Exception) { S3Exception e = (S3Exception) cause; extendedRequestId = e.extendedRequestId(); } if (cause instanceof SdkServiceException) { SdkServiceException ex1 = (SdkServiceException) cause; // The AWS credentials might have expired when server returns error 400 and // does not return the ExpiredToken error code. 
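// (S3 can report an expired token as a bare 400 without the documented
// "ExpiredToken" error code, so a 400 with a live session first attempts a
// token renewal before the failure is propagated.)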
// If session is null we cannot renew the token so throw the exception if (ex1.statusCode() == HttpStatus.SC_BAD_REQUEST && session != null) { SnowflakeFileTransferAgent.renewExpiredToken(session, command, s3Client); } else { throw new SnowflakeSQLLoggedException( queryId, session, SqlState.SYSTEM_ERROR, StorageHelper.getOperationException(operation).getMessageCode(), ex1, operation, ex1.getMessage(), ex1.requestId(), extendedRequestId); } } else { throw new SnowflakeSQLLoggedException( queryId, session, SqlState.SYSTEM_ERROR, StorageHelper.getOperationException(operation).getMessageCode(), cause, operation, cause.getMessage()); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/cloud/storage/S3ObjectMetadata.java ================================================ package net.snowflake.client.internal.jdbc.cloud.storage; import java.util.HashMap; import java.util.Map; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import software.amazon.awssdk.services.s3.model.ChecksumAlgorithm; import software.amazon.awssdk.services.s3.model.HeadObjectResponse; import software.amazon.awssdk.services.s3.model.PutObjectRequest; /** * s3 implementation of platform independent StorageObjectMetadata interface * *
<p>
It only supports a limited set of metadata properties currently used by the JDBC client */ public class S3ObjectMetadata implements StorageObjectMetadata { private Map userMetadata = new HashMap<>(); private Long contentLength; private String contentEncoding; S3ObjectMetadata() {} public S3ObjectMetadata(HeadObjectResponse meta) { userMetadata = meta.metadata(); contentLength = meta.contentLength(); contentEncoding = meta.contentEncoding(); } public S3ObjectMetadata(PutObjectRequest meta) { userMetadata = meta.metadata(); contentLength = meta.contentLength(); contentEncoding = meta.contentEncoding(); } @Override public Map getUserMetadata() { return SnowflakeUtil.createCaseInsensitiveMap(userMetadata); } public Map setUserMetadata(Map metadata) { return this.userMetadata = metadata; } @Override public long getContentLength() { return this.contentLength; } @Override public void setContentLength(long contentLength) { this.contentLength = contentLength; } @Override public void addUserMetadata(String key, String value) { userMetadata.put(key, value); } @Override public void setContentEncoding(String encoding) { this.contentEncoding = encoding; } @Override public String getContentEncoding() { return contentEncoding; } /** * @return Returns the encapsulated AWS S3 metadata request */ PutObjectRequest getS3PutObjectRequest() { return PutObjectRequest.builder() .metadata(userMetadata) .contentLength(contentLength) .contentEncoding(contentEncoding) .checksumAlgorithm(ChecksumAlgorithm.CRC32) .build(); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/cloud/storage/S3ObjectSummariesIterator.java ================================================ package net.snowflake.client.internal.jdbc.cloud.storage; import java.util.Iterator; import java.util.List; import software.amazon.awssdk.services.s3.model.S3Object; /** * Iterator class for ObjectSummary objects on S3 Wraps an iterator of S3 object summaries and * returns platform independent instances (StorageObjectSummary) */ public class S3ObjectSummariesIterator implements Iterator { // Encapsulated S3 iterator private Iterator s3ObjSummariesIterator; private String bucket; /* * Constructs a summaries iterator object from S3Object summary list * derived from the AWS client * @param s3ObjectSummaries a list of S3ObjectSummaries to construct from */ public S3ObjectSummariesIterator(List s3ObjectSummaries, String bucket) { s3ObjSummariesIterator = s3ObjectSummaries.iterator(); this.bucket = bucket; } public boolean hasNext() { return s3ObjSummariesIterator.hasNext(); } public StorageObjectSummary next() { // Get the next S3 summary object and return it as a platform-agnostic object // (StorageObjectSummary) S3Object s3Obj = s3ObjSummariesIterator.next(); return StorageObjectSummary.createFromS3ObjectSummary(s3Obj, bucket); } public void remove() { throw new UnsupportedOperationException("remove() method not supported"); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/cloud/storage/SnowflakeAzureClient.java ================================================ package net.snowflake.client.internal.jdbc.cloud.storage; import static net.snowflake.client.internal.core.Constants.CLOUD_STORAGE_CREDENTIALS_EXPIRED; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import static net.snowflake.client.internal.jdbc.cloud.storage.CloudStorageProxyFactory.createProxyOptionsForAzure; import static 
net.snowflake.client.internal.jdbc.cloud.storage.CloudStorageProxyFactory.createSessionlessProxyOptionsForAzure; import com.azure.core.http.ProxyOptions; import com.azure.core.http.rest.Response; import com.azure.core.util.HttpClientOptions; import com.azure.storage.blob.BlobClient; import com.azure.storage.blob.BlobContainerClient; import com.azure.storage.blob.BlobServiceClient; import com.azure.storage.blob.BlobServiceClientBuilder; import com.azure.storage.blob.models.BlobItem; import com.azure.storage.blob.models.BlobProperties; import com.azure.storage.blob.models.BlobStorageException; import com.azure.storage.blob.models.DownloadRetryOptions; import com.azure.storage.blob.models.ListBlobsOptions; import com.azure.storage.blob.models.ParallelTransferOptions; import com.azure.storage.blob.options.BlobDownloadToFileOptions; import com.azure.storage.blob.options.BlobParallelUploadOptions; import com.azure.storage.blob.specialized.BlockBlobClient; import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import java.io.File; import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.net.SocketTimeoutException; import java.net.URI; import java.net.URISyntaxException; import java.nio.file.OpenOption; import java.nio.file.StandardOpenOption; import java.security.InvalidAlgorithmParameterException; import java.security.InvalidKeyException; import java.security.NoSuchAlgorithmException; import java.util.AbstractMap.SimpleEntry; import java.util.ArrayList; import java.util.Base64; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; import javax.crypto.BadPaddingException; import javax.crypto.IllegalBlockSizeException; import javax.crypto.NoSuchPaddingException; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.core.ObjectMapperFactory; import net.snowflake.client.internal.core.SFBaseSession; import net.snowflake.client.internal.core.SFSession; import net.snowflake.client.internal.core.SFSessionProperty; import net.snowflake.client.internal.exception.SnowflakeSQLLoggedException; import net.snowflake.client.internal.jdbc.FileBackedOutputStream; import net.snowflake.client.internal.jdbc.MatDesc; import net.snowflake.client.internal.jdbc.SnowflakeFileTransferAgent; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.client.internal.util.SFPair; import net.snowflake.client.internal.util.Stopwatch; import net.snowflake.common.core.RemoteStoreFileEncryptionMaterial; import net.snowflake.common.core.SqlState; import org.apache.commons.io.IOUtils; /** Encapsulates the Azure Storage client and all Azure Storage operations and logic */ public class SnowflakeAzureClient implements SnowflakeStorageClient { private static final String localFileSep = systemGetProperty("file.separator"); private static final String AZ_ENCRYPTIONDATAPROP = "encryptiondata"; private static final String AZ_STREAMING_INGEST_CLIENT_NAME = "ingestclientname"; private static final String AZ_STREAMING_INGEST_CLIENT_KEY = "ingestclientkey"; private int encryptionKeySize = 0; // used for PUTs private StageInfo 
stageInfo; private RemoteStoreFileEncryptionMaterial encMat; private BlobServiceClient azStorageClient; private static final SFLogger logger = SFLoggerFactory.getLogger(SnowflakeAzureClient.class); private SFBaseSession session; private SnowflakeAzureClient() {} ; /* * Factory method for a SnowflakeAzureClient object * @param stage The stage information that the client will operate on * @param encMat The encryption material * required to decrypt/encrypt content in stage */ public static SnowflakeAzureClient createSnowflakeAzureClient( StageInfo stage, RemoteStoreFileEncryptionMaterial encMat, SFBaseSession sfSession) throws SnowflakeSQLException { logger.debug( "Initializing Snowflake Azure client with encryption: {}", encMat != null ? "true" : "false"); SnowflakeAzureClient azureClient = new SnowflakeAzureClient(); azureClient.setupAzureClient(stage, encMat, sfSession); return azureClient; } /* * Initializes the Azure client * This method is used during the object construction, but also to * reset/recreate the encapsulated CloudBlobClient object with new * credentials (after SAS token expiration) * @param stage The stage information that the client will operate on * @param encMat The encryption material * required to decrypt/encrypt content in stage * @throws IllegalArgumentException when invalid credentials are used */ private void setupAzureClient( StageInfo stage, RemoteStoreFileEncryptionMaterial encMat, SFBaseSession sfSession) throws IllegalArgumentException, SnowflakeSQLException { // Save the client creation parameters so that we can reuse them, // to reset the Azure client. this.stageInfo = stage; this.encMat = encMat; this.session = sfSession; logger.debug("Setting up the Azure client ", false); try { BlobServiceClientBuilder builder = new BlobServiceClientBuilder(); builder.endpoint( buildAzureStorageEndpointURI(stage.getEndPoint(), stage.getStorageAccount()).toString()); String sasToken = (String) stage.getCredentials().get("AZURE_SAS_TOKEN"); if (sasToken != null) { builder.sasToken(sasToken); } if (stage.getIsClientSideEncrypted() && encMat != null) { byte[] decodedKey = Base64.getDecoder().decode(encMat.getQueryStageMasterKey()); encryptionKeySize = decodedKey.length * 8; if (encryptionKeySize != 128 && encryptionKeySize != 192 && encryptionKeySize != 256) { throw new SnowflakeSQLLoggedException( QueryIdHelper.queryIdFromEncMatOr(encMat, null), session, ErrorCode.FILE_TRANSFER_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "unsupported key size", encryptionKeySize); } } ProxyOptions proxyOptions; if (session != null) { proxyOptions = createProxyOptionsForAzure(session.getHttpClientKey()); } else { proxyOptions = createSessionlessProxyOptionsForAzure(stage.getProxyProperties()); } HttpClientOptions clientOptions = new HttpClientOptions(); clientOptions.setProxyOptions(proxyOptions); builder.clientOptions(clientOptions); this.azStorageClient = builder.buildClient(); } catch (URISyntaxException ex) { throw new IllegalArgumentException("invalid_azure_credentials"); } } // Returns the Max number of retry attempts @Override public int getMaxRetries() { if (session != null && session .getConnectionPropertiesMap() .containsKey(SFSessionProperty.PUT_GET_MAX_RETRIES)) { return (int) session.getConnectionPropertiesMap().get(SFSessionProperty.PUT_GET_MAX_RETRIES); } return 25; } // Returns the max exponent for multiplying backoff with the power of 2, the value // of 4 will give us 16secs as the max number of time to sleep before retry @Override public int getRetryBackoffMaxExponent() 
{ return 4; } // Returns the min number of milliseconds to sleep before retry @Override public int getRetryBackoffMin() { return 1000; } /** * @return Returns true if encryption is enabled */ @Override public boolean isEncrypting() { return encryptionKeySize > 0 && this.stageInfo.getIsClientSideEncrypted(); } /** * @return Returns the size of the encryption key */ @Override public int getEncryptionKeySize() { return encryptionKeySize; } /** * Re-creates the encapsulated storage client with a fresh access token * * @param stageCredentials a Map (as returned by GS) which contains the new credential properties * @throws SnowflakeSQLException failure to renew the client */ @Override public void renew(Map<?, ?> stageCredentials) throws SnowflakeSQLException { logger.debug("Renewing the Azure client"); stageInfo.setCredentials(stageCredentials); setupAzureClient(stageInfo, encMat, session); } /** Shuts down the client */ @Override public void shutdown() { /* Not available */ } /** * For a set of remote storage objects under a remote location and a given prefix/path returns * their properties wrapped in ObjectSummary objects * * @param remoteStorageLocation location, i.e. container for Azure * @param prefix the prefix/path to list under * @return a collection of storage summary objects * @throws StorageProviderException Azure storage exception */ @Override public StorageObjectSummaryCollection listObjects(String remoteStorageLocation, String prefix) throws StorageProviderException { try { BlobContainerClient container = azStorageClient.getBlobContainerClient(remoteStorageLocation); List<BlobItem> blobItems = container.listBlobs(new ListBlobsOptions().setPrefix(prefix), null).stream() .collect(Collectors.toList()); return new StorageObjectSummaryCollection(blobItems, remoteStorageLocation); } catch (BlobStorageException ex) { logger.debug("Failed to list objects: {}", ex); throw new StorageProviderException(ex); } } /** * Returns the metadata properties for a remote storage object * * @param remoteStorageLocation location, i.e. container for Azure * @param prefix the prefix/path of the object to retrieve * @return storage metadata object * @throws StorageProviderException Azure storage exception */ @Override public StorageObjectMetadata getObjectMetadata(String remoteStorageLocation, String prefix) throws StorageProviderException { CommonObjectMetadata azureObjectMetadata; try { BlobContainerClient blobContainerClient = azStorageClient.getBlobContainerClient(remoteStorageLocation); BlobClient blobClient = blobContainerClient.getBlobClient(prefix); BlockBlobClient blockBlobClient = blobClient.getBlockBlobClient(); BlobProperties properties = blockBlobClient.getProperties(); Map<String, String> userDefinedMetadata = SnowflakeUtil.createCaseInsensitiveMap(properties.getMetadata()); long contentLength = properties.getBlobSize(); String contentEncoding = properties.getContentEncoding(); azureObjectMetadata = new CommonObjectMetadata(contentLength, contentEncoding, userDefinedMetadata); } catch (BlobStorageException ex) { logger.debug( "Failed to retrieve BLOB metadata: {} - {}", ex.getErrorCode(), formatStorageExtendedErrorInformation(ex)); throw new StorageProviderException(ex); } return azureObjectMetadata; } /** * Download a file from remote storage. * * @param session session object * @param command command to download file * @param localLocation local file path * @param destFileName destination file name * @param parallelism number of threads for parallel downloading * @param remoteStorageLocation remote storage location, i.e.
container for Azure * @param stageFilePath stage file path * @param stageRegion region name where the stage persists * @param presignedUrl Unused in Azure * @param queryId last query id * @throws SnowflakeSQLException download failure */ @Override public void download( SFSession session, String command, String localLocation, String destFileName, int parallelism, String remoteStorageLocation, String stageFilePath, String stageRegion, String presignedUrl, String queryId) throws SnowflakeSQLException { Stopwatch stopwatch = new Stopwatch(); stopwatch.start(); String localFilePath = localLocation + localFileSep + destFileName; logger.debug( "Starting download of file from Azure stage path: {} to {}", stageFilePath, localFilePath); localFilePath = trimLeadingSlashIfOnWindows(localFilePath); int retryCount = 0; do { try { File localFile = new File(localFilePath); BlobContainerClient blobContainerClient = azStorageClient.getBlobContainerClient(remoteStorageLocation); BlobClient blobClient = blobContainerClient.getBlobClient(stageFilePath); BlockBlobClient blockBlobClient = blobClient.getBlockBlobClient(); Set<OpenOption> options = new HashSet<>(); options.add(StandardOpenOption.CREATE); options.add(StandardOpenOption.WRITE); options.add(StandardOpenOption.TRUNCATE_EXISTING); BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions(localFilePath) .setDownloadRetryOptions(new DownloadRetryOptions().setMaxRetryRequests(0)) .setOpenOptions(options) .setParallelTransferOptions( new com.azure.storage.common.ParallelTransferOptions() .setMaxConcurrency(parallelism)); Response<BlobProperties> response = blockBlobClient.downloadToFileWithResponse(downloadOptions, null, null); SnowflakeUtil.assureOnlyUserAccessibleFilePermissions( localFile, session.isOwnerOnlyStageFilePermissionsEnabled()); stopwatch.stop(); long downloadMillis = stopwatch.elapsedMillis(); Map<String, String> userDefinedMetadata = SnowflakeUtil.createCaseInsensitiveMap(response.getValue().getMetadata()); if (this.isEncrypting() && this.getEncryptionKeySize() <= 256) { decryptFile( session, remoteStorageLocation, queryId, userDefinedMetadata, stopwatch, localFile, downloadMillis, retryCount); } else { logger.info( "Azure file {} downloaded to {}. It took {} ms with {} retries", remoteStorageLocation, localFile.getAbsolutePath(), downloadMillis, retryCount); } return; } catch (Exception ex) { logger.debug("Download unsuccessful {}", ex); handleAzureException( ex, ++retryCount, StorageHelper.DOWNLOAD, session, command, this, queryId); } } while (retryCount <= getMaxRetries()); throw new SnowflakeSQLLoggedException( queryId, session, StorageHelper.getOperationException(StorageHelper.DOWNLOAD).getMessageCode(), SqlState.INTERNAL_ERROR, "Unexpected: download unsuccessful without exception!"); } /** * If path on Windows looks like "/C:/..." we need to manually trim the leading slash.
Otherwise, * it fails with InvalidPathException */ private static String trimLeadingSlashIfOnWindows(String localFilePath) { if (SnowflakeUtil.isWindows() && localFilePath.startsWith("/")) { logger.debug("Trimming leading slash for Windows path: {}", localFilePath); return localFilePath.substring(1); } return localFilePath; } private void decryptFile( SFSession session, String remoteStorageLocation, String queryId, Map<String, String> userDefinedMetadata, Stopwatch stopwatch, File localFile, long downloadMillis, int retryCount) throws SnowflakeSQLException, NoSuchAlgorithmException, NoSuchPaddingException, InvalidKeyException, IllegalBlockSizeException, BadPaddingException, InvalidAlgorithmParameterException, IOException { EncryptionData encryptionData = validateAndGetEncryptionData(session, queryId, userDefinedMetadata, stopwatch); // Decrypt file try { EncryptionProvider.decrypt(localFile, encryptionData.key, encryptionData.iv, this.encMat); stopwatch.stop(); long decryptMillis = stopwatch.elapsedMillis(); logger.info( "Azure file {} downloaded to {}. It took {} ms (download: {} ms, decryption: {} ms) with {} retries", remoteStorageLocation, localFile.getAbsolutePath(), downloadMillis + decryptMillis, downloadMillis, decryptMillis, retryCount); } catch (Exception ex) { logger.error("Error decrypting file", ex); throw ex; } } /** * Download a file from remote storage * * @param session session object * @param command command to download file * @param parallelism number of threads for parallel downloading * @param remoteStorageLocation remote storage location, i.e. container for Azure * @param stageFilePath stage file path * @param stageRegion region name where the stage persists * @param presignedUrl Unused in Azure * @param queryId last query id * @return input file stream * @throws SnowflakeSQLException when download fails */ @Override public InputStream downloadToStream( SFSession session, String command, int parallelism, String remoteStorageLocation, String stageFilePath, String stageRegion, String presignedUrl, String queryId) throws SnowflakeSQLException { logger.debug( "Starting download of file from Azure stage path: {} to input stream", stageFilePath); Stopwatch stopwatch = new Stopwatch(); stopwatch.start(); int retryCount = 0; do { try { BlobContainerClient blobContainerClient = azStorageClient.getBlobContainerClient(remoteStorageLocation); BlobClient blobClient = blobContainerClient.getBlobClient(stageFilePath); BlockBlobClient blockBlobClient = blobClient.getBlockBlobClient(); InputStream stream = blockBlobClient.openInputStream(); stopwatch.stop(); long downloadMillis = stopwatch.elapsedMillis(); Map<String, String> userDefinedMetadata = SnowflakeUtil.createCaseInsensitiveMap(blockBlobClient.getProperties().getMetadata()); if (this.isEncrypting() && this.getEncryptionKeySize() <= 256) { return decryptStream( session, stageFilePath, queryId, userDefinedMetadata, stopwatch, stream, downloadMillis, retryCount); } else { logger.info( "Azure file {} downloaded to input stream.
Download took {} ms with {} retries", stageFilePath, downloadMillis, retryCount); return stream; } } catch (Exception ex) { logger.debug("Downloading unsuccessful {}", ex); handleAzureException( ex, ++retryCount, StorageHelper.DOWNLOAD, session, command, this, queryId); } } while (retryCount < getMaxRetries()); throw new SnowflakeSQLLoggedException( queryId, session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "Unexpected: download unsuccessful without exception!"); } private InputStream decryptStream( SFSession session, String stageFilePath, String queryId, Map<String, String> userDefinedMetadata, Stopwatch stopwatch, InputStream stream, long downloadMillis, int retryCount) throws SnowflakeSQLException, NoSuchPaddingException, NoSuchAlgorithmException, InvalidKeyException, BadPaddingException, IllegalBlockSizeException, InvalidAlgorithmParameterException { EncryptionData encryptionData = validateAndGetEncryptionData(session, queryId, userDefinedMetadata, stopwatch); try { InputStream is = EncryptionProvider.decryptStream(stream, encryptionData.key, encryptionData.iv, encMat); stopwatch.stop(); long decryptMillis = stopwatch.elapsedMillis(); logger.info( "Azure file {} downloaded to input stream. It took {} ms " + "(download: {} ms, decryption: {} ms) with {} retries", stageFilePath, downloadMillis + decryptMillis, downloadMillis, decryptMillis, retryCount); return is; } catch (Exception ex) { logger.error("Error in decrypting file", ex); throw ex; } } private EncryptionData validateAndGetEncryptionData( SFSession session, String queryId, Map<String, String> userDefinedMetadata, Stopwatch stopwatch) throws SnowflakeSQLException { if (!userDefinedMetadata.containsKey(AZ_ENCRYPTIONDATAPROP)) { throw new SnowflakeSQLLoggedException( queryId, session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "Encryption data not found in the metadata of a file being downloaded"); } SimpleEntry<String, String> encryptionData = parseEncryptionData(userDefinedMetadata.get(AZ_ENCRYPTIONDATAPROP), queryId); String key = encryptionData.getKey(); String iv = encryptionData.getValue(); stopwatch.restart(); if (key == null || iv == null) { throw new SnowflakeSQLLoggedException( queryId, session, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "File metadata incomplete"); } return new EncryptionData(key, iv); } /** * Upload a file/stream to remote storage * * @param session session object * @param command upload command * @param parallelism number of threads for parallel uploading * @param uploadFromStream true if upload source is stream * @param remoteStorageLocation storage container name * @param srcFile source file if not uploading from a stream * @param destFileName file name on remote storage after upload * @param inputStream stream used for uploading if fileBackedOutputStream is null * @param fileBackedOutputStream stream used for uploading if not null * @param meta object meta data * @param stageRegion region name where the stage persists * @param presignedUrl Unused in Azure * @param queryId last query id * @throws SnowflakeSQLException if upload failed even after retry */ @Override public void upload( SFSession session, String command, int parallelism, boolean uploadFromStream, String remoteStorageLocation, File srcFile, String destFileName, InputStream inputStream, FileBackedOutputStream fileBackedOutputStream, StorageObjectMetadata meta, String stageRegion, String presignedUrl, String queryId) throws SnowflakeSQLException { logger.info( StorageHelper.getStartUploadLog( "Azure",
uploadFromStream, inputStream, fileBackedOutputStream, srcFile, destFileName)); final List<FileInputStream> toClose = new ArrayList<>(); long originalContentLength = meta.getContentLength(); SFPair<InputStream, Boolean> uploadStreamInfo = createUploadStream( srcFile, uploadFromStream, inputStream, meta, originalContentLength, fileBackedOutputStream, toClose, queryId); if (!(meta instanceof CommonObjectMetadata)) { throw new IllegalArgumentException("Unexpected metadata object type"); } int retryCount = 0; Stopwatch stopwatch = new Stopwatch(); stopwatch.start(); do { try { logger.debug("Try uploading retryCount: {}, parallelism: {}", retryCount, parallelism); InputStream fileInputStream = uploadStreamInfo.left; BlobContainerClient blobContainerClient = azStorageClient.getBlobContainerClient(remoteStorageLocation); BlobClient blobClient = blobContainerClient.getBlobClient(destFileName); // Set the user-defined/Snowflake metadata and upload the BLOB logger.info("Uploading file: {}, with metadata:", destFileName); BlobParallelUploadOptions parallelUploadOptions = new BlobParallelUploadOptions(fileInputStream) .setParallelTransferOptions( new ParallelTransferOptions().setMaxConcurrency(parallelism)) .setMetadata(meta.getUserMetadata()); blobClient.uploadWithResponse(parallelUploadOptions, null, null); stopwatch.stop(); if (uploadFromStream) { logger.info( "Uploaded data from input stream to Azure location: {}. It took {} ms with {} retries", remoteStorageLocation, stopwatch.elapsedMillis(), retryCount); } else { logger.info( "Uploaded file {} to Azure location: {}. It took {} ms with {} retries", srcFile.getAbsolutePath(), remoteStorageLocation, stopwatch.elapsedMillis(), retryCount); } // close any open streams in the "toClose" list and return for (FileInputStream is : toClose) { IOUtils.closeQuietly(is); } return; } catch (Exception ex) { logger.debug("Error while uploading file", ex); handleAzureException( ex, ++retryCount, StorageHelper.UPLOAD, session, command, this, queryId); if (uploadFromStream && fileBackedOutputStream == null) { throw new SnowflakeSQLException( queryId, ex, SqlState.SYSTEM_ERROR, ErrorCode.UPLOAD_ERROR.getMessageCode(), "Encountered exception during upload: " + ex.getMessage() + "\nCannot retry upload from stream."); } uploadStreamInfo = createUploadStream( srcFile, uploadFromStream, inputStream, meta, originalContentLength, fileBackedOutputStream, toClose, queryId); } } while (retryCount <= getMaxRetries()); for (FileInputStream is : toClose) { IOUtils.closeQuietly(is); } throw new SnowflakeSQLException( queryId, SqlState.INTERNAL_ERROR, ErrorCode.INTERNAL_ERROR.getMessageCode(), "Unexpected: upload unsuccessful without exception!"); } /** * Handles exceptions thrown by Azure Storage * * @param ex the exception to handle * @param retryCount current number of retries, incremented by the caller before each call * @param operation string that indicates the function/operation that was taking place, when the * exception was raised, for example "upload" * @param session the current SFSession object used by the client * @param command the command attempted at the time of the exception * @param queryId last query id * @throws SnowflakeSQLException exceptions not handled */ @Override public void handleStorageException( Exception ex, int retryCount, String operation, SFSession session, String command, String queryId) throws SnowflakeSQLException { handleAzureException(ex, retryCount, operation, session, command, this, queryId); } private SFPair<InputStream, Boolean> createUploadStream( File srcFile, boolean uploadFromStream,
InputStream inputStream, StorageObjectMetadata meta, long originalContentLength, FileBackedOutputStream fileBackedOutputStream, List<FileInputStream> toClose, String queryId) throws SnowflakeSQLException { logger.debug( "createUploadStream({}, {}, {}, {}, {}, {})", this, srcFile, uploadFromStream, inputStream, fileBackedOutputStream, toClose); final InputStream stream; FileInputStream srcFileStream = null; try { if (isEncrypting() && getEncryptionKeySize() <= 256) { try { final InputStream uploadStream = uploadFromStream ? (fileBackedOutputStream != null ? fileBackedOutputStream.asByteSource().openStream() : inputStream) : (srcFileStream = new FileInputStream(srcFile)); toClose.add(srcFileStream); // Encrypt stream = EncryptionProvider.encrypt( meta, originalContentLength, uploadStream, this.encMat, this); uploadFromStream = true; } catch (Exception ex) { logger.error("Failed to encrypt input", ex); throw new SnowflakeSQLLoggedException( queryId, session, SqlState.INTERNAL_ERROR, ErrorCode.INTERNAL_ERROR.getMessageCode(), ex, "Failed to encrypt input", ex.getMessage()); } } else { if (uploadFromStream) { if (fileBackedOutputStream != null) { stream = fileBackedOutputStream.asByteSource().openStream(); } else { stream = inputStream; } } else { srcFileStream = new FileInputStream(srcFile); toClose.add(srcFileStream); stream = srcFileStream; } } } catch (FileNotFoundException ex) { logger.error("Failed to open input file", ex); throw new SnowflakeSQLLoggedException( queryId, session, SqlState.INTERNAL_ERROR, ErrorCode.INTERNAL_ERROR.getMessageCode(), ex, "Failed to open input file", ex.getMessage()); } catch (IOException ex) { logger.error("Failed to open input stream", ex); throw new SnowflakeSQLLoggedException( queryId, session, SqlState.INTERNAL_ERROR, ErrorCode.INTERNAL_ERROR.getMessageCode(), ex, "Failed to open input stream", ex.getMessage()); } return SFPair.of(stream, uploadFromStream); } /** * Handles exceptions thrown by Azure Storage. It will retry transient errors as defined by the * Azure client retry policy. It will re-create the client if the SAS token has expired, and retry. * * @param ex the exception to handle * @param retryCount current number of retries, incremented by the caller before each call * @param operation string that indicates the function/operation that was taking place, when the * exception was raised, for example "upload" * @param session the current SFSession object used by the client * @param command the command attempted at the time of the exception * @param azClient the current Snowflake Azure client object * @throws SnowflakeSQLException exceptions not handled */ private static void handleAzureException( Exception ex, int retryCount, String operation, SFSession session, String command, SnowflakeAzureClient azClient, String queryId) throws SnowflakeSQLException { // no need to retry if it is invalid key exception if (ex.getCause() instanceof InvalidKeyException) { // Most likely cause is that the unlimited strength policy files are not installed // Log the error and throw a message that explains the cause SnowflakeFileTransferAgent.throwJCEMissingError(operation, ex, queryId); } // If there is no space left in the download location, java.io.IOException is thrown. // Don't retry.
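// For errors that are retried below, the sleep before the next attempt is getRetryBackoffMin() (1000 ms) left-shifted by min(retryCount - 1, getRetryBackoffMaxExponent()), i.e. 1 s, 2 s, 4 s, 8 s, then capped at 16 s per attempt.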
if (SnowflakeUtil.getRootCause(ex) instanceof IOException) { SnowflakeFileTransferAgent.throwNoSpaceLeftError(session, operation, ex, queryId); } BlobStorageException se = null; if (ex instanceof BlobStorageException) { se = (BlobStorageException) ex; } else if (SnowflakeUtil.getRootCause(ex) instanceof BlobStorageException) { se = (BlobStorageException) SnowflakeUtil.getRootCause(ex); } if (se != null) { if (se.getStatusCode() == 403) { // A 403 indicates that the SAS token has expired, // we need to refresh the Azure client with the new token if (session != null) { SnowflakeFileTransferAgent.renewExpiredToken(session, command, azClient); } else { // If session is null we cannot renew the token so throw the ExpiredToken exception throw new SnowflakeSQLException( queryId, se.getErrorCode().toString(), CLOUD_STORAGE_CREDENTIALS_EXPIRED, "Azure credentials may have expired"); } } // If we have exceeded the max number of retries, propagate the error // no need for back off and retry if the file does not exist if (retryCount > azClient.getMaxRetries() || se.getStatusCode() == 404) { throw new SnowflakeSQLLoggedException( queryId, session, SqlState.SYSTEM_ERROR, ErrorCode.AZURE_SERVICE_ERROR.getMessageCode(), se, operation, se.getErrorCode(), se.getStatusCode(), se.getMessage(), formatStorageExtendedErrorInformation(se)); } else { logger.debug( "Encountered exception ({}) during {}, retry count: {}", se.getMessage(), operation, retryCount); logger.debug("Stack trace: ", ex); // exponential backoff up to a limit int backoffInMillis = azClient.getRetryBackoffMin(); if (retryCount > 1) { backoffInMillis <<= (Math.min(retryCount - 1, azClient.getRetryBackoffMaxExponent())); } try { logger.debug("Sleep for {} milliseconds before retry", backoffInMillis); Thread.sleep(backoffInMillis); } catch (InterruptedException ex1) { // ignore } if (se.getStatusCode() == 403) { // A 403 indicates that the SAS token has expired, // we need to refresh the Azure client with the new token SnowflakeFileTransferAgent.renewExpiredToken(session, command, azClient); } } } else { if (ex instanceof InterruptedException || SnowflakeUtil.getRootCause(ex) instanceof SocketTimeoutException) { if (retryCount > azClient.getMaxRetries()) { throw new SnowflakeSQLLoggedException( queryId, session, SqlState.SYSTEM_ERROR, ErrorCode.FILE_TRANSFER_ERROR.getMessageCode(), ex, "Encountered exception during " + operation + ": " + ex.getMessage()); } else { logger.debug( "Encountered exception ({}) during {}, retry count: {}", ex.getMessage(), operation, retryCount); } } else { throw new SnowflakeSQLLoggedException( queryId, session, SqlState.SYSTEM_ERROR, ErrorCode.FILE_TRANSFER_ERROR.getMessageCode(), ex, "Encountered exception during " + operation + ": " + ex.getMessage()); } } } /** * Format the StorageExtendedErrorInformation to a String. * * @param info the StorageExtendedErrorInformation object * @return the formatted error information string */ static String formatStorageExtendedErrorInformation(BlobStorageException info) { return "StorageExceptionExtendedErrorInformation: {ErrorCode=" + info.getErrorCode() + ", ErrorMessage=" + info.getServiceMessage() + "}"; } /** * Builds a URI to an Azure Storage account endpoint * * @param storageEndPoint the storage endpoint name * @param storageAccount the storage account name */ private static URI buildAzureStorageEndpointURI(String storageEndPoint, String storageAccount) throws URISyntaxException { URI storageEndpoint = new URI("https", storageAccount + "."
+ storageEndPoint + "/", null, null); return storageEndpoint; } /** * buildEncryptionMetadataJSON Takes the base64-encoded iv and key and creates the JSON block to * be used as the encryptiondata metadata field on the blob. */ private String buildEncryptionMetadataJSON(String iv64, String key64) { return String.format( "{\"EncryptionMode\":\"FullBlob\",\"WrappedContentKey\"" + ":{\"KeyId\":\"symmKey1\",\"EncryptedKey\":\"%s\"" + ",\"Algorithm\":\"AES_CBC_256\"},\"EncryptionAgent\":" + "{\"Protocol\":\"1.0\",\"EncryptionAlgorithm\":" + "\"AES_CBC_256\"},\"ContentEncryptionIV\":\"%s\"" + ",\"KeyWrappingMetadata\":{\"EncryptionLibrary\":" + "\"Java 5.3.0\"}}", key64, iv64); } /** * parseEncryptionData Takes the json string in the encryptiondata metadata field of the encrypted * blob and parses out the key and iv. Returns the pair as key = key, iv = value. */ private SimpleEntry<String, String> parseEncryptionData(String jsonEncryptionData, String queryId) throws SnowflakeSQLException { ObjectMapper mapper = ObjectMapperFactory.getObjectMapper(); JsonFactory factory = mapper.getFactory(); try { JsonParser parser = factory.createParser(jsonEncryptionData); JsonNode encryptionDataNode = mapper.readTree(parser); String iv = encryptionDataNode.get("ContentEncryptionIV").asText(); String key = encryptionDataNode.get("WrappedContentKey").get("EncryptedKey").asText(); return new SimpleEntry<>(key, iv); } catch (Exception ex) { throw new SnowflakeSQLLoggedException( queryId, session, SqlState.SYSTEM_ERROR, ErrorCode.FILE_TRANSFER_ERROR.getMessageCode(), ex, "Error parsing encryption data as json" + ": " + ex.getMessage()); } } /** Returns the material descriptor key */ @Override public String getMatdescKey() { return "matdesc"; } /** Adds encryption metadata to the StorageObjectMetadata object */ @Override public void addEncryptionMetadata( StorageObjectMetadata meta, MatDesc matDesc, byte[] ivData, byte[] encryptedKey, long contentLength) { meta.addUserMetadata(getMatdescKey(), matDesc.toString()); meta.addUserMetadata( AZ_ENCRYPTIONDATAPROP, buildEncryptionMetadataJSON( Base64.getEncoder().encodeToString(ivData), Base64.getEncoder().encodeToString(encryptedKey))); meta.setContentLength(contentLength); } /** Adds digest metadata to the StorageObjectMetadata object */ @Override public void addDigestMetadata(StorageObjectMetadata meta, String digest) { if (!SnowflakeUtil.isBlank(digest)) { // Azure doesn't allow hyphens in the name of a metadata field.
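// (Azure blob metadata names must be valid C# identifiers, which excludes hyphens, hence the hyphen-free key below.)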
meta.addUserMetadata("sfcdigest", digest); } } /** Gets digest metadata to the StorageObjectMetadata object */ @Override public String getDigestMetadata(StorageObjectMetadata meta) { return meta.getUserMetadata().get("sfcdigest"); } /** * Adds streaming ingest metadata to the StorageObjectMetadata object, used for streaming ingest * per client billing calculation */ @Override public void addStreamingIngestMetadata( StorageObjectMetadata meta, String clientName, String clientKey) { meta.addUserMetadata(AZ_STREAMING_INGEST_CLIENT_NAME, clientName); meta.addUserMetadata(AZ_STREAMING_INGEST_CLIENT_KEY, clientKey); } /** Gets streaming ingest client name to the StorageObjectMetadata object */ @Override public String getStreamingIngestClientName(StorageObjectMetadata meta) { return meta.getUserMetadata().get(AZ_STREAMING_INGEST_CLIENT_NAME); } /** Gets streaming ingest client key to the StorageObjectMetadata object */ @Override public String getStreamingIngestClientKey(StorageObjectMetadata meta) { return meta.getUserMetadata().get(AZ_STREAMING_INGEST_CLIENT_KEY); } private static class EncryptionData { public final String key; public final String iv; public EncryptionData(String key, String iv) { this.key = key; this.iv = iv; } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/cloud/storage/SnowflakeGCSClient.java ================================================ package net.snowflake.client.internal.jdbc.cloud.storage; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.convertSystemPropertyToBooleanValue; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.createCaseInsensitiveMap; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.getRootCause; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isBlank; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetEnv; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import java.io.File; import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.net.SocketTimeoutException; import java.net.URISyntaxException; import java.security.InvalidKeyException; import java.util.AbstractMap; import java.util.ArrayList; import java.util.Base64; import java.util.List; import java.util.Map; import java.util.Map.Entry; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.core.HttpClientSettingsKey; import net.snowflake.client.internal.core.HttpResponseContextDto; import net.snowflake.client.internal.core.HttpUtil; import net.snowflake.client.internal.core.ObjectMapperFactory; import net.snowflake.client.internal.core.SFSession; import net.snowflake.client.internal.core.SFSessionProperty; import net.snowflake.client.internal.exception.SnowflakeSQLLoggedException; import net.snowflake.client.internal.jdbc.FileBackedOutputStream; import net.snowflake.client.internal.jdbc.MatDesc; import net.snowflake.client.internal.jdbc.RestRequest; import net.snowflake.client.internal.jdbc.SnowflakeFileTransferAgent; import 
net.snowflake.client.internal.jdbc.SnowflakeUtil; import net.snowflake.client.internal.jdbc.telemetry.ExecTimeTelemetryData; import net.snowflake.client.internal.log.ArgSupplier; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.client.internal.util.SFPair; import net.snowflake.client.internal.util.Stopwatch; import net.snowflake.common.core.RemoteStoreFileEncryptionMaterial; import net.snowflake.common.core.SqlState; import org.apache.commons.io.IOUtils; import org.apache.http.HttpResponse; import org.apache.http.client.HttpResponseException; import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPut; import org.apache.http.client.utils.URIBuilder; import org.apache.http.entity.InputStreamEntity; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.util.EntityUtils; /** Encapsulates the GCS Storage client and all GCS operations and logic */ public class SnowflakeGCSClient implements SnowflakeStorageClient { public static final String DISABLE_GCS_DEFAULT_CREDENTIALS_PROPERTY_NAME = "net.snowflake.jdbc.disableGcsDefaultCredentials"; private static final String GCS_ENCRYPTIONDATAPROP = "encryptiondata"; private static final String localFileSep = systemGetProperty("file.separator"); private static final String GCS_METADATA_PREFIX = "x-goog-meta-"; private static final String GCS_STREAMING_INGEST_CLIENT_NAME = "ingestclientname"; private static final String GCS_STREAMING_INGEST_CLIENT_KEY = "ingestclientkey"; private int encryptionKeySize = 0; // used for PUTs private StageInfo stageInfo; private RemoteStoreFileEncryptionMaterial encMat; private SFSession session = null; private GCSAccessStrategy gcsAccessStrategy = null; private static final SFLogger logger = SFLoggerFactory.getLogger(SnowflakeGCSClient.class); private SnowflakeGCSClient() {} /* * Factory method for a SnowflakeGCSClient object * @param stage The stage information that the client will operate on * @param encMat The encryption material * required to decrypt/encrypt content in stage */ public static SnowflakeGCSClient createSnowflakeGCSClient( StageInfo stage, RemoteStoreFileEncryptionMaterial encMat, SFSession session) throws SnowflakeSQLException { logger.debug( "Initializing Snowflake GCS client with encryption: {}", encMat != null ? 
"true" : "false"); SnowflakeGCSClient sfGcsClient = new SnowflakeGCSClient(); sfGcsClient.setupGCSClient(stage, encMat, session); return sfGcsClient; } // Returns the Max number of retry attempts @Override public int getMaxRetries() { if (session != null && session .getConnectionPropertiesMap() .containsKey(SFSessionProperty.PUT_GET_MAX_RETRIES)) { return (int) session.getConnectionPropertiesMap().get(SFSessionProperty.PUT_GET_MAX_RETRIES); } return 25; } // Returns the max exponent for multiplying backoff with the power of 2, the value // of 4 will give us 16secs as the max number of time to sleep before retry @Override public int getRetryBackoffMaxExponent() { return 4; } // Returns the min number of milliseconds to sleep before retry @Override public int getRetryBackoffMin() { return 1000; } /** * @return Returns true if encryption is enabled */ @Override public boolean isEncrypting() { return encryptionKeySize > 0 && this.stageInfo.getIsClientSideEncrypted(); } /** * @return Returns the size of the encryption key */ @Override public int getEncryptionKeySize() { return encryptionKeySize; } /** * @return Whether this client requires the use of presigned URLs for upload and download instead * of credentials that work for all files uploaded/ downloaded to a stage path. True for GCS. */ @Override public boolean requirePresignedUrl() { Map credentialsMap = stageInfo.getCredentials(); return !(credentialsMap != null && credentialsMap.containsKey("GCS_ACCESS_TOKEN")); } @Override public void renew(Map stageCredentials) throws SnowflakeSQLException { logger.debug("Renewing the Snowflake GCS client"); stageInfo.setCredentials(stageCredentials); setupGCSClient(stageInfo, encMat, session); } @Override public void shutdown() { if (this.gcsAccessStrategy != null) { this.gcsAccessStrategy.shutdown(); } } /** * listObjects gets all the objects in a path * * @param remoteStorageLocation bucket name * @param prefix Path * @return a collection of storage summary objects * @throws StorageProviderException cloud storage provider error */ @Override public StorageObjectSummaryCollection listObjects(String remoteStorageLocation, String prefix) throws StorageProviderException { return this.gcsAccessStrategy.listObjects(remoteStorageLocation, prefix); } @Override public StorageObjectMetadata getObjectMetadata(String remoteStorageLocation, String prefix) throws StorageProviderException { return this.gcsAccessStrategy.getObjectMetadata(remoteStorageLocation, prefix); } /** * Download a file from remote storage. * * @param session session object * @param command command to download file * @param localLocation local file path * @param destFileName destination file name * @param parallelism [ not used by the GCP implementation ] * @param remoteStorageLocation remote storage location, i.e. 
bucket for GCS * @param stageFilePath stage file path * @param stageRegion region name where the stage persists * @param presignedUrl Credential to use for download * @param queryId last query id * @throws SnowflakeSQLException download failure */ @Override public void download( SFSession session, String command, String localLocation, String destFileName, int parallelism, String remoteStorageLocation, String stageFilePath, String stageRegion, String presignedUrl, String queryId) throws SnowflakeSQLException { String localFilePath = localLocation + localFileSep + destFileName; logger.debug( "Starting download of file from GCS stage path: {} to {}", stageFilePath, localFilePath); int retryCount = 0; Stopwatch stopwatch = new Stopwatch(); stopwatch.start(); File localFile = new File(localFilePath); do { try { String key = null; String iv = null; long downloadMillis = 0; if (!isNullOrEmpty(presignedUrl)) { logger.debug("Starting download with presigned URL", false); URIBuilder uriBuilder = new URIBuilder(presignedUrl); HttpGet httpRequest = new HttpGet(uriBuilder.build()); httpRequest.addHeader("accept-encoding", "GZIP"); logger.debug("Fetching result: {}", scrubPresignedUrl(presignedUrl)); CloseableHttpClient httpClient = HttpUtil.getHttpClientWithoutDecompression( session.getHttpClientKey(), session.getHttpHeadersCustomizers()); // Get the file from storage using the presigned url HttpResponseContextDto responseDto = RestRequest.executeWithRetries( httpClient, httpRequest, session.getNetworkTimeoutInMilli() / 1000, // retry timeout 0, session.getHttpClientSocketTimeout(), getMaxRetries(), 0, // no socket timeout injection null, // no canceling false, // no cookie false, // no retry false, // no request_guid true, // retry on HTTP 403 false, new ExecTimeTelemetryData(), session, session.getHttpClientKey(), session.getHttpHeadersCustomizers(), true); HttpResponse response = responseDto.getHttpResponse(); logger.debug( "Call returned for URL: {}", (ArgSupplier) () -> scrubPresignedUrl(this.stageInfo.getPresignedUrl())); if (isSuccessStatusCode(response.getStatusLine().getStatusCode())) { try { InputStream bodyStream = response.getEntity().getContent(); byte[] buffer = new byte[8 * 1024]; int bytesRead; OutputStream outStream = new FileOutputStream(localFile); while ((bytesRead = bodyStream.read(buffer)) != -1) { outStream.write(buffer, 0, bytesRead); } outStream.flush(); outStream.close(); bodyStream.close(); SnowflakeUtil.assureOnlyUserAccessibleFilePermissions( localFile, session.isOwnerOnlyStageFilePermissionsEnabled()); if (isEncrypting()) { Map<String, String> userDefinedHeaders = createCaseInsensitiveMap(response.getAllHeaders()); AbstractMap.SimpleEntry<String, String> encryptionData = parseEncryptionData( userDefinedHeaders.get(GCS_METADATA_PREFIX + GCS_ENCRYPTIONDATAPROP), queryId); key = encryptionData.getKey(); iv = encryptionData.getValue(); } stopwatch.stop(); downloadMillis = stopwatch.elapsedMillis(); logger.debug("Download successful", false); } catch (IOException ex) { logger.debug("Download unsuccessful {}", ex); handleStorageException( ex, ++retryCount, StorageHelper.DOWNLOAD, session, command, queryId); } } else { Exception ex = new HttpResponseException( response.getStatusLine().getStatusCode(), EntityUtils.toString(response.getEntity())); handleStorageException( ex, ++retryCount, StorageHelper.DOWNLOAD, session, command, queryId); } } else { Map<String, String> userDefinedMetadata = this.gcsAccessStrategy.download( parallelism, remoteStorageLocation, stageFilePath, localFile);
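// The user-defined metadata returned by the access strategy carries the "encryptiondata" entry consumed by the decryption step below.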
SnowflakeUtil.assureOnlyUserAccessibleFilePermissions( localFile, session.isOwnerOnlyStageFilePermissionsEnabled()); stopwatch.stop(); downloadMillis = stopwatch.elapsedMillis(); logger.debug("Download successful", false); if (isEncrypting()) { if (!userDefinedMetadata.isEmpty()) { AbstractMap.SimpleEntry<String, String> encryptionData = parseEncryptionData(userDefinedMetadata.get(GCS_ENCRYPTIONDATAPROP), queryId); key = encryptionData.getKey(); iv = encryptionData.getValue(); } } } if (!isNullOrEmpty(iv) && !isNullOrEmpty(key) && this.isEncrypting() && this.getEncryptionKeySize() <= 256) { if (key == null || iv == null) { throw new SnowflakeSQLLoggedException( queryId, session, StorageHelper.getOperationException(StorageHelper.DOWNLOAD).getMessageCode(), SqlState.INTERNAL_ERROR, "File metadata incomplete"); } // Decrypt file try { stopwatch.start(); EncryptionProvider.decrypt(localFile, key, iv, this.encMat); stopwatch.stop(); long decryptMillis = stopwatch.elapsedMillis(); logger.info( "GCS file {} downloaded to {}. It took {} ms (download: {} ms, decryption: {} ms) with {} retries", stageFilePath, localFile.getAbsolutePath(), downloadMillis + decryptMillis, downloadMillis, decryptMillis, retryCount); } catch (Exception ex) { logger.error("Error decrypting file", ex); throw new SnowflakeSQLLoggedException( queryId, session, StorageHelper.getOperationException(StorageHelper.DOWNLOAD).getMessageCode(), SqlState.INTERNAL_ERROR, "Cannot decrypt file"); } } else { logger.info( "GCS file {} downloaded to {}. It took {} ms with {} retries", stageFilePath, localFile.getAbsolutePath(), downloadMillis, retryCount); } return; } catch (Exception ex) { logger.debug("Download unsuccessful {}", ex); handleStorageException(ex, ++retryCount, StorageHelper.DOWNLOAD, session, command, queryId); } } while (retryCount <= getMaxRetries()); throw new SnowflakeSQLLoggedException( queryId, session, StorageHelper.getOperationException(StorageHelper.DOWNLOAD).getMessageCode(), SqlState.INTERNAL_ERROR, "Unexpected: download unsuccessful without exception!"); } /** * Download a file from remote storage * * @param session session object * @param command command to download file * @param parallelism number of threads for parallel downloading * @param remoteStorageLocation remote storage location, i.e.
bucket for GCS * @param stageFilePath stage file path * @param stageRegion region name where the stage persists * @param presignedUrl Signed credential for download * @param queryId last query id * @return input file stream * @throws SnowflakeSQLException when download fails */ @Override public InputStream downloadToStream( SFSession session, String command, int parallelism, String remoteStorageLocation, String stageFilePath, String stageRegion, String presignedUrl, String queryId) throws SnowflakeSQLException { logger.debug( "Starting download of file from GCS stage path: {} to input stream", stageFilePath); int retryCount = 0; Stopwatch stopwatch = new Stopwatch(); stopwatch.start(); InputStream inputStream = null; long downloadMillis = 0; do { try { String key = null; String iv = null; if (!isNullOrEmpty(presignedUrl)) { logger.debug("Starting download with presigned URL", false); URIBuilder uriBuilder = new URIBuilder(presignedUrl); HttpGet httpRequest = new HttpGet(uriBuilder.build()); httpRequest.addHeader("accept-encoding", "GZIP"); logger.debug("Fetching result: {}", scrubPresignedUrl(presignedUrl)); CloseableHttpClient httpClient = HttpUtil.getHttpClientWithoutDecompression( session.getHttpClientKey(), session.getHttpHeadersCustomizers()); // Get the file from storage using the presigned url HttpResponse response = RestRequest.executeWithRetries( httpClient, httpRequest, session.getNetworkTimeoutInMilli() / 1000, // retry timeout 0, session.getHttpClientSocketTimeout(), getMaxRetries(), 0, // no socket timeout injection null, // no canceling false, // no cookie false, // no retry false, // no request_guid true, // retry on HTTP 403 false, new ExecTimeTelemetryData(), session, session.getHttpClientKey(), session.getHttpHeadersCustomizers(), true) .getHttpResponse(); logger.debug( "Call returned for URL: {}", (ArgSupplier) () -> scrubPresignedUrl(this.stageInfo.getPresignedUrl())); if (isSuccessStatusCode(response.getStatusLine().getStatusCode())) { try { inputStream = response.getEntity().getContent(); if (isEncrypting()) { Map<String, String> userDefinedHeaders = createCaseInsensitiveMap(response.getAllHeaders()); AbstractMap.SimpleEntry<String, String> encryptionData = parseEncryptionData( userDefinedHeaders.get(GCS_METADATA_PREFIX + GCS_ENCRYPTIONDATAPROP), queryId); key = encryptionData.getKey(); iv = encryptionData.getValue(); } stopwatch.stop(); downloadMillis = stopwatch.elapsedMillis(); logger.debug("Download successful", false); } catch (IOException ex) { logger.debug("Download unsuccessful {}", ex); handleStorageException( ex, ++retryCount, StorageHelper.DOWNLOAD, session, command, queryId); } } else { Exception ex = new HttpResponseException( response.getStatusLine().getStatusCode(), EntityUtils.toString(response.getEntity())); handleStorageException( ex, ++retryCount, StorageHelper.DOWNLOAD, session, command, queryId); } } else { SFPair<InputStream, Map<String, String>> pair = this.gcsAccessStrategy.downloadToStream( remoteStorageLocation, stageFilePath, isEncrypting()); inputStream = pair.left; if (isEncrypting()) { // Get the user-defined BLOB metadata Map<String, String> userDefinedMetadata = pair.right; AbstractMap.SimpleEntry<String, String> encryptionData = parseEncryptionData(userDefinedMetadata.get(GCS_ENCRYPTIONDATAPROP), queryId); key = encryptionData.getKey(); iv = encryptionData.getValue(); } stopwatch.stop(); downloadMillis = stopwatch.elapsedMillis(); } if (this.isEncrypting() && this.getEncryptionKeySize() <= 256) { stopwatch.restart(); if (key == null || iv == null) { throw new SnowflakeSQLException( queryId, SqlState.INTERNAL_ERROR,
ErrorCode.INTERNAL_ERROR.getMessageCode(), "File metadata incomplete"); } // Decrypt file try { if (inputStream != null) { inputStream = EncryptionProvider.decryptStream(inputStream, key, iv, this.encMat); stopwatch.stop(); long decryptMillis = stopwatch.elapsedMillis(); logger.info( "GCS file {} downloaded to stream. It took {} ms (download: {} ms, decryption: {} ms) with {} retries", stageFilePath, downloadMillis + decryptMillis, downloadMillis, decryptMillis, retryCount); return inputStream; } } catch (Exception ex) { logger.error("Error decrypting file", ex); throw new SnowflakeSQLLoggedException( queryId, session, StorageHelper.getOperationException(StorageHelper.DOWNLOAD).getMessageCode(), SqlState.INTERNAL_ERROR, "Cannot decrypt file"); } } else { logger.info( "GCS file {} downloaded to stream. Download took {} ms with {} retries", stageFilePath, downloadMillis, retryCount); } return inputStream; } catch (Exception ex) { logger.debug("Download unsuccessful {}", ex); handleStorageException(ex, ++retryCount, StorageHelper.DOWNLOAD, session, command, queryId); } } while (retryCount <= getMaxRetries()); throw new SnowflakeSQLLoggedException( queryId, session, StorageHelper.getOperationException(StorageHelper.DOWNLOAD).getMessageCode(), SqlState.INTERNAL_ERROR, "Unexpected: download unsuccessful without exception!"); } /** * Upload a file (-stream) to remote storage with Pre-signed URL without JDBC session. * * @param networkTimeoutInMilli Network timeout for the upload * @param ocspModeAndProxyKey OCSP mode and proxy settings for the upload. * @param parallelism number of threads do parallel uploading * @param uploadFromStream true if upload source is stream * @param remoteStorageLocation s3 bucket name * @param srcFile source file if not uploading from a stream * @param destFileName file name on remote storage after upload * @param inputStream stream used for uploading if fileBackedOutputStream is null * @param fileBackedOutputStream stream used for uploading if not null * @param meta object meta data * @param stageRegion region name where the stage persists * @param presignedUrl presigned URL for upload. Used by GCP. 
* @param queryId last query id * @throws SnowflakeSQLException if upload failed */ @Override public void uploadWithPresignedUrlWithoutConnection( int networkTimeoutInMilli, HttpClientSettingsKey ocspModeAndProxyKey, int parallelism, boolean uploadFromStream, String remoteStorageLocation, File srcFile, String destFileName, InputStream inputStream, FileBackedOutputStream fileBackedOutputStream, StorageObjectMetadata meta, String stageRegion, String presignedUrl, String queryId) throws SnowflakeSQLException { logger.info( StorageHelper.getStartUploadLog( "GCS", uploadFromStream, inputStream, fileBackedOutputStream, srcFile, destFileName)); final List<FileInputStream> toClose = new ArrayList<>(); long originalContentLength = meta.getContentLength(); SFPair<InputStream, Boolean> uploadStreamInfo = createUploadStream( srcFile, uploadFromStream, inputStream, meta, originalContentLength, fileBackedOutputStream, toClose, queryId); if (!(meta instanceof CommonObjectMetadata)) { throw new IllegalArgumentException("Unexpected metadata object type"); } Stopwatch stopwatch = new Stopwatch(); stopwatch.start(); if (isNullOrEmpty(presignedUrl) || "null".equalsIgnoreCase(presignedUrl)) { logger.debug("Starting upload with downscoped token"); uploadWithDownScopedToken( parallelism, remoteStorageLocation, destFileName, meta.getContentEncoding(), meta.getUserMetadata(), meta.getContentLength(), uploadStreamInfo.left, queryId); logger.debug("Upload successful with downscoped token"); } else { logger.debug("Starting upload with presigned url"); uploadWithPresignedUrl( networkTimeoutInMilli, (int) HttpUtil.getSocketTimeout().toMillis(), meta.getContentEncoding(), meta.getUserMetadata(), uploadStreamInfo.left, presignedUrl, ocspModeAndProxyKey, queryId); logger.debug("Upload successful with presigned url"); } stopwatch.stop(); if (uploadFromStream) { logger.info( "Uploaded data from input stream to GCS location: {}. It took {} ms", remoteStorageLocation, stopwatch.elapsedMillis()); } else { logger.info( "Uploaded file {} to GCS location: {}.
It took {} ms", srcFile.getAbsolutePath(), remoteStorageLocation, stopwatch.elapsedMillis()); } // close any open streams in the "toClose" list and return for (FileInputStream is : toClose) { IOUtils.closeQuietly(is); } } /** * Upload a file/stream to remote storage * * @param session session object * @param command upload command * @param parallelism [ not used by the GCP implementation ] * @param uploadFromStream true if upload source is stream * @param remoteStorageLocation storage container name * @param srcFile source file if not uploading from a stream * @param destFileName file name on remote storage after upload * @param inputStream stream used for uploading if fileBackedOutputStream is null * @param fileBackedOutputStream stream used for uploading if not null * @param meta object meta data * @param stageRegion region name where the stage persists * @param presignedUrl Credential used for upload of a file * @param queryId last query id * @throws SnowflakeSQLException if upload failed even after retry */ @Override public void upload( SFSession session, String command, int parallelism, boolean uploadFromStream, String remoteStorageLocation, File srcFile, String destFileName, InputStream inputStream, FileBackedOutputStream fileBackedOutputStream, StorageObjectMetadata meta, String stageRegion, String presignedUrl, String queryId) throws SnowflakeSQLException { logger.info( StorageHelper.getStartUploadLog( "GCS", uploadFromStream, inputStream, fileBackedOutputStream, srcFile, destFileName)); final List<FileInputStream> toClose = new ArrayList<>(); long originalContentLength = meta.getContentLength(); SFPair<InputStream, Boolean> uploadStreamInfo = createUploadStream( srcFile, uploadFromStream, inputStream, meta, originalContentLength, fileBackedOutputStream, toClose, queryId); if (!(meta instanceof CommonObjectMetadata)) { throw new IllegalArgumentException("Unexpected metadata object type"); } Stopwatch stopwatch = new Stopwatch(); stopwatch.start(); if (!isNullOrEmpty(presignedUrl)) { logger.debug("Starting upload with presigned url", false); uploadWithPresignedUrl( session.getNetworkTimeoutInMilli(), session.getHttpClientSocketTimeout(), meta.getContentEncoding(), meta.getUserMetadata(), uploadStreamInfo.left, presignedUrl, session.getHttpClientKey(), queryId); stopwatch.stop(); logger.debug("Upload successful", false); if (uploadFromStream) { logger.info( "Uploaded data from input stream to GCS location: {}. It took {} ms", remoteStorageLocation, stopwatch.elapsedMillis()); } else { logger.info( "Uploaded file {} to GCS location: {}. It took {} ms", srcFile.getAbsolutePath(), remoteStorageLocation, stopwatch.elapsedMillis()); } // close any open streams in the "toClose" list and return for (FileInputStream is : toClose) { IOUtils.closeQuietly(is); } return; } // No presigned URL. This codepath is for when we have a token instead. int retryCount = 0; do { try { logger.debug("Starting upload", false); uploadWithDownScopedToken( parallelism, remoteStorageLocation, destFileName, meta.getContentEncoding(), meta.getUserMetadata(), meta.getContentLength(), uploadStreamInfo.left, queryId); stopwatch.stop(); logger.debug("Upload successful", false); if (uploadFromStream) { logger.info( "Uploaded data from input stream to GCS location: {}. It took {} ms", remoteStorageLocation, stopwatch.elapsedMillis()); } else { logger.info( "Uploaded file {} to GCS location: {}.
It took {} ms", srcFile.getAbsolutePath(), remoteStorageLocation, stopwatch.elapsedMillis()); } // close any open streams in the "toClose" list and return for (FileInputStream is : toClose) { IOUtils.closeQuietly(is); } return; } catch (Exception ex) { handleStorageException(ex, ++retryCount, StorageHelper.UPLOAD, session, command, queryId); if (uploadFromStream && fileBackedOutputStream == null) { throw new SnowflakeSQLLoggedException( queryId, session, SqlState.SYSTEM_ERROR, StorageHelper.getOperationException(StorageHelper.UPLOAD).getMessageCode(), ex, "Encountered exception during upload: " + ex.getMessage() + "\nCannot retry upload from stream."); } uploadStreamInfo = createUploadStream( srcFile, uploadFromStream, inputStream, meta, originalContentLength, fileBackedOutputStream, toClose, queryId); } } while (retryCount <= getMaxRetries()); for (FileInputStream is : toClose) { IOUtils.closeQuietly(is); } throw new SnowflakeSQLLoggedException( queryId, session, StorageHelper.getOperationException(StorageHelper.UPLOAD).getMessageCode(), SqlState.INTERNAL_ERROR, "Unexpected: upload unsuccessful without exception!"); } /** * Upload file with down scoped token. * * @param remoteStorageLocation storage container name * @param destFileName file name on remote storage after upload * @param contentEncoding Object's content encoding. We do special things for "gzip" * @param metadata Custom metadata to be uploaded with the object * @param content File content */ private void uploadWithDownScopedToken( int parallelism, String remoteStorageLocation, String destFileName, String contentEncoding, Map<String, String> metadata, long contentLength, InputStream content, String queryId) throws SnowflakeSQLException { logger.debug("Uploading file {} to bucket {}", destFileName, remoteStorageLocation); try { this.gcsAccessStrategy.uploadWithDownScopedToken( parallelism, remoteStorageLocation, destFileName, contentEncoding, metadata, contentLength, content, queryId); } catch (Exception e) { handleStorageException(e, 0, StorageHelper.UPLOAD, session, null, queryId); SnowflakeSQLException wrappedException; if (e instanceof SnowflakeSQLException) { wrappedException = (SnowflakeSQLException) e; } else { wrappedException = new SnowflakeSQLLoggedException( queryId, session, SqlState.SYSTEM_ERROR, StorageHelper.getOperationException(StorageHelper.UPLOAD).getMessageCode(), e, "Encountered exception during " + StorageHelper.UPLOAD + ": " + e.getMessage()); } throw wrappedException; } } /** * Performs upload using a presigned URL * * @param networkTimeoutInMilli Network timeout * @param contentEncoding Object's content encoding. We do special things for "gzip" * @param metadata Custom metadata to be uploaded with the object * @param content File content * @param presignedUrl Credential to upload the object * @param ocspAndProxyKey OCSP mode and proxy settings for httpclient * @throws SnowflakeSQLException if the upload fails */ private void uploadWithPresignedUrl( int networkTimeoutInMilli, int httpClientSocketTimeout, String contentEncoding, Map<String, String> metadata, InputStream content, String presignedUrl, HttpClientSettingsKey ocspAndProxyKey, String queryId) throws SnowflakeSQLException { try { URIBuilder uriBuilder = new URIBuilder(presignedUrl); HttpPut httpRequest = new HttpPut(uriBuilder.build()); logger.debug("Fetching result: {}", scrubPresignedUrl(presignedUrl)); // We set the contentEncoding to blank for GZIP files. We don't want GCS to think // our gzip files are gzips because it makes them download uncompressed, and // none of the other providers do that.
There's essentially no way for us to prevent that behavior. Bad Google. if ("gzip".equals(contentEncoding)) { contentEncoding = ""; } httpRequest.addHeader("content-encoding", contentEncoding); for (Entry<String, String> entry : metadata.entrySet()) { httpRequest.addHeader(GCS_METADATA_PREFIX + entry.getKey(), entry.getValue()); } InputStreamEntity contentEntity = new InputStreamEntity(content, -1); httpRequest.setEntity(contentEntity); CloseableHttpClient httpClient = HttpUtil.getHttpClient(ocspAndProxyKey, session.getHttpHeadersCustomizers()); // Put the file on storage using the presigned url HttpResponse response = RestRequest.executeWithRetries( httpClient, httpRequest, networkTimeoutInMilli / 1000, // retry timeout 0, httpClientSocketTimeout, // socket timeout in ms getMaxRetries(), 0, // no socket timeout injection null, // no canceling false, // no cookie false, // no url retry query parameters false, // no request_guid true, // retry on HTTP 403 true, // disable retry new ExecTimeTelemetryData(), session, ocspAndProxyKey, session.getHttpHeadersCustomizers(), false) .getHttpResponse(); logger.debug( "Call returned for URL: {}", (ArgSupplier) () -> scrubPresignedUrl(this.stageInfo.getPresignedUrl())); if (!isSuccessStatusCode(response.getStatusLine().getStatusCode())) { Exception ex = new HttpResponseException( response.getStatusLine().getStatusCode(), EntityUtils.toString(response.getEntity())); handleStorageException(ex, 0, StorageHelper.UPLOAD, session, null, queryId); } } catch (URISyntaxException e) { throw new SnowflakeSQLLoggedException( queryId, session, StorageHelper.getOperationException(StorageHelper.UPLOAD).getMessageCode(), SqlState.INTERNAL_ERROR, "Unexpected: upload presigned URL invalid"); } catch (Exception e) { throw new SnowflakeSQLLoggedException( queryId, session, StorageHelper.getOperationException(StorageHelper.UPLOAD).getMessageCode(), SqlState.INTERNAL_ERROR, "Unexpected: upload with presigned url failed"); } } /** * When we log the URL, make sure we don't log the credential * * @param presignedUrl Presigned URL with full signature * @return Just the object path */ private String scrubPresignedUrl(String presignedUrl) { if (isNullOrEmpty(presignedUrl)) { return ""; } int indexOfQueryString = presignedUrl.lastIndexOf("?"); indexOfQueryString = indexOfQueryString > 0 ? indexOfQueryString : presignedUrl.length() - 1; return presignedUrl.substring(0, indexOfQueryString); } private SFPair<InputStream, Boolean> createUploadStream( File srcFile, boolean uploadFromStream, InputStream inputStream, StorageObjectMetadata meta, long originalContentLength, FileBackedOutputStream fileBackedOutputStream, List<FileInputStream> toClose, String queryId) throws SnowflakeSQLException { logger.debug( "createUploadStream({}, {}, {}, {}, {}, {})", this, srcFile, uploadFromStream, inputStream, fileBackedOutputStream, toClose); final InputStream stream; FileInputStream srcFileStream = null; try { if (isEncrypting() && getEncryptionKeySize() <= 256) { try { final InputStream uploadStream = uploadFromStream ? (fileBackedOutputStream != null ?
fileBackedOutputStream.asByteSource().openStream() : inputStream) : (srcFileStream = new FileInputStream(srcFile)); toClose.add(srcFileStream); // Encrypt stream = EncryptionProvider.encrypt( meta, originalContentLength, uploadStream, this.encMat, this); uploadFromStream = true; } catch (Exception ex) { logger.error("Failed to encrypt input", ex); throw new SnowflakeSQLLoggedException( queryId, session, SqlState.INTERNAL_ERROR, StorageHelper.getOperationException(StorageHelper.UPLOAD).getMessageCode(), ex, "Failed to encrypt input", ex.getMessage()); } } else { if (uploadFromStream) { if (fileBackedOutputStream != null) { stream = fileBackedOutputStream.asByteSource().openStream(); } else { stream = inputStream; } } else { srcFileStream = new FileInputStream(srcFile); toClose.add(srcFileStream); stream = srcFileStream; } } } catch (FileNotFoundException ex) { logger.error("Failed to open input file", ex); throw new SnowflakeSQLLoggedException( queryId, session, SqlState.INTERNAL_ERROR, StorageHelper.getOperationException(StorageHelper.UPLOAD).getMessageCode(), ex, "Failed to open input file", ex.getMessage()); } catch (IOException ex) { logger.error("Failed to open input stream", ex); throw new SnowflakeSQLLoggedException( queryId, session, SqlState.INTERNAL_ERROR, StorageHelper.getOperationException(StorageHelper.UPLOAD).getMessageCode(), ex, "Failed to open input stream", ex.getMessage()); } return SFPair.of(stream, uploadFromStream); } @Override public void handleStorageException( Exception ex, int retryCount, String operation, SFSession session, String command, String queryId) throws SnowflakeSQLException { // no need to retry if it is invalid key exception if (ex.getCause() instanceof InvalidKeyException) { // Most likely cause is that the unlimited strength policy files are not installed // Log the error and throw a message that explains the cause SnowflakeFileTransferAgent.throwJCEMissingError(operation, ex, queryId); } // If there is no space left in the download location, java.io.IOException is thrown. // Don't retry. 
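// In condensed form, the caller-side contract for this handler: the
// upload/download loops above pre-increment the retry count, and this method
// either rethrows (non-retryable error, or retries exhausted) or merely logs
// so the loop can try again. A minimal sketch, with "doOperation" as a
// hypothetical stand-in for the actual upload or download attempt:
//
//   int retryCount = 0;
//   do {
//     try {
//       doOperation();
//       return; // success: leave the loop
//     } catch (Exception ex) {
//       handleStorageException(
//           ex, ++retryCount, StorageHelper.UPLOAD, session, command, queryId);
//     }
//   } while (retryCount <= getMaxRetries());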
if (getRootCause(ex) instanceof IOException) { SnowflakeFileTransferAgent.throwNoSpaceLeftError(session, operation, ex, queryId); } if (this.gcsAccessStrategy.handleStorageException( ex, retryCount, operation, session, command, queryId, this)) { // exception is handled in gcsAccessStrategy.handleStorageException } else if (ex instanceof InterruptedException || getRootCause(ex) instanceof SocketTimeoutException) { if (retryCount > getMaxRetries()) { throw new SnowflakeSQLLoggedException( queryId, session, SqlState.SYSTEM_ERROR, StorageHelper.getOperationException(operation).getMessageCode(), ex, "Encountered exception during " + operation + ": " + ex.getMessage()); } else { logger.debug( "Encountered exception ({}) during {}, retry count: {}", ex.getMessage(), operation, retryCount); } } else { throw new SnowflakeSQLLoggedException( queryId, session, SqlState.SYSTEM_ERROR, StorageHelper.getOperationException(operation).getMessageCode(), ex, "Encountered exception during " + operation + ": " + ex.getMessage()); } } /** Returns the material descriptor key */ @Override public String getMatdescKey() { return "matdesc"; } /** Adds encryption metadata to the StorageObjectMetadata object */ @Override public void addEncryptionMetadata( StorageObjectMetadata meta, MatDesc matDesc, byte[] ivData, byte[] encryptedKey, long contentLength) { meta.addUserMetadata(getMatdescKey(), matDesc.toString()); meta.addUserMetadata( GCS_ENCRYPTIONDATAPROP, buildEncryptionMetadataJSON( Base64.getEncoder().encodeToString(ivData), Base64.getEncoder().encodeToString(encryptedKey))); meta.setContentLength(contentLength); } /* * buildEncryptionMetadataJSON * Takes the base64-encoded iv and key and creates the JSON block to be * used as the encryptiondata metadata field on the blob. */ private String buildEncryptionMetadataJSON(String iv64, String key64) { return String.format( "{\"EncryptionMode\":\"FullBlob\",\"WrappedContentKey\"" + ":{\"KeyId\":\"symmKey1\",\"EncryptedKey\":\"%s\"" + ",\"Algorithm\":\"AES_CBC_256\"},\"EncryptionAgent\":" + "{\"Protocol\":\"1.0\",\"EncryptionAlgorithm\":" + "\"AES_CBC_256\"},\"ContentEncryptionIV\":\"%s\"" + ",\"KeyWrappingMetadata\":{\"EncryptionLibrary\":" + "\"Java 5.3.0\"}}", key64, iv64); } /* * parseEncryptionData * Takes the json string in the encryptiondata metadata field of the encrypted * blob and parses out the key and iv. Returns the pair as key = key, iv = value. 
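* For reference, buildEncryptionMetadataJSON above emits JSON of this shape
* (base64 values abbreviated):
*   {"EncryptionMode":"FullBlob",
*    "WrappedContentKey":{"KeyId":"symmKey1","EncryptedKey":"a2V5...","Algorithm":"AES_CBC_256"},
*    "EncryptionAgent":{"Protocol":"1.0","EncryptionAlgorithm":"AES_CBC_256"},
*    "ContentEncryptionIV":"aXY...",
*    "KeyWrappingMetadata":{"EncryptionLibrary":"Java 5.3.0"}}
* so the parser below reads WrappedContentKey.EncryptedKey as the key and
* ContentEncryptionIV as the iv.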
 */ private AbstractMap.SimpleEntry<String, String> parseEncryptionData( String jsonEncryptionData, String queryId) throws SnowflakeSQLException { ObjectMapper mapper = ObjectMapperFactory.getObjectMapper(); JsonFactory factory = mapper.getFactory(); try { JsonParser parser = factory.createParser(jsonEncryptionData); JsonNode encryptionDataNode = mapper.readTree(parser); String iv = encryptionDataNode.get("ContentEncryptionIV").asText(); String key = encryptionDataNode.get("WrappedContentKey").get("EncryptedKey").asText(); return new AbstractMap.SimpleEntry<>(key, iv); } catch (Exception ex) { throw new SnowflakeSQLException( queryId, ex, SqlState.SYSTEM_ERROR, ErrorCode.FILE_TRANSFER_ERROR.getMessageCode(), "Error parsing encryption data as json" + ": " + ex.getMessage()); } } /** Adds digest metadata to the StorageObjectMetadata object */ @Override public void addDigestMetadata(StorageObjectMetadata meta, String digest) { if (!isBlank(digest)) { meta.addUserMetadata("sfc-digest", digest); } } /** Gets digest metadata from the StorageObjectMetadata object */ @Override public String getDigestMetadata(StorageObjectMetadata meta) { return meta.getUserMetadata().get("sfc-digest"); } /* * Initializes the GCS client * This method is used during the object construction, but also to * reset/recreate the encapsulated storage client object with new * credentials (after token expiration) * @param stage The stage information that the client will operate on * @param encMat The encryption material * required to decrypt/encrypt content in stage * @throws IllegalArgumentException when invalid credentials are used */ private void setupGCSClient( StageInfo stage, RemoteStoreFileEncryptionMaterial encMat, SFSession session) throws IllegalArgumentException, SnowflakeSQLException { // Save the client creation parameters so that we can reuse them, // to reset the GCS client. 
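// The access-strategy selection performed just below, condensed into a
// sketch (it mirrors the code that follows; nothing here is new behavior):
//
//   boolean forceVirtual =
//       Boolean.valueOf(systemGetEnv("SNOWFLAKE_GCS_FORCE_VIRTUAL_STYLE_DOMAINS"));
//   this.gcsAccessStrategy =
//       (stage.getUseVirtualUrl() || forceVirtual)
//           ? new GCSAccessStrategyAwsSdk(stage, session) // virtual-style domains
//           : new GCSDefaultAccessStrategy(stage, session); // regular GCS client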
this.stageInfo = stage; this.encMat = encMat; this.session = session; logger.debug("Setting up the GCS client ", false); try { boolean overrideAwsAccessStrategy = Boolean.valueOf(systemGetEnv("SNOWFLAKE_GCS_FORCE_VIRTUAL_STYLE_DOMAINS")); if (stage.getUseVirtualUrl() || overrideAwsAccessStrategy) { this.gcsAccessStrategy = new GCSAccessStrategyAwsSdk(stage, session); } else { this.gcsAccessStrategy = new GCSDefaultAccessStrategy(stage, session); } if (encMat != null) { byte[] decodedKey = Base64.getDecoder().decode(encMat.getQueryStageMasterKey()); encryptionKeySize = decodedKey.length * 8; if (encryptionKeySize != 128 && encryptionKeySize != 192 && encryptionKeySize != 256) { throw new SnowflakeSQLException( QueryIdHelper.queryIdFromEncMatOr(encMat, null), SqlState.INTERNAL_ERROR, ErrorCode.INTERNAL_ERROR.getMessageCode(), "unsupported key size", encryptionKeySize); } } } catch (Exception ex) { throw new IllegalArgumentException("invalid_gcs_credentials"); } } protected static boolean areDisabledGcsDefaultCredentials(SFSession session) { return session != null && session.getDisableGcsDefaultCredentials() || convertSystemPropertyToBooleanValue(DISABLE_GCS_DEFAULT_CREDENTIALS_PROPERTY_NAME, true); } private static boolean isSuccessStatusCode(int code) { return code < 300 && code >= 200; } /** * Adds streaming ingest metadata to the StorageObjectMetadata object, used for streaming ingest * per client billing calculation */ @Override public void addStreamingIngestMetadata( StorageObjectMetadata meta, String clientName, String clientKey) { meta.addUserMetadata(GCS_STREAMING_INGEST_CLIENT_NAME, clientName); meta.addUserMetadata(GCS_STREAMING_INGEST_CLIENT_KEY, clientKey); } @Override public String getStreamingIngestClientName(StorageObjectMetadata meta) { return meta.getUserMetadata().get(GCS_STREAMING_INGEST_CLIENT_NAME); } @Override public String getStreamingIngestClientKey(StorageObjectMetadata meta) { return meta.getUserMetadata().get(GCS_STREAMING_INGEST_CLIENT_KEY); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/cloud/storage/SnowflakeS3Client.java ================================================ package net.snowflake.client.internal.jdbc.cloud.storage; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.createDefaultExecutorService; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.getRootCause; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import static net.snowflake.client.internal.jdbc.cloud.storage.S3ErrorHandler.retryRequestWithExponentialBackoff; import static net.snowflake.client.internal.jdbc.cloud.storage.S3ErrorHandler.throwIfClientExceptionOrMaxRetryReached; import java.io.BufferedInputStream; import java.io.File; import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.net.SocketTimeoutException; import java.net.URI; import java.security.InvalidKeyException; import java.time.Duration; import java.util.ArrayList; import java.util.Base64; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Properties; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import 
net.snowflake.client.api.http.HttpHeadersCustomizer; import net.snowflake.client.internal.core.HeaderCustomizerHttpRequestInterceptor; import net.snowflake.client.internal.core.SFBaseSession; import net.snowflake.client.internal.core.SFSession; import net.snowflake.client.internal.core.SFSessionProperty; import net.snowflake.client.internal.exception.SnowflakeSQLLoggedException; import net.snowflake.client.internal.jdbc.FileBackedOutputStream; import net.snowflake.client.internal.jdbc.MatDesc; import net.snowflake.client.internal.jdbc.SnowflakeFileTransferAgent; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.client.internal.util.SFPair; import net.snowflake.client.internal.util.Stopwatch; import net.snowflake.common.core.RemoteStoreFileEncryptionMaterial; import net.snowflake.common.core.SqlState; import org.apache.commons.io.IOUtils; import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; import software.amazon.awssdk.auth.credentials.AwsCredentials; import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; import software.amazon.awssdk.core.ResponseInputStream; import software.amazon.awssdk.core.async.AsyncRequestBody; import software.amazon.awssdk.core.async.AsyncResponseTransformer; import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; import software.amazon.awssdk.core.exception.SdkException; import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; import software.amazon.awssdk.http.nio.netty.ProxyConfiguration; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.services.s3.S3AsyncClientBuilder; import software.amazon.awssdk.services.s3.model.GetObjectRequest; import software.amazon.awssdk.services.s3.model.GetObjectResponse; import software.amazon.awssdk.services.s3.model.HeadObjectRequest; import software.amazon.awssdk.services.s3.model.HeadObjectResponse; import software.amazon.awssdk.services.s3.model.ListObjectsRequest; import software.amazon.awssdk.services.s3.model.ListObjectsResponse; import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.services.s3.model.ServerSideEncryption; import software.amazon.awssdk.services.s3.multipart.MultipartConfiguration; import software.amazon.awssdk.transfer.s3.S3TransferManager; import software.amazon.awssdk.transfer.s3.model.DownloadFileRequest; import software.amazon.awssdk.transfer.s3.model.FileDownload; import software.amazon.awssdk.transfer.s3.model.Upload; import software.amazon.awssdk.transfer.s3.model.UploadRequest; /** Wrapper around AmazonS3Client. 
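* The wrapped client is the AWS SDK v2 {@link S3AsyncClient} combined with the
* S3 transfer manager. A construction sketch; every literal below is
* illustrative only, not a documented default:
*
* <pre>{@code
* SnowflakeS3Client client =
*     new SnowflakeS3Client(
*         stageCredentials, // AWS_KEY_ID / AWS_SECRET_KEY / AWS_TOKEN, as returned by GS
*         new SnowflakeS3Client.ClientConfiguration(
*             10,      // maxConnections
*             3,       // maxErrorRetry
*             60_000,  // connectionTimeout, ms
*             60_000), // socketTimeout, ms
*         encMat,
*         proxyProperties,
*         "us-west-2", // stageRegion
*         null,        // stageEndPoint (FIPS endpoint, if needed)
*         true,        // isClientSideEncrypted
*         session,
*         false);      // useS3RegionalUrl
* }</pre>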
*/ public class SnowflakeS3Client implements SnowflakeStorageClient { private static final SFLogger logger = SFLoggerFactory.getLogger(SnowflakeS3Client.class); private static final String localFileSep = systemGetProperty("file.separator"); private static final String AMZ_KEY = "x-amz-key"; private static final String AMZ_IV = "x-amz-iv"; private static final String S3_STREAMING_INGEST_CLIENT_NAME = "ingestclientname"; private static final String S3_STREAMING_INGEST_CLIENT_KEY = "ingestclientkey"; private static final int EXECUTOR_SHUTDOWN_TIMEOUT_SECONDS = 5; // expired AWS token error code protected static final String EXPIRED_AWS_TOKEN_ERROR_CODE = "ExpiredToken"; private int encryptionKeySize = 0; // used for PUTs private S3AsyncClient amazonClient = null; private RemoteStoreFileEncryptionMaterial encMat = null; private ClientConfiguration clientConfig = null; private Properties proxyProperties = null; private String stageRegion = null; private String stageEndPoint = null; // FIPS endpoint, if needed private SFBaseSession session = null; private boolean isClientSideEncrypted = true; private boolean isUseS3RegionalUrl = false; public SnowflakeS3Client( Map stageCredentials, ClientConfiguration clientConfig, RemoteStoreFileEncryptionMaterial encMat, Properties proxyProperties, String stageRegion, String stageEndPoint, boolean isClientSideEncrypted, SFBaseSession session, boolean useS3RegionalUrl) throws SnowflakeSQLException { logger.debug( "Initializing Snowflake S3 client with encryption: {}, client side encrypted: {}", encMat != null, isClientSideEncrypted); this.session = session; this.isUseS3RegionalUrl = useS3RegionalUrl; setupSnowflakeS3Client( stageCredentials, clientConfig, encMat, proxyProperties, stageRegion, stageEndPoint, isClientSideEncrypted, session); } private void setupSnowflakeS3Client( Map stageCredentials, ClientConfiguration clientConfig, RemoteStoreFileEncryptionMaterial encMat, Properties proxyProperties, String stageRegion, String stageEndPoint, boolean isClientSideEncrypted, SFBaseSession session) throws SnowflakeSQLException { // Save the client creation parameters so that we can reuse them, // to reset the AWS client. We won't save the awsCredentials since // we will be refreshing that, every time we reset the AWS client this.clientConfig = clientConfig; this.stageRegion = stageRegion; this.encMat = encMat; this.proxyProperties = proxyProperties; this.stageEndPoint = stageEndPoint; // FIPS endpoint, if needed this.session = session; this.isClientSideEncrypted = isClientSideEncrypted; logger.debug("Setting up AWS client ", false); // Retrieve S3 stage credentials String awsID = (String) stageCredentials.get("AWS_KEY_ID"); String awsKey = (String) stageCredentials.get("AWS_SECRET_KEY"); String awsToken = (String) stageCredentials.get("AWS_TOKEN"); // initialize aws credentials AwsCredentials awsCredentials = (awsToken != null) ? 
AwsSessionCredentials.create(awsID, awsKey, awsToken) : AwsBasicCredentials.create(awsID, awsKey); ProxyConfiguration proxyConfiguration; if (session != null) { proxyConfiguration = CloudStorageProxyFactory.createProxyConfigurationForS3(session.getHttpClientKey()); } else { proxyConfiguration = CloudStorageProxyFactory.createSessionlessProxyConfigurationForS3(proxyProperties); } S3AsyncClientBuilder clientBuilder = S3AsyncClient.builder() .credentialsProvider(StaticCredentialsProvider.create(awsCredentials)); Region region = Region.of(stageRegion); if (this.stageEndPoint != null && !this.stageEndPoint.isEmpty() && !"null".equals(this.stageEndPoint)) { String endpointForOverride = this.stageEndPoint; String lower = endpointForOverride.toLowerCase(Locale.ROOT); if (!lower.startsWith("https://") && !lower.startsWith("http://")) { logger.debug( "AWS S3 Client: stage endpoint {} has no scheme, normalizing for URI creation.", this.stageEndPoint); endpointForOverride = "https://" + endpointForOverride; } clientBuilder.endpointOverride(URI.create(endpointForOverride)); clientBuilder.region(region); } else { if (this.isUseS3RegionalUrl) { String domainSuffixForRegionalUrl = getDomainSuffixForRegionalUrl(region.id()); String regionalEndpoint = "https://s3." + region.id() + "." + domainSuffixForRegionalUrl; clientBuilder.endpointOverride(URI.create(regionalEndpoint)); clientBuilder.region(region); } else { clientBuilder.region(region); } } // Explicitly force to use virtual address style clientBuilder.forcePathStyle(false); clientBuilder.httpClientBuilder( NettyNioAsyncHttpClient.builder() .maxConcurrency(clientConfig.getMaxConnections()) .connectionAcquisitionTimeout(Duration.ofSeconds(60)) .proxyConfiguration(proxyConfiguration) .connectionTimeout(Duration.ofMillis(clientConfig.connectionTimeout)) .readTimeout(Duration.ofMillis(clientConfig.socketTimeout)) .writeTimeout(Duration.ofMillis(clientConfig.socketTimeout))); clientBuilder.multipartEnabled(true); clientBuilder.multipartConfiguration( MultipartConfiguration.builder().thresholdInBytes(16L * 1024 * 1024).build()); ClientOverrideConfiguration.Builder configurationBuilder = ClientOverrideConfiguration.builder(); if (session instanceof SFSession) { List headersCustomizers = ((SFSession) session).getHttpHeadersCustomizers(); if (headersCustomizers != null && !headersCustomizers.isEmpty()) { configurationBuilder.addExecutionInterceptor( new HeaderCustomizerHttpRequestInterceptor(headersCustomizers)); } } clientBuilder.overrideConfiguration(configurationBuilder.build()); if (encMat != null) { byte[] decodedKey = Base64.getDecoder().decode(encMat.getQueryStageMasterKey()); encryptionKeySize = decodedKey.length * 8; if (encryptionKeySize != 128 && encryptionKeySize != 192 && encryptionKeySize != 256) { throw new SnowflakeSQLLoggedException( QueryIdHelper.queryIdFromEncMatOr(encMat, null), session, ErrorCode.FILE_TRANSFER_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, "unsupported key size", encryptionKeySize); } } amazonClient = clientBuilder.build(); } static String getDomainSuffixForRegionalUrl(String regionName) { return regionName.toLowerCase().startsWith("cn-") ? 
"amazonaws.com.cn" : "amazonaws.com"; } // Returns the Max number of retry attempts @Override public int getMaxRetries() { if (session != null && session .getConnectionPropertiesMap() .containsKey(SFSessionProperty.PUT_GET_MAX_RETRIES)) { return (int) session.getConnectionPropertiesMap().get(SFSessionProperty.PUT_GET_MAX_RETRIES); } return 25; } // Returns the max exponent for multiplying backoff with the power of 2, the value // of 4 will give us 16secs as the max number of time to sleep before retry @Override public int getRetryBackoffMaxExponent() { return 4; } // Returns the min number of milliseconds to sleep before retry @Override public int getRetryBackoffMin() { return 1000; } @Override public boolean isEncrypting() { return encryptionKeySize > 0 && isClientSideEncrypted; } @Override public int getEncryptionKeySize() { return encryptionKeySize; } /** * Renew the S3 client with fresh AWS credentials/access token * * @param stageCredentials a Map of new AWS credential properties, to refresh the client with (as * returned by GS) * @throws SnowflakeSQLException if any error occurs */ @Override public void renew(Map stageCredentials) throws SnowflakeSQLException { logger.debug("Renewing the Snowflake S3 client"); // We renew the client with fresh credentials and with its original parameters setupSnowflakeS3Client( stageCredentials, this.clientConfig, this.encMat, this.proxyProperties, this.stageRegion, this.stageEndPoint, this.isClientSideEncrypted, this.session); } @Override public void shutdown() { logger.debug("Shutting down the Snowflake S3 client"); amazonClient.close(); } @Override public StorageObjectSummaryCollection listObjects(String remoteStorageLocation, String prefix) throws StorageProviderException { ListObjectsResponse objListing = amazonClient .listObjects( ListObjectsRequest.builder().bucket(remoteStorageLocation).prefix(prefix).build()) .join(); return new StorageObjectSummaryCollection(objListing.contents(), remoteStorageLocation); } @Override public StorageObjectMetadata getObjectMetadata(String remoteStorageLocation, String prefix) throws StorageProviderException { return new S3ObjectMetadata( amazonClient .headObject( HeadObjectRequest.builder().bucket(remoteStorageLocation).key(prefix).build()) .join()); } /** * Download a file from S3. 
* * @param session session object * @param command command to download file * @param localLocation local file path * @param destFileName destination file name * @param parallelism number of threads for parallel downloading * @param remoteStorageLocation s3 bucket name * @param stageFilePath stage file path * @param stageRegion region name where the stage persists * @param presignedUrl Not used in S3 * @param queryId last query id * @throws SnowflakeSQLException if download failed without an exception * @throws SnowflakeSQLException if failed to decrypt downloaded file * @throws SnowflakeSQLException if file metadata is incomplete */ @Override public void download( SFSession session, String command, String localLocation, String destFileName, int parallelism, String remoteStorageLocation, String stageFilePath, String stageRegion, String presignedUrl, String queryId) throws SnowflakeSQLException { Stopwatch stopwatch = new Stopwatch(); stopwatch.start(); String localFilePath = localLocation + localFileSep + destFileName; logger.debug( "Starting download of file from S3 stage path: {} to {}", stageFilePath, localFilePath); int retryCount = 0; do { ThreadPoolExecutor executorService = null; S3TransferManager tx = null; try { File localFile = new File(localFilePath); logger.debug("Creating executor service for transfer manager with {} threads", parallelism); executorService = createDefaultExecutorService("s3-transfer-manager-downloader-", parallelism); // download files from s3 tx = S3TransferManager.builder().s3Client(amazonClient).executor(executorService).build(); FileDownload fileDownload = tx.downloadFile( DownloadFileRequest.builder() .getObjectRequest( GetObjectRequest.builder() .bucket(remoteStorageLocation) .key(stageFilePath) .build()) .destination(localFile.toPath()) .build()); // Pull object metadata from S3 CompletableFuture metaFuture = amazonClient.headObject( HeadObjectRequest.builder() .bucket(remoteStorageLocation) .key(stageFilePath) .build()); fileDownload.completionFuture().join(); HeadObjectResponse meta = metaFuture.join(); Map metaMap = SnowflakeUtil.createCaseInsensitiveMap(meta.metadata()); String key = metaMap.get(AMZ_KEY); String iv = metaMap.get(AMZ_IV); SnowflakeUtil.assureOnlyUserAccessibleFilePermissions( localFile, session.isOwnerOnlyStageFilePermissionsEnabled()); stopwatch.stop(); long downloadMillis = stopwatch.elapsedMillis(); if (this.isEncrypting()) { stopwatch.restart(); if (key == null || iv == null) { throw new SnowflakeSQLLoggedException( queryId, session, StorageHelper.getOperationException(StorageHelper.DOWNLOAD).getMessageCode(), SqlState.INTERNAL_ERROR, "File metadata incomplete"); } // Decrypt file try { EncryptionProvider.decrypt(localFile, key, iv, this.encMat); stopwatch.stop(); long decryptMillis = stopwatch.elapsedMillis(); logger.info( "S3 file {} downloaded to {}. It took {} ms (download: {} ms, decryption: {} ms) with {} retries", stageFilePath, localFile.getAbsolutePath(), downloadMillis + decryptMillis, downloadMillis, decryptMillis, retryCount); } catch (Exception ex) { logger.error("Error decrypting file", ex); throw ex; } } else { logger.info( "S3 file {} downloaded to {}. 
It took {} ms with {} retries", stageFilePath, localFile.getAbsolutePath(), downloadMillis, retryCount); } return; } catch (Exception ex) { handleS3Exception( ex, ++retryCount, StorageHelper.DOWNLOAD, session, command, this, queryId); } finally { closeTransferManagerShutdownExecutor("download", tx, executorService); } } while (retryCount <= getMaxRetries()); throw new SnowflakeSQLLoggedException( queryId, session, StorageHelper.getOperationException(StorageHelper.DOWNLOAD).getMessageCode(), SqlState.INTERNAL_ERROR, "Unexpected: download unsuccessful without exception!"); } /** * Download a file from remote storage * * @param session session object * @param command command to download file * @param parallelism number of threads for parallel downloading * @param remoteStorageLocation remote storage location, i.e. bucket for s3 * @param stageFilePath stage file path * @param stageRegion region name where the stage persists * @param presignedUrl Not used in S3 * @param queryId last query id * @return input file stream * @throws SnowflakeSQLException when the download fails */ @Override public InputStream downloadToStream( SFSession session, String command, int parallelism, String remoteStorageLocation, String stageFilePath, String stageRegion, String presignedUrl, String queryId) throws SnowflakeSQLException { logger.debug("Starting download of file from S3 stage path: {} to input stream", stageFilePath); Stopwatch stopwatch = new Stopwatch(); stopwatch.start(); int retryCount = 0; do { try { CompletableFuture<ResponseInputStream<GetObjectResponse>> streamFuture = amazonClient.getObject( GetObjectRequest.builder().bucket(remoteStorageLocation).key(stageFilePath).build(), AsyncResponseTransformer.toBlockingInputStream()); CompletableFuture<HeadObjectResponse> metaFuture = amazonClient.headObject( HeadObjectRequest.builder() .bucket(remoteStorageLocation) .key(stageFilePath) .build()); HeadObjectResponse meta = metaFuture.join(); InputStream stream = streamFuture.join(); stopwatch.stop(); long downloadMillis = stopwatch.elapsedMillis(); Map<String, String> metaMap = SnowflakeUtil.createCaseInsensitiveMap(meta.metadata()); String key = metaMap.get(AMZ_KEY); String iv = metaMap.get(AMZ_IV); if (this.isEncrypting()) { stopwatch.restart(); if (key == null || iv == null) { throw new SnowflakeSQLLoggedException( queryId, session, StorageHelper.getOperationException(StorageHelper.DOWNLOAD).getMessageCode(), SqlState.INTERNAL_ERROR, "File metadata incomplete"); } try { InputStream is = EncryptionProvider.decryptStream(stream, key, iv, encMat); stopwatch.stop(); long decryptMillis = stopwatch.elapsedMillis(); logger.info( "S3 file {} downloaded to input stream. It took {} ms " + "(download: {} ms, decryption: {} ms) with {} retries", stageFilePath, downloadMillis + decryptMillis, downloadMillis, decryptMillis, retryCount); return is; } catch (Exception ex) { logger.error("Error decrypting file", ex); throw ex; } } else { logger.info( "S3 file {} downloaded to input stream. Download took {} ms with {} retries", stageFilePath, downloadMillis, retryCount); } return stream; } catch (Exception ex) { handleS3Exception( ex, ++retryCount, StorageHelper.DOWNLOAD, session, command, this, queryId); } } while (retryCount <= getMaxRetries()); throw new SnowflakeSQLLoggedException( queryId, session, StorageHelper.getOperationException(StorageHelper.DOWNLOAD).getMessageCode(), SqlState.INTERNAL_ERROR, "Unexpected: download unsuccessful without exception!"); } /** * Upload a file (-stream) to S3. 
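*
* <p>In outline: the source becomes an InputStream (encrypted client-side when
* enabled), then goes through the S3 transfer manager; without client-side
* encryption the object is server-side encrypted with SSE-S3 instead. A sketch
* of the body below:
*
* <pre>{@code
* PutObjectRequest.Builder putRequestBuilder = ...; // bucket, key, metadata set
* if (!isClientSideEncrypted) {
*   putRequestBuilder.serverSideEncryption(ServerSideEncryption.AES256); // SSE-S3
* }
* PutObjectRequest request = putRequestBuilder.build();
* Upload up = tx.upload(UploadRequest.builder()
*     .putObjectRequest(request)
*     .requestBody(AsyncRequestBody.fromInputStream(
*         new BufferedInputStream(stream), // see aws/aws-sdk-java-v2#6174
*         request.contentLength(), executorService))
*     .build());
* up.completionFuture().join();
* }</pre>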
* * @param session session object * @param command upload command * @param parallelism number of threads do parallel uploading * @param uploadFromStream true if upload source is stream * @param remoteStorageLocation s3 bucket name * @param srcFile source file if not uploading from a stream * @param destFileName file name on s3 after upload * @param inputStream stream used for uploading if fileBackedOutputStream is null * @param fileBackedOutputStream stream used for uploading if not null * @param meta object meta data * @param stageRegion region name where the stage persists * @param presignedUrl Not used in S3 * @param queryId last query id * @throws SnowflakeSQLException if upload failed even after retry */ @Override public void upload( SFSession session, String command, int parallelism, boolean uploadFromStream, String remoteStorageLocation, File srcFile, String destFileName, InputStream inputStream, FileBackedOutputStream fileBackedOutputStream, StorageObjectMetadata meta, String stageRegion, String presignedUrl, String queryId) throws SnowflakeSQLException { logger.info( StorageHelper.getStartUploadLog( "S3", uploadFromStream, inputStream, fileBackedOutputStream, srcFile, destFileName)); final long originalContentLength = meta.getContentLength(); final List toClose = new ArrayList<>(); SFPair uploadStreamInfo = createUploadStream( srcFile, uploadFromStream, inputStream, fileBackedOutputStream, meta, originalContentLength, toClose, queryId); S3TransferManager tx = null; int retryCount = 0; Stopwatch stopwatch = new Stopwatch(); stopwatch.start(); do { tx = null; ThreadPoolExecutor executorService = null; try { PutObjectRequest.Builder putRequestBuilder = ((S3ObjectMetadata) meta) .getS3PutObjectRequest().toBuilder() .bucket(remoteStorageLocation) .key(destFileName); logger.debug("Creating executor service for transfer manager with {} threads", parallelism); executorService = createDefaultExecutorService("s3-transfer-manager-uploader-", parallelism); // upload files to s3 tx = S3TransferManager.builder().s3Client(amazonClient).executor(executorService).build(); final Upload myUpload; if (!this.isClientSideEncrypted) { // since we're not client-side encrypting, make sure we're server-side encrypting with // SSE-S3 putRequestBuilder.serverSideEncryption(ServerSideEncryption.AES256); } PutObjectRequest request = putRequestBuilder.build(); if (uploadStreamInfo.right) { myUpload = tx.upload( UploadRequest.builder() .putObjectRequest(request) .requestBody( AsyncRequestBody.fromInputStream( // wrapping with BufferedInputStream to mitigate // https://github.com/aws/aws-sdk-java-v2/issues/6174 new BufferedInputStream(uploadStreamInfo.left), request.contentLength(), executorService)) .build()); } else { myUpload = tx.upload( UploadRequest.builder() .putObjectRequest(request) .requestBody(AsyncRequestBody.fromFile(srcFile)) .build()); } myUpload.completionFuture().join(); stopwatch.stop(); long uploadMillis = stopwatch.elapsedMillis(); // get out for (FileInputStream is : toClose) { IOUtils.closeQuietly(is); } if (uploadFromStream) { logger.info( "Uploaded data from input stream to S3 location: {}. It took {} ms with {} retries", destFileName, uploadMillis, retryCount); } else { logger.info( "Uploaded file {} to S3 location: {}. 
It took {} ms with {} retries", srcFile.getAbsolutePath(), destFileName, uploadMillis, retryCount); } return; } catch (Exception ex) { handleS3Exception(ex, ++retryCount, StorageHelper.UPLOAD, session, command, this, queryId); if (uploadFromStream && fileBackedOutputStream == null) { throw new SnowflakeSQLException( queryId, ex, SqlState.SYSTEM_ERROR, ErrorCode.IO_ERROR.getMessageCode(), "Encountered exception during upload: " + ex.getMessage() + "\nCannot retry upload from stream."); } uploadStreamInfo = createUploadStream( srcFile, uploadFromStream, inputStream, fileBackedOutputStream, meta, originalContentLength, toClose, queryId); } finally { closeTransferManagerShutdownExecutor("upload", tx, executorService); } } while (retryCount <= getMaxRetries()); for (FileInputStream is : toClose) { IOUtils.closeQuietly(is); } throw new SnowflakeSQLLoggedException( queryId, session, StorageHelper.getOperationException(StorageHelper.UPLOAD).getMessageCode(), SqlState.INTERNAL_ERROR, "Unexpected: upload unsuccessful without exception!"); } private SFPair createUploadStream( File srcFile, boolean uploadFromStream, InputStream inputStream, FileBackedOutputStream fileBackedOutputStream, StorageObjectMetadata meta, long originalContentLength, List toClose, String queryId) throws SnowflakeSQLException { logger.debug( "createUploadStream({}, {}, {}, {}, {}, {}, {}) " + "keySize: {}", this, srcFile, uploadFromStream, inputStream, fileBackedOutputStream, meta, toClose, this.getEncryptionKeySize()); final InputStream result; FileInputStream srcFileStream = null; if (isEncrypting()) { try { final InputStream uploadStream = uploadFromStream ? (fileBackedOutputStream != null ? fileBackedOutputStream.asByteSource().openStream() : inputStream) : (srcFileStream = new FileInputStream(srcFile)); toClose.add(srcFileStream); // Encrypt result = EncryptionProvider.encrypt( meta, originalContentLength, uploadStream, this.encMat, this); uploadFromStream = true; } catch (Exception ex) { logger.error("Failed to encrypt input", ex); throw new SnowflakeSQLLoggedException( queryId, session, SqlState.INTERNAL_ERROR, StorageHelper.getOperationException(StorageHelper.UPLOAD).getMessageCode(), ex, "Failed to encrypt input", ex.getMessage()); } } else { try { result = uploadFromStream ? (fileBackedOutputStream != null ? 
fileBackedOutputStream.asByteSource().openStream() : inputStream) : (srcFileStream = new FileInputStream(srcFile)); toClose.add(srcFileStream); } catch (FileNotFoundException ex) { logger.error("Failed to open input file", ex); throw new SnowflakeSQLLoggedException( queryId, session, SqlState.INTERNAL_ERROR, StorageHelper.getOperationException(StorageHelper.UPLOAD).getMessageCode(), ex, "Failed to open input file", ex.getMessage()); } catch (IOException ex) { logger.error("Failed to open input stream", ex); throw new SnowflakeSQLLoggedException( queryId, session, SqlState.INTERNAL_ERROR, StorageHelper.getOperationException(StorageHelper.UPLOAD).getMessageCode(), ex, "Failed to open input stream", ex.getMessage()); } } return SFPair.of(result, uploadFromStream); } @Override public void handleStorageException( Exception ex, int retryCount, String operation, SFSession session, String command, String queryId) throws SnowflakeSQLException { handleS3Exception(ex, retryCount, operation, session, command, this, queryId); } private static void handleS3Exception( Exception ex, int retryCount, String operation, SFSession session, String command, SnowflakeS3Client s3Client, String queryId) throws SnowflakeSQLException { // no need to retry if it is invalid key exception if (ex.getCause() instanceof InvalidKeyException) { // Most likely cause is that the unlimited strength policy files are not installed // Log the error and throw a message that explains the cause SnowflakeFileTransferAgent.throwJCEMissingError(operation, ex, queryId); } // If there is no space left in the download location, java.io.IOException is thrown. // Don't retry. if (getRootCause(ex) instanceof IOException) { SnowflakeFileTransferAgent.throwNoSpaceLeftError(session, operation, ex, queryId); } // Don't retry if max retries has been reached or the error code is 404/400 Throwable cause = ex.getCause(); if (cause instanceof SdkException) { logger.debug("SdkException: " + ex.getMessage()); if (retryCount > s3Client.getMaxRetries() || S3ErrorHandler.isClientException400Or404(cause)) { throwIfClientExceptionOrMaxRetryReached( operation, session, command, queryId, s3Client, cause); } else { retryRequestWithExponentialBackoff( ex, retryCount, operation, session, command, s3Client, queryId, cause); } } else { if (ex instanceof InterruptedException || getRootCause(ex) instanceof SocketTimeoutException || ex instanceof CompletionException) { if (retryCount > s3Client.getMaxRetries()) { throw new SnowflakeSQLLoggedException( queryId, session, SqlState.SYSTEM_ERROR, StorageHelper.getOperationException(operation).getMessageCode(), ex, "Encountered exception during " + operation + ": " + ex.getMessage()); } else { logger.debug( "Encountered exception ({}) during {}, retry count: {}", ex.getMessage(), operation, retryCount); } } else { throw new SnowflakeSQLLoggedException( queryId, session, SqlState.SYSTEM_ERROR, StorageHelper.getOperationException(operation).getMessageCode(), ex, "Encountered exception during " + operation + ": " + ex.getMessage()); } } } /* Returns the material descriptor key */ @Override public String getMatdescKey() { return "x-amz-matdesc"; } /* Adds encryption metadata to the StorageObjectMetadata object */ @Override public void addEncryptionMetadata( StorageObjectMetadata meta, MatDesc matDesc, byte[] ivData, byte[] encryptedKey, long contentLength) { meta.addUserMetadata(getMatdescKey(), matDesc.toString()); meta.addUserMetadata(AMZ_KEY, Base64.getEncoder().encodeToString(encryptedKey)); meta.addUserMetadata(AMZ_IV, 
Base64.getEncoder().encodeToString(ivData)); meta.setContentLength(contentLength); } /* Adds digest metadata to the StorageObjectMetadata object */ @Override public void addDigestMetadata(StorageObjectMetadata meta, String digest) { meta.addUserMetadata("sfc-digest", digest); } /* Gets digest metadata to the StorageObjectMetadata object */ @Override public String getDigestMetadata(StorageObjectMetadata meta) { return meta.getUserMetadata().get("sfc-digest"); } /* * Adds streaming ingest metadata to the StorageObjectMetadata object, used for streaming ingest * per client billing calculation */ @Override public void addStreamingIngestMetadata( StorageObjectMetadata meta, String clientName, String clientKey) { meta.addUserMetadata(S3_STREAMING_INGEST_CLIENT_NAME, clientName); meta.addUserMetadata(S3_STREAMING_INGEST_CLIENT_KEY, clientKey); } @Override public String getStreamingIngestClientName(StorageObjectMetadata meta) { return meta.getUserMetadata().get(S3_STREAMING_INGEST_CLIENT_NAME); } @Override public String getStreamingIngestClientKey(StorageObjectMetadata meta) { return meta.getUserMetadata().get(S3_STREAMING_INGEST_CLIENT_KEY); } public static class ClientConfiguration { private final int maxConnections; private final int maxErrorRetry; private final int connectionTimeout; private final int socketTimeout; public ClientConfiguration( int maxConnections, int maxErrorRetry, int connectionTimeout, int socketTimeout) { this.maxConnections = maxConnections; this.maxErrorRetry = maxErrorRetry; this.connectionTimeout = connectionTimeout; this.socketTimeout = socketTimeout; } public int getMaxConnections() { return maxConnections; } public int getMaxErrorRetry() { return maxErrorRetry; } public int getConnectionTimeout() { return connectionTimeout; } public int getSocketTimeout() { return socketTimeout; } } private static void closeTransferManagerShutdownExecutor( String name, S3TransferManager tx, ThreadPoolExecutor executor) { try { if (tx != null) { tx.close(); } } catch (Exception e) { logger.warn("Failed to close S3 {} transfer manager", name, e); } finally { if (executor != null) { try { executor.shutdown(); if (!executor.awaitTermination(EXECUTOR_SHUTDOWN_TIMEOUT_SECONDS, TimeUnit.SECONDS)) { logger.warn( "S3 {} executor did not terminate within {} seconds, forcing shutdown", name, EXECUTOR_SHUTDOWN_TIMEOUT_SECONDS); executor.shutdownNow(); } } catch (InterruptedException e) { // The only checked exception from awaitTermination so need to reset the interrupt flag logger.warn("S3 {} executor shutdown interrupted, forcing shutdown", name); executor.shutdownNow(); Thread.currentThread().interrupt(); } catch (Exception e) { logger.warn("Failed to shut down S3 {} executor, forcing shutdown", name, e); executor.shutdownNow(); } } } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/cloud/storage/SnowflakeStorageClient.java ================================================ package net.snowflake.client.internal.jdbc.cloud.storage; import java.io.File; import java.io.InputStream; import java.util.Map; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.core.HttpClientSettingsKey; import net.snowflake.client.internal.core.SFSession; import net.snowflake.client.internal.exception.SnowflakeSQLLoggedException; import net.snowflake.client.internal.jdbc.FileBackedOutputStream; import net.snowflake.client.internal.jdbc.MatDesc; import 
net.snowflake.common.core.SqlState; /** Interface for storage client provider implementations */ public interface SnowflakeStorageClient { /** * @return Returns the Max number of retry attempts */ int getMaxRetries(); /** * Returns the max exponent for multiplying backoff with the power of 2, the value of 4 will give * us 16secs as the max number of time to sleep before retry * * @return Returns the exponent */ int getRetryBackoffMaxExponent(); /** * @return Returns the min number of milliseconds to sleep before retry */ int getRetryBackoffMin(); /** * @return Returns true if encryption is enabled */ boolean isEncrypting(); /** * @return Returns the size of the encryption key */ int getEncryptionKeySize(); /** * @return Whether this client requires the use of presigned URLs for upload and download instead * of credentials that work for all files uploaded/ downloaded to a stage path. True for GCS. */ default boolean requirePresignedUrl() { return false; } /** * Re-creates the encapsulated storage client with a fresh access token * * @param stageCredentials a Map (as returned by GS) which contains the new credential properties * @throws SnowflakeSQLException failure to renew the storage client */ void renew(Map stageCredentials) throws SnowflakeSQLException; /** shuts down the client */ void shutdown(); /** * For a set of remote storage objects under a remote location and a given prefix/path returns * their properties wrapped in ObjectSummary objects * * @param remoteStorageLocation location, i.e. bucket for S3 * @param prefix the prefix to list * @return a collection of storage summary objects * @throws StorageProviderException cloud storage provider error */ StorageObjectSummaryCollection listObjects(String remoteStorageLocation, String prefix) throws StorageProviderException; /** * Returns the metadata properties for a remote storage object * * @param remoteStorageLocation location, i.e. bucket for S3 * @param prefix the prefix/path of the object to retrieve * @return storage metadata object * @throws StorageProviderException cloud storage provider error */ StorageObjectMetadata getObjectMetadata(String remoteStorageLocation, String prefix) throws StorageProviderException; /** * Download a file from remote storage. * * @deprecated use {@link #download(SFSession, String, String, String, int, String, String, * String, String, String)} * @param connection connection object * @param command command to download file * @param localLocation local file path * @param destFileName destination file name * @param parallelism number of threads for parallel downloading * @param remoteStorageLocation remote storage location, i.e. bucket for S3 * @param stageFilePath stage file path * @param stageRegion region name where the stage persists * @param presignedUrl presigned URL for download. Used by GCP. * @throws SnowflakeSQLException download failure */ @Deprecated default void download( SFSession connection, String command, String localLocation, String destFileName, int parallelism, String remoteStorageLocation, String stageFilePath, String stageRegion, String presignedUrl) throws SnowflakeSQLException { download( connection, command, localLocation, destFileName, parallelism, remoteStorageLocation, stageFilePath, stageRegion, presignedUrl, null); } /** * Download a file from remote storage. 
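*
* <p>A representative call, with illustrative values only (presignedUrl is
* used by the GCS implementation; other providers pass null):
*
* <pre>{@code
* client.download(session, command,
*     "/tmp/downloads", "data_0_0_0.csv.gz",       // local dir and file name
*     4,                                           // parallelism
*     "my-bucket", "stage/path/data_0_0_0.csv.gz", // location and stage file path
*     "us-west-2",                                 // stage region
*     null,                                        // presignedUrl (GCP only)
*     queryId);
* }</pre>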
* * @param connection connection object * @param command command to download file * @param localLocation local file path * @param destFileName destination file name * @param parallelism number of threads for parallel downloading * @param remoteStorageLocation remote storage location, i.e. bucket for S3 * @param stageFilePath stage file path * @param stageRegion region name where the stage persists * @param presignedUrl presigned URL for download. Used by GCP. * @param queryId last query id * @throws SnowflakeSQLException download failure */ void download( SFSession connection, String command, String localLocation, String destFileName, int parallelism, String remoteStorageLocation, String stageFilePath, String stageRegion, String presignedUrl, String queryId) throws SnowflakeSQLException; /** * Download a file from remote storage * * @deprecated use {@link #download(SFSession, String, String, String, int, String, String, * String, String, String)} * @param connection connection object * @param command command to download file * @param parallelism number of threads for parallel downloading * @param remoteStorageLocation remote storage location, i.e. bucket for s3 * @param stageFilePath stage file path * @param stageRegion region name where the stage persists * @param presignedUrl presigned URL for download. Used by GCP. * @return input file stream * @throws SnowflakeSQLException when the download fails */ @Deprecated default InputStream downloadToStream( SFSession connection, String command, int parallelism, String remoteStorageLocation, String stageFilePath, String stageRegion, String presignedUrl) throws SnowflakeSQLException { return downloadToStream( connection, command, parallelism, remoteStorageLocation, stageFilePath, stageRegion, presignedUrl, null); } /** * Download a file from remote storage * * @param connection connection object * @param command command to download file * @param parallelism number of threads for parallel downloading * @param remoteStorageLocation remote storage location, i.e. bucket for s3 * @param stageFilePath stage file path * @param stageRegion region name where the stage persists * @param presignedUrl presigned URL for download. Used by GCP. * @param queryId last query id * @return input file stream * @throws SnowflakeSQLException when the download fails */ InputStream downloadToStream( SFSession connection, String command, int parallelism, String remoteStorageLocation, String stageFilePath, String stageRegion, String presignedUrl, String queryId) throws SnowflakeSQLException; /** * Upload a file (-stream) to remote storage * * @deprecated use {@link #upload(SFSession, String, int, boolean, String, File, String, * InputStream, FileBackedOutputStream, StorageObjectMetadata, String, String, String)} * @param connection connection object * @param command upload command * @param parallelism number of threads to do parallel uploading * @param uploadFromStream true if upload source is stream * @param remoteStorageLocation s3 bucket name * @param srcFile source file if not uploading from a stream * @param destFileName file name on remote storage after upload * @param inputStream stream used for uploading if fileBackedOutputStream is null * @param fileBackedOutputStream stream used for uploading if not null * @param meta object metadata * @param stageRegion region name where the stage persists * @param presignedUrl presigned URL for upload. Used by GCP. 
* @throws SnowflakeSQLException if upload failed even after retry */ @Deprecated default void upload( SFSession connection, String command, int parallelism, boolean uploadFromStream, String remoteStorageLocation, File srcFile, String destFileName, InputStream inputStream, FileBackedOutputStream fileBackedOutputStream, StorageObjectMetadata meta, String stageRegion, String presignedUrl) throws SnowflakeSQLException { upload( connection, command, parallelism, uploadFromStream, remoteStorageLocation, srcFile, destFileName, inputStream, fileBackedOutputStream, meta, stageRegion, presignedUrl, null); } /** * Upload a file (-stream) to remote storage * * @param connection connection object * @param command upload command * @param parallelism number of threads do parallel uploading * @param uploadFromStream true if upload source is stream * @param remoteStorageLocation s3 bucket name * @param srcFile source file if not uploading from a stream * @param destFileName file name on remote storage after upload * @param inputStream stream used for uploading if fileBackedOutputStream is null * @param fileBackedOutputStream stream used for uploading if not null * @param meta object meta data * @param stageRegion region name where the stage persists * @param presignedUrl presigned URL for upload. Used by GCP. * @param queryId last query id * @throws SnowflakeSQLException if upload failed even after retry */ void upload( SFSession connection, String command, int parallelism, boolean uploadFromStream, String remoteStorageLocation, File srcFile, String destFileName, InputStream inputStream, FileBackedOutputStream fileBackedOutputStream, StorageObjectMetadata meta, String stageRegion, String presignedUrl, String queryId) throws SnowflakeSQLException; /** * Upload a file (-stream) to remote storage with Pre-signed URL without JDBC connection. * *
<p>
NOTE: This function is only supported when pre-signed URL is used. * * @deprecated use {@link #uploadWithPresignedUrlWithoutConnection(int, HttpClientSettingsKey, * int, boolean, String, File, String, InputStream, FileBackedOutputStream, * StorageObjectMetadata, String, String, String)} This method was left to keep backward * compatibility * @param networkTimeoutInMilli Network timeout for the upload * @param ocspModeAndProxyKey OCSP mode and proxy settings for the upload. * @param parallelism number of threads do parallel uploading * @param uploadFromStream true if upload source is stream * @param remoteStorageLocation s3 bucket name * @param srcFile source file if not uploading from a stream * @param destFileName file name on remote storage after upload * @param inputStream stream used for uploading if fileBackedOutputStream is null * @param fileBackedOutputStream stream used for uploading if not null * @param meta object meta data * @param stageRegion region name where the stage persists * @param presignedUrl presigned URL for upload. Used by GCP. * @throws SnowflakeSQLException if upload failed even after retry */ @Deprecated default void uploadWithPresignedUrlWithoutConnection( int networkTimeoutInMilli, HttpClientSettingsKey ocspModeAndProxyKey, int parallelism, boolean uploadFromStream, String remoteStorageLocation, File srcFile, String destFileName, InputStream inputStream, FileBackedOutputStream fileBackedOutputStream, StorageObjectMetadata meta, String stageRegion, String presignedUrl) throws SnowflakeSQLException { uploadWithPresignedUrlWithoutConnection( networkTimeoutInMilli, ocspModeAndProxyKey, parallelism, uploadFromStream, remoteStorageLocation, srcFile, destFileName, inputStream, fileBackedOutputStream, meta, stageRegion, presignedUrl, null); } /** * Upload a file (-stream) to remote storage with Pre-signed URL without JDBC connection. * *
<p>
NOTE: This function is only supported when pre-signed URL is used. * * @param networkTimeoutInMilli Network timeout for the upload * @param ocspModeAndProxyKey OCSP mode and proxy settings for the upload. * @param parallelism number of threads do parallel uploading * @param uploadFromStream true if upload source is stream * @param remoteStorageLocation s3 bucket name * @param srcFile source file if not uploading from a stream * @param destFileName file name on remote storage after upload * @param inputStream stream used for uploading if fileBackedOutputStream is null * @param fileBackedOutputStream stream used for uploading if not null * @param meta object meta data * @param stageRegion region name where the stage persists * @param presignedUrl presigned URL for upload. Used by GCP. * @param queryId last query id * @throws SnowflakeSQLException if upload failed even after retry */ default void uploadWithPresignedUrlWithoutConnection( int networkTimeoutInMilli, HttpClientSettingsKey ocspModeAndProxyKey, int parallelism, boolean uploadFromStream, String remoteStorageLocation, File srcFile, String destFileName, InputStream inputStream, FileBackedOutputStream fileBackedOutputStream, StorageObjectMetadata meta, String stageRegion, String presignedUrl, String queryId) throws SnowflakeSQLException { if (!requirePresignedUrl()) { throw new SnowflakeSQLLoggedException( queryId, null, ErrorCode.INTERNAL_ERROR.getMessageCode(), SqlState.INTERNAL_ERROR, /*session = */ "uploadWithPresignedUrlWithoutConnection" + " only works for pre-signed URL."); } } /** * Handles exceptions thrown by the remote storage provider * * @deprecated use {@link #handleStorageException(Exception, int, String, SFSession, String, * String)} * @param ex the exception to handle * @param retryCount current number of retries, incremented by the caller before each call * @param operation string that indicates the function/operation that was taking place, when the * exception was raised, for example StorageHelper.UPLOAD * @param connection the current SFSession object used by the client * @param command the command attempted at the time of the exception * @throws SnowflakeSQLException exceptions that were not handled, or retried past what the retry * policy allows, are propagated */ @Deprecated default void handleStorageException( Exception ex, int retryCount, String operation, SFSession connection, String command) throws SnowflakeSQLException { handleStorageException(ex, retryCount, operation, connection, command, null); } /** * Handles exceptions thrown by the remote storage provider * * @param ex the exception to handle * @param retryCount current number of retries, incremented by the caller before each call * @param operation string that indicates the function/operation that was taking place, when the * exception was raised, for example StorageHelper.UPLOAD * @param connection the current SFSession object used by the client * @param command the command attempted at the time of the exception * @param queryId last query id * @throws SnowflakeSQLException exceptions that were not handled, or retried past what the retry * policy allows, are propagated */ void handleStorageException( Exception ex, int retryCount, String operation, SFSession connection, String command, String queryId) throws SnowflakeSQLException; /** * Returns the material descriptor key * * @return the material descriptor key */ String getMatdescKey(); /** * Adds encryption metadata to the StorageObjectMetadata object for AES-ECB/AES-CBC * * @param meta the storage 
metadata object to add the encryption info to * @param matDesc the material descriptor * @param ivData the initialization vector * @param encryptedKey the encrypted content encryption key * @param contentLength the length of the encrypted content */ void addEncryptionMetadata( StorageObjectMetadata meta, MatDesc matDesc, byte[] ivData, byte[] encryptedKey, long contentLength); /** * Adds encryption metadata to the StorageObjectMetadata object for AES-GCM/AES-GCM * * @param meta the storage metadata object to add the encryption info to * @param matDesc the material descriptor * @param encryptedKey encrypted key * @param dataIvBytes the initialization vector for data * @param keyIvBytes the initialization vector for file key * @param keyAad the additional authenticated data for file key * @param dataAad the additional authenticated data for data * @param contentLength the length of the encrypted content */ default void addEncryptionMetadataForGcm( StorageObjectMetadata meta, MatDesc matDesc, byte[] encryptedKey, byte[] dataIvBytes, byte[] keyIvBytes, byte[] keyAad, byte[] dataAad, long contentLength) { // TODO GCM SNOW-1431870 } /** * Adds digest metadata to the StorageObjectMetadata object * * @param meta the storage metadata object to add the digest to * @param digest the digest metadata to add */ void addDigestMetadata(StorageObjectMetadata meta, String digest); /** * Gets digest metadata from the StorageObjectMetadata object * * @param meta the metadata object to extract the digest metadata from * @return the digest metadata value */ String getDigestMetadata(StorageObjectMetadata meta); /** * Adds streaming ingest metadata to the StorageObjectMetadata object, used for streaming ingest * per client billing calculation * * @param meta the storage metadata object to add the streaming ingest metadata to * @param clientName streaming ingest client name * @param clientKey streaming ingest client key, provided by Snowflake */ void addStreamingIngestMetadata(StorageObjectMetadata meta, String clientName, String clientKey); /** * Gets streaming ingest client name from the StorageObjectMetadata object * * @param meta StorageObjectMetadata * @return Client name */ String getStreamingIngestClientName(StorageObjectMetadata meta); /** * Gets streaming ingest client key from the StorageObjectMetadata object * * @param meta StorageObjectMetadata * @return Client key */ String getStreamingIngestClientKey(StorageObjectMetadata meta); } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/cloud/storage/StageInfo.java ================================================ package net.snowflake.client.internal.jdbc.cloud.storage; import java.io.Serializable; import java.util.Map; import java.util.Optional; import java.util.Properties; /** Encapsulates all the required stage properties used by GET/PUT for Azure, GCS and S3 stages */ public class StageInfo implements Serializable { // me-central2 GCS region always uses regional URLs // TODO SNOW-1818804: the value is hardcoded now, but it should be server driven private static final String GCS_REGION_ME_CENTRAL_2 = "me-central2"; public enum StageType { S3, AZURE, LOCAL_FS, GCS } private static final long serialVersionUID = 1L; private StageType stageType; // The stage type private String location; // The container or bucket private Map credentials; // the credentials required for the stage private String region; // S3/GCS region // An endpoint (Azure, AWS FIPS and GCS custom endpoint override) private String endPoint; private String 
storageAccount; // The Azure Storage account (Azure only) private String presignedUrl; // GCS gives us back a presigned URL instead of a cred private boolean isClientSideEncrypted; // whether to encrypt/decrypt files on the stage // whether to use s3 regional URL (AWS Only) // TODO SNOW-1818804: this field will be deprecated when the server returns {@link // #useRegionalUrl} private boolean useS3RegionalUrl; // whether to use regional URL (AWS and GCS only) private boolean useRegionalUrl; // whether to use virtual style URLs (GCP in SPCS only) private boolean useVirtualUrl; private Properties proxyProperties; /* * Creates a StageInfo object * Validates that the necessary Stage info arguments are specified * * @param locationType the type of stage, i.e. AZURE/S3 * @param location The container/bucket * @param credentials Map of cloud provider credentials * @param region The geographic region where the stage is located (S3 only) * @param endPoint The Azure Storage end point (Azure only) * @param storageAccount The Azure Storage account (azure only) * @param isClientSideEncrypted Whether the stage should use client-side encryption * @throws IllegalArgumentException one or more parameters required were missing */ public static StageInfo createStageInfo( String locationType, String location, Map<?, ?> credentials, String region, String endPoint, String storageAccount, boolean isClientSideEncrypted) throws IllegalArgumentException { StageType stageType; // Ensure that all the required parameters are specified switch (locationType) { case "AZURE": stageType = StageType.AZURE; if (!isSpecified(location) || !isSpecified(endPoint) || !isSpecified(storageAccount) || credentials == null) { throw new IllegalArgumentException("Incomplete parameters specified for Azure stage"); } break; case "S3": stageType = StageType.S3; if (!isSpecified(location) || !isSpecified(region) || credentials == null) { throw new IllegalArgumentException("Incomplete parameters specified for S3 stage"); } break; case "GCS": stageType = StageType.GCS; if (!isSpecified(location) || credentials == null) { throw new IllegalArgumentException("Incomplete parameters specified for GCS stage"); } break; case "LOCAL_FS": stageType = StageType.LOCAL_FS; if (!isSpecified(location)) { throw new IllegalArgumentException("Incomplete parameters specified for local stage"); } break; default: throw new IllegalArgumentException("Invalid stage type: " + locationType); } return new StageInfo( stageType, location, credentials, region, endPoint, storageAccount, isClientSideEncrypted); } /* * StageInfo constructor, accessible only via the createStageInfo method * Assumes valid parameters are specified * * @param stageType the type of stage, i.e.
AZURE/S3 * @param location The container/bucket * @param credentials Map of cloud provider credentials * @param region The geographic region where the stage is located (S3 only) * @param endPoint The Azure Storage end point (Azure only) * @param storageAccount The Azure Storage account (azure only) * @param isClientSideEncrypted Whether the stage uses client-side encryption */ private StageInfo( StageType stageType, String location, Map<?, ?> credentials, String region, String endPoint, String storageAccount, boolean isClientSideEncrypted) { this.stageType = stageType; this.location = location; this.credentials = credentials; this.region = region; this.endPoint = endPoint; this.storageAccount = storageAccount; this.isClientSideEncrypted = isClientSideEncrypted; } public StageType getStageType() { return stageType; } public String getLocation() { return location; } public Map<?, ?> getCredentials() { return credentials; } public void setCredentials(Map<?, ?> credentials) { this.credentials = credentials; } public String getRegion() { return region; } public String getEndPoint() { return endPoint; } public String getStorageAccount() { return storageAccount; } public String getPresignedUrl() { return presignedUrl; } public void setPresignedUrl(String presignedUrl) { this.presignedUrl = presignedUrl; } public boolean getIsClientSideEncrypted() { return isClientSideEncrypted; } public void setUseS3RegionalUrl(boolean useS3RegionalUrl) { this.useS3RegionalUrl = useS3RegionalUrl; } public boolean getUseS3RegionalUrl() { return useS3RegionalUrl; } public void setUseRegionalUrl(boolean useRegionalUrl) { this.useRegionalUrl = useRegionalUrl; } public boolean getUseRegionalUrl() { return useRegionalUrl; } public void setUseVirtualUrl(boolean useVirtualUrl) { this.useVirtualUrl = useVirtualUrl; } public boolean getUseVirtualUrl() { return useVirtualUrl; } private static boolean isSpecified(String arg) { return !(arg == null || arg.equalsIgnoreCase("")); } public void setProxyProperties(Properties proxyProperties) { this.proxyProperties = proxyProperties; } public Properties getProxyProperties() { return proxyProperties; } public Optional<String> gcsCustomEndpoint() { if (stageType != StageType.GCS) { return Optional.empty(); } if (endPoint != null && !endPoint.trim().isEmpty() && !"null".equals(endPoint)) { return Optional.of(endPoint); } if (GCS_REGION_ME_CENTRAL_2.equalsIgnoreCase(region) || useRegionalUrl) { return Optional.of(String.format("storage.%s.rep.googleapis.com", region.toLowerCase())); } return Optional.empty(); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/cloud/storage/StorageClientFactory.java ================================================ package net.snowflake.client.internal.jdbc.cloud.storage; import java.util.Map; import java.util.Properties; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.core.HttpUtil; import net.snowflake.client.internal.core.SFBaseSession; import net.snowflake.client.internal.core.SFSession; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.common.core.RemoteStoreFileEncryptionMaterial; /** * Factory object for abstracting the creation of storage client objects: SnowflakeStorageClient and * StorageObjectMetadata */ public class StorageClientFactory { private static final SFLogger logger = SFLoggerFactory.getLogger(StorageClientFactory.class); private static StorageClientFactory factory;
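// Usage sketch (illustrative, not part of the original source): callers obtain the
// singleton and ask it for a client matching the stage type returned by the server,
// where stageInfo, encMat and session are assumed to come from the active transfer:
//   SnowflakeStorageClient client =
//       StorageClientFactory.getFactory().createClient(stageInfo, 4, encMat, session);
// Note that getFactory() below is not synchronized; two threads racing on first use
// may each construct an instance, which is benign because the factory holds no state.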
private StorageClientFactory() {} /** * Creates or returns the single instance of the factory object * * @return the storage client instance */ public static StorageClientFactory getFactory() { if (factory == null) { factory = new StorageClientFactory(); } return factory; } /** * Creates a storage client based on the value of stageLocationType * * @param stage the stage properties * @param parallel the degree of parallelism to be used by the client * @param encMat encryption material for the client * @param session SFSession * @return a SnowflakeStorageClient interface to the instance created * @throws SnowflakeSQLException if any error occurs */ public SnowflakeStorageClient createClient( StageInfo stage, int parallel, RemoteStoreFileEncryptionMaterial encMat, SFSession session) throws SnowflakeSQLException { logger.debug("Creating storage client. Client type: {}", stage.getStageType().name()); switch (stage.getStageType()) { case S3: boolean useS3RegionalUrl = stage.getUseS3RegionalUrl() || stage.getUseRegionalUrl() || session != null && session.getUseRegionalS3EndpointsForPresignedURL(); return createS3Client( stage.getCredentials(), parallel, encMat, stage.getProxyProperties(), stage.getRegion(), stage.getEndPoint(), stage.getIsClientSideEncrypted(), session, useS3RegionalUrl); case AZURE: return createAzureClient(stage, encMat, session); case GCS: return createGCSClient(stage, encMat, session); default: // We don't create a storage client for FS_LOCAL, // so we should only find ourselves here if an unsupported // remote storage client type is specified throw new IllegalArgumentException( "Unsupported storage client specified: " + stage.getStageType().name()); } } /** * Creates a SnowflakeS3ClientObject which encapsulates the Amazon S3 client * * @param stageCredentials Map of stage credential properties * @param parallel degree of parallelism * @param encMat encryption material for the client * @param proxyProperties proxy settings for the client, if any * @param stageRegion the region where the stage is located * @param stageEndPoint the FIPS endpoint for the stage, if needed * @param isClientSideEncrypted whether client-side encryption should be used * @param session the active session * @param useS3RegionalUrl whether to use the regional S3 endpoint * @return the SnowflakeS3Client instance created * @throws SnowflakeSQLException failure to create the S3 client */ private SnowflakeS3Client createS3Client( Map<?, ?> stageCredentials, int parallel, RemoteStoreFileEncryptionMaterial encMat, Properties proxyProperties, String stageRegion, String stageEndPoint, boolean isClientSideEncrypted, SFBaseSession session, boolean useS3RegionalUrl) throws SnowflakeSQLException { final int S3_TRANSFER_MAX_RETRIES = 3; logger.debug("Creating S3 client with encryption: {}", (encMat == null ?
"no" : "yes")); SnowflakeS3Client s3Client; SnowflakeS3Client.ClientConfiguration clientConfig = new SnowflakeS3Client.ClientConfiguration( parallel + 1, S3_TRANSFER_MAX_RETRIES, (int) HttpUtil.getConnectionTimeout().toMillis(), (int) HttpUtil.getSocketTimeout().toMillis()); logger.debug( "S3 client configuration: maxConnection: {}, connectionTimeout: {}, " + "socketTimeout: {}, maxErrorRetry: {}", clientConfig.getMaxConnections(), clientConfig.getConnectionTimeout(), clientConfig.getSocketTimeout(), clientConfig.getMaxErrorRetry()); try { s3Client = new SnowflakeS3Client( stageCredentials, clientConfig, encMat, proxyProperties, stageRegion, stageEndPoint, isClientSideEncrypted, session, useS3RegionalUrl); } catch (Exception ex) { logger.debug("Exception creating s3 client", ex); throw ex; } logger.debug("S3 Storage client created", false); return s3Client; } /** * Creates a storage provider specific metadata object, accessible via the platform independent * interface * * @param stageType determines the implementation to be created * @return the implementation of StorageObjectMetadata */ public StorageObjectMetadata createStorageMetadataObj(StageInfo.StageType stageType) { switch (stageType) { case S3: return new S3ObjectMetadata(); case AZURE: case GCS: // GCS's metadata object looks just like Azure's (Map), // so for now we'll use the same class. return new CommonObjectMetadata(); default: // An unsupported remote storage client type was specified // We don't create/implement a storage client for FS_LOCAL, // so we should never end up here while running on local file system throw new IllegalArgumentException("Unsupported stage type specified: " + stageType.name()); } } /** * Creates a SnowflakeAzureClientObject which encapsulates the Azure Storage client * * @param stage Stage information * @param encMat encryption material for the client * @param session * @return the SnowflakeS3Client instance created */ private SnowflakeAzureClient createAzureClient( StageInfo stage, RemoteStoreFileEncryptionMaterial encMat, SFBaseSession session) throws SnowflakeSQLException { logger.debug("Creating Azure client with encryption: {}", (encMat == null ? "no" : "yes")); SnowflakeAzureClient azureClient; try { azureClient = SnowflakeAzureClient.createSnowflakeAzureClient(stage, encMat, session); } catch (Exception ex) { logger.debug("Exception creating Azure Storage client", ex); throw ex; } logger.debug("Azure Storage client created", false); return azureClient; } /** * Creates a SnowflakeGCSClient object which encapsulates the GCS Storage client * * @param stage Stage information * @param encMat encryption material for the client * @return the SnowflakeGCSClient instance created */ private SnowflakeGCSClient createGCSClient( StageInfo stage, RemoteStoreFileEncryptionMaterial encMat, SFSession session) throws SnowflakeSQLException { logger.debug("Creating GCS client with encryption: {}", (encMat == null ? 
"no" : "yes")); SnowflakeGCSClient gcsClient; try { gcsClient = SnowflakeGCSClient.createSnowflakeGCSClient(stage, encMat, session); } catch (Exception ex) { logger.debug("Exception creating GCS Storage client", ex); throw ex; } logger.debug("GCS Storage client created", false); return gcsClient; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/cloud/storage/StorageHelper.java ================================================ package net.snowflake.client.internal.jdbc.cloud.storage; import java.io.File; import java.io.InputStream; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.internal.jdbc.FileBackedOutputStream; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; class StorageHelper { private static final SFLogger logger = SFLoggerFactory.getLogger(StorageHelper.class); protected static final String DOWNLOAD = "download"; protected static final String UPLOAD = "upload"; static String getStartUploadLog( String serviceName, boolean uploadFromStream, InputStream inputStream, FileBackedOutputStream fileBackedOutputStream, File srcFile, String destFileName) { if (uploadFromStream && fileBackedOutputStream != null) { File file = fileBackedOutputStream.getFile(); String fileBackedOutputStreamType = file == null ? "byte stream" : ("file: " + file.getAbsolutePath()); return "Starting upload from stream (" + fileBackedOutputStreamType + ") to " + serviceName + " location: " + destFileName; } else if (uploadFromStream && inputStream != null) { return "Starting upload from input stream to " + serviceName + " location: " + destFileName; } else { return "Starting upload from file " + srcFile.getAbsolutePath() + " to " + serviceName + " location: " + destFileName; } } static ErrorCode getOperationException(String operation) { switch (operation) { case UPLOAD: return ErrorCode.UPLOAD_ERROR; case DOWNLOAD: return ErrorCode.DOWNLOAD_ERROR; default: logger.warn( "Unknown operation: {}. Returning fallback error code: ErrorCode.FILE_TRANSFER_ERROR", operation); return ErrorCode.FILE_TRANSFER_ERROR; } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/cloud/storage/StorageObjectMetadata.java ================================================ package net.snowflake.client.internal.jdbc.cloud.storage; import java.util.Map; /** * Interface for platform-independent remote storage object metadata, modeled after the S3 * ObjectMetadata class * *
<p>
Only the metadata accessors and mutators used by the Client currently are supported, * additional methods should be added as needed */ public interface StorageObjectMetadata { /** * @return returns a Map/key-value pairs of metadata properties */ Map<String, String> getUserMetadata(); /** * @return returns the size of object in bytes */ long getContentLength(); /** * Sets size of the associated object in bytes * * @param contentLength the length of content */ void setContentLength(long contentLength); /** * Adds the key value pair of custom user-metadata for the associated object. * * @param key the key of user metadata * @param value the value of user metadata */ void addUserMetadata(String key, String value); /** * Sets the optional Content-Encoding HTTP header specifying what content encodings have been * applied to the object and what decoding mechanisms must be applied in order to obtain the * media-type referenced by the Content-Type field. * * @param encoding the encoding name used in the HTTP header Content-Encoding */ void setContentEncoding(String encoding); /** * @return returns the content encoding type */ String getContentEncoding(); } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/cloud/storage/StorageObjectSummary.java ================================================ package net.snowflake.client.internal.jdbc.cloud.storage; import com.azure.storage.blob.models.BlobItem; import com.azure.storage.blob.models.BlobItemProperties; import com.google.cloud.storage.Blob; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import software.amazon.awssdk.services.s3.model.S3Object; /** Storage platform agnostic class that encapsulates remote storage object properties */ public class StorageObjectSummary { private static final SFLogger logger = SFLoggerFactory.getLogger(StorageObjectSummary.class); private String location; // location translates to "bucket" for S3 private String key; private String md5; private long size; /** * Constructs a StorageObjectSummary object from the given properties * * @param location Location of the S3 object * @param key Key of the S3Object * @param md5 The MD5 hash of the object * @param size The size of the S3 object */ private StorageObjectSummary(String location, String key, String md5, long size) { this.location = location; this.key = key; this.md5 = md5; this.size = size; } /** * Constructs a StorageObjectSummary object from the S3 equivalent S3ObjectSummary * * @param objSummary the AWS S3 ObjectSummary object to copy from * @param bucket the AWS S3 bucket name * @return the ObjectSummary object created */ public static StorageObjectSummary createFromS3ObjectSummary(S3Object objSummary, String bucket) { return new StorageObjectSummary( bucket, objSummary.key(), // S3 ETag is not always MD5, but since this code path is only // used to skip duplicate files in the PUT command, it's not // critical to guarantee that it's MD5 objSummary.eTag(), objSummary.size()); } /** * Creates a platform-agnostic ObjectSummary from an Azure BlobItem * * @param blobItem an Azure BlobItem object * @param location the Azure container holding the blob * @return the ObjectSummary object created */ public static StorageObjectSummary createFromAzureBlobItem(BlobItem blobItem, String location) throws StorageProviderException { try { long size; String key = blobItem.getName(); BlobItemProperties blobProperties = blobItem.getProperties(); byte[] contentMd5 =
blobProperties.getContentMd5(); String md5 = contentMd5 != null ? SnowflakeUtil.byteToHexString(contentMd5) : null; size = blobProperties.getContentLength(); return new StorageObjectSummary(location, key, md5, size); } catch (Exception ex) { logger.debug("Failed to create StorageObjectSummary from Azure BlobItem: {}", ex); throw new StorageProviderException(ex); } } /** * createFromGcsBlob creates a StorageObjectSummary from a GCS blob object * * @param blob GCS blob object * @return a new StorageObjectSummary */ public static StorageObjectSummary createFromGcsBlob(Blob blob) { String bucketName = blob.getBucket(); String path = blob.getName(); String hexMD5 = blob.getMd5ToHexString(); long size = blob.getSize(); return new StorageObjectSummary(bucketName, path, hexMD5, size); } /** * @return returns the location of the object */ public String getLocation() { return location; } /** * @return returns the key property of the object */ public String getKey() { return key; } /** * @return returns the MD5 hash of the object */ public String getMD5() { return md5; } /** * @return returns the size property of the object */ public long getSize() { return size; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/cloud/storage/StorageObjectSummaryCollection.java ================================================ package net.snowflake.client.internal.jdbc.cloud.storage; import com.azure.storage.blob.models.BlobItem; import com.google.api.gax.paging.Page; import com.google.cloud.storage.Blob; import java.util.Iterator; import java.util.List; import software.amazon.awssdk.services.s3.model.S3Object; /** * Provides an iterator over storage object summaries from all supported cloud storage providers */ public class StorageObjectSummaryCollection implements Iterable<StorageObjectSummary> { private enum storageType { S3, AZURE, GCS }; private final storageType sType; private List<S3Object> s3ObjSummariesList = null; private Iterable<BlobItem> azCLoudBlobIterable = null; private Page<Blob> gcsIterablePage = null; // explicitly store bucket name for S3 because S3Object does not contain bucket info private String bucketName = null; // Constructs platform-agnostic collection of object summaries from S3 objects public StorageObjectSummaryCollection(List<S3Object> s3ObjectSummaries, String bucketName) { this.s3ObjSummariesList = s3ObjectSummaries; sType = storageType.S3; this.bucketName = bucketName; } // Constructs platform-agnostic collection of object summaries from an Azure CloudBlobDirectory // object public StorageObjectSummaryCollection(Iterable<BlobItem> azCLoudBlobIterable, String bucketName) { this.azCLoudBlobIterable = azCLoudBlobIterable; sType = storageType.AZURE; this.bucketName = bucketName; } public StorageObjectSummaryCollection(Page<Blob> gcsIterablePage) { this.gcsIterablePage = gcsIterablePage; sType = storageType.GCS; } @Override public Iterator<StorageObjectSummary> iterator() { switch (sType) { case S3: return new S3ObjectSummariesIterator(s3ObjSummariesList, this.bucketName); case AZURE: return new AzureObjectSummariesIterator(azCLoudBlobIterable, bucketName); case GCS: return new GcsObjectSummariesIterator(this.gcsIterablePage); default: throw new IllegalArgumentException("Unspecified storage provider"); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/cloud/storage/StorageProviderException.java ================================================ package net.snowflake.client.internal.jdbc.cloud.storage; import org.apache.http.HttpStatus; import
software.amazon.awssdk.core.exception.SdkServiceException; /** * Custom exception class to signal a remote provider exception in a platform-independent manner. */ public class StorageProviderException extends RuntimeException { private static final long serialVersionUID = 1L; /** * Constructor that accepts an arbitrary Exception. * * @param ex An Exception to be treated as transient. */ public StorageProviderException(Exception ex) { super(ex); } /** * Method to obtain the original provider exception that led to this exception being thrown. * * @return The original provider exception that led to this exception. */ public Exception getOriginalProviderException() { return (Exception) (super.getCause()); } /** * Returns true if this is an exception corresponding to a HTTP 404 error returned by the storage * provider. * * @return true if the specified exception is an SdkServiceException instance and if it was thrown * because of a 404, false otherwise. */ public boolean isServiceException404() { Throwable cause = getCause(); if (cause instanceof SdkServiceException) { SdkServiceException asEx = (SdkServiceException) cause; return (asEx.statusCode() == HttpStatus.SC_NOT_FOUND); } return false; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/diagnostic/CertificateDiagnosticCheck.java ================================================ package net.snowflake.client.internal.jdbc.diagnostic; import java.io.IOException; import java.net.MalformedURLException; import java.net.Proxy; import java.net.URL; import java.security.KeyManagementException; import java.security.NoSuchAlgorithmException; import javax.net.ssl.HttpsURLConnection; import javax.net.ssl.SSLContext; import javax.net.ssl.SSLSocketFactory; import javax.net.ssl.TrustManager; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; class CertificateDiagnosticCheck extends DiagnosticCheck { private static final String SECURE_SOCKET_PROTOCOL = "TLS"; private static final SFLogger logger = SFLoggerFactory.getLogger(CertificateDiagnosticCheck.class); public CertificateDiagnosticCheck(ProxyConfig proxyConfig) { super("SSL/TLS Certificate Test", proxyConfig); } @Override protected void doCheck(SnowflakeEndpoint snowflakeEndpoint) { String hostname = snowflakeEndpoint.getHost(); String port = Integer.toString(snowflakeEndpoint.getPort()); if (snowflakeEndpoint.isSslEnabled()) { String urlString = "https://" + hostname + ":" + port; try { SSLContext sslContext = SSLContext.getInstance(SECURE_SOCKET_PROTOCOL); sslContext.init(null, new TrustManager[] {new DiagnosticTrustManager()}, null); HttpsURLConnection.setDefaultSSLSocketFactory(sslContext.getSocketFactory()); Proxy proxy = this.proxyConf.getProxy(snowflakeEndpoint); new URL(urlString).openConnection(proxy).connect(); } catch (NoSuchAlgorithmException e) { logger.error( "None of the security provider's implementation of SSLContextSpi supports " + SECURE_SOCKET_PROTOCOL, e); } catch (KeyManagementException e) { logger.error("Failed to initialize SSLContext", e); } catch (MalformedURLException e) { logger.error("Failed to create new URL object: " + urlString, e); } catch (IOException e) { logger.error("Failed to open a connection to: " + urlString, e); } catch (Exception e) { logger.error( "Unexpected error occurred when trying to retrieve certificate from: " + hostname, e); } finally { HttpsURLConnection.setDefaultSSLSocketFactory( (SSLSocketFactory) SSLSocketFactory.getDefault()); } } 
else { logger.info("Host " + hostname + ":" + port + " is not secure. Skipping certificate check."); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/diagnostic/DiagnosticCheck.java ================================================ package net.snowflake.client.internal.jdbc.diagnostic; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; abstract class DiagnosticCheck { protected final String name; protected final ProxyConfig proxyConf; private static final SFLogger logger = SFLoggerFactory.getLogger(DiagnosticCheck.class); abstract void doCheck(SnowflakeEndpoint snowflakeEndpoint); final void run(SnowflakeEndpoint snowflakeEndpoint) { logger.info("JDBC Diagnostics - {}: hostname: {}", this.name, snowflakeEndpoint.getHost()); doCheck(snowflakeEndpoint); } protected DiagnosticCheck(String name, ProxyConfig proxyConf) { this.name = name; this.proxyConf = proxyConf; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/diagnostic/DiagnosticContext.java ================================================ package net.snowflake.client.internal.jdbc.diagnostic; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import java.io.File; import java.io.IOException; import java.net.Proxy; import java.nio.file.FileSystems; import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; import java.util.List; import java.util.Map; import net.snowflake.client.internal.core.SFSessionProperty; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; public class DiagnosticContext { private static final SFLogger logger = SFLoggerFactory.getLogger(DiagnosticContext.class); private static final String JAVAX_NET_DEBUG = "javax.net.debug"; private static final String JAVAX_TRUSTSTORE = "javax.net.ssl.trustStore"; private ProxyConfig proxyConf; private List<SnowflakeEndpoint> endpoints = new ArrayList<>(); private final DiagnosticCheck[] tests; public DiagnosticContext( String allowListFile, Map<SFSessionProperty, Object> connectionPropertiesMap) { createProxyConfiguration(connectionPropertiesMap); try { JsonNode jsonNode = readAllowListJsonFile(allowListFile); for (JsonNode objectNode : jsonNode) { String type = objectNode.get("type").asText(); String host = objectNode.get("host").asText(); int port = objectNode.get("port").asInt(); SnowflakeEndpoint e = new SnowflakeEndpoint(type, host, port); endpoints.add(e); } } catch (IOException e) { logger.error("Failed to read allowlist file: ", e); } catch (Exception e) { logger.error("Failed to parse data in allowlist file: " + allowListFile, e); } tests = new DiagnosticCheck[] { new DnsDiagnosticCheck(proxyConf), new TcpDiagnosticCheck(proxyConf), new CertificateDiagnosticCheck(proxyConf), new HttpAndHttpsDiagnosticCheck(proxyConf) }; } /** This constructor is only used for testing */ DiagnosticContext(Map<SFSessionProperty, Object> connectionPropertiesMap) { createProxyConfiguration(connectionPropertiesMap); tests = null; } private void createProxyConfiguration(Map<SFSessionProperty, Object> connectionPropertiesMap) { String proxyHost = (String) connectionPropertiesMap.get(SFSessionProperty.PROXY_HOST); int proxyPort = (connectionPropertiesMap.get(SFSessionProperty.PROXY_PORT) == null) ?
-1 : Integer.parseInt((String) connectionPropertiesMap.get(SFSessionProperty.PROXY_PORT)); String nonProxyHosts = (String) connectionPropertiesMap.get(SFSessionProperty.NON_PROXY_HOSTS); proxyConf = new ProxyConfig(proxyHost, proxyPort, nonProxyHosts); } public void runDiagnostics() { logEnvironmentInfo(); // Loop through endpoints and run diagnostic test on each one of them for (DiagnosticCheck test : tests) { for (SnowflakeEndpoint endpoint : endpoints) { test.run(endpoint); } } } private JsonNode readAllowListJsonFile(String jsonFilePath) throws IOException { ObjectMapper objectMapper = new ObjectMapper(); File allowListFile = new File(jsonFilePath); return objectMapper.readTree(allowListFile); } public void logEnvironmentInfo() { logger.info("Getting environment information"); logger.info("Current truststore used: " + getTrustStoreLocation()); logger.info("-Dnetworkaddress.cache.ttl: " + systemGetProperty("networkaddress.cache.ttl")); logger.info( "-Dnetworkaddress.cache.negative.ttl: " + systemGetProperty("networkaddress.cache.negative.ttl")); logger.info("-Djavax.net.debug: " + systemGetProperty(JAVAX_NET_DEBUG)); } private boolean isNullOrEmpty(String a) { return a == null || a.isEmpty(); } /** * We determine the truststore in use based on the JSSE documentation: * *
<p>
1.) If the javax.net.ssl.trustStore property is defined, then the TrustManagerFactory * attempts to find a file using the file name specified by that system property, and uses that * file for the KeyStore parameter. If the javax.net.ssl.trustStorePassword system property is * also defined, then its value is used to check the integrity of the data in the truststore * before opening it. * *
<p>
If the javax.net.ssl.trustStore property is defined but the specified file does not exist, * then a default TrustManager using an empty keystore is created. * *
<p>
2.) If the javax.net.ssl.trustStore system property was not specified, then: - if the file * java-home/lib/security/jssecacerts exists, that file is used; - if the file * java-home/lib/security/cacerts exists, that file is used; - if neither of these files exists, * then the SSL cipher suite is anonymous, does not perform any authentication, and thus does not * need a truststore. */ private String getTrustStoreLocation() { String trustStore = systemGetProperty(JAVAX_TRUSTSTORE); String javaHome = systemGetProperty("java.home"); if (isNullOrEmpty(javaHome)) { return ""; } Path javaSecurityPath = FileSystems.getDefault().getPath(javaHome, "lib", "security"); logger.info("JAVA_HOME: " + javaHome); if (isNullOrEmpty(trustStore)) { logger.info("-D{} is null", JAVAX_TRUSTSTORE); Path jssecacertsPath = FileSystems.getDefault().getPath(javaSecurityPath.toString(), "jssecacerts"); Path cacertsPath = FileSystems.getDefault().getPath(javaSecurityPath.toString(), "cacerts"); logger.info("Checking if jssecacerts or cacerts exist"); if (Files.exists(jssecacertsPath)) { logger.info(jssecacertsPath.toString() + " exists"); trustStore = jssecacertsPath.toString(); } else if (Files.exists(cacertsPath)) { logger.info(cacertsPath.toString() + " exists"); trustStore = cacertsPath.toString(); } } else { logger.info("-D{} is set by user: {}", JAVAX_TRUSTSTORE, trustStore); } return trustStore; } String getHttpProxyHost() { return proxyConf.getHttpProxyHost(); } int getHttpProxyPort() { return proxyConf.getHttpProxyPort(); } String getHttpsProxyHost() { return proxyConf.getHttpsProxyHost(); } int getHttpsProxyPort() { return proxyConf.getHttpsProxyPort(); } String getHttpNonProxyHosts() { return proxyConf.getNonProxyHosts(); } List<SnowflakeEndpoint> getEndpoints() { return endpoints; } Proxy getProxy(SnowflakeEndpoint snowflakeEndpoint) { return this.proxyConf.getProxy(snowflakeEndpoint); } boolean isProxyEnabled() { return proxyConf.isProxyEnabled(); } boolean isProxyEnabledOnJvm() { return proxyConf.isProxyEnabledOnJvm(); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/diagnostic/DiagnosticTrustManager.java ================================================ package net.snowflake.client.internal.jdbc.diagnostic; import java.net.Socket; import java.security.cert.CertificateParsingException; import java.security.cert.X509Certificate; import javax.net.ssl.SSLEngine; import javax.net.ssl.X509ExtendedTrustManager; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; class DiagnosticTrustManager extends X509ExtendedTrustManager { private static final SFLogger logger = SFLoggerFactory.getLogger(DiagnosticTrustManager.class); @Override public void checkServerTrusted(X509Certificate[] certs, String authType) { printCertificates(certs); } @Override public void checkServerTrusted(X509Certificate[] certs, String authType, SSLEngine engine) { printCertificates(certs); } @Override public void checkServerTrusted(X509Certificate[] certs, String authType, Socket sc) { printCertificates(certs); } @Override public void checkClientTrusted(X509Certificate[] chain, String authType) { // do nothing } @Override public void checkClientTrusted(X509Certificate[] chain, String authType, Socket sc) { // do nothing } @Override public void checkClientTrusted(X509Certificate[] chain, String authType, SSLEngine engine) { // do nothing } @Override public X509Certificate[] getAcceptedIssuers() { // This implementation is not needed, so we're returning
an empty array return new X509Certificate[0]; } private void printCertificates(X509Certificate[] chainCerts) { logger.info("Printing certificate chain"); StringBuilder sb = new StringBuilder(); int i = 0; for (X509Certificate x509Cert : chainCerts) { try { sb.append("\nCertificate[").append(i).append("]:").append("\n"); sb.append("Subject: ").append(x509Cert.getSubjectDN()).append("\n"); sb.append("Issuer: ").append(x509Cert.getIssuerDN()).append("\n"); sb.append("Valid from: ").append(x509Cert.getNotBefore()).append("\n"); sb.append("Not Valid After: ").append(x509Cert.getNotAfter()).append("\n"); sb.append("Subject Alternative Names: ") .append(x509Cert.getSubjectAlternativeNames()) .append("\n"); sb.append("Issuer Alternative Names: ") .append(x509Cert.getIssuerAlternativeNames()) .append("\n"); sb.append("Serial: ").append(x509Cert.getSerialNumber().toString(16)).append("\n"); logger.info(sb.toString()); i++; } catch (CertificateParsingException e) { logger.error("Error parsing certificate", e); } catch (Exception e) { logger.error("Unexpected error occurred when parsing certificate", e); } } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/diagnostic/DnsDiagnosticCheck.java ================================================ package net.snowflake.client.internal.jdbc.diagnostic; import java.net.Inet4Address; import java.net.InetAddress; import java.net.UnknownHostException; import java.util.Hashtable; import javax.naming.Context; import javax.naming.NamingEnumeration; import javax.naming.NamingException; import javax.naming.directory.Attribute; import javax.naming.directory.Attributes; import javax.naming.directory.DirContext; import javax.naming.spi.NamingManager; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; class DnsDiagnosticCheck extends DiagnosticCheck { private static final SFLogger logger = SFLoggerFactory.getLogger(DnsDiagnosticCheck.class); private final String INITIAL_DNS_CONTEXT = "com.sun.jndi.dns.DnsContextFactory"; DnsDiagnosticCheck(ProxyConfig proxyConfig) { super("DNS Lookup Test", proxyConfig); } @Override protected void doCheck(SnowflakeEndpoint snowflakeEndpoint) { getCnameRecords(snowflakeEndpoint); getArecords(snowflakeEndpoint); } private void getCnameRecords(SnowflakeEndpoint snowflakeEndpoint) { String hostname = snowflakeEndpoint.getHost(); try { Hashtable<String, String> env = new Hashtable<>(); env.put(Context.INITIAL_CONTEXT_FACTORY, INITIAL_DNS_CONTEXT); DirContext dirCtx = (DirContext) NamingManager.getInitialContext(env); Attributes attrs1 = dirCtx.getAttributes(snowflakeEndpoint.getHost(), new String[] {"CNAME"}); NamingEnumeration<? extends Attribute> attrs = attrs1.getAll(); StringBuilder sb = new StringBuilder(); sb.append("\nCNAME:\n"); while (attrs.hasMore()) { Attribute a = attrs.next(); NamingEnumeration<?> values = a.getAll(); while (values.hasMore()) { sb.append(values.next()); sb.append("\n"); } } logger.info(sb.toString()); } catch (NamingException e) { logger.error("Error occurred when getting CNAME record for host " + hostname, e); } catch (Exception e) { logger.error("Unexpected error occurred when getting CNAME record for host " + hostname, e); } } private void getArecords(SnowflakeEndpoint snowflakeEndpoint) { String hostname = snowflakeEndpoint.getHost(); try { InetAddress[] addresses = InetAddress.getAllByName(hostname); StringBuilder sb = new StringBuilder(); sb.append("\nA Records:\n"); for (InetAddress ip : addresses) { if (ip instanceof Inet4Address) {
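// Added note: only IPv4 addresses are collected into the log output here; the
// private-link check below intentionally runs for every resolved address.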
sb.append(ip.getHostAddress()); sb.append("\n"); } // Check if this is a private link endpoint and if the ip address // returned by the DNS query is a private IP address as expected. if (snowflakeEndpoint.isPrivateLink() && !ip.isSiteLocalAddress()) { logger.error( "Public IP address was returned for {}. Please review your DNS configurations.", hostname); } } logger.info(sb.toString()); } catch (UnknownHostException e) { logger.error("DNS query failed for host: " + snowflakeEndpoint.getHost(), e); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/diagnostic/HttpAndHttpsDiagnosticCheck.java ================================================ package net.snowflake.client.internal.jdbc.diagnostic; import java.io.IOException; import java.net.HttpURLConnection; import java.net.MalformedURLException; import java.net.Proxy; import java.net.URL; import java.util.List; import java.util.Map; import javax.net.ssl.HttpsURLConnection; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; class HttpAndHttpsDiagnosticCheck extends DiagnosticCheck { private static final SFLogger logger = SFLoggerFactory.getLogger(HttpAndHttpsDiagnosticCheck.class); private final String HTTP_SCHEMA = "http://"; private final String HTTPS_SCHEMA = "https://"; HttpAndHttpsDiagnosticCheck(ProxyConfig proxyConfig) { super("HTTP/HTTPS Connection Test", proxyConfig); } @Override protected void doCheck(SnowflakeEndpoint snowflakeEndpoint) { // We have to replace underscores with hyphens because the JDK doesn't allow underscores in the // hostname String hostname = snowflakeEndpoint.getHost().replace('_', '-'); try { Proxy proxy = this.proxyConf.getProxy(snowflakeEndpoint); StringBuilder sb = new StringBuilder(); String urlString = (snowflakeEndpoint.isSslEnabled()) ? HTTPS_SCHEMA + hostname : HTTP_SCHEMA + hostname; URL url = new URL(urlString); HttpURLConnection con = (snowflakeEndpoint.isSslEnabled()) ? (HttpsURLConnection) url.openConnection(proxy) : (HttpURLConnection) url.openConnection(proxy); logger.info("Response from server: {} {}", con.getResponseCode(), con.getResponseMessage()); sb.append("Headers:\n"); Map<String, List<String>> headerFields = con.getHeaderFields(); for (Map.Entry<String, List<String>> header : headerFields.entrySet()) { sb.append(header.getKey()).append(": ").append(header.getValue()).append("\n"); } logger.info(sb.toString()); } catch (MalformedURLException e) { logger.error( "The URL format is incorrect, please check your allowlist JSON file for errors.", e); } catch (IOException e) { logger.error("Could not send an HTTP/HTTPS request to host " + hostname, e); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/diagnostic/ProxyConfig.java ================================================ package net.snowflake.client.internal.jdbc.diagnostic; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import java.net.InetSocketAddress; import java.net.Proxy; import java.util.Optional; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; /** * This class is used to represent the proxy configurations passed to the JDBC driver either as JVM * arguments or connection parameters. The class determines which proxy settings take precedence and * should be used by the diagnostic tests.
We normalize configurations where empty strings for * hostnames and -1 for ports represent the absence of a configuration. * *
<p>
The order of precedence is: * *
<p>
1.) Connection parameters (proxy configurations passed to the constructor) 2.) JVM arguments * *
<p>
The useProxy parameter is ignored. If the proxy is configured using the JVM and someone wants * to bypass that at the connection-level then they would need to set the following connection * parameters: proxyHost=127.0.0.1 proxyPort=8080 nonProxyHosts=* * *
<p>
i.e. bypass the proxy host when connecting to any host. */ class ProxyConfig { private String proxyHost; private int proxyPort; private String nonProxyHosts; private String jvmHttpProxyHost; private String jvmHttpsProxyHost; private int jvmHttpProxyPort; private int jvmHttpsProxyPort; private String jvmNonProxyHosts; private String finalHttpProxyHost = ""; private String finalHttpsProxyHost = ""; private int finalHttpProxyPort = -1; private int finalHttpsProxyPort = -1; private String finalNonProxyHosts = ""; private boolean isProxyEnabled = false; private boolean isProxyEnabledOnJvm = false; private final String JVM_HTTP_PROXY_HOST = "http.proxyHost"; private final String JVM_HTTPS_PROXY_HOST = "https.proxyHost"; private final String JVM_HTTP_PROXY_PORT = "http.proxyPort"; private final String JVM_HTTPS_PROXY_PORT = "https.proxyPort"; private final String JVM_HTTP_NON_PROXY_HOSTS = "http.nonProxyHosts"; private static final SFLogger logger = SFLoggerFactory.getLogger(ProxyConfig.class); public String getHttpProxyHost() { return finalHttpProxyHost; } public String getHttpsProxyHost() { return finalHttpsProxyHost; } public int getHttpProxyPort() { return finalHttpProxyPort; } public int getHttpsProxyPort() { return finalHttpsProxyPort; } public String getNonProxyHosts() { return finalNonProxyHosts; } public void setProxyHost(String proxyHost) { this.proxyHost = proxyHost; } public void setProxyPort(int proxyPort) { this.proxyPort = proxyPort; } public void setNonProxyHosts(String nonProxyHosts) { this.nonProxyHosts = nonProxyHosts; } public ProxyConfig(String proxyHost, int proxyPort, String nonProxyHosts) { jvmHttpProxyHost = Optional.ofNullable(systemGetProperty(JVM_HTTP_PROXY_HOST)).orElse(""); jvmHttpsProxyHost = Optional.ofNullable(systemGetProperty(JVM_HTTPS_PROXY_HOST)).orElse(""); jvmHttpProxyPort = Optional.ofNullable(systemGetProperty(JVM_HTTP_PROXY_PORT)) .map(Integer::parseInt) .orElse(-1); jvmHttpsProxyPort = Optional.ofNullable(systemGetProperty(JVM_HTTPS_PROXY_PORT)) .map(Integer::parseInt) .orElse(-1); jvmNonProxyHosts = Optional.ofNullable(systemGetProperty(JVM_HTTP_NON_PROXY_HOSTS)).orElse(""); this.proxyHost = Optional.ofNullable(proxyHost).orElse(""); this.proxyPort = proxyPort; this.nonProxyHosts = Optional.ofNullable(nonProxyHosts).orElse(""); resolveProxyConfigurations(); } public ProxyConfig() { this(null, -1, null); } public boolean isProxyEnabled() { return isProxyEnabled; } public boolean isProxyEnabledOnJvm() { return isProxyEnabledOnJvm; } /** * This method reviews both the JVM and connection parameter configurations then concludes which * settings to use 1.) Check if proxy settings were passed in the connection parameters, if so, * then we use that right away. 2.) If connection parameters were not passed, then review JVM * arguments and use those. 3.) If neither were set, then don't use any proxy settings (default). */ private void resolveProxyConfigurations() { // Both proxyHost and proxyPort connection parameters must be present. 
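// Illustrative example (host and port values assumed, not from the original source):
// given connection parameters proxyHost=proxy.example.com and proxyPort=8080 alongside
// JVM flags -Dhttp.proxyHost=jvmproxy -Dhttp.proxyPort=3128, the connection parameters
// win, so both finalHttpProxyHost and finalHttpsProxyHost resolve to
// proxy.example.com:8080 and the JVM settings are ignored.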
StringBuilder sb = new StringBuilder(); logger.info("Resolving proxy configurations"); sb.append("Proxy Configurations picked up from "); if (!proxyHost.isEmpty() && proxyPort != -1) { finalHttpProxyHost = proxyHost; finalHttpsProxyHost = proxyHost; finalHttpProxyPort = proxyPort; finalHttpsProxyPort = proxyPort; finalNonProxyHosts = nonProxyHosts; isProxyEnabled = true; sb.append("connection parameters:\n"); sb.append("proxyHost: ").append(proxyHost).append("\n"); sb.append("proxyPort: ").append(proxyPort).append("\n"); sb.append("nonProxyHosts: ").append(nonProxyHosts); } else if ((!jvmHttpProxyHost.isEmpty() && jvmHttpProxyPort != -1) || (!jvmHttpsProxyHost.isEmpty() && jvmHttpsProxyPort != -1)) { finalHttpProxyHost = jvmHttpProxyHost; finalHttpProxyPort = jvmHttpProxyPort; finalHttpsProxyHost = jvmHttpsProxyHost; finalHttpsProxyPort = jvmHttpsProxyPort; finalNonProxyHosts = jvmNonProxyHosts; isProxyEnabled = true; isProxyEnabledOnJvm = true; sb.append("JVM arguments:\n"); sb.append("-D").append(JVM_HTTP_PROXY_HOST).append("=").append(jvmHttpProxyHost).append("\n"); sb.append("-D").append(JVM_HTTP_PROXY_PORT).append("=").append(jvmHttpProxyPort).append("\n"); sb.append("-D") .append(JVM_HTTPS_PROXY_HOST) .append("=") .append(jvmHttpsProxyHost) .append("\n"); sb.append("-D") .append(JVM_HTTPS_PROXY_PORT) .append("=") .append(jvmHttpsProxyPort) .append("\n"); } logger.info(sb.toString()); } protected boolean isBypassProxy(String hostname) { String nonProxyHosts = getNonProxyHosts(); if (nonProxyHosts == null || nonProxyHosts.isEmpty()) { return false; } String[] nonProxyHostsArray = nonProxyHosts.split("\\|"); for (String pattern : nonProxyHostsArray) { if (SnowflakeUtil.hostnameMatchesGlob(hostname, pattern)) { return true; } } return false; } public Proxy getProxy(SnowflakeEndpoint endpoint) { if (!isProxyEnabled || isBypassProxy(endpoint.getHost())) { return Proxy.NO_PROXY; } else if (endpoint.isSslEnabled()) { return (isHttpsProxyEnabled()) ? new Proxy( Proxy.Type.HTTP, new InetSocketAddress(finalHttpsProxyHost, finalHttpsProxyPort)) : Proxy.NO_PROXY; } return (isHttpProxyEnabled()) ? new Proxy(Proxy.Type.HTTP, new InetSocketAddress(finalHttpProxyHost, finalHttpProxyPort)) : Proxy.NO_PROXY; } /* Consider the http proxy enabled if either the http proxy host or the http proxy port has been resolved. */ private boolean isHttpProxyEnabled() { return (!finalHttpProxyHost.isEmpty() || finalHttpProxyPort != -1); } /* Consider the https proxy enabled if either the https proxy host or the https proxy port has been resolved. */ private boolean isHttpsProxyEnabled() { return (!finalHttpsProxyHost.isEmpty() || finalHttpsProxyPort != -1); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/diagnostic/SnowflakeEndpoint.java ================================================ package net.snowflake.client.internal.jdbc.diagnostic; import net.snowflake.client.internal.core.PrivateLinkDetector; /* The SnowflakeEndpoint class represents an endpoint as returned by the System$allowlist() SQL function.
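Endpoints returned with port 443 are treated as SSL-enabled by this class (see the isSecure flag below).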
Example: [{"type":"SNOWFLAKE_DEPLOYMENT","host":"snowhouse.snowflakecomputing.com","port":443},{"type":"SNOWFLAKE_DEPLOYMENT_REGIONLESS","host":"sfcogsops-snowhouse_aws_us_west_2.snowflakecomputing.com","port":443},{"type":"STAGE","host":"sfc-ds2-customer-stage.s3.amazonaws.com","port":443},{"type":"STAGE","host":"sfc-ds2-customer-stage.s3.us-west-2.amazonaws.com","port":443},{"type":"STAGE","host":"sfc-ds2-customer-stage.s3-us-west-2.amazonaws.com","port":443},{"type":"SNOWSQL_REPO","host":"sfc-repo.snowflakecomputing.com","port":443},{"type":"OUT_OF_BAND_TELEMETRY","host":"client-telemetry.snowflakecomputing.com","port":443},{"type":"OCSP_CACHE","host":"ocsp.snowflakecomputing.com","port":80},{"type":"DUO_SECURITY","host":"api-35a58de5.duosecurity.com","port":443},{"type":"CLIENT_FAILOVER","host":"sfcogsops-snowhouseprimary.snowflakecomputing.com","port":443},{"type":"OCSP_RESPONDER","host":"o.ss2.us","port":80},{"type":"OCSP_RESPONDER","host":"ocsp.r2m02.amazontrust.com","port":80},{"type":"OCSP_RESPONDER","host":"ocsp.sca1b.amazontrust.com","port":80},{"type":"OCSP_RESPONDER","host":"ocsp.rootg2.amazontrust.com","port":80},{"type":"OCSP_RESPONDER","host":"ocsp.rootca1.amazontrust.com","port":80},{"type":"SNOWSIGHT_DEPLOYMENT","host":"app.snowflake.com","port":443},{"type":"SNOWSIGHT_DEPLOYMENT","host":"apps-api.c1.us-west-2.aws.app.snowflake.com","port":443}] */ class SnowflakeEndpoint { private final String type; private final String host; private final int port; private final boolean isSecure; public SnowflakeEndpoint(String type, String host, int port) { this.type = type; this.host = host; this.port = port; this.isSecure = (this.port == 443); } public String getType() { return this.type; } public String getHost() { return this.host; } public boolean isSslEnabled() { return this.isSecure; } public int getPort() { return this.port; } public boolean isPrivateLink() { return PrivateLinkDetector.isPrivateLink(host); } @Override public String toString() { return this.host + ":" + this.port; } @Override public boolean equals(Object o) { boolean isSnowflakeEndpoint = o instanceof SnowflakeEndpoint; if (!isSnowflakeEndpoint) { return false; } if (!((SnowflakeEndpoint) o).getHost().equals(this.host)) { return false; } if (((SnowflakeEndpoint) o).getPort() != this.port) { return false; } if (!((SnowflakeEndpoint) o).getType().equals(this.type)) { return false; } return true; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/diagnostic/TcpDiagnosticCheck.java ================================================ package net.snowflake.client.internal.jdbc.diagnostic; import java.io.IOException; import java.net.InetSocketAddress; import java.net.Proxy; import java.net.Socket; import java.net.SocketTimeoutException; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; class TcpDiagnosticCheck extends DiagnosticCheck { private static final SFLogger logger = SFLoggerFactory.getLogger(TcpDiagnosticCheck.class); TcpDiagnosticCheck(ProxyConfig proxyConfig) { super("TCP Connection Test", proxyConfig); } protected void doCheck(SnowflakeEndpoint snowflakeEndpoint) { String hostname = snowflakeEndpoint.getHost(); int connectTimeoutMillis = 60000; int port = snowflakeEndpoint.getPort(); Proxy proxy = proxyConf.getProxy(snowflakeEndpoint); try (Socket socket = new Socket(proxy)) { socket.bind(null); logger.info( "Establishing TCP connection: {} -> {}:{}", socket.getLocalSocketAddress(), 
snowflakeEndpoint.getHost(), snowflakeEndpoint.getPort()); socket.connect(new InetSocketAddress(hostname, port), connectTimeoutMillis); logger.info( "Established a TCP connection successfully: {} -> {}", socket.getLocalSocketAddress(), socket.getRemoteSocketAddress()); } catch (SocketTimeoutException e) { logger.error( "Could not establish TCP connection within timeout of " + connectTimeoutMillis + "ms", e); } catch (IOException e) { logger.error("Error connecting to host " + hostname + ":" + port, e); } catch (Exception e) { logger.error("Unexpected error occurred when connecting to host " + hostname + ":" + port, e); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/telemetry/CSVMetricsExporter.java ================================================ package net.snowflake.client.internal.jdbc.telemetry; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import java.io.BufferedWriter; import java.io.FileWriter; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; import java.util.List; import java.util.Optional; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; // This class is internal - features implemented in this class should be treated as internal // features and may be changed in the future. You shouldn't depend on these metrics. public class CSVMetricsExporter { private static final SFLogger logger = SFLoggerFactory.getLogger(CSVMetricsExporter.class); static final String CSV_EXPORTER_FILE_PROPERTY = "metrics.csv.exporter.file"; static final String CSV_EXPORTER_FLUSH_SIZE_PROPERTY = "metrics.csv.exporter.flush.size"; private final List<ExecTimeTelemetryData> entries = new ArrayList<>(); private final String filePath; private final Integer flushSize; CSVMetricsExporter(String filePath, int flushSize) { this.filePath = filePath; this.flushSize = flushSize; Runtime.getRuntime().addShutdownHook(new Thread(this::flush)); } private static CSVMetricsExporter instance; public static synchronized CSVMetricsExporter getDefaultInstance() { if (instance == null) { String filePath = systemGetProperty(CSV_EXPORTER_FILE_PROPERTY); int limit = Integer.parseInt( Optional.ofNullable(systemGetProperty(CSV_EXPORTER_FLUSH_SIZE_PROPERTY)).orElse("1")); instance = new CSVMetricsExporter(filePath, limit); } return instance; } public synchronized void save(ExecTimeTelemetryData data) { if (isNullOrEmpty(filePath)) { return; } entries.add(data); if (entries.size() >= flushSize) { flush(); } } private synchronized void flush() { if (isNullOrEmpty(filePath)) { return; } if (entries.isEmpty()) { return; } Path path = Paths.get(filePath); try { Files.createDirectories(path.getParent()); boolean fileExists = Files.exists(path); try (BufferedWriter writer = new BufferedWriter(new FileWriter(filePath, true))) { if (!fileExists) { writer.write( "timestamp,sessionId,requestId,queryId,queryText,executeToSendTime,bindTime,gzipTime,httpClientTime,responseIOStreamTime,processResultChunkTime,createResultSetTime,queryTime"); writer.newLine(); } for (ExecTimeTelemetryData data : entries) { writer.write(formatCsvRow(data)); writer.newLine(); } writer.flush(); } } catch (IOException e) { logger.warn("Failed to write metrics to CSV file: {}", filePath, e); } finally { // it's better to drop some metrics than have OOM entries.clear(); } }
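// Illustrative configuration (flag values assumed; the property names match the
// constants defined above): the exporter is enabled by pointing it at a file and,
// optionally, raising the flush threshold from its default of 1, e.g.
//   -Dmetrics.csv.exporter.file=/tmp/jdbc-exec-metrics.csv
//   -Dmetrics.csv.exporter.flush.size=100
// Each ExecTimeTelemetryData passed to save() is then buffered and appended to the
// CSV file once 100 entries accumulate, or at JVM shutdown via the hook registered
// in the constructor.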
private String formatCsvRow(ExecTimeTelemetryData data) { return String.join( ",", escapeCsvValue(String.valueOf(data.getTimestamp())), escapeCsvValue(data.getSessionId()), escapeCsvValue(data.getRequestId()), escapeCsvValue(data.getQueryId()), escapeCsvValue(data.getQueryText()), String.valueOf(data.getExecuteToSend().getTime()), String.valueOf(data.getBind().getTime()), String.valueOf(data.getGzip().getTime()), String.valueOf(data.getHttpClient().getTime()), String.valueOf(data.getResponseIOStream().getTime()), String.valueOf(data.getProcessResultChunk().getTime()), String.valueOf(data.getCreateResultSet().getTime()), String.valueOf(data.getQuery().getTime())); } private String escapeCsvValue(String value) { if (value == null) { return ""; } // If value contains comma, quote, or newline, wrap in quotes and escape internal quotes if (value.contains(",") || value.contains("\"") || value.contains("\n") || value.contains("\r")) { return "\"" + value.replace("\"", "\"\"") + "\""; } return value; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/telemetry/ExecTimeTelemetryData.java ================================================ package net.snowflake.client.internal.jdbc.telemetry; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty; import java.time.LocalDateTime; import net.minidev.json.JSONObject; import net.snowflake.client.internal.jdbc.telemetryOOB.TelemetryService; import net.snowflake.client.internal.util.TimeMeasurement; public class ExecTimeTelemetryData { // Measures time from when the client initiated a query (with executeQuery) until it is sent via // HTTP. private final TimeMeasurement executeToSend = new TimeMeasurement(); // Measures time from when the client initiated a query (with executeQuery) until the control is // returned to the user. private final TimeMeasurement query = new TimeMeasurement(); // Measures time from when binding preparation is started (including pushing to stage, if needed) // until it is ended. private final TimeMeasurement bind = new TimeMeasurement(); // Measures time spent on compressing the request. private final TimeMeasurement gzip = new TimeMeasurement(); // Measures time spent on HTTP roundtrip, except for downloading a response body. private final TimeMeasurement httpClient = new TimeMeasurement(); // Measures time spent on response body download. private final TimeMeasurement responseIOStream = new TimeMeasurement(); // Measures time spent on parsing result chunk. private final TimeMeasurement processResultChunk = new TimeMeasurement(); // Measures time spent on creating a result set from parsed data. 
private final TimeMeasurement createResultSet = new TimeMeasurement(); private String batchId; private String queryId; private String queryFunction; private int retryCount = 0; private String retryLocations = ""; private Boolean ocspEnabled = false; boolean sendData = true; private String requestId; private String sessionId; private String queryText; private final LocalDateTime timestamp; public ExecTimeTelemetryData(String queryFunction, String batchId) { this.timestamp = LocalDateTime.now(); this.query.setStart(); this.executeToSend.setStart(); this.queryFunction = queryFunction; this.batchId = batchId; if (!TelemetryService.getInstance().isHTAPEnabled()) { this.sendData = false; } } public ExecTimeTelemetryData() { this.timestamp = LocalDateTime.now(); this.sendData = false; } public void setBindStart() { bind.setStart(); } public void setOCSPStatus(Boolean ocspEnabled) { this.ocspEnabled = ocspEnabled; } public void setBindEnd() { this.bind.setEnd(); } public void setHttpClientStart() { httpClient.setStart(); } public void setHttpClientEnd() { httpClient.setEnd(); } public void setGzipStart() { gzip.setStart(); } public void setGzipEnd() { gzip.setEnd(); } public void setQueryEnd() { query.setEnd(); } public void setExecuteToSendQueryEnd() { executeToSend.setEnd(); } public void setQueryId(String queryId) { this.queryId = queryId; } public void setProcessResultChunkStart() { processResultChunk.setStart(); } public void setProcessResultChunkEnd() { processResultChunk.setEnd(); } public void setResponseIOStreamStart() { responseIOStream.setStart(); } public void setResponseIOStreamEnd() { responseIOStream.setEnd(); } public void setCreateResultSetStart() { createResultSet.setStart(); } public void setCreateResultSetEnd() { createResultSet.setEnd(); } public void incrementRetryCount() { this.retryCount++; } public void setRequestId(String requestId) { this.requestId = requestId; } public void addRetryLocation(String location) { if (isNullOrEmpty(this.retryLocations)) { this.retryLocations = location; } else { this.retryLocations = this.retryLocations.concat(", ").concat(location); } } long getTotalQueryTime() { return query.getTime(); } long getResultProcessingTime() { if (createResultSet.getEnd() == 0 || processResultChunk.getStart() == 0) { return -1; } return createResultSet.getEnd() - processResultChunk.getStart(); } long getHttpRequestTime() { return httpClient.getTime(); } long getResultSetCreationTime() { return createResultSet.getTime(); } public void setSessionId(String sessionId) { this.sessionId = sessionId; } public void setQueryText(String sql) { this.queryText = sql; } public String getSessionId() { return sessionId; } public String getQueryText() { return queryText; } public String getRequestId() { return requestId; } public String getQueryId() { return queryId; } public TimeMeasurement getExecuteToSend() { return executeToSend; } public TimeMeasurement getBind() { return bind; } public TimeMeasurement getGzip() { return gzip; } public TimeMeasurement getHttpClient() { return httpClient; } public TimeMeasurement getResponseIOStream() { return responseIOStream; } public TimeMeasurement getProcessResultChunk() { return processResultChunk; } public TimeMeasurement getCreateResultSet() { return createResultSet; } public TimeMeasurement getQuery() { return query; } public LocalDateTime getTimestamp() { return timestamp; } public String generateTelemetry() { CSVMetricsExporter.getDefaultInstance().save(this); if (this.sendData) { String eventType = "ExecutionTimeRecord"; 
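// Build the flat JSON payload consumed by the out-of-band HTAP telemetry endpoint; each
// TimeMeasurement contributes its raw start/end timestamps rather than a precomputed duration.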
JSONObject value = new JSONObject(); String valueStr; value.put("eventType", eventType); value.put("Timestamp", this.timestamp.toString()); value.put("QueryStart", this.query.getStart()); value.put("ExecuteToSendStart", this.executeToSend.getStart()); value.put("ExecuteToSendEnd", this.executeToSend.getEnd()); value.put("BindStart", this.bind.getStart()); value.put("BindEnd", this.bind.getEnd()); value.put("GzipStart", this.gzip.getStart()); value.put("GzipEnd", this.gzip.getEnd()); value.put("HttpClientStart", this.httpClient.getStart()); value.put("HttpClientEnd", this.httpClient.getEnd()); value.put("ResponseIOStreamStart", this.responseIOStream.getStart()); value.put("ResponseIOStreamEnd", this.responseIOStream.getEnd()); value.put("ProcessResultChunkStart", this.processResultChunk.getStart()); value.put("ProcessResultChunkEnd", this.processResultChunk.getEnd()); value.put("CreateResultSetStart", this.createResultSet.getStart()); value.put("CreateResultSetEnd", this.createResultSet.getEnd()); value.put("QueryEnd", this.query.getEnd()); value.put("BatchID", this.batchId); value.put("QueryID", this.queryId); value.put("RequestID", this.requestId); value.put("QueryFunction", this.queryFunction); value.put("RetryCount", this.retryCount); value.put("RetryLocations", this.retryLocations); value.put("ocspEnabled", this.ocspEnabled); value.put("ElapsedQueryTime", getTotalQueryTime()); value.put("ElapsedResultProcessTime", getResultProcessingTime()); value.put("Urgent", true); valueStr = value.toString(); // Avoid adding exception stacktrace to user logs. TelemetryService.getInstance().logExecutionTimeTelemetryEvent(value, eventType); return valueStr; } return ""; } public String toString() { return "Query id: " + this.queryId + ", query function: " + this.queryFunction + ", batch id: " + this.batchId + ", request id: " + this.requestId + ", total query time: " + getTotalQueryTime() / 1000 + " ms" + ", prepare to send time: " + this.executeToSend.getTime() / 1000 + " ms" + ", result processing time: " + getResultProcessingTime() / 1000 + " ms" + ", result set creation time: " + getResultSetCreationTime() / 1000 + " ms" + ", http request time: " + getHttpRequestTime() / 1000 + " ms" + ", retry count: " + this.retryCount; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/telemetry/InternalApiTelemetryTracker.java ================================================ package net.snowflake.client.internal.jdbc.telemetry; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ObjectNode; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; import net.snowflake.client.internal.core.ObjectMapperFactory; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; /** Tracks calls to public methods in internal packages and reports them as in-band telemetry. */ public class InternalApiTelemetryTracker { private static final SFLogger logger = SFLoggerFactory.getLogger(InternalApiTelemetryTracker.class); private static final ObjectMapper mapper = ObjectMapperFactory.getObjectMapper(); private static final ConcurrentHashMap<String, AtomicLong> methodCallCounts = new ConcurrentHashMap<>(); /** Marker used by internal call paths to skip external-usage telemetry.
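*
* <p>Hypothetical example (editor's sketch): internal call sites pass the marker so the call is
* not counted as external usage, while calls arriving from outside pass {@code null}:
*
* <pre>{@code
* // internal call path - not recorded
* InternalApiTelemetryTracker.recordIfExternal(
*     "SFSession", "getSessionToken", InternalApiTelemetryTracker.internalCallMarker());
* // external call path - recorded
* InternalApiTelemetryTracker.recordIfExternal("SFSession", "getSessionToken", null);
* }</pre>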
*/ public static final class InternalCallMarker { private InternalCallMarker() {} } private static final InternalCallMarker INTERNAL_CALL_MARKER = new InternalCallMarker(); private InternalApiTelemetryTracker() {} public static InternalCallMarker internalCallMarker() { return INTERNAL_CALL_MARKER; } public static void recordIfExternal( String className, String methodName, InternalCallMarker internalCallMarker) { if (internalCallMarker == null) { record(className, methodName); } } static void record(String className, String methodName) { methodCallCounts .computeIfAbsent(className + "#" + methodName, k -> new AtomicLong(0)) .incrementAndGet(); } public static void flush(Telemetry client) { if (client == null || methodCallCounts.isEmpty()) { return; } try { ObjectNode message = mapper.createObjectNode(); message.put(TelemetryField.TYPE.toString(), TelemetryField.INTERNAL_API_USAGE.toString()); message.put("source", "JDBC"); ObjectNode methods = mapper.createObjectNode(); methodCallCounts.forEach( (key, count) -> { long value = count.getAndSet(0); if (value > 0) { methods.put(key, value); } }); methodCallCounts.entrySet().removeIf(e -> e.getValue().get() == 0); if (methods.isEmpty()) { return; } message.set("methods", methods); client.addLogToBatch(new TelemetryData(message, System.currentTimeMillis())); logger.debug("Flushed internal API usage telemetry with {} distinct methods", methods.size()); } catch (Exception e) { logger.debug("Failed to flush internal API telemetry: {}", e.getMessage()); } } static void resetForTesting() { methodCallCounts.clear(); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/telemetry/NoOpTelemetryClient.java ================================================ package net.snowflake.client.internal.jdbc.telemetry; import java.util.concurrent.CompletableFuture; import java.util.concurrent.Future; /** Telemetry client that does nothing. Mainly used in testing code. */ public class NoOpTelemetryClient implements Telemetry { @Override public void addLogToBatch(TelemetryData log) {} @Override public void close() {} @Override public Future<Boolean> sendBatchAsync() { return CompletableFuture.completedFuture(true); } @Override public void postProcess(String queryId, String sqlState, int vendorCode, Throwable ex) {} } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/telemetry/PreSessionTelemetryClient.java ================================================ package net.snowflake.client.internal.jdbc.telemetry; import java.util.ArrayList; import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.concurrent.Future; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; /** * A telemetry client that buffers telemetry data until a real telemetry client becomes available. * Used for scenarios where telemetry needs to be collected before a session is established, such as * during SSL/TLS setup and certificate validation.
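*
* <p>Illustrative flow (editor's sketch; {@code data} and {@code realClient} are hypothetical
* stand-ins for a buffered entry and any session-backed {@link Telemetry} implementation):
*
* <pre>{@code
* PreSessionTelemetryClient preSession = new PreSessionTelemetryClient();
* preSession.addLogToBatch(data); // buffered, up to MAX_BUFFER_SIZE entries
* // ... once the session is established ...
* preSession.setRealTelemetryClient(realClient); // buffered entries are replayed
* }</pre>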
*/ public class PreSessionTelemetryClient implements Telemetry { private static final SFLogger logger = SFLoggerFactory.getLogger(PreSessionTelemetryClient.class); private final List<TelemetryData> bufferedData = new ArrayList<>(); private final Lock lock = new ReentrantLock(); private Telemetry realTelemetryClient = null; private boolean closed = false; // Prevent potential memory issues by limiting buffer size private static final int MAX_BUFFER_SIZE = 1000; @Override public void addLogToBatch(TelemetryData log) { if (closed || log == null) { return; } lock.lock(); try { if (realTelemetryClient != null) { // Real client available, use it directly realTelemetryClient.addLogToBatch(log); } else { if (bufferedData.size() < MAX_BUFFER_SIZE) { bufferedData.add(log); logger.debug("Buffered telemetry data, buffer size: {}", bufferedData.size()); } else { logger.debug( "Telemetry buffer full (size: {}), dropping telemetry data to prevent memory issues", MAX_BUFFER_SIZE); } } } finally { lock.unlock(); } } public void setRealTelemetryClient(Telemetry realClient) { lock.lock(); try { if (closed) { logger.debug("PreSessionTelemetryClient is closed, ignoring real client"); return; } this.realTelemetryClient = realClient; flushBufferedData(realClient); } finally { lock.unlock(); } } private void flushBufferedData(Telemetry realClient) { for (TelemetryData data : bufferedData) { try { realClient.addLogToBatch(data); } catch (Exception e) { logger.debug("Failed to flush buffered telemetry data: {}", e.getMessage()); } } bufferedData.clear(); } @Override public Future<Boolean> sendBatchAsync() { lock.lock(); try { if (realTelemetryClient != null) { return realTelemetryClient.sendBatchAsync(); } return CompletableFuture.completedFuture(true); } finally { lock.unlock(); } } @Override public void close() { lock.lock(); try { if (closed) { return; } closed = true; if (realTelemetryClient != null) { try { realTelemetryClient.close(); } catch (Exception e) { logger.debug("Error closing telemetry client: {}", e.getMessage()); } } if (!bufferedData.isEmpty()) { logger.debug( "Closing PreSessionTelemetryClient with {} unflushed entries", bufferedData.size()); } bufferedData.clear(); } finally { lock.unlock(); } } @Override public void postProcess(String queryId, String sqlState, int vendorCode, Throwable ex) { lock.lock(); try { if (realTelemetryClient != null) { realTelemetryClient.postProcess(queryId, sqlState, vendorCode, ex); } } finally { lock.unlock(); } } public boolean hasRealTelemetryClient() { return realTelemetryClient != null; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/telemetry/RevocationCheckTelemetryData.java ================================================ package net.snowflake.client.internal.jdbc.telemetry; public class RevocationCheckTelemetryData { private String crlUrl; private long timeParsingCrl; private long timeDownloadingCrl; private long crlBytes; private int numberOfRevokedCertificates; public void setTimeParsingCrl(long timeParsingCrl) { this.timeParsingCrl = timeParsingCrl; } public void setTimeDownloadingCrl(long timeDownloadingCrl) { this.timeDownloadingCrl = timeDownloadingCrl; } public void setCrlUrl(String crlUrl) { this.crlUrl = crlUrl; } public void setCrlBytes(long crlBytes) { this.crlBytes = crlBytes; } public void setNumberOfRevokedCertificates(int numberOfRevokedCertificates) { this.numberOfRevokedCertificates = numberOfRevokedCertificates; } public TelemetryData buildTelemetry() { return TelemetryUtil.buildCrlData( crlUrl,
crlBytes, numberOfRevokedCertificates, timeDownloadingCrl, timeParsingCrl); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/telemetry/SqlExceptionTelemetryHandler.java ================================================ package net.snowflake.client.internal.jdbc.telemetry; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty; import static net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.internalCallMarker; import com.fasterxml.jackson.databind.node.ObjectNode; import java.io.PrintWriter; import java.io.StringWriter; import java.sql.SQLException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.regex.Matcher; import java.util.regex.Pattern; import net.minidev.json.JSONObject; import net.snowflake.client.api.driver.SnowflakeDriver; import net.snowflake.client.internal.core.SFBaseSession; import net.snowflake.client.internal.jdbc.telemetryOOB.TelemetryEvent; import net.snowflake.client.internal.jdbc.telemetryOOB.TelemetryService; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.common.core.LoginInfoDTO; import net.snowflake.common.core.SqlState; /** Handler for SQL exception telemetry reporting. */ public class SqlExceptionTelemetryHandler { private static final SFLogger logger = SFLoggerFactory.getLogger(SqlExceptionTelemetryHandler.class); /** * Send telemetry data for a SQL exception. This method attempts to send data via in-band * telemetry if a session is available, falling back to out-of-band telemetry if needed. * * @param queryId query ID if exists * @param sqlState SQL state * @param vendorCode vendor code * @param session session object (needed for in-band telemetry, may be null) * @param ex the SQLException being reported */ public static void sendTelemetry( String queryId, String sqlState, int vendorCode, SFBaseSession session, SQLException ex) { Telemetry ibInstance = null; // if session is not null, try sending data using in-band telemetry if (session != null) { ibInstance = session.getTelemetryClient(internalCallMarker()); } // if in-band instance is successfully created, compile sql exception data into an in-band // telemetry log if (ibInstance != null) { ObjectNode ibValue = TelemetryUtil.createIBValue( queryId, sqlState, vendorCode, TelemetryField.SQL_EXCEPTION, null, null); // try to send in-band data asynchronously ExecutorService threadExecutor = Executors.newSingleThreadExecutor(); Telemetry finalIbInstance = ibInstance; try { threadExecutor.submit( () -> { boolean inBandSuccess; Future<Boolean> sendInBand = sendInBandTelemetryMessage(ibValue, ex, finalIbInstance); // record in inBandSuccess whether the in-band telemetry message was sent successfully try { inBandSuccess = sendInBand.get(10, TimeUnit.SECONDS); } catch (Exception e) { inBandSuccess = false; } // In-band failed so send OOB telemetry instead if (!inBandSuccess) { logger.debug( "In-band telemetry message failed to send.
Sending out-of-band message instead"); JSONObject oobValue = createOOBValue(queryId, sqlState, vendorCode); sendOutOfBandTelemetryMessage(oobValue, ex, TelemetryService.getInstance()); } }); } finally { // Send the shutdown signal to the executor service threadExecutor.shutdown(); // Add an extra hook in the telemetry client, if extra error handling is needed ibInstance.postProcess(queryId, sqlState, vendorCode, ex); } } // In-band is not possible so send OOB telemetry instead else { JSONObject oobValue = createOOBValue(queryId, sqlState, vendorCode); sendOutOfBandTelemetryMessage(oobValue, ex, TelemetryService.getInstance()); } } /** * Create a TelemetryEvent log from the JSONObject and exception and send it via OOB telemetry. * * @param value JSONObject containing relevant exception information * @param ex the SQLException being reported * @param oobInstance out-of-band telemetry instance */ private static void sendOutOfBandTelemetryMessage( JSONObject value, SQLException ex, TelemetryService oobInstance) { TelemetryEvent.LogBuilder logBuilder = new TelemetryEvent.LogBuilder(); StringWriter sw = new StringWriter(); PrintWriter pw = new PrintWriter(sw); ex.printStackTrace(pw); String stackTrace = maskStacktrace(sw.toString()); value.put("Stacktrace", stackTrace); value.put("Exception", ex.getClass().getSimpleName()); TelemetryEvent log = logBuilder.withName("Exception: " + ex.getClass().getSimpleName()).withValue(value).build(); oobInstance.report(log); } /** * Create a TelemetryClient log and send it via in-band telemetry. * * @param value ObjectNode containing exception information * @param ex the SQLException being reported * @param ibInstance telemetry instance * @return future indicating whether the message was sent successfully */ private static Future sendInBandTelemetryMessage( ObjectNode value, SQLException ex, Telemetry ibInstance) { StringWriter sw = new StringWriter(); PrintWriter pw = new PrintWriter(sw); ex.printStackTrace(pw); String stackTrace = maskStacktrace(sw.toString()); value.put("Stacktrace", stackTrace); value.put("Exception", ex.getClass().getSimpleName()); // For SQLFeatureNotSupportedExceptions, add in reason for failure as " not // supported" if (value.get("SQLState").toString().contains(SqlState.FEATURE_NOT_SUPPORTED)) { String reason = ""; StackTraceElement[] stackTraceArray = ex.getStackTrace(); if (stackTraceArray.length >= 1) { reason = ex.getStackTrace()[0].getMethodName() + " not supported"; } value.put("reason", reason); } ibInstance.addLogToBatch(TelemetryUtil.buildJobData(value)); return ibInstance.sendBatchAsync(); } /** * Helper function to remove sensitive data (error message, reason) from the stacktrace. * * @param stackTrace original stacktrace * @return stack trace with sensitive data removed */ public static String maskStacktrace(String stackTrace) { Pattern STACKTRACE_BEGINNING = Pattern.compile( "(com|net)(\\.snowflake\\.client\\.api\\.exception\\.Snowflake|\\.snowflake\\.client\\.internal\\.exception\\.Snowflake|\\.snowflake\\.client\\.jdbc\\.Snowflake)(SQLLogged|LoggedFeatureNotSupported|SQL)(Exception)([\\s\\S]*?)(\\n\\t?at\\snet|com\\.)", Pattern.MULTILINE | Pattern.CASE_INSENSITIVE); Matcher matcher = STACKTRACE_BEGINNING.matcher(stackTrace); // Remove the reason from after the stack trace (in group #5 of regex pattern) if (matcher.find()) { return matcher.replaceAll("$1$2$3$4$6"); } return stackTrace; } /** * Helper function to create JSONObject node for OOB telemetry log. 
* * @param queryId query ID * @param sqlState the SQL state * @param vendorCode the vendor code * @return JSONObject with data about SQLException */ public static JSONObject createOOBValue(String queryId, String sqlState, int vendorCode) { JSONObject oobValue = new JSONObject(); oobValue.put(TelemetryField.TYPE.toString(), TelemetryField.SQL_EXCEPTION.toString()); oobValue.put(TelemetryField.DRIVER_TYPE.toString(), LoginInfoDTO.SF_JDBC_APP_ID); oobValue.put( TelemetryField.DRIVER_VERSION.toString(), SnowflakeDriver.getImplementationVersion()); if (!isNullOrEmpty(queryId)) { oobValue.put(TelemetryField.QUERY_ID.toString(), queryId); } if (!isNullOrEmpty(sqlState)) { oobValue.put(TelemetryField.SQL_STATE.toString(), sqlState); } if (vendorCode != TelemetryUtil.NO_VENDOR_CODE) { oobValue.put(TelemetryField.ERROR_NUMBER.toString(), vendorCode); } return oobValue; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/telemetry/Telemetry.java ================================================ package net.snowflake.client.internal.jdbc.telemetry; import java.util.concurrent.Future; public interface Telemetry { /** * Attempt to add log to batch, and suppress exceptions thrown in case of failure * * @param log entry to add */ void addLogToBatch(TelemetryData log); /** Close telemetry connector and send any unsubmitted logs */ void close(); /** * Send all cached logs to server * * @return future indicating whether the logs were sent successfully */ Future<Boolean> sendBatchAsync(); /** * A hook for post-processing after sending telemetry data. Can be used, for example, for * additional error handling. * * @param queryId The query id * @param sqlState The SQL state as defined in net.snowflake.common.core.SqlState * @param vendorCode The vendor code for localized messages * @param ex The throwable that caused this.
*/ void postProcess(String queryId, String sqlState, int vendorCode, Throwable ex); } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/telemetry/TelemetryClient.java ================================================ package net.snowflake.client.internal.jdbc.telemetry; import static net.snowflake.client.internal.jdbc.telemetry.InternalApiTelemetryTracker.internalCallMarker; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.ObjectNode; import java.io.IOException; import java.rmi.UnexpectedException; import java.sql.Connection; import java.sql.SQLException; import java.util.LinkedList; import java.util.Objects; import java.util.concurrent.Future; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.api.implementation.connection.SnowflakeConnectionImpl; import net.snowflake.client.internal.core.HttpUtil; import net.snowflake.client.internal.core.ObjectMapperFactory; import net.snowflake.client.internal.core.SFSession; import net.snowflake.client.internal.jdbc.telemetryOOB.TelemetryThreadPool; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.client.internal.util.Stopwatch; import org.apache.http.HttpHeaders; import org.apache.http.client.methods.HttpPost; import org.apache.http.entity.StringEntity; import org.apache.http.impl.client.CloseableHttpClient; /** Telemetry client implementation that batches logs and sends them to the Snowflake server. */ public class TelemetryClient implements Telemetry { private static final SFLogger logger = SFLoggerFactory.getLogger(TelemetryClient.class); private static final String SF_PATH_TELEMETRY = "/telemetry/send"; private static final String SF_PATH_TELEMETRY_SESSIONLESS = "/telemetry/send/sessionless"; // if the number of cached logs is larger than this threshold, // the telemetry connector will flush the buffer automatically.
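// (DEFAULT_FORCE_FLUSH_SIZE below sets this threshold to 100 entries unless a caller passes an
// explicit flush size to one of the createTelemetry factory methods.)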
private final int forceFlushSize; private static final int DEFAULT_FORCE_FLUSH_SIZE = 100; private final String serverUrl; private final String telemetryUrl; private final SFSession session; private LinkedList<TelemetryData> logBatch; private static final ObjectMapper mapper = ObjectMapperFactory.getObjectMapper(); private boolean isClosed; // HTTP client object used to communicate with another machine private final CloseableHttpClient httpClient; // the authorization type specified in sessionless header private String authType; // JWT/OAuth token private String token; private Object locker = new Object(); // false if meet any error when sending metrics private boolean isTelemetryServiceAvailable = true; // Retry timeout for the HTTP request private static final int TELEMETRY_HTTP_RETRY_TIMEOUT_IN_SEC = 1000; private TelemetryClient(SFSession session, int flushSize) { this.session = session; this.serverUrl = session.getUrl(); this.httpClient = null; if (this.serverUrl.endsWith("/")) { this.telemetryUrl = this.serverUrl.substring(0, this.serverUrl.length() - 1) + SF_PATH_TELEMETRY; } else { this.telemetryUrl = this.serverUrl + SF_PATH_TELEMETRY; } this.logBatch = new LinkedList<>(); this.isClosed = false; this.forceFlushSize = flushSize; } /** * Constructor for creating a sessionless telemetry client * * @param httpClient client object used to communicate with another machine * @param serverUrl server url * @param authType authorization type, should be either KEYPAIR_JWT or OAUTH * @param flushSize maximum size of telemetry batch before flush */ private TelemetryClient( CloseableHttpClient httpClient, String serverUrl, String authType, int flushSize) { this.session = null; this.serverUrl = serverUrl; this.httpClient = httpClient; if (!Objects.equals(authType, "KEYPAIR_JWT") && !Objects.equals(authType, "OAUTH")) { throw new IllegalArgumentException( "Invalid authType, should be \"KEYPAIR_JWT\" or \"OAUTH\""); } this.authType = authType; if (this.serverUrl.endsWith("/")) { this.telemetryUrl = this.serverUrl.substring(0, this.serverUrl.length() - 1) + SF_PATH_TELEMETRY_SESSIONLESS; } else { this.telemetryUrl = this.serverUrl + SF_PATH_TELEMETRY_SESSIONLESS; } this.logBatch = new LinkedList<>(); this.isClosed = false; this.forceFlushSize = flushSize; logger.debug( "Initializing telemetry client with telemetry url: {}, flush size: {}, auth type: {}", telemetryUrl, forceFlushSize, authType); } /** * Return whether the client can be used to add/send metrics * * @return whether client is enabled */ public boolean isTelemetryEnabled() { return (this.session == null || this.session.isClientTelemetryEnabled()) && this.isTelemetryServiceAvailable; } /** Disable any use of the client to add/send metrics */ public void disableTelemetry() { logger.debug("Disabling telemetry"); this.isTelemetryServiceAvailable = false; } /** * Initialize the telemetry connector * * @param conn connection with the session to use for the connector * @param flushSize maximum size of telemetry batch before flush * @return a telemetry connector */ public static Telemetry createTelemetry(Connection conn, int flushSize) { try { return createTelemetry( (SFSession) conn.unwrap(SnowflakeConnectionImpl.class).getSFBaseSession(internalCallMarker()), flushSize); } catch (SQLException ex) { logger.debug("Input connection is not a SnowflakeConnection", false); return null; } } /** * Initialize the telemetry connector * * @param conn connection with the session to use for the connector * @return a telemetry connector */ public static Telemetry
createTelemetry(Connection conn) { return createTelemetry(conn, DEFAULT_FORCE_FLUSH_SIZE); } /** * Initialize the telemetry connector * * @param session session to use for telemetry dumps * @return a telemetry connector */ public static Telemetry createTelemetry(SFSession session) { return createTelemetry(session, DEFAULT_FORCE_FLUSH_SIZE); } /** * Initialize the telemetry connector * * @param session session to use for telemetry dumps * @param flushSize maximum size of telemetry batch before flush * @return a telemetry connector */ public static Telemetry createTelemetry(SFSession session, int flushSize) { return new TelemetryClient(session, flushSize); } /** * Initialize the sessionless telemetry connector using KEYPAIR_JWT as the default auth type * * @param httpClient client object used to communicate with another machine * @param serverUrl server url * @return a telemetry connector */ public static Telemetry createSessionlessTelemetry( CloseableHttpClient httpClient, String serverUrl) { // By default, use KEYPAIR_JWT as the auth type return createSessionlessTelemetry( httpClient, serverUrl, "KEYPAIR_JWT", DEFAULT_FORCE_FLUSH_SIZE); } /** * Initialize the sessionless telemetry connector * * @param httpClient client object used to communicate with another machine * @param serverUrl server url * @param authType authorization type for sessionless telemetry * @return a telemetry connector */ public static Telemetry createSessionlessTelemetry( CloseableHttpClient httpClient, String serverUrl, String authType) { return createSessionlessTelemetry(httpClient, serverUrl, authType, DEFAULT_FORCE_FLUSH_SIZE); } /** * Initialize the sessionless telemetry connector * * @param httpClient client object used to communicate with another machine * @param serverUrl server url * @param authType authorization type for sessionless telemetry * @param flushSize maximum size of telemetry batch before flush * @return a telemetry connector */ public static Telemetry createSessionlessTelemetry( CloseableHttpClient httpClient, String serverUrl, String authType, int flushSize) { return new TelemetryClient(httpClient, serverUrl, authType, flushSize); } /** * Add log to batch to be submitted to telemetry. Send batch if forceFlushSize reached * * @param log entry to add */ @Override public void addLogToBatch(TelemetryData log) { if (isClosed) { logger.debug("Telemetry already closed", false); return; } if (!isTelemetryEnabled()) { return; // if disabled, do nothing } synchronized (locker) { this.logBatch.add(log); } int logBatchSize = this.logBatch.size(); if (logBatchSize >= this.forceFlushSize) { logger.debug("Force flushing telemetry batch of size: {}", logBatchSize); this.sendBatchAsync(); } } /** * Add log to batch to be submitted to telemetry. Send batch if forceFlushSize reached * * @param message json node of log * @param timeStamp timestamp to use for log */ public void addLogToBatch(ObjectNode message, long timeStamp) { this.addLogToBatch(new TelemetryData(message, timeStamp)); } /** Close telemetry connector and send any unsubmitted logs */ @Override public void close() { if (isClosed) { logger.debug("Telemetry client already closed", false); return; } try { // sendBatch on close is synchronous; otherwise the client might be closed // before the data was sent.
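// get() blocks on the Future returned by sendBatchAsync(), turning the asynchronous send into a
// synchronous flush for the shutdown path.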
sendBatchAsync().get(); } catch (Throwable e) { logger.debug("Error when sending batch data, {}", e); } finally { this.isClosed = true; } } /** * Return whether the client has been closed * * @return whether client is closed */ public boolean isClosed() { return this.isClosed; } @Override public Future<Boolean> sendBatchAsync() { return TelemetryThreadPool.getInstance() .submit( () -> { try { return this.sendBatch(); } catch (Throwable e) { logger.debug("Failed to send telemetry data, {}", e); return false; } }); } @Override public void postProcess(String queryId, String sqlState, int vendorCode, Throwable ex) { // This is a no-op. } /** * Send all cached logs to server * * @return whether the logs were sent successfully * @throws IOException if closed or uploading batch fails */ private boolean sendBatch() throws IOException { if (isClosed) { throw new IOException("Telemetry connector is closed"); } if (!isTelemetryEnabled()) { return false; } LinkedList<TelemetryData> tmpList; synchronized (locker) { tmpList = this.logBatch; this.logBatch = new LinkedList<>(); } if (this.session != null && this.session.isClosed()) { throw new UnexpectedException("Session is closed when sending log"); } if (!tmpList.isEmpty()) { Stopwatch stopwatch = new Stopwatch(); stopwatch.start(); // session shared with JDBC String payload = logsToString(tmpList); logger.debugNoMask("Payload of telemetry is: " + payload); HttpPost post = new HttpPost(this.telemetryUrl); post.setEntity(new StringEntity(payload)); post.setHeader("Content-type", "application/json"); if (this.session == null) { post.setHeader(HttpHeaders.AUTHORIZATION, "Bearer " + this.token); post.setHeader("X-Snowflake-Authorization-Token-Type", this.authType); post.setHeader(HttpHeaders.ACCEPT, "application/json"); } else { post.setHeader( HttpHeaders.AUTHORIZATION, "Snowflake Token=\"" + this.session.getSessionToken(internalCallMarker()) + "\""); } String response = null; try { response = this.session == null ? HttpUtil.executeGeneralRequest( post, TELEMETRY_HTTP_RETRY_TIMEOUT_IN_SEC, 0, (int) HttpUtil.getSocketTimeout().toMillis(), 0, this.httpClient, null) : HttpUtil.executeGeneralRequest( post, TELEMETRY_HTTP_RETRY_TIMEOUT_IN_SEC, 0, this.session.getHttpClientSocketTimeout(), 0, this.session.getHttpClientKey(), this.session); stopwatch.stop(); logger.debug( "Sending telemetry took {} ms.
Batch size: {}", stopwatch.elapsedMillis(), tmpList.size()); } catch (SnowflakeSQLException e) { disableTelemetry(); // when got error like 404 or bad request, disable telemetry in this // telemetry instance logger.error( "Telemetry request failed, response: {}, exception: {}", response, e.getMessage()); return false; } } return true; } /** * Send a log to the server, along with any existing logs waiting to be sent * * @param log entry to send * @return whether the logs were sent successfully * @throws IOException if closed or uploading batch fails */ public boolean sendLog(TelemetryData log) throws IOException { addLogToBatch(log); return sendBatch(); } /** * Send a log to the server, along with any existing logs waiting to be sent * * @param message json node of log * @param timeStamp timestamp to use for log * @return whether the logs were sent successfully * @throws IOException if closed or uploading batch fails */ public boolean sendLog(ObjectNode message, long timeStamp) throws IOException { return this.sendLog(new TelemetryData(message, timeStamp)); } /** * convert a list of log to a JSON object * * @param telemetryData a list of log * @return the result json string */ static ObjectNode logsToJson(LinkedList telemetryData) { ObjectNode node = mapper.createObjectNode(); ArrayNode logs = mapper.createArrayNode(); for (TelemetryData data : telemetryData) { logs.add(data.toJson()); } node.set("logs", logs); return node; } /** * convert a list of log to a JSON String * * @param telemetryData a list of log * @return the result json string */ static String logsToString(LinkedList telemetryData) { return logsToJson(telemetryData).toString(); } /** * For test use only * * @return the number of cached logs */ public int bufferSize() { return this.logBatch.size(); } /** * For test use only * * @return a copy of the logs currently in the buffer */ public LinkedList logBuffer() { return new LinkedList<>(this.logBatch); } /** * Refresh the JWT/OAuth token * * @param token latest JWT/OAuth token */ public void refreshToken(String token) { this.token = token; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/telemetry/TelemetryData.java ================================================ package net.snowflake.client.internal.jdbc.telemetry; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ObjectNode; import net.snowflake.client.internal.core.ObjectMapperFactory; import net.snowflake.client.internal.util.SecretDetector; public class TelemetryData { // message is a json node private final ObjectNode message; private final long timeStamp; private static final ObjectMapper mapper = ObjectMapperFactory.getObjectMapper(); // Only allow code in same package to construct TelemetryData TelemetryData(ObjectNode message, long timeStamp) { this.message = (ObjectNode) SecretDetector.maskJacksonNode(message); this.timeStamp = timeStamp; } public long getTimeStamp() { return timeStamp; } public ObjectNode getMessage() { return message; } public ObjectNode toJson() { ObjectNode node = mapper.createObjectNode(); node.put("timestamp", this.timeStamp + ""); node.set("message", this.message); return node; } @Override public String toString() { return toJson().toString(); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/telemetry/TelemetryField.java ================================================ package net.snowflake.client.internal.jdbc.telemetry; // TODO: 
SNOW-2223750 Refactor this enum, as it contains the possible values of the field "type" and // is misleading. Separate values from the field names. public enum TelemetryField { // Fields TYPE("type"), VALUE("value"), DRIVER_TYPE("DriverType"), DRIVER_VERSION("DriverVersion"), QUERY_ID("QueryID"), SQL_STATE("SQLState"), ERROR_NUMBER("ErrorNumber"), ERROR_MESSAGE("ErrorMessage"), REASON("reason"), // Values of the field "type" // we use "client_" as a prefix for all metrics on the client side TIME_CONSUME_FIRST_RESULT("client_time_consume_first_result"), TIME_CONSUME_LAST_RESULT("client_time_consume_last_result"), TIME_WAITING_FOR_CHUNKS("client_time_waiting_for_chunks"), TIME_DOWNLOADING_CHUNKS("client_time_downloading_chunks"), TIME_PARSING_CHUNKS("client_time_parsing_chunks"), TIME_DOWNLOADING_CRL("client_time_downloading_crl"), TIME_PARSING_CRL("client_time_parsing_crl"), CLIENT_CRL_STATS("client_crl_stats"), CRL_URL("client_crl_url"), CRL_BYTES("client_crl_bytes"), CRL_REVOKED_CERTIFICATES("client_revoked_certificates"), FAILED_BIND_SERIALIZATION("client_failed_bind_serialization"), FAILED_BIND_UPLOAD("client_failed_bind_upload"), FAILED_BIND_OTHER("client_failed_bind_other"), SQL_EXCEPTION("client_sql_exception"), METADATA_METRICS("client_metadata_api_metrics"), HTTP_EXCEPTION("client_http_exception"), OCSP_EXCEPTION("client_ocsp_exception"), INTERNAL_API_USAGE("client_internal_api_usage"), HEARTBEAT_MAX_THREADS_EXCEEDED("client_heartbeat_max_threads_exceeded"); public final String field; TelemetryField(String field) { this.field = field; } @Override public String toString() { return field; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/telemetry/TelemetryUtil.java ================================================ package net.snowflake.client.internal.jdbc.telemetry; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ObjectNode; import net.snowflake.client.api.driver.SnowflakeDriver; import net.snowflake.client.internal.core.ObjectMapperFactory; import net.snowflake.common.core.LoginInfoDTO; public class TelemetryUtil { private static final ObjectMapper mapper = ObjectMapperFactory.getObjectMapper(); public static final int NO_VENDOR_CODE = -1; @Deprecated public static final String TYPE = "type"; @Deprecated public static final String QUERY_ID = "query_id"; @Deprecated public static final String VALUE = "value"; /** * Create a simple TelemetryData instance for Job metrics using given parameters * * @param queryId the id of the query * @param field the field to log (represents the "type" field in telemetry) * @param value the value to log for the field * @return TelemetryData instance constructed from parameters */ public static TelemetryData buildJobData(String queryId, TelemetryField field, long value) { ObjectNode obj = mapper.createObjectNode(); obj.put(TelemetryField.TYPE.toString(), field.toString()); obj.put(TelemetryField.QUERY_ID.toString(), queryId); obj.put(TelemetryField.VALUE.toString(), value); return new TelemetryData(obj, System.currentTimeMillis()); } public static TelemetryData buildJobData(ObjectNode obj) { return new TelemetryData(obj, System.currentTimeMillis()); } /** * Helper function to create ObjectNode for IB telemetry log * * @param queryId query ID * @param sqlState the SQL state * @param errorNumber the error number * @param type the telemetry field type * @param errorMessage 
the error message * @param reason the reason for the error * @return ObjectNode for IB telemetry log */ public static ObjectNode createIBValue( String queryId, String sqlState, int errorNumber, TelemetryField type, String errorMessage, String reason) { ObjectNode ibValue = mapper.createObjectNode(); ibValue.put(TelemetryField.TYPE.toString(), type.toString()); ibValue.put(TelemetryField.DRIVER_TYPE.toString(), LoginInfoDTO.SF_JDBC_APP_ID); ibValue.put( TelemetryField.DRIVER_VERSION.toString(), SnowflakeDriver.getImplementationVersion()); if (!isNullOrEmpty(queryId)) { ibValue.put(TelemetryField.QUERY_ID.toString(), queryId); } if (!isNullOrEmpty(sqlState)) { ibValue.put(TelemetryField.SQL_STATE.toString(), sqlState); } if (errorNumber != NO_VENDOR_CODE) { ibValue.put(TelemetryField.ERROR_NUMBER.toString(), errorNumber); } if (!isNullOrEmpty(errorMessage)) { ibValue.put(TelemetryField.ERROR_MESSAGE.toString(), errorMessage); } if (!isNullOrEmpty(reason)) { ibValue.put(TelemetryField.REASON.toString(), reason); } return ibValue; } public static TelemetryData buildCrlData( String crlUrl, long crlBytes, int revokedCertificates, long downloadTime, long parseTime) { ObjectNode obj = mapper.createObjectNode(); obj.put(TelemetryField.TYPE.toString(), TelemetryField.CLIENT_CRL_STATS.toString()); obj.put(TelemetryField.CRL_URL.toString(), crlUrl); obj.put(TelemetryField.CRL_BYTES.toString(), crlBytes); obj.put(TelemetryField.CRL_REVOKED_CERTIFICATES.toString(), revokedCertificates); obj.put(TelemetryField.TIME_DOWNLOADING_CRL.toString(), downloadTime); obj.put(TelemetryField.TIME_PARSING_CRL.toString(), parseTime); return new TelemetryData(obj, System.currentTimeMillis()); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/telemetryOOB/TelemetryEvent.java ================================================ package net.snowflake.client.internal.jdbc.telemetryOOB; import java.io.PrintWriter; import java.io.StringWriter; import java.util.HashMap; import net.minidev.json.JSONArray; import net.minidev.json.JSONObject; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.core.UUIDUtils; import net.snowflake.client.internal.util.SFTimestamp; import net.snowflake.client.internal.util.SecretDetector; import net.snowflake.common.core.ResourceBundleManager; /** Telemetry Event Class */ public class TelemetryEvent extends JSONObject { private static final long serialVersionUID = 1L; private static final int schemaVersion = 1; public enum Type { Metric, Log } /** Build metric json object */ public static class MetricBuilder extends Builder<MetricBuilder> { public MetricBuilder withException(Exception ex) { this.withName("Exception:" + ex.getMessage()); this.withValue(1); return this; } public MetricBuilder() { super(MetricBuilder.class); } public MetricBuilder withValue(int value) { body.put("Value", value); return this; } public MetricBuilder withValue(float value) { body.put("Value", value); return this; } public TelemetryEvent build() { TelemetryEvent event = super.build(); event.put("Type", Type.Metric); return event; } } /** Build log json object */ public static class LogBuilder extends Builder<LogBuilder> { public LogBuilder() { super(LogBuilder.class); } /** * build a log event for an exception including the full stack trace * * @param ex The exception to build a log event * @return The log event builder */ public LogBuilder withException(Exception ex) { this.withName("Exception:" + ex.getMessage()); StringWriter sw = new StringWriter();
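// Render the full stack trace into a string; withValue(String) below masks secrets before the
// trace is attached to the event body.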
PrintWriter pw = new PrintWriter(sw); ex.printStackTrace(pw); String stackTrace = sw.toString(); // stack trace as a string this.withValue(stackTrace); return this; } public LogBuilder withException(final SFException ex) { this.withName("Exception:" + ex.getMessage()); StringWriter sw = new StringWriter(); PrintWriter pw = new PrintWriter(sw); ex.printStackTrace(pw); String stackTrace = sw.toString(); // stack trace as a string this.withValue(stackTrace); return this; } public LogBuilder withValue(String value) { body.put("Value", SecretDetector.maskSecrets(value)); return this; } public LogBuilder withValue(JSONObject value) { body.put("Value", SecretDetector.maskJsonObject(value)); return this; } public LogBuilder withValue(JSONArray value) { body.put("Value", SecretDetector.maskJsonArray(value)); return this; } public TelemetryEvent build() { TelemetryEvent event = super.build(); event.put("Type", Type.Log); return event; } } private static class Builder<T> { protected final Class<T> builderClass; protected TelemetryEvent body = new TelemetryEvent(); protected HashMap<String, String> tags = new HashMap<>(); private static final String version = ResourceBundleManager.getSingleton("net.snowflake.client.jdbc.version") .getLocalizedMessage("version"); private static final String driver = "JDBC"; public Builder(Class<T> builderClass) { this.builderClass = builderClass; withTag("driver", driver); withTag("version", version); TelemetryService instance = TelemetryService.getInstance(); withTag("telemetryServerDeployment", instance.getServerDeploymentName()); withTag("connectionString", instance.getDriverConnectionString()); JSONObject context = instance.getContext(); if (context != null) { for (String key : context.keySet()) { Object val = context.get(key); if (val != null) { withTag( "ctx_" + key.toLowerCase(), SecretDetector.maskParameterValue(key, val.toString())); } } } } public T withName(String name) { body.put("Name", SecretDetector.maskSecrets(name)); return builderClass.cast(this); } public T withTag(String name, int value) { return withTag(name, Integer.toString(value)); } public T withTag(String name, String value) { if (value != null && value.length() > 0) { tags.put(name, SecretDetector.maskSecrets(value)); } return builderClass.cast(this); } protected TelemetryEvent build() { body.put("UUID", UUIDUtils.getUUID().toString()); body.put("Created_On", SFTimestamp.getUTCNow()); body.put("SchemaVersion", schemaVersion); this.putMap("Tags", tags); return body; } private void putMap(String name, HashMap<String, String> map) { JSONObject tags = new JSONObject(); for (String key : map.keySet()) { tags.put(key, map.get(key)); } body.put(name, tags); } } /** * @return the deployment of this event */ public String getDeployment() { JSONArray tags = (JSONArray) this.get("Tags"); for (Object tag : tags) { JSONObject json = (JSONObject) tag; if (json.get("Name").toString().compareTo("deployment") == 0) { return json.get("Value").toString(); } } return "Unknown"; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/telemetryOOB/TelemetryService.java ================================================ package net.snowflake.client.internal.jdbc.telemetryOOB; import java.io.PrintWriter; import java.io.StringWriter; import java.security.cert.CertificateException; import java.util.Arrays; import java.util.HashSet; import java.util.Map; import java.util.Properties; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import
net.minidev.json.JSONArray; import net.minidev.json.JSONObject; import net.snowflake.client.internal.jdbc.SnowflakeConnectString; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import net.snowflake.client.internal.util.SecretDetector; import net.snowflake.client.internal.util.Stopwatch; import org.apache.http.client.config.RequestConfig; import org.apache.http.client.methods.CloseableHttpResponse; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpRequestBase; import org.apache.http.entity.StringEntity; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClientBuilder; import org.apache.http.util.EntityUtils; /** * Out-of-band telemetry service. This is a thread-safe singleton queue containing telemetry * messages. */ public class TelemetryService { private static final SFLogger logger = SFLoggerFactory.getLogger(TelemetryService.class); private static ThreadLocal<TelemetryService> _threadLocal = new ThreadLocal<TelemetryService>() { @Override protected TelemetryService initialValue() { return new TelemetryService(); } }; // Global parameters: private static final String TELEMETRY_SERVER_URL_PATTERN = "https://(sfcdev\\.|sfctest\\.|)?client-telemetry\\.[a-z0-9\\.\\-]*snowflake[computing]?\\" + ".com/enqueue"; /** * control which deployments are enabled: the service skips all events for the disabled * deployments */ private static HashSet<String> ENABLED_DEPLOYMENT = new HashSet<>( Arrays.asList( TELEMETRY_SERVER_DEPLOYMENT.DEV.name, TELEMETRY_SERVER_DEPLOYMENT.REG.name, TELEMETRY_SERVER_DEPLOYMENT.QA1.name, TELEMETRY_SERVER_DEPLOYMENT.PREPROD3.name, TELEMETRY_SERVER_DEPLOYMENT.PROD.name, TELEMETRY_SERVER_DEPLOYMENT.K8TEST.name)); // connection string for current connection private String connStr = ""; // current snowflake connection string private SnowflakeConnectString sfConnStr; /** * @return the thread-local instance */ public static TelemetryService getInstance() { return _threadLocal.get(); } private static final int DEFAULT_NUM_OF_RETRY_TO_TRIGGER_TELEMETRY = 10; /** the number of retries required to trigger the HTTP timeout telemetry event */ private int numOfRetryToTriggerTelemetry = DEFAULT_NUM_OF_RETRY_TO_TRIGGER_TELEMETRY; // local parameters /** the context (e.g., connection properties) to be included in the telemetry events */ private JSONObject context; public void resetNumOfRetryToTriggerTelemetry() { numOfRetryToTriggerTelemetry = DEFAULT_NUM_OF_RETRY_TO_TRIGGER_TELEMETRY; } public int getNumOfRetryToTriggerTelemetry() { return numOfRetryToTriggerTelemetry; } public void setNumOfRetryToTriggerTelemetry(int num) { numOfRetryToTriggerTelemetry = num; } private TELEMETRY_SERVER_DEPLOYMENT serverDeployment = TELEMETRY_SERVER_DEPLOYMENT.PROD; /** * control enable/disable of the whole service: a disabled service will skip added events and * uploads to the server */ private static boolean enabled = true; private static boolean htapEnabled = false; private static final Object enableLock = new Object(); private static final Object enableHTAPLock = new Object(); public static void enable() { synchronized (enableLock) { logger.debug("Enabling out-of-band telemetry", false); enabled = true; } } public static void disable() { synchronized (enableLock) { logger.debug("Disabling out-of-band telemetry", false); enabled = false; } } public static void enableHTAP() { synchronized (enableHTAPLock) { logger.debug("Enabling out-of-band HTAP telemetry"); htapEnabled = true; } } public static void disableHTAP() {
synchronized (enableHTAPLock) { logger.debug("Disabling out-of-band HTAP telemetry"); htapEnabled = false; } } public static void disableOOBTelemetry() { disable(); disableHTAP(); } public boolean isEnabled() { synchronized (enableLock) { return enabled; } } public boolean isHTAPEnabled() { synchronized (enableHTAPLock) { return htapEnabled; } } public JSONObject getContext() { return context; } /** * Note: Only used for IT * * @param params parameter map */ public void updateContextForIT(Map<String, String> params) { Properties info = new Properties(); for (String key : params.keySet()) { Object val = params.get(key); if (val != null) { info.put(key, val); } } SnowflakeConnectString conStr = SnowflakeConnectString.parse(params.get("uri"), info); this.updateContext(conStr); } public void updateContext(SnowflakeConnectString conStr) { if (conStr != null) { sfConnStr = conStr; configureDeployment(conStr); context = new JSONObject(); for (Map.Entry<String, Object> entry : conStr.getParameters().entrySet()) { String k = entry.getKey(); Object v = entry.getValue(); if (!SecretDetector.isSensitive(k)) { context.put(k, v); } } } } private TELEMETRY_SERVER_DEPLOYMENT manuallyConfigureDeployment(String dep) { switch (dep) { case "K8TEST": return TELEMETRY_SERVER_DEPLOYMENT.K8TEST; case "REG": return TELEMETRY_SERVER_DEPLOYMENT.REG; case "DEV": return TELEMETRY_SERVER_DEPLOYMENT.DEV; case "QA1": return TELEMETRY_SERVER_DEPLOYMENT.QA1; case "PREPROD": return TELEMETRY_SERVER_DEPLOYMENT.PREPROD3; case "PROD": return TELEMETRY_SERVER_DEPLOYMENT.PROD; default: return null; } } /** * Configure telemetry deployment based on connection url and info. Note: it is not thread-safe * while connecting to different deployments simultaneously. * * @param conStr Connect String */ private void configureDeployment(SnowflakeConnectString conStr) { if (!conStr.isValid()) { return; } connStr = conStr.toString(); String account = conStr.getAccount(); int port = conStr.getPort(); // default value TELEMETRY_SERVER_DEPLOYMENT deployment = TELEMETRY_SERVER_DEPLOYMENT.PROD; Map<String, Object> conParams = conStr.getParameters(); if (conParams.containsKey("TELEMETRYDEPLOYMENT")) { String conDeployment = String.valueOf(conParams.get("TELEMETRYDEPLOYMENT")).trim().toUpperCase(); deployment = manuallyConfigureDeployment(conDeployment); if (deployment != null) { this.setDeployment(deployment); return; } } if (conStr.getHost().contains("reg") || conStr.getHost().contains("local")) { deployment = TELEMETRY_SERVER_DEPLOYMENT.REG; if (port == 8080) { deployment = TELEMETRY_SERVER_DEPLOYMENT.DEV; } } else if (conStr.getHost().contains("qa1") || account.contains("qa1")) { deployment = TELEMETRY_SERVER_DEPLOYMENT.QA1; } else if (conStr.getHost().contains("preprod3")) { deployment = TELEMETRY_SERVER_DEPLOYMENT.PREPROD3; } else if (conStr.getHost().contains("snowflake.temptest")) { deployment = TELEMETRY_SERVER_DEPLOYMENT.QA1; } this.setDeployment(deployment); } /** * whether the telemetry service is enabled for the current deployment * * @return true if the telemetry service is enabled for the current deployment */ public boolean isDeploymentEnabled() { return ENABLED_DEPLOYMENT.contains(this.serverDeployment.name); } public String getDriverConnectionString() { return this.connStr; } public SnowflakeConnectString getSnowflakeConnectionString() { return sfConnStr; } private enum TELEMETRY_API { SFCTEST( "https://sfctest.client-telemetry.snowflakecomputing.com/enqueue", "rRNY3EPNsB4U89XYuqsZKa7TSxb9QVX93yNM4tS6"), // pragma: allowlist secret SFCDEV(
"https://sfcdev.client-telemetry.snowflakecomputing.com/enqueue", "kyTKLWpEZSaJnrzTZ63I96QXZHKsgfqbaGmAaIWf"), // pragma: allowlist secret PROD( "https://client-telemetry.snowflakecomputing.com/enqueue", "wLpEKqnLOW9tGNwTjab5N611YQApOb3t9xOnE1rX"), // pragma: allowlist secret K8TEST("https://client-telemetry.ordevmisc1.us-west-2.aws-dev.app.snowflake.com/enqueue", ""); private final String url; // Note that this key is public available and only used as usage plan for // throttling private final String apiKey; TELEMETRY_API(String host, String key) { this.url = host; this.apiKey = key; } } public enum TELEMETRY_SERVER_DEPLOYMENT { DEV("dev", TELEMETRY_API.SFCDEV), REG("reg", TELEMETRY_API.SFCDEV), QA1("qa1", TELEMETRY_API.SFCDEV), PREPROD3("preprod3", TELEMETRY_API.SFCDEV), PROD("prod", TELEMETRY_API.PROD), K8TEST("k8test", TELEMETRY_API.K8TEST); private String name; private String url; private final String apiKey; TELEMETRY_SERVER_DEPLOYMENT(String name, TELEMETRY_API api) { this.name = name; this.url = api.url; this.apiKey = api.apiKey; } public String getURL() { return url; } public String getName() { return name; } public String getApiKey() { return apiKey; } public void setURL(String url) { this.url = url; } } public void setDeployment(TELEMETRY_SERVER_DEPLOYMENT deployment) { logger.debug("Setting out-of-band telemetry sever deployment to {}", deployment); serverDeployment = deployment; } public String getServerDeploymentName() { return serverDeployment.name; } private AtomicInteger eventCnt = new AtomicInteger(); private AtomicInteger clientFailureCnt = new AtomicInteger(); private AtomicInteger serverFailureCnt = new AtomicInteger(); private String lastClientError = ""; /** * @return the number of events successfully reported by this service */ public int getEventCount() { return eventCnt.get(); } /** * @return the number of times an event was attempted to be reported but failed due to a * client-side error */ public int getClientFailureCount() { return clientFailureCnt.get(); } /** * @return the number of times an event was attempted to be reported but failed due to a * server-side error */ public int getServerFailureCount() { return serverFailureCnt.get(); } /** * @return the string containing the most recent failed response */ public String getLastClientError() { return this.lastClientError; } /** Count one more successfully reported events */ public void count() { eventCnt.incrementAndGet(); } /** * Report the event to the telemetry server in a new thread * * @param event TelemetryEvent */ public void report(TelemetryEvent event) { reportChooseEvent(event, /* isHTAP */ false); } public void reportChooseEvent(TelemetryEvent event, boolean isHTAP) { if ((!enabled && !isHTAP) || (!htapEnabled && isHTAP) || event == null || event.isEmpty()) { return; } // Start a new thread to upload without blocking the current thread Runnable runUpload = new TelemetryUploader( this, exportQueueToString(event), exportQueueToLogString(event), isHTAP); TelemetryThreadPool.getInstance().execute(runUpload); } /** * Convert an event to a payload in string * * @param event TelemetryEvent * @return the string payload */ public String exportQueueToString(TelemetryEvent event) { JSONArray logs = new JSONArray(); logs.add(event); return logs.toString(); } public String exportQueueToLogString(TelemetryEvent event) { JSONArray logs = new JSONArray(); logs.add(event); return JSONArray.toJSONString(logs, new SecretDetector.SecretDetectorJSONStyle()); } static class TelemetryUploader implements Runnable { private 
TelemetryService instance; private String payload; private String payloadLogStr; private boolean isHTAP; private static final int TIMEOUT = 5000; // 5 second timeout limit private static final RequestConfig config = RequestConfig.custom() .setConnectTimeout(TIMEOUT) .setConnectionRequestTimeout(TIMEOUT) .setSocketTimeout(TIMEOUT) .build(); public TelemetryUploader( TelemetryService _instance, String _payload, String _payloadLogStr, boolean _isHTAP) { instance = _instance; payload = _payload; payloadLogStr = _payloadLogStr; isHTAP = _isHTAP; } public void run() { if (!isHTAP && !instance.enabled) { return; } if (isHTAP && !instance.htapEnabled) { return; } if (!instance.isDeploymentEnabled()) { // skip the disabled deployment logger.debug("Skip the disabled deployment: {}", instance.serverDeployment.name); return; } if (!instance.serverDeployment.url.matches(TELEMETRY_SERVER_URL_PATTERN)) { // skip deployments with an invalid url logger.debug("Ignore invalid url: {}", instance.serverDeployment.url); return; } uploadPayload(); } private void uploadPayload() { Stopwatch stopwatch = new Stopwatch(); stopwatch.start(); logger.debugNoMask( "Running out-of-band telemetry uploader. The payload is: " + payloadLogStr); CloseableHttpResponse response = null; boolean success = true; try { HttpPost post = new HttpPost(instance.serverDeployment.url); post.setEntity(new StringEntity(payload)); post.setHeader("Content-type", "application/json"); post.setHeader("x-api-key", instance.serverDeployment.getApiKey()); try (CloseableHttpClient httpClient = HttpClientBuilder.create().setDefaultRequestConfig(config).build()) { response = httpClient.execute(post); int statusCode = response.getStatusLine().getStatusCode(); if (statusCode == 200) { logger.debug("Out-of-band telemetry server request success: {}", response, true); instance.count(); } else if (statusCode == 429) { logger.debug( "Out-of-band telemetry server request hit server cap on response: {}", response); instance.serverFailureCnt.incrementAndGet(); } else { logger.debug("Out-of-band telemetry server request error: {}", response, true); instance.lastClientError = response.toString(); instance.clientFailureCnt.incrementAndGet(); success = false; } logger.debug(EntityUtils.toString(response.getEntity(), "UTF-8"), true); response.close(); } } catch (Exception e) { // exceptions thrown here are always captured logger.debug( "Out-of-band telemetry request failed, Exception response: {}, exception: {}", response, e.getMessage()); String res = "null"; if (response != null) { res = response.toString(); } instance.lastClientError = "Response: " + res + "; Error: " + e.getMessage(); instance.clientFailureCnt.incrementAndGet(); success = false; } finally { stopwatch.stop(); logger.debug( "Out-of-band telemetry request completed (success: {}). It took {} ms."
+ " Total successful events: {}, total unsuccessful events: {} (client failures: {}, server failures: {})", success, stopwatch.elapsedMillis(), instance.eventCnt, instance.clientFailureCnt.get() + instance.serverFailureCnt.get(), instance.clientFailureCnt, instance.serverFailureCnt); } } } /** * log OCSP exception to telemetry * * @param eventType event type * @param telemetryData JSON telemetry data * @param ex CertificateException */ public void logOCSPExceptionTelemetryEvent( String eventType, JSONObject telemetryData, CertificateException ex) { if (enabled) { String eventName = "OCSPException"; TelemetryEvent.LogBuilder logBuilder = new TelemetryEvent.LogBuilder(); if (ex != null) { telemetryData.put("exceptionMessage", ex.getLocalizedMessage()); StringWriter sw = new StringWriter(); ex.printStackTrace(new PrintWriter(sw)); telemetryData.put("exceptionStackTrace", sw.toString()); } TelemetryEvent log = logBuilder .withName(eventName) .withValue(telemetryData) .withTag("eventType", eventType) .build(); this.report(log); } } /** * log error http response to telemetry * * @param eventName the event name * @param request the HttpRequestBase * @param injectSocketTimeout the socket timeout * @param canceling cancelling * @param withoutCookies without cookies * @param includeRetryParameters include retry parameters * @param includeRequestGuid include rest GUID * @param response the CloseableHttpResponse * @param savedEx the saved exception * @param breakRetryReason the break retry reason * @param retryTimeout the retry timeout * @param retryCount retry count * @param sqlState the SQL state * @param errorCode the error code */ public void logHttpRequestTelemetryEvent( String eventName, HttpRequestBase request, int injectSocketTimeout, AtomicBoolean canceling, boolean withoutCookies, boolean includeRetryParameters, boolean includeRequestGuid, CloseableHttpResponse response, final Exception savedEx, String breakRetryReason, long retryTimeout, int retryCount, String sqlState, int errorCode) { if (enabled) { TelemetryEvent.LogBuilder logBuilder = new TelemetryEvent.LogBuilder(); JSONObject value = new JSONObject(); value.put("request", request.toString()); value.put("injectSocketTimeout", injectSocketTimeout); value.put("canceling", canceling == null ? 
"null" : canceling.get()); value.put("withoutCookies", withoutCookies); value.put("includeRetryParameters", includeRetryParameters); value.put("includeRequestGuid", includeRequestGuid); value.put("breakRetryReason", breakRetryReason); value.put("retryTimeout", retryTimeout); value.put("retryCount", retryCount); value.put("sqlState", sqlState); value.put("errorCode", errorCode); int responseStatusCode = -1; if (response != null) { value.put("response", response.toString()); value.put("responseStatusLine", response.getStatusLine().toString()); if (response.getStatusLine() != null) { responseStatusCode = response.getStatusLine().getStatusCode(); value.put("responseStatusCode", responseStatusCode); } } else { value.put("response", null); } if (savedEx != null) { value.put("exceptionMessage", savedEx.getLocalizedMessage()); StringWriter sw = new StringWriter(); savedEx.printStackTrace(new PrintWriter(sw)); value.put("exceptionStackTrace", sw.toString()); } TelemetryEvent log = logBuilder .withName(eventName) .withValue(value) .withTag("sqlState", sqlState) .withTag("errorCode", errorCode) .withTag("responseStatusCode", responseStatusCode) .build(); this.report(log); } } /** * log execution times from various processing slices * * @param telemetryData JSON telemetry data * @param eventName the event name */ public void logExecutionTimeTelemetryEvent(JSONObject telemetryData, String eventName) { if (htapEnabled) { TelemetryEvent.LogBuilder logBuilder = new TelemetryEvent.LogBuilder(); TelemetryEvent log = logBuilder .withName(eventName) .withValue(telemetryData) .withTag("eventType", eventName) .build(); this.reportChooseEvent(log, /* isHTAP */ true); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/telemetryOOB/TelemetryThreadPool.java ================================================ package net.snowflake.client.internal.jdbc.telemetryOOB; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; /** * A singleton class which wrapped the ExecutorService, which is used to submit telemetry data * asynchronously to server */ public class TelemetryThreadPool { private static final int CORE_POOL_SIZE = 10; private final ExecutorService uploader; private static TelemetryThreadPool instance; public static TelemetryThreadPool getInstance() { if (instance == null) { synchronized (TelemetryThreadPool.class) { if (instance == null) { instance = new TelemetryThreadPool(); } } } return instance; } /** * Private constructor to initialize the singleton instance. * *

Configures a thread pool that scales dynamically based on workload. The pool starts with * zero threads and will create new threads on demand up to a maximum of 10. If all 10 threads are * active, new tasks are placed in an unbounded queue to await execution. * *
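* <p>Callers hand work to the shared pool the same way {@code TelemetryService} schedules an
* upload; a minimal usage sketch ({@code uploadPayload} is an illustrative placeholder, not an
* API of this class):
*
* <pre>{@code
* Runnable runUpload = () -> uploadPayload(); // any Runnable that performs the upload
* TelemetryThreadPool.getInstance().execute(runUpload);
* }</pre>
*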

To conserve resources, threads that are idle for more than 30 seconds are terminated, allowing the pool to shrink back to zero during periods of inactivity. */ private TelemetryThreadPool() { // Create a thread factory that creates daemon threads to prevent blocking JVM termination ThreadFactory daemonThreadFactory = r -> { Thread thread = Executors.defaultThreadFactory().newThread(r); thread.setName("telemetry-uploader-" + thread.getId()); thread.setDaemon(true); return thread; }; uploader = new ThreadPoolExecutor( CORE_POOL_SIZE, // core size CORE_POOL_SIZE, // max size 30L, // keep alive time TimeUnit.SECONDS, new LinkedBlockingQueue<>(), // work queue daemonThreadFactory // thread factory ); // Allow core threads to time out and be terminated when idle. ((ThreadPoolExecutor) uploader).allowCoreThreadTimeOut(true); } public void execute(Runnable task) { uploader.execute(task); } public <T> Future<T> submit(Callable<T> task) { return uploader.submit(task); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/util/DriverUtil.java ================================================ package net.snowflake.client.internal.jdbc.util; import net.snowflake.client.api.driver.SnowflakeDriver; public class DriverUtil { public static String getImplementationVersion() { return SnowflakeDriver.getImplementationVersion(); } /** * Utility method to verify whether the standard or FIPS snowflake-jdbc driver is being used. * * @return the implementation title; falls back to "snowflake-jdbc" when the package metadata * cannot be resolved */ static String getImplementationTitle() { Package pkg = Package.getPackage("net.snowflake.client.internal.jdbc"); return pkg != null ? pkg.getImplementationTitle() : "snowflake-jdbc"; } /** * Utility method to get the complete jar name with version. * * @return the jar name with version */ public static String getJdbcJarname() { return String.format("%s-%s", getImplementationTitle(), getImplementationVersion()); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/util/SnowflakeTypeHelper.java ================================================ package net.snowflake.client.internal.jdbc.util; import java.math.BigDecimal; import java.sql.Timestamp; import java.sql.Types; import java.time.Duration; import java.time.Period; import java.util.HashSet; import java.util.Set; import net.snowflake.client.api.resultset.SnowflakeType; /** * Internal helper class for SnowflakeType conversions and utilities. This class contains nested * enums, constants, and utility methods that are used internally by the JDBC driver but should not * be part of the public API. * *
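* <p>For example, {@link #convertStringToType(String)} strips precision and column constraints
* before mapping a Snowflake type name to a {@link java.sql.Types} constant:
*
* <pre>{@code
* SnowflakeTypeHelper.convertStringToType("NUMBER(38,0)");   // Types.NUMERIC
* SnowflakeTypeHelper.convertStringToType("FLOAT NOT NULL"); // Types.FLOAT
* SnowflakeTypeHelper.convertStringToType(null);             // Types.NULL
* }</pre>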

Note: This is an internal API and should not be used by customers. */ public final class SnowflakeTypeHelper { private SnowflakeTypeHelper() { // Prevent instantiation } public static final String DATE_OR_TIME_FORMAT_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSSXXX"; public static final String TIMESTAMP_FORMAT_PATTERN = "yyyy-MM-dd'T'HH:mm:ss."; public static final String TIMESTAMP_FORMAT_TZ_PATTERN = "XXX"; public static final String TIME_FORMAT_PATTERN = "HH:mm:ss.SSS"; private static final byte[] BYTE_ARRAY = new byte[0]; public static final String BINARY_CLASS_NAME = BYTE_ARRAY.getClass().getName(); /** * Converts text of data type (returned from SQL query) into Types type, represented by an int. * * @param typeName type name * @return int representation of type from {@link java.sql.Types} */ public static int convertStringToType(String typeName) { int retval = Types.NULL; if (typeName == null || typeName.trim().isEmpty()) { return retval; } // Trim all whitespace and extra information off typeName so it can be interpreted by switch // statement. Ex: turns // "NUMBER(38,0)" -> "NUMBER" and "FLOAT NOT NULL" ->" FLOAT" String typeNameTrimmed = typeName.trim(); if (typeNameTrimmed.contains("(")) { typeNameTrimmed = typeNameTrimmed.substring(0, typeNameTrimmed.indexOf('(')); } if (typeNameTrimmed.contains(" ")) { typeNameTrimmed = typeNameTrimmed.substring(0, typeNameTrimmed.indexOf(' ')); } switch (typeNameTrimmed.toLowerCase()) { case "number": case "numeric": retval = Types.NUMERIC; break; case "decfloat": retval = SnowflakeType.EXTRA_TYPES_DECFLOAT; break; case "decimal": retval = Types.DECIMAL; break; case "int": case "integer": case "byteint": retval = Types.INTEGER; break; case "tinyint": retval = Types.TINYINT; break; case "smallint": retval = Types.SMALLINT; break; case "bigint": retval = Types.BIGINT; break; case "float": case "float4": case "float8": retval = Types.FLOAT; break; case "double": case "double precision": retval = Types.DOUBLE; break; case "real": retval = Types.REAL; break; case "char": case "character": retval = Types.CHAR; break; case "varchar": case "string": case "text": retval = Types.VARCHAR; break; case "binary": retval = Types.BINARY; break; case "varbinary": retval = Types.VARBINARY; break; case "boolean": retval = Types.BOOLEAN; break; case "date": retval = Types.DATE; break; case "time": retval = Types.TIME; break; case "timestamp": case "datetime": case "timestamp_ntz": retval = Types.TIMESTAMP; break; case "timestamp_ltz": case "timestamp_tz": retval = Types.TIMESTAMP_WITH_TIMEZONE; break; case "interval_year_month": retval = SnowflakeType.EXTRA_TYPES_YEAR_MONTH_INTERVAL; break; case "interval_day_time": retval = SnowflakeType.EXTRA_TYPES_DAY_TIME_INTERVAL; break; case "variant": retval = Types.OTHER; break; case "object": retval = Types.JAVA_OBJECT; break; case "vector": retval = SnowflakeType.EXTRA_TYPES_VECTOR; break; case "array": retval = Types.ARRAY; break; default: retval = Types.OTHER; break; } return retval; } /** * Determines if a Java SQL type is signed. * * @param type the Java SQL type from {@link java.sql.Types} * @return true if the type is signed (INTEGER, DECIMAL, or DOUBLE) */ public static boolean isJavaTypeSigned(int type) { return type == Types.INTEGER || type == Types.DECIMAL || type == Types.DOUBLE; } /** Internal enum representing Java data types for Snowflake columns. 
*/ public enum JavaDataType { JAVA_STRING(String.class), JAVA_LONG(Long.class), JAVA_DOUBLE(Double.class), JAVA_BIGDECIMAL(BigDecimal.class), JAVA_TIMESTAMP(Timestamp.class), JAVA_PERIOD(Period.class), JAVA_DURATION(Duration.class), JAVA_BYTES(byte[].class), JAVA_BOOLEAN(Boolean.class), JAVA_OBJECT(Object.class); JavaDataType(Class<?> c) { this._class = c; } private Class<?> _class; } /** Internal enum representing Java SQL types with convenient lookup methods. */ public enum JavaSQLType { ARRAY(Types.ARRAY), DATALINK(Types.DATALINK), BIGINT(Types.BIGINT), BINARY(Types.BINARY), BIT(Types.BIT), BLOB(Types.BLOB), BOOLEAN(Types.BOOLEAN), CHAR(Types.CHAR), CLOB(Types.CLOB), DATE(Types.DATE), DECIMAL(Types.DECIMAL), DISTINCT(Types.DISTINCT), DOUBLE(Types.DOUBLE), FLOAT(Types.FLOAT), INTEGER(Types.INTEGER), JAVA_OBJECT(Types.JAVA_OBJECT), LONGNVARCHAR(Types.LONGNVARCHAR), LONGVARBINARY(Types.LONGVARBINARY), LONGVARCHAR(Types.LONGVARCHAR), NCHAR(Types.NCHAR), NCLOB(Types.NCLOB), NULL(Types.NULL), NUMERIC(Types.NUMERIC), NVARCHAR(Types.NVARCHAR), OTHER(Types.OTHER), REAL(Types.REAL), REF(Types.REF), REF_CURSOR(Types.REF_CURSOR), ROWID(Types.ROWID), SMALLINT(Types.SMALLINT), SQLXML(Types.SQLXML), STRUCT(Types.STRUCT), TIME(Types.TIME), TIME_WITH_TIMEZONE(Types.TIME_WITH_TIMEZONE), TIMESTAMP(Types.TIMESTAMP), TIMESTAMP_WITH_TIMEZONE(Types.TIMESTAMP_WITH_TIMEZONE), TINYINT(Types.TINYINT), VARBINARY(Types.VARBINARY), VARCHAR(Types.VARCHAR), VECTOR(Types.ARRAY); private final int type; public static final Set<JavaSQLType> ALL_TYPES = new HashSet<>(); static { ALL_TYPES.add(ARRAY); ALL_TYPES.add(DATALINK); ALL_TYPES.add(BIGINT); ALL_TYPES.add(BINARY); ALL_TYPES.add(BIT); ALL_TYPES.add(BLOB); ALL_TYPES.add(BOOLEAN); ALL_TYPES.add(CHAR); ALL_TYPES.add(CLOB); ALL_TYPES.add(DATE); ALL_TYPES.add(DECIMAL); ALL_TYPES.add(DISTINCT); ALL_TYPES.add(DOUBLE); ALL_TYPES.add(FLOAT); ALL_TYPES.add(INTEGER); ALL_TYPES.add(JAVA_OBJECT); ALL_TYPES.add(LONGNVARCHAR); ALL_TYPES.add(LONGVARBINARY); ALL_TYPES.add(LONGVARCHAR); ALL_TYPES.add(NCHAR); ALL_TYPES.add(NCLOB); ALL_TYPES.add(NULL); ALL_TYPES.add(NUMERIC); ALL_TYPES.add(NVARCHAR); ALL_TYPES.add(OTHER); ALL_TYPES.add(REAL); ALL_TYPES.add(REF); ALL_TYPES.add(REF_CURSOR); ALL_TYPES.add(ROWID); ALL_TYPES.add(SMALLINT); ALL_TYPES.add(SQLXML); ALL_TYPES.add(STRUCT); ALL_TYPES.add(TIME); ALL_TYPES.add(TIME_WITH_TIMEZONE); ALL_TYPES.add(TIMESTAMP); ALL_TYPES.add(TIMESTAMP_WITH_TIMEZONE); ALL_TYPES.add(TINYINT); ALL_TYPES.add(VARBINARY); ALL_TYPES.add(VARCHAR); ALL_TYPES.add(VECTOR); } JavaSQLType(int type) { this.type = type; } public int getType() { return type; } /** * Find a JavaSQLType by its integer type value.
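* <p>For example:
*
* <pre>{@code
* JavaSQLType.find(Types.VARCHAR); // returns JavaSQLType.VARCHAR
* JavaSQLType.find(-9999);         // returns null (no matching constant)
* }</pre>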
* * @param type the integer type from {@link java.sql.Types} * @return the corresponding JavaSQLType, or null if not found */ public static JavaSQLType find(int type) { for (JavaSQLType t : ALL_TYPES) { if (t.type == type) { return t; } } return null; } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/jdbc/util/SnowflakeTypeUtil.java ================================================ package net.snowflake.client.internal.jdbc.util; import java.math.BigDecimal; import java.sql.SQLException; import java.sql.Time; import java.sql.Timestamp; import java.text.DateFormat; import java.time.Duration; import java.time.Period; import java.util.Date; import java.util.Locale; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.common.core.SFBinary; import net.snowflake.client.internal.core.SFBaseSession; import net.snowflake.client.internal.exception.SnowflakeSQLLoggedException; import net.snowflake.common.core.SqlState; /** * Internal utility class for SnowflakeType conversions and formatting. These methods are used * internally by the driver and should not be considered part of the public API. */ public class SnowflakeTypeUtil { /** * Converts a string to a SnowflakeType enum value. * * @param name the type name * @return the corresponding SnowflakeType * @throws IllegalArgumentException if the type name is not a known SnowflakeType */ public static SnowflakeType fromString(String name) { return SnowflakeType.valueOf(name.toUpperCase(Locale.ROOT)); } /** * Converts a string to a SnowflakeType enum value, returning null for unknown types. * * @param name the type name * @return the corresponding SnowflakeType, or null if the type name is not recognized */ public static SnowflakeType fromStringOrNull(String name) { try { return fromString(name); } catch (IllegalArgumentException e) { return null; } } /** * Gets the Java data type for a Snowflake type. * * @param type the Snowflake type * @return the corresponding Java data type */ public static SnowflakeTypeHelper.JavaDataType getJavaType(SnowflakeType type) { return getJavaType(type, false); } /** * Gets the Java data type for a Snowflake type. 
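* <p>For example:
*
* <pre>{@code
* SnowflakeTypeUtil.getJavaType(SnowflakeType.FIXED, false);  // JAVA_BIGDECIMAL
* SnowflakeTypeUtil.getJavaType(SnowflakeType.OBJECT, true);  // JAVA_OBJECT (structured type)
* SnowflakeTypeUtil.getJavaType(SnowflakeType.OBJECT, false); // JAVA_STRING (semi-structured)
* }</pre>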
* * @param type the Snowflake type * @param isStructuredType whether this is a structured type * @return the corresponding Java data type */ public static SnowflakeTypeHelper.JavaDataType getJavaType( SnowflakeType type, boolean isStructuredType) { // TODO structuredType fill for Array and Map: SNOW-1234216, SNOW-1234214 switch (type) { case TEXT: case CHAR: return SnowflakeTypeHelper.JavaDataType.JAVA_STRING; case INTEGER: return SnowflakeTypeHelper.JavaDataType.JAVA_LONG; case FIXED: case DECFLOAT: return SnowflakeTypeHelper.JavaDataType.JAVA_BIGDECIMAL; case REAL: return SnowflakeTypeHelper.JavaDataType.JAVA_DOUBLE; case TIMESTAMP: case TIME: case TIMESTAMP_LTZ: case TIMESTAMP_NTZ: case TIMESTAMP_TZ: case DATE: return SnowflakeTypeHelper.JavaDataType.JAVA_TIMESTAMP; case INTERVAL_YEAR_MONTH: return SnowflakeTypeHelper.JavaDataType.JAVA_PERIOD; case INTERVAL_DAY_TIME: return SnowflakeTypeHelper.JavaDataType.JAVA_DURATION; case BOOLEAN: return SnowflakeTypeHelper.JavaDataType.JAVA_BOOLEAN; case ARRAY: case VARIANT: case VECTOR: return SnowflakeTypeHelper.JavaDataType.JAVA_STRING; case BINARY: return SnowflakeTypeHelper.JavaDataType.JAVA_BYTES; case ANY: return SnowflakeTypeHelper.JavaDataType.JAVA_OBJECT; case OBJECT: if (isStructuredType) { return SnowflakeTypeHelper.JavaDataType.JAVA_OBJECT; } else { return SnowflakeTypeHelper.JavaDataType.JAVA_STRING; } default: // Those types are not supported, but there is no reason to panic return SnowflakeTypeHelper.JavaDataType.JAVA_STRING; } } /** * Returns a lexical value of an object that is suitable for Snowflake import serialization * * @param o Java object representing value in Snowflake. * @param dateFormat java.sql.Date or java.sql.Time format * @param timeFormat java.sql.Time format * @param timestampFormat first part of java.sql.Timestamp format * @param timestampTzFormat last part of java.sql.Timestamp format * @return String representation of it that can be used for creating a load file */ public static String lexicalValue( Object o, DateFormat dateFormat, DateFormat timeFormat, DateFormat timestampFormat, DateFormat timestampTzFormat) { if (o == null) { return null; } Class<?> c = o.getClass(); if (c == Date.class || c == java.sql.Date.class) { return synchronizeFormat(o, dateFormat); } if (c == java.sql.Time.class) { return synchronizeFormat(o, timeFormat); } if (c == java.sql.Timestamp.class) { String stdFmt = o.toString(); String nanos = stdFmt.substring(stdFmt.indexOf('.') + 1); String ret1 = synchronizeFormat(o, timestampFormat); String ret2 = synchronizeFormat(o, timestampTzFormat); return ret1 + nanos + ret2; } if (c == Double.class) { return Double.toHexString((Double) o); } if (c == Float.class) { return Float.toHexString((Float) o); } if (c == Integer.class) { return o.toString(); } if (c == Period.class) { return o.toString(); } if (c == Duration.class) { return o.toString(); } if (c == BigDecimal.class) { return o.toString(); } if (c == byte[].class) { return new SFBinary((byte[]) o).toHex(); } return String.valueOf(o); } private static synchronized String synchronizeFormat(Object o, DateFormat sdf) { return sdf.format(o); } /** * Escapes a string value for CSV format.
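* <p>For example:
*
* <pre>{@code
* escapeForCSV(null);         // returns ""      (null becomes an unquoted empty string)
* escapeForCSV("");           // returns "\"\""  (empty string is quoted)
* escapeForCSV("a,b");        // returns "\"a,b\"" (special characters force quoting)
* escapeForCSV("say \"hi\""); // embedded quotes are doubled: "say ""hi"""
* }</pre>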
* * @param value the value to escape * @return the escaped value */ public static String escapeForCSV(String value) { if (value == null) { return ""; // null => an empty string without quotes } if (value.isEmpty()) { return "\"\""; // an empty string => an empty string with quotes } if (value.indexOf('"') >= 0 || value.indexOf('\n') >= 0 || value.indexOf(',') >= 0 || value.indexOf('\\') >= 0) { // anything else including double quotes or commas will have quotes return '"' + value.replaceAll("\"", "\"\"") + '"'; } else { return value; } } /** * Converts a Java SQL type to a Snowflake type. * * @param javaType the Java SQL type (from {@link java.sql.Types}) * @param session the session object * @return the corresponding Snowflake type * @throws net.snowflake.client.api.exception.SnowflakeSQLException if the type is not supported */ public static SnowflakeType javaTypeToSFType(int javaType, SFBaseSession session) throws net.snowflake.client.api.exception.SnowflakeSQLException { switch (javaType) { case java.sql.Types.INTEGER: case java.sql.Types.BIGINT: case java.sql.Types.DECIMAL: case java.sql.Types.NUMERIC: case java.sql.Types.SMALLINT: case java.sql.Types.TINYINT: return SnowflakeType.FIXED; case java.sql.Types.CHAR: case java.sql.Types.VARCHAR: return SnowflakeType.TEXT; case java.sql.Types.BINARY: return SnowflakeType.BINARY; case java.sql.Types.FLOAT: case java.sql.Types.DOUBLE: return SnowflakeType.REAL; case java.sql.Types.DATE: return SnowflakeType.DATE; case java.sql.Types.TIME: return SnowflakeType.TIME; case java.sql.Types.TIMESTAMP: return SnowflakeType.TIMESTAMP; case java.sql.Types.BOOLEAN: return SnowflakeType.BOOLEAN; case java.sql.Types.STRUCT: return SnowflakeType.OBJECT; case java.sql.Types.ARRAY: return SnowflakeType.ARRAY; case java.sql.Types.NULL: return SnowflakeType.ANY; default: throw new SnowflakeSQLLoggedException( session, ErrorCode.DATA_TYPE_NOT_SUPPORTED.getMessageCode(), SqlState.FEATURE_NOT_SUPPORTED, javaType); } } /** * Converts a Java SQL type to a Java class name. 
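* <p>For example:
*
* <pre>{@code
* SnowflakeTypeUtil.javaTypeToClassName(Types.VARCHAR); // "java.lang.String"
* SnowflakeTypeUtil.javaTypeToClassName(Types.BIGINT);  // "java.lang.Long"
* SnowflakeTypeUtil.javaTypeToClassName(Types.OTHER);   // throws SQLFeatureNotSupportedException
* }</pre>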
* * @param type the Java SQL type (from {@link java.sql.Types}) * @return the corresponding Java class name * @throws SQLException if the type is not supported */ public static String javaTypeToClassName(int type) throws SQLException { switch (type) { case java.sql.Types.VARCHAR: case java.sql.Types.CHAR: case java.sql.Types.STRUCT: case java.sql.Types.ARRAY: return String.class.getName(); case java.sql.Types.BINARY: return SnowflakeTypeHelper.BINARY_CLASS_NAME; case java.sql.Types.INTEGER: return Integer.class.getName(); case java.sql.Types.DECIMAL: return BigDecimal.class.getName(); case java.sql.Types.DOUBLE: return Double.class.getName(); case java.sql.Types.TIMESTAMP: case java.sql.Types.TIMESTAMP_WITH_TIMEZONE: return Timestamp.class.getName(); case java.sql.Types.DATE: return java.sql.Date.class.getName(); case java.sql.Types.TIME: return Time.class.getName(); case java.sql.Types.BOOLEAN: return Boolean.class.getName(); case java.sql.Types.BIGINT: return Long.class.getName(); case java.sql.Types.SMALLINT: return Short.class.getName(); default: throw new java.sql.SQLFeatureNotSupportedException( String.format("No corresponding Java type is found for java.sql.Type: %d", type)); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/loader/BufferStage.java ================================================ package net.snowflake.client.internal.loader; import static java.nio.charset.StandardCharsets.UTF_8; import java.io.BufferedOutputStream; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.io.OutputStream; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Date; import java.util.concurrent.atomic.AtomicLong; import java.util.zip.GZIPOutputStream; import net.snowflake.client.api.loader.Loader; import net.snowflake.client.api.loader.Operation; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; /** * Class representing a unit of work for uploader. Corresponds to a collection of data files for a * single processing stage. 
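* <p>A typical lifecycle, as driven by {@code StreamLoader} (a sketch; {@code loader} and
* {@code rowBytes} are illustrative placeholders):
*
* <pre>{@code
* BufferStage stage =
*     new BufferStage(loader, Operation.INSERT, BufferStage.FILE_BUCKET_SIZE, BufferStage.FILE_SIZE);
* stage.stageData(rowBytes);  // called per row; rotates and uploads files at the size threshold
* stage.completeUploading();  // flushes the last file and waits for all uploads to finish
* }</pre>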
*/ public class BufferStage { private static final SFLogger logger = SFLoggerFactory.getLogger(BufferStage.class); public enum State { CREATED, LOADING, LOADED, EMPTY, UPLOADED, VALIDATED, VALIDATED_CLEANED, ERROR, PROCESSED, CLEANED, REMOVED }; public static final int FILE_BUCKET_SIZE = 64; // threshold to schedule processing public static final long FILE_SIZE = 50L * 1024L * 1024L; // individual file, 50Mb private State _state; private final File _directory; private final String _location; private final String _stamp; private final Operation _op; private final long _csvFileBucketSize; private final long _csvFileSize; // Last stage in a loader gets to terminate private volatile boolean _terminate = false; private String _id; // Data bytes (uncompressed) in the current file private int _currentSize = 0; // Total number of rows submitted to this stage private int _rowCount = 0; // Number of files in the stage private int _fileCount = 0; // Counter for ID generation private static AtomicLong MARK = new AtomicLong(1); // Parent loader private StreamLoader _loader; // Current output stream private OutputStream _outstream = null; // Current file private File _file = null; // List of all scheduled uploaders private ArrayList _uploaders = new ArrayList<>(); BufferStage(StreamLoader loader, Operation op, long csvFileBucketSize, long csvFileSize) { logger.debug("Operation: {}", op); _state = State.CREATED; _loader = loader; _stamp = new SimpleDateFormat("yyyyMMdd'_'HHmmss'_'SSS").format(new Date()); _csvFileBucketSize = csvFileBucketSize; _csvFileSize = csvFileSize; long mark = MARK.getAndIncrement() % 10000000; // Security Fix: A table name can include slashes and dots, so if a table // name is used as part of a file name, the file can be created // outside of the given directory. This replaces slashes with underscores. _location = BufferStage.escapeFileSeparatorChar(_loader.getTable()) + File.separatorChar + op.name() + File.separatorChar + _stamp + "_" + _loader.getNoise() + '_' + mark; _id = BufferStage.escapeFileSeparatorChar(_loader.getTable()) + "_" + _stamp + '_' + mark; String localStageDirectory = _loader.getBase() + File.separatorChar + _location; _directory = new File(localStageDirectory); if (!_directory.mkdirs()) { RuntimeException ex = new RuntimeException( "Could not initialize the local staging area. 
" + "Make sure the directory is writable and readable: " + localStageDirectory); _loader.abort(ex); throw ex; } _op = op; openFile(); } /** Create local file for caching data before upload */ private synchronized void openFile() { try { String fName = _directory.getAbsolutePath() + File.separatorChar + StreamLoader.FILE_PREFIX + _stamp + _fileCount; if (_loader._compressDataBeforePut) { fName += StreamLoader.FILE_SUFFIX; } logger.debug("openFile: {}", fName); OutputStream fileStream = new FileOutputStream(fName); if (_loader._compressDataBeforePut) { OutputStream gzipOutputStream = new GZIPOutputStream(fileStream, 64 * 1024, true) { { def.setLevel((int) _loader._compressLevel); } }; _outstream = new BufferedOutputStream(gzipOutputStream); } else { _outstream = new BufferedOutputStream(fileStream); } _file = new File(fName); _fileCount++; } catch (IOException ex) { _loader.abort(new Loader.ConnectionError(Utils.getCause(ex))); } } private static byte[] newLineBytes = "\n".getBytes(UTF_8); // not thread safe boolean stageData(final byte[] line) throws IOException { if (this._rowCount % 10000 == 0) { logger.debug("rowCount: {}, currentSize: {}", this._rowCount, _currentSize); } _outstream.write(line); _currentSize += line.length; _outstream.write(newLineBytes); this._rowCount++; if (_loader._testRemoteBadCSV) { // inject garbage for a negative test case // The file will be uploaded to the stage, but COPY command will // fail and raise LoaderError _outstream.write(new byte[] {(byte) 0x01, (byte) 0x02}); _outstream.write(newLineBytes); this._rowCount++; } if (_currentSize >= this._csvFileSize) { logger.debug( "name: {}, currentSize: {}, Threshold: {}," + " fileCount: {}, fileBucketSize: {}", _file.getAbsolutePath(), _currentSize, this._csvFileSize, _fileCount, this._csvFileBucketSize); _outstream.flush(); _outstream.close(); _outstream = null; FileUploader fu = new FileUploader(_loader, _location, _file); fu.upload(); _uploaders.add(fu); openFile(); _currentSize = 0; } return _fileCount > this._csvFileBucketSize; } /** * Wait for all files to finish uploading and schedule stage for processing * * @throws IOException raises an exception if IO error occurs */ void completeUploading() throws IOException { logger.debug( "name: {}, currentSize: {}, Threshold: {}," + " fileCount: {}, fileBucketSize: {}", _file.getAbsolutePath(), _currentSize, this._csvFileSize, _fileCount, this._csvFileBucketSize); _outstream.flush(); _outstream.close(); // last file if (_currentSize > 0) { FileUploader fu = new FileUploader(_loader, _location, _file); fu.upload(); _uploaders.add(fu); } else { // delete empty file _file.delete(); } for (FileUploader fu : _uploaders) { // Finish all files being uploaded fu.join(); } // Delete the directory once we are done (for easier tracking // of what is going on) _directory.deleteOnExit(); if (this._rowCount == 0) { setState(State.EMPTY); } } public String getRemoteLocation() { return remoteSeparator(_location); } Operation getOp() { return _op; } public boolean isTerminate() { return _terminate; } public void setTerminate(boolean terminate) { this._terminate = terminate; } public String getId() { return _id; } public void setId(String _id) { this._id = _id; } public State state() { return _state; } public void setState(State state) { if (_state != state) { // Logging goes here // Need to keep trace of states. 
_state = state; } } int getRowCount() { return _rowCount; } // convert any back slashes to forward slashes if necessary when converting // a local filename to one suitable for S3 private String remoteSeparator(String fname) { if (File.separatorChar == '\\') { return fname.replace("\\", "/"); } else { return fname; } } /** * Escape file separator char to underscore. This prevents the file name from using file path * separator. * * @param fname The file name to escape * @return escaped file name */ private static String escapeFileSeparatorChar(String fname) { if (File.separatorChar == '\\') { return fname.replaceAll(File.separator + File.separator, "_"); } else { return fname.replaceAll(File.separator, "_"); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/loader/FileUploader.java ================================================ package net.snowflake.client.internal.loader; import java.io.File; import java.sql.ResultSet; import java.sql.Statement; import net.snowflake.client.api.loader.Loader; import net.snowflake.client.internal.api.implementation.connection.SnowflakeConnectionImpl; import net.snowflake.client.internal.jdbc.SnowflakeFileTransferAgent; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; /** Class responsible for uploading a single data file. */ public class FileUploader implements Runnable { private static final SFLogger logger = SFLoggerFactory.getLogger(FileUploader.class); private static final int RETRY = 6; private final Thread _thread; private final StreamLoader _loader; private final String _stage; private final File _file; FileUploader(StreamLoader loader, String stage, File file) { logger.trace("Creating new FileUploader", false); _loader = loader; _thread = new Thread(this); _thread.setName("FileUploaderThread"); _stage = stage; _file = file; } public synchronized void upload() { // throttle up will wait if too many files are uploading logger.trace("Starting FileUploader", false); _loader.throttleUp(); _thread.start(); } @Override public void run() { Throwable previousException = null; try { for (int attempt = 0; attempt <= RETRY; attempt++) { if (attempt == RETRY) { if (previousException != null) { _loader.abort( new Loader.ConnectionError( String.format( "File could not be uploaded to remote stage " + "after retrying %d times: %s", RETRY, _file.getCanonicalPath()), Utils.getCause(previousException))); } else { _loader.abort( new Loader.ConnectionError( String.format( "File could not be uploaded to remote stage " + "after retrying %d times: %s", RETRY, _file.getCanonicalPath()))); } break; } if (attempt > 0) { logger.debug("Will retry PUT after {} seconds", Math.pow(2, attempt)); Thread.sleep(1000 * ((int) Math.pow(2, attempt))); } // In test mode force fail first file if (_loader._testMode) { // TEST MODE if (attempt < 2) { _loader .getPutConnection() .unwrap(SnowflakeConnectionImpl.class) .setInjectFileUploadFailure(_file.getName()); } else { // so that retry now succeeds. _loader .getPutConnection() .unwrap(SnowflakeConnectionImpl.class) .setInjectFileUploadFailure(null); } } // Upload local files to a remote stage // No double quote is added to _loader.getRemoteStage(), since // it is most likely "~". If not, we may need to double quote // them. String remoteStage = "@" + _loader.getRemoteStage() + "/" + remoteSeparator(_stage); String putStatement = "PUT " + (attempt > 0 ?
"/* retry:" + attempt + " */ " : "") + "'file://" + _file.getCanonicalPath().replaceAll("\\\\", "\\\\\\\\") + "' '" + remoteStage + "' parallel=10" // upload chunks in parallel + " overwrite=true"; // skip file existence check if (_loader._compressDataBeforePut) { putStatement += " auto_compress=false" + " SOURCE_COMPRESSION=gzip"; } else if (_loader._compressFileByPut) { putStatement += " auto_compress=true"; } else { // don't compress file at all putStatement += " auto_compress=false"; } Statement statement = _loader.getPutConnection().createStatement(); try { logger.debug("Put Statement start: {}", putStatement); statement.execute(putStatement); logger.debug("Put Statement end: {}", putStatement); ResultSet putResult = statement.getResultSet(); putResult.next(); String file = localSeparator( putResult.getString(SnowflakeFileTransferAgent.UploadColumns.source.name())); String status = putResult.getString(SnowflakeFileTransferAgent.UploadColumns.status.name()); String message = putResult.getString(SnowflakeFileTransferAgent.UploadColumns.message.name()); if (status != null && status.equals(SnowflakeFileTransferAgent.ResultStatus.UPLOADED.name())) { // UPLOAD is success _file.delete(); break; } else { // The log level should be WARNING for a single upload failure. if (message.startsWith("Simulated upload failure")) { logger.debug( "Failed to upload a file:" + " status={}," + " filename={}," + " message={}", status, file, message); } else { logger.debug( "Failed to upload a file:" + " status={}," + " filename={}," + " message={}", status, file, message); } } } catch (Throwable t) { // The log level for unknown error is set to SEVERE logger.error( String.format( "Failed to PUT on attempt: attempt=[%s], " + "Message=[%s]", attempt, t.getMessage()), t.getCause()); previousException = t; } } } catch (Throwable t) { logger.error("PUT exception", t); _loader.abort(new Loader.ConnectionError(t.getMessage(), t.getCause())); } finally { _loader.throttleDown(); } } public void join() { logger.trace("Joining threads", false); try { _thread.join(0); } catch (InterruptedException ex) { logger.error(ex.getMessage(), ex); } } /** * convert any back slashes to forward slashes if necessary when converting a local filename to a * one suitable for S3 * * @param fname a file name to PUT * @return A fname string for S3 */ private String remoteSeparator(String fname) { if (File.separatorChar == '\\') { return fname.replace("\\", "/"); } else { return fname; } } /** * convert any forward slashes to back slashes if necessary when converting a S3 file name to a * local file name * * @param fname a file name to PUT * @return A fname string for the local FS (Windows/other Unix like OS) */ private String localSeparator(String fname) { if (File.separatorChar == '\\') { return fname.replace("/", "\\"); } else { return fname; } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/loader/OnError.java ================================================ package net.snowflake.client.internal.loader; import java.util.regex.Pattern; /** COPY ON_ERROR option */ class OnError { private static final Pattern validPattern = Pattern.compile("(?i)(?:ABORT_STATEMENT|CONTINUE|SKIP_FILE(?:_\\d+%?)?)"); /** Default behavior for ON_ERROR for Loader API. */ static final String DEFAULT = "CONTINUE"; private OnError() {} /** * Validates ON_ERROR value and return true if valid otherwise false. * * @param value ON_ERROR value * @return true if valid otherwise false. 
*/ static boolean validate(String value) { return value != null && validPattern.matcher(value).matches(); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/loader/ProcessQueue.java ================================================ package net.snowflake.client.internal.loader; import static java.lang.Math.toIntExact; import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.util.LinkedList; import java.util.List; import net.snowflake.client.api.loader.LoadResultListener; import net.snowflake.client.api.loader.Loader; import net.snowflake.client.api.loader.LoadingError; import net.snowflake.client.api.loader.Operation; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; /** * This class is responsible for processing a collection of uploaded data files represented by * BufferStage class */ public class ProcessQueue implements Runnable { private static final SFLogger logger = SFLoggerFactory.getLogger(ProcessQueue.class); private final Thread _thread; private final StreamLoader _loader; public ProcessQueue(StreamLoader loader) { logger.debug("", false); _loader = loader; _thread = new Thread(this); _thread.setName("ProcessQueueThread"); _thread.start(); } @Override public void run() { while (true) { BufferStage stage = null; Connection conn = _loader.getProcessConnection(); State currentState = State.INITIALIZE; String currentCommand = null; try { stage = _loader.takeProcess(); if (stage.getRowCount() == 0) { // Nothing was written to that stage if (stage.isTerminate()) { break; } else { continue; } } // Place where the files are. // No double quote is added _loader.getRemoteStage(), since // it is mostly likely to be "~". If not, we may need to double quote // them. String remoteStage = "@" + _loader.getRemoteStage() + "/" + stage.getRemoteLocation(); // process uploaded files // Loader.abort() and finish() are also synchronized on this synchronized (_loader) { String updateKeys = getOn(_loader.getKeys(), "T", "S"); if (stage.getOp() != Operation.INSERT && updateKeys.isEmpty()) { _loader.abort(new RuntimeException("No update key column is specified for the job.")); } if (_loader.isAborted()) { if (!_loader._preserveStageFile) { currentCommand = "RM '" + remoteStage + "'"; logger.debug(currentCommand, true); conn.createStatement().execute(currentCommand); } else { logger.debug( "Error occurred. The remote stage is preserved for " + "further investigation: {}", remoteStage); } if (stage.isTerminate()) { break; } else { continue; } // Do not do anything to this stage. // Everything was rolled back upon abort() call } // Create a temporary table to hold all uploaded data long loaded = 0; long parsed = 0; int errorCount = 0; String lastErrorRow = ""; // Create temp table to load data (may have a subset of columns) logger.debug("Creating Temporary Table: name={}", stage.getId()); currentState = State.CREATE_TEMP_TABLE; List allColumns = getAllColumns(conn); // use like to make sure columns in temporary table // contains properties (e.g., NOT NULL) from the source table currentCommand = "CREATE TEMPORARY TABLE \"" + stage.getId() + "\" LIKE " + _loader.getFullTableName(); List selectedColumns = _loader.getColumns(); conn.createStatement().execute(currentCommand); // In case clustering key exists, drop it from the temporary table so that unused // columns can be dropped from the table without errors. 
String dropClusteringKey = "alter table \"" + stage.getId() + "\" drop clustering key"; conn.createStatement().execute(dropClusteringKey); // the temp table can contain only a subset of columns // so remove unselected columns for (String col : allColumns) { if (!selectedColumns.contains(col)) { String dropUnSelectedColumn = "alter table \"" + stage.getId() + "\" drop column \"" + col + "\""; conn.createStatement().execute(dropUnSelectedColumn); } } // Load data there logger.debug( "COPY data in the stage to table:" + " stage={}," + " name={}", remoteStage, stage.getId()); currentState = State.COPY_INTO_TABLE; currentCommand = "COPY INTO \"" + stage.getId() + "\" FROM '" + remoteStage + "' on_error='" + _loader._onError + "'" + " file_format=(" + " field_optionally_enclosed_by='\"'" + " empty_field_as_null=" + Boolean.toString(!_loader._copyEmptyFieldAsEmpty) + ")"; ResultSet rs = conn.createStatement().executeQuery(currentCommand); while (rs.next()) { // Get the number of rows actually loaded loaded += rs.getLong("rows_loaded"); // Get the number of rows parsed parsed += rs.getLong("rows_parsed"); } int errorRecordCount = toIntExact(parsed - loaded); logger.debug( "errorRecordCount=[{}]," + " parsed=[{}]," + " loaded=[{}]", errorRecordCount, parsed, loaded); LoadResultListener listener = _loader.getListener(); listener.addErrorRecordCount(errorRecordCount); if (loaded == stage.getRowCount()) { // successfully loaded everything logger.debug( "COPY command successfully finished:" + " stage={}," + " name={}", remoteStage, stage.getId()); listener.addErrorCount(0); } else { logger.debug( "Found errors in COPY command:" + " stage={}," + " name={}", remoteStage, stage.getId()); if (listener.needErrors()) { currentState = State.COPY_INTO_TABLE_ERROR; currentCommand = "COPY INTO \"" + stage.getId() + "\" FROM '" + remoteStage + "' validation_mode='return_all_errors'" + " file_format=(" + "field_optionally_enclosed_by='\"'" + "empty_field_as_null=" + Boolean.toString(!_loader._copyEmptyFieldAsEmpty) + ")"; ResultSet errorsSet = conn.createStatement().executeQuery(currentCommand); Loader.DataError dataError = null; while (errorsSet.next()) { errorCount++; String rn = errorsSet.getString(LoadingError.ErrorProperty.ROW_NUMBER.name()); if (rn != null && !lastErrorRow.equals(rn)) { // de-duping records with multiple errors lastErrorRow = rn; } LoadingError loadError = new LoadingError(errorsSet, stage, _loader); listener.addError(loadError); if (dataError == null) { dataError = loadError.getException(); } } logger.debug("errorCount: {}", errorCount); listener.addErrorCount(errorCount); if (listener.throwOnError()) { // stop operation and raise the error _loader.abort(dataError); if (!_loader._preserveStageFile) { logger.debug("RM: {}", remoteStage); conn.createStatement().execute("RM '" + remoteStage + "'"); } else { logger.error( "Error occurred. 
The remote stage is preserved for " + "further investigation: {}", remoteStage); } if (stage.isTerminate()) { break; } else { continue; } } } } stage.setState(BufferStage.State.VALIDATED); // Generate set and values statement StringBuilder setStatement = null; StringBuilder valueStatement = null; if (stage.getOp() != Operation.INSERT && stage.getOp() != Operation.DELETE) { setStatement = new StringBuilder(" "); valueStatement = new StringBuilder("("); for (int c = 0; c < _loader.getColumns().size(); ++c) { String column = _loader.getColumns().get(c); if (c > 0) { setStatement.append(", "); valueStatement.append(" , "); } setStatement .append("T.\"") .append(column) .append("\"=") .append("S.\"") .append(column) .append("\""); valueStatement.append("S.\"").append(column).append("\""); } valueStatement.append(")"); } // generate statement for processing currentState = State.INGEST_DATA; String loadStatement; switch (stage.getOp()) { case INSERT: { loadStatement = "INSERT INTO " + _loader.getFullTableName() + "(" + _loader.getColumnsAsString() + ")" + " SELECT " + _loader.getStageColumnsAsString() + " FROM \"" + stage.getId() + "\""; break; } case DELETE: { loadStatement = "DELETE FROM " + _loader.getFullTableName() + " T USING \"" + stage.getId() + "\" AS S WHERE " + updateKeys; break; } case MODIFY: { loadStatement = "MERGE INTO " + _loader.getFullTableName() + " T USING \"" + stage.getId() + "\" AS S ON " + updateKeys + " WHEN MATCHED THEN UPDATE SET " + setStatement; break; } case UPSERT: { loadStatement = "MERGE INTO " + _loader.getFullTableName() + " T USING \"" + stage.getId() + "\" AS S ON " + updateKeys + " WHEN MATCHED THEN UPDATE SET " + setStatement + " WHEN NOT MATCHED THEN INSERT(" + _loader.getColumnsAsString() + ") VALUES" + valueStatement; break; } default: loadStatement = ""; } currentCommand = loadStatement; logger.debug("Load Statement: {}", loadStatement); Statement s = conn.createStatement(); s.execute(loadStatement); stage.setState(BufferStage.State.PROCESSED); currentState = State.FINISH; currentCommand = null; switch (stage.getOp()) { case INSERT: case UPSERT: { _loader.getListener().addProcessedRecordCount(stage.getOp(), stage.getRowCount()); _loader.getListener().addOperationRecordCount(stage.getOp(), s.getUpdateCount()); break; } case DELETE: case MODIFY: { // the number of successful DELETE is the number // of processed rows and not the number of given // rows. _loader.getListener().addProcessedRecordCount(stage.getOp(), s.getUpdateCount()); _loader.getListener().addOperationRecordCount(stage.getOp(), s.getUpdateCount()); break; } } // delete stage file if all success conn.createStatement().execute("RM '" + remoteStage + "'"); if (stage.isTerminate()) { break; } } } catch (InterruptedException ex) { logger.error("Interrupted", ex); break; } catch (Exception ex) { String msg = String.format("State: %s, %s, %s", currentState, currentCommand, ex.getMessage()); _loader.abort(new Loader.ConnectionError(msg, Utils.getCause(ex))); logger.error(msg, true); if (stage == null || stage.isTerminate()) { break; } } } } private List getAllColumns(final Connection conn) throws SQLException { List columns = new LinkedList<>(); ResultSet result = conn.createStatement() .executeQuery("show " + "columns" + " in " + _loader.getFullTableName()); while (result.next()) { String col = result.getString("column_name"); columns.add(col); } return columns; } private String getOn(List keys, String L, String R) { if (keys == null) { return ""; } // L and R don't need to be quoted. 
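// For example, keys=["ID", "TS"] with L="T", R="S" produces:
//   T."ID" = S."ID" AND T."TS" = S."TS"
// which is used above as the ON clause for MERGE and the WHERE clause for DELETE.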
StringBuilder sb = keys.size() > 1 ? new StringBuilder(64) : new StringBuilder(); for (int i = 0; i < keys.size(); i++) { if (i > 0) { sb.append("AND "); } sb.append(L); sb.append(".\""); sb.append(keys.get(i)); sb.append("\" = "); sb.append(R); sb.append(".\""); sb.append(keys.get(i)); sb.append("\" "); } return sb.toString(); } public void join() { logger.trace("Joining threads", false); try { _thread.join(0); } catch (InterruptedException ex) { logger.debug("Exception: ", ex); } } private enum State { INITIALIZE, CREATE_TEMP_TABLE, COPY_INTO_TABLE, COPY_INTO_TABLE_ERROR, INGEST_DATA, FINISH } } ================================================ FILE: src/main/java/net/snowflake/client/internal/loader/PutQueue.java ================================================ package net.snowflake.client.internal.loader; import java.io.IOException; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; /** * Queue that sequentially finalizes BufferStage uploads and schedules them for processing in * ProcessQueue. */ public class PutQueue implements Runnable { private static final SFLogger logger = SFLoggerFactory.getLogger(PutQueue.class); private final Thread _thread; private final StreamLoader _loader; public PutQueue(StreamLoader loader) { logger.trace("Creating new PutQueue", false); _loader = loader; _thread = new Thread(this); _thread.setName("PutQueueThread"); _thread.start(); } @Override public void run() { while (true) { BufferStage stage = null; try { stage = _loader.takePut(); if (stage.getRowCount() == 0) { // Nothing was written to that stage if (stage.isTerminate()) { _loader.queueProcess(stage); stage.completeUploading(); break; } else { continue; } } // Uploads the stage stage.completeUploading(); // Schedules it for processing _loader.queueProcess(stage); if (stage.isTerminate()) { break; } } catch (InterruptedException | IOException ex) { logger.error("Exception: ", ex); break; } finally { } } } public void join() { try { _thread.join(0); } catch (InterruptedException ex) { logger.error("Exception: ", ex); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/loader/StreamLoader.java ================================================ package net.snowflake.client.internal.loader; import static java.nio.charset.StandardCharsets.UTF_8; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import java.io.File; import java.io.IOException; import java.sql.Connection; import java.sql.DatabaseMetaData; import java.sql.ResultSet; import java.sql.SQLException; import java.text.DateFormat; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Calendar; import java.util.GregorianCalendar; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.TimeZone; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.zip.Deflater; import net.snowflake.client.api.loader.LoadResultListener; import net.snowflake.client.api.loader.Loader; import net.snowflake.client.api.loader.LoaderProperty; import net.snowflake.client.api.loader.LoadingError; import net.snowflake.client.api.loader.Operation; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import net.snowflake.client.internal.jdbc.util.SnowflakeTypeHelper; import net.snowflake.client.internal.jdbc.util.SnowflakeTypeUtil; import 
net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; /** Stream Loader */ public class StreamLoader implements Loader, Runnable { private static final SFLogger logger = SFLoggerFactory.getLogger(StreamLoader.class); private static final String SYSTEM_PARAMETER_PREFIX = "net.snowflake.client.loader."; // Temporary directory used for data cache private static final String tmpdir = systemGetProperty("java.io.tmpdir"); private static final String BASE = tmpdir + (!(tmpdir.endsWith("/") || tmpdir.endsWith("\\")) ? File.separatorChar : "") + "snowflake" + File.separatorChar + "stage"; static final String FILE_PREFIX = "stream_"; static final String FILE_SUFFIX = ".gz"; /** Default batch row size */ private static final long DEFAULT_BATCH_ROW_SIZE = -1L; public static DatabaseMetaData metadata; private BufferStage _stage = null; private Operation _op = null; private boolean _startTransaction = false; // force not to truncate in case where loader.start is called in // multiple times in a single job. private boolean _is_first_start_call = true; // force not to commit or rollback in case where loader.finish is called // in multiple times in a single job. private boolean _is_last_finish_call = true; private boolean _oneBatch = false; private boolean _truncate = false; private String _before = null; private String _after = null; private ArrayBlockingQueue _queueData; private Thread _thread; private ArrayBlockingQueue _queuePut; private PutQueue _put; private ArrayBlockingQueue _queueProcess; private ProcessQueue _process; private String _remoteStage = "~"; private String _table; private String _schema; private String _database; private List _columns; private Map _vectorColumnsNameAndSize = new HashMap(); // Vector type can be FLOAT or INT private String _vectorType; private List _keys; private long _batchRowSize = DEFAULT_BATCH_ROW_SIZE; private long _csvFileBucketSize = BufferStage.FILE_BUCKET_SIZE; private long _csvFileSize = BufferStage.FILE_SIZE; boolean _testRemoteBadCSV = false; // TEST: inject bad csv in remote stage boolean _preserveStageFile = false; // reserve stage file private boolean _useLocalTimezone = false; // use local timezone instead of UTC private boolean _mapTimeToTimestamp = false; // map TIME to TIMESTAMP. Informatica V1 connector behavior boolean _compressDataBeforePut = true; // compress data before PUT boolean _compressFileByPut = false; // compress file by PUT long _compressLevel = Deflater.BEST_SPEED; // compression level used to compress data before PUT String _onError = OnError.DEFAULT; boolean _copyEmptyFieldAsEmpty = false; // COPY command option to set EMPTY_FIELD_AS_NULL = false boolean _testMode = false; private final Connection _putConn; private final Connection _processConn; // a per-instance bit of random noise to make filenames more unique private final String _noise; // Track fatal errors private AtomicBoolean _active = new AtomicBoolean(false); private AtomicBoolean _aborted = new AtomicBoolean(false); private RuntimeException _abortCause = new ConnectionError("Unknown exception"); private AtomicInteger _throttleCounter = new AtomicInteger(0); private final GregorianCalendar _calendarUTC = new GregorianCalendar(TimeZone.getTimeZone("UTC")); private GregorianCalendar _calendarLocal; /** Resets calendar when start the job. 
*/ private void resetCalendar() { _calendarUTC.clear(); _calendarLocal = new GregorianCalendar(TimeZone.getDefault()); _calendarLocal.clear(); } private DateFormat _dateFormat; private DateFormat _timeFormat; private DateFormat _timestampFormat; private DateFormat _timestampTzFormat; public StreamLoader( Map properties, Connection putConnection, Connection processConnection) { _putConn = putConnection; _processConn = processConnection; // Sort properties by ordinal to ensure columns is processed after table, schema and db to // execute more performant metadata queries properties.entrySet().stream() .sorted(Map.Entry.comparingByKey(Enum::compareTo)) .forEach(e -> setProperty(e.getKey(), e.getValue())); _noise = SnowflakeUtil.randomAlphaNumeric(6); } @Override public void setProperty(LoaderProperty property, Object value) { switch (property) { case tableName: _table = (String) value; break; case schemaName: _schema = (String) value; break; case databaseName: _database = (String) value; break; case remoteStage: _remoteStage = (String) value; break; case columns: if (value == null) { _columns = null; } else { final List typeCheckedColumns = new ArrayList<>(); for (Object e : (List) value) { typeCheckedColumns.add((String) e); } _columns = typeCheckedColumns; setVectorColumns(); } break; case keys: if (value == null) { _keys = null; } else { final List typeCheckedKeys = new ArrayList<>(); for (Object e : (List) value) { typeCheckedKeys.add((String) e); } _keys = typeCheckedKeys; } break; case operation: _op = (Operation) value; break; case startTransaction: _startTransaction = Boolean.valueOf(String.valueOf(value)); break; case oneBatch: _oneBatch = Boolean.valueOf(String.valueOf(value)); break; case truncateTable: _truncate = Boolean.valueOf(String.valueOf(value)); break; case executeBefore: _before = String.valueOf(value); break; case executeAfter: _after = String.valueOf(value); break; case isFirstStartCall: _is_first_start_call = Boolean.valueOf(String.valueOf(value)); break; case isLastFinishCall: _is_last_finish_call = Boolean.valueOf(String.valueOf(value)); break; case batchRowSize: _batchRowSize = parseLongValue(LoaderProperty.batchRowSize, value); break; case csvFileBucketSize: _csvFileBucketSize = parseLongValue(LoaderProperty.csvFileBucketSize, value); break; case csvFileSize: _csvFileSize = parseLongValue(LoaderProperty.csvFileSize, value); break; case preserveStageFile: _preserveStageFile = Boolean.valueOf(String.valueOf(value)); break; case useLocalTimezone: _useLocalTimezone = Boolean.valueOf(String.valueOf(value)); break; case copyEmptyFieldAsEmpty: _copyEmptyFieldAsEmpty = Boolean.valueOf(String.valueOf(value)); break; case mapTimeToTimestamp: // NOTE: this is a special flag to change mapping // from TIME. Informatica connector v1 maps to TIMESTAMP // but a legitimate behavior is supposed to be to TIME. _mapTimeToTimestamp = Boolean.valueOf(String.valueOf(value)); break; case compressDataBeforePut: _compressDataBeforePut = Boolean.valueOf(String.valueOf(value)); break; case compressFileByPut: _compressFileByPut = Boolean.valueOf(String.valueOf(value)); break; case compressLevel: _compressLevel = parseLongValue(LoaderProperty.compressLevel, value); if ((_compressLevel < Deflater.BEST_SPEED || _compressLevel > Deflater.BEST_COMPRESSION) && _compressLevel != Deflater.DEFAULT_COMPRESSION) { throw new IllegalArgumentException("invalid compression level"); } break; case onError: String v = String.valueOf(value); _onError = OnError.validate(v) ? 
v : OnError.DEFAULT; break; case testRemoteBadCSV: _testRemoteBadCSV = Boolean.valueOf(String.valueOf(value)); break; default: // nop, this should never happen } } private long parseLongValue(LoaderProperty name, Object value) { long ret; if (value instanceof String) { ret = Long.valueOf((String) value); } else if (value instanceof Long) { ret = (Long) value; } else if (value instanceof Integer) { ret = Long.valueOf((Integer) value); } else { throw new IllegalArgumentException( String.format("'%s' must be a LONG value", name.toString())); } return ret; } private void setPropertyBySystemProperty() { final String BATCH_ROW_SIZE_KEY = SYSTEM_PARAMETER_PREFIX + "batchRowSize"; final String CSV_FILE_BUCKET_SIZE = SYSTEM_PARAMETER_PREFIX + "csvFileBucketSize"; final String CSV_FILE_SIZE = SYSTEM_PARAMETER_PREFIX + "csvFileSize"; final String COMPRESS_DATA_BEFORE_PUT_KEY = SYSTEM_PARAMETER_PREFIX + "compressDataBeforePut"; final String COMPRESS_FILE_BY_PUT_KEY = SYSTEM_PARAMETER_PREFIX + "compressFileByPut"; final String COMPRESS_LEVEL = SYSTEM_PARAMETER_PREFIX + "compressLevel"; Properties props = System.getProperties(); for (String propKey : props.stringPropertyNames()) { String value = props.getProperty(propKey); if (BATCH_ROW_SIZE_KEY.equals(propKey)) { _batchRowSize = parseLongValue(LoaderProperty.batchRowSize, value); } else if (CSV_FILE_BUCKET_SIZE.equals(propKey)) { _csvFileBucketSize = parseLongValue(LoaderProperty.csvFileBucketSize, value); } else if (CSV_FILE_SIZE.equals(propKey)) { _csvFileSize = parseLongValue(LoaderProperty.csvFileSize, value); } else if (COMPRESS_DATA_BEFORE_PUT_KEY.equals(propKey)) { _compressDataBeforePut = Boolean.valueOf(value); } else if (COMPRESS_FILE_BY_PUT_KEY.equals(propKey)) { _compressFileByPut = Boolean.valueOf(value); } else if (COMPRESS_LEVEL.equals(propKey)) { _compressLevel = Long.valueOf(value); } } } private void initDateFormats() { resetCalendar(); _dateFormat = new SimpleDateFormat(SnowflakeTypeHelper.DATE_OR_TIME_FORMAT_PATTERN); if (_mapTimeToTimestamp) { // same format for TIME to TIMESTAMP _timeFormat = _dateFormat; } else { _timeFormat = new SimpleDateFormat(SnowflakeTypeHelper.TIME_FORMAT_PATTERN); } _timestampFormat = new SimpleDateFormat(SnowflakeTypeHelper.TIMESTAMP_FORMAT_PATTERN); _timestampTzFormat = new SimpleDateFormat(SnowflakeTypeHelper.TIMESTAMP_FORMAT_TZ_PATTERN); Calendar cal = !_useLocalTimezone ? _calendarUTC : _calendarLocal; _dateFormat.setCalendar(cal); _timeFormat.setCalendar(cal); _timestampFormat.setCalendar(cal); _timestampTzFormat.setCalendar(cal); } /** Starts the loader */ @Override public void start() { logger.debug("Start Loading", false); // validate parameters validateParameters(); if (_op == null) { this.abort(new ConnectionError("Loader started with no operation")); return; } initDateFormats(); initQueues(); if (_is_first_start_call) { // is this the first start call?
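// One-time job setup, run only on the first start() call of a job: optionally open a
// transaction, truncate the target table, and execute the executeBefore statement. Later
// start() calls within the same job skip these steps.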
try { if (_startTransaction) { logger.debug("Begin Transaction", false); _processConn.createStatement().execute("begin transaction"); } else { logger.debug("No Transaction started", false); } } catch (SQLException ex) { abort(new Loader.ConnectionError("Failed to start Transaction", Utils.getCause(ex))); } if (_truncate) { truncateTargetTable(); } try { if (_before != null) { logger.debug("Running Execute Before SQL", false); _processConn.createStatement().execute(_before); } } catch (SQLException ex) { abort( new Loader.ConnectionError( String.format("Execute Before SQL failed to run: %s", _before), Utils.getCause(ex))); } } } private void validateParameters() { logger.debug("Validate Parameters", false); if (Operation.INSERT != this._op) { if (this._keys == null || this._keys.isEmpty()) { throw new ConnectionError("Updating operations require keys"); } } setPropertyBySystemProperty(); logger.debug( "Database Name: {}, Schema Name: {}, Table Name: {}, " + "Remote Stage: {}, Columns: {}, Keys: {}, Operation: {}, " + "Start Transaction: {}, OneBatch: {}, Truncate Table: {}, " + "Execute Before: {}, Execute After: {}, Batch Row Size: {}, " + "CSV File Bucket Size: {}, CSV File Size: {}, Preserve Stage File: {}, " + "Use Local TimeZone: {}, Copy Empty Field As Empty: {}, " + "MapTimeToTimestamp: {}, Compress Data before PUT: {}, " + "Compress File By Put: {}, Compress Level: {}, OnError: {}", _database, _schema, _table, _remoteStage, _columns, _keys, _op, _startTransaction, _oneBatch, _truncate, _before, _after, _batchRowSize, _csvFileBucketSize, _csvFileSize, _preserveStageFile, _useLocalTimezone, _copyEmptyFieldAsEmpty, _mapTimeToTimestamp, _compressDataBeforePut, _compressFileByPut, _compressLevel, _onError); } String getNoise() { return _noise; } public void abort(RuntimeException t) { synchronized (this) { // Abort once, keep first error. logger.debug("Exception received. Aborting...", t); if (_aborted.getAndSet(true)) { return; } if (t != null) { _abortCause = t; } // Rollback and do not process anything else rollback(); } } boolean isAborted() { synchronized (this) { // don't synchronize unless the caller does return _aborted.get(); } } @Override public void rollback() { logger.debug("Rollback", false); try { terminate(); logger.debug("Rollback", false); this._processConn.createStatement().execute("rollback"); } catch (SQLException ex) { logger.error(ex.getMessage(), ex); } } @Override public void submitRow(final Object[] row) { try { if (_aborted.get()) { if (_listener.throwOnError()) { throw _abortCause; } return; } } catch (Exception ex) { abort(new Loader.ConnectionError("Throwing Error", Utils.getCause(ex))); } byte[] data = null; try { if (!_active.get()) { logger.debug("Inactive loader. 
Row ignored", false); return; } data = createCSVRecord(row); } catch (Exception ex) { abort(new Loader.ConnectionError("Creating data set for CSV", Utils.getCause(ex))); } try { writeBytes(data); _listener.addSubmittedRowCount(1); if (_listener.needSuccessRecords()) { _listener.recordProvided(_op, row); } } catch (Exception ex) { abort(new Loader.ConnectionError("Writing Bytes to CSV files", Utils.getCause(ex))); } if (_batchRowSize > 0 && _listener.getSubmittedRowCount() > 0 && (_listener.getSubmittedRowCount() % _batchRowSize) == 0) { logger.debug( "Flushing Queue: Submitted Row Count: {}, Batch Row Size: {}", _listener.getSubmittedRowCount(), _batchRowSize); // flush data loading try { flushQueues(); } catch (Exception ex) { abort(new Loader.ConnectionError("Flush Queues", Utils.getCause(ex))); } try { initQueues(); } catch (Exception ex) { abort(new Loader.ConnectionError("Init Queues", Utils.getCause(ex))); } } } /** Initializes queues */ private void initQueues() { logger.debug("Init Queues", false); if (_active.getAndSet(true)) { // NOP if the loader is already active return; } // start PUT and PROCESS queues _queuePut = new ArrayBlockingQueue<>(48); _queueProcess = new ArrayBlockingQueue<>(48); _put = new PutQueue(this); _process = new ProcessQueue(this); // Start queue. NOTE: This is not actively used _queueData = new ArrayBlockingQueue<>(1024); _thread = new Thread(this); _thread.setName("StreamLoaderThread"); _thread.start(); // Create stage _stage = new BufferStage(this, _op, _csvFileBucketSize, _csvFileSize); } /** Flushes data by joining PUT and PROCESS queues */ private void flushQueues() { // Terminate data loading thread. logger.debug("Flush Queues", false); try { _queueData.put(new byte[0]); _thread.join(10000); if (_thread.isAlive()) { _thread.interrupt(); } } catch (Exception ex) { String msg = "Failed to join StreamLoader queue: " + ex.getMessage(); logger.error(msg, ex); throw new DataError(msg, Utils.getCause(ex)); } // Put last stage on queue terminate(); // wait for the processing to finish _put.join(); _process.join(); if (_aborted.get()) { // Loader was aborted due to an exception. // It was rolled back at that time. // LOGGER.log(Level.WARNING, // "Loader had been previously aborted by error", _abortCause); throw _abortCause; } } private void writeBytes(final byte[] data) throws IOException, InterruptedException { // this loader was aborted if (_aborted.get()) { return; } boolean full = _stage.stageData(data); if (full && !_oneBatch) { // if Buffer stage is full and NOT one batch mode, // queue PUT request. queuePut(_stage); _stage = new BufferStage(this, _op, _csvFileBucketSize, _csvFileSize); } } private void truncateTargetTable() { try { // TODO: could be replaced with TRUNCATE? 
_processConn.createStatement().execute("DELETE FROM " + this.getFullTableName()); } catch (SQLException ex) { logger.error(ex.getMessage(), ex); abort(new Loader.ConnectionError(Utils.getCause(ex))); } } public void setVectorColumnType(String vectorType) { this._vectorType = vectorType; } public void setVectorColumns() { try { DatabaseMetaData dbmd = _processConn.getMetaData(); for (String col : _columns) { try (ResultSet rs = dbmd.getColumns(_database, _schema, _table, col)) { rs.next(); if (isColumnTypeVector(rs.getString(6))) { _vectorColumnsNameAndSize.put(col, rs.getInt(7)); } } } } catch (SQLException e) { logger.error(e.getMessage(), e); abort(new Loader.ConnectionError(Utils.getCause(e))); } } private boolean isColumnTypeVector(String col) { return col != null && col.equalsIgnoreCase("vector"); } @Override public void run() { try { while (true) { byte[] data = this._queueData.take(); if (data.length == 0) { break; } this.writeBytes(data); } } catch (Exception ex) { logger.error(ex.getMessage(), ex); abort(new Loader.ConnectionError(Utils.getCause(ex))); } } private byte[] createCSVRecord(final Object[] data) { StringBuilder sb = new StringBuilder(); for (int i = 0; i < data.length; ++i) { if (i > 0) { sb.append(','); } sb.append( SnowflakeTypeUtil.escapeForCSV( SnowflakeTypeUtil.lexicalValue( data[i], _dateFormat, _timeFormat, _timestampFormat, _timestampTzFormat))); } return sb.toString().getBytes(UTF_8); } /** * Finishes loader * * @throws Exception an exception raised in finishing loader. */ @Override public void finish() throws Exception { logger.debug("Finish Loading", false); flushQueues(); if (_is_last_finish_call) { try { if (_after != null) { logger.debug("Running Execute After SQL", false); _processConn.createStatement().execute(_after); } // Loader successfully completed. Commit and return. _processConn.createStatement().execute("commit"); logger.debug("Committed", false); } catch (SQLException ex) { try { _processConn.createStatement().execute("rollback"); } catch (SQLException ex0) { logger.debug("Failed to rollback", false); } logger.debug(String.format("Execute After SQL failed to run: %s", _after), ex); throw new Loader.ConnectionError(Utils.getCause(ex)); } } } @Override public void close() throws Exception { logger.debug("Close Loader", false); try { this._processConn.close(); this._putConn.close(); } catch (SQLException ex) { logger.error(ex.getMessage(), ex); throw new ConnectionError(Utils.getCause(ex)); } } /** Set active to false (no-op if not active), add a stage with terminate flag onto the queue */ private void terminate() { logger.debug("Terminate Loader", false); boolean active = _active.getAndSet(false); if (!active) { return; // No-op } if (_stage == null) { _stage = new BufferStage(this, Operation.INSERT, _csvFileBucketSize, _csvFileSize); } _stage.setTerminate(true); try { queuePut(_stage); } catch (InterruptedException ex) { logger.error("Unknown Error", ex); } logger.debug("Snowflake loader terminating", false); } // If operation changes, existing stage needs to be scheduled for processing. 
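// resetOperation (below) queues the current BufferStage for PUT and opens a fresh stage bound
// to the new operation, so rows submitted afterwards are processed under the new operation.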
@Override public void resetOperation(Operation op) { if (op.equals(_op)) { // no-op return; } logger.debug("Operation is changing from {} to {}", _op, op); _op = op; if (_stage != null) { try { queuePut(_stage); } catch (InterruptedException ex) { logger.error(_stage.getId(), ex); } } _stage = new BufferStage(this, _op, _csvFileBucketSize, _csvFileSize); } public String getTable() { return _table; } String getBase() { return BASE; } Connection getPutConnection() { return _putConn; } Connection getProcessConnection() { return _processConn; } String getRemoteStage() { return _remoteStage; } List getKeys() { return this._keys; } List getColumns() { return this._columns; } Map getVectorColumns() { return this._vectorColumnsNameAndSize; } String getColumnsAsString() { // comma separate list of column names StringBuilder sb = new StringBuilder("\""); for (int i = 0; i < _columns.size(); i++) { if (i > 0) { sb.append("\",\""); } sb.append(_columns.get(i)); } sb.append("\""); return sb.toString(); } String getFullTableName() { return (_database == null ? "" : ("\"" + _database + "\".")) + (_schema == null ? "" : ("\"" + _schema + "\".")) + "\"" + _table + "\""; } public LoadResultListener getListener() { return _listener; } @Override public void setListener(LoadResultListener _listener) { this._listener = _listener; } private void queuePut(BufferStage stage) throws InterruptedException { _queuePut.put(stage); } BufferStage takePut() throws InterruptedException { return _queuePut.take(); } void queueProcess(BufferStage stage) throws InterruptedException { _queueProcess.put(stage); } BufferStage takeProcess() throws InterruptedException { return _queueProcess.take(); } void throttleUp() { int open = this._throttleCounter.incrementAndGet(); logger.debug("PUT Throttle Up: {}", open); if (open > 8) { logger.debug( "Will retry scheduling file for upload after {} seconds", (Math.pow(2, open - 7))); try { Thread.sleep(1000 * ((int) Math.pow(2, open - 7))); } catch (InterruptedException ex) { logger.error("Exception occurs while waiting", ex); } } } void throttleDown() { int throttleLevel = this._throttleCounter.decrementAndGet(); logger.debug("PUT Throttle Down: {}", throttleLevel); if (throttleLevel < 0) { logger.debug("Unbalanced throttle", false); _throttleCounter.set(0); } logger.debug("Connector throttle {}", throttleLevel); } private LoadResultListener _listener = new LoadResultListener() { private final AtomicInteger errorCount = new AtomicInteger(0); private final AtomicInteger errorRecordCount = new AtomicInteger(0); private final AtomicInteger submittedRowCount = new AtomicInteger(0); @Override public boolean needErrors() { return false; } @Override public boolean needSuccessRecords() { return false; } @Override public void addError(LoadingError error) {} @Override public boolean throwOnError() { return false; } @Override public void recordProvided(Operation op, Object[] record) {} @Override public void addProcessedRecordCount(Operation op, int i) {} @Override public void addOperationRecordCount(Operation op, int i) {} @Override public int getErrorCount() { return errorCount.get(); } @Override public int getErrorRecordCount() { return errorRecordCount.get(); } @Override public void resetErrorCount() { errorCount.set(0); } @Override public void resetErrorRecordCount() { errorRecordCount.set(0); } @Override public void addErrorCount(int count) { errorCount.addAndGet(count); } @Override public void addErrorRecordCount(int count) { errorRecordCount.addAndGet(count); } @Override public void 
resetSubmittedRowCount() { submittedRowCount.set(0); } @Override public void addSubmittedRowCount(int count) { submittedRowCount.addAndGet(count); } @Override public int getSubmittedRowCount() { return submittedRowCount.get(); } }; void setTestMode(boolean mode) { this._testMode = mode; } public String getStageColumnsAsString() { // if there are no vector columns in the target table just select * is needed from the staging // table. if (_vectorColumnsNameAndSize.isEmpty()) { return "*"; } if (_vectorType == null) { throw new IllegalArgumentException( "Target table with vector columns must use setVectorColumnType with \"INT\" or \"FLOAT\""); } StringBuilder sb = new StringBuilder(); for (int i = 0; i < _columns.size(); i++) { String colName = _columns.get(i); if (_vectorColumnsNameAndSize.containsKey(colName)) { sb.append( colName + "::VECTOR(" + _vectorType + ", " + _vectorColumnsNameAndSize.get(colName) + ")"); } else { sb.append("\""); sb.append(colName); sb.append("\""); } if (i != _columns.size() - 1) { sb.append(", "); } } return sb.toString(); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/loader/Utils.java ================================================ package net.snowflake.client.internal.loader; /** Utils class for Loader API */ public class Utils { /** * Find the root cause of the exception * * @param e throwable object * @return the throwable cause */ public static Throwable getCause(Throwable e) { Throwable cause = null; Throwable result = e; while (null != (cause = result.getCause()) && (result != cause)) { result = cause; } return result; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/log/ArgSupplier.java ================================================ package net.snowflake.client.internal.log; /** * An interface for representing lambda expressions that supply values to placeholders in message * formats. * *
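* <p>Because the supplier is invoked only if the message is actually logged, expensive argument
* computation is deferred. A minimal sketch ({@code countBufferedRows()} is hypothetical):
*
* <pre>{@code
* logger.debug("Buffered rows: {}", (ArgSupplier) () -> countBufferedRows());
* }</pre>
*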
E.g., {@code Logger.debug("Value: {}", (ArgSupplier) () -> getValue());} */ @FunctionalInterface public interface ArgSupplier { /** * Get value * * @return Object value. */ Object get(); } ================================================ FILE: src/main/java/net/snowflake/client/internal/log/CommonsLoggingWrapper.java ================================================ package net.snowflake.client.internal.log; import org.apache.commons.logging.Log; /** * A wrapper around apache commons logging that routes through SFLogger, so that the driver's * logging configuration (via java.util.logging or SLF4J) applies and secrets are masked. This * wrapper does not hide trace and debug messages. */ public class CommonsLoggingWrapper implements Log { private final SFLogger logger; public CommonsLoggingWrapper(String className) { this.logger = SFLoggerFactory.getLogger(className); } public void debug(Object msg) { logger.debug(String.valueOf(msg), true); } public void debug(Object msg, Throwable t) { logger.debug(String.valueOf(msg), t); } public void error(Object msg) { logger.error(String.valueOf(msg), true); } public void error(Object msg, Throwable t) { logger.error(String.valueOf(msg), t); } public void fatal(Object msg) { this.error(msg); } public void fatal(Object msg, Throwable t) { this.error(msg, t); } public void info(Object msg) { logger.info(String.valueOf(msg), true); } public void info(Object msg, Throwable t) { logger.info(String.valueOf(msg), t); } public boolean isDebugEnabled() { return logger.isDebugEnabled(); } public boolean isErrorEnabled() { return logger.isErrorEnabled(); } public boolean isFatalEnabled() { return logger.isErrorEnabled(); } public boolean isInfoEnabled() { return logger.isInfoEnabled(); } public boolean isTraceEnabled() { return logger.isTraceEnabled(); } public void trace(Object msg) { logger.trace(String.valueOf(msg), true); } public void trace(Object msg, Throwable t) { logger.trace(String.valueOf(msg), t); } public void warn(Object msg) { logger.warn(String.valueOf(msg)); } public void warn(Object msg, Throwable t) { logger.warn(String.valueOf(msg), t); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/log/CommonsLoggingWrapperMode.java ================================================ package net.snowflake.client.internal.log; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import java.util.stream.Collectors; import java.util.stream.Stream; enum CommonsLoggingWrapperMode { /** All logs from commons logging are passed to SFLogger (check {@link CommonsLoggingWrapper}) */ ALL, /** * The default behaviour is forwarding all logs from commons logging to java.util.logging (check * {@link JDK14JCLWrapper}); no logs are forwarded to the SLF4J logger (check {@link SLF4JJCLWrapper}) */ DEFAULT, /** * Logs from commons logging are not forwarded and commons logging is not reconfigured - this may be * the right option if you need to replace commons logging with the SLF4J bridge when the thin jar is * used */ OFF; static final String JAVA_PROPERTY = "net.snowflake.jdbc.commons_logging_wrapper"; static CommonsLoggingWrapperMode detect() { String value = systemGetProperty(JAVA_PROPERTY); if (value == null) { return DEFAULT; } try { return CommonsLoggingWrapperMode.valueOf(value); } catch (Exception e) { throw new IllegalArgumentException( "Unknown commons logging wrapper value '" + value + "', expected one of: " + Stream.of(CommonsLoggingWrapperMode.values())
.map(Enum::name) .collect(Collectors.joining(", "))); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/log/JDK14JCLWrapper.java ================================================ package net.snowflake.client.internal.log; import org.apache.commons.logging.Log; /* This is a wrapper class of the snowflake JDK14Logger for apache Jakarta Commons Logging * (the logging framework used by the apache httpclient 4.5 package), giving us * the ability to filter out sensitive data. */ public class JDK14JCLWrapper implements Log { private final SFLogger logger; public JDK14JCLWrapper(String className) { this.logger = new JDK14Logger(className); } SFLogger getLogger() { return logger; } public void debug(Object msg) { // do nothing } public void debug(Object msg, Throwable t) { // do nothing } public void error(Object msg) { logger.error(String.valueOf(msg), true); } public void error(Object msg, Throwable t) { logger.error(String.valueOf(msg), t); } public void fatal(Object msg) { this.error(msg); } public void fatal(Object msg, Throwable t) { this.error(msg, t); } public void info(Object msg) { logger.info(String.valueOf(msg), true); } public void info(Object msg, Throwable t) { logger.info(String.valueOf(msg), t); } public boolean isDebugEnabled() { return false; } public boolean isErrorEnabled() { return logger.isErrorEnabled(); } public boolean isFatalEnabled() { return logger.isErrorEnabled(); } public boolean isInfoEnabled() { return logger.isInfoEnabled(); } public boolean isTraceEnabled() { return false; } public void trace(Object msg) { // do nothing } public void trace(Object msg, Throwable t) { // do nothing } public void warn(Object msg) { logger.warn(String.valueOf(msg)); } public void warn(Object msg, Throwable t) { logger.warn(String.valueOf(msg), t); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/log/JDK14Logger.java ================================================ package net.snowflake.client.internal.log; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import java.io.IOException; import java.text.MessageFormat; import java.util.Arrays; import java.util.HashSet; import java.util.Set; import java.util.logging.ConsoleHandler; import java.util.logging.FileHandler; import java.util.logging.Handler; import java.util.logging.Level; import java.util.logging.Logger; import java.util.logging.SimpleFormatter; import net.snowflake.client.internal.core.EventHandler; import net.snowflake.client.internal.core.EventUtil; import net.snowflake.client.internal.core.SFSessionProperty; import net.snowflake.client.internal.util.MaskedException; import net.snowflake.client.internal.util.SecretDetector; /** * Uses java.util.logging to implement SFLogger. * *
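* <p>This is the driver's default logger; it can also be selected explicitly with the JVM flag
* {@code -Dnet.snowflake.jdbc.loggerImpl=net.snowflake.client.log.JDK14Logger} (see {@link
* SFLoggerFactory}).
*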
Log Level mapping from SFLogger to java.util.logging: ERROR -- SEVERE WARN -- WARNING INFO -- * INFO DEBUG -- FINE TRACE -- FINEST */ public class JDK14Logger implements SFLogger { private Logger jdkLogger; private Set logMethods = new HashSet<>(Arrays.asList("debug", "error", "info", "trace", "warn", "debugNoMask")); private static boolean isLoggerInit = false; public static String STDOUT = "STDOUT"; private static final StdOutConsoleHandler STD_OUT_CONSOLE_HANDLER = new StdOutConsoleHandler(); public JDK14Logger(String name) { this.jdkLogger = Logger.getLogger(name); } static { String javaLoggingConsoleStdOut = systemGetProperty(SFSessionProperty.JAVA_LOGGING_CONSOLE_STD_OUT.getPropertyKey()); if ("true".equalsIgnoreCase(javaLoggingConsoleStdOut)) { String javaLoggingConsoleStdOutThreshold = systemGetProperty( SFSessionProperty.JAVA_LOGGING_CONSOLE_STD_OUT_THRESHOLD.getPropertyKey()); useStdOutConsoleHandler(javaLoggingConsoleStdOutThreshold); } } public static void useStdOutConsoleHandler(String threshold) { Level thresholdLevel = threshold != null ? tryParse(threshold) : null; Logger rootLogger = Logger.getLogger(""); for (Handler handler : rootLogger.getHandlers()) { if (handler instanceof ConsoleHandler) { rootLogger.removeHandler(handler); if (thresholdLevel != null) { rootLogger.addHandler(new StdErrOutThresholdAwareConsoleHandler(thresholdLevel)); } else { rootLogger.addHandler(new StdOutConsoleHandler()); } break; } } } private static Level tryParse(String threshold) { try { return Level.parse(threshold); } catch (Exception e) { throw new UnknownJavaUtilLoggingLevelException(threshold); } } static void resetToDefaultConsoleHandler() { Logger rootLogger = Logger.getLogger(""); for (Handler handler : rootLogger.getHandlers()) { if (handler instanceof StdErrOutThresholdAwareConsoleHandler || handler instanceof StdOutConsoleHandler) { rootLogger.removeHandler(handler); rootLogger.addHandler(new ConsoleHandler()); break; } } } public boolean isDebugEnabled() { return this.jdkLogger.isLoggable(Level.FINE); } public boolean isErrorEnabled() { return this.jdkLogger.isLoggable(Level.SEVERE); } public boolean isInfoEnabled() { return this.jdkLogger.isLoggable(Level.INFO); } public boolean isTraceEnabled() { return this.jdkLogger.isLoggable(Level.FINEST); } public boolean isWarnEnabled() { return this.jdkLogger.isLoggable(Level.WARNING); } public void debug(String msg, boolean isMasked) { logInternal(Level.FINE, msg, isMasked); } // This function is used to display unmasked, potentially sensitive log information for internal // regression testing purposes. Do not use otherwise. public void debugNoMask(String msg) { logInternal(Level.FINE, msg, false); } public void debug(String msg, Object... arguments) { logInternal(Level.FINE, msg, arguments); } public void debug(String msg, Throwable t) { logInternal(Level.FINE, msg, t); } public void error(String msg, boolean isMasked) { logInternal(Level.SEVERE, msg, isMasked); } public void error(String msg, Object... arguments) { logInternal(Level.SEVERE, msg, arguments); } public void error(String msg, Throwable t) { logInternal(Level.SEVERE, msg, t); } public void info(String msg, boolean isMasked) { logInternal(Level.INFO, msg, isMasked); } public void info(String msg, Object... arguments) { logInternal(Level.INFO, msg, arguments); } public void info(String msg, Throwable t) { logInternal(Level.INFO, msg, t); } public void trace(String msg, boolean isMasked) { logInternal(Level.FINEST, msg, isMasked); } public void trace(String msg, Object... 
arguments) { logInternal(Level.FINEST, msg, arguments); } public void trace(String msg, Throwable t) { logInternal(Level.FINEST, msg, t); } public void warn(String msg, boolean isMasked) { logInternal(Level.WARNING, msg, isMasked); } public void warn(String msg, Object... arguments) { logInternal(Level.WARNING, msg, arguments); } public void warn(String msg, Throwable t) { logInternal(Level.WARNING, msg, t); } private void logInternal(Level level, String msg, boolean masked) { if (jdkLogger.isLoggable(level)) { String[] source = findSourceInStack(); jdkLogger.logp( level, source[0], source[1], masked ? SecretDetector.maskSecrets(msg) : msg); } } private void logInternal(Level level, String msg, Object... arguments) { if (jdkLogger.isLoggable(level)) { String[] source = findSourceInStack(); String message = ""; try { message = MessageFormat.format(refactorString(msg), evaluateLambdaArgs(arguments)); } catch (IllegalArgumentException e) { message = "Unable to format msg: " + msg; } jdkLogger.logp(level, source[0], source[1], SecretDetector.maskSecrets(message)); } } private void logInternal(Level level, String msg, Throwable t) { if (jdkLogger.isLoggable(level)) { String[] source = findSourceInStack(); Throwable masked = (t == null) ? null : new MaskedException(t); jdkLogger.logp(level, source[0], source[1], SecretDetector.maskSecrets(msg), masked); } } public static void addHandler(Handler handler) { Logger snowflakeLogger = Logger.getLogger(SFFormatter.CLASS_NAME_PREFIX); snowflakeLogger.addHandler(handler); } public static void removeHandler(Handler handler) { Logger snowflakeLogger = Logger.getLogger(SFFormatter.CLASS_NAME_PREFIX); snowflakeLogger.removeHandler(handler); } public static void setUseParentHandlers(boolean value) { Logger snowflakeLogger = Logger.getLogger(SFFormatter.CLASS_NAME_PREFIX); snowflakeLogger.setUseParentHandlers(value); } public static void setLevel(Level level) { Logger snowflakeLogger = Logger.getLogger(SFFormatter.CLASS_NAME_PREFIX); snowflakeLogger.setLevel(level); } public static Level getLevel() { Logger snowflakeLogger = Logger.getLogger(SFFormatter.CLASS_NAME_PREFIX); return snowflakeLogger.getLevel(); } /** * This is the way to enable logging in JDBC through the TRACING parameter or the sf client config file. * * @param level log level * @param logPath log path * @throws IOException if there is an error writing to the log */ public static synchronized void instantiateLogger(Level level, String logPath) throws IOException { if (!isLoggerInit) { loggerInit(level, logPath); isLoggerInit = true; } } /** * Since we use the SLF4J way of formatting strings, we need to refactor the message string when it has * arguments. For example, in slf4j, this string can be formatted with 2 arguments * *
<p>ex.1: Error happened in {} on {} * * <p>If two arguments are provided, the error message can be formatted. * * <p>However, in java.util.logging, achieving the same formatted message requires the string to be * converted to * * <p>ex.2: Error happened in {0} on {1} * * <p>Here the first and second arguments are substituted in the corresponding places. * *
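* <p>A minimal sketch of the conversion and its effect (values are illustrative):
*
* <pre>{@code
* // refactorString("Error happened in {} on {}") yields "Error happened in {0} on {1}",
* // which java.text.MessageFormat can then fill positionally:
* MessageFormat.format("Error happened in {0} on {1}", "stage PUT", "node-1");
* // -> "Error happened in stage PUT on node-1"
* }</pre>
*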
This method will convert string in ex.1 to ex.2 * * @param original original string * @return refactored string */ private String refactorString(String original) { StringBuilder sb = new StringBuilder(); int argCount = 0; for (int i = 0; i < original.length(); i++) { if (original.charAt(i) == '{' && i < original.length() - 1 && original.charAt(i + 1) == '}') { sb.append(String.format("{%d}", argCount)); argCount++; i++; } else { sb.append(original.charAt(i)); } } return sb.toString(); } /** * Used to find the index of the source class/method in current stack This method will locate the * source as the first method after logMethods * * @return an array of size two, first element is className and second is methodName */ private String[] findSourceInStack() { StackTraceElement[] stackTraces = Thread.currentThread().getStackTrace(); String[] results = new String[2]; for (int i = 0; i < stackTraces.length; i++) { if (logMethods.contains(stackTraces[i].getMethodName())) { // since already find the highest logMethods, find the first method after this one // and is not a logMethods. This is done to avoid multiple wrapper over log methods for (int j = i; j < stackTraces.length; j++) { if (!logMethods.contains(stackTraces[j].getMethodName())) { results[0] = stackTraces[j].getClassName(); results[1] = stackTraces[j].getMethodName(); return results; } } } } return results; } private static void loggerInit(Level level, String outputPath) throws IOException { Logger snowflakeLoggerInformaticaV1 = Logger.getLogger(SFFormatter.INFORMATICA_V1_CLASS_NAME_PREFIX); // setup event handler EventHandler eventHandler = EventUtil.getEventHandlerInstance(); eventHandler.setLevel(Level.INFO); eventHandler.setFormatter(new SimpleFormatter()); JDK14Logger.addHandler(eventHandler); snowflakeLoggerInformaticaV1.setLevel(level); JDK14Logger.setLevel(level); snowflakeLoggerInformaticaV1.addHandler(eventHandler); if (STDOUT.equalsIgnoreCase(outputPath)) { // Initialize console handler ConsoleHandler consoleHandler = new ConsoleHandler(); consoleHandler.setLevel(level); consoleHandler.setFormatter(new SFFormatter()); JDK14Logger.addHandler(consoleHandler); snowflakeLoggerInformaticaV1.addHandler(consoleHandler); } else { // Initialize file handler. // get log count and size String defaultLogSizeVal = systemGetProperty("snowflake.jdbc.log.size"); String defaultLogCountVal = systemGetProperty("snowflake.jdbc.log.count"); // default log size to 1 GB int logSize = 1000000000; // default number of log files to rotate to 2 int logCount = 2; if (defaultLogSizeVal != null) { try { logSize = Integer.parseInt(defaultLogSizeVal); } catch (Exception ex) { ; } } if (defaultLogCountVal != null) { try { logCount = Integer.parseInt(defaultLogCountVal); } catch (Exception ex) { ; } } // write log file to tmp directory FileHandler fileHandler = new FileHandler(outputPath, logSize, logCount, true); fileHandler.setFormatter(new SFFormatter()); fileHandler.setLevel(level); JDK14Logger.addHandler(fileHandler); snowflakeLoggerInformaticaV1.addHandler(fileHandler); } } private static Object[] evaluateLambdaArgs(Object... args) { final Object[] result = new Object[args.length]; for (int i = 0; i < args.length; i++) { result[i] = args[i] instanceof ArgSupplier ? 
((ArgSupplier) args[i]).get() : args[i]; } return result; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/log/SFFormatter.java ================================================ package net.snowflake.client.internal.log; import java.text.DateFormat; import java.text.SimpleDateFormat; import java.util.Date; import java.util.TimeZone; import java.util.logging.Formatter; import java.util.logging.Handler; import java.util.logging.LogRecord; /** SFFormatter */ public class SFFormatter extends Formatter { private static final DateFormat df = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS"); static { df.setTimeZone(TimeZone.getTimeZone("UTC")); } // Fixed to "net.snowflake.client" since SnowflakeDriver moved to the api.driver package // Previously calculated dynamically, but should always be the root client package public static final String CLASS_NAME_PREFIX = "net.snowflake.client"; public static final String INFORMATICA_V1_CLASS_NAME_PREFIX = "com.snowflake"; @Override public String format(LogRecord record) { int lineNumber = -1; String className = record.getSourceClassName(); final String methodName = record.getSourceMethodName(); StackTraceElement[] stackTraces = Thread.currentThread().getStackTrace(); for (StackTraceElement ste : stackTraces) { if (className.equals(ste.getClassName()) && methodName.equals(ste.getMethodName())) { lineNumber = ste.getLineNumber(); break; } } if (className.startsWith(CLASS_NAME_PREFIX)) { className = "n.s.c" + className.substring(CLASS_NAME_PREFIX.length()); } else if (className.startsWith(INFORMATICA_V1_CLASS_NAME_PREFIX)) { className = "c.s" + className.substring(INFORMATICA_V1_CLASS_NAME_PREFIX.length()); } StringBuilder builder = new StringBuilder(1000); builder.append(df.format(new Date(record.getMillis()))).append(" "); builder.append(className).append(" "); builder.append(record.getLevel()).append(" "); builder.append(methodName).append(":"); builder.append(lineNumber).append(" - "); builder.append(formatMessage(record)); builder.append("\n"); return builder.toString(); } @Override public String getHead(Handler h) { return super.getHead(h); } @Override public String getTail(Handler h) { return super.getTail(h); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/log/SFLogLevel.java ================================================ package net.snowflake.client.internal.log; /** Extended log levels for the snowflake package. */ public enum SFLogLevel { // OFF is the highest level; no logs are shown at this level. // We can extend this enum to add levels for perf instrumentation and network request/response. OFF(50, "OFF"), ERROR(40, "ERROR"), WARN(30, "WARN"), INFO(20, "INFO"), DEBUG(10, "DEBUG"), TRACE(0, "TRACE"); private final int levelInt; private final String levelStr; SFLogLevel(int levelInt, String levelStr) { this.levelInt = levelInt; this.levelStr = levelStr; } /** * Parses the input log level string and returns the corresponding log level. Matching is * case-insensitive. * * @param levelStr log level string * @return SFLogLevel */ public static SFLogLevel getLogLevel(String levelStr) { for (SFLogLevel level : SFLogLevel.values()) { if (level.levelStr.equalsIgnoreCase(levelStr)) { return level; } } // Default is off.
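// e.g. getLogLevel("DEBUG") and getLogLevel("debug") both return DEBUG; an unrecognized
// string such as "verbose" falls through to OFF.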
return SFLogLevel.OFF; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/log/SFLogger.java ================================================ package net.snowflake.client.internal.log; /** * Interface used by JDBC driver to log information * *
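* <p>A typical usage sketch ({@code MyClass} and {@code describeTarget()} are hypothetical):
*
* <pre>{@code
* private static final SFLogger logger = SFLoggerFactory.getLogger(MyClass.class);
* logger.debug("Loading into {}", (ArgSupplier) () -> describeTarget());
* }</pre>
*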
Five levels are included in this interface, from high to low: ERROR WARN INFO DEBUG TRACE */ public interface SFLogger { /** * Is debug level enabled? * * @return true if the trace level is DEBUG */ boolean isDebugEnabled(); /** * Is error level enabled? * * @return true if the trace level is ERROR */ boolean isErrorEnabled(); /** * Is info level enabled? * * @return true if the trace level is INFO */ boolean isInfoEnabled(); /** * Is trace level enabled? * * @return true if the trace level is TRACE */ boolean isTraceEnabled(); /** * Is warn level enabled? * * @return true if the trace level is WARN */ boolean isWarnEnabled(); void debug(String msg, boolean isMasked); void debugNoMask(String msg); /** * Logs message at DEBUG level. * * @param msg Message or message format * @param arguments objects that supply value to placeholders in the message format. Expensive * operations that supply these values can be specified using lambdas implementing {@link * ArgSupplier} so that they are run only if the message is going to be logged. E.g., {@code * Logger.debug("Value: {}", (ArgSupplier) () -> expensiveOperation());} */ void debug(String msg, Object... arguments); void debug(String msg, Throwable t); void error(String msg, boolean isMasked); /** * Logs message at ERROR level. * * @param msg Message or message format * @param arguments objects that supply value to placeholders in the message format. Expensive * operations that supply these values can be specified using lambdas implementing {@link * ArgSupplier} so that they are run only if the message is going to be logged. E.g., {@code * Logger.warn("Value: {}", (ArgSupplier) () -> expensiveOperation());} */ void error(String msg, Object... arguments); void error(String msg, Throwable t); void info(String msg, boolean isMasked); /** * Logs message at INFO level. * * @param msg Message or message format * @param arguments objects that supply value to placeholders in the message format. Expensive * operations that supply these values can be specified using lambdas implementing {@link * ArgSupplier} so that they are run only if the message is going to be logged. E.g., {@code * Logger.info("Value: {}", (ArgSupplier) () -> expensiveOperation());} */ void info(String msg, Object... arguments); void info(String msg, Throwable t); void trace(String msg, boolean isMasked); /** * Logs message at TRACE level. * * @param msg Message or message format * @param arguments objects that supply value to placeholders in the message format. Expensive * operations that supply these values can be specified using lambdas implementing {@link * ArgSupplier} so that they are run only if the message is going to be logged. E.g., {@code * Logger.trace("Value: {}", (ArgSupplier) () -> expensiveOperation());} */ void trace(String msg, Object... arguments); void trace(String msg, Throwable t); void warn(String msg, boolean isMasked); /** * Logs message at WARN level. * * @param msg Message or message format * @param arguments objects that supply value to placeholders in the message format. Expensive * operations that supply these values can be specified using lambdas implementing {@link * ArgSupplier} so that they are run only if the message is going to be logged. E.g., {@code * Logger.warn("Value: {}", (ArgSupplier) () -> expensiveOperation());} */ void warn(String msg, Object... 
arguments); void warn(String msg, Throwable t); } ================================================ FILE: src/main/java/net/snowflake/client/internal/log/SFLoggerFactory.java ================================================ package net.snowflake.client.internal.log; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; /** Used to create SFLogger instance */ public class SFLoggerFactory { private static LoggerImpl loggerImplementation; enum LoggerImpl { SLF4JLOGGER("net.snowflake.client.log.SLF4JLogger"), JDK14LOGGER("net.snowflake.client.log.JDK14Logger"); private String loggerImplClassName; LoggerImpl(String loggerClass) { this.loggerImplClassName = loggerClass; } public String getLoggerImplClassName() { return this.loggerImplClassName; } public static LoggerImpl fromString(String loggerImplClassName) { if (loggerImplClassName != null) { for (LoggerImpl imp : LoggerImpl.values()) { if (loggerImplClassName.equalsIgnoreCase(imp.getLoggerImplClassName())) { return imp; } } } return null; } } /** * @param clazz Class type that the logger is instantiated * @return An SFLogger instance given the name of the class */ public static SFLogger getLogger(Class clazz) { // only need to determine the logger implementation only once if (loggerImplementation == null) { String logger = systemGetProperty("net.snowflake.jdbc.loggerImpl"); loggerImplementation = LoggerImpl.fromString(logger); if (loggerImplementation == null) { // default to use java util logging loggerImplementation = LoggerImpl.JDK14LOGGER; } } switch (loggerImplementation) { case SLF4JLOGGER: return new SLF4JLogger(clazz); case JDK14LOGGER: default: return new JDK14Logger(clazz.getName()); } } public static String getLoggerImplementationName() { // Ensure the implementation is initialized if (loggerImplementation == null) { getLogger(SFLoggerFactory.class); } switch (loggerImplementation) { case SLF4JLOGGER: return "SLF4J"; case JDK14LOGGER: default: return "JUL"; } } /** * A replacement for getLogger function, whose parameter is Class<?>, when Class<?> is * inaccessible. For example, the name we have is an alias name of a class, we can't get the * correct Class<?> by the given name. 
* * @param name name to indicate the class (may differ from the class name) for which the logger * is instantiated * @return An SFLogger instance given the name */ public static SFLogger getLogger(String name) { if (loggerImplementation == null) { String logger = systemGetProperty("net.snowflake.jdbc.loggerImpl"); loggerImplementation = LoggerImpl.fromString(logger); if (loggerImplementation == null) { // default to use java util logging loggerImplementation = LoggerImpl.JDK14LOGGER; } } switch (loggerImplementation) { case SLF4JLOGGER: return new SLF4JLogger(name); case JDK14LOGGER: default: return new JDK14Logger(name); } } } ================================================ FILE: src/main/java/net/snowflake/client/internal/log/SFLoggerUtil.java ================================================ package net.snowflake.client.internal.log; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isNullOrEmpty; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import org.apache.commons.logging.LogFactory; public class SFLoggerUtil { private static final String NOT_PROVIDED_LOG = "not provided"; private static final String PROVIDED_LOG = "provided"; public static void initializeSnowflakeLogger() { String logger = systemGetProperty("net.snowflake.jdbc.loggerImpl"); SFLoggerFactory.LoggerImpl loggerImplementation = SFLoggerFactory.LoggerImpl.fromString(logger); if (loggerImplementation == null) { loggerImplementation = SFLoggerFactory.LoggerImpl.JDK14LOGGER; } CommonsLoggingWrapperMode commonsLoggingWrapperMode = CommonsLoggingWrapperMode.detect(); if (commonsLoggingWrapperMode == CommonsLoggingWrapperMode.OFF) { return; } try { SnowflakeUtil.systemSetProperty( "org.apache.commons.logging.LogFactory", "org.apache.commons.logging.impl.LogFactoryImpl"); } catch (SecurityException ex) { // SecurityManager denied setProperty; logging still works with default backend } LogFactory logFactory = LogFactory.getFactory(); if (commonsLoggingWrapperMode == CommonsLoggingWrapperMode.ALL) { logFactory.setAttribute( "org.apache.commons.logging.Log", CommonsLoggingWrapper.class.getName()); return; } switch (loggerImplementation) { case SLF4JLOGGER: logFactory.setAttribute( "org.apache.commons.logging.Log", "net.snowflake.client.internal.log.SLF4JJCLWrapper"); break; case JDK14LOGGER: default: logFactory.setAttribute( "org.apache.commons.logging.Log", "net.snowflake.client.internal.log.JDK14JCLWrapper"); } } public static <T> String isVariableProvided(T variable) { if (variable instanceof String) { return (isNullOrEmpty((String) variable)) ? NOT_PROVIDED_LOG : PROVIDED_LOG; } return variable == null ?
NOT_PROVIDED_LOG : PROVIDED_LOG; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/log/SFToJavaLogMapper.java ================================================ package net.snowflake.client.internal.log; import java.util.HashMap; import java.util.logging.Level; /** Utility class to map SFLogLevels to java.util.logging.Level. */ public class SFToJavaLogMapper { private static HashMap<SFLogLevel, Level> levelMap = new HashMap<>(); static { levelMap.put(SFLogLevel.TRACE, java.util.logging.Level.FINEST); levelMap.put(SFLogLevel.DEBUG, java.util.logging.Level.FINE); levelMap.put(SFLogLevel.INFO, java.util.logging.Level.INFO); levelMap.put(SFLogLevel.WARN, java.util.logging.Level.WARNING); levelMap.put(SFLogLevel.ERROR, java.util.logging.Level.SEVERE); levelMap.put(SFLogLevel.OFF, java.util.logging.Level.OFF); } public static java.util.logging.Level toJavaUtilLoggingLevel(SFLogLevel level) { return levelMap.getOrDefault(level, java.util.logging.Level.OFF); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/log/SLF4JJCLWrapper.java ================================================ package net.snowflake.client.internal.log; import org.apache.commons.logging.Log; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /* Although this class doesn't really include SLF4J, it plays the role of * a wrapper class of the snowflake SLF4JLogger for apache Jakarta Commons * Logging (the logging framework used by the apache httpclient 4.5 package), * giving us the ability to filter out sensitive data. * * The reason why we don't unify this class and the JDK14JCLWrapper class is that the * way SLF4J locates the caller of the log functions prevents us from using an extra wrapper. If * we really wrapped up SLF4J, the log would not report the correct callers.
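*
* Note that every log method below is deliberately a no-op, so commons logging output is
* suppressed in this mode; only the isXxxEnabled() checks delegate to the underlying SLF4J
* logger.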
*/ public class SLF4JJCLWrapper implements Log { private Logger slf4jLogger; public SLF4JJCLWrapper(String name) { slf4jLogger = LoggerFactory.getLogger(name); } Logger getLogger() { return slf4jLogger; } public void debug(Object message) { // do nothing } public void debug(Object msg, Throwable t) {} public void error(Object msg) {} public void error(Object msg, Throwable t) {} public void fatal(Object msg) {} public void fatal(Object msg, Throwable t) {} public void info(Object msg) {} public void info(Object msg, Throwable t) {} public void trace(Object msg) {} public void trace(Object msg, Throwable t) {} public void warn(Object msg) {} public void warn(Object msg, Throwable t) {} public boolean isDebugEnabled() { return false; } public boolean isErrorEnabled() { return this.slf4jLogger.isErrorEnabled(); } public boolean isFatalEnabled() { return this.slf4jLogger.isErrorEnabled(); } public boolean isInfoEnabled() { return this.slf4jLogger.isInfoEnabled(); } public boolean isTraceEnabled() { return false; } public boolean isWarnEnabled() { return this.slf4jLogger.isWarnEnabled(); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/log/SLF4JLogger.java ================================================ package net.snowflake.client.internal.log; import net.snowflake.client.internal.util.MaskedException; import net.snowflake.client.internal.util.SecretDetector; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.helpers.FormattingTuple; import org.slf4j.helpers.MessageFormatter; import org.slf4j.spi.LocationAwareLogger; public class SLF4JLogger implements SFLogger { private Logger slf4jLogger; private boolean isLocationAwareLogger; private static final String FQCN = SLF4JLogger.class.getName(); public SLF4JLogger(Class clazz) { slf4jLogger = LoggerFactory.getLogger(clazz); isLocationAwareLogger = slf4jLogger instanceof LocationAwareLogger; } public SLF4JLogger(String name) { slf4jLogger = LoggerFactory.getLogger(name); isLocationAwareLogger = slf4jLogger instanceof LocationAwareLogger; } public boolean isDebugEnabled() { return this.slf4jLogger.isDebugEnabled(); } public boolean isErrorEnabled() { return this.slf4jLogger.isErrorEnabled(); } public boolean isInfoEnabled() { return this.slf4jLogger.isInfoEnabled(); } public boolean isTraceEnabled() { return this.slf4jLogger.isTraceEnabled(); } public boolean isWarnEnabled() { return this.slf4jLogger.isWarnEnabled(); } public void debug(String msg, boolean isMasked) { msg = isMasked == true ? SecretDetector.maskSecrets(msg) : msg; if (isLocationAwareLogger) { ((LocationAwareLogger) slf4jLogger) .log(null, FQCN, LocationAwareLogger.DEBUG_INT, msg, null, null); } else { slf4jLogger.debug(msg); } } // This function is used to display unmasked, potentially sensitive log information for internal // regression testing purposes. Do not use otherwise public void debugNoMask(String msg) { if (isLocationAwareLogger) { ((LocationAwareLogger) slf4jLogger) .log(null, FQCN, LocationAwareLogger.DEBUG_INT, msg, null, null); } else { slf4jLogger.debug(msg); } } public void debug(String msg, Object... arguments) { // use this as format example for JDK14Logger. if (isDebugEnabled()) { FormattingTuple ft = MessageFormatter.arrayFormat(msg, evaluateLambdaArgs(arguments)); this.debug(SecretDetector.maskSecrets(ft.getMessage()), false); } } public void debug(String msg, Throwable t) { msg = SecretDetector.maskSecrets(msg); Throwable masked = (t == null) ? 
null : new MaskedException(t); if (isLocationAwareLogger) { ((LocationAwareLogger) slf4jLogger) .log(null, FQCN, LocationAwareLogger.DEBUG_INT, msg, null, masked); } else { slf4jLogger.debug(msg, masked); } } public void error(String msg, boolean isMasked) { msg = isMasked ? SecretDetector.maskSecrets(msg) : msg; if (isLocationAwareLogger) { ((LocationAwareLogger) slf4jLogger) .log(null, FQCN, LocationAwareLogger.ERROR_INT, msg, null, null); } else { slf4jLogger.error(msg); } } public void error(String msg, Object... arguments) { if (isErrorEnabled()) { FormattingTuple ft = MessageFormatter.arrayFormat(msg, evaluateLambdaArgs(arguments)); this.error(SecretDetector.maskSecrets(ft.getMessage()), false); } } public void error(String msg, Throwable t) { msg = SecretDetector.maskSecrets(msg); Throwable masked = (t == null) ? null : new MaskedException(t); if (isLocationAwareLogger) { ((LocationAwareLogger) slf4jLogger) .log(null, FQCN, LocationAwareLogger.ERROR_INT, msg, null, masked); } else { slf4jLogger.error(msg, masked); } } public void info(String msg, boolean isMasked) { msg = isMasked ? SecretDetector.maskSecrets(msg) : msg; if (isLocationAwareLogger) { ((LocationAwareLogger) slf4jLogger) .log(null, FQCN, LocationAwareLogger.INFO_INT, msg, null, null); } else { slf4jLogger.info(msg); } } public void info(String msg, Object... arguments) { if (isInfoEnabled()) { FormattingTuple ft = MessageFormatter.arrayFormat(msg, evaluateLambdaArgs(arguments)); this.info(SecretDetector.maskSecrets(ft.getMessage()), false); } } public void info(String msg, Throwable t) { msg = SecretDetector.maskSecrets(msg); Throwable masked = (t == null) ? null : new MaskedException(t); if (isLocationAwareLogger) { ((LocationAwareLogger) slf4jLogger) .log(null, FQCN, LocationAwareLogger.INFO_INT, msg, null, masked); } else { slf4jLogger.info(msg, masked); } } public void trace(String msg, boolean isMasked) { msg = isMasked ? SecretDetector.maskSecrets(msg) : msg; if (isLocationAwareLogger) { ((LocationAwareLogger) slf4jLogger) .log(null, FQCN, LocationAwareLogger.TRACE_INT, msg, null, null); } else { slf4jLogger.trace(msg); } } public void trace(String msg, Object... arguments) { if (isTraceEnabled()) { FormattingTuple ft = MessageFormatter.arrayFormat(msg, evaluateLambdaArgs(arguments)); this.trace(SecretDetector.maskSecrets(ft.getMessage()), false); } } public void trace(String msg, Throwable t) { msg = SecretDetector.maskSecrets(msg); Throwable masked = (t == null) ? null : new MaskedException(t); if (isLocationAwareLogger) { ((LocationAwareLogger) slf4jLogger) .log(null, FQCN, LocationAwareLogger.TRACE_INT, msg, null, masked); } else { slf4jLogger.trace(msg, masked); } } public void warn(String msg, boolean isMasked) { msg = isMasked ? SecretDetector.maskSecrets(msg) : msg; if (isLocationAwareLogger) { ((LocationAwareLogger) slf4jLogger) .log(null, FQCN, LocationAwareLogger.WARN_INT, msg, null, null); } else { slf4jLogger.warn(msg); } } public void warn(String msg, Object... arguments) { if (isWarnEnabled()) { FormattingTuple ft = MessageFormatter.arrayFormat(msg, evaluateLambdaArgs(arguments)); this.warn(SecretDetector.maskSecrets(ft.getMessage()), false); } } public void warn(String msg, Throwable t) { msg = SecretDetector.maskSecrets(msg); Throwable masked = (t == null) ?
null : new MaskedException(t); if (isLocationAwareLogger) { ((LocationAwareLogger) slf4jLogger) .log(null, FQCN, LocationAwareLogger.WARN_INT, msg, null, masked); } else { slf4jLogger.warn(msg, masked); } } private static Object[] evaluateLambdaArgs(Object... args) { final Object[] result = new Object[args.length]; for (int i = 0; i < args.length; i++) { result[i] = args[i] instanceof ArgSupplier ? ((ArgSupplier) args[i]).get() : args[i]; } return result; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/log/StdErrOutThresholdAwareConsoleHandler.java ================================================ package net.snowflake.client.internal.log; import java.util.logging.ConsoleHandler; import java.util.logging.Level; import java.util.logging.LogRecord; import java.util.logging.SimpleFormatter; import java.util.logging.StreamHandler; class StdErrOutThresholdAwareConsoleHandler extends StreamHandler { private final ConsoleHandler stdErrConsoleHandler = new ConsoleHandler(); private final Level threshold; public StdErrOutThresholdAwareConsoleHandler(Level threshold) { super(System.out, new SimpleFormatter()); this.threshold = threshold; } @Override public void publish(LogRecord record) { if (record.getLevel().intValue() > threshold.intValue()) { stdErrConsoleHandler.publish(record); } else { super.publish(record); flush(); } } @Override public void close() { flush(); stdErrConsoleHandler.close(); } Level getThreshold() { return threshold; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/log/StdOutConsoleHandler.java ================================================ package net.snowflake.client.internal.log; import java.util.logging.LogRecord; import java.util.logging.SimpleFormatter; import java.util.logging.StreamHandler; class StdOutConsoleHandler extends StreamHandler { public StdOutConsoleHandler() { // configure with specific defaults for ConsoleHandler super(System.out, new SimpleFormatter()); } @Override public void publish(LogRecord record) { super.publish(record); flush(); } @Override public void close() { flush(); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/log/UnknownJavaUtilLoggingLevelException.java ================================================ package net.snowflake.client.internal.log; import java.util.logging.Level; import java.util.stream.Collectors; import java.util.stream.Stream; class UnknownJavaUtilLoggingLevelException extends RuntimeException { private static final String AVAILABLE_LEVELS = Stream.of( Level.OFF, Level.SEVERE, Level.WARNING, Level.INFO, Level.CONFIG, Level.FINE, Level.FINER, Level.FINEST, Level.ALL) .map(Level::getName) .collect(Collectors.joining(", ")); UnknownJavaUtilLoggingLevelException(String threshold) { super( "Unknown java util logging level: " + threshold + ", expected one of: " + AVAILABLE_LEVELS); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/util/Converter.java ================================================ package net.snowflake.client.internal.util; import net.snowflake.client.internal.core.SFException; /** Functional interface used to convert data to the expected type */ @FunctionalInterface public interface Converter<T> { T convert(Object object) throws SFException; } ================================================ FILE: src/main/java/net/snowflake/client/internal/util/DecorrelatedJitterBackoff.java
================================================ package net.snowflake.client.internal.util; import java.util.concurrent.ThreadLocalRandom; /** * Decorrelated Jitter backoff * *
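* <p>A descriptive note on the formula, inferred from the implementation below: each call to
* {@code nextSleepTime(sleep)} returns {@code min(cap, random(base, sleep * 3))}, so successive
* sleeps are drawn from a widening range but never exceed {@code cap}.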
<p>
https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/ */ public class DecorrelatedJitterBackoff { private final long base; private final long cap; public DecorrelatedJitterBackoff(long base, long cap) { this.base = base; this.cap = cap; } public long nextSleepTime(long sleep) { return Math.min(cap, ThreadLocalRandom.current().nextLong(base, sleep * 3)); } public long getJitterForLogin(long currentTime) { double multiplicationFactor = chooseRandom(-1, 1); long jitter = (long) (multiplicationFactor * currentTime * 0.5); return jitter; } public double chooseRandom(double min, double max) { return min + (Math.random() * (max - min)); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/util/EnvironmentProvider.java ================================================ package net.snowflake.client.internal.util; /** * Interface for providing environment variables to enable thread-safe testing. This abstraction * allows dependency injection of environment variable access, making code testable with instance * mocks (as opposed to static SnowflakeUtil) that work across threads. */ public interface EnvironmentProvider { String getEnv(String name); } ================================================ FILE: src/main/java/net/snowflake/client/internal/util/LibcDetails.java ================================================ package net.snowflake.client.internal.util; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.util.concurrent.TimeUnit; import java.util.regex.Matcher; import java.util.regex.Pattern; import net.snowflake.client.internal.core.Constants; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import org.apache.commons.io.IOUtils; /** * Detects the libc family (glibc / musl) and version on Linux for telemetry and minicore platform * targeting purposes. */ public final class LibcDetails { private static final SFLogger logger = SFLoggerFactory.getLogger(LibcDetails.class); public static final String GLIBC = "glibc"; public static final String MUSL = "musl"; static final String DEFAULT_LDD_PATH = "/usr/bin/ldd"; // e.g. "GNU C Library (Ubuntu GLIBC 2.31-0ubuntu9.16) stable release version 2.31." private static final Pattern RE_GLIBC_VERSION = Pattern.compile("LIBC[-a-z0-9 ).]*?(\\d+\\.\\d+)", Pattern.CASE_INSENSITIVE); // e.g. "Version 1.2.3" private static final Pattern RE_MUSL_VERSION = Pattern.compile("Version\\s+(\\d+\\.\\d+[\\d.]*)", Pattern.CASE_INSENSITIVE); // Word-boundary family markers - guard against false positives like "muslib" or "muscle". private static final Pattern RE_MUSL_MARKER = Pattern.compile("\\bmusl\\b"); private static final Pattern RE_GLIBC_NAME_MARKER = Pattern.compile("\\bGNU C Library\\b"); private static final Pattern RE_GLIBC_GETCONF_MARKER = Pattern.compile("\\bglibc\\b"); private static final long EXEC_TIMEOUT_MS = 200; private static LibcInfo cachedResult; private LibcDetails() {} /** Returns the cached libc details, performing detection on first call. 
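* <p>Synchronized, so concurrent first callers run detection only once; later calls return the cached result.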
*/ public static synchronized LibcInfo load() { if (cachedResult == null) { cachedResult = detect(); } return cachedResult; } private static LibcInfo detect() { if (Constants.getOS() != Constants.OS.LINUX) { logger.trace("Libc detection skipped: not running on Linux"); return new LibcInfo(null, null); } LibcInfo fromFs = detectFromFilesystem(Paths.get(DEFAULT_LDD_PATH)); if (fromFs != null && fromFs.getFamily() != null && fromFs.getVersion() != null) { return fromFs; } LibcInfo fromCmd = detectFromCommand(); if (fromCmd == null) { return fromFs != null ? fromFs : new LibcInfo(null, null); } if (fromFs == null || fromFs.getFamily() == null) { return fromCmd; } // Family already detected; only adopt the command-derived version if both strategies agree // on the family. Mixing version from a different family would yield a corrupt result. if (fromFs.getVersion() == null && fromFs.getFamily().equals(fromCmd.getFamily())) { return new LibcInfo(fromFs.getFamily(), fromCmd.getVersion()); } return fromFs; } static LibcInfo detectFromFilesystem(Path lddPath) { try { byte[] bytes = Files.readAllBytes(lddPath); String content = new String(bytes, StandardCharsets.UTF_8); return parseLddContent(content); } catch (IOException e) { logger.debug("Failed to read libc details from {}: {}", lddPath, e.getMessage()); return null; } catch (Exception e) { logger.debug("Unexpected error reading libc details from {}: {}", lddPath, e.getMessage()); return null; } } static LibcInfo parseLddContent(String content) { if (content == null || content.isEmpty()) { return null; } String family; Pattern versionRe; if (RE_MUSL_MARKER.matcher(content).find()) { family = MUSL; versionRe = RE_MUSL_VERSION; } else if (RE_GLIBC_NAME_MARKER.matcher(content).find()) { family = GLIBC; versionRe = RE_GLIBC_VERSION; } else { return null; } Matcher m = versionRe.matcher(content); String version = m.find() ? m.group(1) : null; return new LibcInfo(family, version); } static LibcInfo detectFromCommand() { String output = runLibcVersionCommands(); if (output == null) { return null; } return parseCommandOutput(output); } static LibcInfo parseCommandOutput(String output) { if (output == null || output.isEmpty()) { return null; } String[] lines = output.split("\\R+"); String getconfLine = lines.length > 0 ? lines[0] : null; String lddLine1 = lines.length > 1 ? lines[1] : null; String lddLine2 = lines.length > 2 ? lines[2] : null; if (getconfLine != null && RE_GLIBC_GETCONF_MARKER.matcher(getconfLine).find()) { String[] parts = getconfLine.trim().split("\\s+"); String version = parts.length > 1 ? parts[1] : null; return new LibcInfo(GLIBC, version); } if (lddLine1 != null && RE_MUSL_MARKER.matcher(lddLine1).find()) { String version = null; if (lddLine2 != null) { String[] parts = lddLine2.trim().split("\\s+"); if (parts.length > 1) { version = parts[1]; } } return new LibcInfo(MUSL, version); } return null; } /** * Runs {@code getconf GNU_LIBC_VERSION} and {@code ldd --version} via {@code /bin/sh} and returns * the combined stdout/stderr, or {@code null} on failure. 
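* <p>Each command is guarded with {@code || true} so a missing binary does not abort the shell; the
* process is forcibly destroyed if it outlives {@code EXEC_TIMEOUT_MS}.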
*/ private static String runLibcVersionCommands() { ProcessBuilder pb = new ProcessBuilder( "/bin/sh", "-c", "getconf GNU_LIBC_VERSION 2>&1 || true; ldd --version 2>&1 || true"); pb.redirectErrorStream(true); Process process = null; try { process = pb.start(); String output = IOUtils.toString(process.getInputStream(), StandardCharsets.UTF_8); if (!process.waitFor(EXEC_TIMEOUT_MS, TimeUnit.MILLISECONDS)) { process.destroyForcibly(); logger.debug("Libc version command timed out after {}ms", EXEC_TIMEOUT_MS); return null; } return output; } catch (IOException e) { logger.debug("Failed to run libc version command: {}", e.getMessage()); return null; } catch (InterruptedException e) { Thread.currentThread().interrupt(); logger.debug("Interrupted while running libc version command: {}", e.getMessage()); return null; } catch (Exception e) { logger.debug("Unexpected error running libc version command: {}", e.getMessage()); return null; } finally { if (process != null && process.isAlive()) { process.destroyForcibly(); } } } /** Visible for testing. Resets the cache so that {@link #load()} re-detects on next call. */ static synchronized void resetCacheForTesting() { cachedResult = null; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/util/LibcInfo.java ================================================ package net.snowflake.client.internal.util; /** * Immutable libc detection result returned by {@link LibcDetails#load()}. * *
<p>
Both {@code family} and {@code version} may be {@code null} - e.g. on non-Linux platforms * (both null), or on musl when no version source could be parsed (family set, version null). */ public final class LibcInfo { private final String family; private final String version; public LibcInfo(String family, String version) { this.family = family; this.version = version; } public String getFamily() { return family; } public String getVersion() { return version; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/util/MaskedException.java ================================================ package net.snowflake.client.internal.util; /** Wrapper exception that ensures any secret in log output is masked via {@link SecretDetector}. */ public class MaskedException extends RuntimeException { private final Throwable inner; public MaskedException(Throwable inner) { // Avoid capturing an extra stack trace; we'll copy the inner frames below. super(null, null, true, true); this.inner = inner; if (inner != null) { setStackTrace(inner.getStackTrace()); } } @Override public String getMessage() { return SecretDetector.maskSecrets(inner == null ? null : inner.getMessage()); } @Override public String getLocalizedMessage() { return SecretDetector.maskSecrets(inner == null ? null : inner.getLocalizedMessage()); } @Override public String toString() { return SecretDetector.maskSecrets(inner == null ? null : inner.toString()); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/util/OsReleaseDetails.java ================================================ package net.snowflake.client.internal.util; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.regex.Matcher; import java.util.regex.Pattern; import net.snowflake.client.internal.core.Constants; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; /** Parses Linux distribution details from /etc/os-release for telemetry purposes. 
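* <p>Only keys on the {@code ALLOWED_KEYS} allow-list are retained, so arbitrary file content is never
* captured; quoting and inline comments are stripped while parsing.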
*/ public class OsReleaseDetails { private static final SFLogger logger = SFLoggerFactory.getLogger(OsReleaseDetails.class); private static final String DEFAULT_OS_RELEASE_PATH = "/etc/os-release"; private static final Pattern KEY_VALUE_PATTERN = Pattern.compile("^([A-Z0-9_]+)=(.*)$"); private static final Set<String> ALLOWED_KEYS = Collections.unmodifiableSet( new HashSet<>( Arrays.asList( "NAME", "PRETTY_NAME", "ID", "IMAGE_ID", "IMAGE_VERSION", "BUILD_ID", "VERSION", "VERSION_ID"))); private static Map<String, String> cachedOsDetails = null; public static synchronized Map<String, String> load() { if (cachedOsDetails != null) { return cachedOsDetails; } if (Constants.getOS() != Constants.OS.LINUX) { logger.trace("OS details collection skipped: not running on Linux"); cachedOsDetails = Collections.emptyMap(); return cachedOsDetails; } cachedOsDetails = loadFromPath(Paths.get(DEFAULT_OS_RELEASE_PATH)); return cachedOsDetails; } static Map<String, String> loadFromPath(Path path) { try { byte[] bytes = Files.readAllBytes(path); String content = new String(bytes, StandardCharsets.UTF_8); return parse(content); } catch (IOException e) { logger.debug("Failed to read OS details from {}: {}", path, e.getMessage()); return Collections.emptyMap(); } catch (Exception e) { logger.debug("Unexpected error reading OS details from {}: {}", path, e.getMessage()); return Collections.emptyMap(); } } static Map<String, String> parse(String content) { Map<String, String> details = new HashMap<>(); if (content == null || content.isEmpty()) { return details; } for (String line : content.split("\n")) { line = line.trim(); if (line.isEmpty() || line.startsWith("#")) { continue; } Matcher matcher = KEY_VALUE_PATTERN.matcher(line); if (matcher.matches()) { String key = matcher.group(1).trim(); if (ALLOWED_KEYS.contains(key)) { String value = parseValue(matcher.group(2)); details.put(key, value); } } } return Collections.unmodifiableMap(details); } private static String parseValue(String value) { if (value == null) { return null; } value = value.trim(); if (value.isEmpty()) { return value; } char firstChar = value.charAt(0); // Handle quoted values (single or double quotes) if (firstChar == '"' || firstChar == '\'') { int endQuote = value.indexOf(firstChar, 1); return endQuote > 0 ? value.substring(1, endQuote) : value.substring(1).trim(); } // Unquoted value - strip inline comment if present int commentIndex = value.indexOf('#'); return commentIndex > 0 ? value.substring(0, commentIndex).trim() : value; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/util/Platform.java ================================================ package net.snowflake.client.internal.util; /** Enum representing all detectable cloud platforms and identity providers.
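* <p>Each constant wraps the snake_case marker emitted in telemetry, exposed via {@link #getValue()}.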
*/ public enum Platform { IS_AWS_LAMBDA("is_aws_lambda"), IS_AZURE_FUNCTION("is_azure_function"), IS_GCE_CLOUD_RUN_SERVICE("is_gce_cloud_run_service"), IS_GCE_CLOUD_RUN_JOB("is_gce_cloud_run_job"), IS_GITHUB_ACTION("is_github_action"), IS_EC2_INSTANCE("is_ec2_instance"), HAS_AWS_IDENTITY("has_aws_identity"), IS_AZURE_VM("is_azure_vm"), HAS_AZURE_MANAGED_IDENTITY("has_azure_managed_identity"), IS_GCE_VM("is_gce_vm"), HAS_GCP_IDENTITY("has_gcp_identity"); private final String value; Platform(String value) { this.value = value; } public String getValue() { return value; } @Override public String toString() { return value; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/util/PlatformDetector.java ================================================ package net.snowflake.client.internal.util; import java.io.IOException; import java.net.SocketTimeoutException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.core.auth.wif.AwsAttestationService; import net.snowflake.client.internal.core.auth.wif.PlatformDetectionUtil; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPut; public class PlatformDetector { private static final SFLogger logger = SFLoggerFactory.getLogger(PlatformDetector.class); private static final int DEFAULT_DETECTION_TIMEOUT_MS = 200; private static List<String> cachedDetectedPlatforms = null; // AWS platform detection constants private static final String AWS_LAMBDA_TASK_ROOT = "LAMBDA_TASK_ROOT"; // Azure platform detection constants private static final String AZURE_FUNCTIONS_WORKER_RUNTIME = "FUNCTIONS_WORKER_RUNTIME"; private static final String AZURE_FUNCTIONS_EXTENSION_VERSION = "FUNCTIONS_EXTENSION_VERSION"; private static final String AZURE_WEBJOBS_STORAGE = "AzureWebJobsStorage"; private static final String AZURE_IDENTITY_HEADER = "IDENTITY_HEADER"; // GCP platform detection constants private static final String GCP_K_SERVICE = "K_SERVICE"; private static final String GCP_K_REVISION = "K_REVISION"; private static final String GCP_K_CONFIGURATION = "K_CONFIGURATION"; private static final String GCP_CLOUD_RUN_JOB = "CLOUD_RUN_JOB"; private static final String GCP_CLOUD_RUN_EXECUTION = "CLOUD_RUN_EXECUTION"; // GitHub Actions detection constants private static final String GITHUB_ACTIONS = "GITHUB_ACTIONS"; // Default cloud metadata service URLs private static final String DEFAULT_METADATA_SERVICE_BASE_URL = "http://169.254.169.254"; // IPv6 fallback for EC2 IMDS on IPv6-only instances (IPv4 unreachable) private static final String DEFAULT_AWS_METADATA_IPV6_BASE_URL = "http://[fd00:ec2::254]"; private static final String DEFAULT_GCP_METADATA_BASE_URL = "http://metadata.google.internal"; // Metadata service headers and values private static final String AWS_METADATA_TOKEN_TTL_HEADER = "X-aws-ec2-metadata-token-ttl-seconds"; private static final String AWS_METADATA_TOKEN_HEADER = "X-aws-ec2-metadata-token"; private static final String AWS_METADATA_TOKEN_TTL_VALUE = "21600"; private static final
String AZURE_METADATA_HEADER = "Metadata"; private static final String AZURE_METADATA_VALUE = "True"; private static final String GCP_METADATA_FLAVOR_HEADER = "Metadata-Flavor"; private static final String GCP_METADATA_FLAVOR_VALUE = "Google"; // Metadata service endpoints paths private static final String AWS_TOKEN_ENDPOINT_PATH = "/latest/api/token"; private static final String AWS_INSTANCE_IDENTITY_ENDPOINT_PATH = "/latest/dynamic/instance-identity/document"; private static final String AZURE_INSTANCE_ENDPOINT_PATH = "/metadata/instance?api-version=2021-02-01"; private static final String AZURE_IDENTITY_ENDPOINT_PATH = "/metadata/identity/oauth2/token?api-version=2018-02-01&resource="; private static final String GCP_SERVICE_ACCOUNT_ENDPOINT_PATH = "/computeMetadata/v1/instance/service-accounts/default/email"; // Azure managed identity resource URL private static final String AZURE_MANAGEMENT_RESOURCE_URL = "https://management.azure.com"; // Timeout suffix for platform names private static final String TIMEOUT_SUFFIX = "_timeout"; // Instance fields for configurable URLs and environment provider (for testing) private final String awsMetadataBaseUrl; private final String awsMetadataIpv6BaseUrl; private final String azureMetadataBaseUrl; private final String gcpMetadataBaseUrl; private final EnvironmentProvider environmentProvider; // Default constructor for production use public PlatformDetector() { this.awsMetadataBaseUrl = DEFAULT_METADATA_SERVICE_BASE_URL; this.awsMetadataIpv6BaseUrl = DEFAULT_AWS_METADATA_IPV6_BASE_URL; this.azureMetadataBaseUrl = DEFAULT_METADATA_SERVICE_BASE_URL; this.gcpMetadataBaseUrl = DEFAULT_GCP_METADATA_BASE_URL; this.environmentProvider = new SnowflakeEnvironmentProvider(); } /** Constructor for testing purposes - allows overriding both URLs and environment provider */ PlatformDetector( String awsMetadataBaseUrl, String awsMetadataIpv6BaseUrl, String azureMetadataBaseUrl, String gcpMetadataBaseUrl, EnvironmentProvider environmentProvider) { this.awsMetadataBaseUrl = awsMetadataBaseUrl; this.awsMetadataIpv6BaseUrl = awsMetadataIpv6BaseUrl; this.azureMetadataBaseUrl = azureMetadataBaseUrl; this.gcpMetadataBaseUrl = gcpMetadataBaseUrl; this.environmentProvider = environmentProvider; } private enum DetectionState { DETECTED, NOT_DETECTED, TIMEOUT } /** * Get cached platform detection results. If platform detection has not been performed yet, * initializes the cache. * * @return list of detected platform strings */ public static synchronized List<String> getCachedPlatformDetection() { if (cachedDetectedPlatforms != null) { return cachedDetectedPlatforms; } logger.debug( "Platform detection cache miss. Initializing with default timeout: {}ms", DEFAULT_DETECTION_TIMEOUT_MS); PlatformDetector detector = new PlatformDetector(); AwsAttestationService attestationService = new AwsAttestationService(); List<String> result = detectPlatformsAndCache(detector, attestationService); logger.debug("Platform detection cache initialized: {}", result); return result; } static synchronized List<String> detectPlatformsAndCache( PlatformDetector detector, AwsAttestationService attestationService) { List<String> detectedPlatforms = detector.detectPlatforms(DEFAULT_DETECTION_TIMEOUT_MS, attestationService); cachedDetectedPlatforms = Collections.unmodifiableList(detectedPlatforms); return cachedDetectedPlatforms; } /** * Detect all potential platforms that the current environment may be running on. Swallows all * exceptions and returns an empty list if any exception occurs.
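* <p>Environment-variable checks run synchronously; metadata-service probes run in parallel on a
* six-thread pool so the overall wait stays close to the configured timeout.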
* * @param platformDetectionTimeoutMs Timeout value for platform detection requests in * milliseconds. If null, defaults to DEFAULT_DETECTION_TIMEOUT_MS. If 0, skips * network-dependent checks. * @return List of detected platform names. Platforms that timed out will have "_timeout" suffix * appended to their name. Returns empty list if any exception occurs during detection. */ List<String> detectPlatforms( Integer platformDetectionTimeoutMs, AwsAttestationService attestationService) { try { int timeoutMs = platformDetectionTimeoutMs != null ? platformDetectionTimeoutMs : DEFAULT_DETECTION_TIMEOUT_MS; // Run environment-only checks synchronously (no network calls) Map<Platform, DetectionState> platforms = new HashMap<>(); platforms.put(Platform.IS_AWS_LAMBDA, isAwsLambda()); platforms.put(Platform.IS_AZURE_FUNCTION, isAzureFunction()); platforms.put(Platform.IS_GCE_CLOUD_RUN_SERVICE, isGcpCloudRunService()); platforms.put(Platform.IS_GCE_CLOUD_RUN_JOB, isGcpCloudRunJob()); platforms.put(Platform.IS_GITHUB_ACTION, isGithubAction()); if (timeoutMs != 0) { ExecutorService executor = Executors.newFixedThreadPool(6); try { Map<Platform, CompletableFuture<DetectionState>> futures = new HashMap<>(); futures.put( Platform.IS_EC2_INSTANCE, CompletableFuture.supplyAsync(() -> isEc2Instance(timeoutMs), executor)); futures.put( Platform.HAS_AWS_IDENTITY, CompletableFuture.supplyAsync( () -> hasAwsIdentity(attestationService, timeoutMs), executor)); futures.put( Platform.IS_AZURE_VM, CompletableFuture.supplyAsync(() -> isAzureVm(timeoutMs), executor)); futures.put( Platform.HAS_AZURE_MANAGED_IDENTITY, CompletableFuture.supplyAsync(() -> hasAzureManagedIdentity(timeoutMs), executor)); futures.put( Platform.IS_GCE_VM, CompletableFuture.supplyAsync(() -> isGceVm(timeoutMs), executor)); futures.put( Platform.HAS_GCP_IDENTITY, CompletableFuture.supplyAsync(() -> hasGcpIdentity(timeoutMs), executor)); // Wait for all futures to complete with timeout for (Map.Entry<Platform, CompletableFuture<DetectionState>> entry : futures.entrySet()) { try { DetectionState result = entry.getValue().get(timeoutMs, TimeUnit.MILLISECONDS); platforms.put(entry.getKey(), result); } catch (TimeoutException e) { logger.debug("Platform detection timed out for: {}", entry.getKey()); platforms.put(entry.getKey(), DetectionState.TIMEOUT); entry.getValue().cancel(true); } catch (Exception e) { logger.debug("Platform detection failed for {}: {}", entry.getKey(), e.getMessage()); platforms.put(entry.getKey(), DetectionState.NOT_DETECTED); entry.getValue().cancel(true); } } } finally { executor.shutdown(); try { if (!executor.awaitTermination(5, TimeUnit.SECONDS)) { executor.shutdownNow(); } } catch (InterruptedException e) { executor.shutdownNow(); Thread.currentThread().interrupt(); } } } List<String> detectedPlatforms = getDetectedPlatforms(platforms); logger.debug("Platform detection completed.
Detected platforms: {}", detectedPlatforms); return detectedPlatforms; } catch (Exception e) { logger.debug("Platform detection failed with exception: {}", e.getMessage()); return new ArrayList<>(); } } private static List<String> getDetectedPlatforms(Map<Platform, DetectionState> platforms) { List<String> detectedPlatforms = new ArrayList<>(); for (Map.Entry<Platform, DetectionState> entry : platforms.entrySet()) { Platform platform = entry.getKey(); DetectionState state = entry.getValue(); if (state == DetectionState.DETECTED) { detectedPlatforms.add(platform.getValue()); } else if (state == DetectionState.TIMEOUT) { detectedPlatforms.add(platform.getValue() + TIMEOUT_SUFFIX); } // NOT_DETECTED platforms are not included in the result list } return detectedPlatforms; } private static boolean isTimeoutException(Exception e) { if (e instanceof SocketTimeoutException) { return true; } if (e instanceof TimeoutException) { return true; } if (e instanceof SnowflakeSQLException) { String message = e.getMessage(); return message != null && (message.contains("timeout") || message.contains("timed out") || message.contains("elapsed time") || message.toLowerCase().contains("timeout")); } // Check for nested timeout exceptions Throwable cause = e.getCause(); if (cause instanceof SocketTimeoutException || cause instanceof TimeoutException) { return true; } return false; } private static String executeHttpGet(String uri, Map<String, String> headers, int timeoutMs) throws SnowflakeSQLException, IOException { HttpGet request = new HttpGet(uri); // Apply each configured header to the request for (Map.Entry<String, String> entry : headers.entrySet()) { request.setHeader(entry.getKey(), entry.getValue()); } return PlatformDetectionUtil.performPlatformDetectionRequest(request, timeoutMs); } private static String executeHttpPut(String uri, Map<String, String> headers, int timeoutMs) throws SnowflakeSQLException, IOException { HttpPut request = new HttpPut(uri); // Apply each configured header to the request for (Map.Entry<String, String> entry : headers.entrySet()) { request.setHeader(entry.getKey(), entry.getValue()); } return PlatformDetectionUtil.performPlatformDetectionRequest(request, timeoutMs); } // Shared daemon executor for EC2 IMDS probes. Cached pool: threads live briefly // during platform detection, daemon so they never block JVM shutdown, named for // log/thread-dump visibility. private static final ExecutorService EC2_PROBE_EXECUTOR = Executors.newCachedThreadPool( r -> { Thread t = new Thread(r, "snowflake-jdbc-ec2-imds-probe"); t.setDaemon(true); return t; }); private DetectionState isEc2Instance(int timeoutMs) { // Probe IPv4 and IPv6 IMDS endpoints concurrently and return as soon as either // succeeds. On dual-stack hosts the IPv4 probe wins quickly; on IPv6-only EC2 // instances (where the IPv4 link-local address is unreachable) the IPv6 probe // succeeds. We never wait longer than timeoutMs total. long deadlineNanos = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMs); CompletableFuture<DetectionState> ipv4Future = CompletableFuture.supplyAsync( () -> probeEc2Instance(awsMetadataBaseUrl, timeoutMs), EC2_PROBE_EXECUTOR); CompletableFuture<DetectionState> ipv6Future = CompletableFuture.supplyAsync( () -> probeEc2Instance(awsMetadataIpv6BaseUrl, timeoutMs), EC2_PROBE_EXECUTOR); try { DetectionState firstResult = awaitFirstDetectionOrDeadline(ipv4Future, ipv6Future, deadlineNanos); if (firstResult == DetectionState.DETECTED) { return DetectionState.DETECTED; } // First probe did not detect; wait for the other within the remaining budget.
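// A NOT_DETECTED from one address family does not rule out the other: dual-stack hosts answer on
// IPv4 while IPv6-only instances answer only at [fd00:ec2::254].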
DetectionState secondResult = awaitRemaining(ipv4Future, ipv6Future, deadlineNanos); if (secondResult == DetectionState.DETECTED) { return DetectionState.DETECTED; } if (firstResult == DetectionState.TIMEOUT || secondResult == DetectionState.TIMEOUT) { return DetectionState.TIMEOUT; } return DetectionState.NOT_DETECTED; } finally { ipv4Future.cancel(true); ipv6Future.cancel(true); } } private static DetectionState awaitFirstDetectionOrDeadline( CompletableFuture<DetectionState> a, CompletableFuture<DetectionState> b, long deadlineNanos) { while (true) { long remainingNanos = deadlineNanos - System.nanoTime(); if (remainingNanos <= 0) { return DetectionState.TIMEOUT; } try { DetectionState result = (DetectionState) CompletableFuture.anyOf(a, b).get(remainingNanos, TimeUnit.NANOSECONDS); if (result == DetectionState.DETECTED) { return result; } // One probe finished without detecting; return its state so the caller can // wait on the other. return result; } catch (TimeoutException e) { return DetectionState.TIMEOUT; } catch (Exception e) { // A probe threw; treat as NOT_DETECTED and let the caller consider the other. return DetectionState.NOT_DETECTED; } } } private static DetectionState awaitRemaining( CompletableFuture<DetectionState> a, CompletableFuture<DetectionState> b, long deadlineNanos) { CompletableFuture<DetectionState> pending = a.isDone() ? b : a; long remainingNanos = deadlineNanos - System.nanoTime(); if (remainingNanos <= 0) { return DetectionState.TIMEOUT; } try { return pending.get(remainingNanos, TimeUnit.NANOSECONDS); } catch (TimeoutException e) { return DetectionState.TIMEOUT; } catch (Exception e) { return DetectionState.NOT_DETECTED; } } private DetectionState probeEc2Instance(String baseUrl, int timeoutMs) { try { // First try to get IMDSv2 token String token = null; try { String tokenResponse = executeHttpPut( baseUrl + AWS_TOKEN_ENDPOINT_PATH, Collections.singletonMap( AWS_METADATA_TOKEN_TTL_HEADER, AWS_METADATA_TOKEN_TTL_VALUE), timeoutMs); if (tokenResponse != null && !tokenResponse.trim().isEmpty()) { token = tokenResponse.trim(); logger.debug("Successfully obtained IMDSv2 token from {}", baseUrl); } } catch (Exception e) { logger.debug( "Failed to get IMDSv2 token from {}, will try IMDSv1: {}", baseUrl, e.getMessage()); } // Try to get instance identity document Map<String, String> headers = new HashMap<>(); if (token != null) { headers.put(AWS_METADATA_TOKEN_HEADER, token); } String response = executeHttpGet(baseUrl + AWS_INSTANCE_IDENTITY_ENDPOINT_PATH, headers, timeoutMs); if (response != null && !response.trim().isEmpty()) { logger.debug("Successfully detected EC2 instance via metadata service at {}", baseUrl); return DetectionState.DETECTED; } } catch (Exception e) { logger.debug("EC2 instance detection failed at {}: {}", baseUrl, e.getMessage()); if (isTimeoutException(e)) { return DetectionState.TIMEOUT; } } return DetectionState.NOT_DETECTED; } private DetectionState isAwsLambda() { return checkAllEnvironmentVariables(AWS_LAMBDA_TASK_ROOT) ? DetectionState.DETECTED : DetectionState.NOT_DETECTED; } private static DetectionState hasAwsIdentity( AwsAttestationService attestationService, int timeoutMs) { return PlatformDetectionUtil.hasValidAwsIdentityForWif(attestationService, timeoutMs) ?
DetectionState.DETECTED : DetectionState.NOT_DETECTED; } private DetectionState isAzureVm(int timeoutMs) { try { Map<String, String> headers = Collections.singletonMap(AZURE_METADATA_HEADER, AZURE_METADATA_VALUE); String response = executeHttpGet(getAzureMetadataInstanceEndpoint(), headers, timeoutMs); if (response != null && !response.trim().isEmpty()) { logger.debug("Successfully detected Azure VM via metadata service"); return DetectionState.DETECTED; } } catch (Exception e) { logger.debug("Azure VM detection failed: {}", e.getMessage()); if (isTimeoutException(e)) { return DetectionState.TIMEOUT; } } return DetectionState.NOT_DETECTED; } private DetectionState isAzureFunction() { return checkAllEnvironmentVariables( AZURE_FUNCTIONS_WORKER_RUNTIME, AZURE_FUNCTIONS_EXTENSION_VERSION, AZURE_WEBJOBS_STORAGE) ? DetectionState.DETECTED : DetectionState.NOT_DETECTED; } private DetectionState isManagedIdentityAvailableOnAzureVm(int timeoutMs, String resource) { try { String endpoint = getAzureManagedIdentityEndpoint(resource); Map<String, String> headers = Collections.singletonMap(AZURE_METADATA_HEADER, AZURE_METADATA_VALUE); String response = executeHttpGet(endpoint, headers, timeoutMs); if (response != null && !response.trim().isEmpty()) { logger.debug("Successfully detected Azure managed identity"); return DetectionState.DETECTED; } } catch (Exception e) { logger.debug("Azure managed identity detection failed: {}", e.getMessage()); if (isTimeoutException(e)) { return DetectionState.TIMEOUT; } } return DetectionState.NOT_DETECTED; } private DetectionState hasAzureManagedIdentity(int timeoutMs) { // Check environment variable first (for Azure Functions) if (isAzureFunction() == DetectionState.DETECTED) { if (checkAllEnvironmentVariables(AZURE_IDENTITY_HEADER)) { logger.debug("Detected Azure managed identity via IDENTITY_HEADER environment variable"); return DetectionState.DETECTED; } } // Check via metadata service (for Azure VMs) return isManagedIdentityAvailableOnAzureVm(timeoutMs, AZURE_MANAGEMENT_RESOURCE_URL); } private DetectionState isGceVm(int timeoutMs) { try { Map<String, String> headers = Collections.singletonMap(GCP_METADATA_FLAVOR_HEADER, GCP_METADATA_FLAVOR_VALUE); String response = executeHttpGet(getGcpMetadataBaseEndpoint(), headers, timeoutMs); if (response != null) { logger.debug("Successfully detected GCE VM via metadata service"); return DetectionState.DETECTED; } } catch (Exception e) { logger.debug("GCE VM detection failed: {}", e.getMessage()); } return DetectionState.NOT_DETECTED; } private DetectionState isGcpCloudRunService() { return checkAllEnvironmentVariables(GCP_K_SERVICE, GCP_K_REVISION, GCP_K_CONFIGURATION) ? DetectionState.DETECTED : DetectionState.NOT_DETECTED; } private DetectionState isGcpCloudRunJob() { return checkAllEnvironmentVariables(GCP_CLOUD_RUN_JOB, GCP_CLOUD_RUN_EXECUTION) ? DetectionState.DETECTED : DetectionState.NOT_DETECTED; } private DetectionState hasGcpIdentity(int timeoutMs) { try { Map<String, String> headers = Collections.singletonMap(GCP_METADATA_FLAVOR_HEADER, GCP_METADATA_FLAVOR_VALUE); String response = executeHttpGet(getGcpServiceAccountEndpoint(), headers, timeoutMs); if (response != null) { logger.debug("Successfully detected GCP identity via metadata service"); return DetectionState.DETECTED; } } catch (Exception e) { logger.debug("GCP identity detection failed: {}", e.getMessage()); if (isTimeoutException(e)) { return DetectionState.TIMEOUT; } } return DetectionState.NOT_DETECTED; } private DetectionState isGithubAction() { return checkAllEnvironmentVariables(GITHUB_ACTIONS) ?
DetectionState.DETECTED : DetectionState.NOT_DETECTED; } private boolean checkAllEnvironmentVariables(String... variableNames) { if (variableNames == null || variableNames.length == 0) { return false; } for (String varName : variableNames) { String value = environmentProvider.getEnv(varName); if (value == null || value.trim().isEmpty()) { logger.debug("Environment variable {} is not set or empty", varName); return false; } } logger.debug( "All environment variables are present: {}", java.util.Arrays.toString(variableNames)); return true; } private String getAzureMetadataInstanceEndpoint() { return azureMetadataBaseUrl + AZURE_INSTANCE_ENDPOINT_PATH; } private String getAzureManagedIdentityEndpoint(String resource) { return azureMetadataBaseUrl + AZURE_IDENTITY_ENDPOINT_PATH + resource; } private String getGcpMetadataBaseEndpoint() { return gcpMetadataBaseUrl; } private String getGcpServiceAccountEndpoint() { return gcpMetadataBaseUrl + GCP_SERVICE_ACCOUNT_ENDPOINT_PATH; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/util/SFPair.java ================================================ package net.snowflake.client.internal.util; import java.util.Objects; public class SFPair<L, R> { public L left; public R right; public static <L, R> SFPair<L, R> of(L l, R r) { return new SFPair<>(l, r); } private SFPair(L left, R right) { this.left = left; this.right = right; } @Override public boolean equals(Object other) { if (other == null) { return false; } if (other == this) { return true; } if (!(SFPair.class.isInstance(other))) { return false; } SFPair<?, ?> pair2 = (SFPair<?, ?>) other; return Objects.equals(this.left, pair2.left) && Objects.equals(this.right, pair2.right); } @Override public int hashCode() { int result = 0; if (left != null) { result += 37 * left.hashCode(); } if (right != null) { result += right.hashCode(); } return result; } @Override public String toString() { return "[ " + left + ", " + right + " ]"; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/util/SFTimestamp.java ================================================ package net.snowflake.client.internal.util; import java.text.SimpleDateFormat; import java.util.Date; import java.util.TimeZone; public class SFTimestamp { /** * Get current time in UTC in the following format * * @return String representation in this format: yyyy-MM-dd HH:mm:ss */ public static String getUTCNow() { SimpleDateFormat dateFormatGmt = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); dateFormatGmt.setTimeZone(TimeZone.getTimeZone("GMT")); // Time in GMT return dateFormatGmt.format(new Date()); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/util/SecretDetector.java ================================================ package net.snowflake.client.internal.util; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.TextNode; import java.io.IOException; import java.util.Arrays; import java.util.HashSet; import java.util.Iterator; import java.util.Map; import java.util.Set; import java.util.regex.Matcher; import java.util.regex.Pattern; import net.minidev.json.JSONArray; import net.minidev.json.JSONObject; import net.minidev.json.JSONStyle; /** Search for credentials in sql and/or other text */ public class SecretDetector { // "\\s*" refers to >= 0 spaces, "[^']" refers to chars other than
`'` private static final Pattern AWS_KEY_PATTERN = Pattern.compile( "(aws_key_id|aws_secret_key|access_key_id|secret_access_key)(\\s*=\\s*)'([^']+)'", Pattern.CASE_INSENSITIVE); // Used for detecting tokens in serialized JSON private static final Pattern AWS_TOKEN_PATTERN = Pattern.compile( "(accessToken|tempToken|keySecret)\"\\s*:\\s*\"([a-z0-9/+]{32,}={0,2})\"", Pattern.CASE_INSENSITIVE); // Used for detecting OAuth tokens in serialized JSON private static final Pattern OAUTH_JSON_PATTERN = Pattern.compile( "(access_token|refresh_token)\"" + "\\s*:\\s*" + "\"([a-zA-Z0-9!\"#$%&'\\()*+,-./:;<=>?@\\[\\]^_`\\{|\\}~]{3,})\"", Pattern.CASE_INSENSITIVE); // Signature added in the query string of a URL in SAS based authentication // for S3 or Azure Storage requests private static final Pattern SAS_TOKEN_PATTERN = Pattern.compile( "(sig|signature|AWSAccessKeyId|password|passcode)=([a-z0-9%/+]{16,})", Pattern.CASE_INSENSITIVE); // Search for password pattern private static final Pattern PASSWORD_PATTERN = Pattern.compile( "(password|passcode|pwd)" + "([\'\"\\s:=]+)" + "([a-z0-9!\"#$%&'\\()*+,-./:;<=>?@\\[\\]^_`\\{|\\}~]{6,})", Pattern.CASE_INSENSITIVE); // "-----BEGIN PRIVATE KEY----- // #pragma: allowlist secret // [PRIVATE-KEY] // -----END PRIVATE KEY-----" private static final Pattern PRIVATE_KEY_PATTERN = Pattern.compile( "-----BEGIN PRIVATE KEY-----" // #pragma: allowlist secret + "\\\\n([a-z0-9/+=\\\\n]{32,})\\\\n" + "-----END PRIVATE KEY-----", Pattern.MULTILINE | Pattern.CASE_INSENSITIVE); // "privateKeyData": "[PRIVATE-KEY]" private static final Pattern PRIVATE_KEY_DATA_PATTERN = Pattern.compile( "\"privateKeyData\": \"([a-z0-9/+=\\\\n]{10,})\"", Pattern.MULTILINE | Pattern.CASE_INSENSITIVE); private static final Pattern CONNECTION_TOKEN_PATTERN = Pattern.compile( "(token|assertion content)" + "(['\"\\s:=]+)" + "([a-z0-9=/_\\-+]{8,})", Pattern.CASE_INSENSITIVE); private static final Pattern ENCRYPTION_MATERIAL_PATTERN = Pattern.compile("\"encryptionMaterial\"\\s*:\\s*\\{.*?\\}", Pattern.CASE_INSENSITIVE); // only attempt to find secrets in its leading 100Kb SNOW-30961 private static final int MAX_LENGTH = 100 * 1000; // Names must be lower-case: isSensitive() lower-cases its argument before the set lookup, so // mixed-case entries would never match. private static String[] SENSITIVE_NAMES = { "access_key_id", "accesstoken", "aws_key_id", "aws_secret_key", "awsaccesskeyid", "keysecret", "passcode", "password", "privatekey", "privatekeydata", "secret_access_key", "sig", "signature", "temptoken", "oauthclientid", "oauthclientsecret", "refreshtoken" }; private static Set<String> SENSITIVE_NAME_SET = new HashSet<>(Arrays.asList(SENSITIVE_NAMES)); /** * Check whether the name is sensitive * * @param name the name * @return true if the name is sensitive. */ public static boolean isSensitive(String name) { return SENSITIVE_NAME_SET.contains(name.toLowerCase()); } /** * Determine whether a connection parameter should be masked if parameter values are being * printed. Sensitive parameters include passwords and token values. Helper function for * maskParameterValue(). * * @param name of the parameter * @return true if the parameter should be masked */ private static boolean isSensitiveParameter(String name) { Pattern PASSWORD_IN_NAME = Pattern.compile( ".*?(password|pwd|token|proxyuser|privatekey|passcode|proxypassword|private_key_base|oauthClientSecret|oauthClientId).*?", Pattern.CASE_INSENSITIVE); Matcher matcher = PASSWORD_IN_NAME.matcher(name); return isSensitive(name) || matcher.matches(); } /** * Mask sensitive parameter values.
Used currently for connection parameters whose values are to * be recorded for each session. * * @param key parameter key * @param value parameter value, which is sometimes masked * @return the original value if the parameter key does not mark it as sensitive, or return a * masked text if the key is determined to be sensitive. */ public static String maskParameterValue(String key, String value) { if (isSensitiveParameter(key)) { return "****"; } return value; } private static String filterAWSKeys(String text) { if (text == null) { return null; } Matcher matcher = AWS_KEY_PATTERN.matcher(text.length() <= MAX_LENGTH ? text : text.substring(0, MAX_LENGTH)); if (matcher.find()) { return matcher.replaceAll("$1$2'****'"); } return text; } private static String filterSASTokens(String text) { if (text == null) { return null; } Matcher matcher = SAS_TOKEN_PATTERN.matcher( text.length() <= MAX_LENGTH ? text : text.substring(0, MAX_LENGTH)); if (matcher.find()) { return matcher.replaceAll("$1=****"); } return text; } private static String filterPassword(String text) { if (text == null) { return null; } Matcher matcher = PASSWORD_PATTERN.matcher( text.length() <= MAX_LENGTH ? text : text.substring(0, MAX_LENGTH)); if (matcher.find()) { return matcher.replaceAll("$1$2**** "); } return text; } private static String filterConnectionTokens(String text) { if (text == null) { return null; } Matcher matcher = CONNECTION_TOKEN_PATTERN.matcher( text.length() <= MAX_LENGTH ? text : text.substring(0, MAX_LENGTH)); if (matcher.find()) { return matcher.replaceAll("$1$2****"); } return text; } /** * Mask AWS secret in the input string * * @param sql The sql text to mask * @return masked string */ public static String maskAWSSecret(String sql) { return filterAWSKeys(sql); } /** * Masks SAS token(s) in the input string * * @param text Text which may contain SAS token(s) * @return Masked string */ public static String maskSASToken(String text) { return filterSASTokens(text); } /** * Masks any secrets present in the input string. This currently checks for SAS tokens ({@link * SecretDetector#maskSASToken(String)}) and AWS keys ({@link * SecretDetector#maskAWSSecret(String)}). * * @param text Text which may contain secrets * @return Masked string */ public static String maskSecrets(String text) { if (text == null) { return null; } return filterAccessTokens( filterConnectionTokens( filterPassword( filterSASTokens( filterAWSKeys(filterOAuthTokens(filterEncryptionMaterial(text))))))); } /** * Masks any secrets present in the OAuth token request JSON response. * * @param text Text which may contain secrets * @return Masked string */ public static String filterOAuthTokens(String text) { if (text == null) { return null; } Matcher matcher = OAUTH_JSON_PATTERN.matcher( text.length() <= MAX_LENGTH ? text : text.substring(0, MAX_LENGTH)); if (matcher.find()) { return matcher.replaceAll("$1\":\"****\""); } return text; } /** * Filter access tokens that might be buried in JSON.
Currently only used to filter the * scopedCreds passed for XP binary downloads * * @param message the message text which may contain secrets * @return filtered message */ public static String filterAccessTokens(String message) { Matcher awsMatcher = AWS_TOKEN_PATTERN.matcher(message); // aws if (awsMatcher.find()) { message = awsMatcher.replaceAll("$1\":\"XXXX\""); } // azure Matcher azureMatcher = SAS_TOKEN_PATTERN.matcher(message); if (azureMatcher.find()) { message = azureMatcher.replaceAll("$1=XXXX"); } // GCS Matcher gcsMatcher = PRIVATE_KEY_PATTERN.matcher(message); if (gcsMatcher.find()) { message = gcsMatcher.replaceAll( "-----BEGIN PRIVATE KEY-----" // #pragma: allowlist secret + "\\\\nXXXX\\\\n" + "-----END PRIVATE KEY-----"); } gcsMatcher = PRIVATE_KEY_DATA_PATTERN.matcher(message); if (gcsMatcher.find()) { message = gcsMatcher.replaceAll("\"privateKeyData\": \"XXXX\""); } return message; } /** * Filter encryption material that may be buried inside a JSON string. * * @param message the message text which may contain encryption material * @return filtered message */ public static String filterEncryptionMaterial(String message) { if (message == null) { return null; } Matcher matcher = ENCRYPTION_MATERIAL_PATTERN.matcher( message.length() <= MAX_LENGTH ? message : message.substring(0, MAX_LENGTH)); if (matcher.find()) { return matcher.replaceAll("\"encryptionMaterial\" : ****"); } return message; } public static JSONObject maskJsonObject(JSONObject json) { for (Map.Entry<String, Object> entry : json.entrySet()) { if (entry.getValue() instanceof String) { entry.setValue(maskSecrets((String) entry.getValue())); } else if (entry.getValue() instanceof JSONArray) { maskJsonArray((JSONArray) entry.getValue()); } else if (entry.getValue() instanceof JSONObject) { maskJsonObject((JSONObject) entry.getValue()); } } return json; } public static JSONArray maskJsonArray(JSONArray array) { for (int i = 0; i < array.size(); i++) { Object node = array.get(i); if (node instanceof JSONObject) { maskJsonObject((JSONObject) node); } else if (node instanceof JSONArray) { maskJsonArray((JSONArray) node); } else if (node instanceof String) { array.set(i, SecretDetector.maskSecrets((String) node)); } // for other types, we can just leave it untouched } return array; } public static JsonNode maskJacksonNode(JsonNode node) { if (node.isTextual()) { String maskedText = SecretDetector.maskSecrets(node.textValue()); if (!maskedText.equals(node.textValue())) { return new TextNode(maskedText); } } else if (node.isObject()) { ObjectNode objNode = (ObjectNode) node; Iterator<String> fieldNames = objNode.fieldNames(); while (fieldNames.hasNext()) { String fieldName = fieldNames.next(); JsonNode tmpNode = maskJacksonNode(objNode.get(fieldName)); if (objNode.get(fieldName).isTextual()) { objNode.set(fieldName, tmpNode); } } } else if (node.isArray()) { ArrayNode arrayNode = (ArrayNode) node; for (int i = 0; i < arrayNode.size(); i++) { JsonNode tmpNode = maskJacksonNode(arrayNode.get(i)); if (arrayNode.get(i).isTextual()) { arrayNode.set(i, tmpNode); } } } return node; } // This class aims to parse minidev.json's node better public static class SecretDetectorJSONStyle extends JSONStyle { public SecretDetectorJSONStyle() { super(); } public void objectNext(Appendable out) throws IOException { out.append(", "); } public void arrayStop(Appendable out) throws IOException { out.append("] "); } public void arrayNextElm(Appendable out) throws IOException { out.append(", "); } } } ================================================
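Note: the masking entry points above compose as a pipeline: maskSecrets() chains the encryption-material, OAuth, AWS-key, SAS, password, connection-token, and access-token filters, while maskParameterValue() blanks the whole value when the parameter name looks sensitive. A minimal usage sketch follows; it is not part of the driver, the demo class name is hypothetical, and it assumes the internal package is visible on the classpath:

import net.snowflake.client.internal.util.SecretDetector;

/** Hypothetical demo; illustrates SecretDetector behavior only. */
public class SecretDetectorDemo {
  public static void main(String[] args) {
    // Free-text masking: the password value is replaced with ****.
    System.out.println(SecretDetector.maskSecrets("login with password=hunter2secret failed"));
    // Parameter masking: sensitive keys are blanked wholesale, others pass through unchanged.
    System.out.println(SecretDetector.maskParameterValue("privateKey", "MIIEvgIBADAN")); // ****
    System.out.println(SecretDetector.maskParameterValue("warehouse", "ANALYTICS_WH")); // ANALYTICS_WH
  }
}
================================================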
FILE: src/main/java/net/snowflake/client/internal/util/SnowflakeEnvironmentProvider.java ================================================ package net.snowflake.client.internal.util; import net.snowflake.client.internal.jdbc.SnowflakeUtil; /** * Implementation of EnvironmentProvider that delegates to SnowflakeUtil. This wrapper enables * thread-safe testing while maintaining existing behavior. */ public class SnowflakeEnvironmentProvider implements EnvironmentProvider { @Override public String getEnv(String name) { return SnowflakeUtil.systemGetEnv(name); } } ================================================ FILE: src/main/java/net/snowflake/client/internal/util/Stopwatch.java ================================================ package net.snowflake.client.internal.util; /** Stopwatch class used to calculate the time between start and stop. */ public class Stopwatch { private boolean isStarted = false; private long startTime; private long elapsedTime; /** * Starts the Stopwatch. * * @throws IllegalStateException when Stopwatch is already running. */ public void start() { if (isStarted) { throw new IllegalStateException("Stopwatch is already running"); } isStarted = true; startTime = System.nanoTime(); } /** * Stops the Stopwatch. * * @throws IllegalStateException when Stopwatch was not yet started or is already stopped. */ public void stop() { if (!isStarted) { if (startTime == 0) { throw new IllegalStateException("Stopwatch has not been started"); } throw new IllegalStateException("Stopwatch is already stopped"); } isStarted = false; elapsedTime = System.nanoTime() - startTime; } /** Resets the instance to its initial state. */ public void reset() { isStarted = false; startTime = 0; elapsedTime = 0; } /** Restarts the instance. */ public void restart() { isStarted = true; startTime = System.nanoTime(); elapsedTime = 0; } /** * Get the elapsed time (in ms) between the stopTime and startTime. * * @return elapsed milliseconds between stopTime and startTime * @throws IllegalStateException when Stopwatch has not been started yet */ public long elapsedMillis() { return elapsedNanos() / 1_000_000; } /** * Get the elapsed time (in nanoseconds) between the stopTime and startTime. * * @return elapsed nanoseconds between stopTime and startTime * @throws IllegalStateException when Stopwatch has not been started yet */ public long elapsedNanos() { if (isStarted) { return (System.nanoTime() - startTime); } if (startTime == 0) { throw new IllegalStateException("Stopwatch has not been started"); } return elapsedTime; } /** * Get the instance status.
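* <p>Unlike the elapsed-time accessors, this never throws; it simply reports the running flag.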
* * @return true if the stopwatch is running, false otherwise */ public boolean isStarted() { return isStarted; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/util/ThrowingBiCallable.java ================================================ package net.snowflake.client.internal.util; @FunctionalInterface public interface ThrowingBiCallable<A, B, T extends Throwable> { void apply(A a, B b) throws T; } ================================================ FILE: src/main/java/net/snowflake/client/internal/util/ThrowingBiFunction.java ================================================ package net.snowflake.client.internal.util; @FunctionalInterface public interface ThrowingBiFunction<A, B, R, T extends Throwable> { R apply(A a, B b) throws T; } ================================================ FILE: src/main/java/net/snowflake/client/internal/util/ThrowingCallable.java ================================================ package net.snowflake.client.internal.util; @FunctionalInterface public interface ThrowingCallable<A, T extends Throwable> { A call() throws T; } ================================================ FILE: src/main/java/net/snowflake/client/internal/util/ThrowingFunction.java ================================================ package net.snowflake.client.internal.util; @FunctionalInterface public interface ThrowingFunction<A, R, T extends Throwable> { R apply(A a) throws T; } ================================================ FILE: src/main/java/net/snowflake/client/internal/util/ThrowingTriCallable.java ================================================ package net.snowflake.client.internal.util; @FunctionalInterface public interface ThrowingTriCallable<A, B, C, T extends Throwable> { void apply(A a, B b, C c) throws T; } ================================================ FILE: src/main/java/net/snowflake/client/internal/util/ThrowingTriFunction.java ================================================ package net.snowflake.client.internal.util; @FunctionalInterface public interface ThrowingTriFunction<A, B, C, R, T extends Throwable> { R apply(A a, B b, C c) throws T; } ================================================ FILE: src/main/java/net/snowflake/client/internal/util/TimeMeasurement.java ================================================ package net.snowflake.client.internal.util; import net.snowflake.client.internal.jdbc.SnowflakeUtil; /** Class keeping the start and stop time in epoch microseconds. */ public class TimeMeasurement { private long start; private long end; /** * Get the start time as epoch time in microseconds. * * @return the start time as epoch time in microseconds. */ public long getStart() { return start; } /** Set the start time as current epoch time in microseconds. */ public void setStart() { this.start = SnowflakeUtil.getEpochTimeInMicroSeconds(); } /** * Get the stop time as epoch time in microseconds. * * @return the stop time as epoch time in microseconds. */ public long getEnd() { return end; } /** Set the stop time as current epoch time in microseconds. */ public void setEnd() { this.end = SnowflakeUtil.getEpochTimeInMicroSeconds(); } /** * Get the microseconds between the stop and start time. * * @return difference between stop and start in microseconds.
If one of the variables is not * initialized, it returns -1 */ public long getTime() { if (start == 0 || end == 0) { return -1; } return end - start; } } ================================================ FILE: src/main/java/net/snowflake/client/internal/util/VariableTypeArray.java ================================================ package net.snowflake.client.internal.util; public class VariableTypeArray { public int[] intArr; public long[] longArr; public VariableTypeArray(int[] arr1, long[] arr2) { intArr = arr1; longArr = arr2; } } ================================================ FILE: src/main/java/net/snowflake/client/jdbc/SnowflakeDriver.java ================================================ package net.snowflake.client.jdbc; import java.sql.Driver; /** * This is left in to ensure backward compatibility for old customers that are still using the * legacy net.snowflake.client.jdbc.SnowflakeDriver. Ideally, we want to remove this class and have * all customers move to net.snowflake.client.api.driver.SnowflakeDriver. * * @deprecated Use {@link net.snowflake.client.api.driver.SnowflakeDriver} instead */ @Deprecated public class SnowflakeDriver extends net.snowflake.client.api.driver.SnowflakeDriver implements Driver {} ================================================ FILE: src/main/java-fat-jar/net/snowflake/client/internal/log/SFBridgeLogger.java ================================================ package net.snowflake.client.internal.log; import org.slf4j.helpers.MarkerIgnoringBase; /** * SLF4J {@link org.slf4j.Logger} implementation that bridges to the Snowflake JDBC driver's {@link * SFLogger} abstraction. * *
<p>
This class is used by the shaded SLF4J inside the fat jar, so that internal dependency logging * (AWS SDK v2, Azure SDK, Google Cloud SDK) flows through {@link SFLoggerFactory} and ultimately to * JUL (default) or the user's SLF4J binding. * *
<p>
Extends {@link MarkerIgnoringBase} to handle all Marker-accepting overloads by delegating to * the non-Marker versions. */ public class SFBridgeLogger extends MarkerIgnoringBase { private static final long serialVersionUID = 1L; private final transient SFLogger delegate; SFBridgeLogger(String name) { this.name = name; this.delegate = SFLoggerFactory.getLogger(name); } // --- Trace --- @Override public boolean isTraceEnabled() { return delegate.isTraceEnabled(); } @Override public void trace(String msg) { if (delegate.isTraceEnabled()) { delegate.trace(msg, false); } } @Override public void trace(String format, Object arg) { if (delegate.isTraceEnabled()) { delegate.trace(format, arg); } } @Override public void trace(String format, Object arg1, Object arg2) { if (delegate.isTraceEnabled()) { delegate.trace(format, arg1, arg2); } } @Override public void trace(String format, Object... arguments) { if (delegate.isTraceEnabled()) { delegate.trace(format, arguments); } } @Override public void trace(String msg, Throwable t) { if (delegate.isTraceEnabled()) { delegate.trace(msg, t); } } // --- Debug --- @Override public boolean isDebugEnabled() { return delegate.isDebugEnabled(); } @Override public void debug(String msg) { if (delegate.isDebugEnabled()) { delegate.debug(msg, false); } } @Override public void debug(String format, Object arg) { if (delegate.isDebugEnabled()) { delegate.debug(format, arg); } } @Override public void debug(String format, Object arg1, Object arg2) { if (delegate.isDebugEnabled()) { delegate.debug(format, arg1, arg2); } } @Override public void debug(String format, Object... arguments) { if (delegate.isDebugEnabled()) { delegate.debug(format, arguments); } } @Override public void debug(String msg, Throwable t) { if (delegate.isDebugEnabled()) { delegate.debug(msg, t); } } // --- Info --- @Override public boolean isInfoEnabled() { return delegate.isInfoEnabled(); } @Override public void info(String msg) { if (delegate.isInfoEnabled()) { delegate.info(msg, false); } } @Override public void info(String format, Object arg) { if (delegate.isInfoEnabled()) { delegate.info(format, arg); } } @Override public void info(String format, Object arg1, Object arg2) { if (delegate.isInfoEnabled()) { delegate.info(format, arg1, arg2); } } @Override public void info(String format, Object... arguments) { if (delegate.isInfoEnabled()) { delegate.info(format, arguments); } } @Override public void info(String msg, Throwable t) { if (delegate.isInfoEnabled()) { delegate.info(msg, t); } } // --- Warn --- @Override public boolean isWarnEnabled() { return delegate.isWarnEnabled(); } @Override public void warn(String msg) { if (delegate.isWarnEnabled()) { delegate.warn(msg, false); } } @Override public void warn(String format, Object arg) { if (delegate.isWarnEnabled()) { delegate.warn(format, arg); } } @Override public void warn(String format, Object arg1, Object arg2) { if (delegate.isWarnEnabled()) { delegate.warn(format, arg1, arg2); } } @Override public void warn(String format, Object... 
arguments) { if (delegate.isWarnEnabled()) { delegate.warn(format, arguments); } } @Override public void warn(String msg, Throwable t) { if (delegate.isWarnEnabled()) { delegate.warn(msg, t); } } // --- Error --- @Override public boolean isErrorEnabled() { return delegate.isErrorEnabled(); } @Override public void error(String msg) { if (delegate.isErrorEnabled()) { delegate.error(msg, false); } } @Override public void error(String format, Object arg) { if (delegate.isErrorEnabled()) { delegate.error(format, arg); } } @Override public void error(String format, Object arg1, Object arg2) { if (delegate.isErrorEnabled()) { delegate.error(format, arg1, arg2); } } @Override public void error(String format, Object... arguments) { if (delegate.isErrorEnabled()) { delegate.error(format, arguments); } } @Override public void error(String msg, Throwable t) { if (delegate.isErrorEnabled()) { delegate.error(msg, t); } } } ================================================ FILE: src/main/java-fat-jar/net/snowflake/client/internal/log/SFBridgeLoggerFactory.java ================================================ package net.snowflake.client.internal.log; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import org.slf4j.ILoggerFactory; import org.slf4j.Logger; /** * Logger factory for the shaded SLF4J bridge. Returns {@link SFBridgeLogger} instances that * delegate to {@link SFLoggerFactory}. */ public class SFBridgeLoggerFactory implements ILoggerFactory { private final ConcurrentMap loggerMap = new ConcurrentHashMap<>(); @Override public Logger getLogger(String name) { return loggerMap.computeIfAbsent(name, SFBridgeLogger::new); } } ================================================ FILE: src/main/java-fat-jar/net/snowflake/client/internal/log/SFBridgeServiceProvider.java ================================================ package net.snowflake.client.internal.log; import net.snowflake.client.internal.driver.DriverVersionProperties; import org.slf4j.ILoggerFactory; import org.slf4j.IMarkerFactory; import org.slf4j.helpers.BasicMarkerFactory; import org.slf4j.helpers.NOPMDCAdapter; import org.slf4j.spi.MDCAdapter; import org.slf4j.spi.SLF4JServiceProvider; /** * SLF4J service provider that bridges the shaded SLF4J (used internally by AWS SDK v2, Azure SDK, * Google Cloud SDK) to the Snowflake JDBC driver's own logging abstraction ({@link * SFLoggerFactory}). * *
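For illustration, a minimal sketch of how a shaded dependency ends up on this bridge; BridgeUsageSketch and the logger name are hypothetical, and inside the fat jar the org.slf4j imports below would already have been rewritten to the shaded namespace by the shade plugin:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class BridgeUsageSketch {
  public static void main(String[] args) {
    // With SFBridgeServiceProvider installed, this returns a cached
    // SFBridgeLogger from SFBridgeLoggerFactory's ConcurrentMap.
    Logger log = LoggerFactory.getLogger("software.amazon.awssdk.request");

    // A disabled level costs only the isDebugEnabled() check; an enabled one
    // delegates to SFLogger and from there to JUL or the user's SLF4J binding.
    log.debug("Sending request to {}", "https://example.invalid");
  }
}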

================================================
FILE: src/main/java-fat-jar/net/snowflake/client/internal/log/SFBridgeServiceProvider.java
================================================
package net.snowflake.client.internal.log;

import net.snowflake.client.internal.driver.DriverVersionProperties;
import org.slf4j.ILoggerFactory;
import org.slf4j.IMarkerFactory;
import org.slf4j.helpers.BasicMarkerFactory;
import org.slf4j.helpers.NOPMDCAdapter;
import org.slf4j.spi.MDCAdapter;
import org.slf4j.spi.SLF4JServiceProvider;

/**
 * SLF4J service provider that bridges the shaded SLF4J (used internally by AWS SDK v2, Azure SDK,
 * Google Cloud SDK) to the Snowflake JDBC driver's own logging abstraction ({@link
 * SFLoggerFactory}).
 *
 * <p>This class is discovered by the shaded SLF4J's {@link java.util.ServiceLoader} mechanism. The
 * shade plugin rewrites the SPI registration file and all {@code org.slf4j} references in this
 * class to the shaded namespace, so it integrates with the shaded SLF4J — not the user's real
 * SLF4J.
 *
 * <p>This allows internal dependency logging to flow through the driver's {@link SFLoggerFactory},
 * which routes to either JUL (default) or the user's SLF4J binding (when opted in).
 */
public class SFBridgeServiceProvider implements SLF4JServiceProvider {

  private static final String REQUESTED_API_VERSION = DriverVersionProperties.get("slf4j.version");

  private ILoggerFactory loggerFactory;
  private IMarkerFactory markerFactory;
  private MDCAdapter mdcAdapter;

  @Override
  public ILoggerFactory getLoggerFactory() {
    return loggerFactory;
  }

  @Override
  public IMarkerFactory getMarkerFactory() {
    return markerFactory;
  }

  @Override
  public MDCAdapter getMDCAdapter() {
    return mdcAdapter;
  }

  @Override
  public String getRequestedApiVersion() {
    return REQUESTED_API_VERSION;
  }

  @Override
  public void initialize() {
    loggerFactory = new SFBridgeLoggerFactory();
    markerFactory = new BasicMarkerFactory();
    mdcAdapter = new NOPMDCAdapter();
  }
}
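For reference, a minimal sketch of the ServiceLoader discovery described in the javadoc above; the resource path is the standard SLF4J 2.x location (the shade plugin rewrites the org.slf4j prefix in both the path and the imports), and ProviderLookupSketch is a hypothetical name:

import java.util.ServiceLoader;
import org.slf4j.spi.SLF4JServiceProvider;

// The provider is registered in
// META-INF/services/org.slf4j.spi.SLF4JServiceProvider, one fully qualified
// implementation class name per line. SLF4J's LoggerFactory performs
// essentially this lookup at initialization:
public class ProviderLookupSketch {
  public static void main(String[] args) {
    for (SLF4JServiceProvider provider : ServiceLoader.load(SLF4JServiceProvider.class)) {
      provider.initialize(); // builds the logger/marker factories and MDC adapter
      System.out.println(
          provider.getClass().getName() + " -> API " + provider.getRequestedApiVersion());
    }
  }
}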

================================================
FILE: src/main/javadoc/licenses.html
================================================


Open Source License Disclosure

The following notices are required by licensors of software used in the Snowflake JDBC.

Table Of Contents

All Licenses

Common Licenses


All Licenses

  1. AWS SDK for Java

    https://github.com/aws/aws-sdk-java
    This project is licensed under Apache License 2.0, which is included below.

  2. FasterXML jackson-core

    https://github.com/FasterXML/jackson-core
    This project is licensed under Apache License 2.0, which is included below.

  3. FasterXML jackson-databind

    https://github.com/FasterXML/jackson-databind
    This project is licensed under Apache License 2.0, which is included below.

  4. Google APIs Client Library for Java

    https://github.com/googleapis/google-api-java-client
    This project is licensed under Apache License 2.0, which is included below.

  5. Google Auth Library

    https://github.com/googleapis/google-auth-library-java
    Copyright 2014, Google Inc. All rights reserved.
    This project is licensed under BSD New license, which is included below.

  6. Google Cloud Storage Client for Java

    https://github.com/googleapis/java-storage
    This project is licensed under Apache License 2.0, which is included below.

  7. Guava: Google Core Libraries for Java

    https://github.com/google/guava
    This project is licensed under Apache License 2.0, which is included below.

  8. Google HTTP Client Library for Java

    https://github.com/googleapis/google-http-java-client
    This project is licensed under Apache License 2.0, which is included below.

  9. Microsoft Azure Storage SDK for Java

    https://github.com/Azure/azure-storage-java
    Copyright Microsoft Corporation
    This project is licensed under Apache License 2.0, which is included below.

  10. Nimbus JOSE+JWT

    https://bitbucket.org/connect2id/nimbus-jose-jwt/wiki/Home
    This project is licensed under Apache License 2.0, which is included below.

  11. Apache Commons IO

    https://github.com/apache/commons-io
    This project is licensed under Apache License 2.0, which is included below.

  12. Java Servlet

    https://javaee.github.io/servlet-spec/
    Copyright (c) 1997-2018 Oracle and/or its affiliates. All rights reserved.
    Copyright 2004 The Apache Software Foundation
    This project is licensed under CDDL + GPLv2 with classpath exception, which are included below.

  13. Apache Arrow

    https://github.com/apache/arrow
    This project is licensed under Apache License 2.0, which is included below.

  14. Apache HttpComponents Client

    https://github.com/apache/httpcomponents-client
    This project is licensed under Apache License 2.0, which is included below.

  15. Apache Tika

    https://github.com/apache/tika
    This project is licensed under Apache License 2.0, which is included below.

  16. Bouncy Castle Crypto

    https://www.bouncycastle.org/java.html
    Copyright (c) 2000 - 2020 The Legion of the Bouncy Castle Inc. (https://www.bouncycastle.org)

    Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

  17. jsoup: Java HTML Parser

    https://github.com/jhy/jsoup/
    Copyright (c) 2009-2020 Jonathan Hedley
    This project is licensed under MIT License, which is included below.

  18. Java Native Access (JNA)

    https://github.com/java-native-access/jna
    This project (JNA) is licensed under Apache License 2.0 (starting with JNA version 4.0.0), which is included below.

Common Licenses

Apache License

Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
  1. Definitions
    "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
    "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
    "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
    "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
    "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
    "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
    "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
    "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
    "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
    "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
  2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
  3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
  4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
    (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and
    (b) You must cause any modified files to carry prominent notices stating that You changed the files; and
    (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
    (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
    You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
  5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
  6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
  7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
  8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
  9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS

THE 3-CLAUSE BSD LICENSE

Note: This license has also been called the "New BSD License" or "Modified BSD License". See also the 2-clause BSD License.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
  2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
  3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

MIT License

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

GNU LESSER GENERAL PUBLIC LICENSE

Version 2.1, February 1999
https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html

    Copyright (C) 1991, 1999 Free Software Foundation, Inc.
    51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
    Everyone is permitted to copy and distribute verbatim copies
    of this license document, but changing it is not allowed.

    [This is the first released version of the Lesser GPL.  It also counts
     as the successor of the GNU Library Public License, version 2, hence
     the version number 2.1.]
    

Preamble

The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users.

This license, the Lesser General Public License, applies to some specially designated software packages--typically libraries--of the Free Software Foundation and other authors who decide to use it. You can use it too, but we suggest you first think carefully about whether this license or the ordinary General Public License is the better strategy to use in any particular case, based on the explanations below.

When we speak of free software, we are referring to freedom of use, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish); that you receive source code or can get it if you want it; that you can change the software and use pieces of it in new free programs; and that you are informed that you can do these things.

To protect your rights, we need to make restrictions that forbid distributors to deny you these rights or to ask you to surrender these rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library or if you modify it.

For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link other code with the library, you must provide complete object files to the recipients, so that they can relink them with the library after making changes to the library and recompiling it. And you must show them these terms so they know their rights.

We protect your rights with a two-step method: (1) we copyright the library, and (2) we offer you this license, which gives you legal permission to copy, distribute and/or modify the library.

To protect each distributor, we want to make it very clear that there is no warranty for the free library. Also, if the library is modified by someone else and passed on, the recipients should know that what they have is not the original version, so that the original author's reputation will not be affected by problems that might be introduced by others.

Finally, software patents pose a constant threat to the existence of any free program. We wish to make sure that a company cannot effectively restrict the users of a free program by obtaining a restrictive license from a patent holder. Therefore, we insist that any patent license obtained for a version of the library must be consistent with the full freedom of use specified in this license.

Most GNU software, including some libraries, is covered by the ordinary GNU General Public License. This license, the GNU Lesser General Public License, applies to certain designated libraries, and is quite different from the ordinary General Public License. We use this license for certain libraries in order to permit linking those libraries into non-free programs.

When a program is linked with a library, whether statically or using a shared library, the combination of the two is legally speaking a combined work, a derivative of the original library. The ordinary General Public License therefore permits such linking only if the entire combination fits its criteria of freedom. The Lesser General Public License permits more lax criteria for linking other code with the library.

We call this license the "Lesser" General Public License because it does Less to protect the user's freedom than the ordinary General Public License. It also provides other free software developers Less of an advantage over competing non-free programs. These disadvantages are the reason we use the ordinary General Public License for many libraries. However, the Lesser license provides advantages in certain special circumstances.

For example, on rare occasions, there may be a special need to encourage the widest possible use of a certain library, so that it becomes a de-facto standard. To achieve this, non-free programs must be allowed to use the library. A more frequent case is that a free library does the same job as widely used non-free libraries. In this case, there is little to gain by limiting the free library to free software only, so we use the Lesser General Public License.

In other cases, permission to use a particular library in non-free programs enables a greater number of people to use a large body of free software. For example, permission to use the GNU C Library in non-free programs enables many more people to use the whole GNU operating system, as well as its variant, the GNU/Linux operating system.

Although the Lesser General Public License is Less protective of the users' freedom, it does ensure that the user of a program that is linked with the Library has the freedom and the wherewithal to run that program using a modified version of the Library.

The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, whereas the latter must be combined with the library in order to run.

TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION

0. This License Agreement applies to any software library or other program which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Lesser General Public License (also called "this License"). Each licensee is addressed as "you".

A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables.

The "Library", below, refers to any such software library or work which has been distributed under these terms. A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".)

"Source code" for a work means the preferred form of the work for making modifications to it. For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library.

Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does.

1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library.

You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee.

2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions:

  • a) The modified work must itself be a software library.
  • b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change.
  • c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License.
  • d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful.

    (For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.)

These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it.

Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library.

In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License.

3. You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices.

Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy.

This option is useful when you wish to copy part of the code of the Library into a program that is not a library.

4. You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange.

If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code.

5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License.

However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables.

When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law.

If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.)

Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself.

6. As an exception to the Sections above, you may also combine or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications.

You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. Also, you must do one of these things:

  • a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. (It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.)
  • b) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (1) uses at run time a copy of the library already present on the user's computer system, rather than copying library functions into the executable, and (2) will operate properly with a modified version of the library, if the user installs one, as long as the modified version is interface-compatible with the version that the work was made with.
  • c) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution.
  • d) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place.
  • e) Verify that the user has already received a copy of these materials or that you have already sent this user a copy.

For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the materials to be distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable.

It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute.

7. You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things:

  • a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above.
  • b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work.

8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance.

9. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it.

10. Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties with this License.

11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library.

If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances.

It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice.

This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License.

12. If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License.

13. The Free Software Foundation may publish revised and/or new versions of the Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.

Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation.

14. If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally.

NO WARRANTY

15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

END OF TERMS AND CONDITIONS

COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1

  1. Definitions.
    1.1. "Contributor" means each individual or entity that creates or contributes to the creation of Modifications.
    1.2. "Contributor Version" means the combination of the Original Software, prior Modifications used by a Contributor (if any), and the Modifications made by that particular Contributor.
    1.3. "Covered Software" means (a) the Original Software, or (b) Modifications, or (c) the combination of files containing Original Software with files containing Modifications, in each case including portions thereof.
    1.4. "Executable" means the Covered Software in any form other than Source Code.
    1.5. "Initial Developer" means the individual or entity that first makes Original Software available under this License.
    1.6. "Larger Work" means a work which combines Covered Software or portions thereof with code not governed by the terms of this License.
    1.7. "License" means this document.
    1.8. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently acquired, any and all of the rights conveyed herein.
    1.9. "Modifications" means the Source Code and Executable form of any of the following:
    A. Any file that results from an addition to, deletion from or modification of the contents of a file containing Original Software or previous Modifications;
    B. Any new file that contains any part of the Original Software or previous Modification; or
    C. Any new file that is contributed or otherwise made available under the terms of this License.
    1.10. "Original Software" means the Source Code and Executable form of computer software code that is originally released under this License.
    1.11. "Patent Claims" means any patent claim(s), now owned or hereafter acquired, including without limitation, method, process, and apparatus claims, in any patent Licensable by grantor.
    1.12. "Source Code" means (a) the common form of computer software code in which modifications are made and (b) associated documentation included in or with such code.
    1.13. "You" (or "Your") means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, "You" includes any entity which controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity.
  2. License Grants.
    2.1. The Initial Developer Grant.
    Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, the Initial Developer hereby grants You a world-wide, royalty-free, non-exclusive license:
    (a) under intellectual property rights (other than patent or trademark) Licensable by Initial Developer, to use, reproduce, modify, display, perform, sublicense and distribute the Original Software (or portions thereof), with or without Modifications, and/or as part of a Larger Work; and
    (b) under Patent Claims infringed by the making, using or selling of Original Software, to make, have made, use, practice, sell, and offer for sale, and/or otherwise dispose of the Original Software (or portions thereof).
    (c) The licenses granted in Sections 2.1(a) and (b) are effective on the date Initial Developer first distributes or otherwise makes the Original Software available to a third party under the terms of this License.
    (d) Notwithstanding Section 2.1(b) above, no patent license is granted: (1) for code that You delete from the Original Software, or (2) for infringements caused by: (i) the modification of the Original Software, or (ii) the combination of the Original Software with other software or devices.
    2.2. Contributor Grant.
    Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license:
    (a) under intellectual property rights (other than patent or trademark) Licensable by Contributor to use, reproduce, modify, display, perform, sublicense and distribute the Modifications created by such Contributor (or portions thereof), either on an unmodified basis, with other Modifications, as Covered Software and/or as part of a Larger Work; and
    (b) under Patent Claims infringed by the making, using, or selling of Modifications made by that Contributor either alone and/or in combination with its Contributor Version (or portions of such combination), to make, use, sell, offer for sale, have made, and/or otherwise dispose of: (1) Modifications made by that Contributor (or portions thereof); and (2) the combination of Modifications made by that Contributor with its Contributor Version (or portions of such combination).
    (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective on the date Contributor first distributes or otherwise makes the Modifications available to a third party.
    (d) Notwithstanding Section 2.2(b) above, no patent license is granted: (1) for any code that Contributor has deleted from the Contributor Version; (2) for infringements caused by: (i) third party modifications of Contributor Version, or (ii) the combination of Modifications made by that Contributor with other software (except as part of the Contributor Version) or other devices; or (3) under Patent Claims infringed by Covered Software in the absence of Modifications made by that Contributor.
  3. Distribution Obligations.
    3.1. Availability of Source Code.
    Any Covered Software that You distribute or otherwise make available in Executable form must also be made available in Source Code form and that Source Code form must be distributed only under the terms of this License. You must include a copy of this License with every copy of the Source Code form of the Covered Software You distribute or otherwise make available. You must inform recipients of any such Covered Software in Executable form as to how they can obtain such Covered Software in Source Code form in a reasonable manner on or through a medium customarily used for software exchange.
    3.2. Modifications.
    The Modifications that You create or to which You contribute are governed by the terms of this License. You represent that You believe Your Modifications are Your original creation(s) and/or You have sufficient rights to grant the rights conveyed by this License.
    3.3. Required Notices.
    You must include a notice in each of Your Modifications that identifies You as the Contributor of the Modification. You may not remove or alter any copyright, patent or trademark notices contained within the Covered Software, or any notices of licensing or any descriptive text giving attribution to any Contributor or the Initial Developer.
    3.4. Application of Additional Terms.
    You may not offer or impose any terms on any Covered Software in Source Code form that alters or restricts the applicable version of this License or the recipients' rights hereunder. You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, you may do so only on Your own behalf, and not on behalf of the Initial Developer or any Contributor. You must make it absolutely clear that any such warranty, support, indemnity or liability obligation is offered by You alone, and You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of warranty, support, indemnity or liability terms You offer.
    3.5. Distribution of Executable Versions.
    You may distribute the Executable form of the Covered Software under the terms of this License or under the terms of a license of Your choice, which may contain terms different from this License, provided that You are in compliance with the terms of this License and that the license for the Executable form does not attempt to limit or alter the recipient's rights in the Source Code form from the rights set forth in this License. If You distribute the Covered Software in Executable form under a different license, You must make it absolutely clear that any terms which differ from this License are offered by You alone, not by the Initial Developer or Contributor. You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of any such terms You offer.
    3.6. Larger Works.
    You may create a Larger Work by combining Covered Software with other code not governed by the terms of this License and distribute the Larger Work as a single product. In such a case, You must make sure the requirements of this License are fulfilled for the Covered Software.
  4. Versions of the License.
    4.1. New Versions
    Oracle is the initial license steward and may publish revised and/or new versions of this License from time to time. Each version will be given a distinguishing version number. Except as provided in Section 4.3, no one other than the license steward has the right to modify this License.
    4.2. Effect of New Versions.
    You may always continue to use, distribute or otherwise make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. If the Initial Developer includes a notice in the Original Software prohibiting it from being distributed or otherwise made available under any subsequent version of the License, You must distribute and make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. Otherwise, You may also choose to use, distribute or otherwise make the Covered Software available under the terms of any subsequent version of the License published by the license steward.
    4.3. Modified Versions.
    When You are an Initial Developer and You want to create a new license for Your Original Software, You may create and use a modified version of this License if You: (a) rename the license and remove any references to the name of the license steward (except to note that the license differs from this License); and (b) otherwise make it clear that the license contains terms which differ from this License.
  5. DISCLAIMER OF WARRANTY.
    COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
  6. TERMINATION.
    6.1. This License and the rights granted hereunder will terminate automatically if You fail to comply with terms herein and fail to cure such breach within 30 days of becoming aware of the breach. Provisions which, by their nature, must remain in effect beyond the termination of this License shall survive.
    6.2. If You assert a patent infringement claim (excluding declaratory judgment actions) against Initial Developer or a Contributor (the Initial Developer or Contributor against whom You assert such claim is referred to as "Participant") alleging that the Participant Software (meaning the Contributor Version where the Participant is a Contributor or the Original Software where the Participant is the Initial Developer) directly or indirectly infringes any patent, then any and all rights granted directly or indirectly to You by such Participant, the Initial Developer (if the Initial Developer is not the Participant) and all Contributors under Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice from Participant terminate prospectively and automatically at the expiration of such 60 day notice period, unless if within such 60 day period You withdraw Your claim with respect to the Participant Software against such Participant either unilaterally or pursuant to a written agreement with Participant.
    6.3. If You assert a patent infringement claim against Participant alleging that the Participant Software directly or indirectly infringes any patent where such claim is resolved (such as by license or settlement) prior to the initiation of patent infringement litigation, then the reasonable value of the licenses granted by such Participant under Sections 2.1 or 2.2 shall be taken into account in determining the amount or value of any payment or license.
    6.4. In the event of termination under Sections 6.1 or 6.2 above, all end user licenses that have been validly granted by You or any distributor hereunder prior to termination (excluding licenses granted to You by any distributor) shall survive termination.
  7. LIMITATION OF LIABILITY.
    UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
  8. U.S. GOVERNMENT END USERS.
    The Covered Software is a "commercial item," as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer software" (as that term is defined at 48 C.F.R. § 252.227-7014(a)(1)) and "commercial computer software documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government End Users acquire Covered Software with only those rights set forth herein. This U.S. Government Rights clause is in lieu of, and supersedes, any other FAR, DFAR, or other clause or provision that addresses Government rights in computer software under this License.
  9. MISCELLANEOUS.
    This License represents the complete agreement concerning subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. This License shall be governed by the law of the jurisdiction specified in a notice contained within the Original Software (except to the extent applicable law, if any, provides otherwise), excluding such jurisdiction's conflict-of-law provisions. Any litigation relating to this License shall be subject to the jurisdiction of the courts located in the jurisdiction and venue specified in a notice contained within the Original Software, with the losing party responsible for costs, including, without limitation, court costs and reasonable attorneys' fees and expenses. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not apply to this License. You agree that You alone are responsible for compliance with the United States export administration regulations (and the export control laws and regulation of any other countries) when You use, distribute or otherwise make available any Covered Software.
  10. RESPONSIBILITY FOR CLAIMS.
    As between Initial Developer and the Contributors, each party is responsible for claims and damages arising, directly or indirectly, out of its utilization of rights under this License and You agree to work with Initial Developer and Contributors to distribute such responsibility on an equitable basis. Nothing herein is intended or shall be deemed to constitute any admission of liability.

------------------------------------------------------------------------

NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL)

The code released under the CDDL shall be governed by the laws of the State of California (excluding conflict-of-law provisions). Any litigation relating to this License shall be subject to the jurisdiction of the Federal Courts of the Northern District of California and the state courts of the State of California, with venue lying in Santa Clara County, California.

GNU GENERAL PUBLIC LICENSE

Version 2, June 1991
https://www.gnu.org/licenses/old-licenses/gpl-2.0.html

      Copyright (C) 1989, 1991 Free Software Foundation, Inc.
      51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA

      Everyone is permitted to copy and distribute verbatim copies
      of this license document, but changing it is not allowed.
      

Preamble

The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too.

When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things.

To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it.

For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights.

We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software.

Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations.

Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all.

The precise terms and conditions for copying, distribution and modification follow.

TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION

0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you".

Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does.

1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program.

You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee.

2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions:

a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License.
c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.)

These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it.

Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program.

In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License.

3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following:

a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.)

The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable.

If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code.

4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance.

5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it.

6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License.

7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program.

If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances.

It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice.

This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License.

8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License.

9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.

Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation.

10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally.

NO WARRANTY

11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

END OF TERMS AND CONDITIONS

================================================ FILE: src/main/javadoc/overview.html ================================================
Open Source License Disclosure - Snowflake JDBC

================================================ FILE: src/main/resources/META-INF/com.boomi.Dependencies ================================================
extended_security

================================================ FILE: src/main/resources/META-INF/services/java.nio.file.spi.FileTypeDetector ================================================
net.snowflake.client.internal.core.FileTypeDetector

================================================ FILE: src/main/resources/META-INF/services/java.sql.Driver ================================================
net.snowflake.client.api.driver.SnowflakeDriver

================================================ FILE: src/main/resources/net/snowflake/client/jdbc/jdbc_error_messages.properties ================================================
#
# Exception messages.
# These error codes are associated with symbols in ErrorCode.java
#
200001=JDBC driver internal error: {0}.
200002=JDBC driver not able to connect to Snowflake. Error code: {0}, Message: {1}.
200003=JDBC driver Interrupt exception encountered.
200004=Copy command does not support compression type {0}.
200005=JDBC driver: query has been canceled by user.
200006=Copy command does not recognize compression type {0}.
200007=Error encountered when listing file: {0}.
200008=File not found.\nCause: specified file does not exist: {0}\nUsage: to specify files, use "file://" prefix followed by path to files. A path can be relative or absolute. Example: put file:///tmp/bla* @table.
200009=File not found.\nCause: specified file is a directory: {0}\nUsage: to specify files, use "file://" prefix followed by path to files. A path can be relative or absolute. Example: put file:///tmp/bla* @table.
200010=Connection property specified more than once: {0}
200011=Missing user name.
200012=Missing password.
200013=S3 operation failed: Operation={0}, Error type={1}, Error code={2}, Error message={3}, Request id={4}, Extended request id={5}
200014=Maximum size for a query result has been exceeded.
200015=JDBC driver encountered communication error. Message: {0}.
200016=JDBC driver encountered IO error. Message: {0}.
200017=Download location is not a directory: {0}.
200018=Data type not supported for binding: {0}.
200019=Client side sorting is not supported when the result is split into multiple chunks.
200020=AWS operation failed: Operation={0}, Error message={1}
200021=Invalid SQL text: {0}
200022=Bad response received from server. Response body: {0}. Possible causes are network issues or server side internal errors.
200023=Array bind for values with mixed types not supported. Previous type: {0}, Current type: {1} at Column: {2}, Row: {3}.
200024=Statement is closed.
200025=Statement running a query already.
200026=Missing server URL.
200027=Number of session parameters has exceeded the supported limit ({0}).
200028=Missing required connection property: {0}.
200029=Invalid connection URL: {0}.
200030=Statement parameter specified more than once: {0}.
200031=Number of statement parameters has exceeded the supported limit: ({0}).
200032=Invalid column index: {0}
200033=Invalid parameter value type: {0}, expected type: {1}.
200034=Row not found.
200035=Feature unsupported: {0}.
200036=Invalid state: {0}. A potential cause is closing of a connection when a query is still running.
200037=Result set has been closed.
200038=Cannot convert value in the driver from type:{0} to type:{1}, value={2}.
200039=The specified authenticator is not accepted by your Snowflake account configuration. Please contact your local system administrator to get the correct URL to use.
200040=Identity provider configuration for the specified authenticator does not match with your Snowflake account configuration (destination URL mismatch). Please contact your local system administrator.
200041=Connection property value {0} is invalid. Value specified by user: {1}, returned by server: {2}.
200042=Statement ''{0}'' cannot be executed using current API.
200043=Statement ''{0}'' prepare failed. Result set metadata is missing.
200044=Azure storage operation failed: Operation={0}, Error code={1}, Status code={2}, Error message={3}, Extended error info={4}
200045=Private key provided is invalid or not supported: {0}
200046=Failed to generate jwt token.
200047=Invalid parameter value {0} for parameter type {1}.
200048=Query executed successfully, but the first statement returned an update count (result set required).
200049=Update executed successfully, but the first statement returned a result set (update count required).
200050=The number of child result ID's received ({0}) was different from the number of child statement types ({1}).
200052=Connection has been closed.
200053=Non-Fatal incident: {0}.
200054=Index value of {0} is out of bounds. Acceptable range: 1 to {1}.
200055=No valid metadata available for binding parameters.
200056=Invalid application name of {1}. Name must start with a letter and contain no special characters.
200057=Only allowable value is application name.
200058=Value is too large to be stored as integer at batch index {0}. Use executeLargeBatch() instead.
200059=Invalid Connect String: {0}.
200061=GCS operation failed: Operation={0}, Error code={1}, Message={2}, Reason={3}
200062=Authentication timed out.
200063=Invalid data - Cannot be parsed and converted to structured type.
200064=The values for 'disableOCSPChecks' and 'insecureMode' must be identical.
200065=Too many files to download as stream
200066=JDBC driver file operation error while performing stage upload.
200067=JDBC driver file operation error while performing stage download.
200068=Error during OAuth Authorization Code authentication: {0}
200069=Error during OAuth Client Credentials authentication: {0}
200070=Error during obtaining OAuth access token using refresh token: {0}
200071=Error during Workload Identity authentication: {0}
200072=MFA enabled in Okta is not supported with this authenticator type. Please use 'externalbrowser' instead or a different authentication method.
200073=Invalid certificate revocation mode: {0}.
200074=OCSP and certificate revocation mode checks cannot be enabled at the same time.
253000=Error during file transfer: {0}
253003=Error during file upload to stage: {0}
253002=Error during file download to stage: {0}
254000=Error during OCSP validation: {0}
290000=Error during HTTP request: {0}
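The {0}-style placeholders in these templates follow java.text.MessageFormat conventions, which is why literal quotes are doubled in entries such as 200042. As a minimal stand-alone sketch of how one entry can be loaded and rendered (the error-code value and arguments below are invented for illustration; the driver's own lookup code is not part of this extract):

import java.text.MessageFormat;
import java.util.ResourceBundle;

public class ErrorMessageDemo {
  public static void main(String[] args) {
    // Loads jdbc_error_messages.properties from the driver jar on the classpath.
    ResourceBundle bundle =
        ResourceBundle.getBundle("net.snowflake.client.jdbc.jdbc_error_messages");
    // 200002=JDBC driver not able to connect to Snowflake. Error code: {0}, Message: {1}.
    String template = bundle.getString("200002");
    // Arguments here are illustrative only.
    System.out.println(MessageFormat.format(template, 390100, "incorrect username or password"));
  }
}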
================================================ FILE: src/main/resources/net/snowflake/client/jdbc/jdbc_error_messages_fr.properties ================================================
#
# Exception messages
#

================================================ FILE: src/main/resources/net/snowflake/client/jdbc/version.properties ================================================
version=${project.version}
slf4j.version=${slf4j.version}

================================================ FILE: src/main/resources-fat-jar/META-INF/services/org.slf4j.spi.SLF4JServiceProvider ================================================
net.snowflake.client.internal.log.SFBridgeServiceProvider
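The ${...} tokens in version.properties above are Maven resource-filtering placeholders substituted at build time. A minimal sketch of reading the filtered file back at runtime, assuming the built driver jar is on the classpath (illustrative only; the class name is hypothetical and the driver's own version lookup is not shown in this extract):

import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

public class VersionProbe {
  public static void main(String[] args) throws IOException {
    try (InputStream in =
        VersionProbe.class.getResourceAsStream(
            "/net/snowflake/client/jdbc/version.properties")) {
      if (in == null) {
        throw new IllegalStateException("version.properties not on classpath");
      }
      Properties props = new Properties();
      props.load(in);
      // After Maven filtering, "version" holds the packaged driver version
      // rather than the literal ${project.version} token.
      System.out.println("driver version: " + props.getProperty("version"));
    }
  }
}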
================================================ FILE: src/test/java/com/snowflake/client/jdbc/SnowflakeDriverIT.java ================================================
package com.snowflake.client.jdbc;

import static org.junit.jupiter.api.Assertions.assertTrue;

import java.sql.Connection;
import java.sql.SQLException;
import net.snowflake.client.AbstractDriverIT;
import net.snowflake.client.category.TestTags;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;

@Tag(TestTags.CONNECTION)
public class SnowflakeDriverIT extends AbstractDriverIT {
  @Test
  public void testConnection() throws SQLException {
    Connection con = getConnection(DONT_INJECT_SOCKET_TIMEOUT, null, false, true);
    con.close();
    assertTrue(con.isClosed());
    con.close(); // ensure no exception
  }
}

================================================ FILE: src/test/java/net/snowflake/client/.gitignore ================================================
ingest

================================================ FILE: src/test/java/net/snowflake/client/AbstractDriverIT.java ================================================
package net.snowflake.client;

import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

import com.google.common.base.Strings;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.file.Paths;
import java.sql.Connection;
import java.sql.Date;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.sql.Timestamp;
import java.util.Calendar;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.TimeZone;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.annotation.Nullable;

/** Base test class with common constants, data structures and methods */
public class AbstractDriverIT {
  public static final String DRIVER_CLASS = "net.snowflake.client.api.driver.SnowflakeDriver";
  public static final String DRIVER_CLASS_COM = "com.snowflake.client.jdbc.SnowflakeDriver";
  public static final int DONT_INJECT_SOCKET_TIMEOUT = 0;

  // data files
  protected static final String TEST_DATA_FILE = "orders_100.csv";
  protected static final String TEST_DATA_FILE_2 = "orders_101.csv";
  protected static final String[] fileNames = {TEST_DATA_FILE, TEST_DATA_FILE_2};

  private static Logger logger = Logger.getLogger(AbstractDriverIT.class.getName());

  protected final int ERROR_CODE_BIND_VARIABLE_NOT_ALLOWED_IN_VIEW_OR_UDF_DEF = 2210;

  private static String getConnPropValueFromEnv(String connectionType, String propKey) {
    String envKey = String.format("SNOWFLAKE_%s_%s", connectionType, propKey);
    return TestUtil.systemGetEnv(envKey);
  }

  public static Map<String, String> getConnectionParameters(String accountName) {
    return getConnectionParameters(accountName, "TEST");
  }

  /**
   * getConnectionParameters is to obtain connection params from Env
   *
   * @param accountName the connection could be different with different accounts
   * @param connectionType use connectionType is either "TEST"(default) or "ORG"
   * @return properties' key-value map -- In the connection json files the parameters' format is
   *     like below and these key/values have been flattened to a bunch of env variables of these
   *     junit tests.
   *     <pre>
   *     "testconnection": {
   *         "SNOWFLAKE_TEST_ACCOUNT": "...",
   *         ...
   *         "SNOWFLAKE_TEST_ROLE": ".."
   *     },
   *     </pre>
   */
  public static Map<String, String> getConnectionParameters(
      String accountName, String connectionType) {
    Map<String, String> params = new HashMap<>();
    String account;
    String host;
    if (accountName == null) {
      account = getConnPropValueFromEnv(connectionType, "ACCOUNT");
      host = getConnPropValueFromEnv(connectionType, "HOST");
    } else {
      account = accountName;
      // By default, the test will run against reg deployment.
      // If developer needs to run in IntelliJ, you can set this env as ".dev.local"
      String deployment = getConnPropValueFromEnv(connectionType, "DEPLOYMENT");
      if (Strings.isNullOrEmpty(deployment)) {
        deployment = ".reg.local";
      }
      host = accountName.trim() + deployment;
    }
    assertThat(
        "set SNOWFLAKE_TEST_ACCOUNT environment variable to the account name.",
        !Strings.isNullOrEmpty(account));
    params.put("account", account);
    if (Strings.isNullOrEmpty(host)) {
      host = account + ".snowflakecomputing.com";
    }
    assertThat(
        "set SNOWFLAKE_TEST_HOST environment variable to the host name.",
        !Strings.isNullOrEmpty(host));
    params.put("host", host);
    String protocol = getConnPropValueFromEnv(connectionType, "PROTOCOL");
    String ssl;
    if ("http".equals(protocol)) {
      ssl = "off";
    } else {
      ssl = "on";
    }
    params.put("ssl", ssl);
    String user = getConnPropValueFromEnv(connectionType, "USER");
    assertThat("set SNOWFLAKE_TEST_USER environment variable.", !Strings.isNullOrEmpty(user));
    params.put("user", user);
    String privateKeyFile = getConnPropValueFromEnv(connectionType, "PRIVATE_KEY_FILE");
    if (!Strings.isNullOrEmpty(privateKeyFile)) {
      String workspace = System.getenv("WORKSPACE");
      if (workspace != null) {
        params.put(
            "private_key_file", java.nio.file.Paths.get(workspace, privateKeyFile).toString());
      } else {
        params.put("private_key_file", privateKeyFile);
      }
      params.put("authenticator", "SNOWFLAKE_JWT");
      String privateKeyPwd = getConnPropValueFromEnv(connectionType, "PRIVATE_KEY_PWD");
      if (!Strings.isNullOrEmpty(privateKeyPwd)) {
        params.put("private_key_pwd", privateKeyPwd);
      }
    } else {
      String password = getConnPropValueFromEnv(connectionType, "PASSWORD");
      if (!Strings.isNullOrEmpty(password)) {
        params.put("password", password);
      } else {
        throw new IllegalStateException(
            "Neither SNOWFLAKE_TEST_PRIVATE_KEY_FILE nor SNOWFLAKE_TEST_PASSWORD environment"
                + " variable is set. Please configure one of them for authentication.");
      }
    }
    String port = getConnPropValueFromEnv(connectionType, "PORT");
    if (Strings.isNullOrEmpty(port)) {
      if ("on".equals(ssl)) {
        port = "443";
      } else {
        port = "80";
      }
    }
    assertThat("set SNOWFLAKE_TEST_PORT environment variable.", !Strings.isNullOrEmpty(port));
    params.put("port", port);
    String database = getConnPropValueFromEnv(connectionType, "DATABASE");
    assertThat(
        "set SNOWFLAKE_TEST_DATABASE environment variable.", !Strings.isNullOrEmpty(database));
    params.put("database", database);
    String schema = getConnPropValueFromEnv(connectionType, "SCHEMA");
    assertThat("set SNOWFLAKE_TEST_SCHEMA environment variable.", !Strings.isNullOrEmpty(schema));
    params.put("schema", schema);
    String role = getConnPropValueFromEnv(connectionType, "ROLE");
    assertThat("set SNOWFLAKE_TEST_ROLE environment variable.", !Strings.isNullOrEmpty(role));
    params.put("role", role);
    String warehouse = getConnPropValueFromEnv(connectionType, "WAREHOUSE");
    assertThat(
        "set SNOWFLAKE_TEST_WAREHOUSE environment variable.", !Strings.isNullOrEmpty(warehouse));
    params.put("warehouse", warehouse);
    params.put("uri", String.format("jdbc:snowflake://%s:%s", host, port));
    String adminUser = getConnPropValueFromEnv(connectionType, "ADMIN_USER");
    params.put("adminUser", adminUser);
    String adminPassword = getConnPropValueFromEnv(connectionType, "ADMIN_PASSWORD");
    params.put("adminPassword", adminPassword);
    String ssoUser = getConnPropValueFromEnv(connectionType, "SSO_USER");
    params.put("ssoUser", ssoUser);
    String ssoPassword = getConnPropValueFromEnv(connectionType, "SSO_PASSWORD");
    params.put("ssoPassword", ssoPassword);
    return params;
  }

  public static Map<String, String> getConnectionParameters() {
    return getConnectionParameters(null);
  }

  /**
   * Gets a connection with default session parameter settings, but tunable query api version and
   * socket timeout setting
   *
   * @param paramProperties connection properties
   * @return Connection a database connection
   * @throws SQLException raised if any error occurs
   */
  public static Connection getConnection(Properties paramProperties) throws SQLException {
    return getConnection(DONT_INJECT_SOCKET_TIMEOUT, paramProperties, false, false);
  }

  /**
   * Gets a connection with custom account name, but otherwise default settings
   *
   * @return Connection a database connection
   * @throws SQLException raised if any error occurs
   */
  public static Connection getConnection(String accountName) throws SQLException {
    return getConnection(DONT_INJECT_SOCKET_TIMEOUT, null, false, false, accountName);
  }

  /**
   * Gets a connection with custom account name and some property set, useful for testing property
   * on specific account
   *
   * @return Connection a database connection
   * @throws SQLException raised if any error occurs
   */
  public static Connection getConnection(String accountName, Properties paramProperties)
      throws SQLException {
    return getConnection(DONT_INJECT_SOCKET_TIMEOUT, paramProperties, false, false, accountName);
  }

  /**
   * Gets a connection with default settings
   *
   * @return Connection a database connection
   * @throws SQLException raised if any error occurs
   */
  public static Connection getConnection() throws SQLException {
    return getConnection(DONT_INJECT_SOCKET_TIMEOUT, null, false, false);
  }

  /**
   * Gets a connection with default session parameter settings, but tunable query api version and
   * socket timeout setting
   *
   * @param injectSocketTimeout number of seconds to inject in connection
   * @return Connection a database connection
   * @throws SQLException raised if any error occurs
   */
  public static Connection getConnection(int injectSocketTimeout) throws SQLException {
    return getConnection(injectSocketTimeout, null, false, false);
  }

  /**
   * Gets a connection with Snowflake admin
   *
   * @return Connection a database connection
   * @throws SQLException raised if any error occurs
   */
  protected static Connection getSnowflakeAdminConnection() throws SQLException {
    return getConnection(DONT_INJECT_SOCKET_TIMEOUT, null, true, false);
  }

  /**
   * Gets a connection with Snowflake admin
   *
   * @param paramProperties connection properties
   * @return Connection a database connection
   * @throws SQLException raised if any error occurs
   */
  protected static Connection getSnowflakeAdminConnection(Properties paramProperties)
      throws SQLException {
    return getConnection(DONT_INJECT_SOCKET_TIMEOUT, paramProperties, true, false);
  }

  /**
   * Gets a connection in same way as function below but with default account (gotten from
   * environment variables)
   *
   * @param injectSocketTimeout
   * @param paramProperties
   * @param isAdmin
   * @param usesCom
   * @return
   * @throws SQLException
   */
  public static Connection getConnection(
      int injectSocketTimeout, Properties paramProperties, boolean isAdmin, boolean usesCom)
      throws SQLException {
    return getConnection(injectSocketTimeout, paramProperties, isAdmin, usesCom, null);
  }

  /**
   * Gets a connection for the custom session parameter settings and tunable query api version and
   * socket timeout setting
   *
   * @param injectSocketTimeout number of seconds to inject in connection
   * @param paramProperties connection properties
   * @param isAdmin is Snowflake admin user?
   * @param usesCom uses com.snowflake instead of net.snowflake?
   * @return Connection database connection
   * @throws SQLException raised if any error occurs
   */
  public static Connection getConnection(
      int injectSocketTimeout,
      @Nullable Properties paramProperties,
      boolean isAdmin,
      boolean usesCom,
      String accountName)
      throws SQLException {
    // Load Snowflake JDBC class
    String driverClass = DRIVER_CLASS;
    if (usesCom) {
      driverClass = DRIVER_CLASS_COM;
    }
    try {
      Class.forName(driverClass);
    } catch (Exception e) {
      logger.log(Level.SEVERE, "Cannot find Driver", e);
      throw new RuntimeException(e.getCause());
    }
    Map<String, String> params = getConnectionParameters(accountName);
    // build connection properties
    Properties properties = new Properties();
    if (isAdmin) {
      assertThat(
          "set SNOWFLAKE_TEST_ADMIN_USER environment variable.",
          !Strings.isNullOrEmpty(params.get("adminUser")));
      assertThat(
          "set SNOWFLAKE_TEST_ADMIN_PASSWORD environment variable.",
          !Strings.isNullOrEmpty(params.get("adminPassword")));
      properties.put("user", params.get("adminUser"));
      properties.put("password", params.get("adminPassword"));
      properties.put("role", "accountadmin");
      properties.put("account", "snowflake");
    } else {
      properties.put("user", params.get("user"));
      properties.put("role", params.get("role"));
      properties.put("account", params.get("account"));
      if (!Strings.isNullOrEmpty(params.get("private_key_file"))) {
        properties.put("private_key_file", params.get("private_key_file"));
        properties.put("authenticator", params.get("authenticator"));
        if (!Strings.isNullOrEmpty(params.get("private_key_pwd"))) {
          properties.put("private_key_pwd", params.get("private_key_pwd"));
        }
      } else if (!Strings.isNullOrEmpty(params.get("password"))) {
        properties.put("password", params.get("password"));
      }
    }
    properties.put("db", params.get("database"));
    properties.put("schema", params.get("schema"));
    properties.put("warehouse", params.get("warehouse"));
    properties.put("ssl", params.get("ssl"));
    properties.put("internal", Boolean.TRUE.toString()); // TODO: do we need this?
    properties.put("insecureMode", false); // use OCSP for all tests.
    if (injectSocketTimeout > 0) {
      properties.put("injectSocketTimeout", String.valueOf(injectSocketTimeout));
    }
    // Set the session parameter properties
    if (paramProperties != null) {
      for (Map.Entry<Object, Object> entry : paramProperties.entrySet()) {
        properties.put(entry.getKey(), entry.getValue());
      }
    }
    String uri =
        properties.getProperty("host") != null && properties.getProperty("port") != null
            ? String.format(
                "jdbc:snowflake://%s%s:%s",
                properties.getProperty("protocol"),
                properties.getProperty("host"),
                properties.getProperty("port"))
            : params.get("uri");
    return DriverManager.getConnection(uri, properties);
  }

  /**
   * Close SQL Objects
   *
   * @param resultSet a result set object
   * @param statement a statement object
   * @param connection a connection
   * @throws SQLException raised if any error occurs
   */
  public void closeSQLObjects(ResultSet resultSet, Statement statement, Connection connection)
      throws SQLException {
    if (resultSet != null) {
      resultSet.close();
    }
    if (statement != null) {
      statement.close();
    }
    if (connection != null) {
      connection.close();
    }
  }

  /**
   * Close SQL Objects
   *
   * @param statement a statement object
   * @param connection a connection
   * @throws SQLException raised if any error occurs
   */
  public void closeSQLObjects(Statement statement, Connection connection) throws SQLException {
    if (statement != null) {
      statement.close();
    }
    if (connection != null) {
      connection.close();
    }
  }

  /**
   * Get a full path of the file in Resource
   *
   * @param fileName a file name
   * @return a full path name of the file
   */
  public static String getFullPathFileInResource(String fileName) {
    ClassLoader classLoader = AbstractDriverIT.class.getClassLoader();
    URL url = classLoader.getResource(fileName);
    if (url != null) {
      try {
        return Paths.get(url.toURI()).toAbsolutePath().toString();
      } catch (URISyntaxException ex) {
        throw new RuntimeException("Unable to get absolute path: " + fileName);
      }
    } else {
      throw new RuntimeException("No file is found: " + fileName);
    }
  }

  public static void connectAndVerifySimpleQuery(Properties props) throws SQLException {
    try (Connection con =
            DriverManager.getConnection(
                String.format("jdbc:snowflake://%s:%s", props.get("host"), props.get("port")),
                props);
        Statement stmt = con.createStatement();
        ResultSet rs = stmt.executeQuery("select 1")) {
      assertTrue(rs.next());
      assertEquals(1, rs.getInt(1));
    }
  }

  protected static Timestamp buildTimestamp(
      int year, int month, int day, int hour, int minute, int second, int fractionInNanoseconds) {
    Calendar cal = Calendar.getInstance();
    cal.set(year, month, day, hour, minute, second);
    Timestamp ts = new Timestamp(cal.getTime().getTime());
    ts.setNanos(fractionInNanoseconds);
    return ts;
  }

  protected static Date buildDate(int year, int month, int day) {
    Calendar cal = Calendar.getInstance();
    cal.set(year, month, day, 0, 0, 0);
    cal.set(Calendar.MILLISECOND, 0);
    return new Date(cal.getTime().getTime());
  }

  protected static Date buildDateWithTZ(int year, int month, int day, TimeZone tz) {
    Calendar cal = Calendar.getInstance();
    cal.setTimeZone(tz);
    cal.set(year, month, day, 0, 0, 0);
    cal.set(Calendar.MILLISECOND, 0);
    return new Date(cal.getTime().getTime());
  }
}
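As the javadoc on getConnectionParameters describes, every connection parameter is resolved from flattened SNOWFLAKE_TEST_* environment variables, so a subclass test never builds a URL by hand. A minimal sketch of such a subclass, assuming those environment variables are exported as described above (the class name is hypothetical and not part of the repository):

package net.snowflake.client;

import static org.junit.jupiter.api.Assertions.assertEquals;

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import org.junit.jupiter.api.Test;

public class SimpleQueryIT extends AbstractDriverIT {
  @Test
  public void selectsOne() throws SQLException {
    // getConnection() resolves account/host/user/etc. from SNOWFLAKE_TEST_* env vars.
    try (Connection con = getConnection();
        Statement stmt = con.createStatement();
        ResultSet rs = stmt.executeQuery("select 1")) {
      rs.next();
      assertEquals(1, rs.getInt(1));
    }
  }
}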
================================================ FILE: src/test/java/net/snowflake/client/AssumptionUtils.java ================================================
package net.snowflake.client;

import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty;
import static org.junit.jupiter.api.Assumptions.assumeFalse;
import static org.junit.jupiter.api.Assumptions.assumeTrue;

import net.snowflake.client.internal.core.Constants;

public class AssumptionUtils {
  public static void assumeNotRunningOnGithubActionsMac() {
    assumeFalse(isRunningOnGithubActions() && Constants.getOS() == Constants.OS.MAC);
  }

  public static void assumeNotRunningOnJava8() {
    assumeFalse(systemGetProperty("java.version").startsWith("1.8.0"));
  }

  public static void assumeNotRunningOnJava21() {
    assumeFalse(systemGetProperty("java.version").startsWith("21."));
  }

  public static void assumeRunningOnGithubActions() {
    assumeTrue(isRunningOnGithubActions());
  }

  public static boolean isRunningOnGithubActions() {
    return TestUtil.systemGetEnv("GITHUB_ACTIONS") != null;
  }

  public static void assumeRunningOnLinuxMac() {
    assumeTrue(Constants.getOS() == Constants.OS.LINUX || Constants.getOS() == Constants.OS.MAC);
  }
}

================================================ FILE: src/test/java/net/snowflake/client/SystemPropertyOverrider.java ================================================
package net.snowflake.client;

public class SystemPropertyOverrider {
  private final String propertyName;
  private final String oldValue;

  public SystemPropertyOverrider(String propertyName, String newValue) {
    this.propertyName = propertyName;
    this.oldValue = System.getProperty(propertyName);
    System.setProperty(propertyName, newValue);
  }

  public void rollback() {
    if (oldValue != null) {
      System.setProperty(propertyName, oldValue);
    } else {
      System.clearProperty(propertyName);
    }
  }
}
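SystemPropertyOverrider is a small save/restore helper: the constructor records the old value and sets the new one, and rollback() puts things back. A hedged usage sketch; the property name and value below are chosen purely for illustration and are not taken from the repository's tests:

package net.snowflake.client;

public class OverriderDemo {
  public static void main(String[] args) {
    SystemPropertyOverrider overrider =
        new SystemPropertyOverrider("networkaddress.cache.ttl", "0");
    try {
      // ... code that must observe the overridden system property ...
    } finally {
      // Restores the previous value, or clears the property if it was unset.
      overrider.rollback();
    }
  }
}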
================================================ FILE: src/test/java/net/snowflake/client/TestUtil.java ================================================
package net.snowflake.client;

import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.matchesPattern;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;

import java.sql.SQLException;
import java.sql.Statement;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.function.Consumer;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import net.snowflake.client.internal.core.SFException;
import net.snowflake.client.internal.jdbc.SnowflakeUtil;
import net.snowflake.client.internal.log.SFLogger;
import net.snowflake.client.internal.log.SFLoggerFactory;
import org.hamcrest.MatcherAssert;

public class TestUtil {
  private static final SFLogger logger = SFLoggerFactory.getLogger(TestUtil.class);

  private static final Pattern QUERY_ID_REGEX =
      Pattern.compile("[a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12}");

  public static final String GENERATED_SCHEMA_PREFIX = "GENERATED_";
  public static final String ESCAPED_GENERATED_SCHEMA_PREFIX =
      GENERATED_SCHEMA_PREFIX.replaceAll("_", "\\\\_");

  private static final List<String> schemaGeneratedInTestsPrefixes =
      Arrays.asList(
          GENERATED_SCHEMA_PREFIX,
          "GITHUB_", // created by JDBC CI jobs before tests
          "GH_JOB_", // created by other drivers tests e.g. Python
          "JDBCPERF", // created in JDBC perf tests
          "SCHEMA_" // created by other drivers tests e.g. Scala
          );

  public static boolean isSchemaGeneratedInTests(String schema) {
    return schemaGeneratedInTestsPrefixes.stream().anyMatch(prefix -> schema.startsWith(prefix));
  }

  /**
   * Util function to assert a piece will throw exception and assert on the error code
   *
   * @param errorCode expected error code
   * @param testCode the code that will run and throws exception
   */
  public static void assertSFException(int errorCode, TestRunInterface testCode) {
    SFException e = assertThrows(SFException.class, testCode::run);
    assertThat(e.getVendorCode(), is(errorCode));
  }

  /** Functional interface used to run a piece of code which throws SFException */
  @FunctionalInterface
  public interface TestRunInterface {
    void run() throws SFException;
  }

  /**
   * System.getenv wrapper. If System.getenv raises a SecurityException, it is ignored and returns
   * null.
   *
   * @deprecated This method should be replaced by SnowflakeUtil.systemGetEnv.
   *     <p>This is replicated from SnowflakeUtil.systemGetEnv, because the old driver doesn't have
   *     that function for the tests to use it. Replace this function call with
   *     SnowflakeUtil.systemGetEnv when it is available.
   * @param env the environment variable name.
   * @return the environment variable value if set, otherwise null.
   */
  @Deprecated
  public static String systemGetEnv(String env) {
    try {
      return System.getenv(env);
    } catch (SecurityException ex) {
      logger.debug(
          "Failed to get environment variable {}. Security exception raised: {}",
          env,
          ex.getMessage());
    }
    return null;
  }

  public static void assertValidQueryId(String queryId) {
    assertNotNull(queryId);
    MatcherAssert.assertThat(
        "Expecting " + queryId + " is a valid UUID", queryId, matchesPattern(QUERY_ID_REGEX));
  }

  /**
   * Creates schema and deletes it at the end of the passed function execution
   *
   * @param statement statement
   * @param schemaName name of schema to create and delete after lambda execution
   * @param action action to execute when schema was created
   * @throws Exception when any error occurred
   */
  public static void withSchema(Statement statement, String schemaName, ThrowingRunnable action)
      throws Exception {
    try {
      statement.execute("CREATE OR REPLACE SCHEMA " + schemaName);
      action.run();
    } finally {
      statement.execute("DROP SCHEMA " + schemaName);
    }
  }

  /**
   * Creates schema and deletes it at the end of the passed function execution
   *
   * @param statement statement
   * @param action action to execute when schema was created
   * @throws Exception when any error occurred
   */
  public static void withRandomSchema(
      Statement statement, ThrowingConsumer<String, Exception> action) throws Exception {
    String customSchema =
        GENERATED_SCHEMA_PREFIX + SnowflakeUtil.randomAlphaNumeric(5).toUpperCase();
    try {
      statement.execute("CREATE OR REPLACE SCHEMA " + customSchema);
      action.accept(customSchema);
    } finally {
      statement.execute("DROP SCHEMA " + customSchema);
    }
  }

  public interface MethodRaisesSQLException {
    void run() throws SQLException;
  }

  public static void expectSnowflakeLoggedFeatureNotSupportedException(MethodRaisesSQLException f) {
    SQLException ex = assertThrows(SQLException.class, f::run);
    assertEquals(ex.getClass().getSimpleName(), "SnowflakeLoggedFeatureNotSupportedException");
  }

  /**
   * Compares two string values both values are cleaned of whitespaces
   *
   * @param expected expected value
   * @param actual actual value
   */
  public static void assertEqualsIgnoringWhitespace(String expected, String actual) {
    assertEquals(expected.replaceAll("\\s+", ""), actual.replaceAll("\\s+", ""));
  }

  public static String randomTableName(String jiraId) {
    return ("TEST_" + (jiraId != null ? jiraId : "") + "_" + UUID.randomUUID())
        .replaceAll("-", "_");
  }

  public static List<Integer> randomIntList(int length, int modulo) {
    return new Random()
        .ints()
        .limit(length)
        .mapToObj(i -> Math.abs(i) % modulo)
        .collect(Collectors.toList());
  }

  public static <T> CompletableFuture<Void> asyncAssert(
      ExecutorService executor, Callable<T> supplier, Consumer<T> assertion) {
    return CompletableFuture.supplyAsync(
            () -> {
              try {
                return supplier.call();
              } catch (Exception e) {
                throw new RuntimeException(e);
              }
            },
            executor)
        .thenAccept(assertion);
  }

  // this allows exact metadata searches instead of pattern matching
  public static String escapeUnderscore(String input) {
    return input.replace("_", "\\_");
  }
}
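withRandomSchema creates a throwaway GENERATED_-prefixed schema, runs the lambda, and drops the schema even when the body throws. A minimal sketch of using it from a test, assuming the SNOWFLAKE_TEST_* environment variables required by AbstractDriverIT are configured (the class and table names below are invented for illustration):

package net.snowflake.client;

import java.sql.Connection;
import java.sql.Statement;
import org.junit.jupiter.api.Test;

public class SchemaScopedIT extends AbstractDriverIT {
  @Test
  public void createsTableInThrowawaySchema() throws Exception {
    try (Connection con = getConnection();
        Statement stmt = con.createStatement()) {
      // The schema is created before the lambda runs and dropped afterwards.
      TestUtil.withRandomSchema(
          stmt,
          schemaName -> stmt.execute("CREATE TABLE " + schemaName + ".T1 (C1 INT)"));
    }
  }
}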
================================================ FILE: src/test/java/net/snowflake/client/ThrowingConsumer.java ================================================
package net.snowflake.client;

@FunctionalInterface
public interface ThrowingConsumer<A, T extends Throwable> {
  void accept(A parameter) throws T;
}

================================================ FILE: src/test/java/net/snowflake/client/ThrowingRunnable.java ================================================
package net.snowflake.client;

@FunctionalInterface
public interface ThrowingRunnable {
  void run() throws Exception;
}

================================================ FILE: src/test/java/net/snowflake/client/annotations/DontRunOnGithubActions.java ================================================
package net.snowflake.client.annotations;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.junit.jupiter.api.condition.DisabledIfEnvironmentVariable;

@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
@DisabledIfEnvironmentVariable(named = "GITHUB_ACTIONS", matches = ".*")
public @interface DontRunOnGithubActions {}

================================================ FILE: src/test/java/net/snowflake/client/annotations/DontRunOnJava21.java ================================================
package net.snowflake.client.annotations;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.junit.jupiter.api.condition.DisabledOnJre;
import org.junit.jupiter.api.condition.JRE;

@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
@DisabledOnJre(JRE.JAVA_21)
public @interface DontRunOnJava21 {}

================================================ FILE: src/test/java/net/snowflake/client/annotations/DontRunOnJava8.java ================================================
package net.snowflake.client.annotations;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.junit.jupiter.api.condition.DisabledOnJre;
import org.junit.jupiter.api.condition.JRE;

@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
@DisabledOnJre(JRE.JAVA_8)
public @interface DontRunOnJava8 {}

================================================ FILE: src/test/java/net/snowflake/client/annotations/DontRunOnJenkins.java ================================================
package net.snowflake.client.annotations;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.junit.jupiter.api.condition.DisabledIfEnvironmentVariable;

@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
@DisabledIfEnvironmentVariable(named = "SNOWFLAKE_TEST_HOST", matches = ".*.reg.local")
public @interface DontRunOnJenkins {}

================================================ FILE: src/test/java/net/snowflake/client/annotations/DontRunOnTestaccount.java ================================================
package net.snowflake.client.annotations;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.junit.jupiter.api.condition.DisabledIfEnvironmentVariable;

@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
@DisabledIfEnvironmentVariable(named = "SNOWFLAKE_TEST_ACCOUNT", matches = "testaccount")
public @interface DontRunOnTestaccount {}

================================================ FILE: src/test/java/net/snowflake/client/annotations/DontRunOnThinJar.java ================================================
package net.snowflake.client.annotations;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.junit.jupiter.api.condition.DisabledIfEnvironmentVariable;

@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
@DisabledIfEnvironmentVariable(named = "ADDITIONAL_MAVEN_PROFILE", matches = "-Dthin-jar")
public @interface DontRunOnThinJar {}

================================================ FILE: src/test/java/net/snowflake/client/annotations/DontRunOnWindows.java ================================================
package net.snowflake.client.annotations;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.junit.jupiter.api.condition.DisabledOnOs;
import org.junit.jupiter.api.condition.OS;

@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
@DisabledOnOs(OS.WINDOWS)
public @interface DontRunOnWindows {}

================================================ FILE: src/test/java/net/snowflake/client/annotations/RunOnAWS.java ================================================
package net.snowflake.client.annotations;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.junit.jupiter.api.condition.EnabledIfEnvironmentVariable;

@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
@EnabledIfEnvironmentVariable(named = "CLOUD_PROVIDER", matches = "(?i)AWS(?-i)")
public @interface RunOnAWS {}

================================================ FILE: src/test/java/net/snowflake/client/annotations/RunOnAzure.java ================================================
package net.snowflake.client.annotations;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.junit.jupiter.api.condition.EnabledIfEnvironmentVariable;

@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
@EnabledIfEnvironmentVariable(named = "CLOUD_PROVIDER", matches = "(?i)Azure(?-i)")
public @interface RunOnAzure {}

================================================ FILE: src/test/java/net/snowflake/client/annotations/RunOnGCP.java ================================================
package net.snowflake.client.annotations;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.junit.jupiter.api.condition.EnabledIfEnvironmentVariable;

@Target({ElementType.METHOD, ElementType.TYPE})
@Retention(RetentionPolicy.RUNTIME)
@EnabledIfEnvironmentVariable(named = "CLOUD_PROVIDER", matches = "(?i)GCP(?-i)")
public @interface RunOnGCP {}

================================================ FILE: src/test/java/net/snowflake/client/annotations/RunOnGithubActionsNotMac.java ================================================
package net.snowflake.client.annotations;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.junit.jupiter.api.condition.DisabledOnOs;
import org.junit.jupiter.api.condition.EnabledIfEnvironmentVariable;
import org.junit.jupiter.api.condition.OS;

@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
@EnabledIfEnvironmentVariable(named = "GITHUB_ACTIONS", matches = ".*")
@DisabledOnOs(OS.MAC)
public @interface RunOnGithubActionsNotMac {}

================================================ FILE: src/test/java/net/snowflake/client/annotations/RunOnLinux.java ================================================
package net.snowflake.client.annotations;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.junit.jupiter.api.condition.EnabledOnOs;
import org.junit.jupiter.api.condition.OS;

@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
@EnabledOnOs({OS.LINUX, OS.AIX})
public @interface RunOnLinux {}

================================================ FILE: src/test/java/net/snowflake/client/annotations/RunOnLinuxOrMac.java ================================================
package net.snowflake.client.annotations;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.junit.jupiter.api.condition.EnabledOnOs;
import org.junit.jupiter.api.condition.OS;

@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
@EnabledOnOs({OS.MAC, OS.LINUX, OS.AIX})
public @interface RunOnLinuxOrMac {}

================================================ FILE: src/test/java/net/snowflake/client/annotations/RunOnMac.java ================================================
package net.snowflake.client.annotations;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.junit.jupiter.api.condition.EnabledOnOs;
import org.junit.jupiter.api.condition.OS;

@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
@EnabledOnOs(OS.MAC)
public @interface RunOnMac {}

================================================ FILE: src/test/java/net/snowflake/client/annotations/RunOnTestaccountNotOnGithubActions.java ================================================
package net.snowflake.client.annotations;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.junit.jupiter.api.condition.DisabledIfEnvironmentVariable;
import org.junit.jupiter.api.condition.EnabledIfEnvironmentVariable;

@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
@EnabledIfEnvironmentVariable(named = "SNOWFLAKE_TEST_ACCOUNT", matches = "testaccount")
@DisabledIfEnvironmentVariable(named = "GITHUB_ACTIONS", matches = ".*")
public @interface RunOnTestaccountNotOnGithubActions {}

================================================ FILE: src/test/java/net/snowflake/client/annotations/RunOnWindows.java ================================================
package net.snowflake.client.annotations;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.junit.jupiter.api.condition.EnabledOnOs;
import org.junit.jupiter.api.condition.OS;

@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
@EnabledOnOs(OS.WINDOWS)
public @interface RunOnWindows {}

================================================ FILE: src/test/java/net/snowflake/client/annotations/RunOnWindowsOrMac.java ================================================
package net.snowflake.client.annotations;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.junit.jupiter.api.condition.EnabledOnOs;
import org.junit.jupiter.api.condition.OS;

@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
@EnabledOnOs({OS.WINDOWS, OS.MAC})
public @interface RunOnWindowsOrMac {}
"SNOWFLAKE_TEST_ACCOUNT", matches = "testaccount") @DisabledIfEnvironmentVariable(named = "GITHUB_ACTIONS", matches = ".*") public @interface RunOnTestaccountNotOnGithubActions {} ================================================ FILE: src/test/java/net/snowflake/client/annotations/RunOnWindows.java ================================================ package net.snowflake.client.annotations; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; import org.junit.jupiter.api.condition.EnabledOnOs; import org.junit.jupiter.api.condition.OS; @Target(ElementType.METHOD) @Retention(RetentionPolicy.RUNTIME) @EnabledOnOs(OS.WINDOWS) public @interface RunOnWindows {} ================================================ FILE: src/test/java/net/snowflake/client/annotations/RunOnWindowsOrMac.java ================================================ package net.snowflake.client.annotations; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; import org.junit.jupiter.api.condition.EnabledOnOs; import org.junit.jupiter.api.condition.OS; @Target(ElementType.METHOD) @Retention(RetentionPolicy.RUNTIME) @EnabledOnOs({OS.WINDOWS, OS.MAC}) public @interface RunOnWindowsOrMac {} ================================================ FILE: src/test/java/net/snowflake/client/api/driver/SnowflakeDriverTest.java ================================================ package net.snowflake.client.api.driver; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import org.junit.jupiter.api.Test; /** Test class for SnowflakeDriver methods. 
*/ public class SnowflakeDriverTest { @Test public void testStaticVersionMatchesManifest() { String manifestVersion = SnowflakeDriver.versionResourceBundleManager.getLocalizedMessage("version"); assertNotNull(manifestVersion, "Manifest version should not be null"); // Remove -SNAPSHOT suffix if present for comparison String normalizedManifestVersion = manifestVersion.replace("-SNAPSHOT", ""); assertEquals( SnowflakeDriver.getImplementationVersion(), normalizedManifestVersion, "Static version should match manifest version"); } } ================================================ FILE: src/test/java/net/snowflake/client/api/exception/SqlFeatureNotSupportedTelemetryTest.java ================================================ package net.snowflake.client.api.exception; import static org.junit.jupiter.api.Assertions.assertEquals; import com.fasterxml.jackson.databind.node.ObjectNode; import net.minidev.json.JSONObject; import net.snowflake.client.api.driver.SnowflakeDriver; import net.snowflake.client.internal.jdbc.telemetry.SqlExceptionTelemetryHandler; import net.snowflake.client.internal.jdbc.telemetry.TelemetryField; import net.snowflake.client.internal.jdbc.telemetry.TelemetryUtil; import org.junit.jupiter.api.Test; public class SqlFeatureNotSupportedTelemetryTest { String queryId = "test-query-idfake"; String SQLState = "00000"; int vendorCode = 27; String driverVersion = SnowflakeDriver.getImplementationVersion(); String comparison = "{\"type\":\"client_sql_exception\",\"DriverType\":\"JDBC\",\"DriverVersion\":\"" + driverVersion + "\"," + "\"QueryID\":\"" + queryId + "\",\"SQLState\":\"" + SQLState + "\",\"ErrorNumber\":" + vendorCode + "}"; /** Test that creating in-band objectNode looks as expected */ @Test public void testCreateIBValue() { ObjectNode ibValue = TelemetryUtil.createIBValue( queryId, SQLState, vendorCode, TelemetryField.SQL_EXCEPTION, null, null); assertEquals(comparison, ibValue.toString()); } /** Test that creating out-of-band JSONObject contains all attributes it needs */ @Test public void testCreateOOBValue() { JSONObject oobValue = SqlExceptionTelemetryHandler.createOOBValue(queryId, SQLState, vendorCode); assertEquals("client_sql_exception", oobValue.get("type").toString()); assertEquals("JDBC", oobValue.get("DriverType").toString()); assertEquals(driverVersion, oobValue.get("DriverVersion").toString()); assertEquals(queryId, oobValue.get("QueryID").toString()); assertEquals(SQLState, oobValue.get("SQLState").toString()); assertEquals(vendorCode, oobValue.get("ErrorNumber")); } @Test public void testMaskStacktrace() { // Unmasked stacktrace containing reason for failure after the exception type String snowflakeSQLStacktrace = "net.snowflake.client.internal.exception.SnowflakeSQLLoggedException: This is a test exception.\n" + "\tat net.snowflake.client.internal.jdbc.telemetryOOB.TelemetryServiceIT.generateDummyException(TelemetryServiceIT.java:211)\n"; // Masked stacktrace with reason removed String maskedSnowflakeSQLStacktrace = "net.snowflake.client.internal.exception.SnowflakeSQLLoggedException\n" + "\tat net.snowflake.client.internal.jdbc.telemetryOOB.TelemetryServiceIT.generateDummyException(TelemetryServiceIT.java:211)\n"; // Sometimes reason can be multiple lines String multipleLineReasonMessage = "net.snowflake.client.api.exception.SnowflakeSQLException: Error parsing JSON: {\"dsadas\n" + "adsa\":12311}\n" + " File 'VvCSoHWHrB/0.CSV.gz', line 1, character 0\n" + " Row 1, column \"SPARK_TEST_TABLE_8417843441957284451\"[\"VAR\":1]\n" + " If you would like to 
continue loading when an error is encountered, use other values such as " + "'SKIP_FILE' or 'CONTINUE' for the ON_ERROR option. For more information on loading options, please " + "run 'info loading_data' in a SQL client.\n" + "\tat net.snowflake.client.internal.jdbc.SnowflakeUtil.checkErrorAndThrowExceptionSub(SnowflakeUtil.java:124)\n"; String maskedMultipleLineReasonMessage = "net.snowflake.client.api.exception.SnowflakeSQLException\n" + "\tat net.snowflake.client.internal.jdbc.SnowflakeUtil.checkErrorAndThrowExceptionSub(SnowflakeUtil.java:124)\n"; assertEquals( maskedSnowflakeSQLStacktrace, SqlExceptionTelemetryHandler.maskStacktrace(snowflakeSQLStacktrace)); // Unmasked stacktrace for SQLFeatureNotSupportedException. Contains reason as well String featureNotSupportedStacktrace = "net.snowflake.client.api.exception.SnowflakeLoggedFeatureNotSupportedException: Not supported!\n" + "\tat net.snowflake.client.internal.api.implementation.statement.SnowflakeStatementImpl.execute(SnowflakeStatementImpl.java:344)\n"; // Masked stacktrace String maskedFeatureNotSupportedStacktrace = "net.snowflake.client.api.exception.SnowflakeLoggedFeatureNotSupportedException\n" + "\tat net.snowflake.client.internal.api.implementation.statement.SnowflakeStatementImpl.execute(SnowflakeStatementImpl.java:344)\n"; assertEquals( maskedFeatureNotSupportedStacktrace, SqlExceptionTelemetryHandler.maskStacktrace(featureNotSupportedStacktrace)); assertEquals( maskedMultipleLineReasonMessage, SqlExceptionTelemetryHandler.maskStacktrace(multipleLineReasonMessage)); } } ================================================ FILE: src/test/java/net/snowflake/client/api/pooling/LogicalConnectionAlreadyClosedLatestIT.java ================================================ package net.snowflake.client.api.pooling; import java.sql.Connection; import java.sql.SQLException; import java.util.Map; import javax.sql.PooledConnection; import net.snowflake.client.category.TestTags; import net.snowflake.client.internal.jdbc.BaseJDBCTest; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; @Tag(TestTags.CONNECTION) public class LogicalConnectionAlreadyClosedLatestIT extends BaseJDBCTest { @Test public void testLogicalConnectionAlreadyClosed() throws SQLException { Map<String, String> properties = getConnectionParameters(); SnowflakeConnectionPoolDataSource poolDataSource = SnowflakeConnectionPoolDataSourceFactory.createConnectionPoolDataSource(); poolDataSource.setUrl(properties.get("uri")); poolDataSource.setPortNumber(Integer.parseInt(properties.get("port"))); poolDataSource.setSsl("on".equals(properties.get("ssl"))); poolDataSource.setAccount(properties.get("account")); poolDataSource.setUser(properties.get("user")); // Use private key authentication if available, otherwise password if (properties.get("private_key_file") != null && !properties.get("private_key_file").isEmpty()) { poolDataSource.setPrivateKeyFile( properties.get("private_key_file"), properties.get("private_key_pwd")); } else { poolDataSource.setPassword(properties.get("password")); } PooledConnection pooledConnection = poolDataSource.getPooledConnection(); Connection logicalConnection = pooledConnection.getConnection(); logicalConnection.close(); expectConnectionAlreadyClosedException(logicalConnection::getMetaData); expectConnectionAlreadyClosedException(logicalConnection::getAutoCommit); expectConnectionAlreadyClosedException(logicalConnection::commit); expectConnectionAlreadyClosedException(logicalConnection::rollback);
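// The remaining JDBC Connection methods are exercised the same way: every call on a
// closed logical connection is expected to surface the driver's
// "connection already closed" SQLException.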
expectConnectionAlreadyClosedException(logicalConnection::isReadOnly); expectConnectionAlreadyClosedException(logicalConnection::getCatalog); expectConnectionAlreadyClosedException(logicalConnection::getSchema); expectConnectionAlreadyClosedException(logicalConnection::getTransactionIsolation); expectConnectionAlreadyClosedException(logicalConnection::getWarnings); expectConnectionAlreadyClosedException(logicalConnection::clearWarnings); expectConnectionAlreadyClosedException(() -> logicalConnection.nativeSQL("select 1")); expectConnectionAlreadyClosedException(() -> logicalConnection.setAutoCommit(false)); expectConnectionAlreadyClosedException(() -> logicalConnection.setReadOnly(false)); expectConnectionAlreadyClosedException(() -> logicalConnection.setCatalog("fakedb")); expectConnectionAlreadyClosedException(() -> logicalConnection.setSchema("fakedb")); expectConnectionAlreadyClosedException( () -> logicalConnection.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED)); expectConnectionAlreadyClosedException(() -> logicalConnection.createArrayOf("faketype", null)); } } ================================================ FILE: src/test/java/net/snowflake/client/api/pooling/LogicalConnectionFeatureNotSupportedLatestIT.java ================================================ package net.snowflake.client.api.pooling; import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Savepoint; import java.sql.Statement; import java.util.HashMap; import java.util.Map; import javax.sql.PooledConnection; import net.snowflake.client.category.TestTags; import net.snowflake.client.internal.jdbc.BaseJDBCTest; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; @Tag(TestTags.CONNECTION) public class LogicalConnectionFeatureNotSupportedLatestIT extends BaseJDBCTest { @Test public void testLogicalConnectionFeatureNotSupported() throws SQLException { Map<String, String> properties = getConnectionParameters(); SnowflakeConnectionPoolDataSource poolDataSource = SnowflakeConnectionPoolDataSourceFactory.createConnectionPoolDataSource(); poolDataSource.setUrl(properties.get("uri")); poolDataSource.setPortNumber(Integer.parseInt(properties.get("port"))); poolDataSource.setSsl("on".equals(properties.get("ssl"))); poolDataSource.setAccount(properties.get("account")); poolDataSource.setUser(properties.get("user")); // Use private key authentication if available, otherwise password if (properties.get("private_key_file") != null && !properties.get("private_key_file").isEmpty()) { poolDataSource.setPrivateKeyFile( properties.get("private_key_file"), properties.get("private_key_pwd")); } else { poolDataSource.setPassword(properties.get("password")); } PooledConnection pooledConnection = poolDataSource.getPooledConnection(); Connection logicalConnection = pooledConnection.getConnection(); expectFeatureNotSupportedException(() -> logicalConnection.rollback(new FakeSavepoint())); expectFeatureNotSupportedException( () -> logicalConnection.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE)); expectFeatureNotSupportedException( () -> logicalConnection.setTransactionIsolation(Connection.TRANSACTION_REPEATABLE_READ)); expectFeatureNotSupportedException( () -> logicalConnection.prepareStatement("select 1", new int[] {1, 2})); expectFeatureNotSupportedException( () -> logicalConnection.prepareStatement("select 1", new String[] {"c1", "c2"})); expectFeatureNotSupportedException( () -> logicalConnection.prepareStatement( "select 1", ResultSet.TYPE_SCROLL_SENSITIVE,
ResultSet.CONCUR_READ_ONLY)); expectFeatureNotSupportedException( () -> logicalConnection.prepareStatement( "select 1", ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT)); expectFeatureNotSupportedException( () -> logicalConnection.createStatement( ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_READ_ONLY)); expectFeatureNotSupportedException(() -> logicalConnection.setTypeMap(new HashMap<>())); expectFeatureNotSupportedException(logicalConnection::setSavepoint); expectFeatureNotSupportedException(() -> logicalConnection.setSavepoint("fake")); expectFeatureNotSupportedException( () -> logicalConnection.releaseSavepoint(new FakeSavepoint())); expectFeatureNotSupportedException(logicalConnection::createBlob); expectFeatureNotSupportedException(logicalConnection::createNClob); expectFeatureNotSupportedException(logicalConnection::createSQLXML); expectFeatureNotSupportedException( () -> logicalConnection.setHoldability(ResultSet.HOLD_CURSORS_OVER_COMMIT)); expectFeatureNotSupportedException( () -> logicalConnection.createStruct("fakeType", new Object[] {})); expectFeatureNotSupportedException( () -> logicalConnection.prepareStatement("select 1", Statement.RETURN_GENERATED_KEYS)); } class FakeSavepoint implements Savepoint { @Override public int getSavepointId() throws SQLException { return 0; } @Override public String getSavepointName() throws SQLException { return ""; } } } ================================================ FILE: src/test/java/net/snowflake/client/authentication/AuthConnectionParameters.java ================================================ package net.snowflake.client.authentication; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetEnv; import java.util.Properties; public class AuthConnectionParameters { static final String SSO_USER = systemGetEnv("SNOWFLAKE_AUTH_TEST_BROWSER_USER"); static final String SNOWFLAKE_USER = systemGetEnv("SNOWFLAKE_AUTH_TEST_SNOWFLAKE_USER"); static final String HOST = systemGetEnv("SNOWFLAKE_AUTH_TEST_HOST"); static final String SSO_PASSWORD = systemGetEnv("SNOWFLAKE_AUTH_TEST_OKTA_PASS"); static final String OKTA = systemGetEnv("SNOWFLAKE_AUTH_TEST_OKTA_NAME"); static final String OAUTH_PASSWORD = systemGetEnv("SNOWFLAKE_AUTH_TEST_EXTERNAL_OAUTH_OKTA_USER_PASSWORD"); static final String SNOWFLAKE_INTERNAL_ROLE = systemGetEnv("SNOWFLAKE_AUTH_TEST_INTERNAL_OAUTH_SNOWFLAKE_ROLE"); static Properties getBaseConnectionParameters() { Properties properties = new Properties(); properties.put("host", HOST); properties.put("port", systemGetEnv("SNOWFLAKE_AUTH_TEST_PORT")); properties.put("role", systemGetEnv("SNOWFLAKE_AUTH_TEST_ROLE")); properties.put("account", systemGetEnv("SNOWFLAKE_AUTH_TEST_ACCOUNT")); properties.put("db", systemGetEnv("SNOWFLAKE_AUTH_TEST_DATABASE")); properties.put("schema", systemGetEnv("SNOWFLAKE_AUTH_TEST_SCHEMA")); properties.put("warehouse", systemGetEnv("SNOWFLAKE_AUTH_TEST_WAREHOUSE")); properties.put("CLIENT_STORE_TEMPORARY_CREDENTIAL", false); return properties; } static Properties getExternalBrowserConnectionParameters() { Properties properties = getBaseConnectionParameters(); properties.put("user", SSO_USER); properties.put("authenticator", "externalbrowser"); return properties; } static Properties getStoreIDTokenConnectionParameters() { Properties properties = getExternalBrowserConnectionParameters(); properties.put("CLIENT_STORE_TEMPORARY_CREDENTIAL", true); return properties; } static Properties getOktaConnectionParameters() { Properties properties 
= getBaseConnectionParameters(); properties.put("user", SSO_USER); properties.put("password", SSO_PASSWORD); properties.put("authenticator", systemGetEnv("SNOWFLAKE_AUTH_TEST_OAUTH_URL")); return properties; } static Properties getOauthConnectionParameters(String token) { Properties properties = getBaseConnectionParameters(); properties.put("user", SSO_USER); properties.put("authenticator", "OAUTH"); properties.put("token", token); return properties; } static Properties getMfaConnectionParameters() { Properties properties = getBaseConnectionParameters(); properties.put("user", systemGetEnv("SNOWFLAKE_AUTH_TEST_MFA_USER")); properties.put("password", systemGetEnv("SNOWFLAKE_AUTH_TEST_MFA_PASSWORD")); properties.put("authenticator", "USERNAME_PASSWORD_MFA"); return properties; } static Properties getOAuthExternalAuthorizationCodeConnectionParameters() { Properties properties = getBaseConnectionParameters(); properties.put("authenticator", "OAUTH_AUTHORIZATION_CODE"); properties.put( "oauthClientId", systemGetEnv("SNOWFLAKE_AUTH_TEST_EXTERNAL_OAUTH_OKTA_CLIENT_ID")); properties.put( "oauthClientSecret", systemGetEnv("SNOWFLAKE_AUTH_TEST_EXTERNAL_OAUTH_OKTA_CLIENT_SECRET")); properties.put( "oauthRedirectURI", systemGetEnv("SNOWFLAKE_AUTH_TEST_EXTERNAL_OAUTH_OKTA_REDIRECT_URI")); properties.put( "oauthAuthorizationUrl", systemGetEnv("SNOWFLAKE_AUTH_TEST_EXTERNAL_OAUTH_OKTA_AUTH_URL")); properties.put( "oauthTokenRequestUrl", systemGetEnv("SNOWFLAKE_AUTH_TEST_EXTERNAL_OAUTH_OKTA_TOKEN")); properties.put("user", SSO_USER); return properties; } static Properties getOAuthSnowflakeAuthorizationCodeConnectionParameters() { Properties properties = getBaseConnectionParameters(); properties.put("authenticator", "OAUTH_AUTHORIZATION_CODE"); properties.put( "oauthClientId", systemGetEnv("SNOWFLAKE_AUTH_TEST_INTERNAL_OAUTH_SNOWFLAKE_CLIENT_ID")); properties.put( "oauthClientSecret", systemGetEnv("SNOWFLAKE_AUTH_TEST_INTERNAL_OAUTH_SNOWFLAKE_CLIENT_SECRET")); properties.put( "oauthRedirectURI", systemGetEnv("SNOWFLAKE_AUTH_TEST_INTERNAL_OAUTH_SNOWFLAKE_REDIRECT_URI")); properties.put("role", systemGetEnv("SNOWFLAKE_AUTH_TEST_INTERNAL_OAUTH_SNOWFLAKE_ROLE")); properties.put("user", systemGetEnv("SNOWFLAKE_AUTH_TEST_EXTERNAL_OAUTH_OKTA_CLIENT_ID")); return properties; } static Properties getOAuthSnowflakeWildcardsAuthorizationCodeConnectionParameters() { Properties properties = getBaseConnectionParameters(); properties.put("authenticator", "OAUTH_AUTHORIZATION_CODE"); properties.put( "oauthClientId", systemGetEnv("SNOWFLAKE_AUTH_TEST_INTERNAL_OAUTH_SNOWFLAKE_WILDCARDS_CLIENT_ID")); properties.put( "oauthClientSecret", systemGetEnv("SNOWFLAKE_AUTH_TEST_INTERNAL_OAUTH_SNOWFLAKE_WILDCARDS_CLIENT_SECRET")); properties.put("role", systemGetEnv("SNOWFLAKE_AUTH_TEST_INTERNAL_OAUTH_SNOWFLAKE_ROLE")); properties.put("user", systemGetEnv("SNOWFLAKE_AUTH_TEST_EXTERNAL_OAUTH_OKTA_CLIENT_ID")); return properties; } static Properties getOAuthOktaClientCredentialParameters() { Properties properties = getBaseConnectionParameters(); properties.put("authenticator", "OAUTH_CLIENT_CREDENTIALS"); properties.put( "oauthClientId", systemGetEnv("SNOWFLAKE_AUTH_TEST_EXTERNAL_OAUTH_OKTA_CLIENT_ID")); properties.put( "oauthClientSecret", systemGetEnv("SNOWFLAKE_AUTH_TEST_EXTERNAL_OAUTH_OKTA_CLIENT_SECRET")); properties.put( "oauthTokenRequestUrl", systemGetEnv("SNOWFLAKE_AUTH_TEST_EXTERNAL_OAUTH_OKTA_TOKEN")); properties.put("user", systemGetEnv("SNOWFLAKE_AUTH_TEST_EXTERNAL_OAUTH_OKTA_CLIENT_ID")); return properties; } static 
Properties getPATConnectionParameters() { Properties properties = getBaseConnectionParameters(); properties.put("user", SSO_USER); properties.put("authenticator", "PROGRAMMATIC_ACCESS_TOKEN"); return properties; } } ================================================ FILE: src/test/java/net/snowflake/client/authentication/AuthTestHelper.java ================================================ package net.snowflake.client.authentication; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStreamReader; import java.sql.Connection; import java.sql.DriverManager; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Properties; import java.util.concurrent.TimeUnit; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.api.implementation.connection.SnowflakeConnectionImpl; import net.snowflake.client.internal.core.SessionUtil; public class AuthTestHelper { private Exception exception; private String idToken; private String accessToken; private final boolean runAuthTestsManually; public AuthTestHelper() { this.runAuthTestsManually = Boolean.parseBoolean(System.getenv("RUN_AUTH_TESTS_MANUALLY")); } public Thread getConnectAndExecuteSimpleQueryThread(Properties props, String sessionParameters) { return new Thread(() -> connectAndExecuteSimpleQuery(props, sessionParameters)); } public Thread getConnectAndExecuteSimpleQueryThread(Properties props) { return new Thread(() -> connectAndExecuteSimpleQuery(props, null)); } public void verifyExceptionIsThrown(String message) { assertThat("Expected exception not thrown", this.exception.getMessage(), is(message)); } public void verifyExceptionIsNotThrown() { assertThat("Unexpected exception thrown", this.exception, nullValue()); } public void connectAndProvideCredentials(Thread provideCredentialsThread, Thread connectThread) throws InterruptedException { if (runAuthTestsManually) { connectThread.start(); connectThread.join(); } else { provideCredentialsThread.start(); connectThread.start(); provideCredentialsThread.join(); connectThread.join(); } } public void provideCredentials(String scenario, String login, String password) { try { String provideBrowserCredentialsPath = "/externalbrowser/provideBrowserCredentials.js"; ProcessBuilder processBuilder = new ProcessBuilder("node", provideBrowserCredentialsPath, scenario, login, password); Process process = processBuilder.start(); process.waitFor(15, TimeUnit.SECONDS); } catch (Exception e) { throw new RuntimeException(e); } } public void cleanBrowserProcesses() { if (!runAuthTestsManually) { String cleanBrowserProcessesPath = "/externalbrowser/cleanBrowserProcesses.js"; ProcessBuilder processBuilder = new ProcessBuilder("node", cleanBrowserProcessesPath); try { Process process = processBuilder.start(); process.waitFor(15, TimeUnit.SECONDS); } catch (InterruptedException | IOException e) { throw new RuntimeException(e); } } } public List<String> getTotp(String seed) { if (runAuthTestsManually) { System.err.println( "ERROR: TOTP code needs to be setup manually when running auth tests manually"); return Collections.emptyList(); } try { String totpGeneratorPath =
"/externalbrowser/totpGenerator.js"; ProcessBuilder processBuilder = new ProcessBuilder("node", totpGeneratorPath, seed); Process process = processBuilder.start(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream()))) { String output = reader.readLine(); process.waitFor(40, TimeUnit.SECONDS); return Arrays.asList(output.trim().split("\\s+")); } } catch (Exception e) { throw new RuntimeException(e); } } public List getTotp() { return getTotp(""); } public boolean connectAndExecuteSimpleQueryWithMfaToken( Properties props, List totpCodes) { for (int i = 0; i < totpCodes.size(); i++) { String totpCode = totpCodes.get(i); props.put("passcode", totpCode); this.exception = null; connectAndExecuteSimpleQuery(props, null); if (this.exception == null) { return true; } else { String errorMsg = this.exception.getMessage(); System.out.println("TOTP code " + (i + 1) + " failed: " + errorMsg); if (errorMsg.contains("TOTP Invalid")) { System.out.println("TOTP/MFA error detected."); } else { System.out.println("Non-TOTP error detected: " + errorMsg); break; } } } return false; } public static void deleteIdToken() { SessionUtil.deleteIdTokenCache( AuthConnectionParameters.HOST, AuthConnectionParameters.SSO_USER); } public static void deleteIdToken(String host, String user) { SessionUtil.deleteIdTokenCache(host, user); } public static void deleteOauthToken() { SessionUtil.deleteOAuthAccessTokenCache( AuthConnectionParameters.OKTA, AuthConnectionParameters.SSO_USER); } public static void deleteOauthToken(String host, String user) { SessionUtil.deleteOAuthAccessTokenCache(host, user); } public static void deleteOauthRefreshToken(String host, String user) { SessionUtil.deleteOAuthRefreshTokenCache(host, user); } public void connectAndExecuteSimpleQuery(Properties props, String sessionParameters) { String url = String.format("jdbc:snowflake://%s", props.get("host")); if (sessionParameters != null) { url += "?" 
+ sessionParameters; } try (Connection con = DriverManager.getConnection(url, props); Statement stmt = con.createStatement(); ResultSet rs = stmt.executeQuery("select 1")) { assertTrue(rs.next()); int value = rs.getInt(1); assertEquals(1, value); saveToken(con); saveAccessToken(con); } catch (SQLException e) { this.exception = e; } } private void saveToken(Connection con) throws SnowflakeSQLException { SnowflakeConnectionImpl sfcon = (SnowflakeConnectionImpl) con; this.idToken = sfcon.getSfSession().getIdToken(); } private void saveAccessToken(Connection con) throws SnowflakeSQLException { SnowflakeConnectionImpl sfcon = (SnowflakeConnectionImpl) con; this.accessToken = sfcon.getSfSession().getAccessToken(); } public String getIdToken() { return idToken; } public String getAccessToken() { return accessToken; } } ================================================ FILE: src/test/java/net/snowflake/client/authentication/ExternalBrowserLatestIT.java ================================================ package net.snowflake.client.authentication; import static net.snowflake.client.authentication.AuthConnectionParameters.getExternalBrowserConnectionParameters; import java.io.IOException; import java.util.Properties; import net.snowflake.client.category.TestTags; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; @Tag(TestTags.AUTHENTICATION) class ExternalBrowserLatestIT { String login = AuthConnectionParameters.SSO_USER; String password = AuthConnectionParameters.SSO_PASSWORD; AuthTestHelper authTestHelper = new AuthTestHelper(); Properties properties; @BeforeEach public void setUp() throws IOException { AuthTestHelper.deleteIdToken(); properties = getExternalBrowserConnectionParameters(); } @AfterEach public void tearDown() { authTestHelper.cleanBrowserProcesses(); AuthTestHelper.deleteIdToken(); } @Test void shouldAuthenticateUsingExternalBrowser() throws InterruptedException { Thread provideCredentialsThread = new Thread(() -> authTestHelper.provideCredentials("success", login, password)); Thread connectThread = authTestHelper.getConnectAndExecuteSimpleQueryThread(properties); authTestHelper.connectAndProvideCredentials(provideCredentialsThread, connectThread); authTestHelper.verifyExceptionIsNotThrown(); } @Test void shouldThrowErrorForMismatchedUsername() throws InterruptedException { properties.put("user", "differentUsername"); Thread provideCredentialsThread = new Thread(() -> authTestHelper.provideCredentials("success", login, password)); Thread connectThread = authTestHelper.getConnectAndExecuteSimpleQueryThread(properties); authTestHelper.connectAndProvideCredentials(provideCredentialsThread, connectThread); authTestHelper.verifyExceptionIsThrown( "The user you were trying to authenticate as differs from the user currently logged in at the IDP."); } @Test void shouldThrowErrorForWrongCredentials() throws InterruptedException { String login = "itsnotanaccount.com"; String password = "fakepassword"; Thread provideCredentialsThread = new Thread(() -> authTestHelper.provideCredentials("fail", login, password)); Thread connectThread = authTestHelper.getConnectAndExecuteSimpleQueryThread( properties, "BROWSER_RESPONSE_TIMEOUT=10"); authTestHelper.connectAndProvideCredentials(provideCredentialsThread, connectThread); authTestHelper.verifyExceptionIsThrown( "JDBC driver encountered communication error. 
Message: External browser authentication failed within timeout of 10000 milliseconds."); } @Test void shouldThrowErrorForBrowserTimeout() throws InterruptedException { Thread provideCredentialsThread = new Thread(() -> authTestHelper.provideCredentials("timeout", login, password)); Thread connectThread = authTestHelper.getConnectAndExecuteSimpleQueryThread( properties, "BROWSER_RESPONSE_TIMEOUT=1"); authTestHelper.connectAndProvideCredentials(provideCredentialsThread, connectThread); authTestHelper.verifyExceptionIsThrown( "JDBC driver encountered communication error. Message: External browser authentication failed within timeout of 1000 milliseconds."); } } ================================================ FILE: src/test/java/net/snowflake/client/authentication/IdTokenLatestIT.java ================================================ package net.snowflake.client.authentication; import static net.snowflake.client.authentication.AuthConnectionParameters.getStoreIDTokenConnectionParameters; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; import static org.junit.jupiter.api.Assumptions.assumeTrue; import net.snowflake.client.category.TestTags; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.MethodOrderer; import org.junit.jupiter.api.Order; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestMethodOrder; @Tag(TestTags.AUTHENTICATION) @TestMethodOrder(MethodOrderer.OrderAnnotation.class) class IdTokenLatestIT { String login = AuthConnectionParameters.SSO_USER; String password = AuthConnectionParameters.SSO_PASSWORD; AuthTestHelper authTestHelper = new AuthTestHelper(); private static String firstToken; @BeforeAll public static void globalSetUp() { AuthTestHelper.deleteIdToken(); } @AfterEach public void tearDown() { authTestHelper.cleanBrowserProcesses(); } @Test @Order(1) void shouldAuthenticateUsingExternalBrowserAndSaveToken() throws InterruptedException { Thread provideCredentialsThread = new Thread(() -> authTestHelper.provideCredentials("success", login, password)); Thread connectThread = authTestHelper.getConnectAndExecuteSimpleQueryThread(getStoreIDTokenConnectionParameters()); authTestHelper.connectAndProvideCredentials(provideCredentialsThread, connectThread); authTestHelper.verifyExceptionIsNotThrown(); firstToken = authTestHelper.getIdToken(); assertThat("Id token was not saved", firstToken, notNullValue()); } @Test @Order(2) void shouldAuthenticateUsingTokenWithoutBrowser() { verifyFirstTokenWasSaved(); authTestHelper.connectAndExecuteSimpleQuery(getStoreIDTokenConnectionParameters(), null); authTestHelper.verifyExceptionIsNotThrown(); } @Test @Order(3) void shouldOpenBrowserAgainWhenTokenIsDeleted() throws InterruptedException { verifyFirstTokenWasSaved(); AuthTestHelper.deleteIdToken(); Thread provideCredentialsThread = new Thread(() -> authTestHelper.provideCredentials("success", login, password)); Thread connectThread = authTestHelper.getConnectAndExecuteSimpleQueryThread(getStoreIDTokenConnectionParameters()); authTestHelper.connectAndProvideCredentials(provideCredentialsThread, connectThread); authTestHelper.verifyExceptionIsNotThrown(); String secondToken = authTestHelper.getIdToken(); assertThat("Id token was not saved", secondToken, notNullValue()); assertThat("Id token was not updated", secondToken, not(firstToken)); } private void verifyFirstTokenWasSaved() { 
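// Later tests in this ordered class depend on the token captured by the first test;
// when it is missing they are skipped via assumeTrue rather than reported as failures.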
assumeTrue(firstToken != null, "token was not saved, skipping test"); } } ================================================ FILE: src/test/java/net/snowflake/client/authentication/MFALatestIT.java ================================================ package net.snowflake.client.authentication; import static net.snowflake.client.authentication.AuthConnectionParameters.getMfaConnectionParameters; import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.List; import java.util.Properties; import net.snowflake.client.category.TestTags; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; @Tag(TestTags.AUTHENTICATION) public class MFALatestIT { AuthTestHelper authTestHelper; @BeforeEach public void setUp() throws IOException { authTestHelper = new AuthTestHelper(); } @Test void testMfaSuccessful() { Properties connectionParameters = getMfaConnectionParameters(); connectionParameters.put("CLIENT_REQUEST_MFA_TOKEN", true); List<String> totpCodes = authTestHelper.getTotp(); assertTrue(totpCodes.size() > 0, "Expected to get TOTP codes but got none"); boolean connectionSuccess = authTestHelper.connectAndExecuteSimpleQueryWithMfaToken(connectionParameters, totpCodes); assertTrue( connectionSuccess, "Failed to connect with any of the " + totpCodes.size() + " TOTP codes"); authTestHelper.verifyExceptionIsNotThrown(); connectionParameters.remove("passcode"); AuthTestHelper cacheTestHelper = new AuthTestHelper(); cacheTestHelper.connectAndExecuteSimpleQuery(connectionParameters, null); cacheTestHelper.verifyExceptionIsNotThrown(); } } ================================================ FILE: src/test/java/net/snowflake/client/authentication/OauthLatestIT.java ================================================ package net.snowflake.client.authentication; import static net.snowflake.client.authentication.AuthConnectionParameters.getOauthConnectionParameters; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.MatcherAssert.assertThat; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import java.io.DataOutputStream; import java.io.IOException; import java.io.InputStream; import java.net.HttpURLConnection; import java.net.URL; import java.nio.charset.StandardCharsets; import java.util.Base64; import java.util.List; import java.util.Properties; import java.util.stream.Collectors; import java.util.stream.Stream; import net.snowflake.client.category.TestTags; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; @Tag(TestTags.AUTHENTICATION) public class OauthLatestIT { AuthTestHelper authTestHelper; @BeforeEach public void setUp() throws IOException { authTestHelper = new AuthTestHelper(); } @Test void shouldAuthenticateUsingOauth() throws IOException { authTestHelper.connectAndExecuteSimpleQuery(getOauthConnectionParameters(getToken()), null); authTestHelper.verifyExceptionIsNotThrown(); } @Test void shouldThrowErrorForInvalidToken() { authTestHelper.connectAndExecuteSimpleQuery(getOauthConnectionParameters("invalidToken"), null); authTestHelper.verifyExceptionIsThrown("Invalid OAuth access token.
"); } @Test void shouldThrowErrorForMismatchedOauthUsername() throws IOException { Properties properties = getOauthConnectionParameters(getToken()); properties.put("user", "differentUsername"); authTestHelper.connectAndExecuteSimpleQuery(properties, null); authTestHelper.verifyExceptionIsThrown( "The user you were trying to authenticate as differs from the user tied to the access token."); } private String getToken() throws IOException { List data = Stream.of( "username=" + System.getenv("SNOWFLAKE_AUTH_TEST_OKTA_USER"), "password=" + System.getenv("SNOWFLAKE_AUTH_TEST_OKTA_PASS"), "grant_type=password", "scope=session:role:" + System.getenv("SNOWFLAKE_AUTH_TEST_ROLE").toLowerCase()) .collect(Collectors.toList()); String auth = System.getenv("SNOWFLAKE_AUTH_TEST_OAUTH_CLIENT_ID") + ":" + System.getenv("SNOWFLAKE_AUTH_TEST_OAUTH_CLIENT_SECRET"); String encodedAuth = Base64.getEncoder().encodeToString(auth.getBytes(StandardCharsets.UTF_8)); URL url = new URL(System.getenv("SNOWFLAKE_AUTH_TEST_OAUTH_URL")); HttpURLConnection connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("POST"); connection.setRequestProperty( "Content-Type", "application/x-www-form-urlencoded;charset=UTF-8"); connection.setRequestProperty("Authorization", "Basic " + encodedAuth); connection.setDoOutput(true); try (DataOutputStream out = new DataOutputStream(connection.getOutputStream())) { out.writeBytes(String.join("&", data)); out.flush(); } int responseCode = connection.getResponseCode(); assertThat("Failed to get access token, response code: " + responseCode, responseCode, is(200)); ObjectMapper mapper = new ObjectMapper(); JsonNode jsonNode; try (InputStream inputStream = connection.getInputStream()) { jsonNode = mapper.readTree(inputStream); } return jsonNode.get("access_token").asText(); } } ================================================ FILE: src/test/java/net/snowflake/client/authentication/OauthOktaAuthorizationCodeLatestIT.java ================================================ package net.snowflake.client.authentication; import static net.snowflake.client.authentication.AuthConnectionParameters.getOAuthExternalAuthorizationCodeConnectionParameters; import java.io.IOException; import java.util.Properties; import net.snowflake.client.category.TestTags; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; @Tag(TestTags.AUTHENTICATION) public class OauthOktaAuthorizationCodeLatestIT { String login = AuthConnectionParameters.SSO_USER; String password = AuthConnectionParameters.SSO_PASSWORD; AuthTestHelper authTestHelper = new AuthTestHelper(); Properties properties; @BeforeEach public void setUp() throws IOException { AuthTestHelper.deleteIdToken(); AuthTestHelper.deleteOauthToken(); properties = getOAuthExternalAuthorizationCodeConnectionParameters(); } @AfterEach public void cleanUp() { authTestHelper.cleanBrowserProcesses(); } @AfterAll public static void tearDown() { AuthTestHelper.deleteIdToken(); AuthTestHelper.deleteOauthToken(); } @Test void shouldAuthenticateUsingExternalOauthOktaAuthorizationCode() throws InterruptedException { Thread provideCredentialsThread = new Thread( () -> authTestHelper.provideCredentials("externalOauthOktaSuccess", login, password)); Thread connectThread = new Thread(() -> authTestHelper.connectAndExecuteSimpleQuery(properties, null)); authTestHelper.connectAndProvideCredentials(provideCredentialsThread, 
connectThread); authTestHelper.verifyExceptionIsNotThrown(); } @Test void shouldThrowErrorForMismatchedOauthOktaUsername() throws InterruptedException { properties.setProperty("user", "invalidUser@snowflake.com"); Thread provideCredentialsThread = new Thread( () -> authTestHelper.provideCredentials("externalOauthOktaSuccess", login, password)); Thread connectThread = authTestHelper.getConnectAndExecuteSimpleQueryThread(properties, null); authTestHelper.connectAndProvideCredentials(provideCredentialsThread, connectThread); authTestHelper.verifyExceptionIsThrown( "The user you were trying to authenticate as differs from the user tied to the access token."); } @Test void shouldThrowErrorForOauthOktaTimeout() throws InterruptedException { properties.put("BROWSER_RESPONSE_TIMEOUT", "0"); authTestHelper.connectAndExecuteSimpleQuery(properties, null); authTestHelper.verifyExceptionIsThrown( "Error during OAuth Authorization Code authentication: Authorization request timed out. " + "Snowflake driver did not receive authorization code back to the redirect URI. Verify your security integration and driver configuration."); } @Test void shouldAuthenticateUsingTokenCacheForOauthOkta() throws InterruptedException { properties.put("CLIENT_STORE_TEMPORARY_CREDENTIAL", true); Thread provideCredentialsThread = new Thread( () -> authTestHelper.provideCredentials("externalOauthOktaSuccess", login, password)); Thread connectThread = authTestHelper.getConnectAndExecuteSimpleQueryThread(properties, null); authTestHelper.connectAndProvideCredentials(provideCredentialsThread, connectThread); authTestHelper.verifyExceptionIsNotThrown(); authTestHelper.connectAndExecuteSimpleQuery(properties, null); authTestHelper.verifyExceptionIsNotThrown(); } } ================================================ FILE: src/test/java/net/snowflake/client/authentication/OauthOktaClientCredentialsLatestIT.java ================================================ package net.snowflake.client.authentication; import static net.snowflake.client.authentication.AuthConnectionParameters.OKTA; import static net.snowflake.client.authentication.AuthConnectionParameters.getOAuthOktaClientCredentialParameters; import java.io.IOException; import java.util.Properties; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.category.TestTags; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; @Tag(TestTags.AUTHENTICATION) public class OauthOktaClientCredentialsLatestIT { Properties properties = getOAuthOktaClientCredentialParameters(); String login = properties.getProperty("user"); AuthTestHelper authTestHelper = new AuthTestHelper(); @BeforeEach public void setUp() throws IOException { AuthTestHelper.deleteIdToken(AuthConnectionParameters.HOST, login); AuthTestHelper.deleteOauthToken(OKTA, login); properties = getOAuthOktaClientCredentialParameters(); } @AfterAll public static void tearDown() { Properties properties = getOAuthOktaClientCredentialParameters(); AuthTestHelper.deleteIdToken(AuthConnectionParameters.HOST, properties.getProperty("user")); AuthTestHelper.deleteOauthToken(OKTA, properties.getProperty("user")); } @Test void shouldAuthenticateUsingSnowflakeOauthClientCredentials() { authTestHelper.connectAndExecuteSimpleQuery(properties, null); authTestHelper.verifyExceptionIsNotThrown(); } @Test void shouldThrowErrorForClientCredentialsMismatchedUsername() throws InterruptedException { properties.put("user", 
"invalidUser@snowflake.com"); authTestHelper.connectAndExecuteSimpleQuery(properties, null); authTestHelper.verifyExceptionIsThrown( "The user you were trying to authenticate as differs from the user tied to the access token."); } @Test void shouldThrowErrorForUnauthorizedClientCredentials() throws InterruptedException, SnowflakeSQLException { properties.put("oauthClientId", "invalidClientId"); authTestHelper.connectAndExecuteSimpleQuery(properties, null); authTestHelper.verifyExceptionIsThrown( "Error during OAuth Client Credentials authentication: JDBC driver encountered communication error. Message: HTTP status=401."); } } ================================================ FILE: src/test/java/net/snowflake/client/authentication/OauthSnowflakeAuthorizationCodeLatestIT.java ================================================ package net.snowflake.client.authentication; import static net.snowflake.client.authentication.AuthConnectionParameters.OAUTH_PASSWORD; import static net.snowflake.client.authentication.AuthConnectionParameters.getOAuthSnowflakeAuthorizationCodeConnectionParameters; import java.io.IOException; import java.util.Properties; import net.snowflake.client.category.TestTags; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; @Tag(TestTags.AUTHENTICATION) public class OauthSnowflakeAuthorizationCodeLatestIT { Properties properties = getOAuthSnowflakeAuthorizationCodeConnectionParameters(); String login = properties.getProperty("user"); String password = OAUTH_PASSWORD; AuthTestHelper authTestHelper = new AuthTestHelper(); @BeforeEach public void setUp() throws IOException { authTestHelper.cleanBrowserProcesses(); AuthTestHelper.deleteIdToken(AuthConnectionParameters.HOST, login); AuthTestHelper.deleteOauthToken(AuthConnectionParameters.HOST, login); AuthTestHelper.deleteOauthRefreshToken(AuthConnectionParameters.HOST, login); properties = getOAuthSnowflakeAuthorizationCodeConnectionParameters(); } @AfterEach public void cleanUp() { authTestHelper.cleanBrowserProcesses(); } @AfterAll public static void tearDown() { Properties properties = getOAuthSnowflakeAuthorizationCodeConnectionParameters(); AuthTestHelper.deleteIdToken(AuthConnectionParameters.HOST, properties.getProperty("user")); AuthTestHelper.deleteOauthToken(AuthConnectionParameters.HOST, properties.getProperty("user")); AuthTestHelper.deleteOauthRefreshToken( AuthConnectionParameters.HOST, properties.getProperty("user")); } @Test void shouldAuthenticateUsingSnowflakeOauthAuthorizationCode() throws InterruptedException { Thread provideCredentialsThread = new Thread( () -> authTestHelper.provideCredentials( "internalOauthSnowflakeSuccess", login, password)); Thread connectThread = new Thread(() -> authTestHelper.connectAndExecuteSimpleQuery(properties, null)); authTestHelper.connectAndProvideCredentials(provideCredentialsThread, connectThread); authTestHelper.verifyExceptionIsNotThrown(); } @Test void shouldThrowErrorForMismatchedOauthSnowflakeUsername() throws InterruptedException { properties.setProperty("user", "invalidUser@snowflake.com"); Thread provideCredentialsThread = new Thread( () -> authTestHelper.provideCredentials( "internalOauthSnowflakeSuccess", login, password)); Thread connectThread = new Thread(() -> authTestHelper.connectAndExecuteSimpleQuery(properties, null)); authTestHelper.connectAndProvideCredentials(provideCredentialsThread, connectThread); 
authTestHelper.verifyExceptionIsThrown( "The user you were trying to authenticate as differs from the user tied to the access token."); } @Test void shouldThrowErrorForOauthSnowflakeTimeout() throws InterruptedException { properties.put("BROWSER_RESPONSE_TIMEOUT", "0"); authTestHelper.connectAndExecuteSimpleQuery(properties, null); authTestHelper.verifyExceptionIsThrown( "Error during OAuth Authorization Code authentication: Authorization request timed out. " + "Snowflake driver did not receive authorization code back to the redirect URI. Verify your security integration and driver configuration."); } @Test void shouldAuthenticateUsingTokenCacheOauthSnowflake() throws InterruptedException { properties.put("CLIENT_STORE_TEMPORARY_CREDENTIAL", true); Thread provideCredentialsThread = new Thread( () -> authTestHelper.provideCredentials( "internalOauthSnowflakeSuccess", login, password)); Thread connectThread = authTestHelper.getConnectAndExecuteSimpleQueryThread(properties, null); authTestHelper.connectAndProvideCredentials(provideCredentialsThread, connectThread); authTestHelper.verifyExceptionIsNotThrown(); authTestHelper.connectAndExecuteSimpleQuery(properties, null); authTestHelper.verifyExceptionIsNotThrown(); } @Test void shouldNotAuthenticateUsingTokenCacheOauthSnowflake() throws InterruptedException { properties.put("CLIENT_STORE_TEMPORARY_CREDENTIAL", true); properties.remove("user"); Thread provideCredentialsThread = new Thread( () -> authTestHelper.provideCredentials( "internalOauthSnowflakeSuccess", login, password)); Thread connectThread = authTestHelper.getConnectAndExecuteSimpleQueryThread(properties, null); authTestHelper.connectAndProvideCredentials(provideCredentialsThread, connectThread); authTestHelper.verifyExceptionIsNotThrown(); properties.put("BROWSER_RESPONSE_TIMEOUT", "5"); authTestHelper.connectAndExecuteSimpleQuery(properties, null); authTestHelper.verifyExceptionIsThrown( "Error during OAuth Authorization Code authentication: Authorization request timed out. " + "Snowflake driver did not receive authorization code back to the redirect URI. 
Verify your security integration and driver configuration."); } } ================================================ FILE: src/test/java/net/snowflake/client/authentication/OauthSnowflakeAuthorizationCodeWildcardsLatestIT.java ================================================ package net.snowflake.client.authentication; import static net.snowflake.client.authentication.AuthConnectionParameters.OAUTH_PASSWORD; import static net.snowflake.client.authentication.AuthConnectionParameters.getOAuthSnowflakeWildcardsAuthorizationCodeConnectionParameters; import java.io.IOException; import java.util.Properties; import net.snowflake.client.category.TestTags; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; @Tag(TestTags.AUTHENTICATION) public class OauthSnowflakeAuthorizationCodeWildcardsLatestIT { Properties properties = getOAuthSnowflakeWildcardsAuthorizationCodeConnectionParameters(); String login = properties.getProperty("user"); String password = OAUTH_PASSWORD; AuthTestHelper authTestHelper = new AuthTestHelper(); @BeforeEach public void setUp() throws IOException { authTestHelper.cleanBrowserProcesses(); AuthTestHelper.deleteIdToken(AuthConnectionParameters.HOST, login); AuthTestHelper.deleteOauthToken(AuthConnectionParameters.HOST, login); AuthTestHelper.deleteOauthRefreshToken(AuthConnectionParameters.HOST, login); properties = getOAuthSnowflakeWildcardsAuthorizationCodeConnectionParameters(); } @AfterEach public void cleanUp() { authTestHelper.cleanBrowserProcesses(); } @AfterAll public static void tearDown() { Properties properties = getOAuthSnowflakeWildcardsAuthorizationCodeConnectionParameters(); AuthTestHelper.deleteIdToken(AuthConnectionParameters.HOST, properties.getProperty("user")); AuthTestHelper.deleteOauthToken(AuthConnectionParameters.HOST, properties.getProperty("user")); AuthTestHelper.deleteOauthRefreshToken( AuthConnectionParameters.HOST, properties.getProperty("user")); } @Test void shouldAuthenticateUsingSnowflakeOauthAuthorizationCode() throws InterruptedException { Thread provideCredentialsThread = new Thread( () -> authTestHelper.provideCredentials( "internalOauthSnowflakeSuccess", login, password)); Thread connectThread = new Thread(() -> authTestHelper.connectAndExecuteSimpleQuery(properties, null)); authTestHelper.connectAndProvideCredentials(provideCredentialsThread, connectThread); authTestHelper.verifyExceptionIsNotThrown(); } @Test void shouldThrowErrorForMismatchedOauthSnowflakeUsername() throws InterruptedException { properties.setProperty("user", "invalidUser@snowflake.com"); Thread provideCredentialsThread = new Thread( () -> authTestHelper.provideCredentials( "internalOauthSnowflakeSuccess", login, password)); Thread connectThread = new Thread(() -> authTestHelper.connectAndExecuteSimpleQuery(properties, null)); authTestHelper.connectAndProvideCredentials(provideCredentialsThread, connectThread); authTestHelper.verifyExceptionIsThrown( "The user you were trying to authenticate as differs from the user tied to the access token."); } @Test void shouldThrowErrorForOauthSnowflakeTimeout() throws InterruptedException { properties.put("BROWSER_RESPONSE_TIMEOUT", "0"); authTestHelper.connectAndExecuteSimpleQuery(properties, null); authTestHelper.verifyExceptionIsThrown( "Error during OAuth Authorization Code authentication: Authorization request timed out. 
" + "Snowflake driver did not receive authorization code back to the redirect URI. Verify your security integration and driver configuration."); } @Test void shouldAuthenticateUsingTokenCacheOauthSnowflake() throws InterruptedException { properties.put("CLIENT_STORE_TEMPORARY_CREDENTIAL", true); Thread provideCredentialsThread = new Thread( () -> authTestHelper.provideCredentials( "internalOauthSnowflakeSuccess", login, password)); Thread connectThread = authTestHelper.getConnectAndExecuteSimpleQueryThread(properties, null); authTestHelper.connectAndProvideCredentials(provideCredentialsThread, connectThread); authTestHelper.verifyExceptionIsNotThrown(); authTestHelper.connectAndExecuteSimpleQuery(properties, null); authTestHelper.verifyExceptionIsNotThrown(); } } ================================================ FILE: src/test/java/net/snowflake/client/authentication/OktaAuthLatestIT.java ================================================ package net.snowflake.client.authentication; import static net.snowflake.client.authentication.AuthConnectionParameters.SSO_USER; import static net.snowflake.client.authentication.AuthConnectionParameters.getOktaConnectionParameters; import java.io.IOException; import java.util.Properties; import net.snowflake.client.category.TestTags; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; @Tag(TestTags.AUTHENTICATION) class OktaAuthLatestIT { AuthTestHelper authTestHelper; Properties properties; @BeforeEach public void setUp() throws IOException { authTestHelper = new AuthTestHelper(); properties = getOktaConnectionParameters(); } @Test void shouldAuthenticateUsingOkta() { authTestHelper.connectAndExecuteSimpleQuery(getOktaConnectionParameters(), null); authTestHelper.verifyExceptionIsNotThrown(); } @Test void shouldAuthenticateUsingOktaWithOktaUsernameParam() { properties.replace("user", "differentUsername"); authTestHelper.connectAndExecuteSimpleQuery(properties, "oktausername=" + SSO_USER); authTestHelper.verifyExceptionIsNotThrown(); } @Test void shouldThrowErrorForWrongOktaCredentials() { properties.put("user", "invalidUsername"); properties.put("password", "fakepassword"); authTestHelper.connectAndExecuteSimpleQuery(properties, null); authTestHelper.verifyExceptionIsThrown( "JDBC driver encountered communication error. Message: HTTP status=401."); } @Test void shouldThrowErrorForWrongOktaCredentialsInOktaUsernameParam() { properties.replace("user", "differentUsername"); authTestHelper.connectAndExecuteSimpleQuery(properties, "oktausername=invalidUser"); authTestHelper.verifyExceptionIsThrown( "JDBC driver encountered communication error. Message: HTTP status=401."); } @Test void shouldThrowErrorForWrongOktaUrl() { properties.put("authenticator", "https://invalid.okta.com/"); authTestHelper.connectAndExecuteSimpleQuery(properties, null); authTestHelper.verifyExceptionIsThrown( "The specified authenticator is not accepted by your Snowflake account configuration. 
Please contact your local system administrator to get the correct URL to use."); } @Test @Disabled // todo SNOW-1852279 implement error handling for invalid URL void shouldThrowErrorForWrongUrlWithoutOktaPath() { properties.put("authenticator", "https://invalid.abc.com/"); authTestHelper.connectAndExecuteSimpleQuery(properties, null); authTestHelper.verifyExceptionIsThrown("todo"); } } ================================================ FILE: src/test/java/net/snowflake/client/authentication/PATLatestIT.java ================================================ package net.snowflake.client.authentication; import static net.snowflake.client.authentication.AuthConnectionParameters.HOST; import static net.snowflake.client.authentication.AuthConnectionParameters.SNOWFLAKE_INTERNAL_ROLE; import static net.snowflake.client.authentication.AuthConnectionParameters.SNOWFLAKE_USER; import static net.snowflake.client.authentication.AuthConnectionParameters.getOktaConnectionParameters; import static net.snowflake.client.authentication.AuthConnectionParameters.getPATConnectionParameters; import java.io.IOException; import java.sql.Connection; import java.sql.DriverManager; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.text.SimpleDateFormat; import java.util.Date; import java.util.Properties; import net.snowflake.client.category.TestTags; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; @Tag(TestTags.AUTHENTICATION) public class PATLatestIT { AuthTestHelper authTestHelper; String patName; @BeforeEach public void setUp() throws IOException { authTestHelper = new AuthTestHelper(); } @Test void shouldAuthenticateUsingPAT() { Properties properties = getPATConnectionParameters(); properties.put("token", getPAT()); authTestHelper.connectAndExecuteSimpleQuery(properties, null); authTestHelper.verifyExceptionIsNotThrown(); removePAT(); } @Test void shouldThrowErrorForInvalidPAT() { Properties properties = getPATConnectionParameters(); properties.put("token", "invalidToken"); authTestHelper.connectAndExecuteSimpleQuery(properties, null); authTestHelper.verifyExceptionIsThrown("Programmatic access token is invalid."); } @Test void shouldThrowErrorForMismatchedPATUsername() throws IOException { Properties properties = getPATConnectionParameters(); properties.put("token", getPAT()); properties.put("user", "differentUsername"); authTestHelper.connectAndExecuteSimpleQuery(properties, null); authTestHelper.verifyExceptionIsThrown("Programmatic access token is invalid."); removePAT(); } private String getPAT() { patName = "PAT_JDBC_" + generateRandomSuffix(); String command = String.format( "alter user %s add programmatic access token %s ROLE_RESTRICTION = '%s'", SNOWFLAKE_USER, patName, SNOWFLAKE_INTERNAL_ROLE); return connectUsingDifferentAuthMethodAndExecuteCommand(command, true); } private void removePAT() { String command = String.format( "alter user %s remove programmatic access token %s;", SNOWFLAKE_USER, patName); connectUsingDifferentAuthMethodAndExecuteCommand(command, false); } private String connectUsingDifferentAuthMethodAndExecuteCommand( String command, boolean shouldReturnToken) { Properties properties = getOktaConnectionParameters(); String url = String.format("jdbc:snowflake://%s", HOST); try (Connection con = DriverManager.getConnection(url, properties); Statement stmt = con.createStatement(); ResultSet rs = stmt.executeQuery(command)) { if (shouldReturnToken && rs.next()) { return 
rs.getString("token_secret"); } return null; } catch (SQLException e) { throw new RuntimeException(e); } } private String generateRandomSuffix() { SimpleDateFormat sdf = new SimpleDateFormat("yyyyMMddHHmmssSSS"); return sdf.format(new Date()); } } ================================================ FILE: src/test/java/net/snowflake/client/category/TestTags.java ================================================ package net.snowflake.client.category; public class TestTags { private TestTags() {} public static final String ARROW = "arrow"; public static final String CONNECTION = "connection"; public static final String CORE = "core"; public static final String DIAGNOSTIC = "diagnostic"; public static final String LOADER = "loader"; public static final String DATABASE_META_DATA = "databaseMetaData"; public static final String OTHERS = "others"; public static final String RESULT_SET = "resultSet"; public static final String STATEMENT = "statement"; public static final String AUTHENTICATION = "authentication"; public static final String WIF = "wif"; public static final String TESTING = "testing"; } ================================================ FILE: src/test/java/net/snowflake/client/internal/api/implementation/metadata/SnowflakeDatabaseMetaDataImplColumnSizeTest.java ================================================ package net.snowflake.client.internal.api.implementation.metadata; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNull; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import java.sql.Types; import java.util.stream.Stream; import net.snowflake.client.api.resultset.SnowflakeType; import net.snowflake.client.internal.jdbc.SnowflakeColumnMetadata; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.ValueSource; public class SnowflakeDatabaseMetaDataImplColumnSizeTest { static Stream<Arguments> lengthBasedTypes() { return Stream.of( Arguments.of(Types.VARCHAR, 100), Arguments.of(Types.CHAR, 50), Arguments.of(Types.BINARY, 200), Arguments.of(Types.VARBINARY, 250)); } @ParameterizedTest @MethodSource("lengthBasedTypes") public void testGetColumnSizeByLength(int type, int length) { SnowflakeColumnMetadata metadata = mock(SnowflakeColumnMetadata.class); when(metadata.getType()).thenReturn(type); when(metadata.getLength()).thenReturn(length); assertEquals(length, SnowflakeDatabaseMetaDataImpl.getColumnSize(metadata)); } static Stream<Integer> precisionBasedTypes() { return Stream.of( Types.DECIMAL, Types.NUMERIC, Types.BIGINT, Types.INTEGER, Types.SMALLINT, Types.TINYINT, Types.FLOAT, Types.DOUBLE, Types.REAL, SnowflakeType.EXTRA_TYPES_DECFLOAT, Types.DATE, Types.TIME, Types.TIMESTAMP, Types.TIMESTAMP_WITH_TIMEZONE, SnowflakeType.EXTRA_TYPES_TIMESTAMP_LTZ, SnowflakeType.EXTRA_TYPES_TIMESTAMP_TZ, SnowflakeType.EXTRA_TYPES_TIMESTAMP_NTZ); } @ParameterizedTest @MethodSource("precisionBasedTypes") public void testGetColumnSizeByPrecision(int type) { int precision = 38; SnowflakeColumnMetadata metadata = mock(SnowflakeColumnMetadata.class); when(metadata.getType()).thenReturn(type); when(metadata.getPrecision()).thenReturn(precision); assertEquals(precision, SnowflakeDatabaseMetaDataImpl.getColumnSize(metadata)); } @Test public void testGetColumnSizeVector() { SnowflakeColumnMetadata metadata = mock(SnowflakeColumnMetadata.class);
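/*
 * [Editor's aside -- illustrative sketch, not part of the original file] Taken together, the
 * parameterized tests above and the stubs below pin down where getColumnSize() sources its
 * value per JDBC type: character/binary types report getLength(), numeric and temporal types
 * report getPrecision(), VECTOR reports getDimension(), and everything else yields null.
 * Roughly (assumed shape, not the actual implementation):
 *
 *   switch (metadata.getType()) {
 *     case Types.VARCHAR: case Types.CHAR: case Types.BINARY: case Types.VARBINARY:
 *       return metadata.getLength();            // size = declared length
 *     case Types.DECIMAL: case Types.NUMERIC:   // ... other numeric/timestamp types ...
 *       return metadata.getPrecision();         // size = precision
 *     case SnowflakeType.EXTRA_TYPES_VECTOR:
 *       return metadata.getDimension();         // size = vector dimension
 *     default:
 *       return null;                            // BOOLEAN, ARRAY, STRUCT: no defined size
 *   }
 */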
when(metadata.getType()).thenReturn(SnowflakeType.EXTRA_TYPES_VECTOR); when(metadata.getDimension()).thenReturn(128); assertEquals(128, SnowflakeDatabaseMetaDataImpl.getColumnSize(metadata)); } @ParameterizedTest @ValueSource(ints = {Types.BOOLEAN, Types.ARRAY, Types.STRUCT}) public void testGetColumnSizeOther(int type) { SnowflakeColumnMetadata metadata = mock(SnowflakeColumnMetadata.class); when(metadata.getType()).thenReturn(type); assertNull(SnowflakeDatabaseMetaDataImpl.getColumnSize(metadata)); } } ================================================ FILE: src/test/java/net/snowflake/client/internal/api/implementation/pooling/ConnectionPoolingDataSourceIT.java ================================================ package net.snowflake.client.internal.api.implementation.pooling; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.CoreMatchers.sameInstance; import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.jupiter.api.Assertions.assertThrows; import java.sql.Connection; import java.sql.SQLException; import java.sql.Statement; import java.util.ArrayList; import java.util.List; import java.util.Map; import javax.sql.ConnectionEvent; import javax.sql.ConnectionEventListener; import javax.sql.PooledConnection; import net.snowflake.client.AbstractDriverIT; import net.snowflake.client.api.pooling.SnowflakeConnectionPoolDataSource; import net.snowflake.client.api.pooling.SnowflakeConnectionPoolDataSourceFactory; import net.snowflake.client.category.TestTags; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; @Tag(TestTags.CONNECTION) public class ConnectionPoolingDataSourceIT extends AbstractDriverIT { @Test public void testPooledConnection() throws SQLException { Map<String, String> properties = getConnectionParameters(); SnowflakeConnectionPoolDataSource poolDataSource = SnowflakeConnectionPoolDataSourceFactory.createConnectionPoolDataSource(); poolDataSource.setUrl(properties.get("uri")); poolDataSource.setPortNumber(Integer.parseInt(properties.get("port"))); poolDataSource.setSsl("on".equals(properties.get("ssl"))); poolDataSource.setAccount(properties.get("account")); poolDataSource.setUser(properties.get("user")); // Use private key authentication if available, otherwise password if (properties.get("private_key_file") != null && !properties.get("private_key_file").isEmpty()) { poolDataSource.setPrivateKeyFile( properties.get("private_key_file"), properties.get("private_key_pwd")); } else { poolDataSource.setPassword(properties.get("password")); } PooledConnection pooledConnection = poolDataSource.getPooledConnection(); TestingConnectionListener listener = new TestingConnectionListener(); pooledConnection.addConnectionEventListener(listener); int thrownErrorCode; try (Connection connection = pooledConnection.getConnection(); Statement statement = connection.createStatement()) { statement.execute("select 1"); SQLException e = assertThrows(SQLException.class, () -> connection.setCatalog("nonexistent_database")); thrownErrorCode = e.getErrorCode(); // should not close underlying physical connection // and fire connection closed events } List<ConnectionEvent> connectionClosedEvents = listener.getConnectionClosedEvents(); List<ConnectionEvent> connectionErrorEvents = listener.getConnectionErrorEvents(); // assert connection close event assertThat(connectionClosedEvents.size(), is(1)); ConnectionEvent closedEvent = connectionClosedEvents.get(0); assertThat(closedEvent.getSQLException(),
is(nullValue())); assertThat(closedEvent.getSource(), instanceOf(SnowflakePooledConnection.class)); assertThat((PooledConnection) closedEvent.getSource(), sameInstance(pooledConnection)); // assert connection error event assertThat(connectionErrorEvents.size(), is(1)); ConnectionEvent errorEvent = connectionErrorEvents.get(0); assertThat(errorEvent.getSource(), instanceOf(SnowflakePooledConnection.class)); assertThat((PooledConnection) errorEvent.getSource(), sameInstance(pooledConnection)); // error event error code match thrown error code assertThat(errorEvent.getSQLException().getErrorCode(), is(thrownErrorCode)); // assert physical connection is not closed Connection physicalConnection = ((SnowflakePooledConnection) pooledConnection).getPhysicalConnection(); assertThat(physicalConnection.isClosed(), is(false)); pooledConnection.removeConnectionEventListener(listener); // will close physical connection pooledConnection.close(); assertThat(physicalConnection.isClosed(), is(true)); } @Test public void testPooledConnectionUsernamePassword() throws SQLException { Map<String, String> properties = getConnectionParameters(); SnowflakeConnectionPoolDataSource poolDataSource = SnowflakeConnectionPoolDataSourceFactory.createConnectionPoolDataSource(); poolDataSource.setUrl(properties.get("uri")); poolDataSource.setPortNumber(Integer.parseInt(properties.get("port"))); poolDataSource.setSsl("on".equals(properties.get("ssl"))); poolDataSource.setAccount(properties.get("account")); PooledConnection pooledConnection; // Use private key authentication if available, otherwise username/password method if (properties.get("private_key_file") != null && !properties.get("private_key_file").isEmpty()) { poolDataSource.setUser(properties.get("user")); poolDataSource.setPrivateKeyFile( properties.get("private_key_file"), properties.get("private_key_pwd")); pooledConnection = poolDataSource.getPooledConnection(); } else { pooledConnection = poolDataSource.getPooledConnection(properties.get("user"), properties.get("password")); } TestingConnectionListener listener = new TestingConnectionListener(); pooledConnection.addConnectionEventListener(listener); try (Connection connection = pooledConnection.getConnection()) { connection.createStatement().execute("select 1"); } pooledConnection.close(); } private static class TestingConnectionListener implements ConnectionEventListener { private List<ConnectionEvent> connectionClosedEvents; private List<ConnectionEvent> connectionErrorEvents; TestingConnectionListener() { connectionClosedEvents = new ArrayList<>(); connectionErrorEvents = new ArrayList<>(); } @Override public void connectionClosed(ConnectionEvent event) { connectionClosedEvents.add(event); } @Override public void connectionErrorOccurred(ConnectionEvent event) { connectionErrorEvents.add(event); } public List<ConnectionEvent> getConnectionClosedEvents() { return connectionClosedEvents; } public List<ConnectionEvent> getConnectionErrorEvents() { return connectionErrorEvents; } } } ================================================ FILE: src/test/java/net/snowflake/client/internal/api/implementation/pooling/LogicalConnectionLatestIT.java ================================================ package net.snowflake.client.internal.api.implementation.pooling; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.doThrow; import
static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.sql.CallableStatement; import java.sql.Clob; import java.sql.Connection; import java.sql.DatabaseMetaData; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.util.Collections; import java.util.Map; import java.util.Properties; import javax.sql.PooledConnection; import net.snowflake.client.api.driver.SnowflakeDriver; import net.snowflake.client.api.pooling.SnowflakeConnectionPoolDataSource; import net.snowflake.client.api.pooling.SnowflakeConnectionPoolDataSourceFactory; import net.snowflake.client.category.TestTags; import net.snowflake.client.internal.api.implementation.connection.SnowflakeConnectionImpl; import net.snowflake.client.internal.jdbc.BaseJDBCTest; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; @Tag(TestTags.CONNECTION) public class LogicalConnectionLatestIT extends BaseJDBCTest { Map<String, String> properties = getConnectionParameters(); @Test public void testLogicalConnection() throws SQLException { SnowflakeConnectionPoolDataSource poolDataSource = SnowflakeConnectionPoolDataSourceFactory.createConnectionPoolDataSource(); poolDataSource = setProperties(poolDataSource); PooledConnection pooledConnection = poolDataSource.getPooledConnection(); Connection logicalConnection = pooledConnection.getConnection(); try (Statement statement = logicalConnection.createStatement( ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.CLOSE_CURSORS_AT_COMMIT)) { try (ResultSet resultSet = statement.executeQuery("show parameters")) { assertTrue(resultSet.next()); assertFalse(logicalConnection.isClosed()); assertEquals(ResultSet.CLOSE_CURSORS_AT_COMMIT, logicalConnection.getHoldability()); } } logicalConnection.close(); assertTrue(logicalConnection.isClosed()); pooledConnection.close(); } @Test public void testNetworkTimeout() throws SQLException { SnowflakeConnectionPoolDataSource poolDataSource = SnowflakeConnectionPoolDataSourceFactory.createConnectionPoolDataSource(); poolDataSource = setProperties(poolDataSource); PooledConnection pooledConnection = poolDataSource.getPooledConnection(); try (Connection logicalConnection = pooledConnection.getConnection()) { int millis = logicalConnection.getNetworkTimeout(); assertEquals(0, millis); logicalConnection.setNetworkTimeout(null, 200); assertEquals(200, logicalConnection.getNetworkTimeout()); } pooledConnection.close(); } @Test public void testIsValid() throws Throwable { SnowflakeConnectionPoolDataSource poolDataSource = SnowflakeConnectionPoolDataSourceFactory.createConnectionPoolDataSource(); poolDataSource = setProperties(poolDataSource); PooledConnection pooledConnection = poolDataSource.getPooledConnection(); try (Connection logicalConnection = pooledConnection.getConnection()) { assertTrue(logicalConnection.isValid(10)); assertThrows(SQLException.class, () -> logicalConnection.isValid(-10)); } pooledConnection.close(); } @Test public void testConnectionClientInfo() throws SQLException { SnowflakeConnectionPoolDataSource poolDataSource = SnowflakeConnectionPoolDataSourceFactory.createConnectionPoolDataSource(); poolDataSource = setProperties(poolDataSource); PooledConnection pooledConnection = poolDataSource.getPooledConnection(); try (Connection logicalConnection = pooledConnection.getConnection()) { Properties property = logicalConnection.getClientInfo(); assertEquals(0,
property.size()); Properties clientInfo = new Properties(); clientInfo.setProperty("name", "Peter"); clientInfo.setProperty("description", "SNOWFLAKE JDBC"); expectSQLClientInfoException(() -> logicalConnection.setClientInfo(clientInfo)); expectSQLClientInfoException( () -> logicalConnection.setClientInfo("ApplicationName", "valueA")); assertNull(logicalConnection.getClientInfo("Peter")); } pooledConnection.close(); } @Test public void testAbort() throws SQLException { SnowflakeConnectionPoolDataSource poolDataSource = SnowflakeConnectionPoolDataSourceFactory.createConnectionPoolDataSource(); poolDataSource = setProperties(poolDataSource); PooledConnection pooledConnection = poolDataSource.getPooledConnection(); Connection logicalConnection = pooledConnection.getConnection(); Connection physicalConnection = ((SnowflakePooledConnection) pooledConnection).getPhysicalConnection(); assertTrue(!physicalConnection.isClosed()); logicalConnection.abort(null); assertTrue(physicalConnection.isClosed()); } @Test public void testNativeSQL() throws Throwable { SnowflakeConnectionPoolDataSource poolDataSource = SnowflakeConnectionPoolDataSourceFactory.createConnectionPoolDataSource(); poolDataSource = setProperties(poolDataSource); PooledConnection pooledConnection = poolDataSource.getPooledConnection(); try (Connection logicalConnection = pooledConnection.getConnection()) { // today returning the source SQL. assertEquals("select 1", logicalConnection.nativeSQL("select 1")); } pooledConnection.close(); } @Test public void testUnwrapper() throws Throwable { SnowflakeConnectionPoolDataSource poolDataSource = SnowflakeConnectionPoolDataSourceFactory.createConnectionPoolDataSource(); poolDataSource = setProperties(poolDataSource); PooledConnection pooledConnection = poolDataSource.getPooledConnection(); try (Connection logicalConnection = pooledConnection.getConnection()) { boolean canUnwrap = logicalConnection.isWrapperFor(SnowflakeConnectionImpl.class); assertTrue(canUnwrap); SnowflakeConnectionImpl sfconnection = logicalConnection.unwrap(SnowflakeConnectionImpl.class); sfconnection.createStatement(); assertThrows(SQLException.class, () -> logicalConnection.unwrap(SnowflakeDriver.class)); } } @Test public void testTransactionStatement() throws SQLException { SnowflakeConnectionPoolDataSource poolDataSource = SnowflakeConnectionPoolDataSourceFactory.createConnectionPoolDataSource(); poolDataSource = setProperties(poolDataSource); PooledConnection pooledConnection = poolDataSource.getPooledConnection(); try (Connection logicalConnection = pooledConnection.getConnection()) { logicalConnection.setAutoCommit(false); assertFalse(logicalConnection.getAutoCommit()); logicalConnection.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED); assertEquals(2, logicalConnection.getTransactionIsolation()); try (Statement statement = logicalConnection.createStatement()) { statement.executeUpdate("create or replace table test_transaction (colA int, colB string)"); // start a transaction statement.executeUpdate("insert into test_transaction values (1, 'abc')"); // commit logicalConnection.commit(); try (ResultSet resultSet = statement.executeQuery("select count(*) from test_transaction")) { assertTrue(resultSet.next()); assertEquals(1, resultSet.getInt(1)); } // rollback statement.executeUpdate("delete from test_transaction"); logicalConnection.rollback(); try (ResultSet resultSet = statement.executeQuery("select count(*) from test_transaction")) { assertTrue(resultSet.next()); assertEquals(1, resultSet.getInt(1)); 
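/*
 * [Editor's aside -- minimal sketch, not part of the original test] The assertion just above
 * expects the row count to remain 1 because, with auto-commit off, only commit() publishes
 * pending work; rollback() discards the uncommitted delete while the earlier committed insert
 * survives. The contract in miniature:
 *
 *   connection.setAutoCommit(false);
 *   statement.executeUpdate("insert ...");  // pending
 *   connection.commit();                    // row becomes durable
 *   statement.executeUpdate("delete ...");  // pending
 *   connection.rollback();                  // pending delete discarded; committed row remains
 */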
} } finally { try (Statement statement = logicalConnection.createStatement()) { statement.execute("drop table if exists test_transaction"); } } } pooledConnection.close(); } @Test public void testReadOnly() throws SQLException { SnowflakeConnectionPoolDataSource poolDataSource = SnowflakeConnectionPoolDataSourceFactory.createConnectionPoolDataSource(); poolDataSource = setProperties(poolDataSource); PooledConnection pooledConnection = poolDataSource.getPooledConnection(); try (Connection logicalConnection = pooledConnection.getConnection()) { // read only is not supported - will always be false assertEquals(false, logicalConnection.isReadOnly()); logicalConnection.setReadOnly(true); assertEquals(false, logicalConnection.isReadOnly()); } pooledConnection.close(); } @Test public void testGetTypeMap() throws Throwable { SnowflakeConnectionPoolDataSource poolDataSource = SnowflakeConnectionPoolDataSourceFactory.createConnectionPoolDataSource(); poolDataSource = setProperties(poolDataSource); PooledConnection pooledConnection = poolDataSource.getPooledConnection(); try (Connection logicalConnection = pooledConnection.getConnection()) { // return an empty type map. setTypeMap is not supported. assertEquals(Collections.emptyMap(), logicalConnection.getTypeMap()); } pooledConnection.close(); } @Test public void testPreparedStatement() throws SQLException { SnowflakeConnectionPoolDataSource poolDataSource = SnowflakeConnectionPoolDataSourceFactory.createConnectionPoolDataSource(); poolDataSource = setProperties(poolDataSource); PooledConnection pooledConnection = poolDataSource.getPooledConnection(); try (Connection logicalConnection = pooledConnection.getConnection()) { try (Statement statement = logicalConnection.createStatement()) { statement.execute("create or replace table test_prep (colA int, colB varchar)"); try (PreparedStatement preparedStatement = logicalConnection.prepareStatement("insert into test_prep values (?, ?)")) { preparedStatement.setInt(1, 25); preparedStatement.setString(2, "hello world"); preparedStatement.execute(); int count = 0; try (ResultSet resultSet = statement.executeQuery("select * from test_prep")) { while (resultSet.next()) { count++; } } assertEquals(1, count); } finally { statement.execute("drop table if exists test_prep"); } } } pooledConnection.close(); } @Test public void testSetSchema() throws SQLException { SnowflakeConnectionPoolDataSource poolDataSource = SnowflakeConnectionPoolDataSourceFactory.createConnectionPoolDataSource(); poolDataSource = setProperties(poolDataSource); PooledConnection pooledConnection = poolDataSource.getPooledConnection(); try (Connection logicalConnection = pooledConnection.getConnection()) { String schema = logicalConnection.getSchema(); // get the current schema try (ResultSet rst = logicalConnection.createStatement().executeQuery("select current_schema()")) { assertTrue(rst.next()); assertEquals(schema, rst.getString(1)); } logicalConnection.setSchema("PUBLIC"); // get the current schema try (ResultSet rst = logicalConnection.createStatement().executeQuery("select current_schema()")) { assertTrue(rst.next()); assertEquals("PUBLIC", rst.getString(1)); } } pooledConnection.close(); } @Test public void testPrepareCall() throws SQLException { String procedure = "CREATE OR REPLACE PROCEDURE output_message(message VARCHAR)\n" + "RETURNS VARCHAR NOT NULL\n" + "LANGUAGE SQL\n" + "AS\n" + "BEGIN\n" + " RETURN message;\n" + "END;"; SnowflakeConnectionPoolDataSource poolDataSource = 
SnowflakeConnectionPoolDataSourceFactory.createConnectionPoolDataSource(); poolDataSource = setProperties(poolDataSource); PooledConnection pooledConnection = poolDataSource.getPooledConnection(); try (Connection logicalConnection = pooledConnection.getConnection()) { try (Statement statement = logicalConnection.createStatement()) { statement.execute(procedure); try (CallableStatement callableStatement = logicalConnection.prepareCall("call output_message(?)")) { callableStatement.setString(1, "hello world"); try (ResultSet resultSet = callableStatement.executeQuery()) { resultSet.next(); assertEquals("hello world", resultSet.getString(1)); } } try (CallableStatement callableStatement = logicalConnection.prepareCall( "call output_message('hello world')", ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY)) { try (ResultSet resultSet = callableStatement.executeQuery()) { resultSet.next(); assertEquals("hello world", resultSet.getString(1)); assertEquals(1003, callableStatement.getResultSetType()); assertEquals(1007, callableStatement.getResultSetConcurrency()); } } try (CallableStatement callableStatement = logicalConnection.prepareCall( "call output_message('hello world')", ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.CLOSE_CURSORS_AT_COMMIT)) { try (ResultSet resultSet = callableStatement.executeQuery()) { resultSet.next(); assertEquals(2, callableStatement.getResultSetHoldability()); } } statement.execute("drop procedure if exists output_message(varchar)"); } } } @Test public void testClob() throws SQLException { SnowflakeConnectionPoolDataSource poolDataSource = SnowflakeConnectionPoolDataSourceFactory.createConnectionPoolDataSource(); poolDataSource = setProperties(poolDataSource); PooledConnection pooledConnection = poolDataSource.getPooledConnection(); try (Connection logicalConnection = pooledConnection.getConnection()) { try (Statement statement = logicalConnection.createStatement()) { statement.execute("create or replace table test_clob (colA text)"); } try (PreparedStatement preparedStatement = logicalConnection.prepareStatement("insert into test_clob values (?)")) { Clob clob = logicalConnection.createClob(); clob.setString(1, "hello world"); preparedStatement.setClob(1, clob); preparedStatement.execute(); } try (Statement statement = logicalConnection.createStatement()) { statement.execute("select * from test_clob"); try (ResultSet resultSet = statement.getResultSet()) { resultSet.next(); assertEquals("hello world", resultSet.getString("COLA")); } } } } @Test public void testDatabaseMetaData() throws SQLException { SnowflakeConnectionPoolDataSource poolDataSource = SnowflakeConnectionPoolDataSourceFactory.createConnectionPoolDataSource(); poolDataSource = setProperties(poolDataSource); PooledConnection pooledConnection = poolDataSource.getPooledConnection(); try (Connection logicalConnection = pooledConnection.getConnection()) { DatabaseMetaData databaseMetaData = logicalConnection.getMetaData(); assertEquals("Snowflake", databaseMetaData.getDatabaseProductName()); assertEquals(properties.get("user"), databaseMetaData.getUserName()); } } @Test public void testLogicalConnectionWhenPhysicalConnectionThrowsErrors() throws SQLException { Connection connection = mock(Connection.class); SnowflakePooledConnection snowflakePooledConnection = mock(SnowflakePooledConnection.class); when(snowflakePooledConnection.getPhysicalConnection()).thenReturn(connection); SQLException sqlException = new SQLException("mocking error"); 
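/*
 * [Editor's aside -- not part of the original test] The stubbing below uses the two standard
 * Mockito idioms side by side: when(...).thenThrow(...) for value-returning methods and
 * doThrow(...).when(...) for void methods, e.g.
 *
 *   when(connection.getAutoCommit()).thenThrow(sqlException);  // value-returning method
 *   doThrow(sqlException).when(connection).commit();           // void method
 *
 * The expectation verified at the end is that every delegated call which fails on the
 * physical connection both rethrows the SQLException and reports it exactly once via
 * fireConnectionErrorEvent, hence verify(..., times(26)).
 */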
when(connection.createStatement()).thenThrow(sqlException); when(connection.createStatement(1, 2, 3)).thenThrow(sqlException); when(connection.prepareStatement("mocksql")).thenThrow(sqlException); when(connection.prepareCall("mocksql")).thenThrow(sqlException); when(connection.prepareCall("mocksql", 1, 2, 3)).thenThrow(sqlException); when(connection.nativeSQL("mocksql")).thenThrow(sqlException); when(connection.getAutoCommit()).thenThrow(sqlException); when(connection.getMetaData()).thenThrow(sqlException); when(connection.isReadOnly()).thenThrow(sqlException); when(connection.getCatalog()).thenThrow(sqlException); when(connection.getTransactionIsolation()).thenThrow(sqlException); when(connection.getWarnings()).thenThrow(sqlException); when(connection.prepareCall("mocksql", 1, 2)).thenThrow(sqlException); when(connection.getTypeMap()).thenThrow(sqlException); when(connection.getHoldability()).thenThrow(sqlException); when(connection.createClob()).thenThrow(sqlException); when(connection.getClientInfo("mocksql")).thenThrow(sqlException); when(connection.getClientInfo()).thenThrow(sqlException); when(connection.createArrayOf("mock", null)).thenThrow(sqlException); when(connection.getSchema()).thenThrow(sqlException); when(connection.getNetworkTimeout()).thenThrow(sqlException); when(connection.isWrapperFor(Connection.class)).thenThrow(sqlException); doThrow(sqlException).when(connection).setAutoCommit(false); doThrow(sqlException).when(connection).commit(); doThrow(sqlException).when(connection).rollback(); doThrow(sqlException).when(connection).setReadOnly(false); doThrow(sqlException).when(connection).clearWarnings(); doThrow(sqlException).when(connection).setSchema(null); doThrow(sqlException).when(connection).abort(null); doThrow(sqlException).when(connection).setNetworkTimeout(null, 1); LogicalConnection logicalConnection = new LogicalConnection(snowflakePooledConnection); assertThrows(SQLException.class, logicalConnection::createStatement); assertThrows(SQLException.class, () -> logicalConnection.createStatement(1, 2, 3)); assertThrows(SQLException.class, () -> logicalConnection.nativeSQL("mocksql")); assertThrows(SQLException.class, logicalConnection::getAutoCommit); assertThrows(SQLException.class, logicalConnection::getMetaData); assertThrows(SQLException.class, logicalConnection::isReadOnly); assertThrows(SQLException.class, logicalConnection::getCatalog); assertThrows(SQLException.class, logicalConnection::getTransactionIsolation); assertThrows(SQLException.class, logicalConnection::getWarnings); assertThrows(SQLException.class, () -> logicalConnection.prepareCall("mocksql")); assertThrows(SQLException.class, logicalConnection::getTypeMap); assertThrows(SQLException.class, logicalConnection::getHoldability); assertThrows(SQLException.class, logicalConnection::createClob); assertThrows(SQLException.class, () -> logicalConnection.getClientInfo("mocksql")); assertThrows(SQLException.class, logicalConnection::getClientInfo); assertThrows(SQLException.class, () -> logicalConnection.createArrayOf("mock", null)); assertThrows(SQLException.class, logicalConnection::getSchema); assertThrows(SQLException.class, logicalConnection::getNetworkTimeout); assertThrows(SQLException.class, () -> logicalConnection.isWrapperFor(Connection.class)); assertThrows(SQLException.class, () -> logicalConnection.setAutoCommit(false)); assertThrows(SQLException.class, logicalConnection::rollback); assertThrows(SQLException.class, () -> logicalConnection.setReadOnly(false)); assertThrows(SQLException.class, 
logicalConnection::clearWarnings); assertThrows(SQLException.class, () -> logicalConnection.setSchema(null)); assertThrows(SQLException.class, () -> logicalConnection.abort(null)); assertThrows(SQLException.class, () -> logicalConnection.setNetworkTimeout(null, 1)); verify(snowflakePooledConnection, times(26)).fireConnectionErrorEvent(sqlException); } private SnowflakeConnectionPoolDataSource setProperties( SnowflakeConnectionPoolDataSource poolDataSource) { poolDataSource.setUrl(properties.get("uri")); poolDataSource.setPortNumber(Integer.parseInt(properties.get("port"))); poolDataSource.setSsl("on".equals(properties.get("ssl"))); poolDataSource.setAccount(properties.get("account")); poolDataSource.setUser(properties.get("user")); // Use private key authentication if available, otherwise password if (properties.get("private_key_file") != null && !properties.get("private_key_file").isEmpty()) { poolDataSource.setPrivateKeyFile( properties.get("private_key_file"), properties.get("private_key_pwd")); } else { poolDataSource.setPassword(properties.get("password")); } poolDataSource.setDatabaseName(properties.get("database")); poolDataSource.setSchema(properties.get("schema")); poolDataSource.setWarehouse(properties.get("warehouse")); return poolDataSource; } } ================================================ FILE: src/test/java/net/snowflake/client/internal/api/implementation/statement/SnowflakeStatementImplCopyResultSetTest.java ================================================ package net.snowflake.client.internal.api.implementation.statement; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import java.sql.ResultSet; import java.sql.SQLException; import net.snowflake.client.internal.api.implementation.connection.SnowflakeConnectionImpl; import net.snowflake.client.internal.api.implementation.resultset.SnowflakeBaseResultSet; import net.snowflake.client.internal.core.SFBaseResultSet; import net.snowflake.client.internal.core.SFBaseSession; import net.snowflake.client.internal.core.SFBaseStatement; import net.snowflake.client.internal.core.SFException; import net.snowflake.client.internal.core.SFStatementType; import net.snowflake.client.internal.jdbc.SFConnectionHandler; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; class SnowflakeStatementImplCopyResultSetTest { private SnowflakeConnectionImpl mockConnection; private SFConnectionHandler mockHandler; private SFBaseStatement mockSFStatement; private SFBaseResultSet mockSFResultSet; private SFBaseSession mockSession; private SnowflakeBaseResultSet mockJdbcResultSet; private SnowflakeStatementImpl statement; @BeforeEach void setUp() throws SQLException, SFException { mockConnection = mock(SnowflakeConnectionImpl.class); mockHandler = mock(SFConnectionHandler.class); mockSFStatement = mock(SFBaseStatement.class); mockSFResultSet = mock(SFBaseResultSet.class); mockSession = mock(SFBaseSession.class); mockJdbcResultSet = mock(SnowflakeBaseResultSet.class); when(mockConnection.getHandler(any())).thenReturn(mockHandler); when(mockHandler.getSFStatement()).thenReturn(mockSFStatement); when(mockConnection.getSFBaseSession(any())).thenReturn(mockSession); 
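/*
 * [Editor's aside -- assumed behavior, not the driver's actual code] This setUp wires the
 * whole execute() pipeline out of mocks (connection -> handler -> SFBaseStatement ->
 * SFBaseResultSet -> JDBC result set), so the tests in this class isolate just the
 * return-value decision of Statement.execute(). The assertions pin down a rule roughly like:
 *
 *   boolean hasResultSet =
 *       sfResultSet.getStatementType() == SFStatementType.COPY
 *           ? session.isEnableCopyResultSet()   // COPY surfaces a result set only when enabled
 *           : producesResultSetNormally(sfResultSet.getStatementType());
 *
 * where producesResultSetNormally(...) is a hypothetical name for the driver's existing
 * DML/query classification; INSERT stays false even with the flag on, so the flag is
 * COPY-specific.
 */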
when(mockConnection.getSessionID()).thenReturn("test-session-id"); when(mockConnection.isClosed()).thenReturn(false); when(mockSFStatement.execute(any(), any(), any(), any())).thenReturn(mockSFResultSet); when(mockSFStatement.hasChildren()).thenReturn(false); when(mockSFResultSet.getQueryId()).thenReturn("query-id"); when(mockSFResultSet.isClosed()).thenReturn(false); when(mockSFResultSet.next()).thenReturn(false); when(mockHandler.createResultSet(any(SFBaseResultSet.class), any())) .thenReturn(mockJdbcResultSet); when(mockJdbcResultSet.isClosed()).thenReturn(false); statement = new SnowflakeStatementImpl( mockConnection, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.CLOSE_CURSORS_AT_COMMIT); } @Test void copyWithFlagDisabled_executeReturnsFalseAndResultSetIsNull() throws SQLException { when(mockSFResultSet.getStatementType()).thenReturn(SFStatementType.COPY); when(mockSession.isEnableCopyResultSet()).thenReturn(false); when(mockSession.isSfSQLMode()).thenReturn(false); boolean result = statement.execute("COPY INTO ..."); assertFalse(result, "execute() should return false for COPY when flag is disabled"); assertNull( statement.getResultSet(), "getResultSet() should be null when execute() returns false"); } @Test void copyWithFlagEnabled_executeReturnsTrueAndResultSetIsNonNull() throws SQLException { when(mockSFResultSet.getStatementType()).thenReturn(SFStatementType.COPY); when(mockSession.isEnableCopyResultSet()).thenReturn(true); boolean result = statement.execute("COPY INTO ..."); assertTrue(result, "execute() should return true for COPY when flag is enabled"); assertNotNull( statement.getResultSet(), "getResultSet() should be non-null when execute() returns true"); } @Test void insertWithFlagEnabled_executeReturnsFalse() throws SQLException { when(mockSFResultSet.getStatementType()).thenReturn(SFStatementType.INSERT); when(mockSession.isEnableCopyResultSet()).thenReturn(true); when(mockSession.isSfSQLMode()).thenReturn(false); boolean result = statement.execute("INSERT INTO t VALUES (1)"); assertFalse( result, "execute() should return false for INSERT regardless of enableCopyResultSet flag"); } } ================================================ FILE: src/test/java/net/snowflake/client/internal/config/ConnectionAutoUrlParserTest.java ================================================ package net.snowflake.client.internal.config; import static org.junit.jupiter.api.Assertions.assertEquals; import org.junit.jupiter.api.Test; // Unit tests for getConnectionNameFromUrl() function. public class ConnectionAutoUrlParserTest { @Test void testValidConnection() { String url = "jdbc:snowflake:auto?connectionName=readonly"; String value = SFConnectionConfigParser.getConnectionNameFromUrl(url); assertEquals("readonly", value); } @Test void testNoParameters() { String url = "jdbc:snowflake:auto"; String value = SFConnectionConfigParser.getConnectionNameFromUrl(url); assertEquals("", value); } @Test void testUTFCharParameterKey() { String url = "jdbc:snowflake://account.region.snowflakecomputing.com/?" 
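/*
 * [Editor's aside -- not part of the original test] %E2%9C%93 in the URL below is the
 * percent-encoded UTF-8 form of the check mark character, so this test verifies that
 * connectionName extraction is not confused by a neighboring non-ASCII parameter value:
 *
 *   URLDecoder.decode("%E2%9C%93", StandardCharsets.UTF_8.name());  // -> "✓"
 */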
+ "user=vikram&password=secret&connectionName=myConfig¬e=%E2%9C%93"; assertEquals("myConfig", SFConnectionConfigParser.getConnectionNameFromUrl(url)); } @Test void testMissingValueForConnection() { String url = "jdbc:snowflake:auto?connectionName="; assertEquals("", SFConnectionConfigParser.getConnectionNameFromUrl(url)); } } ================================================ FILE: src/test/java/net/snowflake/client/internal/config/SFClientConfigParserTest.java ================================================ package net.snowflake.client.internal.config; import static net.snowflake.client.internal.config.SFClientConfigParser.SF_CLIENT_CONFIG_ENV_NAME; import static net.snowflake.client.internal.config.SFClientConfigParser.SF_CLIENT_CONFIG_FILE_NAME; import static net.snowflake.client.internal.config.SFClientConfigParser.convertToWindowsPath; import static net.snowflake.client.internal.config.SFClientConfigParser.getConfigFilePathFromJDBCJarLocation; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemSetEnv; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemUnsetEnv; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.mockStatic; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; import org.mockito.MockedStatic; public class SFClientConfigParserTest { private static final String CONFIG_JSON = "{\"common\":{\"log_level\":\"info\",\"log_path\":\"/jdbc.log\"}}"; private static final String CONFIG_JSON_WITH_UNKNOWN_PROPS = "{\"common\":{\"log_level\":\"info\",\"log_path\":\"/jdbc.log\",\"unknown_inside\":\"/unknown\"},\"unknown_outside\":\"/unknown\"}"; private Path configFilePath; @AfterEach public void cleanup() throws IOException { if (configFilePath != null) { Files.deleteIfExists(configFilePath); } systemUnsetEnv(SF_CLIENT_CONFIG_ENV_NAME); } @Test public void testLoadSFClientConfigValidPath() throws IOException { configFilePath = Paths.get("config.json"); Files.write(configFilePath, CONFIG_JSON.getBytes()); SFClientConfig actualConfig = SFClientConfigParser.loadSFClientConfig(configFilePath.toString()); assertEquals("info", actualConfig.getCommonProps().getLogLevel()); assertEquals("/jdbc.log", actualConfig.getCommonProps().getLogPath()); assertEquals("config.json", actualConfig.getConfigFilePath()); } @Test public void testLoadSFClientConfigValidPathWithUnknownProperties() throws IOException { configFilePath = Paths.get("config.json"); Files.write(configFilePath, CONFIG_JSON_WITH_UNKNOWN_PROPS.getBytes()); SFClientConfig actualConfig = SFClientConfigParser.loadSFClientConfig(configFilePath.toString()); assertEquals("info", actualConfig.getCommonProps().getLogLevel()); assertEquals("/jdbc.log", actualConfig.getCommonProps().getLogPath()); } @Test public void testLoadSFClientConfigInValidPath() { String configFilePath = "InvalidPath"; assertThrows(IOException.class, () -> SFClientConfigParser.loadSFClientConfig(configFilePath)); } @Test public void testLoadSFClientConfigInValidJson() { assertThrows( 
IOException.class, () -> { String invalidJson = "invalidJson"; configFilePath = Paths.get("config.json"); Files.write(configFilePath, invalidJson.getBytes()); SFClientConfigParser.loadSFClientConfig(configFilePath.toString()); }); } @Test public void testLoadSFClientConfigWithEnvVar() throws IOException { configFilePath = Paths.get("config.json"); Files.write(configFilePath, CONFIG_JSON.getBytes()); systemSetEnv(SF_CLIENT_CONFIG_ENV_NAME, "config.json"); SFClientConfig actualConfig = SFClientConfigParser.loadSFClientConfig(null); assertEquals("info", actualConfig.getCommonProps().getLogLevel()); assertEquals("/jdbc.log", actualConfig.getCommonProps().getLogPath()); } @Test public void testLoadSFClientConfigWithDriverLocation() throws IOException { String configLocation = Paths.get(getConfigFilePathFromJDBCJarLocation(), SF_CLIENT_CONFIG_FILE_NAME).toString(); configFilePath = Paths.get(configLocation); Files.write(configFilePath, CONFIG_JSON.getBytes()); SFClientConfig actualConfig = SFClientConfigParser.loadSFClientConfig(null); assertEquals("info", actualConfig.getCommonProps().getLogLevel()); assertEquals("/jdbc.log", actualConfig.getCommonProps().getLogPath()); } @Test public void testLoadSFClientConfigWithUserHome() throws IOException { String tmpDirectory = systemGetProperty("java.io.tmpdir"); try (MockedStatic<SnowflakeUtil> mockedSnowflakeUtil = mockStatic(SnowflakeUtil.class)) { // mocking this as Jenkins/GH Action doesn't have write permissions on user.home directory. mockedSnowflakeUtil.when(() -> systemGetProperty("user.home")).thenReturn(tmpDirectory); configFilePath = Paths.get(systemGetProperty("user.home"), SF_CLIENT_CONFIG_FILE_NAME); Files.write(configFilePath, CONFIG_JSON.getBytes()); SFClientConfig actualConfig = SFClientConfigParser.loadSFClientConfig(null); assertEquals("info", actualConfig.getCommonProps().getLogLevel()); assertEquals("/jdbc.log", actualConfig.getCommonProps().getLogPath()); } } @Test public void testLoadSFClientNoConditionsMatch() throws IOException { SFClientConfig actualConfig = SFClientConfigParser.loadSFClientConfig(null); assertNull(actualConfig); } @Test public void testGetConfigFileNameFromJDBCJarLocation() { String jdbcDirectoryPath = getConfigFilePathFromJDBCJarLocation(); assertTrue(jdbcDirectoryPath != null && !jdbcDirectoryPath.isEmpty()); } @Test public void testConvertToWindowsPath() { String mockWindowsPath = "C:/Program Files/example.txt"; String resultWindowsPath = "C:\\Program Files\\example.txt"; String[] testCases = new String[] {"", "file:\\", "\\\\", "/", "nested:\\"}; String mockCloudPrefix = "cloud://"; for (String testcase : testCases) { assertEquals(resultWindowsPath, convertToWindowsPath(testcase + mockWindowsPath)); } assertEquals( mockCloudPrefix + resultWindowsPath, convertToWindowsPath(mockCloudPrefix + mockWindowsPath)); } @Test void testSFClientConfigConstructorAndAccessors() { SFClientConfig.CommonProps props = new SFClientConfig.CommonProps(); props.setLogLevel("DEBUG"); props.setLogPath("/tmp/logs"); SFClientConfig config = new SFClientConfig(props); config.setConfigFilePath("/etc/snowflake/config.json"); assertEquals(props, config.getCommonProps()); assertEquals("/etc/snowflake/config.json", config.getConfigFilePath()); } @Test void testCommonPropsConstructorAndAccessors() { SFClientConfig.CommonProps props = new SFClientConfig.CommonProps(); props.setLogLevel("DEBUG"); props.setLogPath("/var/logs/snowflake.log"); assertEquals("DEBUG", props.getLogLevel()); assertEquals("/var/logs/snowflake.log", props.getLogPath()); } @Test
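/*
 * [Editor's aside -- illustrative, not part of the original file] mockStatic(...) in the
 * user.home test above is Mockito's scoped static mocking: the stub only applies inside the
 * try-with-resources block and is undone when the MockedStatic closes, so the override cannot
 * leak into other tests. The general shape:
 *
 *   try (MockedStatic<SnowflakeUtil> mocked = mockStatic(SnowflakeUtil.class)) {
 *     mocked.when(() -> SnowflakeUtil.systemGetProperty("user.home")).thenReturn("/tmp");
 *     // code under test sees the stubbed value here
 *   }  // original static behavior restored
 */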
void testSFClientConfigEqualsAndHashCode() { SFClientConfig.CommonProps props1 = new SFClientConfig.CommonProps(); props1.setLogLevel("INFO"); props1.setLogPath("/tmp"); SFClientConfig.CommonProps props2 = new SFClientConfig.CommonProps(); props2.setLogLevel("INFO"); props2.setLogPath("/tmp"); SFClientConfig config1 = new SFClientConfig(props1); SFClientConfig config2 = new SFClientConfig(props2); assertEquals(config1, config2); assertEquals(config1.hashCode(), config2.hashCode()); // Negative test props2.setLogLevel("DEBUG"); assertNotEquals(config1, new SFClientConfig(props2)); } @Test void testCommonPropsEqualsAndHashCode() { SFClientConfig.CommonProps props1 = new SFClientConfig.CommonProps(); props1.setLogLevel("WARN"); props1.setLogPath("/opt/logs"); SFClientConfig.CommonProps props2 = new SFClientConfig.CommonProps(); props2.setLogLevel("WARN"); props2.setLogPath("/opt/logs"); assertEquals(props1, props2); assertEquals(props1.hashCode(), props2.hashCode()); // Negative test props2.setLogLevel("ERROR"); assertNotEquals(props1, props2); } } ================================================ FILE: src/test/java/net/snowflake/client/internal/config/SFConnectionConfigParserPermissionTest.java ================================================ package net.snowflake.client.internal.config; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.attribute.PosixFilePermission; import java.util.Arrays; import java.util.HashSet; import java.util.List; import java.util.Set; import net.snowflake.client.annotations.DontRunOnWindows; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; class SFConnectionConfigParserPermissionTest { private Path createTempFileWithPermissions(Set<PosixFilePermission> perms) throws Exception { // Create a unique temporary directory Path tempDir = Files.createTempDirectory("snowflake"); // Inside it, create a file named "connections.toml" Path tomlFile = tempDir.resolve("connections.toml"); Files.createFile(tomlFile); // Apply the given POSIX permissions Files.setPosixFilePermissions(tomlFile, perms); // Mark both the file and the directory for deletion on JVM exit tomlFile.toFile().deleteOnExit(); tempDir.toFile().deleteOnExit(); return tomlFile; } static List<Object[]> permissionTestCases() { return Arrays.asList( new Object[][] { { // Group write new HashSet<>( Arrays.asList( PosixFilePermission.OWNER_READ, PosixFilePermission.OWNER_WRITE, PosixFilePermission.GROUP_WRITE)), true, "writable by group or others" }, { // Others write new HashSet<>( Arrays.asList( PosixFilePermission.OWNER_READ, PosixFilePermission.OWNER_WRITE, PosixFilePermission.OTHERS_WRITE)), true, "writable by group or others" }, { // Owner execute new HashSet<>( Arrays.asList( PosixFilePermission.OWNER_READ, PosixFilePermission.OWNER_WRITE, PosixFilePermission.OWNER_EXECUTE)), true, "executable" }, { // Group execute new HashSet<>( Arrays.asList( PosixFilePermission.OWNER_READ, PosixFilePermission.OWNER_WRITE, PosixFilePermission.GROUP_EXECUTE)), true, "executable" }, { // Others execute new HashSet<>( Arrays.asList( PosixFilePermission.OWNER_READ, PosixFilePermission.OWNER_WRITE, PosixFilePermission.OTHERS_EXECUTE)), true, "executable" }, { // Owner
read/write only new HashSet<>( Arrays.asList(PosixFilePermission.OWNER_READ, PosixFilePermission.OWNER_WRITE)), false, null } }); } @ParameterizedTest @MethodSource("permissionTestCases") @DontRunOnWindows void testFilePermissionScenarios( Set<PosixFilePermission> perms, boolean shouldThrow, String expectedMsg) throws Exception { Path tempFile = createTempFileWithPermissions(perms); try { if (shouldThrow) { Exception ex = assertThrows( SnowflakeSQLException.class, () -> SFConnectionConfigParser.verifyFilePermissionSecure(tempFile)); assertTrue(ex.getMessage().contains(expectedMsg)); } else { assertDoesNotThrow(() -> SFConnectionConfigParser.verifyFilePermissionSecure(tempFile)); } } finally { Files.deleteIfExists(tempFile); } } static List<Object[]> skipReadWarningTestCases() { return Arrays.asList( new Object[][] { { new HashSet<>( Arrays.asList( PosixFilePermission.OWNER_READ, PosixFilePermission.OWNER_WRITE, PosixFilePermission.GROUP_READ, PosixFilePermission.OTHERS_READ)) } }); } @ParameterizedTest @MethodSource("skipReadWarningTestCases") @DontRunOnWindows void testSkipWarningForReadPermissionsEnvVar(Set<PosixFilePermission> perms) throws Exception { Path tempFile = createTempFileWithPermissions(perms); SnowflakeUtil.systemSetEnv("SF_SKIP_WARNING_FOR_READ_PERMISSIONS_ON_CONFIG_FILE", "true"); try { assertDoesNotThrow(() -> SFConnectionConfigParser.verifyFilePermissionSecure(tempFile)); } finally { Files.deleteIfExists(tempFile); SnowflakeUtil.systemSetEnv("SF_SKIP_WARNING_FOR_READ_PERMISSIONS_ON_CONFIG_FILE", null); } } } ================================================ FILE: src/test/java/net/snowflake/client/internal/config/SFConnectionConfigParserTest.java ================================================ package net.snowflake.client.internal.config; import static net.snowflake.client.AssumptionUtils.assumeRunningOnLinuxMac; import static net.snowflake.client.internal.config.SFConnectionConfigParser.SKIP_TOKEN_FILE_PERMISSIONS_VERIFICATION; import static net.snowflake.client.internal.config.SFConnectionConfigParser.SNOWFLAKE_DEFAULT_CONNECTION_NAME_KEY; import static net.snowflake.client.internal.config.SFConnectionConfigParser.SNOWFLAKE_HOME_KEY; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isWindows; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import com.fasterxml.jackson.dataformat.toml.TomlMapper; import java.io.File; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.nio.file.attribute.FileAttribute; import java.nio.file.attribute.PosixFilePermission; import java.nio.file.attribute.PosixFilePermissions; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; public class SFConnectionConfigParserTest { private static final List<String> ENV_VARIABLES_KEYS = new ArrayList<>( Arrays.asList( SNOWFLAKE_HOME_KEY, SNOWFLAKE_DEFAULT_CONNECTION_NAME_KEY, SKIP_TOKEN_FILE_PERMISSIONS_VERIFICATION)); private Path tempPath = null; private TomlMapper tomlMapper = new TomlMapper(); private
Map<String, String> envVariables = new HashMap<>(); @BeforeEach public void setUp() throws IOException { tempPath = Files.createTempDirectory(".snowflake"); ENV_VARIABLES_KEYS.stream() .forEach( key -> { if (SnowflakeUtil.systemGetEnv(key) != null) { envVariables.put(key, SnowflakeUtil.systemGetEnv(key)); } }); } @AfterEach public void close() throws IOException { SnowflakeUtil.systemUnsetEnv(SNOWFLAKE_HOME_KEY); SnowflakeUtil.systemUnsetEnv(SNOWFLAKE_DEFAULT_CONNECTION_NAME_KEY); SnowflakeUtil.systemUnsetEnv(SKIP_TOKEN_FILE_PERMISSIONS_VERIFICATION); Files.walk(tempPath).map(Path::toFile).forEach(File::delete); Files.delete(tempPath); envVariables.forEach((key, value) -> SnowflakeUtil.systemSetEnv(key, value)); } @Test public void testLoadSFConnectionConfigWrongConfigurationName() throws SnowflakeSQLException, IOException { SnowflakeUtil.systemSetEnv(SNOWFLAKE_HOME_KEY, tempPath.toString()); SnowflakeUtil.systemSetEnv(SNOWFLAKE_DEFAULT_CONNECTION_NAME_KEY, "unknown"); prepareConnectionConfigurationTomlFile(); assertThrows( SnowflakeSQLException.class, () -> SFConnectionConfigParser.buildConnectionParameters("")); } @Test public void testLoadSFConnectionConfigInValidPath() throws SnowflakeSQLException, IOException { SnowflakeUtil.systemSetEnv(SNOWFLAKE_HOME_KEY, Paths.get("unknownPath").toString()); prepareConnectionConfigurationTomlFile(); assertNull(SFConnectionConfigParser.buildConnectionParameters("")); } @Test public void testLoadSFConnectionConfigWithTokenFromFile() throws SnowflakeSQLException, IOException { SnowflakeUtil.systemSetEnv(SNOWFLAKE_HOME_KEY, tempPath.toString()); SnowflakeUtil.systemSetEnv(SNOWFLAKE_DEFAULT_CONNECTION_NAME_KEY, "default"); File tokenFile = new File(Paths.get(tempPath.toString(), "token").toUri()); prepareConnectionConfigurationTomlFile( Collections.singletonMap("token_file_path", tokenFile.toString())); ConnectionParameters data = SFConnectionConfigParser.buildConnectionParameters(""); assertNotNull(data); assertEquals(tokenFile.toString(), data.getParams().get("token_file_path")); } @Test public void testThrowErrorWhenWrongPermissionsForConnectionConfigurationFile() throws IOException { SnowflakeUtil.systemSetEnv(SNOWFLAKE_HOME_KEY, tempPath.toString()); File tokenFile = new File(Paths.get(tempPath.toString(), "token").toUri()); prepareConnectionConfigurationTomlFile( Collections.singletonMap("token_file_path", tokenFile.toString()), false, false); assumeRunningOnLinuxMac(); assertThrows( SnowflakeSQLException.class, () -> SFConnectionConfigParser.buildConnectionParameters("")); } @Test public void testThrowErrorWhenWrongPermissionsForTokenFile() throws IOException { SnowflakeUtil.systemSetEnv(SNOWFLAKE_HOME_KEY, tempPath.toString()); File tokenFile = new File(Paths.get(tempPath.toString(), "token").toUri()); prepareConnectionConfigurationTomlFile( Collections.singletonMap("token_file_path", tokenFile.toString()), true, false); assumeRunningOnLinuxMac(); assertThrows( SnowflakeSQLException.class, () -> SFConnectionConfigParser.buildConnectionParameters("")); } @Test public void testNoThrowErrorWhenWrongPermissionsForTokenFileButSkippingFlagIsEnabled() throws SnowflakeSQLException, IOException { SnowflakeUtil.systemSetEnv(SNOWFLAKE_HOME_KEY, tempPath.toString()); SnowflakeUtil.systemSetEnv(SNOWFLAKE_DEFAULT_CONNECTION_NAME_KEY, "default"); SnowflakeUtil.systemSetEnv(SKIP_TOKEN_FILE_PERMISSIONS_VERIFICATION, "true"); File tokenFile = new File(Paths.get(tempPath.toString(), "token").toUri()); prepareConnectionConfigurationTomlFile(
Collections.singletonMap("token_file_path", tokenFile.toString()), true, false); ConnectionParameters data = SFConnectionConfigParser.buildConnectionParameters(""); assertNotNull(data); assertEquals(tokenFile.toString(), data.getParams().get("token_file_path")); } @Test public void testNoThrowErrorWhenWrongPermissionsForConnectionConfigButSkippingFlagIsEnabled() throws SnowflakeSQLException, IOException { SnowflakeUtil.systemSetEnv(SNOWFLAKE_HOME_KEY, tempPath.toString()); SnowflakeUtil.systemSetEnv(SNOWFLAKE_DEFAULT_CONNECTION_NAME_KEY, "default"); SnowflakeUtil.systemSetEnv(SKIP_TOKEN_FILE_PERMISSIONS_VERIFICATION, "true"); File tokenFile = new File(Paths.get(tempPath.toString(), "token").toUri()); prepareConnectionConfigurationTomlFile( Collections.singletonMap("token_file_path", tokenFile.toString()), false, false); assumeRunningOnLinuxMac(); ConnectionParameters data = SFConnectionConfigParser.buildConnectionParameters(""); assertNotNull(data); assertEquals(tokenFile.toString(), data.getParams().get("token_file_path")); } @Test public void testLoadSFConnectionConfigWithHostConfigured() throws SnowflakeSQLException, IOException { SnowflakeUtil.systemSetEnv(SNOWFLAKE_HOME_KEY, tempPath.toString()); SnowflakeUtil.systemSetEnv(SNOWFLAKE_DEFAULT_CONNECTION_NAME_KEY, "default"); Map<String, String> extraparams = new HashMap<>(); extraparams.put("host", "snowflake.reg.local"); extraparams.put("account", null); extraparams.put("port", "8082"); extraparams.put("token", "testToken"); prepareConnectionConfigurationTomlFile(extraparams); ConnectionParameters data = SFConnectionConfigParser.buildConnectionParameters(""); assertNotNull(data); assertEquals("jdbc:snowflake://snowflake.reg.local:8082", data.getUrl()); assertEquals("oauth", data.getParams().get("authenticator")); assertEquals("testToken", data.getParams().get("token")); } @Test public void testDefaultPortIs443WhenNeitherPortNorProtocolIsSet() throws SnowflakeSQLException, IOException { SnowflakeUtil.systemSetEnv(SNOWFLAKE_HOME_KEY, tempPath.toString()); SnowflakeUtil.systemSetEnv(SNOWFLAKE_DEFAULT_CONNECTION_NAME_KEY, "default"); prepareTomlWithPortAndProtocol(null, null); ConnectionParameters data = SFConnectionConfigParser.buildConnectionParameters(""); assertNotNull(data); assertEquals("jdbc:snowflake://myorg-myaccount.snowflakecomputing.com:443", data.getUrl()); } @Test public void testDefaultPortIs443WhenProtocolIsHttps() throws SnowflakeSQLException, IOException { SnowflakeUtil.systemSetEnv(SNOWFLAKE_HOME_KEY, tempPath.toString()); SnowflakeUtil.systemSetEnv(SNOWFLAKE_DEFAULT_CONNECTION_NAME_KEY, "default"); prepareTomlWithPortAndProtocol(null, "https"); ConnectionParameters data = SFConnectionConfigParser.buildConnectionParameters(""); assertNotNull(data); assertEquals("jdbc:snowflake://myorg-myaccount.snowflakecomputing.com:443", data.getUrl()); } @Test public void testDefaultPortIs80WhenProtocolIsHttp() throws SnowflakeSQLException, IOException { SnowflakeUtil.systemSetEnv(SNOWFLAKE_HOME_KEY, tempPath.toString()); SnowflakeUtil.systemSetEnv(SNOWFLAKE_DEFAULT_CONNECTION_NAME_KEY, "default"); prepareTomlWithPortAndProtocol(null, "http"); ConnectionParameters data = SFConnectionConfigParser.buildConnectionParameters(""); assertNotNull(data); assertEquals( "jdbc:snowflake://http://myorg-myaccount.snowflakecomputing.com:80", data.getUrl()); } @Test public void testExplicitPortIsPreservedRegardlessOfProtocol() throws SnowflakeSQLException, IOException { SnowflakeUtil.systemSetEnv(SNOWFLAKE_HOME_KEY, tempPath.toString());
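/*
 * [Editor's aside -- illustrative config; the values are taken from the assertions in these
 * tests] The URL-building tests follow one recipe: point SNOWFLAKE_HOME at a temp directory,
 * write a connections.toml there, and let buildConnectionParameters derive the JDBC URL. The
 * rules the assertions pin down: an explicit port is preserved verbatim; otherwise https or an
 * unset protocol defaults to 443 and http defaults to 80. A connections.toml of the shape the
 * test helpers presumably generate:
 *
 *   [default]
 *   account   = "myorg-myaccount"
 *   user      = "user1"
 *   password  = "pass1"
 *   warehouse = "MY_WH"
 *   port      = "8082"   # optional; kept as-is when present
 *   protocol  = "http"   # optional; selects the default port when port is absent
 */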
SnowflakeUtil.systemSetEnv(SNOWFLAKE_DEFAULT_CONNECTION_NAME_KEY, "default"); prepareTomlWithPortAndProtocol("8082", "http"); ConnectionParameters data = SFConnectionConfigParser.buildConnectionParameters(""); assertNotNull(data); assertEquals( "jdbc:snowflake://http://myorg-myaccount.snowflakecomputing.com:8082", data.getUrl()); } @Test public void testUrlParametersAreMergedIntoTomlConfiguration() throws SnowflakeSQLException, IOException { SnowflakeUtil.systemSetEnv(SNOWFLAKE_HOME_KEY, tempPath.toString()); SnowflakeUtil.systemSetEnv(SNOWFLAKE_DEFAULT_CONNECTION_NAME_KEY, "default"); prepareTomlWithPortAndProtocol(null, null); ConnectionParameters data = SFConnectionConfigParser.buildConnectionParameters( "jdbc:snowflake:auto?connectionName=default&tracing=ALL&disablePlatformDetection=true"); assertNotNull(data); assertEquals("jdbc:snowflake://myorg-myaccount.snowflakecomputing.com:443", data.getUrl()); assertEquals("ALL", data.getParams().get("tracing")); assertEquals("true", data.getParams().get("disablePlatformDetection")); assertEquals("user1", data.getParams().get("user")); assertEquals("pass1", data.getParams().get("password")); assertEquals("MY_WH", data.getParams().get("warehouse")); } @Test public void testUrlParameterOverridesTomlValueForSameKey() throws SnowflakeSQLException, IOException { SnowflakeUtil.systemSetEnv(SNOWFLAKE_HOME_KEY, tempPath.toString()); SnowflakeUtil.systemSetEnv(SNOWFLAKE_DEFAULT_CONNECTION_NAME_KEY, "default"); prepareTomlWithPortAndProtocol("8082", "http"); ConnectionParameters data = SFConnectionConfigParser.buildConnectionParameters( "jdbc:snowflake:auto?connectionName=default&port=443&protocol=https&warehouse=OTHER_WH&tracing=ALL"); assertNotNull(data); assertEquals("jdbc:snowflake://myorg-myaccount.snowflakecomputing.com:443", data.getUrl()); assertEquals("443", data.getParams().get("port")); assertEquals("https", data.getParams().get("protocol")); assertEquals("OTHER_WH", data.getParams().get("warehouse")); assertEquals("ALL", data.getParams().get("tracing")); assertEquals("user1", data.getParams().get("user")); assertEquals("pass1", data.getParams().get("password")); assertEquals("myorg-myaccount", data.getParams().get("account")); Set<String> expectedKeys = new HashSet<>( Arrays.asList( "account", "user", "password", "warehouse", "port", "protocol", "tracing")); Set<String> actualKeys = data.getParams().stringPropertyNames(); assertEquals(expectedKeys, actualKeys); } @Test public void testUrlParameterOverridesTomlUser() throws SnowflakeSQLException, IOException { SnowflakeUtil.systemSetEnv(SNOWFLAKE_HOME_KEY, tempPath.toString()); SnowflakeUtil.systemSetEnv(SNOWFLAKE_DEFAULT_CONNECTION_NAME_KEY, "default"); prepareTomlWithPortAndProtocol(null, null); ConnectionParameters data = SFConnectionConfigParser.buildConnectionParameters( "jdbc:snowflake:auto?connectionName=default&user=overridden_user"); assertNotNull(data); assertEquals("overridden_user", data.getParams().get("user")); } @Test public void testUrlParametersWithNoExtraParamsKeepsTomlValues() throws SnowflakeSQLException, IOException { SnowflakeUtil.systemSetEnv(SNOWFLAKE_HOME_KEY, tempPath.toString()); SnowflakeUtil.systemSetEnv(SNOWFLAKE_DEFAULT_CONNECTION_NAME_KEY, "default"); prepareTomlWithPortAndProtocol("8082", "http"); ConnectionParameters data = SFConnectionConfigParser.buildConnectionParameters( "jdbc:snowflake:auto?connectionName=default"); assertNotNull(data); assertEquals( "jdbc:snowflake://http://myorg-myaccount.snowflakecomputing.com:8082", data.getUrl()); assertEquals("user1",
data.getParams().get("user")); assertEquals("pass1", data.getParams().get("password")); assertEquals("MY_WH", data.getParams().get("warehouse")); } @Test public void testHttpProtocolFromTomlIsEmbeddedInUrl() throws SnowflakeSQLException, IOException { SnowflakeUtil.systemSetEnv(SNOWFLAKE_HOME_KEY, tempPath.toString()); SnowflakeUtil.systemSetEnv(SNOWFLAKE_DEFAULT_CONNECTION_NAME_KEY, "default"); Map<String, String> extraparams = new HashMap<>(); extraparams.put("host", "snowflake.reg.local"); extraparams.put("account", null); extraparams.put("port", "8082"); extraparams.put("token", "testToken"); extraparams.put("protocol", "http"); prepareConnectionConfigurationTomlFile(extraparams); ConnectionParameters data = SFConnectionConfigParser.buildConnectionParameters(""); assertNotNull(data); assertEquals("jdbc:snowflake://http://snowflake.reg.local:8082", data.getUrl()); } @Test public void testHttpsProtocolFromTomlProducesStandardUrl() throws SnowflakeSQLException, IOException { SnowflakeUtil.systemSetEnv(SNOWFLAKE_HOME_KEY, tempPath.toString()); SnowflakeUtil.systemSetEnv(SNOWFLAKE_DEFAULT_CONNECTION_NAME_KEY, "default"); Map<String, String> extraparams = new HashMap<>(); extraparams.put("host", "snowflake.reg.local"); extraparams.put("account", null); extraparams.put("port", "8082"); extraparams.put("token", "testToken"); extraparams.put("protocol", "https"); prepareConnectionConfigurationTomlFile(extraparams); ConnectionParameters data = SFConnectionConfigParser.buildConnectionParameters(""); assertNotNull(data); assertEquals("jdbc:snowflake://snowflake.reg.local:8082", data.getUrl()); } @Test public void testDefaultPortIs443WhenProtocolIsEmptyString() throws SnowflakeSQLException, IOException { SnowflakeUtil.systemSetEnv(SNOWFLAKE_HOME_KEY, tempPath.toString()); SnowflakeUtil.systemSetEnv(SNOWFLAKE_DEFAULT_CONNECTION_NAME_KEY, "default"); Map<String, String> extraparams = new HashMap<>(); extraparams.put("host", "snowflake.reg.local"); extraparams.put("account", null); extraparams.put("port", null); extraparams.put("token", "testToken"); extraparams.put("protocol", ""); prepareConnectionConfigurationTomlFile(extraparams); ConnectionParameters data = SFConnectionConfigParser.buildConnectionParameters(""); assertNotNull(data); assertEquals("jdbc:snowflake://snowflake.reg.local:443", data.getUrl()); } @Test public void shouldThrowExceptionIfNoneOfHostAndAccountIsSet() throws IOException { SnowflakeUtil.systemSetEnv(SNOWFLAKE_HOME_KEY, tempPath.toString()); SnowflakeUtil.systemSetEnv(SNOWFLAKE_DEFAULT_CONNECTION_NAME_KEY, "default"); Map<String, String> extraparams = new HashMap<>(); extraparams.put("host", null); extraparams.put("account", null); prepareConnectionConfigurationTomlFile(extraparams); assertThrows( SnowflakeSQLException.class, () -> SFConnectionConfigParser.buildConnectionParameters("")); } @Test public void shouldThrowExceptionIfTokenIsNotSetForOauth() throws IOException { SnowflakeUtil.systemSetEnv(SNOWFLAKE_HOME_KEY, tempPath.toString()); SnowflakeUtil.systemSetEnv(SNOWFLAKE_DEFAULT_CONNECTION_NAME_KEY, "default"); SnowflakeUtil.systemSetEnv(SKIP_TOKEN_FILE_PERMISSIONS_VERIFICATION, "true"); File tokenFile = new File(Paths.get(tempPath.toString(), "token").toUri()); prepareConnectionConfigurationTomlFile( Collections.singletonMap("token_file_path", tokenFile.toString()), true, false, ""); assertThrows( SnowflakeSQLException.class, () -> SFConnectionConfigParser.buildConnectionParameters("")); } private void prepareConnectionConfigurationTomlFile() throws IOException { prepareConnectionConfigurationTomlFile(null, true, true); } private
void prepareConnectionConfigurationTomlFile(Map<String, String> moreParameters) throws IOException { prepareConnectionConfigurationTomlFile(moreParameters, true, true); } private void prepareConnectionConfigurationTomlFile( Map<String, String> moreParameters, boolean onlyUserPermissionConnection, boolean onlyUserPermissionToken) throws IOException { prepareConnectionConfigurationTomlFile( moreParameters, onlyUserPermissionConnection, onlyUserPermissionToken, "token_from_file"); } private void prepareConnectionConfigurationTomlFile( Map<String, String> moreParameters, boolean onlyUserPermissionConnection, boolean onlyUserPermissionToken, String token) throws IOException { Path path = Paths.get(tempPath.toString(), "connections.toml"); Path filePath = createFilePathWithPermission(path, onlyUserPermissionConnection); File file = filePath.toFile(); Map<String, Map<String, String>> configuration = new HashMap<>(); Map<String, String> configurationParams = new HashMap<>(); configurationParams.put("account", "snowaccount.us-west-2.aws"); configurationParams.put("user", "user1"); configurationParams.put("port", "443"); configurationParams.put("authenticator", "oauth"); if (moreParameters != null) { moreParameters.forEach((k, v) -> configurationParams.put(k, v)); } configuration.put("default", configurationParams); tomlMapper.writeValue(file, configuration); if (configurationParams.containsKey("token_file_path")) { Path tokenFilePath = createFilePathWithPermission( Paths.get(configurationParams.get("token_file_path").toString()), onlyUserPermissionToken); Files.write(tokenFilePath, token.getBytes()); Path emptyTokenFilePath = createFilePathWithPermission( Paths.get( configurationParams .get("token_file_path") .toString() .replaceAll("token", "emptytoken")), onlyUserPermissionToken); Files.write(emptyTokenFilePath, "".getBytes()); } } private void prepareTomlWithPortAndProtocol(String port, String protocol) throws IOException { Path path = Paths.get(tempPath.toString(), "connections.toml"); Path filePath = createFilePathWithPermission(path, true); File file = filePath.toFile(); Map<String, String> configurationParams = new HashMap<>(); configurationParams.put("account", "myorg-myaccount"); configurationParams.put("user", "user1"); configurationParams.put("password", "pass1"); configurationParams.put("warehouse", "MY_WH"); if (port != null) { configurationParams.put("port", port); } if (protocol != null) { configurationParams.put("protocol", protocol); } Map<String, Map<String, String>> configuration = new HashMap<>(); configuration.put("default", configurationParams); tomlMapper.writeValue(file, configuration); } private Path createFilePathWithPermission(Path path, boolean onlyUserPermission) throws IOException { if (!isWindows()) { FileAttribute<Set<PosixFilePermission>> fileAttribute = onlyUserPermission ?
PosixFilePermissions.asFileAttribute(PosixFilePermissions.fromString("rw-------")) : PosixFilePermissions.asFileAttribute(PosixFilePermissions.fromString("rwxrw----")); return Files.createFile(path, fileAttribute); } else { return Files.createFile(path); } } } ================================================ FILE: src/test/java/net/snowflake/client/internal/config/SFPermissionsTest.java ================================================ package net.snowflake.client.internal.config; import static org.junit.jupiter.api.Assertions.assertEquals; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.nio.file.attribute.PosixFilePermissions; import net.snowflake.client.annotations.DontRunOnWindows; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.CsvSource; public class SFPermissionsTest { Path configFilePath = Paths.get("config.json"); String configJson = "{\"common\":{\"log_level\":\"debug\",\"log_path\":\"logs\"}}"; @BeforeEach public void createConfigFile() throws IOException { Files.write(configFilePath, configJson.getBytes()); } @AfterEach public void cleanupConfigFile() throws IOException { Files.deleteIfExists(configFilePath); } @ParameterizedTest @CsvSource({ "rwx------,false", "rw-------,false", "r-x------,false", "r--------,false", "rwxrwx---,true", "rwxrw----,true", "rwxr-x---,false", "rwxr-----,false", "rwx-wx---,true", "rwx-w----,true", "rwx--x---,false", "rwx---rwx,true", "rwx---rw-,true", "rwx---r-x,false", "rwx---r--,false", "rwx----wx,true", "rwx----w-,true", "rwx-----x,false" }) @DontRunOnWindows public void testLogDirectoryPermissions(String permission, boolean isSucceed) throws IOException { // TODO: SNOW-1503722 Change to check for thrown exceptions // Don't run on Windows Files.setPosixFilePermissions(configFilePath, PosixFilePermissions.fromString(permission)); Boolean result = SFClientConfigParser.checkGroupOthersWritePermissions(configFilePath.toString()); assertEquals(isSucceed, result); } } ================================================ FILE: src/test/java/net/snowflake/client/internal/core/AttributeEnhancingHttpRequestRetryHandlerTest.java ================================================ package net.snowflake.client.internal.core; import static org.junit.jupiter.api.Assertions.assertEquals; import java.io.IOException; import org.apache.http.protocol.BasicHttpContext; import org.apache.http.protocol.HttpContext; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; class AttributeEnhancingHttpRequestRetryHandlerTest { private final IOException dummyException = new IOException("Test exception"); @ParameterizedTest @ValueSource(ints = {0, 3, 5, 10}) void testAttributeSet(int executionCount) { HttpContext context = new BasicHttpContext(); AttributeEnhancingHttpRequestRetryHandler handler = new AttributeEnhancingHttpRequestRetryHandler(); handler.retryRequest(dummyException, executionCount, context); assertEquals( executionCount, context.getAttribute(AttributeEnhancingHttpRequestRetryHandler.EXECUTION_COUNT_ATTRIBUTE), "Attribute should be set to the provided executionCount: " + executionCount); } } ================================================ FILE: src/test/java/net/snowflake/client/internal/core/CertificateChainTrustValidationTestLatestIT.java ================================================ package 
net.snowflake.client.internal.core; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.io.InputStream; import java.nio.file.Path; import java.nio.file.Paths; import java.security.KeyStore; import java.security.cert.CertificateFactory; import java.security.cert.X509Certificate; import javax.net.ssl.TrustManager; import javax.net.ssl.TrustManagerFactory; import javax.net.ssl.X509TrustManager; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; /** * This test validates the behavior of the SFTrustManager (which should use PKIX validation) when * validating a specific cross-signed certificate chain. It also compares this with the behavior of * the SunX509 X509TrustManager. The test ensures that: - The SFTrustManager can validate the chain * successfully. - The SunX509 X509TrustManager fails validation due to its trust store * configuration. * *

Prerequisites for this test: - The certificates used in this test must be generated by the * script located at ssl-tests/generate_certs.sh - The test dynamically sets the JVM properties: * -Djavax.net.ssl.trustStore=path/to/test/resources/ssl-tests/certs/truststore.jks * -Djavax.net.ssl.trustStorePassword=changeit These properties are reset after all tests are * finished. */ @org.junit.jupiter.api.Tag("CORE") public class CertificateChainTrustValidationTestLatestIT { private static final SFLogger logger = SFLoggerFactory.getLogger(CertificateChainTrustValidationTestLatestIT.class); private static final String CERT_RESOURCE_PATH = "ssl-tests/certs/"; private static final String TRUST_STORE_FILE_NAME = "truststore.jks"; private static final String TRUST_STORE_PASSWORD = "changeit"; // Original JVM properties to restore after tests private static String originalTrustStore; private static String originalTrustStorePassword; // Certificates for the exact chain generated by the script private static X509Certificate leafCert; // Cert 0 private static X509Certificate amzRsaM02IntermediateCert; // Cert 1 private static X509Certificate amzRootCa1ChainCert; // Cert 2 (issued by St G2) private static X509Certificate stG2RootCert; // Cert 3 (issued by St Class 2) private static X509Certificate stClass2RootCert; // Ultimate Root of the chain (self-signed) // The specific self-signed Amz Root CA 1 from the trust store private static X509Certificate amzRootCa1SelfSignedForTrustStoreCert; private static SFTrustManager sfTrustManager; private static X509TrustManager sunX509TrustManager; @BeforeAll static void setUpAll() throws Exception { logger.debug("--- Test Setup Started ---"); // Store original JVM properties originalTrustStore = System.getProperty("javax.net.ssl.trustStore"); originalTrustStorePassword = System.getProperty("javax.net.ssl.trustStorePassword"); // Set JVM properties for the test Path trustStorePath = Paths.get("src", "test", "resources", CERT_RESOURCE_PATH, TRUST_STORE_FILE_NAME); System.setProperty("javax.net.ssl.trustStore", trustStorePath.toAbsolutePath().toString()); System.setProperty("javax.net.ssl.trustStorePassword", TRUST_STORE_PASSWORD); logger.debug( "Set JVM property javax.net.ssl.trustStore to: " + System.getProperty("javax.net.ssl.trustStore")); logger.debug("Set JVM property javax.net.ssl.trustStorePassword."); logger.debug("Verifying chain certificates exist in resources..."); try (InputStream is = getResourceStream(CERT_RESOURCE_PATH + "leaf.crt")) { if (is == null) { throw new IllegalStateException( "Leaf certificate not found in resources. 
" + "Please ensure the 'test/resources/ssl-tests/generate_certs.sh' script was run successfully."); } } catch (IOException e) { throw new IllegalStateException("Error accessing test resources: " + e.getMessage(), e); } CertificateFactory cf = CertificateFactory.getInstance("X.509"); logger.debug("Loading chain certificates from classpath: " + CERT_RESOURCE_PATH + "..."); try (InputStream is = getResourceStream(CERT_RESOURCE_PATH + "leaf.crt")) { leafCert = (X509Certificate) cf.generateCertificate(is); } try (InputStream is = getResourceStream(CERT_RESOURCE_PATH + "amz_rsa_m02_intermediate.crt")) { amzRsaM02IntermediateCert = (X509Certificate) cf.generateCertificate(is); } try (InputStream is = getResourceStream(CERT_RESOURCE_PATH + "amz_root_ca1_chain.crt")) { amzRootCa1ChainCert = (X509Certificate) cf.generateCertificate(is); } try (InputStream is = getResourceStream(CERT_RESOURCE_PATH + "st_g2_root.crt")) { stG2RootCert = (X509Certificate) cf.generateCertificate(is); } try (InputStream is = getResourceStream(CERT_RESOURCE_PATH + "st_class2_root.crt")) { stClass2RootCert = (X509Certificate) cf.generateCertificate(is); } logger.debug("Chain certificates loaded successfully."); logger.debug("Loading the specific self-signed Amz Root CA 1 for the trust store..."); try (InputStream is = getResourceStream(CERT_RESOURCE_PATH + "amz_root_ca1_trust_store.crt")) { amzRootCa1SelfSignedForTrustStoreCert = (X509Certificate) cf.generateCertificate(is); } logger.debug("Self-signed Amz Root CA 1 for Trust Store loaded."); logger.debug( "Loading trust store from classpath: " + CERT_RESOURCE_PATH + TRUST_STORE_FILE_NAME + "..."); KeyStore trustStore = KeyStore.getInstance("JKS"); // Load trust store using the dynamically set property, or directly from classpath if that fails // (for robustness) // Note: When javax.net.ssl.trustStore is set, default TrustManagerFactory uses it // automatically. // We load it here explicitly to ensure it exists and for direct checks. 
try (InputStream is = getResourceStream(CERT_RESOURCE_PATH + TRUST_STORE_FILE_NAME)) { trustStore.load(is, TRUST_STORE_PASSWORD.toCharArray()); } logger.debug("Trust store loaded."); assertTrue( trustStore.containsAlias("rootca1_self_signed_for_ts"), "Trust store MUST contain 'rootca1_self_signed_for_ts' alias."); logger.debug( "Trust store content verified: only rootca1_self_signed_for_ts is present as a trust anchor."); logger.debug("Initializing PKIX X509TrustManager..."); sfTrustManager = new SFTrustManager(new HttpClientSettingsKey(OCSPMode.FAIL_CLOSED), null); assertNotNull(sfTrustManager, "PKIX X509TrustManager should be initialized."); logger.debug("PKIX X509TrustManager initialized successfully."); logger.debug("Initializing SunX509 X509TrustManager..."); TrustManagerFactory sunX509Tmf = TrustManagerFactory.getInstance("SunX509"); sunX509Tmf.init(trustStore); // Initialize with our explicitly loaded trustStore for (TrustManager tm : sunX509Tmf.getTrustManagers()) { if (tm instanceof X509TrustManager) { sunX509TrustManager = (X509TrustManager) tm; break; } } assertNotNull(sunX509TrustManager, "SunX509 X509TrustManager should be initialized."); logger.debug("SunX509 X509TrustManager initialized successfully."); logger.debug("--- Test Setup Complete ---"); } @AfterAll static void tearDownAll() { logger.debug("--- Test Teardown Started ---"); // Restore original JVM properties if (originalTrustStore != null) { System.setProperty("javax.net.ssl.trustStore", originalTrustStore); logger.debug("Restored javax.net.ssl.trustStore to: " + originalTrustStore); } else { System.clearProperty("javax.net.ssl.trustStore"); logger.debug("Cleared javax.net.ssl.trustStore property."); } if (originalTrustStorePassword != null) { System.setProperty("javax.net.ssl.trustStorePassword", originalTrustStorePassword); logger.debug("Restored javax.net.ssl.trustStorePassword."); } else { System.clearProperty("javax.net.ssl.trustStorePassword"); logger.debug("Cleared javax.net.ssl.trustStorePassword property."); } logger.debug("--- Test Teardown Complete ---"); } private static InputStream getResourceStream(String resourceName) { InputStream is = CertificateChainTrustValidationTestLatestIT.class .getClassLoader() .getResourceAsStream(resourceName); if (is == null) { System.err.println( "ERROR: Resource not found: " + resourceName + ". Ensure 'recreate_all_certs.sh' placed it correctly."); } return is; } /** * Scenario: - Chain: Leaf -> Amz RSA 2048 M02 -> Amz Root CA 1 (issued by St G2) -> St G2 (issued * by St Class 2) - Trust Store: Contains ONLY a *self-signed* Amz Root CA 1 (which shares Subject * DN and Public Key with the chain's Amz Root CA 1). */ @Test void shouldProperlyValidateCrossSignedChain() { // The chain provided will be the full 4-certificate chain. // PKIX should find the trusted public key at amzRootCa1ChainCert and terminate successfully // there. X509Certificate[] chain = new X509Certificate[] { leafCert, amzRsaM02IntermediateCert, amzRootCa1ChainCert, // This is the Amz Root CA 1 issued by St G2. stG2RootCert // This is St G2, its issuer. 
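// Chain topology under test (per the class javadoc and generate_certs.sh); the trust store
// holds only the *self-signed* Amz Root CA 1, which shares its Subject DN and public key
// with the cross-signed amzRootCa1ChainCert above:
//
//   leaf -> Amz RSA 2048 M02 -> Amz Root CA 1 (cross-signed by St G2) -> St G2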
}; String authType = "RSA"; // Test with SFTrustManager (PKIX Validation) assertDoesNotThrow( () -> { sfTrustManager.checkServerTrusted(chain, authType); }, "PKIX should pass because the chain's Amz Root CA 1 public key matches a trusted anchor."); // Test with SunX509 Validation Exception e = assertThrows( Exception.class, () -> sunX509TrustManager.checkServerTrusted(chain, authType)); assertTrue( e.getMessage().contains("No trusted certificate found"), "SunX509 should fail because it does not match Amz Root CA 1 as a trusted anchor in the trust store, " + "even though the public key matches. This is expected behavior for SunX509."); } } ================================================ FILE: src/test/java/net/snowflake/client/internal/core/CoreUtilsMiscellaneousTest.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import com.azure.core.http.ProxyOptions; import com.google.auth.http.HttpTransportFactory; import java.net.InetSocketAddress; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Properties; import net.snowflake.client.annotations.DontRunOnGithubActions; import net.snowflake.client.api.exception.ErrorCode; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import net.snowflake.client.internal.jdbc.cloud.storage.CloudStorageProxyFactory; import org.apache.http.HttpHost; import org.junit.jupiter.api.Test; import software.amazon.awssdk.http.nio.netty.ProxyConfiguration; public class CoreUtilsMiscellaneousTest { /** * Assert that the AssertUtil.assertTrue statement issues the correct SFException when conditions * are not met */ @Test public void testSnowflakeAssertTrue() { SFException e = assertThrows( SFException.class, () -> { AssertUtil.assertTrue(1 == 0, "Numbers do not match"); }); assertEquals("JDBC driver internal error: Numbers do not match.", e.getMessage()); } /** Test that Constants.getOS function is working as expected */ @Test @DontRunOnGithubActions public void testgetOS() { Constants.clearOSForTesting(); String originalOS = systemGetProperty("os.name"); System.setProperty("os.name", "Windows"); assertEquals(Constants.OS.WINDOWS, Constants.getOS()); Constants.clearOSForTesting(); System.setProperty("os.name", "Linux"); assertEquals(Constants.OS.LINUX, Constants.getOS()); Constants.clearOSForTesting(); System.setProperty("os.name", "Macintosh"); assertEquals(Constants.OS.MAC, Constants.getOS()); Constants.clearOSForTesting(); System.setProperty("os.name", "Sunos"); assertEquals(Constants.OS.SOLARIS, Constants.getOS()); // Set back to initial value at end of test Constants.clearOSForTesting(); System.setProperty("os.name", originalOS); } @Test public void testHttpClientSettingsKey() { // Create 2 identical HTTPClientKeys with different nonProxyHost settings HttpClientSettingsKey testKey1 = new HttpClientSettingsKey( OCSPMode.FAIL_OPEN, "snowflakecomputing.com", 443, "*.foo.com", "testuser", "pw", "https", "jdbc", false); HttpClientSettingsKey testKey2
= new HttpClientSettingsKey( OCSPMode.FAIL_OPEN, "snowflakecomputing.com", 443, "*.baz.com", "testuser", "pw", "https", "jdbc", false); // Create an HTTPClientKey with all default settings HttpClientSettingsKey testKey3 = new HttpClientSettingsKey(OCSPMode.FAIL_CLOSED, "jdbc", false); // Assert the first 2 test keys are equal assertTrue(testKey1.equals(testKey2)); // Assert that testKey2 has its non proxy hosts updated by the equals function assertEquals("*.foo.com", testKey2.getNonProxyHosts()); // Assert that the test key with the default options is different from the others assertFalse(testKey1.equals(testKey3)); } @Test public void testSetProxyForS3() { HttpClientSettingsKey testKey = new HttpClientSettingsKey( OCSPMode.FAIL_OPEN, "snowflakecomputing.com", 443, "*.foo.com", "testuser", "pw", "https", "jdbc", false); ProxyConfiguration proxyConfiguration = CloudStorageProxyFactory.createProxyConfigurationForS3(testKey); assertEquals(HttpProtocol.HTTPS.toString(), proxyConfiguration.scheme().toUpperCase()); assertEquals("snowflakecomputing.com", proxyConfiguration.host()); assertEquals(443, proxyConfiguration.port()); assertEquals( new HashSet<>(Collections.singletonList("\\Q\\E.*\\Q.foo.com\\E")), proxyConfiguration.nonProxyHosts()); assertEquals("pw", proxyConfiguration.password()); assertEquals("testuser", proxyConfiguration.username()); } @Test public void testSetSessionlessProxyForS3() throws SnowflakeSQLException { Properties props = new Properties(); props.put("useProxy", "true"); props.put("proxyHost", "localhost"); props.put("proxyPort", "8084"); props.put("proxyUser", "testuser"); props.put("proxyPassword", "pw"); props.put("nonProxyHosts", "baz.com | foo.com"); props.put("proxyProtocol", "http"); ProxyConfiguration proxyConfiguration = CloudStorageProxyFactory.createSessionlessProxyConfigurationForS3(props); assertEquals(HttpProtocol.HTTP.toString(), proxyConfiguration.scheme().toUpperCase()); assertEquals("localhost", proxyConfiguration.host()); assertEquals(8084, proxyConfiguration.port()); assertEquals( new HashSet<>(Arrays.asList("\\Qbaz.com\\E", "\\Qfoo.com\\E")), proxyConfiguration.nonProxyHosts()); assertEquals("pw", proxyConfiguration.password()); assertEquals("testuser", proxyConfiguration.username()); // Test that exception is thrown when port number is invalid props.put("proxyPort", "invalidnumber"); SnowflakeSQLException e = assertThrows( SnowflakeSQLException.class, () -> { CloudStorageProxyFactory.createSessionlessProxyConfigurationForS3(props); }); assertEquals((int) ErrorCode.INVALID_PROXY_PROPERTIES.getMessageCode(), e.getErrorCode()); } @Test public void testSetProxyForGCS() { HttpClientSettingsKey testKey = new HttpClientSettingsKey( OCSPMode.FAIL_OPEN, "snowflakecomputing.com", 443, "*.foo.com", "testuser", "pw", "https", "jdbc", false); HttpTransportFactory transportFactory = CloudStorageProxyFactory.createHttpTransportForGCS(testKey); assertNotNull(transportFactory); assertNotNull(transportFactory.create()); // Verify null is returned when no proxy is configured HttpClientSettingsKey noProxyKey = new HttpClientSettingsKey(OCSPMode.FAIL_OPEN); assertNull(CloudStorageProxyFactory.createHttpTransportForGCS(noProxyKey)); assertNull(CloudStorageProxyFactory.createHttpTransportForGCS(null)); } @Test public void testSetSessionlessProxyForGCS() throws SnowflakeSQLException { Properties props = new Properties(); props.put("useProxy", "true"); props.put("proxyHost", "localhost"); props.put("proxyPort", "8084"); props.put("proxyUser", "testuser"); 
props.put("proxyPassword", "pw"); props.put("nonProxyHosts", "baz.com | foo.com"); props.put("proxyProtocol", "http"); HttpTransportFactory transportFactory = CloudStorageProxyFactory.createSessionlessHttpTransportForGCS(props); assertNotNull(transportFactory); assertNotNull(transportFactory.create()); // Verify null is returned when proxy is disabled Properties noProxyProps = new Properties(); noProxyProps.put("useProxy", "false"); assertNull(CloudStorageProxyFactory.createSessionlessHttpTransportForGCS(noProxyProps)); assertNull(CloudStorageProxyFactory.createSessionlessHttpTransportForGCS(null)); // Test that exception is thrown when port number is invalid props.put("proxyPort", "invalidnumber"); SnowflakeSQLException e = assertThrows( SnowflakeSQLException.class, () -> { CloudStorageProxyFactory.createSessionlessHttpTransportForGCS(props); }); assertEquals((int) ErrorCode.INVALID_PROXY_PROPERTIES.getMessageCode(), e.getErrorCode()); } @Test public void testSetProxyForAzure() { HttpClientSettingsKey testKey = new HttpClientSettingsKey( OCSPMode.FAIL_OPEN, "snowflakecomputing.com", 443, "*.foo.com", "testuser", "pw", "https", "jdbc", false); ProxyOptions proxyOptions = CloudStorageProxyFactory.createProxyOptionsForAzure(testKey); assertEquals(ProxyOptions.Type.HTTP, proxyOptions.getType()); assertEquals(new InetSocketAddress("snowflakecomputing.com", 443), proxyOptions.getAddress()); assertEquals("testuser", proxyOptions.getUsername()); assertEquals("pw", proxyOptions.getPassword()); assertEquals("(.*?\\.foo\\.com)", proxyOptions.getNonProxyHosts()); } @Test public void testSetSessionlessProxyForAzure() throws SnowflakeSQLException { Properties props = new Properties(); props.put("useProxy", "true"); props.put("proxyHost", "localhost"); props.put("proxyPort", "8084"); props.put("proxyUser", "testuser"); props.put("proxyPassword", "pw"); props.put("nonProxyHosts", "*"); ProxyOptions proxyOptions = CloudStorageProxyFactory.createSessionlessProxyOptionsForAzure(props); assertEquals(ProxyOptions.Type.HTTP, proxyOptions.getType()); assertEquals(new InetSocketAddress("localhost", 8084), proxyOptions.getAddress()); assertEquals("testuser", proxyOptions.getUsername()); assertEquals("pw", proxyOptions.getPassword()); assertEquals("(.*?)", proxyOptions.getNonProxyHosts()); // Test that exception is thrown when port number is invalid props.put("proxyPort", "invalidnumber"); SnowflakeSQLException e = assertThrows( SnowflakeSQLException.class, () -> { CloudStorageProxyFactory.createSessionlessProxyOptionsForAzure(props); }); assertEquals((int) ErrorCode.INVALID_PROXY_PROPERTIES.getMessageCode(), e.getErrorCode()); } @Test public void testSizeOfHttpClientMapWithVariableNonProxyHosts() { // clear httpClient hashmap before test HttpUtil.httpClient = new HashMap<>(); // Clear client route planner hashmap before test HttpUtil.httpClientRoutePlanner = new HashMap<>(); HttpClientSettingsKey key1 = new HttpClientSettingsKey( null, "localhost", 8080, "google.com | baz.com", "testuser", "pw", "https", "jdbc", false); // Assert there is 1 entry in the hashmap now HttpUtil.getHttpClient(key1); assertEquals(1, HttpUtil.httpClient.size()); HttpClientSettingsKey key2 = new HttpClientSettingsKey( null, "localhost", 8080, "snowflake.com", "testuser", "pw", "https", "jdbc", false); HttpUtil.getHttpClient(key2); // Assert there is still 1 entry because key is re-used when only proxy difference is // nonProxyHosts assertEquals(1, HttpUtil.httpClient.size()); // Assert previous key has updated non-proxy hosts 
assertEquals("snowflake.com", key1.getNonProxyHosts()); HttpClientSettingsKey key3 = new HttpClientSettingsKey( null, "differenthost.com", 8080, "snowflake.com", "testuser", "pw", "https", "jdbc", false); // Assert proxy with different host generates new entry in httpClient map HttpUtil.getHttpClient(key3); assertEquals(2, HttpUtil.httpClient.size()); } @Test public void testSizeOfHttpClientMapWithGzipAndUserAgentSuffix() { // clear httpClient hashmap before test HttpUtil.httpClient = new HashMap<>(); HttpClientSettingsKey key1 = new HttpClientSettingsKey( null, "localhost", 8080, "google.com | baz.com", "testuser", "pw", "https", "jdbc", false); // Assert there is 1 entry in the hashmap now HttpUtil.getHttpClient(key1); assertEquals(1, HttpUtil.httpClient.size()); HttpClientSettingsKey key2 = new HttpClientSettingsKey( null, "localhost", 8080, "google.com | baz.com", "testuser", "pw", "https", "jdbc", true); HttpUtil.getHttpClient(key2); // Assert there are 2 entries because gzip has changed assertEquals(2, HttpUtil.httpClient.size()); HttpClientSettingsKey key3 = new HttpClientSettingsKey( null, "localhost", 8080, "google.com | baz.com", "testuser", "pw", "https", "odbc", true); HttpUtil.getHttpClient(key3); // Assert there are 3 entries because userAgentSuffix has changed assertEquals(3, HttpUtil.httpClient.size()); } @Test public void testSdkProxyRoutePlannerNonProxyHostsBypassesProxy() throws Exception { SdkProxyRoutePlanner planner = new SdkProxyRoutePlanner( "proxy.example.com", 8080, HttpProtocol.HTTP, "*.internal.com|localhost"); // Hosts matching nonProxyHosts should bypass proxy (determineProxy returns null) HttpHost internalHost = new HttpHost("app.internal.com"); HttpHost localhostHost = new HttpHost("localhost"); HttpHost externalHost = new HttpHost("external.com"); assertNull(planner.determineProxy(internalHost, null, null)); assertNull(planner.determineProxy(localhostHost, null, null)); assertNotNull(planner.determineProxy(externalHost, null, null)); } @Test public void testSdkProxyRoutePlannerReDoSPatternDoesNotHang() throws Exception { // The exact ReDoS payload from the vulnerability report SdkProxyRoutePlanner planner = new SdkProxyRoutePlanner("proxy.example.com", 8080, HttpProtocol.HTTP, "(a+)+"); String maliciousHost = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac"; HttpHost target = new HttpHost(maliciousHost); long start = System.currentTimeMillis(); assertNotNull(planner.determineProxy(target, null, null)); long elapsed = System.currentTimeMillis() - start; assertTrue( elapsed < 1000, "Non-proxy host matching should complete nearly instantly, took " + elapsed + "ms"); } @Test public void testConvertProxyPropertiesToHttpClientKey() throws SnowflakeSQLException { OCSPMode mode = OCSPMode.FAIL_OPEN; Properties props = new Properties(); HttpClientSettingsKey expectedNoProxy = new HttpClientSettingsKey(mode); // Test for null proxy properties HttpClientSettingsKey settingsKey = SnowflakeUtil.convertProxyPropertiesToHttpClientKey(mode, props); assertTrue(expectedNoProxy.equals(settingsKey)); // Set useProxy to false so proxy properties will not be set props.put("useProxy", "false"); props.put("proxyHost", "localhost"); props.put("proxyPort", "8084"); settingsKey = SnowflakeUtil.convertProxyPropertiesToHttpClientKey(mode, props); assertTrue(expectedNoProxy.equals(settingsKey)); // Test without gzip_disabled props.put("useProxy", "true"); props.put("proxyHost", "localhost"); props.put("proxyPort", "8084"); 
props.put("proxyUser", "testuser"); props.put("proxyPassword", "pw"); props.put("nonProxyHosts", "baz.com | foo.com"); props.put("proxyProtocol", "http"); props.put("user_agent_suffix", "jdbc"); settingsKey = SnowflakeUtil.convertProxyPropertiesToHttpClientKey(mode, props); HttpClientSettingsKey expectedWithProxy = new HttpClientSettingsKey( OCSPMode.FAIL_OPEN, "localhost", 8084, "baz.com | foo.com", "testuser", "pw", "http", "jdbc", Boolean.valueOf(false)); assertTrue(expectedWithProxy.equals(settingsKey)); // Test with gzip_disabled props.put("gzipDisabled", "true"); settingsKey = SnowflakeUtil.convertProxyPropertiesToHttpClientKey(mode, props); expectedWithProxy = new HttpClientSettingsKey( OCSPMode.FAIL_OPEN, "localhost", 8084, "baz.com | foo.com", "testuser", "pw", "http", "jdbc", Boolean.valueOf(true)); assertTrue(expectedWithProxy.equals(settingsKey)); // Test that exception is thrown when port number is invalid props.put("proxyPort", "invalidnumber"); SnowflakeSQLException e = assertThrows( SnowflakeSQLException.class, () -> { SnowflakeUtil.convertProxyPropertiesToHttpClientKey(mode, props); }); assertEquals((int) ErrorCode.INVALID_PROXY_PROPERTIES.getMessageCode(), e.getErrorCode()); } @Test public void testNullAndEmptyProxySettingsForS3() { HttpClientSettingsKey testKey = new HttpClientSettingsKey(OCSPMode.FAIL_OPEN, null, 443, null, null, null, "", "", false); ProxyConfiguration proxyConfiguration = CloudStorageProxyFactory.createProxyConfigurationForS3(testKey); assertEquals(HttpProtocol.HTTP.toString(), proxyConfiguration.scheme().toUpperCase()); assertEquals("", proxyConfiguration.host()); assertEquals(443, proxyConfiguration.port()); assertEquals(Collections.emptySet(), proxyConfiguration.nonProxyHosts()); assertNull(proxyConfiguration.username()); assertNull(proxyConfiguration.password()); } } ================================================ FILE: src/test/java/net/snowflake/client/internal/core/CredentialManagerTest.java ================================================ package net.snowflake.client.internal.core; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.nio.charset.StandardCharsets; import java.util.Base64; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; class CredentialManagerTest { public static final String SNOWFLAKE_HOST = "some-account.us-west-2.aws.snowflakecomputing.com"; public static final String EXTERNAL_OAUTH_HOST = "some-external-oauth-host.com"; public static final String SOME_ACCESS_TOKEN = "some-oauth-access-token"; public static final String SOME_REFRESH_TOKEN = "some-refresh-token"; public static final String SOME_ID_TOKEN_FROM_CACHE = "some-id-token"; public static final String SOME_MFA_TOKEN_FROM_CACHE = "some-mfa-token"; public static final String SOME_DPOP_PUBLIC_KEY = // pragma: allowlist nextline secret "{\"kty\":\"EC\",\"d\":\"j5-J-nLE4J1I8ZWtArP8eQbxUbYMPmRvaEjEkHFlHds\",\"crv\":\"P-256\",\"x\":\"RL5cE-TC4Jr6CxtT4lEI2Yu6wT6LbwojPQsgHUg01F0\",\"y\":\"UAdLUSWTJ6czXaS3SfEFUZzKPcVVq4OZAD8e7Rp75y4\"}"; public static final String SOME_USER = "some-user"; public static final String ACCESS_TOKEN_FROM_CACHE = "access-token-from-cache"; public static final String REFRESH_TOKEN_FROM_CACHE = "refresh-token-from-cache"; public static final String EXTERNAL_ACCESS_TOKEN_FROM_CACHE = 
"external-access-token-from-cache"; public static final String EXTERNAL_REFRESH_TOKEN_FROM_CACHE = "external-refresh-token-from-cache"; private static final SecureStorageManager mockSecureStorageManager = mock(SecureStorageManager.class); @BeforeAll public static void setUp() { CredentialManager.injectSecureStorageManager(mockSecureStorageManager); } @AfterAll public static void tearDown() { CredentialManager.resetSecureStorageManager(); } @Test public void shouldCreateHostBasedOnExternalIdpUrl() throws SFException { SFLoginInput loginInput = createLoginInputWithExternalOAuth(); String host = CredentialManager.getHostForOAuthCacheKey(loginInput); assertEquals(EXTERNAL_OAUTH_HOST, host); } @Test public void shouldCreateHostBasedOnSnowflakeServerUrl() throws SFException { SFLoginInput loginInput = createLoginInputWithSnowflakeServer(); String host = CredentialManager.getHostForOAuthCacheKey(loginInput); assertEquals(SNOWFLAKE_HOST, host); } @Test public void shouldProperlyWriteTokensToCache() throws SFException { Base64.Encoder encoder = Base64.getEncoder(); SFLoginInput loginInputSnowflakeOAuth = createLoginInputWithSnowflakeServer(); CredentialManager.writeIdToken(loginInputSnowflakeOAuth, SOME_ID_TOKEN_FROM_CACHE); verify(mockSecureStorageManager, times(1)) .setCredential( SNOWFLAKE_HOST, SOME_USER, CachedCredentialType.ID_TOKEN.getValue(), encoder.encodeToString(SOME_ID_TOKEN_FROM_CACHE.getBytes(StandardCharsets.UTF_8))); CredentialManager.writeMfaToken(loginInputSnowflakeOAuth, SOME_MFA_TOKEN_FROM_CACHE); verify(mockSecureStorageManager, times(1)) .setCredential( SNOWFLAKE_HOST, SOME_USER, CachedCredentialType.MFA_TOKEN.getValue(), encoder.encodeToString(SOME_MFA_TOKEN_FROM_CACHE.getBytes(StandardCharsets.UTF_8))); CredentialManager.writeOAuthAccessToken(loginInputSnowflakeOAuth); verify(mockSecureStorageManager, times(1)) .setCredential( SNOWFLAKE_HOST, SOME_USER, CachedCredentialType.OAUTH_ACCESS_TOKEN.getValue(), encoder.encodeToString(SOME_ACCESS_TOKEN.getBytes(StandardCharsets.UTF_8))); CredentialManager.writeOAuthRefreshToken(loginInputSnowflakeOAuth); verify(mockSecureStorageManager, times(1)) .setCredential( SNOWFLAKE_HOST, SOME_USER, CachedCredentialType.OAUTH_REFRESH_TOKEN.getValue(), encoder.encodeToString(SOME_REFRESH_TOKEN.getBytes(StandardCharsets.UTF_8))); SFLoginInput loginInputExternalOAuth = createLoginInputWithExternalOAuth(); CredentialManager.writeOAuthAccessToken(loginInputExternalOAuth); verify(mockSecureStorageManager, times(1)) .setCredential( EXTERNAL_OAUTH_HOST, SOME_USER, CachedCredentialType.OAUTH_ACCESS_TOKEN.getValue(), encoder.encodeToString(SOME_ACCESS_TOKEN.getBytes(StandardCharsets.UTF_8))); CredentialManager.writeOAuthRefreshToken(loginInputExternalOAuth); verify(mockSecureStorageManager, times(1)) .setCredential( EXTERNAL_OAUTH_HOST, SOME_USER, CachedCredentialType.OAUTH_REFRESH_TOKEN.getValue(), encoder.encodeToString(SOME_REFRESH_TOKEN.getBytes(StandardCharsets.UTF_8))); SFLoginInput loginInputDPoP = createLoginInputWithDPoPPublicKey(); String dpopBundledToken = encoder.encodeToString(SOME_ACCESS_TOKEN.getBytes(StandardCharsets.UTF_8)) + "." 
+ encoder.encodeToString(SOME_DPOP_PUBLIC_KEY.getBytes(StandardCharsets.UTF_8)); CredentialManager.writeDPoPBundledAccessToken(loginInputDPoP); verify(mockSecureStorageManager, times(1)) .setCredential( SNOWFLAKE_HOST, SOME_USER, CachedCredentialType.DPOP_BUNDLED_ACCESS_TOKEN.getValue(), dpopBundledToken); } @Test public void shouldProperlyDeleteTokensFromCache() throws SFException { SFLoginInput loginInputSnowflakeOAuth = createLoginInputWithSnowflakeServer(); CredentialManager.deleteIdTokenCacheEntry( loginInputSnowflakeOAuth.getHostFromServerUrl(), loginInputSnowflakeOAuth.getUserName()); verify(mockSecureStorageManager, times(1)) .deleteCredential(SNOWFLAKE_HOST, SOME_USER, CachedCredentialType.ID_TOKEN.getValue()); CredentialManager.deleteMfaTokenCacheEntry( loginInputSnowflakeOAuth.getHostFromServerUrl(), loginInputSnowflakeOAuth.getUserName()); verify(mockSecureStorageManager, times(1)) .deleteCredential(SNOWFLAKE_HOST, SOME_USER, CachedCredentialType.MFA_TOKEN.getValue()); CredentialManager.deleteOAuthAccessTokenCacheEntry(loginInputSnowflakeOAuth); verify(mockSecureStorageManager, times(1)) .deleteCredential( SNOWFLAKE_HOST, SOME_USER, CachedCredentialType.OAUTH_ACCESS_TOKEN.getValue()); CredentialManager.deleteOAuthRefreshTokenCacheEntry(loginInputSnowflakeOAuth); verify(mockSecureStorageManager, times(1)) .deleteCredential( SNOWFLAKE_HOST, SOME_USER, CachedCredentialType.OAUTH_REFRESH_TOKEN.getValue()); SFLoginInput loginInputExternalOAuth = createLoginInputWithExternalOAuth(); CredentialManager.deleteOAuthAccessTokenCacheEntry(loginInputExternalOAuth); verify(mockSecureStorageManager, times(1)) .deleteCredential( EXTERNAL_OAUTH_HOST, SOME_USER, CachedCredentialType.OAUTH_ACCESS_TOKEN.getValue()); CredentialManager.deleteOAuthRefreshTokenCacheEntry(loginInputExternalOAuth); verify(mockSecureStorageManager, times(1)) .deleteCredential( EXTERNAL_OAUTH_HOST, SOME_USER, CachedCredentialType.OAUTH_REFRESH_TOKEN.getValue()); SFLoginInput loginInputDPoP = createLoginInputWithDPoPPublicKey(); CredentialManager.deleteDPoPBundledAccessTokenCacheEntry(loginInputDPoP); verify(mockSecureStorageManager, times(1)) .deleteCredential( SNOWFLAKE_HOST, SOME_USER, CachedCredentialType.DPOP_BUNDLED_ACCESS_TOKEN.getValue()); } @Test public void shouldProperlyUpdateInputWithTokensFromCache() throws SFException { Base64.Encoder encoder = Base64.getEncoder(); SFLoginInput loginInputSnowflakeOAuth = createLoginInputWithSnowflakeServer(); when(mockSecureStorageManager.getCredential( SNOWFLAKE_HOST, SOME_USER, CachedCredentialType.ID_TOKEN.getValue())) .thenReturn( encoder.encodeToString(SOME_ID_TOKEN_FROM_CACHE.getBytes(StandardCharsets.UTF_8))); CredentialManager.fillCachedIdToken(loginInputSnowflakeOAuth); when(mockSecureStorageManager.getCredential( SNOWFLAKE_HOST, SOME_USER, CachedCredentialType.MFA_TOKEN.getValue())) .thenReturn( encoder.encodeToString(SOME_MFA_TOKEN_FROM_CACHE.getBytes(StandardCharsets.UTF_8))); CredentialManager.fillCachedMfaToken(loginInputSnowflakeOAuth); assertEquals(SOME_ID_TOKEN_FROM_CACHE, loginInputSnowflakeOAuth.getIdToken()); assertEquals(SOME_MFA_TOKEN_FROM_CACHE, loginInputSnowflakeOAuth.getMfaToken()); when(mockSecureStorageManager.getCredential( SNOWFLAKE_HOST, SOME_USER, CachedCredentialType.OAUTH_ACCESS_TOKEN.getValue())) .thenReturn( encoder.encodeToString(ACCESS_TOKEN_FROM_CACHE.getBytes(StandardCharsets.UTF_8))); CredentialManager.fillCachedOAuthAccessToken(loginInputSnowflakeOAuth); when(mockSecureStorageManager.getCredential( SNOWFLAKE_HOST, SOME_USER, 
CachedCredentialType.OAUTH_REFRESH_TOKEN.getValue())) .thenReturn( encoder.encodeToString(REFRESH_TOKEN_FROM_CACHE.getBytes(StandardCharsets.UTF_8))); CredentialManager.fillCachedOAuthRefreshToken(loginInputSnowflakeOAuth); assertEquals(ACCESS_TOKEN_FROM_CACHE, loginInputSnowflakeOAuth.getOauthAccessToken()); assertEquals(REFRESH_TOKEN_FROM_CACHE, loginInputSnowflakeOAuth.getOauthRefreshToken()); SFLoginInput loginInputExternalOAuth = createLoginInputWithExternalOAuth(); when(mockSecureStorageManager.getCredential( EXTERNAL_OAUTH_HOST, SOME_USER, CachedCredentialType.OAUTH_ACCESS_TOKEN.getValue())) .thenReturn( encoder.encodeToString( EXTERNAL_ACCESS_TOKEN_FROM_CACHE.getBytes(StandardCharsets.UTF_8))); CredentialManager.fillCachedOAuthAccessToken(loginInputExternalOAuth); when(mockSecureStorageManager.getCredential( EXTERNAL_OAUTH_HOST, SOME_USER, CachedCredentialType.OAUTH_REFRESH_TOKEN.getValue())) .thenReturn( encoder.encodeToString( EXTERNAL_REFRESH_TOKEN_FROM_CACHE.getBytes(StandardCharsets.UTF_8))); CredentialManager.fillCachedOAuthRefreshToken(loginInputExternalOAuth); assertEquals(EXTERNAL_ACCESS_TOKEN_FROM_CACHE, loginInputExternalOAuth.getOauthAccessToken()); assertEquals(EXTERNAL_REFRESH_TOKEN_FROM_CACHE, loginInputExternalOAuth.getOauthRefreshToken()); SFLoginInput loginInputDPoP = createLoginInputWithDPoPPublicKey(); String dpopBundledToken = encoder.encodeToString(SOME_ACCESS_TOKEN.getBytes(StandardCharsets.UTF_8)) + "." + encoder.encodeToString(SOME_DPOP_PUBLIC_KEY.getBytes(StandardCharsets.UTF_8)); when(mockSecureStorageManager.getCredential( SNOWFLAKE_HOST, SOME_USER, CachedCredentialType.DPOP_BUNDLED_ACCESS_TOKEN.getValue())) .thenReturn(dpopBundledToken); CredentialManager.fillCachedDPoPBundledAccessToken(loginInputDPoP); assertEquals(SOME_ACCESS_TOKEN, loginInputDPoP.getOauthAccessToken()); assertEquals(SOME_DPOP_PUBLIC_KEY, loginInputDPoP.getDPoPPublicKey()); } private SFLoginInput createLoginInputWithExternalOAuth() { SFLoginInput loginInput = createGenericLoginInput(); loginInput.setOauthLoginInput( new SFOauthLoginInput( null, null, null, null, "https://some-external-oauth-host.com/oauth/token", null)); return loginInput; } private SFLoginInput createLoginInputWithSnowflakeServer() { SFLoginInput loginInput = createGenericLoginInput(); loginInput.setOauthLoginInput(new SFOauthLoginInput(null, null, null, null, null, null)); loginInput.setServerUrl("https://some-account.us-west-2.aws.snowflakecomputing.com:443/"); return loginInput; } private SFLoginInput createLoginInputWithDPoPPublicKey() { SFLoginInput loginInput = createLoginInputWithSnowflakeServer(); loginInput.setDPoPPublicKey(SOME_DPOP_PUBLIC_KEY); return loginInput; } private SFLoginInput createGenericLoginInput() { SFLoginInput loginInput = new SFLoginInput(); loginInput.setOauthAccessToken(SOME_ACCESS_TOKEN); loginInput.setOauthRefreshToken(SOME_REFRESH_TOKEN); loginInput.setUserName(SOME_USER); return loginInput; } } ================================================ FILE: src/test/java/net/snowflake/client/internal/core/EventHandlerTest.java ================================================ package net.snowflake.client.internal.core; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.File; import java.io.IOException; import java.io.StringWriter; import java.nio.file.Files; import java.util.logging.Level; import java.util.logging.LogRecord; import java.util.zip.GZIPInputStream; import org.apache.commons.io.IOUtils; import 
org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; public class EventHandlerTest { @TempDir private File tmpFolder; @BeforeEach public void setUp() throws IOException { new File(tmpFolder, "snowflake_dumps").mkdirs(); System.setProperty("snowflake.dump_path", tmpFolder.getCanonicalPath()); } @Test public void testPublishRecord() { LogRecord record = new LogRecord(Level.INFO, "test message"); EventHandler handler = new EventHandler(10, 5000); assertEquals(0, handler.getLogBufferSize()); handler.publish(record); assertEquals(1, handler.getLogBufferSize()); } @Test public void testDumpLogBuffer() throws IOException { System.setProperty("snowflake.max_dumpfiles", "1"); System.setProperty("snowflake.max_dumpdir_size_mb", "100"); LogRecord record = new LogRecord(Level.INFO, "test message"); EventHandler handler = new EventHandler(10, 5000); handler.publish(record); handler.flush(); File logDumpFile = new File(EventUtil.getDumpPathPrefix() + "/sf_log_.dmp.gz"); GZIPInputStream gzip = new GZIPInputStream(Files.newInputStream(logDumpFile.toPath())); StringWriter sWriter = new StringWriter(); IOUtils.copy(gzip, sWriter, "UTF-8"); assertTrue(sWriter.toString().contains("test message")); gzip.close(); sWriter.close(); logDumpFile.delete(); } @Test public void testEventFlusher() { EventHandler handler = new EventHandler(2, 1000); assertEquals(0, handler.getBufferSize()); handler.triggerBasicEvent(Event.EventType.STATE_TRANSITION, "test event"); assertEquals(1, handler.getBufferSize()); handler.triggerBasicEvent(Event.EventType.STATE_TRANSITION, "test event 2"); // buffer should flush when max entries is reached assertEquals(0, handler.getBufferSize()); } } ================================================ FILE: src/test/java/net/snowflake/client/internal/core/EventTest.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.core.EventUtil.DUMP_PATH_PROP; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.File; import java.io.IOException; import java.io.StringWriter; import java.nio.file.Files; import java.util.zip.GZIPInputStream; import org.apache.commons.io.IOUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; public class EventTest { @TempDir private File tmpFolder; private File homeDirectory; private File dmpDirectory; @BeforeEach public void setUp() throws IOException { homeDirectory = new File(tmpFolder, "homedir"); homeDirectory.mkdirs(); dmpDirectory = new File(homeDirectory, "snowflake_dumps"); dmpDirectory.mkdirs(); } @AfterEach public void tearDown() { dmpDirectory.delete(); } @Test public void testEvent() { Event event = new BasicEvent(Event.EventType.NONE, "basic event"); event.setType(Event.EventType.NETWORK_ERROR); event.setMessage("network timeout"); assertEquals(1, event.getType().getId()); assertEquals("network timeout", event.getMessage()); } @Test public void testWriteEventDumpLine() throws IOException { try { // Set dmp file path String dumpPath = homeDirectory.getCanonicalPath(); System.setProperty(DUMP_PATH_PROP, dumpPath); EventUtil.setDumpPathPrefixForTesting(dumpPath); Event event = new BasicEvent(Event.EventType.NETWORK_ERROR, "network timeout"); event.writeEventDumpLine("network timeout after 60 seconds"); // Assert the dump path prefix 
function correctly leads to the temporary dump directory // created String dmpPath1 = EventUtil.getDumpPathPrefix(); String dmpPath2 = dmpDirectory.getCanonicalPath(); assertEquals(dmpPath2, dmpPath1, "dump path is: " + EventUtil.getDumpPathPrefix()); File dumpFile = new File( EventUtil.getDumpPathPrefix() + File.separator + "sf_event_" + EventUtil.getDumpFileId() + ".dmp.gz"); GZIPInputStream gzip = new GZIPInputStream(Files.newInputStream(dumpFile.toPath())); StringWriter sWriter = new StringWriter(); IOUtils.copy(gzip, sWriter, "UTF-8"); assertTrue(sWriter.toString().contains("network timeout after 60 seconds")); gzip.close(); sWriter.close(); dumpFile.delete(); } finally { System.clearProperty("snowflake.dump_path"); EventUtil.setDumpPathPrefixForTesting(EventUtil.getDumpPathPrefix()); } } } ================================================ FILE: src/test/java/net/snowflake/client/internal/core/FileCacheManagerDefaultDirTest.java ================================================ package net.snowflake.client.internal.core; import java.io.File; import net.snowflake.client.annotations.RunOnLinuxOrMac; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.mockito.MockedStatic; import org.mockito.Mockito; public class FileCacheManagerDefaultDirTest { @Test @RunOnLinuxOrMac public void shouldCreateCacheDirForLinuxXDG() { try (MockedStatic<Constants> constantsMockedStatic = Mockito.mockStatic(Constants.class)) { constantsMockedStatic.when(Constants::getOS).thenReturn(Constants.OS.LINUX); try (MockedStatic<SnowflakeUtil> snowflakeUtilMockedStatic = Mockito.mockStatic(SnowflakeUtil.class)) { snowflakeUtilMockedStatic .when(() -> SnowflakeUtil.systemGetEnv("XDG_CACHE_HOME")) .thenReturn("/XDG/Cache/"); try (MockedStatic<FileUtil> fileUtilMockedStatic = Mockito.mockStatic(FileUtil.class)) { fileUtilMockedStatic.when(() -> FileUtil.isWritable("/XDG/Cache/")).thenReturn(true); File defaultCacheDir = FileCacheUtil.getDefaultCacheDir(); Assertions.assertNotNull(defaultCacheDir); Assertions.assertEquals("/XDG/Cache/snowflake", defaultCacheDir.getAbsolutePath()); } } } } @Test @RunOnLinuxOrMac public void shouldCreateCacheDirForLinuxWithoutXDG() { try (MockedStatic<Constants> constantsMockedStatic = Mockito.mockStatic(Constants.class)) { constantsMockedStatic.when(Constants::getOS).thenReturn(Constants.OS.LINUX); try (MockedStatic<SnowflakeUtil> snowflakeUtilMockedStatic = Mockito.mockStatic(SnowflakeUtil.class)) { snowflakeUtilMockedStatic .when(() -> SnowflakeUtil.systemGetEnv("XDG_CACHE_HOME")) .thenReturn(null); snowflakeUtilMockedStatic .when(() -> SnowflakeUtil.systemGetProperty("user.home")) .thenReturn("/User/Home"); try (MockedStatic<FileUtil> fileUtilMockedStatic = Mockito.mockStatic(FileUtil.class)) { fileUtilMockedStatic.when(() -> FileUtil.isWritable("/User/Home")).thenReturn(true); File defaultCacheDir = FileCacheUtil.getDefaultCacheDir(); Assertions.assertNotNull(defaultCacheDir); Assertions.assertEquals("/User/Home/.cache/snowflake", defaultCacheDir.getAbsolutePath()); } } } } @Test @RunOnLinuxOrMac public void shouldCreateCacheDirForWindows() { try (MockedStatic<Constants> constantsMockedStatic = Mockito.mockStatic(Constants.class)) { constantsMockedStatic.when(Constants::getOS).thenReturn(Constants.OS.WINDOWS); try (MockedStatic<SnowflakeUtil> snowflakeUtilMockedStatic = Mockito.mockStatic(SnowflakeUtil.class)) { snowflakeUtilMockedStatic .when(() -> SnowflakeUtil.systemGetProperty("user.home")) .thenReturn("/User/Home"); try (MockedStatic<FileUtil> fileUtilMockedStatic =
Mockito.mockStatic(FileUtil.class)) { fileUtilMockedStatic.when(() -> FileUtil.isWritable("/User/Home")).thenReturn(true); File defaultCacheDir = FileCacheUtil.getDefaultCacheDir(); Assertions.assertNotNull(defaultCacheDir); Assertions.assertEquals( "/User/Home/AppData/Local/Snowflake/Caches", defaultCacheDir.getAbsolutePath()); } } } } @Test @RunOnLinuxOrMac public void shouldCreateCacheDirForMacOS() { try (MockedStatic<Constants> constantsMockedStatic = Mockito.mockStatic(Constants.class)) { constantsMockedStatic.when(Constants::getOS).thenReturn(Constants.OS.MAC); try (MockedStatic<SnowflakeUtil> snowflakeUtilMockedStatic = Mockito.mockStatic(SnowflakeUtil.class)) { snowflakeUtilMockedStatic .when(() -> SnowflakeUtil.systemGetProperty("user.home")) .thenReturn("/User/Home"); try (MockedStatic<FileUtil> fileUtilMockedStatic = Mockito.mockStatic(FileUtil.class)) { fileUtilMockedStatic.when(() -> FileUtil.isWritable("/User/Home")).thenReturn(true); File defaultCacheDir = FileCacheUtil.getDefaultCacheDir(); Assertions.assertNotNull(defaultCacheDir); Assertions.assertEquals( "/User/Home/Library/Caches/Snowflake", defaultCacheDir.getAbsolutePath()); } } } } @Test @RunOnLinuxOrMac public void shouldReturnNullWhenNoHomeDirSet() { try (MockedStatic<Constants> constantsMockedStatic = Mockito.mockStatic(Constants.class)) { constantsMockedStatic.when(Constants::getOS).thenReturn(Constants.OS.LINUX); try (MockedStatic<SnowflakeUtil> snowflakeUtilMockedStatic = Mockito.mockStatic(SnowflakeUtil.class)) { snowflakeUtilMockedStatic .when(() -> SnowflakeUtil.systemGetEnv("XDG_CACHE_HOME")) .thenReturn(null); snowflakeUtilMockedStatic .when(() -> SnowflakeUtil.systemGetProperty("user.home")) .thenReturn(null); File defaultCacheDir = FileCacheUtil.getDefaultCacheDir(); Assertions.assertNull(defaultCacheDir); } } } } ================================================ FILE: src/test/java/net/snowflake/client/internal/core/FileCacheManagerTest.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.core.StmtUtil.mapper; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.isWindows; import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemGetProperty; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.isA; import com.fasterxml.jackson.databind.node.ObjectNode; import java.io.File; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.nio.file.attribute.PosixFilePermission; import java.nio.file.attribute.PosixFilePermissions; import java.util.stream.Collectors; import java.util.stream.Stream; import net.snowflake.client.annotations.RunOnLinuxOrMac; import net.snowflake.client.category.TestTags; import net.snowflake.client.internal.jdbc.BaseJDBCTest; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Nested; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.CsvSource; import org.mockito.MockedStatic; import org.mockito.Mockito; @Nested @Tag(TestTags.CORE) class FileCacheManagerTest extends BaseJDBCTest { private static final String
CACHE_FILE_NAME = "credential_cache_v1.json.json"; private static final String CACHE_DIR_PROP = "net.snowflake.jdbc.temporaryCredentialCacheDir"; private static final String CACHE_DIR_ENV = "SF_TEMPORARY_CREDENTIAL_CACHE_DIR"; private static final long CACHE_FILE_LOCK_EXPIRATION_IN_SECONDS = 60L; private FileCacheManager fileCacheManager; private File cacheFile; @BeforeEach public void setup() throws IOException { fileCacheManager = new FileCacheManagerBuilder() .setCacheDirectorySystemProperty(CACHE_DIR_PROP) .setCacheDirectoryEnvironmentVariable(CACHE_DIR_ENV) .setBaseCacheFileName(CACHE_FILE_NAME) .setCacheFileLockExpirationInSeconds(CACHE_FILE_LOCK_EXPIRATION_IN_SECONDS) .build(); cacheFile = createCacheFile(); } @AfterEach public void clean() throws IOException { if (Files.exists(cacheFile.toPath())) { Files.delete(cacheFile.toPath()); } if (Files.exists(cacheFile.getParentFile().toPath())) { Files.delete(cacheFile.getParentFile().toPath()); } } @ParameterizedTest @CsvSource({ "rwx------,rwx------,false", "rw-------,rwx------,true", "rw-------,rwx--xrwx,true", "r-x------,rwx------,false", "r--------,rwx------,true", "rwxrwx---,rwx------,false", "rwxrw----,rwx------,false", "rwxr-x---,rwx------,false", "rwxr-----,rwx------,false", "rwx-wx---,rwx------,false", "rwx-w----,rwx------,false", "rwx--x---,rwx------,false", "rwx---rwx,rwx------,false", "rwx---rw-,rwx------,false", "rwx---r-x,rwx------,false", "rwx---r--,rwx------,false", "rwx----wx,rwx------,false", "rwx----w-,rwx------,false", "rwx-----x,rwx------,false" }) @RunOnLinuxOrMac public void throwWhenReadCacheFileWithPermissionDifferentThanReadWriteForUserTest( String permission, String parentDirectoryPermissions, boolean isSucceed) throws IOException { fileCacheManager.overrideCacheFile(cacheFile); Files.setPosixFilePermissions(cacheFile.toPath(), PosixFilePermissions.fromString(permission)); Files.setPosixFilePermissions( cacheFile.getParentFile().toPath(), PosixFilePermissions.fromString(parentDirectoryPermissions)); if (isSucceed) { assertDoesNotThrow(() -> fileCacheManager.readCacheFile()); } else { SecurityException ex = assertThrows(SecurityException.class, () -> fileCacheManager.readCacheFile()); assertTrue(ex.getMessage().contains("is wider than allowed.")); } } @Test @RunOnLinuxOrMac public void notThrowExceptionWhenCacheFolderIsNotAccessibleWhenReadFromCache() throws IOException { try { Files.setPosixFilePermissions( cacheFile.getParentFile().toPath(), PosixFilePermissions.fromString("---------")); FileCacheManager fcm = new FileCacheManagerBuilder() .setCacheDirectorySystemProperty(CACHE_DIR_PROP) .setCacheDirectoryEnvironmentVariable(CACHE_DIR_ENV) .setBaseCacheFileName(CACHE_FILE_NAME) .setCacheFileLockExpirationInSeconds(CACHE_FILE_LOCK_EXPIRATION_IN_SECONDS) .build(); assertDoesNotThrow(fcm::readCacheFile); } finally { Files.setPosixFilePermissions( cacheFile.getParentFile().toPath(), PosixFilePermissions.fromString("rwx------")); } } @Test public void shouldEnterNoopModeWhenCacheDirectoryIsNotAvailable() { try (MockedStatic fileCacheUtilMock = Mockito.mockStatic(FileCacheUtil.class, Mockito.CALLS_REAL_METHODS)) { fileCacheUtilMock.when(FileCacheUtil::getDefaultCacheDir).thenReturn(null); FileCacheManager fcm = new FileCacheManagerBuilder() .setCacheDirectorySystemProperty("nonexistent.system.property") .setCacheDirectoryEnvironmentVariable("NONEXISTENT_ENV_VAR") .setBaseCacheFileName(CACHE_FILE_NAME) .setCacheFileLockExpirationInSeconds(CACHE_FILE_LOCK_EXPIRATION_IN_SECONDS) .build(); assertTrue(fcm instanceof 
NoOpFileCacheManager); assertNull(fcm.getCacheFilePath()); assertDoesNotThrow(fcm::readCacheFile); assertDoesNotThrow(() -> fcm.writeCacheFile(mapper.createObjectNode())); assertDoesNotThrow(fcm::deleteCacheFile); assertNull(fcm.withLock(() -> "test")); } } @Test @RunOnLinuxOrMac public void notThrowExceptionWhenCacheFolderIsNotAccessibleWhenWriteToCache() throws IOException { String tmpDirPath = System.getProperty("java.io.tmpdir"); String cacheDirPath = tmpDirPath + File.separator + "snowflake-cache-dir-noaccess"; System.setProperty("FILE_CACHE_MANAGER_CACHE_PATH", cacheDirPath); try { Files.createDirectory( Paths.get(cacheDirPath), PosixFilePermissions.asFileAttribute(PosixFilePermissions.fromString("---------"))); FileCacheManager fcm = new FileCacheManagerBuilder() .setOnlyOwnerPermissions(false) .setCacheDirectorySystemProperty("FILE_CACHE_MANAGER_CACHE_PATH") .setCacheDirectoryEnvironmentVariable("NONEXISTENT") .setBaseCacheFileName("cache-file") .build(); assertDoesNotThrow(() -> fcm.writeCacheFile(mapper.createObjectNode())); } finally { Files.deleteIfExists(Paths.get(cacheDirPath)); System.clearProperty("FILE_CACHE_MANAGER_CACHE_PATH"); } } @Test @RunOnLinuxOrMac public void throwWhenOverrideCacheFileHasDifferentOwnerThanCurrentUserTest() { try (MockedStatic<FileUtil> fileUtilMock = Mockito.mockStatic(FileUtil.class, Mockito.CALLS_REAL_METHODS)) { fileUtilMock.when(() -> FileUtil.getFileOwnerName(isA(Path.class))).thenReturn("anotherUser"); SecurityException ex = assertThrows(SecurityException.class, () -> fileCacheManager.readCacheFile()); assertTrue(ex.getMessage().contains("The file owner is different than current user")); } } @Test @RunOnLinuxOrMac public void notThrowWhenUserNameIsQuestionMarkInContainerEnvironment() { try (MockedStatic<FileUtil> fileUtilMock = Mockito.mockStatic(FileUtil.class, Mockito.CALLS_REAL_METHODS); MockedStatic<SnowflakeUtil> snowflakeUtilMock = Mockito.mockStatic(SnowflakeUtil.class, Mockito.CALLS_REAL_METHODS)) { fileUtilMock.when(() -> FileUtil.getFileOwnerName(isA(Path.class))).thenReturn("root"); snowflakeUtilMock.when(() -> SnowflakeUtil.systemGetProperty("user.name")).thenReturn("?"); assertDoesNotThrow(() -> fileCacheManager.readCacheFile()); } } @Test @RunOnLinuxOrMac public void notThrowForTooWidePermissionsWhenOnlyOwnerPermissionsSetFalseTest() throws IOException { FileCacheManager fcm = new FileCacheManagerBuilder() .setOnlyOwnerPermissions(false) .setCacheDirectorySystemProperty(CACHE_DIR_PROP) .setCacheDirectoryEnvironmentVariable(CACHE_DIR_ENV) .setBaseCacheFileName(CACHE_FILE_NAME) .setCacheFileLockExpirationInSeconds(CACHE_FILE_LOCK_EXPIRATION_IN_SECONDS) .build(); fcm.overrideCacheFile(cacheFile); Files.setPosixFilePermissions(cacheFile.toPath(), PosixFilePermissions.fromString("rwxrwx---")); assertDoesNotThrow(fcm::readCacheFile); } @Test @RunOnLinuxOrMac public void throwWhenOverrideCacheFileNotFound() { Path wrongPath = Paths.get(systemGetProperty("user.home"), ".cache", "snowflake2", "wrongFileName"); SecurityException ex = assertThrows( SecurityException.class, () -> fileCacheManager.overrideCacheFile(wrongPath.toFile())); assertTrue( ex.getMessage() .contains( "Unable to access the file/directory to check the permissions.
Error: java.nio.file.NoSuchFileException:")); } @Test @RunOnLinuxOrMac public void throwWhenSymlinkAsCache() throws IOException { Path symlink = createSymlink(); try { SecurityException ex = assertThrows( SecurityException.class, () -> fileCacheManager.overrideCacheFile(symlink.toFile())); assertTrue(ex.getMessage().contains("Symbolic link is not allowed for file cache")); } finally { if (Files.exists(symlink)) { Files.delete(symlink); } } } private File createCacheFile() { Path cacheFile = Paths.get(systemGetProperty("user.home"), ".cache", "snowflake_cache", CACHE_FILE_NAME); try { if (Files.exists(cacheFile)) { Files.delete(cacheFile); } if (Files.exists(cacheFile.getParent())) { Files.delete(cacheFile.getParent()); } if (!isWindows()) { Files.createDirectories( cacheFile.getParent(), PosixFilePermissions.asFileAttribute( Stream.of( PosixFilePermission.OWNER_READ, PosixFilePermission.OWNER_WRITE, PosixFilePermission.OWNER_EXECUTE) .collect(Collectors.toSet()))); } else { Files.createDirectories(cacheFile.getParent()); } if (!isWindows()) { Files.createFile( cacheFile, PosixFilePermissions.asFileAttribute( Stream.of(PosixFilePermission.OWNER_READ, PosixFilePermission.OWNER_WRITE) .collect(Collectors.toSet()))); } else { Files.createFile(cacheFile); } ObjectNode cacheContent = mapper.createObjectNode(); cacheContent.put("token", "tokenValue"); fileCacheManager.overrideCacheFile(cacheFile.toFile()); fileCacheManager.writeCacheFile(cacheContent); return cacheFile.toFile(); } catch (IOException e) { throw new RuntimeException(e); } } private Path createSymlink() throws IOException { Path link = Paths.get(cacheFile.getParent(), "symlink_" + CACHE_FILE_NAME); if (Files.exists(link)) { Files.delete(link); } return Files.createSymbolicLink(link, cacheFile.toPath()); } @Test void shouldCreateDirAndFile() { String tmpDirPath = System.getProperty("java.io.tmpdir"); String cacheDirPath = tmpDirPath + File.separator + "snowflake-cache-dir"; System.setProperty("FILE_CACHE_MANAGER_SHOULD_CREATE_DIR_AND_FILE", cacheDirPath); new FileCacheManagerBuilder() .setOnlyOwnerPermissions(false) .setCacheDirectorySystemProperty("FILE_CACHE_MANAGER_SHOULD_CREATE_DIR_AND_FILE") .setBaseCacheFileName("cache-file") .build(); assertTrue(new File(tmpDirPath).exists()); } } ================================================ FILE: src/test/java/net/snowflake/client/internal/core/HeaderCustomizerHttpRequestInterceptorTest.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.core.AttributeEnhancingHttpRequestRetryHandler.EXECUTION_COUNT_ATTRIBUTE; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.ArgumentMatchers.anyMap; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.lenient; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.io.IOException; import java.net.URI; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import net.snowflake.client.api.http.HttpHeadersCustomizer; import org.apache.http.Header; import org.apache.http.HttpException; import org.apache.http.HttpRequest; import org.apache.http.RequestLine; import org.apache.http.message.BasicHeader; import 
org.apache.http.protocol.BasicHttpContext; import org.apache.http.protocol.HttpContext; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; import software.amazon.awssdk.core.interceptor.Context; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.http.SdkHttpMethod; import software.amazon.awssdk.http.SdkHttpRequest; class HeaderCustomizerHttpRequestInterceptorTest { private HttpHeadersCustomizer mockCustomizer1; private HttpHeadersCustomizer mockCustomizer2; private HttpRequest mockHttpRequest; private RequestLine mockRequestLine; private Context.ModifyHttpRequest mockAwsRequestContext; private SdkHttpRequest mockAwsRequest; private ExecutionAttributes mockAwsRequestAttributes; private SdkHttpRequest.Builder mockAwsRequestContextBuilder; private HttpContext httpContext; private HeaderCustomizerHttpRequestInterceptor interceptor; private List<HttpHeadersCustomizer> customizersList; private final String TEST_URI_STRING = "https://test.snowflakecomputing.com/api/v1"; private final URI TEST_URI = URI.create(TEST_URI_STRING); private final String TEST_METHOD_GET = "GET"; @BeforeEach void setUp() { httpContext = new BasicHttpContext(); customizersList = new ArrayList<>(); // Default interceptor has empty list, tests will add customizers as needed interceptor = new HeaderCustomizerHttpRequestInterceptor(customizersList); // Setup common mocks mockCustomizer1 = mock(); mockCustomizer2 = mock(); mockHttpRequest = mock(); mockRequestLine = mock(); mockAwsRequestContext = mock(); mockAwsRequest = mock(); mockAwsRequestAttributes = mock(); mockAwsRequestContextBuilder = mock(); lenient().when(mockHttpRequest.getRequestLine()).thenReturn(mockRequestLine); lenient().when(mockHttpRequest.getAllHeaders()).thenReturn(new Header[0]); lenient().when(mockRequestLine.getMethod()).thenReturn(TEST_METHOD_GET); lenient().when(mockRequestLine.getUri()).thenReturn(TEST_URI_STRING); lenient().when(mockAwsRequest.method()).thenReturn(SdkHttpMethod.GET); lenient().when(mockAwsRequest.getUri()).thenReturn(TEST_URI); lenient() .when(mockAwsRequest.headers()) .thenReturn(new HashMap<>()); // Mutable map for testing adds lenient().when(mockAwsRequestContext.httpRequest()).thenReturn(mockAwsRequest); lenient().when(mockAwsRequest.toBuilder()).thenReturn(mockAwsRequestContextBuilder); } @Test void testApacheInterceptorWithoutCustomizersDoesNothing() throws Exception { interceptor = new HeaderCustomizerHttpRequestInterceptor(Collections.emptyList()); interceptor.process(mockHttpRequest, httpContext); verify(mockHttpRequest, never()).addHeader(anyString(), anyString()); } @Test void testApacheInterceptorWithCustomizerWhichDoesNotApplyDoesNothing() throws Exception { when(mockCustomizer1.applies(anyString(), anyString(), anyMap())).thenReturn(false); customizersList.add(mockCustomizer1); interceptor = new HeaderCustomizerHttpRequestInterceptor(customizersList); interceptor.process(mockHttpRequest, httpContext); verify(mockCustomizer1).applies(eq(TEST_METHOD_GET), eq(TEST_URI_STRING), anyMap()); verify(mockCustomizer1, never()).newHeaders(); verify(mockHttpRequest, never()).addHeader(anyString(), anyString()); } @ParameterizedTest @ValueSource(booleans = {true, false}) void testApacheInterceptorWithCustomizerAddsHeaders(boolean invokeOnce) throws Exception { Map<String, List<String>> newHeaders = new HashMap<>(); newHeaders.put("X-Static", Collections.singletonList("StaticVal"));
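// For orientation: a concrete HttpHeadersCustomizer satisfying the contract mocked in
// these tests might look like the hypothetical StaticHeaderCustomizer sketched below,
// which attaches one fixed header to every request and recomputes it on each retry
// (an illustrative sketch only; class name and values are made up for this example):
//
//   public class StaticHeaderCustomizer implements HttpHeadersCustomizer {
//     @Override
//     public boolean applies(String method, String uri, Map<String, List<String>> currentHeaders) {
//       return true; // opt in for every request
//     }
//
//     @Override
//     public Map<String, List<String>> newHeaders() {
//       Map<String, List<String>> headers = new HashMap<>();
//       headers.put("X-Static", Collections.singletonList("StaticVal"));
//       return headers; // headers to append; driver-owned headers are still protected
//     }
//
//     @Override
//     public boolean invokeOnce() {
//       return false; // also invoked again on retries
//     }
//   }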
when(mockCustomizer1.applies(anyString(), anyString(), anyMap())).thenReturn(true); when(mockCustomizer1.newHeaders()).thenReturn(newHeaders); when(mockCustomizer1.invokeOnce()).thenReturn(invokeOnce); customizersList.add(mockCustomizer1); interceptor = new HeaderCustomizerHttpRequestInterceptor(customizersList); httpContext.setAttribute(EXECUTION_COUNT_ATTRIBUTE, 0); interceptor.process(mockHttpRequest, httpContext); verify(mockCustomizer1).newHeaders(); verify(mockHttpRequest).addHeader("X-Static", "StaticVal"); } @Test void testApacheInterceptorWithInvokeOnceTrueSkipsOnRetry() throws Exception { when(mockCustomizer1.applies(anyString(), anyString(), anyMap())).thenReturn(true); when(mockCustomizer1.invokeOnce()).thenReturn(true); customizersList.add(mockCustomizer1); interceptor = new HeaderCustomizerHttpRequestInterceptor(customizersList); httpContext.setAttribute(EXECUTION_COUNT_ATTRIBUTE, 1); // Simulate retry interceptor.process(mockHttpRequest, httpContext); verify(mockCustomizer1, never()).newHeaders(); verify(mockHttpRequest, never()).addHeader(anyString(), anyString()); } @Test void testApacheInterceptorWithInvokeOnceFalseAddsHeaderOnRetry() throws Exception { Map<String, List<String>> newHeaders = new HashMap<>(); newHeaders.put("X-Dynamic", Collections.singletonList("RetryValue")); when(mockCustomizer1.applies(anyString(), anyString(), anyMap())).thenReturn(true); when(mockCustomizer1.newHeaders()).thenReturn(newHeaders); when(mockCustomizer1.invokeOnce()).thenReturn(false); customizersList.add(mockCustomizer1); interceptor = new HeaderCustomizerHttpRequestInterceptor(customizersList); httpContext.setAttribute(EXECUTION_COUNT_ATTRIBUTE, 1); // Simulate retry interceptor.process(mockHttpRequest, httpContext); verify(mockCustomizer1).newHeaders(); verify(mockHttpRequest).addHeader("X-Dynamic", "RetryValue"); } @Test void testApacheInterceptorDoesNotAllowCustomizerToOverrideHeader() throws HttpException, IOException { // Simulate driver adding User-Agent initially Header[] initialHeaders = {new BasicHeader("User-Agent", "SnowflakeDriver/1.0")}; when(mockHttpRequest.getAllHeaders()).thenReturn(initialHeaders); Map<String, List<String>> newHeaders = new HashMap<>(); newHeaders.put("User-Agent", Collections.singletonList("MaliciousAgent/2.0")); when(mockCustomizer1.applies(anyString(), anyString(), anyMap())).thenReturn(true); when(mockCustomizer1.newHeaders()).thenReturn(newHeaders); // Attempting override customizersList.add(mockCustomizer1); interceptor = new HeaderCustomizerHttpRequestInterceptor(customizersList); httpContext.setAttribute(EXECUTION_COUNT_ATTRIBUTE, 0); interceptor.process(mockHttpRequest, httpContext); // Verify the original headers were not replaced with the bad header assertEquals(initialHeaders, mockHttpRequest.getAllHeaders()); verify(mockHttpRequest, never()).addHeader(eq("User-Agent"), anyString()); } @Test void testMultipleCustomizersAddingHeaders() throws Exception { Map<String, List<String>> headers1 = new HashMap<>(); headers1.put("X-Custom1", Collections.singletonList("Val1")); Map<String, List<String>> headers2 = new HashMap<>(); headers2.put("X-Custom2", Arrays.asList("Val2a", "Val2b")); when(mockCustomizer1.applies(anyString(), anyString(), anyMap())).thenReturn(true); when(mockCustomizer1.newHeaders()).thenReturn(headers1); when(mockCustomizer1.invokeOnce()).thenReturn(false); when(mockCustomizer2.applies(anyString(), anyString(), anyMap())).thenReturn(true); when(mockCustomizer2.newHeaders()).thenReturn(headers2); when(mockCustomizer2.invokeOnce()).thenReturn(false); customizersList.add(mockCustomizer1);
customizersList.add(mockCustomizer2); interceptor = new HeaderCustomizerHttpRequestInterceptor(customizersList); httpContext.setAttribute(EXECUTION_COUNT_ATTRIBUTE, 0); interceptor.process(mockHttpRequest, httpContext); verify(mockCustomizer1).newHeaders(); verify(mockCustomizer2).newHeaders(); verify(mockHttpRequest).addHeader("X-Custom1", "Val1"); verify(mockHttpRequest).addHeader("X-Custom2", "Val2a"); verify(mockHttpRequest).addHeader("X-Custom2", "Val2b"); } @Test void testAWSInterceptorWithoutCustomizersDoesNothing() { interceptor = new HeaderCustomizerHttpRequestInterceptor(Collections.emptyList()); interceptor.modifyHttpRequest(mockAwsRequestContext, mockAwsRequestAttributes); verify(mockAwsRequestContextBuilder, never()).appendHeader(anyString(), anyString()); } @Test void testAWSInterceptorWithCustomizerWhichDoesNotApplyDoesNothing() { when(mockCustomizer1.applies(anyString(), anyString(), anyMap())).thenReturn(false); customizersList.add(mockCustomizer1); interceptor = new HeaderCustomizerHttpRequestInterceptor(customizersList); interceptor.modifyHttpRequest(mockAwsRequestContext, mockAwsRequestAttributes); verify(mockCustomizer1).applies(eq(TEST_METHOD_GET), eq(TEST_URI_STRING), anyMap()); verify(mockCustomizer1, never()).newHeaders(); verify(mockAwsRequestContextBuilder, never()).appendHeader(anyString(), anyString()); } @Test void testAWSInterceptorWithCustomizerAddsHeaders() { Map<String, List<String>> newHeaders = new HashMap<>(); newHeaders.put("X-AWS-Custom", Collections.singletonList("AwsValue1")); when(mockCustomizer1.applies(anyString(), anyString(), anyMap())).thenReturn(true); when(mockCustomizer1.newHeaders()).thenReturn(newHeaders); customizersList.add(mockCustomizer1); interceptor = new HeaderCustomizerHttpRequestInterceptor(customizersList); interceptor.modifyHttpRequest(mockAwsRequestContext, mockAwsRequestAttributes); verify(mockCustomizer1).newHeaders(); verify(mockAwsRequestContextBuilder).appendHeader("X-AWS-Custom", "AwsValue1"); } @Test void testAWSInterceptorDoesNotAllowCustomizerToOverrideHeader() { // Simulate driver adding User-Agent initially Map<String, List<String>> initialAwsHeaders = new HashMap<>(); initialAwsHeaders.put("User-Agent", Collections.singletonList("SnowflakeAWSClient/1.0")); when(mockAwsRequest.headers()).thenReturn(initialAwsHeaders); // Return mutable map for test Map<String, List<String>> newHeaders = new HashMap<>(); newHeaders.put("User-Agent", Collections.singletonList("MaliciousAgent/3.0")); when(mockCustomizer1.applies(anyString(), anyString(), anyMap())).thenReturn(true); when(mockCustomizer1.newHeaders()).thenReturn(newHeaders); customizersList.add(mockCustomizer1); interceptor = new HeaderCustomizerHttpRequestInterceptor(customizersList); interceptor.modifyHttpRequest(mockAwsRequestContext, mockAwsRequestAttributes); // Verify the original map wasn't modified with the bad header assertEquals("SnowflakeAWSClient/1.0", mockAwsRequest.headers().get("User-Agent").get(0)); verify(mockAwsRequestContextBuilder, never()).appendHeader(eq("User-Agent"), anyString()); } @Test void testAWSInterceptorAddsMultiValueHeader() { Map<String, List<String>> newHeaders = new HashMap<>(); newHeaders.put("X-Multi", Arrays.asList("ValA", "ValB")); when(mockCustomizer1.applies(anyString(), anyString(), anyMap())).thenReturn(true); when(mockCustomizer1.newHeaders()).thenReturn(newHeaders); customizersList.add(mockCustomizer1); interceptor = new HeaderCustomizerHttpRequestInterceptor(customizersList); interceptor.modifyHttpRequest(mockAwsRequestContext, mockAwsRequestAttributes); verify(mockCustomizer1).newHeaders();
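// Each value of a multi-value header should be appended as a separate header entry: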
verify(mockAwsRequestContextBuilder).appendHeader("X-Multi", "ValA"); verify(mockAwsRequestContextBuilder).appendHeader("X-Multi", "ValB"); } } ================================================ FILE: src/test/java/net/snowflake/client/internal/core/HeartbeatIntervalSelectorTest.java ================================================ package net.snowflake.client.internal.core; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import java.util.Arrays; import java.util.HashSet; import java.util.Set; import org.junit.jupiter.api.Test; public class HeartbeatIntervalSelectorTest { // Helper method for Java 8 compatibility (setOf() is Java 9+) private static Set setOf(Long... values) { return new HashSet<>(Arrays.asList(values)); } // === Validation Tests === @Test public void testSelectBestInterval_NullExistingIntervals_ThrowsException() { IllegalArgumentException exception = assertThrows( IllegalArgumentException.class, () -> HeartbeatIntervalSelector.selectBestInterval(10, null)); assertTrue(exception.getMessage().contains("cannot be null")); } @Test public void testSelectBestInterval_EmptyExistingIntervals_ThrowsException() { IllegalArgumentException exception = assertThrows( IllegalArgumentException.class, () -> HeartbeatIntervalSelector.selectBestInterval(10, new HashSet<>())); assertTrue(exception.getMessage().contains("cannot be null or empty")); } @Test public void testSelectBestInterval_NegativeRequestedInterval_ThrowsException() { Set existing = setOf(10L); IllegalArgumentException exception = assertThrows( IllegalArgumentException.class, () -> HeartbeatIntervalSelector.selectBestInterval(-5, existing)); assertTrue(exception.getMessage().contains("must be positive")); } @Test public void testSelectBestInterval_ZeroRequestedInterval_ThrowsException() { Set existing = setOf(10L); IllegalArgumentException exception = assertThrows( IllegalArgumentException.class, () -> HeartbeatIntervalSelector.selectBestInterval(0, existing)); assertTrue(exception.getMessage().contains("must be positive")); } // === Core Behavior Tests === @Test public void testSelectBestInterval_ExactMatch_ReturnsExactMatch() { Set existing = setOf(5L, 10L, 15L); long result = HeartbeatIntervalSelector.selectBestInterval(10, existing); assertEquals(10, result, "Should return exact match when it exists"); } @Test public void testSelectBestInterval_RequestedBetweenTwo_ReturnsClosestSmaller() { Set existing = setOf(5L, 15L); long result = HeartbeatIntervalSelector.selectBestInterval(10, existing); assertEquals( 5, result, "Should return closest smaller interval (more frequent heartbeats are safer)"); } @Test public void testSelectBestInterval_RequestedBetweenMany_ReturnsClosestSmaller() { Set existing = setOf(3L, 5L, 8L, 15L, 20L); long result = HeartbeatIntervalSelector.selectBestInterval(10, existing); assertEquals(8, result, "Should return 8 (closest to 10 but still smaller)"); } @Test public void testSelectBestInterval_RequestedSmallerThanAll_ReturnsShortestInterval() { // Critical case: If requested is 9s but all existing are [10s, 15s, 20s], // there is no interval that is less than or equal to the requested value. // In that case, return the shortest available interval (10s) rather than a longer one. 
Set<Long> existing = setOf(10L, 15L, 20L); long result = HeartbeatIntervalSelector.selectBestInterval(9, existing); assertEquals(10, result, "Should return shortest interval when no smaller exists"); } @Test public void testSelectBestInterval_RequestedLargerThanAll_ReturnsLargestSmallerInterval() { Set<Long> existing = setOf(5L, 10L, 15L); long result = HeartbeatIntervalSelector.selectBestInterval(20, existing); assertEquals( 15, result, "Should return largest of the smaller intervals (closest but still safe)"); } // === Edge Cases === @Test public void testSelectBestInterval_SingleExistingIntervalSmaller_ReturnsIt() { Set<Long> existing = setOf(5L); long result = HeartbeatIntervalSelector.selectBestInterval(10, existing); assertEquals(5, result, "Should return the only existing interval"); } @Test public void testSelectBestInterval_SingleExistingIntervalLarger_ReturnsIt() { Set<Long> existing = setOf(15L); long result = HeartbeatIntervalSelector.selectBestInterval(10, existing); assertEquals( 15, result, "Should return the only existing interval even if it's larger (no other choice)"); } @Test public void testSelectBestInterval_SingleExistingIntervalEqual_ReturnsIt() { Set<Long> existing = setOf(10L); long result = HeartbeatIntervalSelector.selectBestInterval(10, existing); assertEquals(10, result, "Should return exact match"); } @Test public void testSelectBestInterval_RequestedVeryLarge_ReturnsLargestSmaller() { Set<Long> existing = setOf(1L, 5L, 10L); long result = HeartbeatIntervalSelector.selectBestInterval(1000, existing); assertEquals(10, result, "Should return largest of smaller intervals"); } @Test public void testSelectBestInterval_RequestedVerySmall_ReturnsSmallestAvailable() { Set<Long> existing = setOf(10L, 20L, 30L); long result = HeartbeatIntervalSelector.selectBestInterval(1, existing); assertEquals(10, result, "Should return smallest available interval"); } // === Real-World Scenarios === @Test public void testSelectBestInterval_RealWorldScenario1_ShortLivedSession() { // Short-lived session: 60s validity, wants 10s heartbeat // But we have threads at [5s, 30s, 3600s] Set<Long> existing = setOf(5L, 30L, 3600L); long result = HeartbeatIntervalSelector.selectBestInterval(10, existing); assertEquals(5, result, "Should use 5s (more frequent than needed, but safe)"); } @Test public void testSelectBestInterval_RealWorldScenario2_LongLivedSession() { // Long-lived session: 4h validity, wants 3600s (1h) heartbeat // But we have threads at [10s, 60s, 300s] Set<Long> existing = setOf(10L, 60L, 300L); long result = HeartbeatIntervalSelector.selectBestInterval(3600, existing); assertEquals(300, result, "Should use 300s (closest smaller interval)"); } @Test public void testSelectBestInterval_RealWorldScenario3_CriticalBugCase() { // This was the original bug scenario: // Session needs 10s heartbeat (short validity) // Existing threads: [12s] (from a longer-lived session) // PROBLEM: 12s is LONGER than requested 10s, which could cause expiration // BEHAVIOR: Return 12s as only available option (telemetry will alert on this) Set<Long> existing = setOf(12L); long result = HeartbeatIntervalSelector.selectBestInterval(10, existing); assertEquals( 12, result, "Should select 12s as only option (session might expire - telemetry will alert)"); } @Test public void testSelectBestInterval_RealWorldScenario4_MaxThreadsReached() { // 10 threads already exist with various intervals // New session needs 25s heartbeat Set<Long> existing = setOf(5L, 10L, 15L, 20L, 30L, 60L, 120L, 300L, 600L, 3600L); long result = HeartbeatIntervalSelector.selectBestInterval(25,
existing); assertEquals(20, result, "Should select 20s (closest smaller to 25s)"); } @Test public void testSelectBestInterval_RealWorldScenario5_EdgeOfEquality() { // Requested = 10, existing = [9, 10, 11] Set<Long> existing = setOf(9L, 10L, 11L); long result = HeartbeatIntervalSelector.selectBestInterval(10, existing); assertEquals(10, result, "Should select exact match (10s)"); } // === Boundary Tests === @Test public void testSelectBestInterval_OneSmallerOneEqual_ReturnsEqual() { Set<Long> existing = setOf(5L, 10L); long result = HeartbeatIntervalSelector.selectBestInterval(10, existing); assertEquals(10, result, "Should prefer exact match over smaller"); } @Test public void testSelectBestInterval_OneEqualOneLarger_ReturnsEqual() { Set<Long> existing = setOf(10L, 15L); long result = HeartbeatIntervalSelector.selectBestInterval(10, existing); assertEquals(10, result, "Should return exact match"); } @Test public void testSelectBestInterval_AllSmaller_ReturnsLargestOfSmaller() { Set<Long> existing = setOf(1L, 2L, 3L, 4L, 5L); long result = HeartbeatIntervalSelector.selectBestInterval(10, existing); assertEquals(5, result, "Should return largest of all smaller intervals"); } @Test public void testSelectBestInterval_AllLarger_ReturnsSmallestOfLarger() { Set<Long> existing = setOf(20L, 30L, 40L, 50L); long result = HeartbeatIntervalSelector.selectBestInterval(10, existing); assertEquals(20, result, "Should return smallest of all larger intervals (best fallback)"); } // === Safety Verification Tests === @Test public void testSelectBestInterval_NeverReturnsLongerWhenShorterExists() { // Verify that we NEVER select a longer interval when shorter exists Set<Long> existing = setOf(5L, 20L); long result = HeartbeatIntervalSelector.selectBestInterval(10, existing); assertEquals(5, result, "Must select 5s (shorter/safer), never 20s (longer/risky)"); } @Test public void testSelectBestInterval_AlwaysReturnsValidInterval() { // Result should always be one of the existing intervals Set<Long> existing = setOf(5L, 10L, 15L); long result = HeartbeatIntervalSelector.selectBestInterval(12, existing); assertTrue(existing.contains(result), "Result must be from existing intervals"); } @Test public void testSelectBestInterval_ConsistentResults() { // Same inputs should always produce same output Set<Long> existing = setOf(5L, 10L, 15L, 20L); long result1 = HeartbeatIntervalSelector.selectBestInterval(12, existing); long result2 = HeartbeatIntervalSelector.selectBestInterval(12, existing); long result3 = HeartbeatIntervalSelector.selectBestInterval(12, existing); assertEquals(result1, result2, "Results should be consistent"); assertEquals(result2, result3, "Results should be consistent"); } } ================================================ FILE: src/test/java/net/snowflake/client/internal/core/HeartbeatRegistryTest.java ================================================ package net.snowflake.client.internal.core; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyLong; import static org.mockito.Mockito.clearInvocations; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.sql.SQLException; import java.time.Clock; import java.time.Duration;
import java.time.Instant; import java.time.ZoneId; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import net.snowflake.client.category.TestTags; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; @Tag(TestTags.CORE) public class HeartbeatRegistryTest { private ScheduledExecutorService mockExecutor; private ManualClock manualClock; private HeartbeatRegistry registry; private SFSession mockSession1; private SFSession mockSession2; private SFSession mockSession3; @BeforeEach public void setUp() { mockExecutor = mock(ScheduledExecutorService.class); manualClock = new ManualClock(Instant.ofEpochSecond(1000000)); // Mock schedule to return a future @SuppressWarnings("unchecked") ScheduledFuture<?> mockFuture = mock(ScheduledFuture.class); when(mockExecutor.schedule(any(Runnable.class), anyLong(), any(TimeUnit.class))) .thenAnswer(invocation -> mockFuture); when(mockExecutor.isShutdown()).thenReturn(false); registry = new HeartbeatRegistry(mockExecutor, manualClock); mockSession1 = mock(SFSession.class); mockSession2 = mock(SFSession.class); mockSession3 = mock(SFSession.class); when(mockSession1.getSessionId()).thenReturn("session-1"); when(mockSession2.getSessionId()).thenReturn("session-2"); when(mockSession3.getSessionId()).thenReturn("session-3"); } @AfterEach public void tearDown() { if (registry != null) { registry.shutdown(); } } @Test public void testAddSession_SingleSession() { registry.addSession(mockSession1, 60, 10); assertEquals(1, registry.getActiveThreadCount()); assertEquals(1, registry.getSessionCountForInterval(10)); } @Test public void testAddSession_MultipleSessionsSameInterval_NoReschedule() { registry.addSession(mockSession1, 60, 10); registry.addSession(mockSession2, 60, 10); // Only ONE thread should exist assertEquals(1, registry.getActiveThreadCount()); assertEquals(2, registry.getSessionCountForInterval(10)); // Verify scheduler called only ONCE (no reschedule) verify(mockExecutor, times(1)).schedule(any(Runnable.class), anyLong(), any(TimeUnit.class)); } @Test public void testAddSession_MultipleSessionsDifferentIntervals_Independent() { // Session 1: 60s validity, 10s heartbeat (interval = 10s) registry.addSession(mockSession1, 60, 10); // Session 2: 4h validity, 1h heartbeat (interval = 3600s) registry.addSession(mockSession2, 14400, 3600); // TWO independent threads should exist assertEquals(2, registry.getActiveThreadCount()); assertEquals(1, registry.getSessionCountForInterval(10)); assertEquals(1, registry.getSessionCountForInterval(3600)); } @Test public void testAddSession_CalculatesIntervalCorrectly() { // Requested 100s, but validity/4 = 60/4 = 15s registry.addSession(mockSession1, 60, 100); // Should use 15s (minimum of 100 and 60/4) assertEquals(1, registry.getActiveThreadCount()); assertEquals(1, registry.getSessionCountForInterval(15)); } @Test public void testAddSession_NullSession_ThrowsException() { assertThrows(IllegalArgumentException.class, () -> registry.addSession(null, 60, 10)); } @Test public void testAddSession_InvalidValidity_ThrowsException() { assertThrows(IllegalArgumentException.class, () -> registry.addSession(mockSession1, 0, 10)); assertThrows(IllegalArgumentException.class, () -> registry.addSession(mockSession1, -60, 10)); } @Test public void testAddSession_InvalidFrequency_ThrowsException() { assertThrows(IllegalArgumentException.class, () ->
registry.addSession(mockSession1, 60, 0)); assertThrows(IllegalArgumentException.class, () -> registry.addSession(mockSession1, 60, -10)); } @Test public void testRemoveSession_RemovesFromCorrectThread() { registry.addSession(mockSession1, 60, 10); registry.addSession(mockSession2, 60, 10); assertEquals(2, registry.getSessionCountForInterval(10)); registry.removeSession(mockSession1); assertEquals(1, registry.getSessionCountForInterval(10)); assertEquals(1, registry.getActiveThreadCount()); } @Test public void testRemoveSession_CleansUpEmptyThread() { registry.addSession(mockSession1, 60, 10); assertEquals(1, registry.getActiveThreadCount()); registry.removeSession(mockSession1); // Thread should be cleaned up assertEquals(0, registry.getActiveThreadCount()); assertEquals(0, registry.getSessionCountForInterval(10)); } @Test public void testRemoveSession_DoesNotAffectOtherThreads() { registry.addSession(mockSession1, 60, 10); registry.addSession(mockSession2, 14400, 3600); assertEquals(2, registry.getActiveThreadCount()); registry.removeSession(mockSession1); // Thread for interval 10s should be gone assertEquals(1, registry.getActiveThreadCount()); assertEquals(0, registry.getSessionCountForInterval(10)); // Thread for interval 3600s should still exist assertEquals(1, registry.getSessionCountForInterval(3600)); } @Test public void testRemoveSession_NonExistentSession_DoesNotThrow() { assertDoesNotThrow(() -> registry.removeSession(mockSession1)); } @Test public void testRemoveSession_NullSession_DoesNotThrow() { assertDoesNotThrow(() -> registry.removeSession(null)); } @Test public void testCriticalBug_ShortSessionNotExpiredByLongSession() throws Exception { // This tests the fix for the critical bug // Session A: 60s validity, 10s heartbeat (interval = min(10, 60/4) = 10s) registry.addSession(mockSession1, 60, 10); // Session B: 4h validity, 1h heartbeat (interval = min(3600, 14400/4) = 3600s) registry.addSession(mockSession2, 14400, 3600); // Verify TWO independent threads exist assertEquals(2, registry.getActiveThreadCount()); // Advance time by 65 seconds (past session1's validity) manualClock.advance(Duration.ofSeconds(65)); // Clear previous invocations clearInvocations(mockSession1, mockSession2); // Trigger 10s heartbeat (for short-lived session) registry.triggerHeartbeatForInterval(10); // Verify: Session1 got heartbeat (NOT expired) try { verify(mockSession1, times(1)).heartbeat(); // Verify: Session2 NOT heartbeated yet (its interval is 3600s) verify(mockSession2, never()).heartbeat(); } catch (SFException | SQLException e) { fail("Verification should not throw: " + e.getMessage()); } } @Test public void testTriggerHeartbeatForInterval_OnlyAffectsSpecifiedInterval() throws Exception { registry.addSession(mockSession1, 60, 10); registry.addSession(mockSession2, 14400, 3600); // Trigger only 10s interval registry.triggerHeartbeatForInterval(10); try { verify(mockSession1, times(1)).heartbeat(); verify(mockSession2, never()).heartbeat(); } catch (SFException | SQLException e) { fail("Verification should not throw: " + e.getMessage()); } // Trigger only 3600s interval clearInvocations(mockSession1, mockSession2); registry.triggerHeartbeatForInterval(3600); try { verify(mockSession1, never()).heartbeat(); verify(mockSession2, times(1)).heartbeat(); } catch (SFException | SQLException e) { fail("Verification should not throw: " + e.getMessage()); } } @Test public void testTriggerHeartbeatForInterval_NonExistentInterval_DoesNotThrow() { assertDoesNotThrow(() -> 
registry.triggerHeartbeatForInterval(999)); } @Test public void testShutdown_CleansUpAllThreads() { registry.addSession(mockSession1, 60, 10); registry.addSession(mockSession2, 14400, 3600); assertEquals(2, registry.getActiveThreadCount()); registry.shutdown(); assertEquals(0, registry.getActiveThreadCount()); verify(mockExecutor, times(1)).shutdown(); } /** Manual clock implementation for testing. */ private static class ManualClock extends Clock { private Instant currentTime; ManualClock(Instant startTime) { this.currentTime = startTime; } void advance(Duration duration) { currentTime = currentTime.plus(duration); } @Override public ZoneId getZone() { return ZoneId.systemDefault(); } @Override public Clock withZone(ZoneId zone) { return this; } @Override public Instant instant() { return currentTime; } @Override public long millis() { return currentTime.toEpochMilli(); } } } ================================================ FILE: src/test/java/net/snowflake/client/internal/core/HeartbeatThreadTest.java ================================================ package net.snowflake.client.internal.core; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyLong; import static org.mockito.Mockito.clearInvocations; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.sql.SQLException; import java.time.Clock; import java.time.Instant; import java.time.ZoneId; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import net.snowflake.client.category.TestTags; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; import org.mockito.ArgumentCaptor; @Tag(TestTags.CORE) public class HeartbeatThreadTest { private ScheduledExecutorService mockExecutor; private Clock fixedClock; private SFSession mockSession1; private SFSession mockSession2; @BeforeEach public void setUp() { mockExecutor = mock(ScheduledExecutorService.class); fixedClock = Clock.fixed(Instant.ofEpochSecond(1000000), ZoneId.systemDefault()); mockSession1 = mock(SFSession.class); mockSession2 = mock(SFSession.class); when(mockSession1.getSessionId()).thenReturn("session-1"); when(mockSession2.getSessionId()).thenReturn("session-2"); // Mock schedule to return a future @SuppressWarnings("unchecked") ScheduledFuture<?> mockFuture = mock(ScheduledFuture.class); when(mockExecutor.schedule(any(Runnable.class), anyLong(), any(TimeUnit.class))) .thenAnswer(invocation -> mockFuture); } @Test public void testConstructor_ValidParameters() { HeartbeatThread thread = new HeartbeatThread(10, mockExecutor, fixedClock); assertEquals(10, thread.getIntervalSeconds()); assertEquals(0, thread.getSessionCount()); assertTrue(thread.isEmpty()); } @Test public void testConstructor_InvalidInterval() { assertThrows( IllegalArgumentException.class, () -> new HeartbeatThread(0, mockExecutor, fixedClock), "Should reject zero interval"); assertThrows( IllegalArgumentException.class, () ->
new HeartbeatThread(-10, mockExecutor, fixedClock), "Should reject negative interval"); } @Test public void testConstructor_NullExecutor() { assertThrows( IllegalArgumentException.class, () -> new HeartbeatThread(10, null, fixedClock), "Should reject null executor"); } @Test public void testConstructor_NullClock() { assertThrows( IllegalArgumentException.class, () -> new HeartbeatThread(10, mockExecutor, null), "Should reject null clock"); } @Test public void testAddSession_FirstSession_StartsThread() { HeartbeatThread thread = new HeartbeatThread(10, mockExecutor, fixedClock); thread.addSession(mockSession1); assertEquals(1, thread.getSessionCount()); assertFalse(thread.isEmpty()); // Verify scheduler was called to start heartbeat verify(mockExecutor, times(1)).schedule(any(Runnable.class), anyLong(), eq(TimeUnit.SECONDS)); } @Test public void testAddSession_MultipleSessionsSameThread_NoReschedule() { HeartbeatThread thread = new HeartbeatThread(10, mockExecutor, fixedClock); thread.addSession(mockSession1); thread.addSession(mockSession2); assertEquals(2, thread.getSessionCount()); // Verify scheduler was called only ONCE (no reschedule on second add) verify(mockExecutor, times(1)).schedule(any(Runnable.class), anyLong(), eq(TimeUnit.SECONDS)); } @Test public void testRemoveSession() { HeartbeatThread thread = new HeartbeatThread(10, mockExecutor, fixedClock); thread.addSession(mockSession1); thread.addSession(mockSession2); assertEquals(2, thread.getSessionCount()); thread.removeSession(mockSession1); assertEquals(1, thread.getSessionCount()); assertFalse(thread.isEmpty()); thread.removeSession(mockSession2); assertEquals(0, thread.getSessionCount()); assertTrue(thread.isEmpty()); } @Test public void testShutdown() { @SuppressWarnings("unchecked") ScheduledFuture<?> mockFuture = mock(ScheduledFuture.class); when(mockExecutor.schedule(any(Runnable.class), anyLong(), any(TimeUnit.class))) .thenAnswer(invocation -> mockFuture); HeartbeatThread thread = new HeartbeatThread(10, mockExecutor, fixedClock); thread.addSession(mockSession1); thread.shutdown(); // Verify future was cancelled verify(mockFuture, times(1)).cancel(false); // Thread should be empty after shutdown assertEquals(0, thread.getSessionCount()); assertTrue(thread.isEmpty()); } @Test public void testRun_HeartbeatsAllSessions() throws Exception { HeartbeatThread thread = new HeartbeatThread(10, mockExecutor, fixedClock); thread.addSession(mockSession1); thread.addSession(mockSession2); // Trigger heartbeat manually thread.triggerHeartbeatNow(); // Verify both sessions were heartbeated try { verify(mockSession1, times(1)).heartbeat(); verify(mockSession2, times(1)).heartbeat(); } catch (SFException | SQLException e) { fail("Verification should not throw: " + e.getMessage()); } } @Test public void testRun_ContinuesOnException() throws Exception { HeartbeatThread thread = new HeartbeatThread(10, mockExecutor, fixedClock); // Make session1 throw exception try { doThrow(new RuntimeException("Heartbeat failed")).when(mockSession1).heartbeat(); } catch (SFException | SQLException e) { fail("Stubbing should not throw: " + e.getMessage()); } thread.addSession(mockSession1); thread.addSession(mockSession2); // Trigger heartbeat - should not throw thread.triggerHeartbeatNow(); // Verify both sessions were called (session1 threw, session2 succeeded) try { verify(mockSession1, times(1)).heartbeat(); verify(mockSession2, times(1)).heartbeat(); } catch (SFException | SQLException e) { fail("Verification should not throw: " + e.getMessage()); } }
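// The rescheduling tests below pin down the run loop's contract. As a rough sketch (an
// assumption for illustration, not necessarily the actual HeartbeatThread source), run()
// behaves like:
//
//   public void run() {
//     for (SFSession session : sessions) {
//       try {
//         session.heartbeat(); // one failing session must not block the others
//       } catch (Throwable t) {
//         // log and continue with the remaining sessions
//       }
//     }
//     if (!shutdown && !sessions.isEmpty()) {
//       executor.schedule(this, intervalSeconds, TimeUnit.SECONDS); // schedule next round
//     }
//   }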
@Test public void testRun_ReschedulesIfSessionsRemain() throws Exception { HeartbeatThread thread = new HeartbeatThread(10, mockExecutor, fixedClock); thread.addSession(mockSession1); // Clear previous invocations clearInvocations(mockExecutor); // Run heartbeat thread.triggerHeartbeatNow(); // Should schedule next heartbeat since session still exists verify(mockExecutor, times(1)).schedule(any(Runnable.class), anyLong(), eq(TimeUnit.SECONDS)); } @Test public void testRun_DoesNotRescheduleAfterShutdown() { HeartbeatThread thread = new HeartbeatThread(10, mockExecutor, fixedClock); thread.addSession(mockSession1); thread.shutdown(); clearInvocations(mockExecutor); // Try to run - should not reschedule thread.run(); verify(mockExecutor, never()).schedule(any(Runnable.class), anyLong(), any(TimeUnit.class)); } @Test public void testScheduleHeartbeat_CalculatesCorrectDelay() { HeartbeatThread thread = new HeartbeatThread(10, mockExecutor, fixedClock); thread.addSession(mockSession1); ArgumentCaptor<Long> delayCaptor = ArgumentCaptor.forClass(Long.class); verify(mockExecutor).schedule(any(Runnable.class), delayCaptor.capture(), eq(TimeUnit.SECONDS)); long delay = delayCaptor.getValue(); assertTrue(delay >= 0, "Delay should be non-negative"); assertTrue(delay <= 10, "Delay should not exceed interval"); } @Test public void testAddSession_AfterShutdown_ReturnsFalse() { HeartbeatThread thread = new HeartbeatThread(10, mockExecutor, fixedClock); thread.shutdown(); // Try to add session after shutdown - should return false assertFalse(thread.addSession(mockSession1)); assertEquals(0, thread.getSessionCount()); } } ================================================ FILE: src/test/java/net/snowflake/client/internal/core/HttpUtilLatestIT.java ================================================ package net.snowflake.client.internal.core; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import java.io.IOException; import java.net.SocketTimeoutException; import java.time.Duration; import net.snowflake.client.category.TestTags; import org.apache.http.client.methods.HttpGet; import org.apache.http.impl.client.CloseableHttpClient; import org.hamcrest.CoreMatchers; import org.hamcrest.MatcherAssert; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @Tag(TestTags.CORE) public class HttpUtilLatestIT { private static final String HANG_WEBSERVER_ADDRESS = "http://localhost:12345/hang"; @BeforeEach public void resetHttpClientsCache() { HttpUtil.httpClient.clear(); } @AfterEach public void resetHttpTimeouts() { HttpUtil.setConnectionTimeout(60000); HttpUtil.setSocketTimeout(300000); } /** Added in > 3.14.5 */ @Test public void shouldGetDefaultConnectionAndSocketTimeouts() { assertEquals(Duration.ofMillis(60_000), HttpUtil.getConnectionTimeout()); assertEquals(Duration.ofMillis(300_000), HttpUtil.getSocketTimeout()); } /** Added in > 3.14.5 */ @Test @Timeout(1) public void shouldOverrideConnectionAndSocketTimeouts() { // it's hard to test connection timeout so there is only a test for socket timeout HttpUtil.setConnectionTimeout(100); HttpUtil.setSocketTimeout(200); CloseableHttpClient httpClient = HttpUtil.getHttpClient(new HttpClientSettingsKey(OCSPMode.INSECURE)); IOException e = assertThrows( IOException.class, () -> { httpClient.execute(new HttpGet(HANG_WEBSERVER_ADDRESS)); }); MatcherAssert.assertThat(e,
CoreMatchers.instanceOf(SocketTimeoutException.class)); } } ================================================ FILE: src/test/java/net/snowflake/client/internal/core/HttpUtilTest.java ================================================ package net.snowflake.client.internal.core; import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.fail; import java.lang.reflect.Field; import java.util.AbstractMap; import java.util.Queue; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import javax.net.ssl.TrustManager; import net.snowflake.client.internal.core.crl.CertRevocationCheckMode; import org.apache.http.client.config.RequestConfig; import org.apache.http.client.methods.Configurable; import org.apache.http.impl.client.CloseableHttpClient; import org.hamcrest.CoreMatchers; import org.hamcrest.Matcher; import org.hamcrest.MatcherAssert; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.EnumSource; public class HttpUtilTest { /** Test based on the issue reported in SNOW-1898533. */ @Test public void buildHttpClientRace() throws InterruptedException { HttpUtil.httpClient.clear(); // start two threads but only need one to fail CountDownLatch latch = new CountDownLatch(1); final Queue<AbstractMap.SimpleEntry<Thread, Throwable>> failures = new ConcurrentLinkedQueue<>(); final HttpClientSettingsKey noProxyKey = new HttpClientSettingsKey(null); final HttpClientSettingsKey proxyKey = new HttpClientSettingsKey( null, "some.proxy.host", 8080, null, null, null, "http", null, false); Thread noProxyThread = new Thread(() -> verifyProxyUsage(noProxyKey, failures, latch), "noProxyThread"); noProxyThread.start(); Thread withProxyThread = new Thread(() -> verifyProxyUsage(proxyKey, failures, latch), "withProxyThread"); withProxyThread.start(); // if latch goes to zero, then one of the threads failed // if await times out (returns false), then neither thread has failed (both still running) boolean failed = latch.await(1, TimeUnit.SECONDS); noProxyThread.interrupt(); withProxyThread.interrupt(); if (failed) { AbstractMap.SimpleEntry<Thread, Throwable> failure = failures.remove(); fail(failure.getKey().getName() + " failed", failure.getValue()); } } @Test void testConfigureTrustManagerIfNeededWithNullKey() { TrustManager[] result = HttpUtil.configureTrustManagerIfNeeded(null, null); assertNull(result); } @Test void testConfigureTrustManagerIfNeededWithOcspDisabledAndNoCrlChecks() { HttpClientSettingsKey key = new HttpClientSettingsKey(OCSPMode.DISABLE_OCSP_CHECKS); TrustManager[] result = HttpUtil.configureTrustManagerIfNeeded(key, null); assertNull(result); } @ParameterizedTest @EnumSource( value = OCSPMode.class, names = {"DISABLE_OCSP_CHECKS"}, mode = EnumSource.Mode.EXCLUDE) void testConfigureTrustManagerIfNeededForOcsp(OCSPMode mode) { HttpClientSettingsKey key = new HttpClientSettingsKey(mode); TrustManager[] result = HttpUtil.configureTrustManagerIfNeeded(key, null); assertNotNull(result); assertInstanceOf(SFTrustManager.class, result[0]); } @ParameterizedTest @EnumSource( value = CertRevocationCheckMode.class, names = {"DISABLED"}, mode = EnumSource.Mode.EXCLUDE) void testConfigureTrustManagerIfNeededWithCrlModes(CertRevocationCheckMode mode) { HttpClientSettingsKey key = new HttpClientSettingsKey(OCSPMode.DISABLE_OCSP_CHECKS);
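// Even with OCSP checks disabled, enabling any non-DISABLED CRL revocation mode is
// expected to install the CRL-based trust manager: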
key.setRevocationCheckMode(mode); TrustManager[] result = HttpUtil.configureTrustManagerIfNeeded(key, null); assertNotNull(result); assertInstanceOf(SFExtendedCrlTrustManager.class, result[0]); } @Test void testConfigureTrustManagerWithOcspAndCrlDisabled() { HttpClientSettingsKey key = new HttpClientSettingsKey(OCSPMode.DISABLE_OCSP_CHECKS); key.setRevocationCheckMode(CertRevocationCheckMode.DISABLED); TrustManager[] result = HttpUtil.configureTrustManagerIfNeeded(key, null); assertNull(result); } private static void verifyProxyUsage( HttpClientSettingsKey key, Queue<AbstractMap.SimpleEntry<Thread, Throwable>> failures, CountDownLatch latch) { while (!Thread.currentThread().isInterrupted()) { try (CloseableHttpClient client = HttpUtil.buildHttpClient(key, null, false)) { assertHttpClientUsesProxy(client, key.usesProxy()); } catch (Throwable e) { failures.add(new AbstractMap.SimpleEntry<>(Thread.currentThread(), e)); latch.countDown(); break; } } } private static void assertHttpClientUsesProxy(CloseableHttpClient client, boolean proxyUsed) { assertRequestConfigWithoutProxyConfig(client); assertRoutePlannerOverridden(client, proxyUsed); } private static void assertRequestConfigWithoutProxyConfig(CloseableHttpClient client) { MatcherAssert.assertThat(client, CoreMatchers.instanceOf(Configurable.class)); Configurable c = (Configurable) client; RequestConfig config = c.getConfig(); assertNull(config.getProxy(), "request config has configured proxy"); } private static void assertRoutePlannerOverridden(CloseableHttpClient client, boolean proxyUsed) { try { // The HTTP client does not expose its proxy settings, so to detect that we are using a // proxy we have to look inside via reflection and check whether the route planner is // overridden with our proxy class Field routePlannerField = client.getClass().getDeclaredField("routePlanner"); routePlannerField.setAccessible(true); Matcher<Object> snowflakeProxyPlannerClassMatcher = CoreMatchers.instanceOf(SnowflakeMutableProxyRoutePlanner.class); MatcherAssert.assertThat( routePlannerField.get(client), proxyUsed ?
snowflakeProxyPlannerClassMatcher : CoreMatchers.not(snowflakeProxyPlannerClassMatcher)); } catch (NoSuchFieldException | IllegalAccessException e) { throw new RuntimeException(e); } } } ================================================ FILE: src/test/java/net/snowflake/client/internal/core/HttpUtilWiremockLatestIT.java ================================================ package net.snowflake.client.internal.core; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicInteger; import net.snowflake.client.api.http.HttpHeadersCustomizer; import net.snowflake.client.category.TestTags; import net.snowflake.client.internal.jdbc.BaseWiremockTest; import org.apache.http.client.methods.CloseableHttpResponse; import org.apache.http.client.methods.HttpGet; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.util.EntityUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; @Tag(TestTags.CORE) public class HttpUtilWiremockLatestIT extends BaseWiremockTest { private static final String SUCCESS_AT_THIRD_RETRY_ECHOING_HEADERS_SENT = "{\n" + " \"mappings\": [\n" + " {\n" + " \"scenarioName\": \"Retry Scenario Example\",\n" + " \"requiredScenarioState\": \"Started\",\n" + " \"newScenarioState\": \"First Attempt Failed\",\n" + " \"request\": {\n" + " \"method\": \"GET\",\n" + " \"urlPath\": \"/echo-headers\"\n" + " },\n" + " \"response\": {\n" + " \"fault\": \"EMPTY_RESPONSE\"\n" + " }\n" + " },\n" + " {\n" + " \"scenarioName\": \"Retry Scenario Example\",\n" + " \"requiredScenarioState\": \"First Attempt Failed\",\n" + " \"newScenarioState\": \"Second Attempt Failed\",\n" + " \"request\": {\n" + " \"method\": \"GET\",\n" + " \"urlPath\": \"/echo-headers\"\n" + " },\n" + " \"response\": {\n" + " \"fault\": \"EMPTY_RESPONSE\"\n" + " }\n" + " },\n" + " {\n" + " \"scenarioName\": \"Retry Scenario Example\",\n" + " \"requiredScenarioState\": \"Second Attempt Failed\",\n" + " \"request\": {\n" + " \"method\": \"GET\",\n" + " \"urlPath\": \"/echo-headers\"\n" + " },\n" + " \"response\": {\n" + " \"status\": 200,\n" + " \"headers\": {\n" + " \"Content-Type\": \"application/json\"\n" + " },\n" + " \"body\": \"{{request.headers}}\",\n" + " \"transformers\": [\n" + " \"response-template\"\n" + " ]\n" + " }\n" + " }\n" + " ]\n" + "}\n"; @AfterEach public void resetHttpClients() { HttpUtil.httpClient.clear(); } @Test public void testAddHttpInterceptorsIfPresent() throws IOException { importMapping(SUCCESS_AT_THIRD_RETRY_ECHOING_HEADERS_SENT); AtomicInteger invocations = new AtomicInteger(); HttpHeadersCustomizer customizer = new HttpHeadersCustomizer() { @Override public boolean applies( String method, String uri, Map> currentHeaders) { return true; } @Override public Map> newHeaders() { invocations.incrementAndGet(); Map> stringListMap = new java.util.HashMap<>(); stringListMap.put("test-header", Collections.singletonList("test-header-value")); return stringListMap; } @Override public boolean invokeOnce() { return false; } }; try (CloseableHttpClient httpClient = HttpUtil.buildHttpClient( new HttpClientSettingsKey(OCSPMode.DISABLE_OCSP_CHECKS), null, false, Collections.singletonList(customizer))) { CloseableHttpResponse response = httpClient.execute( new HttpGet( String.format("http://%s:%d/echo-headers", WIREMOCK_HOST, 
wiremockHttpPort))); String content = EntityUtils.toString(response.getEntity()); assertHttpHeadersAdded(content); assertCustomizerInvokedForEachRetry(3, invocations.get()); } } private void assertHttpHeadersAdded(String responseBody) { assertTrue(responseBody.contains("test-header-value")); } private void assertCustomizerInvokedForEachRetry(int expectedInvocations, int actualInvocations) { assertEquals(expectedInvocations, actualInvocations); } } ================================================ FILE: src/test/java/net/snowflake/client/internal/core/MinicoreTelemetryWiremockIT.java ================================================ package net.snowflake.client.internal.core; import static org.awaitility.Awaitility.await; import java.time.Duration; import java.util.HashMap; import java.util.Map; import net.snowflake.client.category.TestTags; import net.snowflake.client.internal.core.minicore.Minicore; import net.snowflake.client.internal.jdbc.BaseWiremockTest; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; @Tag(TestTags.CORE) public class MinicoreTelemetryWiremockIT extends BaseWiremockTest { private static final String LOGIN_MAPPING_PATH = "/wiremock/mappings/minicore/minicore_telemetry.json"; private final String WIREMOCK_HOST_WITH_HTTPS_AND_PORT = "https://" + WIREMOCK_HOST + ":" + wiremockHttpsPort; @Test public void testMinicoreTelemetryIncludedInLoginRequest() throws Exception, SFException { Minicore.initializeAsync(); await().atMost(Duration.ofSeconds(5)).until(() -> Minicore.getInstance() != null); importMappingFromResources(LOGIN_MAPPING_PATH); setCustomTrustStorePropertyPath(); SFLoginInput loginInput = createLoginInput(); Map connectionPropertiesMap = new HashMap<>(); connectionPropertiesMap.put(SFSessionProperty.TRACING, "ALL"); SessionUtil.openSession(loginInput, connectionPropertiesMap, "ALL"); verifyRequestCount(1, "/session/v1/login-request.*"); } private SFLoginInput createLoginInput() { SFLoginInput input = new SFLoginInput(); input.setServerUrl(WIREMOCK_HOST_WITH_HTTPS_AND_PORT); input.setUserName("TEST_USER"); input.setPassword("TEST_PASSWORD"); input.setAccountName("TEST_ACCOUNT"); input.setAppId("TEST_APP_ID"); input.setOCSPMode(OCSPMode.FAIL_OPEN); input.setHttpClientSettingsKey(new HttpClientSettingsKey(OCSPMode.FAIL_OPEN)); input.setLoginTimeout(30); input.setSessionParameters(new HashMap<>()); return input; } } ================================================ FILE: src/test/java/net/snowflake/client/internal/core/OAuthAuthorizationCodeFlowLatestIT.java ================================================ package net.snowflake.client.internal.core; import static net.snowflake.client.internal.core.SessionUtilExternalBrowser.AuthExternalBrowserHandlers; import java.net.URI; import java.time.Duration; import net.snowflake.client.category.TestTags; import net.snowflake.client.internal.core.auth.oauth.AccessTokenProvider; import net.snowflake.client.internal.core.auth.oauth.OAuthAuthorizationCodeAccessTokenProvider; import net.snowflake.client.internal.core.auth.oauth.StateProvider; import net.snowflake.client.internal.core.auth.oauth.TokenResponseDTO; import net.snowflake.client.internal.jdbc.BaseWiremockTest; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import net.snowflake.client.internal.log.SFLogger; import net.snowflake.client.internal.log.SFLoggerFactory; import org.apache.http.HttpResponse; import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import 
org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClients; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; @Tag(TestTags.CORE) public class OAuthAuthorizationCodeFlowLatestIT extends BaseWiremockTest { private static final String SCENARIOS_BASE_DIR = MAPPINGS_BASE_DIR + "/oauth/authorization_code"; private static final String SUCCESSFUL_FLOW_SCENARIO_MAPPINGS = SCENARIOS_BASE_DIR + "/successful_flow.json"; private static final String SUCCESSFUL_DPOP_FLOW_SCENARIO_MAPPINGS = SCENARIOS_BASE_DIR + "/successful_dpop_flow.json"; private static final String SUCCESSFUL_FLOW_WITH_SINGLE_USE_REFRESH_TOKENS_SCENARIO_MAPPINGS = SCENARIOS_BASE_DIR + "/successful_flow_with_single_use_refresh_tokens.json"; private static final String DPOP_NONCE_ERROR_SCENARIO_MAPPINGS = SCENARIOS_BASE_DIR + "/dpop_nonce_error_flow.json"; private static final String BROWSER_TIMEOUT_SCENARIO_MAPPING = SCENARIOS_BASE_DIR + "/browser_timeout_authorization_error.json"; private static final String INVALID_SCOPE_SCENARIO_MAPPING = SCENARIOS_BASE_DIR + "/invalid_scope_error.json"; private static final String INVALID_STATE_SCENARIO_MAPPING = SCENARIOS_BASE_DIR + "/invalid_state_error.json"; private static final String TOKEN_REQUEST_ERROR_SCENARIO_MAPPING = SCENARIOS_BASE_DIR + "/token_request_error.json"; private static final String CUSTOM_URLS_SCENARIO_MAPPINGS = SCENARIOS_BASE_DIR + "/external_idp_custom_urls.json"; private static final SFLogger logger = SFLoggerFactory.getLogger(OAuthAuthorizationCodeFlowLatestIT.class); private final AuthExternalBrowserHandlers wiremockProxyRequestBrowserHandler = new WiremockProxyRequestBrowserHandler(); private final AccessTokenProvider provider = new OAuthAuthorizationCodeAccessTokenProvider( wiremockProxyRequestBrowserHandler, new MockStateProvider(), 30); public OAuthAuthorizationCodeFlowLatestIT() throws SFException {} @Test public void successfulFlowScenario() throws SFException { importMappingFromResources(SUCCESSFUL_FLOW_SCENARIO_MAPPINGS); SFLoginInput loginInput = createLoginInputStub("http://localhost:8009/snowflake/oauth-redirect", null, null, false); TokenResponseDTO tokenResponse = provider.getAccessToken(loginInput); String accessToken = tokenResponse.getAccessToken(); Assertions.assertFalse(SnowflakeUtil.isNullOrEmpty(accessToken)); Assertions.assertEquals("access-token-123", accessToken); } @Test public void successfulFlowDPoPScenario() throws SFException { importMappingFromResources(SUCCESSFUL_DPOP_FLOW_SCENARIO_MAPPINGS); SFLoginInput loginInput = createLoginInputStubWithDPoPEnabled( "http://localhost:8012/snowflake/oauth-redirect", null, null); TokenResponseDTO tokenResponse = provider.getAccessToken(loginInput); String accessToken = tokenResponse.getAccessToken(); Assertions.assertFalse(SnowflakeUtil.isNullOrEmpty(accessToken)); Assertions.assertEquals("access-token-123", accessToken); } @Test public void successfulFlowDPoPScenarioWithNonce() throws SFException { importMappingFromResources(DPOP_NONCE_ERROR_SCENARIO_MAPPINGS); SFLoginInput loginInput = createLoginInputStubWithDPoPEnabled( "http://localhost:8013/snowflake/oauth-redirect", null, null); TokenResponseDTO tokenResponse = provider.getAccessToken(loginInput); String accessToken = tokenResponse.getAccessToken(); Assertions.assertFalse(SnowflakeUtil.isNullOrEmpty(accessToken)); Assertions.assertEquals("access-token-123", accessToken); } @Test public void successfulFlowWithSingleUseRefreshTokensScenario() throws 
SFException { importMappingFromResources(SUCCESSFUL_FLOW_WITH_SINGLE_USE_REFRESH_TOKENS_SCENARIO_MAPPINGS); SFLoginInput loginInput = createLoginInputStub("http://localhost:8009/snowflake/oauth-redirect", null, null, true); TokenResponseDTO tokenResponse = provider.getAccessToken(loginInput); String accessToken = tokenResponse.getAccessToken(); String refreshToken = tokenResponse.getRefreshToken(); Assertions.assertFalse(SnowflakeUtil.isNullOrEmpty(accessToken)); Assertions.assertFalse(SnowflakeUtil.isNullOrEmpty(refreshToken)); Assertions.assertEquals("access-token-123", accessToken); Assertions.assertEquals("refresh-token-123", refreshToken); } @Test public void customUrlsScenario() throws SFException { importMappingFromResources(CUSTOM_URLS_SCENARIO_MAPPINGS); SFLoginInput loginInput = createLoginInputStub( "http://localhost:8007/snowflake/oauth-redirect", String.format("http://%s:%d/authorization", WIREMOCK_HOST, wiremockHttpPort), String.format("http://%s:%d/tokenrequest", WIREMOCK_HOST, wiremockHttpPort), false); TokenResponseDTO tokenResponse = provider.getAccessToken(loginInput); String accessToken = tokenResponse.getAccessToken(); Assertions.assertFalse(SnowflakeUtil.isNullOrEmpty(accessToken)); Assertions.assertEquals("access-token-123", accessToken); } @Test public void browserTimeoutFlowScenario() throws SFException { importMappingFromResources(BROWSER_TIMEOUT_SCENARIO_MAPPING); SFLoginInput loginInput = createLoginInputStub("http://localhost:8004/snowflake/oauth-redirect", null, null, false); AccessTokenProvider provider = new OAuthAuthorizationCodeAccessTokenProvider( wiremockProxyRequestBrowserHandler, new MockStateProvider(), 1); SFException e = Assertions.assertThrows(SFException.class, () -> provider.getAccessToken(loginInput)); Assertions.assertTrue( e.getMessage() .contains( "Authorization request timed out. Snowflake driver did not receive authorization code back to the redirect URI. Verify your security integration and driver configuration.")); } @Test public void invalidScopeFlowScenario() { importMappingFromResources(INVALID_SCOPE_SCENARIO_MAPPING); SFLoginInput loginInput = createLoginInputStub("http://localhost:8002/snowflake/oauth-redirect", null, null, false); SFException e = Assertions.assertThrows(SFException.class, () -> provider.getAccessToken(loginInput)); Assertions.assertTrue( e.getMessage() .contains( "Error during authorization: invalid_scope, One or more scopes are not configured for the authorization server resource.")); } @Test public void invalidStateFlowScenario() { importMappingFromResources(INVALID_STATE_SCENARIO_MAPPING); SFLoginInput loginInput = createLoginInputStub("http://localhost:8010/snowflake/oauth-redirect", null, null, false); SFException e = Assertions.assertThrows(SFException.class, () -> provider.getAccessToken(loginInput)); Assertions.assertTrue( e.getMessage() .contains( "Error during OAuth Authorization Code authentication: Invalid authorization request redirection state: invalidstate, expected: abc123")); } @Test public void tokenRequestErrorFlowScenario() { importMappingFromResources(TOKEN_REQUEST_ERROR_SCENARIO_MAPPING); SFLoginInput loginInput = createLoginInputStub("http://localhost:8003/snowflake/oauth-redirect", null, null, false); SFException e = Assertions.assertThrows(SFException.class, () -> provider.getAccessToken(loginInput)); Assertions.assertTrue( e.getMessage() .contains("JDBC driver encountered communication error. 
Message: HTTP status=400")); } private SFLoginInput createLoginInputStub( String redirectUri, String authorizationUrl, String tokenRequestUrl, boolean enableSingleUseRefreshTokens) { SFLoginInput loginInputStub = new SFLoginInput(); loginInputStub.setServerUrl(String.format("http://%s:%d/", WIREMOCK_HOST, wiremockHttpPort)); loginInputStub.setOauthLoginInput( new SFOauthLoginInput( "123", "123", redirectUri, authorizationUrl, tokenRequestUrl, "session:role:ANALYST", enableSingleUseRefreshTokens)); loginInputStub.setSocketTimeout(Duration.ofMinutes(5)); loginInputStub.setHttpClientSettingsKey(new HttpClientSettingsKey(OCSPMode.FAIL_OPEN)); return loginInputStub; } private SFLoginInput createLoginInputStubWithDPoPEnabled( String redirectUri, String authorizationUrl, String tokenRequestUrl) { SFLoginInput loginInputStub = createLoginInputStub(redirectUri, authorizationUrl, tokenRequestUrl, false); loginInputStub.setDPoPEnabled(true); return loginInputStub; } static class WiremockProxyRequestBrowserHandler implements AuthExternalBrowserHandlers { @Override public HttpPost build(URI uri) { // do nothing return null; } @Override public void openBrowser(String ssoUrl) { try (CloseableHttpClient client = HttpClients.createDefault()) { logger.debug("executing browser request to redirect uri: {}", ssoUrl); HttpResponse response = client.execute(new HttpGet(ssoUrl)); if (response.getStatusLine().getStatusCode() != 200) { throw new RuntimeException("Invalid response from " + ssoUrl); } } catch (Exception e) { throw new RuntimeException(e); } } @Override public void output(String msg) { // do nothing } } static class MockStateProvider implements StateProvider { @Override public String getState() { return "abc123"; } } } ================================================ FILE: src/test/java/net/snowflake/client/internal/core/OAuthClientCredentialsFlowLatestIT.java ================================================ package net.snowflake.client.internal.core; import java.time.Duration; import net.snowflake.client.category.TestTags; import net.snowflake.client.internal.core.auth.oauth.AccessTokenProvider; import net.snowflake.client.internal.core.auth.oauth.OAuthClientCredentialsAccessTokenProvider; import net.snowflake.client.internal.core.auth.oauth.TokenResponseDTO; import net.snowflake.client.internal.jdbc.BaseWiremockTest; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; @Tag(TestTags.CORE) public class OAuthClientCredentialsFlowLatestIT extends BaseWiremockTest { private static final String SCENARIOS_BASE_DIR = MAPPINGS_BASE_DIR + "/oauth/client_credentials"; private static final String SUCCESSFUL_FLOW_SCENARIO_MAPPINGS = SCENARIOS_BASE_DIR + "/successful_flow.json"; private static final String SUCCESSFUL_DPOP_FLOW_SCENARIO_MAPPINGS = SCENARIOS_BASE_DIR + "/successful_dpop_flow.json"; private static final String DPOP_NONCE_ERROR_SCENARIO_MAPPINGS = SCENARIOS_BASE_DIR + "/dpop_nonce_error_flow.json"; private static final String TOKEN_REQUEST_ERROR_SCENARIO_MAPPING = SCENARIOS_BASE_DIR + "/token_request_error.json"; @Test public void successfulFlowScenario() throws SFException { importMappingFromResources(SUCCESSFUL_FLOW_SCENARIO_MAPPINGS); SFLoginInput loginInput = createLoginInputStub(); AccessTokenProvider provider = new OAuthClientCredentialsAccessTokenProvider(); TokenResponseDTO tokenResponse = provider.getAccessToken(loginInput); String accessToken = tokenResponse.getAccessToken(); 
Assertions.assertFalse(SnowflakeUtil.isNullOrEmpty(accessToken)); Assertions.assertEquals("access-token-123", accessToken); } @Test public void successfulFlowScenarioDPoP() throws SFException { importMappingFromResources(SUCCESSFUL_DPOP_FLOW_SCENARIO_MAPPINGS); SFLoginInput loginInput = createLoginInputStubWithDPoPEnabled(); AccessTokenProvider provider = new OAuthClientCredentialsAccessTokenProvider(); TokenResponseDTO tokenResponse = provider.getAccessToken(loginInput); String accessToken = tokenResponse.getAccessToken(); Assertions.assertFalse(SnowflakeUtil.isNullOrEmpty(accessToken)); Assertions.assertEquals("access-token-123", accessToken); } @Test public void successfulFlowScenarioDPoPNonceError() throws SFException { importMappingFromResources(DPOP_NONCE_ERROR_SCENARIO_MAPPINGS); SFLoginInput loginInput = createLoginInputStubWithDPoPEnabled(); AccessTokenProvider provider = new OAuthClientCredentialsAccessTokenProvider(); TokenResponseDTO tokenResponse = provider.getAccessToken(loginInput); String accessToken = tokenResponse.getAccessToken(); Assertions.assertFalse(SnowflakeUtil.isNullOrEmpty(accessToken)); Assertions.assertEquals("access-token-123", accessToken); } @Test public void tokenRequestErrorFlowScenario() throws SFException { importMappingFromResources(TOKEN_REQUEST_ERROR_SCENARIO_MAPPING); SFLoginInput loginInput = createLoginInputStub(); AccessTokenProvider provider = new OAuthClientCredentialsAccessTokenProvider(); SFException e = Assertions.assertThrows(SFException.class, () -> provider.getAccessToken(loginInput)); Assertions.assertTrue( e.getMessage() .contains("JDBC driver encountered communication error. Message: HTTP status=400")); } private SFLoginInput createLoginInputStub() { SFLoginInput loginInputStub = new SFLoginInput(); loginInputStub.setServerUrl(String.format("http://%s:%d/", WIREMOCK_HOST, wiremockHttpPort)); loginInputStub.setOauthLoginInput( new SFOauthLoginInput( "123", "123", null, null, String.format("http://%s:%d/oauth/token-request", WIREMOCK_HOST, wiremockHttpPort), "session:role:ANALYST")); loginInputStub.setSocketTimeout(Duration.ofMinutes(5)); loginInputStub.setHttpClientSettingsKey(new HttpClientSettingsKey(OCSPMode.FAIL_OPEN)); return loginInputStub; } private SFLoginInput createLoginInputStubWithDPoPEnabled() { SFLoginInput loginInputStub = createLoginInputStub(); loginInputStub.setDPoPEnabled(true); return loginInputStub; } } ================================================ FILE: src/test/java/net/snowflake/client/internal/core/OAuthLegacyFlowLatestIT.java ================================================ package net.snowflake.client.internal.core; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import java.time.Duration; import java.util.HashMap; import net.snowflake.client.api.auth.AuthenticatorType; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.category.TestTags; import net.snowflake.client.internal.jdbc.BaseWiremockTest; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; @Tag(TestTags.AUTHENTICATION) public class OAuthLegacyFlowLatestIT extends BaseWiremockTest { private static final String SCENARIOS_BASE_DIR = MAPPINGS_BASE_DIR + "/oauth/legacy_oauth"; private static final String EXPIRED_TOKEN_SCENARIO = SCENARIOS_BASE_DIR + "/token_expired.json"; @Test public void shouldThrowExpirationExceptionUponExpiredTokenResponse() { SFLoginInput loginInput = createLoginInputStub(); 
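// The mapping imported below simulates the legacy OAuth path: the
// pre-provisioned token set via setToken(...) is rejected by the login
// endpoint as expired, and with no refresh token configured the driver can
// only surface the failure. The bracketed number asserted on comes from the
// mocked error response, not from a real query.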
importMappingFromResources(EXPIRED_TOKEN_SCENARIO); SnowflakeSQLException e = assertThrows( SnowflakeSQLException.class, () -> SessionUtil.openSession(loginInput, new HashMap<>(), "INFO")); assertTrue( e.getMessage().contains("OAuth access token expired. [1172527951366]"), "Expected expiration error message, but got: " + e.getMessage()); } private SFLoginInput createLoginInputStub() { SFLoginInput input = new SFLoginInput(); input.setAuthenticator(AuthenticatorType.OAUTH.name()); input.setOriginalAuthenticator(AuthenticatorType.OAUTH.name()); input.setServerUrl(String.format("http://%s:%d/", WIREMOCK_HOST, wiremockHttpPort)); input.setUserName("MOCK_USERNAME"); input.setAccountName("MOCK_ACCOUNT_NAME"); input.setAppId("MOCK_APP_ID"); input.setAppVersion("MOCK_APP_VERSION"); input.setToken("expired-access-token-123"); input.setOCSPMode(OCSPMode.FAIL_OPEN); input.setHttpClientSettingsKey(new HttpClientSettingsKey(OCSPMode.FAIL_OPEN)); input.setBrowserResponseTimeout(Duration.ofSeconds(5)); input.setBrowserHandler( new OAuthAuthorizationCodeFlowLatestIT.WiremockProxyRequestBrowserHandler()); input.setLoginTimeout(1000); HashMap sessionParameters = new HashMap<>(); sessionParameters.put("CLIENT_STORE_TEMPORARY_CREDENTIAL", "true"); input.setSessionParameters(sessionParameters); input.setOauthLoginInput(new SFOauthLoginInput(null, null, null, null, null, null)); return input; } } ================================================ FILE: src/test/java/net/snowflake/client/internal/core/OAuthTokenCacheLatestIT.java ================================================ package net.snowflake.client.internal.core; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.mockito.Mockito.any; import static org.mockito.Mockito.mockStatic; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; import java.time.Duration; import java.util.Collections; import java.util.HashMap; import java.util.Map; import net.snowflake.client.api.auth.AuthenticatorType; import net.snowflake.client.api.exception.SnowflakeSQLException; import net.snowflake.client.category.TestTags; import net.snowflake.client.internal.jdbc.BaseWiremockTest; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; import org.mockito.ArgumentCaptor; import org.mockito.MockedStatic; import org.mockito.stubbing.Answer; @Tag(TestTags.CORE) public class OAuthTokenCacheLatestIT extends BaseWiremockTest { public static final String MOCK_DPOP_PUBLIC_KEY = "{\"kty\":\"EC\",\"d\":\"j5-J-nLE4J1I8ZWtArP8eQbxUbYMPmRvaEjEkHFlHds\",\"crv\":\"P-256\",\"x\":\"RL5cE-TC4Jr6CxtT4lEI2Yu6wT6LbwojPQsgHUg01F0\",\"y\":\"UAdLUSWTJ6czXaS3SfEFUZzKPcVVq4OZAD8e7Rp75y4\"}"; // pragma: allowlist secret private static final String SCENARIOS_BASE_DIR = MAPPINGS_BASE_DIR + "/oauth/token_caching"; private static final String CACHING_TOKENS_AFTER_CONNECTING_SCENARIO_MAPPINGS = SCENARIOS_BASE_DIR + "/caching_tokens_after_connecting.json"; private static final String NOT_CACHING_TOKENS_FOR_CLIENT_CREDENTIALS_FLOW = SCENARIOS_BASE_DIR + "/not_caching_after_client_credentials_flow.json"; private static final String CACHING_TOKENS_AND_DPOP_KEY_AFTER_CONNECTING_SCENARIO_MAPPINGS = SCENARIOS_BASE_DIR + "/caching_tokens_and_dpop_key_after_connecting.json"; private static final String REUSING_CACHED_ACCESS_TOKEN_SCENARIO_MAPPINGS = SCENARIOS_BASE_DIR + "/reusing_cached_access_token_to_authenticate.json"; private static final String 
REFRESHING_EXPIRED_ACCESS_TOKEN_SCENARIO_MAPPINGS = SCENARIOS_BASE_DIR + "/refreshing_expired_access_token.json"; private static final String REFRESHING_EXPIRED_ACCESS_TOKEN_SCENARIO_MAPPINGS_DPOP = SCENARIOS_BASE_DIR + "/refreshing_expired_access_token_dpop.json"; private static final String REFRESHING_EXPIRED_ACCESS_TOKEN_SCENARIO_MAPPINGS_DPOP_NONCE_ERROR = SCENARIOS_BASE_DIR + "/refreshing_expired_access_token_dpop_nonce_error.json"; private static final String CACHING_REFRESHED_ACCESS_TOKEN_AND_NEW_REFRESH_TOKEN_SCENARIO_MAPPINGS = SCENARIOS_BASE_DIR + "/caching_refreshed_access_token_and_new_refresh_token.json"; private static final String RESTARTING_FULL_FLOW_ON_EXPIRATION_AND_ERROR_WHEN_REFRESHING = SCENARIOS_BASE_DIR + "/restarting_full_flow_on_refresh_token_error.json"; private static final String RESTARTING_FULL_FLOW_ON_EXPIRATION_AND_NO_REFRESH_TOKEN = SCENARIOS_BASE_DIR + "/restarting_full_flow_on_expiration_and_no_refresh_token.json"; @Test public void shouldCacheAccessTokenAfterConnecting() throws SFException, SnowflakeSQLException { importMappingFromResources(CACHING_TOKENS_AFTER_CONNECTING_SCENARIO_MAPPINGS); try (MockedStatic credentialManagerMockedStatic = mockStatic(CredentialManager.class)) { SFLoginInput loginInput = createLoginInputStub(); SessionUtil.openSession(loginInput, new HashMap<>(), "INFO"); captureAndAssertSavedTokenValues( credentialManagerMockedStatic, "access-token-123", "refresh-token-123"); } } @Test public void shouldCacheAccessTokenAndDPoPKeyAfterConnecting() throws SFException, SnowflakeSQLException { importMappingFromResources(CACHING_TOKENS_AND_DPOP_KEY_AFTER_CONNECTING_SCENARIO_MAPPINGS); try (MockedStatic credentialManagerMockedStatic = mockStatic(CredentialManager.class)) { SFLoginInput loginInput = createLoginInputStubWithDPoPEnabled(); SessionUtil.openSession(loginInput, new HashMap<>(), "INFO"); captureAndAssertSavedTokenValuesAndDPoPKey( credentialManagerMockedStatic, "access-token-123", "refresh-token-123"); } } @Test public void shouldReuseCachedAccessTokenWhenConnecting() throws SFException, SnowflakeSQLException { importMappingFromResources(REUSING_CACHED_ACCESS_TOKEN_SCENARIO_MAPPINGS); try (MockedStatic credentialManagerMockedStatic = mockStatic(CredentialManager.class)) { mockLoadingTokensFromCache( credentialManagerMockedStatic, "reused-access-token-123", "reused-refresh-token-123"); SFLoginInput loginInput = createLoginInputStub(); SFLoginOutput loginOutput = SessionUtil.openSession(loginInput, new HashMap<>(), "INFO"); assertEquals("reused-access-token-123", loginOutput.getOauthAccessToken()); assertEquals("reused-refresh-token-123", loginOutput.getOauthRefreshToken()); } } @Test public void shouldNotCacheTokensForClientCredentialsFlow() throws SFException, SnowflakeSQLException { importMappingFromResources(NOT_CACHING_TOKENS_FOR_CLIENT_CREDENTIALS_FLOW); try (MockedStatic credentialManagerMockedStatic = mockStatic(CredentialManager.class)) { SFLoginInput loginInput = createLoginInputStub(); loginInput.setAuthenticator(AuthenticatorType.OAUTH_CLIENT_CREDENTIALS.name()); loginInput.setOriginalAuthenticator(AuthenticatorType.OAUTH_CLIENT_CREDENTIALS.name()); SFLoginOutput loginOutput = SessionUtil.openSession(loginInput, new HashMap<>(), "INFO"); Assertions.assertNotNull(loginOutput); Assertions.assertEquals("session token", loginOutput.getSessionToken()); credentialManagerMockedStatic.verify( () -> CredentialManager.fillCachedOAuthAccessToken(loginInput), never()); credentialManagerMockedStatic.verify( () -> 
CredentialManager.fillCachedOAuthRefreshToken(loginInput), never()); credentialManagerMockedStatic.verify( () -> CredentialManager.fillCachedDPoPBundledAccessToken(loginInput), never()); credentialManagerMockedStatic.verify( () -> CredentialManager.writeOAuthAccessToken(loginInput), never()); credentialManagerMockedStatic.verify( () -> CredentialManager.writeOAuthRefreshToken(loginInput), never()); credentialManagerMockedStatic.verify( () -> CredentialManager.writeDPoPBundledAccessToken(loginInput), never()); } } @Test public void shouldRefreshExpiredAccessTokenAndConnectSuccessfully() throws SFException, SnowflakeSQLException { importMappingFromResources(REFRESHING_EXPIRED_ACCESS_TOKEN_SCENARIO_MAPPINGS); try (MockedStatic credentialManagerMockedStatic = mockStatic(CredentialManager.class)) { mockLoadingTokensFromCache( credentialManagerMockedStatic, "expired-access-token-123", "some-refresh-token-123"); SFLoginInput loginInput = createLoginInputStub(); SFLoginOutput loginOutput = SessionUtil.openSession(loginInput, new HashMap<>(), "INFO"); credentialManagerMockedStatic.verify( () -> CredentialManager.deleteOAuthAccessTokenCacheEntry(loginInput), times(1)); credentialManagerMockedStatic.verify( () -> CredentialManager.deleteOAuthRefreshTokenCacheEntry(loginInput), never()); assertEquals("new-refreshed-access-token-123", loginOutput.getOauthAccessToken()); captureAndAssertSavedTokenValues( credentialManagerMockedStatic, "new-refreshed-access-token-123", "some-refresh-token-123"); } } @Test public void shouldRefreshExpiredAccessTokenWithDPoPAndConnectSuccessfully() throws SFException, SnowflakeSQLException { importMappingFromResources(REFRESHING_EXPIRED_ACCESS_TOKEN_SCENARIO_MAPPINGS_DPOP); try (MockedStatic credentialManagerMockedStatic = mockStatic(CredentialManager.class)) { mockLoadingTokensFromCache(credentialManagerMockedStatic, null, "some-refresh-token-123"); mockLoadingDPoPPublicKeyFromCache(credentialManagerMockedStatic, "expired-access-token-123"); SFLoginInput loginInput = createLoginInputStubWithDPoPEnabled(); SFLoginOutput loginOutput = SessionUtil.openSession(loginInput, new HashMap<>(), "INFO"); credentialManagerMockedStatic.verify( () -> CredentialManager.deleteOAuthAccessTokenCacheEntry(loginInput), times(1)); credentialManagerMockedStatic.verify( () -> CredentialManager.deleteDPoPBundledAccessTokenCacheEntry(loginInput), times(1)); credentialManagerMockedStatic.verify( () -> CredentialManager.deleteOAuthRefreshTokenCacheEntry(loginInput), never()); assertEquals("new-refreshed-access-token-123", loginOutput.getOauthAccessToken()); captureAndAssertSavedTokenValuesAndDPoPKey( credentialManagerMockedStatic, "new-refreshed-access-token-123", "some-refresh-token-123"); } } @Test public void shouldRefreshExpiredAccessTokenWithDPoPNonceErrorAndConnectSuccessfully() throws SFException, SnowflakeSQLException { importMappingFromResources(REFRESHING_EXPIRED_ACCESS_TOKEN_SCENARIO_MAPPINGS_DPOP_NONCE_ERROR); try (MockedStatic credentialManagerMockedStatic = mockStatic(CredentialManager.class)) { mockLoadingTokensFromCache(credentialManagerMockedStatic, null, "some-refresh-token-123"); mockLoadingDPoPPublicKeyFromCache(credentialManagerMockedStatic, "expired-access-token-123"); SFLoginInput loginInput = createLoginInputStubWithDPoPEnabled(); SFLoginOutput loginOutput = SessionUtil.openSession(loginInput, new HashMap<>(), "INFO"); credentialManagerMockedStatic.verify( () -> CredentialManager.deleteOAuthAccessTokenCacheEntry(loginInput), times(1)); credentialManagerMockedStatic.verify( 
() -> CredentialManager.deleteDPoPBundledAccessTokenCacheEntry(loginInput), times(1)); credentialManagerMockedStatic.verify( () -> CredentialManager.deleteOAuthRefreshTokenCacheEntry(loginInput), never()); assertEquals("new-refreshed-access-token-123", loginOutput.getOauthAccessToken()); captureAndAssertSavedTokenValuesAndDPoPKey( credentialManagerMockedStatic, "new-refreshed-access-token-123", "some-refresh-token-123"); } } @Test public void shouldCacheRefreshedAccessTokenAndNewRefreshToken() throws SFException, SnowflakeSQLException { importMappingFromResources( CACHING_REFRESHED_ACCESS_TOKEN_AND_NEW_REFRESH_TOKEN_SCENARIO_MAPPINGS); try (MockedStatic credentialManagerMockedStatic = mockStatic(CredentialManager.class)) { mockLoadingTokensFromCache( credentialManagerMockedStatic, "expired-access-token-123", "some-refresh-token-123"); SFLoginInput loginInput = createLoginInputStub(); SFLoginOutput loginOutput = SessionUtil.openSession(loginInput, new HashMap<>(), "INFO"); credentialManagerMockedStatic.verify( () -> CredentialManager.deleteOAuthAccessTokenCacheEntry(loginInput), times(1)); assertEquals("new-refreshed-access-token-123", loginOutput.getOauthAccessToken()); captureAndAssertSavedTokenValues( credentialManagerMockedStatic, "new-refreshed-access-token-123", "new-refresh-token-123"); } } @Test public void shouldRestartFullFlowOnAccessTokenExpirationAndErrorWhenRefreshing() throws SFException, SnowflakeSQLException { importMappingFromResources(RESTARTING_FULL_FLOW_ON_EXPIRATION_AND_ERROR_WHEN_REFRESHING); try (MockedStatic credentialManagerMockedStatic = mockStatic(CredentialManager.class)) { mockLoadingTokensFromCache( credentialManagerMockedStatic, "expired-access-token-123", "some-refresh-token-123"); SFLoginInput loginInput = createLoginInputStub(); SFLoginOutput loginOutput = SessionUtil.openSession(loginInput, new HashMap<>(), "INFO"); credentialManagerMockedStatic.verify( () -> CredentialManager.deleteOAuthAccessTokenCacheEntry(loginInput), times(1)); credentialManagerMockedStatic.verify( () -> CredentialManager.deleteOAuthRefreshTokenCacheEntry(loginInput), times(1)); assertEquals("newly-obtained-access-token-123", loginOutput.getOauthAccessToken()); captureAndAssertSavedTokenValues( credentialManagerMockedStatic, "newly-obtained-access-token-123", "newly-obtained-refresh-token"); } } @Test public void shouldRestartFullFlowOnAccessTokenExpirationAndNoRefreshToken() throws SFException, SnowflakeSQLException { importMappingFromResources(RESTARTING_FULL_FLOW_ON_EXPIRATION_AND_NO_REFRESH_TOKEN); try (MockedStatic credentialManagerMockedStatic = mockStatic(CredentialManager.class)) { mockLoadingTokensFromCache(credentialManagerMockedStatic, "expired-access-token-123", null); SFLoginInput loginInput = createLoginInputStub(); SFLoginOutput loginOutput = SessionUtil.openSession(loginInput, new HashMap<>(), "INFO"); credentialManagerMockedStatic.verify( () -> CredentialManager.deleteOAuthAccessTokenCacheEntry(loginInput), times(1)); credentialManagerMockedStatic.verify( () -> CredentialManager.deleteOAuthRefreshTokenCacheEntry(loginInput), never()); assertEquals("newly-obtained-access-token-123", loginOutput.getOauthAccessToken()); captureAndAssertSavedTokenValues( credentialManagerMockedStatic, "newly-obtained-access-token-123", null); } } @Test public void shouldNotDeleteCachedTokensWhenCachingIsDisabled() throws SFException, SnowflakeSQLException { importMappingFromResources(REFRESHING_EXPIRED_ACCESS_TOKEN_SCENARIO_MAPPINGS); try (MockedStatic credentialManagerMockedStatic = 
mockStatic(CredentialManager.class)) { SFLoginInput loginInput = createLoginInputStubWithCachingDisabled(); loginInput.setOauthAccessToken("expired-access-token-123"); loginInput.setOauthRefreshToken("some-refresh-token-123"); SFLoginOutput loginOutput = SessionUtil.openSession(loginInput, new HashMap<>(), "INFO"); credentialManagerMockedStatic.verify( () -> CredentialManager.deleteOAuthAccessTokenCacheEntry(loginInput), never()); credentialManagerMockedStatic.verify( () -> CredentialManager.deleteDPoPBundledAccessTokenCacheEntry(loginInput), never()); credentialManagerMockedStatic.verify( () -> CredentialManager.writeOAuthAccessToken(any(SFLoginInput.class)), never()); credentialManagerMockedStatic.verify( () -> CredentialManager.writeOAuthRefreshToken(any(SFLoginInput.class)), never()); assertEquals("new-refreshed-access-token-123", loginOutput.getOauthAccessToken()); } } private SFLoginInput createLoginInputStub() { SFLoginInput input = new SFLoginInput(); input.setAuthenticator(AuthenticatorType.OAUTH_AUTHORIZATION_CODE.name()); input.setOriginalAuthenticator(AuthenticatorType.OAUTH_AUTHORIZATION_CODE.name()); input.setServerUrl(String.format("http://%s:%d/", WIREMOCK_HOST, wiremockHttpPort)); input.setUserName("MOCK_USERNAME"); input.setAccountName("MOCK_ACCOUNT_NAME"); input.setAppId("MOCK_APP_ID"); input.setAppVersion("MOCK_APP_VERSION"); input.setOCSPMode(OCSPMode.FAIL_OPEN); input.setHttpClientSettingsKey(new HttpClientSettingsKey(OCSPMode.FAIL_OPEN)); input.setBrowserResponseTimeout(Duration.ofSeconds(5)); input.setBrowserHandler( new OAuthAuthorizationCodeFlowLatestIT.WiremockProxyRequestBrowserHandler()); input.setLoginTimeout(1000); input.setSessionParameters(Collections.singletonMap("CLIENT_STORE_TEMPORARY_CREDENTIAL", true)); input.setOauthLoginInput( new SFOauthLoginInput( "123", "123", null, String.format("http://%s:%d/oauth/authorize", WIREMOCK_HOST, wiremockHttpPort), String.format("http://%s:%d/oauth/token-request", WIREMOCK_HOST, wiremockHttpPort), "session:role:ANALYST")); return input; } private SFLoginInput createLoginInputStubWithDPoPEnabled() { SFLoginInput input = createLoginInputStub(); input.setDPoPEnabled(true); return input; } private SFLoginInput createLoginInputStubWithCachingDisabled() { SFLoginInput input = new SFLoginInput(); input.setAuthenticator(AuthenticatorType.OAUTH_AUTHORIZATION_CODE.name()); input.setOriginalAuthenticator(AuthenticatorType.OAUTH_AUTHORIZATION_CODE.name()); input.setServerUrl(String.format("http://%s:%d/", WIREMOCK_HOST, wiremockHttpPort)); input.setUserName("MOCK_USERNAME"); input.setAccountName("MOCK_ACCOUNT_NAME"); input.setAppId("MOCK_APP_ID"); input.setAppVersion("MOCK_APP_VERSION"); input.setOCSPMode(OCSPMode.FAIL_OPEN); input.setHttpClientSettingsKey(new HttpClientSettingsKey(OCSPMode.FAIL_OPEN)); input.setBrowserResponseTimeout(Duration.ofSeconds(5)); input.setBrowserHandler( new OAuthAuthorizationCodeFlowLatestIT.WiremockProxyRequestBrowserHandler()); input.setLoginTimeout(1000); Map sessionParams = new HashMap<>(); sessionParams.put("CLIENT_STORE_TEMPORARY_CREDENTIAL", false); input.setSessionParameters(sessionParams); input.setOauthLoginInput( new SFOauthLoginInput( "123", "123", null, String.format("http://%s:%d/oauth/authorize", WIREMOCK_HOST, wiremockHttpPort), String.format("http://%s:%d/oauth/token-request", WIREMOCK_HOST, wiremockHttpPort), "session:role:ANALYST")); return input; } private static void mockLoadingTokensFromCache( MockedStatic credentialManagerMock, String oauthAccessToken, String oauthRefreshToken) 
{ Answer fillCachedOAuthAccessTokenInvocation = invocation -> { ((SFLoginInput) invocation.getArguments()[0]).setOauthAccessToken(oauthAccessToken); return null; }; Answer fillCachedOAuthRefreshTokenInvocation = invocation -> { ((SFLoginInput) invocation.getArguments()[0]).setOauthRefreshToken(oauthRefreshToken); return null; }; credentialManagerMock .when(() -> CredentialManager.fillCachedOAuthAccessToken(any(SFLoginInput.class))) .then(fillCachedOAuthAccessTokenInvocation); credentialManagerMock .when(() -> CredentialManager.fillCachedOAuthRefreshToken(any(SFLoginInput.class))) .then(fillCachedOAuthRefreshTokenInvocation); } private static void mockLoadingDPoPPublicKeyFromCache( MockedStatic credentialManagerMock, String oauthAccessToken) { Answer fillCachedDPoPPublicKeyInvocation = invocation -> { ((SFLoginInput) invocation.getArguments()[0]).setOauthAccessToken(oauthAccessToken); ((SFLoginInput) invocation.getArguments()[0]).setDPoPPublicKey(MOCK_DPOP_PUBLIC_KEY); return null; }; credentialManagerMock .when(() -> CredentialManager.fillCachedDPoPBundledAccessToken(any(SFLoginInput.class))) .then(fillCachedDPoPPublicKeyInvocation); } private static void captureAndAssertSavedTokenValues( MockedStatic credentialManagerMock, String expectedAccessToken, String expectedRefreshToken) { ArgumentCaptor accessTokenInputCaptor = ArgumentCaptor.forClass(SFLoginInput.class); credentialManagerMock.verify( () -> CredentialManager.writeOAuthAccessToken(accessTokenInputCaptor.capture())); assertEquals(expectedAccessToken, accessTokenInputCaptor.getValue().getOauthAccessToken()); if (expectedRefreshToken != null) { ArgumentCaptor refreshTokenInputCaptor = ArgumentCaptor.forClass(SFLoginInput.class); credentialManagerMock.verify( () -> CredentialManager.writeOAuthRefreshToken(refreshTokenInputCaptor.capture())); assertEquals(expectedRefreshToken, refreshTokenInputCaptor.getValue().getOauthRefreshToken()); } else { credentialManagerMock.verify( () -> CredentialManager.writeOAuthRefreshToken(any(SFLoginInput.class)), never()); } } private static void captureAndAssertSavedTokenValuesAndDPoPKey( MockedStatic credentialManagerMock, String expectedAccessToken, String expectedRefreshToken) { if (expectedRefreshToken != null) { ArgumentCaptor refreshTokenInputCaptor = ArgumentCaptor.forClass(SFLoginInput.class); credentialManagerMock.verify( () -> CredentialManager.writeOAuthRefreshToken(refreshTokenInputCaptor.capture())); assertEquals(expectedRefreshToken, refreshTokenInputCaptor.getValue().getOauthRefreshToken()); } ArgumentCaptor dpopBundledAccessTokenInputCaptor = ArgumentCaptor.forClass(SFLoginInput.class); credentialManagerMock.verify( () -> CredentialManager.writeDPoPBundledAccessToken( dpopBundledAccessTokenInputCaptor.capture())); assertEquals( expectedAccessToken, dpopBundledAccessTokenInputCaptor.getValue().getOauthAccessToken()); assertNotNull(dpopBundledAccessTokenInputCaptor.getValue().getDPoPPublicKey()); } } ================================================ FILE: src/test/java/net/snowflake/client/internal/core/OCSPCacheServerTest.java ================================================ package net.snowflake.client.internal.core; import static org.junit.jupiter.api.Assertions.assertEquals; import java.util.stream.Stream; import org.junit.jupiter.api.extension.ExtensionContext; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.ArgumentsProvider; import org.junit.jupiter.params.provider.ArgumentsSource; public 
class OCSPCacheServerTest { static class URLProvider implements ArgumentsProvider { @Override public Stream provideArguments(ExtensionContext context) throws Exception { return Stream.of( Arguments.of( "bla-12345.global.snowflakecomputing.com", "https://ocspssd-12345.global.snowflakecomputing.com/ocsp/fetch", "https://ocspssd-12345.global.snowflakecomputing.com/ocsp/retry"), Arguments.of( "bla-12345.global.snowflakecomputing.cn", "https://ocspssd-12345.global.snowflakecomputing.cn/ocsp/fetch", "https://ocspssd-12345.global.snowflakecomputing.cn/ocsp/retry"), Arguments.of( "bla-12345.global.snowflakecomputing.xyz", "https://ocspssd-12345.global.snowflakecomputing.xyz/ocsp/fetch", "https://ocspssd-12345.global.snowflakecomputing.xyz/ocsp/retry"), Arguments.of( "bla-12345.GLOBAL.snowflakecomputing.xyz", "https://ocspssd-12345.GLOBAL.snowflakecomputing.xyz/ocsp/fetch", "https://ocspssd-12345.GLOBAL.snowflakecomputing.xyz/ocsp/retry"), Arguments.of( "bla-12345.snowflakecomputing.com", "https://ocspssd.snowflakecomputing.com/ocsp/fetch", "https://ocspssd.snowflakecomputing.com/ocsp/retry"), Arguments.of( "bla-12345.snowflakecomputing.cn", "https://ocspssd.snowflakecomputing.cn/ocsp/fetch", "https://ocspssd.snowflakecomputing.cn/ocsp/retry"), Arguments.of( "bla-12345.snowflakecomputing.xyz", "https://ocspssd.snowflakecomputing.xyz/ocsp/fetch", "https://ocspssd.snowflakecomputing.xyz/ocsp/retry"), Arguments.of( "bla-12345.SNOWFLAKEcomputing.xyz", "https://ocspssd.SNOWFLAKEcomputing.xyz/ocsp/fetch", "https://ocspssd.SNOWFLAKEcomputing.xyz/ocsp/retry"), Arguments.of( "s3.amazoncomaws.com", "https://ocspssd.snowflakecomputing.com/ocsp/fetch", "https://ocspssd.snowflakecomputing.com/ocsp/retry"), Arguments.of( "s3.amazoncomaws.COM", "https://ocspssd.snowflakecomputing.COM/ocsp/fetch", "https://ocspssd.snowflakecomputing.COM/ocsp/retry"), Arguments.of( "s3.amazoncomaws.com.cn", "https://ocspssd.snowflakecomputing.cn/ocsp/fetch", "https://ocspssd.snowflakecomputing.cn/ocsp/retry"), Arguments.of( "S3.AMAZONCOMAWS.COM.CN", "https://ocspssd.snowflakecomputing.CN/ocsp/fetch", "https://ocspssd.snowflakecomputing.CN/ocsp/retry")); } } @ParameterizedTest(name = "For host {0} cache server fetch url should be {1} and retry url {2}") @ArgumentsSource(URLProvider.class) public void shouldChooseOcspCacheServerUrls( String host, String expectedFetchUrl, String expectedRetryUrl) { SFTrustManager.OCSPCacheServer ocspCacheServer = new SFTrustManager.OCSPCacheServer(); ocspCacheServer.resetOCSPResponseCacheServer(host); assertEquals(expectedFetchUrl, ocspCacheServer.SF_OCSP_RESPONSE_CACHE_SERVER); assertEquals(expectedRetryUrl, ocspCacheServer.SF_OCSP_RESPONSE_RETRY_URL); } } ================================================ FILE: src/test/java/net/snowflake/client/internal/core/ObjectMapperTest.java ================================================ package net.snowflake.client.internal.core; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertTrue; import com.fasterxml.jackson.databind.DeserializationFeature; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.MapperFeature; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.SerializationFeature; import java.nio.charset.StandardCharsets; import java.sql.SQLException; import java.util.Base64; import java.util.Calendar; 
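// Context for the tests below: Jackson 2.15+ enforces a StreamReadConstraints
// maximum string length (20,000,000 chars by default, mirrored in
// jacksonDefaultMaxStringLength). ObjectMapperFactory lets a JVM system
// property, whose name is held in ObjectMapperFactory.MAX_JSON_STRING_LENGTH_JVM,
// raise that limit; a sketch (the actual property name is the constant's value):
//   System.setProperty(ObjectMapperFactory.MAX_JSON_STRING_LENGTH_JVM, "45000000");
// Invalid values are logged and fall back to DEFAULT_MAX_JSON_STRING_LEN, as
// testInvalidMaxJsonStringLength verifies.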
import java.util.Date; import java.util.HashMap; import java.util.Map; import java.util.stream.Stream; import net.snowflake.client.internal.jdbc.SnowflakeUtil; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtensionContext; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.ArgumentsProvider; import org.junit.jupiter.params.provider.ArgumentsSource; import org.mockito.Mockito; public class ObjectMapperTest { private static final int jacksonDefaultMaxStringLength = 20_000_000; static String originalLogger; static class DataProvider implements ArgumentsProvider { @Override public Stream provideArguments(ExtensionContext context) throws Exception { return Stream.of( Arguments.of(16 * 1024 * 1024, jacksonDefaultMaxStringLength), Arguments.of(16 * 1024 * 1024, 23_000_000), Arguments.of(32 * 1024 * 1024, 45_000_000), Arguments.of(64 * 1024 * 1024, 90_000_000), Arguments.of(128 * 1024 * 1024, 180_000_000)); } } @BeforeAll public static void setProperty() { originalLogger = System.getProperty("net.snowflake.jdbc.loggerImpl"); System.setProperty("net.snowflake.jdbc.loggerImpl", "net.snowflake.client.log.JDK14Logger"); } @AfterAll public static void clearProperty() { if (originalLogger != null) { System.setProperty("net.snowflake.jdbc.loggerImpl", originalLogger); } else { System.clearProperty("net.snowflake.jdbc.loggerImpl"); } System.clearProperty(ObjectMapperFactory.MAX_JSON_STRING_LENGTH_JVM); } private static void setJacksonDefaultMaxStringLength(int maxJsonStringLength) { System.setProperty( ObjectMapperFactory.MAX_JSON_STRING_LENGTH_JVM, Integer.toString(maxJsonStringLength)); } @Test public void testInvalidMaxJsonStringLength() throws SQLException { System.setProperty(ObjectMapperFactory.MAX_JSON_STRING_LENGTH_JVM, "abc"); // calling getObjectMapper() should log the exception but not throw // default maxJsonStringLength value will be used ObjectMapper mapper = ObjectMapperFactory.getObjectMapper(); int stringLengthInMapper = mapper.getFactory().streamReadConstraints().getMaxStringLength(); assertEquals(ObjectMapperFactory.DEFAULT_MAX_JSON_STRING_LEN, stringLengthInMapper); } @ParameterizedTest @ArgumentsSource(DataProvider.class) public void testObjectMapperWithLargeJsonString(int lobSizeInBytes, int maxJsonStringLength) { setJacksonDefaultMaxStringLength(maxJsonStringLength); ObjectMapper mapper = ObjectMapperFactory.getObjectMapper(); try { JsonNode jsonNode = mapper.readTree(generateBase64EncodedJsonString(lobSizeInBytes)); assertNotNull(jsonNode); } catch (Exception e) { // exception is expected when jackson's default maxStringLength value is used while retrieving // 16M string data assertEquals(jacksonDefaultMaxStringLength, maxJsonStringLength); } } // ------------------------------------------------------------------------- // getObjectMapper() -- config flag assertions // ------------------------------------------------------------------------- @Test public void testGetObjectMapper_enablesUseBigDecimalForFloats() { ObjectMapper mapper = ObjectMapperFactory.getObjectMapper(); assertTrue(mapper.isEnabled(DeserializationFeature.USE_BIG_DECIMAL_FOR_FLOATS)); } @Test public void testGetObjectMapper_disablesAccessModifierOverrides() { ObjectMapper mapper = ObjectMapperFactory.getObjectMapper(); assertFalse(mapper.isEnabled(MapperFeature.OVERRIDE_PUBLIC_ACCESS_MODIFIERS)); 
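// With OVERRIDE_PUBLIC_ACCESS_MODIFIERS (asserted above) and
// CAN_OVERRIDE_ACCESS_MODIFIERS (asserted below) both disabled, Jackson never
// calls setAccessible(...) on members during (de)serialization, so the mapper
// stays usable under restrictive SecurityManager or JPMS settings; this note
// reflects the documented MapperFeature semantics, not driver-specific code.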
assertFalse(mapper.isEnabled(MapperFeature.CAN_OVERRIDE_ACCESS_MODIFIERS)); } @Test public void testGetObjectMapper_appliesDefaultMaxStringLength() { System.clearProperty(ObjectMapperFactory.MAX_JSON_STRING_LENGTH_JVM); ObjectMapper mapper = ObjectMapperFactory.getObjectMapper(); assertEquals( ObjectMapperFactory.DEFAULT_MAX_JSON_STRING_LEN, mapper.getFactory().streamReadConstraints().getMaxStringLength()); } // ------------------------------------------------------------------------- // getObjectMapperForSession() -- coverage of the session-aware path // ------------------------------------------------------------------------- @Test public void testGetObjectMapperForSession_withDateFormat_serializesDate() throws Exception { ObjectMapper mapper = objectMapperForSession("YYYY-MM-DD"); assertEquals( "\"2025-03-09\"", mapper.writeValueAsString(new Date(2025 - 1900, Calendar.MARCH, 9))); } @Test public void testGetObjectMapperForSession_withDateFormat_deserializesDate() throws Exception { ObjectMapper mapper = objectMapperForSession("YYYY-MM-DD"); assertEquals( new Date(2025 - 1900, Calendar.MARCH, 9), mapper.readValue("\"2025-03-09\"", Date.class)); } @Test public void testGetObjectMapperForSession_withDateTimeFormat_serializesDate() throws Exception { ObjectMapper mapper = objectMapperForSession("YYYY-MM-DD HH24:MI:SS"); assertEquals( "\"2025-03-09 14:30:00\"", mapper.writeValueAsString(new Date(2025 - 1900, Calendar.MARCH, 9, 14, 30, 0))); } @Test public void testGetObjectMapperForSession_nullSession_returnsBaseMapper() { ObjectMapper mapper = ObjectMapperFactory.getObjectMapperForSession(null); assertNotNull(mapper); assertTrue(mapper.isEnabled(DeserializationFeature.USE_BIG_DECIMAL_FOR_FLOATS)); } @Test public void testGetObjectMapperForSession_nullParameters_returnsBaseMapper() { SFBaseSession session = Mockito.mock(SFBaseSession.class); Mockito.when(session.getCommonParameters()).thenReturn(null); ObjectMapper mapper = ObjectMapperFactory.getObjectMapperForSession(session); assertNotNull(mapper); assertTrue(mapper.isEnabled(DeserializationFeature.USE_BIG_DECIMAL_FOR_FLOATS)); } @Test public void testGetObjectMapperForSession_missingDateOutputFormat_keepsTimestampsEnabled() { SFBaseSession session = Mockito.mock(SFBaseSession.class); Mockito.when(session.getCommonParameters()).thenReturn(new HashMap<>()); ObjectMapper mapper = ObjectMapperFactory.getObjectMapperForSession(session); assertTrue(mapper.isEnabled(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS)); } private static ObjectMapper objectMapperForSession(String dateOutputFormat) { SFBaseSession session = Mockito.mock(SFBaseSession.class); Map params = new HashMap<>(); params.put("DATE_OUTPUT_FORMAT", dateOutputFormat); Mockito.when(session.getCommonParameters()).thenReturn(params); return ObjectMapperFactory.getObjectMapperForSession(session); } private String generateBase64EncodedJsonString(int numChar) { StringBuilder jsonStr = new StringBuilder(); String largeStr = SnowflakeUtil.randomAlphaNumeric(numChar); // encode the string and put it into a JSON formatted string jsonStr.append("[\"").append(encodeStringToBase64(largeStr)).append("\"]"); return jsonStr.toString(); } private String encodeStringToBase64(String stringToBeEncoded) { return Base64.getEncoder().encodeToString(stringToBeEncoded.getBytes(StandardCharsets.UTF_8)); } } ================================================ FILE: src/test/java/net/snowflake/client/internal/core/PrivateLinkDetectorTest.java ================================================ package 
net.snowflake.client.internal.core;

import static org.junit.jupiter.api.Assertions.assertEquals;

import java.util.stream.Stream;
import org.junit.jupiter.api.extension.ExtensionContext;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.ArgumentsProvider;
import org.junit.jupiter.params.provider.ArgumentsSource;

public class PrivateLinkDetectorTest {

  static class DataProvider implements ArgumentsProvider {
    @Override
    public Stream<? extends Arguments> provideArguments(ExtensionContext context)
        throws Exception {
      return Stream.of(
          Arguments.of("snowhouse.snowflakecomputing.com", false),
          Arguments.of("snowhouse.privatelink.snowflakecomputing.com", true),
          Arguments.of("snowhouse.PRIVATELINK.snowflakecomputing.com", true),
          Arguments.of("snowhouse.snowflakecomputing.cn", false),
          Arguments.of("snowhouse.privatelink.snowflakecomputing.cn", true),
          Arguments.of("snowhouse.PRIVATELINK.snowflakecomputing.cn", true),
          Arguments.of("snowhouse.snowflakecomputing.xyz", false),
          Arguments.of("snowhouse.privatelink.snowflakecomputing.xyz", true),
          Arguments.of("snowhouse.PRIVATELINK.snowflakecomputing.xyz", true));
    }
  }

  @ParameterizedTest
  @ArgumentsSource(DataProvider.class)
  public void shouldDetectPrivateLinkHost(String host, boolean expectedToBePrivateLink) {
    assertEquals(
        expectedToBePrivateLink,
        PrivateLinkDetector.isPrivateLink(host),
        String.format("Expecting %s to be private link: %s", host, expectedToBePrivateLink));
  }
}

================================================
FILE: src/test/java/net/snowflake/client/internal/core/ProgrammaticAccessTokenAuthFlowLatestIT.java
================================================
package net.snowflake.client.internal.core;

import java.util.HashMap;
import net.snowflake.client.api.auth.AuthenticatorType;
import net.snowflake.client.api.exception.SnowflakeSQLException;
import net.snowflake.client.category.TestTags;
import net.snowflake.client.internal.jdbc.BaseWiremockTest;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;

@Tag(TestTags.CORE)
public class ProgrammaticAccessTokenAuthFlowLatestIT extends BaseWiremockTest {

  private static final String SCENARIOS_BASE_DIR = MAPPINGS_BASE_DIR + "/pat";
  private static final String SUCCESSFUL_FLOW_SCENARIO_MAPPINGS =
      SCENARIOS_BASE_DIR + "/successful_flow.json";
  private static final String INVALID_TOKEN_SCENARIO_MAPPINGS =
      SCENARIOS_BASE_DIR + "/invalid_pat_token.json";

  @Test
  public void successfulFlowScenarioPatAsToken() throws SFException, SnowflakeSQLException {
    importMappingFromResources(SUCCESSFUL_FLOW_SCENARIO_MAPPINGS);
    SFLoginInput loginInputWithPatAsToken = createLoginInputStub();
    SFLoginOutput loginOutput =
        SessionUtil.newSession(loginInputWithPatAsToken, new HashMap<>(), "INFO");
    assertSuccessfulLoginOutput(loginOutput);
  }

  @Test
  public void invalidTokenScenario() {
    importMappingFromResources(INVALID_TOKEN_SCENARIO_MAPPINGS);
    SnowflakeSQLException e =
        Assertions.assertThrows(
            SnowflakeSQLException.class,
            () -> SessionUtil.newSession(createLoginInputStub(), new HashMap<>(), "INFO"));
    Assertions.assertEquals("Programmatic access token is invalid.", e.getMessage());
  }

  private void assertSuccessfulLoginOutput(SFLoginOutput loginOutput) {
    Assertions.assertNotNull(loginOutput);
    Assertions.assertEquals("session token", loginOutput.getSessionToken());
    Assertions.assertEquals("master token", loginOutput.getMasterToken());
    Assertions.assertEquals(14400,
loginOutput.getMasterTokenValidityInSeconds()); Assertions.assertEquals("8.48.0", loginOutput.getDatabaseVersion()); Assertions.assertEquals("TEST_DHEYMAN", loginOutput.getSessionDatabase()); Assertions.assertEquals("TEST_JDBC", loginOutput.getSessionSchema()); Assertions.assertEquals("ANALYST", loginOutput.getSessionRole()); Assertions.assertEquals("TEST_XSMALL", loginOutput.getSessionWarehouse()); Assertions.assertEquals("1172562260498", loginOutput.getSessionId()); Assertions.assertEquals(1, loginOutput.getCommonParams().size()); Assertions.assertEquals(4, loginOutput.getCommonParams().get("CLIENT_PREFETCH_THREADS")); } private SFLoginInput createLoginInputStub() { SFLoginInput input = new SFLoginInput(); input.setAuthenticator(AuthenticatorType.PROGRAMMATIC_ACCESS_TOKEN.name()); input.setServerUrl(String.format("http://%s:%d/", WIREMOCK_HOST, wiremockHttpPort)); input.setUserName("MOCK_USERNAME"); input.setAccountName("MOCK_ACCOUNT_NAME"); input.setAppId("MOCK_APP_ID"); input.setAppVersion("MOCK_APP_VERSION"); input.setToken("MOCK_TOKEN"); input.setOCSPMode(OCSPMode.FAIL_OPEN); input.setHttpClientSettingsKey(new HttpClientSettingsKey(OCSPMode.FAIL_OPEN)); input.setLoginTimeout(1000); input.setSessionParameters(new HashMap<>()); return input; } } ================================================ FILE: src/test/java/net/snowflake/client/internal/core/QueryContextCacheTest.java ================================================ package net.snowflake.client.internal.core; import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import org.junit.jupiter.api.Test; public class QueryContextCacheTest { private QueryContextCache qcc = null; private long BASE_READ_TIMESTAMP = 1668727958; private String CONTEXT = "Some query context"; private long BASE_ID = 0; private long BASE_PRIORITY = 0; private int MAX_CAPACITY = 5; private long[] expectedIDs; private long[] expectedReadTimestamp; private long[] expectedPriority; private void initCache() { qcc = new QueryContextCache(MAX_CAPACITY); } private void initCacheWithData() { initCacheWithDataWithContext(CONTEXT); } private void initCacheWithDataWithContext(String context) { qcc = new QueryContextCache(MAX_CAPACITY); expectedIDs = new long[MAX_CAPACITY]; expectedReadTimestamp = new long[MAX_CAPACITY]; expectedPriority = new long[MAX_CAPACITY]; for (int i = 0; i < MAX_CAPACITY; i++) { expectedIDs[i] = BASE_ID + i; expectedReadTimestamp[i] = BASE_READ_TIMESTAMP + i; expectedPriority[i] = BASE_PRIORITY + i; qcc.merge(expectedIDs[i], expectedReadTimestamp[i], expectedPriority[i], context); } qcc.syncPriorityMap(); } private void initCacheWithDataInRandomOrder() { qcc = new QueryContextCache(MAX_CAPACITY); expectedIDs = new long[MAX_CAPACITY]; expectedReadTimestamp = new long[MAX_CAPACITY]; expectedPriority = new long[MAX_CAPACITY]; for (int i = 0; i < MAX_CAPACITY; i++) { expectedIDs[i] = BASE_ID + i; expectedReadTimestamp[i] = BASE_READ_TIMESTAMP + i; expectedPriority[i] = BASE_PRIORITY + i; } qcc.merge(expectedIDs[3], expectedReadTimestamp[3], expectedPriority[3], CONTEXT); qcc.merge(expectedIDs[2], expectedReadTimestamp[2], expectedPriority[2], CONTEXT); qcc.merge(expectedIDs[4], expectedReadTimestamp[4], expectedPriority[4], CONTEXT); qcc.merge(expectedIDs[0], expectedReadTimestamp[0], expectedPriority[0], CONTEXT); qcc.merge(expectedIDs[1], expectedReadTimestamp[1], 
================================================
FILE: src/test/java/net/snowflake/client/internal/core/QueryContextCacheTest.java
================================================
package net.snowflake.client.internal.core;

import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;

import org.junit.jupiter.api.Test;

public class QueryContextCacheTest {

  private QueryContextCache qcc = null;
  private long BASE_READ_TIMESTAMP = 1668727958;
  private String CONTEXT = "Some query context";
  private long BASE_ID = 0;
  private long BASE_PRIORITY = 0;
  private int MAX_CAPACITY = 5;
  private long[] expectedIDs;
  private long[] expectedReadTimestamp;
  private long[] expectedPriority;

  private void initCache() {
    qcc = new QueryContextCache(MAX_CAPACITY);
  }

  private void initCacheWithData() {
    initCacheWithDataWithContext(CONTEXT);
  }

  private void initCacheWithDataWithContext(String context) {
    qcc = new QueryContextCache(MAX_CAPACITY);
    expectedIDs = new long[MAX_CAPACITY];
    expectedReadTimestamp = new long[MAX_CAPACITY];
    expectedPriority = new long[MAX_CAPACITY];
    for (int i = 0; i < MAX_CAPACITY; i++) {
      expectedIDs[i] = BASE_ID + i;
      expectedReadTimestamp[i] = BASE_READ_TIMESTAMP + i;
      expectedPriority[i] = BASE_PRIORITY + i;
      qcc.merge(expectedIDs[i], expectedReadTimestamp[i], expectedPriority[i], context);
    }
    qcc.syncPriorityMap();
  }

  private void initCacheWithDataInRandomOrder() {
    qcc = new QueryContextCache(MAX_CAPACITY);
    expectedIDs = new long[MAX_CAPACITY];
    expectedReadTimestamp = new long[MAX_CAPACITY];
    expectedPriority = new long[MAX_CAPACITY];
    for (int i = 0; i < MAX_CAPACITY; i++) {
      expectedIDs[i] = BASE_ID + i;
      expectedReadTimestamp[i] = BASE_READ_TIMESTAMP + i;
      expectedPriority[i] = BASE_PRIORITY + i;
    }
    qcc.merge(expectedIDs[3], expectedReadTimestamp[3], expectedPriority[3], CONTEXT);
    qcc.merge(expectedIDs[2], expectedReadTimestamp[2], expectedPriority[2], CONTEXT);
    qcc.merge(expectedIDs[4], expectedReadTimestamp[4], expectedPriority[4], CONTEXT);
    qcc.merge(expectedIDs[0], expectedReadTimestamp[0], expectedPriority[0], CONTEXT);
    qcc.merge(expectedIDs[1], expectedReadTimestamp[1], expectedPriority[1], CONTEXT);
    qcc.syncPriorityMap();
  }

  /** Test for empty cache */
  @Test
  public void testIsEmpty() throws Exception {
    initCache();
    assertThat("Empty cache", qcc.getSize() == 0);
  }

  @Test
  public void testWithSomeData() throws Exception {
    initCacheWithData();
    // Compare elements
    assertCacheData();
  }

  @Test
  public void testWithSomeDataInRandomOrder() throws Exception {
    initCacheWithDataInRandomOrder();
    // Compare elements
    assertCacheData();
  }

  @Test
  public void testMoreThanCapacity() throws Exception {
    initCacheWithData();
    // Add one more element at the end
    int i = MAX_CAPACITY;
    qcc.merge(BASE_ID + i, BASE_READ_TIMESTAMP + i, BASE_PRIORITY + i, CONTEXT);
    qcc.syncPriorityMap();
    qcc.checkCacheCapacity();
    // Compare elements
    assertCacheData();
  }

  @Test
  public void testUpdateTimestamp() throws Exception {
    initCacheWithData();
    // Add one more element with a new timestamp for an existing id
    int updatedID = 1;
    expectedReadTimestamp[updatedID] = BASE_READ_TIMESTAMP + updatedID + 10;
    qcc.merge(
        BASE_ID + updatedID, expectedReadTimestamp[updatedID], BASE_PRIORITY + updatedID, CONTEXT);
    qcc.syncPriorityMap();
    qcc.checkCacheCapacity();
    // Compare elements
    assertCacheData();
  }

  @Test
  public void testUpdatePriority() throws Exception {
    initCacheWithData();
    // Add one more element with a new priority for an existing id
    int updatedID = 3;
    long updatedPriority = BASE_PRIORITY + updatedID + 7;
    expectedPriority[updatedID] = updatedPriority;
    qcc.merge(
        BASE_ID + updatedID, BASE_READ_TIMESTAMP + updatedID, expectedPriority[updatedID], CONTEXT);
    qcc.syncPriorityMap();
    qcc.checkCacheCapacity();
    for (int i = updatedID; i < MAX_CAPACITY - 1; i++) {
      expectedIDs[i] = expectedIDs[i + 1];
      expectedReadTimestamp[i] = expectedReadTimestamp[i + 1];
      expectedPriority[i] = expectedPriority[i + 1];
    }
    expectedIDs[MAX_CAPACITY - 1] = BASE_ID + updatedID;
    expectedReadTimestamp[MAX_CAPACITY - 1] = BASE_READ_TIMESTAMP + updatedID;
    expectedPriority[MAX_CAPACITY - 1] = updatedPriority;
    assertCacheData();
  }

  @Test
  public void testAddSamePriority() throws Exception {
    initCacheWithData();
    // Add one more element with the same priority as an existing entry
    int i = MAX_CAPACITY;
    long updatedPriority = BASE_PRIORITY + 1;
    qcc.merge(BASE_ID + i, BASE_READ_TIMESTAMP + i, updatedPriority, CONTEXT);
    qcc.syncPriorityMap();
    qcc.checkCacheCapacity();
    expectedIDs[1] = BASE_ID + i;
    expectedReadTimestamp[1] = BASE_READ_TIMESTAMP + i;
    // Compare elements
    assertCacheData();
  }

  @Test
  public void testAddSameIDButStaleTimestamp() throws Exception {
    initCacheWithData();
    // Add one more element with a stale timestamp for an existing id
    int i = 2;
    qcc.merge(BASE_ID + i, BASE_READ_TIMESTAMP + i - 10, BASE_PRIORITY + i, CONTEXT);
    qcc.syncPriorityMap();
    qcc.checkCacheCapacity();
    // Compare elements
    assertCacheData();
  }

  @Test
  public void testEmptyCacheWithNullData() throws Exception {
    initCacheWithData();
    qcc.deserializeQueryContextJson(null);
    assertThat("Empty cache", qcc.getSize() == 0);
  }

  @Test
  public void testEmptyCacheWithEmptyResponseData() throws Exception {
    initCacheWithData();
    qcc.deserializeQueryContextJson("");
    assertThat("Empty cache", qcc.getSize() == 0);
  }

  @Test
  public void testSerializeRequestAndDeserializeResponseData() throws Exception {
    // Init qcc
    initCacheWithData();
    assertCacheData();

    QueryContextDTO requestData = qcc.serializeQueryContextDTO();

    // Clear qcc
    qcc.clearCache();
    assertThat("Empty cache", qcc.getSize() == 0);

    qcc.deserializeQueryContextDTO(requestData);
    assertCacheData();
  }

  @Test
  public void testSerializeRequestAndDeserializeResponseDataWithNullContext() throws Exception {
    // Init qcc
    initCacheWithDataWithContext(null);
    assertCacheDataWithContext(null);

    QueryContextDTO requestData = qcc.serializeQueryContextDTO();

    // Clear qcc
    qcc.clearCache();
    assertThat("Empty cache", qcc.getSize() == 0);

    qcc.deserializeQueryContextDTO(requestData);
    assertCacheDataWithContext(null);

    QueryContextCache mockQcc = spy(qcc);
    mockQcc.deserializeQueryContextDTO(null);
    verify(mockQcc).clearCache();
    verify(mockQcc, times(2)).logCacheEntries();
  }

  private void assertCacheData() {
    assertCacheDataWithContext(CONTEXT);
  }

  private void assertCacheDataWithContext(String context) {
    int size = qcc.getSize();
    assertThat("Non empty cache", size == MAX_CAPACITY);

    long[] ids = new long[size];
    long[] readTimestamps = new long[size];
    long[] priorities = new long[size];
    String[] contexts = new String[size];

    // Compare elements
    qcc.getElements(ids, readTimestamps, priorities, contexts);
    for (int i = 0; i < size; i++) {
      assertEquals(expectedIDs[i], ids[i]);
      assertEquals(expectedReadTimestamp[i], readTimestamps[i]);
      assertEquals(expectedPriority[i], priorities[i]);
      assertEquals(context, contexts[i]);
    }
  }
}
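Taken together, the expectations above imply the cache's contract: merge upserts by id, a stale timestamp for a known id is ignored, reusing a priority evicts the previous holder, entries stay sorted by priority, and capacity trimming drops the largest (least important) priority values. A minimal sketch of that contract, assuming a TreeMap keyed by priority (the real QueryContextCache also maintains id/priority lookup maps behind syncPriorityMap):

import java.util.TreeMap;

class PriorityCacheSketch {
  static class Entry {
    final long id;
    final long timestamp;
    final long priority;
    final String context;

    Entry(long id, long timestamp, long priority, String context) {
      this.id = id;
      this.timestamp = timestamp;
      this.priority = priority;
      this.context = context;
    }
  }

  private final int capacity;
  private final TreeMap<Long, Entry> byPriority = new TreeMap<>();

  PriorityCacheSketch(int capacity) {
    this.capacity = capacity;
  }

  void merge(long id, long timestamp, long priority, String context) {
    Entry existing =
        byPriority.values().stream().filter(e -> e.id == id).findFirst().orElse(null);
    if (existing != null) {
      if (timestamp < existing.timestamp) {
        return; // stale update for a known id: ignore
      }
      byPriority.remove(existing.priority); // re-insert under the new priority
    }
    byPriority.put(priority, new Entry(id, timestamp, priority, context)); // same priority evicts
  }

  void trimToCapacity() {
    while (byPriority.size() > capacity) {
      byPriority.pollLastEntry(); // evict the entry with the largest priority value
    }
  }
}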
================================================
FILE: src/test/java/net/snowflake/client/internal/core/QueryContextEntryDTOTest.java
================================================
package net.snowflake.client.internal.core;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;

import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;

class QueryContextEntryDTOTest {

  @Mock private OpaqueContextDTO mockContext;

  @BeforeEach
  void setUp() {
    MockitoAnnotations.openMocks(this);
  }

  @Test
  void testDefaultConstructor() {
    QueryContextEntryDTO entry = new QueryContextEntryDTO();
    assertNotNull(entry);
    assertEquals(0, entry.getId());
    assertEquals(0, entry.getTimestamp());
    assertEquals(0, entry.getPriority());
    assertNull(entry.getContext());
  }

  @Test
  void testParameterizedConstructor() {
    QueryContextEntryDTO entry = new QueryContextEntryDTO(1L, 100L, 10L, mockContext);
    assertEquals(1L, entry.getId());
    assertEquals(100L, entry.getTimestamp());
    assertEquals(10L, entry.getPriority());
    assertEquals(mockContext, entry.getContext());
  }

  @Test
  void testSettersAndGetters() {
    QueryContextEntryDTO entry = new QueryContextEntryDTO();
    entry.setId(2L);
    entry.setTimestamp(200L);
    entry.setPriority(20L);
    entry.setContext(mockContext);
    assertEquals(2L, entry.getId());
    assertEquals(200L, entry.getTimestamp());
    assertEquals(20L, entry.getPriority());
    assertEquals(mockContext, entry.getContext());
  }
}
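The DTO itself is a plain bean whose shape is fully pinned down by these tests; a minimal sketch (the shipped class may carry serialization annotations not shown here):

public class QueryContextEntryDTOSketch {
  private long id;
  private long timestamp;
  private long priority;
  private OpaqueContextDTO context; // opaque, server-provided payload

  public QueryContextEntryDTOSketch() {}

  public QueryContextEntryDTOSketch(long id, long timestamp, long priority, OpaqueContextDTO context) {
    this.id = id;
    this.timestamp = timestamp;
    this.priority = priority;
    this.context = context;
  }

  public long getId() { return id; }
  public void setId(long id) { this.id = id; }
  public long getTimestamp() { return timestamp; }
  public void setTimestamp(long timestamp) { this.timestamp = timestamp; }
  public long getPriority() { return priority; }
  public void setPriority(long priority) { this.priority = priority; }
  public OpaqueContextDTO getContext() { return context; }
  public void setContext(OpaqueContextDTO context) { this.context = context; }
}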
================================================
FILE: src/test/java/net/snowflake/client/internal/core/SFArrowResultSetIT.java
================================================
package net.snowflake.client.internal.core;

import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.sql.ResultSet;
import java.sql.Statement;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Base64;
import java.util.Date;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Random;
import net.snowflake.client.annotations.DontRunOnThinJar;
import net.snowflake.client.api.exception.ErrorCode;
import net.snowflake.client.api.exception.SnowflakeSQLException;
import net.snowflake.client.api.resultset.SnowflakeResultSet;
import net.snowflake.client.api.resultset.SnowflakeResultSetSerializable;
import net.snowflake.client.category.TestTags;
import net.snowflake.client.internal.jdbc.ArrowResultChunk;
import net.snowflake.client.internal.jdbc.BaseJDBCWithSharedConnectionIT;
import net.snowflake.client.internal.jdbc.SnowflakeResultChunk;
import net.snowflake.client.internal.jdbc.SnowflakeResultSetSerializableV1;
import net.snowflake.client.internal.jdbc.telemetry.NoOpTelemetryClient;
import org.apache.arrow.memory.BufferAllocator;
import org.apache.arrow.memory.RootAllocator;
import org.apache.arrow.vector.BigIntVector;
import org.apache.arrow.vector.BitVector;
import org.apache.arrow.vector.DateDayVector;
import org.apache.arrow.vector.DecimalVector;
import org.apache.arrow.vector.FieldVector;
import org.apache.arrow.vector.Float8Vector;
import org.apache.arrow.vector.IntVector;
import org.apache.arrow.vector.SmallIntVector;
import org.apache.arrow.vector.TinyIntVector;
import org.apache.arrow.vector.VarBinaryVector;
import org.apache.arrow.vector.VarCharVector;
import org.apache.arrow.vector.VectorSchemaRoot;
import org.apache.arrow.vector.complex.StructVector;
import org.apache.arrow.vector.dictionary.DictionaryProvider;
import org.apache.arrow.vector.ipc.ArrowStreamWriter;
import org.apache.arrow.vector.ipc.ArrowWriter;
import org.apache.arrow.vector.types.Types;
import org.apache.arrow.vector.types.pojo.ArrowType;
import org.apache.arrow.vector.types.pojo.Field;
import org.apache.arrow.vector.types.pojo.FieldType;
import org.apache.arrow.vector.types.pojo.Schema;
import org.apache.arrow.vector.util.Text;
import org.apache.commons.lang3.RandomStringUtils;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

@Tag(TestTags.ARROW)
public class SFArrowResultSetIT extends BaseJDBCWithSharedConnectionIT {

  private Random random = new Random();

  /**
   * Allocator for Arrow. The RootAllocator is shaded, so it cannot be overridden when testing the
   * thin or fat jar.
   */
  protected BufferAllocator allocator = new RootAllocator(Long.MAX_VALUE);

  /** Temporary folder to store result files */
  @TempDir private File tempDir;

  /** Test the case that all results are returned in the first chunk */
  @Test
  @DontRunOnThinJar
  public void testNoOfflineData() throws Throwable {
    List<Field> fieldList = new ArrayList<>();
    Map<String, String> customFieldMeta = new HashMap<>();
    customFieldMeta.put("logicalType", "FIXED");
    customFieldMeta.put("scale", "0");
    FieldType type = new FieldType(false, Types.MinorType.INT.getType(), null, customFieldMeta);
    fieldList.add(new Field("", type, null));
    Schema schema = new Schema(fieldList);
    Object[][] data = generateData(schema, 1000);
    File file = createArrowFile("testNoOfflineData_0_0_0", schema, data, 10);

    int dataSize = (int) file.length();
    byte[] dataBytes = new byte[dataSize];
    try (InputStream is = new FileInputStream(file)) {
      is.read(dataBytes, 0, dataSize);
    }

    SnowflakeResultSetSerializableV1 resultSetSerializable = new SnowflakeResultSetSerializableV1();
    resultSetSerializable.setRootAllocator(new RootAllocator(Long.MAX_VALUE));
    resultSetSerializable.setFirstChunkStringData(Base64.getEncoder().encodeToString(dataBytes));
    resultSetSerializable.setFirstChunkByteData(dataBytes);
    resultSetSerializable.setChunkFileCount(0);

    SFArrowResultSet resultSet =
        new SFArrowResultSet(resultSetSerializable, new NoOpTelemetryClient(), false);

    int i = 0;
    while (resultSet.next()) {
      int val = resultSet.getInt(1);
      assertThat(val, equalTo(data[0][i]));
      i++;
    }
    // assert that total rowcount is 1000
    assertThat(i, is(1000));
  }

  @Test
  public void testEmptyResultSet() throws Throwable {
    SnowflakeResultSetSerializableV1 resultSetSerializable = new SnowflakeResultSetSerializableV1();
    resultSetSerializable.setFirstChunkStringData(
        Base64.getEncoder().encodeToString("".getBytes(StandardCharsets.UTF_8)));
    resultSetSerializable.setChunkFileCount(0);

    SFArrowResultSet resultSet =
        new SFArrowResultSet(resultSetSerializable, new NoOpTelemetryClient(), false);
    assertThat(resultSet.next(), is(false));
    assertThat(resultSet.isLast(), is(false));
    assertThat(resultSet.isAfterLast(), is(true));

    resultSetSerializable.setFirstChunkStringData(null);
    resultSet = new SFArrowResultSet(resultSetSerializable, new NoOpTelemetryClient(), false);
    assertThat(resultSet.next(), is(false));
    assertThat(resultSet.isLast(), is(false));
    assertThat(resultSet.isAfterLast(), is(true));
  }

  /** Testing the case that all data comes from the chunk downloader */
  @Test
  @DontRunOnThinJar
  public void testOnlyOfflineData() throws Throwable {
    final int colCount = 2;
    final int chunkCount = 10;

    // generate data
    List<Field> fieldList = new ArrayList<>();
    Map<String, String> customFieldMeta = new HashMap<>();
    customFieldMeta.put("logicalType", "FIXED");
    customFieldMeta.put("scale", "0");
    FieldType type = new FieldType(false, Types.MinorType.INT.getType(), null, customFieldMeta);
    for (int i = 0; i < colCount; i++) {
      fieldList.add(new Field("col_" + i, type, null));
    }
    Schema schema = new Schema(fieldList);

    // generate 10 chunks of data
    List<Object[][]> dataLists = new ArrayList<>();
    List<File> fileLists = new ArrayList<>();
    for (int i = 0; i < chunkCount; i++) {
      Object[][] data = generateData(schema, 500);
      File file = createArrowFile("testOnlyOfflineData_" + i, schema, data, 10);
      dataLists.add(data);
      fileLists.add(file);
    }

    SnowflakeResultSetSerializableV1 resultSetSerializable = new SnowflakeResultSetSerializableV1();
    resultSetSerializable.setChunkDownloader(new MockChunkDownloader(fileLists));
    resultSetSerializable.setChunkFileCount(chunkCount);

    SFArrowResultSet resultSet =
        new SFArrowResultSet(resultSetSerializable, new NoOpTelemetryClient(), false);

    int index = 0;
    while (resultSet.next()) {
      for (int i = 0; i < colCount; i++) {
        int val = resultSet.getInt(i + 1);
        Integer expectedVal = (Integer) dataLists.get(index / 500)[i][index % 500];
        assertThat(val, is(expectedVal));
      }
      index++;
    }
    // assert that total rowcount is 5000
    assertThat(index, is(5000));
  }

  /** Testing the case that the first response and the chunk downloader both carry data */
  @Test
  @DontRunOnThinJar
  public void testFirstResponseAndOfflineData() throws Throwable {
    final int colCount = 2;
    final int chunkCount = 10;

    // generate data
    List<Field> fieldList = new ArrayList<>();
    Map<String, String> customFieldMeta = new HashMap<>();
    customFieldMeta.put("logicalType", "FIXED");
    customFieldMeta.put("scale", "0");
    FieldType type = new FieldType(false, Types.MinorType.INT.getType(), null, customFieldMeta);
    for (int i = 0; i < colCount; i++) {
      fieldList.add(new Field("col_" + i, type, null));
    }
    Schema schema = new Schema(fieldList);

    // generate 10 chunks of data
    List<Object[][]> dataLists = new ArrayList<>();
    List<File> fileLists = new ArrayList<>();

    // first chunk set to base64 rowset
    Object[][] firstChunkData = generateData(schema, 500);
    File arrowFile = createArrowFile("testOnlyOfflineData_0", schema, firstChunkData, 10);
    dataLists.add(firstChunkData);
    int dataSize = (int) arrowFile.length();
    byte[] dataBytes = new byte[dataSize];
    try (InputStream is = new FileInputStream(arrowFile)) {
      is.read(dataBytes, 0, dataSize);
    }

    SnowflakeResultSetSerializableV1 resultSetSerializable = new SnowflakeResultSetSerializableV1();
    resultSetSerializable.setFirstChunkStringData(Base64.getEncoder().encodeToString(dataBytes));
    resultSetSerializable.setFirstChunkByteData(dataBytes);
    resultSetSerializable.setChunkFileCount(chunkCount);
    resultSetSerializable.setRootAllocator(new RootAllocator(Long.MAX_VALUE));

    // build chunk downloader
    for (int i = 0; i < chunkCount; i++) {
      Object[][] data = generateData(schema, 500);
      File file = createArrowFile("testOnlyOfflineData_" + (i + 1), schema, data, 10);
      dataLists.add(data);
      fileLists.add(file);
    }
    resultSetSerializable.setChunkDownloader(new MockChunkDownloader(fileLists));

    SFArrowResultSet resultSet =
        new SFArrowResultSet(resultSetSerializable, new NoOpTelemetryClient(), false);

    int index = 0;
    while (resultSet.next()) {
      for (int i = 0; i < colCount; i++) {
        int val = resultSet.getInt(i + 1);
        Integer expectedVal = (Integer) dataLists.get(index / 500)[i][index % 500];
        assertThat(val, is(expectedVal));
      }
      index++;
    }
    // assert that total rowcount is 5500
    assertThat(index, is(5500));
  }

  /** Class to mock the chunk downloader. It just reads files from the tmp directory one by one. */
  private class MockChunkDownloader implements ChunkDownloader {

    private List<File> resultFileNames;
    private int currentFileIndex;
    private RootAllocator rootAllocator = new RootAllocator(Long.MAX_VALUE);

    MockChunkDownloader(List<File> resultFileNames) {
      this.resultFileNames = resultFileNames;
      this.currentFileIndex = 0;
    }

    @Override
    public SnowflakeResultChunk getNextChunkToConsume() throws SnowflakeSQLException {
      if (currentFileIndex < resultFileNames.size()) {
        ArrowResultChunk resultChunk = new ArrowResultChunk("", 0, 0, 0, rootAllocator, null);
        try (InputStream is = new FileInputStream(resultFileNames.get(currentFileIndex))) {
          resultChunk.readArrowStream(is);
          currentFileIndex++;
          return resultChunk;
        } catch (IOException e) {
          throw new SnowflakeSQLException(ErrorCode.INTERNAL_ERROR, "Failed to read data");
        }
      } else {
        return null;
      }
    }

    @Override
    public DownloaderMetrics terminate() {
      return null;
    }
  }
  Object[][] generateData(Schema schema, int rowCount) {
    Object[][] data = new Object[schema.getFields().size()][rowCount];

    for (int i = 0; i < schema.getFields().size(); i++) {
      Types.MinorType type = Types.getMinorTypeForArrowType(schema.getFields().get(i).getType());

      switch (type) {
        case BIT:
          for (int j = 0; j < rowCount; j++) {
            data[i][j] = random.nextBoolean();
          }
          break;
        case INT:
          for (int j = 0; j < rowCount; j++) {
            data[i][j] = 0;
          }
          break;
        case DATEDAY:
          for (int j = 0; j < rowCount; j++) {
            data[i][j] = Date.from(Instant.now());
          }
          break;
        case BIGINT:
        case DECIMAL:
          for (int j = 0; j < rowCount; j++) {
            data[i][j] = 154639183700000L;
          }
          break;
        case FLOAT8:
          for (int j = 0; j < rowCount; j++) {
            data[i][j] = random.nextDouble();
          }
          break;
        case TINYINT:
          for (int j = 0; j < rowCount; j++) {
            data[i][j] = (byte) random.nextInt(1 << 8);
          }
          break;
        case SMALLINT:
          for (int j = 0; j < rowCount; j++) {
            data[i][j] = (short) random.nextInt(1 << 16);
          }
          break;
        case VARBINARY:
          for (int j = 0; j < rowCount; j++) {
            data[i][j] = RandomStringUtils.random(20).getBytes();
          }
          break;
        case VARCHAR:
          for (int j = 0; j < rowCount; j++) {
            data[i][j] = RandomStringUtils.random(20);
          }
          break;
          // add other data types as needed later
      }
    }
    return data;
  }

  File createArrowFile(String fileName, Schema schema, Object[][] data, int rowsPerRecordBatch)
      throws IOException {
    File file = new File(tempDir, fileName);
    file.createNewFile();
    VectorSchemaRoot root = VectorSchemaRoot.create(schema, allocator);

    try (FileOutputStream fos = new FileOutputStream(file);
        ArrowWriter writer =
            new ArrowStreamWriter(root, new DictionaryProvider.MapDictionaryProvider(), fos)) {
      writer.start();

      for (int i = 0; i < data[0].length; ) {
        int rowsToAppend = Math.min(rowsPerRecordBatch, data[0].length - i);
        root.setRowCount(rowsToAppend);

        for (int j = 0; j < data.length; j++) {
          FieldVector vector = root.getFieldVectors().get(j);

          switch (vector.getMinorType()) {
            case BIT:
              writeBitToField(vector, data[j], i, rowsToAppend);
              break;
            case INT:
              writeIntToField(vector, data[j], i, rowsToAppend);
              break;
            case TINYINT:
              writeTinyIntToField(vector, data[j], i, rowsToAppend);
              break;
            case SMALLINT:
              writeSmallIntToField(vector, data[j], i, rowsToAppend);
              break;
            case DATEDAY:
              writeDateToField(vector, data[j], i, rowsToAppend);
              break;
            case BIGINT:
              writeLongToField(vector, data[j], i, rowsToAppend);
              break;
            case FLOAT8:
              writeDoubleToField(vector, data[j], i, rowsToAppend);
              break;
            case VARBINARY:
              writeBytesToField(vector, data[j], i, rowsToAppend);
              break;
            case VARCHAR:
              writeTextToField(vector, data[j], i, rowsToAppend);
              break;
            case DECIMAL:
              writeDecimalToField(vector, data[j], i, rowsToAppend);
              break;
            case STRUCT:
              writeTimestampStructToField(vector, data[j], data[j + 1], i, rowsToAppend);
              j++;
              break;
          }
        }

        writer.writeBatch();
        i += rowsToAppend;
      }
    }
    return file;
  }
  private void writeLongToField(
      FieldVector fieldVector, Object[] data, int startIndex, int rowsToAppend) {
    BigIntVector vector = (BigIntVector) fieldVector;
    vector.setInitialCapacity(rowsToAppend);
    vector.allocateNew();
    vector.setNull(0);
    for (int i = 0; i < rowsToAppend; i++) {
      vector.setSafe(i, 1, (long) data[startIndex + i]);
    }
    // how many are set
    fieldVector.setValueCount(rowsToAppend);
  }

  private void writeBitToField(
      FieldVector fieldVector, Object[] data, int startIndex, int rowsToAppend) {
    BitVector vector = (BitVector) fieldVector;
    vector.setInitialCapacity(rowsToAppend);
    vector.allocateNew();
    vector.setNull(0);
    for (int i = 1; i < rowsToAppend; i++) {
      int val = (Boolean) data[startIndex + i] ? 1 : 0;
      vector.setSafe(i, 1, val);
    }
    // how many are set
    fieldVector.setValueCount(rowsToAppend);
  }

  private void writeDateToField(
      FieldVector fieldVector, Object[] data, int startIndex, int rowsToAppend) {
    DateDayVector dateDayVector = (DateDayVector) fieldVector;
    dateDayVector.setInitialCapacity(rowsToAppend);
    dateDayVector.allocateNew();
    dateDayVector.setNull(0);
    for (int i = 1; i < rowsToAppend; i++) {
      dateDayVector.setSafe(i, 1, (int) (((Date) data[startIndex + i]).getTime() / 1000));
    }
    // how many are set
    fieldVector.setValueCount(rowsToAppend);
  }

  private void writeDecimalToField(
      FieldVector fieldVector, Object[] data, int startIndex, int rowsToAppend) {
    DecimalVector decimalVector = (DecimalVector) fieldVector;
    decimalVector.setInitialCapacity(rowsToAppend);
    decimalVector.allocateNew();
    decimalVector.setNull(0);
    for (int i = 1; i < rowsToAppend; i++) {
      decimalVector.setSafe(i, (long) data[startIndex + i]);
    }
    // how many are set
    fieldVector.setValueCount(rowsToAppend);
  }

  private void writeDoubleToField(
      FieldVector fieldVector, Object[] data, int startIndex, int rowsToAppend) {
    Float8Vector vector = (Float8Vector) fieldVector;
    vector.setInitialCapacity(rowsToAppend);
    vector.allocateNew();
    vector.setNull(0);
    for (int i = 1; i < rowsToAppend; i++) {
      vector.setSafe(i, 1, (double) data[startIndex + i]);
    }
    // how many are set
    fieldVector.setValueCount(rowsToAppend);
  }

  private void writeIntToField(
      FieldVector fieldVector, Object[] data, int startIndex, int rowsToAppend) {
    IntVector intVector = (IntVector) fieldVector;
    intVector.setInitialCapacity(rowsToAppend);
    intVector.allocateNew();
    intVector.setNull(0);
    for (int i = 1; i < rowsToAppend; i++) {
      intVector.setSafe(i, 1, (int) data[startIndex + i]);
    }
    fieldVector.setValueCount(rowsToAppend);
  }

  private void writeSmallIntToField(
      FieldVector fieldVector, Object[] data, int startIndex, int rowsToAppend) {
    SmallIntVector smallIntVector = (SmallIntVector) fieldVector;
    smallIntVector.setInitialCapacity(rowsToAppend);
    smallIntVector.allocateNew();
    smallIntVector.setNull(0);
    for (int i = 1; i < rowsToAppend; i++) {
      smallIntVector.setSafe(i, 1, (short) data[startIndex + i]);
    }
    // how many are set
    fieldVector.setValueCount(rowsToAppend);
  }

  private void writeTinyIntToField(
      FieldVector fieldVector, Object[] data, int startIndex, int rowsToAppend) {
    TinyIntVector vector = (TinyIntVector) fieldVector;
    vector.setInitialCapacity(rowsToAppend);
    vector.allocateNew();
    vector.setNull(0);
    for (int i = 1; i < rowsToAppend; i++) {
      vector.setSafe(i, 1, (byte) data[startIndex + i]);
    }
    // how many are set
    fieldVector.setValueCount(rowsToAppend);
  }

  private void writeBytesToField(
      FieldVector fieldVector, Object[] data, int startIndex, int rowsToAppend) {
    VarBinaryVector vector = (VarBinaryVector) fieldVector;
    vector.setInitialCapacity(rowsToAppend);
    vector.allocateNew();
    vector.setNull(0);
    for (int i = 1; i < rowsToAppend; i++) {
      vector.setSafe(i, (byte[]) data[startIndex + i], 0, ((byte[]) data[startIndex + i]).length);
    }
    // how many are set
    fieldVector.setValueCount(rowsToAppend);
  }

  private void writeTextToField(
      FieldVector fieldVector, Object[] data, int startIndex, int rowsToAppend) {
    VarCharVector varCharVector = (VarCharVector) fieldVector;
    varCharVector.setInitialCapacity(rowsToAppend);
    varCharVector.allocateNew();
    varCharVector.setNull(0);
    for (int i = 1; i < rowsToAppend; i++) {
      varCharVector.setSafe(i, new Text((String) data[startIndex + i]));
    }
    // how many are set
    fieldVector.setValueCount(rowsToAppend);
  }
  private void writeTimestampStructToField(
      FieldVector fieldVector, Object[] data, Object[] data2, int startIndex, int rowsToAppend) {
    StructVector vector = (StructVector) fieldVector;
    vector.setInitialCapacity(rowsToAppend);
    vector.allocateNew();
    vector.setNull(0);
    for (int i = 1; i < rowsToAppend; i++) {
      List<FieldVector> childVectors = vector.getChildrenFromFields();
      BigIntVector v1 = (BigIntVector) childVectors.get(0);
      v1.setSafe(i, 1, (long) data[startIndex + i]);
      IntVector v2 = (IntVector) childVectors.get(1);
      v2.setSafe(i, 1, (int) data2[startIndex + i]);
    }
    // how many are set
    fieldVector.setValueCount(rowsToAppend);
  }

  /** Test that a first chunk containing struct vectors (used for timestamps) can be sorted */
  @Test
  @DontRunOnThinJar
  public void testSortedResultChunkWithStructVectors() throws Throwable {
    try (Statement statement = connection.createStatement()) {
      statement.execute("create or replace table teststructtimestamp (t1 timestamp_ltz)");
      try (ResultSet rs = statement.executeQuery("select * from teststructtimestamp")) {
        List<SnowflakeResultSetSerializable> resultSetSerializables =
            ((SnowflakeResultSet) rs).getResultSetSerializables(100 * 1024 * 1024);
        SnowflakeResultSetSerializableV1 resultSetSerializable =
            (SnowflakeResultSetSerializableV1) resultSetSerializables.get(0);

        Map<String, String> customFieldMeta = new HashMap<>();
        customFieldMeta.put("logicalType", "TIMESTAMP_LTZ");
        customFieldMeta.put("scale", "38");
        // test normal date
        FieldType fieldType =
            new FieldType(true, Types.MinorType.BIGINT.getType(), null, customFieldMeta);
        FieldType fieldType2 =
            new FieldType(true, Types.MinorType.INT.getType(), null, customFieldMeta);
        StructVector structVector = StructVector.empty("testListVector", allocator);
        List<Field> fieldList = new LinkedList<>();
        Field bigIntField = new Field("epoch", fieldType, null);
        Field intField = new Field("fraction", fieldType2, null);
        fieldList.add(bigIntField);
        fieldList.add(intField);
        FieldType structFieldType =
            new FieldType(true, Types.MinorType.STRUCT.getType(), null, customFieldMeta);
        Field structField = new Field("timestamp", structFieldType, fieldList);
        structVector.initializeChildrenFromFields(fieldList);
        List<Field> fieldListMajor = new LinkedList<>();
        fieldListMajor.add(structField);
        Schema dataSchema = new Schema(fieldList);
        Object[][] data = generateData(dataSchema, 1000);

        Schema schema = new Schema(fieldListMajor);
        File file = createArrowFile("testTimestamp", schema, data, 10);

        int dataSize = (int) file.length();
        byte[] dataBytes = new byte[dataSize];
        try (InputStream is = new FileInputStream(file)) {
          is.read(dataBytes, 0, dataSize);
        }

        resultSetSerializable.setRootAllocator(new RootAllocator(Long.MAX_VALUE));
        resultSetSerializable.setFirstChunkStringData(
            Base64.getEncoder().encodeToString(dataBytes));
        resultSetSerializable.setFirstChunkByteData(dataBytes);
        resultSetSerializable.setChunkFileCount(0);

        SFArrowResultSet resultSet =
            new SFArrowResultSet(resultSetSerializable, new NoOpTelemetryClient(), true);

        for (int i = 0; i < 1000; i++) {
          resultSet.next();
        }
        // We inserted a null row at the beginning, so when sorted, the last row should be null
        assertEquals(null, resultSet.getObject(1));
        assertFalse(resultSet.next());
        statement.execute("drop table teststructtimestamp;");
      }
    }
  }

  /** Test that the first chunk can be sorted */
  @Test
  @DontRunOnThinJar
  public void testSortedResultChunk() throws Throwable {
    try (Statement statement = connection.createStatement()) {
      statement.execute(
          "create or replace table alltypes (i1 int, d1 date, b1 bigint, f1 float, s1 smallint, t1 tinyint, b2 binary, t2 text, b3 boolean, d2 decimal)");
      try (ResultSet rs = statement.executeQuery("select * from alltypes")) {
        List<SnowflakeResultSetSerializable> resultSetSerializables =
            ((SnowflakeResultSet) rs).getResultSetSerializables(100 * 1024 * 1024);
        SnowflakeResultSetSerializableV1 resultSetSerializable =
            (SnowflakeResultSetSerializableV1) resultSetSerializables.get(0);

        List<Field> fieldList = new ArrayList<>();
        Map<String, String> customFieldMeta = new HashMap<>();
        customFieldMeta.put("logicalType", "FIXED");
        customFieldMeta.put("scale", "0");
        FieldType type = new FieldType(false, Types.MinorType.INT.getType(), null, customFieldMeta);
        fieldList.add(new Field("", type, null));
        customFieldMeta.put("logicalType", "DATE");
        type = new FieldType(false, Types.MinorType.DATEDAY.getType(), null, customFieldMeta);
        fieldList.add(new Field("", type, null));
        customFieldMeta.put("logicalType", "FIXED");
        type = new FieldType(false, Types.MinorType.BIGINT.getType(), null, customFieldMeta);
        fieldList.add(new Field("", type, null));
        customFieldMeta.put("logicalType", "REAL");
        type = new FieldType(false, Types.MinorType.FLOAT8.getType(), null, customFieldMeta);
        fieldList.add(new Field("", type, null));
        customFieldMeta.put("logicalType", "FIXED");
        type = new FieldType(false, Types.MinorType.SMALLINT.getType(), null, customFieldMeta);
        fieldList.add(new Field("", type, null));
        customFieldMeta.put("logicalType", "FIXED");
        type = new FieldType(false, Types.MinorType.TINYINT.getType(), null, customFieldMeta);
        fieldList.add(new Field("", type, null));
        customFieldMeta.put("logicalType", "BINARY");
        type = new FieldType(false, Types.MinorType.VARBINARY.getType(), null, customFieldMeta);
        fieldList.add(new Field("", type, null));
        customFieldMeta.put("logicalType", "TEXT");
        type = new FieldType(false, Types.MinorType.VARCHAR.getType(), null, customFieldMeta);
        fieldList.add(new Field("", type, null));
        customFieldMeta.put("logicalType", "BOOLEAN");
        type = new FieldType(false, Types.MinorType.BIT.getType(), null, customFieldMeta);
        fieldList.add(new Field("", type, null));
        customFieldMeta.put("logicalType", "REAL");
        type = new FieldType(false, new ArrowType.Decimal(38, 16, 128), null, customFieldMeta);
        fieldList.add(new Field("", type, null));
        Schema schema = new Schema(fieldList);

        Object[][] data = generateData(schema, 1000);
        File file = createArrowFile("testVectorTypes", schema, data, 10);

        int dataSize = (int) file.length();
        byte[] dataBytes = new byte[dataSize];
        try (InputStream is = new FileInputStream(file)) {
          is.read(dataBytes, 0, dataSize);
        }

        resultSetSerializable.setRootAllocator(new RootAllocator(Long.MAX_VALUE));
        resultSetSerializable.setFirstChunkStringData(
            Base64.getEncoder().encodeToString(dataBytes));
        resultSetSerializable.setFirstChunkByteData(dataBytes);
        resultSetSerializable.setChunkFileCount(0);

        SFArrowResultSet resultSet =
            new SFArrowResultSet(resultSetSerializable, new NoOpTelemetryClient(), true);

        for (int i = 0; i < 1000; i++) {
          resultSet.next();
        }
        // We inserted a null row at the beginning, so when sorted, the last row should be null
        assertEquals(null, resultSet.getObject(1));
        assertFalse(resultSet.next());
        statement.execute("drop table alltypes;");
      }
    }
  }
}
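The helpers above only write Arrow streams; reading one back is the mirror image and is handy when debugging a file produced by createArrowFile. A minimal sketch using the same Arrow APIs:

import java.io.FileInputStream;
import org.apache.arrow.memory.RootAllocator;
import org.apache.arrow.vector.VectorSchemaRoot;
import org.apache.arrow.vector.ipc.ArrowStreamReader;

public class ReadArrowFileSketch {
  public static void main(String[] args) throws Exception {
    try (RootAllocator allocator = new RootAllocator(Long.MAX_VALUE);
        FileInputStream in = new FileInputStream(args[0]);
        ArrowStreamReader reader = new ArrowStreamReader(in, allocator)) {
      VectorSchemaRoot root = reader.getVectorSchemaRoot();
      while (reader.loadNextBatch()) { // one record batch at a time
        System.out.println(root.contentToTSVString()); // dump rows for inspection
      }
    }
  }
}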
================================================
FILE: src/test/java/net/snowflake/client/internal/core/SFCrlTrustManagerDelegationTest.java
================================================
package net.snowflake.client.internal.core;

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import java.security.cert.CertificateException;
import java.security.cert.X509Certificate;
import javax.net.ssl.X509TrustManager;
import net.snowflake.client.internal.core.crl.CrlRevocationManager;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

public class SFCrlTrustManagerDelegationTest {

  private X509TrustManager mockTrustManager;
  private CrlRevocationManager mockRevocationManager;
  private X509Certificate[] certChain;
  private String authType;

  @BeforeEach
  void setUp() {
    mockTrustManager = mock(X509TrustManager.class);
    mockRevocationManager = mock(CrlRevocationManager.class);
    certChain = new X509Certificate[] {mock(X509Certificate.class)};
    authType = "RSA";
    when(mockTrustManager.getAcceptedIssuers()).thenReturn(new X509Certificate[0]);
  }

  @Test
  void testCheckClientTrustedDelegatesToTrustManager() throws CertificateException {
    SFBasicCrlTrustManager trustManager =
        new SFBasicCrlTrustManager(mockRevocationManager, mockTrustManager);
    trustManager.checkClientTrusted(certChain, authType);
    verify(mockTrustManager).checkClientTrusted(certChain, authType);
  }

  @Test
  void testCheckServerTrustedDelegatesToTrustManager() throws CertificateException {
    SFBasicCrlTrustManager trustManager =
        new SFBasicCrlTrustManager(mockRevocationManager, mockTrustManager);
    trustManager.checkServerTrusted(certChain, authType);
    verify(mockTrustManager).checkServerTrusted(certChain, authType);
    verify(mockRevocationManager).validateRevocationStatus(certChain, authType);
  }

  @Test
  void testGetAcceptedIssuersDelegatesToTrustManager() {
    SFBasicCrlTrustManager trustManager =
        new SFBasicCrlTrustManager(mockRevocationManager, mockTrustManager);
    trustManager.getAcceptedIssuers();
    verify(mockTrustManager).getAcceptedIssuers();
  }
}
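These tests pin down the delegation contract: client-side checks pass straight through to the platform trust manager, while server-side checks additionally run CRL revocation validation. A minimal sketch of a trust manager with that shape (the shipped SFBasicCrlTrustManager may differ in detail):

import java.security.cert.CertificateException;
import java.security.cert.X509Certificate;
import javax.net.ssl.X509TrustManager;
import net.snowflake.client.internal.core.crl.CrlRevocationManager;

class DelegatingCrlTrustManagerSketch implements X509TrustManager {
  private final CrlRevocationManager revocationManager;
  private final X509TrustManager delegate;

  DelegatingCrlTrustManagerSketch(CrlRevocationManager revocationManager, X509TrustManager delegate) {
    this.revocationManager = revocationManager;
    this.delegate = delegate;
  }

  @Override
  public void checkClientTrusted(X509Certificate[] chain, String authType)
      throws CertificateException {
    delegate.checkClientTrusted(chain, authType); // no revocation check on the client path
  }

  @Override
  public void checkServerTrusted(X509Certificate[] chain, String authType)
      throws CertificateException {
    delegate.checkServerTrusted(chain, authType);
    revocationManager.validateRevocationStatus(chain, authType); // CRL check after the PKI check
  }

  @Override
  public X509Certificate[] getAcceptedIssuers() {
    return delegate.getAcceptedIssuers();
  }
}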
================================================
FILE: src/test/java/net/snowflake/client/internal/core/SFCrlTrustManagerFactoryTest.java
================================================
package net.snowflake.client.internal.core;

import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.mockStatic;
import static org.mockito.Mockito.when;

import java.security.cert.X509Certificate;
import javax.net.ssl.TrustManager;
import javax.net.ssl.TrustManagerFactory;
import javax.net.ssl.X509ExtendedTrustManager;
import javax.net.ssl.X509TrustManager;
import net.snowflake.client.internal.core.crl.CertRevocationCheckMode;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;
import org.mockito.MockedStatic;

public class SFCrlTrustManagerFactoryTest {

  private HttpClientSettingsKey testKey;
  private MockedStatic<TrustManagerFactory> mockedTrustManagerFactory;

  @BeforeEach
  void setUp() {
    testKey = new HttpClientSettingsKey(OCSPMode.DISABLE_OCSP_CHECKS);
    testKey.setRevocationCheckMode(CertRevocationCheckMode.ENABLED);
  }

  @AfterEach
  void tearDown() {
    if (mockedTrustManagerFactory != null) {
      mockedTrustManagerFactory.close();
    }
  }

  @ParameterizedTest
  @ValueSource(classes = {X509TrustManager.class, X509ExtendedTrustManager.class})
  void testCreateProperCrlTrustManagerBasedOnJvmProvided(Class<? extends X509TrustManager> clazz)
      throws Exception {
    mockTrustManagerFactoryToReturn(clazz);
    X509TrustManager result = SFCrlTrustManagerFactory.createCrlTrustManager(testKey);
    assertInstanceOf(clazz, result);
  }

  private <T extends X509TrustManager> void mockTrustManagerFactoryToReturn(Class<T> mockClass) {
    mockedTrustManagerFactory = mockStatic(TrustManagerFactory.class);
    TrustManagerFactory mockFactory = mock(TrustManagerFactory.class);
    mockedTrustManagerFactory
        .when(() -> TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()))
        .thenReturn(mockFactory);
    T trustManagerMock = mock(mockClass);
    when(trustManagerMock.getAcceptedIssuers()).thenReturn(new X509Certificate[0]);
    when(mockFactory.getTrustManagers()).thenReturn(new TrustManager[] {trustManagerMock});
  }
}
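The factory evidently wraps whatever X509TrustManager the JVM's default TrustManagerFactory yields; the test mocks exactly that lookup. Outside of tests, locating the default trust manager typically looks like this (a sketch, not the factory's actual code):

import java.security.KeyStore;
import javax.net.ssl.TrustManager;
import javax.net.ssl.TrustManagerFactory;
import javax.net.ssl.X509TrustManager;

final class DefaultTrustManagerSketch {
  static X509TrustManager defaultTrustManager() throws Exception {
    TrustManagerFactory tmf =
        TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
    tmf.init((KeyStore) null); // null selects the JVM's default trust store
    for (TrustManager tm : tmf.getTrustManagers()) {
      if (tm instanceof X509TrustManager) {
        return (X509TrustManager) tm;
      }
    }
    throw new IllegalStateException("no X509TrustManager available");
  }
}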
================================================
FILE: src/test/java/net/snowflake/client/internal/core/SFCrlTrustManagerLatestIT.java
================================================
package net.snowflake.client.internal.core;

import static org.awaitility.Awaitility.await;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.not;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.core.AnyOf.anyOf;

import java.io.File;
import java.io.IOException;
import java.security.Security;
import java.sql.Connection;
import java.sql.Statement;
import java.time.Duration;
import java.util.Properties;
import java.util.stream.Stream;
import net.snowflake.client.category.TestTags;
import net.snowflake.client.internal.core.crl.CRLCacheConfig;
import net.snowflake.client.internal.core.crl.CertRevocationCheckMode;
import net.snowflake.client.internal.jdbc.BaseJDBCTest;
import net.snowflake.client.internal.log.SFLogger;
import net.snowflake.client.internal.log.SFLoggerFactory;
import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtensionContext;
import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.ArgumentsProvider;
import org.junit.jupiter.params.provider.ArgumentsSource;

@Tag(TestTags.CORE)
public class SFCrlTrustManagerLatestIT extends BaseJDBCTest {

  private static final SFLogger logger =
      SFLoggerFactory.getLogger(SFCrlTrustManagerLatestIT.class);

  @TempDir static File tmpFolder;

  private static class HostProvider implements ArgumentsProvider {
    @Override
    public Stream<? extends Arguments> provideArguments(ExtensionContext context) throws Exception {
      return Stream.of(
          Arguments.of("storage.googleapis.com"),
          Arguments.of("ocspssd.us-east-1.snowflakecomputing.com/ocsp/fetch"),
          Arguments.of("sfcsupport.snowflakecomputing.com"),
          Arguments.of("sfcsupport.us-east-1.snowflakecomputing.com"),
          Arguments.of("sfcsupport.eu-central-1.snowflakecomputing.com"),
          Arguments.of("sfc-dev1-regression.s3.amazonaws.com"),
          Arguments.of("sfc-ds2-customer-stage.s3.amazonaws.com"),
          Arguments.of("snowflake.okta.com"),
          Arguments.of("sfcdev2.blob.core.windows.net"));
    }
  }

  @ParameterizedTest
  @ArgumentsSource(HostProvider.class)
  public void testCrl(String host) throws Throwable {
    System.setProperty(CRLCacheConfig.CRL_RESPONSE_CACHE_DIR, tmpFolder.getAbsolutePath());
    HttpClientSettingsKey httpClientSettings =
        new HttpClientSettingsKey(OCSPMode.DISABLE_OCSP_CHECKS);
    httpClientSettings.setRevocationCheckMode(CertRevocationCheckMode.ENABLED);
    HttpClient client = HttpUtil.buildHttpClient(httpClientSettings, null, false);
    accessHost(host, client);
  }

  private static void accessHost(String host, HttpClient client) throws IOException {
    HttpGet httpRequest = new HttpGet(String.format("https://%s:443/", host));
    HttpResponse response = client.execute(httpRequest);
    await()
        .atMost(Duration.ofSeconds(10))
        .until(() -> response.getStatusLine().getStatusCode(), not(equalTo(-1)));
    assertThat(
        String.format("response code for %s", host),
        response.getStatusLine().getStatusCode(),
        anyOf(equalTo(200), equalTo(400), equalTo(403), equalTo(404), equalTo(513)));
  }

  @Test
  void shouldNotFailWhenSimpleTrustManagerIsUsed() throws Exception {
    Security.insertProviderAt(new TestSecurityProvider(), 1);
    HttpUtil.reset();
    Properties props = new Properties();
    props.setProperty("insecureMode", "true");
    props.setProperty("disableOCSPChecks", "true");
    props.setProperty("CERT_REVOCATION_CHECK_MODE", "ENABLED");
    try (Connection connection = getConnection(props)) {
      Statement statement = connection.createStatement();
      statement.execute("SELECT 1");
    } finally {
      Security.removeProvider(TestSecurityProvider.class.getSimpleName());
      HttpUtil.reset();
    }
  }
}
================================================
FILE: src/test/java/net/snowflake/client/internal/core/SFExtendedCrlTrustManagerDelegationTest.java
================================================
package net.snowflake.client.internal.core;

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import java.net.Socket;
import java.security.cert.CertificateException;
import java.security.cert.X509Certificate;
import javax.net.ssl.SSLEngine;
import javax.net.ssl.X509ExtendedTrustManager;
import net.snowflake.client.internal.core.crl.CrlRevocationManager;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

public class SFExtendedCrlTrustManagerDelegationTest {

  private X509ExtendedTrustManager mockTrustManager;
  private CrlRevocationManager mockRevocationManager;
  private X509Certificate[] certChain;
  private String authType;
  private Socket socket;
  private SSLEngine sslEngine;

  @BeforeEach
  void setUp() {
    mockTrustManager = mock(X509ExtendedTrustManager.class);
    mockRevocationManager = mock(CrlRevocationManager.class);
    certChain = new X509Certificate[] {mock(X509Certificate.class)};
    authType = "RSA";
    socket = mock(Socket.class);
    sslEngine = mock(SSLEngine.class);
    when(mockTrustManager.getAcceptedIssuers()).thenReturn(new X509Certificate[0]);
  }

  @Test
  void testCheckClientTrustedDelegatesToTrustManager() throws CertificateException {
    SFExtendedCrlTrustManager trustManager =
        new SFExtendedCrlTrustManager(mockRevocationManager, mockTrustManager);
    trustManager.checkClientTrusted(certChain, authType);
    verify(mockTrustManager).checkClientTrusted(certChain, authType);
  }

  @Test
  void testCheckClientTrustedWithSocketDelegatesToTrustManager() throws CertificateException {
    SFExtendedCrlTrustManager trustManager =
        new SFExtendedCrlTrustManager(mockRevocationManager, mockTrustManager);
    trustManager.checkClientTrusted(certChain, authType, socket);
    verify(mockTrustManager).checkClientTrusted(certChain, authType, socket);
  }

  @Test
  void testCheckClientTrustedWithSSLEngineDelegatesToTrustManager() throws CertificateException {
    SFExtendedCrlTrustManager trustManager =
        new SFExtendedCrlTrustManager(mockRevocationManager, mockTrustManager);
    trustManager.checkClientTrusted(certChain, authType, sslEngine);
    verify(mockTrustManager).checkClientTrusted(certChain, authType, sslEngine);
  }

  @Test
  void testCheckServerTrustedDelegatesToTrustManager() throws CertificateException {
    SFExtendedCrlTrustManager trustManager =
        new SFExtendedCrlTrustManager(mockRevocationManager, mockTrustManager);
    trustManager.checkServerTrusted(certChain, authType);
    verify(mockTrustManager).checkServerTrusted(certChain, authType);
    verify(mockRevocationManager).validateRevocationStatus(certChain, authType);
  }

  @Test
  void testCheckServerTrustedWithSocketDelegatesToTrustManager() throws CertificateException {
    SFExtendedCrlTrustManager trustManager =
        new SFExtendedCrlTrustManager(mockRevocationManager, mockTrustManager);
    trustManager.checkServerTrusted(certChain, authType, socket);
    verify(mockTrustManager).checkServerTrusted(certChain, authType, socket);
    verify(mockRevocationManager).validateRevocationStatus(certChain, authType);
  }

  @Test
  void testCheckServerTrustedWithSSLEngineDelegatesToTrustManager() throws CertificateException {
    SFExtendedCrlTrustManager trustManager =
        new SFExtendedCrlTrustManager(mockRevocationManager, mockTrustManager);
    trustManager.checkServerTrusted(certChain, authType, sslEngine);
    verify(mockTrustManager).checkServerTrusted(certChain, authType, sslEngine);
    verify(mockRevocationManager).validateRevocationStatus(certChain, authType);
  }

  @Test
  void testGetAcceptedIssuersDelegatesToTrustManager() {
    SFExtendedCrlTrustManager trustManager =
        new SFExtendedCrlTrustManager(mockRevocationManager, mockTrustManager);
    trustManager.getAcceptedIssuers();
    verify(mockTrustManager).getAcceptedIssuers();
  }
}

================================================
FILE: src/test/java/net/snowflake/client/internal/core/SFLoginInputTest.java
================================================
package net.snowflake.client.internal.core;

import static org.junit.jupiter.api.Assertions.assertEquals;

import org.junit.jupiter.api.Test;

public class SFLoginInputTest {

  @Test
  public void testGetHostFromServerUrlWithoutProtocolShouldNotThrow() throws SFException {
    SFLoginInput sfLoginInput = new SFLoginInput();
    sfLoginInput.setServerUrl("host.com:443");
    assertEquals("host.com", sfLoginInput.getHostFromServerUrl());
  }

  @Test
  public void testGetHostFromServerUrlWithProtocolShouldNotThrow() throws SFException {
    SFLoginInput sfLoginInput = new SFLoginInput();
    sfLoginInput.setServerUrl("https://host.com");
    assertEquals("host.com", sfLoginInput.getHostFromServerUrl());
  }
}
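getHostFromServerUrl evidently tolerates server URLs with or without a scheme; a minimal sketch of that parsing rule (names here are illustrative, not the shipped helper):

import java.net.URI;

final class HostParsingSketch {
  static String hostFromServerUrl(String serverUrl) throws Exception {
    String withScheme =
        serverUrl.matches("^\\w+://.*") ? serverUrl : "https://" + serverUrl; // default the scheme
    return new URI(withScheme).getHost();
  }
}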
================================================
FILE: src/test/java/net/snowflake/client/internal/core/SFSSLConnectionSocketFactoryTest.java
================================================
package net.snowflake.client.internal.core;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertThrows;

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.security.NoSuchAlgorithmException;
import javax.net.ssl.SSLContext;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.CsvSource;

public class SFSSLConnectionSocketFactoryTest {

  @BeforeEach
  public void setUp() {
    SFSSLConnectionSocketFactory.setMinTlsVersion("TLSv1.2");
    SFSSLConnectionSocketFactory.setMaxTlsVersion("TLSv1.3");
  }

  @AfterEach
  public void tearDown() {
    SFSSLConnectionSocketFactory.setMinTlsVersion("TLSv1.2");
    SFSSLConnectionSocketFactory.setMaxTlsVersion("TLSv1.3");
  }

  @Test
  public void testDefaultTlsVersions() throws Exception {
    String[] supportedVersions = getSupportedTlsVersions();
    if (isTls13Available()) {
      assertEquals(2, supportedVersions.length);
      assertEquals("TLSv1.2", supportedVersions[0]);
      assertEquals("TLSv1.3", supportedVersions[1]);
    } else {
      assertEquals(1, supportedVersions.length);
      assertEquals("TLSv1.2", supportedVersions[0]);
    }
  }

  @ParameterizedTest
  @CsvSource({
    "TLSv1.2,TLSv1.3,TLSv1.2 TLSv1.3",
    "TLSv1.2,TLSv1.2,TLSv1.2",
    "TLSv1.3,TLSv1.3,TLSv1.3"
  })
  public void testTlsConstraints(String min, String max, String expected) throws Exception {
    if (isTls13Available()) {
      SFSSLConnectionSocketFactory.setMinTlsVersion(min);
      SFSSLConnectionSocketFactory.setMaxTlsVersion(max);
      String versions = String.join(" ", getSupportedTlsVersions());
      assertEquals(expected, versions);
    }
  }

  @Test
  public void testMinGreaterThanMax() {
    SFSSLConnectionSocketFactory.setMinTlsVersion("TLSv1.3");
    SFSSLConnectionSocketFactory.setMaxTlsVersion("TLSv1.2");
    InvocationTargetException thrown =
        assertThrows(InvocationTargetException.class, this::getSupportedTlsVersions);
    assertInstanceOf(IllegalArgumentException.class, thrown.getCause());
  }

  private String[] getSupportedTlsVersions() throws Exception {
    Method method = SFSSLConnectionSocketFactory.class.getDeclaredMethod("getSupportedTlsVersions");
    method.setAccessible(true);
    return (String[]) method.invoke(null);
  }

  private boolean isTls13Available() {
    try {
      SSLContext.getInstance("TLSv1.3");
      return true;
    } catch (NoSuchAlgorithmException e) {
      return false;
    }
  }
}

================================================
FILE: src/test/java/net/snowflake/client/internal/core/SFSessionPropertyTest.java
================================================
package net.snowflake.client.internal.core;

import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemSetEnv;
import static net.snowflake.client.internal.jdbc.SnowflakeUtil.systemUnsetEnv;
import static org.hamcrest.CoreMatchers.endsWith;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.CALLS_REAL_METHODS;
import static org.mockito.Mockito.mock;

import java.lang.reflect.Field;
import java.util.HashMap;
import java.util.Map;
import net.snowflake.client.api.exception.ErrorCode;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

public class SFSessionPropertyTest {

  private static final String SF_ENABLE_WIF_AWS_EXTERNAL_ID = "SF_ENABLE_WIF_AWS_EXTERNAL_ID";
  private String originalEnvValue;

  @BeforeEach
  public void setUp() {
    originalEnvValue = System.getenv(SF_ENABLE_WIF_AWS_EXTERNAL_ID);
    systemUnsetEnv(SF_ENABLE_WIF_AWS_EXTERNAL_ID);
  }

  @AfterEach
  public void tearDown() {
    if (originalEnvValue != null) {
      systemSetEnv(SF_ENABLE_WIF_AWS_EXTERNAL_ID, originalEnvValue);
    } else {
      systemUnsetEnv(SF_ENABLE_WIF_AWS_EXTERNAL_ID);
    }
  }

  @Test
  public void testCheckApplicationName() throws SFException {
    String[] validApplicationName = {"test1234", "test_1234", "test-1234", "test.1234"};
    String[] invalidApplicationName = {"1234test", "test$A", "test