Repository: FirebirdSQL/NETProvider Branch: master Commit: a61996cb5734 Files: 647 Total size: 3.5 MB Directory structure: gitextract_qhgfmhye/ ├── .github/ │ ├── FUNDING.yml │ └── workflows/ │ └── ci.yml ├── .gitignore ├── CONTRIBUTING.md ├── README.md ├── build.ps1 ├── docs/ │ ├── ado-net-schema.md │ ├── ado-net.md │ ├── batching.md │ ├── decfloat.md │ ├── entity-framework-6.md │ ├── entity-framework-core.md │ ├── events.md │ ├── int128.md │ ├── services-backup.md │ └── time-zones.md ├── header.ps1 ├── include.ps1 ├── license.txt ├── src/ │ ├── .editorconfig │ ├── Directory.Build.props │ ├── EntityFramework.Firebird/ │ │ ├── App.config.install.xdt │ │ ├── DefaultFbMigrationSqlGeneratorBehavior.cs │ │ ├── EntityFramework.Firebird.csproj │ │ ├── FbConnectionFactory.cs │ │ ├── FbMigrationSqlGenerator.cs │ │ ├── FbMigrationsTransactionsInterceptor.cs │ │ ├── FbProviderManifest.cs │ │ ├── FbProviderServices.cs │ │ ├── IFbMigrationSqlGeneratorBehavior.cs │ │ ├── MetadataHelpers.cs │ │ ├── Properties/ │ │ │ └── EntityFramework.Firebird.snk │ │ ├── Resources/ │ │ │ ├── ProviderManifest.xml │ │ │ ├── StoreSchemaDefinition.ssdl │ │ │ ├── StoreSchemaDefinitionVersion3.ssdl │ │ │ ├── StoreSchemaMapping.msl │ │ │ └── StoreSchemaMappingVersion3.msl │ │ ├── SqlGen/ │ │ │ ├── DmlSqlGenerator.cs │ │ │ ├── ExpressionTranslator.cs │ │ │ ├── FirstClause.cs │ │ │ ├── ISqlFragment.cs │ │ │ ├── JoinSymbol.cs │ │ │ ├── SkipClause.cs │ │ │ ├── SqlBuilder.cs │ │ │ ├── SqlGenerator.cs │ │ │ ├── SqlSelectStatement.cs │ │ │ ├── SqlWriter.cs │ │ │ ├── Symbol.cs │ │ │ ├── SymbolPair.cs │ │ │ └── SymbolTable.cs │ │ ├── SsdlToFb.cs │ │ ├── TypeHelpers.cs │ │ └── Web.config.install.xdt │ ├── EntityFramework.Firebird.Tests/ │ │ ├── EntityFramework.Firebird.Tests.csproj │ │ ├── EntityFrameworkTestsBase.cs │ │ ├── FbTestDbContext.cs │ │ ├── InfrastructureTests.cs │ │ ├── QueryTests.cs │ │ └── app.config │ ├── FirebirdSql.Data.External/ │ │ ├── FirebirdSql.Data.External.projitems │ │ ├── 
FirebirdSql.Data.External.shproj │ │ ├── RC4/ │ │ │ ├── ICipherParameters.cs │ │ │ ├── KeyParameter.cs │ │ │ └── RC4Engine.cs │ │ └── zlib/ │ │ ├── Deflate.cs │ │ ├── InfTree.cs │ │ ├── Inflate.cs │ │ ├── Tree.cs │ │ ├── Zlib.cs │ │ ├── ZlibCodec.cs │ │ └── ZlibConstants.cs │ ├── FirebirdSql.Data.FirebirdClient/ │ │ ├── Client/ │ │ │ ├── ClientFactory.cs │ │ │ ├── Managed/ │ │ │ │ ├── AuthBlock.cs │ │ │ │ ├── DataProviderStreamWrapper.cs │ │ │ │ ├── FetchResponse.cs │ │ │ │ ├── FirebirdNetworkHandlingWrapper.cs │ │ │ │ ├── GdsConnection.cs │ │ │ │ ├── GenericResponse.cs │ │ │ │ ├── IDataProvider.cs │ │ │ │ ├── IResponse.cs │ │ │ │ ├── ITracksIOFailure.cs │ │ │ │ ├── IXdrReader.cs │ │ │ │ ├── IXdrWriter.cs │ │ │ │ ├── ProtocolsSupported.cs │ │ │ │ ├── SqlResponse.cs │ │ │ │ ├── Srp/ │ │ │ │ │ ├── Srp256Client.cs │ │ │ │ │ ├── SrpClient.cs │ │ │ │ │ └── SrpClientBase.cs │ │ │ │ ├── Sspi/ │ │ │ │ │ └── SspiHelper.cs │ │ │ │ ├── Version10/ │ │ │ │ │ ├── GdsArray.cs │ │ │ │ │ ├── GdsBlob.cs │ │ │ │ │ ├── GdsDatabase.cs │ │ │ │ │ ├── GdsEventManager.cs │ │ │ │ │ ├── GdsServiceManager.cs │ │ │ │ │ ├── GdsStatement.cs │ │ │ │ │ └── GdsTransaction.cs │ │ │ │ ├── Version11/ │ │ │ │ │ ├── AuthResponse.cs │ │ │ │ │ ├── GdsDatabase.cs │ │ │ │ │ ├── GdsServiceManager.cs │ │ │ │ │ └── GdsStatement.cs │ │ │ │ ├── Version12/ │ │ │ │ │ ├── GdsDatabase.cs │ │ │ │ │ ├── GdsServiceManager.cs │ │ │ │ │ └── GdsStatement.cs │ │ │ │ ├── Version13/ │ │ │ │ │ ├── ContAuthResponse.cs │ │ │ │ │ ├── CryptKeyCallbackResponse.cs │ │ │ │ │ ├── GdsDatabase.cs │ │ │ │ │ ├── GdsServiceManager.cs │ │ │ │ │ ├── GdsStatement.cs │ │ │ │ │ └── WireCryptOption.cs │ │ │ │ ├── Version15/ │ │ │ │ │ ├── CryptKeyCallbackResponse.cs │ │ │ │ │ ├── GdsDatabase.cs │ │ │ │ │ ├── GdsServiceManager.cs │ │ │ │ │ └── GdsStatement.cs │ │ │ │ ├── Version16/ │ │ │ │ │ ├── BatchCompletionStateResponse.cs │ │ │ │ │ ├── GdsBatch.cs │ │ │ │ │ ├── GdsDatabase.cs │ │ │ │ │ ├── GdsServiceManager.cs │ │ │ │ │ └── GdsStatement.cs │ 
│ │ │ └── XdrReaderWriter.cs │ │ │ └── Native/ │ │ │ ├── FbClientFactory.cs │ │ │ ├── FesArray.cs │ │ │ ├── FesBlob.cs │ │ │ ├── FesConnection.cs │ │ │ ├── FesDatabase.cs │ │ │ ├── FesServiceManager.cs │ │ │ ├── FesStatement.cs │ │ │ ├── FesTransaction.cs │ │ │ ├── Handles/ │ │ │ │ ├── BlobHandle.cs │ │ │ │ ├── DatabaseHandle.cs │ │ │ │ ├── FirebirdHandle.cs │ │ │ │ ├── IFirebirdHandle.cs │ │ │ │ ├── StatementHandle.cs │ │ │ │ └── TransactionHandle.cs │ │ │ ├── IFbClient.cs │ │ │ ├── Marshalers/ │ │ │ │ ├── ArrayBoundMarshal.cs │ │ │ │ ├── ArrayDescMarshal.cs │ │ │ │ ├── ArrayDescMarshaler.cs │ │ │ │ ├── XSQLDA.cs │ │ │ │ ├── XSQLVAR.cs │ │ │ │ └── XsqldaMarshaler.cs │ │ │ └── StatusVectorHelper.cs │ │ ├── Common/ │ │ │ ├── ArrayBase.cs │ │ │ ├── ArrayBound.cs │ │ │ ├── ArrayDesc.cs │ │ │ ├── BatchBase.cs │ │ │ ├── BatchParameterBuffer.cs │ │ │ ├── BinaryEncoding.cs │ │ │ ├── BlobBase.cs │ │ │ ├── BlobParameterBuffer.cs │ │ │ ├── BlobStream.cs │ │ │ ├── Charset.cs │ │ │ ├── ConnectionPoolLifetimeHelper.cs │ │ │ ├── ConnectionString.cs │ │ │ ├── DatabaseBase.cs │ │ │ ├── DatabaseParameterBuffer1.cs │ │ │ ├── DatabaseParameterBuffer2.cs │ │ │ ├── DatabaseParameterBufferBase.cs │ │ │ ├── DbDataType.cs │ │ │ ├── DbField.cs │ │ │ ├── DbStatementType.cs │ │ │ ├── DbValue.cs │ │ │ ├── DecimalCodec.cs │ │ │ ├── DecimalShiftHelper.cs │ │ │ ├── DecimalType.cs │ │ │ ├── DenselyPackedDecimalCodec.cs │ │ │ ├── Descriptor.cs │ │ │ ├── EmptyDescriptorFiller.cs │ │ │ ├── EventParameterBuffer.cs │ │ │ ├── ExplicitCancellation.cs │ │ │ ├── Extensions.cs │ │ │ ├── IDescriptorFiller.cs │ │ │ ├── InfoValuesConverter.cs │ │ │ ├── Int128Helper.cs │ │ │ ├── IscCodes.cs │ │ │ ├── IscError.cs │ │ │ ├── IscErrorMessages.cs │ │ │ ├── IscException.cs │ │ │ ├── IscHelper.cs │ │ │ ├── NamedParametersParser.cs │ │ │ ├── NativeHelpers.cs │ │ │ ├── ParameterBuffer.cs │ │ │ ├── RemoteEvent.cs │ │ │ ├── ServiceManagerBase.cs │ │ │ ├── ServiceParameterBuffer2.cs │ │ │ ├── ServiceParameterBuffer3.cs │ 
│ │ ├── ServiceParameterBufferBase.cs │ │ │ ├── ShutdownHelper.cs │ │ │ ├── SizeHelper.cs │ │ │ ├── SqlStateMapping.cs │ │ │ ├── StatementBase.cs │ │ │ ├── StatementState.cs │ │ │ ├── TimeZoneMapping.cs │ │ │ ├── TransactionBase.cs │ │ │ ├── TransactionParameterBuffer.cs │ │ │ ├── TransactionState.cs │ │ │ ├── TypeDecoder.cs │ │ │ ├── TypeEncoder.cs │ │ │ └── TypeHelper.cs │ │ ├── FirebirdClient/ │ │ │ ├── FbBatchCommand.cs │ │ │ ├── FbBatchNonQueryResult.cs │ │ │ ├── FbBatchParameterCollection.cs │ │ │ ├── FbCharset.cs │ │ │ ├── FbCommand.cs │ │ │ ├── FbCommandBuilder.cs │ │ │ ├── FbConnection.cs │ │ │ ├── FbConnectionInternal.cs │ │ │ ├── FbConnectionPoolManager.cs │ │ │ ├── FbConnectionStringBuilder.cs │ │ │ ├── FbDataAdapter.cs │ │ │ ├── FbDataReader.cs │ │ │ ├── FbDatabaseInfo.cs │ │ │ ├── FbDbType.cs │ │ │ ├── FbEnlistmentNotification.cs │ │ │ ├── FbError.cs │ │ │ ├── FbErrorCollection.cs │ │ │ ├── FbException.cs │ │ │ ├── FbInfoMessageEventArgs.cs │ │ │ ├── FbParameter.cs │ │ │ ├── FbParameterCollection.cs │ │ │ ├── FbRemoteEvent.cs │ │ │ ├── FbRemoteEventCountsEventArgs.cs │ │ │ ├── FbRemoteEventErrorEventArgs.cs │ │ │ ├── FbRowUpdatedEventArgs.cs │ │ │ ├── FbRowUpdatingEventArgs.cs │ │ │ ├── FbServerType.cs │ │ │ ├── FbTransaction.cs │ │ │ ├── FbTransactionBehavior.cs │ │ │ ├── FbTransactionInfo.cs │ │ │ ├── FbTransactionOptions.cs │ │ │ ├── FbWireCrypt.cs │ │ │ ├── FirebirdClientFactory.cs │ │ │ └── IFbPreparedCommand.cs │ │ ├── FirebirdSql.Data.FirebirdClient.csproj │ │ ├── Isql/ │ │ │ ├── CommandExecutedEventArgs.cs │ │ │ ├── CommandExecutingEventArgs.cs │ │ │ ├── FbBatchExecution.cs │ │ │ ├── FbScript.cs │ │ │ ├── FbStatement.cs │ │ │ ├── FbStatementCollection.cs │ │ │ ├── SqlStatementType.cs │ │ │ ├── SqlStringParser.cs │ │ │ └── UnknownStatementEventArgs.cs │ │ ├── Logging/ │ │ │ ├── FbLogManager.cs │ │ │ └── LogMessages.cs │ │ ├── Properties/ │ │ │ ├── ComAssemblyInfo.cs │ │ │ ├── FirebirdSql.Data.FirebirdClient.snk │ │ │ └── 
InternalsVisibleToAssemblyInfo.cs │ │ ├── Schema/ │ │ │ ├── FbCharacterSets.cs │ │ │ ├── FbCheckConstraints.cs │ │ │ ├── FbChecksByTable.cs │ │ │ ├── FbCollations.cs │ │ │ ├── FbColumnPrivileges.cs │ │ │ ├── FbColumns.cs │ │ │ ├── FbDomains.cs │ │ │ ├── FbForeignKeyColumns.cs │ │ │ ├── FbForeignKeys.cs │ │ │ ├── FbFunctionArguments.cs │ │ │ ├── FbFunctionPrivileges.cs │ │ │ ├── FbFunctions.cs │ │ │ ├── FbGenerators.cs │ │ │ ├── FbIndexColumns.cs │ │ │ ├── FbIndexes.cs │ │ │ ├── FbMetaData.xml │ │ │ ├── FbPrimaryKeys.cs │ │ │ ├── FbProcedureParameters.cs │ │ │ ├── FbProcedurePrivileges.cs │ │ │ ├── FbProcedures.cs │ │ │ ├── FbRoles.cs │ │ │ ├── FbSchema.cs │ │ │ ├── FbSchemaFactory.cs │ │ │ ├── FbTableConstraints.cs │ │ │ ├── FbTablePrivileges.cs │ │ │ ├── FbTables.cs │ │ │ ├── FbTriggers.cs │ │ │ ├── FbUniqueKeys.cs │ │ │ ├── FbViewColumns.cs │ │ │ ├── FbViewPrivileges.cs │ │ │ └── FbViews.cs │ │ ├── Services/ │ │ │ ├── FbBackup.cs │ │ │ ├── FbBackupFile.cs │ │ │ ├── FbBackupFileCollection.cs │ │ │ ├── FbBackupFlags.cs │ │ │ ├── FbBackupRestoreStatistics.cs │ │ │ ├── FbConfiguration.cs │ │ │ ├── FbDatabaseTraceConfiguration.cs │ │ │ ├── FbDatabaseTraceConfigurationCollection.cs │ │ │ ├── FbDatabaseTraceEvents.cs │ │ │ ├── FbDatabasesInfo.cs │ │ │ ├── FbLog.cs │ │ │ ├── FbNBackup.cs │ │ │ ├── FbNBackupFlags.cs │ │ │ ├── FbNFixup.cs │ │ │ ├── FbNRestore.cs │ │ │ ├── FbRestore.cs │ │ │ ├── FbRestoreFlags.cs │ │ │ ├── FbSecurity.cs │ │ │ ├── FbServerConfig.cs │ │ │ ├── FbServerProperties.cs │ │ │ ├── FbService.cs │ │ │ ├── FbServiceState.cs │ │ │ ├── FbServiceTraceConfiguration.cs │ │ │ ├── FbServiceTraceEvents.cs │ │ │ ├── FbShutdownMode.cs │ │ │ ├── FbShutdownOnlineMode.cs │ │ │ ├── FbShutdownType.cs │ │ │ ├── FbStatistical.cs │ │ │ ├── FbStatisticalFlags.cs │ │ │ ├── FbStreamingBackup.cs │ │ │ ├── FbStreamingRestore.cs │ │ │ ├── FbTrace.cs │ │ │ ├── FbTraceConfiguration.cs │ │ │ ├── FbTraceVersion.cs │ │ │ ├── FbUserData.cs │ │ │ ├── FbValidation.cs │ │ │ ├── 
FbValidation2.cs │ │ │ ├── FbValidationFlags.cs │ │ │ └── ServiceOutputEventArgs.cs │ │ └── Types/ │ │ ├── FbDecFloat.cs │ │ ├── FbZonedDateTime.cs │ │ └── FbZonedTime.cs │ ├── FirebirdSql.Data.FirebirdClient.Tests/ │ │ ├── AuthBlockTests.cs │ │ ├── BlobStreamTests.cs │ │ ├── ConnectionPoolLifetimeHelperTests.cs │ │ ├── ConnectionStringTests.cs │ │ ├── FbArrayTests.cs │ │ ├── FbBatchCommandTests.cs │ │ ├── FbBlobTests.cs │ │ ├── FbBooleanSupportTests.cs │ │ ├── FbCommandBuilderTests.cs │ │ ├── FbCommandTests.cs │ │ ├── FbConnectionStringBuilderTests.cs │ │ ├── FbConnectionTests.cs │ │ ├── FbDataAdapterTests.cs │ │ ├── FbDataReaderTests.cs │ │ ├── FbDatabaseInfoTests.cs │ │ ├── FbDecFloat16SupportTests.cs │ │ ├── FbDecFloat34SupportTests.cs │ │ ├── FbDecFloatTypeTests.cs │ │ ├── FbExceptionTests.cs │ │ ├── FbImplicitTransactionTests.cs │ │ ├── FbInt128SupportTests.cs │ │ ├── FbLongNumericsSupportTests.cs │ │ ├── FbParameterCollectionTests.cs │ │ ├── FbParameterTests.cs │ │ ├── FbRemoteEventTests.cs │ │ ├── FbSchemaTests.cs │ │ ├── FbScriptTests.cs │ │ ├── FbServicesTests.cs │ │ ├── FbStoredProcedureCallsTests.cs │ │ ├── FbTimeZonesSupportTests.cs │ │ ├── FbTransactionInfoTests.cs │ │ ├── FbTransactionTests.cs │ │ ├── FbZonedDateTimeTypeTests.cs │ │ ├── FbZonedTimeTypeTests.cs │ │ ├── FirebirdSql.Data.FirebirdClient.Tests.csproj │ │ ├── GuidTests.cs │ │ ├── Srp256ClientTests.cs │ │ ├── SrpClientTests.cs │ │ ├── TrackerIssuesTests.cs │ │ └── TransactionScopeTests.cs │ ├── FirebirdSql.Data.TestsBase/ │ │ ├── FbServerTypeTestFixtureSource.cs │ │ ├── FbTestsBase.cs │ │ ├── FbTestsSetup.cs │ │ ├── FirebirdSql.Data.TestsBase.projitems │ │ ├── FirebirdSql.Data.TestsBase.shproj │ │ ├── FirebirdSql.Data.TestsBase.snk │ │ ├── NoServerCategoryAttribute.cs │ │ └── Program.cs │ ├── FirebirdSql.EntityFrameworkCore.Firebird/ │ │ ├── Design/ │ │ │ └── Internal/ │ │ │ └── FbDesignTimeServices.cs │ │ ├── Diagnostics/ │ │ │ └── Internal/ │ │ │ └── FbLoggingDefinitions.cs │ │ ├── 
Extensions/ │ │ │ ├── FbDatabaseFacadeExtensions.cs │ │ │ ├── FbDbContextOptionsBuilderExtensions.cs │ │ │ ├── FbModelBuilderExtensions.cs │ │ │ ├── FbModelExtensions.cs │ │ │ ├── FbPropertyBuilderExtensions.cs │ │ │ ├── FbPropertyExtensions.cs │ │ │ └── FbServiceCollectionExtensions.cs │ │ ├── FirebirdSql.EntityFrameworkCore.Firebird.csproj │ │ ├── Infrastructure/ │ │ │ ├── FbDbContextOptionsBuilder.cs │ │ │ └── Internal/ │ │ │ ├── FbModelValidator.cs │ │ │ ├── FbOptionsExtension.cs │ │ │ └── IFbOptions.cs │ │ ├── Internal/ │ │ │ └── FbOptions.cs │ │ ├── Metadata/ │ │ │ ├── Conventions/ │ │ │ │ ├── FbConventionSetBuilder.cs │ │ │ │ ├── FbStoreGenerationConvention.cs │ │ │ │ ├── FbValueGenerationConvention.cs │ │ │ │ └── FbValueGenerationStrategyConvention.cs │ │ │ ├── FbIdentityType.cs │ │ │ ├── FbValueGenerationStrategy.cs │ │ │ └── Internal/ │ │ │ ├── FbAnnotationNames.cs │ │ │ └── FbRelationalAnnotationProvider.cs │ │ ├── Migrations/ │ │ │ ├── FbMigrationSqlGeneratorBehavior.cs │ │ │ ├── FbMigrationsSqlGenerator.cs │ │ │ ├── IFbMigrationSqlGeneratorBehavior.cs │ │ │ └── Internal/ │ │ │ ├── FbHistoryRepository.cs │ │ │ └── FbMigrationDatabaseLock.cs │ │ ├── Properties/ │ │ │ ├── EntityFrameworkCoreAssemblyInfo.cs │ │ │ └── FirebirdSql.EntityFrameworkCore.Firebird.snk │ │ ├── Query/ │ │ │ ├── ExpressionTranslators/ │ │ │ │ └── Internal/ │ │ │ │ ├── FbByteArrayMethodTranslator.cs │ │ │ │ ├── FbConvertTranslator.cs │ │ │ │ ├── FbDateAddTranslator.cs │ │ │ │ ├── FbDateOnlyMethodTranslator.cs │ │ │ │ ├── FbDateOnlyPartComponentTranslator.cs │ │ │ │ ├── FbDateTimeDateComponentTranslator.cs │ │ │ │ ├── FbDateTimeNowTodayTranslator.cs │ │ │ │ ├── FbDateTimePartComponentTranslator.cs │ │ │ │ ├── FbMathTranslator.cs │ │ │ │ ├── FbMemberTranslatorProvider.cs │ │ │ │ ├── FbMethodCallTranslatorProvider.cs │ │ │ │ ├── FbNewGuidTranslator.cs │ │ │ │ ├── FbObjectToStringTranslator.cs │ │ │ │ ├── FbStringContainsTranslator.cs │ │ │ │ ├── FbStringEndsWithTranslator.cs │ │ │ │ ├── 
FbStringFirstOrDefaultTranslator.cs │ │ │ │ ├── FbStringIndexOfTranslator.cs │ │ │ │ ├── FbStringIsNullOrWhiteSpaceTranslator.cs │ │ │ │ ├── FbStringLastOrDefaultTranslator.cs │ │ │ │ ├── FbStringLengthTranslator.cs │ │ │ │ ├── FbStringReplaceTranslator.cs │ │ │ │ ├── FbStringStartsWithTranslator.cs │ │ │ │ ├── FbStringSubstringTranslator.cs │ │ │ │ ├── FbStringToLowerTranslator.cs │ │ │ │ ├── FbStringToUpperTranslator.cs │ │ │ │ ├── FbStringTrimTranslator.cs │ │ │ │ ├── FbTimeOnlyMethodTranslator.cs │ │ │ │ ├── FbTimeOnlyPartComponentTranslator.cs │ │ │ │ └── FbTimeSpanPartComponentTranslator.cs │ │ │ ├── Expressions/ │ │ │ │ └── Internal/ │ │ │ │ └── FbSpacedFunctionExpression.cs │ │ │ └── Internal/ │ │ │ ├── FbQueryRootProcessor.cs │ │ │ ├── FbQuerySqlGenerator.cs │ │ │ ├── FbQuerySqlGeneratorFactory.cs │ │ │ ├── FbQueryTranslationPreprocessor.cs │ │ │ ├── FbQueryTranslationPreprocessorFactory.cs │ │ │ ├── FbSqlExpressionFactory.cs │ │ │ ├── FbSqlTranslatingExpressionVisitor.cs │ │ │ └── FbSqlTranslatingExpressionVisitorFactory.cs │ │ ├── Scaffolding/ │ │ │ └── Internal/ │ │ │ ├── FbDatabaseModelFactory.cs │ │ │ └── FbProviderCodeGenerator.cs │ │ ├── Storage/ │ │ │ └── Internal/ │ │ │ ├── FbBoolTypeMapping.cs │ │ │ ├── FbByteArrayTypeMapping.cs │ │ │ ├── FbDatabaseCreator.cs │ │ │ ├── FbDateOnlyTypeMapping.cs │ │ │ ├── FbDateTimeTypeMapping.cs │ │ │ ├── FbGuidTypeMapping.cs │ │ │ ├── FbRelationalConnection.cs │ │ │ ├── FbRelationalTransaction.cs │ │ │ ├── FbSqlGenerationHelper.cs │ │ │ ├── FbStringTypeMapping.cs │ │ │ ├── FbTimeOnlyTypeMapping.cs │ │ │ ├── FbTimeSpanTypeMapping.cs │ │ │ ├── FbTransactionFactory.cs │ │ │ ├── FbTypeMappingSource.cs │ │ │ ├── IFbSqlGenerationHelper.cs │ │ │ ├── IRelationalFbConnection.cs │ │ │ └── IRelationalFbTransaction.cs │ │ ├── Update/ │ │ │ └── Internal/ │ │ │ ├── FbModificationCommandBatchFactory.cs │ │ │ ├── FbUpdateSqlGenerator.cs │ │ │ └── IFbUpdateSqlGenerator.cs │ │ ├── Utilities/ │ │ │ ├── EnumerableMethods.cs │ │ │ 
├── SharedTypeExtensions.cs │ │ │ ├── StringBuilderExtensions.cs │ │ │ └── TranslatorsHelper.cs │ │ └── ValueGeneration/ │ │ └── Internal/ │ │ ├── FbSequenceHiLoValueGenerator.cs │ │ ├── FbSequenceValueGeneratorFactory.cs │ │ ├── FbSequenceValueGeneratorState.cs │ │ ├── FbValueGeneratorCache.cs │ │ ├── FbValueGeneratorSelector.cs │ │ ├── IFbSequenceValueGeneratorFactory.cs │ │ └── IFbValueGeneratorCache.cs │ ├── FirebirdSql.EntityFrameworkCore.Firebird.FunctionalTests/ │ │ ├── ComplianceFbTest.cs │ │ ├── FirebirdSql.EntityFrameworkCore.Firebird.FunctionalTests.csproj │ │ ├── Helpers/ │ │ │ ├── ModelHelpers.cs │ │ │ ├── SkippingAttributes.cs │ │ │ └── Xunit.cs │ │ ├── MigrationsFbTest.cs │ │ ├── Query/ │ │ │ ├── AdHocAdvancedMappingsQueryFbTest.cs │ │ │ ├── AdHocComplexTypeQueryFbTest.cs │ │ │ ├── AdHocManyToManyQueryFbTest.cs │ │ │ ├── AdHocMiscellaneousQueryFbTest.cs │ │ │ ├── AdHocNavigationsQueryFbTest.cs │ │ │ ├── AdHocQueryFiltersQueryFbTest.cs │ │ │ ├── AdHocQuerySplittingQueryFbTest.cs │ │ │ ├── Associations/ │ │ │ │ ├── ComplexTableSplitting/ │ │ │ │ │ ├── ComplexTableSplittingBulkUpdateFbTest.cs │ │ │ │ │ ├── ComplexTableSplittingFbFixture.cs │ │ │ │ │ ├── ComplexTableSplittingMiscellaneousFbTest.cs │ │ │ │ │ ├── ComplexTableSplittingProjectionFbTest.cs │ │ │ │ │ └── ComplexTableSplittingStructuralEqualityFbTest.cs │ │ │ │ ├── Navigations/ │ │ │ │ │ ├── NavigationsCollectionFbTest.cs │ │ │ │ │ ├── NavigationsFbFixture.cs │ │ │ │ │ ├── NavigationsIncludeFbTest.cs │ │ │ │ │ ├── NavigationsMiscellaneousFbTest.cs │ │ │ │ │ ├── NavigationsPrimitiveCollectionFbTest.cs │ │ │ │ │ ├── NavigationsProjectionFbTest.cs │ │ │ │ │ ├── NavigationsSetOperationsFbTest.cs │ │ │ │ │ └── NavigationsStructuralEqualityFbTest.cs │ │ │ │ ├── OwnedNavigations/ │ │ │ │ │ ├── OwnedNavigationsCollectionFbTest.cs │ │ │ │ │ ├── OwnedNavigationsFbFixture.cs │ │ │ │ │ ├── OwnedNavigationsMiscellaneousFbTest.cs │ │ │ │ │ ├── OwnedNavigationsProjectionFbTest.cs │ │ │ │ │ ├── 
OwnedNavigationsSetOperationsFbTest.cs │ │ │ │ │ └── OwnedNavigationsStructuralEqualityFbTest.cs │ │ │ │ └── OwnedTableSplitting/ │ │ │ │ ├── OwnedTableSplittingFbFixture.cs │ │ │ │ ├── OwnedTableSplittingMiscellaneousFbTest.cs │ │ │ │ ├── OwnedTableSplittingProjectionFbTest.cs │ │ │ │ └── OwnedTableSplittingStructuralEqualityFbTest.cs │ │ │ ├── ComplexNavigationsCollectionsQueryFbTest.cs │ │ │ ├── ComplexNavigationsCollectionsSharedTypeQueryFbTest.cs │ │ │ ├── ComplexNavigationsCollectionsSplitQueryFbTest.cs │ │ │ ├── ComplexNavigationsCollectionsSplitSharedTypeQueryFbTest.cs │ │ │ ├── ComplexNavigationsQueryFbFixture.cs │ │ │ ├── ComplexNavigationsQueryFbTest.cs │ │ │ ├── ComplexNavigationsSharedTypeQueryFbFixture.cs │ │ │ ├── ComplexNavigationsSharedTypeQueryFbTest.cs │ │ │ ├── ComplexTypeQueryFbTest.cs │ │ │ ├── CompositeKeysQueryFbFixture.cs │ │ │ ├── CompositeKeysQueryFbTest.cs │ │ │ ├── CompositeKeysSplitQueryFbTest.cs │ │ │ ├── Ef6GroupByFbTest.cs │ │ │ ├── EntitySplittingQueryFbTest.cs │ │ │ ├── FromSqlQueryFbTest.cs │ │ │ ├── FromSqlSprocQueryFbTest.cs │ │ │ ├── FunkyDataQueryFbTest.cs │ │ │ ├── GearsOfWarFromSqlQueryFbTest.cs │ │ │ ├── GearsOfWarQueryFbFixture.cs │ │ │ ├── GearsOfWarQueryFbTest.cs │ │ │ ├── IncludeOneToOneFbTest.cs │ │ │ ├── IncompleteMappingInheritanceQueryFbFixture.cs │ │ │ ├── IncompleteMappingInheritanceQueryFbTest.cs │ │ │ ├── InheritanceRelationshipsQueryFbTest.cs │ │ │ ├── ManyToManyNoTrackingQueryFbTest.cs │ │ │ ├── ManyToManyQueryFbFixture.cs │ │ │ ├── ManyToManyQueryFbTest.cs │ │ │ ├── MappingQueryFbTest.cs │ │ │ ├── NonSharedPrimitiveCollectionsQueryFbTest.cs │ │ │ ├── NorthwindAggregateOperatorsQueryFbTest.cs │ │ │ ├── NorthwindAsNoTrackingQueryFbTest.cs │ │ │ ├── NorthwindAsTrackingQueryFbTest.cs │ │ │ ├── NorthwindChangeTrackingQueryFbTest.cs │ │ │ ├── NorthwindCompiledQueryFbTest.cs │ │ │ ├── NorthwindDbFunctionsQueryFbTest.cs │ │ │ ├── NorthwindEFPropertyIncludeQueryFbTest.cs │ │ │ ├── NorthwindFunctionsQueryFbTest.cs │ │ 
│ ├── NorthwindGroupByQueryFbTest.cs │ │ │ ├── NorthwindIncludeNoTrackingQueryFbTest.cs │ │ │ ├── NorthwindIncludeQueryFbTest.cs │ │ │ ├── NorthwindJoinQueryFbTest.cs │ │ │ ├── NorthwindKeylessEntitiesQueryFbTest.cs │ │ │ ├── NorthwindMiscellaneousQueryFbTest.cs │ │ │ ├── NorthwindNavigationsQueryFbTest.cs │ │ │ ├── NorthwindQueryFbFixture.cs │ │ │ ├── NorthwindQueryFiltersQueryFbTest.cs │ │ │ ├── NorthwindQueryTaggingQueryFbTest.cs │ │ │ ├── NorthwindSelectQueryFbTest.cs │ │ │ ├── NorthwindSetOperationsQueryFbTest.cs │ │ │ ├── NorthwindSplitIncludeNoTrackingQueryFbTest.cs │ │ │ ├── NorthwindSplitIncludeQueryFbTest.cs │ │ │ ├── NorthwindSqlQueryFbTest.cs │ │ │ ├── NorthwindStringIncludeQueryFbTest.cs │ │ │ ├── NorthwindWhereQueryFbTest.cs │ │ │ ├── NullKeysFbTest.cs │ │ │ ├── NullSemanticsQueryFbFixture.cs │ │ │ ├── NullSemanticsQueryFbTest.cs │ │ │ ├── OperatorsProceduralFbTest.cs │ │ │ ├── OperatorsQueryFbTest.cs │ │ │ ├── OptionalDependentQueryFbFixture.cs │ │ │ ├── OptionalDependentQueryFbTest.cs │ │ │ ├── OwnedEntityQueryFbTest.cs │ │ │ ├── OwnedQueryFbTest.cs │ │ │ ├── PrimitiveCollectionsQueryFbTest.cs │ │ │ ├── QueryFilterFuncletizationFbTest.cs │ │ │ ├── QueryNoClientEvalFbTest.cs │ │ │ ├── SharedTypeQueryFbTest.cs │ │ │ ├── SqlExecutorFbTest.cs │ │ │ ├── SqlQueryFbTest.cs │ │ │ ├── TPCFiltersInheritanceQueryFbFixture.cs │ │ │ ├── TPCFiltersInheritanceQueryFbTest.cs │ │ │ ├── TPCGearsOfWarQueryFbFixture.cs │ │ │ ├── TPCGearsOfWarQueryFbTest.cs │ │ │ ├── TPCInheritanceQueryFbFixture.cs │ │ │ ├── TPCInheritanceQueryFbFixtureBase.cs │ │ │ ├── TPCInheritanceQueryFbTest.cs │ │ │ ├── TPCInheritanceQueryFbTestBase.cs │ │ │ ├── TPCInheritanceQueryHiLoFbFixture.cs │ │ │ ├── TPCInheritanceQueryHiLoFbTest.cs │ │ │ ├── TPCManyToManyNoTrackingQueryFbTest.cs │ │ │ ├── TPCManyToManyQueryFbFixture.cs │ │ │ ├── TPCManyToManyQueryFbTest.cs │ │ │ ├── TPCRelationshipsQueryFbTest.cs │ │ │ ├── TPHFiltersInheritanceQueryFbFixture.cs │ │ │ ├── TPHFiltersInheritanceQueryFbTest.cs 
│ │ │ ├── TPHInheritanceQueryFbFixture.cs │ │ │ ├── TPHInheritanceQueryFbTest.cs │ │ │ ├── TPTFiltersInheritanceQueryFbFixture.cs │ │ │ ├── TPTFiltersInheritanceQueryFbTest.cs │ │ │ ├── TPTGearsOfWarQueryFbFixture.cs │ │ │ ├── TPTGearsOfWarQueryFbTest.cs │ │ │ ├── TPTInheritanceQueryFbFixture.cs │ │ │ ├── TPTInheritanceQueryFbTest.cs │ │ │ ├── TPTManyToManyNoTrackingQueryFbTest.cs │ │ │ ├── TPTManyToManyQueryFbFixture.cs │ │ │ ├── TPTManyToManyQueryFbTest.cs │ │ │ ├── TPTRelationshipsQueryFbTest.cs │ │ │ ├── ToSqlQueryFbTest.cs │ │ │ ├── Translations/ │ │ │ │ ├── BasicTypesQueryFbFixture.cs │ │ │ │ ├── ByteArrayTranslationsFbTest.cs │ │ │ │ ├── EnumTranslationsFbTest.cs │ │ │ │ ├── GuidTranslationsFbTest.cs │ │ │ │ ├── MathTranslationsFbTest.cs │ │ │ │ ├── MiscellaneousTranslationsFbTest.cs │ │ │ │ ├── Operators/ │ │ │ │ │ ├── ArithmeticOperatorTranslationsFbTest.cs │ │ │ │ │ ├── BitwiseOperatorTranslationsFbTest.cs │ │ │ │ │ ├── ComparisonOperatorTranslationsFbTest.cs │ │ │ │ │ ├── LogicalOperatorTranslationsFbTest.cs │ │ │ │ │ └── MiscellaneousOperatorTranslationsFbTest.cs │ │ │ │ ├── StringTranslationsFbTest.cs │ │ │ │ └── Temporal/ │ │ │ │ ├── DateOnlyTranslationsFbTest.cs │ │ │ │ ├── DateTimeOffsetTranslationsFbTest.cs │ │ │ │ ├── DateTimeTranslationsFbTest.cs │ │ │ │ ├── TimeOnlyTranslationsFbTest.cs │ │ │ │ └── TimeSpanTranslationsFbTest.cs │ │ │ ├── UdfDbFunctionFbTests.cs │ │ │ └── WarningsFbTest.cs │ │ ├── TestModels/ │ │ │ └── Northwind/ │ │ │ └── NorthwindFbContext.cs │ │ ├── TestUtilities/ │ │ │ ├── FbPrecompiledQueryTestHelpers.cs │ │ │ ├── FbTestHelpers.cs │ │ │ ├── FbTestStore.cs │ │ │ └── FbTestStoreFactory.cs │ │ └── UpdatesFbTest.cs │ ├── FirebirdSql.EntityFrameworkCore.Firebird.Tests/ │ │ ├── EndToEnd/ │ │ │ ├── DeleteTests.cs │ │ │ ├── InsertTests.cs │ │ │ └── UpdateTests.cs │ │ ├── EntityFrameworkCoreTestsBase.cs │ │ ├── FbTestDbContext.cs │ │ ├── FirebirdSql.EntityFrameworkCore.Firebird.Tests.csproj │ │ ├── Migrations/ │ │ │ └── 
MigrationsTests.cs │ │ ├── Query/ │ │ │ └── ElementaryTests.cs │ │ └── Scaffolding/ │ │ └── ScaffoldingTests.cs │ ├── NETProvider.slnx │ ├── Perf/ │ │ ├── CommandBenchmark.Execute.cs │ │ ├── CommandBenchmark.Fetch.cs │ │ ├── CommandBenchmark.cs │ │ ├── Perf.csproj │ │ └── Program.cs │ ├── Scratchpad/ │ │ ├── Program.cs │ │ └── Scratchpad.csproj │ └── Versions.props └── tests.ps1 ================================================ FILE CONTENTS ================================================ ================================================ FILE: .github/FUNDING.yml ================================================ github: cincuranet custom: https://firebirdsql.org/en/donate/ ================================================ FILE: .github/workflows/ci.yml ================================================ name: CI on: [push, pull_request] env: CONFIGURATION: Release DOTNET_SKIP_FIRST_TIME_EXPERIENCE: 1 jobs: ci: runs-on: windows-2025 strategy: fail-fast: false matrix: FIREBIRD_SELECTION: [FB30, FB40, FB50] TEST_SUITE: [Tests-FirebirdClient-Default-Compression-CryptRequired, Tests-FirebirdClient-Default-NoCompression-CryptRequired, Tests-FirebirdClient-Default-Compression-CryptDisabled, Tests-FirebirdClient-Default-NoCompression-CryptDisabled, Tests-FirebirdClient-Embedded, Tests-EFCore, Tests-EFCore-Functional, Tests-EF6] timeout-minutes: 120 steps: - name: Checkout uses: actions/checkout@v5 - name: .NET 10.0 uses: actions/setup-dotnet@v5 with: dotnet-version: 10.0.x - name: Build run: | try { .\build.ps1 -Configuration ${{ env.CONFIGURATION }} exit $LASTEXITCODE } catch { echo $_ exit 1 } shell: powershell - name: Tests run: | try { $env:tests_firebird_dir = 'C:\firebird' .\tests.ps1 -Configuration ${{ env.CONFIGURATION }} -FirebirdSelection ${{ matrix.FIREBIRD_SELECTION }} -TestSuite ${{ matrix.TEST_SUITE }} exit $LASTEXITCODE } catch { echo $_ exit 1 } shell: powershell - name: Publish Artifacts uses: actions/upload-artifact@v4 with: name: 'ci_${{ matrix.TEST_SUITE 
}}_${{ matrix.FIREBIRD_SELECTION }}_${{ env.CONFIGURATION }}' path: '.\\out\\' ================================================ FILE: .gitignore ================================================ *.suo *.user *.lock.json .vs bin obj out/ .idea ================================================ FILE: CONTRIBUTING.md ================================================ # Contributing Thanks for considering contributing. Here are some items to consider before starting (in no particular order). * Check [issues](https://github.com/FirebirdSQL/NETProvider/issues) if you want to start on something. * Before you start working on something announce your intention on the mailing list (or comment on the issue). * Issue only complete PRs (no WIPs). * Make sure your PR doesn't contain unnecessary changes (whitespaces, new lines, ...). * Follow existing code formatting/styling (even for new files). * Disclose AI usage. * Make sure you swept all corners (i.e. all build configurations are fine, works with all Firebird versions, etc.). * Make sure your changes merge without conflicts. * Consider backward compatibility. * Don't be afraid to ask (i.e. backward compatibility). * Be prepared to do some changes after your PR is reviewed. * Consider whether your change is a benefit for the majority of users (as opposed to only a small group). * All contributions are licensed under [_Initial Developer's Public License_](license.txt). 
================================================ FILE: README.md ================================================ # Firebird .NET Data Provider ## Documentation * [ADO.NET provider](docs/ado-net.md) * [Entity Framework 6 provider](docs/entity-framework-6.md) * [Entity Framework Core provider](docs/entity-framework-core.md) * [Services - Backup](docs/services-backup.md) * [Events](docs/events.md) * [ADO.NET - Schema](docs/ado-net-schema.md) * [Time zones](docs/time-zones.md) * [DECFLOAT datatype](docs/decfloat.md) * [INT128 datatype](docs/int128.md) * [Batching](docs/batching.md) ## Packages | NuGet | Version | Downloads | |-------|---------|-----------| | [FirebirdSql.Data.FirebirdClient](https://www.nuget.org/packages/FirebirdSql.Data.FirebirdClient) | ![FirebirdSql.Data.FirebirdClient](https://img.shields.io/nuget/v/FirebirdSql.Data.FirebirdClient.svg) | ![FirebirdSql.Data.FirebirdClient](https://img.shields.io/nuget/dt/FirebirdSql.Data.FirebirdClient.svg) | | [EntityFramework.Firebird](https://www.nuget.org/packages/EntityFramework.Firebird) | ![EntityFramework.Firebird](https://img.shields.io/nuget/v/EntityFramework.Firebird.svg) | ![EntityFramework.Firebird](https://img.shields.io/nuget/dt/EntityFramework.Firebird.svg) | | [FirebirdSql.EntityFrameworkCore.Firebird](https://www.nuget.org/packages/FirebirdSql.EntityFrameworkCore.Firebird) | ![FirebirdSql.EntityFrameworkCore.Firebird](https://img.shields.io/nuget/v/FirebirdSql.EntityFrameworkCore.Firebird.svg) | ![FirebirdSql.EntityFrameworkCore.Firebird](https://img.shields.io/nuget/dt/FirebirdSql.EntityFrameworkCore.Firebird.svg) | ## Resources * [Downloads](https://github.com/FirebirdSQL/NETProvider/releases) * [Issue tracker](https://github.com/FirebirdSQL/NETProvider/issues) * [Development mailing list](https://groups.google.com/forum/#!forum/firebird-net-provider) * [![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/FirebirdSQL/NETProvider) ## Builds [![GitHub 
Actions](https://github.com/FirebirdSQL/NETProvider/workflows/CI/badge.svg)](https://github.com/FirebirdSQL/NETProvider/actions) ## Misc ### Notable supporters * Sean Leyne (Broadview Software) * SMS-Timing ### 3rd party code * For zlib compression the provider uses pieces from [DotNetZip](http://dotnetzip.codeplex.com/) library. * For RC4 encryption the provider uses pieces from [Bouncy Castle](https://www.bouncycastle.org/csharp/index.html) library. ================================================ FILE: build.ps1 ================================================ param( [Parameter(Mandatory=$True)]$Configuration) $ErrorActionPreference = 'Stop' $baseDir = Split-Path -Parent $PSCommandPath . "$baseDir\include.ps1" $outDir = "$baseDir\out" $versionProvider = '' $versionEFCore = '' $versionEF6 = '' function Clean() { if (Test-Path $outDir) { rm -Force -Recurse $outDir } mkdir $outDir | Out-Null } function Build() { dotnet clean "$baseDir\src\NETProvider.slnx" -c $Configuration -v m dotnet build "$baseDir\src\NETProvider.slnx" -c $Configuration -p:ContinuousIntegrationBuild=true -v m } function Versions() { function v($file) { return (Get-Item $file).VersionInfo.ProductVersion -replace '(\d+)\.(\d+)\.(\d+)(-[a-z0-9]+)?.*','$1.$2.$3$4' } $script:versionProvider = v $baseDir\src\FirebirdSql.Data.FirebirdClient\bin\$Configuration\net10.0\FirebirdSql.Data.FirebirdClient.dll $script:versionEFCore = v $baseDir\src\FirebirdSql.EntityFrameworkCore.Firebird\bin\$Configuration\net10.0\FirebirdSql.EntityFrameworkCore.Firebird.dll $script:versionEF6 = v $baseDir\src\EntityFramework.Firebird\bin\$Configuration\net48\EntityFramework.Firebird.dll } function NuGets() { cp $baseDir\src\FirebirdSql.Data.FirebirdClient\bin\$Configuration\FirebirdSql.Data.FirebirdClient.$versionProvider.nupkg $outDir cp $baseDir\src\FirebirdSql.EntityFrameworkCore.Firebird\bin\$Configuration\FirebirdSql.EntityFrameworkCore.Firebird.$versionEFCore.nupkg $outDir cp 
$baseDir\src\EntityFramework.Firebird\bin\$Configuration\EntityFramework.Firebird.$versionEF6.nupkg $outDir cp $baseDir\src\FirebirdSql.Data.FirebirdClient\bin\$Configuration\FirebirdSql.Data.FirebirdClient.$versionProvider.snupkg $outDir cp $baseDir\src\FirebirdSql.EntityFrameworkCore.Firebird\bin\$Configuration\FirebirdSql.EntityFrameworkCore.Firebird.$versionEFCore.snupkg $outDir cp $baseDir\src\EntityFramework.Firebird\bin\$Configuration\EntityFramework.Firebird.$versionEF6.snupkg $outDir } Clean Build Versions NuGets ================================================ FILE: docs/ado-net-schema.md ================================================ # ADO.NET - Schema ### Steps * Install `FirebirdSql.Data.FirebirdClient` from NuGet. * Add `using FirebirdSql.Data.FirebirdClient;`. ### Code ```csharp using (var connection = new FbConnection("database=localhost:demo.fdb;user=sysdba;password=masterkey")) { connection.Open(); var metadataCollections = connection.GetSchema(); var dataTypes = connection.GetSchema(DbMetaDataCollectionNames.DataTypes); var dataSourceInformation = connection.GetSchema(DbMetaDataCollectionNames.DataSourceInformation); var reservedWords = connection.GetSchema(DbMetaDataCollectionNames.ReservedWords); var userTables = connection.GetSchema("Tables", new string[] { null, null, null, "TABLE" }); var systemTables = connection.GetSchema("Tables", new string[] { null, null, null, "SYSTEM TABLE" }); var tableColumns = connection.GetSchema("Columns", new string[] { null, null, "TableName" }); } ``` ================================================ FILE: docs/ado-net.md ================================================ # ADO.NET ### Steps * Install `FirebirdSql.Data.FirebirdClient` from NuGet. * Add `using FirebirdSql.Data.FirebirdClient;`. * Basic classes are `FbConnection`, `FbTransaction`, `FbCommand` and `FbDataReader`. * Connection string can be built using `FbConnectionStringBuilder`. 
### Code ```csharp using (var connection = new FbConnection("database=localhost:demo.fdb;user=sysdba;password=masterkey")) { connection.Open(); using (var transaction = connection.BeginTransaction()) { using (var command = new FbCommand("select * from demo", connection, transaction)) { using (var reader = command.ExecuteReader()) { while (reader.Read()) { var values = new object[reader.FieldCount]; reader.GetValues(values); Console.WriteLine(string.Join("|", values)); } } } } } ``` ### Scripts ```sql create table demo (id int primary key, foobar varchar(20) character set utf8); ``` ```sql insert into demo values (6, 'FooBar'); ``` ================================================ FILE: docs/batching.md ================================================ # Batching Batching is supported for Firebird 4 (and above). The work is handled by `FbBatchCommand` class. It has similar API surface as `FbCommand`. The usage should feel familiar. ### Specifics Calling the `ExecuteNonQuery`/`ExecuteNonQueryAsync` does not throw an exception, should the exception happen on server while processing the data. Instead the returned `FbBatchNonQueryResult` object should be used to check the status. The `EnsureSuccess` method or `AllSuccess` property can be used for global check. Further enumeration gives detailed information. Properties `MultiError`, `ReturnRecordsAffected` and `BatchBufferSize` allow for behavior fine-tuning (these represent `TAG_MULTIERROR`, `TAG_RECORD_COUNTS` and `TAG_BUFFER_BYTES_SIZE` in BPB). When dealing with huge batches of possible unlimited size, it's good to use `ComputeCurrentBatchSize`/`ComputeCurrentBatchSizeAsync` to make sure the batch is not over `BatchBufferSize`. However calling `ComputeCurrentBatchSize`/`ComputeCurrentBatchSizeAsync` is not cheap and should be handled accordingly. ### Limitations At the moment batching is not supported for Firebird Embedded. The progress is tracked [here](https://github.com/FirebirdSQL/NETProvider/issues/1022). 
Using (real) blobs as values is not supported. Regular `byte[]`, `string`, etc. values and the implicit conversions work just fine. The progress is tracked [here](https://github.com/FirebirdSQL/NETProvider/issues/1038). ### Examples Examples can be found in [`FbBatchCommandTests`](../src/FirebirdSql.Data.FirebirdClient.Tests/FbBatchCommandTests.cs). ================================================ FILE: docs/decfloat.md ================================================ # DECFLOAT datatype `DECFLOAT(16)` and `DECFLOAT(34)` from Firebird 4 are handled by `FbDecFloat` type. `FbDecFloat` can be used as parameter value for `FbParameter`. ### Examples Examples can be found in [`FbDecFloat16SupportTests`](../src/FirebirdSql.Data.FirebirdClient.Tests/FbDecFloat16SupportTests.cs) and [`FbDecFloat34SupportTests`](../src/FirebirdSql.Data.FirebirdClient.Tests/FbDecFloat34SupportTests.cs). ================================================ FILE: docs/entity-framework-6.md ================================================ # Entity Framework 6 ### Steps * Install `EntityFramework.Firebird` from NuGet. * Create `DbProviderFactories` record (see below). * Create configuration (see below). * Create your `DbContext`. * Firebird 2.5 and up is supported.
### Code ```csharp class Program { static void Main(string[] args) { using (var db = new MyContext("database=localhost:demo.fdb;user=sysdba;password=masterkey")) { db.Database.Log = Console.WriteLine; db.Demos.ToList(); } } } class MyContext : DbContext { public MyContext(string connectionString) : base(new FbConnection(connectionString), true) { } public DbSet Demos { get; set; } protected override void OnModelCreating(DbModelBuilder modelBuilder) { base.OnModelCreating(modelBuilder); modelBuilder.Properties() .Configure(x => x.HasColumnName(x.ClrPropertyInfo.Name.ToUpper())); var demoConf = modelBuilder.Entity(); demoConf.ToTable("DEMO"); } } class Demo { public int Id { get; set; } public string FooBar { get; set; } } ``` ### DbProviderFactories .NET Framework: ```xml ``` .NET Core/.NET 5+: ```csharp System.Data.Common.DbProviderFactories.RegisterFactory(FbProviderServices.ProviderInvariantName, FirebirdClientFactory.Instance); ``` ### Configuration .NET Framework: ```xml
``` .NET Core/.NET 5+: ```csharp public class Conf : DbConfiguration { public Conf() { SetProviderServices(FbProviderServices.ProviderInvariantName, FbProviderServices.Instance); } } ``` ### Scripts ```sql create table demo (id int primary key, foobar varchar(20) character set utf8); ``` ```sql insert into demo values (6, 'FooBar'); ``` ================================================ FILE: docs/entity-framework-core.md ================================================ # Entity Framework Core 8.x * Install `FirebirdSql.EntityFrameworkCore.Firebird` from NuGet. * Create your `DbContext`. * Call `UseFirebird` in `OnConfiguring`. * Firebird 3 and up is supported. ### Code ```csharp class Program { static void Main(string[] args) { using (var db = new MyContext("database=localhost:demo.fdb;user=sysdba;password=masterkey")) { db.Demos.ToList(); } } } class MyContext : DbContext { static readonly ILoggerFactory MyLoggerFactory = LoggerFactory.Create(builder => { builder.AddConsole(); }); readonly string _connectionString; public MyContext(string connectionString) { _connectionString = connectionString; } public DbSet Demos { get; set; } protected override void OnConfiguring(DbContextOptionsBuilder optionsBuilder) { base.OnConfiguring(optionsBuilder); optionsBuilder .UseLoggerFactory(MyLoggerFactory) .UseFirebird(_connectionString); } protected override void OnModelCreating(ModelBuilder modelBuilder) { base.OnModelCreating(modelBuilder); var demoConf = modelBuilder.Entity(); demoConf.Property(x => x.Id).HasColumnName("ID"); demoConf.Property(x => x.FooBar).HasColumnName("FOOBAR"); demoConf.ToTable("DEMO"); } } class Demo { public int Id { get; set; } public string FooBar { get; set; } } ``` ### Scripts ```sql create table demo (id int primary key, foobar varchar(20) character set utf8); ``` ```sql insert into demo values (6, 'FooBar'); ``` ================================================ FILE: docs/events.md ================================================ # Events ### 
Steps * Install `FirebirdSql.Data.FirebirdClient` from NuGet. * Add `using FirebirdSql.Data.FirebirdClient;`. ### Code ```csharp using (var events = new FbRemoteEvent("database=localhost:demo.fdb;user=sysdba;password=masterkey")) { events.RemoteEventCounts += (sender, e) => Console.WriteLine($"Event: {e.Name} | Counts: {e.Counts}"); events.RemoteEventError += (sender, e) => Console.WriteLine($"ERROR: {e.Error}"); events.QueueEvents("EVENT1", "EVENT2", "EVENT3", "EVENT4"); Console.WriteLine("Listening..."); Console.ReadLine(); } ``` ================================================ FILE: docs/int128.md ================================================ # INT128 datatype `INT128` from Firebird 4 is handled by regular .NET `BigInteger` type. `BigInteger` can be used as parameter value for `FbParameter`. ### Examples Examples can be found in [`FbInt128SupportTests`](../src/FirebirdSql.Data.FirebirdClient.Tests/FbInt128SupportTests.cs). ================================================ FILE: docs/services-backup.md ================================================ # Services - Backup ### Steps * Install `FirebirdSql.Data.FirebirdClient` from NuGet. * Add `using FirebirdSql.Data.Services;`. ### Code ```csharp var backup = new FbBackup("database=localhost:demo.fdb;user=sysdba;password=masterkey"); backup.BackupFiles.Add(new FbBackupFile(@"C:\backup.fbk")); //backup.Options = ... backup.Verbose = true; backup.ServiceOutput += (sender, e) => Console.WriteLine(e.Message); backup.Execute(); ``` ### More * `FbRestore` * `FbStreamingBackup` * `FbStreamingRestore` ================================================ FILE: docs/time-zones.md ================================================ # Time zones Time zones from Firebird 4 are handled by `FbZonedDateTime` and `FbZonedTime` types respectively. Given the lack of proper support for time zones in .NET (especially cross platform), these types provide the building blocks for developer to work with time zones using some library (i.e. 
_NodaTime_). Both `FbZonedDateTime` and `FbZonedTime` can be used as parameter value for `FbParameter`. ### Examples Examples can be found in [`FbTimeZonesSupportTests`](../src/FirebirdSql.Data.FirebirdClient.Tests/FbTimeZonesSupportTests.cs). ================================================ FILE: header.ps1 ================================================ # Rewrites every *.cs file under this directory so it starts with the
# canonical license header followed by its //$Authors marker line.
# NOTE(review): the extract collapsed the here-string's line breaks; the
# layout below is reconstructed to match the headers of the C# sources —
# confirm exact whitespace against the repository.
$LicenseHeader = @"
/*
 * The contents of this file are subject to the Initial
 * Developer's Public License Version 1.0 (the "License");
 * you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 * Software distributed under the License is distributed on
 * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 * express or implied. See the License for the specific
 * language governing rights and limitations under the License.
 *
 * All Rights Reserved.
 */
"@
$baseDir = Split-Path -Parent $PSCommandPath
# Process every C# source file in the tree.
gci $baseDir -Recurse -Filter *.cs | %{
	$content = gc $_.FullName -Encoding UTF8
	$newContent = @()
	$started = $false
	foreach ($line in $content) {
		# //$Authors marks the first "real" line of a source file; everything
		# above it (the old header) is dropped and the canonical header is
		# glued directly on top of the marker line.
		if ($line.StartsWith('//$Authors')) {
			$started = $true
			$line = $LicenseHeader + "`r`n`r`n" + $line
		}
		# Lines are only kept from the marker onward.
		if ($started) {
			$newContent += $line
		}
	}
	# Files without the marker are deliberately left untouched.
	if (!$started) {
		#echo $_.FullName
		return
	}
	sc $_.FullName $newContent -Encoding UTF8
} ================================================ FILE: include.ps1 ================================================ # Exits the script with the failing exit code when the last native command
# (e.g. dotnet) returned non-zero. Needed because $ErrorActionPreference
# does not cover native-command exit codes.
function Check-ExitCode() {
	$exitCode = $LASTEXITCODE
	if ($exitCode -ne 0) {
		echo "Non-zero ($exitCode) exit code. Exiting..."
		exit $exitCode
	}
} ================================================ FILE: license.txt ================================================ Initial Developer's Public License Version 1.0 1. Definitions 1.0 "Commercial Use" means distribution or otherwise making the Covered Code available to a third party.
1.1 "Contributor" means each entity that creates or contributes to the creation of Modifications. 1.2 "Contributor Version" means the combination of the Original Code, prior Modifications used by a Contributor, and the Modifications made by that particular Contributor. 1.3. "Covered Code" means the Original Code or Modifications or the combination of the Original Code and Modifications, in each case including portions thereof. 1.4. "Electronic Distribution Mechanism" means a mechanism generally accepted in the software development community for the electronic transfer of data. 1.5. "Executable" means Covered Code in any form other than Source Code. 1.6. "Initial Developer" means the individual or entity identified as the Initial Developer in the Source Code notice required by Exhibit A. 1.7. "Larger Work" means a work which combines Covered Code or portions thereof with code not governed by the terms of this License. 1.8. "License" means this document. 1.8.1. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently acquired, any and all of the rights conveyed herein. 1.9. "Modifications" means any addition to or deletion from the substance or structure of either the Original Code or any previous Modifications. When Covered Code is released as a series of files, a Modification is: Any addition to or deletion from the contents of a file containing Original Code or previous Modifications. Any new file that contains any part of the Original Code or previous Modifications. 1.10. "Original Code" means Source Code of computer software code which is described in the Source Code notice required by Exhibit A as Original Code, and which, at the time of its release under this License is not already Covered Code governed by this License. 1.10.1. 
"Patent Claims" means any patent claim(s), now owned or hereafter acquired, including without limitation, method, process, and apparatus claims, in any patent Licensable by grantor. 1.11. "Source Code" means the preferred form of the Covered Code for making modifications to it, including all modules it contains, plus any associated interface definition files, scripts used to control compilation and installation of an Executable, or source code differential comparisons against either the Original Code or another well known, available Covered Code of the Contributor's choice. The Source Code can be in a compressed or archival form, provided the appropriate decompression or de-archiving software is widely available for no charge. 1.12. "You" (or "Your") means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License or a future version of this License issued under Section 6.1. For legal entities, "You" includes any entity which controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. 2. Source Code License. 2.1. The Initial Developer Grant.
The Initial Developer hereby grants You a world-wide, royalty-free, non-exclusive license, subject to third party intellectual property claims: (a) under intellectual property rights (other than patent or trademark) Licensable by Initial Developer to use, reproduce, modify, display, perform, sublicense and distribute the Original Code (or portions thereof) with or without Modifications, and/or as part of a Larger Work; and (b) under Patents Claims infringed by the making, using or selling of Original Code, to make, have made, use, practice, sell, and offer for sale, and/or otherwise dispose of the Original Code (or portions thereof). (c) the licenses granted in this Section 2.1(a) and (b) are effective on the date Initial Developer first distributes Original Code under the terms of this License. d) Notwithstanding Section 2.1(b) above, no patent license is granted: 1) for code that You delete from the Original Code; 2) separate from the Original Code; or 3) for infringements caused by: i) the modification of the Original Code or ii) the combination of the Original Code with other software or devices. 2.2. Contributor Grant. 
Subject to third party intellectual property claims, each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license (a) under intellectual property rights (other than patent or trademark) Licensable by Contributor, to use, reproduce, modify, display, perform, sublicense and distribute the Modifications created by such Contributor (or portions thereof) either on an unmodified basis, with other Modifications, as Covered Code and/or as part of a Larger Work; and (b) under Patent Claims infringed by the making, using, or selling of Modifications made by that Contributor either alone and/or in combination with its Contributor Version (or portions of such combination), to make, use, sell, offer for sale, have made, and/or otherwise dispose of: 1) Modifications made by that Contributor (or portions thereof); and 2) the combination of Modifications made by that Contributor with its Contributor Version (or portions of such combination). (c) the licenses granted in Sections 2.2(a) and 2.2(b) are effective on the date Contributor first makes Commercial Use of the Covered Code. (d) Notwithstanding Section 2.2(b) above, no patent license is granted: 1) for any code that Contributor has deleted from the Contributor Version; 2) separate from the Contributor Version; 3) for infringements caused by: i) third party modifications of Contributor Version or ii) the combination of Modifications made by that Contributor with other software (except as part of the Contributor Version) or other devices; or 4) under Patent Claims infringed by Covered Code in the absence of Modifications made by that Contributor. 3. Distribution Obligations. 3.1. Application of License. The Modifications which You create or to which You contribute are governed by the terms of this License, including without limitation Section 2.2. 
The Source Code version of Covered Code may be distributed only under the terms of this License or a future version of this License released under Section 6.1, and You must include a copy of this License with every copy of the Source Code You distribute. You may not offer or impose any terms on any Source Code version that alters or restricts the applicable version of this License or the recipients' rights hereunder. However, You may include an additional document offering the additional rights described in Section 3.5. 3.2. Availability of Source Code. Any Modification which You create or to which You contribute must be made available in Source Code form under the terms of this License either on the same media as an Executable version or via an accepted Electronic Distribution Mechanism to anyone to whom you made an Executable version available; and if made available via Electronic Distribution Mechanism, must remain available for at least twelve (12) months after the date it initially became available, or at least six (6) months after a subsequent version of that particular Modification has been made available to such recipients. You are responsible for ensuring that the Source Code version remains available even if the Electronic Distribution Mechanism is maintained by a third party. 3.3. Description of Modifications. You must cause all Covered Code to which You contribute to contain a file documenting the changes You made to create that Covered Code and the date of any change. You must include a prominent statement that the Modification is derived, directly or indirectly, from Original Code provided by the Initial Developer and including the name of the Initial Developer in (a) the Source Code, and (b) in any notice in an Executable version or related documentation in which You describe the origin or ownership of the Covered Code. 3.4. Intellectual Property Matters a) Third Party Claims. 
If Contributor has knowledge that a license under a third party's intellectual property rights is required to exercise the rights granted by such Contributor under Sections 2.1 or 2.2, Contributor must include a text file with the Source Code distribution titled "LEGAL" which describes the claim and the party making the claim in sufficient detail that a recipient will know whom to contact. If Contributor obtains such knowledge after the Modification is made available as described in Section 3.2, Contributor shall promptly modify the LEGAL file in all copies Contributor makes available thereafter and shall take other steps (such as notifying appropriate mailing lists or newsgroups) reasonably calculated to inform those who received the Covered Code that new knowledge has been obtained. (b) Contributor APIs. If Contributor's Modifications include an application programming interface and Contributor has knowledge of patent licenses which are reasonably necessary to implement that API, Contributor must also include this information in the LEGAL file. (c) Representations. Contributor represents that, except as disclosed pursuant to Section 3.4(a) above, Contributor believes that Contributor's Modifications are Contributor's original creation(s) and/or Contributor has sufficient rights to grant the rights conveyed by this License. 3.5. Required Notices. You must duplicate the notice in Exhibit A in each file of the Source Code. If it is not possible to put such notice in a particular Source Code file due to its structure, then You must include such notice in a location (such as a relevant directory) where a user would be likely to look for such a notice. If You created one or more Modification(s) You may add your name as a Contributor to the notice described in Exhibit A. You must also duplicate this License in any documentation for the Source Code where You describe recipients' rights or ownership rights relating to Covered Code. 
You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Code. However, You may do so only on Your own behalf, and not on behalf of the Initial Developer or any Contributor. You must make it absolutely clear than any such warranty, support, indemnity or liability obligation is offered by You alone, and You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of warranty, support, indemnity or liability terms You offer. 3.6. Distribution of Executable Versions. You may distribute Covered Code in Executable form only if the requirements of Section 3.1-3.5 have been met for that Covered Code, and if You include a notice stating that the Source Code version of the Covered Code is available under the terms of this License, including a description of how and where You have fulfilled the obligations of Section 3.2. The notice must be conspicuously included in any notice in an Executable version, related documentation or collateral in which You describe recipients' rights relating to the Covered Code. You may distribute the Executable version of Covered Code or ownership rights under a license of Your choice, which may contain terms different from this License, provided that You are in compliance with the terms of this License and hat the license for the Executable version does not attempt to limit or alter the recipient's rights in the Source Code version from the rights set forth in this License. If You distribute the Executable version under a different license You must make it absolutely clear that any terms which differ from this License are offered by You alone, not by the Initial Developer or any Contributor. 
You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of any such terms You offer. 3.7. Larger Works. You may create a Larger Work by combining Covered Code with other code not governed by the terms of this License and distribute the Larger Work as a single product. In such a case, You must make sure the requirements of this License are fulfilled for the Covered Code. 4. Inability to Comply Due to Statute or Regulation. If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Code due to statute, judicial order, or regulation then You must: (a) comply with the terms of this License to the maximum extent possible; and (b) describe the limitations and the code they affect. Such description must be included in the LEGAL file described in Section 3.4 and must be included with all distributions of the Source Code. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it. 5. Application of this License. This License applies to code to which the Initial Developer has attached the notice in Exhibit A and to related Covered Code. 6. Versions of the License. 6.1. New Versions. The Initial Developer of this code may publish revised and/or new versions of the License from time to time. Each version will be given a distinguishing version number. 6.2. Effect of New Versions. Once Covered Code has been published under a particular version of the License, You may always continue to use it under the terms of that version. You may also choose to use such Covered Code under the terms of any subsequent version of the License published by the Initial Developer. No one other than the Initial Developer has the right to modify the terms applicable to Covered Code created under this License. 6.3. Derivative Works. 
If You create or use a modified version of this License (which you may only do in order to apply it to code which is not already Covered Code governed by this License), You must (a) rename Your license so that the phrases "Mozilla", "MOZILLAPL", "MOZPL", "Netscape", "MPL", "NPL", or any confusingly similar phrases do not appear in your license (except to note that your license differs from this License) and (b) otherwise make it clear that Your version of the license contains terms which differ from the Mozilla Public License and Netscape Public License. (Filling in the name of the Initial Developer, Original Code or Contributor in the notice described in Exhibit A shall not of themselves be deemed to be modifications of this License.) 6.4 Origin of the Initial Developer's Public License. The Initial Developer's Public License is based on the Mozilla Public License V 1.1 with the following changes: 1) The license is published by the Initial Developer of this code. Only the Initial Developer can modify the terms applicable to Covered Code. 2) The license can be modified and used for code which is not already governed by this license. Modified versions of the license must be renamed to avoid confusion with the Initial Developer's Public License and must include a description of changes from the Initial Developer's Public License. 3) The name of the license in Exhibit A is the "Initial Developer's Public License". 4) The reference to an alternative license in Exhibit A has been removed . 5) Amendments I, II, III, V, and VI have been deleted. 6) Exhibit A, Netscape Public License has been deleted 7. DISCLAIMER OF WARRANTY. COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE IS WITH YOU. 
SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. 8. TERMINATION. 8.1. This License and the rights granted hereunder will terminate automatically if You fail to comply with terms herein and fail to cure such breach within 30 days of becoming aware of the breach. All sublicenses to the Covered Code which are properly granted shall survive any termination of this License. Provisions which, by their nature, must remain in effect beyond the termination of this License shall survive. 8.2. If You initiate litigation by asserting a patent infringement claim (excluding declaratory judgment actions) against Initial Developer or a Contributor (the Initial Developer or Contributor against whom You file such action is referred to as "Participant") alleging that: (a) such Participant's Contributor Version directly or indirectly infringes any patent, then any and all rights granted by such Participant to You under Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice from Participant terminate prospectively, unless if within 60 days after receipt of notice You either: (i) agree in writing to pay Participant a mutually agreeable reasonable royalty for Your past and future use of Modifications made by such Participant, or (ii) withdraw Your litigation claim with respect to the Contributor Version against such Participant. If within 60 days of notice, a reasonable royalty and payment arrangement are not mutually agreed upon in writing by the parties or the litigation claim is not withdrawn, the rights granted by Participant to You under Sections 2.1 and/or 2.2 automatically terminate at the expiration of the 60 day notice period specified above.
(b) any software, hardware, or device, other than such Participant's Contributor Version, directly or indirectly infringes any patent, then any rights granted to You by such Participant under Sections 2.1(b) and 2.2(b) are revoked effective as of the date You first made, used, sold, distributed, or had made, Modifications made by that Participant. 8.3. If You assert a patent infringement claim against Participant alleging that such Participant's Contributor Version directly or indirectly infringes any patent where such claim is resolved (such as by license or settlement) prior to the initiation of patent infringement litigation, then the reasonable value of the licenses granted by such Participant under Sections 2.1 or 2.2 shall be taken into account in determining the amount or value of any payment or license. 8.4. In the event of termination under Sections 8.1 or 8.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or any distributor hereunder prior to termination shall survive termination. 9. LIMITATION OF LIABILITY. UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. 
SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU. 10. U.S. GOVERNMENT END USERS. The Covered Code is a "commercial item", as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer software" and "commercial computer software documentation", as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government End Users acquire Covered Code with only those rights set forth herein. 11. MISCELLANEOUS. This License represents the complete agreement concerning subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. This License shall be governed by California law provisions (except to the extent applicable law, if any, provides otherwise), excluding its conflict-of-law provisions. With respect to disputes in which at least one party is a citizen of, or an entity chartered or registered to do business in the United States of America, any litigation relating to this License shall be subject to the jurisdiction of the Federal Courts of the Northern District of California, with venue lying in Santa Clara County, California, with the losing party responsible for costs, including without limitation, court costs and reasonable attorneys' fees and expenses. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not apply to this License. 12. RESPONSIBILITY FOR CLAIMS. 
As between Initial Developer and the Contributors, each party is responsible for claims and damages arising, directly or indirectly, out of its utilization of rights under this License and You agree to work with Initial Developer and Contributors to distribute such responsibility on an equitable basis. Nothing herein is intended or shall be deemed to constitute any admission of liability. 13. MULTIPLE-LICENSED CODE. Initial Developer may designate portions of the Covered Code as "Multiple-Licensed". "Multiple-Licensed" means that the Initial Developer permits you to utilize portions of the Covered Code under Your choice of the IDPL or the alternative licenses, if any, specified by the Initial Developer in the file described in Exhibit A. EXHIBIT A -Initial Developer's Public License. The contents of this file are subject to the Initial Developer's Public License Version 1.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License from the Firebird Project website, at http://www.firebirdsql.org/en/initial-developer-s-public-license-version-1-0/ Software distributed under the License is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for the specific language governing rights and limitations under the License. The Original Code is ______________________________________. The Initial Developer of the Original Code is ________________________. Portions created by ______________________ are Copyright (C) ______ _______________________. All Rights Reserved. Contributor(s): ______________________________________.
================================================ FILE: src/.editorconfig ================================================ root = true [*] indent_style = tab indent_size = 4 trim_trailing_whitespace = true [*.cs] dotnet_sort_system_directives_first = true dotnet_separate_import_directive_groups = false csharp_style_implicit_object_creation_when_type_is_apparent = false csharp_prefer_simple_using_statement = false ================================================ FILE: src/Directory.Build.props ================================================ latest portable FirebirdSQL NETProvider FirebirdSQL http://www.firebirdsql.org/en/net-provider/ https://raw.githubusercontent.com/FirebirdSQL/NETProvider/master/firebird-logo.png firebird-logo.png https://github.com/FirebirdSQL/NETProvider git false 3.6 true snupkg license.txt 2026 true true all runtime; build; native; contentfiles; analyzers ================================================ FILE: src/EntityFramework.Firebird/App.config.install.xdt ================================================  ================================================ FILE: src/EntityFramework.Firebird/DefaultFbMigrationSqlGeneratorBehavior.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. 
*/ //$Authors = Jiri Cincura (jiri@cincura.net) using System.Collections.Generic; namespace EntityFramework.Firebird; public class DefaultFbMigrationSqlGeneratorBehavior : IFbMigrationSqlGeneratorBehavior { public virtual IEnumerable CreateIdentityForColumn(string columnName, string tableName) { var identitySequenceName = CreateIdentitySequenceName(columnName, tableName); using (var writer = FbMigrationSqlGenerator.SqlWriter()) { writer.WriteLine("EXECUTE BLOCK"); writer.WriteLine("AS"); writer.WriteLine("BEGIN"); writer.Indent++; writer.Write("if (not exists(select 1 from rdb$generators where rdb$generator_name = '"); writer.Write(identitySequenceName); writer.Write("')) then"); writer.WriteLine(); writer.WriteLine("begin"); writer.Indent++; writer.Write("execute statement 'create sequence "); writer.Write(identitySequenceName); writer.Write("';"); writer.WriteLine(); writer.Indent--; writer.WriteLine("end"); writer.Indent--; writer.Write("END"); yield return writer.ToString(); } using (var writer = FbMigrationSqlGenerator.SqlWriter()) { writer.Write("CREATE OR ALTER TRIGGER "); writer.Write(FbMigrationSqlGenerator.Quote(CreateTriggerName(columnName, tableName))); writer.Write(" ACTIVE BEFORE INSERT ON "); writer.Write(FbMigrationSqlGenerator.Quote(tableName)); writer.WriteLine(); writer.WriteLine("AS"); writer.WriteLine("BEGIN"); writer.Indent++; writer.Write("if (new."); writer.Write(FbMigrationSqlGenerator.Quote(columnName)); writer.Write(" is null) then"); writer.WriteLine(); writer.WriteLine("begin"); writer.Indent++; writer.Write("new."); writer.Write(FbMigrationSqlGenerator.Quote(columnName)); writer.Write(" = next value for "); writer.Write(identitySequenceName); writer.Write(";"); writer.WriteLine(); writer.Indent--; writer.WriteLine("end"); writer.Indent--; writer.Write("END"); yield return writer.ToString(); } } public virtual IEnumerable DropIdentityForColumn(string columnName, string tableName) { var triggerName = CreateTriggerName(columnName, 
tableName); using (var writer = FbMigrationSqlGenerator.SqlWriter()) { writer.WriteLine("EXECUTE BLOCK"); writer.WriteLine("AS"); writer.WriteLine("BEGIN"); writer.Indent++; writer.Write("if (exists(select 1 from rdb$triggers where rdb$trigger_name = '"); writer.Write(triggerName); writer.Write("')) then"); writer.WriteLine(); writer.WriteLine("begin"); writer.Indent++; writer.Write("execute statement 'drop trigger "); writer.Write(FbMigrationSqlGenerator.Quote(triggerName)); writer.Write("';"); writer.WriteLine(); writer.Indent--; writer.WriteLine("end"); writer.Indent--; writer.Write("END"); yield return writer.ToString(); } } protected virtual string CreateTriggerName(string columnName, string tableName) { return string.Format("ID_{0}_{1}", tableName, columnName); } protected virtual string CreateIdentitySequenceName(string columnName, string tableName) { return "GEN_IDENTITY"; } } ================================================ FILE: src/EntityFramework.Firebird/EntityFramework.Firebird.csproj ================================================  net48;netstandard2.1 EntityFramework.Firebird EntityFramework.Firebird true Properties\EntityFramework.Firebird.snk true true $(EF6ProviderVersion) NETProvider - Entity Framework Provider (c) 2014-$(CopyrightEndYear) EntityFramework.Firebird Firebird Entity Framework Provider The Entity Framework provider for Firebird enables you to develop .NET applications that connect to the Firebird database using Entity Framework. firebird firebirsql firebirdclient entityframewor adonet database EF6;TRACE EF6;DEBUG;TRACE ================================================ FILE: src/EntityFramework.Firebird/FbConnectionFactory.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. 
You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. */ //$Authors = Jiri Cincura (jiri@cincura.net) using System; using System.Configuration; using System.Data.Common; using System.Data.Entity.Infrastructure; using System.Linq; using FirebirdSql.Data.FirebirdClient; namespace EntityFramework.Firebird; public class FbConnectionFactory : IDbConnectionFactory { public DbConnection CreateConnection(string nameOrConnectionString) { if (nameOrConnectionString == null) throw new ArgumentNullException(nameof(nameOrConnectionString)); if (nameOrConnectionString.Contains('=')) { return new FbConnection(nameOrConnectionString); } else { var configuration = ConfigurationManager.ConnectionStrings[nameOrConnectionString]; if (configuration == null) throw new ArgumentException("Specified connection string name cannot be found."); return new FbConnection(configuration.ConnectionString); } } } ================================================ FILE: src/EntityFramework.Firebird/FbMigrationSqlGenerator.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. 
*/ //$Authors = Jiri Cincura (jiri@cincura.net) using System; using System.Collections.Generic; using System.Collections.ObjectModel; using System.Data.Common; using System.Data.Entity; using System.Data.Entity.Core.Common; using System.Data.Entity.Core.Common.CommandTrees; using System.Data.Entity.Core.Common.CommandTrees.ExpressionBuilder; using System.Data.Entity.Core.Metadata.Edm; using System.Data.Entity.Infrastructure.DependencyResolution; using System.Data.Entity.Migrations.Model; using System.Data.Entity.Migrations.Sql; using System.Globalization; using System.Linq; using System.Text; using System.Text.RegularExpressions; using EntityFramework.Firebird.SqlGen; using FirebirdSql.Data.Common; namespace EntityFramework.Firebird; public class FbMigrationSqlGenerator : MigrationSqlGenerator { readonly IFbMigrationSqlGeneratorBehavior _behavior; string _migrationsHistoryTableName; public FbMigrationSqlGenerator(IFbMigrationSqlGeneratorBehavior behavior = null) { _behavior = behavior ?? new DefaultFbMigrationSqlGeneratorBehavior(); } public override IEnumerable Generate(IEnumerable migrationOperations, string providerManifestToken) { InitializeProviderServices(providerManifestToken); var lastOperation = migrationOperations.Last(); var historyOperation = lastOperation is UpdateDatabaseOperation updateDatabaseOperation ? updateDatabaseOperation.Migrations.First().Operations.OfType().First() : lastOperation as HistoryOperation; if (historyOperation != null) { var modify = historyOperation.CommandTrees.First(); _migrationsHistoryTableName = ((DbScanExpression)modify.Target.Expression).Target.Table; } //This happens only and only if downgrading database to initial point (ie. 
reverting also Initial migration) else { var dropTableOperation = (DropTableOperation)lastOperation; //DropTableOperation for MigrationHistory-table _migrationsHistoryTableName = Regex.Replace(dropTableOperation.Name, @".+\.(.+)", "$1"); } return GenerateStatements(migrationOperations).ToArray(); } void InitializeProviderServices(string providerManifestToken) { using (var connection = CreateConnection()) { ProviderManifest = DbProviderServices.GetProviderServices(connection).GetProviderManifest(providerManifestToken); } } #region Operations protected IEnumerable Generate(MigrationOperation operation) { throw new NotSupportedException(string.Format("Unknown operation '{0}'.", operation.GetType().FullName)); } protected virtual IEnumerable Generate(UpdateDatabaseOperation operation) { return GenerateStatements(operation.Migrations.SelectMany(x => x.Operations)); } protected virtual IEnumerable Generate(SqlOperation operation) { yield return Statement(operation.Sql, operation.SuppressTransaction); } protected virtual IEnumerable Generate(AddColumnOperation operation) { var tableName = CheckName(ExtractName(operation.Table)); var column = operation.Column; if (column.IsNullable != null && !column.IsNullable.Value && column.DefaultValue == null && string.IsNullOrWhiteSpace(column.DefaultValueSql) && !column.IsIdentity && !column.IsTimestamp) { column.DefaultValue = column.ClrDefaultValue; } var columnData = Generate(column, tableName); using (var writer = SqlWriter()) { writer.Write("ALTER TABLE "); writer.Write(Quote(tableName)); writer.Write(" ADD "); writer.Write(columnData.Item1); yield return Statement(writer); } foreach (var item in columnData.Item2.Select(x => Statement(x))) yield return item; } protected virtual IEnumerable Generate(AddForeignKeyOperation operation) { using (var writer = SqlWriter()) { writer.Write("ALTER TABLE "); writer.Write(Quote(CheckName(ExtractName(operation.DependentTable)))); writer.Write(" ADD CONSTRAINT "); 
writer.Write(Quote(CheckName(CreateItemName(operation.Name)))); writer.Write(" FOREIGN KEY ("); WriteColumns(writer, operation.DependentColumns.Select(Quote)); writer.Write(") REFERENCES "); writer.Write(Quote(CheckName(ExtractName(operation.PrincipalTable)))); writer.Write(" ("); WriteColumns(writer, operation.PrincipalColumns.Select(Quote)); writer.Write(")"); if (operation.CascadeDelete) { writer.Write(" ON DELETE CASCADE"); } yield return Statement(writer); } } protected virtual IEnumerable Generate(AddPrimaryKeyOperation operation) { using (var writer = SqlWriter()) { writer.Write("ALTER TABLE "); writer.Write(Quote(CheckName(ExtractName(operation.Table)))); writer.Write(" ADD CONSTRAINT "); writer.Write(Quote(CheckName(CreateItemName(operation.Name)))); writer.Write(" PRIMARY KEY ("); WriteColumns(writer, operation.Columns.Select(Quote)); writer.Write(")"); yield return Statement(writer); } } protected virtual IEnumerable Generate(AlterColumnOperation operation) { var column = operation.Column; var tableName = CheckName(ExtractName(operation.Table)); var columnName = CheckName(column.Name); // drop NOT NULL first, either it will be recreated or it was to drop using (var writer = SqlWriter()) { writer.WriteLine("EXECUTE BLOCK"); writer.WriteLine("AS"); writer.WriteLine("declare constraint_name type of column rdb$relation_constraints.rdb$constraint_name;"); writer.WriteLine("BEGIN"); writer.Indent++; writer.WriteLine("select rc.rdb$constraint_name"); writer.WriteLine("from rdb$relation_constraints rc"); writer.WriteLine("join rdb$check_constraints cc on rc.rdb$constraint_name = cc.rdb$constraint_name"); writer.Write("where rc.rdb$constraint_type = 'NOT NULL' and rc.rdb$relation_name = '"); writer.Write(tableName); writer.Write("' and cc.rdb$trigger_name = '"); writer.Write(columnName); writer.Write("'"); writer.WriteLine(); writer.WriteLine("into :constraint_name;"); writer.WriteLine("if (constraint_name is not null) then"); writer.WriteLine("begin"); 
writer.Indent++; writer.Write("execute statement 'alter table "); writer.Write(Quote(tableName)); writer.Write(" drop constraint ' || :constraint_name;"); writer.WriteLine(); writer.Indent--; writer.WriteLine("end"); writer.Indent--; writer.Write("END"); yield return Statement(writer); } // drop identity trigger first, either it will be recreated or it was to drop foreach (var item in _behavior.DropIdentityForColumn(columnName, tableName)) yield return Statement(item); using (var writer = SqlWriter()) { writer.Write("ALTER TABLE "); writer.Write(Quote(tableName)); writer.Write(" ALTER COLUMN "); writer.Write(Quote(columnName)); writer.Write(" TYPE "); writer.Write(BuildPropertyType(column)); // possible NOT NULL drop was dropped with statement above if (column.IsNullable != null && !column.IsNullable.Value) { writer.Write(" NOT NULL"); } if (column.Type == PrimitiveTypeKind.Boolean) { writer.Write(" CHECK("); writer.Write(Quote(columnName)); writer.Write(" IN (0,1))"); } yield return Statement(writer); } if (column.DefaultValue != null || !string.IsNullOrWhiteSpace(column.DefaultValueSql)) { using (var writer = SqlWriter()) { writer.Write("ALTER TABLE "); writer.Write(Quote(tableName)); writer.Write(" ALTER COLUMN "); writer.Write(Quote(columnName)); writer.Write(" DROP DEFAULT"); yield return Statement(writer); } using (var writer = SqlWriter()) { writer.Write("ALTER TABLE "); writer.Write(Quote(tableName)); writer.Write(" ALTER COLUMN "); writer.Write(Quote(columnName)); writer.Write(" SET DEFAULT "); writer.Write(column.DefaultValue != null ? 
WriteValue((dynamic)column.DefaultValue) : column.DefaultValueSql); yield return Statement(writer); } } if (column.IsIdentity) { // possible identity drop was dropped with statement above foreach (var item in _behavior.CreateIdentityForColumn(columnName, tableName)) yield return Statement(item); } } protected virtual IEnumerable Generate(AlterProcedureOperation operation) { return Generate(operation, "ALTER"); } protected virtual IEnumerable Generate(AlterTableOperation operation) { // Nothing to do since there is no inherent semantics associated with annotations yield break; } protected virtual IEnumerable Generate(CreateIndexOperation operation) { using (var writer = SqlWriter()) { writer.Write("CREATE "); if (operation.IsUnique) { writer.Write("UNIQUE "); } writer.Write("INDEX "); writer.Write(Quote(CheckName(CreateItemName(BuildIndexName(operation))))); writer.Write(" ON "); writer.Write(Quote(CheckName(ExtractName(operation.Table)))); writer.Write("("); WriteColumns(writer, operation.Columns.Select(Quote)); writer.Write(")"); yield return Statement(writer); } } protected virtual IEnumerable Generate(CreateProcedureOperation operation) { return Generate(operation, "CREATE"); } protected virtual IEnumerable Generate(CreateTableOperation operation) { var tableName = CheckName(ExtractName(operation.Name)); var isMigrationsHistoryTable = tableName.Equals(_migrationsHistoryTableName, StringComparison.InvariantCulture); var columnsData = operation.Columns.Select(x => Generate(x, tableName)).ToArray(); using (var writer = SqlWriter()) { if (isMigrationsHistoryTable) { writer.WriteLine("EXECUTE BLOCK"); writer.WriteLine("AS"); writer.WriteLine("BEGIN"); writer.Indent++; writer.WriteLine("EXECUTE STATEMENT"); writer.Indent++; writer.Write("'"); } writer.Write("CREATE TABLE "); writer.Write(Quote(tableName)); writer.Write(" ("); writer.WriteLine(); writer.Indent++; WriteColumns(writer, columnsData.Select(x => x.Item1), true); writer.Indent--; writer.WriteLine(); 
writer.Write(")"); if (isMigrationsHistoryTable) { writer.WriteLine("'"); writer.Indent--; writer.WriteLine("WITH AUTONOMOUS TRANSACTION;"); writer.Indent--; writer.Write("END"); } yield return Statement(writer); } if (operation.PrimaryKey != null) { foreach (var item in Generate(operation.PrimaryKey)) yield return item; } foreach (var item in columnsData.SelectMany(x => x.Item2).Select(x => Statement(x))) yield return item; } protected virtual IEnumerable Generate(DropColumnOperation operation) { using (var writer = SqlWriter()) { writer.Write("ALTER TABLE "); writer.Write(Quote(CheckName(ExtractName(operation.Table)))); writer.Write(" DROP "); writer.Write(Quote(CheckName(operation.Name))); yield return Statement(writer); } } protected virtual IEnumerable Generate(DropForeignKeyOperation operation) { using (var writer = SqlWriter()) { writer.Write("ALTER TABLE "); writer.Write(Quote(CheckName(ExtractName(operation.DependentTable)))); writer.Write(" DROP CONSTRAINT "); writer.Write(Quote(CheckName(CreateItemName(operation.Name)))); yield return Statement(writer); } } protected virtual IEnumerable Generate(DropIndexOperation operation) { using (var writer = SqlWriter()) { writer.Write("DROP INDEX "); writer.Write(Quote(CheckName(CreateItemName(BuildIndexName(operation))))); yield return Statement(writer); } } protected virtual IEnumerable Generate(DropPrimaryKeyOperation operation) { using (var writer = SqlWriter()) { writer.Write("ALTER TABLE "); writer.Write(Quote(CheckName(ExtractName(operation.Table)))); writer.Write(" DROP CONSTRAINT "); writer.Write(Quote(CheckName(CreateItemName(operation.Name)))); yield return Statement(writer); } } protected virtual IEnumerable Generate(DropProcedureOperation operation) { using (var writer = SqlWriter()) { writer.Write("DROP PROCEDURE "); writer.Write(Quote(CheckName(ExtractName(operation.Name)))); yield return Statement(writer); } } protected virtual IEnumerable Generate(DropTableOperation operation) { using (var writer = 
SqlWriter()) { writer.Write("DROP TABLE "); writer.Write(Quote(CheckName(ExtractName(operation.Name)))); yield return Statement(writer); } } protected virtual IEnumerable Generate(MoveProcedureOperation operation) { throw new NotSupportedException("Moving procedure is not supported by Firebird."); } protected virtual IEnumerable Generate(MoveTableOperation operation) { throw new NotSupportedException("Moving table is not supported by Firebird."); } protected virtual IEnumerable Generate(RenameColumnOperation operation) { using (var writer = SqlWriter()) { writer.Write("ALTER TABLE "); writer.Write(Quote(CheckName(ExtractName(operation.Table)))); writer.Write(" ALTER COLUMN "); writer.Write(Quote(CheckName(operation.Name))); writer.Write(" TO "); writer.Write(Quote(CheckName(operation.NewName))); yield return Statement(writer); } } protected virtual IEnumerable Generate(RenameIndexOperation operation) { throw new NotSupportedException("Renaming index is not supported by Firebird."); } protected virtual IEnumerable Generate(RenameProcedureOperation operation) { throw new NotSupportedException("Renaming procedure is not supported by Firebird."); } protected virtual IEnumerable Generate(RenameTableOperation operation) { throw new NotSupportedException("Renaming table is not supported by Firebird."); } protected virtual IEnumerable Generate(HistoryOperation operation) { foreach (var commandTree in operation.CommandTrees) { List _; switch (commandTree.CommandTreeKind) { case DbCommandTreeKind.Insert: const int MigrationIdColumn = 0; const int ContextKeyColumn = 1; const int ModelColumn = 2; const int VersionColumn = 3; const int MaxChunkLength = 32000; var dbInsert = (DbInsertCommandTree)commandTree; var modelData = ((dbInsert.SetClauses[ModelColumn] as DbSetClause).Value as DbConstantExpression).Value as byte[]; // If model length is less than max value, stick to original version if (modelData.Length < MaxChunkLength) { using (var writer = SqlWriter()) { 
writer.Write(DmlSqlGenerator.GenerateInsertSql(dbInsert, out _, generateParameters: false)); yield return Statement(writer); } } else { // If it's bigger - we split it into chunks, as big as possible var dataChunks = modelData.Split(MaxChunkLength); // We can't change CommandTree, but we can create new one, only difference being data length using (var writer = SqlWriter()) { var setClauses = new ReadOnlyCollection( new List { dbInsert.SetClauses[MigrationIdColumn], dbInsert.SetClauses[ContextKeyColumn], DbExpressionBuilder.SetClause( ((DbSetClause)dbInsert.SetClauses[ModelColumn]).Property, dataChunks.ElementAt(0).ToArray() ), dbInsert.SetClauses[VersionColumn], }); var newCommandTree = new DbInsertCommandTree(dbInsert.MetadataWorkspace, commandTree.DataSpace, dbInsert.Target, setClauses, dbInsert.Returning); writer.Write(DmlSqlGenerator.GenerateInsertSql(newCommandTree, out _, generateParameters: false)); yield return Statement(writer); } // Now we have first Insert, let's update it with chunks of remaing data foreach (var dataChunk in dataChunks.Skip(1)) { using (var writer = SqlWriter()) { var modelProperty = (dbInsert.SetClauses[ModelColumn] as DbSetClause).Property as DbPropertyExpression; var modificationClauses = new List { // Updating existing chunk of data with subsequent part DbExpressionBuilder.SetClause( modelProperty, // TODO: Better solution required // Best if we could use DbExpression.Concat, but it returns DbFunctionExpression, which is not supported // Here we'll get SET Model = 'data', which we can update as text later dataChunk.ToArray() ) }.AsReadOnly(); var updateCommandTree = new DbUpdateCommandTree(dbInsert.MetadataWorkspace, dbInsert.DataSpace, dbInsert.Target, // Predicate is MigrationId value DbExpressionBuilder.Equal( ((DbSetClause)dbInsert.SetClauses[MigrationIdColumn]).Property, ((DbSetClause)dbInsert.SetClauses[MigrationIdColumn]).Value), modificationClauses, dbInsert.Returning); 
writer.Write(DmlSqlGenerator.GenerateUpdateSql(updateCommandTree, out _, generateParameters: false)); // Since we couldn't concat before, replacing query as string // Replacing SET Model = 'data' // with SET Model = Model || 'data' // Model being first is important, since these are parts of single value var statement = writer.ToString(); var newStatement = statement.Replace($"SET \"{modelProperty.Property.Name}\" = ", $"SET \"{modelProperty.Property.Name}\" = \"{modelProperty.Property.Name}\" || "); yield return Statement(newStatement); } } } break; case DbCommandTreeKind.Delete: using (var writer = SqlWriter()) { writer.Write(DmlSqlGenerator.GenerateDeleteSql((DbDeleteCommandTree)commandTree, out _, generateParameters: false)); yield return Statement(writer); } break; } } } protected virtual IEnumerable Generate(ProcedureOperation operation, string action) { using (var writer = SqlWriter()) { var inputParameters = operation.Parameters.Where(x => !x.IsOutParameter).ToArray(); var outputParameters = operation.Parameters.Where(x => x.IsOutParameter).ToArray(); writer.Write(action); writer.Write(" PROCEDURE "); writer.Write(Quote(CheckName(ExtractName(operation.Name)))); if (inputParameters.Any()) { writer.Write(" ("); writer.WriteLine(); writer.Indent++; WriteColumns(writer, inputParameters.Select(Generate), true); writer.Indent--; writer.WriteLine(); writer.Write(")"); } if (outputParameters.Any()) { writer.WriteLine(); writer.Write("RETURNS ("); writer.WriteLine(); writer.Indent++; WriteColumns(writer, outputParameters.Select(Generate), true); writer.Indent--; writer.WriteLine(); writer.Write(")"); } writer.WriteLine(); writer.WriteLine("AS"); writer.WriteLine("BEGIN"); writer.Write(operation.BodySql); writer.WriteLine(); writer.Write("END"); yield return Statement(writer); } } protected (string, IEnumerable) Generate(ColumnModel column, string tableName) { var builder = new StringBuilder(); var additionalCommands = new List(); var columnName = 
CheckName(column.Name); var columnType = BuildPropertyType(column); builder.Append(Quote(columnName)); builder.Append(" "); builder.Append(columnType); if (column.DefaultValue != null) { builder.Append(" DEFAULT "); builder.Append(WriteValue((dynamic)column.DefaultValue)); } else if (!string.IsNullOrWhiteSpace(column.DefaultValueSql)) { builder.Append(" DEFAULT "); builder.Append(column.DefaultValueSql); } if ((column.IsNullable != null) && !column.IsNullable.Value) { builder.Append(" NOT NULL"); } if (column.Type == PrimitiveTypeKind.Boolean) { builder.Append(" CHECK("); builder.Append(Quote(columnName)); builder.Append(" IN (0,1))"); } if (column.IsIdentity) { var identity = _behavior.CreateIdentityForColumn(columnName, tableName); additionalCommands.AddRange(identity.Where(x => !string.IsNullOrWhiteSpace(x))); } return (builder.ToString(), additionalCommands); } protected string Generate(ParameterModel parameter) { var builder = new StringBuilder(); builder.Append(Quote(CheckName(parameter.Name))); builder.Append(" "); builder.Append(BuildPropertyType(parameter)); return builder.ToString(); } #endregion #region Helpers static MigrationStatement Statement(SqlWriter sqlWriter, bool suppressTransaction = false) { return Statement(sqlWriter.ToString(), suppressTransaction); } protected static MigrationStatement Statement(string sql, bool suppressTransaction = false) { return new MigrationStatement { Sql = sql, SuppressTransaction = suppressTransaction, BatchTerminator = ";", }; } protected static string WriteValue(object value) { return string.Format(CultureInfo.InvariantCulture, "{0}", value); } protected static string WriteValue(DateTime value) { return SqlGenerator.FormatDateTime(value); } protected static string WriteValue(byte[] value) { return SqlGenerator.FormatBinary(value); } protected static string WriteValue(bool value) { return SqlGenerator.FormatBoolean(value); } protected static string WriteValue(Guid value) { return SqlGenerator.FormatGuid(value); } 
protected static string WriteValue(string value) { return SqlGenerator.FormatString(value, true); } protected static string WriteValue(TimeSpan value) { return SqlGenerator.FormatTime(value); } protected internal static string Quote(string name) { return SqlGenerator.QuoteIdentifier(name); } internal static SqlWriter SqlWriter() { var result = new SqlWriter(new StringBuilder()); result.Indent++; return result; } string BuildPropertyType(PropertyModel propertyModel) { var storeTypeName = propertyModel.StoreType; var typeUsage = ProviderManifest.GetStoreType(propertyModel.TypeUsage); if (!string.IsNullOrWhiteSpace(storeTypeName)) { typeUsage = BuildStoreTypeUsage(storeTypeName, propertyModel) ?? typeUsage; } return SqlGenerator.GetSqlPrimitiveType(typeUsage); } static string BuildIndexName(IndexOperation indexOperation) { return !indexOperation.HasDefaultName ? indexOperation.Name : IndexOperation.BuildDefaultName(new[] { ExtractName(indexOperation.Table) }.Concat(indexOperation.Columns)); } static string ExtractName(string name) { return name.Substring(name.LastIndexOf('.') + 1); } static string CreateItemName(string name) { while (true) { var match = Regex.Match(name, @"^(?.+_)[^.]+\.(?.+)$"); if (!match.Success) break; name = match.Result("${prefix}${suffix}"); } return name; } static string CheckName(string name) { const int LengthLimit = 31; if (name.Length > LengthLimit) throw new ArgumentOutOfRangeException($"The name '{name}' is longer than Firebird's {LengthLimit} characters limit for object names."); return name; } static void WriteColumns(SqlWriter writer, IEnumerable columns, bool separateLines = false) { var separator = (string)null; foreach (var column in columns) { if (separator != null) { writer.Write(separator); if (separateLines) writer.WriteLine(); } writer.Write(column); separator = ", "; } } static DbConnection CreateConnection() { return DbConfiguration.DependencyResolver.GetService(FbProviderServices.ProviderInvariantName).CreateConnection(); } 
IEnumerable GenerateStatements(IEnumerable operations) { return operations.Select>(x => Generate(x)).SelectMany(x => x); } #endregion } ================================================ FILE: src/EntityFramework.Firebird/FbMigrationsTransactionsInterceptor.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. */ //$Authors = Jiri Cincura (jiri@cincura.net) using System; using System.Data; using System.Data.Common; using System.Data.Entity.Infrastructure.Interception; using System.Diagnostics; using System.Linq; using FirebirdSql.Data.FirebirdClient; namespace EntityFramework.Firebird; // Migrations are executed in Serializable transaction. Because of my "AUTONOMOUS TRANSACTION" usage // I better use ReadCommitted. Here I plug in, in case of Migrations. class FbMigrationsTransactionsInterceptor : IDbConnectionInterceptor { public void BeginningTransaction(DbConnection connection, BeginTransactionInterceptionContext interceptionContext) { if (connection is FbConnection && interceptionContext.IsolationLevel == IsolationLevel.Serializable && IsInMigrations()) { interceptionContext.Result = connection.BeginTransaction(IsolationLevel.ReadCommitted); } } public static bool IsInMigrations() { var stackTrace = new StackTrace(false); return stackTrace.GetFrames().Any(f => f.GetMethod().ReflectedType?.Namespace.Equals("System.Data.Entity.Migrations", StringComparison.Ordinal) ?? 
false); } public void BeganTransaction(DbConnection connection, BeginTransactionInterceptionContext interceptionContext) { } public void Closed(DbConnection connection, DbConnectionInterceptionContext interceptionContext) { } public void Closing(DbConnection connection, DbConnectionInterceptionContext interceptionContext) { } public void ConnectionStringGetting(DbConnection connection, DbConnectionInterceptionContext interceptionContext) { } public void ConnectionStringGot(DbConnection connection, DbConnectionInterceptionContext interceptionContext) { } public void ConnectionStringSet(DbConnection connection, DbConnectionPropertyInterceptionContext interceptionContext) { } public void ConnectionStringSetting(DbConnection connection, DbConnectionPropertyInterceptionContext interceptionContext) { } public void ConnectionTimeoutGetting(DbConnection connection, DbConnectionInterceptionContext interceptionContext) { } public void ConnectionTimeoutGot(DbConnection connection, DbConnectionInterceptionContext interceptionContext) { } public void DataSourceGetting(DbConnection connection, DbConnectionInterceptionContext interceptionContext) { } public void DataSourceGot(DbConnection connection, DbConnectionInterceptionContext interceptionContext) { } public void DatabaseGetting(DbConnection connection, DbConnectionInterceptionContext interceptionContext) { } public void DatabaseGot(DbConnection connection, DbConnectionInterceptionContext interceptionContext) { } public void Disposed(DbConnection connection, DbConnectionInterceptionContext interceptionContext) { } public void Disposing(DbConnection connection, DbConnectionInterceptionContext interceptionContext) { } public void EnlistedTransaction(DbConnection connection, EnlistTransactionInterceptionContext interceptionContext) { } public void EnlistingTransaction(DbConnection connection, EnlistTransactionInterceptionContext interceptionContext) { } public void Opened(DbConnection connection, DbConnectionInterceptionContext 
interceptionContext) { } public void Opening(DbConnection connection, DbConnectionInterceptionContext interceptionContext) { } public void ServerVersionGetting(DbConnection connection, DbConnectionInterceptionContext interceptionContext) { } public void ServerVersionGot(DbConnection connection, DbConnectionInterceptionContext interceptionContext) { } public void StateGetting(DbConnection connection, DbConnectionInterceptionContext interceptionContext) { } public void StateGot(DbConnection connection, DbConnectionInterceptionContext interceptionContext) { } } ================================================ FILE: src/EntityFramework.Firebird/FbProviderManifest.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. 
*/ //$Authors = Jiri Cincura (jiri@cincura.net) using System; using System.Data.Entity.Core; using System.Data.Entity.Core.Common; using System.Data.Entity.Core.Metadata.Edm; using System.Diagnostics; using System.Reflection; using System.Text; using System.Xml; namespace EntityFramework.Firebird; public class FbProviderManifest : DbXmlEnabledProviderManifest { #region Private Fields internal const int BinaryMaxSize = Int32.MaxValue; internal const int AsciiVarcharMaxSize = 32765; internal const int UnicodeVarcharMaxSize = AsciiVarcharMaxSize / 4; internal const char LikeEscapeCharacter = '\\'; private System.Collections.ObjectModel.ReadOnlyCollection _primitiveTypes = null; private System.Collections.ObjectModel.ReadOnlyCollection _functions = null; #endregion #region Constructors /// /// Constructor /// /// A token used to infer the capabilities of the store public FbProviderManifest(string manifestToken) : base(FbProviderManifest.GetProviderManifest()) { } #endregion #region Properties #endregion internal static XmlReader GetProviderManifest() { return GetXmlResource(GetManifestResourceName()); } /// /// Providers should override this to return information specific to their provider. /// /// This method should never return null. /// /// The name of the information to be retrieved. /// An XmlReader at the begining of the information requested. 
protected override XmlReader GetDbInformation(string informationType) { if (informationType == StoreSchemaDefinition || informationType == StoreSchemaDefinitionVersion3) { return GetStoreSchemaDescription(informationType); } if (informationType == StoreSchemaMapping || informationType == StoreSchemaMappingVersion3) { return GetStoreSchemaMapping(informationType); } if (informationType == ConceptualSchemaDefinition || informationType == ConceptualSchemaDefinitionVersion3) { return null; } throw new ProviderIncompatibleException(string.Format("The provider returned null for the informationType '{0}'.", informationType)); } public override System.Collections.ObjectModel.ReadOnlyCollection GetStoreTypes() { if (_primitiveTypes == null) { _primitiveTypes = base.GetStoreTypes(); } return _primitiveTypes; } public override System.Collections.ObjectModel.ReadOnlyCollection GetStoreFunctions() { if (_functions == null) { _functions = base.GetStoreFunctions(); } return _functions; } /// /// This method takes a type and a set of facets and returns the best mapped equivalent type /// in EDM. 
/// /// A TypeUsage encapsulating a store type and a set of facets /// A TypeUsage encapsulating an EDM type and a set of facets public override TypeUsage GetEdmType(TypeUsage storeType) { if (storeType == null) { throw new ArgumentNullException("storeType"); } var storeTypeName = storeType.EdmType.Name.ToLowerInvariant(); if (!StoreTypeNameToEdmPrimitiveType.ContainsKey(storeTypeName)) { throw new ArgumentException(string.Format("The underlying provider does not support the type '{0}'.", storeTypeName)); } var edmPrimitiveType = base.StoreTypeNameToEdmPrimitiveType[storeTypeName]; var maxLength = 0; var isUnicode = true; var isFixedLen = false; var isUnbounded = true; PrimitiveTypeKind newPrimitiveTypeKind; switch (storeTypeName) { // for some types we just go with simple type usage with no facets case "smallint": case "int": case "bigint": case "smallint_bool": case "float": case "double": case "guid": return TypeUsage.CreateDefaultTypeUsage(edmPrimitiveType); case "decimal": case "numeric": if (TypeHelpers.TryGetPrecision(storeType, out var precision) && TypeHelpers.TryGetScale(storeType, out var scale)) { return TypeUsage.CreateDecimalTypeUsage(edmPrimitiveType, precision, scale); } else { return TypeUsage.CreateDecimalTypeUsage(edmPrimitiveType); } case "varchar": newPrimitiveTypeKind = PrimitiveTypeKind.String; isUnbounded = !TypeHelpers.TryGetMaxLength(storeType, out maxLength); isUnicode = true; //hardcoded isFixedLen = false; break; case "char": newPrimitiveTypeKind = PrimitiveTypeKind.String; isUnbounded = !TypeHelpers.TryGetMaxLength(storeType, out maxLength); isUnicode = true; //hardcoded isFixedLen = true; break; case "timestamp": return TypeUsage.CreateDateTimeTypeUsage(edmPrimitiveType, null); case "date": return TypeUsage.CreateDateTimeTypeUsage(edmPrimitiveType, null); case "time": return TypeUsage.CreateTimeTypeUsage(edmPrimitiveType, null); case "blob": newPrimitiveTypeKind = PrimitiveTypeKind.Binary; isUnbounded = true; isFixedLen = false; 
break; case "clob": newPrimitiveTypeKind = PrimitiveTypeKind.String; isUnbounded = true; isUnicode = true; //hardcoded isFixedLen = false; break; default: throw new NotSupportedException(string.Format("The underlying provider does not support the type '{0}'.", storeTypeName)); } Debug.Assert(newPrimitiveTypeKind == PrimitiveTypeKind.String || newPrimitiveTypeKind == PrimitiveTypeKind.Binary, "at this point only string and binary types should be present"); switch (newPrimitiveTypeKind) { case PrimitiveTypeKind.String: if (!isUnbounded) { return TypeUsage.CreateStringTypeUsage(edmPrimitiveType, isUnicode, isFixedLen, maxLength); } else { return TypeUsage.CreateStringTypeUsage(edmPrimitiveType, isUnicode, isFixedLen); } case PrimitiveTypeKind.Binary: if (!isUnbounded) { return TypeUsage.CreateBinaryTypeUsage(edmPrimitiveType, isFixedLen, maxLength); } else { return TypeUsage.CreateBinaryTypeUsage(edmPrimitiveType, isFixedLen); } default: throw new NotSupportedException(string.Format("The underlying provider does not support the type '{0}'.", storeTypeName)); } } /// /// This method takes a type and a set of facets and returns the best mapped equivalent type /// in SQL Server, taking the store version into consideration. 
/// /// A TypeUsage encapsulating an EDM type and a set of facets /// A TypeUsage encapsulating a store type and a set of facets public override TypeUsage GetStoreType(TypeUsage edmType) { if (edmType == null) { throw new ArgumentNullException("edmType"); } Debug.Assert(edmType.EdmType.BuiltInTypeKind == BuiltInTypeKind.PrimitiveType); if (!(edmType.EdmType is PrimitiveType primitiveType)) { throw new ArgumentException(string.Format("The underlying provider does not support the type '{0}'.", edmType)); } var facets = edmType.Facets; switch (primitiveType.PrimitiveTypeKind) { case PrimitiveTypeKind.Boolean: return TypeUsage.CreateDefaultTypeUsage(StoreTypeNameToStorePrimitiveType["smallint_bool"]); case PrimitiveTypeKind.Int16: return TypeUsage.CreateDefaultTypeUsage(StoreTypeNameToStorePrimitiveType["smallint"]); case PrimitiveTypeKind.Int32: return TypeUsage.CreateDefaultTypeUsage(StoreTypeNameToStorePrimitiveType["int"]); case PrimitiveTypeKind.Int64: return TypeUsage.CreateDefaultTypeUsage(StoreTypeNameToStorePrimitiveType["bigint"]); case PrimitiveTypeKind.Double: return TypeUsage.CreateDefaultTypeUsage(StoreTypeNameToStorePrimitiveType["double"]); case PrimitiveTypeKind.Single: return TypeUsage.CreateDefaultTypeUsage(StoreTypeNameToStorePrimitiveType["float"]); case PrimitiveTypeKind.Decimal: // decimal, numeric { if (!TypeHelpers.TryGetPrecision(edmType, out var precision)) { precision = 9; } if (!TypeHelpers.TryGetScale(edmType, out var scale)) { scale = 0; } return TypeUsage.CreateDecimalTypeUsage(StoreTypeNameToStorePrimitiveType["decimal"], precision, scale); } case PrimitiveTypeKind.Binary: // blob { var isFixedLength = null != facets[MetadataHelpers.FixedLengthFacetName].Value && (bool)facets[MetadataHelpers.FixedLengthFacetName].Value; var f = facets[MetadataHelpers.MaxLengthFacetName]; var isMaxLength = f.IsUnbounded || null == f.Value || (int)f.Value > BinaryMaxSize; var maxLength = !isMaxLength ? 
(int)f.Value : Int32.MinValue; TypeUsage tu; if (isFixedLength) { tu = TypeUsage.CreateBinaryTypeUsage(StoreTypeNameToStorePrimitiveType["blob"], true, maxLength); } else { if (isMaxLength) { tu = TypeUsage.CreateBinaryTypeUsage(StoreTypeNameToStorePrimitiveType["blob"], false); Debug.Assert(tu.Facets["MaxLength"].Description.IsConstant, "blob is not constant!"); } else { tu = TypeUsage.CreateBinaryTypeUsage(StoreTypeNameToStorePrimitiveType["blob"], false, maxLength); } } return tu; } case PrimitiveTypeKind.String: // char, varchar, text blob { var isUnicode = null == facets[MetadataHelpers.UnicodeFacetName].Value || (bool)facets[MetadataHelpers.UnicodeFacetName].Value; var isFixedLength = null != facets[MetadataHelpers.FixedLengthFacetName].Value && (bool)facets[MetadataHelpers.FixedLengthFacetName].Value; var f = facets[MetadataHelpers.MaxLengthFacetName]; // maxlen is true if facet value is unbounded, the value is bigger than the limited string sizes *or* the facet // value is null. this is needed since functions still have maxlength facet value as null var isMaxLength = f.IsUnbounded || null == f.Value || (int)f.Value > (isUnicode ? UnicodeVarcharMaxSize : AsciiVarcharMaxSize); var maxLength = !isMaxLength ? 
(int)f.Value : Int32.MinValue; TypeUsage tu; if (isUnicode) { if (isFixedLength) { tu = TypeUsage.CreateStringTypeUsage(StoreTypeNameToStorePrimitiveType["char"], true, true, maxLength); } else { if (isMaxLength) { tu = TypeUsage.CreateStringTypeUsage(StoreTypeNameToStorePrimitiveType["clob"], true, false); } else { tu = TypeUsage.CreateStringTypeUsage(StoreTypeNameToStorePrimitiveType["varchar"], true, false, maxLength); } } } else { if (isFixedLength) { tu = TypeUsage.CreateStringTypeUsage(StoreTypeNameToStorePrimitiveType["char"], false, true, maxLength); } else { if (isMaxLength) { tu = TypeUsage.CreateStringTypeUsage(StoreTypeNameToStorePrimitiveType["clob"], false, false); } else { tu = TypeUsage.CreateStringTypeUsage(StoreTypeNameToStorePrimitiveType["varchar"], false, false, maxLength); } } } return tu; } case PrimitiveTypeKind.DateTime: // datetime, date { bool useTimestamp; if (TypeHelpers.TryGetPrecision(edmType, out var precision)) { if (precision == 0) useTimestamp = false; else useTimestamp = true; } else { useTimestamp = true; } return TypeUsage.CreateDefaultTypeUsage(useTimestamp ? 
StoreTypeNameToStorePrimitiveType["timestamp"] : StoreTypeNameToStorePrimitiveType["date"]); } case PrimitiveTypeKind.Time: return TypeUsage.CreateDefaultTypeUsage(StoreTypeNameToStorePrimitiveType["time"]); case PrimitiveTypeKind.Guid: return TypeUsage.CreateDefaultTypeUsage(StoreTypeNameToStorePrimitiveType["guid"]); default: throw new NotSupportedException(string.Format("There is no store type corresponding to the EDM type '{0}' of primitive type '{1}'.", edmType, primitiveType.PrimitiveTypeKind)); } } private XmlReader GetStoreSchemaMapping(string mslName) { return GetXmlResource(GetStoreSchemaResourceName(mslName, "msl")); } private XmlReader GetStoreSchemaDescription(string ssdlName) { return GetXmlResource(GetStoreSchemaResourceName(ssdlName, "ssdl")); } private static XmlReader GetXmlResource(string resourceName) { var executingAssembly = Assembly.GetExecutingAssembly(); var stream = executingAssembly.GetManifestResourceStream(resourceName); return XmlReader.Create(stream); } private static string GetManifestResourceName() { return "EntityFramework.Firebird.Resources.ProviderManifest.xml"; } private static string GetStoreSchemaResourceName(string name, string type) { return string.Format("EntityFramework.Firebird.Resources.{0}.{1}", name, type); } public override bool SupportsEscapingLikeArgument(out char escapeCharacter) { escapeCharacter = LikeEscapeCharacter; return true; } public override string EscapeLikeArgument(string argument) { var sb = new StringBuilder(argument); sb.Replace(LikeEscapeCharacter.ToString(), LikeEscapeCharacter.ToString() + LikeEscapeCharacter.ToString()); sb.Replace("%", LikeEscapeCharacter + "%"); sb.Replace("_", LikeEscapeCharacter + "_"); return sb.ToString(); } public override bool SupportsInExpression() { return true; } public override bool SupportsParameterOptimizationInSchemaQueries() { return true; } } ================================================ FILE: src/EntityFramework.Firebird/FbProviderServices.cs 
================================================

/*
 *	The contents of this file are subject to the Initial
 *	Developer's Public License Version 1.0 (the "License");
 *	you may not use this file except in compliance with the
 *	License. You may obtain a copy of the License at
 *	https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *	Software distributed under the License is distributed on
 *	an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *	express or implied. See the License for the specific
 *	language governing rights and limitations under the License.
 *
 *	All Rights Reserved.
 */

//$Authors = Jiri Cincura (jiri@cincura.net)

using System;
using System.Data;
using System.Data.Common;
using System.Data.Entity.Core.Common;
using System.Data.Entity.Core.Common.CommandTrees;
using System.Data.Entity.Core.Metadata.Edm;
using System.Data.Entity.Infrastructure;
using System.Data.Entity.Infrastructure.DependencyResolution;
using System.Data.Entity.Infrastructure.Interception;
using System.Data.Entity.Migrations.Sql;
using System.Diagnostics;
using System.Linq;
using EntityFramework.Firebird.SqlGen;
using FirebirdSql.Data.FirebirdClient;
using FirebirdSql.Data.Isql;
using FirebirdSql.Data.Services;

namespace EntityFramework.Firebird;

public class FbProviderServices : DbProviderServices
{
	public const string ProviderInvariantName = "FirebirdSql.Data.FirebirdClient";
	public static readonly FbProviderServices Instance = new FbProviderServices();

	public FbProviderServices()
	{
		// Register the default connection factory and migrations SQL generator
		// for this provider, plus the migrations transactions interceptor.
		AddDependencyResolver(new SingletonDependencyResolver<IDbConnectionFactory>(new FbConnectionFactory()));
		AddDependencyResolver(new SingletonDependencyResolver<Func<MigrationSqlGenerator>>(() => new FbMigrationSqlGenerator(), ProviderInvariantName));
		DbInterception.Add(new FbMigrationsTransactionsInterceptor());
	}

	protected override DbCommandDefinition CreateDbCommandDefinition(DbProviderManifest manifest, DbCommandTree commandTree)
	{
		var prototype = CreateCommand(manifest, commandTree);
		var result = CreateCommandDefinition(prototype);
		return result;
	}

	/// <summary>
	/// Builds an FbCommand (text, type and parameters) from the given command tree.
	/// </summary>
	private DbCommand CreateCommand(DbProviderManifest manifest, DbCommandTree commandTree)
	{
		if (manifest == null)
			throw new ArgumentNullException("manifest");
		if (commandTree == null)
			throw new ArgumentNullException("commandTree");

		var expectedTypes = PrepareTypeCoercions(commandTree);
		var command = FbCommand.CreateWithTypeCoercions(expectedTypes);
		command.CommandText = SqlGenerator.GenerateSql(commandTree, out var parameters, out var commandType);
		command.CommandType = commandType;

		// Get the function (if any) implemented by the command tree since this influences our interpretation of parameters
		EdmFunction function = null;
		if (commandTree is DbFunctionCommandTree functionCommandTree)
		{
			function = functionCommandTree.EdmFunction;
		}

		// Now make sure we populate the command's parameters from the CQT's parameters:
		foreach (var queryParameter in commandTree.Parameters)
		{
			FbParameter parameter;
			// Use the corresponding function parameter TypeUsage where available (currently, the SSDL facets and
			// type trump user-defined facets and type in the EntityCommand).
			if (null != function && function.Parameters.TryGetValue(queryParameter.Key, false, out var functionParameter))
			{
				parameter = CreateSqlParameter(functionParameter.Name, functionParameter.TypeUsage, functionParameter.Mode, DBNull.Value);
			}
			else
			{
				parameter = CreateSqlParameter(queryParameter.Key, queryParameter.Value, ParameterMode.In, DBNull.Value);
			}
			command.Parameters.Add(parameter);
		}

		// Now add parameters added as part of SQL gen (note: this feature is only safe for DML SQL gen which
		// does not support user parameters, where there is no risk of name collision)
		if (null != parameters && 0 < parameters.Count)
		{
			if (!(commandTree is DbInsertCommandTree) && !(commandTree is DbUpdateCommandTree) && !(commandTree is DbDeleteCommandTree))
			{
				throw new InvalidOperationException("SqlGenParametersNotPermitted");
			}
			foreach (var parameter in parameters)
			{
				command.Parameters.Add(parameter);
			}
		}

		return command;
	}

	protected override string GetDbProviderManifestToken(DbConnection connection)
	{
		try
		{
			Version serverVersion;
			if (connection.State == ConnectionState.Open)
			{
				// An open connection already knows the server version.
				serverVersion = FbServerProperties.ParseServerVersion(connection.ServerVersion);
			}
			else
			{
				var serverProperties = new FbServerProperties { ConnectionString = connection.ConnectionString };
				serverVersion = FbServerProperties.ParseServerVersion(serverProperties.GetServerVersion());
			}
			// "major.minor" is used as the manifest token.
			return serverVersion.ToString(2);
		}
		catch (Exception ex)
		{
			throw new InvalidOperationException("Could not retrieve storage version.", ex);
		}
	}

	protected override DbProviderManifest GetDbProviderManifest(string versionHint)
	{
		if (string.IsNullOrEmpty(versionHint))
		{
			throw new ArgumentException("Could not determine store version; a valid store connection or a version hint is required.");
		}
		return new FbProviderManifest(versionHint);
	}

	internal static FbParameter CreateSqlParameter(string name, TypeUsage type, ParameterMode mode, object value)
	{
		var result = new FbParameter(name, value);

		var direction = MetadataHelpers.ParameterModeToParameterDirection(mode);
		if (result.Direction != direction)
		{
			result.Direction = direction;
		}

		// output parameters are handled differently (we need to ensure there is space for return
		// values where the user has not given a specific Size/MaxLength)
		var isOutParam = mode != ParameterMode.In;
		var sqlDbType = GetSqlDbType(type, isOutParam, out var size);
		if (result.FbDbType != sqlDbType)
		{
			result.FbDbType = sqlDbType;
		}

		// Note that we overwrite 'facet' parameters where either the value is different or
		// there is an output parameter.
		if (size.HasValue && (isOutParam || result.Size != size.Value))
		{
			result.Size = size.Value;
		}

		var isNullable = MetadataHelpers.IsNullable(type);
		if (isOutParam || isNullable != result.IsNullable)
		{
			result.IsNullable = isNullable;
		}

		return result;
	}

	private static FbDbType GetSqlDbType(TypeUsage type, bool isOutParam, out int? size)
	{
		// only supported for primitive type
		var primitiveTypeKind = MetadataHelpers.GetPrimitiveTypeKind(type);

		size = default;

		switch (primitiveTypeKind)
		{
			case PrimitiveTypeKind.Boolean:
				return FbDbType.SmallInt;
			case PrimitiveTypeKind.Int16:
				return FbDbType.SmallInt;
			case PrimitiveTypeKind.Int32:
				return FbDbType.Integer;
			case PrimitiveTypeKind.Int64:
				return FbDbType.BigInt;
			case PrimitiveTypeKind.Double:
				return FbDbType.Double;
			case PrimitiveTypeKind.Single:
				return FbDbType.Float;
			case PrimitiveTypeKind.Decimal:
				return FbDbType.Decimal;
			case PrimitiveTypeKind.Binary:
				// for output parameters, ensure there is space...
size = GetParameterSize(type, isOutParam); return GetBinaryDbType(type); case PrimitiveTypeKind.String: size = GetParameterSize(type, isOutParam); return GetStringDbType(type); case PrimitiveTypeKind.DateTime: return FbDbType.TimeStamp; case PrimitiveTypeKind.Time: return FbDbType.Time; case PrimitiveTypeKind.Guid: return FbDbType.Guid; default: Debug.Fail("unknown PrimitiveTypeKind " + primitiveTypeKind); throw new InvalidOperationException("unknown PrimitiveTypeKind " + primitiveTypeKind); } } private static int? GetParameterSize(TypeUsage type, bool isOutParam) { if (MetadataHelpers.TryGetMaxLength(type, out var maxLength)) { // if the MaxLength facet has a specific value use it return maxLength; } else if (isOutParam) { // if the parameter is a return/out/inout parameter, ensure there // is space for any value return int.MaxValue; } else { // no value return default; } } private static FbDbType GetStringDbType(TypeUsage type) { Debug.Assert(type.EdmType.BuiltInTypeKind == BuiltInTypeKind.PrimitiveType && PrimitiveTypeKind.String == ((PrimitiveType)type.EdmType).PrimitiveTypeKind, "only valid for string type"); FbDbType dbType; // Specific type depends on whether the string is a unicode string and whether it is a fixed length string. // By default, assume widest type (unicode) and most common type (variable length) if (!MetadataHelpers.TryGetIsFixedLength(type, out var fixedLength)) { fixedLength = false; } if (!MetadataHelpers.TryGetIsUnicode(type, out var unicode)) { unicode = true; } if (fixedLength) { dbType = (unicode ? FbDbType.Char : FbDbType.Char); } else { if (!MetadataHelpers.TryGetMaxLength(type, out var maxLength)) { maxLength = (unicode ? FbProviderManifest.UnicodeVarcharMaxSize : FbProviderManifest.AsciiVarcharMaxSize); } if (maxLength == default || maxLength > (unicode ? FbProviderManifest.UnicodeVarcharMaxSize : FbProviderManifest.AsciiVarcharMaxSize)) { dbType = FbDbType.Text; } else { dbType = (unicode ? 
FbDbType.VarChar : FbDbType.VarChar); } } return dbType; } private static FbDbType GetBinaryDbType(TypeUsage type) { Debug.Assert(type.EdmType.BuiltInTypeKind == BuiltInTypeKind.PrimitiveType && PrimitiveTypeKind.Binary == ((PrimitiveType)type.EdmType).PrimitiveTypeKind, "only valid for binary type"); // Specific type depends on whether the binary value is fixed length. By default, assume variable length. //bool fixedLength; //if (!MetadataHelpers.TryGetIsFixedLength(type, out fixedLength)) //{ // fixedLength = false; //} return FbDbType.Binary; } private static Type[] PrepareTypeCoercions(DbCommandTree commandTree) { if (commandTree is DbQueryCommandTree queryTree) { if (queryTree.Query is DbProjectExpression projectExpression) { var resultsType = projectExpression.Projection.ResultType.EdmType; if (resultsType is StructuralType resultsAsStructuralType) { var members = resultsAsStructuralType.Members; return members.Select(ExtractExpectedTypeForCoercion).ToArray(); } } } if (commandTree is DbFunctionCommandTree functionTree) { if (functionTree.ResultType != null) { Debug.Assert(MetadataHelpers.IsCollectionType(functionTree.ResultType.EdmType), "Result type of a function is expected to be a collection of RowType or PrimitiveType"); var typeUsage = MetadataHelpers.GetElementTypeUsage(functionTree.ResultType); var elementType = typeUsage.EdmType; if (MetadataHelpers.IsRowType(elementType)) { var members = ((RowType)elementType).Members; return members.Select(ExtractExpectedTypeForCoercion).ToArray(); } else if (MetadataHelpers.IsPrimitiveType(elementType)) { return new[] { MakeTypeCoercion(((PrimitiveType)elementType).ClrEquivalentType, typeUsage) }; } else { Debug.Fail("Result type of a function is expected to be a collection of RowType or PrimitiveType"); } } } return null; } private static Type ExtractExpectedTypeForCoercion(EdmMember member) { var type = ((PrimitiveType)member.TypeUsage.EdmType).ClrEquivalentType; return MakeTypeCoercion(type, member.TypeUsage); 
} private static Type MakeTypeCoercion(Type type, TypeUsage typeUsage) { if (type.IsValueType && MetadataHelpers.IsNullable(typeUsage)) return typeof(Nullable<>).MakeGenericType(type); return type; } protected override void DbCreateDatabase(DbConnection connection, int? commandTimeout, #pragma warning disable 3001 StoreItemCollection storeItemCollection) #pragma warning restore 3001 { FbConnection.CreateDatabase(connection.ConnectionString, pageSize: 16384); var script = DbCreateDatabaseScript(GetDbProviderManifestToken(connection), storeItemCollection); var fbScript = new FbScript(script); fbScript.Parse(); if (fbScript.Results.Any()) { using (var fbConnection = new FbConnection(connection.ConnectionString)) { var execution = new FbBatchExecution(fbConnection); execution.AppendSqlStatements(fbScript); execution.Execute(); } } } protected override string DbCreateDatabaseScript(string providerManifestToken, #pragma warning disable 3001 StoreItemCollection storeItemCollection) #pragma warning restore 3001 { return SsdlToFb.Transform(storeItemCollection, providerManifestToken); } protected override bool DbDatabaseExists(DbConnection connection, int? commandTimeout, #pragma warning disable 3001 StoreItemCollection storeItemCollection) #pragma warning restore 3001 { if (connection.State == ConnectionState.Open || connection.State == ConnectionState.Executing || connection.State == ConnectionState.Fetching) { return true; } else { try { connection.Open(); return true; } catch { return false; } finally { try { connection.Close(); } catch { } } } } protected override void DbDeleteDatabase(DbConnection connection, int? 
commandTimeout, #pragma warning disable 3001 StoreItemCollection storeItemCollection) #pragma warning restore 3001 { FbConnection.DropDatabase(connection.ConnectionString); } } ================================================ FILE: src/EntityFramework.Firebird/IFbMigrationSqlGeneratorBehavior.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. */ //$Authors = Jiri Cincura (jiri@cincura.net) using System.Collections.Generic; namespace EntityFramework.Firebird; public interface IFbMigrationSqlGeneratorBehavior { IEnumerable CreateIdentityForColumn(string columnName, string tableName); IEnumerable DropIdentityForColumn(string columnName, string tableName); } ================================================ FILE: src/EntityFramework.Firebird/MetadataHelpers.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. 
*/ //$Authors = Jiri Cincura (jiri@cincura.net) using System; using System.Collections.Generic; using System.Data; using System.Data.Entity.Core.Metadata.Edm; using System.Diagnostics; namespace EntityFramework.Firebird; internal static class MetadataHelpers { #region Type Helpers /// /// Cast the EdmType of the given type usage to the given TEdmType /// /// /// /// internal static TEdmType GetEdmType(TypeUsage typeUsage) where TEdmType : EdmType { return (TEdmType)typeUsage.EdmType; } /// /// Gets the TypeUsage of the elment if the given type is a collection type /// /// /// internal static TypeUsage GetElementTypeUsage(TypeUsage type) { if (MetadataHelpers.IsCollectionType(type)) { return ((CollectionType)type.EdmType).TypeUsage; } return null; } /// /// Retrieves the properties of in the EdmType underlying the input type usage, /// if that EdmType is a structured type (EntityType, RowType). /// /// /// internal static IList GetProperties(TypeUsage typeUsage) { return MetadataHelpers.GetProperties(typeUsage.EdmType); } /// /// Retrieves the properties of the given EdmType, if it is /// a structured type (EntityType, RowType). 
/// /// /// internal static IList GetProperties(EdmType edmType) { switch (edmType.BuiltInTypeKind) { case BuiltInTypeKind.ComplexType: return ((ComplexType)edmType).Properties; case BuiltInTypeKind.EntityType: return ((EntityType)edmType).Properties; case BuiltInTypeKind.RowType: return ((RowType)edmType).Properties; default: return new List(); } } /// /// Is the given type usage over a collection type /// /// /// internal static bool IsCollectionType(TypeUsage typeUsage) { return MetadataHelpers.IsCollectionType(typeUsage.EdmType); } /// /// Is the given type a collection type /// /// /// internal static bool IsCollectionType(EdmType type) { return (BuiltInTypeKind.CollectionType == type.BuiltInTypeKind); } /// /// Is the given type usage over a primitive type /// /// /// internal static bool IsPrimitiveType(TypeUsage type) { return MetadataHelpers.IsPrimitiveType(type.EdmType); } /// /// Is the given type a primitive type /// /// /// internal static bool IsPrimitiveType(EdmType type) { return (BuiltInTypeKind.PrimitiveType == type.BuiltInTypeKind); } /// /// Is the given type usage over a row type /// /// /// internal static bool IsRowType(TypeUsage type) { return MetadataHelpers.IsRowType(type.EdmType); } /// /// Is the given type a row type /// /// /// internal static bool IsRowType(EdmType type) { return (BuiltInTypeKind.RowType == type.BuiltInTypeKind); } /// /// Gets the type of the given type usage if it is a primitive type /// /// /// /// internal static bool TryGetPrimitiveTypeKind(TypeUsage type, out PrimitiveTypeKind typeKind) { if (type != null && type.EdmType != null && type.EdmType.BuiltInTypeKind == BuiltInTypeKind.PrimitiveType) { typeKind = ((PrimitiveType)type.EdmType).PrimitiveTypeKind; return true; } typeKind = default; return false; } internal static PrimitiveTypeKind GetPrimitiveTypeKind(TypeUsage type) { if (!MetadataHelpers.TryGetPrimitiveTypeKind(type, out var returnValue)) { Debug.Assert(false, "Cannot create parameter of non-primitive 
type"); throw new NotSupportedException("Cannot create parameter of non-primitive type"); } return returnValue; } /// /// Gets the value for the metadata property with the given name /// /// /// /// /// internal static T TryGetValueForMetadataProperty(MetadataItem item, string propertyName) { if (!item.MetadataProperties.TryGetValue(propertyName, true, out var property)) { return default; } return (T)property.Value; } internal static bool IsPrimitiveType(TypeUsage type, PrimitiveTypeKind primitiveType) { if (TryGetPrimitiveTypeKind(type, out var typeKind)) { return (typeKind == primitiveType); } return false; } #endregion #region Facet Support #region Facet Names /// /// Name of the MaxLength Facet /// internal const string MaxLengthFacetName = "MaxLength"; /// /// Name of the Unicode Facet /// internal const string UnicodeFacetName = "Unicode"; /// /// Name of the FixedLength Facet /// internal const string FixedLengthFacetName = "FixedLength"; /// /// Name of the Precision Facet /// internal const string PrecisionFacetName = "Precision"; /// /// Name of the Scale Facet /// internal const string ScaleFacetName = "Scale"; /// /// Name of the DefaultValue Facet /// internal const string DefaultValueFacetName = "DefaultValue"; /// /// Name of the Nullable Facet /// internal const string NullableFacetName = "Nullable"; /// /// Name of StoreGeneratedPattern Facet /// internal const string StoreGeneratedPatternFacetName = "StoreGeneratedPattern"; #endregion #region Facet Retreival Helpers /// /// Get the value specified on the given type usage for the given facet name. /// If the faces does not have a value specifid or that value is null returns /// the default value for that facet. /// /// /// /// /// /// /// Get the value specified on the given type usage for the given facet name. /// If the faces does not have a value specifid or that value is null returns /// the default value for that facet. 
/// <typeparam name="T">CLR type of the facet value.</typeparam>
/// <param name="type">Type usage whose facets are searched (case-sensitive lookup).</param>
/// <param name="facetName">Name of the facet to look up.</param>
/// <param name="defaultValue">Returned when the facet is missing, null or unbounded.</param>
/// <returns>The facet value, or <paramref name="defaultValue"/>.</returns>
internal static T GetFacetValueOrDefault<T>(TypeUsage type, string facetName, T defaultValue)
{
	//Get the value for the facet, if any
	if (type.Facets.TryGetValue(facetName, false, out var facet) && facet.Value != null && !facet.IsUnbounded)
	{
		return (T)facet.Value;
	}
	else
	{
		return defaultValue;
	}
}

/// <summary>
/// Is the description of the named facet on this primitive type constant.
/// </summary>
internal static bool IsFacetValueConstant(TypeUsage type, string facetName)
{
	return MetadataHelpers.GetFacet(((PrimitiveType)type.EdmType).FacetDescriptions, facetName).IsConstant;
}

/// <summary>
/// Finds the facet description with the given name; null when absent.
/// </summary>
private static FacetDescription GetFacet(IEnumerable<FacetDescription> facetCollection, string facetName)
{
	foreach (var facetDescription in facetCollection)
	{
		if (facetDescription.FacetName == facetName)
		{
			return facetDescription;
		}
	}
	return null;
}

/// <summary>
/// Given a facet name and an EdmType, tries to get that facet's description.
/// Name comparison is case-insensitive; only primitive types carry descriptions.
/// </summary>
/// <param name="edmType">EDM type to inspect.</param>
/// <param name="facetName">Facet name to search for.</param>
/// <param name="facetDescription">Receives the description on success; null otherwise.</param>
/// <returns>True when a matching description was found.</returns>
internal static bool TryGetTypeFacetDescriptionByName(EdmType edmType, string facetName, out FacetDescription facetDescription)
{
	facetDescription = null;
	if (MetadataHelpers.IsPrimitiveType(edmType))
	{
		var primitiveType = (PrimitiveType)edmType;
		foreach (var fd in primitiveType.FacetDescriptions)
		{
			if (facetName.Equals(fd.FacetName, StringComparison.OrdinalIgnoreCase))
			{
				facetDescription = fd;
				return true;
			}
		}
	}
	return false;
}

/// <summary>
/// Reads the Nullable facet; false when the facet is not present.
/// </summary>
internal static bool IsNullable(TypeUsage type)
{
	if (type.Facets.TryGetValue(NullableFacetName, false, out var nullableFacet))
	{
		return (bool)nullableFacet.Value;
	}
	return false;
}

/// <summary>
/// Tries to read the MaxLength facet of a String or Binary type usage.
/// </summary>
/// <param name="type">Type usage to inspect.</param>
/// <param name="maxLength">Receives the length; null when the facet is unbounded.</param>
/// <returns>False for non-String/Binary types or when the facet has no value.</returns>
internal static bool TryGetMaxLength(TypeUsage type, out int? maxLength)
{
	if (!IsPrimitiveType(type, PrimitiveTypeKind.String) &&
		!IsPrimitiveType(type, PrimitiveTypeKind.Binary))
	{
		maxLength = 0;
		return false;
	}

	// Binary and String MaxLength facets share the same name
	return TryGetIntFacetValue(type, MaxLengthFacetName, out maxLength);
}

/// <summary>
/// Tries to read an integer-valued facet; the out value is null when the facet is unbounded.
/// </summary>
internal static bool TryGetIntFacetValue(TypeUsage type, string facetName, out int? intValue)
{
	intValue = 0;
	if (type.Facets.TryGetValue(facetName, false, out var intFacet) && intFacet.Value != null)
	{
		if (!intFacet.IsUnbounded)
			intValue = (int)intFacet.Value;
		else
			intValue = default;
		return true;
	}
	return false;
}

/// <summary>
/// Tries to read the FixedLength facet of a String or Binary type usage.
/// </summary>
internal static bool TryGetIsFixedLength(TypeUsage type, out bool isFixedLength)
{
	if (!IsPrimitiveType(type, PrimitiveTypeKind.String) &&
		!IsPrimitiveType(type, PrimitiveTypeKind.Binary))
	{
		isFixedLength = false;
		return false;
	}

	// Binary and String FixedLength facets share the same name
	return TryGetBooleanFacetValue(type, FixedLengthFacetName, out isFixedLength);
}

/// <summary>
/// Tries to read a boolean-valued facet.
/// </summary>
internal static bool TryGetBooleanFacetValue(TypeUsage type, string facetName, out bool boolValue)
{
	boolValue = false;
	if (type.Facets.TryGetValue(facetName, false, out var boolFacet) && boolFacet.Value != null)
	{
		boolValue = (bool)boolFacet.Value;
		return true;
	}
	return false;
}

/// <summary>
/// Tries to read the Unicode facet of a String type usage.
/// </summary>
internal static bool TryGetIsUnicode(TypeUsage type, out bool isUnicode)
{
	if (!IsPrimitiveType(type, PrimitiveTypeKind.String))
	{
		isUnicode = false;
		return false;
	}
	return TryGetBooleanFacetValue(type, UnicodeFacetName, out isUnicode);
}

#endregion

#endregion

/// <summary>
/// Canonical functions live in the "Edm" namespace.
/// </summary>
internal static bool IsCanonicalFunction(EdmFunction function)
{
	return (function.NamespaceName == "Edm");
}

/// <summary>
/// A store function is any function that is not canonical.
/// </summary>
internal static bool IsStoreFunction(EdmFunction function)
{
	return !IsCanonicalFunction(function);
}

// Returns ParameterDirection corresponding to given ParameterMode
internal static ParameterDirection ParameterModeToParameterDirection(ParameterMode mode)
{
	switch (mode)
	{
		case ParameterMode.In:
			return ParameterDirection.Input;
		case ParameterMode.InOut:
			return ParameterDirection.InputOutput;
		case ParameterMode.Out:
			return ParameterDirection.Output;
		case ParameterMode.ReturnValue:
			return ParameterDirection.ReturnValue;
		default:
			Debug.Fail("unrecognized mode " + mode.ToString());
			return default;
	}
}

/// <summary>
/// Resolves the store table name for an entity set: the "Table" metadata
/// property when set, otherwise the entity set's own name.
/// </summary>
internal static string GetTableName(EntitySetBase entitySetBase)
{
	var tableName =
MetadataHelpers.TryGetValueForMetadataProperty(entitySetBase, "Table"); return !string.IsNullOrEmpty(tableName) ? tableName : entitySetBase.Name; } private static bool IsStoreGeneratedPattern(EdmMember member, StoreGeneratedPattern pattern) { return (member.TypeUsage.Facets.TryGetValue(StoreGeneratedPatternFacetName, false, out var item) && ((StoreGeneratedPattern)item.Value) == pattern); } internal static bool IsStoreGeneratedComputed(EdmMember member) { return IsStoreGeneratedPattern(member, StoreGeneratedPattern.Computed); } internal static bool IsStoreGeneratedIdentity(EdmMember member) { return IsStoreGeneratedPattern(member, StoreGeneratedPattern.Identity); } internal static bool IsStoreGenerated(EdmMember member) { return IsStoreGeneratedComputed(member) || IsStoreGeneratedIdentity(member); } } ================================================ FILE: src/EntityFramework.Firebird/Resources/ProviderManifest.xml ================================================ ================================================ FILE: src/EntityFramework.Firebird/Resources/StoreSchemaDefinition.ssdl ================================================ -- STables SELECT TRIM(rdb$relation_name) as "Id" , 'Firebird' as "CatalogName" , 'Firebird' as "SchemaName" , TRIM(rdb$relation_name) as "Name" FROM rdb$relations WHERE rdb$view_source IS NULL AND rdb$system_flag = 0 -- STableColumns SELECT TRIM(rf.rdb$relation_name) || 'x' || TRIM(rf.rdb$field_name) as "Id" , TRIM(rf.rdb$relation_name) as "ParentId" , TRIM(rf.rdb$field_name) as "Name" , rf.rdb$field_position+1 as "Ordinal" , IIF(COALESCE(rf.rdb$null_flag, f.rdb$null_flag) IS NULL, 1, 0) as "IsNullable" , TRIM(CASE WHEN POSITION('#BOOL#', UPPER(rf.rdb$description)) > 0 THEN 'smallint_bool' WHEN POSITION('#GUID#', UPPER(rf.rdb$description)) > 0 THEN 'guid' ELSE CASE f.rdb$field_type WHEN 7 THEN CASE WHEN ((f.rdb$field_sub_type = 2) OR (f.rdb$field_sub_type = 0 AND f.rdb$field_scale < 0)) THEN 'decimal' WHEN f.rdb$field_sub_type = 1 THEN 
'numeric' ELSE 'smallint' END WHEN 8 THEN CASE WHEN ((f.rdb$field_sub_type = 2) OR (f.rdb$field_sub_type = 0 AND f.rdb$field_scale < 0)) THEN 'decimal' WHEN f.rdb$field_sub_type = 1 THEN 'numeric' ELSE 'int' END WHEN 16 THEN CASE WHEN ((f.rdb$field_sub_type = 2) OR (f.rdb$field_sub_type = 0 AND f.rdb$field_scale < 0)) THEN 'decimal' WHEN f.rdb$field_sub_type = 1 THEN 'numeric' ELSE 'bigint' END WHEN 10 THEN 'float' WHEN 27 THEN 'double' WHEN 12 THEN 'date' WHEN 13 THEN 'time' WHEN 35 THEN 'timestamp' WHEN 261 THEN CASE f.rdb$field_sub_type WHEN 1 THEN 'clob' ELSE 'blob' END WHEN 37 THEN 'varchar' WHEN 14 THEN 'char' WHEN 40 THEN 'cstring' END END) as "TypeName" , IIF(f.rdb$character_length = 0, 32765, f.rdb$character_length) as "MaxLength" --hot fix for CORE-2228 , IIF(f.rdb$field_precision = 0 AND f.rdb$field_scale < 0, 18, f.rdb$field_precision) as "Precision" , 4 as "DateTimePrecision" , f.rdb$field_scale * (-1) as "Scale" , NULL as "CollationCatalog" , NULL as "CollationSchema" , NULL as "CollationName" , NULL as "CharacterSetCatalog" , NULL as "CharacterSetSchema" , NULL as "CharacterSetName" , 0 as "IsMultiSet" , IIF(POSITION('#PK_GEN#', UPPER(rf.rdb$description)) > 0, 1, 0) as "IsIdentity" , IIF(f.rdb$computed_blr IS NULL, 0, 1) as "IsStoreGenerated" , COALESCE(rf.rdb$default_source, f.rdb$default_source) as "Default" FROM rdb$relation_fields rf INNER JOIN rdb$fields f ON (rf.rdb$field_source = f.rdb$field_name) INNER JOIN rdb$relations r ON (rf.rdb$relation_name = r.rdb$relation_name) WHERE rf.rdb$system_flag = 0 AND r.rdb$view_blr IS NULL -- SViews SELECT TRIM(rdb$relation_name) as "Id" , 'Firebird' as "CatalogName" , 'Firebird' as "SchemaName" , TRIM(rdb$relation_name) as "Name" , rdb$view_source as "ViewDefinition" , 1 as "IsUpdatable" FROM rdb$relations WHERE rdb$view_blr IS NOT NULL AND rdb$system_flag = 0 -- SViewColumns SELECT TRIM(rf.rdb$relation_name) || 'x' || TRIM(rf.rdb$field_name) as "Id" , TRIM(rf.rdb$relation_name) as "ParentId" , 
TRIM(rf.rdb$field_name) as "Name" , rf.rdb$field_position+1 as "Ordinal" , IIF(COALESCE(rf.rdb$null_flag, f.rdb$null_flag) IS NULL, 1, 0) as "IsNullable" , TRIM(CASE WHEN POSITION('#BOOL#', UPPER(rf.rdb$description)) > 0 THEN 'smallint_bool' WHEN POSITION('#GUID#', UPPER(rf.rdb$description)) > 0 THEN 'guid' ELSE CASE f.rdb$field_type WHEN 7 THEN CASE WHEN ((f.rdb$field_sub_type = 2) OR (f.rdb$field_sub_type = 0 AND f.rdb$field_scale < 0)) THEN 'decimal' WHEN f.rdb$field_sub_type = 1 THEN 'numeric' ELSE 'smallint' END WHEN 8 THEN CASE WHEN ((f.rdb$field_sub_type = 2) OR (f.rdb$field_sub_type = 0 AND f.rdb$field_scale < 0)) THEN 'decimal' WHEN f.rdb$field_sub_type = 1 THEN 'numeric' ELSE 'int' END WHEN 16 THEN CASE WHEN ((f.rdb$field_sub_type = 2) OR (f.rdb$field_sub_type = 0 AND f.rdb$field_scale < 0)) THEN 'decimal' WHEN f.rdb$field_sub_type = 1 THEN 'numeric' ELSE 'bigint' END WHEN 10 THEN 'float' WHEN 27 THEN 'double' WHEN 12 THEN 'date' WHEN 13 THEN 'time' WHEN 35 THEN 'timestamp' WHEN 261 THEN CASE f.rdb$field_sub_type WHEN 1 THEN 'clob' ELSE 'blob' END WHEN 37 THEN 'varchar' WHEN 14 THEN 'char' WHEN 40 THEN 'cstring' END END) as "TypeName" , IIF(f.rdb$character_length = 0, 32765, f.rdb$character_length) as "MaxLength" --hot fix for CORE-2228 , IIF(f.rdb$field_precision = 0 AND f.rdb$field_scale < 0, 18, f.rdb$field_precision) as "Precision" , 4 as "DateTimePrecision" , f.rdb$field_scale * (-1) as "Scale" , NULL as "CollationCatalog" , NULL as "CollationSchema" , NULL as "CollationName" , NULL as "CharacterSetCatalog" , NULL as "CharacterSetSchema" , NULL as "CharacterSetName" , 0 as "IsMultiSet" , IIF(POSITION('#PK_GEN#', UPPER(rf.rdb$description)) > 0, 1, 0) as "IsIdentity" , IIF(f.rdb$computed_blr IS NULL, 0, 1) as "IsStoreGenerated" , COALESCE(rf.rdb$default_source, f.rdb$default_source) as "Default" FROM rdb$relation_fields rf INNER JOIN rdb$fields f ON (rf.rdb$field_source = f.rdb$field_name) INNER JOIN rdb$relations r ON (rf.rdb$relation_name = 
r.rdb$relation_name) WHERE rf.rdb$system_flag = 0 AND r.rdb$view_blr IS NOT NULL -- SFunctions SELECT TRIM(f.rdb$function_name) as "Id" , 'Firebird' as "CatalogName" , 'Firebird' as "SchemaName" , TRIM(f.rdb$function_name) as "Name" , TRIM(/*CASE WHEN POSITION('#BOOL#', UPPER(fa.rdb$description)) > 0 THEN 'smallint_bool' WHEN POSITION('#GUID#', UPPER(fa.rdb$description)) > 0 THEN 'guid' ELSE*/ CASE fa.rdb$field_type WHEN 7 THEN CASE WHEN ((fa.rdb$field_sub_type = 2) OR (fa.rdb$field_sub_type = 0 AND fa.rdb$field_scale < 0)) THEN 'decimal' WHEN fa.rdb$field_sub_type = 1 THEN 'numeric' ELSE 'smallint' END WHEN 8 THEN CASE WHEN ((fa.rdb$field_sub_type = 2) OR (fa.rdb$field_sub_type = 0 AND fa.rdb$field_scale < 0)) THEN 'decimal' WHEN fa.rdb$field_sub_type = 1 THEN 'numeric' ELSE 'int' END WHEN 16 THEN CASE WHEN ((fa.rdb$field_sub_type = 2) OR (fa.rdb$field_sub_type = 0 AND fa.rdb$field_scale < 0)) THEN 'decimal' WHEN fa.rdb$field_sub_type = 1 THEN 'numeric' ELSE 'bigint' END WHEN 10 THEN 'float' WHEN 27 THEN 'double' WHEN 12 THEN 'date' WHEN 13 THEN 'time' WHEN 35 THEN 'timestamp' WHEN 261 THEN CASE fa.rdb$field_sub_type WHEN 1 THEN 'clob' ELSE 'blob' END WHEN 37 THEN 'varchar' WHEN 14 THEN 'char' WHEN 40 THEN 'cstring' /*END*/ END) as "ReturnTypeName" , fa.rdb$character_length as "ReturnMaxLength" , IIF(fa.rdb$field_precision = 0 AND fa.rdb$field_scale < 0, 18, fa.rdb$field_precision) as "ReturnPrecision" , 4 as "ReturnDateTimePrecision" , fa.rdb$field_scale * (-1) as "ReturnScale" , NULL as "ReturnCollationCatalog" , NULL as "ReturnCollationSchema" , NULL as "ReturnCollationName" , NULL as "ReturnCharacterSetCatalog" , NULL as "ReturnCharacterSetSchema" , NULL as "ReturnCharacterSetName" , 0 as "ReturnIsMultiSet" , 0 as "IsAggregate" , 0 as "IsBuiltIn" , 0 as "IsNiladic" --CAST((select CASE COUNT(*) WHEN 1 THEN 1 ELSE 0 END FROM rdb$function_arguments fa WHERE fa.rdb$function_name = f.rdb$function_name) as smallint) as "IsNiladic" FROM rdb$functions f INNER JOIN 
rdb$function_arguments fa ON (f.rdb$function_name = fa.rdb$function_name AND f.rdb$return_argument = fa.rdb$argument_position) WHERE f.rdb$system_flag = 0 -- SFunctionParameters SELECT TRIM(fa.rdb$function_name) || 'x' || TRIM(fa.rdb$argument_position) as "Id" , TRIM(fa.rdb$function_name) as "ParentId" , 'param' || TRIM(fa.rdb$argument_position) as "Name" , fa.rdb$argument_position as "Ordinal" , TRIM(/*CASE WHEN POSITION('#BOOL#', UPPER(fa.rdb$description)) > 0 THEN 'smallint_bool' WHEN POSITION('#GUID#', UPPER(fa.rdb$description)) > 0 THEN 'guid' ELSE*/ CASE fa.rdb$field_type WHEN 7 THEN CASE WHEN ((fa.rdb$field_sub_type = 2) OR (fa.rdb$field_sub_type = 0 AND fa.rdb$field_scale < 0)) THEN 'decimal' WHEN fa.rdb$field_sub_type = 1 THEN 'numeric' ELSE 'smallint' END WHEN 8 THEN CASE WHEN ((fa.rdb$field_sub_type = 2) OR (fa.rdb$field_sub_type = 0 AND fa.rdb$field_scale < 0)) THEN 'decimal' WHEN fa.rdb$field_sub_type = 1 THEN 'numeric' ELSE 'int' END WHEN 16 THEN CASE WHEN ((fa.rdb$field_sub_type = 2) OR (fa.rdb$field_sub_type = 0 AND fa.rdb$field_scale < 0)) THEN 'decimal' WHEN fa.rdb$field_sub_type = 1 THEN 'numeric' ELSE 'bigint' END WHEN 10 THEN 'float' WHEN 27 THEN 'double' WHEN 12 THEN 'date' WHEN 13 THEN 'time' WHEN 35 THEN 'timestamp' WHEN 261 THEN CASE fa.rdb$field_sub_type WHEN 1 THEN 'clob' ELSE 'blob' END WHEN 37 THEN 'varchar' WHEN 14 THEN 'char' WHEN 40 THEN 'cstring' /*END*/ END) as "TypeName" , fa.rdb$character_length as "MaxLength" , IIF(fa.rdb$field_precision = 0 AND fa.rdb$field_scale < 0, 18, fa.rdb$field_precision) as "Precision" , 4 as "DateTimePrecision" , fa.rdb$field_scale * (-1) as "Scale" , NULL as "CollationCatalog" , NULL as "CollationSchema" , NULL as "CollationName" , NULL as "CharacterSetCatalog" , NULL as "CharacterSetSchema" , NULL as "CharacterSetName" , 0 as "IsMultiSet" , 'IN' as "Mode" , NULL as "Default" FROM rdb$functions f INNER JOIN rdb$function_arguments fa ON (f.rdb$function_name = fa.rdb$function_name AND 
f.rdb$return_argument <> fa.rdb$argument_position) WHERE f.rdb$system_flag = 0 -- SProcedures SELECT TRIM(rdb$procedure_name) as "Id" , 'Firebird' as "CatalogName" , 'Firebird' as "SchemaName" , TRIM(rdb$procedure_name) as "Name" FROM rdb$procedures -- SProcedureParameters SELECT TRIM(pp.rdb$procedure_name) || 'x' || TRIM(pp.rdb$parameter_name) as "Id" , TRIM(pp.rdb$procedure_name) as "ParentId" , TRIM(pp.rdb$parameter_name) as "Name" , pp.rdb$parameter_number+1 as "Ordinal" , TRIM(CASE WHEN POSITION('#BOOL#', UPPER(pp.rdb$description)) > 0 THEN 'smallint_bool' WHEN POSITION('#GUID#', UPPER(pp.rdb$description)) > 0 THEN 'guid' ELSE CASE f.rdb$field_type WHEN 7 THEN CASE WHEN ((f.rdb$field_sub_type = 2) OR (f.rdb$field_sub_type = 0 AND f.rdb$field_scale < 0)) THEN 'decimal' WHEN f.rdb$field_sub_type = 1 THEN 'numeric' ELSE 'smallint' END WHEN 8 THEN CASE WHEN ((f.rdb$field_sub_type = 2) OR (f.rdb$field_sub_type = 0 AND f.rdb$field_scale < 0)) THEN 'decimal' WHEN f.rdb$field_sub_type = 1 THEN 'numeric' ELSE 'int' END WHEN 16 THEN CASE WHEN ((f.rdb$field_sub_type = 2) OR (f.rdb$field_sub_type = 0 AND f.rdb$field_scale < 0)) THEN 'decimal' WHEN f.rdb$field_sub_type = 1 THEN 'numeric' ELSE 'bigint' END WHEN 10 THEN 'float' WHEN 27 THEN 'double' WHEN 12 THEN 'date' WHEN 13 THEN 'time' WHEN 35 THEN 'timestamp' WHEN 261 THEN CASE f.rdb$field_sub_type WHEN 1 THEN 'clob' ELSE 'blob' END WHEN 37 THEN 'varchar' WHEN 14 THEN 'char' WHEN 40 THEN 'cstring' END END) as "TypeName" , IIF(f.rdb$character_length = 0, 32765, f.rdb$character_length) as "MaxLength" --hot fix for CORE-2228 , IIF(f.rdb$field_precision = 0 AND f.rdb$field_scale < 0, 18, f.rdb$field_precision) as "Precision" , 4 as "DateTimePrecision" , f.rdb$field_scale * (-1) as "Scale" , NULL as "CollationCatalog" , NULL as "CollationSchema" , NULL "CollationName" , NULL as "CharacterSetCatalog" , NULL as "CharacterSetSchema" , NULL as "CharacterSetName" , 0 as "IsMultiSet" , TRIM(IIF(pp.rdb$parameter_type = 1, 'OUT', 
'IN')) as "Mode" , NULL as "Default" FROM rdb$procedure_parameters pp INNER JOIN rdb$fields f ON (pp.rdb$field_source = f.rdb$field_name) WHERE pp.rdb$parameter_type = 0 --Out params in EF are not results -- SConstraints SELECT TRIM(rc.rdb$constraint_name) as "Id" , TRIM(rc.rdb$relation_name) as "ParentId" , TRIM(rc.rdb$constraint_name) as "Name" , TRIM(rc.rdb$constraint_type) as "ConstraintType" , 0 as "IsDeferrable" , 0 "IsInitiallyDeferred" FROM rdb$relation_constraints rc WHERE rc.rdb$constraint_type IN ('PRIMARY KEY', 'FOREIGN KEY', 'UNIQUE') -- SCheckConstraints SELECT TRIM(rc.rdb$constraint_name) as "Id" , TRIM(SUBSTRING(trg.rdb$trigger_source from 6)) as "Expression" FROM rdb$relation_constraints rc INNER JOIN rdb$check_constraints cc on (rc.rdb$constraint_name = cc.rdb$constraint_name) LEFT JOIN rdb$triggers trg on (cc.rdb$trigger_name = trg.rdb$trigger_name) WHERE rc.rdb$constraint_type IN ('CHECK') -- SConstraintColumns SELECT TRIM(rc.rdb$constraint_name) as "ConstraintId" , TRIM(rc.rdb$relation_name) || 'x' || TRIM(ise.rdb$field_name) as "ColumnId" FROM rdb$relation_constraints rc INNER JOIN rdb$index_segments ise ON (rc.rdb$index_name = ise.rdb$index_name) WHERE rc.rdb$constraint_type IN ('PRIMARY KEY', 'FOREIGN KEY', 'UNIQUE') -- SForeignKeyConstraints SELECT TRIM(rc.rdb$constraint_name) as "Id" , rc.rdb$update_rule "UpdateRule" , rc.rdb$delete_rule "DeleteRule" FROM rdb$ref_constraints rc -- SForeignKeys SELECT TRIM(refc.rdb$constraint_name) || 'x' || TRIM(is2.rdb$field_position+1) as "Id" , TRIM(relc1.rdb$relation_name) || 'x' || TRIM(is1.rdb$field_name) as "FromColumnId" , TRIM(relc2.rdb$relation_name) || 'x' || TRIM(is2.rdb$field_name) as "ToColumnId" , TRIM(refc.rdb$constraint_name) as "ConstraintId" , is2.rdb$field_position+1 as "Ordinal" FROM rdb$ref_constraints refc LEFT JOIN rdb$relation_constraints relc1 ON (refc.rdb$constraint_name = relc1.rdb$constraint_name) INNER JOIN rdb$index_segments is1 ON (relc1.rdb$index_name = is1.rdb$index_name) 
LEFT JOIN rdb$relation_constraints relc2 ON (refc.rdb$const_name_uq = relc2.rdb$constraint_name) INNER JOIN rdb$index_segments is2 ON (relc2.rdb$index_name = is2.rdb$index_name) WHERE is1.rdb$field_position = is2.rdb$field_position -- SViewConstraints SELECT CAST(NULL as varchar(1)) as "Id" , CAST(NULL as varchar(1)) as "ParentId" , CAST(NULL as varchar(1)) as "Name" , CAST(NULL as varchar(1)) as "ConstraintType" , CAST(0 as smallint) as "IsDeferrable" , CAST(0 as smallint) as "IsInitiallyDeferred" , CAST(NULL as varchar(1)) as "Expression" , CAST(NULL as varchar(1)) as "UpdateRule" , CAST(NULL as varchar(1)) as "DeleteRule" FROM rdb$database WHERE 0=1 -- SViewConstraintColumns SELECT CAST(NULL as varchar(1)) as "ConstraintId" , CAST(NULL as varchar(1)) as "ColumnId" FROM rdb$database WHERE 0=1 -- SViewForeignKeys SELECT CAST(NULL as varchar(1)) as "Id" , CAST(NULL as varchar(1)) as "ToColumnId" , CAST(NULL as varchar(1)) as "FromColumnId" , CAST(NULL as varchar(1)) as "ConstraintId" , 0 as "Ordinal" FROM rdb$database WHERE 0=1 ================================================ FILE: src/EntityFramework.Firebird/Resources/StoreSchemaDefinitionVersion3.ssdl ================================================ -- STables3 SELECT TRIM(rdb$relation_name) as "Id" , 'Firebird' as "CatalogName" , 'Firebird' as "SchemaName" , TRIM(rdb$relation_name) as "Name" FROM rdb$relations WHERE rdb$view_source IS NULL AND rdb$system_flag = 0 -- STableColumns3 SELECT TRIM(rf.rdb$relation_name) || 'x' || TRIM(rf.rdb$field_name) as "Id" , TRIM(rf.rdb$relation_name) as "ParentId" , TRIM(rf.rdb$field_name) as "Name" , rf.rdb$field_position+1 as "Ordinal" , IIF(COALESCE(rf.rdb$null_flag, f.rdb$null_flag) IS NULL, 1, 0) as "IsNullable" , TRIM(CASE WHEN POSITION('#BOOL#', UPPER(rf.rdb$description)) > 0 THEN 'smallint_bool' WHEN POSITION('#GUID#', UPPER(rf.rdb$description)) > 0 THEN 'guid' ELSE CASE f.rdb$field_type WHEN 7 THEN CASE WHEN ((f.rdb$field_sub_type = 2) OR (f.rdb$field_sub_type = 0 AND 
f.rdb$field_scale < 0)) THEN 'decimal' WHEN f.rdb$field_sub_type = 1 THEN 'numeric' ELSE 'smallint' END WHEN 8 THEN CASE WHEN ((f.rdb$field_sub_type = 2) OR (f.rdb$field_sub_type = 0 AND f.rdb$field_scale < 0)) THEN 'decimal' WHEN f.rdb$field_sub_type = 1 THEN 'numeric' ELSE 'int' END WHEN 16 THEN CASE WHEN ((f.rdb$field_sub_type = 2) OR (f.rdb$field_sub_type = 0 AND f.rdb$field_scale < 0)) THEN 'decimal' WHEN f.rdb$field_sub_type = 1 THEN 'numeric' ELSE 'bigint' END WHEN 10 THEN 'float' WHEN 27 THEN 'double' WHEN 12 THEN 'date' WHEN 13 THEN 'time' WHEN 35 THEN 'timestamp' WHEN 261 THEN CASE f.rdb$field_sub_type WHEN 1 THEN 'clob' ELSE 'blob' END WHEN 37 THEN 'varchar' WHEN 14 THEN 'char' WHEN 40 THEN 'cstring' END END) as "TypeName" , IIF(f.rdb$character_length = 0, 32765, f.rdb$character_length) as "MaxLength" --hot fix for CORE-2228 , IIF(f.rdb$field_precision = 0 AND f.rdb$field_scale < 0, 18, f.rdb$field_precision) as "Precision" , 4 as "DateTimePrecision" , f.rdb$field_scale * (-1) as "Scale" , NULL as "CollationCatalog" , NULL as "CollationSchema" , NULL as "CollationName" , NULL as "CharacterSetCatalog" , NULL as "CharacterSetSchema" , NULL as "CharacterSetName" , 0 as "IsMultiSet" , IIF(POSITION('#PK_GEN#', UPPER(rf.rdb$description)) > 0, 1, 0) as "IsIdentity" , IIF(f.rdb$computed_blr IS NULL, 0, 1) as "IsStoreGenerated" , COALESCE(rf.rdb$default_source, f.rdb$default_source) as "Default" FROM rdb$relation_fields rf INNER JOIN rdb$fields f ON (rf.rdb$field_source = f.rdb$field_name) INNER JOIN rdb$relations r ON (rf.rdb$relation_name = r.rdb$relation_name) WHERE rf.rdb$system_flag = 0 AND r.rdb$view_blr IS NULL -- SViews3 SELECT TRIM(rdb$relation_name) as "Id" , 'Firebird' as "CatalogName" , 'Firebird' as "SchemaName" , TRIM(rdb$relation_name) as "Name" , rdb$view_source as "ViewDefinition" , 1 as "IsUpdatable" FROM rdb$relations WHERE rdb$view_blr IS NOT NULL AND rdb$system_flag = 0 -- SViewColumns3 SELECT TRIM(rf.rdb$relation_name) || 'x' || 
TRIM(rf.rdb$field_name) as "Id" , TRIM(rf.rdb$relation_name) as "ParentId" , TRIM(rf.rdb$field_name) as "Name" , rf.rdb$field_position+1 as "Ordinal" , IIF(COALESCE(rf.rdb$null_flag, f.rdb$null_flag) IS NULL, 1, 0) as "IsNullable" , TRIM(CASE WHEN POSITION('#BOOL#', UPPER(rf.rdb$description)) > 0 THEN 'smallint_bool' WHEN POSITION('#GUID#', UPPER(rf.rdb$description)) > 0 THEN 'guid' ELSE CASE f.rdb$field_type WHEN 7 THEN CASE WHEN ((f.rdb$field_sub_type = 2) OR (f.rdb$field_sub_type = 0 AND f.rdb$field_scale < 0)) THEN 'decimal' WHEN f.rdb$field_sub_type = 1 THEN 'numeric' ELSE 'smallint' END WHEN 8 THEN CASE WHEN ((f.rdb$field_sub_type = 2) OR (f.rdb$field_sub_type = 0 AND f.rdb$field_scale < 0)) THEN 'decimal' WHEN f.rdb$field_sub_type = 1 THEN 'numeric' ELSE 'int' END WHEN 16 THEN CASE WHEN ((f.rdb$field_sub_type = 2) OR (f.rdb$field_sub_type = 0 AND f.rdb$field_scale < 0)) THEN 'decimal' WHEN f.rdb$field_sub_type = 1 THEN 'numeric' ELSE 'bigint' END WHEN 10 THEN 'float' WHEN 27 THEN 'double' WHEN 12 THEN 'date' WHEN 13 THEN 'time' WHEN 35 THEN 'timestamp' WHEN 261 THEN CASE f.rdb$field_sub_type WHEN 1 THEN 'clob' ELSE 'blob' END WHEN 37 THEN 'varchar' WHEN 14 THEN 'char' WHEN 40 THEN 'cstring' END END) as "TypeName" , IIF(f.rdb$character_length = 0, 32765, f.rdb$character_length) as "MaxLength" --hot fix for CORE-2228 , IIF(f.rdb$field_precision = 0 AND f.rdb$field_scale < 0, 18, f.rdb$field_precision) as "Precision" , 4 as "DateTimePrecision" , f.rdb$field_scale * (-1) as "Scale" , NULL as "CollationCatalog" , NULL as "CollationSchema" , NULL as "CollationName" , NULL as "CharacterSetCatalog" , NULL as "CharacterSetSchema" , NULL as "CharacterSetName" , 0 as "IsMultiSet" , IIF(POSITION('#PK_GEN#', UPPER(rf.rdb$description)) > 0, 1, 0) as "IsIdentity" , IIF(f.rdb$computed_blr IS NULL, 0, 1) as "IsStoreGenerated" , COALESCE(rf.rdb$default_source, f.rdb$default_source) as "Default" FROM rdb$relation_fields rf INNER JOIN rdb$fields f ON (rf.rdb$field_source = 
f.rdb$field_name) INNER JOIN rdb$relations r ON (rf.rdb$relation_name = r.rdb$relation_name) WHERE rf.rdb$system_flag = 0 AND r.rdb$view_blr IS NOT NULL -- SFunctions3 SELECT TRIM(rdb$procedure_name) as "Id" , 'Firebird' as "CatalogName" , 'Firebird' as "SchemaName" , TRIM(rdb$procedure_name) as "Name" , NULL as "ReturnTypeName" , NULL as "ReturnMaxLength" , NULL as "ReturnPrecision" , NULL as "ReturnDateTimePrecision" , NULL as "ReturnScale" , NULL as "ReturnCollationCatalog" , NULL as "ReturnCollationSchema" , NULL as "ReturnCollationName" , NULL as "ReturnCharacterSetCatalog" , NULL as "ReturnCharacterSetSchema" , NULL as "ReturnCharacterSetName" , 0 as "ReturnIsMultiSet" , 0 as "IsAggregate" , 0 as "IsBuiltIn" , IIF(COALESCE(rdb$procedure_inputs, 0) = 0, 1, 0) as "IsNiladic" , 1 as "IsTvf" FROM rdb$procedures WHERE rdb$procedure_type = 1 -- SFunctionParameters3 SELECT TRIM(pp.rdb$procedure_name) || 'x' || TRIM(pp.rdb$parameter_name) as "Id" , TRIM(pp.rdb$procedure_name) as "ParentId" , TRIM(pp.rdb$parameter_name) as "Name" , pp.rdb$parameter_number+1 as "Ordinal" , TRIM(CASE WHEN POSITION('#BOOL#', UPPER(pp.rdb$description)) > 0 THEN 'smallint_bool' WHEN POSITION('#GUID#', UPPER(pp.rdb$description)) > 0 THEN 'guid' ELSE CASE f.rdb$field_type WHEN 7 THEN CASE WHEN ((f.rdb$field_sub_type = 2) OR (f.rdb$field_sub_type = 0 AND f.rdb$field_scale < 0)) THEN 'decimal' WHEN f.rdb$field_sub_type = 1 THEN 'numeric' ELSE 'smallint' END WHEN 8 THEN CASE WHEN ((f.rdb$field_sub_type = 2) OR (f.rdb$field_sub_type = 0 AND f.rdb$field_scale < 0)) THEN 'decimal' WHEN f.rdb$field_sub_type = 1 THEN 'numeric' ELSE 'int' END WHEN 16 THEN CASE WHEN ((f.rdb$field_sub_type = 2) OR (f.rdb$field_sub_type = 0 AND f.rdb$field_scale < 0)) THEN 'decimal' WHEN f.rdb$field_sub_type = 1 THEN 'numeric' ELSE 'bigint' END WHEN 10 THEN 'float' WHEN 27 THEN 'double' WHEN 12 THEN 'date' WHEN 13 THEN 'time' WHEN 35 THEN 'timestamp' WHEN 261 THEN CASE f.rdb$field_sub_type WHEN 1 THEN 'clob' ELSE 'blob' 
END WHEN 37 THEN 'varchar' WHEN 14 THEN 'char' WHEN 40 THEN 'cstring' END END) as "TypeName" , IIF(f.rdb$character_length = 0, 32765, f.rdb$character_length) as "MaxLength" --hot fix for CORE-2228 , IIF(f.rdb$field_precision = 0 AND f.rdb$field_scale < 0, 18, f.rdb$field_precision) as "Precision" , 4 as "DateTimePrecision" , f.rdb$field_scale * (-1) as "Scale" , NULL as "CollationCatalog" , NULL as "CollationSchema" , NULL "CollationName" , NULL as "CharacterSetCatalog" , NULL as "CharacterSetSchema" , NULL as "CharacterSetName" , 0 as "IsMultiSet" , TRIM(IIF(pp.rdb$parameter_type = 1, 'OUT', 'IN')) as "Mode" , NULL as "Default" FROM rdb$procedure_parameters pp INNER JOIN rdb$fields f ON (pp.rdb$field_source = f.rdb$field_name) WHERE pp.rdb$parameter_type = 0 -- SFunctionReturnTableColumns3 SELECT TRIM(pp.rdb$procedure_name) || 'x' || TRIM(pp.rdb$parameter_name) as "Id" , TRIM(pp.rdb$procedure_name) as "ParentId" , TRIM(pp.rdb$parameter_name) as "Name" , pp.rdb$parameter_number+1 as "Ordinal" , IIF(f.rdb$null_flag IS NULL, 1, 0) as "IsNullable" , TRIM(CASE WHEN POSITION('#BOOL#', UPPER(pp.rdb$description)) > 0 THEN 'smallint_bool' WHEN POSITION('#GUID#', UPPER(pp.rdb$description)) > 0 THEN 'guid' ELSE CASE f.rdb$field_type WHEN 7 THEN CASE WHEN ((f.rdb$field_sub_type = 2) OR (f.rdb$field_sub_type = 0 AND f.rdb$field_scale < 0)) THEN 'decimal' WHEN f.rdb$field_sub_type = 1 THEN 'numeric' ELSE 'smallint' END WHEN 8 THEN CASE WHEN ((f.rdb$field_sub_type = 2) OR (f.rdb$field_sub_type = 0 AND f.rdb$field_scale < 0)) THEN 'decimal' WHEN f.rdb$field_sub_type = 1 THEN 'numeric' ELSE 'int' END WHEN 16 THEN CASE WHEN ((f.rdb$field_sub_type = 2) OR (f.rdb$field_sub_type = 0 AND f.rdb$field_scale < 0)) THEN 'decimal' WHEN f.rdb$field_sub_type = 1 THEN 'numeric' ELSE 'bigint' END WHEN 10 THEN 'float' WHEN 27 THEN 'double' WHEN 12 THEN 'date' WHEN 13 THEN 'time' WHEN 35 THEN 'timestamp' WHEN 261 THEN CASE f.rdb$field_sub_type WHEN 1 THEN 'clob' ELSE 'blob' END WHEN 37 THEN 
'varchar' WHEN 14 THEN 'char' WHEN 40 THEN 'cstring' END END) as "TypeName" , IIF(f.rdb$character_length = 0, 32765, f.rdb$character_length) as "MaxLength" --hot fix for CORE-2228 , IIF(f.rdb$field_precision = 0 AND f.rdb$field_scale < 0, 18, f.rdb$field_precision) as "Precision" , 4 as "DateTimePrecision" , f.rdb$field_scale * (-1) as "Scale" , NULL as "CollationCatalog" , NULL as "CollationSchema" , NULL "CollationName" , NULL as "CharacterSetCatalog" , NULL as "CharacterSetSchema" , NULL as "CharacterSetName" , 0 as "IsMultiSet" , 0 as "IsIdentity" , 0 as "IsStoreGenerated" , NULL as "Default" FROM rdb$procedure_parameters pp INNER JOIN rdb$fields f ON (pp.rdb$field_source = f.rdb$field_name) WHERE pp.rdb$parameter_type = 1 -- SProcedures3 SELECT TRIM(rdb$procedure_name) as "Id" , 'Firebird' as "CatalogName" , 'Firebird' as "SchemaName" , TRIM(rdb$procedure_name) as "Name" FROM rdb$procedures WHERE rdb$procedure_type = 2 -- SProcedureParameters3 SELECT TRIM(pp.rdb$procedure_name) || 'x' || TRIM(pp.rdb$parameter_name) as "Id" , TRIM(pp.rdb$procedure_name) as "ParentId" , TRIM(pp.rdb$parameter_name) as "Name" , pp.rdb$parameter_number+1 as "Ordinal" , TRIM(CASE WHEN POSITION('#BOOL#', UPPER(pp.rdb$description)) > 0 THEN 'smallint_bool' WHEN POSITION('#GUID#', UPPER(pp.rdb$description)) > 0 THEN 'guid' ELSE CASE f.rdb$field_type WHEN 7 THEN CASE WHEN ((f.rdb$field_sub_type = 2) OR (f.rdb$field_sub_type = 0 AND f.rdb$field_scale < 0)) THEN 'decimal' WHEN f.rdb$field_sub_type = 1 THEN 'numeric' ELSE 'smallint' END WHEN 8 THEN CASE WHEN ((f.rdb$field_sub_type = 2) OR (f.rdb$field_sub_type = 0 AND f.rdb$field_scale < 0)) THEN 'decimal' WHEN f.rdb$field_sub_type = 1 THEN 'numeric' ELSE 'int' END WHEN 16 THEN CASE WHEN ((f.rdb$field_sub_type = 2) OR (f.rdb$field_sub_type = 0 AND f.rdb$field_scale < 0)) THEN 'decimal' WHEN f.rdb$field_sub_type = 1 THEN 'numeric' ELSE 'bigint' END WHEN 10 THEN 'float' WHEN 27 THEN 'double' WHEN 12 THEN 'date' WHEN 13 THEN 'time' WHEN 35 
THEN 'timestamp' WHEN 261 THEN CASE f.rdb$field_sub_type WHEN 1 THEN 'clob' ELSE 'blob' END WHEN 37 THEN 'varchar' WHEN 14 THEN 'char' WHEN 40 THEN 'cstring' END END) as "TypeName" , IIF(f.rdb$character_length = 0, 32765, f.rdb$character_length) as "MaxLength" --hot fix for CORE-2228 , IIF(f.rdb$field_precision = 0 AND f.rdb$field_scale < 0, 18, f.rdb$field_precision) as "Precision" , 4 as "DateTimePrecision" , f.rdb$field_scale * (-1) as "Scale" , NULL as "CollationCatalog" , NULL as "CollationSchema" , NULL "CollationName" , NULL as "CharacterSetCatalog" , NULL as "CharacterSetSchema" , NULL as "CharacterSetName" , 0 as "IsMultiSet" , TRIM(IIF(pp.rdb$parameter_type = 1, 'OUT', 'IN')) as "Mode" , NULL as "Default" FROM rdb$procedure_parameters pp INNER JOIN rdb$fields f ON (pp.rdb$field_source = f.rdb$field_name) WHERE pp.rdb$parameter_type = 0 -- SConstraints3 SELECT TRIM(rc.rdb$constraint_name) as "Id" , TRIM(rc.rdb$relation_name) as "ParentId" , TRIM(rc.rdb$constraint_name) as "Name" , TRIM(rc.rdb$constraint_type) as "ConstraintType" , 0 as "IsDeferrable" , 0 "IsInitiallyDeferred" FROM rdb$relation_constraints rc WHERE rc.rdb$constraint_type IN ('PRIMARY KEY', 'FOREIGN KEY', 'UNIQUE') -- SCheckConstraints3 SELECT TRIM(rc.rdb$constraint_name) as "Id" , TRIM(SUBSTRING(trg.rdb$trigger_source from 6)) as "Expression" FROM rdb$relation_constraints rc INNER JOIN rdb$check_constraints cc on (rc.rdb$constraint_name = cc.rdb$constraint_name) LEFT JOIN rdb$triggers trg on (cc.rdb$trigger_name = trg.rdb$trigger_name) WHERE rc.rdb$constraint_type IN ('CHECK') -- SConstraintColumns3 SELECT TRIM(rc.rdb$constraint_name) as "ConstraintId" , TRIM(rc.rdb$relation_name) || 'x' || TRIM(ise.rdb$field_name) as "ColumnId" FROM rdb$relation_constraints rc INNER JOIN rdb$index_segments ise ON (rc.rdb$index_name = ise.rdb$index_name) WHERE rc.rdb$constraint_type IN ('PRIMARY KEY', 'FOREIGN KEY', 'UNIQUE') -- SForeignKeyConstraints3 SELECT TRIM(rc.rdb$constraint_name) as "Id" , 
rc.rdb$update_rule "UpdateRule" , rc.rdb$delete_rule "DeleteRule" FROM rdb$ref_constraints rc -- SForeignKeys3 SELECT TRIM(refc.rdb$constraint_name) || 'x' || TRIM(is2.rdb$field_position+1) as "Id" , TRIM(relc1.rdb$relation_name) || 'x' || TRIM(is1.rdb$field_name) as "FromColumnId" , TRIM(relc2.rdb$relation_name) || 'x' || TRIM(is2.rdb$field_name) as "ToColumnId" , TRIM(refc.rdb$constraint_name) as "ConstraintId" , is2.rdb$field_position+1 as "Ordinal" FROM rdb$ref_constraints refc LEFT JOIN rdb$relation_constraints relc1 ON (refc.rdb$constraint_name = relc1.rdb$constraint_name) INNER JOIN rdb$index_segments is1 ON (relc1.rdb$index_name = is1.rdb$index_name) LEFT JOIN rdb$relation_constraints relc2 ON (refc.rdb$const_name_uq = relc2.rdb$constraint_name) INNER JOIN rdb$index_segments is2 ON (relc2.rdb$index_name = is2.rdb$index_name) WHERE is1.rdb$field_position = is2.rdb$field_position -- SViewConstraints3 SELECT CAST(NULL as varchar(1)) as "Id" , CAST(NULL as varchar(1)) as "ParentId" , CAST(NULL as varchar(1)) as "Name" , CAST(NULL as varchar(1)) as "ConstraintType" , CAST(0 as smallint) as "IsDeferrable" , CAST(0 as smallint) as "IsInitiallyDeferred" , CAST(NULL as varchar(1)) as "Expression" , CAST(NULL as varchar(1)) as "UpdateRule" , CAST(NULL as varchar(1)) as "DeleteRule" FROM rdb$database WHERE 0=1 -- SViewConstraintColumns3 SELECT CAST(NULL as varchar(1)) as "ConstraintId" , CAST(NULL as varchar(1)) as "ColumnId" FROM rdb$database WHERE 0=1 -- SViewForeignKeys3 SELECT CAST(NULL as varchar(1)) as "Id" , CAST(NULL as varchar(1)) as "ToColumnId" , CAST(NULL as varchar(1)) as "FromColumnId" , CAST(NULL as varchar(1)) as "ConstraintId" , 0 as "Ordinal" FROM rdb$database WHERE 0=1 ================================================ FILE: src/EntityFramework.Firebird/Resources/StoreSchemaMapping.msl ================================================  ================================================ FILE: 
src/EntityFramework.Firebird/Resources/StoreSchemaMappingVersion3.msl
================================================


================================================
FILE: src/EntityFramework.Firebird/SqlGen/DmlSqlGenerator.cs
================================================
/*
 * The contents of this file are subject to the Initial
 * Developer's Public License Version 1.0 (the "License");
 * you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 * Software distributed under the License is distributed on
 * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 * express or implied. See the License for the specific
 * language governing rights and limitations under the License.
 *
 * All Rights Reserved.
 */

//$Authors = Jiri Cincura (jiri@cincura.net)

using System.Collections.Generic;
using System.Data.Common;
using System.Data.Entity.Core.Common.CommandTrees;
using System.Data.Entity.Core.Metadata.Edm;
using System.Linq;
using System.Text;
using FirebirdSql.Data.FirebirdClient;

namespace EntityFramework.Firebird.SqlGen;

/// <summary>
/// Generates Firebird DML (UPDATE/DELETE/INSERT) SQL from EF command trees.
/// When a tree has a Returning expression, the statement is wrapped in an
/// EXECUTE BLOCK so store-generated values can be read back via RETURNING ... INTO.
/// </summary>
internal static class DmlSqlGenerator
{
	#region Static Fields

	// Initial StringBuilder capacity for generated command text.
	private const int CommandTextBuilderInitialCapacity = 256;

	#endregion

	#region Static Methods

	/// <summary>
	/// Generates an UPDATE statement for the given command tree.
	/// </summary>
	/// <param name="tree">Update command tree to translate.</param>
	/// <param name="parameters">Receives the parameters created while translating constants.</param>
	/// <param name="generateParameters">When true, constants become parameters; otherwise inline literals.</param>
	/// <returns>The generated SQL text.</returns>
	internal static string GenerateUpdateSql(DbUpdateCommandTree tree, out List parameters, bool generateParameters = true)
	{
		var commandText = new StringBuilder(CommandTextBuilderInitialCapacity);
		// Member values are preserved only when there is a Returning expression (server generation).
		var translator = new ExpressionTranslator(commandText, tree, null != tree.Returning, generateParameters);
		var first = true;

		commandText.Append("UPDATE ");
		tree.Target.Expression.Accept(translator);
		commandText.AppendLine();

		// set c1 = ..., c2 = ..., ...
		commandText.Append("SET ");
		foreach (DbSetClause setClause in tree.SetClauses)
		{
			if (first)
			{
				first = false;
			}
			else
			{
				commandText.Append(", ");
			}
			setClause.Property.Accept(translator);
			commandText.Append(" = ");
			setClause.Value.Accept(translator);
			translator.RegisterMemberValue(setClause.Property, setClause.Value);
		}

		if (first)
		{
			// If first is still true, it indicates there were no set
			// clauses. Introduce a fake set clause so that:
			// - we acquire the appropriate locks
			// - server-gen columns (e.g. timestamp) get recomputed
			var table = ((DbScanExpression)tree.Target.Expression).Target;
			// hope this column isn't indexed to not waste power
			var someColumn = table.ElementType.Members.Last(x => !MetadataHelpers.IsStoreGenerated(x));
			commandText.AppendFormat("{0} = {0}", GenerateMemberSql(someColumn));
		}
		commandText.AppendLine();

		// where c1 = ..., c2 = ...
		commandText.Append("WHERE ");
		tree.Predicate.Accept(translator);
		commandText.AppendLine();

		// generate returning sql
		GenerateReturningSql(commandText, tree, translator, tree.Returning);

		parameters = translator.Parameters;
		return commandText.ToString();
	}

	/// <summary>
	/// Generates a DELETE statement for the given command tree.
	/// </summary>
	/// <param name="tree">Delete command tree to translate.</param>
	/// <param name="parameters">Receives the parameters created while translating constants.</param>
	/// <param name="generateParameters">When true, constants become parameters; otherwise inline literals.</param>
	/// <returns>The generated SQL text.</returns>
	internal static string GenerateDeleteSql(DbDeleteCommandTree tree, out List parameters, bool generateParameters = true)
	{
		var commandText = new StringBuilder(CommandTextBuilderInitialCapacity);
		var translator = new ExpressionTranslator(commandText, tree, false, generateParameters);

		commandText.Append("DELETE FROM ");
		tree.Target.Expression.Accept(translator);
		commandText.AppendLine();

		// where c1 = ... AND c2 = ...
		commandText.Append("WHERE ");
		tree.Predicate.Accept(translator);

		parameters = translator.Parameters;
		return commandText.ToString();
	}

	/// <summary>
	/// Generates an INSERT statement for the given command tree. With no set
	/// clauses, emits DEFAULT VALUES.
	/// </summary>
	/// <param name="tree">Insert command tree to translate.</param>
	/// <param name="parameters">Receives the parameters created while translating constants.</param>
	/// <param name="generateParameters">When true, constants become parameters; otherwise inline literals.</param>
	/// <returns>The generated SQL text.</returns>
	internal static string GenerateInsertSql(DbInsertCommandTree tree, out List parameters, bool generateParameters = true)
	{
		var commandText = new StringBuilder(CommandTextBuilderInitialCapacity);
		var translator = new ExpressionTranslator(commandText, tree, null != tree.Returning, generateParameters);
		var first = true;

		commandText.Append("INSERT INTO ");
		tree.Target.Expression.Accept(translator);

		if (tree.SetClauses.Any())
		{
			// (c1, c2, c3, ...)
			commandText.Append("(");
			foreach (DbSetClause setClause in tree.SetClauses)
			{
				if (first)
				{
					first = false;
				}
				else
				{
					commandText.Append(", ");
				}
				setClause.Property.Accept(translator);
			}
			commandText.AppendLine(")");

			// values c1, c2, ...
			first = true;
			commandText.Append("VALUES (");
			foreach (DbSetClause setClause in tree.SetClauses)
			{
				if (first)
				{
					first = false;
				}
				else
				{
					commandText.Append(", ");
				}
				setClause.Value.Accept(translator);
				translator.RegisterMemberValue(setClause.Property, setClause.Value);
			}
			commandText.AppendLine(")");
		}
		else
		{
			commandText.AppendLine("DEFAULT VALUES");
		}

		// generate returning sql
		GenerateReturningSql(commandText, tree, translator, tree.Returning);

		parameters = translator.Parameters;
		return commandText.ToString();
	}

	// Generates SQL describing a member
	// Requires: member must belong to an entity type (a safe requirement for DML
	// SQL gen, where we only access table columns)
	internal static string GenerateMemberSql(EdmMember member)
	{
		return SqlGenerator.QuoteIdentifier(member.Name);
	}

	/// <summary>
	/// Wraps the already-generated DML in an EXECUTE BLOCK with a RETURNING ... INTO
	/// clause so store-generated columns can be fetched back. No-op when
	/// <paramref name="returning"/> is null.
	/// </summary>
	/// <param name="commandText">Builder holding the DML generated so far; rewritten in place.</param>
	/// <param name="tree">The modification command tree being translated.</param>
	/// <param name="translator">Translator holding collected parameters and member values.</param>
	/// <param name="returning">The Returning expression, or null when nothing is fetched back.</param>
	private static void GenerateReturningSql(
		StringBuilder commandText,
		DbModificationCommandTree tree,
		ExpressionTranslator translator,
		DbExpression returning)
	{
		// Nothing to do if there is no Returning expression
		if (returning == null)
		{
			return;
		}

		var table = ((DbScanExpression)tree.Target.Expression).Target;
		// Store-generated columns; for UPDATE/DELETE the key members are excluded
		// (they are never server-regenerated on those operations).
		var columnsToFetch =
			table.ElementType.Members
				.Where(m => MetadataHelpers.IsStoreGenerated(m))
				.Except((!(tree is DbInsertCommandTree) ? table.ElementType.KeyMembers : Enumerable.Empty()));

		var startBlock = new StringBuilder();
		var separator = string.Empty;

		startBlock.Append("EXECUTE BLOCK ");
		if (translator.Parameters.Any())
		{
			startBlock.AppendLine("(");
			separator = string.Empty;
			foreach (FbParameter param in translator.Parameters)
			{
				startBlock.Append(separator);
				// Block input name is the parameter name without the '@' prefix.
				startBlock.Append(param.ParameterName.Replace("@", string.Empty));
				startBlock.Append(" ");
				// Find the member this parameter was registered for, to emit its store type.
				var member = translator.MemberValues.First(m => m.Value.Contains(param)).Key;
				startBlock.Append(SqlGenerator.GetSqlPrimitiveType(member.TypeUsage));
				if (param.FbDbType == FbDbType.VarChar || param.FbDbType == FbDbType.Char)
					startBlock.Append(" CHARACTER SET UTF8");
				startBlock.Append(" = ");
				startBlock.Append(param.ParameterName);
				separator = ", ";
			}
			startBlock.AppendLine();
			startBlock.Append(") ");
		}

		startBlock.AppendLine("RETURNS (");
		separator = string.Empty;
		foreach (var m in columnsToFetch)
		{
			startBlock.Append(separator);
			startBlock.Append(GenerateMemberSql(m));
			startBlock.Append(" ");
			startBlock.Append(SqlGenerator.GetSqlPrimitiveType(m.TypeUsage));
			separator = ", ";
		}
		startBlock.AppendLine(")");
		startBlock.AppendLine("AS BEGIN");

		// Inside PSQL the '@p' parameters must be referenced as ':p' variables.
		var newCommand = ChangeParamsToPSQLParams(commandText.ToString(), translator.Parameters.Select(p => p.ParameterName).ToArray());
		commandText.Remove(0, commandText.Length);
		commandText.Insert(0, newCommand);
		commandText.Insert(0, startBlock.ToString());

		commandText.Append("RETURNING ");
		separator = string.Empty;
		foreach (var m in columnsToFetch)
		{
			commandText.Append(separator);
			commandText.Append(GenerateMemberSql(m));
			separator = ", ";
		}
		commandText.Append(" INTO ");
		separator = string.Empty;
		foreach (var m in columnsToFetch)
		{
			commandText.Append(separator);
			commandText.Append(":" + GenerateMemberSql(m));
			separator = ", ";
		}
		commandText.AppendLine(";");
		// SUSPEND only when the DML actually affected a row, so the block
		// yields exactly zero or one result row.
		commandText.AppendLine("IF (ROW_COUNT > 0) THEN");
		commandText.AppendLine("  SUSPEND;");
		commandText.AppendLine("END");
	}

	/// <summary>
	/// Rewrites ADO-style '@name' parameter references into PSQL ':name' references.
	/// </summary>
	private static string ChangeParamsToPSQLParams(string commandText, string[] parametersUsed)
	{
		var command = new StringBuilder(commandText);
		foreach (var param in parametersUsed)
		{
			// '@p0' -> ':p0' (drop the leading '@', prefix with ':').
			command.Replace(param, ":" + param.Remove(0, 1));
		}
		return command.ToString();
	}

	#endregion
}

================================================
FILE: src/EntityFramework.Firebird/SqlGen/ExpressionTranslator.cs
================================================
/*
 * The contents of this file are subject to the Initial
 * Developer's Public License Version 1.0 (the "License");
 * you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 * Software distributed under the License is distributed on
 * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 * express or implied. See the License for the specific
 * language governing rights and limitations under the License.
 *
 * All Rights Reserved.
*/ //$Authors = Jiri Cincura (jiri@cincura.net) using System; using System.Collections.Generic; using System.Data.Common; using System.Data.Entity.Core.Common.CommandTrees; using System.Data.Entity.Core.Metadata.Edm; using System.Diagnostics; using System.Globalization; using System.Text; using FirebirdSql.Data.FirebirdClient; namespace EntityFramework.Firebird.SqlGen; internal class ExpressionTranslator : DbExpressionVisitor { #region Fields private readonly StringBuilder _commandText; private readonly DbModificationCommandTree _commandTree; private readonly List _parameters; private readonly Dictionary> _memberValues; private readonly bool generateParameters; private int _parameterNameCount = 0; #endregion #region Internal Properties internal List Parameters { get { return _parameters; } } internal Dictionary> MemberValues { get { return _memberValues; } } #endregion #region Unsupported Visit Methods public override void Visit(DbApplyExpression expression) { throw new NotSupportedException("Visit(\"ApplyExpression\") is not supported."); } public override void Visit(DbArithmeticExpression expression) { throw new NotSupportedException("Visit(\"ArithmeticExpression\") is not supported."); } public override void Visit(DbCaseExpression expression) { throw new NotSupportedException("Visit(\"CaseExpression\") is not supported."); } public override void Visit(DbCastExpression expression) { throw new NotSupportedException("Visit(\"CastExpression\") is not supported."); } public override void Visit(DbCrossJoinExpression expression) { throw new NotSupportedException("Visit(\"CrossJoinExpression\") is not supported."); } public override void Visit(DbDerefExpression expression) { throw new NotSupportedException("Visit(\"DerefExpression\") is not supported."); } public override void Visit(DbDistinctExpression expression) { throw new NotSupportedException("Visit(\"DistinctExpression\") is not supported."); } public override void Visit(DbElementExpression expression) { throw 
new NotSupportedException("Visit(\"ElementExpression\") is not supported."); } public override void Visit(DbEntityRefExpression expression) { throw new NotSupportedException("Visit(\"EntityRefExpression\") is not supported."); } public override void Visit(DbExceptExpression expression) { throw new NotSupportedException("Visit(\"ExceptExpression\") is not supported."); } public override void Visit(DbExpression expression) { throw new NotSupportedException("Visit(\"Expression\") is not supported."); } public override void Visit(DbFilterExpression expression) { throw new NotSupportedException("Visit(\"FilterExpression\") is not supported."); } public override void Visit(DbFunctionExpression expression) { throw new NotSupportedException("Visit(\"FunctionExpression\") is not supported."); } public override void Visit(DbGroupByExpression expression) { throw new NotSupportedException("Visit(\"GroupByExpression\") is not supported."); } public override void Visit(DbIntersectExpression expression) { throw new NotSupportedException("Visit(\"IntersectExpression\") is not supported."); } public override void Visit(DbIsEmptyExpression expression) { throw new NotSupportedException("Visit(\"IsEmptyExpression\") is not supported."); } public override void Visit(DbIsOfExpression expression) { throw new NotSupportedException("Visit(\"IsOfExpression\") is not supported."); } public override void Visit(DbJoinExpression expression) { throw new NotSupportedException("Visit(\"JoinExpression\") is not supported."); } public override void Visit(DbLikeExpression expression) { throw new NotSupportedException("Visit(\"LikeExpression\") is not supported."); } public override void Visit(DbLimitExpression expression) { throw new NotSupportedException("Visit(\"LimitExpression\") is not supported."); } public override void Visit(DbOfTypeExpression expression) { throw new NotSupportedException("Visit(\"OfTypeExpression\") is not supported."); } public override void 
Visit(DbParameterReferenceExpression expression) { throw new NotSupportedException("Visit(\"ParameterReferenceExpression\") is not supported."); } public override void Visit(DbProjectExpression expression) { throw new NotSupportedException("Visit(\"ProjectExpression\") is not supported."); } public override void Visit(DbQuantifierExpression expression) { throw new NotSupportedException("Visit(\"QuantifierExpression\") is not supported."); } public override void Visit(DbRefExpression expression) { throw new NotSupportedException("Visit(\"RefExpression\") is not supported."); } public override void Visit(DbRefKeyExpression expression) { throw new NotSupportedException("Visit(\"RefKeyExpression\") is not supported."); } public override void Visit(DbRelationshipNavigationExpression expression) { throw new NotSupportedException("Visit(\"RelationshipNavigationExpression\") is not supported."); } public override void Visit(DbSkipExpression expression) { throw new NotSupportedException("Visit(\"SkipExpression\") is not supported."); } public override void Visit(DbSortExpression expression) { throw new NotSupportedException("Visit(\"SortExpression\") is not supported."); } public override void Visit(DbTreatExpression expression) { throw new NotSupportedException("Visit(\"TreatExpression\") is not supported."); } public override void Visit(DbUnionAllExpression expression) { throw new NotSupportedException("Visit(\"UnionAllExpression\") is not supported."); } public override void Visit(DbVariableReferenceExpression expression) { throw new NotSupportedException("Visit(\"VariableReferenceExpression\") is not supported."); } #endregion #region Methods public override void Visit(DbAndExpression expression) { VisitBinary(expression, " AND "); } public override void Visit(DbOrExpression expression) { VisitBinary(expression, " OR "); } public override void Visit(DbComparisonExpression expression) { Debug.Assert(expression.ExpressionKind == DbExpressionKind.Equals, "only equals 
comparison expressions are produced in DML command trees in V1"); VisitBinary(expression, " = "); RegisterMemberValue(expression.Left, expression.Right); } public override void Visit(DbIsNullExpression expression) { expression.Argument.Accept(this); _commandText.Append(" IS NULL"); } public override void Visit(DbNotExpression expression) { _commandText.Append("NOT ("); expression.Accept(this); _commandText.Append(")"); } public override void Visit(DbConstantExpression expression) { if (generateParameters) { var parameter = CreateParameter(expression.Value, expression.ResultType); _commandText.Append(parameter.ParameterName); } else { using (var writer = new SqlWriter(_commandText)) { var sqlGenerator = new SqlGenerator(); sqlGenerator.WriteSql(writer, expression.Accept(sqlGenerator)); } } } public override void Visit(DbScanExpression expression) { _commandText.Append(SqlGenerator.GetTargetSql(expression.Target)); } public override void Visit(DbPropertyExpression expression) { _commandText.Append(DmlSqlGenerator.GenerateMemberSql(expression.Property)); } public override void Visit(DbNullExpression expression) { _commandText.Append("NULL"); } public override void Visit(DbNewInstanceExpression expression) { // assumes all arguments are self-describing (no need to use aliases // because no renames are ever used in the projection) var first = true; foreach (var argument in expression.Arguments) { if (first) { first = false; } else { _commandText.Append(", "); } argument.Accept(this); } } #endregion #region Internal Methods /// /// Initialize a new expression translator populating the given string builder /// with command text. Command text builder and command tree must not be null. 
/// /// Command text with which to populate commands /// Command tree generating SQL /// Indicates whether the translator should preserve /// member values while compiling t-SQL (only needed for server generation) internal ExpressionTranslator( StringBuilder commandText, DbModificationCommandTree commandTree, bool preserveMemberValues, bool generateParameters) { Debug.Assert(null != commandText); Debug.Assert(null != commandTree); _commandText = commandText; _commandTree = commandTree; _parameters = new List(); _memberValues = preserveMemberValues ? new Dictionary>() : null; this.generateParameters = generateParameters; } // generate parameter (name based on parameter ordinal) internal FbParameter CreateParameter(object value, TypeUsage type) { var parameterName = string.Concat("@p", _parameterNameCount.ToString(CultureInfo.InvariantCulture)); _parameterNameCount++; var parameter = FbProviderServices.CreateSqlParameter(parameterName, type, ParameterMode.In, value); _parameters.Add(parameter); return parameter; } /// /// Call this method to register a property value pair so the translator "remembers" /// the values for members of the row being modified. These values can then be used /// to form a predicate for server-generation (based on the key of the row) /// /// Expression containing the column reference (property expression). /// Expression containing the value of the column. 
internal void RegisterMemberValue(DbExpression propertyExpression, DbExpression value) { if (null != _memberValues) { // register the value for this property Debug.Assert(propertyExpression.ExpressionKind == DbExpressionKind.Property, "DML predicates and setters must be of the form property = value"); // get name of left property var property = ((DbPropertyExpression)propertyExpression).Property; // don't track null values if (value.ExpressionKind != DbExpressionKind.Null) { Debug.Assert(value.ExpressionKind == DbExpressionKind.Constant, "value must either constant or null"); // retrieve the last parameter added (which describes the parameter) var p = _parameters[_parameters.Count - 1]; if (!_memberValues.ContainsKey(property)) _memberValues.Add(property, new List(new[] { p })); else _memberValues[property].Add(p); } } } #endregion #region Private Methods private void VisitBinary(DbBinaryExpression expression, string separator) { _commandText.Append("("); expression.Left.Accept(this); _commandText.Append(separator); expression.Right.Accept(this); _commandText.Append(")"); } #endregion } ================================================ FILE: src/EntityFramework.Firebird/SqlGen/FirstClause.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. 
*/ //$Authors = Jiri Cincura (jiri@cincura.net) using System.Globalization; namespace EntityFramework.Firebird.SqlGen; internal class FirstClause : ISqlFragment { #region Fields private ISqlFragment _firstCount; #endregion #region Internal Properties /// /// How many first rows should be selected. /// internal ISqlFragment FirstCount { get { return _firstCount; } } #endregion #region Constructors /// /// Creates a FirstClause with the given topCount and withTies. /// /// internal FirstClause(ISqlFragment firstCount) { _firstCount = firstCount; } /// /// Creates a TopClause with the given topCount and withTies. /// /// internal FirstClause(int firstCount) { var sqlBuilder = new SqlBuilder(); sqlBuilder.Append(firstCount.ToString(CultureInfo.InvariantCulture)); _firstCount = sqlBuilder; } #endregion #region ISqlFragment Members /// /// Write out the FIRST part of sql select statement /// It basically writes FIRST (X). /// /// /// public void WriteSql(SqlWriter writer, SqlGenerator sqlGenerator) { writer.Write("FIRST ("); FirstCount.WriteSql(writer, sqlGenerator); writer.Write(")"); writer.Write(" "); } #endregion } ================================================ FILE: src/EntityFramework.Firebird/SqlGen/ISqlFragment.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. 
 */

//$Authors = Jiri Cincura (jiri@cincura.net)

namespace EntityFramework.Firebird.SqlGen;

/// <summary>
/// A fragment of SQL that knows how to write itself to a <see cref="SqlWriter"/>.
/// Implemented by all pieces the SQL generator composes (builders, clauses, symbols).
/// </summary>
internal interface ISqlFragment
{
	void WriteSql(SqlWriter writer, SqlGenerator sqlGenerator);
}

================================================
FILE: src/EntityFramework.Firebird/SqlGen/JoinSymbol.cs
================================================
/*
 * The contents of this file are subject to the Initial
 * Developer's Public License Version 1.0 (the "License");
 * you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 * Software distributed under the License is distributed on
 * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 * express or implied. See the License for the specific
 * language governing rights and limitations under the License.
 *
 * All Rights Reserved.
 */

//$Authors = Jiri Cincura (jiri@cincura.net)

using System;
using System.Collections.Generic;
using System.Data.Entity.Core.Metadata.Edm;

namespace EntityFramework.Firebird.SqlGen;

/// <summary>
/// A symbol that stands for a join: it tracks the columns it exposes, the
/// extents (input symbols) it was built from, and a case-insensitive
/// name-to-extent lookup.
/// </summary>
internal sealed class JoinSymbol : Symbol
{
	#region Fields

	private List _columnList;
	private List _extentList;
	private List _flattenedExtentList;
	private Dictionary _nameToExtent;
	private bool _isNestedJoin;

	#endregion

	#region Properties

	// Lazily created list of column symbols exposed by this join.
	internal List ColumnList
	{
		get
		{
			if (null == _columnList)
			{
				_columnList = new List();
			}
			return _columnList;
		}
		set { _columnList = value; }
	}

	// Input extents this join was constructed from (populated by the constructor).
	internal List ExtentList
	{
		get { return _extentList; }
	}

	// Lazily created flattened extent list (nested joins expanded).
	internal List FlattenedExtentList
	{
		get
		{
			if (null == _flattenedExtentList)
			{
				_flattenedExtentList = new List();
			}
			return _flattenedExtentList;
		}
		set { _flattenedExtentList = value; }
	}

	// Case-insensitive lookup from extent name to its symbol.
	internal Dictionary NameToExtent
	{
		get { return _nameToExtent; }
	}

	internal bool IsNestedJoin
	{
		get { return _isNestedJoin; }
		set { _isNestedJoin = value; }
	}

	#endregion

	#region Constructors

	/// <summary>
	/// Creates a join symbol over the given extents; each extent is registered
	/// both in the ordered extent list and in the name lookup (last one wins on
	/// duplicate names, compared case-insensitively).
	/// </summary>
	public JoinSymbol(string name, TypeUsage type, List extents)
		: base(name, type)
	{
		_extentList = new List(extents.Count);
		_nameToExtent = new Dictionary(extents.Count, StringComparer.OrdinalIgnoreCase);
		foreach (var symbol in extents)
		{
			_nameToExtent[symbol.Name] = symbol;
			ExtentList.Add(symbol);
		}
	}

	#endregion
}

================================================
FILE: src/EntityFramework.Firebird/SqlGen/SkipClause.cs
================================================
/*
 * The contents of this file are subject to the Initial
 * Developer's Public License Version 1.0 (the "License");
 * you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 * Software distributed under the License is distributed on
 * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 * express or implied. See the License for the specific
 * language governing rights and limitations under the License.
 *
 * All Rights Reserved.
 */

//$Authors = Jiri Cincura (jiri@cincura.net)

using System.Globalization;

namespace EntityFramework.Firebird.SqlGen;

/// <summary>
/// Represents the Firebird SKIP (N) clause of a SELECT statement
/// (row offsetting).
/// </summary>
internal class SkipClause : ISqlFragment
{
	#region Fields

	private ISqlFragment _skipCount;

	#endregion

	#region Internal Properties

	/// <summary>
	/// How many rows should be skipped.
	/// </summary>
	internal ISqlFragment SkipCount
	{
		get { return _skipCount; }
	}

	#endregion

	#region Constructors

	/// <summary>
	/// Creates a SkipClause with the given skipCount fragment.
	/// </summary>
	/// <param name="skipCount">Fragment producing the row count expression.</param>
	internal SkipClause(ISqlFragment skipCount)
	{
		_skipCount = skipCount;
	}

	/// <summary>
	/// Creates a SkipClause with the given constant skipCount.
	/// </summary>
	/// <param name="skipCount">Constant row count.</param>
	internal SkipClause(int skipCount)
	{
		var sqlBuilder = new SqlBuilder();
		sqlBuilder.Append(skipCount.ToString(CultureInfo.InvariantCulture));
		_skipCount = sqlBuilder;
	}

	#endregion

	#region ISqlFragment Members

	/// <summary>
	/// Write out the SKIP part of sql select statement
	/// It basically writes SKIP (X).
	/// </summary>
	/// <param name="writer">Writer receiving the SQL text.</param>
	/// <param name="sqlGenerator">Generator used to render the nested fragment.</param>
	public void WriteSql(SqlWriter writer, SqlGenerator sqlGenerator)
	{
		writer.Write("SKIP (");
		SkipCount.WriteSql(writer, sqlGenerator);
		writer.Write(")");
		writer.Write(" ");
	}

	#endregion
}

================================================
FILE: src/EntityFramework.Firebird/SqlGen/SqlBuilder.cs
================================================
/*
 * The contents of this file are subject to the Initial
 * Developer's Public License Version 1.0 (the "License");
 * you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 * Software distributed under the License is distributed on
 * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 * express or implied. See the License for the specific
 * language governing rights and limitations under the License.
 *
 * All Rights Reserved.
 */

//$Authors = Jiri Cincura (jiri@cincura.net)

using System;
using System.Collections.Generic;
using System.Diagnostics;

namespace EntityFramework.Firebird.SqlGen;

/// <summary>
/// An ordered list of strings and nested <see cref="ISqlFragment"/>s that is
/// itself an ISqlFragment; the basic composition unit of SQL generation.
/// </summary>
internal sealed class SqlBuilder : ISqlFragment
{
	#region Fields

	private List _sqlFragments;

	#endregion

	#region Properties

	// Lazily created backing list; stays null until the first Append.
	private List SqlFragments
	{
		get
		{
			if (null == _sqlFragments)
			{
				_sqlFragments = new List();
			}
			return _sqlFragments;
		}
	}

	#endregion

	#region Methods

	/// <summary>
	/// Add an object to the list - we do not verify that it is a proper sql fragment
	/// since this is an internal method.
	/// </summary>
	/// <param name="s">A string or an ISqlFragment; anything else fails in WriteSql.</param>
	public void Append(object s)
	{
		Debug.Assert(s != null);
		SqlFragments.Add(s);
	}

	/// <summary>
	/// This is to pretty print the SQL. The writer
	/// needs to know about new lines so that it can add the right amount of
	/// indentation at the beginning of lines.
	/// </summary>
	public void AppendLine()
	{
		SqlFragments.Add(Environment.NewLine);
	}

	/// <summary>
	/// Whether the builder is empty. This is used by the select-statement
	/// generation to determine whether a sql statement can be reused.
	/// </summary>
	public bool IsEmpty
	{
		get { return ((null == _sqlFragments) || (0 == _sqlFragments.Count)); }
	}

	#endregion

	#region ISqlFragment Members

	/// <summary>
	/// We delegate the writing of the fragment to the appropriate type.
	/// </summary>
	/// <param name="writer">Writer receiving the SQL text.</param>
	/// <param name="sqlGenerator">Generator passed through to nested fragments.</param>
	public void WriteSql(SqlWriter writer, SqlGenerator sqlGenerator)
	{
		if (null != _sqlFragments)
		{
			foreach (var o in _sqlFragments)
			{
				var str = (o as string);
				if (null != str)
				{
					writer.Write(str);
				}
				else
				{
					var sqlFragment = (o as ISqlFragment);
					if (null != sqlFragment)
					{
						sqlFragment.WriteSql(writer, sqlGenerator);
					}
					else
					{
						// Append accepted an object that is neither string nor fragment.
						throw new InvalidOperationException();
					}
				}
			}
		}
	}

	#endregion
}

================================================
FILE: src/EntityFramework.Firebird/SqlGen/SqlGenerator.cs
================================================
/*
 * The contents of this file are subject to the Initial
 * Developer's Public License Version 1.0 (the "License");
 * you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 * Software distributed under the License is distributed on
 * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 * express or implied. See the License for the specific
 * language governing rights and limitations under the License.
 *
 * All Rights Reserved.
*/ //$Authors = Jiri Cincura (jiri@cincura.net) using System; using System.Collections.Generic; using System.Data; using System.Data.Common; using System.Data.Entity.Core.Common.CommandTrees; using System.Data.Entity.Core.Metadata.Edm; using System.Diagnostics; using System.Globalization; using System.Text; using FirebirdSql.Data.Common; namespace EntityFramework.Firebird.SqlGen; internal sealed class SqlGenerator : DbExpressionVisitor { #region Visitor parameter stacks /// /// Every relational node has to pass its SELECT statement to its children /// This allows them (DbVariableReferenceExpression eventually) to update the list of /// outer extents (free variables) used by this select statement. /// Stack _selectStatementStack; /// /// The top of the stack /// private SqlSelectStatement CurrentSelectStatement { // There is always something on the stack, so we can always Peek. get { return _selectStatementStack.Peek(); } } /// /// Nested joins and extents need to know whether they should create /// a new Select statement, or reuse the parent's. This flag /// indicates whether the parent is a join or not. /// Stack _isParentAJoinStack; /// /// The top of the stack /// private bool IsParentAJoin { // There might be no entry on the stack if a Join node has never // been seen, so we return false in that case. get { return _isParentAJoinStack.Count == 0 ? false : _isParentAJoinStack.Peek(); } } #endregion #region Global lists and state Dictionary _allExtentNames; internal Dictionary AllExtentNames { get { return _allExtentNames; } } // For each column name, we store the last integer suffix that // was added to produce a unique column name. This speeds up // the creation of the next unique name for this column name. Dictionary _allColumnNames; internal Dictionary AllColumnNames { get { return _allColumnNames; } } SymbolTable _symbolTable = new SymbolTable(); /// /// VariableReferenceExpressions are allowed only as children of DbPropertyExpression /// or MethodExpression. 
The cheapest way to ensure this is to set the following /// property in DbVariableReferenceExpression and reset it in the allowed parent expressions. /// bool _isVarRefSingle = false; bool _shouldHandleBoolComparison = true; bool _shouldCastParameter = true; Dictionary _shortenedNames = new Dictionary(); #endregion #region Statics static private readonly Dictionary _builtInFunctionHandlers = InitializeBuiltInFunctionHandlers(); static private readonly Dictionary _canonicalFunctionHandlers = InitializeCanonicalFunctionHandlers(); static private readonly Dictionary _functionNameToOperatorDictionary = InitializeFunctionNameToOperatorDictionary(); private delegate ISqlFragment FunctionHandler(SqlGenerator sqlgen, DbFunctionExpression functionExpr); /// /// All special built-in functions and their handlers /// /// private static Dictionary InitializeBuiltInFunctionHandlers() { var functionHandlers = new Dictionary(0, StringComparer.Ordinal); return functionHandlers; } /// /// All special non-aggregate canonical functions and their handlers /// /// private static Dictionary InitializeCanonicalFunctionHandlers() { var functionHandlers = new Dictionary(StringComparer.Ordinal); #region Other Canonical Functions functionHandlers.Add("NewGuid", HandleCanonicalFunctionNewGuid); #endregion #region Math Canonical Functions functionHandlers.Add("Abs", HandleCanonicalFunctionAbs); functionHandlers.Add("Ceiling", HandleCanonicalFunctionCeiling); functionHandlers.Add("Floor", HandleCanonicalFunctionFloor); functionHandlers.Add("Power", HandleCanonicalFunctionPower); functionHandlers.Add("Round", HandleCanonicalFunctionRound); functionHandlers.Add("Truncate", HandleCanonicalFunctionTruncate); #endregion #region String Canonical Functions functionHandlers.Add("Concat", HandleCanonicalConcatFunction); functionHandlers.Add("Contains", HandleCanonicalContainsFunction); functionHandlers.Add("EndsWith", HandleCanonicalEndsWithFunction); functionHandlers.Add("IndexOf", 
HandleCanonicalFunctionIndexOf); functionHandlers.Add("Length", HandleCanonicalFunctionLength); functionHandlers.Add("ToLower", HandleCanonicalFunctionToLower); functionHandlers.Add("ToUpper", HandleCanonicalFunctionToUpper); functionHandlers.Add("Trim", HandleCanonicalFunctionTrim); functionHandlers.Add("LTrim", HandleCanonicalFunctionLTrim); functionHandlers.Add("RTrim", HandleCanonicalFunctionRTrim); functionHandlers.Add("Left", HandleCanonicalFunctionLeft); functionHandlers.Add("Right", HandleCanonicalFunctionRight); functionHandlers.Add("Reverse", HandleCanonicalFunctionReverse); functionHandlers.Add("Replace", HandleCanonicalFunctionReplace); functionHandlers.Add("StartsWith", HandleCanonicalStartsWithFunction); functionHandlers.Add("Substring", HandleCanonicalFunctionSubstring); #endregion #region Date and Time Canonical Functions functionHandlers.Add("AddNanoseconds", (sqlgen, e) => HandleCanonicalFunctionDateTimeAdd(sqlgen, e, null)); // not supported functionHandlers.Add("AddMicroseconds", (sqlgen, e) => HandleCanonicalFunctionDateTimeAdd(sqlgen, e, null)); // not supported functionHandlers.Add("AddMilliseconds", (sqlgen, e) => HandleCanonicalFunctionDateTimeAdd(sqlgen, e, "MILLISECOND")); functionHandlers.Add("AddSeconds", (sqlgen, e) => HandleCanonicalFunctionDateTimeAdd(sqlgen, e, "SECOND")); functionHandlers.Add("AddMinutes", (sqlgen, e) => HandleCanonicalFunctionDateTimeAdd(sqlgen, e, "MINUTE")); functionHandlers.Add("AddHours", (sqlgen, e) => HandleCanonicalFunctionDateTimeAdd(sqlgen, e, "HOUR")); functionHandlers.Add("AddDays", (sqlgen, e) => HandleCanonicalFunctionDateTimeAdd(sqlgen, e, "DAY")); functionHandlers.Add("AddMonths", (sqlgen, e) => HandleCanonicalFunctionDateTimeAdd(sqlgen, e, "MONTH")); functionHandlers.Add("AddYears", (sqlgen, e) => HandleCanonicalFunctionDateTimeAdd(sqlgen, e, "YEAR")); functionHandlers.Add("CreateDateTime", HandleCanonicalFunctionCreateDateTime); functionHandlers.Add("CreateDateTimeOffset", 
HandleCanonicalFunctionCreateDateTimeOffset); // not supported functionHandlers.Add("CreateTime", HandleCanonicalFunctionCreateTime); functionHandlers.Add("CurrentDateTime", HandleCanonicalFunctionCurrentDateTime); functionHandlers.Add("CurrentDateTimeOffset", HandleCanonicalFunctionCurrentDateTimeOffset); // not supported functionHandlers.Add("CurrentUtcDateTime", HandleCanonicalFunctionCurrentUtcDateTime); // not supported functionHandlers.Add("Day", (sqlgen, e) => HandleCanonicalFunctionExtract(sqlgen, e, "DAY")); functionHandlers.Add("DayOfYear", (sqlgen, e) => HandleCanonicalFunctionExtract(sqlgen, e, "YEARDAY")); functionHandlers.Add("DiffNanoseconds", (sqlgen, e) => HandleCanonicalFunctionDateTimeDiff(sqlgen, e, null)); // not supported functionHandlers.Add("DiffMicroseconds", (sqlgen, e) => HandleCanonicalFunctionDateTimeDiff(sqlgen, e, null)); // not supported functionHandlers.Add("DiffMilliseconds", (sqlgen, e) => HandleCanonicalFunctionDateTimeDiff(sqlgen, e, "MILLISECOND")); functionHandlers.Add("DiffSeconds", (sqlgen, e) => HandleCanonicalFunctionDateTimeDiff(sqlgen, e, "SECOND")); functionHandlers.Add("DiffMinutes", (sqlgen, e) => HandleCanonicalFunctionDateTimeDiff(sqlgen, e, "MINUTE")); functionHandlers.Add("DiffHours", (sqlgen, e) => HandleCanonicalFunctionDateTimeDiff(sqlgen, e, "HOUR")); functionHandlers.Add("DiffDays", (sqlgen, e) => HandleCanonicalFunctionDateTimeDiff(sqlgen, e, "DAY")); functionHandlers.Add("DiffMonths", (sqlgen, e) => HandleCanonicalFunctionDateTimeDiff(sqlgen, e, "MONTH")); functionHandlers.Add("DiffYears", (sqlgen, e) => HandleCanonicalFunctionDateTimeDiff(sqlgen, e, "YEAR")); functionHandlers.Add("GetTotalOffsetMinutes", HandleCanonicalFunctionGetTotalOffsetMinutes); // not supported functionHandlers.Add("Hour", (sqlgen, e) => HandleCanonicalFunctionExtract(sqlgen, e, "HOUR")); functionHandlers.Add("Millisecond", (sqlgen, e) => HandleCanonicalFunctionExtract(sqlgen, e, "MILLISECOND")); functionHandlers.Add("Minute", 
(sqlgen, e) => HandleCanonicalFunctionExtract(sqlgen, e, "MINUTE")); functionHandlers.Add("Month", (sqlgen, e) => HandleCanonicalFunctionExtract(sqlgen, e, "MONTH")); functionHandlers.Add("Second", (sqlgen, e) => HandleCanonicalFunctionExtract(sqlgen, e, "SECOND")); functionHandlers.Add("TruncateTime", HandleCanonicalFunctionTruncateTime); functionHandlers.Add("Year", (sqlgen, e) => HandleCanonicalFunctionExtract(sqlgen, e, "YEAR")); #endregion #region Bitwise Canonical Functions functionHandlers.Add("BitwiseAnd", HandleCanonicalFunctionBitwiseAnd); functionHandlers.Add("BitwiseNot", HandleCanonicalFunctionBitwiseNot); // not supported functionHandlers.Add("BitwiseOr", HandleCanonicalFunctionBitwiseOr); functionHandlers.Add("BitwiseXor", HandleCanonicalFunctionBitwiseXor); #endregion return functionHandlers; } /// /// Initializes the mapping from functions to T-SQL operators /// for all functions that translate to T-SQL operators /// /// private static Dictionary InitializeFunctionNameToOperatorDictionary() { return new Dictionary(StringComparer.Ordinal) { { nameof(string.Concat), "||" }, { nameof(string.Contains), "CONTAINING" }, { nameof(string.StartsWith), "STARTING WITH" }, }; } #endregion #region Constructor /// /// Basic constructor. /// internal SqlGenerator() { } #endregion #region Entry points /// /// General purpose static function that can be called from System.Data assembly /// /// Server version /// command tree /// Parameters to add to the command tree corresponding /// to constants in the command tree. Used only in ModificationCommandTrees. /// The string representing the SQL to be executed. 
internal static string GenerateSql(DbCommandTree tree, out List parameters, out CommandType commandType) { commandType = CommandType.Text; //Handle Query if (tree is DbQueryCommandTree queryCommandTree) { var sqlGen = new SqlGenerator(); parameters = null; return sqlGen.GenerateSql((DbQueryCommandTree)tree); } //Handle Function if (tree is DbFunctionCommandTree DbFunctionCommandTree) { var sqlGen = new SqlGenerator(); parameters = null; var sql = sqlGen.GenerateFunctionSql(DbFunctionCommandTree, out commandType); return sql; } //Handle Insert if (tree is DbInsertCommandTree insertCommandTree) { return DmlSqlGenerator.GenerateInsertSql(insertCommandTree, out parameters); } //Handle Delete if (tree is DbDeleteCommandTree deleteCommandTree) { return DmlSqlGenerator.GenerateDeleteSql(deleteCommandTree, out parameters); } //Handle Update if (tree is DbUpdateCommandTree updateCommandTree) { return DmlSqlGenerator.GenerateUpdateSql(updateCommandTree, out parameters); } throw new NotSupportedException("Unrecognized command tree type"); } #endregion #region Driver Methods /// /// Translate a command tree to a SQL string. /// /// The input tree could be translated to either a SQL SELECT statement /// or a SELECT expression. This choice is made based on the return type /// of the expression /// CollectionType => select statement /// non collection type => select expression /// /// /// The string representing the SQL to be executed. private string GenerateSql(DbQueryCommandTree tree) { _selectStatementStack = new Stack(); _isParentAJoinStack = new Stack(); _allExtentNames = new Dictionary(StringComparer.OrdinalIgnoreCase); _allColumnNames = new Dictionary(StringComparer.OrdinalIgnoreCase); // Literals will not be converted to parameters. 
ISqlFragment result; if (MetadataHelpers.IsCollectionType(tree.Query.ResultType)) { var sqlStatement = VisitExpressionEnsureSqlStatement(tree.Query); Debug.Assert(sqlStatement != null, "The outer most sql statment is null"); sqlStatement.IsTopMost = true; result = sqlStatement; } else { var sqlBuilder = new SqlBuilder(); sqlBuilder.Append("SELECT "); sqlBuilder.Append(tree.Query.Accept(this)); result = sqlBuilder; } if (_isVarRefSingle) { throw new NotSupportedException(); // A DbVariableReferenceExpression has to be a child of DbPropertyExpression or MethodExpression } // Check that the parameter stacks are not leaking. Debug.Assert(_selectStatementStack.Count == 0); Debug.Assert(_isParentAJoinStack.Count == 0); return WriteSql(result); } /// /// Translate a function command tree to a SQL string. /// private string GenerateFunctionSql(DbFunctionCommandTree tree, out CommandType commandType) { var function = tree.EdmFunction; // We expect function to always have these properties var userCommandText = (string)function.MetadataProperties["CommandTextAttribute"].Value; /// No schema in FB //string userSchemaName = (string)function.MetadataProperties["Schema"].Value; var userFuncName = (string)function.MetadataProperties["StoreFunctionNameAttribute"].Value; if (string.IsNullOrEmpty(userCommandText)) { // build a quoted description of the function commandType = CommandType.StoredProcedure; // if the schema name is not explicitly given, it is assumed to be the metadata namespace /// No schema in FB //string schemaName = string.IsNullOrEmpty(userSchemaName) ? // function.NamespaceName : userSchemaName; // if the function store name is not explicitly given, it is assumed to be the metadata name var functionName = string.IsNullOrEmpty(userFuncName) ? 
function.Name : userFuncName; // quote elements of function text /// No schema in FB //string quotedSchemaName = QuoteIdentifier(schemaName); var quotedFunctionName = QuoteIdentifier(functionName); // separator /// No schema in FB //const string schemaSeparator = "."; // concatenate elements of function text /// No schema in FB var quotedFunctionText = /*quotedSchemaName + schemaSeparator + */quotedFunctionName; return quotedFunctionText; } else { // if the user has specified the command text, pass it through verbatim and choose CommandType.Text commandType = CommandType.Text; return userCommandText; } } /// /// Convert the SQL fragments to a string. /// We have to setup the Stream for writing. /// /// /// A string representing the SQL to be executed. string WriteSql(ISqlFragment sqlStatement) { var builder = new StringBuilder(1024); using (var writer = new SqlWriter(builder)) { WriteSql(writer, sqlStatement); } return builder.ToString(); } internal SqlWriter WriteSql(SqlWriter writer, ISqlFragment sqlStatement) { sqlStatement.WriteSql(writer, this); return writer; } #endregion #region DbExpressionVisitor Members /// /// Translate(left) AND Translate(right) /// /// /// A . public override ISqlFragment Visit(DbAndExpression e) { return VisitBinaryExpression(" AND ", e.Left, e.Right); } /// /// An apply is just like a join, so it shares the common join processing /// in /// /// /// A . public override ISqlFragment Visit(DbApplyExpression e) { string joinString; switch (e.ExpressionKind) { case DbExpressionKind.CrossApply: joinString = "CROSS APPLY"; break; case DbExpressionKind.OuterApply: joinString = "OUTER APPLY"; break; default: Debug.Assert(false); throw new InvalidOperationException(); } throw new NotSupportedException($"{joinString} statement is not supported in Firebird."); // The join condition does not exist in this case, so we use null. // We do not have a on clause, so we use JoinType.CrossJoin. 
//return VisitJoinExpression(inputs, DbExpressionKind.CrossJoin, joinString, null); } /// /// For binary expressions, we delegate to . /// We handle the other expressions directly. /// /// /// A public override ISqlFragment Visit(DbArithmeticExpression e) { SqlBuilder result; switch (e.ExpressionKind) { case DbExpressionKind.Divide: result = VisitBinaryExpression(" / ", e.Arguments[0], e.Arguments[1]); break; case DbExpressionKind.Minus: result = VisitBinaryExpression(" - ", e.Arguments[0], e.Arguments[1]); break; case DbExpressionKind.Modulo: //result = VisitBinaryExpression(" % ", e.Arguments[0], e.Arguments[1]); result = new SqlBuilder(); result.Append(" MOD("); result.Append(e.Arguments[0].Accept(this)); result.Append(", "); result.Append(e.Arguments[1].Accept(this)); result.Append(")"); break; case DbExpressionKind.Multiply: result = VisitBinaryExpression(" * ", e.Arguments[0], e.Arguments[1]); break; case DbExpressionKind.Plus: result = VisitBinaryExpression(" + ", e.Arguments[0], e.Arguments[1]); break; case DbExpressionKind.UnaryMinus: result = new SqlBuilder(); result.Append(" -("); result.Append(e.Arguments[0].Accept(this)); result.Append(")"); break; default: Debug.Assert(false); throw new InvalidOperationException(); } return result; } /// /// If the ELSE clause is null, we do not write it out. 
/// /// /// A public override ISqlFragment Visit(DbCaseExpression e) { var result = new SqlBuilder(); Debug.Assert(e.When.Count == e.Then.Count); result.Append("CASE"); for (var i = 0; i < e.When.Count; ++i) { result.Append(" WHEN ("); result.Append(e.When[i].Accept(this)); result.Append(") THEN "); result.Append(e.Then[i].Accept(this)); } if (e.Else != null && !(e.Else is DbNullExpression)) { result.Append(" ELSE "); result.Append(e.Else.Accept(this)); } result.Append(" END"); return result; } /// /// /// /// /// public override ISqlFragment Visit(DbCastExpression e) { var result = new SqlBuilder(); var sqlPrimitiveType = GetSqlPrimitiveType(e.ResultType); switch (sqlPrimitiveType.ToUpperInvariant()) { default: result.Append("CAST("); result.Append(e.Argument.Accept(this)); result.Append(" AS "); result.Append(sqlPrimitiveType); result.Append(")"); break; } return result; } /// /// The parser generates Not(Equals(...)) for <>. /// /// /// A . public override ISqlFragment Visit(DbComparisonExpression e) { switch (e.ExpressionKind) { case DbExpressionKind.Equals: return VisitBinaryExpression(" = ", e.Left, e.Right); case DbExpressionKind.LessThan: return VisitBinaryExpression(" < ", e.Left, e.Right); case DbExpressionKind.LessThanOrEquals: return VisitBinaryExpression(" <= ", e.Left, e.Right); case DbExpressionKind.GreaterThan: return VisitBinaryExpression(" > ", e.Left, e.Right); case DbExpressionKind.GreaterThanOrEquals: return VisitBinaryExpression(" >= ", e.Left, e.Right); // The parser does not generate the expression kind below. case DbExpressionKind.NotEquals: return VisitBinaryExpression(" <> ", e.Left, e.Right); default: Debug.Assert(false); // The constructor should have prevented this throw new InvalidOperationException(string.Empty); } } /// /// Constants will be send to the store as part of the generated SQL, not as parameters. /// /// /// A . Strings are wrapped in single /// quotes and escaped. Numbers are written literally. 
public override ISqlFragment Visit(DbConstantExpression e) { var result = new SqlBuilder(); if (MetadataHelpers.TryGetPrimitiveTypeKind(e.ResultType, out var typeKind)) { switch (typeKind) { case PrimitiveTypeKind.Boolean: result.Append(FormatBoolean((bool)e.Value)); break; case PrimitiveTypeKind.Int16: result.Append("CAST("); result.Append(e.Value.ToString()); result.Append(" AS "); result.Append(GetSqlPrimitiveType(e.ResultType)); result.Append(")"); break; case PrimitiveTypeKind.Int32: // default for integral values. result.Append(e.Value.ToString()); break; case PrimitiveTypeKind.Int64: result.Append("CAST("); result.Append(e.Value.ToString()); result.Append(" AS "); result.Append(GetSqlPrimitiveType(e.ResultType)); result.Append(")"); break; case PrimitiveTypeKind.Double: result.Append("CAST("); result.Append(((Double)e.Value).ToString(CultureInfo.InvariantCulture)); result.Append(" AS "); result.Append(GetSqlPrimitiveType(e.ResultType)); result.Append(")"); break; case PrimitiveTypeKind.Single: result.Append("CAST("); result.Append(((Single)e.Value).ToString(CultureInfo.InvariantCulture)); result.Append(" AS "); result.Append(GetSqlPrimitiveType(e.ResultType)); result.Append(")"); break; case PrimitiveTypeKind.Decimal: var sqlPrimitiveType = GetSqlPrimitiveType(e.ResultType); var strDecimal = ((Decimal)e.Value).ToString(CultureInfo.InvariantCulture); var pointPosition = strDecimal.IndexOf('.'); var precision = 9; // there's always the max value in manifest if (MetadataHelpers.TryGetTypeFacetDescriptionByName(e.ResultType.EdmType, MetadataHelpers.PrecisionFacetName, out var precisionFacetDescription)) { if (precisionFacetDescription.DefaultValue != null) precision = (int)precisionFacetDescription.DefaultValue; } var maxScale = (pointPosition != -1 ? 
precision - pointPosition + 1 : 0); result.Append("CAST("); result.Append(strDecimal); result.Append(" AS "); result.Append(sqlPrimitiveType.Substring(0, sqlPrimitiveType.IndexOf('('))); result.Append("("); result.Append(precision.ToString(CultureInfo.InvariantCulture)); result.Append(","); result.Append(maxScale.ToString(CultureInfo.InvariantCulture)); result.Append("))"); break; case PrimitiveTypeKind.Binary: result.Append(FormatBinary((byte[])e.Value)); break; case PrimitiveTypeKind.String: var isUnicode = MetadataHelpers.GetFacetValueOrDefault(e.ResultType, MetadataHelpers.UnicodeFacetName, true); // constant is always considered Unicode isUnicode = true; var length = MetadataHelpers.GetFacetValueOrDefault(e.ResultType, MetadataHelpers.MaxLengthFacetName, null) ?? (isUnicode ? FbProviderManifest.UnicodeVarcharMaxSize : FbProviderManifest.AsciiVarcharMaxSize); result.Append(FormatString((string)e.Value, isUnicode, length)); break; case PrimitiveTypeKind.DateTime: result.Append(FormatDateTime((DateTime)e.Value)); break; case PrimitiveTypeKind.Time: result.Append(FormatTime((DateTime)e.Value)); break; case PrimitiveTypeKind.Guid: result.Append(FormatGuid((Guid)e.Value)); break; default: // all known scalar types should been handled already. throw new NotSupportedException(); } } else { throw new NotSupportedException(); } return result; } /// /// is illegal at this stage /// /// /// public override ISqlFragment Visit(DbDerefExpression e) { throw new NotSupportedException(); } /// /// The DISTINCT has to be added to the beginning of SqlSelectStatement.Select, /// but it might be too late for that. So, we use a flag on SqlSelectStatement /// instead, and add the "DISTINCT" in the second phase. 
/// /// /// A public override ISqlFragment Visit(DbDistinctExpression e) { var result = VisitExpressionEnsureSqlStatement(e.Argument); if (!IsCompatible(result, e.ExpressionKind)) { var inputType = MetadataHelpers.GetElementTypeUsage(e.Argument.ResultType); result = CreateNewSelectStatement(result, "DISTINCT", inputType, out var fromSymbol); AddFromSymbol(result, "DISTINCT", fromSymbol, false); } result.IsDistinct = true; return result; } /// /// An element expression returns a scalar - so it is translated to /// ( Select ... ) /// /// /// public override ISqlFragment Visit(DbElementExpression e) { var result = new SqlBuilder(); result.Append("("); result.Append(VisitExpressionEnsureSqlStatement(e.Argument)); result.Append(")"); return result; } /// /// /// /// /// public override ISqlFragment Visit(DbExceptExpression e) { throw new NotSupportedException("The EXCEPT statement is not supported in Firebird."); //return VisitSetOpExpression(e.Left, e.Right, "EXCEPT"); } /// /// Only concrete expression types will be visited. /// /// /// public override ISqlFragment Visit(DbExpression e) { throw new InvalidOperationException(); } /// /// /// /// /// If we are in a Join context, returns a /// with the extent name, otherwise, a new /// with the From field set. public override ISqlFragment Visit(DbScanExpression e) { var target = e.Target; if (IsParentAJoin) { var result = new SqlBuilder(); result.Append(GetTargetSql(target)); return result; } else { var result = new SqlSelectStatement(); result.From.Append(GetTargetSql(target)); return result; } } /// /// Gets escaped SQL identifier describing this entity set. 
/// /// internal static string GetTargetSql(EntitySetBase entitySetBase) { // construct escaped SQL referencing entity set var builder = new StringBuilder(); var definingQuery = MetadataHelpers.TryGetValueForMetadataProperty(entitySetBase, "DefiningQuery"); if (!string.IsNullOrEmpty(definingQuery)) { builder.Append("("); builder.Append(definingQuery); builder.Append(")"); } else { /// No schema in FB //string schemaName = MetadataHelpers.TryGetValueForMetadataProperty(entitySetBase, "Schema"); //if (!string.IsNullOrEmpty(schemaName)) //{ // builder.Append(QuoteIdentifier(schemaName)); // builder.Append("."); //} //else //{ // builder.Append(QuoteIdentifier(entitySetBase.EntityContainer.Name)); // builder.Append("."); //} builder.Append(QuoteIdentifier(MetadataHelpers.GetTableName(entitySetBase))); } return builder.ToString(); } /// /// The bodies of , , /// , are similar. /// Each does the following. /// /// Visit the input expression /// Determine if the input's SQL statement can be reused, or a new /// one must be created. /// Create a new symbol table scope /// Push the Sql statement onto a stack, so that children can /// update the free variable list. /// Visit the non-input expression. /// Cleanup /// /// /// /// A public override ISqlFragment Visit(DbFilterExpression e) { return VisitFilterExpression(e.Input, e.Predicate, false); } /// /// Lambda functions are not supported. /// The functions supported are: /// /// Canonical Functions - We recognize these by their dataspace, it is DataSpace.CSpace /// Store Functions - We recognize these by the BuiltInAttribute and not being Canonical /// User-defined Functions - All the rest except for Lambda functions /// /// We handle Canonical and Store functions the same way: If they are in the list of functions /// that need special handling, we invoke the appropriate handler, otherwise we translate them to /// FunctionName(arg1, arg2, ..., argn). 
/// We translate user-defined functions to NamespaceName.FunctionName(arg1, arg2, ..., argn). /// /// /// A public override ISqlFragment Visit(DbFunctionExpression e) { // // check if function requires special case processing, if so, delegates to it // if (IsSpecialBuiltInFunction(e)) { return HandleSpecialBuiltInFunction(e); } if (IsSpecialCanonicalFunction(e)) { return HandleSpecialCanonicalFunction(e); } return HandleFunctionDefault(e); } /// /// is illegal at this stage /// /// /// public override ISqlFragment Visit(DbEntityRefExpression e) { throw new NotSupportedException(); } /// /// is illegal at this stage /// /// /// public override ISqlFragment Visit(DbRefKeyExpression e) { throw new NotSupportedException(); } /// /// for general details. /// We modify both the GroupBy and the Select fields of the SqlSelectStatement. /// GroupBy gets just the keys without aliases, /// and Select gets the keys and the aggregates with aliases. /// /// Whenever there exists at least one aggregate with an argument that is not is not a simple /// over , /// we create a nested query in which we alias the arguments to the aggregates. /// That is due to the following two limitations of Sql Server: /// /// If an expression being aggregated contains an outer reference, then that outer /// reference must be the only column referenced in the expression /// Sql Server cannot perform an aggregate function on an expression containing /// an aggregate or a subquery. /// /// /// The default translation, without inner query is: /// /// SELECT /// kexp1 AS key1, kexp2 AS key2,... kexpn AS keyn, /// aggf1(aexpr1) AS agg1, .. aggfn(aexprn) AS aggn /// FROM input AS a /// GROUP BY kexp1, kexp2, .. kexpn /// /// When we inject an innner query, the equivalent translation is: /// /// SELECT /// key1 AS key1, key2 AS key2, .. keyn AS keys, /// aggf1(agg1) AS agg1, aggfn(aggn) AS aggn /// FROM ( /// SELECT /// kexp1 AS key1, kexp2 AS key2,... kexpn AS keyn, /// aexpr1 AS agg1, .. 
aexprn AS aggn /// FROM input AS a /// ) as a /// GROUP BY key1, key2, keyn /// /// /// /// A public override ISqlFragment Visit(DbGroupByExpression e) { var varName = GetShortenedName(e.Input.VariableName); var innerQuery = VisitInputExpression(e.Input.Expression, varName, e.Input.VariableType, out var fromSymbol); // GroupBy is compatible with Filter and OrderBy // but not with Project, GroupBy if (!IsCompatible(innerQuery, e.ExpressionKind)) { innerQuery = CreateNewSelectStatement(innerQuery, varName, e.Input.VariableType, out fromSymbol); } _selectStatementStack.Push(innerQuery); _symbolTable.EnterScope(); AddFromSymbol(innerQuery, varName, fromSymbol); // This line is not present for other relational nodes. _symbolTable.Add(GetShortenedName(e.Input.GroupVariableName), fromSymbol); // The enumerator is shared by both the keys and the aggregates, // so, we do not close it in between. var groupByType = MetadataHelpers.GetEdmType(MetadataHelpers.GetEdmType(e.ResultType).TypeUsage); // Whenever there exists at least one aggregate with an argument that is not simply a PropertyExpression // over a VarRefExpression, we need a nested query in which we alias the arguments to the aggregates. 
var needsInnerQuery = NeedsInnerQuery(e.Aggregates); SqlSelectStatement result; if (needsInnerQuery) { //Create the inner query result = CreateNewSelectStatement(innerQuery, varName, e.Input.VariableType, false, out fromSymbol); AddFromSymbol(result, varName, fromSymbol, false); } else { result = innerQuery; } using (IEnumerator members = groupByType.Properties.GetEnumerator()) { members.MoveNext(); Debug.Assert(result.Select.IsEmpty); var separator = string.Empty; foreach (var key in e.Keys) { var member = members.Current; var alias = QuoteIdentifier(member.Name); result.GroupBy.Append(separator); var keySql = key.Accept(this); if (!needsInnerQuery) { //Default translation: Key AS Alias result.Select.Append(separator); result.Select.AppendLine(); result.Select.Append(keySql); result.Select.Append(" AS "); result.Select.Append(alias); result.GroupBy.Append(keySql); } else { // The inner query contains the default translation Key AS Alias innerQuery.Select.Append(separator); innerQuery.Select.AppendLine(); innerQuery.Select.Append(keySql); innerQuery.Select.Append(" AS "); innerQuery.Select.Append(alias); //The outer resulting query projects over the key aliased in the inner query: // fromSymbol.Alias AS Alias result.Select.Append(separator); result.Select.AppendLine(); result.Select.Append(fromSymbol); result.Select.Append("."); result.Select.Append(alias); result.Select.Append(" AS "); result.Select.Append(alias); result.GroupBy.Append(alias); } separator = ", "; members.MoveNext(); } foreach (var aggregate in e.Aggregates) { var member = members.Current; var alias = QuoteIdentifier(member.Name); Debug.Assert(aggregate.Arguments.Count == 1); var translatedAggregateArgument = aggregate.Arguments[0].Accept(this); object aggregateArgument; if (needsInnerQuery) { //In this case the argument to the aggratete is reference to the one projected out by the // inner query var wrappingAggregateArgument = new SqlBuilder(); wrappingAggregateArgument.Append(fromSymbol); 
wrappingAggregateArgument.Append("."); wrappingAggregateArgument.Append(alias); aggregateArgument = wrappingAggregateArgument; innerQuery.Select.Append(separator); innerQuery.Select.AppendLine(); innerQuery.Select.Append(translatedAggregateArgument); innerQuery.Select.Append(" AS "); innerQuery.Select.Append(alias); } else { aggregateArgument = translatedAggregateArgument; } ISqlFragment aggregateResult = VisitAggregate(aggregate, aggregateArgument); result.Select.Append(separator); result.Select.AppendLine(); result.Select.Append(aggregateResult); result.Select.Append(" AS "); result.Select.Append(alias); separator = ", "; members.MoveNext(); } } _symbolTable.ExitScope(); _selectStatementStack.Pop(); return result; } /// /// /// /// /// public override ISqlFragment Visit(DbIntersectExpression e) { throw new NotSupportedException("The INTERSECT statement is not supported in Firebird."); //return VisitSetOpExpression(e.Left, e.Right, "INTERSECT"); } /// /// Not(IsEmpty) has to be handled specially, so we delegate to /// . /// /// /// /// A . /// [NOT] EXISTS( ... ) /// public override ISqlFragment Visit(DbIsEmptyExpression e) { return VisitIsEmptyExpression(e, false); } /// /// Not(IsNull) is handled specially, so we delegate to /// /// /// /// A /// IS [NOT] NULL /// public override ISqlFragment Visit(DbIsNullExpression e) { return VisitIsNullExpression(e, false); } /// /// is illegal at this stage /// /// /// A public override ISqlFragment Visit(DbIsOfExpression e) { throw new NotSupportedException(); } /// /// /// /// /// A . public override ISqlFragment Visit(DbCrossJoinExpression e) { return VisitJoinExpression(e.Inputs, e.ExpressionKind, "CROSS JOIN", null); } /// /// /// /// /// A . 
/// <summary>
/// Translates a <see cref="DbJoinExpression"/> (FULL OUTER / INNER / LEFT OUTER)
/// by mapping the expression kind to the SQL join keyword and delegating to
/// VisitJoinExpression.
/// </summary>
public override ISqlFragment Visit(DbJoinExpression e)
{
	#region Map join type to a string
	string joinString;
	switch (e.ExpressionKind)
	{
		case DbExpressionKind.FullOuterJoin:
			joinString = "FULL OUTER JOIN";
			break;
		case DbExpressionKind.InnerJoin:
			joinString = "INNER JOIN";
			break;
		case DbExpressionKind.LeftOuterJoin:
			joinString = "LEFT OUTER JOIN";
			break;
		default:
			// Only the three join kinds above are expected here.
			Debug.Assert(false);
			joinString = null;
			break;
	}
	#endregion
	var inputs = new List(2);
	inputs.Add(e.Left);
	inputs.Add(e.Right);
	return VisitJoinExpression(inputs, e.ExpressionKind, joinString, e.JoinCondition);
}

/// <summary>
/// Translates to "argument LIKE pattern [ESCAPE escape]".
/// Parameter casting is suppressed unless both sides are parameter references.
/// </summary>
public override ISqlFragment Visit(DbLikeExpression e)
{
	var result = new SqlBuilder();
	if (!(e.Argument is DbParameterReferenceExpression && e.Pattern is DbParameterReferenceExpression))
	{
		_shouldCastParameter = false;
	}
	result.Append(e.Argument.Accept(this));
	result.Append(" LIKE ");
	result.Append(e.Pattern.Accept(this));
	// if the ESCAPE expression is a DbNullExpression, then that's tantamount to
	// not having an ESCAPE at all
	if (e.Escape.ExpressionKind != DbExpressionKind.Null)
	{
		result.Append(" ESCAPE ");
		result.Append(e.Escape.Accept(this));
	}
	_shouldCastParameter = true;
	return result;
}

/// <summary>
/// Translates <see cref="DbLimitExpression"/> to Firebird's FIRST clause.
/// The limit must be a constant or a parameter reference.
/// </summary>
public override ISqlFragment Visit(DbLimitExpression e)
{
	Debug.Assert(e.Limit is DbConstantExpression || e.Limit is DbParameterReferenceExpression, "DbLimitExpression.Limit is of invalid expression type");
	var result = VisitExpressionEnsureSqlStatement(e.Argument, false);
	if (!IsCompatible(result, e.ExpressionKind))
	{
		var inputType = MetadataHelpers.GetElementTypeUsage(e.Argument.ResultType);
		result = CreateNewSelectStatement(result, "top", inputType, out var fromSymbol);
		AddFromSymbol(result, "top", fromSymbol, false);
	}
	var topCount = HandleCountExpression(e.Limit);
	result.First = new FirstClause(topCount);
	return result;
}

/// <summary>
/// DbNewInstanceExpression is allowed as a child of DbProjectExpression only.
/// Collections are converted via VisitCollectionConstructor; anything else throws.
/// </summary>
public override ISqlFragment Visit(DbNewInstanceExpression e)
{
	if (MetadataHelpers.IsCollectionType(e.ResultType))
	{
		return VisitCollectionConstructor(e);
	}
	throw new NotSupportedException();
}

/// <summary>
/// The NOT expression may cause the translation of its child to change:
/// NOT(NOT(x)) becomes x; NOT(IsEmpty) becomes EXISTS;
/// NOT(IS NULL) becomes IS NOT NULL; NOT(=) becomes &lt;&gt;.
/// </summary>
public override ISqlFragment Visit(DbNotExpression e)
{
	// Flatten Not(Not(x)) to x.
	if (e.Argument is DbNotExpression notExpression)
	{
		return notExpression.Argument.Accept(this);
	}
	if (e.Argument is DbIsEmptyExpression isEmptyExpression)
	{
		return VisitIsEmptyExpression(isEmptyExpression, true);
	}
	if (e.Argument is DbIsNullExpression isNullExpression)
	{
		return VisitIsNullExpression(isNullExpression, true);
	}
	if (e.Argument is DbComparisonExpression comparisonExpression)
	{
		if (comparisonExpression.ExpressionKind == DbExpressionKind.Equals)
		{
			return VisitBinaryExpression(" <> ", comparisonExpression.Left, comparisonExpression.Right);
		}
	}
	var result = new SqlBuilder();
	result.Append(" NOT (");
	result.Append(e.Argument.Accept(this));
	result.Append(")");
	return result;
}

/// <summary>Emits the literal NULL.</summary>
public override ISqlFragment Visit(DbNullExpression e)
{
	var result = new SqlBuilder();
	result.Append("NULL");
	return result;
}

/// <summary>
/// <see cref="DbOfTypeExpression"/> is illegal at this stage.
/// </summary>
public override ISqlFragment Visit(DbOfTypeExpression e)
{
	throw new NotSupportedException();
}

/// <summary>Translates to "left OR right".</summary>
public override ISqlFragment Visit(DbOrExpression e)
{
	return VisitBinaryExpression(" OR ", e.Left, e.Right);
}

/// <summary>
/// Emits a parameter reference ("@name"), wrapped in a CAST to its SQL
/// primitive type when _shouldCastParameter is set.
/// </summary>
public override ISqlFragment Visit(DbParameterReferenceExpression e)
{
	var result = new SqlBuilder();
	string sqlPrimitiveType = null;
	if (_shouldCastParameter)
	{
		sqlPrimitiveType = GetSqlPrimitiveType(e.ResultType);
		result.Append("CAST(");
	}
	// Do not quote this name.
	// We are not checking that e.Name has no illegal characters. e.g. space
	result.Append("@" + e.ParameterName);
	if (_shouldCastParameter)
	{
		result.Append(" AS ");
		result.Append(sqlPrimitiveType);
		result.Append(")");
	}
	return result;
}

/// <summary>
/// Translates a projection. See the filter visitor for the general ideas.
/// </summary>
public override ISqlFragment Visit(DbProjectExpression e)
{
	var varName = GetShortenedName(e.Input.VariableName);
	var result = VisitInputExpression(e.Input.Expression, varName, e.Input.VariableType, out var fromSymbol);
	// Project is compatible with Filter
	// but not with Project, GroupBy
	if (!IsCompatible(result, e.ExpressionKind))
	{
		result = CreateNewSelectStatement(result, varName, e.Input.VariableType, out fromSymbol);
	}
	_selectStatementStack.Push(result);
	_symbolTable.EnterScope();
	AddFromSymbol(result, varName, fromSymbol);
	// Project is the only node that can have DbNewInstanceExpression as a child
	// so we have to check it here.
	// We call VisitNewInstanceExpression instead of Visit(DbNewInstanceExpression), since
	// the latter throws.
	if (e.Projection is DbNewInstanceExpression newInstanceExpression)
	{
		result.Select.Append(VisitNewInstanceExpression(newInstanceExpression));
	}
	else
	{
		result.Select.Append(e.Projection.Accept(this));
	}
	_symbolTable.ExitScope();
	_selectStatementStack.Pop();
	return result;
}

/// This method handles record flattening, which works as follows.
/// Consider an expression Prop(y, Prop(x, Prop(d, Prop(c, Prop(b, Var(a)))))
/// where a, b, c are joins, d is an extent and x and y are fields.
/// b has been flattened into a, and has its own SELECT statement.
/// c has been flattened into b. d has been flattened into c.
/// We visit the instance, so we reach Var(a) first. This gives us a (join)symbol.
/// Symbol(a).b gives us a join symbol, with a SELECT statement i.e. Symbol(b).
/// From this point on, we need to remember Symbol(b) as the source alias,
/// and then try to find the column. So, we use a SymbolPair.
/// We have reached the end when the symbol no longer points to a join symbol.
/// Returns a Symbol if we have not reached the first Join node that has a
/// SELECT statement; a SymbolPair if we have seen that Join node and it has
/// a SELECT statement; a SqlBuilder with "{Input}.propertyName" otherwise.
public override ISqlFragment Visit(DbPropertyExpression e)
{
	SqlBuilder result;
	var varName = e.Property.Name;
	var instanceSql = e.Instance.Accept(this);
	// Since the DbVariableReferenceExpression is a proper child of ours, we can reset
	// isVarSingle.
	if (e.Instance is DbVariableReferenceExpression DbVariableReferenceExpression)
	{
		_isVarRefSingle = false;
	}
	// We need to flatten, and have not yet seen the first nested SELECT statement.
	if (instanceSql is JoinSymbol joinSymbol)
	{
		varName = GetShortenedName(varName);
		Debug.Assert(joinSymbol.NameToExtent.ContainsKey(varName));
		if (joinSymbol.IsNestedJoin)
		{
			return new SymbolPair(joinSymbol, joinSymbol.NameToExtent[varName]);
		}
		else
		{
			return joinSymbol.NameToExtent[varName];
		}
	}
	// ---------------------------------------
	// We have seen the first nested SELECT statement, but not the column.
	if (instanceSql is SymbolPair symbolPair)
	{
		varName = GetShortenedName(varName);
		if (symbolPair.Column is JoinSymbol columnJoinSymbol)
		{
			Debug.Assert(columnJoinSymbol.NameToExtent.ContainsKey(varName));
			symbolPair.Column = columnJoinSymbol.NameToExtent[varName];
			return symbolPair;
		}
		else
		{
			// symbolPair.Column has the base extent.
			// we need the symbol for the column, since it might have been renamed
			// when handling a JOIN.
			if (symbolPair.Column.Columns.ContainsKey(e.Property.Name))
			{
				result = new SqlBuilder();
				result.Append(symbolPair.Source);
				result.Append(".");
				result.Append(symbolPair.Column.Columns[e.Property.Name]);
				return result;
			}
		}
	}
	// ---------------------------------------
	result = new SqlBuilder();
	result.Append(instanceSql);
	result.Append(".");
	// At this point the column name cannot be renamed, so we do
	// not use a symbol.
	result.Append(QuoteIdentifier(varName));
	return result;
}

/// <summary>
/// Any(input, x) => EXISTS(Filter(input, x))
/// All(input, x) => NOT EXISTS(Filter(input, NOT(x)))
/// </summary>
public override ISqlFragment Visit(DbQuantifierExpression e)
{
	var result = new SqlBuilder();
	var negatePredicate = (e.ExpressionKind == DbExpressionKind.All);
	if (e.ExpressionKind == DbExpressionKind.Any)
	{
		result.Append("EXISTS (");
	}
	else
	{
		Debug.Assert(e.ExpressionKind == DbExpressionKind.All);
		result.Append("NOT EXISTS (");
	}
	var filter = VisitFilterExpression(e.Input, e.Predicate, negatePredicate);
	if (filter.Select.IsEmpty)
	{
		AddDefaultColumns(filter);
	}
	result.Append(filter);
	result.Append(")");
	return result;
}

/// <summary>
/// <see cref="DbRefExpression"/> is illegal at this stage.
/// </summary>
public override ISqlFragment Visit(DbRefExpression e)
{
	throw new NotSupportedException();
}

/// <summary>
/// <see cref="DbRelationshipNavigationExpression"/> is illegal at this stage.
/// </summary>
public override ISqlFragment Visit(DbRelationshipNavigationExpression e)
{
	throw new NotSupportedException();
}

/// Translates to SKIP expression.
/// /// /// A public override ISqlFragment Visit(DbSkipExpression e) { Debug.Assert(e.Count is DbConstantExpression || e.Count is DbParameterReferenceExpression, "DbSkipExpression.Count is of invalid expression type"); var varName = GetShortenedName(e.Input.VariableName); var result = VisitInputExpression(e.Input.Expression, varName, e.Input.VariableType, out var fromSymbol); if (!IsCompatible(result, e.ExpressionKind)) { var inputType = MetadataHelpers.GetElementTypeUsage(e.ResultType); result = CreateNewSelectStatement(result, varName, inputType, out fromSymbol); AddFromSymbol(result, varName, fromSymbol, false); } var skipCount = HandleCountExpression(e.Count); result.Skip = new SkipClause(skipCount); _selectStatementStack.Push(result); _symbolTable.EnterScope(); AddFromSymbol(result, varName, fromSymbol); AddSortKeys(result.OrderBy, e.SortOrder); _symbolTable.ExitScope(); _selectStatementStack.Pop(); return result; } /// /// /// /// /// A /// public override ISqlFragment Visit(DbSortExpression e) { var varName = GetShortenedName(e.Input.VariableName); var result = VisitInputExpression(e.Input.Expression, varName, e.Input.VariableType, out var fromSymbol); // OrderBy is compatible with Filter // and nothing else if (!IsCompatible(result, e.ExpressionKind)) { result = CreateNewSelectStatement(result, varName, e.Input.VariableType, out fromSymbol); } _selectStatementStack.Push(result); _symbolTable.EnterScope(); AddFromSymbol(result, varName, fromSymbol); AddSortKeys(result.OrderBy, e.SortOrder); _symbolTable.ExitScope(); _selectStatementStack.Pop(); return result; } /// /// is illegal at this stage /// /// /// A public override ISqlFragment Visit(DbTreatExpression e) { throw new NotSupportedException(); } /// /// This code is shared by /// and /// /// /// Since the left and right expression may not be Sql select statements, /// we must wrap them up to look like SQL select statements. 
/// /// /// public override ISqlFragment Visit(DbUnionAllExpression e) { return VisitSetOpExpression(e.Left, e.Right, "UNION ALL"); } /// /// This method determines whether an extent from an outer scope(free variable) /// is used in the CurrentSelectStatement. /// /// An extent in an outer scope, if its symbol is not in the FromExtents /// of the CurrentSelectStatement. /// /// /// A . public override ISqlFragment Visit(DbVariableReferenceExpression e) { if (_isVarRefSingle) { throw new NotSupportedException(); // A DbVariableReferenceExpression has to be a child of DbPropertyExpression or MethodExpression // This is also checked in GenerateSql(...) at the end of the visiting. } _isVarRefSingle = true; // This will be reset by DbPropertyExpression or MethodExpression var varName = GetShortenedName(e.VariableName); var result = _symbolTable.Lookup(varName); if (!CurrentSelectStatement.FromExtents.Contains(result)) { CurrentSelectStatement.OuterExtents[result] = true; } return result; } public override ISqlFragment Visit(DbInExpression e) { var result = new SqlBuilder(); result.Append(e.Item.Accept(this)); result.Append(" IN ("); var separator = string.Empty; foreach (var item in e.List) { result.Append(separator); result.Append(item.Accept(this)); separator = ","; } result.Append(")"); return result; } #region Visits shared by multiple nodes /// /// Aggregates are not visited by the normal visitor walk. 
/// /// The aggreate go be translated /// The translated aggregate argument /// SqlBuilder VisitAggregate(DbAggregate aggregate, object aggregateArgument) { var aggregateResult = new SqlBuilder(); if (!(aggregate is DbFunctionAggregate functionAggregate)) { throw new NotSupportedException(); } if (MetadataHelpers.IsCanonicalFunction(functionAggregate.Function) && ( string.Equals(functionAggregate.Function.Name, "StDev", StringComparison.Ordinal) || string.Equals(functionAggregate.Function.Name, "StDevP", StringComparison.Ordinal) || string.Equals(functionAggregate.Function.Name, "Var", StringComparison.Ordinal) || string.Equals(functionAggregate.Function.Name, "VarP", StringComparison.Ordinal))) { throw new NotSupportedException(); } WriteFunctionName(aggregateResult, functionAggregate.Function); aggregateResult.Append("("); if (functionAggregate.Distinct) { aggregateResult.Append("DISTINCT "); } aggregateResult.Append(aggregateArgument); aggregateResult.Append(")"); return aggregateResult; } SqlBuilder VisitBinaryExpression(string op, DbExpression left, DbExpression right) { var result = new SqlBuilder(); if (!(left is DbParameterReferenceExpression && right is DbParameterReferenceExpression)) { _shouldCastParameter = false; } if (IsComplexExpression(left)) { result.Append("("); } result.Append(left.Accept(this)); if (IsComplexExpression(left)) { result.Append(")"); } if (_shouldHandleBoolComparison) { result.Append(op); if (IsComplexExpression(right)) { result.Append("("); } result.Append(right.Accept(this)); if (IsComplexExpression(right)) { result.Append(")"); } } else { _shouldHandleBoolComparison = true; } _shouldCastParameter = true; return result; } /// /// This is called by the relational nodes. It does the following /// /// If the input is not a SqlSelectStatement, it assumes that the input /// is a collection expression, and creates a new SqlSelectStatement /// /// /// /// /// /// /// A and the main fromSymbol /// for this select statement. 
SqlSelectStatement VisitInputExpression(DbExpression inputExpression, string inputVarName, TypeUsage inputVarType, out Symbol fromSymbol) { SqlSelectStatement result; var sqlFragment = inputExpression.Accept(this); result = sqlFragment as SqlSelectStatement; if (result == null) { result = new SqlSelectStatement(); WrapNonQueryExtent(result, sqlFragment, inputExpression.ExpressionKind); } if (result.FromExtents.Count == 0) { // input was an extent fromSymbol = new Symbol(inputVarName, inputVarType); } else if (result.FromExtents.Count == 1) { // input was Filter/GroupBy/Project/OrderBy // we are likely to reuse this statement. fromSymbol = result.FromExtents[0]; } else { // input was a join. // we are reusing the select statement produced by a Join node // we need to remove the original extents, and replace them with a // new extent with just the Join symbol. var joinSymbol = new JoinSymbol(inputVarName, inputVarType, result.FromExtents); joinSymbol.FlattenedExtentList = result.AllJoinExtents; fromSymbol = joinSymbol; result.FromExtents.Clear(); result.FromExtents.Add(fromSymbol); } return result; } /// /// /// /// /// Was the parent a DbNotExpression? 
/// SqlBuilder VisitIsEmptyExpression(DbIsEmptyExpression e, bool negate) { var result = new SqlBuilder(); if (!negate) { result.Append(" NOT"); } result.Append(" EXISTS ("); result.Append(VisitExpressionEnsureSqlStatement(e.Argument)); result.AppendLine(); result.Append(")"); return result; } /// /// Translate a NewInstance(Element(X)) expression into /// "select top(1) * from X" /// /// /// private ISqlFragment VisitCollectionConstructor(DbNewInstanceExpression e) { Debug.Assert(e.Arguments.Count <= 1); if (e.Arguments.Count == 1 && e.Arguments[0].ExpressionKind == DbExpressionKind.Element) { var elementExpr = e.Arguments[0] as DbElementExpression; var result = VisitExpressionEnsureSqlStatement(elementExpr.Argument); if (!IsCompatible(result, DbExpressionKind.Element)) { var inputType = MetadataHelpers.GetElementTypeUsage(elementExpr.Argument.ResultType); result = CreateNewSelectStatement(result, "element", inputType, out var fromSymbol); AddFromSymbol(result, "element", fromSymbol, false); } result.First = new FirstClause(1); return result; } // Otherwise simply build this out as a union-all ladder var collectionType = MetadataHelpers.GetEdmType(e.ResultType); Debug.Assert(collectionType != null); var isScalarElement = MetadataHelpers.IsPrimitiveType(collectionType.TypeUsage); var resultSql = new SqlBuilder(); var separator = string.Empty; // handle empty table if (e.Arguments.Count == 0) { Debug.Assert(isScalarElement); resultSql.Append(" SELECT CAST(NULL AS "); resultSql.Append(GetSqlPrimitiveType(collectionType.TypeUsage)); resultSql.Append(") AS X FROM (SELECT 1 FROM RDB$DATABASE) WHERE 1=0"); } foreach (var arg in e.Arguments) { resultSql.Append(separator); resultSql.Append(" SELECT "); resultSql.Append(arg.Accept(this)); // For scalar elements, no alias is appended yet. Add this. if (isScalarElement) { resultSql.Append(" AS X FROM RDB$DATABASE"); } separator = " UNION ALL "; } return resultSql; } /// /// /// /// /// Was the parent a DbNotExpression? 
/// <param name="negate">Was the parent a DbNotExpression?</param>
SqlBuilder VisitIsNullExpression(DbIsNullExpression e, bool negate)
{
	var result = new SqlBuilder();
	// Parameters compared to NULL must not be wrapped in a CAST.
	_shouldCastParameter = false;
	result.Append(e.Argument.Accept(this));
	if (!negate)
	{
		result.Append(" IS NULL");
	}
	else
	{
		result.Append(" IS NOT NULL");
	}
	_shouldCastParameter = true;
	return result;
}

/// <summary>
/// This handles the processing of join expressions.
/// The extents on a left spine are flattened, while joins
/// not on the left spine give rise to new nested sub queries.
///
/// Joins work differently from the rest of the visiting, in that
/// the parent (i.e. the join node) creates the SqlSelectStatement
/// for the children to use.
///
/// The "parameter" IsInJoinContext indicates whether a child extent should
/// add its stuff to the existing SqlSelectStatement, or create a new SqlSelectStatement.
/// By passing true, we ask the children to add themselves to the parent join,
/// by passing false, we ask the children to create new Select statements for
/// themselves.
///
/// This method is called from the cross-join and join visitors.
/// </summary>
ISqlFragment VisitJoinExpression(IList inputs, DbExpressionKind joinKind, string joinString, DbExpression joinCondition)
{
	SqlSelectStatement result;
	// If the parent is not a join (or says that it is not),
	// we should create a new SqlSelectStatement.
	// otherwise, we add our child extents to the parent's FROM clause.
	if (!IsParentAJoin)
	{
		result = new SqlSelectStatement();
		result.AllJoinExtents = new List();
		_selectStatementStack.Push(result);
	}
	else
	{
		result = CurrentSelectStatement;
	}
	// Process each of the inputs, and then the joinCondition if it exists.
	// It would be nice if we could call VisitInputExpression - that would
	// avoid some code duplication
	// but the Join postprocessing is messy and prevents this reuse.
	_symbolTable.EnterScope();
	var separator = string.Empty;
	var isLeftMostInput = true;
	var inputCount = inputs.Count;
	for (var idx = 0; idx < inputCount; idx++)
	{
		var input = inputs[idx];
		if (separator != string.Empty)
		{
			result.From.AppendLine();
		}
		result.From.Append(separator + " ");
		// Change this if other conditions are required
		// to force the child to produce a nested SqlStatement.
		var needsJoinContext = (input.Expression.ExpressionKind == DbExpressionKind.Scan) || (isLeftMostInput && (IsJoinExpression(input.Expression) || IsApplyExpression(input.Expression)));
		_isParentAJoinStack.Push(needsJoinContext ? true : false);
		// if the child reuses our select statement, it will append the from
		// symbols to our FromExtents list. So, we need to remember the
		// start of the child's entries.
		var fromSymbolStart = result.FromExtents.Count;
		var fromExtentFragment = input.Expression.Accept(this);
		_isParentAJoinStack.Pop();
		ProcessJoinInputResult(fromExtentFragment, result, input, fromSymbolStart);
		separator = joinString;
		isLeftMostInput = false;
	}
	// Visit the on clause/join condition.
	switch (joinKind)
	{
		case DbExpressionKind.FullOuterJoin:
		case DbExpressionKind.InnerJoin:
		case DbExpressionKind.LeftOuterJoin:
			result.From.Append(" ON ");
			_isParentAJoinStack.Push(false);
			result.From.Append(joinCondition.Accept(this));
			_isParentAJoinStack.Pop();
			break;
	}
	_symbolTable.ExitScope();
	if (!IsParentAJoin)
	{
		_selectStatementStack.Pop();
	}
	return result;
}

/// This is called from VisitJoinExpression.
/// This is responsible for maintaining the symbol table after visiting
/// a child of a join expression.
/// The child's sql statement may need to be completed.
/// The child's result could be one of:
/// - the same as the parent's - this is treated specially;
/// - a sql select statement, which may need to be completed;
/// - an extent - just copy it to the from clause;
/// - anything else (from a collection-valued expression) -
///   unnest and copy it.
/// If the input was a Join, we need to create a new join symbol,
/// otherwise, we create a normal symbol.
/// We then call AddFromSymbol to add the AS clause, and update the symbol table.
/// If the child's result was the same as the parent's, we have to clean up
/// the list of symbols in the FromExtents list, since this contains symbols from
/// the children of both the parent and the child.
/// This happens when the child visited is a Join, and is the leftmost child of
/// the parent.
void ProcessJoinInputResult(ISqlFragment fromExtentFragment, SqlSelectStatement result, DbExpressionBinding input, int fromSymbolStart)
{
	Symbol fromSymbol = null;
	var varName = GetShortenedName(input.VariableName);
	if (result != fromExtentFragment)
	{
		// The child has its own select statement, and is not reusing
		// our select statement.
		// This should look a lot like VisitInputExpression().
		if (fromExtentFragment is SqlSelectStatement sqlSelectStatement)
		{
			if (sqlSelectStatement.Select.IsEmpty)
			{
				var columns = AddDefaultColumns(sqlSelectStatement);
				if (IsJoinExpression(input.Expression) || IsApplyExpression(input.Expression))
				{
					var extents = sqlSelectStatement.FromExtents;
					var newJoinSymbol = new JoinSymbol(varName, input.VariableType, extents);
					newJoinSymbol.IsNestedJoin = true;
					newJoinSymbol.ColumnList = columns;
					fromSymbol = newJoinSymbol;
				}
				else
				{
					// this is a copy of the code in CreateNewSelectStatement.
					// if the oldStatement has a join as its input, ...
					// clone the join symbol, so that we "reuse" the
					// join symbol. Normally, we create a new symbol - see the next block
					// of code.
					if (sqlSelectStatement.FromExtents[0] is JoinSymbol oldJoinSymbol)
					{
						// Note: sqlSelectStatement.FromExtents will not do, since it might
						// just be an alias of joinSymbol, and we want an actual JoinSymbol.
						var newJoinSymbol = new JoinSymbol(varName, input.VariableType, oldJoinSymbol.ExtentList);
						// This indicates that the sqlSelectStatement is a blocking scope
						// i.e. it hides/renames extent columns
						newJoinSymbol.IsNestedJoin = true;
						newJoinSymbol.ColumnList = columns;
						newJoinSymbol.FlattenedExtentList = oldJoinSymbol.FlattenedExtentList;
						fromSymbol = newJoinSymbol;
					}
				}
			}
			result.From.Append(" (");
			result.From.Append(sqlSelectStatement);
			result.From.Append(" )");
		}
		else if (input.Expression is DbScanExpression)
		{
			result.From.Append(fromExtentFragment);
		}
		else // bracket it
		{
			WrapNonQueryExtent(result, fromExtentFragment, input.Expression.ExpressionKind);
		}
		if (fromSymbol == null) // i.e. not a join symbol
		{
			fromSymbol = new Symbol(varName, input.VariableType);
		}
		AddFromSymbol(result, varName, fromSymbol);
		result.AllJoinExtents.Add(fromSymbol);
	}
	else // result == fromExtentFragment. The child extents have been merged into the parent's.
	{
		// we are adding extents to the current sql statement via flattening.
		// We are replacing the child's extents with a single Join symbol.
		// The child's extents are all those following the index fromSymbolStart.
		//
		var extents = new List();
		// We cannot call extents.AddRange, since there is no simple way to
		// get the range of symbols fromSymbolStart..result.FromExtents.Count
		// from result.FromExtents.
		// We copy these symbols to create the JoinSymbol later.
		for (var i = fromSymbolStart; i < result.FromExtents.Count; ++i)
		{
			extents.Add(result.FromExtents[i]);
		}
		result.FromExtents.RemoveRange(fromSymbolStart, result.FromExtents.Count - fromSymbolStart);
		fromSymbol = new JoinSymbol(varName, input.VariableType, extents);
		result.FromExtents.Add(fromSymbol);
		// this Join Symbol does not have its own select statement, so we
		// do not set IsNestedJoin
		// We do not call AddFromSymbol(), since we do not want to add
		// "AS alias" to the FROM clause- it has been done when the extent was added earlier.
		_symbolTable.Add(varName, fromSymbol);
	}
}

/// <summary>
/// We assume that this is only called as a child of a Project.
/// This replaces the normal Visit(DbNewInstanceExpression), since
/// we do not allow DbNewInstanceExpression as a child of any node other than
/// DbProjectExpression.
/// We write out the translation of each of the columns in the record.
/// </summary>
ISqlFragment VisitNewInstanceExpression(DbNewInstanceExpression e)
{
	var result = new SqlBuilder();
	if (e.ResultType.EdmType is RowType rowType)
	{
		var members = rowType.Properties;
		var separator = string.Empty;
		for (var i = 0; i < e.Arguments.Count; ++i)
		{
			var argument = e.Arguments[i];
			if (MetadataHelpers.IsRowType(argument.ResultType))
			{
				// We do not support nested records or other complex objects.
				throw new NotSupportedException();
			}
			var member = members[i];
			result.Append(separator);
			result.AppendLine();
			result.Append(argument.Accept(this));
			result.Append(" AS ");
			result.Append(QuoteIdentifier(member.Name));
			separator = ", ";
		}
	}
	else
	{
		//
		// Types other than RowType (such as UDTs for instance) are not supported.
		//
		throw new NotSupportedException();
	}
	return result;
}

/// <summary>
/// Combines two translated select statements with a set operator
/// (e.g. "UNION ALL"), one per line.
/// </summary>
ISqlFragment VisitSetOpExpression(DbExpression left, DbExpression right, string separator)
{
	var leftSelectStatement = VisitExpressionEnsureSqlStatement(left);
	var rightSelectStatement = VisitExpressionEnsureSqlStatement(right);
	var setStatement = new SqlBuilder();
	setStatement.Append(leftSelectStatement);
	setStatement.AppendLine();
	setStatement.Append(separator); // e.g. UNION ALL
	setStatement.AppendLine();
	setStatement.Append(rightSelectStatement);
	return setStatement;
}

#endregion

#region Function Handling Helpers

/// <summary>
/// Determines whether the given function is a built-in function that requires special handling.
/// </summary>
private bool IsSpecialBuiltInFunction(DbFunctionExpression e)
{
	return IsBuiltInFunction(e.Function) && _builtInFunctionHandlers.ContainsKey(e.Function.Name);
}

/// <summary>
/// Determines whether the given function is a canonical function that requires special handling.
/// </summary>
private bool IsSpecialCanonicalFunction(DbFunctionExpression e)
{
	return MetadataHelpers.IsCanonicalFunction(e.Function) && _canonicalFunctionHandlers.ContainsKey(e.Function.Name);
}

/// <summary>
/// Default handling for functions.
/// Translates them to FunctionName(arg1, arg2, ..., argn).
/// </summary>
private ISqlFragment HandleFunctionDefault(DbFunctionExpression e)
{
	var result = new SqlBuilder();
	WriteFunctionName(result, e.Function);
	HandleFunctionArgumentsDefault(e, result);
	return result;
}

/// Default handling for functions with a given name.
/// Translates them to functionName(arg1, arg2, ..., argn) /// /// /// /// private ISqlFragment HandleFunctionDefaultGivenName(DbFunctionExpression e, string functionName) { var result = new SqlBuilder(); result.Append(functionName); HandleFunctionArgumentsDefault(e, result); return result; } /// /// Default handling on function arguments /// Appends the list of arguments to the given result /// If the function is niladic it does not append anything, /// otherwise it appends (arg1, arg2, ..., argn) /// /// /// private void HandleFunctionArgumentsDefault(DbFunctionExpression e, SqlBuilder result) { var isNiladicFunction = MetadataHelpers.TryGetValueForMetadataProperty(e.Function, "NiladicFunctionAttribute"); if (isNiladicFunction && e.Arguments.Count > 0) { throw new InvalidOperationException("Niladic functions cannot have parameters"); } if (!isNiladicFunction) { result.Append("("); var separator = string.Empty; foreach (var arg in e.Arguments) { result.Append(separator); result.Append(arg.Accept(this)); separator = ", "; } result.Append(")"); } } /// /// Handler for special built in functions /// /// /// private ISqlFragment HandleSpecialBuiltInFunction(DbFunctionExpression e) { return HandleSpecialFunction(_builtInFunctionHandlers, e); } /// /// Handler for special canonical functions /// /// /// private ISqlFragment HandleSpecialCanonicalFunction(DbFunctionExpression e) { return HandleSpecialFunction(_canonicalFunctionHandlers, e); } /// /// Dispatches the special function processing to the appropriate handler /// /// /// /// private ISqlFragment HandleSpecialFunction(Dictionary handlers, DbFunctionExpression e) { if (!handlers.ContainsKey(e.Function.Name)) throw new InvalidOperationException("Special handling should be called only for functions in the list of special functions"); return handlers[e.Function.Name](this, e); } /// /// Handles functions that are translated into SQL operators. /// The given function should have one or two arguments. 
/// Functions with one argument are translated into: op arg.
/// Functions with two arguments are translated into: arg0 op arg1.
/// Also, the arguments can optionally be enclosed in parentheses.
/// <param name="parenthesiseArguments">Whether the arguments should be enclosed in parentheses</param>
private ISqlFragment HandleSpecialFunctionToOperator(DbFunctionExpression e, bool parenthesiseArguments)
{
	var result = new SqlBuilder();
	Debug.Assert(e.Arguments.Count > 0 && e.Arguments.Count <= 2, "There should be 1 or 2 arguments for operator");
	if (e.Arguments.Count > 1)
	{
		if (parenthesiseArguments)
		{
			result.Append("(");
		}
		result.Append(e.Arguments[0].Accept(this));
		if (parenthesiseArguments)
		{
			result.Append(")");
		}
	}
	result.Append(" ");
	// The operator text is looked up by function name.
	Debug.Assert(_functionNameToOperatorDictionary.ContainsKey(e.Function.Name), "The function can not be mapped to an operator");
	result.Append(_functionNameToOperatorDictionary[e.Function.Name]);
	result.Append(" ");
	if (parenthesiseArguments)
	{
		result.Append("(");
	}
	result.Append(e.Arguments[e.Arguments.Count - 1].Accept(this));
	if (parenthesiseArguments)
	{
		result.Append(")");
	}
	return result;
}

#region String Canonical Functions

/// <summary>Concat is translated as an operator (looked up in _functionNameToOperatorDictionary).</summary>
private static ISqlFragment HandleCanonicalConcatFunction(SqlGenerator sqlgen, DbFunctionExpression e)
{
	return sqlgen.HandleSpecialFunctionToOperator(e, false);
}

/// <summary>Contains is translated as an operator; the enclosing comparison is suppressed.</summary>
private static ISqlFragment HandleCanonicalContainsFunction(SqlGenerator sqlgen, DbFunctionExpression e)
{
	sqlgen._shouldHandleBoolComparison = false;
	return sqlgen.HandleSpecialFunctionToOperator(e, false);
}

/// <summary>
/// EndsWith(a, b) is translated as "REVERSE(a) STARTING WITH REVERSE(b)".
/// </summary>
private static ISqlFragment HandleCanonicalEndsWithFunction(SqlGenerator sqlgen, DbFunctionExpression e)
{
	sqlgen._shouldHandleBoolComparison = false;
	var result = new SqlBuilder();
	result.Append("REVERSE(");
	result.Append(e.Arguments[0].Accept(sqlgen));
	result.Append(") STARTING WITH REVERSE(");
	result.Append(e.Arguments[1].Accept(sqlgen));
	result.Append(")");
	return result;
}

/// <summary>IndexOf maps to POSITION.</summary>
private static ISqlFragment HandleCanonicalFunctionIndexOf(SqlGenerator sqlgen, DbFunctionExpression e)
{
	return sqlgen.HandleFunctionDefaultGivenName(e, "POSITION");
}

/// <summary>Length maps to CHAR_LENGTH.</summary>
private static ISqlFragment HandleCanonicalFunctionLength(SqlGenerator sqlgen, DbFunctionExpression e)
{
	return sqlgen.HandleFunctionDefaultGivenName(e, "CHAR_LENGTH");
}

/// <summary>Trim maps to TRIM(BOTH FROM ...).</summary>
private static ISqlFragment HandleCanonicalFunctionTrim(SqlGenerator sqlgen, DbFunctionExpression e)
{
	return TrimHelper(sqlgen, e, "BOTH");
}

/// <summary>LTrim maps to TRIM(LEADING FROM ...).</summary>
private static ISqlFragment HandleCanonicalFunctionLTrim(SqlGenerator sqlgen, DbFunctionExpression e)
{
	return TrimHelper(sqlgen, e, "LEADING");
}

/// <summary>RTrim maps to TRIM(TRAILING FROM ...).</summary>
private static ISqlFragment HandleCanonicalFunctionRTrim(SqlGenerator sqlgen, DbFunctionExpression e)
{
	return TrimHelper(sqlgen, e, "TRAILING");
}

/// TRIM ( [ [ what ] FROM ] value )
/// what ::= LEADING | TRAILING | BOTH
private static ISqlFragment TrimHelper(SqlGenerator sqlgen, DbFunctionExpression e, string what)
{
	var result = new SqlBuilder();
	result.Append("TRIM(");
	result.Append(what);
	result.Append(" FROM ");
	Debug.Assert(e.Arguments.Count == 1, "Trim should have one argument");
	result.Append(e.Arguments[0].Accept(sqlgen));
	result.Append(")");
	return result;
}

/// <summary>Left maps to LEFT.</summary>
private static ISqlFragment HandleCanonicalFunctionLeft(SqlGenerator sqlgen, DbFunctionExpression e)
{
	return sqlgen.HandleFunctionDefaultGivenName(e, "LEFT");
}

/// <summary>Right maps to RIGHT.</summary>
private static ISqlFragment HandleCanonicalFunctionRight(SqlGenerator sqlgen, DbFunctionExpression e)
{
	return sqlgen.HandleFunctionDefaultGivenName(e, "RIGHT");
}

/// <summary>Reverse maps to REVERSE.</summary>
private static ISqlFragment HandleCanonicalFunctionReverse(SqlGenerator sqlgen, DbFunctionExpression e)
{
	return sqlgen.HandleFunctionDefaultGivenName(e, "REVERSE");
}

/// <summary>Replace maps to REPLACE.</summary>
private static ISqlFragment HandleCanonicalFunctionReplace(SqlGenerator sqlgen, DbFunctionExpression e)
{
	return sqlgen.HandleFunctionDefaultGivenName(e, "REPLACE");
}

/// <summary>StartsWith is translated as an operator; the enclosing comparison is suppressed.</summary>
private static ISqlFragment HandleCanonicalStartsWithFunction(SqlGenerator sqlgen, DbFunctionExpression e)
{
	sqlgen._shouldHandleBoolComparison = false;
	return sqlgen.HandleSpecialFunctionToOperator(e, false);
}
private static ISqlFragment HandleCanonicalFunctionSubstring(SqlGenerator sqlgen, DbFunctionExpression e) { var result = new SqlBuilder(); result.Append("SUBSTRING("); Debug.Assert(e.Arguments.Count == 3, "Substring should have three arguments"); result.Append(e.Arguments[0].Accept(sqlgen)); result.Append(" FROM "); result.Append(e.Arguments[1].Accept(sqlgen)); result.Append(" FOR "); result.Append(e.Arguments[2].Accept(sqlgen)); result.Append(")"); return result; } private static ISqlFragment HandleCanonicalFunctionToLower(SqlGenerator sqlgen, DbFunctionExpression e) { return sqlgen.HandleFunctionDefaultGivenName(e, "LOWER"); } private static ISqlFragment HandleCanonicalFunctionToUpper(SqlGenerator sqlgen, DbFunctionExpression e) { return sqlgen.HandleFunctionDefaultGivenName(e, "UPPER"); } #endregion #region Bitwise Canonical Functions private static ISqlFragment HandleCanonicalFunctionBitwiseAnd(SqlGenerator sqlgen, DbFunctionExpression e) { return sqlgen.HandleFunctionDefaultGivenName(e, "BIN_AND"); } private static ISqlFragment HandleCanonicalFunctionBitwiseNot(SqlGenerator sqlgen, DbFunctionExpression e) { return sqlgen.HandleFunctionDefaultGivenName(e, "BIN_NOT"); } private static ISqlFragment HandleCanonicalFunctionBitwiseOr(SqlGenerator sqlgen, DbFunctionExpression e) { return sqlgen.HandleFunctionDefaultGivenName(e, "BIN_OR"); } private static ISqlFragment HandleCanonicalFunctionBitwiseXor(SqlGenerator sqlgen, DbFunctionExpression e) { return sqlgen.HandleFunctionDefaultGivenName(e, "BIN_XOR"); } #endregion #region Date and Time Canonical Functions private static ISqlFragment HandleCanonicalFunctionCurrentUtcDateTime(SqlGenerator sqlgen, DbFunctionExpression e) { throw new NotSupportedException("CurrentUtcDateTime is not supported by Firebird."); } private static ISqlFragment HandleCanonicalFunctionCurrentDateTimeOffset(SqlGenerator sqlgen, DbFunctionExpression e) { throw new NotSupportedException("CurrentDateTimeOffset is not supported by Firebird."); } 
private static ISqlFragment HandleCanonicalFunctionGetTotalOffsetMinutes(SqlGenerator sqlgen, DbFunctionExpression e)
{
	throw new NotSupportedException("GetTotalOffsetMinutes is not supported by Firebird.");
}

private static ISqlFragment HandleCanonicalFunctionCurrentDateTime(SqlGenerator sqlgen, DbFunctionExpression e)
{
	var builder = new SqlBuilder();
	builder.Append("CURRENT_TIMESTAMP");
	return builder;
}

/// <summary>
/// Handler for canonical functions extracting date parts.
/// For example: Year(date) -> EXTRACT(YEAR FROM date)
/// </summary>
private static ISqlFragment HandleCanonicalFunctionExtract(SqlGenerator sqlgen, DbFunctionExpression e, string extractPart)
{
	if (extractPart == null)
		throw new NotSupportedException();
	var builder = new SqlBuilder();
	builder.Append("EXTRACT(");
	builder.Append(extractPart);
	builder.Append(" FROM ");
	Debug.Assert(e.Arguments.Count == 1, "Canonical datepart functions should have exactly one argument");
	builder.Append(e.Arguments[0].Accept(sqlgen));
	builder.Append(")");
	return builder;
}

/// <summary>
/// AddXxx(datetime, number) -> DATEADD(part, number, datetime).
/// Note the canonical argument order (datetime first) is swapped for DATEADD.
/// </summary>
private static ISqlFragment HandleCanonicalFunctionDateTimeAdd(SqlGenerator sqlgen, DbFunctionExpression e, string addPart)
{
	if (addPart == null)
		throw new NotSupportedException();
	var builder = new SqlBuilder();
	builder.Append("DATEADD(");
	builder.Append(addPart);
	builder.Append(", ");
	Debug.Assert(e.Arguments.Count == 2, "Canonical dateadd functions should have exactly two arguments");
	builder.Append(e.Arguments[1].Accept(sqlgen));
	builder.Append(", ");
	builder.Append(e.Arguments[0].Accept(sqlgen));
	builder.Append(")");
	return builder;
}

/// <summary>
/// DiffXxx(a, b) -> DATEDIFF(part, b, a).
/// </summary>
private static ISqlFragment HandleCanonicalFunctionDateTimeDiff(SqlGenerator sqlgen, DbFunctionExpression e, string diffPart)
{
	if (diffPart == null)
		throw new NotSupportedException();
	var builder = new SqlBuilder();
	builder.Append("DATEDIFF(");
	builder.Append(diffPart);
	builder.Append(", ");
	Debug.Assert(e.Arguments.Count == 2, "Canonical datediff functions should have exactly two arguments");
	builder.Append(e.Arguments[1].Accept(sqlgen));
	builder.Append(", ");
	builder.Append(e.Arguments[0].Accept(sqlgen));
	builder.Append(")");
	return builder;
}

/// <summary>
/// CCYY-MM-DD HH:NN:SS.nnnn
/// CreateDateTime(year, month, day, hour, minute, second)
/// Constant parts are baked into a timestamp literal; non-constant parts get a
/// placeholder default which is then patched with DATEADD fragments below.
/// </summary>
private static ISqlFragment HandleCanonicalFunctionCreateDateTime(SqlGenerator sqlgen, DbFunctionExpression e)
{
	var result = new SqlBuilder();
	result.Append("CAST('");
	result.Append((e.Arguments[0].ExpressionKind == DbExpressionKind.Constant) ? e.Arguments[0].Accept(sqlgen) : (object)"0001"); // year
	result.Append("-");
	result.Append((e.Arguments[1].ExpressionKind == DbExpressionKind.Constant) ? e.Arguments[1].Accept(sqlgen) : (object)"01"); // month
	result.Append("-");
	result.Append((e.Arguments[2].ExpressionKind == DbExpressionKind.Constant) ? e.Arguments[2].Accept(sqlgen) : (object)"01"); // day
	result.Append(" ");
	result.Append((e.Arguments[3].ExpressionKind == DbExpressionKind.Constant) ? e.Arguments[3].Accept(sqlgen) : (object)"00"); // hour
	result.Append(":");
	result.Append((e.Arguments[4].ExpressionKind == DbExpressionKind.Constant) ? e.Arguments[4].Accept(sqlgen) : (object)"00"); // minute
	result.Append(":");
	result.Append("00"); // second is typeof(double?), would result in CAST SqlFragment
	result.Append("' AS TIMESTAMP)");
	// in case a date part is not constant, generate additional DATEADD fragments
	if (e.Arguments[0].ExpressionKind != DbExpressionKind.Constant && e.Arguments[0].ExpressionKind != DbExpressionKind.Null)
		result = HandleDateAdd("YEAR", e.Arguments[0].Accept(sqlgen), result);
	if (e.Arguments[1].ExpressionKind != DbExpressionKind.Constant && e.Arguments[1].ExpressionKind != DbExpressionKind.Null)
		result = HandleDateAdd("MONTH", e.Arguments[1].Accept(sqlgen), result);
	if (e.Arguments[2].ExpressionKind != DbExpressionKind.Constant && e.Arguments[2].ExpressionKind != DbExpressionKind.Null)
		result = HandleDateAdd("DAY", e.Arguments[2].Accept(sqlgen), result);
	if (e.Arguments[3].ExpressionKind != DbExpressionKind.Constant && e.Arguments[3].ExpressionKind != DbExpressionKind.Null)
		result = HandleDateAdd("HOUR", e.Arguments[3].Accept(sqlgen), result);
	if (e.Arguments[4].ExpressionKind != DbExpressionKind.Constant && e.Arguments[4].ExpressionKind != DbExpressionKind.Null)
		result = HandleDateAdd("MINUTE", e.Arguments[4].Accept(sqlgen), result);
	// seconds always go through DATEADD (see placeholder note above), except a constant zero
	if ((e.Arguments[5].ExpressionKind != DbExpressionKind.Constant || (((DbConstantExpression)e.Arguments[5]).Value as double?) != 0) && e.Arguments[5].ExpressionKind != DbExpressionKind.Null)
		result = HandleDateAdd("SECOND", e.Arguments[5].Accept(sqlgen), result);
	// in case a default value was used for the year/month/day part, remove it afterwards
	if (e.Arguments[0].ExpressionKind != DbExpressionKind.Constant)
		result = HandleDateAdd("YEAR", DbExpression.FromInt32(-1).Accept(sqlgen), result);
	if (e.Arguments[1].ExpressionKind != DbExpressionKind.Constant)
		result = HandleDateAdd("MONTH", DbExpression.FromInt32(-1).Accept(sqlgen), result);
	if (e.Arguments[2].ExpressionKind != DbExpressionKind.Constant)
		result = HandleDateAdd("DAY", DbExpression.FromInt32(-1).Accept(sqlgen), result);
	return result;
}

// Wraps an existing datetime fragment in DATEADD(datePart, value, dateTime).
private static SqlBuilder HandleDateAdd(string datePart, ISqlFragment value, ISqlFragment dateTime)
{
	var builder = new SqlBuilder();
	builder.Append("DATEADD(");
	builder.Append(datePart);
	builder.Append(", ");
	builder.Append(value);
	builder.Append(", ");
	builder.Append(dateTime);
	builder.Append(")");
	return builder;
}

private static ISqlFragment HandleCanonicalFunctionCreateDateTimeOffset(SqlGenerator sqlgen, DbFunctionExpression e)
{
	throw new NotSupportedException("CreateDateTimeOffset is not supported by Firebird.");
}

/// <summary>
/// HH:NN:SS.nnnn
/// CreateTime(hour, minute, second)
/// </summary>
private static ISqlFragment HandleCanonicalFunctionCreateTime(SqlGenerator sqlgen, DbFunctionExpression e)
{
	var builder = new SqlBuilder();
	builder.Append("CAST('");
	builder.Append(e.Arguments[0].Accept(sqlgen));
	builder.Append(":");
	builder.Append(e.Arguments[1].Accept(sqlgen));
	builder.Append(":");
	builder.Append(e.Arguments[2].Accept(sqlgen));
	builder.Append("' AS TIME)");
	return builder;
}

// Drops the time portion by round-tripping through DATE.
private static ISqlFragment HandleCanonicalFunctionTruncateTime(SqlGenerator sqlgen, DbFunctionExpression e)
{
	var builder = new SqlBuilder();
	builder.Append("CAST(CAST(");
	builder.Append(e.Arguments[0].Accept(sqlgen));
	builder.Append(" as DATE) as TIMESTAMP)");
	return builder;
}

#endregion

#region Other Canonical Functions

private static ISqlFragment HandleCanonicalFunctionNewGuid(SqlGenerator sqlgen, DbFunctionExpression e)
{
	return sqlgen.HandleFunctionDefaultGivenName(e, "GEN_UUID");
}

#endregion

#region Math Canonical Functions

private static ISqlFragment HandleCanonicalFunctionAbs(SqlGenerator sqlgen, DbFunctionExpression e)
{
	return sqlgen.HandleFunctionDefaultGivenName(e, "ABS");
}

private static ISqlFragment HandleCanonicalFunctionCeiling(SqlGenerator sqlgen, DbFunctionExpression e)
{
	return sqlgen.HandleFunctionDefaultGivenName(e, "CEILING");
}

private static ISqlFragment HandleCanonicalFunctionFloor(SqlGenerator sqlgen, DbFunctionExpression e)
{
	return sqlgen.HandleFunctionDefaultGivenName(e, "FLOOR");
}

private static ISqlFragment HandleCanonicalFunctionPower(SqlGenerator sqlgen, DbFunctionExpression e)
{
	return sqlgen.HandleFunctionDefaultGivenName(e, "POWER");
}

private static ISqlFragment HandleCanonicalFunctionRound(SqlGenerator sqlgen, DbFunctionExpression e)
{
	return sqlgen.HandleFunctionDefaultGivenName(e, "ROUND");
}

private static ISqlFragment HandleCanonicalFunctionTruncate(SqlGenerator sqlgen, DbFunctionExpression e)
{
	return sqlgen.HandleFunctionDefaultGivenName(e, "TRUNC");
}

#endregion

#endregion

#endregion

#region Helper methods for the DbExpressionVisitor
/// <summary>
/// Adds the column names from the referenced extent/join to the select statement.
/// If the symbol is a JoinSymbol, we recursively visit all the extents,
/// halting at real extents and JoinSymbols that have an associated SqlSelectStatement.
/// For each column we: append its SQL to the SELECT clause, add it to
/// <paramref name="columnList"/> (so it can become part of the "type" of a JoinSymbol),
/// flag colliding names for renaming, and record it in <paramref name="columnDictionary"/>.
/// </summary>
/// <param name="selectStatement">The select statement that started off as SELECT *.</param>
/// <param name="symbol">The symbol containing the type information for the columns to be added.</param>
/// <param name="columnList">Columns that have been added to the select statement.</param>
/// <param name="columnDictionary">A lookup of the columns above, for collision detection.</param>
/// <param name="separator">Comma or nothing, depending on whether the SELECT clause is empty.</param>
void AddColumns(SqlSelectStatement selectStatement, Symbol symbol,
	List<Symbol> columnList, Dictionary<string, Symbol> columnDictionary, ref string separator)
{
	if (symbol is JoinSymbol joinSymbol)
	{
		if (!joinSymbol.IsNestedJoin)
		{
			// Recurse if the join symbol is a collection of flattened extents
			foreach (var sym in joinSymbol.ExtentList)
			{
				// if sym is ScalarType means we are at base case in the
				// recursion and there are no columns to add, just skip
				if (MetadataHelpers.IsPrimitiveType(sym.Type))
				{
					continue;
				}
				AddColumns(selectStatement, sym, columnList, columnDictionary, ref separator);
			}
		}
		else
		{
			foreach (var joinColumn in joinSymbol.ColumnList)
			{
				// we write tableName.columnName rather than
				// tableName.columnName as alias, since the column name is
				// unique (by the way we generate new column names).
				// We use the symbols for both the table and the column,
				// since they are subject to renaming.
				selectStatement.Select.Append(separator);
				selectStatement.Select.Append(symbol);
				selectStatement.Select.Append(".");
				selectStatement.Select.Append(joinColumn);

				// check for name collisions. If there is one, flag both the colliding symbols.
				if (columnDictionary.ContainsKey(joinColumn.Name))
				{
					columnDictionary[joinColumn.Name].NeedsRenaming = true; // the original symbol
					joinColumn.NeedsRenaming = true; // the current symbol
				}
				else
				{
					columnDictionary[joinColumn.Name] = joinColumn;
				}

				columnList.Add(joinColumn);
				separator = ", ";
			}
		}
	}
	else
	{
		// This is a non-join extent/select statement, and the CQT type has
		// the relevant column information. The type could be a record type
		// (e.g. Project(...)) or an entity type (e.g. EntityExpression(...)),
		// so we check whether it is a structural type.
		//
		// Consider an expression of the form J(a, b=P(E)).
		// The inner P(E) would have been translated to a SQL statement.
		// We should not use the raw names from the type, but the equivalent
		// symbols (they are present in symbol.Columns) if they exist.
		//
		// We add the new columns to the symbol's columns if they do not already exist.
		foreach (var property in MetadataHelpers.GetProperties(symbol.Type))
		{
			var recordMemberName = property.Name;
			// Since all renaming happens in the second phase we lose nothing
			// by setting the next column name index to 0 many times.
			_allColumnNames[recordMemberName] = 0;

			// Create a new symbol/reuse existing symbol for the column
			if (!symbol.Columns.TryGetValue(recordMemberName, out var columnSymbol))
			{
				// we do not care about the types of columns, so we pass null
				// when constructing the symbol.
				columnSymbol = new Symbol(recordMemberName, null);
				symbol.Columns.Add(recordMemberName, columnSymbol);
			}

			selectStatement.Select.Append(separator);
			selectStatement.Select.Append(symbol);
			selectStatement.Select.Append(".");
			// We use the actual name before the "AS"; the new name goes after the AS.
			selectStatement.Select.Append(QuoteIdentifier(recordMemberName));
			selectStatement.Select.Append(" AS ");
			selectStatement.Select.Append(columnSymbol);

			// Check for column name collisions.
			if (columnDictionary.ContainsKey(recordMemberName))
			{
				columnDictionary[recordMemberName].NeedsRenaming = true;
				columnSymbol.NeedsRenaming = true;
			}
			else
			{
				columnDictionary[recordMemberName] = symbol.Columns[recordMemberName];
			}

			columnList.Add(columnSymbol);
			separator = ", ";
		}
	}
}

/// <summary>
/// Expands SELECT * to "select the_list_of_columns".
/// If the columns are taken from an extent, they are written as
/// {original_column_name AS Symbol(original_column)} to allow renaming.
/// If the columns are taken from a Join, they are written as just
/// {original_column_name}, since there cannot be a name collision.
/// We concatenate the columns from each of the inputs to the select statement;
/// since the inputs may be joins that are flattened, we need to recurse.
/// The inputs are inferred from the symbols in FromExtents.
/// </summary>
/// <param name="selectStatement">The statement whose SELECT clause is expanded.</param>
/// <returns>The list of columns added in this select statement.</returns>
List<Symbol> AddDefaultColumns(SqlSelectStatement selectStatement)
{
	// This list forms the "type" of the select statement, if it has to
	// be expanded in another SELECT *.
	var columnList = new List<Symbol>();
	// A lookup for the previous set of columns to aid column name collision detection.
	var columnDictionary = new Dictionary<string, Symbol>(StringComparer.OrdinalIgnoreCase);
	var separator = string.Empty;
	// The Select should usually be empty before we are called,
	// but we do not mind if it is not.
	if (!selectStatement.Select.IsEmpty)
	{
		separator = ", ";
	}
	foreach (var symbol in selectStatement.FromExtents)
	{
		AddColumns(selectStatement, symbol, columnList, columnDictionary, ref separator);
	}
	return columnList;
}

/// <summary>
/// Convenience overload; adds the symbol to the symbol table as well.
/// </summary>
void AddFromSymbol(SqlSelectStatement selectStatement, string inputVarName, Symbol fromSymbol)
{
	AddFromSymbol(selectStatement, inputVarName, fromSymbol, true);
}
/// <summary>
/// This method is called after the input to a relational node is visited.
/// There are 2 scenarios: the fromSymbol is new (the select statement has just
/// been created, or a join extent has been added), or the fromSymbol is old
/// (we are reusing a select statement).
/// If we are not reusing the select statement, we complete the FROM clause
/// with "AS alias" so it looks like a valid FROM clause, e.g.
/// FROM [SchemaName].[TableName] AS alias, or
/// FROM (SELECT ... FROM ... WHERE ...) AS alias.
/// Finally, we add the alias to the global list of aliases used,
/// and optionally to the current symbol table.
/// </summary>
/// <param name="selectStatement">The statement whose FROM clause is completed.</param>
/// <param name="inputVarName">The input variable the symbol is registered under.</param>
/// <param name="fromSymbol">The alias symbol to be used.</param>
/// <param name="addToSymbolTable">Whether to register the symbol in the current scope.</param>
void AddFromSymbol(SqlSelectStatement selectStatement, string inputVarName, Symbol fromSymbol, bool addToSymbolTable)
{
	// The first check is true if this is a new statement; the second is true
	// if we are in a join. We do not want to add "AS alias" if it has been
	// done already, e.g. when we are reusing the Sql statement.
	if (selectStatement.FromExtents.Count == 0 || fromSymbol != selectStatement.FromExtents[0])
	{
		selectStatement.FromExtents.Add(fromSymbol);
		selectStatement.From.Append(" AS ");
		selectStatement.From.Append(fromSymbol);

		// Inside the if statement, since we only want to record extents
		// that are actually used.
		_allExtentNames[fromSymbol.Name] = 0;
	}

	if (addToSymbolTable)
	{
		_symbolTable.Add(inputVarName, fromSymbol);
	}
}

/// <summary>
/// Translates a list of SortClauses. Used in the translation of OrderBy.
/// </summary>
/// <param name="orderByClause">The SqlBuilder to which the sort keys should be appended.</param>
/// <param name="sortKeys">The sort clauses to translate.</param>
void AddSortKeys(SqlBuilder orderByClause, IList<DbSortClause> sortKeys)
{
	var separator = string.Empty;
	foreach (var sortClause in sortKeys)
	{
		orderByClause.Append(separator);
		orderByClause.Append(sortClause.Expression.Accept(this));
		Debug.Assert(sortClause.Collation != null);
		if (!string.IsNullOrEmpty(sortClause.Collation))
		{
			orderByClause.Append(" COLLATE ");
			orderByClause.Append(sortClause.Collation);
		}
		orderByClause.Append(sortClause.Ascending ? " ASC" : " DESC");
		separator = ", ";
	}
}

/// <summary>
/// Convenience overload; finalizes the old statement.
/// </summary>
/// <returns>A new select statement, with the old one as the from clause.</returns>
SqlSelectStatement CreateNewSelectStatement(SqlSelectStatement oldStatement,
	string inputVarName, TypeUsage inputVarType, out Symbol fromSymbol)
{
	return CreateNewSelectStatement(oldStatement, inputVarName, inputVarType, true, out fromSymbol);
}
/// <summary>
/// This is called after a relational node's input has been visited, and the
/// input's sql statement cannot be reused. We create a new sql statement
/// with the old one as the from clause of the new statement.
/// The old statement must be completed, i.e. if it has an empty select list,
/// the list of columns must be projected out.
/// If the old statement being completed has a join symbol as its from extent,
/// the new statement must have a clone of the join symbol as its extent:
/// we cannot reuse the old symbol, but the new select statement must behave
/// as though it is working over the "join" record.
/// </summary>
/// <returns>A new select statement, with the old one as the from clause.</returns>
SqlSelectStatement CreateNewSelectStatement(SqlSelectStatement oldStatement,
	string inputVarName, TypeUsage inputVarType, bool finalizeOldStatement, out Symbol fromSymbol)
{
	fromSymbol = null;

	// Finalize the old statement
	if (finalizeOldStatement && oldStatement.Select.IsEmpty)
	{
		var columns = AddDefaultColumns(oldStatement);

		// This could not have been called from a join node.
		Debug.Assert(oldStatement.FromExtents.Count == 1);

		// If the oldStatement has a join as its input, clone the join symbol
		// so that we "reuse" it. Normally, we create a new symbol - see below.
		if (oldStatement.FromExtents[0] is JoinSymbol oldJoinSymbol)
		{
			// Note: oldStatement.FromExtents will not do, since it might
			// just be an alias of joinSymbol, and we want an actual JoinSymbol.
			var newJoinSymbol = new JoinSymbol(inputVarName, inputVarType, oldJoinSymbol.ExtentList);
			// This indicates that the oldStatement is a blocking scope,
			// i.e. it hides/renames extent columns.
			newJoinSymbol.IsNestedJoin = true;
			newJoinSymbol.ColumnList = columns;
			newJoinSymbol.FlattenedExtentList = oldJoinSymbol.FlattenedExtentList;
			fromSymbol = newJoinSymbol;
		}
	}

	if (fromSymbol == null)
	{
		// This is just a simple extent/SqlSelectStatement,
		// and we can get the column list from the type.
		fromSymbol = new Symbol(inputVarName, inputVarType);
	}

	// Observe that the following looks like the body of Visit(ExtentExpression).
	var selectStatement = new SqlSelectStatement();
	selectStatement.From.Append("( ");
	selectStatement.From.Append(oldStatement);
	selectStatement.From.AppendLine();
	selectStatement.From.Append(") ");

	return selectStatement;
}

// SQL literal for a boolean (Firebird < 3 has no BOOLEAN type; SMALLINT 0/1 is used).
internal static string FormatBoolean(bool value)
{
	return value ? "CAST(1 AS SMALLINT)" : "CAST(0 AS SMALLINT)";
}

// SQL hex literal for a byte array.
internal static string FormatBinary(byte[] value)
{
	return string.Format("x'{0}'", value.ToHexString());
}

// SQL string literal, optionally tagged with the _UTF8 introducer and cast
// to a VARCHAR of the given (or inferred) length.
internal static string FormatString(string value, bool isUnicode, int? explicitLength = null)
{
	var sb = new StringBuilder();
	sb.Append("CAST(");
	if (isUnicode)
	{
		sb.Append("_UTF8");
	}
	sb.Append("'");
	sb.Append(value.Replace("'", "''"));
	sb.Append("' AS VARCHAR(");
	sb.Append(explicitLength ?? value.Length);
	sb.Append("))");
	return sb.ToString();
}

// SQL TIMESTAMP literal.
internal static string FormatDateTime(DateTime value)
{
	var sb = new StringBuilder();
	sb.Append("CAST('");
	sb.Append(value.ToString("yyyy-MM-dd HH:mm:ss.ffff", CultureInfo.InvariantCulture));
	sb.Append("' AS TIMESTAMP)");
	return sb.ToString();
}

// SQL TIME literal (only the time-of-day portion of the value is used).
internal static string FormatTime(DateTime value)
{
	var sb = new StringBuilder();
	sb.Append("CAST('");
	sb.Append(value.ToString("HH:mm:ss.ffff", CultureInfo.InvariantCulture));
	sb.Append("' AS TIME)");
	return sb.ToString();
}

// SQL TIME literal from a TimeSpan (anchored to an arbitrary date, which the
// HH:mm:ss format then discards).
internal static string FormatTime(TimeSpan value)
{
	return FormatTime(DateTime.Today.Add(value));
}

// SQL expression converting a GUID string into Firebird's 16-byte UUID form.
internal static string FormatGuid(Guid value)
{
	var sb = new StringBuilder();
	sb.Append("CHAR_TO_UUID('");
	sb.Append(value.ToString());
	sb.Append("')");
	return sb.ToString();
}
/// <summary>
/// Returns the sql primitive/native type name.
/// It will include size, precision or scale depending on type information
/// present in the type facets.
/// </summary>
/// <param name="type">The EDM type usage to map.</param>
/// <returns>The Firebird type name.</returns>
internal static string GetSqlPrimitiveType(TypeUsage type)
{
	// NOTE(review): the extraction stripped generic type arguments here; they are
	// restored (GetEdmType<PrimitiveType>, GetFacetValueOrDefault<T>) so the code compiles.
	var primitiveType = MetadataHelpers.GetEdmType<PrimitiveType>(type);

	var typeName = primitiveType.Name;
	var isUnicode = true;
	var isFixedLength = false;
	var length = 0;
	byte precision = 0;
	byte scale = 0;

	switch (primitiveType.PrimitiveTypeKind)
	{
		case PrimitiveTypeKind.Boolean:
			typeName = "SMALLINT";
			break;

		case PrimitiveTypeKind.Int16:
			typeName = "SMALLINT";
			break;

		case PrimitiveTypeKind.Int32:
			typeName = "INT";
			break;

		case PrimitiveTypeKind.Int64:
			typeName = "BIGINT";
			break;

		case PrimitiveTypeKind.Double:
			typeName = "DOUBLE PRECISION";
			break;

		case PrimitiveTypeKind.Single:
			typeName = "FLOAT";
			break;

		case PrimitiveTypeKind.Decimal:
			precision = MetadataHelpers.GetFacetValueOrDefault<byte>(type, MetadataHelpers.PrecisionFacetName, 9);
			Debug.Assert(precision > 0, "decimal precision must be greater than zero");
			scale = MetadataHelpers.GetFacetValueOrDefault<byte>(type, MetadataHelpers.ScaleFacetName, 0);
			Debug.Assert(precision >= scale, "decimalPrecision must be greater or equal to decimalScale");
			Debug.Assert(precision <= 18, "decimalPrecision must be less than or equal to 18");
			typeName = string.Format("DECIMAL({0},{1})", precision, scale);
			break;

		case PrimitiveTypeKind.Binary:
			typeName = "BLOB SUB_TYPE BINARY";
			break;

		case PrimitiveTypeKind.String:
			isUnicode = MetadataHelpers.GetFacetValueOrDefault<bool>(type, MetadataHelpers.UnicodeFacetName, true);
			isFixedLength = MetadataHelpers.GetFacetValueOrDefault<bool>(type, MetadataHelpers.FixedLengthFacetName, false);
			length = MetadataHelpers.GetFacetValueOrDefault<int?>(type, MetadataHelpers.MaxLengthFacetName, null)
				?? (isUnicode ? FbProviderManifest.UnicodeVarcharMaxSize : FbProviderManifest.AsciiVarcharMaxSize);
			if (isFixedLength)
			{
				// Both unicode and ascii use CHAR (the original code had an
				// identical-branch ternary here; simplified).
				typeName = "CHAR(" + length + ")";
			}
			else
			{
				if (length > (isUnicode ? FbProviderManifest.UnicodeVarcharMaxSize : FbProviderManifest.AsciiVarcharMaxSize))
				{
					// Too long for VARCHAR; fall back to a text blob.
					typeName = "BLOB SUB_TYPE TEXT";
				}
				else
				{
					typeName = "VARCHAR(" + length + ")";
				}
			}
			break;

		case PrimitiveTypeKind.DateTime:
			precision = MetadataHelpers.GetFacetValueOrDefault<byte>(type, MetadataHelpers.PrecisionFacetName, 4);
			// Zero precision means no time portion is wanted.
			typeName = (precision > 0 ? "TIMESTAMP" : "DATE");
			break;

		case PrimitiveTypeKind.Time:
			typeName = "TIME";
			break;

		case PrimitiveTypeKind.Guid:
			typeName = "CHAR(16) CHARACTER SET OCTETS";
			break;

		default:
			throw new NotSupportedException("Unsupported EdmType: " + primitiveType.PrimitiveTypeKind);
	}

	return typeName;
}

/// <summary>
/// Handles the expression representing DbLimitExpression.Limit and DbSkipExpression.Count.
/// If it is a constant expression, it is simply stringified, thus avoiding
/// casting it to the specific value (which the default DbConstantExpression
/// handling would do).
/// </summary>
/// <param name="e">The count expression.</param>
/// <returns>The SQL fragment for the count.</returns>
private ISqlFragment HandleCountExpression(DbExpression e)
{
	ISqlFragment result;
	if (e.ExpressionKind == DbExpressionKind.Constant)
	{
		// For constant expressions we should not cast the value,
		// thus we don't go through the default DbConstantExpression handling.
		var sqlBuilder = new SqlBuilder();
		sqlBuilder.Append(((DbConstantExpression)e).Value.ToString());
		result = sqlBuilder;
	}
	else
	{
		result = e.Accept(this);
	}
	return result;
}

/// <summary>
/// This is used to determine if a particular expression is an Apply operation.
/// This is only the case when the DbExpressionKind is CrossApply or OuterApply.
/// </summary>
bool IsApplyExpression(DbExpression e)
{
	return (DbExpressionKind.CrossApply == e.ExpressionKind || DbExpressionKind.OuterApply == e.ExpressionKind);
}
/// /// /// bool IsJoinExpression(DbExpression e) { return (DbExpressionKind.CrossJoin == e.ExpressionKind || DbExpressionKind.FullOuterJoin == e.ExpressionKind || DbExpressionKind.InnerJoin == e.ExpressionKind || DbExpressionKind.LeftOuterJoin == e.ExpressionKind); } /// /// This is used to determine if a calling expression needs to place /// round brackets around the translation of the expression e. /// /// Constants, parameters, properties and internal functions as operators do not require brackets, /// everything else does. /// /// /// true, if the expression needs brackets bool IsComplexExpression(DbExpression e) { switch (e.ExpressionKind) { case DbExpressionKind.Constant: case DbExpressionKind.ParameterReference: case DbExpressionKind.Property: return false; case DbExpressionKind.Function: return (!_functionNameToOperatorDictionary.ContainsKey((e as DbFunctionExpression).Function.Name)); default: return true; } } /// /// Determine if the owner expression can add its unique sql to the input's /// SqlSelectStatement /// /// The SqlSelectStatement of the input to the relational node. 
/// <summary>
/// Determines if the owner expression can add its unique sql to the input's
/// SqlSelectStatement.
/// </summary>
/// <param name="result">The SqlSelectStatement of the input to the relational node.</param>
/// <param name="expressionKind">The kind of the expression node (not the input's).</param>
/// <returns>true when the owner's clause can be merged into <paramref name="result"/>.</returns>
bool IsCompatible(SqlSelectStatement result, DbExpressionKind expressionKind)
{
	switch (expressionKind)
	{
		case DbExpressionKind.Distinct:
			return result.First == null
				// The projection after distinct may not project all
				// columns used in the Order By
				&& result.OrderBy.IsEmpty;

		case DbExpressionKind.Filter:
			return result.Select.IsEmpty
				&& result.Where.IsEmpty
				&& result.GroupBy.IsEmpty
				&& result.First == null;

		case DbExpressionKind.GroupBy:
			return result.Select.IsEmpty
				&& result.GroupBy.IsEmpty
				&& result.OrderBy.IsEmpty
				&& result.First == null;

		case DbExpressionKind.Limit:
		case DbExpressionKind.Element:
			return result.First == null;

		case DbExpressionKind.Project:
			return result.Select.IsEmpty
				&& result.GroupBy.IsEmpty;

		case DbExpressionKind.Skip:
			return result.Select.IsEmpty
				&& result.GroupBy.IsEmpty
				&& result.OrderBy.IsEmpty
				&& !result.IsDistinct;

		case DbExpressionKind.Sort:
			return result.Select.IsEmpty
				&& result.GroupBy.IsEmpty
				&& result.OrderBy.IsEmpty;

		default:
			Debug.Assert(false);
			throw new InvalidOperationException();
	}
}

/// <summary>
/// Decorates the name with double quotes and escapes double quotes inside, as
/// required by Firebird.
/// </summary>
/// <param name="name">The identifier; assumed not to be quoted already.</param>
internal static string QuoteIdentifier(string name)
{
	Debug.Assert(!string.IsNullOrEmpty(name));
	// We assume that the names are not quoted to begin with.
	return "\"" + name.Replace("\"", "\"\"") + "\"";
}

/// <summary>
/// Convenience overload with addDefaultColumns set to true.
/// </summary>
SqlSelectStatement VisitExpressionEnsureSqlStatement(DbExpression e)
{
	return VisitExpressionEnsureSqlStatement(e, true);
}

/// <summary>
/// This is called from expression visitors which require a select statement
/// as an argument. SqlGenerator needs its child to have a proper alias if the
/// child is just an extent or a join. The normal relational nodes result in
/// complete valid SQL statements; for the rest, we need to treat them as if
/// there was a dummy projection:
///   -- originally {expression}
///   -- change that to
///   SELECT * FROM {expression} as c
/// DbLimitExpression needs to start the statement but not add the default columns.
/// </summary>
SqlSelectStatement VisitExpressionEnsureSqlStatement(DbExpression e, bool addDefaultColumns)
{
	Debug.Assert(MetadataHelpers.IsCollectionType(e.ResultType));

	SqlSelectStatement result;
	switch (e.ExpressionKind)
	{
		case DbExpressionKind.Project:
		case DbExpressionKind.Filter:
		case DbExpressionKind.GroupBy:
		case DbExpressionKind.Sort:
			result = e.Accept(this) as SqlSelectStatement;
			break;

		default:
			var inputVarName = "c"; // any name will do - this is my random choice.
			_symbolTable.EnterScope();

			TypeUsage type = null;
			switch (e.ExpressionKind)
			{
				case DbExpressionKind.Scan:
				case DbExpressionKind.CrossJoin:
				case DbExpressionKind.FullOuterJoin:
				case DbExpressionKind.InnerJoin:
				case DbExpressionKind.LeftOuterJoin:
				case DbExpressionKind.CrossApply:
				case DbExpressionKind.OuterApply:
					type = MetadataHelpers.GetElementTypeUsage(e.ResultType);
					break;

				default:
					Debug.Assert(MetadataHelpers.IsCollectionType(e.ResultType));
					// NOTE(review): the generic argument was stripped by extraction;
					// restored as <CollectionType> — confirm against repository history.
					type = MetadataHelpers.GetEdmType<CollectionType>(e.ResultType).TypeUsage;
					break;
			}

			result = VisitInputExpression(e, inputVarName, type, out var fromSymbol);
			AddFromSymbol(result, inputVarName, fromSymbol);
			_symbolTable.ExitScope();
			break;
	}

	if (addDefaultColumns && result.Select.IsEmpty)
	{
		AddDefaultColumns(result);
	}

	return result;
}
/// SqlSelectStatement VisitFilterExpression(DbExpressionBinding input, DbExpression predicate, bool negatePredicate) { var varName = GetShortenedName(input.VariableName); var result = VisitInputExpression(input.Expression, varName, input.VariableType, out var fromSymbol); // Filter is compatible with OrderBy // but not with Project, another Filter or GroupBy if (!IsCompatible(result, DbExpressionKind.Filter)) { result = CreateNewSelectStatement(result, varName, input.VariableType, out fromSymbol); } _selectStatementStack.Push(result); _symbolTable.EnterScope(); AddFromSymbol(result, varName, fromSymbol); if (negatePredicate) { result.Where.Append("NOT ("); } result.Where.Append(predicate.Accept(this)); if (negatePredicate) { result.Where.Append(")"); } _symbolTable.ExitScope(); _selectStatementStack.Pop(); return result; } /// /// If the sql fragment for an input expression is not a SqlSelect statement /// or other acceptable form (e.g. an extent as a SqlBuilder), we need /// to wrap it in a form acceptable in a FROM clause. These are /// primarily the /// /// The set operation expressions - union all, intersect, except /// TVFs, which are conceptually similar to tables /// /// /// /// /// void WrapNonQueryExtent(SqlSelectStatement result, ISqlFragment sqlFragment, DbExpressionKind expressionKind) { switch (expressionKind) { case DbExpressionKind.Function: // TVF result.From.Append(sqlFragment); break; default: result.From.Append(" ("); result.From.Append(sqlFragment); result.From.Append(")"); break; } } /// /// Is this a builtin function (ie) does it have the builtinAttribute specified? 
/// <summary>
/// Is this a builtin function, i.e. does it have the BuiltInAttribute specified?
/// </summary>
private static bool IsBuiltInFunction(EdmFunction function)
{
	// Generic type argument restored; extraction of this file had stripped it.
	return MetadataHelpers.TryGetValueForMetadataProperty<bool>(function, "BuiltInAttribute");
}

/// <summary>
/// Writes the name of <paramref name="function"/> to <paramref name="result"/>,
/// honoring StoreFunctionNameAttribute and the builtin/canonical rules.
/// </summary>
void WriteFunctionName(SqlBuilder result, EdmFunction function)
{
	var storeFunctionName = MetadataHelpers.TryGetValueForMetadataProperty<string>(function, "StoreFunctionNameAttribute");
	if (string.IsNullOrEmpty(storeFunctionName))
	{
		storeFunctionName = function.Name;
	}

	// If the function is a builtin (ie) the BuiltIn attribute has been
	// specified, then, the function name should not be quoted; additionally,
	// no namespace should be used.
	if (IsBuiltInFunction(function))
	{
		if (MetadataHelpers.IsCanonicalFunction(function))
		{
			switch (storeFunctionName)
			{
				case "BigCount":
					result.Append("COUNT");
					break;
				default:
					result.Append(storeFunctionName.ToUpperInvariant());
					break;
			}
		}
		else
		{
			result.Append(storeFunctionName);
		}
	}
	else
	{
		//result.Append(QuoteIdentifier((string)function.MetadataProperties["Schema"].Value ?? "dbo"));
		//result.Append(".");
		result.Append(QuoteIdentifier(storeFunctionName));
	}
}

/// <summary>
/// Helper method for the Group By visitor.
/// Returns true if at least one of the aggregates in the given list
/// has an argument that is not a DbPropertyExpression over a
/// DbVariableReferenceExpression.
/// </summary>
static bool NeedsInnerQuery(IList<DbAggregate> aggregates)
{
	foreach (var aggregate in aggregates)
	{
		Debug.Assert(aggregate.Arguments.Count == 1);
		if (!IsPropertyOverVarRef(aggregate.Arguments[0]))
		{
			return true;
		}
	}
	return false;
}

/// <summary>
/// Determines whether the given expression is a DbPropertyExpression
/// over a DbVariableReferenceExpression.
/// </summary>
static bool IsPropertyOverVarRef(DbExpression expression)
{
	// Collapsed to a single pattern expression; the variable captured by the
	// second pattern in the original was never used.
	return expression is DbPropertyExpression propertyExpression
		&& propertyExpression.Instance is DbVariableReferenceExpression;
}
/// <summary>
/// Shortens the name of variable (tables, etc.).
/// Each distinct input name is mapped to a stable generated alias which is
/// cached in _shortenedNames, so repeated lookups return the same alias.
/// </summary>
internal string GetShortenedName(string name)
{
	if (_shortenedNames.TryGetValue(name, out var shortened))
	{
		return shortened;
	}
	var alias = BuildName(_shortenedNames.Count);
	_shortenedNames[name] = alias;
	return alias;
}

/// <summary>
/// Builds a short alias for the given index: 0..25 map to "A".."Z";
/// larger indexes are expanded digit-by-digit with radix 'Z'-'A' (25),
/// matching the original recursive divide/modulo scheme.
/// </summary>
internal static string BuildName(int index)
{
	const int offset = 'A';
	const int length = 'Z' - offset;

	// Iterative equivalent of the recursive form: peel off low-order
	// "digits" while more than a single letter is needed, then prepend
	// the most significant letter.
	var suffix = string.Empty;
	while (index > length)
	{
		suffix = (char)(offset + (index % length)) + suffix;
		index /= length;
	}
	return (char)(offset + index) + suffix;
}
/// <summary>
/// Represents one SELECT statement being built by <see cref="SqlGenerator"/>:
/// FIRST/SKIP, SELECT, FROM, WHERE, GROUP BY and ORDER BY parts, plus the
/// bookkeeping needed to rename FROM aliases that collide with outer scopes.
/// Generic type arguments on the collections were restored; extraction of this
/// file had stripped them.
/// </summary>
internal sealed class SqlSelectStatement : ISqlFragment
{
	#region Fields

	private bool _isDistinct;
	// When non-null, takes precedence over _fromExtents for alias renaming
	// (used when joins have been flattened).
	private List<Symbol> _allJoinExtents;
	private List<Symbol> _fromExtents;
	// Symbols from enclosing statements referenced here; their aliases must not
	// be shadowed by this statement's FROM aliases.
	private Dictionary<Symbol, bool> _outerExtents;
	private FirstClause _first;
	private SkipClause _skip;
	private SqlBuilder _select = new SqlBuilder();
	private SqlBuilder _from = new SqlBuilder();
	private SqlBuilder _where;
	private SqlBuilder _groupBy;
	private SqlBuilder _orderBy;
	// Indicates whether it is the top most select statement;
	// if not, Order By should be omitted unless there is a corresponding FIRST/SKIP.
	private bool _isTopMost;

	#endregion

	#region Properties

	/// <summary>ORDER BY fragment, created lazily on first access.</summary>
	public SqlBuilder OrderBy
	{
		get
		{
			if (null == _orderBy)
			{
				_orderBy = new SqlBuilder();
			}
			return _orderBy;
		}
	}

	#endregion

	#region Internal Properties

	/// <summary>
	/// Do we need to add a DISTINCT at the beginning of the SELECT.
	/// </summary>
	internal bool IsDistinct
	{
		get { return _isDistinct; }
		set { _isDistinct = value; }
	}

	internal List<Symbol> AllJoinExtents
	{
		get { return _allJoinExtents; }
		// We have a setter as well, even though this is a list,
		// since we use this field only in special cases.
		set { _allJoinExtents = value; }
	}

	internal List<Symbol> FromExtents
	{
		get
		{
			if (null == _fromExtents)
			{
				_fromExtents = new List<Symbol>();
			}
			return _fromExtents;
		}
	}

	internal Dictionary<Symbol, bool> OuterExtents
	{
		get
		{
			if (null == _outerExtents)
			{
				_outerExtents = new Dictionary<Symbol, bool>();
			}
			return _outerExtents;
		}
	}

	internal FirstClause First
	{
		get { return _first; }
		set
		{
			Debug.Assert(_first == null, "SqlSelectStatement.Top has already been set");
			_first = value;
		}
	}

	internal SkipClause Skip
	{
		get { return _skip; }
		set
		{
			Debug.Assert(_skip == null, "SqlSelectStatement.Skip has already been set");
			_skip = value;
		}
	}

	internal SqlBuilder Select
	{
		get { return _select; }
	}

	internal SqlBuilder From
	{
		get { return _from; }
	}

	internal SqlBuilder Where
	{
		get
		{
			if (null == _where)
			{
				_where = new SqlBuilder();
			}
			return _where;
		}
	}

	internal SqlBuilder GroupBy
	{
		get
		{
			if (null == _groupBy)
			{
				_groupBy = new SqlBuilder();
			}
			return _groupBy;
		}
	}

	internal bool IsTopMost
	{
		get { return _isTopMost; }
		set { _isTopMost = value; }
	}

	#endregion

	#region ISqlFragment Members

	/// <summary>
	/// Write out a SQL select statement as a string.
	/// We have to:
	/// <list type="number">
	/// <item>Check whether the aliases extents we use in this statement have
	/// to be renamed. We first create a list of all the aliases used by the
	/// outer extents. For each of the FromExtents (or AllJoinExtents if it is
	/// non-null), rename it if it collides with the previous list.</item>
	/// <item>Write each of the clauses (if it exists) as a string.</item>
	/// </list>
	/// </summary>
	public void WriteSql(SqlWriter writer, SqlGenerator sqlGenerator)
	{
		#region Check if FROM aliases need to be renamed

		// Create a list of the aliases used by the outer extents
		// JoinSymbols have to be treated specially.
		List<string> outerExtentAliases = null;
		if ((null != _outerExtents) && (0 < _outerExtents.Count))
		{
			foreach (var outerExtent in _outerExtents.Keys)
			{
				if (outerExtent is JoinSymbol joinSymbol)
				{
					foreach (var symbol in joinSymbol.FlattenedExtentList)
					{
						if (null == outerExtentAliases)
						{
							outerExtentAliases = new List<string>();
						}
						outerExtentAliases.Add(symbol.NewName);
					}
				}
				else
				{
					if (null == outerExtentAliases)
					{
						outerExtentAliases = new List<string>();
					}
					outerExtentAliases.Add(outerExtent.NewName);
				}
			}
		}

		// And then rename each of the FromExtents we have.
		// If AllJoinExtents is non-null - it has precedence.
		// The new name is derived from the old name - we append an increasing int.
		var extentList = AllJoinExtents ?? _fromExtents;
		if (null != extentList)
		{
			foreach (var fromAlias in extentList)
			{
				if ((null != outerExtentAliases) && outerExtentAliases.Contains(fromAlias.Name))
				{
					var i = sqlGenerator.AllExtentNames[fromAlias.Name];
					string newName;
					do
					{
						++i;
						newName = fromAlias.Name + i.ToString(System.Globalization.CultureInfo.InvariantCulture);
					} while (sqlGenerator.AllExtentNames.ContainsKey(newName));
					sqlGenerator.AllExtentNames[fromAlias.Name] = i;
					fromAlias.NewName = newName;

					// Add extent to list of known names (although i is always incrementing, "prefix11" can
					// eventually collide with "prefix1" when it is extended)
					sqlGenerator.AllExtentNames[newName] = 0;
				}

				// Add the current alias to the list, so that the extents
				// that follow do not collide with me.
				if (null == outerExtentAliases)
				{
					outerExtentAliases = new List<string>();
				}
				outerExtentAliases.Add(fromAlias.NewName);
			}
		}

		#endregion

		// Increase the indent, so that the Sql statement is nested by one tab.
		writer.Indent += 1; // ++ can be confusing in this context

		writer.Write("SELECT ");
		if (IsDistinct)
		{
			writer.Write("DISTINCT ");
		}

		if (First != null)
		{
			First.WriteSql(writer, sqlGenerator);
		}

		if (Skip != null)
		{
			Skip.WriteSql(writer, sqlGenerator);
		}

		if ((_select == null) || Select.IsEmpty)
		{
			Debug.Assert(false); // we have removed all possibilities of SELECT *.
			writer.Write("*");
		}
		else
		{
			Select.WriteSql(writer, sqlGenerator);
		}

		writer.WriteLine();
		writer.Write("FROM ");
		From.WriteSql(writer, sqlGenerator);

		if ((_where != null) && !Where.IsEmpty)
		{
			writer.WriteLine();
			writer.Write("WHERE ");
			Where.WriteSql(writer, sqlGenerator);
		}

		if ((_groupBy != null) && !GroupBy.IsEmpty)
		{
			writer.WriteLine();
			writer.Write("GROUP BY ");
			GroupBy.WriteSql(writer, sqlGenerator);
		}

		if ((_orderBy != null) && !OrderBy.IsEmpty && (IsTopMost || First != null || Skip != null))
		{
			writer.WriteLine();
			writer.Write("ORDER BY ");
			OrderBy.WriteSql(writer, sqlGenerator);
		}

		--writer.Indent;
	}

	#endregion
}
// We start at -1, since the first select statement will increment it to 0.
private int _indent = -1;
private bool _atBeginningOfLine = true;

#endregion

#region Properties

/// <summary>
/// The number of tabs to be added at the beginning of each new line.
/// </summary>
internal int Indent
{
	get { return _indent; }
	set { _indent = value; }
}

#endregion

#region Constructors

/// <summary>
/// Creates a writer over the given buffer using the invariant culture.
/// </summary>
public SqlWriter(StringBuilder b)
	: base(b, System.Globalization.CultureInfo.InvariantCulture)
{
}

#endregion

#region Methods

/// <summary>
/// Reset atBeginningOfLine if we detect the newline string.
/// Add as many tabs as the value of indent if we are at the
/// beginning of a line.
/// </summary>
public override void Write(string value)
{
	// A bare newline token just ends the current line.
	if (value == Environment.NewLine)
	{
		WriteLine();
		return;
	}

	if (_atBeginningOfLine)
	{
		if (_indent > 0)
		{
			base.Write(new string('\t', _indent));
		}
		_atBeginningOfLine = false;
	}
	base.Write(value);
}

public override void WriteLine()
{
	base.WriteLine();
	_atBeginningOfLine = true;
}

public override void WriteLine(string value)
{
	Write(value);
	WriteLine();
}

#endregion
/// <summary>
/// A symbol represents a named alias (extent, column, nested select) used while
/// generating SQL. Generic type arguments on the collections were restored;
/// extraction of this file had stripped them.
/// </summary>
internal class Symbol : ISqlFragment
{
	#region Fields

	// Column symbols keyed by column name, case-insensitive.
	private Dictionary<string, Symbol> _columns = new Dictionary<string, Symbol>(StringComparer.CurrentCultureIgnoreCase);
	private bool _needsRenaming = false;
	private bool _isUnnest = false;
	private string _name;
	private string _newName;
	private TypeUsage _type;

	#endregion

	#region Public Properties

	public string Name
	{
		get { return _name; }
	}

	public string NewName
	{
		get { return _newName; }
		set { _newName = value; }
	}

	#endregion

	#region Internal Properties

	internal Dictionary<string, Symbol> Columns
	{
		get { return _columns; }
	}

	internal bool NeedsRenaming
	{
		get { return _needsRenaming; }
		set { _needsRenaming = value; }
	}

	internal bool IsUnnest
	{
		get { return _isUnnest; }
		set { _isUnnest = value; }
	}

	internal TypeUsage Type
	{
		get { return _type; }
		set { _type = value; }
	}

	#endregion

	#region Constructors

	public Symbol(string name, TypeUsage type)
	{
		_name = name;
		_newName = name;
		Type = type;
	}

	#endregion

	#region ISqlFragment Members

	/// <summary>
	/// Write this symbol out as a string for sql. This is just
	/// the new name of the symbol (which could be the same as the old name).
	/// We rename columns here if necessary.
	/// </summary>
	public void WriteSql(SqlWriter writer, SqlGenerator sqlGenerator)
	{
		if (NeedsRenaming)
		{
			string newName;
			var i = sqlGenerator.AllColumnNames[NewName];
			do
			{
				++i;
				newName = Name + i.ToString(System.Globalization.CultureInfo.InvariantCulture);
			} while (sqlGenerator.AllColumnNames.ContainsKey(newName));
			sqlGenerator.AllColumnNames[NewName] = i;

			// Prevent it from being renamed repeatedly.
			NeedsRenaming = false;
			NewName = newName;
			// Add this column name to list of known names so that there are no
			// subsequent collisions.
			sqlGenerator.AllColumnNames[newName] = 0;
		}
		writer.Write(SqlGenerator.QuoteIdentifier(NewName));
	}

	#endregion
}

/// <summary>
/// A pair of symbols (source extent + column) used while flattening property
/// accesses; never rendered directly to SQL.
/// </summary>
internal class SymbolPair : ISqlFragment
{
	#region Fields

	private Symbol _source;
	private Symbol _column;

	#endregion

	#region Properties

	public Symbol Source
	{
		get { return _source; }
		set { _source = value; }
	}

	public Symbol Column
	{
		get { return _column; }
		set { _column = value; }
	}

	#endregion

	#region Constructors

	public SymbolPair(Symbol source, Symbol column)
	{
		Source = source;
		Column = column;
	}

	#endregion

	#region ISqlFragment Members

	public void WriteSql(SqlWriter writer, SqlGenerator sqlGenerator)
	{
		// Symbol pair should never be part of a SqlBuilder.
		Debug.Assert(false);
	}

	#endregion
}

/// <summary>
/// A stack of name-to-symbol scopes; lookups search from the innermost scope
/// outwards. Names are compared ordinal case-insensitively.
/// </summary>
internal sealed class SymbolTable
{
	#region Fields

	private List<Dictionary<string, Symbol>> _symbols = new List<Dictionary<string, Symbol>>();

	#endregion

	#region Methods

	internal void EnterScope()
	{
		_symbols.Add(new Dictionary<string, Symbol>(StringComparer.OrdinalIgnoreCase));
	}

	internal void ExitScope()
	{
		_symbols.RemoveAt(_symbols.Count - 1);
	}

	internal void Add(string name, Symbol value)
	{
		_symbols[_symbols.Count - 1][name] = value;
	}

	internal Symbol Lookup(string name)
	{
		// Innermost scope wins; TryGetValue avoids the double lookup
		// (ContainsKey + indexer) of the original.
		for (var i = _symbols.Count - 1; i >= 0; --i)
		{
			if (_symbols[i].TryGetValue(name, out var symbol))
			{
				return symbol;
			}
		}
		return null;
	}

	#endregion
}
/// <summary>
/// Generates a Firebird DDL script (tables, primary keys, foreign keys and the
/// marker column comments) from an EF6 store model. Generic type arguments were
/// restored; extraction of this file had stripped them.
/// </summary>
internal static class SsdlToFb
{
	/// <param name="storeItems">Store model to script; null yields an empty script.</param>
	/// <param name="providerManifestToken">Currently unused by this transformation.</param>
	public static string Transform(StoreItemCollection storeItems, string providerManifestToken)
	{
		var result = new StringBuilder();
		if (storeItems != null)
		{
			result.Append(string.Join(Environment.NewLine, Tables(storeItems)));
			result.AppendLine();
			result.Append(string.Join(Environment.NewLine, ForeignKeyConstraints(storeItems)));
			result.AppendLine();
		}
		return result.ToString();
	}

	// One RECREATE TABLE script (plus COMMENT ON markers) per entity set.
	static IEnumerable<string> Tables(StoreItemCollection storeItems)
	{
		foreach (var entitySet in storeItems.GetItems<EntityContainer>()[0].BaseEntitySets.OfType<EntitySet>())
		{
			var result = new StringBuilder();
			var additionalColumnComments = new Dictionary<string, string>();
			result.AppendFormat("RECREATE TABLE {0} (", SqlGenerator.QuoteIdentifier(MetadataHelpers.GetTableName(entitySet)));
			result.AppendLine();
			foreach (var property in MetadataHelpers.GetProperties(entitySet.ElementType))
			{
				var column = GenerateColumn(property);
				result.Append("\t");
				result.Append(column.ColumnName);
				result.Append(",");
				result.AppendLine();
				foreach (var item in column.ColumnComments)
					additionalColumnComments.Add(item.Key, item.Value);
			}
			result.AppendFormat("CONSTRAINT {0} PRIMARY KEY ({1})",
				SqlGenerator.QuoteIdentifier(string.Format("PK_{0}", MetadataHelpers.GetTableName(entitySet))),
				string.Join(", ", entitySet.ElementType.KeyMembers.Select(pk => SqlGenerator.QuoteIdentifier(pk.Name))));
			result.AppendLine();
			result.Append(");");
			result.AppendLine();
			// Identity PK columns are flagged with the #PK_GEN# marker comment.
			foreach (var identity in entitySet.ElementType.KeyMembers.Where(pk => MetadataHelpers.IsStoreGeneratedIdentity(pk)).Select(i => i.Name))
			{
				additionalColumnComments.Add(identity, "#PK_GEN#");
			}
			foreach (var comment in additionalColumnComments)
			{
				result.AppendFormat("COMMENT ON COLUMN {0}.{1} IS '{2}';",
					SqlGenerator.QuoteIdentifier(MetadataHelpers.GetTableName(entitySet)),
					SqlGenerator.QuoteIdentifier(comment.Key),
					comment.Value);
				result.AppendLine();
			}
			yield return result.ToString();
		}
	}

	// One ALTER TABLE ... ADD CONSTRAINT ... FOREIGN KEY script per association set.
	static IEnumerable<string> ForeignKeyConstraints(StoreItemCollection storeItems)
	{
		foreach (var associationSet in storeItems.GetItems<EntityContainer>()[0].BaseEntitySets.OfType<AssociationSet>())
		{
			var result = new StringBuilder();
			var constraint = associationSet.ElementType.ReferentialConstraints.Single();
			var end = associationSet.AssociationSetEnds[constraint.FromRole.Name];
			var end2 = associationSet.AssociationSetEnds[constraint.ToRole.Name];
			result.AppendFormat("ALTER TABLE {0} ADD CONSTRAINT {1} FOREIGN KEY ({2})",
				SqlGenerator.QuoteIdentifier(MetadataHelpers.GetTableName(end2.EntitySet)),
				SqlGenerator.QuoteIdentifier(string.Format("FK_{0}", associationSet.Name)),
				string.Join(", ", constraint.ToProperties.Select(fk => SqlGenerator.QuoteIdentifier(fk.Name))));
			result.AppendLine();
			result.AppendFormat("REFERENCES {0}({1})",
				SqlGenerator.QuoteIdentifier(MetadataHelpers.GetTableName(end.EntitySet)),
				string.Join(", ", constraint.FromProperties.Select(pk => SqlGenerator.QuoteIdentifier(pk.Name))));
			result.AppendLine();
			result.AppendFormat("ON DELETE {0}",
				end.CorrespondingAssociationEndMember.DeleteBehavior == OperationAction.Cascade ? "CASCADE" : "NO ACTION");
			result.Append(";");
			yield return result.ToString();
		}
	}

	class GenerateColumnResult
	{
		public string ColumnName { get; set; }
		public IDictionary<string, string> ColumnComments { get; set; }
	}

	// Renders one column definition plus any marker comments (#BOOL#, #GUID#)
	// that downstream tooling uses to recognize emulated types.
	static GenerateColumnResult GenerateColumn(EdmProperty property)
	{
		var column = new StringBuilder();
		var columnComments = new Dictionary<string, string>();
		column.Append(SqlGenerator.QuoteIdentifier(property.Name));
		column.Append(" ");
		column.Append(SqlGenerator.GetSqlPrimitiveType(property.TypeUsage));
		switch (MetadataHelpers.GetEdmType(property.TypeUsage).PrimitiveTypeKind)
		{
			case PrimitiveTypeKind.Boolean:
				column.AppendFormat(" CHECK ({0} IN (1,0))", SqlGenerator.QuoteIdentifier(property.Name));
				columnComments.Add(property.Name, "#BOOL#");
				break;
			case PrimitiveTypeKind.Guid:
				columnComments.Add(property.Name, "#GUID#");
				break;
		}
		if (!property.Nullable)
		{
			column.Append(" NOT NULL");
		}
		return new GenerateColumnResult() { ColumnName = column.ToString(), ColumnComments = columnComments };
	}
}
/// <summary>
/// Helpers to read facet values (Precision, MaxLength, Scale) off a
/// <see cref="TypeUsage"/>. The three public methods previously repeated the
/// same TryGetValue/IsUnbounded/null dance; they now share one generic helper.
/// </summary>
internal static class TypeHelpers
{
	/// <summary>Gets the Precision facet value, when present, bounded and non-null.</summary>
	public static bool TryGetPrecision(TypeUsage tu, out byte precision)
	{
		return TryGetFacetValue(tu, "Precision", out precision);
	}

	/// <summary>Gets the MaxLength facet value, when present, bounded and non-null.</summary>
	public static bool TryGetMaxLength(TypeUsage tu, out int maxLength)
	{
		return TryGetFacetValue(tu, "MaxLength", out maxLength);
	}

	/// <summary>Gets the Scale facet value, when present, bounded and non-null.</summary>
	public static bool TryGetScale(TypeUsage tu, out byte scale)
	{
		return TryGetFacetValue(tu, "Scale", out scale);
	}

	// Shared implementation; the (T) unbox matches the per-method casts of the
	// original ((byte)f.Value / (int)f.Value).
	private static bool TryGetFacetValue<T>(TypeUsage tu, string facetName, out T value)
	{
		value = default;
		if (tu.Facets.TryGetValue(facetName, false, out var f)
			&& !f.IsUnbounded
			&& f.Value != null)
		{
			value = (T)f.Value;
			return true;
		}
		return false;
	}
}
/// <summary>
/// Common base for the EF6 provider tests: registers the Firebird ADO.NET
/// factory and provider services once, and hands out contexts over the shared
/// test connection. The generic type argument on GetDbContext was restored;
/// extraction of this file had stripped it.
/// </summary>
public abstract class EntityFrameworkTestsBase : FbTestsBase
{
	static EntityFrameworkTestsBase()
	{
#if !NETFRAMEWORK
		// example/documentation for .NET Framework
		System.Data.Common.DbProviderFactories.RegisterFactory(FbProviderServices.ProviderInvariantName, FirebirdClientFactory.Instance);
#endif
		DbConfiguration.SetConfiguration(new FbTestDbContext.Conf());
	}

	public EntityFrameworkTestsBase()
		: base(FbServerType.Default, false, FbWireCrypt.Enabled, false)
	{ }

	/// <summary>Returns the EF6 provider services singleton under test.</summary>
	public DbProviderServices GetProviderServices()
	{
		return FbProviderServices.Instance;
	}

	/// <summary>
	/// Creates a context of the requested type over the shared test connection;
	/// the database initializer is disabled and the connection closed first so
	/// EF can reopen it itself.
	/// </summary>
	public TContext GetDbContext<TContext>()
		where TContext : FbTestDbContext
	{
		Database.SetInitializer<TContext>(null);
		Connection.Close();
		return (TContext)Activator.CreateInstance(typeof(TContext), Connection);
	}
}
/// <summary>
/// Minimal DbContext wired to a Firebird connection; the nested Conf registers
/// the Firebird EF6 provider services for the test runs.
/// </summary>
public class FbTestDbContext : DbContext
{
	public class Conf : DbConfiguration
	{
		public Conf()
		{
			SetProviderServices(FbProviderServices.ProviderInvariantName, FbProviderServices.Instance);
		}
	}

	// contextOwnsConnection: false — the test base owns and reuses the connection.
	public FbTestDbContext(FbConnection conn)
		: base(conn, false)
	{ }
}
/// <summary>
/// Smoke tests for the EF6 provider plumbing (provider services, manifest,
/// manifest token). The generic type argument on IsInstanceOf was restored;
/// extraction of this file had stripped it.
/// </summary>
public class InfrastructureTests : EntityFrameworkTestsBase
{
	[Test]
	public void DbProviderServicesTest()
	{
		object dbproviderservices = GetProviderServices();
		Assert.IsNotNull(dbproviderservices);
		Assert.IsInstanceOf<FbProviderServices>(dbproviderservices);
	}

	[Test]
	public void ProviderManifestTest()
	{
		var manifest = GetProviderServices().GetProviderManifest("foobar");
		Assert.IsNotNull(manifest);
	}

	[Test]
	public void ProviderManifestTokenTest()
	{
		// Token is expected to be a two-part server version ("major.minor").
		var token = GetProviderServices().GetProviderManifestToken(Connection);
		Assert.IsNotNull(token);
		Assert.IsNotEmpty(token);
		var v = new Version(token);
		Assert.Greater(v.Major, 0);
		Assert.GreaterOrEqual(v.Minor, 0);
		Assert.AreEqual(v.Build, -1);
		Assert.AreEqual(v.Revision, -1);
	}
}
/// <summary>
/// SQL-generation regression tests. Generic type arguments (Entity&lt;T&gt;,
/// IDbSet&lt;T&gt;, GetDbContext&lt;T&gt;, ICollection&lt;T&gt;) were restored;
/// extraction of this file had stripped them. All asserted SQL strings are
/// reproduced byte-for-byte.
/// </summary>
public class QueryTests : EntityFrameworkTestsBase
{
	class QueryTest1Context : FbTestDbContext
	{
		public QueryTest1Context(FbConnection conn)
			: base(conn)
		{ }

		protected override void OnModelCreating(DbModelBuilder modelBuilder)
		{
			base.OnModelCreating(modelBuilder);
			var queryTest1Entity = modelBuilder.Entity<QueryTest1Entity>();
			queryTest1Entity.Property(x => x.ID).HasColumnName("ID");
			queryTest1Entity.ToTable("TEST_QUERYTEST1ENTITY");
		}

		public IDbSet<QueryTest1Entity> QueryTest1Entity { get; set; }
	}
	[Test]
	public void QueryTest1()
	{
		using (var c = GetDbContext<QueryTest1Context>())
		{
			c.Database.ExecuteSqlCommand("create table test_querytest1entity (id int primary key)");
			Assert.DoesNotThrow(() => c.QueryTest1Entity.Max(x => x.ID));
		}
	}

	class QueryTest2Context : FbTestDbContext
	{
		public QueryTest2Context(FbConnection conn)
			: base(conn)
		{ }

		protected override void OnModelCreating(DbModelBuilder modelBuilder)
		{
			base.OnModelCreating(modelBuilder);
		}

		public IDbSet<Foo> Foos { get; set; }
	}
	[Test]
	public void QueryTest2()
	{
		using (var c = GetDbContext<QueryTest2Context>())
		{
			var q = c.Foos
				.OrderBy(x => x.ID)
				.Take(45).Skip(0)
				.Select(x => new
				{
					x.ID,
					x.BazID,
					BazID2 = x.Baz.ID,
					x.Baz.BazString,
				});
			Assert.DoesNotThrow(() => { q.ToString(); });
		}
	}

	class QueryTest3Context : FbTestDbContext
	{
		public QueryTest3Context(FbConnection conn)
			: base(conn)
		{ }

		protected override void OnModelCreating(DbModelBuilder modelBuilder)
		{
			base.OnModelCreating(modelBuilder);
		}

		public IDbSet<Foo> Foos { get; set; }
	}
	[Test]
	public void QueryTest3()
	{
		using (var c = GetDbContext<QueryTest3Context>())
		{
			var q = c.Foos
				.OrderByDescending(m => m.Bars.Count())
				.Skip(3)
				.SelectMany(m => m.Bars);
			Assert.DoesNotThrow(() => { q.ToString(); });
		}
	}

	class ProperVarcharLengthForConstantContext : FbTestDbContext
	{
		public ProperVarcharLengthForConstantContext(FbConnection conn)
			: base(conn)
		{ }

		protected override void OnModelCreating(DbModelBuilder modelBuilder)
		{
			base.OnModelCreating(modelBuilder);
		}

		public IDbSet<Bar> Bars { get; set; }
	}
	[Test]
	public void ProperVarcharLengthForConstantTest()
	{
		using (var c = GetDbContext<ProperVarcharLengthForConstantContext>())
		{
			var q = c.Bars.Where(x => x.BarString == "TEST");
			StringAssert.Contains("CAST(_UTF8'TEST' AS VARCHAR(8191))", q.ToString());
		}
	}

	class DbFunctionsContext : FbTestDbContext
	{
		public DbFunctionsContext(FbConnection conn)
			: base(conn)
		{ }

		protected override void OnModelCreating(DbModelBuilder modelBuilder)
		{
			base.OnModelCreating(modelBuilder);
		}

		public IDbSet<Qux> Quxs { get; set; }
	}
	[Test]
	public void QueryTestDbFunctionsCreateDateTime1()
	{
		using (var c = GetDbContext<DbFunctionsContext>())
		{
			var q = c.Quxs
				.Where(x => x.QuxDateTime == DbFunctions.CreateDateTime(2020, 3, 19, 14, 12, 0))
				.ToString();
			StringAssert.Contains("CAST('2020-3-19 14:12:00' AS TIMESTAMP)", q.ToString());
		}
	}
	[Test]
	public void QueryTestDbFunctionsCreateDateTime2()
	{
		using (var c = GetDbContext<DbFunctionsContext>())
		{
			var q = c.Quxs
				.Where(x => x.QuxDateTime == DbFunctions.CreateDateTime(2020, 3, 19, 14, 12, 36))
				.ToString();
			StringAssert.Contains("DATEADD(SECOND, CAST(36 AS DOUBLE PRECISION), CAST('2020-3-19 14:12:00' AS TIMESTAMP))", q.ToString());
		}
	}
	[Test]
	public void QueryTestDbFunctionsCreateDateTime3()
	{
		using (var c = GetDbContext<DbFunctionsContext>())
		{
			var q = c.Quxs
				.Where(x => x.QuxDateTime == DbFunctions.CreateDateTime(null, null, null, null, null, null))
				.ToString();
			StringAssert.Contains("DATEADD(DAY, -1, DATEADD(MONTH, -1, DATEADD(YEAR, -1, CAST('0001-01-01 00:00:00' AS TIMESTAMP))))", q.ToString());
		}
	}
	[Test]
	public void QueryTestDbFunctionsCreateDateTime4()
	{
		using (var c = GetDbContext<DbFunctionsContext>())
		{
			var q = c.Quxs
				.Where(x => x.QuxDateTime == DbFunctions.CreateDateTime(x.QuxYear, x.QuxMonth, x.QuxDay, null, null, null))
				.ToString();
			StringAssert.Contains("DATEADD(DAY, -1, DATEADD(MONTH, -1, DATEADD(YEAR, -1, DATEADD(DAY, \"B\".\"QuxDay\", DATEADD(MONTH, \"B\".\"QuxMonth\", DATEADD(YEAR, \"B\".\"QuxYear\", CAST('0001-01-01 00:00:00' AS TIMESTAMP)))))))", q.ToString());
		}
	}
}

class QueryTest1Entity
{
	public int ID { get; set; }
}

class Foo
{
	public int ID { get; set; }
	public int BazID { get; set; }
	public ICollection<Bar> Bars { get; set; }
	public Baz Baz { get; set; }
}

class Bar
{
	public int ID { get; set; }
	public int FooID { get; set; }
	public string BarString { get; set; }
	public Foo Foo { get; set; }
}

class Baz
{
	public int ID { get; set; }
	public string BazString { get; set; }
	public ICollection<Foo> Foos { get; set; }
}

class Qux
{
	public int ID { get; set; }
	public DateTime QuxDateTime { get; set; }
	public int QuxYear { get; set; }
	public int QuxMonth { get; set; }
	public int QuxDay { get; set; }
}
using System;

namespace Org.BouncyCastle.Crypto
{
	/**
	 * all parameter classes implement this.
	 */
	// Marker interface: vendored from BouncyCastle for the RC4 wire-crypt plugin.
	internal interface ICipherParameters
	{
	}
}

// ================================================
// FILE: src/FirebirdSql.Data.External/RC4/KeyParameter.cs
// ================================================
using System;
using Org.BouncyCastle.Crypto;

namespace Org.BouncyCastle.Crypto.Parameters
{
	// Vendored from BouncyCastle: immutable holder for a symmetric key's raw bytes.
	internal class KeyParameter
		: ICipherParameters
	{
		private readonly byte[] key;

		public KeyParameter(
			byte[] key)
		{
			if (key == null)
				throw new ArgumentNullException("key");

			// Defensive copy — callers cannot mutate the stored key afterwards.
			this.key = (byte[]) key.Clone();
		}

		// Copies keyLen bytes starting at keyOff; bounds are validated first.
		public KeyParameter(
			byte[] key,
			int keyOff,
			int keyLen)
		{
			if (key == null)
				throw new ArgumentNullException("key");
			if (keyOff < 0 || keyOff > key.Length)
				throw new ArgumentOutOfRangeException("keyOff");
			if (keyLen < 0 || keyLen > (key.Length - keyOff))
				throw new ArgumentOutOfRangeException("keyLen");

			this.key = new byte[keyLen];
			Array.Copy(key, keyOff, this.key, 0, keyLen);
		}

		// Returns a fresh copy so the internal key stays immutable.
		public byte[] GetKey()
		{
			return (byte[]) key.Clone();
		}
	}
}
//: IStreamCipher { private readonly static int STATE_LENGTH = 256; /* * variables to hold the state of the RC4 engine * during encryption and decryption */ private byte[] engineState; private int x; private int y; private byte[] workingKey; /** * initialise a RC4 cipher. * * @param forEncryption whether or not we are for encryption. * @param parameters the parameters required to set up the cipher. * @exception ArgumentException if the parameters argument is * inappropriate. */ public virtual void Init( bool forEncryption, ICipherParameters parameters) { if (parameters is KeyParameter) { /* * RC4 encryption and decryption is completely * symmetrical, so the 'forEncryption' is * irrelevant. */ workingKey = ((KeyParameter)parameters).GetKey(); SetKey(workingKey); return; } //throw new ArgumentException("invalid parameter passed to RC4 init - " + Platform.GetTypeName(parameters)); throw new ArgumentException("invalid parameter passed to RC4 init"); } public virtual string AlgorithmName { get { return "RC4"; } } public virtual byte ReturnByte( byte input) { x = (x + 1) & 0xff; y = (engineState[x] + y) & 0xff; // swap byte tmp = engineState[x]; engineState[x] = engineState[y]; engineState[y] = tmp; // xor return (byte)(input ^ engineState[(engineState[x] + engineState[y]) & 0xff]); } public virtual void ProcessBytes( byte[] input, int inOff, int length, byte[] output, int outOff) { //Check.DataLength(input, inOff, length, "input buffer too short"); //Check.OutputLength(output, outOff, length, "output buffer too short"); for (int i = 0; i < length ; i++) { x = (x + 1) & 0xff; y = (engineState[x] + y) & 0xff; // swap byte tmp = engineState[x]; engineState[x] = engineState[y]; engineState[y] = tmp; // xor output[i+outOff] = (byte)(input[i + inOff] ^ engineState[(engineState[x] + engineState[y]) & 0xff]); } } public virtual void Reset() { SetKey(workingKey); } // Private implementation private void SetKey( byte[] keyBytes) { workingKey = keyBytes; // System.out.println("the 
key length is ; "+ workingKey.Length); x = 0; y = 0; if (engineState == null) { engineState = new byte[STATE_LENGTH]; } // reset the state of the engine for (int i=0; i < STATE_LENGTH; i++) { engineState[i] = (byte)i; } int i1 = 0; int i2 = 0; for (int i=0; i < STATE_LENGTH; i++) { i2 = ((keyBytes[i1] & 0xff) + engineState[i] + i2) & 0xff; // do the byte-swap inline byte tmp = engineState[i]; engineState[i] = engineState[i2]; engineState[i2] = tmp; i1 = (i1+1) % keyBytes.Length; } } } } ================================================ FILE: src/FirebirdSql.Data.External/zlib/Deflate.cs ================================================ // Deflate.cs // ------------------------------------------------------------------ // // Copyright (c) 2009 Dino Chiesa and Microsoft Corporation. // All rights reserved. // // This code module is part of DotNetZip, a zipfile class library. // // ------------------------------------------------------------------ // // This code is licensed under the Microsoft Public License. // See the file License.txt for the license details. // More info on: http://dotnetzip.codeplex.com // // ------------------------------------------------------------------ // // last saved (in emacs): // Time-stamp: <2011-August-03 19:52:15> // // ------------------------------------------------------------------ // // This module defines logic for handling the Deflate or compression. // // This code is based on multiple sources: // - the original zlib v1.2.3 source, which is Copyright (C) 1995-2005 Jean-loup Gailly. // - the original jzlib, which is Copyright (c) 2000-2003 ymnk, JCraft,Inc. // // However, this code is significantly different from both. // The object model is not the same, and many of the behaviors are different. // // In keeping with the license for these other works, the copyrights for // jzlib and zlib are here. // // ----------------------------------------------------------------------- // Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. 
All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in // the documentation and/or other materials provided with the distribution. // // 3. The names of the authors may not be used to endorse or promote products // derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND // FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT, // INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, // OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, // EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // ----------------------------------------------------------------------- // // This program is based on zlib-1.1.3; credit to authors // Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu) // and contributors of zlib. 
// -----------------------------------------------------------------------

using System;

namespace Ionic.Zlib
{
	// Result of one compression step (returned by the Deflate* functions).
	internal enum BlockState
	{
		NeedMore = 0,  // block not completed, need more input or more output
		BlockDone,     // block flush performed
		FinishStarted, // finish started, need only more output at next deflate
		FinishDone     // finish done, accept no more input or output
	}

	// Which matching strategy a compression level maps to (see Config.Table).
	internal enum DeflateFlavor
	{
		Store,
		Fast,
		Slow
	}

	internal sealed class DeflateManager
	{
		private static readonly int MEM_LEVEL_MAX = 9;
		private static readonly int MEM_LEVEL_DEFAULT = 8;

		internal delegate BlockState CompressFunc(FlushType flush);

		// Per-compression-level tuning knobs; one Config per level in Table.
		internal class Config
		{
			// Use a faster search when the previous match is longer than this
			internal int GoodLength; // reduce lazy search above this match length

			// Attempt to find a better match only when the current match is
			// strictly smaller than this value. This mechanism is used only for
			// compression levels >= 4.  For levels 1,2,3: MaxLazy is actually
			// MaxInsertLength. (See DeflateFast)
			internal int MaxLazy; // do not perform lazy search above this match length

			internal int NiceLength; // quit search above this match length

			// To speed up deflation, hash chains are never searched beyond this
			// length.  A higher limit improves compression ratio but degrades the speed.
			internal int MaxChainLength;

			internal DeflateFlavor Flavor;

			private Config(int goodLength, int maxLazy, int niceLength, int maxChainLength, DeflateFlavor flavor)
			{
				this.GoodLength = goodLength;
				this.MaxLazy = maxLazy;
				this.NiceLength = niceLength;
				this.MaxChainLength = maxChainLength;
				this.Flavor = flavor;
			}

			// Returns the tuning parameters for the given compression level
			// (level is used directly as the index into Table).
			public static Config Lookup(CompressionLevel level)
			{
				return Table[(int)level];
			}

			static Config()
			{
				Table = new Config[] {
					new Config(0, 0, 0, 0, DeflateFlavor.Store),
					new Config(4, 4, 8, 4, DeflateFlavor.Fast),
					new Config(4, 5, 16, 8, DeflateFlavor.Fast),
					new Config(4, 6, 32, 32, DeflateFlavor.Fast),

					new Config(4, 4, 16, 16, DeflateFlavor.Slow),
					new Config(8, 16, 32, 32, DeflateFlavor.Slow),
					new Config(8, 16, 128, 128, DeflateFlavor.Slow),
					new Config(8, 32, 128, 256, DeflateFlavor.Slow),
					new Config(32, 128, 258, 1024, DeflateFlavor.Slow),
					new Config(32, 258, 258, 4096, DeflateFlavor.Slow),
				};
			}

			private static readonly Config[] Table;
		}

		private CompressFunc DeflateFunction;

		private static readonly System.String[] _ErrorMessage = new System.String[]
		{
			"need dictionary",
			"stream end",
			"",
			"file error",
			"stream error",
			"data error",
			"insufficient memory",
			"buffer error",
			"incompatible version",
			""
		};

		// preset dictionary flag in zlib header
		private static readonly int PRESET_DICT = 0x20;

		private static readonly int INIT_STATE = 42;
		private static readonly int BUSY_STATE = 113;
		private static readonly int FINISH_STATE = 666;

		// The deflate compression method
		private static readonly int Z_DEFLATED = 8;

		private static readonly int STORED_BLOCK = 0;
		private static readonly int STATIC_TREES = 1;
		private static readonly int DYN_TREES = 2;

		// The three kinds of block type
		private static readonly int Z_BINARY = 0;
		private static readonly int Z_ASCII = 1;
		private static readonly int Z_UNKNOWN = 2;

		private static readonly int Buf_size = 8 * 2;

		private static readonly int MIN_MATCH = 3;
		private static readonly int MAX_MATCH = 258;

		private static readonly int MIN_LOOKAHEAD = (MAX_MATCH + MIN_MATCH + 1);

		private static readonly int HEAP_SIZE = (2 * InternalConstants.L_CODES + 1);

		private static readonly int END_BLOCK = 256;

		internal ZlibCodec _codec; // the zlib encoder/decoder

		internal int status; // as the name implies

		internal byte[] pending;   // output still pending - waiting to be compressed
		internal int nextPending;  // index of next pending byte to output to the stream
		internal int pendingCount; // number of bytes in the pending buffer

		internal sbyte data_type; // UNKNOWN, BINARY or ASCII
		internal int last_flush;  // value of flush param for previous deflate call

		internal int w_size; // LZ77 window size (32K by default)
		internal int w_bits; // log2(w_size)  (8..16)
		internal int w_mask; // w_size - 1

		//internal byte[] dictionary;
		internal byte[] window;

		// Sliding window. Input bytes are read into the second half of the window,
		// and move to the first half later to keep a dictionary of at least wSize
		// bytes. With this organization, matches are limited to a distance of
		// wSize-MAX_MATCH bytes, but this ensures that IO is always
		// performed with a length multiple of the block size.
		//
		// To do: use the user input buffer as sliding window.

		internal int window_size;
		// Actual size of window: 2*wSize, except when the user input buffer
		// is directly used as sliding window.

		internal short[] prev;
		// Link to older string with same hash index. To limit the size of this
		// array to 64K, this link is maintained only for the last 32K strings.
		// An index in this array is thus a window index modulo 32K.

		internal short[] head; // Heads of the hash chains or NIL.

		internal int ins_h;     // hash index of string to be inserted
		internal int hash_size; // number of elements in hash table
		internal int hash_bits; // log2(hash_size)
		internal int hash_mask; // hash_size-1

		// Number of bits by which ins_h must be shifted at each input
		// step. It must be such that after MIN_MATCH steps, the oldest
		// byte no longer takes part in the hash key, that is:
		// hash_shift * MIN_MATCH >= hash_bits
		internal int hash_shift;

		// Window position at the beginning of the current output block. Gets
		// negative when the window is moved backwards.
		internal int block_start;

		Config config;

		internal int match_length;    // length of best match
		internal int prev_match;      // previous match
		internal int match_available; // set if previous match exists
		internal int strstart;        // start of string to insert into.....????
		internal int match_start;     // start of matching string
		internal int lookahead;       // number of valid bytes ahead in window

		// Length of the best match at previous step. Matches not greater than this
		// are discarded. This is used in the lazy match evaluation.
		internal int prev_length;

		// Insert new strings in the hash table only if the match length is not
		// greater than this length. This saves time but degrades compression.
		// max_insert_length is used only for compression levels <= 3.

		internal CompressionLevel compressionLevel;       // compression level (1..9)
		internal CompressionStrategy compressionStrategy; // favor or force Huffman coding

		internal short[] dyn_ltree; // literal and length tree
		internal short[] dyn_dtree; // distance tree
		internal short[] bl_tree;   // Huffman tree for bit lengths

		internal Tree treeLiterals = new Tree();   // desc for literal tree
		internal Tree treeDistances = new Tree();  // desc for distance tree
		internal Tree treeBitLengths = new Tree(); // desc for bit length tree

		// number of codes at each bit length for an optimal tree
		internal short[] bl_count = new short[InternalConstants.MAX_BITS + 1];

		// heap used to build the Huffman trees
		internal int[] heap = new int[2 * InternalConstants.L_CODES + 1];

		internal int heap_len; // number of elements in the heap
		internal int heap_max; // element of largest frequency

		// The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.
		// The same heap array is used to build all trees.

		// Depth of each subtree used as tie breaker for trees of equal frequency
		internal sbyte[] depth = new sbyte[2 * InternalConstants.L_CODES + 1];

		internal int _lengthOffset; // index for literals or lengths

		// Size of match buffer for literals/lengths.  There are 4 reasons for
		// limiting lit_bufsize to 64K:
		//   - frequencies can be kept in 16 bit counters
		//   - if compression is not successful for the first block, all input
		//     data is still in the window so we can still emit a stored block even
		//     when input comes from standard input.  (This can also be done for
		//     all blocks if lit_bufsize is not greater than 32K.)
		//   - if compression is not successful for a file smaller than 64K, we can
		//     even emit a stored file instead of a stored block (saving 5 bytes).
		//     This is applicable only for zip (not gzip or zlib).
		//   - creating new Huffman trees less frequently may not provide fast
		//     adaptation to changes in the input data statistics. (Take for
		//     example a binary file with poorly compressible code followed by
		//     a highly compressible string table.) Smaller buffer sizes give
		//     fast adaptation but have of course the overhead of transmitting
		//     trees more frequently.
		internal int lit_bufsize;

		internal int last_lit; // running index in l_buf

		// Buffer for distances. To simplify the code, d_buf and l_buf have
		// the same number of elements. To use different lengths, an extra flag
		// array would be necessary.

		internal int _distanceOffset; // index into pending; points to distance data??

		internal int opt_len;      // bit length of current block with optimal trees
		internal int static_len;   // bit length of current block with static trees
		internal int matches;      // number of string matches in current block
		internal int last_eob_len; // bit length of EOB code for last block

		// Output buffer. bits are inserted starting at the bottom (least
		// significant bits).
		internal short bi_buf;

		// Number of valid bits in bi_buf.  All bits above the last valid bit
		// are always zero.
		internal int bi_valid;

		internal DeflateManager()
		{
			dyn_ltree = new short[HEAP_SIZE * 2];
			dyn_dtree = new short[(2 * InternalConstants.D_CODES + 1) * 2]; // distance tree
			bl_tree = new short[(2 * InternalConstants.BL_CODES + 1) * 2];  // Huffman tree for bit lengths
		}

		// lm_init
		// Reset all LZ77 matching state for a (re)started stream; picks the
		// Config for the current level and installs the matching Deflate* func.
		private void _InitializeLazyMatch()
		{
			window_size = 2 * w_size;

			// clear the hash - workitem 9063
			Array.Clear(head, 0, hash_size);
			//for (int i = 0; i < hash_size; i++) head[i] = 0;

			config = Config.Lookup(compressionLevel);
			SetDeflater();

			strstart = 0;
			block_start = 0;
			lookahead = 0;
			match_length = prev_length = MIN_MATCH - 1;
			match_available = 0;
			ins_h = 0;
		}

		// Initialize the tree data structures for a new zlib stream.
		private void _InitializeTreeData()
		{
			treeLiterals.dyn_tree = dyn_ltree;
			treeLiterals.staticTree = StaticTree.Literals;

			treeDistances.dyn_tree = dyn_dtree;
			treeDistances.staticTree = StaticTree.Distances;

			treeBitLengths.dyn_tree = bl_tree;
			treeBitLengths.staticTree = StaticTree.BitLengths;

			bi_buf = 0;
			bi_valid = 0;
			last_eob_len = 8; // enough lookahead for inflate

			// Initialize the first block of the first file:
			_InitializeBlocks();
		}

		// Zero the frequency halves of all three dynamic trees and reset the
		// per-block counters. Called at stream start and after each flushed block.
		internal void _InitializeBlocks()
		{
			// Initialize the trees.
			for (int i = 0; i < InternalConstants.L_CODES; i++)
				dyn_ltree[i * 2] = 0;
			for (int i = 0; i < InternalConstants.D_CODES; i++)
				dyn_dtree[i * 2] = 0;
			for (int i = 0; i < InternalConstants.BL_CODES; i++)
				bl_tree[i * 2] = 0;

			dyn_ltree[END_BLOCK * 2] = 1;
			opt_len = static_len = 0;
			last_lit = matches = 0;
		}

		// Restore the heap property by moving down the tree starting at node k,
		// exchanging a node with the smallest of its two sons if necessary, stopping
		// when the heap property is re-established (each father smaller than its
		// two sons).
		// Sift the node at heap index k down until the heap property holds.
		// 'tree' supplies the frequencies being compared; ties are broken by
		// the 'depth' array (see _IsSmaller).
		internal void pqdownheap(short[] tree, int k)
		{
			int v = heap[k];
			int j = k << 1; // left son of k
			while (j <= heap_len)
			{
				// Set j to the smallest of the two sons:
				if (j < heap_len && _IsSmaller(tree, heap[j + 1], heap[j], depth))
				{
					j++;
				}
				// Exit if v is smaller than both sons
				if (_IsSmaller(tree, v, heap[j], depth))
					break;

				// Exchange v with the smallest son
				heap[k] = heap[j]; k = j;
				// And continue down the tree, setting j to the left son of k
				j <<= 1;
			}
			heap[k] = v;
		}

		// Heap comparison: order first by frequency (tree[n*2]), then by
		// subtree depth so that trees of equal frequency compare deterministically.
		internal static bool _IsSmaller(short[] tree, int n, int m, sbyte[] depth)
		{
			short tn2 = tree[n * 2];
			short tm2 = tree[m * 2];
			return (tn2 < tm2 ||
					(tn2 == tm2 && depth[n] <= depth[m]));
		}

		// Scan a literal or distance tree to determine the frequencies of the codes
		// in the bit length tree.
		internal void scan_tree(short[] tree, int max_code)
		{
			int n;                              // iterates over all tree elements
			int prevlen = -1;                   // last emitted length
			int curlen;                         // length of current code
			int nextlen = (int)tree[0 * 2 + 1]; // length of next code
			int count = 0;                      // repeat count of the current code
			int max_count = 7;                  // max repeat count
			int min_count = 4;                  // min repeat count

			if (nextlen == 0)
			{
				max_count = 138; min_count = 3;
			}
			tree[(max_code + 1) * 2 + 1] = (short)0x7fff; // guard //??

			for (n = 0; n <= max_code; n++)
			{
				curlen = nextlen; nextlen = (int)tree[(n + 1) * 2 + 1];
				if (++count < max_count && curlen == nextlen)
				{
					// still inside a run of identical code lengths
					continue;
				}
				else if (count < min_count)
				{
					// run too short to encode as a repeat: count each length directly
					bl_tree[curlen * 2] = (short)(bl_tree[curlen * 2] + count);
				}
				else if (curlen != 0)
				{
					if (curlen != prevlen)
						bl_tree[curlen * 2]++;
					bl_tree[InternalConstants.REP_3_6 * 2]++;
				}
				else if (count <= 10)
				{
					bl_tree[InternalConstants.REPZ_3_10 * 2]++;
				}
				else
				{
					bl_tree[InternalConstants.REPZ_11_138 * 2]++;
				}
				count = 0; prevlen = curlen;
				if (nextlen == 0)
				{
					max_count = 138; min_count = 3;
				}
				else if (curlen == nextlen)
				{
					max_count = 6; min_count = 3;
				}
				else
				{
					max_count = 7; min_count = 4;
				}
			}
		}

		// Construct the Huffman tree for the bit lengths and return the index in
		// bl_order of the last bit length code to send.
		internal int build_bl_tree()
		{
			int max_blindex; // index of last bit length code of non zero freq

			// Determine the bit length frequencies for literal and distance trees
			scan_tree(dyn_ltree, treeLiterals.max_code);
			scan_tree(dyn_dtree, treeDistances.max_code);

			// Build the bit length tree:
			treeBitLengths.build_tree(this);
			// opt_len now includes the length of the tree representations, except
			// the lengths of the bit lengths codes and the 5+5+4 bits for the counts.

			// Determine the number of bit length codes to send. The pkzip format
			// requires that at least 4 bit length codes be sent. (appnote.txt says
			// 3 but the actual value used is 4.)
			for (max_blindex = InternalConstants.BL_CODES - 1; max_blindex >= 3; max_blindex--)
			{
				if (bl_tree[Tree.bl_order[max_blindex] * 2 + 1] != 0)
					break;
			}
			// Update opt_len to include the bit length tree and counts
			opt_len += 3 * (max_blindex + 1) + 5 + 5 + 4;

			return max_blindex;
		}

		// Send the header for a block using dynamic Huffman trees: the counts, the
		// lengths of the bit length codes, the literal tree and the distance tree.
		// IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
		internal void send_all_trees(int lcodes, int dcodes, int blcodes)
		{
			int rank; // index in bl_order

			send_bits(lcodes - 257, 5); // not +255 as stated in appnote.txt
			send_bits(dcodes - 1, 5);
			send_bits(blcodes - 4, 4); // not -3 as stated in appnote.txt
			for (rank = 0; rank < blcodes; rank++)
			{
				send_bits(bl_tree[Tree.bl_order[rank] * 2 + 1], 3);
			}
			send_tree(dyn_ltree, lcodes - 1); // literal tree
			send_tree(dyn_dtree, dcodes - 1); // distance tree
		}

		// Send a literal or distance tree in compressed form, using the codes in
		// bl_tree. Runs of identical code lengths are emitted with the REP/REPZ
		// repeat codes; the run-length thresholds mirror scan_tree above.
		internal void send_tree(short[] tree, int max_code)
		{
			int n;                         // iterates over all tree elements
			int prevlen = -1;              // last emitted length
			int curlen;                    // length of current code
			int nextlen = tree[0 * 2 + 1]; // length of next code
			int count = 0;                 // repeat count of the current code
			int max_count = 7;             // max repeat count
			int min_count = 4;             // min repeat count

			if (nextlen == 0)
			{
				max_count = 138; min_count = 3;
			}

			for (n = 0; n <= max_code; n++)
			{
				curlen = nextlen; nextlen = tree[(n + 1) * 2 + 1];
				if (++count < max_count && curlen == nextlen)
				{
					continue;
				}
				else if (count < min_count)
				{
					// run too short for a repeat code: emit each length literally
					do
					{
						send_code(curlen, bl_tree);
					}
					while (--count != 0);
				}
				else if (curlen != 0)
				{
					if (curlen != prevlen)
					{
						send_code(curlen, bl_tree); count--;
					}
					send_code(InternalConstants.REP_3_6, bl_tree);
					send_bits(count - 3, 2);
				}
				else if (count <= 10)
				{
					send_code(InternalConstants.REPZ_3_10, bl_tree);
					send_bits(count - 3, 3);
				}
				else
				{
					send_code(InternalConstants.REPZ_11_138, bl_tree);
					send_bits(count - 11, 7);
				}
				count = 0; prevlen = curlen;
				if (nextlen == 0)
				{
					max_count = 138; min_count = 3;
				}
				else if (curlen == nextlen)
				{
					max_count = 6; min_count = 3;
				}
				else
				{
					max_count = 7; min_count = 4;
				}
			}
		}

		// Output a block of bytes on the stream.
		// IN assertion: there is enough room in pending_buf.
		private void put_bytes(byte[] p, int start, int len)
		{
			Array.Copy(p, start, pending, pendingCount, len);
			pendingCount += len;
		}

#if NOTNEEDED
		private void put_byte(byte c)
		{
			pending[pendingCount++] = c;
		}
		internal void put_short(int b)
		{
			unchecked
			{
				pending[pendingCount++] = (byte)b;
				pending[pendingCount++] = (byte)(b >> 8);
			}
		}
		internal void putShortMSB(int b)
		{
			unchecked
			{
				pending[pendingCount++] = (byte)(b >> 8);
				pending[pendingCount++] = (byte)b;
			}
		}
#endif

		// Emit the Huffman code for symbol c from 'tree' (code at tree[c*2],
		// its bit length at tree[c*2+1]).
		internal void send_code(int c, short[] tree)
		{
			int c2 = c * 2;
			send_bits((tree[c2] & 0xffff), (tree[c2 + 1] & 0xffff));
		}

		// Append 'length' low-order bits of 'value' to the output bit buffer,
		// flushing completed 16-bit chunks into 'pending' (LSB first).
		internal void send_bits(int value, int length)
		{
			int len = length;
			unchecked
			{
				if (bi_valid > (int)Buf_size - len)
				{
					//int val = value;
					//      bi_buf |= (val << bi_valid);

					bi_buf |= (short)((value << bi_valid) & 0xffff);
					//put_short(bi_buf);
					pending[pendingCount++] = (byte)bi_buf;
					pending[pendingCount++] = (byte)(bi_buf >> 8);

					bi_buf = (short)((uint)value >> (Buf_size - bi_valid));
					bi_valid += len - Buf_size;
				}
				else
				{
					//      bi_buf |= (value) << bi_valid;
					bi_buf |= (short)((value << bi_valid) & 0xffff);
					bi_valid += len;
				}
			}
		}

		// Send one empty static block to give enough lookahead for inflate.
		// This takes 10 bits, of which 7 may remain in the bit buffer.
		// The current inflate code requires 9 bits of lookahead. If the
		// last two codes for the previous block (real code plus EOB) were coded
		// on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode
		// the last real code. In this case we send two empty static blocks instead
		// of one. (There are no problems if the previous block is stored or fixed.)
		// To simplify the code, we assume the worst case of last real code encoded
		// on one bit only.
		internal void _tr_align()
		{
			send_bits(STATIC_TREES << 1, 3);
			send_code(END_BLOCK, StaticTree.lengthAndLiteralsTreeCodes);

			bi_flush();

			// Of the 10 bits for the empty block, we have already sent
			// (10 - bi_valid) bits. The lookahead for the last real code (before
			// the EOB of the previous block) was thus at least one plus the length
			// of the EOB plus what we have just sent of the empty static block.
			if (1 + last_eob_len + 10 - bi_valid < 9)
			{
				send_bits(STATIC_TREES << 1, 3);
				send_code(END_BLOCK, StaticTree.lengthAndLiteralsTreeCodes);
				bi_flush();
			}
			last_eob_len = 7;
		}

		// Save the match info and tally the frequency counts. Return true if
		// the current block must be flushed.
		internal bool _tr_tally(int dist, int lc)
		{
			// Record the pair into the distance and literal/length overlays of 'pending'.
			pending[_distanceOffset + last_lit * 2] = unchecked((byte)((uint)dist >> 8));
			pending[_distanceOffset + last_lit * 2 + 1] = unchecked((byte)dist);
			pending[_lengthOffset + last_lit] = unchecked((byte)lc);
			last_lit++;

			if (dist == 0)
			{
				// lc is the unmatched char
				dyn_ltree[lc * 2]++;
			}
			else
			{
				matches++;
				// Here, lc is the match length - MIN_MATCH
				dist--; // dist = match distance - 1
				dyn_ltree[(Tree.LengthCode[lc] + InternalConstants.LITERALS + 1) * 2]++;
				dyn_dtree[Tree.DistanceCode(dist) * 2]++;
			}

			if ((last_lit & 0x1fff) == 0 && (int)compressionLevel > 2)
			{
				// Compute an upper bound for the compressed length
				int out_length = last_lit << 3;
				int in_length = strstart - block_start;
				int dcode;
				for (dcode = 0; dcode < InternalConstants.D_CODES; dcode++)
				{
					out_length = (int)(out_length + (int)dyn_dtree[dcode * 2] * (5L + Tree.ExtraDistanceBits[dcode]));
				}
				out_length >>= 3;
				if ((matches < (last_lit / 2)) && out_length < in_length / 2)
					return true;
			}

			return (last_lit == lit_bufsize - 1) || (last_lit == lit_bufsize);
			// dinoch - wraparound?
			// We avoid equality with lit_bufsize because of wraparound at 64K
			// on 16 bit machines and because stored blocks are restricted to
			// 64K-1 bytes.
		}

		// Send the block data compressed using the given Huffman trees
		internal void send_compressed_block(short[] ltree, short[] dtree)
		{
			int distance; // distance of matched string
			int lc;       // match length or unmatched char (if dist == 0)
			int lx = 0;   // running index in l_buf
			int code;     // the code to send
			int extra;    // number of extra bits to send

			if (last_lit != 0)
			{
				do
				{
					int ix = _distanceOffset + lx * 2;
					distance = ((pending[ix] << 8) & 0xff00) | (pending[ix + 1] & 0xff);
					lc = (pending[_lengthOffset + lx]) & 0xff;
					lx++;

					if (distance == 0)
					{
						send_code(lc, ltree); // send a literal byte
					}
					else
					{
						// literal or match pair
						// Here, lc is the match length - MIN_MATCH
						code = Tree.LengthCode[lc];

						// send the length code
						send_code(code + InternalConstants.LITERALS + 1, ltree);
						extra = Tree.ExtraLengthBits[code];
						if (extra != 0)
						{
							// send the extra length bits
							lc -= Tree.LengthBase[code];
							send_bits(lc, extra);
						}
						distance--; // dist is now the match distance - 1
						code = Tree.DistanceCode(distance);

						// send the distance code
						send_code(code, dtree);

						extra = Tree.ExtraDistanceBits[code];
						if (extra != 0)
						{
							// send the extra distance bits
							distance -= Tree.DistanceBase[code];
							send_bits(distance, extra);
						}
					}

					// Check that the overlay between pending and d_buf+l_buf is ok:
				}
				while (lx < last_lit);
			}

			send_code(END_BLOCK, ltree);
			last_eob_len = ltree[END_BLOCK * 2 + 1];
		}

		// Set the data type to ASCII or BINARY, using a crude approximation:
		// binary if more than 20% of the bytes are <= 6 or >= 128, ascii otherwise.
		// IN assertion: the fields freq of dyn_ltree are set and the total of all
		// frequencies does not exceed 64K (to fit in an int on 16 bit machines).
		internal void set_data_type()
		{
			int n = 0;
			int ascii_freq = 0;
			int bin_freq = 0;
			while (n < 7)
			{
				bin_freq += dyn_ltree[n * 2]; n++;
			}
			while (n < 128)
			{
				ascii_freq += dyn_ltree[n * 2]; n++;
			}
			while (n < InternalConstants.LITERALS)
			{
				bin_freq += dyn_ltree[n * 2]; n++;
			}
			data_type = (sbyte)(bin_freq > (ascii_freq >> 2) ? Z_BINARY : Z_ASCII);
		}

		// Flush the bit buffer, keeping at most 7 bits in it.
		internal void bi_flush()
		{
			if (bi_valid == 16)
			{
				pending[pendingCount++] = (byte)bi_buf;
				pending[pendingCount++] = (byte)(bi_buf >> 8);
				bi_buf = 0;
				bi_valid = 0;
			}
			else if (bi_valid >= 8)
			{
				//put_byte((byte)bi_buf);
				pending[pendingCount++] = (byte)bi_buf;
				bi_buf >>= 8;
				bi_valid -= 8;
			}
		}

		// Flush the bit buffer and align the output on a byte boundary
		internal void bi_windup()
		{
			if (bi_valid > 8)
			{
				pending[pendingCount++] = (byte)bi_buf;
				pending[pendingCount++] = (byte)(bi_buf >> 8);
			}
			else if (bi_valid > 0)
			{
				//put_byte((byte)bi_buf);
				pending[pendingCount++] = (byte)bi_buf;
			}
			bi_buf = 0;
			bi_valid = 0;
		}

		// Copy a stored block, storing first the length and its
		// one's complement if requested.
		internal void copy_block(int buf, int len, bool header)
		{
			bi_windup();      // align on byte boundary
			last_eob_len = 8; // enough lookahead for inflate

			if (header)
				unchecked
				{
					//put_short((short)len);
					pending[pendingCount++] = (byte)len;
					pending[pendingCount++] = (byte)(len >> 8);
					//put_short((short)~len);
					pending[pendingCount++] = (byte)~len;
					pending[pendingCount++] = (byte)(~len >> 8);
				}

			put_bytes(window, buf, len);
		}

		// Flush the current block (window[block_start..strstart)) and push the
		// pending output into the codec's output buffer.
		internal void flush_block_only(bool eof)
		{
			_tr_flush_block(block_start >= 0 ? block_start : -1, strstart - block_start, eof);
			block_start = strstart;
			_codec.flush_pending();
		}

		// Copy without compression as much as possible from the input stream, return
		// the current block state.
		// This function does not insert new strings in the dictionary since
		// uncompressible data is probably not useful. This function is used
		// only for the level=0 compression option.
		// NOTE: this function should be optimized to avoid extra copying from
		// window to pending_buf.
		// Level-0 "compression": pass input through as stored blocks.
		internal BlockState DeflateNone(FlushType flush)
		{
			// Stored blocks are limited to 0xffff bytes, pending is limited
			// to pending_buf_size, and each stored block has a 5 byte header:
			int max_block_size = 0xffff;
			int max_start;

			if (max_block_size > pending.Length - 5)
			{
				max_block_size = pending.Length - 5;
			}

			// Copy as much as possible from input to output:
			while (true)
			{
				// Fill the window as much as possible:
				if (lookahead <= 1)
				{
					_fillWindow();
					if (lookahead == 0 && flush == FlushType.None)
						return BlockState.NeedMore;
					if (lookahead == 0)
						break; // flush the current block
				}

				strstart += lookahead;
				lookahead = 0;

				// Emit a stored block if pending will be full:
				max_start = block_start + max_block_size;

				if (strstart == 0 || strstart >= max_start)
				{
					// strstart == 0 is possible when wraparound on 16-bit machine
					lookahead = (int)(strstart - max_start);
					strstart = (int)max_start;

					flush_block_only(false);
					if (_codec.AvailableBytesOut == 0)
						return BlockState.NeedMore;
				}

				// Flush if we may have to slide, otherwise block_start may become
				// negative and the data will be gone:
				if (strstart - block_start >= w_size - MIN_LOOKAHEAD)
				{
					flush_block_only(false);
					if (_codec.AvailableBytesOut == 0)
						return BlockState.NeedMore;
				}
			}

			flush_block_only(flush == FlushType.Finish);
			if (_codec.AvailableBytesOut == 0)
				return (flush == FlushType.Finish) ? BlockState.FinishStarted : BlockState.NeedMore;

			return flush == FlushType.Finish ? BlockState.FinishDone : BlockState.BlockDone;
		}

		// Send a stored block
		internal void _tr_stored_block(int buf, int stored_len, bool eof)
		{
			send_bits((STORED_BLOCK << 1) + (eof ? 1 : 0), 3); // send block type
			copy_block(buf, stored_len, true);                 // with header
		}

		// Determine the best encoding for the current block: dynamic trees, static
		// trees or store, and output the encoded block to the zip file.
// Flush the current block choosing the cheapest of the three DEFLATE block
// encodings (stored / static Huffman / dynamic Huffman), then reset the
// per-block state. 'buf' is the window offset of the block's input data
// (-1 when not available), 'stored_len' its byte length, 'eof' marks the
// final block of the stream.
internal void _tr_flush_block(int buf, int stored_len, bool eof)
{
	int opt_lenb, static_lenb; // opt_len and static_len in bytes
	int max_blindex = 0; // index of last bit length code of non zero freq

	// Build the Huffman trees unless a stored block is forced
	if (compressionLevel > 0)
	{
		// Check if the file is ascii or binary
		if (data_type == Z_UNKNOWN)
			set_data_type();

		// Construct the literal and distance trees
		treeLiterals.build_tree(this);

		treeDistances.build_tree(this);

		// At this point, opt_len and static_len are the total bit lengths of
		// the compressed block data, excluding the tree representations.

		// Build the bit length tree for the above two trees, and get the index
		// in bl_order of the last bit length code to send.
		max_blindex = build_bl_tree();

		// Determine the best encoding. Compute first the block length in bytes
		opt_lenb = (opt_len + 3 + 7) >> 3;
		static_lenb = (static_len + 3 + 7) >> 3;

		if (static_lenb <= opt_lenb)
			opt_lenb = static_lenb;
	}
	else
	{
		opt_lenb = static_lenb = stored_len + 5; // force a stored block
	}

	if (stored_len + 4 <= opt_lenb && buf != -1)
	{
		// 4: two words for the lengths
		// The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.
		// Otherwise we can't have processed more than WSIZE input bytes since
		// the last block flush, because compression would have been
		// successful. If LIT_BUFSIZE <= WSIZE, it is never too late to
		// transform a block into a stored block.
		_tr_stored_block(buf, stored_len, eof);
	}
	else if (static_lenb == opt_lenb)
	{
		send_bits((STATIC_TREES << 1) + (eof ? 1 : 0), 3);
		send_compressed_block(StaticTree.lengthAndLiteralsTreeCodes, StaticTree.distTreeCodes);
	}
	else
	{
		send_bits((DYN_TREES << 1) + (eof ? 1 : 0), 3);
		send_all_trees(treeLiterals.max_code + 1, treeDistances.max_code + 1, max_blindex + 1);
		send_compressed_block(dyn_ltree, dyn_dtree);
	}

	// The above check is made mod 2^32, for files larger than 512 MB
	// and uLong implemented on 32 bits.

	_InitializeBlocks();

	if (eof)
	{
		bi_windup();
	}
}

// Fill the window when the lookahead becomes insufficient.
// Updates strstart and lookahead.
//
// IN assertion: lookahead < MIN_LOOKAHEAD
// OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
// At least one byte has been read, or avail_in == 0; reads are
// performed for at least two bytes (required for the zip translate_eol
// option -- not supported here).
private void _fillWindow()
{
	int n, m;
	int p;
	int more; // Amount of free space at the end of the window.

	do
	{
		more = (window_size - lookahead - strstart);

		// Deal with !@#$% 64K limit:
		if (more == 0 && strstart == 0 && lookahead == 0)
		{
			more = w_size;
		}
		else if (more == -1)
		{
			// Very unlikely, but possible on 16 bit machine if strstart == 0
			// and lookahead == 1 (input done one byte at time)
			more--;

			// If the window is almost full and there is insufficient lookahead,
			// move the upper half to the lower one to make room in the upper half.
		}
		else if (strstart >= w_size + w_size - MIN_LOOKAHEAD)
		{
			Array.Copy(window, w_size, window, 0, w_size);
			match_start -= w_size;
			strstart -= w_size; // we now have strstart >= MAX_DIST
			block_start -= w_size;

			// Slide the hash table (could be avoided with 32 bit values
			// at the expense of memory usage). We slide even when level == 0
			// to keep the hash table consistent if we switch back to level > 0
			// later. (Using level 0 permanently is not an optimal usage of
			// zlib, so we don't care about this pathological case.)
			n = hash_size;
			p = n;
			do
			{
				m = (head[--p] & 0xffff);
				head[p] = (short)((m >= w_size) ? (m - w_size) : 0);
			}
			while (--n != 0);

			n = w_size;
			p = n;
			do
			{
				m = (prev[--p] & 0xffff);
				prev[p] = (short)((m >= w_size) ? (m - w_size) : 0);
				// If n is not on any hash chain, prev[n] is garbage but
				// its value will never be used.
			}
			while (--n != 0);
			more += w_size;
		}

		if (_codec.AvailableBytesIn == 0)
			return;

		// If there was no sliding:
		//    strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
		//    more == window_size - lookahead - strstart
		// => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
		// => more >= window_size - 2*WSIZE + 2
		// In the BIG_MEM or MMAP case (not yet supported),
		//   window_size == input_size + MIN_LOOKAHEAD  &&
		//   strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
		// Otherwise, window_size == 2*WSIZE so more >= 2.
		// If there was sliding, more >= WSIZE. So in all cases, more >= 2.
		n = _codec.read_buf(window, strstart + lookahead, more);
		lookahead += n;

		// Initialize the hash value now that we have some input:
		if (lookahead >= MIN_MATCH)
		{
			ins_h = window[strstart] & 0xff;
			ins_h = (((ins_h) << hash_shift) ^ (window[strstart + 1] & 0xff)) & hash_mask;
		}
		// If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
		// but this is not important since only literal bytes will be emitted.
	}
	while (lookahead < MIN_LOOKAHEAD && _codec.AvailableBytesIn != 0);
}

// Compress as much as possible from the input stream, return the current
// block state.
// This function does not perform lazy evaluation of matches and inserts
// new strings in the dictionary only for unmatched strings or for short
// matches. It is used only for the fast compression options.
internal BlockState DeflateFast(FlushType flush)
{
	//    short hash_head = 0; // head of the hash chain
	int hash_head = 0; // head of the hash chain
	bool bflush; // set if current block must be flushed

	while (true)
	{
		// Make sure that we always have enough lookahead, except
		// at the end of the input file. We need MAX_MATCH bytes
		// for the next match, plus MIN_MATCH bytes to insert the
		// string following the next match.
		if (lookahead < MIN_LOOKAHEAD)
		{
			_fillWindow();
			if (lookahead < MIN_LOOKAHEAD && flush == FlushType.None)
			{
				return BlockState.NeedMore;
			}
			if (lookahead == 0)
				break; // flush the current block
		}

		// Insert the string window[strstart .. strstart+2] in the
		// dictionary, and set hash_head to the head of the hash chain:
		if (lookahead >= MIN_MATCH)
		{
			ins_h = (((ins_h) << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;

			//  prev[strstart&w_mask]=hash_head=head[ins_h];
			hash_head = (head[ins_h] & 0xffff);
			prev[strstart & w_mask] = head[ins_h];
			head[ins_h] = unchecked((short)strstart);
		}

		// Find the longest match, discarding those <= prev_length.
		// At this point we have always match_length < MIN_MATCH

		if (hash_head != 0L && ((strstart - hash_head) & 0xffff) <= w_size - MIN_LOOKAHEAD)
		{
			// To simplify the code, we prevent matches with the string
			// of window index 0 (in particular we have to avoid a match
			// of the string with itself at the start of the input file).
			if (compressionStrategy != CompressionStrategy.HuffmanOnly)
			{
				match_length = longest_match(hash_head);
			}

			// longest_match() sets match_start
		}
		if (match_length >= MIN_MATCH)
		{
			//        check_match(strstart, match_start, match_length);

			bflush = _tr_tally(strstart - match_start, match_length - MIN_MATCH);

			lookahead -= match_length;

			// Insert new strings in the hash table only if the match length
			// is not too large. This saves time but degrades compression.
			if (match_length <= config.MaxLazy && lookahead >= MIN_MATCH)
			{
				match_length--; // string at strstart already in hash table
				do
				{
					strstart++;

					ins_h = ((ins_h << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
					//      prev[strstart&w_mask]=hash_head=head[ins_h];
					hash_head = (head[ins_h] & 0xffff);
					prev[strstart & w_mask] = head[ins_h];
					head[ins_h] = unchecked((short)strstart);

					// strstart never exceeds WSIZE-MAX_MATCH, so there are
					// always MIN_MATCH bytes ahead.
				}
				while (--match_length != 0);
				strstart++;
			}
			else
			{
				strstart += match_length;
				match_length = 0;
				ins_h = window[strstart] & 0xff;

				ins_h = (((ins_h) << hash_shift) ^ (window[strstart + 1] & 0xff)) & hash_mask;
				// If lookahead < MIN_MATCH, ins_h is garbage, but it does not
				// matter since it will be recomputed at next deflate call.
			}
		}
		else
		{
			// No match, output a literal byte

			bflush = _tr_tally(0, window[strstart] & 0xff);
			lookahead--;
			strstart++;
		}
		if (bflush)
		{
			flush_block_only(false);
			if (_codec.AvailableBytesOut == 0)
				return BlockState.NeedMore;
		}
	}

	flush_block_only(flush == FlushType.Finish);
	if (_codec.AvailableBytesOut == 0)
	{
		if (flush == FlushType.Finish)
			return BlockState.FinishStarted;
		else
			return BlockState.NeedMore;
	}
	return flush == FlushType.Finish ? BlockState.FinishDone : BlockState.BlockDone;
}

// Same as above, but achieves better compression. We use a lazy
// evaluation for matches: a match is finally adopted only if there is
// no better match at the next window position.
internal BlockState DeflateSlow(FlushType flush)
{
	//    short hash_head = 0;    // head of hash chain
	int hash_head = 0; // head of hash chain
	bool bflush; // set if current block must be flushed

	// Process the input block.
	while (true)
	{
		// Make sure that we always have enough lookahead, except
		// at the end of the input file. We need MAX_MATCH bytes
		// for the next match, plus MIN_MATCH bytes to insert the
		// string following the next match.

		if (lookahead < MIN_LOOKAHEAD)
		{
			_fillWindow();
			if (lookahead < MIN_LOOKAHEAD && flush == FlushType.None)
				return BlockState.NeedMore;
			if (lookahead == 0)
				break; // flush the current block
		}

		// Insert the string window[strstart .. strstart+2] in the
		// dictionary, and set hash_head to the head of the hash chain:

		if (lookahead >= MIN_MATCH)
		{
			ins_h = (((ins_h) << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
			//  prev[strstart&w_mask]=hash_head=head[ins_h];
			hash_head = (head[ins_h] & 0xffff);
			prev[strstart & w_mask] = head[ins_h];
			head[ins_h] = unchecked((short)strstart);
		}

		// Find the longest match, discarding those <= prev_length.
		prev_length = match_length;
		prev_match = match_start;
		match_length = MIN_MATCH - 1;

		if (hash_head != 0 && prev_length < config.MaxLazy && ((strstart - hash_head) & 0xffff) <= w_size - MIN_LOOKAHEAD)
		{
			// To simplify the code, we prevent matches with the string
			// of window index 0 (in particular we have to avoid a match
			// of the string with itself at the start of the input file).

			if (compressionStrategy != CompressionStrategy.HuffmanOnly)
			{
				match_length = longest_match(hash_head);
			}

			// longest_match() sets match_start

			if (match_length <= 5 && (compressionStrategy == CompressionStrategy.Filtered || (match_length == MIN_MATCH && strstart - match_start > 4096)))
			{
				// If prev_match is also MIN_MATCH, match_start is garbage
				// but we will ignore the current match anyway.
				match_length = MIN_MATCH - 1;
			}
		}

		// If there was a match at the previous step and the current
		// match is not better, output the previous match:
		if (prev_length >= MIN_MATCH && match_length <= prev_length)
		{
			int max_insert = strstart + lookahead - MIN_MATCH;
			// Do not insert strings in hash table beyond this.

			//          check_match(strstart-1, prev_match, prev_length);

			bflush = _tr_tally(strstart - 1 - prev_match, prev_length - MIN_MATCH);

			// Insert in hash table all strings up to the end of the match.
			// strstart-1 and strstart are already inserted. If there is not
			// enough lookahead, the last two strings are not inserted in
			// the hash table.
			lookahead -= (prev_length - 1);
			prev_length -= 2;
			do
			{
				if (++strstart <= max_insert)
				{
					ins_h = (((ins_h) << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
					//prev[strstart&w_mask]=hash_head=head[ins_h];
					hash_head = (head[ins_h] & 0xffff);
					prev[strstart & w_mask] = head[ins_h];
					head[ins_h] = unchecked((short)strstart);
				}
			}
			while (--prev_length != 0);
			match_available = 0;
			match_length = MIN_MATCH - 1;
			strstart++;

			if (bflush)
			{
				flush_block_only(false);
				if (_codec.AvailableBytesOut == 0)
					return BlockState.NeedMore;
			}
		}
		else if (match_available != 0)
		{
			// If there was no match at the previous position, output a
			// single literal. If there was a match but the current match
			// is longer, truncate the previous match to a single literal.

			bflush = _tr_tally(0, window[strstart - 1] & 0xff);

			if (bflush)
			{
				flush_block_only(false);
			}
			strstart++;
			lookahead--;
			if (_codec.AvailableBytesOut == 0)
				return BlockState.NeedMore;
		}
		else
		{
			// There is no previous match to compare with, wait for
			// the next step to decide.

			match_available = 1;
			strstart++;
			lookahead--;
		}
	}

	// Emit the pending literal (if any) before closing the block.
	if (match_available != 0)
	{
		bflush = _tr_tally(0, window[strstart - 1] & 0xff);
		match_available = 0;
	}
	flush_block_only(flush == FlushType.Finish);

	if (_codec.AvailableBytesOut == 0)
	{
		if (flush == FlushType.Finish)
			return BlockState.FinishStarted;
		else
			return BlockState.NeedMore;
	}

	return flush == FlushType.Finish ? BlockState.FinishDone : BlockState.BlockDone;
}

// Find the longest match starting at the given hash-chain head 'cur_match'
// for the string at window[strstart]. Sets match_start on success and
// returns the match length (capped by the remaining lookahead).
internal int longest_match(int cur_match)
{
	int chain_length = config.MaxChainLength; // max hash chain length
	int scan = strstart; // current string
	int match; // matched string
	int len; // length of current match
	int best_len = prev_length; // best match length so far
	int limit = strstart > (w_size - MIN_LOOKAHEAD) ? strstart - (w_size - MIN_LOOKAHEAD) : 0;

	int niceLength = config.NiceLength;

	// Stop when cur_match becomes <= limit. To simplify the code,
	// we prevent matches with the string of window index 0.

	int wmask = w_mask;

	int strend = strstart + MAX_MATCH;
	byte scan_end1 = window[scan + best_len - 1];
	byte scan_end = window[scan + best_len];

	// The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
	// It is easy to get rid of this optimization if necessary.

	// Do not waste too much time if we already have a good match:
	if (prev_length >= config.GoodLength)
	{
		chain_length >>= 2;
	}

	// Do not look for matches beyond the end of the input. This is necessary
	// to make deflate deterministic.
	if (niceLength > lookahead)
		niceLength = lookahead;

	do
	{
		match = cur_match;

		// Skip to next match if the match length cannot increase
		// or if the match length is less than 2:
		if (window[match + best_len] != scan_end || window[match + best_len - 1] != scan_end1 || window[match] != window[scan] || window[++match] != window[scan + 1])
			continue;

		// The check at best_len-1 can be removed because it will be made
		// again later. (This heuristic is not always a win.)
		// It is not necessary to compare scan[2] and match[2] since they
		// are always equal when the other bytes match, given that
		// the hash keys are equal and that HASH_BITS >= 8.
		scan += 2; match++;

		// We check for insufficient lookahead only every 8th comparison;
		// the 256th check will be made at strstart+258.
		do
		{
		}
		while (window[++scan] == window[++match] && window[++scan] == window[++match] && window[++scan] == window[++match] && window[++scan] == window[++match] && window[++scan] == window[++match] && window[++scan] == window[++match] && window[++scan] == window[++match] && window[++scan] == window[++match] && scan < strend);

		len = MAX_MATCH - (int)(strend - scan);
		scan = strend - MAX_MATCH;

		if (len > best_len)
		{
			match_start = cur_match;
			best_len = len;
			if (len >= niceLength)
				break;
			scan_end1 = window[scan + best_len - 1];
			scan_end = window[scan + best_len];
		}
	}
	while ((cur_match = (prev[cur_match & wmask] & 0xffff)) > limit && --chain_length != 0);

	if (best_len <= lookahead)
		return best_len;
	return lookahead;
}

// True once the RFC 1950 (zlib) trailer has been written, so it is emitted
// only once per stream.
private bool Rfc1950BytesEmitted = false;
// When true the stream is wrapped in the RFC 1950 zlib header/trailer;
// when false raw deflate is produced.
private bool _WantRfc1950HeaderBytes = true;
internal bool WantRfc1950HeaderBytes
{
	get { return _WantRfc1950HeaderBytes; }
	set { _WantRfc1950HeaderBytes = value; }
}

// Convenience overload: default window bits.
internal int Initialize(ZlibCodec codec, CompressionLevel level)
{
	return Initialize(codec, level, ZlibConstants.WindowBitsMax);
}

// Convenience overload: default memory level and strategy.
internal int Initialize(ZlibCodec codec, CompressionLevel level, int bits)
{
	return Initialize(codec, level, bits, MEM_LEVEL_DEFAULT, CompressionStrategy.Default);
}

// Convenience overload: default memory level.
internal int Initialize(ZlibCodec codec, CompressionLevel level, int bits, CompressionStrategy compressionStrategy)
{
	return Initialize(codec, level, bits, MEM_LEVEL_DEFAULT, compressionStrategy);
}

// Full initializer: validates windowBits (9..15) and memLevel
// (1..MEM_LEVEL_MAX), allocates the sliding window, hash tables and the
// shared pending/distance/length buffer, then resets the state.
// Returns ZlibConstants.Z_OK; throws ZlibException on invalid arguments.
internal int Initialize(ZlibCodec codec, CompressionLevel level, int windowBits, int memLevel, CompressionStrategy strategy)
{
	_codec = codec;
	_codec.Message = null;

	// validation
	if (windowBits < 9 || windowBits > 15)
		throw new ZlibException("windowBits must be in the range 9..15.");

	if (memLevel < 1 || memLevel > MEM_LEVEL_MAX)
		throw new ZlibException(String.Format("memLevel must be in the range 1..{0}", MEM_LEVEL_MAX));

	_codec.dstate = this;

	w_bits = windowBits;
	w_size = 1 << w_bits;
	w_mask = w_size - 1;

	hash_bits = memLevel + 7;
	hash_size = 1 << hash_bits;
	hash_mask = hash_size - 1;
	hash_shift = ((hash_bits + MIN_MATCH - 1) / MIN_MATCH);

	window = new byte[w_size * 2];
	prev = new short[w_size];
	head = new short[hash_size];

	// for memLevel==8, this will be 16384, 16k
	lit_bufsize = 1 << (memLevel + 6);

	// Use a single array as the buffer for data pending compression,
	// the output distance codes, and the output length codes (aka tree).
	// orig comment: This works just fine since the average
	// output size for (length,distance) codes is <= 24 bits.
	pending = new byte[lit_bufsize * 4];
	_distanceOffset = lit_bufsize;
	_lengthOffset = (1 + 2) * lit_bufsize;

	// So, for memLevel 8, the length of the pending buffer is 65536. 64k.
	// The first 16k are pending bytes.
	// The middle slice, of 32k, is used for distance codes.
	// The final 16k are length codes.

	this.compressionLevel = level;
	this.compressionStrategy = strategy;

	Reset();
	return ZlibConstants.Z_OK;
}

// Reset the codec counters and per-stream state so the same instance can
// compress a new stream.
internal void Reset()
{
	_codec.TotalBytesIn = _codec.TotalBytesOut = 0;
	_codec.Message = null;
	//strm.data_type = Z_UNKNOWN;

	pendingCount = 0;
	nextPending = 0;

	Rfc1950BytesEmitted = false;

	status = (WantRfc1950HeaderBytes) ? INIT_STATE : BUSY_STATE;
	_codec._Adler32 = Adler.Adler32(0, null, 0, 0);

	last_flush = (int)FlushType.None;

	_InitializeTreeData();
	_InitializeLazyMatch();
}

// Release the large buffers. Returns Z_STREAM_ERROR for an unknown state,
// Z_DATA_ERROR if the stream was still mid-compression, Z_OK otherwise.
internal int End()
{
	if (status != INIT_STATE && status != BUSY_STATE && status != FINISH_STATE)
	{
		return ZlibConstants.Z_STREAM_ERROR;
	}
	// Deallocate in reverse order of allocations:
	pending = null;
	head = null;
	prev = null;
	window = null;
	// free
	// dstate=null;
	return status == BUSY_STATE ? ZlibConstants.Z_DATA_ERROR : ZlibConstants.Z_OK;
}

// Point DeflateFunction at the implementation matching the current
// config's flavor (Store / Fast / Slow).
private void SetDeflater()
{
	switch (config.Flavor)
	{
		case DeflateFlavor.Store:
			DeflateFunction = DeflateNone;
			break;
		case DeflateFlavor.Fast:
			DeflateFunction = DeflateFast;
			break;
		case DeflateFlavor.Slow:
			DeflateFunction = DeflateSlow;
			break;
	}
}

// Change compression level/strategy mid-stream. If the deflate flavor
// changes after some input was consumed, the current buffer is flushed
// first (Partial flush) and that call's result is returned.
internal int SetParams(CompressionLevel level, CompressionStrategy strategy)
{
	int result = ZlibConstants.Z_OK;

	if (compressionLevel != level)
	{
		Config newConfig = Config.Lookup(level);

		// change in the deflate flavor (Fast vs slow vs none)?
		if (newConfig.Flavor != config.Flavor && _codec.TotalBytesIn != 0)
		{
			// Flush the last buffer:
			result = _codec.Deflate(FlushType.Partial);
		}

		compressionLevel = level;
		config = newConfig;
		SetDeflater();
	}

	// no need to flush with change in strategy? Really?
	compressionStrategy = strategy;

	return result;
}

// Preload the sliding window with a preset dictionary (only valid before
// any data is compressed, i.e. in INIT_STATE) and index it in the hash
// table. Updates the codec's Adler32 with the full dictionary.
// NOTE(review): dictionary.Length is read before the 'dictionary == null'
// check below, so a null argument throws NullReferenceException instead of
// the intended ZlibException("Stream error.") — flagged, not changed here.
internal int SetDictionary(byte[] dictionary)
{
	int length = dictionary.Length;
	int index = 0;

	if (dictionary == null || status != INIT_STATE)
		throw new ZlibException("Stream error.");

	_codec._Adler32 = Adler.Adler32(_codec._Adler32, dictionary, 0, dictionary.Length);

	if (length < MIN_MATCH)
		return ZlibConstants.Z_OK;
	if (length > w_size - MIN_LOOKAHEAD)
	{
		length = w_size - MIN_LOOKAHEAD;
		index = dictionary.Length - length; // use the tail of the dictionary
	}
	Array.Copy(dictionary, index, window, 0, length);
	strstart = length;
	block_start = length;

	// Insert all strings in the hash table (except for the last two bytes).
	// s->lookahead stays null, so s->ins_h will be recomputed at the next
	// call of fill_window.

	ins_h = window[0] & 0xff;
	ins_h = (((ins_h) << hash_shift) ^ (window[1] & 0xff)) & hash_mask;

	for (int n = 0; n <= length - MIN_MATCH; n++)
	{
		ins_h = (((ins_h) << hash_shift) ^ (window[(n) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
		prev[n & w_mask] = head[ins_h];
		head[ins_h] = (short)n;
	}
	return ZlibConstants.Z_OK;
}

// Main deflate entry point: validates state, writes the zlib header on the
// first call (when WantRfc1950HeaderBytes), drains pending output, runs the
// selected DeflateFunction, handles Partial/Sync/Full flush markers, and on
// Finish writes the Adler32 trailer. Returns Z_OK or Z_STREAM_END; throws
// ZlibException on misuse.
internal int Deflate(FlushType flush)
{
	int old_flush;

	if (_codec.OutputBuffer == null || (_codec.InputBuffer == null && _codec.AvailableBytesIn != 0) || (status == FINISH_STATE && flush != FlushType.Finish))
	{
		// Index arithmetic below maps the zlib error code into _ErrorMessage,
		// which is laid out relative to Z_NEED_DICT (as in zlib's z_errmsg).
		_codec.Message = _ErrorMessage[ZlibConstants.Z_NEED_DICT - (ZlibConstants.Z_STREAM_ERROR)];
		throw new ZlibException(String.Format("Something is fishy. [{0}]", _codec.Message));
	}
	if (_codec.AvailableBytesOut == 0)
	{
		_codec.Message = _ErrorMessage[ZlibConstants.Z_NEED_DICT - (ZlibConstants.Z_BUF_ERROR)];
		throw new ZlibException("OutputBuffer is full (AvailableBytesOut == 0)");
	}

	old_flush = last_flush;
	last_flush = (int)flush;

	// Write the zlib (rfc1950) header bytes
	if (status == INIT_STATE)
	{
		int header = (Z_DEFLATED + ((w_bits - 8) << 4)) << 8;
		int level_flags = (((int)compressionLevel - 1) & 0xff) >> 1;

		if (level_flags > 3)
			level_flags = 3;
		header |= (level_flags << 6);
		if (strstart != 0)
			header |= PRESET_DICT;
		header += 31 - (header % 31);

		status = BUSY_STATE;
		//putShortMSB(header);
		unchecked
		{
			pending[pendingCount++] = (byte)(header >> 8);
			pending[pendingCount++] = (byte)header;
		}
		// Save the adler32 of the preset dictionary:
		if (strstart != 0)
		{
			pending[pendingCount++] = (byte)((_codec._Adler32 & 0xFF000000) >> 24);
			pending[pendingCount++] = (byte)((_codec._Adler32 & 0x00FF0000) >> 16);
			pending[pendingCount++] = (byte)((_codec._Adler32 & 0x0000FF00) >> 8);
			pending[pendingCount++] = (byte)(_codec._Adler32 & 0x000000FF);
		}
		_codec._Adler32 = Adler.Adler32(0, null, 0, 0);
	}

	// Flush as much pending output as possible
	if (pendingCount != 0)
	{
		_codec.flush_pending();
		if (_codec.AvailableBytesOut == 0)
		{
			//System.out.println("  avail_out==0");
			// Since avail_out is 0, deflate will be called again with
			// more output space, but possibly with both pending and
			// avail_in equal to zero. There won't be anything to do,
			// but this is not an error situation so make sure we
			// return OK instead of BUF_ERROR at next call of deflate:
			last_flush = -1;
			return ZlibConstants.Z_OK;
		}

		// Make sure there is something to do and avoid duplicate consecutive
		// flushes. For repeated and useless calls with Z_FINISH, we keep
		// returning Z_STREAM_END instead of Z_BUFF_ERROR.
	}
	else if (_codec.AvailableBytesIn == 0 && (int)flush <= old_flush && flush != FlushType.Finish)
	{
		// workitem 8557
		//
		// Not sure why this needs to be an error.  pendingCount == 0, which
		// means there's nothing to deflate.  And the caller has not asked
		// for a FlushType.Finish, but... that seems very non-fatal.  We
		// can just say "OK" and do nothing.

		// _codec.Message = z_errmsg[ZlibConstants.Z_NEED_DICT - (ZlibConstants.Z_BUF_ERROR)];
		// throw new ZlibException("AvailableBytesIn == 0 && flush<=old_flush && flush != FlushType.Finish");

		return ZlibConstants.Z_OK;
	}

	// User must not provide more input after the first FINISH:
	if (status == FINISH_STATE && _codec.AvailableBytesIn != 0)
	{
		_codec.Message = _ErrorMessage[ZlibConstants.Z_NEED_DICT - (ZlibConstants.Z_BUF_ERROR)];
		throw new ZlibException("status == FINISH_STATE && _codec.AvailableBytesIn != 0");
	}

	// Start a new block or continue the current one.
	if (_codec.AvailableBytesIn != 0 || lookahead != 0 || (flush != FlushType.None && status != FINISH_STATE))
	{
		BlockState bstate = DeflateFunction(flush);

		if (bstate == BlockState.FinishStarted || bstate == BlockState.FinishDone)
		{
			status = FINISH_STATE;
		}
		if (bstate == BlockState.NeedMore || bstate == BlockState.FinishStarted)
		{
			if (_codec.AvailableBytesOut == 0)
			{
				last_flush = -1; // avoid BUF_ERROR next call, see above
			}
			return ZlibConstants.Z_OK;
			// If flush != Z_NO_FLUSH && avail_out == 0, the next call
			// of deflate should use the same flush parameter to make sure
			// that the flush is complete. So we don't have to output an
			// empty block here, this will be done at next call. This also
			// ensures that for a very small output buffer, we emit at most
			// one empty block.
		}

		if (bstate == BlockState.BlockDone)
		{
			if (flush == FlushType.Partial)
			{
				_tr_align();
			}
			else
			{
				// FlushType.Full or FlushType.Sync
				_tr_stored_block(0, 0, false);
				// For a full flush, this empty block will be recognized
				// as a special marker by inflate_sync().
				if (flush == FlushType.Full)
				{
					// clear hash (forget the history)
					for (int i = 0; i < hash_size; i++)
						head[i] = 0;
				}
			}
			_codec.flush_pending();
			if (_codec.AvailableBytesOut == 0)
			{
				last_flush = -1; // avoid BUF_ERROR at next call, see above
				return ZlibConstants.Z_OK;
			}
		}
	}

	if (flush != FlushType.Finish)
		return ZlibConstants.Z_OK;

	if (!WantRfc1950HeaderBytes || Rfc1950BytesEmitted)
		return ZlibConstants.Z_STREAM_END;

	// Write the zlib trailer (adler32)
	pending[pendingCount++] = (byte)((_codec._Adler32 & 0xFF000000) >> 24);
	pending[pendingCount++] = (byte)((_codec._Adler32 & 0x00FF0000) >> 16);
	pending[pendingCount++] = (byte)((_codec._Adler32 & 0x0000FF00) >> 8);
	pending[pendingCount++] = (byte)(_codec._Adler32 & 0x000000FF);
	//putShortMSB((int)(SharedUtils.URShift(_codec._Adler32, 16)));
	//putShortMSB((int)(_codec._Adler32 & 0xffff));

	_codec.flush_pending();

	// If avail_out is zero, the application will call deflate again
	// to flush the rest.
	Rfc1950BytesEmitted = true; // write the trailer only once!

	return pendingCount != 0 ? ZlibConstants.Z_OK : ZlibConstants.Z_STREAM_END;
}
	}
}


================================================
FILE: src/FirebirdSql.Data.External/zlib/InfTree.cs
================================================
// Inftree.cs
// ------------------------------------------------------------------
//
// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
// All rights reserved.
//
// This code module is part of DotNetZip, a zipfile class library.
//
// ------------------------------------------------------------------
//
// This code is licensed under the Microsoft Public License.
// See the file License.txt for the license details.
// More info on: http://dotnetzip.codeplex.com // // ------------------------------------------------------------------ // // last saved (in emacs): // Time-stamp: <2009-October-28 12:43:54> // // ------------------------------------------------------------------ // // This module defines classes used in decompression. This code is derived // from the jzlib implementation of zlib. In keeping with the license for jzlib, // the copyright to that code is below. // // ------------------------------------------------------------------ // // Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in // the documentation and/or other materials provided with the distribution. // // 3. The names of the authors may not be used to endorse or promote products // derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND // FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT, // INC. 
OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, // OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, // EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // ----------------------------------------------------------------------- // // This program is based on zlib-1.1.3; credit to authors // Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu) // and contributors of zlib. // // ----------------------------------------------------------------------- using System; namespace Ionic.Zlib { sealed class InfTree { private const int MANY = 1440; private const int Z_OK = 0; private const int Z_STREAM_END = 1; private const int Z_NEED_DICT = 2; private const int Z_ERRNO = - 1; private const int Z_STREAM_ERROR = - 2; private const int Z_DATA_ERROR = - 3; private const int Z_MEM_ERROR = - 4; private const int Z_BUF_ERROR = - 5; private const int Z_VERSION_ERROR = - 6; internal const int fixed_bl = 9; internal const int fixed_bd = 5; //UPGRADE_NOTE: Final was removed from the declaration of 'fixed_tl'. 
"ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" internal static readonly int[] fixed_tl = new int[]{96, 7, 256, 0, 8, 80, 0, 8, 16, 84, 8, 115, 82, 7, 31, 0, 8, 112, 0, 8, 48, 0, 9, 192, 80, 7, 10, 0, 8, 96, 0, 8, 32, 0, 9, 160, 0, 8, 0, 0, 8, 128, 0, 8, 64, 0, 9, 224, 80, 7, 6, 0, 8, 88, 0, 8, 24, 0, 9, 144, 83, 7, 59, 0, 8, 120, 0, 8, 56, 0, 9, 208, 81, 7, 17, 0, 8, 104, 0, 8, 40, 0, 9, 176, 0, 8, 8, 0, 8, 136, 0, 8, 72, 0, 9, 240, 80, 7, 4, 0, 8, 84, 0, 8, 20, 85, 8, 227, 83, 7, 43, 0, 8, 116, 0, 8, 52, 0, 9, 200, 81, 7, 13, 0, 8, 100, 0, 8, 36, 0, 9, 168, 0, 8, 4, 0, 8, 132, 0, 8, 68, 0, 9, 232, 80, 7, 8, 0, 8, 92, 0, 8, 28, 0, 9, 152, 84, 7, 83, 0, 8, 124, 0, 8, 60, 0, 9, 216, 82, 7, 23, 0, 8, 108, 0, 8, 44, 0, 9, 184, 0, 8, 12, 0, 8, 140, 0, 8, 76, 0, 9, 248, 80, 7, 3, 0, 8, 82, 0, 8, 18, 85, 8, 163, 83, 7, 35, 0, 8, 114, 0, 8, 50, 0, 9, 196, 81, 7, 11, 0, 8, 98, 0, 8, 34, 0, 9, 164, 0, 8, 2, 0, 8, 130, 0, 8, 66, 0, 9, 228, 80, 7, 7, 0, 8, 90, 0, 8, 26, 0, 9, 148, 84, 7, 67, 0, 8, 122, 0, 8, 58, 0, 9, 212, 82, 7, 19, 0, 8, 106, 0, 8, 42, 0, 9, 180, 0, 8, 10, 0, 8, 138, 0, 8, 74, 0, 9, 244, 80, 7, 5, 0, 8, 86, 0, 8, 22, 192, 8, 0, 83, 7, 51, 0, 8, 118, 0, 8, 54, 0, 9, 204, 81, 7, 15, 0, 8, 102, 0, 8, 38, 0, 9, 172, 0, 8, 6, 0, 8, 134, 0, 8, 70, 0, 9, 236, 80, 7, 9, 0, 8, 94, 0, 8, 30, 0, 9, 156, 84, 7, 99, 0, 8, 126, 0, 8, 62, 0, 9, 220, 82, 7, 27, 0, 8, 110, 0, 8, 46, 0, 9, 188, 0, 8, 14, 0, 8, 142, 0, 8, 78, 0, 9, 252, 96, 7, 256, 0, 8, 81, 0, 8, 17, 85, 8, 131, 82, 7, 31, 0, 8, 113, 0, 8, 49, 0, 9, 194, 80, 7, 10, 0, 8, 97, 0, 8, 33, 0, 9, 162, 0, 8, 1, 0, 8, 129, 0, 8, 65, 0, 9, 226, 80, 7, 6, 0, 8, 89, 0, 8, 25, 0, 9, 146, 83, 7, 59, 0, 8, 121, 0, 8, 57, 0, 9, 210, 81, 7, 17, 0, 8, 105, 0, 8, 41, 0, 9, 178, 0, 8, 9, 0, 8, 137, 0, 8, 73, 0, 9, 242, 80, 7, 4, 0, 8, 85, 0, 8, 21, 80, 8, 258, 83, 7, 43, 0, 8, 117, 0, 8, 53, 0, 9, 202, 81, 7, 13, 0, 8, 101, 0, 8, 37, 0, 9, 170, 0, 8, 5, 0, 8, 133, 0, 
8, 69, 0, 9, 234, 80, 7, 8, 0, 8, 93, 0, 8, 29, 0, 9, 154, 84, 7, 83, 0, 8, 125, 0, 8, 61, 0, 9, 218, 82, 7, 23, 0, 8, 109, 0, 8, 45, 0, 9, 186, 0, 8, 13, 0, 8, 141, 0, 8, 77, 0, 9, 250, 80, 7, 3, 0, 8, 83, 0, 8, 19, 85, 8, 195, 83, 7, 35, 0, 8, 115, 0, 8, 51, 0, 9, 198, 81, 7, 11, 0, 8, 99, 0, 8, 35, 0, 9, 166, 0, 8, 3, 0, 8, 131, 0, 8, 67, 0, 9, 230, 80, 7, 7, 0, 8, 91, 0, 8, 27, 0, 9, 150, 84, 7, 67, 0, 8, 123, 0, 8, 59, 0, 9, 214, 82, 7, 19, 0, 8, 107, 0, 8, 43, 0, 9, 182, 0, 8, 11, 0, 8, 139, 0, 8, 75, 0, 9, 246, 80, 7, 5, 0, 8, 87, 0, 8, 23, 192, 8, 0, 83, 7, 51, 0, 8, 119, 0, 8, 55, 0, 9, 206, 81, 7, 15, 0, 8, 103, 0, 8, 39, 0, 9, 174, 0, 8, 7, 0, 8, 135, 0, 8, 71, 0, 9, 238, 80, 7, 9, 0, 8, 95, 0, 8, 31, 0, 9, 158, 84, 7, 99, 0, 8, 127, 0, 8, 63, 0, 9, 222, 82, 7, 27, 0, 8, 111, 0, 8, 47, 0, 9, 190, 0, 8, 15, 0, 8, 143, 0, 8, 79, 0, 9, 254, 96, 7, 256, 0, 8, 80, 0, 8, 16, 84, 8, 115, 82, 7, 31, 0, 8, 112, 0, 8, 48, 0, 9, 193, 80, 7, 10, 0, 8, 96, 0, 8, 32, 0, 9, 161, 0, 8, 0, 0, 8, 128, 0, 8, 64, 0, 9, 225, 80, 7, 6, 0, 8, 88, 0, 8, 24, 0, 9, 145, 83, 7, 59, 0, 8, 120, 0, 8, 56, 0, 9, 209, 81, 7, 17, 0, 8, 104, 0, 8, 40, 0, 9, 177, 0, 8, 8, 0, 8, 136, 0, 8, 72, 0, 9, 241, 80, 7, 4, 0, 8, 84, 0, 8, 20, 85, 8, 227, 83, 7, 43, 0, 8, 116, 0, 8, 52, 0, 9, 201, 81, 7, 13, 0, 8, 100, 0, 8, 36, 0, 9, 169, 0, 8, 4, 0, 8, 132, 0, 8, 68, 0, 9, 233, 80, 7, 8, 0, 8, 92, 0, 8, 28, 0, 9, 153, 84, 7, 83, 0, 8, 124, 0, 8, 60, 0, 9, 217, 82, 7, 23, 0, 8, 108, 0, 8, 44, 0, 9, 185, 0, 8, 12, 0, 8, 140, 0, 8, 76, 0, 9, 249, 80, 7, 3, 0, 8, 82, 0, 8, 18, 85, 8, 163, 83, 7, 35, 0, 8, 114, 0, 8, 50, 0, 9, 197, 81, 7, 11, 0, 8, 98, 0, 8, 34, 0, 9, 165, 0, 8, 2, 0, 8, 130, 0, 8, 66, 0, 9, 229, 80, 7, 7, 0, 8, 90, 0, 8, 26, 0, 9, 149, 84, 7, 67, 0, 8, 122, 0, 8, 58, 0, 9, 213, 82, 7, 19, 0, 8, 106, 0, 8, 42, 0, 9, 181, 0, 8, 10, 0, 8, 138, 0, 8, 74, 0, 9, 245, 80, 7, 5, 0, 8, 86, 0, 8, 22, 192, 8, 0, 83, 7, 51, 0, 8, 118, 0, 8, 54, 0, 9, 205, 81, 7, 15, 0, 8, 102, 0, 8, 38, 0, 9, 
173, 0, 8, 6, 0, 8, 134, 0, 8, 70, 0, 9, 237, 80, 7, 9, 0, 8, 94, 0, 8, 30, 0, 9, 157, 84, 7, 99, 0, 8, 126, 0, 8, 62, 0, 9, 221, 82, 7, 27, 0, 8, 110, 0, 8, 46, 0, 9, 189, 0, 8, 14, 0, 8, 142, 0, 8, 78, 0, 9, 253, 96, 7, 256, 0, 8, 81, 0, 8, 17, 85, 8, 131, 82, 7, 31, 0, 8, 113, 0, 8, 49, 0, 9, 195, 80, 7, 10, 0, 8, 97, 0, 8, 33, 0, 9, 163, 0, 8, 1, 0, 8, 129, 0, 8, 65, 0, 9, 227, 80, 7, 6, 0, 8, 89, 0, 8, 25, 0, 9, 147, 83, 7, 59, 0, 8, 121, 0, 8, 57, 0, 9, 211, 81, 7, 17, 0, 8, 105, 0, 8, 41, 0, 9, 179, 0, 8, 9, 0, 8, 137, 0, 8, 73, 0, 9, 243, 80, 7, 4, 0, 8, 85, 0, 8, 21, 80, 8, 258, 83, 7, 43, 0, 8, 117, 0, 8, 53, 0, 9, 203, 81, 7, 13, 0, 8, 101, 0, 8, 37, 0, 9, 171, 0, 8, 5, 0, 8, 133, 0, 8, 69, 0, 9, 235, 80, 7, 8, 0, 8, 93, 0, 8, 29, 0, 9, 155, 84, 7, 83, 0, 8, 125, 0, 8, 61, 0, 9, 219, 82, 7, 23, 0, 8, 109, 0, 8, 45, 0, 9, 187, 0, 8, 13, 0, 8, 141, 0, 8, 77, 0, 9, 251, 80, 7, 3, 0, 8, 83, 0, 8, 19, 85, 8, 195, 83, 7, 35, 0, 8, 115, 0, 8, 51, 0, 9, 199, 81, 7, 11, 0, 8, 99, 0, 8, 35, 0, 9, 167, 0, 8, 3, 0, 8, 131, 0, 8, 67, 0, 9, 231, 80, 7, 7, 0, 8, 91, 0, 8, 27, 0, 9, 151, 84, 7, 67, 0, 8, 123, 0, 8, 59, 0, 9, 215, 82, 7, 19, 0, 8, 107, 0, 8, 43, 0, 9, 183, 0, 8, 11, 0, 8, 139, 0, 8, 75, 0, 9, 247, 80, 7, 5, 0, 8, 87, 0, 8, 23, 192, 8, 0, 83, 7, 51, 0, 8, 119, 0, 8, 55, 0, 9, 207, 81, 7, 15, 0, 8, 103, 0, 8, 39, 0, 9, 175, 0, 8, 7, 0, 8, 135, 0, 8, 71, 0, 9, 239, 80, 7, 9, 0, 8, 95, 0, 8, 31, 0, 9, 159, 84, 7, 99, 0, 8, 127, 0, 8, 63, 0, 9, 223, 82, 7, 27, 0, 8, 111, 0, 8, 47, 0, 9, 191, 0, 8, 15, 0, 8, 143, 0, 8, 79, 0, 9, 255}; //UPGRADE_NOTE: Final was removed from the declaration of 'fixed_td'. 
"ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" internal static readonly int[] fixed_td = new int[]{80, 5, 1, 87, 5, 257, 83, 5, 17, 91, 5, 4097, 81, 5, 5, 89, 5, 1025, 85, 5, 65, 93, 5, 16385, 80, 5, 3, 88, 5, 513, 84, 5, 33, 92, 5, 8193, 82, 5, 9, 90, 5, 2049, 86, 5, 129, 192, 5, 24577, 80, 5, 2, 87, 5, 385, 83, 5, 25, 91, 5, 6145, 81, 5, 7, 89, 5, 1537, 85, 5, 97, 93, 5, 24577, 80, 5, 4, 88, 5, 769, 84, 5, 49, 92, 5, 12289, 82, 5, 13, 90, 5, 3073, 86, 5, 193, 192, 5, 24577}; // Tables for deflate from PKZIP's appnote.txt. //UPGRADE_NOTE: Final was removed from the declaration of 'cplens'. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" internal static readonly int[] cplens = new int[]{3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; // see note #13 above about 258 //UPGRADE_NOTE: Final was removed from the declaration of 'cplext'. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" internal static readonly int[] cplext = new int[]{0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 112, 112}; //UPGRADE_NOTE: Final was removed from the declaration of 'cpdist'. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" internal static readonly int[] cpdist = new int[]{1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577}; //UPGRADE_NOTE: Final was removed from the declaration of 'cpdext'. 
"ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'" internal static readonly int[] cpdext = new int[]{0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13}; // If BMAX needs to be larger than 16, then h and x[] should be uLong. internal const int BMAX = 15; // maximum bit length of any code internal int[] hn = null; // hufts used in space internal int[] v = null; // work area for huft_build internal int[] c = null; // bit length count table internal int[] r = null; // table entry for structure assignment internal int[] u = null; // table stack internal int[] x = null; // bit offsets, then code stack private int huft_build(int[] b, int bindex, int n, int s, int[] d, int[] e, int[] t, int[] m, int[] hp, int[] hn, int[] v) { // Given a list of code lengths and a maximum table size, make a set of // tables to decode that set of codes. Return Z_OK on success, Z_BUF_ERROR // if the given code set is incomplete (the tables are still built in this // case), Z_DATA_ERROR if the input is invalid (an over-subscribed set of // lengths), or Z_MEM_ERROR if not enough memory. 
int a; // counter for codes of length k int f; // i repeats in table every f entries int g; // maximum code length int h; // table level int i; // counter, current code int j; // counter int k; // number of bits in current code int l; // bits per table (returned in m) int mask; // (1 << w) - 1, to avoid cc -O bug on HP int p; // pointer into c[], b[], or v[] int q; // points to current table int w; // bits before this table == (l * h) int xp; // pointer into x int y; // number of dummy codes added int z; // number of entries in current table // Generate counts for each bit length p = 0; i = n; do { c[b[bindex + p]]++; p++; i--; // assume all entries <= BMAX } while (i != 0); if (c[0] == n) { // null input--all zero length codes t[0] = - 1; m[0] = 0; return Z_OK; } // Find minimum and maximum length, bound *m by those l = m[0]; for (j = 1; j <= BMAX; j++) if (c[j] != 0) break; k = j; // minimum code length if (l < j) { l = j; } for (i = BMAX; i != 0; i--) { if (c[i] != 0) break; } g = i; // maximum code length if (l > i) { l = i; } m[0] = l; // Adjust last length count to fill out codes, if needed for (y = 1 << j; j < i; j++, y <<= 1) { if ((y -= c[j]) < 0) { return Z_DATA_ERROR; } } if ((y -= c[i]) < 0) { return Z_DATA_ERROR; } c[i] += y; // Generate starting offsets into the value table for each length x[1] = j = 0; p = 1; xp = 2; while (--i != 0) { // note that i == g from above x[xp] = (j += c[p]); xp++; p++; } // Make a table of values in order of bit lengths i = 0; p = 0; do { if ((j = b[bindex + p]) != 0) { v[x[j]++] = i; } p++; } while (++i < n); n = x[g]; // set n to length of v // Generate the Huffman codes and for each, make the table entries x[0] = i = 0; // first Huffman code is zero p = 0; // grab values in bit order h = - 1; // no tables yet--level -1 w = - l; // bits decoded == (l * h) u[0] = 0; // just to keep compilers happy q = 0; // ditto z = 0; // ditto // go through the bit lengths (k already is bits in shortest code) for (; k <= g; k++) { a = 
c[k]; while (a-- != 0) { // here i is the Huffman code of length k bits for value *p // make tables up to required level while (k > w + l) { h++; w += l; // previous table always l bits // compute minimum size table less than or equal to l bits z = g - w; z = (z > l)?l:z; // table size upper limit if ((f = 1 << (j = k - w)) > a + 1) { // try a k-w bit table // too few codes for k-w bit table f -= (a + 1); // deduct codes from patterns left xp = k; if (j < z) { while (++j < z) { // try smaller tables up to z bits if ((f <<= 1) <= c[++xp]) break; // enough codes to use up j bits f -= c[xp]; // else deduct codes from patterns } } } z = 1 << j; // table entries for j-bit table // allocate new table if (hn[0] + z > MANY) { // (note: doesn't matter for fixed) return Z_DATA_ERROR; // overflow of MANY } u[h] = q = hn[0]; // DEBUG hn[0] += z; // connect to last table, if there is one if (h != 0) { x[h] = i; // save pattern for backing up r[0] = (sbyte) j; // bits in this table r[1] = (sbyte) l; // bits to dump before this table j = SharedUtils.URShift(i, (w - l)); r[2] = (int) (q - u[h - 1] - j); // offset to this table Array.Copy(r, 0, hp, (u[h - 1] + j) * 3, 3); // connect to last table } else { t[0] = q; // first table is returned result } } // set up table entry in r r[1] = (sbyte) (k - w); if (p >= n) { r[0] = 128 + 64; // out of values--invalid code } else if (v[p] < s) { r[0] = (sbyte) (v[p] < 256?0:32 + 64); // 256 is end-of-block r[2] = v[p++]; // simple code is just the value } else { r[0] = (sbyte) (e[v[p] - s] + 16 + 64); // non-simple--look up in lists r[2] = d[v[p++] - s]; } // fill code-like entries with r f = 1 << (k - w); for (j = SharedUtils.URShift(i, w); j < z; j += f) { Array.Copy(r, 0, hp, (q + j) * 3, 3); } // backwards increment the k-bit code i for (j = 1 << (k - 1); (i & j) != 0; j = SharedUtils.URShift(j, 1)) { i ^= j; } i ^= j; // backup over finished tables mask = (1 << w) - 1; // needed on HP, cc -O bug while ((i & mask) != x[h]) { h--; // 
don't need to update q w -= l; mask = (1 << w) - 1; } } } // Return Z_BUF_ERROR if we were given an incomplete table return y != 0 && g != 1?Z_BUF_ERROR:Z_OK; } internal int inflate_trees_bits(int[] c, int[] bb, int[] tb, int[] hp, ZlibCodec z) { int result; initWorkArea(19); hn[0] = 0; result = huft_build(c, 0, 19, 19, null, null, tb, bb, hp, hn, v); if (result == Z_DATA_ERROR) { z.Message = "oversubscribed dynamic bit lengths tree"; } else if (result == Z_BUF_ERROR || bb[0] == 0) { z.Message = "incomplete dynamic bit lengths tree"; result = Z_DATA_ERROR; } return result; } internal int inflate_trees_dynamic(int nl, int nd, int[] c, int[] bl, int[] bd, int[] tl, int[] td, int[] hp, ZlibCodec z) { int result; // build literal/length tree initWorkArea(288); hn[0] = 0; result = huft_build(c, 0, nl, 257, cplens, cplext, tl, bl, hp, hn, v); if (result != Z_OK || bl[0] == 0) { if (result == Z_DATA_ERROR) { z.Message = "oversubscribed literal/length tree"; } else if (result != Z_MEM_ERROR) { z.Message = "incomplete literal/length tree"; result = Z_DATA_ERROR; } return result; } // build distance tree initWorkArea(288); result = huft_build(c, nl, nd, 0, cpdist, cpdext, td, bd, hp, hn, v); if (result != Z_OK || (bd[0] == 0 && nl > 257)) { if (result == Z_DATA_ERROR) { z.Message = "oversubscribed distance tree"; } else if (result == Z_BUF_ERROR) { z.Message = "incomplete distance tree"; result = Z_DATA_ERROR; } else if (result != Z_MEM_ERROR) { z.Message = "empty distance tree with lengths"; result = Z_DATA_ERROR; } return result; } return Z_OK; } internal static int inflate_trees_fixed(int[] bl, int[] bd, int[][] tl, int[][] td, ZlibCodec z) { bl[0] = fixed_bl; bd[0] = fixed_bd; tl[0] = fixed_tl; td[0] = fixed_td; return Z_OK; } private void initWorkArea(int vsize) { if (hn == null) { hn = new int[1]; v = new int[vsize]; c = new int[BMAX + 1]; r = new int[3]; u = new int[BMAX]; x = new int[BMAX + 1]; } else { if (v.Length < vsize) { v = new int[vsize]; } 
Array.Clear(v,0,vsize); Array.Clear(c,0,BMAX+1); r[0]=0; r[1]=0; r[2]=0; // for(int i=0; i // // ------------------------------------------------------------------ // // This module defines classes for decompression. This code is derived // from the jzlib implementation of zlib, but significantly modified. // The object model is not the same, and many of the behaviors are // different. Nonetheless, in keeping with the license for jzlib, I am // reproducing the copyright to that code here. // // ------------------------------------------------------------------ // // Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in // the documentation and/or other materials provided with the distribution. // // 3. The names of the authors may not be used to endorse or promote products // derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND // FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT, // INC. 
OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, // OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, // EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // ----------------------------------------------------------------------- // // This program is based on zlib-1.1.3; credit to authors // Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu) // and contributors of zlib. // // ----------------------------------------------------------------------- using System; namespace Ionic.Zlib { sealed class InflateBlocks { private const int MANY = 1440; // Table for deflate from PKZIP's appnote.txt. internal static readonly int[] border = new int[] { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 }; private enum InflateBlockMode { TYPE = 0, // get type bits (3, including end bit) LENS = 1, // get lengths for stored STORED = 2, // processing stored block TABLE = 3, // get table lengths BTREE = 4, // get bit lengths tree for a dynamic block DTREE = 5, // get length, distance trees for a dynamic block CODES = 6, // processing fixed or dynamic block DRY = 7, // output remaining window bytes DONE = 8, // finished last block, done BAD = 9, // ot a data error--stuck here } private InflateBlockMode mode; // current inflate_block mode internal int left; // if STORED, bytes left to copy internal int table; // table lengths (14 bits) internal int index; // index into blens (or border) internal int[] blens; // bit lengths of codes internal int[] bb = new int[1]; // bit length tree depth internal int[] tb = new int[1]; // bit length decoding tree internal InflateCodes codes = new 
InflateCodes(); // if CODES, current state internal int last; // true if this block is the last block internal ZlibCodec _codec; // pointer back to this zlib stream // mode independent information internal int bitk; // bits in bit buffer internal int bitb; // bit buffer internal int[] hufts; // single malloc for tree space internal byte[] window; // sliding window internal int end; // one byte after sliding window internal int readAt; // window read pointer internal int writeAt; // window write pointer internal System.Object checkfn; // check function internal uint check; // check on output internal InfTree inftree = new InfTree(); internal InflateBlocks(ZlibCodec codec, System.Object checkfn, int w) { _codec = codec; hufts = new int[MANY * 3]; window = new byte[w]; end = w; this.checkfn = checkfn; mode = InflateBlockMode.TYPE; Reset(); } internal uint Reset() { uint oldCheck = check; mode = InflateBlockMode.TYPE; bitk = 0; bitb = 0; readAt = writeAt = 0; if (checkfn != null) _codec._Adler32 = check = Adler.Adler32(0, null, 0, 0); return oldCheck; } internal int Process(int r) { int t; // temporary storage int b; // bit buffer int k; // bits in bit buffer int p; // input data pointer int n; // bytes available there int q; // output window write pointer int m; // bytes to end of window or read pointer // copy input/output information to locals (UPDATE macro restores) p = _codec.NextIn; n = _codec.AvailableBytesIn; b = bitb; k = bitk; q = writeAt; m = (int)(q < readAt ? 
readAt - q - 1 : end - q); // process input based on current state while (true) { switch (mode) { case InflateBlockMode.TYPE: while (k < (3)) { if (n != 0) { r = ZlibConstants.Z_OK; } else { bitb = b; bitk = k; _codec.AvailableBytesIn = n; _codec.TotalBytesIn += p - _codec.NextIn; _codec.NextIn = p; writeAt = q; return Flush(r); } n--; b |= (_codec.InputBuffer[p++] & 0xff) << k; k += 8; } t = (int)(b & 7); last = t & 1; switch ((uint)t >> 1) { case 0: // stored b >>= 3; k -= (3); t = k & 7; // go to byte boundary b >>= t; k -= t; mode = InflateBlockMode.LENS; // get length of stored block break; case 1: // fixed int[] bl = new int[1]; int[] bd = new int[1]; int[][] tl = new int[1][]; int[][] td = new int[1][]; InfTree.inflate_trees_fixed(bl, bd, tl, td, _codec); codes.Init(bl[0], bd[0], tl[0], 0, td[0], 0); b >>= 3; k -= 3; mode = InflateBlockMode.CODES; break; case 2: // dynamic b >>= 3; k -= 3; mode = InflateBlockMode.TABLE; break; case 3: // illegal b >>= 3; k -= 3; mode = InflateBlockMode.BAD; _codec.Message = "invalid block type"; r = ZlibConstants.Z_DATA_ERROR; bitb = b; bitk = k; _codec.AvailableBytesIn = n; _codec.TotalBytesIn += p - _codec.NextIn; _codec.NextIn = p; writeAt = q; return Flush(r); } break; case InflateBlockMode.LENS: while (k < (32)) { if (n != 0) { r = ZlibConstants.Z_OK; } else { bitb = b; bitk = k; _codec.AvailableBytesIn = n; _codec.TotalBytesIn += p - _codec.NextIn; _codec.NextIn = p; writeAt = q; return Flush(r); } ; n--; b |= (_codec.InputBuffer[p++] & 0xff) << k; k += 8; } if ( ( ((~b)>>16) & 0xffff) != (b & 0xffff)) { mode = InflateBlockMode.BAD; _codec.Message = "invalid stored block lengths"; r = ZlibConstants.Z_DATA_ERROR; bitb = b; bitk = k; _codec.AvailableBytesIn = n; _codec.TotalBytesIn += p - _codec.NextIn; _codec.NextIn = p; writeAt = q; return Flush(r); } left = (b & 0xffff); b = k = 0; // dump bits mode = left != 0 ? InflateBlockMode.STORED : (last != 0 ? 
InflateBlockMode.DRY : InflateBlockMode.TYPE); break; case InflateBlockMode.STORED: if (n == 0) { bitb = b; bitk = k; _codec.AvailableBytesIn = n; _codec.TotalBytesIn += p - _codec.NextIn; _codec.NextIn = p; writeAt = q; return Flush(r); } if (m == 0) { if (q == end && readAt != 0) { q = 0; m = (int)(q < readAt ? readAt - q - 1 : end - q); } if (m == 0) { writeAt = q; r = Flush(r); q = writeAt; m = (int)(q < readAt ? readAt - q - 1 : end - q); if (q == end && readAt != 0) { q = 0; m = (int)(q < readAt ? readAt - q - 1 : end - q); } if (m == 0) { bitb = b; bitk = k; _codec.AvailableBytesIn = n; _codec.TotalBytesIn += p - _codec.NextIn; _codec.NextIn = p; writeAt = q; return Flush(r); } } } r = ZlibConstants.Z_OK; t = left; if (t > n) t = n; if (t > m) t = m; Array.Copy(_codec.InputBuffer, p, window, q, t); p += t; n -= t; q += t; m -= t; if ((left -= t) != 0) break; mode = last != 0 ? InflateBlockMode.DRY : InflateBlockMode.TYPE; break; case InflateBlockMode.TABLE: while (k < (14)) { if (n != 0) { r = ZlibConstants.Z_OK; } else { bitb = b; bitk = k; _codec.AvailableBytesIn = n; _codec.TotalBytesIn += p - _codec.NextIn; _codec.NextIn = p; writeAt = q; return Flush(r); } n--; b |= (_codec.InputBuffer[p++] & 0xff) << k; k += 8; } table = t = (b & 0x3fff); if ((t & 0x1f) > 29 || ((t >> 5) & 0x1f) > 29) { mode = InflateBlockMode.BAD; _codec.Message = "too many length or distance symbols"; r = ZlibConstants.Z_DATA_ERROR; bitb = b; bitk = k; _codec.AvailableBytesIn = n; _codec.TotalBytesIn += p - _codec.NextIn; _codec.NextIn = p; writeAt = q; return Flush(r); } t = 258 + (t & 0x1f) + ((t >> 5) & 0x1f); if (blens == null || blens.Length < t) { blens = new int[t]; } else { Array.Clear(blens, 0, t); // for (int i = 0; i < t; i++) // { // blens[i] = 0; // } } b >>= 14; k -= 14; index = 0; mode = InflateBlockMode.BTREE; goto case InflateBlockMode.BTREE; case InflateBlockMode.BTREE: while (index < 4 + (table >> 10)) { while (k < (3)) { if (n != 0) { r = ZlibConstants.Z_OK; } 
else { bitb = b; bitk = k; _codec.AvailableBytesIn = n; _codec.TotalBytesIn += p - _codec.NextIn; _codec.NextIn = p; writeAt = q; return Flush(r); } n--; b |= (_codec.InputBuffer[p++] & 0xff) << k; k += 8; } blens[border[index++]] = b & 7; b >>= 3; k -= 3; } while (index < 19) { blens[border[index++]] = 0; } bb[0] = 7; t = inftree.inflate_trees_bits(blens, bb, tb, hufts, _codec); if (t != ZlibConstants.Z_OK) { r = t; if (r == ZlibConstants.Z_DATA_ERROR) { blens = null; mode = InflateBlockMode.BAD; } bitb = b; bitk = k; _codec.AvailableBytesIn = n; _codec.TotalBytesIn += p - _codec.NextIn; _codec.NextIn = p; writeAt = q; return Flush(r); } index = 0; mode = InflateBlockMode.DTREE; goto case InflateBlockMode.DTREE; case InflateBlockMode.DTREE: while (true) { t = table; if (!(index < 258 + (t & 0x1f) + ((t >> 5) & 0x1f))) { break; } int i, j, c; t = bb[0]; while (k < t) { if (n != 0) { r = ZlibConstants.Z_OK; } else { bitb = b; bitk = k; _codec.AvailableBytesIn = n; _codec.TotalBytesIn += p - _codec.NextIn; _codec.NextIn = p; writeAt = q; return Flush(r); } n--; b |= (_codec.InputBuffer[p++] & 0xff) << k; k += 8; } t = hufts[(tb[0] + (b & InternalInflateConstants.InflateMask[t])) * 3 + 1]; c = hufts[(tb[0] + (b & InternalInflateConstants.InflateMask[t])) * 3 + 2]; if (c < 16) { b >>= t; k -= t; blens[index++] = c; } else { // c == 16..18 i = c == 18 ? 7 : c - 14; j = c == 18 ? 
11 : 3; while (k < (t + i)) { if (n != 0) { r = ZlibConstants.Z_OK; } else { bitb = b; bitk = k; _codec.AvailableBytesIn = n; _codec.TotalBytesIn += p - _codec.NextIn; _codec.NextIn = p; writeAt = q; return Flush(r); } n--; b |= (_codec.InputBuffer[p++] & 0xff) << k; k += 8; } b >>= t; k -= t; j += (b & InternalInflateConstants.InflateMask[i]); b >>= i; k -= i; i = index; t = table; if (i + j > 258 + (t & 0x1f) + ((t >> 5) & 0x1f) || (c == 16 && i < 1)) { blens = null; mode = InflateBlockMode.BAD; _codec.Message = "invalid bit length repeat"; r = ZlibConstants.Z_DATA_ERROR; bitb = b; bitk = k; _codec.AvailableBytesIn = n; _codec.TotalBytesIn += p - _codec.NextIn; _codec.NextIn = p; writeAt = q; return Flush(r); } c = (c == 16) ? blens[i-1] : 0; do { blens[i++] = c; } while (--j != 0); index = i; } } tb[0] = -1; { int[] bl = new int[] { 9 }; // must be <= 9 for lookahead assumptions int[] bd = new int[] { 6 }; // must be <= 9 for lookahead assumptions int[] tl = new int[1]; int[] td = new int[1]; t = table; t = inftree.inflate_trees_dynamic(257 + (t & 0x1f), 1 + ((t >> 5) & 0x1f), blens, bl, bd, tl, td, hufts, _codec); if (t != ZlibConstants.Z_OK) { if (t == ZlibConstants.Z_DATA_ERROR) { blens = null; mode = InflateBlockMode.BAD; } r = t; bitb = b; bitk = k; _codec.AvailableBytesIn = n; _codec.TotalBytesIn += p - _codec.NextIn; _codec.NextIn = p; writeAt = q; return Flush(r); } codes.Init(bl[0], bd[0], hufts, tl[0], hufts, td[0]); } mode = InflateBlockMode.CODES; goto case InflateBlockMode.CODES; case InflateBlockMode.CODES: bitb = b; bitk = k; _codec.AvailableBytesIn = n; _codec.TotalBytesIn += p - _codec.NextIn; _codec.NextIn = p; writeAt = q; r = codes.Process(this, r); if (r != ZlibConstants.Z_STREAM_END) { return Flush(r); } r = ZlibConstants.Z_OK; p = _codec.NextIn; n = _codec.AvailableBytesIn; b = bitb; k = bitk; q = writeAt; m = (int)(q < readAt ? 
readAt - q - 1 : end - q); if (last == 0) { mode = InflateBlockMode.TYPE; break; } mode = InflateBlockMode.DRY; goto case InflateBlockMode.DRY; case InflateBlockMode.DRY: writeAt = q; r = Flush(r); q = writeAt; m = (int)(q < readAt ? readAt - q - 1 : end - q); if (readAt != writeAt) { bitb = b; bitk = k; _codec.AvailableBytesIn = n; _codec.TotalBytesIn += p - _codec.NextIn; _codec.NextIn = p; writeAt = q; return Flush(r); } mode = InflateBlockMode.DONE; goto case InflateBlockMode.DONE; case InflateBlockMode.DONE: r = ZlibConstants.Z_STREAM_END; bitb = b; bitk = k; _codec.AvailableBytesIn = n; _codec.TotalBytesIn += p - _codec.NextIn; _codec.NextIn = p; writeAt = q; return Flush(r); case InflateBlockMode.BAD: r = ZlibConstants.Z_DATA_ERROR; bitb = b; bitk = k; _codec.AvailableBytesIn = n; _codec.TotalBytesIn += p - _codec.NextIn; _codec.NextIn = p; writeAt = q; return Flush(r); default: r = ZlibConstants.Z_STREAM_ERROR; bitb = b; bitk = k; _codec.AvailableBytesIn = n; _codec.TotalBytesIn += p - _codec.NextIn; _codec.NextIn = p; writeAt = q; return Flush(r); } } } internal void Free() { Reset(); window = null; hufts = null; } internal void SetDictionary(byte[] d, int start, int n) { Array.Copy(d, start, window, 0, n); readAt = writeAt = n; } // Returns true if inflate is currently at the end of a block generated // by Z_SYNC_FLUSH or Z_FULL_FLUSH. internal int SyncPoint() { return mode == InflateBlockMode.LENS ? 1 : 0; } // copy as much as possible from the sliding window to the output area internal int Flush(int r) { int nBytes; for (int pass=0; pass < 2; pass++) { if (pass==0) { // compute number of bytes to copy as far as end of window nBytes = (int)((readAt <= writeAt ? 
writeAt : end) - readAt); } else { // compute bytes to copy nBytes = writeAt - readAt; } // workitem 8870 if (nBytes == 0) { if (r == ZlibConstants.Z_BUF_ERROR) r = ZlibConstants.Z_OK; return r; } if (nBytes > _codec.AvailableBytesOut) nBytes = _codec.AvailableBytesOut; if (nBytes != 0 && r == ZlibConstants.Z_BUF_ERROR) r = ZlibConstants.Z_OK; // update counters _codec.AvailableBytesOut -= nBytes; _codec.TotalBytesOut += nBytes; // update check information if (checkfn != null) _codec._Adler32 = check = Adler.Adler32(check, window, readAt, nBytes); // copy as far as end of window Array.Copy(window, readAt, _codec.OutputBuffer, _codec.NextOut, nBytes); _codec.NextOut += nBytes; readAt += nBytes; // see if more to copy at beginning of window if (readAt == end && pass == 0) { // wrap pointers readAt = 0; if (writeAt == end) writeAt = 0; } else pass++; } // done return r; } } internal static class InternalInflateConstants { // And'ing with mask[n] masks the lower n bits internal static readonly int[] InflateMask = new int[] { 0x00000000, 0x00000001, 0x00000003, 0x00000007, 0x0000000f, 0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff, 0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff, 0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff }; } sealed class InflateCodes { // waiting for "i:"=input, // "o:"=output, // "x:"=nothing private const int START = 0; // x: set up for LEN private const int LEN = 1; // i: get length/literal/eob next private const int LENEXT = 2; // i: getting length extra (have base) private const int DIST = 3; // i: get distance next private const int DISTEXT = 4; // i: getting distance extra private const int COPY = 5; // o: copying bytes in window, waiting for space private const int LIT = 6; // o: got literal, waiting for output space private const int WASH = 7; // o: got eob, possibly still output waiting private const int END = 8; // x: got eob and all data flushed private const int BADCODE = 9; // x: got error internal int mode; // current inflate_codes 
mode // mode dependent information internal int len; internal int[] tree; // pointer into tree internal int tree_index = 0; internal int need; // bits needed internal int lit; // if EXT or COPY, where and how much internal int bitsToGet; // bits to get for extra internal int dist; // distance back to copy from internal byte lbits; // ltree bits decoded per branch internal byte dbits; // dtree bits decoder per branch internal int[] ltree; // literal/length/eob tree internal int ltree_index; // literal/length/eob tree internal int[] dtree; // distance tree internal int dtree_index; // distance tree internal InflateCodes() { } internal void Init(int bl, int bd, int[] tl, int tl_index, int[] td, int td_index) { mode = START; lbits = (byte)bl; dbits = (byte)bd; ltree = tl; ltree_index = tl_index; dtree = td; dtree_index = td_index; tree = null; } internal int Process(InflateBlocks blocks, int r) { int j; // temporary storage int tindex; // temporary pointer int e; // extra bits or operation int b = 0; // bit buffer int k = 0; // bits in bit buffer int p = 0; // input data pointer int n; // bytes available there int q; // output window write pointer int m; // bytes to end of window or read pointer int f; // pointer to copy strings from ZlibCodec z = blocks._codec; // copy input/output information to locals (UPDATE macro restores) p = z.NextIn; n = z.AvailableBytesIn; b = blocks.bitb; k = blocks.bitk; q = blocks.writeAt; m = q < blocks.readAt ? 
blocks.readAt - q - 1 : blocks.end - q; // process input and output based on current state while (true) { switch (mode) { // waiting for "i:"=input, "o:"=output, "x:"=nothing case START: // x: set up for LEN if (m >= 258 && n >= 10) { blocks.bitb = b; blocks.bitk = k; z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p; blocks.writeAt = q; r = InflateFast(lbits, dbits, ltree, ltree_index, dtree, dtree_index, blocks, z); p = z.NextIn; n = z.AvailableBytesIn; b = blocks.bitb; k = blocks.bitk; q = blocks.writeAt; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q; if (r != ZlibConstants.Z_OK) { mode = (r == ZlibConstants.Z_STREAM_END) ? WASH : BADCODE; break; } } need = lbits; tree = ltree; tree_index = ltree_index; mode = LEN; goto case LEN; case LEN: // i: get length/literal/eob next j = need; while (k < j) { if (n != 0) r = ZlibConstants.Z_OK; else { blocks.bitb = b; blocks.bitk = k; z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p; blocks.writeAt = q; return blocks.Flush(r); } n--; b |= (z.InputBuffer[p++] & 0xff) << k; k += 8; } tindex = (tree_index + (b & InternalInflateConstants.InflateMask[j])) * 3; b >>= (tree[tindex + 1]); k -= (tree[tindex + 1]); e = tree[tindex]; if (e == 0) { // literal lit = tree[tindex + 2]; mode = LIT; break; } if ((e & 16) != 0) { // length bitsToGet = e & 15; len = tree[tindex + 2]; mode = LENEXT; break; } if ((e & 64) == 0) { // next table need = e; tree_index = tindex / 3 + tree[tindex + 2]; break; } if ((e & 32) != 0) { // end of block mode = WASH; break; } mode = BADCODE; // invalid code z.Message = "invalid literal/length code"; r = ZlibConstants.Z_DATA_ERROR; blocks.bitb = b; blocks.bitk = k; z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p; blocks.writeAt = q; return blocks.Flush(r); case LENEXT: // i: getting length extra (have base) j = bitsToGet; while (k < j) { if (n != 0) r = ZlibConstants.Z_OK; else { blocks.bitb = b; blocks.bitk = k; z.AvailableBytesIn 
= n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p; blocks.writeAt = q; return blocks.Flush(r); } n--; b |= (z.InputBuffer[p++] & 0xff) << k; k += 8; } len += (b & InternalInflateConstants.InflateMask[j]); b >>= j; k -= j; need = dbits; tree = dtree; tree_index = dtree_index; mode = DIST; goto case DIST; case DIST: // i: get distance next j = need; while (k < j) { if (n != 0) r = ZlibConstants.Z_OK; else { blocks.bitb = b; blocks.bitk = k; z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p; blocks.writeAt = q; return blocks.Flush(r); } n--; b |= (z.InputBuffer[p++] & 0xff) << k; k += 8; } tindex = (tree_index + (b & InternalInflateConstants.InflateMask[j])) * 3; b >>= tree[tindex + 1]; k -= tree[tindex + 1]; e = (tree[tindex]); if ((e & 0x10) != 0) { // distance bitsToGet = e & 15; dist = tree[tindex + 2]; mode = DISTEXT; break; } if ((e & 64) == 0) { // next table need = e; tree_index = tindex / 3 + tree[tindex + 2]; break; } mode = BADCODE; // invalid code z.Message = "invalid distance code"; r = ZlibConstants.Z_DATA_ERROR; blocks.bitb = b; blocks.bitk = k; z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p; blocks.writeAt = q; return blocks.Flush(r); case DISTEXT: // i: getting distance extra j = bitsToGet; while (k < j) { if (n != 0) r = ZlibConstants.Z_OK; else { blocks.bitb = b; blocks.bitk = k; z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p; blocks.writeAt = q; return blocks.Flush(r); } n--; b |= (z.InputBuffer[p++] & 0xff) << k; k += 8; } dist += (b & InternalInflateConstants.InflateMask[j]); b >>= j; k -= j; mode = COPY; goto case COPY; case COPY: // o: copying bytes in window, waiting for space f = q - dist; while (f < 0) { // modulo window size-"while" instead f += blocks.end; // of "if" handles invalid distances } while (len != 0) { if (m == 0) { if (q == blocks.end && blocks.readAt != 0) { q = 0; m = q < blocks.readAt ? 
blocks.readAt - q - 1 : blocks.end - q; } if (m == 0) { blocks.writeAt = q; r = blocks.Flush(r); q = blocks.writeAt; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q; if (q == blocks.end && blocks.readAt != 0) { q = 0; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q; } if (m == 0) { blocks.bitb = b; blocks.bitk = k; z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p; blocks.writeAt = q; return blocks.Flush(r); } } } blocks.window[q++] = blocks.window[f++]; m--; if (f == blocks.end) f = 0; len--; } mode = START; break; case LIT: // o: got literal, waiting for output space if (m == 0) { if (q == blocks.end && blocks.readAt != 0) { q = 0; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q; } if (m == 0) { blocks.writeAt = q; r = blocks.Flush(r); q = blocks.writeAt; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q; if (q == blocks.end && blocks.readAt != 0) { q = 0; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q; } if (m == 0) { blocks.bitb = b; blocks.bitk = k; z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p; blocks.writeAt = q; return blocks.Flush(r); } } } r = ZlibConstants.Z_OK; blocks.window[q++] = (byte)lit; m--; mode = START; break; case WASH: // o: got eob, possibly more output if (k > 7) { // return unused byte, if any k -= 8; n++; p--; // can always return one } blocks.writeAt = q; r = blocks.Flush(r); q = blocks.writeAt; m = q < blocks.readAt ? 
blocks.readAt - q - 1 : blocks.end - q;
                        // Window not fully drained by the consumer yet:
                        // save bit-buffer/input state and flush before reporting end.
                        if (blocks.readAt != blocks.writeAt)
                        {
                            blocks.bitb = b; blocks.bitk = k;
                            z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
                            blocks.writeAt = q;
                            return blocks.Flush(r);
                        }
                        mode = END;
                        goto case END;

                    case END:
                        r = ZlibConstants.Z_STREAM_END;
                        blocks.bitb = b; blocks.bitk = k;
                        z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
                        blocks.writeAt = q;
                        return blocks.Flush(r);

                    case BADCODE: // x: got error
                        r = ZlibConstants.Z_DATA_ERROR;
                        blocks.bitb = b; blocks.bitk = k;
                        z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
                        blocks.writeAt = q;
                        return blocks.Flush(r);

                    default:
                        r = ZlibConstants.Z_STREAM_ERROR;
                        blocks.bitb = b; blocks.bitk = k;
                        z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
                        blocks.writeAt = q;
                        return blocks.Flush(r);
                }
            }
        }

        // Called with number of bytes left to write in window at least 258
        // (the maximum string length) and number of input bytes available
        // at least ten.  The ten bytes are six bytes for the longest length/
        // distance pair plus four bytes for overloading the bit buffer.
        //
        // bl/bd are the number of bits for the literal/length and distance
        // table lookups; tl/td (with tl_index/td_index) are the decode tables
        // laid out as triples (op, bits, value).  Returns a Z_* status code.
        internal int InflateFast(int bl, int bd, int[] tl, int tl_index, int[] td, int td_index, InflateBlocks s, ZlibCodec z)
        {
            int t;            // temporary pointer
            int[] tp;         // temporary pointer
            int tp_index;     // temporary pointer
            int e;            // extra bits or operation
            int b;            // bit buffer
            int k;            // bits in bit buffer
            int p;            // input data pointer
            int n;            // bytes available there
            int q;            // output window write pointer
            int m;            // bytes to end of window or read pointer
            int ml;           // mask for literal/length tree
            int md;           // mask for distance tree
            int c;            // bytes to copy
            int d;            // distance back to copy from
            int r;            // copy source pointer
            int tp_index_t_3; // (tp_index+t)*3

            // load input, output, bit values
            p = z.NextIn; n = z.AvailableBytesIn;
            b = s.bitb; k = s.bitk;
            q = s.writeAt;
            m = q < s.readAt ? s.readAt - q - 1 : s.end - q;

            // initialize masks
            ml = InternalInflateConstants.InflateMask[bl];
            md = InternalInflateConstants.InflateMask[bd];

            // do until not enough input or output space for fast loop
            do
            {
                // assume called with m >= 258 && n >= 10

                // get literal/length code
                while (k < (20))
                {
                    // max bits for literal/length code
                    n--;
                    b |= (z.InputBuffer[p++] & 0xff) << k;
                    k += 8;
                }

                t = b & ml;
                tp = tl;
                tp_index = tl_index;
                tp_index_t_3 = (tp_index + t) * 3;
                // op == 0 means the entry is a plain literal byte.
                if ((e = tp[tp_index_t_3]) == 0)
                {
                    b >>= (tp[tp_index_t_3 + 1]);
                    k -= (tp[tp_index_t_3 + 1]);
                    s.window[q++] = (byte)tp[tp_index_t_3 + 2];
                    m--;
                    continue;
                }
                do
                {
                    b >>= (tp[tp_index_t_3 + 1]);
                    k -= (tp[tp_index_t_3 + 1]);

                    if ((e & 16) != 0)
                    {
                        // length base entry: low 4 bits of op = number of extra bits
                        e &= 15;
                        c = tp[tp_index_t_3 + 2] + ((int)b & InternalInflateConstants.InflateMask[e]);
                        b >>= e;
                        k -= e;

                        // decode distance base of block to copy
                        while (k < 15)
                        {
                            // max bits for distance code
                            n--;
                            b |= (z.InputBuffer[p++] & 0xff) << k;
                            k += 8;
                        }
                        t = b & md;
                        tp = td;
                        tp_index = td_index;
                        tp_index_t_3 = (tp_index + t) * 3;
                        e = tp[tp_index_t_3];

                        do
                        {
                            b >>= (tp[tp_index_t_3 + 1]);
                            k -= (tp[tp_index_t_3 + 1]);

                            if ((e & 16) != 0)
                            {
                                // get extra bits to add to distance base
                                e &= 15;
                                while (k < e)
                                {
                                    // get extra bits (up to 13)
                                    n--;
                                    b |= (z.InputBuffer[p++] & 0xff) << k;
                                    k += 8;
                                }
                                d = tp[tp_index_t_3 + 2] + (b & InternalInflateConstants.InflateMask[e]);
                                b >>= e;
                                k -= e;

                                // do the copy
                                m -= c;
                                if (q >= d)
                                {
                                    // offset before dest
                                    // just copy
                                    r = q - d;
                                    if (q - r > 0 && 2 > (q - r))
                                    {
                                        s.window[q++] = s.window[r++]; // minimum count is three,
                                        s.window[q++] = s.window[r++]; // so unroll loop a little
                                        c -= 2;
                                    }
                                    else
                                    {
                                        Array.Copy(s.window, r, s.window, q, 2);
                                        q += 2; r += 2; c -= 2;
                                    }
                                }
                                else
                                {
                                    // else offset after destination
                                    r = q - d;
                                    do
                                    {
                                        r += s.end; // force pointer in window
                                    }
                                    while (r < 0); // covers invalid distances
                                    e = s.end - r;
                                    if (c > e)
                                    {
                                        // if source crosses,
                                        c -= e; // wrapped copy
                                        if (q - r > 0 && e > (q - r))
                                        {
                                            do
                                            {
                                                s.window[q++] = s.window[r++];
                                            }
                                            while (--e != 0);
                                        }
                                        else
                                        {
                                            Array.Copy(s.window, r, s.window, q, e);
                                            q += e; r += e; e = 0;
                                        }
                                        r = 0; // copy rest from start of window
                                    }
                                }

                                // copy all or what's left
                                if (q - r > 0 && c > (q - r))
                                {
                                    do
                                    {
                                        s.window[q++] = s.window[r++];
                                    }
                                    while (--c != 0);
                                }
                                else
                                {
                                    Array.Copy(s.window, r, s.window, q, c);
                                    q += c; r += c; c = 0;
                                }
                                break;
                            }
                            else if ((e & 64) == 0)
                            {
                                // second-level distance table lookup
                                t += tp[tp_index_t_3 + 2];
                                t += (b & InternalInflateConstants.InflateMask[e]);
                                tp_index_t_3 = (tp_index + t) * 3;
                                e = tp[tp_index_t_3];
                            }
                            else
                            {
                                // invalid distance code: rewind whole bytes left in the
                                // bit buffer back onto the input, save state, and fail.
                                z.Message = "invalid distance code";

                                c = z.AvailableBytesIn - n;
                                c = (k >> 3) < c ? k >> 3 : c;
                                n += c;
                                p -= c;
                                k -= (c << 3);

                                s.bitb = b; s.bitk = k;
                                z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
                                s.writeAt = q;
                                return ZlibConstants.Z_DATA_ERROR;
                            }
                        }
                        while (true);
                        break;
                    }

                    if ((e & 64) == 0)
                    {
                        // second-level literal/length table lookup
                        t += tp[tp_index_t_3 + 2];
                        t += (b & InternalInflateConstants.InflateMask[e]);
                        tp_index_t_3 = (tp_index + t) * 3;
                        if ((e = tp[tp_index_t_3]) == 0)
                        {
                            b >>= (tp[tp_index_t_3 + 1]);
                            k -= (tp[tp_index_t_3 + 1]);
                            s.window[q++] = (byte)tp[tp_index_t_3 + 2];
                            m--;
                            break;
                        }
                    }
                    else if ((e & 32) != 0)
                    {
                        // end-of-block code: rewind unused whole bytes and report end.
                        c = z.AvailableBytesIn - n;
                        c = (k >> 3) < c ? k >> 3 : c;
                        n += c;
                        p -= c;
                        k -= (c << 3);

                        s.bitb = b; s.bitk = k;
                        z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
                        s.writeAt = q;
                        return ZlibConstants.Z_STREAM_END;
                    }
                    else
                    {
                        z.Message = "invalid literal/length code";

                        c = z.AvailableBytesIn - n;
                        c = (k >> 3) < c ? k >> 3 : c;
                        n += c;
                        p -= c;
                        k -= (c << 3);

                        s.bitb = b; s.bitk = k;
                        z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
                        s.writeAt = q;
                        return ZlibConstants.Z_DATA_ERROR;
                    }
                }
                while (true);
            }
            while (m >= 258 && n >= 10);

            // not enough input or output--restore pointers and return
            c = z.AvailableBytesIn - n;
            c = (k >> 3) < c ?
k >> 3 : c;
            n += c;
            p -= c;
            k -= (c << 3);

            // save bit-buffer and window state back into the codec/blocks
            s.bitb = b; s.bitk = k;
            z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
            s.writeAt = q;
            return ZlibConstants.Z_OK;
        }
    }


    // InflateManager drives the zlib (RFC 1950) wrapper state machine around the
    // raw inflate blocks: method/flag header bytes, optional preset-dictionary
    // check bytes, the compressed blocks themselves, and the trailing Adler-32
    // check bytes.
    internal sealed class InflateManager
    {
        // preset dictionary flag in zlib header
        private const int PRESET_DICT = 0x20;

        private const int Z_DEFLATED = 8;

        private enum InflateManagerMode
        {
            METHOD = 0,  // waiting for method byte
            FLAG = 1,    // waiting for flag byte
            DICT4 = 2,   // four dictionary check bytes to go
            DICT3 = 3,   // three dictionary check bytes to go
            DICT2 = 4,   // two dictionary check bytes to go
            DICT1 = 5,   // one dictionary check byte to go
            DICT0 = 6,   // waiting for inflateSetDictionary
            BLOCKS = 7,  // decompressing blocks
            CHECK4 = 8,  // four check bytes to go
            CHECK3 = 9,  // three check bytes to go
            CHECK2 = 10, // two check bytes to go
            CHECK1 = 11, // one check byte to go
            DONE = 12,   // finished check, done
            BAD = 13,    // got an error--stay here
        }

        private InflateManagerMode mode; // current inflate mode

        internal ZlibCodec _codec; // pointer back to this zlib stream

        // mode dependent information
        internal int method; // if FLAGS, method byte

        // if CHECK, check values to compare
        internal uint computedCheck; // computed check value
        internal uint expectedCheck; // stream check value

        // if BAD, inflateSync's marker bytes count
        internal int marker;

        // mode independent information
        //internal int nowrap; // flag for no wrapper

        // When true (the default), expect and verify the RFC 1950 header and
        // trailing check bytes; when false, inflate raw deflate blocks only.
        private bool _handleRfc1950HeaderBytes = true;
        internal bool HandleRfc1950HeaderBytes
        {
            get { return _handleRfc1950HeaderBytes; }
            set { _handleRfc1950HeaderBytes = value; }
        }

        internal int wbits; // log2(window size)  (8..15, defaults to 15)

        internal InflateBlocks blocks; // current inflate_blocks state

        public InflateManager() { }

        public InflateManager(bool expectRfc1950HeaderBytes)
        {
            _handleRfc1950HeaderBytes = expectRfc1950HeaderBytes;
        }

        // Reset counters, message and mode; the starting mode depends on whether
        // an RFC 1950 header is expected.
        internal int Reset()
        {
            _codec.TotalBytesIn = _codec.TotalBytesOut = 0;
            _codec.Message = null;
            mode = HandleRfc1950HeaderBytes ? InflateManagerMode.METHOD : InflateManagerMode.BLOCKS;
            blocks.Reset();
            return ZlibConstants.Z_OK;
        }

        // Release the blocks state.  Always returns Z_OK.
        internal int End()
        {
            if (blocks != null)
                blocks.Free();
            blocks = null;
            return ZlibConstants.Z_OK;
        }

        // Validate the window size (8..15), allocate the blocks state and reset.
        internal int Initialize(ZlibCodec codec, int w)
        {
            _codec = codec;
            _codec.Message = null;
            blocks = null;

            // handle undocumented nowrap option (no zlib header or check)
            //nowrap = 0;
            //if (w < 0)
            //{
            //    w = - w;
            //    nowrap = 1;
            //}

            // set window size
            if (w < 8 || w > 15)
            {
                End();
                throw new ZlibException("Bad window size.");
                //return ZlibConstants.Z_STREAM_ERROR;
            }
            wbits = w;

            blocks = new InflateBlocks(codec, HandleRfc1950HeaderBytes ? this : null, 1 << w);

            // reset state
            Reset();
            return ZlibConstants.Z_OK;
        }

        // Run the wrapper state machine over the codec's input buffer.
        // Returns Z_OK / Z_STREAM_END / Z_NEED_DICT / Z_BUF_ERROR / Z_STREAM_ERROR.
        internal int Inflate(FlushType flush)
        {
            int b;

            if (_codec.InputBuffer == null)
                throw new ZlibException("InputBuffer is null. ");

            //             int f = (flush == FlushType.Finish)
            //                 ? ZlibConstants.Z_BUF_ERROR
            //                 : ZlibConstants.Z_OK;

            // workitem 8870
            int f = ZlibConstants.Z_OK;
            int r = ZlibConstants.Z_BUF_ERROR;

            while (true)
            {
                switch (mode)
                {
                    case InflateManagerMode.METHOD:
                        if (_codec.AvailableBytesIn == 0) return r;
                        r = f;
                        _codec.AvailableBytesIn--;
                        _codec.TotalBytesIn++;
                        if (((method = _codec.InputBuffer[_codec.NextIn++]) & 0xf) != Z_DEFLATED)
                        {
                            mode = InflateManagerMode.BAD;
                            _codec.Message = String.Format("unknown compression method (0x{0:X2})", method);
                            marker = 5; // can't try inflateSync
                            break;
                        }
                        if ((method >> 4) + 8 > wbits)
                        {
                            mode = InflateManagerMode.BAD;
                            _codec.Message = String.Format("invalid window size ({0})", (method >> 4) + 8);
                            marker = 5; // can't try inflateSync
                            break;
                        }
                        mode = InflateManagerMode.FLAG;
                        break;

                    case InflateManagerMode.FLAG:
                        if (_codec.AvailableBytesIn == 0) return r;
                        r = f;
                        _codec.AvailableBytesIn--;
                        _codec.TotalBytesIn++;
                        b = (_codec.InputBuffer[_codec.NextIn++]) & 0xff;

                        // CMF/FLG pair must be a multiple of 31 (RFC 1950 header check)
                        if ((((method << 8) + b) % 31) != 0)
                        {
                            mode = InflateManagerMode.BAD;
                            _codec.Message = "incorrect header check";
                            marker = 5; // can't try inflateSync
                            break;
                        }
                        mode = ((b & PRESET_DICT) == 0) ? InflateManagerMode.BLOCKS : InflateManagerMode.DICT4;
                        break;

                    case InflateManagerMode.DICT4:
                        if (_codec.AvailableBytesIn == 0) return r;
                        r = f;
                        _codec.AvailableBytesIn--;
                        _codec.TotalBytesIn++;
                        expectedCheck = (uint)((_codec.InputBuffer[_codec.NextIn++] << 24) & 0xff000000);
                        mode = InflateManagerMode.DICT3;
                        break;

                    case InflateManagerMode.DICT3:
                        if (_codec.AvailableBytesIn == 0) return r;
                        r = f;
                        _codec.AvailableBytesIn--;
                        _codec.TotalBytesIn++;
                        expectedCheck += (uint)((_codec.InputBuffer[_codec.NextIn++] << 16) & 0x00ff0000);
                        mode = InflateManagerMode.DICT2;
                        break;

                    case InflateManagerMode.DICT2:
                        if (_codec.AvailableBytesIn == 0) return r;
                        r = f;
                        _codec.AvailableBytesIn--;
                        _codec.TotalBytesIn++;
                        expectedCheck += (uint)((_codec.InputBuffer[_codec.NextIn++] << 8) & 0x0000ff00);
                        mode = InflateManagerMode.DICT1;
                        break;

                    case InflateManagerMode.DICT1:
                        if (_codec.AvailableBytesIn == 0) return r;
                        r = f;
                        _codec.AvailableBytesIn--;
                        _codec.TotalBytesIn++;
                        expectedCheck += (uint)(_codec.InputBuffer[_codec.NextIn++] & 0x000000ff);
                        _codec._Adler32 = expectedCheck;
                        mode = InflateManagerMode.DICT0;
                        return ZlibConstants.Z_NEED_DICT;

                    case InflateManagerMode.DICT0:
                        mode = InflateManagerMode.BAD;
                        _codec.Message = "need dictionary";
                        marker = 0; // can try inflateSync
                        return ZlibConstants.Z_STREAM_ERROR;

                    case InflateManagerMode.BLOCKS:
                        r = blocks.Process(r);
                        if (r == ZlibConstants.Z_DATA_ERROR)
                        {
                            mode = InflateManagerMode.BAD;
                            marker = 0; // can try inflateSync
                            break;
                        }
                        if (r == ZlibConstants.Z_OK) r = f;
                        if (r != ZlibConstants.Z_STREAM_END)
                            return r;
                        r = f;
                        computedCheck = blocks.Reset();
                        if (!HandleRfc1950HeaderBytes)
                        {
                            mode = InflateManagerMode.DONE;
                            return ZlibConstants.Z_STREAM_END;
                        }
                        mode = InflateManagerMode.CHECK4;
                        break;

                    case InflateManagerMode.CHECK4:
                        if (_codec.AvailableBytesIn == 0) return r;
                        r = f;
                        _codec.AvailableBytesIn--;
                        _codec.TotalBytesIn++;
                        expectedCheck = (uint)((_codec.InputBuffer[_codec.NextIn++] << 24) & 0xff000000);
                        mode = InflateManagerMode.CHECK3;
                        break;

                    case InflateManagerMode.CHECK3:
                        if (_codec.AvailableBytesIn == 0) return r;
                        r = f;
                        _codec.AvailableBytesIn--;
                        _codec.TotalBytesIn++;
                        expectedCheck += (uint)((_codec.InputBuffer[_codec.NextIn++] << 16) & 0x00ff0000);
                        mode = InflateManagerMode.CHECK2;
                        break;

                    case InflateManagerMode.CHECK2:
                        if (_codec.AvailableBytesIn == 0) return r;
                        r = f;
                        _codec.AvailableBytesIn--;
                        _codec.TotalBytesIn++;
                        expectedCheck += (uint)((_codec.InputBuffer[_codec.NextIn++] << 8) & 0x0000ff00);
                        mode = InflateManagerMode.CHECK1;
                        break;

                    case InflateManagerMode.CHECK1:
                        if (_codec.AvailableBytesIn == 0) return r;
                        r = f;
                        _codec.AvailableBytesIn--;
                        _codec.TotalBytesIn++;
                        expectedCheck += (uint)(_codec.InputBuffer[_codec.NextIn++] & 0x000000ff);

                        // compare stored Adler-32 against the one computed over the output
                        if (computedCheck != expectedCheck)
                        {
                            mode = InflateManagerMode.BAD;
                            _codec.Message = "incorrect data check";
                            marker = 5; // can't try inflateSync
                            break;
                        }
                        mode = InflateManagerMode.DONE;
                        return ZlibConstants.Z_STREAM_END;

                    case InflateManagerMode.DONE:
                        return ZlibConstants.Z_STREAM_END;

                    case InflateManagerMode.BAD:
                        throw new ZlibException(String.Format("Bad state ({0})", _codec.Message));

                    default:
                        throw new ZlibException("Stream error.");
                }
            }
        }

        // Install a preset dictionary; only legal in DICT0 mode, and the
        // dictionary's Adler-32 must match the one read from the stream.
        internal int SetDictionary(byte[] dictionary)
        {
            int index = 0;
            int length = dictionary.Length;
            if (mode != InflateManagerMode.DICT0)
                throw new ZlibException("Stream error.");

            if (Adler.Adler32(1, dictionary, 0, dictionary.Length) != _codec._Adler32)
            {
                return ZlibConstants.Z_DATA_ERROR;
            }

            _codec._Adler32 = Adler.Adler32(0, null, 0, 0);

            // only the last (1 << wbits) - 1 bytes of a longer dictionary are usable
            if (length >= (1 << wbits))
            {
                length = (1 << wbits) - 1;
                index = dictionary.Length - length;
            }
            blocks.SetDictionary(dictionary, index, length);
            mode = InflateManagerMode.BLOCKS;
            return ZlibConstants.Z_OK;
        }

        // Marker pattern emitted by a sync flush: an empty stored block.
        private static readonly byte[] mark = new byte[] { 0, 0, 0xff, 0xff };

        // Scan the input for the sync marker and, if found, restart block decoding.
        internal int Sync()
        {
            int n;     // number of bytes to look at
            int p;     // pointer to bytes
            int m;     // number of marker bytes found in a row
            long r, w; // temporaries to save total_in and total_out

            // set up
            if (mode != InflateManagerMode.BAD)
            {
                mode = InflateManagerMode.BAD;
                marker = 0;
            }
            if ((n = _codec.AvailableBytesIn) == 0)
                return ZlibConstants.Z_BUF_ERROR;
            p = _codec.NextIn;
            m = marker;

            // search
            while (n != 0 && m < 4)
            {
                if (_codec.InputBuffer[p] == mark[m])
                {
                    m++;
                }
                else if (_codec.InputBuffer[p] != 0)
                {
                    m = 0;
                }
                else
                {
                    m = 4 - m;
                }
                p++; n--;
            }

            // restore
            _codec.TotalBytesIn += p - _codec.NextIn;
            _codec.NextIn = p;
            _codec.AvailableBytesIn = n;
            marker = m;

            // return no joy or set up to restart on a new block
            if (m != 4)
            {
                return ZlibConstants.Z_DATA_ERROR;
            }
            r = _codec.TotalBytesIn;
            w = _codec.TotalBytesOut;
            Reset();
            _codec.TotalBytesIn = r;
            _codec.TotalBytesOut = w;
            mode = InflateManagerMode.BLOCKS;
            return ZlibConstants.Z_OK;
        }

        // Returns true if inflate is currently at the end of a block generated
        // by Z_SYNC_FLUSH or Z_FULL_FLUSH. This function is used by one PPP
        // implementation to provide an additional safety check. PPP uses Z_SYNC_FLUSH
        // but removes the length bytes of the resulting empty stored block. When
        // decompressing, PPP checks that at the end of input packet, inflate is
        // waiting for these length bytes.
        internal int SyncPoint(ZlibCodec z)
        {
            return blocks.SyncPoint();
        }
    }
}


================================================
FILE: src/FirebirdSql.Data.External/zlib/Tree.cs
================================================
// Tree.cs
// ------------------------------------------------------------------
//
// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
// All rights reserved.
//
// This code module is part of DotNetZip, a zipfile class library.
//
// ------------------------------------------------------------------
//
// This code is licensed under the Microsoft Public License.
// See the file License.txt for the license details.
// More info on: http://dotnetzip.codeplex.com // // ------------------------------------------------------------------ // // last saved (in emacs): // Time-stamp: <2009-October-28 13:29:50> // // ------------------------------------------------------------------ // // This module defines classes for zlib compression and // decompression. This code is derived from the jzlib implementation of // zlib. In keeping with the license for jzlib, the copyright to that // code is below. // // ------------------------------------------------------------------ // // Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in // the documentation and/or other materials provided with the distribution. // // 3. The names of the authors may not be used to endorse or promote products // derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND // FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT, // INC. 
OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, // OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, // EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // ----------------------------------------------------------------------- // // This program is based on zlib-1.1.3; credit to authors // Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu) // and contributors of zlib. // // ----------------------------------------------------------------------- using System; namespace Ionic.Zlib { sealed class Tree { private static readonly int HEAP_SIZE = (2 * InternalConstants.L_CODES + 1); // extra bits for each length code internal static readonly int[] ExtraLengthBits = new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0 }; // extra bits for each distance code internal static readonly int[] ExtraDistanceBits = new int[] { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13 }; // extra bits for each bit length code internal static readonly int[] extra_blbits = new int[]{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 7}; internal static readonly sbyte[] bl_order = new sbyte[]{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; // The lengths of the bit length codes are sent in order of decreasing // probability, to avoid transmitting the lengths for unused bit // length codes. 
        internal const int Buf_size = 8 * 2;

        // see definition of array dist_code below
        //internal const int DIST_CODE_LEN = 512;

        // First 256 entries: distance code for each distance 1..256.
        // Remaining 256 entries: distance code for larger distances,
        // indexed by the distance shifted right by 7 (see DistanceCode below).
        private static readonly sbyte[] _dist_code = new sbyte[]
        {
            0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
            8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
            10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
            11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
            12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
            12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
            13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
            13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
            14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
            14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
            14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
            14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
            15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
            15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
            15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
            15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
            0, 0, 16, 17, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21,
            22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
            24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
            25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
            26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
            26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
            27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
            27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
            28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
            28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
            28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
            28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
            29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
            29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
            29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
            29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29
        };

        // Length code for each normalized match length (0 == MIN_MATCH).
        internal static readonly sbyte[] LengthCode = new sbyte[]
        {
            0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 9, 10, 10, 11, 11,
            12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 15,
            16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17,
            18, 18, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19, 19, 19, 19,
            20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
            21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
            22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
            23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
            24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
            24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
            25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
            25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
            26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
            26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
            27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
            27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28
        };

        // Base match length for each length code (plus extra bits above).
        internal static readonly int[] LengthBase = new int[]
        {
            0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28,
            32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 0
        };

        // Base distance for each distance code (plus extra bits above).
        internal static readonly int[] DistanceBase = new int[]
        {
            0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
            256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144, 8192,
            12288, 16384, 24576
        };

        /// <summary>
        /// Map from a distance to a distance code.
        /// </summary>
        /// <remarks>
        /// No side effects. _dist_code[256] and _dist_code[257] are never used.
        /// </remarks>
        internal static int DistanceCode(int dist)
        {
            return (dist < 256) ?
_dist_code[dist] : _dist_code[256 + SharedUtils.URShift(dist, 7)];
        }

        internal short[] dyn_tree;       // the dynamic tree
        internal int max_code;           // largest code with non zero frequency
        internal StaticTree staticTree;  // the corresponding static tree

        // Compute the optimal bit lengths for a tree and update the total bit length
        // for the current block.
        // IN assertion: the fields freq and dad are set, heap[heap_max] and
        // above are the tree nodes sorted by increasing frequency.
        // OUT assertions: the field len is set to the optimal bit length, the
        // array bl_count contains the frequencies for each bit length.
        // The length opt_len is updated; static_len is also updated if stree is
        // not null.
        internal void gen_bitlen(DeflateManager s)
        {
            short[] tree = dyn_tree;
            short[] stree = staticTree.treeCodes;
            int[] extra = staticTree.extraBits;
            int base_Renamed = staticTree.extraBase;
            int max_length = staticTree.maxLength;
            int h;            // heap index
            int n, m;         // iterate over the tree elements
            int bits;         // bit length
            int xbits;        // extra bits
            short f;          // frequency
            int overflow = 0; // number of elements with bit length too large

            for (bits = 0; bits <= InternalConstants.MAX_BITS; bits++)
                s.bl_count[bits] = 0;

            // In a first pass, compute the optimal bit lengths (which may
            // overflow in the case of the bit length tree).
            tree[s.heap[s.heap_max] * 2 + 1] = 0; // root of the heap

            for (h = s.heap_max + 1; h < HEAP_SIZE; h++)
            {
                n = s.heap[h];
                bits = tree[tree[n * 2 + 1] * 2 + 1] + 1;
                if (bits > max_length)
                {
                    bits = max_length; overflow++;
                }
                tree[n * 2 + 1] = (short) bits;
                // We overwrite tree[n*2+1] which is no longer needed

                if (n > max_code)
                    continue; // not a leaf node

                s.bl_count[bits]++;
                xbits = 0;
                if (n >= base_Renamed)
                    xbits = extra[n - base_Renamed];
                f = tree[n * 2];
                s.opt_len += f * (bits + xbits);
                if (stree != null)
                    s.static_len += f * (stree[n * 2 + 1] + xbits);
            }
            if (overflow == 0)
                return ;

            // This happens for example on obj2 and pic of the Calgary corpus
            // Find the first bit length which could increase:
            do
            {
                bits = max_length - 1;
                while (s.bl_count[bits] == 0)
                    bits--;
                s.bl_count[bits]--;        // move one leaf down the tree

                // move one overflow item as its brother
                s.bl_count[bits + 1] = (short) (s.bl_count[bits + 1] + 2);
                s.bl_count[max_length]--;
                // The brother of the overflow item also moves one step up,
                // but this does not affect bl_count[max_length]
                overflow -= 2;
            }
            while (overflow > 0);

            for (bits = max_length; bits != 0; bits--)
            {
                n = s.bl_count[bits];
                while (n != 0)
                {
                    m = s.heap[--h];
                    if (m > max_code)
                        continue;
                    if (tree[m * 2 + 1] != bits)
                    {
                        s.opt_len = (int) (s.opt_len + ((long) bits - (long) tree[m * 2 + 1]) * (long) tree[m * 2]);
                        tree[m * 2 + 1] = (short) bits;
                    }
                    n--;
                }
            }
        }

        // Construct one Huffman tree and assigns the code bit strings and lengths.
        // Update the total bit length for the current block.
        // IN assertion: the field freq is set for all tree elements.
        // OUT assertions: the fields len and code are set to the optimal bit length
        // and corresponding code. The length opt_len is updated; static_len is
        // also updated if stree is not null. The field max_code is set.
        internal void build_tree(DeflateManager s)
        {
            short[] tree = dyn_tree;
            short[] stree = staticTree.treeCodes;
            int elems = staticTree.elems;
            int n, m;          // iterate over heap elements
            int max_code = -1; // largest code with non zero frequency
            int node;          // new node being created

            // Construct the initial heap, with least frequent element in
            // heap[1]. The sons of heap[n] are heap[2*n] and heap[2*n+1].
            // heap[0] is not used.
            s.heap_len = 0;
            s.heap_max = HEAP_SIZE;

            for (n = 0; n < elems; n++)
            {
                if (tree[n * 2] != 0)
                {
                    s.heap[++s.heap_len] = max_code = n;
                    s.depth[n] = 0;
                }
                else
                {
                    tree[n * 2 + 1] = 0;
                }
            }

            // The pkzip format requires that at least one distance code exists,
            // and that at least one bit should be sent even if there is only one
            // possible code. So to avoid special checks later on we force at least
            // two codes of non zero frequency.
            while (s.heap_len < 2)
            {
                node = s.heap[++s.heap_len] = (max_code < 2 ? ++max_code : 0);
                tree[node * 2] = 1;
                s.depth[node] = 0;
                s.opt_len--;
                if (stree != null)
                    s.static_len -= stree[node * 2 + 1];
                // node is 0 or 1 so it does not have extra bits
            }
            this.max_code = max_code;

            // The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree,
            // establish sub-heaps of increasing lengths:
            for (n = s.heap_len / 2; n >= 1; n--)
                s.pqdownheap(tree, n);

            // Construct the Huffman tree by repeatedly combining the least two
            // frequent nodes.
            node = elems; // next internal node of the tree
            do
            {
                // n = node of least frequency
                n = s.heap[1];
                s.heap[1] = s.heap[s.heap_len--];
                s.pqdownheap(tree, 1);
                m = s.heap[1]; // m = node of next least frequency

                s.heap[--s.heap_max] = n; // keep the nodes sorted by frequency
                s.heap[--s.heap_max] = m;

                // Create a new node father of n and m
                tree[node * 2] = unchecked((short) (tree[n * 2] + tree[m * 2]));
                s.depth[node] = (sbyte) (System.Math.Max((byte) s.depth[n], (byte) s.depth[m]) + 1);
                tree[n * 2 + 1] = tree[m * 2 + 1] = (short) node;

                // and insert the new node in the heap
                s.heap[1] = node++;
                s.pqdownheap(tree, 1);
            }
            while (s.heap_len >= 2);

            s.heap[--s.heap_max] = s.heap[1];

            // At this point, the fields freq and dad are set. We can now
            // generate the bit lengths.
            gen_bitlen(s);

            // The field len is now set, we can generate the bit codes
            gen_codes(tree, max_code, s.bl_count);
        }

        // Generate the codes for a given tree and bit counts (which need not be
        // optimal).
        // IN assertion: the array bl_count contains the bit length statistics for
        // the given tree and the field len is set for all tree elements.
        // OUT assertion: the field code is set for all tree elements of non
        // zero code length.
        internal static void gen_codes(short[] tree, int max_code, short[] bl_count)
        {
            short[] next_code = new short[InternalConstants.MAX_BITS + 1]; // next code value for each bit length
            short code = 0;                                                // running code value
            int bits;                                                      // bit index
            int n;                                                         // code index

            // The distribution counts are first used to generate the code values
            // without bit reversal.
            for (bits = 1; bits <= InternalConstants.MAX_BITS; bits++)
                unchecked
                {
                    next_code[bits] = code = (short) ((code + bl_count[bits - 1]) << 1);
                }

            // Check that the bit counts in bl_count are consistent. The last code
            // must be all ones.
//Assert (code + bl_count[MAX_BITS]-1 == (1<>= 1; //SharedUtils.URShift(code, 1); res <<= 1; } while (--len > 0); return res >> 1; } } } ================================================ FILE: src/FirebirdSql.Data.External/zlib/Zlib.cs ================================================ // Zlib.cs // ------------------------------------------------------------------ // // Copyright (c) 2009-2011 Dino Chiesa and Microsoft Corporation. // All rights reserved. // // This code module is part of DotNetZip, a zipfile class library. // // ------------------------------------------------------------------ // // This code is licensed under the Microsoft Public License. // See the file License.txt for the license details. // More info on: http://dotnetzip.codeplex.com // // ------------------------------------------------------------------ // // Last Saved: <2011-August-03 19:52:28> // // ------------------------------------------------------------------ // // This module defines classes for ZLIB compression and // decompression. This code is derived from the jzlib implementation of // zlib, but significantly modified. The object model is not the same, // and many of the behaviors are new or different. Nonetheless, in // keeping with the license for jzlib, the copyright to that code is // included below. // // ------------------------------------------------------------------ // // The following notice applies to jzlib: // // Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // 2. 
Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in // the documentation and/or other materials provided with the distribution. // // 3. The names of the authors may not be used to endorse or promote products // derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND // FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT, // INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, // OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, // EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // ----------------------------------------------------------------------- // // jzlib is based on zlib-1.1.3. // // The following notice applies to zlib: // // ----------------------------------------------------------------------- // // Copyright (C) 1995-2004 Jean-loup Gailly and Mark Adler // // The ZLIB software is provided 'as-is', without any express or implied // warranty. In no event will the authors be held liable for any damages // arising from the use of this software. // // Permission is granted to anyone to use this software for any purpose, // including commercial applications, and to alter it and redistribute it // freely, subject to the following restrictions: // // 1. The origin of this software must not be misrepresented; you must not // claim that you wrote the original software. 
// If you use this software in a product, an acknowledgment in the product
// documentation would be appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
//
// Jean-loup Gailly jloup@gzip.org
// Mark Adler madler@alumni.caltech.edu
//
// -----------------------------------------------------------------------

using System;
using Interop = System.Runtime.InteropServices;

namespace Ionic.Zlib
{
	/// <summary>
	/// Describes how to flush the current deflate operation.
	/// </summary>
	/// <remarks>
	/// The different FlushType values are useful when using a Deflate in a streaming application.
	/// </remarks>
	internal enum FlushType
	{
		/// <summary>No flush at all.</summary>
		None = 0,

		/// <summary>
		/// Closes the current block, but doesn't flush it to the output.
		/// Used internally only in hypothetical scenarios. This was supposed to be
		/// removed by Zlib, but it is still in use in some edge cases.
		/// </summary>
		Partial,

		/// <summary>
		/// Use this during compression to specify that all pending output should be
		/// flushed to the output buffer and the output should be aligned on a byte
		/// boundary. You might use this in a streaming communication scenario, so that
		/// the decompressor can get all input data available so far. When using this
		/// with a ZlibCodec, <c>AvailableBytesIn</c> will be zero after the call if
		/// enough output space has been provided before the call. Flushing will
		/// degrade compression and so it should be used only when necessary.
		/// </summary>
		Sync,

		/// <summary>
		/// Use this during compression to specify that all output should be flushed, as
		/// with <c>FlushType.Sync</c>, but also, the compression state should be reset
		/// so that decompression can restart from this point if previous compressed
		/// data has been damaged or if random access is desired. Using
		/// <c>FlushType.Full</c> too often can significantly degrade the compression.
		/// </summary>
		Full,

		/// <summary>Signals the end of the compression/decompression stream.</summary>
		Finish,
	}


	/// <summary>
	/// The compression level to be used when using a DeflateStream or ZlibStream with CompressionMode.Compress.
	/// </summary>
	internal enum CompressionLevel
	{
		/// <summary>
		/// None means that the data will be simply stored, with no change at all.
		/// If you are producing ZIPs for use on Mac OSX, be aware that archives produced with CompressionLevel.None
		/// cannot be opened with the default zip reader. Use a different CompressionLevel.
		/// </summary>
		None= 0,
		/// <summary>Same as None.</summary>
		Level0 = 0,

		/// <summary>The fastest but least effective compression.</summary>
		BestSpeed = 1,

		/// <summary>A synonym for BestSpeed.</summary>
		Level1 = 1,

		/// <summary>A little slower, but better, than level 1.</summary>
		Level2 = 2,

		/// <summary>A little slower, but better, than level 2.</summary>
		Level3 = 3,

		/// <summary>A little slower, but better, than level 3.</summary>
		Level4 = 4,

		/// <summary>A little slower than level 4, but with better compression.</summary>
		Level5 = 5,

		/// <summary>The default compression level, with a good balance of speed and compression efficiency.</summary>
		Default = 6,

		/// <summary>A synonym for Default.</summary>
		Level6 = 6,

		/// <summary>Pretty good compression!</summary>
		Level7 = 7,

		/// <summary>Better compression than Level7!</summary>
		Level8 = 8,

		/// <summary>
		/// The "best" compression, where best means greatest reduction in size of the input data stream.
		/// This is also the slowest compression.
		/// </summary>
		BestCompression = 9,

		/// <summary>A synonym for BestCompression.</summary>
		Level9 = 9,
	}

	/// <summary>
	/// Describes options for how the compression algorithm is executed. Different strategies
	/// work better on different sorts of data. The strategy parameter can affect the compression
	/// ratio and the speed of compression but not the correctness of the compression.
	/// </summary>
	internal enum CompressionStrategy
	{
		/// <summary>
		/// The default strategy is probably the best for normal data.
		/// </summary>
		Default = 0,

		/// <summary>
		/// The Filtered strategy is intended to be used most effectively with data produced by a
		/// filter or predictor. By this definition, filtered data consists mostly of small
		/// values with a somewhat random distribution. In this case, the compression algorithm
		/// is tuned to compress them better. The effect of Filtered is to force more Huffman
		/// coding and less string matching; it is a half-step between Default and HuffmanOnly.
		/// </summary>
		Filtered = 1,

		/// <summary>
		/// Using HuffmanOnly will force the compressor to do Huffman encoding only, with no
		/// string matching.
		/// </summary>
		HuffmanOnly = 2,
	}


	/// <summary>
	/// An enum to specify the direction of transcoding - whether to compress or decompress.
	/// </summary>
	internal enum CompressionMode
	{
		/// <summary>Used to specify that the stream should compress the data.</summary>
		Compress= 0,
		/// <summary>Used to specify that the stream should decompress the data.</summary>
		Decompress = 1,
	}


	/// <summary>
	/// A general purpose exception class for exceptions in the Zlib library.
	/// </summary>
	[Interop.GuidAttribute("ebc25cf6-9120-4283-b972-0e5520d0000E")]
	internal class ZlibException : System.Exception
	{
		/// <summary>
		/// The ZlibException class captures exception information generated
		/// by the Zlib library.
		/// </summary>
		public ZlibException()
			: base()
		{
		}

		/// <summary>
		/// This ctor collects a message attached to the exception.
		/// </summary>
		/// <param name="s">the message for the exception.</param>
		public ZlibException(System.String s)
			: base(s)
		{
		}
	}


	internal class SharedUtils
	{
		/// <summary>
		/// Performs an unsigned bitwise right shift with the specified number
		/// </summary>
		/// <param name="number">Number to operate on</param>
		/// <param name="bits">Amount of bits to shift</param>
		/// <returns>The resulting number from the shift operation</returns>
		public static int URShift(int number, int bits)
		{
			return (int)((uint)number >> bits);
		}

#if NOT
		/// <summary>
		/// Performs an unsigned bitwise right shift with the specified number
		/// </summary>
		/// <param name="number">Number to operate on</param>
		/// <param name="bits">Amount of bits to shift</param>
		/// <returns>The resulting number from the shift operation</returns>
		public static long URShift(long number, int bits)
		{
			return (long) ((UInt64)number >> bits);
		}
#endif

		/// <summary>
		/// Reads a number of characters from the current source TextReader and writes
		/// the data to the target array at the specified index.
		/// </summary>
		/// <param name="sourceTextReader">The source TextReader to read from</param>
		/// <param name="target">Contains the array of characters read from the source TextReader.</param>
		/// <param name="start">The starting index of the target array.</param>
		/// <param name="count">The maximum number of characters to read from the source TextReader.</param>
		/// <returns>
		/// The number of characters read. The number will be less than or equal to
		/// count depending on the data available in the source TextReader. Returns -1
		/// if the end of the stream is reached.
		/// </returns>
		public static System.Int32 ReadInput(System.IO.TextReader sourceTextReader, byte[] target, int start, int count)
		{
			// Returns 0 bytes if not enough space in target
			if (target.Length == 0) return 0;

			// NOTE(review): the scratch buffer is sized by target.Length, so callers
			// must keep start + count within target.Length or TextReader.Read throws
			// ArgumentException — confirm against callers before changing.
			char[] charArray = new char[target.Length];
			int bytesRead = sourceTextReader.Read(charArray, start, count);

			// TextReader.Read returns 0 only at end of stream; this method maps that
			// to the Java-style -1 EOF convention.
			// Returns -1 if EOF
			if (bytesRead == 0) return -1;

			// NOTE(review): the narrowing cast keeps only the low byte of each char,
			// so characters above U+00FF are truncated — presumably only ASCII/Latin-1
			// data is expected here; verify at call sites.
			for (int index = start; index < start + bytesRead; index++)
				target[index] = (byte)charArray[index];

			return bytesRead;
		}

		// Encodes the string as UTF-8 bytes.
		internal static byte[] ToByteArray(System.String sourceString)
		{
			return System.Text.UTF8Encoding.UTF8.GetBytes(sourceString);
		}

		// Decodes the bytes as UTF-8 characters.
		internal static char[] ToCharArray(byte[] byteArray)
		{
			return System.Text.UTF8Encoding.UTF8.GetChars(byteArray);
		}
	}

	// Shared constants of the DEFLATE format (tree sizes and repeat codes),
	// used by the Tree/Deflate machinery in this library.
	internal static class InternalConstants
	{
		internal static readonly int MAX_BITS = 15;
		internal static readonly int BL_CODES = 19;
		internal static readonly int D_CODES = 30;
		internal static readonly int LITERALS = 256;
		internal static readonly int LENGTH_CODES = 29;
		internal static readonly int L_CODES = (LITERALS + 1 + LENGTH_CODES);

		// Bit length codes must not exceed MAX_BL_BITS bits
		internal static readonly int MAX_BL_BITS = 7;

		// repeat previous bit length 3-6 times (2 bits of repeat count)
		internal static readonly int REP_3_6 = 16;

		// repeat a zero length 3-10 times (3 bits of repeat count)
		internal static readonly int REPZ_3_10 = 17;

		// repeat a zero length 11-138 times (7 bits of repeat count)
		internal static readonly int REPZ_11_138 = 18;
	}

	// Describes one of the fixed (static) Huffman trees of the DEFLATE format,
	// stored as (code, bit-length) pairs.
	internal sealed class StaticTree
	{
		internal static readonly short[]
lengthAndLiteralsTreeCodes = new short[] {
			// Fixed literal/length tree of the DEFLATE format: 288 (code, bits) pairs.
			12, 8, 140, 8, 76, 8, 204, 8, 44, 8, 172, 8, 108, 8, 236, 8,
			28, 8, 156, 8, 92, 8, 220, 8, 60, 8, 188, 8, 124, 8, 252, 8,
			2, 8, 130, 8, 66, 8, 194, 8, 34, 8, 162, 8, 98, 8, 226, 8,
			18, 8, 146, 8, 82, 8, 210, 8, 50, 8, 178, 8, 114, 8, 242, 8,
			10, 8, 138, 8, 74, 8, 202, 8, 42, 8, 170, 8, 106, 8, 234, 8,
			26, 8, 154, 8, 90, 8, 218, 8, 58, 8, 186, 8, 122, 8, 250, 8,
			6, 8, 134, 8, 70, 8, 198, 8, 38, 8, 166, 8, 102, 8, 230, 8,
			22, 8, 150, 8, 86, 8, 214, 8, 54, 8, 182, 8, 118, 8, 246, 8,
			14, 8, 142, 8, 78, 8, 206, 8, 46, 8, 174, 8, 110, 8, 238, 8,
			30, 8, 158, 8, 94, 8, 222, 8, 62, 8, 190, 8, 126, 8, 254, 8,
			1, 8, 129, 8, 65, 8, 193, 8, 33, 8, 161, 8, 97, 8, 225, 8,
			17, 8, 145, 8, 81, 8, 209, 8, 49, 8, 177, 8, 113, 8, 241, 8,
			9, 8, 137, 8, 73, 8, 201, 8, 41, 8, 169, 8, 105, 8, 233, 8,
			25, 8, 153, 8, 89, 8, 217, 8, 57, 8, 185, 8, 121, 8, 249, 8,
			5, 8, 133, 8, 69, 8, 197, 8, 37, 8, 165, 8, 101, 8, 229, 8,
			21, 8, 149, 8, 85, 8, 213, 8, 53, 8, 181, 8, 117, 8, 245, 8,
			13, 8, 141, 8, 77, 8, 205, 8, 45, 8, 173, 8, 109, 8, 237, 8,
			29, 8, 157, 8, 93, 8, 221, 8, 61, 8, 189, 8, 125, 8, 253, 8,
			19, 9, 275, 9, 147, 9, 403, 9, 83, 9, 339, 9, 211, 9, 467, 9,
			51, 9, 307, 9, 179, 9, 435, 9, 115, 9, 371, 9, 243, 9, 499, 9,
			11, 9, 267, 9, 139, 9, 395, 9, 75, 9, 331, 9, 203, 9, 459, 9,
			43, 9, 299, 9, 171, 9, 427, 9, 107, 9, 363, 9, 235, 9, 491, 9,
			27, 9, 283, 9, 155, 9, 411, 9, 91, 9, 347, 9, 219, 9, 475, 9,
			59, 9, 315, 9, 187, 9, 443, 9, 123, 9, 379, 9, 251, 9, 507, 9,
			7, 9, 263, 9, 135, 9, 391, 9, 71, 9, 327, 9, 199, 9, 455, 9,
			39, 9, 295, 9, 167, 9, 423, 9, 103, 9, 359, 9, 231, 9, 487, 9,
			23, 9, 279, 9, 151, 9, 407, 9, 87, 9, 343, 9, 215, 9, 471, 9,
			55, 9, 311, 9, 183, 9, 439, 9, 119, 9, 375, 9, 247, 9, 503, 9,
			15, 9, 271, 9, 143, 9, 399, 9, 79, 9, 335, 9, 207, 9, 463, 9,
			47, 9, 303, 9, 175, 9, 431, 9, 111, 9, 367, 9, 239, 9, 495, 9,
			31, 9, 287, 9, 159, 9, 415, 9, 95, 9, 351, 9, 223, 9, 479, 9,
			63, 9, 319, 9, 191, 9, 447, 9, 127, 9, 383, 9, 255, 9, 511, 9,
			0, 7, 64, 7, 32, 7, 96, 7, 16, 7, 80, 7, 48, 7, 112, 7,
			8, 7, 72, 7, 40, 7, 104, 7, 24, 7, 88, 7, 56, 7, 120, 7,
			4, 7, 68, 7, 36, 7, 100, 7, 20, 7, 84, 7, 52, 7, 116, 7,
			3, 8, 131, 8, 67, 8, 195, 8, 35, 8, 163, 8, 99, 8, 227, 8
		};

		// Fixed distance tree of the DEFLATE format: 30 (code, bits) pairs, all 5 bits.
		internal static readonly short[] distTreeCodes = new short[] {
			0, 5, 16, 5, 8, 5, 24, 5, 4, 5, 20, 5, 12, 5, 28, 5,
			2, 5, 18, 5, 10, 5, 26, 5, 6, 5, 22, 5, 14, 5, 30, 5,
			1, 5, 17, 5, 9, 5, 25, 5, 5, 5, 21, 5, 13, 5, 29, 5,
			3, 5, 19, 5, 11, 5, 27, 5, 7, 5, 23, 5 };

		// The three fixed trees used by DEFLATE, built once in the static ctor.
		internal static readonly StaticTree Literals;
		internal static readonly StaticTree Distances;
		internal static readonly StaticTree BitLengths;

		internal short[] treeCodes; // static tree or null
		internal int[] extraBits;   // extra bits for each code or null
		internal int extraBase;     // base index for extra_bits
		internal int elems;         // max number of elements in the tree
		internal int maxLength;     // max bit length for the codes

		private StaticTree(short[] treeCodes, int[] extraBits, int extraBase, int elems, int maxLength)
		{
			this.treeCodes = treeCodes;
			this.extraBits = extraBits;
			this.extraBase = extraBase;
			this.elems = elems;
			this.maxLength = maxLength;
		}

		static StaticTree()
		{
			Literals = new StaticTree(lengthAndLiteralsTreeCodes, Tree.ExtraLengthBits, InternalConstants.LITERALS + 1, InternalConstants.L_CODES, InternalConstants.MAX_BITS);
			Distances = new StaticTree(distTreeCodes, Tree.ExtraDistanceBits, 0, InternalConstants.D_CODES, InternalConstants.MAX_BITS);
			BitLengths = new StaticTree(null, Tree.extra_blbits, 0, InternalConstants.BL_CODES, InternalConstants.MAX_BL_BITS);
		}
	}



	/// <summary>
	/// Computes an Adler-32 checksum.
	/// </summary>
	/// <remarks>
	/// The Adler checksum is similar to a CRC checksum, but faster to compute, though less
	/// reliable. It is used in producing RFC1950 compressed streams. The Adler checksum
	/// is a required part of the "ZLIB" standard. Applications will almost never need to
	/// use this class directly.
	/// </remarks>
	internal sealed class Adler
	{
		// largest prime smaller than 65536
		private static readonly uint BASE = 65521;
		// NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1
		private static readonly int NMAX = 5552;

#pragma warning disable 3001
#pragma warning disable 3002

		/// <summary>
		/// Calculates the Adler32 checksum.
		/// </summary>
		/// <remarks>
		/// This is used within ZLIB. You probably don't need to use this directly.
		/// Seed a new checksum with <c>Adler.Adler32(0, null, 0, 0)</c> (which returns 1),
		/// then fold buffers in with <c>adler = Adler.Adler32(adler, buffer, index, length)</c>.
		/// </remarks>
		public static uint Adler32(uint adler, byte[] buf, int index, int len)
		{
			// A null buffer requests the initial checksum value, which is 1 per RFC 1950.
			if (buf == null)
				return 1;

			uint s1 = (uint) (adler & 0xffff);
			uint s2 = (uint) ((adler >> 16) & 0xffff);

			while (len > 0)
			{
				// Process at most NMAX bytes between modulo reductions so s1/s2
				// cannot overflow a 32-bit accumulator.
				int k = len < NMAX ? len : NMAX;
				len -= k;
				// Unrolled 16-at-a-time inner loop.
				while (k >= 16)
				{
					//s1 += (buf[index++] & 0xff); s2 += s1;
					s1 += buf[index++]; s2 += s1;
					s1 += buf[index++]; s2 += s1;
					s1 += buf[index++]; s2 += s1;
					s1 += buf[index++]; s2 += s1;
					s1 += buf[index++]; s2 += s1;
					s1 += buf[index++]; s2 += s1;
					s1 += buf[index++]; s2 += s1;
					s1 += buf[index++]; s2 += s1;
					s1 += buf[index++]; s2 += s1;
					s1 += buf[index++]; s2 += s1;
					s1 += buf[index++]; s2 += s1;
					s1 += buf[index++]; s2 += s1;
					s1 += buf[index++]; s2 += s1;
					s1 += buf[index++]; s2 += s1;
					s1 += buf[index++]; s2 += s1;
					s1 += buf[index++]; s2 += s1;
					k -= 16;
				}
				if (k != 0)
				{
					do
					{
						s1 += buf[index++];
						s2 += s1;
					}
					while (--k != 0);
				}
				s1 %= BASE;
				s2 %= BASE;
			}
			return (uint)((s2 << 16) | s1);
		}

#pragma warning restore 3001
#pragma warning restore 3002

	}

}


================================================
FILE: src/FirebirdSql.Data.External/zlib/ZlibCodec.cs
================================================
// ZlibCodec.cs
// ------------------------------------------------------------------
//
// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
// All rights reserved.
//
// This code module is part of DotNetZip, a zipfile class library.
// // ------------------------------------------------------------------ // // This code is licensed under the Microsoft Public License. // See the file License.txt for the license details. // More info on: http://dotnetzip.codeplex.com // // ------------------------------------------------------------------ // // last saved (in emacs): // Time-stamp: <2009-November-03 15:40:51> // // ------------------------------------------------------------------ // // This module defines a Codec for ZLIB compression and // decompression. This code extends code that was based the jzlib // implementation of zlib, but this code is completely novel. The codec // class is new, and encapsulates some behaviors that are new, and some // that were present in other classes in the jzlib code base. In // keeping with the license for jzlib, the copyright to the jzlib code // is included below. // // ------------------------------------------------------------------ // // Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in // the documentation and/or other materials provided with the distribution. // // 3. The names of the authors may not be used to endorse or promote products // derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND // FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT, // INC. 
OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, // OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, // EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // ----------------------------------------------------------------------- // // This program is based on zlib-1.1.3; credit to authors // Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu) // and contributors of zlib. // // ----------------------------------------------------------------------- using System; using Interop=System.Runtime.InteropServices; namespace Ionic.Zlib { /// /// Encoder and Decoder for ZLIB and DEFLATE (IETF RFC1950 and RFC1951). /// /// /// /// This class compresses and decompresses data according to the Deflate algorithm /// and optionally, the ZLIB format, as documented in RFC 1950 - ZLIB and RFC 1951 - DEFLATE. /// [Interop.GuidAttribute("ebc25cf6-9120-4283-b972-0e5520d0000D")] [Interop.ComVisible(true)] #if !NETCF //[Interop.ClassInterface(Interop.ClassInterfaceType.AutoDispatch)] #endif sealed internal class ZlibCodec { /// /// The buffer from which data is taken. /// public byte[] InputBuffer; /// /// An index into the InputBuffer array, indicating where to start reading. /// public int NextIn; /// /// The number of bytes available in the InputBuffer, starting at NextIn. /// /// /// Generally you should set this to InputBuffer.Length before the first Inflate() or Deflate() call. /// The class will update this number as calls to Inflate/Deflate are made. /// public int AvailableBytesIn; /// /// Total number of bytes read so far, through all calls to Inflate()/Deflate(). 
/// public long TotalBytesIn; /// /// Buffer to store output data. /// public byte[] OutputBuffer; /// /// An index into the OutputBuffer array, indicating where to start writing. /// public int NextOut; /// /// The number of bytes available in the OutputBuffer, starting at NextOut. /// /// /// Generally you should set this to OutputBuffer.Length before the first Inflate() or Deflate() call. /// The class will update this number as calls to Inflate/Deflate are made. /// public int AvailableBytesOut; /// /// Total number of bytes written to the output so far, through all calls to Inflate()/Deflate(). /// public long TotalBytesOut; /// /// used for diagnostics, when something goes wrong! /// public System.String Message; internal DeflateManager dstate; internal InflateManager istate; internal uint _Adler32; /// /// The compression level to use in this codec. Useful only in compression mode. /// public CompressionLevel CompressLevel = CompressionLevel.Default; /// /// The number of Window Bits to use. /// /// /// This gauges the size of the sliding window, and hence the /// compression effectiveness as well as memory consumption. It's best to just leave this /// setting alone if you don't know what it is. The maximum value is 15 bits, which implies /// a 32k window. /// public int WindowBits = ZlibConstants.WindowBitsDefault; /// /// The compression strategy to use. /// /// /// This is only effective in compression. The theory offered by ZLIB is that different /// strategies could potentially produce significant differences in compression behavior /// for different data sets. Unfortunately I don't have any good recommendations for how /// to set it differently. When I tested changing the strategy I got minimally different /// compression performance. It's best to leave this property alone if you don't have a /// good feel for it. Or, you may want to produce a test harness that runs through the /// different strategy options and evaluates them on different file types. 
If you do that, /// let me know your results. /// public CompressionStrategy Strategy = CompressionStrategy.Default; /// /// The Adler32 checksum on the data transferred through the codec so far. You probably don't need to look at this. /// public int Adler32 { get { return (int)_Adler32; } } /// /// Create a ZlibCodec. /// /// /// If you use this default constructor, you will later have to explicitly call /// InitializeInflate() or InitializeDeflate() before using the ZlibCodec to compress /// or decompress. /// public ZlibCodec() { } /// /// Create a ZlibCodec that either compresses or decompresses. /// /// /// Indicates whether the codec should compress (deflate) or decompress (inflate). /// public ZlibCodec(CompressionMode mode) { if (mode == CompressionMode.Compress) { int rc = InitializeDeflate(); if (rc != ZlibConstants.Z_OK) throw new ZlibException("Cannot initialize for deflate."); } else if (mode == CompressionMode.Decompress) { int rc = InitializeInflate(); if (rc != ZlibConstants.Z_OK) throw new ZlibException("Cannot initialize for inflate."); } else throw new ZlibException("Invalid ZlibStreamFlavor."); } /// /// Initialize the inflation state. /// /// /// It is not necessary to call this before using the ZlibCodec to inflate data; /// It is implicitly called when you call the constructor. /// /// Z_OK if everything goes well. public int InitializeInflate() { return InitializeInflate(this.WindowBits); } /// /// Initialize the inflation state with an explicit flag to /// govern the handling of RFC1950 header bytes. /// /// /// /// By default, the ZLIB header defined in RFC 1950 is expected. If /// you want to read a zlib stream you should specify true for /// expectRfc1950Header. If you have a deflate stream, you will want to specify /// false. It is only necessary to invoke this initializer explicitly if you /// want to specify false. /// /// /// whether to expect an RFC1950 header byte /// pair when reading the stream of data to be inflated. 
/// /// Z_OK if everything goes well. public int InitializeInflate(bool expectRfc1950Header) { return InitializeInflate(this.WindowBits, expectRfc1950Header); } /// /// Initialize the ZlibCodec for inflation, with the specified number of window bits. /// /// The number of window bits to use. If you need to ask what that is, /// then you shouldn't be calling this initializer. /// Z_OK if all goes well. public int InitializeInflate(int windowBits) { this.WindowBits = windowBits; return InitializeInflate(windowBits, true); } /// /// Initialize the inflation state with an explicit flag to govern the handling of /// RFC1950 header bytes. /// /// /// /// If you want to read a zlib stream you should specify true for /// expectRfc1950Header. In this case, the library will expect to find a ZLIB /// header, as defined in RFC /// 1950, in the compressed stream. If you will be reading a DEFLATE or /// GZIP stream, which does not have such a header, you will want to specify /// false. /// /// /// whether to expect an RFC1950 header byte pair when reading /// the stream of data to be inflated. /// The number of window bits to use. If you need to ask what that is, /// then you shouldn't be calling this initializer. /// Z_OK if everything goes well. public int InitializeInflate(int windowBits, bool expectRfc1950Header) { this.WindowBits = windowBits; if (dstate != null) throw new ZlibException("You may not call InitializeInflate() after calling InitializeDeflate()."); istate = new InflateManager(expectRfc1950Header); return istate.Initialize(this, windowBits); } /// /// Inflate the data in the InputBuffer, placing the result in the OutputBuffer. /// /// /// You must have set InputBuffer and OutputBuffer, NextIn and NextOut, and AvailableBytesIn and /// AvailableBytesOut before calling this method. 
/// /// /// /// private void InflateBuffer() /// { /// int bufferSize = 1024; /// byte[] buffer = new byte[bufferSize]; /// ZlibCodec decompressor = new ZlibCodec(); /// /// Console.WriteLine("\n============================================"); /// Console.WriteLine("Size of Buffer to Inflate: {0} bytes.", CompressedBytes.Length); /// MemoryStream ms = new MemoryStream(DecompressedBytes); /// /// int rc = decompressor.InitializeInflate(); /// /// decompressor.InputBuffer = CompressedBytes; /// decompressor.NextIn = 0; /// decompressor.AvailableBytesIn = CompressedBytes.Length; /// /// decompressor.OutputBuffer = buffer; /// /// // pass 1: inflate /// do /// { /// decompressor.NextOut = 0; /// decompressor.AvailableBytesOut = buffer.Length; /// rc = decompressor.Inflate(FlushType.None); /// /// if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END) /// throw new Exception("inflating: " + decompressor.Message); /// /// ms.Write(decompressor.OutputBuffer, 0, buffer.Length - decompressor.AvailableBytesOut); /// } /// while (decompressor.AvailableBytesIn > 0 || decompressor.AvailableBytesOut == 0); /// /// // pass 2: finish and flush /// do /// { /// decompressor.NextOut = 0; /// decompressor.AvailableBytesOut = buffer.Length; /// rc = decompressor.Inflate(FlushType.Finish); /// /// if (rc != ZlibConstants.Z_STREAM_END && rc != ZlibConstants.Z_OK) /// throw new Exception("inflating: " + decompressor.Message); /// /// if (buffer.Length - decompressor.AvailableBytesOut > 0) /// ms.Write(buffer, 0, buffer.Length - decompressor.AvailableBytesOut); /// } /// while (decompressor.AvailableBytesIn > 0 || decompressor.AvailableBytesOut == 0); /// /// decompressor.EndInflate(); /// } /// /// /// /// The flush to use when inflating. /// Z_OK if everything goes well. public int Inflate(FlushType flush) { if (istate == null) throw new ZlibException("No Inflate State!"); return istate.Inflate(flush); } /// /// Ends an inflation session. 
/// /// /// Call this after successively calling Inflate(). This will cause all buffers to be flushed. /// After calling this you cannot call Inflate() without a intervening call to one of the /// InitializeInflate() overloads. /// /// Z_OK if everything goes well. public int EndInflate() { if (istate == null) throw new ZlibException("No Inflate State!"); int ret = istate.End(); istate = null; return ret; } /// /// I don't know what this does! /// /// Z_OK if everything goes well. public int SyncInflate() { if (istate == null) throw new ZlibException("No Inflate State!"); return istate.Sync(); } /// /// Initialize the ZlibCodec for deflation operation. /// /// /// The codec will use the MAX window bits and the default level of compression. /// /// /// /// int bufferSize = 40000; /// byte[] CompressedBytes = new byte[bufferSize]; /// byte[] DecompressedBytes = new byte[bufferSize]; /// /// ZlibCodec compressor = new ZlibCodec(); /// /// compressor.InitializeDeflate(CompressionLevel.Default); /// /// compressor.InputBuffer = System.Text.ASCIIEncoding.ASCII.GetBytes(TextToCompress); /// compressor.NextIn = 0; /// compressor.AvailableBytesIn = compressor.InputBuffer.Length; /// /// compressor.OutputBuffer = CompressedBytes; /// compressor.NextOut = 0; /// compressor.AvailableBytesOut = CompressedBytes.Length; /// /// while (compressor.TotalBytesIn != TextToCompress.Length && compressor.TotalBytesOut < bufferSize) /// { /// compressor.Deflate(FlushType.None); /// } /// /// while (true) /// { /// int rc= compressor.Deflate(FlushType.Finish); /// if (rc == ZlibConstants.Z_STREAM_END) break; /// } /// /// compressor.EndDeflate(); /// /// /// /// Z_OK if all goes well. You generally don't need to check the return code. public int InitializeDeflate() { return _InternalInitializeDeflate(true); } /// /// Initialize the ZlibCodec for deflation operation, using the specified CompressionLevel. 
/// /// /// The codec will use the maximum window bits (15) and the specified /// CompressionLevel. It will emit a ZLIB stream as it compresses. /// /// The compression level for the codec. /// Z_OK if all goes well. public int InitializeDeflate(CompressionLevel level) { this.CompressLevel = level; return _InternalInitializeDeflate(true); } /// /// Initialize the ZlibCodec for deflation operation, using the specified CompressionLevel, /// and the explicit flag governing whether to emit an RFC1950 header byte pair. /// /// /// The codec will use the maximum window bits (15) and the specified CompressionLevel. /// If you want to generate a zlib stream, you should specify true for /// wantRfc1950Header. In this case, the library will emit a ZLIB /// header, as defined in RFC /// 1950, in the compressed stream. /// /// The compression level for the codec. /// whether to emit an initial RFC1950 byte pair in the compressed stream. /// Z_OK if all goes well. public int InitializeDeflate(CompressionLevel level, bool wantRfc1950Header) { this.CompressLevel = level; return _InternalInitializeDeflate(wantRfc1950Header); } /// /// Initialize the ZlibCodec for deflation operation, using the specified CompressionLevel, /// and the specified number of window bits. /// /// /// The codec will use the specified number of window bits and the specified CompressionLevel. /// /// The compression level for the codec. /// the number of window bits to use. If you don't know what this means, don't use this method. /// Z_OK if all goes well. public int InitializeDeflate(CompressionLevel level, int bits) { this.CompressLevel = level; this.WindowBits = bits; return _InternalInitializeDeflate(true); } /// /// Initialize the ZlibCodec for deflation operation, using the specified /// CompressionLevel, the specified number of window bits, and the explicit flag /// governing whether to emit an RFC1950 header byte pair. /// /// /// The compression level for the codec. 
/// whether to emit an initial RFC1950 byte pair in the compressed stream. /// the number of window bits to use. If you don't know what this means, don't use this method. /// Z_OK if all goes well. public int InitializeDeflate(CompressionLevel level, int bits, bool wantRfc1950Header) { this.CompressLevel = level; this.WindowBits = bits; return _InternalInitializeDeflate(wantRfc1950Header); } private int _InternalInitializeDeflate(bool wantRfc1950Header) { if (istate != null) throw new ZlibException("You may not call InitializeDeflate() after calling InitializeInflate()."); dstate = new DeflateManager(); dstate.WantRfc1950HeaderBytes = wantRfc1950Header; return dstate.Initialize(this, this.CompressLevel, this.WindowBits, this.Strategy); } /// /// Deflate one batch of data. /// /// /// You must have set InputBuffer and OutputBuffer before calling this method. /// /// /// /// private void DeflateBuffer(CompressionLevel level) /// { /// int bufferSize = 1024; /// byte[] buffer = new byte[bufferSize]; /// ZlibCodec compressor = new ZlibCodec(); /// /// Console.WriteLine("\n============================================"); /// Console.WriteLine("Size of Buffer to Deflate: {0} bytes.", UncompressedBytes.Length); /// MemoryStream ms = new MemoryStream(); /// /// int rc = compressor.InitializeDeflate(level); /// /// compressor.InputBuffer = UncompressedBytes; /// compressor.NextIn = 0; /// compressor.AvailableBytesIn = UncompressedBytes.Length; /// /// compressor.OutputBuffer = buffer; /// /// // pass 1: deflate /// do /// { /// compressor.NextOut = 0; /// compressor.AvailableBytesOut = buffer.Length; /// rc = compressor.Deflate(FlushType.None); /// /// if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END) /// throw new Exception("deflating: " + compressor.Message); /// /// ms.Write(compressor.OutputBuffer, 0, buffer.Length - compressor.AvailableBytesOut); /// } /// while (compressor.AvailableBytesIn > 0 || compressor.AvailableBytesOut == 0); /// /// // pass 2: 
finish and flush /// do /// { /// compressor.NextOut = 0; /// compressor.AvailableBytesOut = buffer.Length; /// rc = compressor.Deflate(FlushType.Finish); /// /// if (rc != ZlibConstants.Z_STREAM_END && rc != ZlibConstants.Z_OK) /// throw new Exception("deflating: " + compressor.Message); /// /// if (buffer.Length - compressor.AvailableBytesOut > 0) /// ms.Write(buffer, 0, buffer.Length - compressor.AvailableBytesOut); /// } /// while (compressor.AvailableBytesIn > 0 || compressor.AvailableBytesOut == 0); /// /// compressor.EndDeflate(); /// /// ms.Seek(0, SeekOrigin.Begin); /// CompressedBytes = new byte[compressor.TotalBytesOut]; /// ms.Read(CompressedBytes, 0, CompressedBytes.Length); /// } /// /// /// whether to flush all data as you deflate. Generally you will want to /// use Z_NO_FLUSH here, in a series of calls to Deflate(), and then call EndDeflate() to /// flush everything. /// /// Z_OK if all goes well. public int Deflate(FlushType flush) { if (dstate == null) throw new ZlibException("No Deflate State!"); return dstate.Deflate(flush); } /// /// End a deflation session. /// /// /// Call this after making a series of one or more calls to Deflate(). All buffers are flushed. /// /// Z_OK if all goes well. public int EndDeflate() { if (dstate == null) throw new ZlibException("No Deflate State!"); // TODO: dinoch Tue, 03 Nov 2009 15:39 (test this) //int ret = dstate.End(); dstate = null; return ZlibConstants.Z_OK; //ret; } /// /// Reset a codec for another deflation session. /// /// /// Call this to reset the deflation state. For example if a thread is deflating /// non-consecutive blocks, you can call Reset() after the Deflate(Sync) of the first /// block and before the next Deflate(None) of the second block. /// /// Z_OK if all goes well. public void ResetDeflate() { if (dstate == null) throw new ZlibException("No Deflate State!"); dstate.Reset(); } /// /// Set the CompressionStrategy and CompressionLevel for a deflation session. 
/// </summary>
/// <param name="level">the level of compression to use.</param>
/// <param name="strategy">the strategy to use for compression.</param>
/// <returns>Z_OK if all goes well.</returns>
public int SetDeflateParams(CompressionLevel level, CompressionStrategy strategy)
{
	if (dstate == null)
		throw new ZlibException("No Deflate State!");
	return dstate.SetParams(level, strategy);
}

/// <summary>
/// Set the dictionary to be used for either Inflation or Deflation.
/// </summary>
/// <param name="dictionary">The dictionary bytes to use.</param>
/// <returns>Z_OK if all goes well.</returns>
public int SetDictionary(byte[] dictionary)
{
	// Whichever session is active (inflate or deflate) receives the dictionary.
	if (istate != null)
		return istate.SetDictionary(dictionary);
	if (dstate != null)
		return dstate.SetDictionary(dictionary);
	throw new ZlibException("No Inflate or Deflate state!");
}

// Flush as much pending output as possible. All deflate() output goes
// through this function so some applications may wish to modify it
// to avoid allocating a large strm->next_out buffer and copying into it.
// (See also read_buf()).
internal void flush_pending()
{
	// Copy no more than fits in the caller's output window.
	var count = dstate.pendingCount;
	if (count > AvailableBytesOut)
		count = AvailableBytesOut;
	if (count == 0)
		return;

	// Sanity-check source and destination ranges before copying.
	if (dstate.pending.Length <= dstate.nextPending ||
		OutputBuffer.Length <= NextOut ||
		dstate.pending.Length < (dstate.nextPending + count) ||
		OutputBuffer.Length < (NextOut + count))
	{
		throw new ZlibException(String.Format("Invalid State. (pending.Length={0}, pendingCount={1})",
			dstate.pending.Length, dstate.pendingCount));
	}

	Array.Copy(dstate.pending, dstate.nextPending, OutputBuffer, NextOut, count);

	// Advance the output cursors on both sides of the copy.
	NextOut += count;
	dstate.nextPending += count;
	TotalBytesOut += count;
	AvailableBytesOut -= count;
	dstate.pendingCount -= count;
	// Once fully drained, rewind the pending cursor for reuse.
	if (dstate.pendingCount == 0)
	{
		dstate.nextPending = 0;
	}
}

// Read a new buffer from the current input stream, update the adler32
// and total number of bytes read. All deflate() input goes through
// this function so some applications may wish to modify it to avoid
// allocating a large strm->next_in buffer and copying from it.
// (See also flush_pending()).
internal int read_buf(byte[] buf, int start, int size)
{
	// All deflate() input funnels through here: copies up to 'size' bytes from
	// InputBuffer into buf and advances the input counters.
	int len = AvailableBytesIn;

	if (len > size)
		len = size;
	if (len == 0)
		return 0;

	AvailableBytesIn -= len;

	// Only a zlib (RFC1950) stream carries an Adler32 checksum.
	if (dstate.WantRfc1950HeaderBytes)
	{
		_Adler32 = Adler.Adler32(_Adler32, InputBuffer, NextIn, len);
	}
	Array.Copy(InputBuffer, NextIn, buf, start, len);
	NextIn += len;
	TotalBytesIn += len;
	return len;
}
}
}

================================================ FILE: src/FirebirdSql.Data.External/zlib/ZlibConstants.cs ================================================

// ZlibConstants.cs
// ------------------------------------------------------------------
//
// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
// All rights reserved.
//
// This code module is part of DotNetZip, a zipfile class library.
//
// ------------------------------------------------------------------
//
// This code is licensed under the Microsoft Public License.
// See the file License.txt for the license details.
// More info on: http://dotnetzip.codeplex.com
//
// ------------------------------------------------------------------
//
// last saved (in emacs):
// Time-stamp: <2009-November-03 18:50:19>
//
// ------------------------------------------------------------------
//
// This module defines constants used by the zlib class library. This
// code is derived from the jzlib implementation of zlib, but
// significantly modified. In keeping with the license for jzlib, the
// copyright to that code is included here.
//
// ------------------------------------------------------------------
//
// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
//    this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in
//    the documentation and/or other materials provided with the distribution.
//
// 3. The names of the authors may not be used to endorse or promote products
//    derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
// INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// -----------------------------------------------------------------------
//
// This program is based on zlib-1.1.3; credit to authors
// Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu)
// and contributors of zlib.
//
// -----------------------------------------------------------------------

using System;

namespace Ionic.Zlib
{
	/// <summary>
	/// A bunch of constants used in the Zlib interface.
	/// </summary>
	internal static class ZlibConstants
	{
		/// <summary>
		/// The maximum number of window bits for the Deflate algorithm.
		/// </summary>
		public const int WindowBitsMax = 15; // 32K LZ77 window

		/// <summary>
		/// The default number of window bits for the Deflate algorithm.
		/// </summary>
		public const int WindowBitsDefault = WindowBitsMax;

		/// <summary>
		/// indicates everything is A-OK
		/// </summary>
		public const int Z_OK = 0;

		/// <summary>
		/// Indicates that the last operation reached the end of the stream.
		/// </summary>
		public const int Z_STREAM_END = 1;

		/// <summary>
		/// The operation ended in need of a dictionary.
		/// </summary>
		public const int Z_NEED_DICT = 2;

		/// <summary>
		/// There was an error with the stream - not enough data, not open and readable, etc.
		/// </summary>
		public const int Z_STREAM_ERROR = -2;

		/// <summary>
		/// There was an error with the data - not enough data, bad data, etc.
		/// </summary>
		public const int Z_DATA_ERROR = -3;

		/// <summary>
		/// There was an error with the working buffer.
		/// </summary>
		public const int Z_BUF_ERROR = -5;

		/// <summary>
		/// The size of the working buffer used in the ZlibCodec class.
		/// Defaults to 16384 bytes (8192 on .NET Compact Framework builds).
		/// </summary>
#if NETCF
		public const int WorkingBufferSizeDefault = 8192;
#else
		public const int WorkingBufferSizeDefault = 16384;
#endif
		/// <summary>
		/// The minimum size of the working buffer used in the ZlibCodec class. Currently it is 1024 bytes.
		/// </summary>
		public const int WorkingBufferSizeMin = 1024;
	}
}

================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/ClientFactory.cs ================================================

/*
 * The contents of this file are subject to the Initial
 * Developer's Public License Version 1.0 (the "License");
 * you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 * Software distributed under the License is distributed on
 * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 * express or implied. See the License for the specific
 * language governing rights and limitations under the License.
 *
 * All Rights Reserved.
 */

//$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net)

using System;
using System.Threading;
using System.Threading.Tasks;
using FirebirdSql.Data.Client.Managed;
using FirebirdSql.Data.Common;
using FirebirdSql.Data.FirebirdClient;
using WireCryptOption = FirebirdSql.Data.Client.Managed.Version13.WireCryptOption;

namespace FirebirdSql.Data.Client;

// Factory that maps a ConnectionString's ServerType to the concrete database /
// service-manager implementation: managed wire protocol for FbServerType.Default,
// native client library for FbServerType.Embedded.
internal static class ClientFactory
{
	// Creates a database attachment object for the configured server type.
	public static DatabaseBase CreateDatabase(ConnectionString options)
	{
		return options.ServerType switch
		{
			FbServerType.Default => CreateManagedDatabase(options),
			FbServerType.Embedded => CreateNativeDatabase(options),
			_ => throw IncorrectServerTypeException(),
		};
	}

	// Async counterpart of CreateDatabase.
	public static ValueTask CreateDatabaseAsync(ConnectionString options, CancellationToken cancellationToken = default)
	{
		return options.ServerType switch
		{
			FbServerType.Default => CreateManagedDatabaseAsync(options, cancellationToken),
			FbServerType.Embedded => CreateNativeDatabaseAsync(options),
			_ => throw IncorrectServerTypeException(),
		};
	}

	// Creates a service manager for the configured server type.
	public static ServiceManagerBase CreateServiceManager(ConnectionString options)
	{
		return options.ServerType switch
		{
			FbServerType.Default => CreateManagedServiceManager(options),
			FbServerType.Embedded => CreateNativeServiceManager(options),
			_ => throw IncorrectServerTypeException(),
		};
	}

	// Async counterpart of CreateServiceManager.
	public static ValueTask CreateServiceManagerAsync(ConnectionString options, CancellationToken cancellationToken = default)
	{
		return options.ServerType switch
		{
			FbServerType.Default => CreateManagedServiceManagerAsync(options, cancellationToken),
			FbServerType.Embedded => CreateNativeServiceManagerAsync(options),
			_ => throw IncorrectServerTypeException(),
		};
	}

	// Connects over the wire protocol, identifies against the target database and
	// returns a GdsDatabase matching the negotiated protocol version.
	private static DatabaseBase CreateManagedDatabase(ConnectionString options)
	{
		var charset = GetCharset(options);
		var connection = new GdsConnection(options.UserID, options.Password, options.DataSource, options.Port, options.ConnectionTimeout, options.PacketSize, charset, options.Dialect, options.Compression, FbWireCryptToWireCryptOption(options.WireCrypt), options.CryptKey);
		connection.Connect();
		try
		{
			connection.Identify(options.Database);
		}
		catch
		{
			// Do not leak the socket when identification fails.
			connection.Disconnect();
			throw;
		}
		return connection.ProtocolVersion switch
		{
			IscCodes.PROTOCOL_VERSION16 => new Managed.Version16.GdsDatabase(connection),
			IscCodes.PROTOCOL_VERSION15 => new Managed.Version15.GdsDatabase(connection),
			IscCodes.PROTOCOL_VERSION13 => new Managed.Version13.GdsDatabase(connection),
			IscCodes.PROTOCOL_VERSION12 => new Managed.Version12.GdsDatabase(connection),
			IscCodes.PROTOCOL_VERSION11 => new Managed.Version11.GdsDatabase(connection),
			IscCodes.PROTOCOL_VERSION10 => new Managed.Version10.GdsDatabase(connection),
			_ => throw UnsupportedProtocolException(),
		};
	}

	// Async counterpart of CreateManagedDatabase.
	private static async ValueTask CreateManagedDatabaseAsync(ConnectionString options, CancellationToken cancellationToken = default)
	{
		var charset = GetCharset(options);
		var connection = new GdsConnection(options.UserID, options.Password, options.DataSource, options.Port, options.ConnectionTimeout, options.PacketSize, charset, options.Dialect, options.Compression, FbWireCryptToWireCryptOption(options.WireCrypt), options.CryptKey);
		await connection.ConnectAsync(cancellationToken).ConfigureAwait(false);
		try
		{
			await connection.IdentifyAsync(options.Database, cancellationToken).ConfigureAwait(false);
		}
		catch
		{
			// Do not leak the socket when identification fails.
			await connection.DisconnectAsync(cancellationToken).ConfigureAwait(false);
			throw;
		}
		return connection.ProtocolVersion switch
		{
			IscCodes.PROTOCOL_VERSION16 => new Managed.Version16.GdsDatabase(connection),
			IscCodes.PROTOCOL_VERSION15 => new Managed.Version15.GdsDatabase(connection),
			IscCodes.PROTOCOL_VERSION13 => new Managed.Version13.GdsDatabase(connection),
			IscCodes.PROTOCOL_VERSION12 => new Managed.Version12.GdsDatabase(connection),
			IscCodes.PROTOCOL_VERSION11 => new Managed.Version11.GdsDatabase(connection),
			IscCodes.PROTOCOL_VERSION10 => new Managed.Version10.GdsDatabase(connection),
			_ => throw UnsupportedProtocolException(),
		};
	}
	// Loads the native client library for an embedded database attachment.
	private static DatabaseBase CreateNativeDatabase(ConnectionString options)
	{
		var charset = GetCharset(options);
		return new Native.FesDatabase(options.ClientLibrary, charset, options.PacketSize, options.Dialect);
	}

	// Async counterpart of CreateNativeDatabase (construction itself is synchronous).
	private static ValueTask CreateNativeDatabaseAsync(ConnectionString options)
	{
		var charset = GetCharset(options);
		return ValueTask.FromResult(new Native.FesDatabase(options.ClientLibrary, charset, options.PacketSize, options.Dialect));
	}

	// Connects over the wire protocol and returns a GdsServiceManager matching
	// the negotiated protocol version.
	private static ServiceManagerBase CreateManagedServiceManager(ConnectionString options)
	{
		var charset = GetCharset(options);
		var connection = new GdsConnection(options.UserID, options.Password, options.DataSource, options.Port, options.ConnectionTimeout, options.PacketSize, charset, options.Dialect, options.Compression, FbWireCryptToWireCryptOption(options.WireCrypt), options.CryptKey);
		connection.Connect();
		try
		{
			// A service connection may have no database; identify with an empty path then.
			connection.Identify(!string.IsNullOrEmpty(options.Database) ? options.Database : string.Empty);
		}
		catch
		{
			// Do not leak the socket when identification fails.
			connection.Disconnect();
			throw;
		}
		return connection.ProtocolVersion switch
		{
			IscCodes.PROTOCOL_VERSION16 => new Managed.Version16.GdsServiceManager(connection),
			IscCodes.PROTOCOL_VERSION15 => new Managed.Version15.GdsServiceManager(connection),
			IscCodes.PROTOCOL_VERSION13 => new Managed.Version13.GdsServiceManager(connection),
			IscCodes.PROTOCOL_VERSION12 => new Managed.Version12.GdsServiceManager(connection),
			IscCodes.PROTOCOL_VERSION11 => new Managed.Version11.GdsServiceManager(connection),
			IscCodes.PROTOCOL_VERSION10 => new Managed.Version10.GdsServiceManager(connection),
			_ => throw UnsupportedProtocolException(),
		};
	}

	// Async counterpart of CreateManagedServiceManager.
	private static async ValueTask CreateManagedServiceManagerAsync(ConnectionString options, CancellationToken cancellationToken = default)
	{
		var charset = GetCharset(options);
		var connection = new GdsConnection(options.UserID, options.Password, options.DataSource, options.Port, options.ConnectionTimeout, options.PacketSize, charset, options.Dialect, options.Compression, FbWireCryptToWireCryptOption(options.WireCrypt), options.CryptKey);
		await connection.ConnectAsync(cancellationToken).ConfigureAwait(false);
		try
		{
			// A service connection may have no database; identify with an empty path then.
			await connection.IdentifyAsync(!string.IsNullOrEmpty(options.Database) ? options.Database : string.Empty, cancellationToken).ConfigureAwait(false);
		}
		catch
		{
			// Do not leak the socket when identification fails.
			await connection.DisconnectAsync(cancellationToken).ConfigureAwait(false);
			throw;
		}
		return connection.ProtocolVersion switch
		{
			IscCodes.PROTOCOL_VERSION16 => new Managed.Version16.GdsServiceManager(connection),
			IscCodes.PROTOCOL_VERSION15 => new Managed.Version15.GdsServiceManager(connection),
			IscCodes.PROTOCOL_VERSION13 => new Managed.Version13.GdsServiceManager(connection),
			IscCodes.PROTOCOL_VERSION12 => new Managed.Version12.GdsServiceManager(connection),
			IscCodes.PROTOCOL_VERSION11 => new Managed.Version11.GdsServiceManager(connection),
			IscCodes.PROTOCOL_VERSION10 => new Managed.Version10.GdsServiceManager(connection),
			_ => throw UnsupportedProtocolException(),
		};
	}

	// Loads the native client library for embedded service management.
	private static ServiceManagerBase CreateNativeServiceManager(ConnectionString options)
	{
		var charset = GetCharset(options);
		return new Native.FesServiceManager(options.ClientLibrary, charset);
	}

	// Async counterpart of CreateNativeServiceManager (construction itself is synchronous).
	private static ValueTask CreateNativeServiceManagerAsync(ConnectionString options)
	{
		var charset = GetCharset(options);
		return ValueTask.FromResult(new Native.FesServiceManager(options.ClientLibrary, charset));
	}

	private static Exception UnsupportedProtocolException() => new NotSupportedException("Protocol not supported.");

	private static Exception IncorrectServerTypeException() => new NotSupportedException("Specified server type is not correct.");

	private static Exception InvalidCharsetException() => new ArgumentException("Invalid character set specified.");

	// Resolves the connection string's charset name; throws when it is unknown.
	private static Charset GetCharset(ConnectionString options)
	{
		if (!Charset.TryGetByName(options.Charset, out var charset))
			throw InvalidCharsetException();
		return charset;
	}

	// Maps the public FbWireCrypt enum onto the wire-protocol-level option.
	private static WireCryptOption FbWireCryptToWireCryptOption(FbWireCrypt
wireCrypt)
	{
		return wireCrypt switch
		{
			FbWireCrypt.Disabled => WireCryptOption.Disabled,
			FbWireCrypt.Enabled => WireCryptOption.Enabled,
			FbWireCrypt.Required => WireCryptOption.Required,
			// Any other value indicates a programming error in the caller.
			_ => throw new ArgumentOutOfRangeException(nameof(wireCrypt), $"{nameof(wireCrypt)}={wireCrypt}"),
		};
	}
}

================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/AuthBlock.cs ================================================

/*
 * The contents of this file are subject to the Initial
 * Developer's Public License Version 1.0 (the "License");
 * you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 * Software distributed under the License is distributed on
 * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 * express or implied. See the License for the specific
 * language governing rights and limitations under the License.
 *
 * All Rights Reserved.
 */

//$Authors = Jiri Cincura (jiri@cincura.net)

using System;
using System.IO;
using System.Net;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using FirebirdSql.Data.Client.Managed.Srp;
using FirebirdSql.Data.Client.Managed.Sspi;
using FirebirdSql.Data.Common;
using WireCryptOption = FirebirdSql.Data.Client.Managed.Version13.WireCryptOption;

namespace FirebirdSql.Data.Client.Managed;

// Drives the authentication handshake (Srp256 / Srp / SSPI) and the optional
// wire-encryption negotiation for a GdsConnection.
sealed class AuthBlock
{
	// Candidate client-side plugins; released (nulled) once authentication completes.
	Srp256Client _srp256;
	SrpClient _srp;
	SspiHelper _sspi;

	public GdsConnection Connection { get; }
	public string User { get; }
	public string Password { get; }
	public WireCryptOption WireCrypt { get; }

	// Data received from the server's accept packet for the chosen plugin.
	public byte[] ServerData { get; private set; }
	public string AcceptPluginName { get; private set; }
	public bool IsAuthenticated { get; private set; }
	public byte[] ServerKeys { get; private set; }
	// Client public key advertised to the server.
	public byte[] PublicClientData { get; private set; }
	public bool HasClientData => ClientData != null;
	// Client proof computed from the server data (when available).
	public byte[] ClientData { get; private set; }
	public byte[] SessionKey { get; private set; }
	public string SessionKeyName { get; private set; }
	public bool WireCryptInitialized { get; private set; }

	public AuthBlock(GdsConnection connection, string user, string password, WireCryptOption wireCrypt)
	{
		_srp256 = new Srp256Client();
		_srp = new SrpClient();
		_sspi = new SspiHelper();
		Connection = connection;
		User = user;
		Password = password;
		WireCrypt = wireCrypt;
	}

	// Builds the CNCT user-identification blob sent with op_connect; advertises
	// the SRP plugins when credentials are present, otherwise SSPI.
	public byte[] UserIdentificationData()
	{
		using (var result = new MemoryStream(256))
		{
			var userString = Environment.GetEnvironmentVariable("USERNAME") ?? Environment.GetEnvironmentVariable("USER") ?? string.Empty;
			var user = Encoding.UTF8.GetBytes(userString);
			result.WriteByte(IscCodes.CNCT_user);
			result.WriteByte((byte)user.Length);
			result.Write(user, 0, user.Length);

			var host = Encoding.UTF8.GetBytes(Dns.GetHostName());
			result.WriteByte(IscCodes.CNCT_host);
			result.WriteByte((byte)host.Length);
			result.Write(host, 0, host.Length);

			result.WriteByte(IscCodes.CNCT_user_verification);
			result.WriteByte(0);

			if (!string.IsNullOrEmpty(User))
			{
				var login = Encoding.UTF8.GetBytes(User);
				result.WriteByte(IscCodes.CNCT_login);
				result.WriteByte((byte)login.Length);
				result.Write(login, 0, login.Length);

				var pluginNameBytes = Encoding.UTF8.GetBytes(_srp256.Name);
				result.WriteByte(IscCodes.CNCT_plugin_name);
				result.WriteByte((byte)pluginNameBytes.Length);
				result.Write(pluginNameBytes, 0, pluginNameBytes.Length);
				// The public key can exceed a single 255-byte chunk, hence the multi-part helper.
				var specificData = Encoding.UTF8.GetBytes(_srp256.PublicKeyHex);
				WriteMultiPartHelper(result, IscCodes.CNCT_specific_data, specificData);

				var plugins = string.Join(",", new[] { _srp256.Name, _srp.Name });
				var pluginsBytes = Encoding.UTF8.GetBytes(plugins);
				result.WriteByte(IscCodes.CNCT_plugin_list);
				result.WriteByte((byte)pluginsBytes.Length);
				result.Write(pluginsBytes, 0, pluginsBytes.Length);

				result.WriteByte(IscCodes.CNCT_client_crypt);
				result.WriteByte(4);
				result.Write(TypeEncoder.EncodeInt32(WireCryptOptionValue(WireCrypt)), 0, 4);
			}
			else
			{
				var pluginNameBytes = Encoding.UTF8.GetBytes(_sspi.Name);
				result.WriteByte(IscCodes.CNCT_plugin_name);
				result.WriteByte((byte)pluginNameBytes.Length);
				result.Write(pluginNameBytes, 0, pluginNameBytes.Length);
				var specificData = _sspi.InitializeClientSecurity();
				WriteMultiPartHelper(result, IscCodes.CNCT_specific_data, specificData);

				result.WriteByte(IscCodes.CNCT_plugin_list);
				result.WriteByte((byte)pluginNameBytes.Length);
				result.Write(pluginNameBytes, 0, pluginNameBytes.Length);

				// Wire encryption is not used together with SSPI.
				result.WriteByte(IscCodes.CNCT_client_crypt);
				result.WriteByte(4);
				result.Write(TypeEncoder.EncodeInt32(IscCodes.WIRE_CRYPT_DISABLED), 0, 4);
			}
			return result.ToArray();
		}
	}

	// Writes the op_cont_auth packet continuing the authentication handshake.
	public void SendContAuthToBuffer()
	{
		Connection.Xdr.Write(IscCodes.op_cont_auth);
		Connection.Xdr.WriteBuffer(HasClientData ? ClientData : PublicClientData); // p_data
		Connection.Xdr.Write(AcceptPluginName); // p_name
		Connection.Xdr.Write(AcceptPluginName); // p_list
		Connection.Xdr.WriteBuffer(ServerKeys); // p_keys
	}

	// Async counterpart of SendContAuthToBuffer.
	public async ValueTask SendContAuthToBufferAsync(CancellationToken cancellationToken = default)
	{
		await Connection.Xdr.WriteAsync(IscCodes.op_cont_auth, cancellationToken).ConfigureAwait(false);
		await Connection.Xdr.WriteBufferAsync(HasClientData ? ClientData : PublicClientData, cancellationToken).ConfigureAwait(false); // p_data
		await Connection.Xdr.WriteAsync(AcceptPluginName, cancellationToken).ConfigureAwait(false); // p_name
		await Connection.Xdr.WriteAsync(AcceptPluginName, cancellationToken).ConfigureAwait(false); // p_list
		await Connection.Xdr.WriteBufferAsync(ServerKeys, cancellationToken).ConfigureAwait(false); // p_keys
	}

	// TODO: maybe more logic can be pulled up here
	// Reads the server's answer to op_cont_auth: either another authentication
	// round (ContAuthResponse), a crypt-key callback, or a GenericResponse that
	// carries the server keys and completes authentication.
	public IResponse ProcessContAuthResponse()
	{
		var operation = Connection.Xdr.ReadOperation();
		var response = Connection.ProcessOperation(operation);
		response.HandleResponseException();
		if (response is Version13.ContAuthResponse)
		{
			return response;
		}
		else if (response is Version13.CryptKeyCallbackResponse || response is Version15.CryptKeyCallbackResponse)
		{
			return response;
		}
		else if (response is GenericResponse genericResponse)
		{
			ServerKeys = genericResponse.Data;
			Complete();
		}
		else
		{
			throw new InvalidOperationException($"Unexpected response ({operation}).");
		}
		return response;
	}

	// Async counterpart of ProcessContAuthResponse.
	public async ValueTask ProcessContAuthResponseAsync(CancellationToken cancellationToken = default)
	{
		var operation = await Connection.Xdr.ReadOperationAsync(cancellationToken).ConfigureAwait(false);
		var response = await Connection.ProcessOperationAsync(operation, cancellationToken).ConfigureAwait(false);
		response.HandleResponseException();
		if (response is Version13.ContAuthResponse)
		{
			return response;
		}
		else if (response is Version13.CryptKeyCallbackResponse || response is Version15.CryptKeyCallbackResponse)
		{
			return response;
		}
		else if (response is GenericResponse genericResponse)
		{
			ServerKeys = genericResponse.Data;
			Complete();
		}
		else
		{
			throw new InvalidOperationException($"Unexpected response ({operation}).");
		}
		return response;
	}

	// Writes the op_crypt request selecting the wire-encryption plugin;
	// no-op when wire encryption is disabled.
	public void SendWireCryptToBuffer()
	{
		if (WireCrypt == WireCryptOption.Disabled)
			return;

		Connection.Xdr.Write(IscCodes.op_crypt);
		Connection.Xdr.Write(FirebirdNetworkHandlingWrapper.EncryptionName);
		Connection.Xdr.Write(SessionKeyName);
	}

	// Async counterpart of SendWireCryptToBuffer.
	public async ValueTask SendWireCryptToBufferAsync(CancellationToken cancellationToken = default)
	{
		if (WireCrypt == WireCryptOption.Disabled)
			return;

		await Connection.Xdr.WriteAsync(IscCodes.op_crypt, cancellationToken).ConfigureAwait(false);
		await Connection.Xdr.WriteAsync(FirebirdNetworkHandlingWrapper.EncryptionName, cancellationToken).ConfigureAwait(false);
		await Connection.Xdr.WriteAsync(SessionKeyName, cancellationToken).ConfigureAwait(false);
	}

	// Turns on encryption and reads the server's op_crypt acknowledgement.
	public void ProcessWireCryptResponse()
	{
		if (WireCrypt == WireCryptOption.Disabled)
			return;

		// after writing before reading
		Connection.StartEncryption();
		var operation = Connection.Xdr.ReadOperation();
		var response = Connection.ProcessOperation(operation);
		response.HandleResponseException();
		WireCryptInitialized = true;
	}

	// Async counterpart of ProcessWireCryptResponse.
	public async ValueTask ProcessWireCryptResponseAsync(CancellationToken cancellationToken = default)
	{
		if (WireCrypt == WireCryptOption.Disabled)
			return;

		// after writing before reading
		Connection.StartEncryption();
		var operation = await Connection.Xdr.ReadOperationAsync(cancellationToken).ConfigureAwait(false);
		var response = await Connection.ProcessOperationAsync(operation, cancellationToken).ConfigureAwait(false);
		response.HandleResponseException();
		WireCryptInitialized = true;
	}

	// Throws when WireCrypt=Required could not be satisfied on a protocol
	// version that supports wire encryption.
	public void WireCryptValidate(int protocolVersion)
	{
		var validProtocolVersion = protocolVersion ==
IscCodes.PROTOCOL_VERSION13 ||
			protocolVersion == IscCodes.PROTOCOL_VERSION15 ||
			protocolVersion == IscCodes.PROTOCOL_VERSION16;
		if (validProtocolVersion && WireCrypt == WireCryptOption.Required && IsAuthenticated && !WireCryptInitialized)
		{
			throw IscException.ForErrorCode(IscCodes.isc_wirecrypt_incompatible);
		}
	}

	// Records the server's accept data and prepares the client response for the
	// accepted plugin (Srp256, Srp or SSPI).
	public void Start(byte[] serverData, string acceptPluginName, bool isAuthenticated, byte[] serverKeys)
	{
		ServerData = serverData;
		AcceptPluginName = acceptPluginName;
		IsAuthenticated = isAuthenticated;
		ServerKeys = serverKeys;

		var hasServerData = ServerData.Length != 0;
		if (AcceptPluginName.Equals(_srp256.Name, StringComparison.Ordinal))
		{
			PublicClientData = Encoding.UTF8.GetBytes(_srp256.PublicKeyHex);
			if (hasServerData)
			{
				ClientData = Encoding.UTF8.GetBytes(_srp256.ClientProof(NormalizeLogin(User), Password, ServerData).ToHexString());
			}
			SessionKey = _srp256.SessionKey;
			SessionKeyName = _srp256.SessionKeyName;
		}
		else if (AcceptPluginName.Equals(_srp.Name, StringComparison.Ordinal))
		{
			PublicClientData = Encoding.UTF8.GetBytes(_srp.PublicKeyHex);
			if (hasServerData)
			{
				ClientData = Encoding.UTF8.GetBytes(_srp.ClientProof(NormalizeLogin(User), Password, ServerData).ToHexString());
			}
			SessionKey = _srp.SessionKey;
			SessionKeyName = _srp.SessionKeyName;
		}
		else if (AcceptPluginName.Equals(_sspi.Name, StringComparison.Ordinal))
		{
			if (hasServerData)
			{
				ClientData = _sspi.GetClientSecurity(ServerData);
			}
		}
		else
		{
			throw new NotSupportedException($"Not supported plugin '{AcceptPluginName}'.");
		}
	}

	// Marks authentication as finished and frees the plugin helpers.
	public void Complete()
	{
		IsAuthenticated = true;
		ReleaseAuth();
	}

	void ReleaseAuth()
	{
		_srp256 = null;
		_srp = null;
		_sspi?.Dispose();
		_sspi = null;
	}

	// Writes 'data' as a sequence of tagged CNCT chunks; each chunk carries a
	// one-byte part index because a single chunk payload is limited to 254 bytes.
	static void WriteMultiPartHelper(Stream stream, byte code, byte[] data)
	{
		const int MaxLength = 255 - 1;
		var part = 0;
		for (var i = 0; i < data.Length; i += MaxLength)
		{
			stream.WriteByte(code);
			var length = Math.Min(data.Length - i, MaxLength);
			// Length includes the part-index byte that follows.
			stream.WriteByte((byte)(length + 1));
			stream.WriteByte((byte)part);
			stream.Write(data, i, length);
			part++;
		}
	}

	// Maps the wire-crypt option onto the numeric value sent in CNCT_client_crypt.
	static int WireCryptOptionValue(WireCryptOption wireCrypt)
	{
		return wireCrypt switch
		{
			WireCryptOption.Disabled => IscCodes.WIRE_CRYPT_DISABLED,
			WireCryptOption.Enabled => IscCodes.WIRE_CRYPT_ENABLED,
			WireCryptOption.Required => IscCodes.WIRE_CRYPT_REQUIRED,
			_ => throw new ArgumentOutOfRangeException(nameof(wireCrypt), $"{nameof(wireCrypt)}={wireCrypt}"),
		};
	}

	// Normalizes a login the way Firebird does: quoted names keep their exact
	// case (with "" unescaped to "), unquoted names are upper-cased.
	internal static string NormalizeLogin(string login)
	{
		if (string.IsNullOrEmpty(login))
		{
			return login;
		}
		if (login.Length > 2 && login[0] == '"' && login[login.Length - 1] == '"')
		{
			var sb = new StringBuilder(login, 1, login.Length - 2, login.Length - 2);
			for (int idx = 0; idx < sb.Length; idx++)
			{
				// Double double quotes ("") escape a double quote in a quoted string
				if (sb[idx] == '"')
				{
					// Strip double quote escape
					sb.Remove(idx, 1);
					if (idx < sb.Length && sb[idx] == '"')
					{
						// Retain escaped double quote
						idx += 1;
					}
					else
					{
						// The character after escape is not a double quote, we terminate the conversion and truncate.
						// Firebird does this as well (see common/utils.cpp#dpbItemUpper)
						sb.Length = idx;
						return sb.ToString();
					}
				}
			}
			return sb.ToString();
		}
		return login.ToUpperInvariant();
	}
}

================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/DataProviderStreamWrapper.cs ================================================

/*
 * The contents of this file are subject to the Initial
 * Developer's Public License Version 1.0 (the "License");
 * you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
*/ //$Authors = Jiri Cincura (jiri@cincura.net) using System.IO; using System.Runtime.CompilerServices; using System.Threading; using System.Threading.Tasks; namespace FirebirdSql.Data.Client.Managed; sealed class DataProviderStreamWrapper : IDataProvider { readonly Stream _stream; public DataProviderStreamWrapper(Stream stream) { _stream = stream; } [MethodImpl(MethodImplOptions.AggressiveInlining)] public int Read(byte[] buffer, int offset, int count) { return _stream.Read(buffer, offset, count); } [MethodImpl(MethodImplOptions.AggressiveInlining)] public ValueTask ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken = default) { return new ValueTask(_stream.ReadAsync(buffer, offset, count, cancellationToken)); } [MethodImpl(MethodImplOptions.AggressiveInlining)] public void Write(byte[] buffer, int offset, int count) { _stream.Write(buffer, offset, count); } [MethodImpl(MethodImplOptions.AggressiveInlining)] public ValueTask WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken = default) { return new ValueTask(_stream.WriteAsync(buffer, offset, count, cancellationToken)); } [MethodImpl(MethodImplOptions.AggressiveInlining)] public void Flush() { _stream.Flush(); } [MethodImpl(MethodImplOptions.AggressiveInlining)] public ValueTask FlushAsync(CancellationToken cancellationToken = default) { return new ValueTask(_stream.FlushAsync(cancellationToken)); } } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/FetchResponse.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. 
 *
 * Software distributed under the License is distributed on
 * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 * express or implied. See the License for the specific
 * language governing rights and limitations under the License.
 *
 * All Rights Reserved.
 */

//$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net)

using System;
using FirebirdSql.Data.Common;

namespace FirebirdSql.Data.Client.Managed;

// Immutable response to a fetch operation: the status code and the number of
// rows carried by this batch.
internal class FetchResponse : IResponse
{
	// Fetch status code returned by the server.
	public int Status { get; }
	// Number of rows in this fetch batch.
	public int Count { get; }

	public FetchResponse(int status, int count)
	{
		Status = status;
		Count = count;
	}
}

================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/FirebirdNetworkHandlingWrapper.cs ================================================

/*
 * The contents of this file are subject to the Initial
 * Developer's Public License Version 1.0 (the "License");
 * you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 * Software distributed under the License is distributed on
 * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 * express or implied. See the License for the specific
 * language governing rights and limitations under the License.
 *
 * All Rights Reserved.
 */

//$Authors = Jiri Cincura (jiri@cincura.net)

using System;
using System.Collections.Generic;
using System.IO;
using System.Threading;
using System.Threading.Tasks;
using FirebirdSql.Data.Common;

namespace FirebirdSql.Data.Client.Managed;

// IDataProvider decorator that can layer zlib compression and ARC4 wire
// encryption on top of another provider, buffering reads and writes.
sealed class FirebirdNetworkHandlingWrapper : IDataProvider, ITracksIOFailure
{
	public const string CompressionName = "zlib";
	public const string EncryptionName = "Arc4";

	const int PreferredBufferSize = 32 * 1024;

	readonly IDataProvider _dataProvider;

	// Pending outgoing bytes (drained by Flush) and decoded incoming bytes.
	readonly Queue _outputBuffer;
	readonly Queue _inputBuffer;
	readonly byte[] _readBuffer;

	byte[] _compressionBuffer;
	Ionic.Zlib.ZlibCodec _compressor;
	Ionic.Zlib.ZlibCodec _decompressor;
	Org.BouncyCastle.Crypto.Engines.RC4Engine _decryptor;
	Org.BouncyCastle.Crypto.Engines.RC4Engine _encryptor;

	public FirebirdNetworkHandlingWrapper(IDataProvider dataProvider)
	{
		_dataProvider = dataProvider;
		_outputBuffer = new Queue(PreferredBufferSize);
		_inputBuffer = new Queue(PreferredBufferSize);
		_readBuffer = new byte[PreferredBufferSize];
	}

	// Set when the underlying transport throws, so the connection can be
	// treated as broken by callers.
	public bool IOFailed { get; set; }

	// Reads from the underlying provider (decrypting then decompressing as
	// configured) when the buffered data cannot satisfy 'count', then hands
	// out up to 'count' bytes from the input buffer.
	public int Read(byte[] buffer, int offset, int count)
	{
		if (_inputBuffer.Count < count)
		{
			var readBuffer = _readBuffer;
			int read;
			try
			{
				read = _dataProvider.Read(readBuffer, 0, readBuffer.Length);
			}
			catch (IOException)
			{
				IOFailed = true;
				throw;
			}
			if (read != 0)
			{
				// Decrypt in place first, then decompress into _compressionBuffer.
				if (_decryptor != null)
				{
					_decryptor.ProcessBytes(readBuffer, 0, read, readBuffer, 0);
				}
				if (_decompressor != null)
				{
					read = HandleDecompression(readBuffer, read);
					readBuffer = _compressionBuffer;
				}
				WriteToInputBuffer(readBuffer, read);
			}
		}
		var dataLength = ReadFromInputBuffer(buffer, offset, count);
		return dataLength;
	}

	// Async counterpart of Read.
	public async ValueTask ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken = default)
	{
		if (_inputBuffer.Count < count)
		{
			var readBuffer = _readBuffer;
			int read;
			try
			{
				read = await _dataProvider.ReadAsync(readBuffer, 0, readBuffer.Length, cancellationToken).ConfigureAwait(false);
			}
			catch (IOException)
			{
				IOFailed =
true; throw; } if (read != 0) { if (_decryptor != null) { _decryptor.ProcessBytes(readBuffer, 0, read, readBuffer, 0); } if (_decompressor != null) { read = HandleDecompression(readBuffer, read); readBuffer = _compressionBuffer; } WriteToInputBuffer(readBuffer, read); } } var dataLength = ReadFromInputBuffer(buffer, offset, count); return dataLength; } public void Write(byte[] buffer, int offset, int count) { for (var i = 0; i < count; i++) _outputBuffer.Enqueue(buffer[offset + i]); } public ValueTask WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken = default) { for (var i = 0; i < count; i++) _outputBuffer.Enqueue(buffer[offset + i]); return ValueTask.CompletedTask; } public void Flush() { var buffer = _outputBuffer.ToArray(); _outputBuffer.Clear(); var count = buffer.Length; if (_compressor != null) { count = HandleCompression(buffer, count); buffer = _compressionBuffer; } if (_encryptor != null) { _encryptor.ProcessBytes(buffer, 0, count, buffer, 0); } try { _dataProvider.Write(buffer, 0, count); _dataProvider.Flush(); } catch (IOException) { IOFailed = true; throw; } } public async ValueTask FlushAsync(CancellationToken cancellationToken = default) { var buffer = _outputBuffer.ToArray(); _outputBuffer.Clear(); var count = buffer.Length; if (_compressor != null) { count = HandleCompression(buffer, count); buffer = _compressionBuffer; } if (_encryptor != null) { _encryptor.ProcessBytes(buffer, 0, count, buffer, 0); } try { await _dataProvider.WriteAsync(buffer, 0, count, cancellationToken).ConfigureAwait(false); await _dataProvider.FlushAsync(cancellationToken).ConfigureAwait(false); } catch (IOException) { IOFailed = true; throw; } } public void StartCompression() { _compressionBuffer = new byte[PreferredBufferSize]; _compressor = new Ionic.Zlib.ZlibCodec(Ionic.Zlib.CompressionMode.Compress); _decompressor = new Ionic.Zlib.ZlibCodec(Ionic.Zlib.CompressionMode.Decompress); } public void StartEncryption(byte[] key) { _encryptor 
= CreateCipher(key); _decryptor = CreateCipher(key); } int ReadFromInputBuffer(byte[] buffer, int offset, int count) { var read = Math.Min(count, _inputBuffer.Count); for (var i = 0; i < read; i++) { buffer[offset + i] = _inputBuffer.Dequeue(); } return read; } void WriteToInputBuffer(byte[] data, int count) { for (var i = 0; i < count; i++) { _inputBuffer.Enqueue(data[i]); } } int HandleDecompression(byte[] buffer, int count) { _decompressor.InputBuffer = buffer; _decompressor.NextOut = 0; _decompressor.NextIn = 0; _decompressor.AvailableBytesIn = count; while (true) { _decompressor.OutputBuffer = _compressionBuffer; _decompressor.AvailableBytesOut = _compressionBuffer.Length - _decompressor.NextOut; var rc = _decompressor.Inflate(Ionic.Zlib.FlushType.None); if (rc != Ionic.Zlib.ZlibConstants.Z_OK) throw new IOException($"Error '{rc}' while decompressing the data."); if (_decompressor.AvailableBytesIn > 0 || _decompressor.AvailableBytesOut == 0) { ResizeBuffer(ref _compressionBuffer); continue; } break; } return _decompressor.NextOut; } int HandleCompression(byte[] buffer, int count) { _compressor.InputBuffer = buffer; _compressor.NextOut = 0; _compressor.NextIn = 0; _compressor.AvailableBytesIn = count; while (true) { _compressor.OutputBuffer = _compressionBuffer; _compressor.AvailableBytesOut = _compressionBuffer.Length - _compressor.NextOut; var rc = _compressor.Deflate(Ionic.Zlib.FlushType.None); if (rc != Ionic.Zlib.ZlibConstants.Z_OK) throw new IOException($"Error '{rc}' while compressing the data."); if (_compressor.AvailableBytesIn > 0 || _compressor.AvailableBytesOut == 0) { ResizeBuffer(ref _compressionBuffer); continue; } break; } while (true) { _compressor.OutputBuffer = _compressionBuffer; _compressor.AvailableBytesOut = _compressionBuffer.Length - _compressor.NextOut; var rc = _compressor.Deflate(Ionic.Zlib.FlushType.Sync); if (rc != Ionic.Zlib.ZlibConstants.Z_OK) throw new IOException($"Error '{rc}' while compressing the data."); if 
(_compressor.AvailableBytesIn > 0 || _compressor.AvailableBytesOut == 0) { ResizeBuffer(ref _compressionBuffer); continue; } break; } return _compressor.NextOut; } static void ResizeBuffer(ref byte[] buffer) { Array.Resize(ref buffer, buffer.Length * 2); } static Org.BouncyCastle.Crypto.Engines.RC4Engine CreateCipher(byte[] key) { var cipher = new Org.BouncyCastle.Crypto.Engines.RC4Engine(); cipher.Init(default, new Org.BouncyCastle.Crypto.Parameters.KeyParameter(key)); return cipher; } } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/GdsConnection.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. 
*/ //$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net) using System; using System.Collections.Generic; using System.IO; using System.Linq; using System.Net; using System.Net.Sockets; using System.Threading; using System.Threading.Tasks; using FirebirdSql.Data.Common; namespace FirebirdSql.Data.Client.Managed; internal sealed class GdsConnection { private NetworkStream _networkStream; private FirebirdNetworkHandlingWrapper _firebirdNetworkHandlingWrapper; public string User { get; private set; } public string Password { get; private set; } public string DataSource { get; private set; } public int PortNumber { get; private set; } public int Timeout { get; private set; } public int PacketSize { get; private set; } public Charset Charset { get; private set; } public short Dialect { get; private set; } public bool Compression { get; private set; } public Version13.WireCryptOption WireCrypt { get; private set; } public byte[] CryptKey { get; private set; } public int ProtocolVersion { get; private set; } public int ProtocolArchitecture { get; private set; } public int ProtocolMinimunType { get; private set; } public bool ConnectionBroken => _firebirdNetworkHandlingWrapper?.IOFailed ?? 
false; internal IPAddress IPAddress { get; private set; } internal XdrReaderWriter Xdr { get; private set; } internal AuthBlock AuthBlock { get; private set; } public GdsConnection(string dataSource, int port, int timeout) : this(null, null, dataSource, port, timeout, 8192, Charset.DefaultCharset, 3, false, Version13.WireCryptOption.Enabled, null) { } public GdsConnection(string user, string password, string dataSource, int portNumber, int timeout, int packetSize, Charset charset, short dialect, bool compression, Version13.WireCryptOption wireCrypt, byte[] cryptKey) { User = user; Password = password; DataSource = dataSource; PortNumber = portNumber; Timeout = timeout; PacketSize = packetSize; Charset = charset; Dialect = dialect; Compression = compression; WireCrypt = wireCrypt; CryptKey = cryptKey; } public void Connect() { try { IPAddress = GetIPAddress(DataSource); var endPoint = new IPEndPoint(IPAddress, PortNumber); var socket = new Socket(IPAddress.AddressFamily, SocketType.Stream, ProtocolType.Tcp); socket.SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.ReceiveBuffer, PacketSize); socket.SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.SendBuffer, PacketSize); socket.SetSocketOption(SocketOptionLevel.Tcp, SocketOptionName.NoDelay, 1); socket.SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.KeepAlive, 1); socket.Connect(endPoint); _networkStream = new NetworkStream(socket, true); _firebirdNetworkHandlingWrapper = new FirebirdNetworkHandlingWrapper(new DataProviderStreamWrapper(_networkStream)); Xdr = new XdrReaderWriter(_firebirdNetworkHandlingWrapper, Charset); } catch (SocketException ex) { throw IscException.ForTypeErrorCodeStrParam(IscCodes.isc_arg_gds, IscCodes.isc_network_error, DataSource, ex); } } public async ValueTask ConnectAsync(CancellationToken cancellationToken = default) { try { IPAddress = await GetIPAddressAsync(DataSource, cancellationToken).ConfigureAwait(false); var endPoint = new IPEndPoint(IPAddress, 
PortNumber); var socket = new Socket(IPAddress.AddressFamily, SocketType.Stream, ProtocolType.Tcp); socket.SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.ReceiveBuffer, PacketSize); socket.SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.SendBuffer, PacketSize); socket.SetSocketOption(SocketOptionLevel.Tcp, SocketOptionName.NoDelay, 1); socket.SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.KeepAlive, 1); using (var timeoutCts = new CancellationTokenSource(TimeSpan.FromSeconds(Timeout))) { using (var combinedCts = CancellationTokenSource.CreateLinkedTokenSource(timeoutCts.Token, cancellationToken)) { await socket.ConnectAsync(endPoint, combinedCts.Token).ConfigureAwait(false); } } _networkStream = new NetworkStream(socket, true); _firebirdNetworkHandlingWrapper = new FirebirdNetworkHandlingWrapper(new DataProviderStreamWrapper(_networkStream)); Xdr = new XdrReaderWriter(_firebirdNetworkHandlingWrapper, Charset); } catch (SocketException ex) { throw IscException.ForTypeErrorCodeStrParam(IscCodes.isc_arg_gds, IscCodes.isc_network_error, DataSource, ex); } } public void Identify(string database) { try { Xdr.Write(IscCodes.op_connect); Xdr.Write(IscCodes.op_attach); Xdr.Write(IscCodes.CONNECT_VERSION3); Xdr.Write(IscCodes.GenericAchitectureClient); Xdr.Write(database); var protocols = ProtocolsSupported.Get(Compression); Xdr.Write(protocols.Count()); AuthBlock = new AuthBlock(this, User, Password, WireCrypt); Xdr.WriteBuffer(AuthBlock.UserIdentificationData()); var priority = 0; foreach (var protocol in protocols) { Xdr.Write(protocol.Version); Xdr.Write(IscCodes.GenericAchitectureClient); Xdr.Write(IscCodes.p_cnct_min_type); Xdr.Write(protocol.MaxPType); Xdr.Write(priority); priority++; } Xdr.Flush(); var operation = Xdr.ReadOperation(); while (operation == IscCodes.op_crypt_key_callback) { var data = Xdr.ReadBuffer(); var size = Xdr.ReadInt32(); Xdr.Write(IscCodes.op_crypt_key_callback); Xdr.WriteBuffer(CryptKey); Xdr.Write(size); 
Xdr.Flush(); operation = Xdr.ReadOperation(); } if (operation == IscCodes.op_accept || operation == IscCodes.op_cond_accept || operation == IscCodes.op_accept_data) { ProtocolVersion = Xdr.ReadInt32(); ProtocolArchitecture = Xdr.ReadInt32(); ProtocolMinimunType = Xdr.ReadInt32(); if (ProtocolVersion < 0) { ProtocolVersion = (ushort)(ProtocolVersion & IscCodes.FB_PROTOCOL_MASK) | IscCodes.FB_PROTOCOL_FLAG; } if (Compression && !((ProtocolMinimunType & IscCodes.pflag_compress) != 0)) { Compression = false; } if (operation == IscCodes.op_cond_accept || operation == IscCodes.op_accept_data) { AuthBlock.Start( Xdr.ReadBuffer(), Xdr.ReadString(), Xdr.ReadBoolean(), Xdr.ReadBuffer()); if (Compression) { // after reading before writing StartCompression(); } if (operation == IscCodes.op_cond_accept) { while (true) { AuthBlock.SendContAuthToBuffer(); Xdr.Flush(); var response = AuthBlock.ProcessContAuthResponse(); if (response is Version13.ContAuthResponse contAuthResponse) { AuthBlock.Start(contAuthResponse.ServerData, contAuthResponse.AcceptPluginName, contAuthResponse.IsAuthenticated, contAuthResponse.ServerKeys); continue; } break; } if (AuthBlock.ServerKeys.Any()) { AuthBlock.SendWireCryptToBuffer(); Xdr.Flush(); AuthBlock.ProcessWireCryptResponse(); } } } AuthBlock.WireCryptValidate(ProtocolVersion); } else if (operation == IscCodes.op_response) { var response = (GenericResponse)ProcessOperation(operation); response.HandleResponseException(); } else { throw IscException.ForErrorCode(IscCodes.isc_connect_reject); } } catch (IOException ex) { throw IscException.ForIOException(ex); } } public async ValueTask IdentifyAsync(string database, CancellationToken cancellationToken = default) { try { await Xdr.WriteAsync(IscCodes.op_connect, cancellationToken).ConfigureAwait(false); await Xdr.WriteAsync(IscCodes.op_attach, cancellationToken).ConfigureAwait(false); await Xdr.WriteAsync(IscCodes.CONNECT_VERSION3, cancellationToken).ConfigureAwait(false); await 
Xdr.WriteAsync(IscCodes.GenericAchitectureClient, cancellationToken).ConfigureAwait(false); await Xdr.WriteAsync(database, cancellationToken).ConfigureAwait(false); var protocols = ProtocolsSupported.Get(Compression); await Xdr.WriteAsync(protocols.Count(), cancellationToken).ConfigureAwait(false); AuthBlock = new AuthBlock(this, User, Password, WireCrypt); await Xdr.WriteBufferAsync(AuthBlock.UserIdentificationData(), cancellationToken).ConfigureAwait(false); var priority = 0; foreach (var protocol in protocols) { await Xdr.WriteAsync(protocol.Version, cancellationToken).ConfigureAwait(false); await Xdr.WriteAsync(IscCodes.GenericAchitectureClient, cancellationToken).ConfigureAwait(false); await Xdr.WriteAsync(IscCodes.p_cnct_min_type, cancellationToken).ConfigureAwait(false); await Xdr.WriteAsync(protocol.MaxPType, cancellationToken).ConfigureAwait(false); await Xdr.WriteAsync(priority, cancellationToken).ConfigureAwait(false); priority++; } await Xdr.FlushAsync(cancellationToken).ConfigureAwait(false); var operation = await Xdr.ReadOperationAsync(cancellationToken).ConfigureAwait(false); while (operation == IscCodes.op_crypt_key_callback) { var data = await Xdr.ReadBufferAsync(cancellationToken).ConfigureAwait(false); var size = await Xdr.ReadInt32Async(cancellationToken).ConfigureAwait(false); await Xdr.WriteAsync(IscCodes.op_crypt_key_callback, cancellationToken).ConfigureAwait(false); await Xdr.WriteBufferAsync(CryptKey, cancellationToken).ConfigureAwait(false); await Xdr.WriteAsync(size, cancellationToken).ConfigureAwait(false); await Xdr.FlushAsync(cancellationToken).ConfigureAwait(false); operation = await Xdr.ReadOperationAsync(cancellationToken).ConfigureAwait(false); } if (operation == IscCodes.op_accept || operation == IscCodes.op_cond_accept || operation == IscCodes.op_accept_data) { ProtocolVersion = await Xdr.ReadInt32Async(cancellationToken).ConfigureAwait(false); ProtocolArchitecture = await 
Xdr.ReadInt32Async(cancellationToken).ConfigureAwait(false); ProtocolMinimunType = await Xdr.ReadInt32Async(cancellationToken).ConfigureAwait(false); if (ProtocolVersion < 0) { ProtocolVersion = (ushort)(ProtocolVersion & IscCodes.FB_PROTOCOL_MASK) | IscCodes.FB_PROTOCOL_FLAG; } if (Compression && !((ProtocolMinimunType & IscCodes.pflag_compress) != 0)) { Compression = false; } if (operation == IscCodes.op_cond_accept || operation == IscCodes.op_accept_data) { AuthBlock.Start( await Xdr.ReadBufferAsync(cancellationToken).ConfigureAwait(false), await Xdr.ReadStringAsync(cancellationToken).ConfigureAwait(false), await Xdr.ReadBooleanAsync(cancellationToken).ConfigureAwait(false), await Xdr.ReadBufferAsync(cancellationToken).ConfigureAwait(false)); if (Compression) { // after reading before writing StartCompression(); } if (operation == IscCodes.op_cond_accept) { while (true) { await AuthBlock.SendContAuthToBufferAsync(cancellationToken).ConfigureAwait(false); await Xdr.FlushAsync(cancellationToken).ConfigureAwait(false); var response = await AuthBlock.ProcessContAuthResponseAsync(cancellationToken).ConfigureAwait(false); if (response is Version13.ContAuthResponse contAuthResponse) { AuthBlock.Start(contAuthResponse.ServerData, contAuthResponse.AcceptPluginName, contAuthResponse.IsAuthenticated, contAuthResponse.ServerKeys); continue; } break; } if (AuthBlock.ServerKeys.Any()) { await AuthBlock.SendWireCryptToBufferAsync(cancellationToken).ConfigureAwait(false); await Xdr.FlushAsync(cancellationToken).ConfigureAwait(false); await AuthBlock.ProcessWireCryptResponseAsync(cancellationToken).ConfigureAwait(false); } } } AuthBlock.WireCryptValidate(ProtocolVersion); } else if (operation == IscCodes.op_response) { var response = (GenericResponse)await ProcessOperationAsync(operation, cancellationToken).ConfigureAwait(false); response.HandleResponseException(); } else { throw IscException.ForErrorCode(IscCodes.isc_connect_reject); } } catch (IOException ex) { throw 
IscException.ForIOException(ex); } } public void Disconnect() { if (_networkStream != null) { _networkStream.Dispose(); _networkStream = null; } } public async ValueTask DisconnectAsync(CancellationToken cancellationToken = default) { if (_networkStream != null) { await _networkStream.DisposeAsync().ConfigureAwait(false); _networkStream = null; } } internal IResponse ProcessOperation(int operation) { switch (operation) { case IscCodes.op_response: return new GenericResponse( Xdr.ReadInt32(), Xdr.ReadInt64(), Xdr.ReadBuffer(), Xdr.ReadStatusVector()); case IscCodes.op_fetch_response: return new FetchResponse( Xdr.ReadInt32(), Xdr.ReadInt32()); case IscCodes.op_sql_response: return new SqlResponse( Xdr.ReadInt32()); case IscCodes.op_trusted_auth: return new Version11.AuthResponse( Xdr.ReadBuffer()); case IscCodes.op_crypt_key_callback: if (ProtocolVersion == IscCodes.PROTOCOL_VERSION15 || ProtocolVersion == IscCodes.PROTOCOL_VERSION16) { return new Version15.CryptKeyCallbackResponse( Xdr.ReadBuffer(), Xdr.ReadInt32()); } else { return new Version13.CryptKeyCallbackResponse( Xdr.ReadBuffer()); } case IscCodes.op_cont_auth: return new Version13.ContAuthResponse( Xdr.ReadBuffer(), Xdr.ReadString(), Xdr.ReadBoolean(), Xdr.ReadBuffer()); case IscCodes.op_batch_cs: var statementHandle = Xdr.ReadInt16(); var p_batch_reccount = Xdr.ReadInt32(); var p_batch_updates = Xdr.ReadInt32(); var p_batch_vectors = Xdr.ReadInt32(); var p_batch_errors = Xdr.ReadInt32(); var p_batch_updates_data = new int[p_batch_updates]; for (var i = 0; i < p_batch_updates; i++) { p_batch_updates_data[i] = Xdr.ReadInt32(); } var p_batch_vectors_data = new (int messageNumber, IscException statusVector)[p_batch_vectors]; for (var i = 0; i < p_batch_vectors; i++) { var messageNumber = Xdr.ReadInt32(); var statusVector = Xdr.ReadStatusVector(); p_batch_vectors_data[i] = (messageNumber, statusVector); } var p_batch_errors_data = new int[p_batch_errors]; for (var i = 0; i < p_batch_errors; i++) { 
p_batch_errors_data[i] = Xdr.ReadInt32(); } return new Version16.BatchCompletionStateResponse( statementHandle, p_batch_reccount, p_batch_updates_data, p_batch_vectors_data, p_batch_errors_data); default: throw new ArgumentOutOfRangeException(nameof(operation), $"{nameof(operation)}={operation}"); } } internal async ValueTask ProcessOperationAsync(int operation, CancellationToken cancellationToken = default) { switch (operation) { case IscCodes.op_response: return new GenericResponse( await Xdr.ReadInt32Async(cancellationToken).ConfigureAwait(false), await Xdr.ReadInt64Async(cancellationToken).ConfigureAwait(false), await Xdr.ReadBufferAsync(cancellationToken).ConfigureAwait(false), await Xdr.ReadStatusVectorAsync(cancellationToken).ConfigureAwait(false)); case IscCodes.op_fetch_response: return new FetchResponse( await Xdr.ReadInt32Async(cancellationToken).ConfigureAwait(false), await Xdr.ReadInt32Async(cancellationToken).ConfigureAwait(false)); case IscCodes.op_sql_response: return new SqlResponse( await Xdr.ReadInt32Async(cancellationToken).ConfigureAwait(false)); case IscCodes.op_trusted_auth: return new Version11.AuthResponse( await Xdr.ReadBufferAsync(cancellationToken).ConfigureAwait(false)); case IscCodes.op_crypt_key_callback: if (ProtocolVersion == IscCodes.PROTOCOL_VERSION15 || ProtocolVersion == IscCodes.PROTOCOL_VERSION16) { return new Version15.CryptKeyCallbackResponse( await Xdr.ReadBufferAsync(cancellationToken).ConfigureAwait(false), await Xdr.ReadInt32Async(cancellationToken).ConfigureAwait(false)); } else { return new Version13.CryptKeyCallbackResponse( await Xdr.ReadBufferAsync(cancellationToken).ConfigureAwait(false)); } case IscCodes.op_cont_auth: return new Version13.ContAuthResponse( await Xdr.ReadBufferAsync(cancellationToken).ConfigureAwait(false), await Xdr.ReadStringAsync(cancellationToken).ConfigureAwait(false), await Xdr.ReadBooleanAsync(cancellationToken).ConfigureAwait(false), await 
Xdr.ReadBufferAsync(cancellationToken).ConfigureAwait(false)); case IscCodes.op_batch_cs: var statementHandle = await Xdr.ReadInt16Async().ConfigureAwait(false); var p_batch_reccount = await Xdr.ReadInt32Async().ConfigureAwait(false); var p_batch_updates = await Xdr.ReadInt32Async().ConfigureAwait(false); var p_batch_vectors = await Xdr.ReadInt32Async().ConfigureAwait(false); var p_batch_errors = await Xdr.ReadInt32Async().ConfigureAwait(false); var p_batch_updates_data = new int[p_batch_updates]; for (var i = 0; i < p_batch_updates; i++) { p_batch_updates_data[i] = await Xdr.ReadInt32Async().ConfigureAwait(false); } var p_batch_vectors_data = new (int messageNumber, IscException statusVector)[p_batch_vectors]; for (var i = 0; i < p_batch_vectors; i++) { var messageNumber = await Xdr.ReadInt32Async().ConfigureAwait(false); var statusVector = await Xdr.ReadStatusVectorAsync().ConfigureAwait(false); p_batch_vectors_data[i] = (messageNumber, statusVector); } var p_batch_errors_data = new int[p_batch_errors]; for (var i = 0; i < p_batch_errors; i++) { p_batch_errors_data[i] = await Xdr.ReadInt32Async().ConfigureAwait(false); } return new Version16.BatchCompletionStateResponse( statementHandle, p_batch_reccount, p_batch_updates_data, p_batch_vectors_data, p_batch_errors_data); default: throw new ArgumentOutOfRangeException(nameof(operation), $"{nameof(operation)}={operation}"); } } internal void StartCompression() { _firebirdNetworkHandlingWrapper.StartCompression(); } internal void StartEncryption() { _firebirdNetworkHandlingWrapper.StartEncryption(AuthBlock.SessionKey); } private static IPAddress GetIPAddress(string dataSource) { if (IPAddress.TryParse(dataSource, out var ipaddress)) { return ipaddress; } var addresses = (Dns.GetHostEntry(dataSource)).AddressList; foreach (var address in addresses) { // IPv4 priority if (address.AddressFamily == AddressFamily.InterNetwork) { return address; } } return addresses[0]; } private static async ValueTask 
GetIPAddressAsync(string dataSource, CancellationToken cancellationToken = default) { if (IPAddress.TryParse(dataSource, out var ipaddress)) { return ipaddress; } var addresses = (await Dns.GetHostEntryAsync(dataSource).ConfigureAwait(false)).AddressList; foreach (var address in addresses) { // IPv4 priority if (address.AddressFamily == AddressFamily.InterNetwork) { return address; } } return addresses[0]; } } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/GenericResponse.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. 
*/ //$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net) using System; using FirebirdSql.Data.Common; namespace FirebirdSql.Data.Client.Managed; internal sealed class GenericResponse : IResponse { public int ObjectHandle { get; } public long BlobId { get; } public byte[] Data { get; } public IscException Exception { get; } public GenericResponse(int objectHandle, long blobId, byte[] data, IscException exception) { ObjectHandle = objectHandle; BlobId = blobId; Data = data; Exception = exception; } } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/IDataProvider.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. 
*/ //$Authors = Jiri Cincura (jiri@cincura.net) using System.Threading; using System.Threading.Tasks; namespace FirebirdSql.Data.Client.Managed; interface IDataProvider { int Read(byte[] buffer, int offset, int count); ValueTask ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken = default); void Write(byte[] buffer, int offset, int count); ValueTask WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken = default); void Flush(); ValueTask FlushAsync(CancellationToken cancellationToken = default); } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/IResponse.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. 
*/ //$Authors = Jiri Cincura (jiri@cincura.net) using System; using FirebirdSql.Data.Common; namespace FirebirdSql.Data.Client.Managed; interface IResponse { } static class IResponseExtensions { public static void HandleResponseException(this IResponse response) { if (response is GenericResponse genericResponse) { if (genericResponse.Exception != null && !genericResponse.Exception.IsWarning) { throw genericResponse.Exception; } } } public static void HandleResponseWarning(this IResponse response, Action onWarning) { if (response is GenericResponse genericResponse) { if (genericResponse.Exception != null && genericResponse.Exception.IsWarning) { onWarning?.Invoke(genericResponse.Exception); } } } } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/ITracksIOFailure.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. */ //$Authors = Jiri Cincura (jiri@cincura.net) namespace FirebirdSql.Data.Client.Managed; interface ITracksIOFailure { bool IOFailed { get; set; } } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/IXdrReader.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. 
You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. */ //$Authors = Jiri Cincura (jiri@cincura.net) using System; using System.Numerics; using System.Threading; using System.Threading.Tasks; using FirebirdSql.Data.Common; using FirebirdSql.Data.Types; namespace FirebirdSql.Data.Client.Managed; interface IXdrReader { byte[] ReadBytes(byte[] buffer, int count); ValueTask ReadBytesAsync(byte[] buffer, int count, CancellationToken cancellationToken = default); byte[] ReadOpaque(int length); ValueTask ReadOpaqueAsync(int length, CancellationToken cancellationToken = default); byte[] ReadBuffer(); ValueTask ReadBufferAsync(CancellationToken cancellationToken = default); string ReadString(); ValueTask ReadStringAsync(CancellationToken cancellationToken = default); string ReadString(int length); ValueTask ReadStringAsync(int length, CancellationToken cancellationToken = default); string ReadString(Charset charset); ValueTask ReadStringAsync(Charset charset, CancellationToken cancellationToken = default); string ReadString(Charset charset, int length); ValueTask ReadStringAsync(Charset charset, int length, CancellationToken cancellationToken = default); short ReadInt16(); ValueTask ReadInt16Async(CancellationToken cancellationToken = default); int ReadInt32(); ValueTask ReadInt32Async(CancellationToken cancellationToken = default); long ReadInt64(); ValueTask ReadInt64Async(CancellationToken cancellationToken = default); Guid ReadGuid(int sqlType); ValueTask ReadGuidAsync(int sqlType, CancellationToken cancellationToken = default); float ReadSingle(); ValueTask ReadSingleAsync(CancellationToken cancellationToken = default); double ReadDouble(); ValueTask 
ReadDoubleAsync(CancellationToken cancellationToken = default); DateTime ReadDateTime(); ValueTask ReadDateTimeAsync(CancellationToken cancellationToken = default); DateTime ReadDate(); ValueTask ReadDateAsync(CancellationToken cancellationToken = default); TimeSpan ReadTime(); ValueTask ReadTimeAsync(CancellationToken cancellationToken = default); decimal ReadDecimal(int type, int scale); ValueTask ReadDecimalAsync(int type, int scale, CancellationToken cancellationToken = default); bool ReadBoolean(); ValueTask ReadBooleanAsync(CancellationToken cancellationToken = default); FbZonedDateTime ReadZonedDateTime(bool isExtended); ValueTask ReadZonedDateTimeAsync(bool isExtended, CancellationToken cancellationToken = default); FbZonedTime ReadZonedTime(bool isExtended); ValueTask ReadZonedTimeAsync(bool isExtended, CancellationToken cancellationToken = default); FbDecFloat ReadDec16(); ValueTask ReadDec16Async(CancellationToken cancellationToken = default); FbDecFloat ReadDec34(); ValueTask ReadDec34Async(CancellationToken cancellationToken = default); BigInteger ReadInt128(); ValueTask ReadInt128Async(CancellationToken cancellationToken = default); IscException ReadStatusVector(); ValueTask ReadStatusVectorAsync(CancellationToken cancellationToken = default); int ReadOperation(); ValueTask ReadOperationAsync(CancellationToken cancellationToken = default); } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/IXdrWriter.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. 
 *	See the License for the specific
 *	language governing rights and limitations under the License.
 *
 *	All Rights Reserved.
 */
//$Authors = Jiri Cincura (jiri@cincura.net)

using System;
using System.Numerics;
using System.Threading;
using System.Threading.Tasks;
using FirebirdSql.Data.Types;

namespace FirebirdSql.Data.Client.Managed;

/// <summary>
/// Encoder for the XDR wire format used by the Firebird protocol. Each member
/// comes in a sync/async pair; writes are buffered until Flush/FlushAsync.
/// </summary>
interface IXdrWriter
{
	void Flush();
	ValueTask FlushAsync(CancellationToken cancellationToken = default);
	void WriteBytes(byte[] buffer, int count);
	ValueTask WriteBytesAsync(byte[] buffer, int count, CancellationToken cancellationToken = default);
	// Opaque = raw bytes padded to a 4-byte boundary.
	void WriteOpaque(byte[] buffer);
	ValueTask WriteOpaqueAsync(byte[] buffer, CancellationToken cancellationToken = default);
	void WriteOpaque(byte[] buffer, int length);
	ValueTask WriteOpaqueAsync(byte[] buffer, int length, CancellationToken cancellationToken = default);
	// Buffer = length-prefixed opaque.
	void WriteBuffer(byte[] buffer);
	ValueTask WriteBufferAsync(byte[] buffer, CancellationToken cancellationToken = default);
	void WriteBuffer(byte[] buffer, int length);
	ValueTask WriteBufferAsync(byte[] buffer, int length, CancellationToken cancellationToken = default);
	void WriteBlobBuffer(byte[] buffer);
	ValueTask WriteBlobBufferAsync(byte[] buffer, CancellationToken cancellationToken = default);
	void WriteTyped(int type, byte[] buffer);
	ValueTask WriteTypedAsync(int type, byte[] buffer, CancellationToken cancellationToken = default);
	void Write(string value);
	ValueTask WriteAsync(string value, CancellationToken cancellationToken = default);
	void Write(short value);
	ValueTask WriteAsync(short value, CancellationToken cancellationToken = default);
	void Write(int value);
	ValueTask WriteAsync(int value, CancellationToken cancellationToken = default);
	void Write(long value);
	ValueTask WriteAsync(long value, CancellationToken cancellationToken = default);
	void Write(float value);
	ValueTask WriteAsync(float value, CancellationToken cancellationToken = default);
	void Write(double value);
	ValueTask WriteAsync(double value, CancellationToken cancellationToken = default);
	// type/scale select the target SQL type and decimal scaling on the wire.
	void Write(decimal value, int type, int scale);
	ValueTask WriteAsync(decimal value, int type, int scale, CancellationToken cancellationToken = default);
	void Write(bool value);
	ValueTask WriteAsync(bool value, CancellationToken cancellationToken = default);
	void Write(DateTime value);
	ValueTask WriteAsync(DateTime value, CancellationToken cancellationToken = default);
	void Write(Guid value, int sqlType);
	ValueTask WriteAsync(Guid value, int sqlType, CancellationToken cancellationToken = default);
	void Write(FbDecFloat value, int size);
	ValueTask WriteAsync(FbDecFloat value, int size, CancellationToken cancellationToken = default);
	void Write(BigInteger value);
	ValueTask WriteAsync(BigInteger value, CancellationToken cancellationToken = default);
	// Date-only and time-only encodings, distinct from the full Write(DateTime).
	void WriteDate(DateTime value);
	ValueTask WriteDateAsync(DateTime value, CancellationToken cancellationToken = default);
	void WriteTime(TimeSpan value);
	ValueTask WriteTimeAsync(TimeSpan value, CancellationToken cancellationToken = default);
}

================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/ProtocolsSupported.cs
================================================
/*
 *	The contents of this file are subject to the Initial
 *	Developer's Public License Version 1.0 (the "License");
 *	you may not use this file except in compliance with the
 *	License. You may obtain a copy of the License at
 *	https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *	Software distributed under the License is distributed on
 *	an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *	express or implied. See the License for the specific
 *	language governing rights and limitations under the License.
 *
 *	All Rights Reserved.
*/

//$Authors = Jiri Cincura (jiri@cincura.net)

using System.Collections.Generic;
using FirebirdSql.Data.Common;

namespace FirebirdSql.Data.Client.Managed;

// Enumerates the wire-protocol versions this client offers to the server,
// each paired with the highest p_type (and optional compression flag) for that version.
// NOTE(review): the return type looks like it lost a generic argument during extraction
// (likely ICollection<Protocol> upstream) — verify against the repository.
internal static class ProtocolsSupported
{
	// One (protocol version, max p_type) pair of the offered list.
	internal class Protocol
	{
		public int Version { get; }
		public int MaxPType { get; }

		public Protocol(int version, int maxPType)
		{
			Version = version;
			MaxPType = maxPType;
		}
	}

	public static ICollection Get(bool compression)
	{
		// Versions 13+ share the same flags: lazy send, plus compression when requested.
		var lazySendFlags = IscCodes.ptype_lazy_send;
		if (compression)
		{
			lazySendFlags |= IscCodes.pflag_compress;
		}
		var offered = new Protocol[]
		{
			new Protocol(IscCodes.PROTOCOL_VERSION10, IscCodes.ptype_batch_send),
			new Protocol(IscCodes.PROTOCOL_VERSION11, IscCodes.ptype_lazy_send),
			new Protocol(IscCodes.PROTOCOL_VERSION12, IscCodes.ptype_lazy_send),
			new Protocol(IscCodes.PROTOCOL_VERSION13, lazySendFlags),
			new Protocol(IscCodes.PROTOCOL_VERSION15, lazySendFlags),
			new Protocol(IscCodes.PROTOCOL_VERSION16, lazySendFlags),
		};
		return offered;
	}
}
================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/SqlResponse.cs ================================================
/*
 * The contents of this file are subject to the Initial
 * Developer's Public License Version 1.0 (the "License");
 * you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 * Software distributed under the License is distributed on
 * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 * express or implied. See the License for the specific
 * language governing rights and limitations under the License.
 *
 * All Rights Reserved.
*/

//$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net)

using System;
using FirebirdSql.Data.Common;

namespace FirebirdSql.Data.Client.Managed;

// Server response that carries a single count value.
internal class SqlResponse : IResponse
{
	public SqlResponse(int count)
	{
		Count = count;
	}

	// Count reported by the server in this response.
	public int Count { get; }
}
================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/Srp/Srp256Client.cs ================================================
/*
 * The contents of this file are subject to the Initial
 * Developer's Public License Version 1.0 (the "License");
 * you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 * Software distributed under the License is distributed on
 * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 * express or implied. See the License for the specific
 * language governing rights and limitations under the License.
 *
 * All Rights Reserved.
 */

//$Authors = Jiri Cincura (jiri@cincura.net)

using System.Linq;
using System.Security.Cryptography;

namespace FirebirdSql.Data.Client.Managed.Srp;

// SRP client variant whose proof hash is SHA-256.
internal sealed class Srp256Client : SrpClientBase
{
	public override string Name => "Srp256";

	// Hashes the concatenation of all input segments with SHA-256.
	protected override byte[] ComputeHash(params byte[][] ba)
	{
		var concatenated = ba.Aggregate(Enumerable.Empty<byte>(), (acc, part) => acc.Concat(part)).ToArray();
		using (var sha256 = SHA256.Create())
		{
			return sha256.ComputeHash(concatenated);
		}
	}
}
================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/Srp/SrpClient.cs ================================================
/*
 * The contents of this file are subject to the Initial
 * Developer's Public License Version 1.0 (the "License");
 * you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
*
 * Software distributed under the License is distributed on
 * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 * express or implied. See the License for the specific
 * language governing rights and limitations under the License.
 *
 * All Rights Reserved.
 */

//$Authors = Jiri Cincura (jiri@cincura.net)

using System.Linq;
using System.Security.Cryptography;

namespace FirebirdSql.Data.Client.Managed.Srp;

// SRP client variant whose proof hash is SHA-1.
internal sealed class SrpClient : SrpClientBase
{
	public override string Name => "Srp";

	// Hashes the concatenation of all input segments with SHA-1.
	protected override byte[] ComputeHash(params byte[][] ba)
	{
		var concatenated = ba.Aggregate(Enumerable.Empty<byte>(), (acc, part) => acc.Concat(part)).ToArray();
		using (var sha1 = SHA1.Create())
		{
			return sha1.ComputeHash(concatenated);
		}
	}
}
================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/Srp/SrpClientBase.cs ================================================
/*
 * The contents of this file are subject to the Initial
 * Developer's Public License Version 1.0 (the "License");
 * you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 * Software distributed under the License is distributed on
 * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 * express or implied. See the License for the specific
 * language governing rights and limitations under the License.
 *
 * All Rights Reserved.
*/

//$Authors = Hajime Nakagami (nakagami@gmail.com), Jiri Cincura (jiri@cincura.net)

using System;
using System.Globalization;
using System.Linq;
using System.Numerics;
using System.Security.Cryptography;
using System.Text;
using FirebirdSql.Data.Common;

namespace FirebirdSql.Data.Client.Managed.Srp;

/// <summary>
/// Base of the SRP (Secure Remote Password) authentication clients.
/// http://srp.stanford.edu/design.html
/// Subclasses choose only the final proof hash (SHA-1 for Srp, SHA-256 for Srp256);
/// all internal group arithmetic here hashes with SHA-1.
/// </summary>
abstract class SrpClientBase
{
	/// <summary>Authentication method name sent to the server.</summary>
	public abstract string Name { get; }
	/// <summary>Session key type reported alongside the key.</summary>
	public string SessionKeyName { get; } = "Symmetric";

	// Note the double duty: used as a bit count in GetSecret (SRP_KEY_SIZE / 8 bytes)
	// and as a byte count in Pad.
	private const int SRP_KEY_SIZE = 128;
	// Salt size in bytes produced by GetSalt.
	private const int SRP_SALT_SIZE = 32;

	// SRP group parameters: prime modulus N, generator g, multiplier k.
	private static readonly BigInteger N = BigInteger.Parse("00E67D2E994B2F900C3F41F08F5BB2627ED0D49EE1FE767A52EFCD565CD6E768812C3E1E9CE8F0A8BEA6CB13CD29DDEBF7A96D4A93B55D488DF099A15C89DCB0640738EB2CBDD9A8F7BAB561AB1B0DC1C6CDABF303264A08D1BCA932D1F1EE428B619D970F342ABA9A65793B8B2F041AE5364350C16F735F56ECBCA87BD57B29E7", NumberStyles.HexNumber);
	private static readonly BigInteger g = new BigInteger(2);
	private static readonly BigInteger k = BigInteger.Parse("1277432915985975349439481660349303019122249719989");
	private static readonly byte[] SEPARATOR_BYTES = Encoding.UTF8.GetBytes(":");

	public BigInteger PublicKey { get; } // A
	public string PublicKeyHex => Pad(PublicKey).ToHexString();
	public BigInteger PrivateKey { get; } // a
	public byte[] Proof { get; private set; } // M
	public byte[] SessionKey { get; private set; } // K

	// Generates the client ephemeral key pair: random a, A = g^a mod N.
	public SrpClientBase()
	{
		PrivateKey = GetSecret();
		PublicKey = BigInteger.ModPow(g, PrivateKey, N);
	}

	/// <summary>
	/// Computes the session key K and client proof M from the server's salt and public key,
	/// stores both in <see cref="SessionKey"/>/<see cref="Proof"/> and returns M.
	/// NOTE(review): classic SRP-6a combines H(N) and H(g) with XOR; this implementation
	/// uses ModPow(H(N), H(g), N) instead — presumably matching the Firebird server
	/// plugin, so do not "correct" it without verifying against the server.
	/// </summary>
	public byte[] ClientProof(string user, string password, byte[] salt, BigInteger serverPublicKey)
	{
		var K = GetClientSessionKey(user, password, salt, serverPublicKey);
		var n1 = BigIntegerFromByteArray(ComputeSHA1Hash(BigIntegerToByteArray(N)));
		var n2 = BigIntegerFromByteArray(ComputeSHA1Hash(BigIntegerToByteArray(g)));
		n1 = BigInteger.ModPow(n1, n2, N);
		// n2 is reused here for H(user).
		n2 = BigIntegerFromByteArray(ComputeSHA1Hash(Encoding.UTF8.GetBytes(user)));
		// M = H(n1, H(user), salt, A, B, K) using the subclass hash (unpadded key bytes).
		var M = ComputeHash(BigIntegerToByteArray(n1), BigIntegerToByteArray(n2), salt, BigIntegerToByteArray(PublicKey), BigIntegerToByteArray(serverPublicKey), K);
		SessionKey = K;
		Proof = M;
		return Proof;
	}

	/// <summary>
	/// Parses the server auth data blob — a 2-byte little-endian salt length, the salt,
	/// then (after a further 2-byte prefix) the server public key as a hex string —
	/// and computes the proof.
	/// </summary>
	public byte[] ClientProof(string user, string password, byte[] authData)
	{
		var saltLength = authData[0] + authData[1] * 256;
		var salt = new byte[saltLength];
		Array.Copy(authData, 2, salt, 0, saltLength);
		// +4 skips the 2-byte salt-length prefix and the 2-byte key-length prefix.
		var serverKeyStart = saltLength + 4;
		var serverKeyLength = authData.Length - saltLength - 4;
		var hexServerPublicKey = new byte[serverKeyLength];
		Array.Copy(authData, serverKeyStart, hexServerPublicKey, 0, serverKeyLength);
		var hexServerPublicKeyString = Encoding.UTF8.GetString(hexServerPublicKey);
		// Leading "00" keeps the parsed hex value non-negative.
		var serverPublicKey = BigInteger.Parse($"00{hexServerPublicKeyString}", NumberStyles.HexNumber);
		return ClientProof(user, password, salt, serverPublicKey);
	}

	/// <summary>
	/// Server-side seed: B = (k * v + g^b) mod N where v = g^x mod N; returns (B, b).
	/// </summary>
	public (BigInteger, BigInteger) ServerSeed(string user, string password, byte[] salt)
	{
		var v = BigInteger.ModPow(g, GetUserHash(user, password, salt), N);
		var b = GetSecret();
		var gb = BigInteger.ModPow(g, b, N);
		// DivRem is used only for its remainder (modular reduction); quotient is discarded.
		BigInteger.DivRem(k * v, N, out var kv);
		BigInteger.DivRem(BigInteger.Add(kv, gb), N, out var B);
		return (B, b);
	}

	/// <summary>
	/// Server-side session key: SHA1((A * v^u)^b mod N).
	/// </summary>
	public byte[] GetServerSessionKey(string user, string password, byte[] salt, BigInteger A, BigInteger B, BigInteger b)
	{
		var u = GetScramble(A, B);
		var v = BigInteger.ModPow(g, GetUserHash(user, password, salt), N);
		var vu = BigInteger.ModPow(v, u, N);
		BigInteger.DivRem(A * vu, N, out var Avu);
		var sessionSecret = BigInteger.ModPow(Avu, b, N);
		return ComputeSHA1Hash(BigIntegerToByteArray(sessionSecret));
	}

	/// <summary>Generates a random SRP_SALT_SIZE-byte salt.</summary>
	public byte[] GetSalt()
	{
		return GetRandomBytes(SRP_SALT_SIZE);
	}

	// Random SRP_KEY_SIZE-bit value; the appended 0 byte forces the
	// little-endian BigInteger to be non-negative.
	private BigInteger GetSecret()
	{
		return new BigInteger(GetRandomBytes(SRP_KEY_SIZE / 8).Concat(new byte[] { 0 }).ToArray());
	}

	// Client-side session key: SHA1((B - k*g^x)^(a + u*x) mod N).
	private byte[] GetClientSessionKey(string user, string password, byte[] salt, BigInteger serverPublicKey)
	{
		var u = GetScramble(PublicKey, serverPublicKey);
		var x = GetUserHash(user, password, salt);
		var gx = BigInteger.ModPow(g, x, N);
		BigInteger.DivRem(k * gx, N, out var kgx);
		var Bkgx = serverPublicKey - kgx;
		// Keep the base non-negative before reducing mod N.
		if (Bkgx < 0)
		{
			Bkgx = Bkgx + N;
		}
		BigInteger.DivRem(Bkgx, N, out var diff);
		BigInteger.DivRem(u * x, N, out var ux);
		BigInteger.DivRem(PrivateKey + ux, N, out var aux);
		var sessionSecret = BigInteger.ModPow(diff, aux, N);
		return ComputeSHA1Hash(BigIntegerToByteArray(sessionSecret));
	}

	// Proof hash; the concrete algorithm (SHA-1 / SHA-256) is chosen by the subclass.
	protected abstract byte[] ComputeHash(params byte[][] ba);

	// x = SHA1(salt || SHA1(user ":" password)).
	private static BigInteger GetUserHash(string user, string password, byte[] salt)
	{
		var userBytes = Encoding.UTF8.GetBytes(user);
		var passwordBytes = Encoding.UTF8.GetBytes(password);
		var hash1 = ComputeSHA1Hash(userBytes, SEPARATOR_BYTES, passwordBytes);
		var hash2 = ComputeSHA1Hash(salt, hash1);
		return BigIntegerFromByteArray(hash2);
	}

	// Interprets b as unsigned big-endian: reverse to BigInteger's little-endian
	// layout and append 0 to keep the value non-negative.
	private static BigInteger BigIntegerFromByteArray(byte[] b)
	{
		return new BigInteger(b.AsEnumerable().Reverse().Concat(new byte[] { 0 }).ToArray());
	}

	// Unsigned big-endian bytes of n, with a leading zero sign byte stripped.
	private static byte[] BigIntegerToByteArray(BigInteger n)
	{
		return n.ToByteArray().AsEnumerable().Reverse().SkipWhile((e, i) => i == 0 && e == 0).ToArray();
	}

	private static byte[] ComputeSHA1Hash(params byte[][] ba)
	{
		using (var hash = SHA1.Create())
		{
			return hash.ComputeHash(ba.SelectMany(x => x).ToArray());
		}
	}

	// Keeps at most the low-order SRP_KEY_SIZE bytes (big-endian). NOTE(review):
	// despite the name this truncates and never zero-pads shorter values — confirm
	// that is the intended wire format before changing.
	private static byte[] Pad(BigInteger n)
	{
		var bn = BigIntegerToByteArray(n);
		return bn.SkipWhile((_, i) => i < bn.Length - SRP_KEY_SIZE).ToArray();
	}

	// Scramble u = SHA1(Pad(A) || Pad(B)).
	private static BigInteger GetScramble(BigInteger x, BigInteger y)
	{
		return BigIntegerFromByteArray(ComputeSHA1Hash(Pad(x), Pad(y)));
	}

	// Cryptographically secure random bytes.
	private static byte[] GetRandomBytes(int count)
	{
		var result = new byte[count];
		using (var random = RandomNumberGenerator.Create())
		{
			random.GetBytes(result);
		}
		return result;
	}
}
================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/Sspi/SspiHelper.cs ================================================
/*
 * The contents of this file are subject to the Initial
 * Developer's Public License Version 1.0 (the "License");
 * you may not use this file except in compliance with the
 *
License. You may obtain a copy of the License at
 * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 * Software distributed under the License is distributed on
 * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 * express or implied. See the License for the specific
 * language governing rights and limitations under the License.
 *
 * All Rights Reserved.
 */

//$Authors = Vladimir Bodecek, Nathan Fox, Jiri Cincura (jiri@cincura.net)

// Adapted from pinvoke.net

using System;
using System.Runtime.InteropServices;

namespace FirebirdSql.Data.Client.Managed.Sspi;

/// <summary>
/// Client-side wrapper over the Windows SSPI handshake (secur32.dll) used for the
/// "Win_Sspi" authentication method. Owns native credential and context handles
/// and releases them on Dispose or finalization.
/// </summary>
internal sealed class SspiHelper : IDisposable
{
	/// <summary>Authentication method name.</summary>
	public string Name { get; } = "Win_Sspi";

	private const int SECBUFFER_VERSION = 0;

	private enum SecBufferType
	{
		SECBUFFER_EMPTY = 0,
		SECBUFFER_DATA = 1,
		SECBUFFER_TOKEN = 2,
	}

	#region Structures used in native Win API calls

	// Native SSPI handle; both parts zero means "not acquired" (see IsInvalid).
	[StructLayout(LayoutKind.Sequential)]
	public struct SecHandle
	{
		public IntPtr LowPart;
		public IntPtr HighPart;

		// The optional dummy parameter exists because structs could not declare
		// explicit parameterless constructors in older C# versions.
		public SecHandle(int? dummy = null)
		{
			LowPart = IntPtr.Zero;
			HighPart = IntPtr.Zero;
		}

		public bool IsInvalid
		{
			get { return LowPart == IntPtr.Zero && HighPart == IntPtr.Zero; }
		}
	}

	// Native SECURITY_INTEGER / TimeStamp.
	[StructLayout(LayoutKind.Sequential)]
	public struct SecInteger
	{
		public uint LowPart;
		public int HighPart;

		public SecInteger(int? dummy = null)
		{
			LowPart = 0;
			HighPart = 0;
		}
	}

	// Single native SecBuffer; owns an HGlobal allocation freed by Dispose.
	[StructLayout(LayoutKind.Sequential)]
	private struct SecBuffer : IDisposable
	{
		private int cbBuffer;
		private int bufferType;
		private IntPtr pvBuffer;

		public SecBuffer(int bufferSize)
		{
			cbBuffer = bufferSize;
			bufferType = (int)SecBufferType.SECBUFFER_TOKEN;
			pvBuffer = Marshal.AllocHGlobal(bufferSize);
		}

		public SecBuffer(byte[] secBufferBytes)
			: this(secBufferBytes.Length)
		{
			Marshal.Copy(secBufferBytes, 0, pvBuffer, cbBuffer);
		}

		public SecBuffer(byte[] secBufferBytes, SecBufferType bufferType)
			: this(secBufferBytes)
		{
			this.bufferType = (int)bufferType;
		}

		public void Dispose()
		{
			if (pvBuffer != IntPtr.Zero)
			{
				Marshal.FreeHGlobal(pvBuffer);
				pvBuffer = IntPtr.Zero;
			}
		}

		// Copies the native buffer back into managed memory; null when the buffer is empty.
		public byte[] GetBytes()
		{
			byte[] buffer = null;
			if (cbBuffer > 0)
			{
				buffer = new byte[cbBuffer];
				Marshal.Copy(pvBuffer, buffer, 0, cbBuffer);
			}
			return buffer;
		}
	}

	// Native SecBufferDesc that always wraps exactly one SecBuffer (cBuffers == 1).
	[StructLayout(LayoutKind.Sequential)]
	private struct SecBufferDesc : IDisposable
	{
		public int ulVersion;
		public int cBuffers;
		public IntPtr pBuffers;

		public SecBufferDesc(int bufferSize)
		{
			ulVersion = SECBUFFER_VERSION;
			cBuffers = 1;
			var secBuffer = new SecBuffer(bufferSize);
			pBuffers = Marshal.AllocHGlobal(Marshal.SizeOf(secBuffer));
			Marshal.StructureToPtr(secBuffer, pBuffers, false);
		}

		public SecBufferDesc(byte[] secBufferBytes)
		{
			ulVersion = SECBUFFER_VERSION;
			cBuffers = 1;
			var secBuffer = new SecBuffer(secBufferBytes);
			pBuffers = Marshal.AllocHGlobal(Marshal.SizeOf(secBuffer));
			Marshal.StructureToPtr(secBuffer, pBuffers, false);
		}

		public void Dispose()
		{
			if (pBuffers != IntPtr.Zero)
			{
				// Free the inner SecBuffer's HGlobal first, then the descriptor's own block.
				// NOTE(review): a generic type argument appears lost in extraction here;
				// upstream likely reads Marshal.PtrToStructure<SecBuffer>(pBuffers) — verify.
				var secBuffer = Marshal.PtrToStructure(pBuffers);
				secBuffer.Dispose();
				Marshal.FreeHGlobal(pBuffers);
				pBuffers = IntPtr.Zero;
			}
		}

		public byte[] GetSecBufferBytes()
		{
			if (pBuffers == IntPtr.Zero)
				throw new ObjectDisposedException(nameof(SecBufferDesc));
			// NOTE(review): same apparently-stripped generic argument as in Dispose — verify.
			var secBuffer = Marshal.PtrToStructure(pBuffers);
			return secBuffer.GetBytes();
		}
	}

	#endregion

	#region Constants used in native Win API calls

	const int TOKEN_QUERY = 0x00008;
	const int SEC_E_OK = 0;
	const int SEC_I_CONTINUE_NEEDED = 0x90312;
	const int SECPKG_CRED_INBOUND = 1;
	const int SECPKG_CRED_OUTBOUND = 2;
	const int SECURITY_NATIVE_DREP = 0x10;
	const int MAX_TOKEN_SIZE = 12288;
	const int ISC_REQ_DELEGATE = 0x00000001;
	const int ISC_REQ_MUTUAL_AUTH = 0x00000002;
	const int ISC_REQ_REPLAY_DETECT = 0x00000004;
	const int ISC_REQ_SEQUENCE_DETECT = 0x00000008;
	const int ISC_REQ_CONFIDENTIALITY = 0x00000010;
	const int ISC_REQ_USE_SESSION_KEY = 0x00000020;
	const int ISC_REQ_PROMPT_FOR_CREDS = 0x00000040;
	const int ISC_REQ_USE_SUPPLIED_CREDS = 0x00000080;
	const int ISC_REQ_ALLOCATE_MEMORY = 0x00000100;
	const int ISC_REQ_USE_DCE_STYLE = 0x00000200;
	const int ISC_REQ_DATAGRAM = 0x00000400;
	const int ISC_REQ_CONNECTION = 0x00000800;
	const int ISC_REQ_CALL_LEVEL = 0x00001000;
	const int ISC_REQ_FRAGMENT_SUPPLIED = 0x00002000;
	const int ISC_REQ_EXTENDED_ERROR = 0x00004000;
	const int ISC_REQ_STREAM = 0x00008000;
	const int ISC_REQ_INTEGRITY = 0x00010000;
	const int ISC_REQ_IDENTIFY = 0x00020000;
	const int ISC_REQ_NULL_SESSION = 0x00040000;
	const int ISC_REQ_MANUAL_CRED_VALIDATION = 0x00080000;
	const int ISC_REQ_RESERVED1 = 0x00100000;
	const int ISC_REQ_FRAGMENT_TO_FIT = 0x00200000;
	const int SECPKG_ATTR_SIZES = 0;
	const int STANDARD_CONTEXT_ATTRIBUTES = ISC_REQ_CONFIDENTIALITY | ISC_REQ_REPLAY_DETECT | ISC_REQ_SEQUENCE_DETECT | ISC_REQ_CONNECTION;

	#endregion

	#region Prototypes of native Win API functions

	[DllImport("secur32")]
	static extern int AcquireCredentialsHandle(
		string pszPrincipal, //SEC_CHAR*
		string pszPackage, //SEC_CHAR* //"Kerberos","NTLM","Negotiative"
		int fCredentialUse,
		IntPtr PAuthenticationID,//_LUID AuthenticationID,//pvLogonID, //PLUID
		IntPtr pAuthData,//PVOID
		int pGetKeyFn, //SEC_GET_KEY_FN
		IntPtr pvGetKeyArgument, //PVOID
		out SecHandle phCredential, //SecHandle //PCtxtHandle ref
		out SecInteger ptsExpiry //PTimeStamp //TimeStamp ref
		);

	// Overload for the first handshake step: no existing context, no input token.
	[DllImport("secur32", SetLastError = true)]
	static extern int InitializeSecurityContext(
		ref SecHandle phCredential,//PCredHandle
		IntPtr phContext, //PCtxtHandle
		string pszTargetName,
		int fContextReq,
		int Reserved1,
		int TargetDataRep,
		IntPtr pInput, //PSecBufferDesc SecBufferDesc
		int Reserved2,
		out SecHandle phNewContext, //PCtxtHandle
		ref SecBufferDesc pOutput, //PSecBufferDesc SecBufferDesc
		out uint pfContextAttr, //managed ulong == 64 bits!!!
		out SecInteger ptsExpiry //PTimeStamp
		);

	// 2 signatures of this API function needed because different usage
	[DllImport("secur32", SetLastError = true)]
	static extern int InitializeSecurityContext(
		ref SecHandle phCredential,//PCredHandle
		ref SecHandle phContext, //PCtxtHandle
		string pszTargetName,
		int fContextReq,
		int Reserved1,
		int TargetDataRep,
		ref SecBufferDesc SecBufferDesc, //PSecBufferDesc SecBufferDesc
		int Reserved2,
		out SecHandle phNewContext, //PCtxtHandle
		ref SecBufferDesc pOutput, //PSecBufferDesc SecBufferDesc
		out uint pfContextAttr, //managed ulong == 64 bits!!!
		out SecInteger ptsExpiry //PTimeStamp
		);

	[DllImport("secur32")]
	static extern int FreeCredentialsHandle(ref SecHandle phCredential); //PCredHandle

	[DllImport("secur32")]
	static extern int DeleteSecurityContext(ref SecHandle phContext); //PCtxtHandle

	#endregion

	#region Private members

	private SecHandle _clientCredentials; // handle from AcquireCredentialsHandle
	private SecHandle _clientContext; // handle from InitializeSecurityContext
	private bool _disposed;
	private string _securityPackage;
	private string _remotePrincipal;

	#endregion

	#region Constructors

	/// <summary>
	/// Creates SSPIHelper with default "NTLM" security package and no remote principal and gets client credentials
	/// </summary>
	public SspiHelper()
		: this("NTLM")
	{ }

	/// <summary>
	/// Creates SSPIHelper with given security package and no remote principal and gets client credentials
	/// </summary>
	/// <param name="securityPackage">Name of security package (e.g. NTLM, Kerberos, ...)</param>
	public SspiHelper(string securityPackage)
		: this(securityPackage, null)
	{ }

	/// <summary>
	/// Creates SSPIHelper with given security package and remote principal and gets client credentials
	/// </summary>
	/// <param name="securityPackage">Name of security package (e.g. NTLM, Kerberos, ...)</param>
	/// <param name="remotePrincipal">SPN of server (may be necessary for Kerberos)</param>
	public SspiHelper(string securityPackage, string remotePrincipal)
	{
		_securityPackage = securityPackage;
		_remotePrincipal = remotePrincipal;
	}

	#endregion

	#region Methods

	/// <summary>
	/// Creates client security context and returns "client token"
	/// </summary>
	/// <returns>Client authentication data to be sent to server</returns>
	public byte[] InitializeClientSecurity()
	{
		EnsureDisposed();
		// Restart from scratch: drop any previous context and (re)acquire outbound credentials.
		CloseClientContext();
		InitializeClientCredentials();
		_clientContext = new SecHandle();
		var clientTokenBuf = new SecBufferDesc(MAX_TOKEN_SIZE);
		try
		{
			var resCode = InitializeSecurityContext(
				ref _clientCredentials,
				IntPtr.Zero,
				_remotePrincipal,
				STANDARD_CONTEXT_ATTRIBUTES,
				0,
				SECURITY_NATIVE_DREP,
				IntPtr.Zero,
				0,
				out _clientContext,
				ref clientTokenBuf,
				out var contextAttributes,
				out var expiry);
			// SEC_I_CONTINUE_NEEDED means more round-trips are expected; not an error.
			if (resCode != SEC_E_OK && resCode != SEC_I_CONTINUE_NEEDED)
				throw new Exception($"{nameof(InitializeSecurityContext)} failed");
			return clientTokenBuf.GetSecBufferBytes();
		}
		finally
		{
			clientTokenBuf.Dispose();
		}
	}

	/// <summary>
	/// Creates client authentication data based on already existing security context and
	/// authentication data sent by server
	/// This method must not be called before InitializeClientSecurity
	/// </summary>
	/// <param name="serverToken">Authentication data received from server</param>
	/// <returns>Client authentication data to be sent to server</returns>
	public byte[] GetClientSecurity(byte[] serverToken)
	{
		EnsureDisposed();
		if (_clientContext.IsInvalid)
			throw new InvalidOperationException($"{nameof(InitializeClientSecurity)} not called");
		var clientTokenBuf = new SecBufferDesc(MAX_TOKEN_SIZE);
		try
		{
			var serverTokenBuf = new SecBufferDesc(serverToken);
			try
			{
				var resCode = InitializeSecurityContext(
					ref _clientCredentials,
					ref _clientContext,
					_remotePrincipal,
					STANDARD_CONTEXT_ATTRIBUTES,
					0,
					SECURITY_NATIVE_DREP,
					ref serverTokenBuf,
					0,
					out _clientContext,
					ref clientTokenBuf,
					out var contextAttributes,
					out var expiry);
				if (resCode != SEC_E_OK && resCode != SEC_I_CONTINUE_NEEDED)
					throw new Exception($"{nameof(InitializeSecurityContext)} failed");
				return clientTokenBuf.GetSecBufferBytes();
			}
			finally
			{
				serverTokenBuf.Dispose();
			}
		}
		finally
		{
			clientTokenBuf.Dispose();
		}
	}

	#endregion

	#region Finalizer

	// Releases the native handles if Dispose was never called.
	~SspiHelper()
	{
		Dispose(false);
	}

	#endregion

	#region IDisposable Members

	public void Dispose()
	{
		Dispose(true);
		GC.SuppressFinalize(this);
	}

	#endregion

	#region Private methods

	private void Dispose(bool disposing)
	{
		if (!_disposed)
		{
			_disposed = true;
			// Only native resources are held, so both dispose paths free the same handles.
			CloseClientContext();
			CloseClientCredentials();
		}
	}

	// Acquires an outbound credentials handle for the configured security package.
	private void InitializeClientCredentials()
	{
		_clientCredentials = new SecHandle();
		var resCode = AcquireCredentialsHandle(null, _securityPackage, SECPKG_CRED_OUTBOUND, IntPtr.Zero, IntPtr.Zero, 0, IntPtr.Zero, out _clientCredentials, out var expiry);
		if (resCode != SEC_E_OK)
			throw new Exception($"{nameof(AcquireCredentialsHandle)} failed");
	}

	private void CloseClientContext()
	{
		if (!_clientContext.IsInvalid)
			DeleteSecurityContext(ref _clientContext);
	}

	private void CloseClientCredentials()
	{
		if (!_clientCredentials.IsInvalid)
			FreeCredentialsHandle(ref _clientCredentials);
	}

	// Throws when this instance has already been disposed (name kept from original;
	// reads as "ensure not disposed").
	private void EnsureDisposed()
	{
		if (_disposed)
			throw new ObjectDisposedException(nameof(SspiHelper));
	}

	#endregion
}
================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/Version10/GdsArray.cs ================================================
/*
 * The contents of this file are subject to the Initial
 * Developer's Public License Version 1.0 (the "License");
 * you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 * Software distributed under the License is distributed on
 * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 * express or implied. See the License for the specific
 * language governing rights and limitations under the License.
 *
 * All Rights Reserved.
*/ //$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net) using System; using System.Globalization; using System.IO; using System.Reflection; using System.Threading; using System.Threading.Tasks; using FirebirdSql.Data.Common; namespace FirebirdSql.Data.Client.Managed.Version10; internal sealed class GdsArray : ArrayBase { const long ArrayHandle = 0; #region Fields private long _handle; private GdsDatabase _database; private GdsTransaction _transaction; #endregion #region Properties public override long Handle { get { return _handle; } set { _handle = value; } } public override DatabaseBase Database { get { return _database; } set { _database = (GdsDatabase)value; } } public override TransactionBase Transaction { get { return _transaction; } set { _transaction = (GdsTransaction)value; } } #endregion #region Constructors public GdsArray(ArrayDesc descriptor) : base(descriptor) { } public GdsArray(GdsDatabase database, GdsTransaction transaction, string tableName, string fieldName) : this(database, transaction, -1, tableName, fieldName) { } public GdsArray(GdsDatabase database, GdsTransaction transaction, long handle, string tableName, string fieldName) : base(tableName, fieldName) { _database = database; _transaction = transaction; _handle = handle; } #endregion #region Methods public override byte[] GetSlice(int sliceLength) { try { var sdl = GenerateSDL(Descriptor); _database.Xdr.Write(IscCodes.op_get_slice); _database.Xdr.Write(_transaction.Handle); _database.Xdr.Write(_handle); _database.Xdr.Write(sliceLength); _database.Xdr.WriteBuffer(sdl); _database.Xdr.Write(string.Empty); _database.Xdr.Write(0); _database.Xdr.Flush(); return ReceiveSliceResponse(Descriptor); } catch (IOException ex) { throw IscException.ForIOException(ex); } } public override async ValueTask GetSliceAsync(int sliceLength, CancellationToken cancellationToken = default) { try { var sdl = GenerateSDL(Descriptor); await _database.Xdr.WriteAsync(IscCodes.op_get_slice, 
cancellationToken).ConfigureAwait(false); await _database.Xdr.WriteAsync(_transaction.Handle, cancellationToken).ConfigureAwait(false); await _database.Xdr.WriteAsync(_handle, cancellationToken).ConfigureAwait(false); await _database.Xdr.WriteAsync(sliceLength, cancellationToken).ConfigureAwait(false); await _database.Xdr.WriteBufferAsync(sdl, cancellationToken).ConfigureAwait(false); await _database.Xdr.WriteAsync(string.Empty, cancellationToken).ConfigureAwait(false); await _database.Xdr.WriteAsync(0, cancellationToken).ConfigureAwait(false); await _database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false); return await ReceiveSliceResponseAsync(Descriptor, cancellationToken).ConfigureAwait(false); } catch (IOException ex) { throw IscException.ForIOException(ex); } } public override void PutSlice(Array sourceArray, int sliceLength) { try { var sdl = GenerateSDL(Descriptor); var slice = EncodeSliceArray(sourceArray); _database.Xdr.Write(IscCodes.op_put_slice); _database.Xdr.Write(_transaction.Handle); _database.Xdr.Write(ArrayHandle); _database.Xdr.Write(sliceLength); _database.Xdr.WriteBuffer(sdl); _database.Xdr.Write(string.Empty); _database.Xdr.Write(sliceLength); _database.Xdr.WriteBytes(slice, slice.Length); _database.Xdr.Flush(); var response = (GenericResponse)_database.ReadResponse(); _handle = response.BlobId; } catch (IOException ex) { throw IscException.ForIOException(ex); } } public override async ValueTask PutSliceAsync(Array sourceArray, int sliceLength, CancellationToken cancellationToken = default) { try { var sdl = GenerateSDL(Descriptor); var slice = await EncodeSliceArrayAsync(sourceArray, cancellationToken).ConfigureAwait(false); await _database.Xdr.WriteAsync(IscCodes.op_put_slice, cancellationToken).ConfigureAwait(false); await _database.Xdr.WriteAsync(_transaction.Handle, cancellationToken).ConfigureAwait(false); await _database.Xdr.WriteAsync(ArrayHandle, cancellationToken).ConfigureAwait(false); await 
_database.Xdr.WriteAsync(sliceLength, cancellationToken).ConfigureAwait(false); await _database.Xdr.WriteBufferAsync(sdl, cancellationToken).ConfigureAwait(false); await _database.Xdr.WriteAsync(string.Empty, cancellationToken).ConfigureAwait(false); await _database.Xdr.WriteAsync(sliceLength, cancellationToken).ConfigureAwait(false); await _database.Xdr.WriteBytesAsync(slice, slice.Length, cancellationToken).ConfigureAwait(false); await _database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false); var response = (GenericResponse)await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false); _handle = response.BlobId; } catch (IOException ex) { throw IscException.ForIOException(ex); } } #endregion #region Protected Methods protected override Array DecodeSlice(byte[] slice) { var dbType = DbDataType.Array; Array sliceData = null; Array tempData = null; var systemType = GetSystemType(); var lengths = new int[Descriptor.Dimensions]; var lowerBounds = new int[Descriptor.Dimensions]; var type = 0; var index = 0; for (var i = 0; i < Descriptor.Dimensions; i++) { lowerBounds[i] = Descriptor.Bounds[i].LowerBound; lengths[i] = Descriptor.Bounds[i].UpperBound; if (lowerBounds[i] == 0) { lengths[i]++; } } sliceData = Array.CreateInstance(systemType, lengths, lowerBounds); tempData = Array.CreateInstance(systemType, sliceData.Length); type = TypeHelper.GetSqlTypeFromBlrType(Descriptor.DataType); dbType = TypeHelper.GetDbDataTypeFromBlrType(Descriptor.DataType, 0, Descriptor.Scale); using (var ms = new MemoryStream(slice)) { var xdr = new XdrReaderWriter(new DataProviderStreamWrapper(ms), _database.Charset); while (ms.Position < ms.Length) { switch (dbType) { case DbDataType.Char: tempData.SetValue(xdr.ReadString(Descriptor.Length), index); break; case DbDataType.VarChar: tempData.SetValue(xdr.ReadString(), index); break; case DbDataType.SmallInt: tempData.SetValue(xdr.ReadInt16(), index); break; case DbDataType.Integer: tempData.SetValue(xdr.ReadInt32(), 
index); break;
					case DbDataType.BigInt:
						tempData.SetValue(xdr.ReadInt64(), index);
						break;
					case DbDataType.Numeric:
					case DbDataType.Decimal:
						tempData.SetValue(xdr.ReadDecimal(type, Descriptor.Scale), index);
						break;
					case DbDataType.Float:
						tempData.SetValue(xdr.ReadSingle(), index);
						break;
					case DbDataType.Double:
						tempData.SetValue(xdr.ReadDouble(), index);
						break;
					case DbDataType.Date:
						tempData.SetValue(xdr.ReadDate(), index);
						break;
					case DbDataType.Time:
						tempData.SetValue(xdr.ReadTime(), index);
						break;
					case DbDataType.TimeStamp:
						tempData.SetValue(xdr.ReadDateTime(), index);
						break;
				}
				index++;
			}
			if (systemType.GetTypeInfo().IsPrimitive)
			{
				// For primitive types we can use System.Buffer to copy generated data to destination array
				Buffer.BlockCopy(tempData, 0, sliceData, 0, Buffer.ByteLength(tempData));
			}
			else
			{
				sliceData = tempData;
			}
		}
		return sliceData;
	}

	// Async counterpart of DecodeSlice: deserializes the XDR-encoded slice received from the
	// server into a CLR Array shaped by Descriptor (dimensions, bounds, element type).
	// NOTE(review): the generic return type argument was stripped during extraction; restored
	// to ValueTask<Array> to match the sync overload's Array result.
	protected override async ValueTask<Array> DecodeSliceAsync(byte[] slice, CancellationToken cancellationToken = default)
	{
		var dbType = DbDataType.Array;
		Array sliceData = null;
		Array tempData = null;
		var systemType = GetSystemType();
		var lengths = new int[Descriptor.Dimensions];
		var lowerBounds = new int[Descriptor.Dimensions];
		var type = 0;
		var index = 0;
		for (var i = 0; i < Descriptor.Dimensions; i++)
		{
			lowerBounds[i] = Descriptor.Bounds[i].LowerBound;
			lengths[i] = Descriptor.Bounds[i].UpperBound;
			// NOTE(review): length is taken as UpperBound (UpperBound+1 when zero-based), which
			// is only the element count for lower bounds of 0 or 1 — presumably the only bounds
			// the provider produces; confirm against ArrayDesc producers.
			if (lowerBounds[i] == 0)
			{
				lengths[i]++;
			}
		}
		sliceData = Array.CreateInstance(systemType, lengths, lowerBounds);
		tempData = Array.CreateInstance(systemType, sliceData.Length);
		type = TypeHelper.GetSqlTypeFromBlrType(Descriptor.DataType);
		dbType = TypeHelper.GetDbDataTypeFromBlrType(Descriptor.DataType, 0, Descriptor.Scale);
		using (var ms = new MemoryStream(slice))
		{
			var xdr = new XdrReaderWriter(new DataProviderStreamWrapper(ms), _database.Charset);
			// Elements arrive as a flat XDR stream; read until the buffer is exhausted.
			while (ms.Position < ms.Length)
			{
				switch (dbType)
				{
					case DbDataType.Char:
						tempData.SetValue(await xdr.ReadStringAsync(Descriptor.Length, cancellationToken).ConfigureAwait(false), index);
						break;
					case DbDataType.VarChar:
						tempData.SetValue(await xdr.ReadStringAsync(cancellationToken).ConfigureAwait(false), index);
						break;
					case DbDataType.SmallInt:
						tempData.SetValue(await xdr.ReadInt16Async(cancellationToken).ConfigureAwait(false), index);
						break;
					case DbDataType.Integer:
						tempData.SetValue(await xdr.ReadInt32Async(cancellationToken).ConfigureAwait(false), index);
						break;
					case DbDataType.BigInt:
						tempData.SetValue(await xdr.ReadInt64Async(cancellationToken).ConfigureAwait(false), index);
						break;
					case DbDataType.Numeric:
					case DbDataType.Decimal:
						tempData.SetValue(await xdr.ReadDecimalAsync(type, Descriptor.Scale, cancellationToken).ConfigureAwait(false), index);
						break;
					case DbDataType.Float:
						tempData.SetValue(await xdr.ReadSingleAsync(cancellationToken).ConfigureAwait(false), index);
						break;
					case DbDataType.Double:
						tempData.SetValue(await xdr.ReadDoubleAsync(cancellationToken).ConfigureAwait(false), index);
						break;
					case DbDataType.Date:
						tempData.SetValue(await xdr.ReadDateAsync(cancellationToken).ConfigureAwait(false), index);
						break;
					case DbDataType.Time:
						tempData.SetValue(await xdr.ReadTimeAsync(cancellationToken).ConfigureAwait(false), index);
						break;
					case DbDataType.TimeStamp:
						tempData.SetValue(await xdr.ReadDateTimeAsync(cancellationToken).ConfigureAwait(false), index);
						break;
				}
				index++;
			}
			if (systemType.GetTypeInfo().IsPrimitive)
			{
				// For primitive types we can use System.Buffer to copy generated data to destination array
				Buffer.BlockCopy(tempData, 0, sliceData, 0, Buffer.ByteLength(tempData));
			}
			else
			{
				sliceData = tempData;
			}
		}
		return sliceData;
	}

	#endregion

	#region Private Methods

	// Reads the op_slice response for a slice fetch and returns the raw element bytes,
	// or null when the server answered with something other than op_slice.
	private byte[] ReceiveSliceResponse(ArrayDesc desc)
	{
		try
		{
			var operation = _database.ReadOperation();
			if (operation == IscCodes.op_slice)
			{
				var isVariying = false;
				var elements = 0;
				// First length word is discarded; the second is the payload length in bytes.
				var length = _database.Xdr.ReadInt32();
				length = _database.Xdr.ReadInt32();
				switch (desc.DataType)
				{
					case IscCodes.blr_text:
					case IscCodes.blr_text2:
					case IscCodes.blr_cstring:
					case IscCodes.blr_cstring2:
						// Fixed-length text: account for the XDR 4-byte padding of each element.
						elements = length / desc.Length;
						length += elements * ((4 - desc.Length) & 3);
						break;
					case IscCodes.blr_varying:
					case IscCodes.blr_varying2:
						elements = length / desc.Length;
						isVariying = true;
						break;
					case IscCodes.blr_short:
						length = length * desc.Length;
						break;
				}
				if (isVariying)
				{
					// VARCHAR elements are length-prefixed; re-pack them into a single buffer.
					using (var ms = new MemoryStream())
					{
						var xdr = new XdrReaderWriter(new DataProviderStreamWrapper(ms));
						for (var i = 0; i < elements; i++)
						{
							var buffer = _database.Xdr.ReadOpaque(_database.Xdr.ReadInt32());
							xdr.WriteBuffer(buffer, buffer.Length);
						}
						xdr.Flush();
						return ms.ToArray();
					}
				}
				else
				{
					return _database.Xdr.ReadOpaque(length);
				}
			}
			else
			{
				_database.ReadResponse(operation);
				return null;
			}
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
	}

	// Async counterpart of ReceiveSliceResponse.
	// NOTE(review): generic return type argument restored (ValueTask<byte[]>) — it was
	// stripped during extraction; the sync overload returns byte[].
	private async ValueTask<byte[]> ReceiveSliceResponseAsync(ArrayDesc desc, CancellationToken cancellationToken = default)
	{
		try
		{
			var operation = await _database.ReadOperationAsync(cancellationToken).ConfigureAwait(false);
			if (operation == IscCodes.op_slice)
			{
				var isVariying = false;
				var elements = 0;
				// First length word is discarded; the second is the payload length in bytes.
				var length = await _database.Xdr.ReadInt32Async(cancellationToken).ConfigureAwait(false);
				length = await _database.Xdr.ReadInt32Async(cancellationToken).ConfigureAwait(false);
				switch (desc.DataType)
				{
					case IscCodes.blr_text:
					case IscCodes.blr_text2:
					case IscCodes.blr_cstring:
					case IscCodes.blr_cstring2:
						// Fixed-length text: account for the XDR 4-byte padding of each element.
						elements = length / desc.Length;
						length += elements * ((4 - desc.Length) & 3);
						break;
					case IscCodes.blr_varying:
					case IscCodes.blr_varying2:
						elements = length / desc.Length;
						isVariying = true;
						break;
					case IscCodes.blr_short:
						length = length * desc.Length;
						break;
				}
				if (isVariying)
				{
					// VARCHAR elements are length-prefixed; re-pack them into a single buffer.
					using (var ms = new MemoryStream())
					{
						var xdr = new XdrReaderWriter(new DataProviderStreamWrapper(ms));
						for (var i = 0; i < elements; i++)
						{
							var buffer = await _database.Xdr.ReadOpaqueAsync(await _database.Xdr.ReadInt32Async(cancellationToken).ConfigureAwait(false), cancellationToken).ConfigureAwait(false);
							await xdr.WriteBufferAsync(buffer, buffer.Length, cancellationToken).ConfigureAwait(false);
						}
						await xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
						return ms.ToArray();
					}
				}
				else
				{
					return await _database.Xdr.ReadOpaqueAsync(length, cancellationToken).ConfigureAwait(false);
				}
			}
			else
			{
				await _database.ReadResponseAsync(operation, cancellationToken).ConfigureAwait(false);
				return null;
			}
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
	}

	// Serializes a CLR array into the XDR wire format expected by op_put_slice,
	// element by element according to the array descriptor's BLR data type.
	private byte[] EncodeSliceArray(Array sourceArray)
	{
		var dbType = DbDataType.Array;
		var charset = _database.Charset;
		var subType = (Descriptor.Scale < 0) ? 2 : 0;
		var type = 0;
		using (var ms = new MemoryStream())
		{
			var xdr = new XdrReaderWriter(new DataProviderStreamWrapper(ms), _database.Charset);
			type = TypeHelper.GetSqlTypeFromBlrType(Descriptor.DataType);
			dbType = TypeHelper.GetDbDataTypeFromBlrType(Descriptor.DataType, subType, Descriptor.Scale);
			foreach (var source in sourceArray)
			{
				switch (dbType)
				{
					case DbDataType.Char:
						// CHAR is written opaque, padded/truncated to the declared length.
						var buffer = charset.GetBytes(source.ToString());
						xdr.WriteOpaque(buffer, Descriptor.Length);
						break;
					case DbDataType.VarChar:
						xdr.Write((string)source);
						break;
					case DbDataType.SmallInt:
						xdr.Write((short)source);
						break;
					case DbDataType.Integer:
						xdr.Write((int)source);
						break;
					case DbDataType.BigInt:
						xdr.Write((long)source);
						break;
					case DbDataType.Decimal:
					case DbDataType.Numeric:
						xdr.Write((decimal)source, type, Descriptor.Scale);
						break;
					case DbDataType.Float:
						xdr.Write((float)source);
						break;
					case DbDataType.Double:
						xdr.Write((double)source);
						break;
					case DbDataType.Date:
						xdr.WriteDate(Convert.ToDateTime(source, CultureInfo.CurrentCulture.DateTimeFormat));
						break;
					case DbDataType.Time:
						xdr.WriteTime((TimeSpan)source);
						break;
					case DbDataType.TimeStamp:
						xdr.Write(Convert.ToDateTime(source, CultureInfo.CurrentCulture.DateTimeFormat));
						break;
					default:
						throw TypeHelper.InvalidDataType((int)dbType);
				}
			}
			xdr.Flush();
			return ms.ToArray();
		}
	}

	// Async counterpart of EncodeSliceArray.
	// NOTE(review): generic return type argument restored (ValueTask<byte[]>) — it was
	// stripped during extraction; the sync overload returns byte[].
	private async ValueTask<byte[]> EncodeSliceArrayAsync(Array sourceArray, CancellationToken cancellationToken = default)
	{
		var dbType = DbDataType.Array;
		var charset = _database.Charset;
		var subType = (Descriptor.Scale < 0) ? 2 : 0;
		var type = 0;
		using (var ms = new MemoryStream())
		{
			var xdr = new XdrReaderWriter(new DataProviderStreamWrapper(ms), _database.Charset);
			type = TypeHelper.GetSqlTypeFromBlrType(Descriptor.DataType);
			dbType = TypeHelper.GetDbDataTypeFromBlrType(Descriptor.DataType, subType, Descriptor.Scale);
			foreach (var source in sourceArray)
			{
				switch (dbType)
				{
					case DbDataType.Char:
						// CHAR is written opaque, padded/truncated to the declared length.
						var buffer = charset.GetBytes(source.ToString());
						await xdr.WriteOpaqueAsync(buffer, Descriptor.Length, cancellationToken).ConfigureAwait(false);
						break;
					case DbDataType.VarChar:
						await xdr.WriteAsync((string)source, cancellationToken).ConfigureAwait(false);
						break;
					case DbDataType.SmallInt:
						await xdr.WriteAsync((short)source, cancellationToken).ConfigureAwait(false);
						break;
					case DbDataType.Integer:
						await xdr.WriteAsync((int)source, cancellationToken).ConfigureAwait(false);
						break;
					case DbDataType.BigInt:
						await xdr.WriteAsync((long)source, cancellationToken).ConfigureAwait(false);
						break;
					case DbDataType.Decimal:
					case DbDataType.Numeric:
						await xdr.WriteAsync((decimal)source, type, Descriptor.Scale, cancellationToken).ConfigureAwait(false);
						break;
					case DbDataType.Float:
						await xdr.WriteAsync((float)source, cancellationToken).ConfigureAwait(false);
						break;
					case DbDataType.Double:
						await xdr.WriteAsync((double)source, cancellationToken).ConfigureAwait(false);
						break;
					case DbDataType.Date:
						await xdr.WriteDateAsync(Convert.ToDateTime(source, CultureInfo.CurrentCulture.DateTimeFormat), cancellationToken).ConfigureAwait(false);
						break;
					case DbDataType.Time:
						await xdr.WriteTimeAsync((TimeSpan)source, cancellationToken).ConfigureAwait(false);
						break;
					case DbDataType.TimeStamp:
						await xdr.WriteAsync(Convert.ToDateTime(source, CultureInfo.CurrentCulture.DateTimeFormat), cancellationToken).ConfigureAwait(false);
						break;
					default:
						throw TypeHelper.InvalidDataType((int)dbType);
				}
			}
			await xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
			return ms.ToArray();
		}
	}

	// Builds the Slice Description Language (SDL) byte string describing the array
	// (element type, relation/field names and per-dimension loop bounds) for slice I/O.
	private byte[] GenerateSDL(ArrayDesc desc)
	{
		int n;
		int from;
		int to;
		int increment;
		int dimensions;
		ArrayBound tail;
		BinaryWriter sdl;

		dimensions = desc.Dimensions;
		// Firebird arrays support at most 16 dimensions.
		if (dimensions > 16)
		{
			throw IscException.ForErrorCode(IscCodes.isc_invalid_dimension);
		}

		sdl = new BinaryWriter(new MemoryStream());
		Stuff(sdl, 4, IscCodes.isc_sdl_version1, IscCodes.isc_sdl_struct, 1, desc.DataType);

		switch (desc.DataType)
		{
			case IscCodes.blr_short:
			case IscCodes.blr_long:
			case IscCodes.blr_int64:
			case IscCodes.blr_quad:
				StuffSdl(sdl, (byte)desc.Scale);
				break;
			case IscCodes.blr_text:
			case IscCodes.blr_cstring:
			case IscCodes.blr_varying:
				StuffWord(sdl, desc.Length);
				break;
			default:
				break;
		}

		StuffString(sdl, IscCodes.isc_sdl_relation, desc.RelationName);
		StuffString(sdl, IscCodes.isc_sdl_field, desc.FieldName);

		// Iterate dimensions in reverse for column-major layouts.
		if ((desc.Flags & IscCodes.ARRAY_DESC_COLUMN_MAJOR) == IscCodes.ARRAY_DESC_COLUMN_MAJOR)
		{
			from = dimensions - 1;
			to = -1;
			increment = -1;
		}
		else
		{
			from = 0;
			to = dimensions;
			increment = 1;
		}

		for (n = from; n != to; n += increment)
		{
			tail = desc.Bounds[n];
			if (tail.LowerBound == 1)
			{
				// isc_sdl_do1 implies a lower bound of 1.
				Stuff(sdl, 2, IscCodes.isc_sdl_do1, n);
			}
			else
			{
				Stuff(sdl, 2, IscCodes.isc_sdl_do2, n);
				StuffLiteral(sdl, tail.LowerBound);
			}
			StuffLiteral(sdl, tail.UpperBound);
		}

		Stuff(sdl, 5, IscCodes.isc_sdl_element, 1, IscCodes.isc_sdl_scalar, 0, dimensions);
		for (n = 0; n < dimensions; n++)
		{
			Stuff(sdl, 2, IscCodes.isc_sdl_variable, n);
		}
		StuffSdl(sdl, IscCodes.isc_sdl_eoc);

		return ((MemoryStream)sdl.BaseStream).ToArray();
	}

	// Appends 'count' values from args, each coerced to a single byte.
	private void Stuff(BinaryWriter sdl, short count, params object[] args)
	{
		for (var i = 0; i < count; i++)
		{
			sdl.Write(Convert.ToByte(args[i], CultureInfo.InvariantCulture));
		}
	}

	// Appends raw bytes verbatim.
	private void Stuff(BinaryWriter sdl, byte[] args)
	{
		sdl.Write(args);
	}

	// Appends a single SDL byte.
	private void StuffSdl(BinaryWriter sdl, byte sdl_byte)
	{
		Stuff(sdl, 1, sdl_byte);
	}

	// Appends a 16-bit value in little-endian order (platform byte order via BitConverter).
	private void StuffWord(BinaryWriter sdl, short word)
	{
		Stuff(sdl, BitConverter.GetBytes(word));
	}

	// Appends a 32-bit value in little-endian order (platform byte order via BitConverter).
	private void StuffLong(BinaryWriter sdl, int word)
	{
		Stuff(sdl, BitConverter.GetBytes(word));
	}

	// Appends an integer literal using the smallest SDL integer encoding that fits.
	private void StuffLiteral(BinaryWriter sdl, int literal)
	{
		if (literal >= -128 && literal <= 127)
		{
			Stuff(sdl, 2, IscCodes.isc_sdl_tiny_integer, literal);
			return;
		}
		if (literal >= -32768 && literal <= 32767)
		{
			StuffSdl(sdl, IscCodes.isc_sdl_short_integer);
			StuffWord(sdl, (short)literal);
			return;
		}
		StuffSdl(sdl, IscCodes.isc_sdl_long_integer);
		StuffLong(sdl, literal);
	}

	// Appends a length-prefixed string, one byte per character (names are ASCII metadata).
	private void StuffString(BinaryWriter sdl, int constant, string value)
	{
		StuffSdl(sdl, (byte)constant);
		StuffSdl(sdl, (byte)value.Length);
		for (var i = 0; i < value.Length; i++)
		{
			StuffSdl(sdl, (byte)value[i]);
		}
	}

	#endregion
}


================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/Version10/GdsBlob.cs
================================================
/*
 *	The contents of this file are subject to the Initial
 *	Developer's Public License Version 1.0 (the "License");
 *	you may not use this file except in compliance with the
 *	License. You may obtain a copy of the License at
 *	https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *	Software distributed under the License is distributed on
 *	an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *	express or implied. See the License for the specific
 *	language governing rights and limitations under the License.
 *
 *	All Rights Reserved.
*/ //$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net) using System; using System.IO; using System.Threading; using System.Threading.Tasks; using FirebirdSql.Data.Common; namespace FirebirdSql.Data.Client.Managed.Version10; internal sealed class GdsBlob : BlobBase { const int DataSegment = 0; const int SeekMode = 0; #region Fields private readonly GdsDatabase _database; private int _blobHandle; #endregion #region Properties public override DatabaseBase Database { get { return _database; } } public override int Handle { get { return _blobHandle; } } #endregion #region Constructors public GdsBlob(GdsDatabase database, GdsTransaction transaction) : this(database, transaction, 0) { } public GdsBlob(GdsDatabase database, GdsTransaction transaction, long blobId) : base(database) { _database = database; _transaction = transaction; _position = 0; _blobHandle = 0; _blobId = blobId; } #endregion #region Protected Methods public override void Create() { try { CreateOrOpen(IscCodes.op_create_blob, null); RblAddValue(IscCodes.RBL_create); } catch (IscException) { throw; } } public override async ValueTask CreateAsync(CancellationToken cancellationToken = default) { try { await CreateOrOpenAsync(IscCodes.op_create_blob, null, cancellationToken).ConfigureAwait(false); RblAddValue(IscCodes.RBL_create); } catch (IscException) { throw; } } public override void Open() { try { CreateOrOpen(IscCodes.op_open_blob, null); } catch (IscException) { throw; } } public override async ValueTask OpenAsync(CancellationToken cancellationToken = default) { try { await CreateOrOpenAsync(IscCodes.op_open_blob, null, cancellationToken).ConfigureAwait(false); } catch (IscException) { throw; } } public override int GetLength() { try { if (!IsOpen) Open(); var bufferLength = 20; var buffer = new byte[bufferLength]; _database.Xdr.Write(IscCodes.op_info_blob); _database.Xdr.Write(_blobHandle); _database.Xdr.Write(0); _database.Xdr.WriteBuffer(new byte[] { 
IscCodes.isc_info_blob_total_length }, 1); _database.Xdr.Write(bufferLength); _database.Xdr.Flush(); var response = (GenericResponse)_database.ReadResponse(); var responseLength = bufferLength; if (response.Data.Length < bufferLength) { responseLength = response.Data.Length; } Buffer.BlockCopy(response.Data, 0, buffer, 0, responseLength); var length = IscHelper.VaxInteger(buffer, 1, 2); var size = IscHelper.VaxInteger(buffer, 3, (int)length); return (int)size; } catch (IOException ex) { throw IscException.ForIOException(ex); } } public override async ValueTask GetLengthAsync(CancellationToken cancellationToken = default) { try { if (!IsOpen) await OpenAsync(cancellationToken).ConfigureAwait(false); var bufferLength = 20; var buffer = new byte[bufferLength]; await _database.Xdr.WriteAsync(IscCodes.op_info_blob, cancellationToken).ConfigureAwait(false); await _database.Xdr.WriteAsync(_blobHandle, cancellationToken).ConfigureAwait(false); await _database.Xdr.WriteAsync(0, cancellationToken).ConfigureAwait(false); await _database.Xdr.WriteBufferAsync(new byte[] { IscCodes.isc_info_blob_total_length }, 1, cancellationToken).ConfigureAwait(false); await _database.Xdr.WriteAsync(bufferLength, cancellationToken).ConfigureAwait(false); await _database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false); var response = (GenericResponse)await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false); var responseLength = bufferLength; if (response.Data.Length < bufferLength) { responseLength = response.Data.Length; } Buffer.BlockCopy(response.Data, 0, buffer, 0, responseLength); var length = IscHelper.VaxInteger(buffer, 1, 2); var size = IscHelper.VaxInteger(buffer, 3, (int)length); return (int)size; } catch (IOException ex) { throw IscException.ForIOException(ex); } } public override void GetSegment(Stream stream) { var requested = SegmentSize; try { _database.Xdr.Write(IscCodes.op_get_segment); _database.Xdr.Write(_blobHandle); _database.Xdr.Write(requested < 
short.MaxValue - 12 ? requested : short.MaxValue - 12); _database.Xdr.Write(DataSegment); _database.Xdr.Flush(); var response = (GenericResponse)_database.ReadResponse(); RblRemoveValue(IscCodes.RBL_segment); if (response.ObjectHandle == 1) { RblAddValue(IscCodes.RBL_segment); } else if (response.ObjectHandle == 2) { RblAddValue(IscCodes.RBL_eof_pending); } var buffer = response.Data; if (buffer.Length == 0) { // previous segment was last, this has no data return; } var len = 0; var srcpos = 0; while (srcpos < buffer.Length) { len = (int)IscHelper.VaxInteger(buffer, srcpos, 2); srcpos += 2; stream.Write(buffer, srcpos, len); srcpos += len; } } catch (IOException ex) { throw IscException.ForIOException(ex); } } public override async ValueTask GetSegmentAsync(Stream stream, CancellationToken cancellationToken = default) { var requested = SegmentSize; try { await _database.Xdr.WriteAsync(IscCodes.op_get_segment, cancellationToken).ConfigureAwait(false); await _database.Xdr.WriteAsync(_blobHandle, cancellationToken).ConfigureAwait(false); await _database.Xdr.WriteAsync(requested < short.MaxValue - 12 ? 
requested : short.MaxValue - 12, cancellationToken).ConfigureAwait(false); await _database.Xdr.WriteAsync(DataSegment, cancellationToken).ConfigureAwait(false); await _database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false); var response = (GenericResponse)await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false); RblRemoveValue(IscCodes.RBL_segment); if (response.ObjectHandle == 1) { RblAddValue(IscCodes.RBL_segment); } else if (response.ObjectHandle == 2) { RblAddValue(IscCodes.RBL_eof_pending); } var buffer = response.Data; if (buffer.Length == 0) { //previous segment was last, this has no data return; } var len = 0; var srcpos = 0; while (srcpos < buffer.Length) { len = (int)IscHelper.VaxInteger(buffer, srcpos, 2); srcpos += 2; stream.Write(buffer, srcpos, len); srcpos += len; } } catch (IOException ex) { throw IscException.ForIOException(ex); } } public override byte[] GetSegment() { var requested = SegmentSize; try { _database.Xdr.Write(IscCodes.op_get_segment); _database.Xdr.Write(_blobHandle); _database.Xdr.Write(requested < short.MaxValue - 12 ? 
requested : short.MaxValue - 12); _database.Xdr.Write(DataSegment); _database.Xdr.Flush(); var response = (GenericResponse)_database.ReadResponse(); RblRemoveValue(IscCodes.RBL_segment); if (response.ObjectHandle == 1) { RblAddValue(IscCodes.RBL_segment); } else if (response.ObjectHandle == 2) { RblAddValue(IscCodes.RBL_eof_pending); } var buffer = response.Data; if (buffer.Length == 0) { //previous segment was last, this has no data return Array.Empty(); } var posInInput = 0; var posInOutput = 0; var tmp = new byte[requested * 2]; while (posInInput < buffer.Length) { var len = (int)IscHelper.VaxInteger(buffer, posInInput, 2); posInInput += 2; Array.Copy(buffer, posInInput, tmp, posInOutput, len); posInOutput += len; posInInput += len; } var actualBuffer = new byte[posInOutput]; Array.Copy(tmp, actualBuffer, posInOutput); return actualBuffer; } catch (IOException ex) { throw IscException.ForIOException(ex); } } public override async ValueTask GetSegmentAsync(CancellationToken cancellationToken = default) { var requested = SegmentSize; try { await _database.Xdr.WriteAsync(IscCodes.op_get_segment, cancellationToken).ConfigureAwait(false); await _database.Xdr.WriteAsync(_blobHandle, cancellationToken).ConfigureAwait(false); await _database.Xdr.WriteAsync(requested < short.MaxValue - 12 ? 
requested : short.MaxValue - 12, cancellationToken).ConfigureAwait(false); await _database.Xdr.WriteAsync(DataSegment, cancellationToken).ConfigureAwait(false); await _database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false); var response = (GenericResponse)await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false); RblRemoveValue(IscCodes.RBL_segment); if (response.ObjectHandle == 1) { RblAddValue(IscCodes.RBL_segment); } else if (response.ObjectHandle == 2) { RblAddValue(IscCodes.RBL_eof_pending); } var buffer = response.Data; if (buffer.Length == 0) { // previous segment was last, this has no data return Array.Empty(); } var posInInput = 0; var posInOutput = 0; var tmp = new byte[requested * 2]; while (posInInput < buffer.Length) { var len = (int)IscHelper.VaxInteger(buffer, posInInput, 2); posInInput += 2; Array.Copy(buffer, posInInput, tmp, posInOutput, len); posInOutput += len; posInInput += len; } var actualBuffer = new byte[posInOutput]; Array.Copy(tmp, actualBuffer, posInOutput); return actualBuffer; } catch (IOException ex) { throw IscException.ForIOException(ex); } } public override void PutSegment(byte[] buffer) { try { _database.Xdr.Write(IscCodes.op_batch_segments); _database.Xdr.Write(_blobHandle); _database.Xdr.WriteBlobBuffer(buffer); _database.Xdr.Flush(); _database.ReadResponse(); } catch (IOException ex) { throw IscException.ForIOException(ex); } } public override async ValueTask PutSegmentAsync(byte[] buffer, CancellationToken cancellationToken = default) { try { await _database.Xdr.WriteAsync(IscCodes.op_batch_segments, cancellationToken).ConfigureAwait(false); await _database.Xdr.WriteAsync(_blobHandle, cancellationToken).ConfigureAwait(false); await _database.Xdr.WriteBlobBufferAsync(buffer, cancellationToken).ConfigureAwait(false); await _database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false); await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false); } catch (IOException ex) { throw 
IscException.ForIOException(ex); } } public override void Seek(int offset, int seekMode) { try { _database.Xdr.Write(IscCodes.op_seek_blob); _database.Xdr.Write(_blobHandle); _database.Xdr.Write(seekMode); _database.Xdr.Write(offset); _database.Xdr.Flush(); var response = (GenericResponse)_database.ReadResponse(); _position = response.ObjectHandle; } catch (IOException ex) { throw IscException.ForIOException(ex); } } public override async ValueTask SeekAsync(int offset, int seekMode, CancellationToken cancellationToken = default) { try { await _database.Xdr.WriteAsync(IscCodes.op_seek_blob, cancellationToken).ConfigureAwait(false); await _database.Xdr.WriteAsync(_blobHandle, cancellationToken).ConfigureAwait(false); await _database.Xdr.WriteAsync(seekMode, cancellationToken).ConfigureAwait(false); await _database.Xdr.WriteAsync(offset, cancellationToken).ConfigureAwait(false); await _database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false); var response = (GenericResponse)await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false); _position = response.ObjectHandle; } catch (IOException ex) { throw IscException.ForIOException(ex); } } public override void Close() { _database.ReleaseObject(IscCodes.op_close_blob, _blobHandle); } public override ValueTask CloseAsync(CancellationToken cancellationToken = default) { return _database.ReleaseObjectAsync(IscCodes.op_close_blob, _blobHandle, cancellationToken); } public override void Cancel() { _database.ReleaseObject(IscCodes.op_cancel_blob, _blobHandle); } public override ValueTask CancelAsync(CancellationToken cancellationToken = default) { return _database.ReleaseObjectAsync(IscCodes.op_cancel_blob, _blobHandle, cancellationToken); } #endregion #region Private API Methods private void CreateOrOpen(int op, BlobParameterBuffer bpb) { try { _database.Xdr.Write(op); if (bpb != null) { _database.Xdr.WriteTyped(IscCodes.isc_bpb_version1, bpb.ToArray()); } _database.Xdr.Write(_transaction.Handle); 
_database.Xdr.Write(_blobId); _database.Xdr.Flush(); var response = (GenericResponse)_database.ReadResponse(); _blobId = response.BlobId; _blobHandle = response.ObjectHandle; _isOpen = true; } catch (IOException ex) { throw IscException.ForIOException(ex); } } private async ValueTask CreateOrOpenAsync(int op, BlobParameterBuffer bpb, CancellationToken cancellationToken = default) { try { await _database.Xdr.WriteAsync(op, cancellationToken).ConfigureAwait(false); if (bpb != null) { await _database.Xdr.WriteTypedAsync(IscCodes.isc_bpb_version1, bpb.ToArray(), cancellationToken).ConfigureAwait(false); } await _database.Xdr.WriteAsync(_transaction.Handle, cancellationToken).ConfigureAwait(false); await _database.Xdr.WriteAsync(_blobId, cancellationToken).ConfigureAwait(false); await _database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false); var response = (GenericResponse)await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false); _blobId = response.BlobId; _blobHandle = response.ObjectHandle; _isOpen = true; } catch (IOException ex) { throw IscException.ForIOException(ex); } } #endregion } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/Version10/GdsDatabase.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. 
*/ //$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net) using System; using System.Collections.Generic; using System.IO; using System.Net; using System.Text; using System.Threading; using System.Threading.Tasks; using FirebirdSql.Data.Common; namespace FirebirdSql.Data.Client.Managed.Version10; internal class GdsDatabase : DatabaseBase { protected const int PartnerIdentification = 0; protected const int AddressOfAstRoutine = 0; protected const int ArgumentToAstRoutine = 0; protected internal const int DatabaseObjectId = 0; protected internal const int Incarnation = 0; #region Fields protected GdsConnection _connection; protected GdsEventManager _eventManager; protected int _handle; #endregion #region Properties public override bool UseUtf8ParameterBuffer => false; public override int Handle { get { return _handle; } } public override bool HasRemoteEventSupport { get { return true; } } public override bool ConnectionBroken { get { return _connection.ConnectionBroken; } } public XdrReaderWriter Xdr { get { return _connection.Xdr; } } public AuthBlock AuthBlock { get { return _connection.AuthBlock; } } #endregion #region Constructors public GdsDatabase(GdsConnection connection) : base(connection.Charset, connection.PacketSize, connection.Dialect) { _connection = connection; _handle = -1; } #endregion #region Attach/Detach Methods public override void Attach(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey) { try { SendAttachToBuffer(dpb, database); Xdr.Flush(); ProcessAttachResponse((GenericResponse)ReadResponse()); } catch (IscException) { SafelyDetach(); throw; } catch (IOException ex) { SafelyDetach(); throw IscException.ForIOException(ex); } AfterAttachActions(); } public override async ValueTask AttachAsync(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey, CancellationToken cancellationToken = default) { try { await SendAttachToBufferAsync(dpb, database, cancellationToken).ConfigureAwait(false); await 
Xdr.FlushAsync(cancellationToken).ConfigureAwait(false); await ProcessAttachResponseAsync((GenericResponse)await ReadResponseAsync(cancellationToken).ConfigureAwait(false), cancellationToken).ConfigureAwait(false); } catch (IscException) { await SafelyDetachAsync(cancellationToken).ConfigureAwait(false); throw; } catch (IOException ex) { await SafelyDetachAsync(cancellationToken).ConfigureAwait(false); throw IscException.ForIOException(ex); } await AfterAttachActionsAsync(cancellationToken).ConfigureAwait(false); } protected virtual void SendAttachToBuffer(DatabaseParameterBufferBase dpb, string database) { Xdr.Write(IscCodes.op_attach); Xdr.Write(DatabaseObjectId); if (!string.IsNullOrEmpty(AuthBlock.Password)) { dpb.Append(IscCodes.isc_dpb_password, AuthBlock.Password); } Xdr.WriteBuffer(dpb.Encoding.GetBytes(database)); Xdr.WriteBuffer(dpb.ToArray()); } protected virtual async ValueTask SendAttachToBufferAsync(DatabaseParameterBufferBase dpb, string database, CancellationToken cancellationToken = default) { await Xdr.WriteAsync(IscCodes.op_attach, cancellationToken).ConfigureAwait(false); await Xdr.WriteAsync(DatabaseObjectId, cancellationToken).ConfigureAwait(false); if (!string.IsNullOrEmpty(AuthBlock.Password)) { dpb.Append(IscCodes.isc_dpb_password, AuthBlock.Password); } await Xdr.WriteBufferAsync(dpb.Encoding.GetBytes(database), cancellationToken).ConfigureAwait(false); await Xdr.WriteBufferAsync(dpb.ToArray(), cancellationToken).ConfigureAwait(false); } protected virtual void ProcessAttachResponse(GenericResponse response) { _handle = response.ObjectHandle; } protected virtual ValueTask ProcessAttachResponseAsync(GenericResponse response, CancellationToken cancellationToken = default) { _handle = response.ObjectHandle; return ValueTask.CompletedTask; } protected void AfterAttachActions() { ServerVersion = GetServerVersion(); } protected async ValueTask AfterAttachActionsAsync(CancellationToken cancellationToken = default) { ServerVersion = await 
GetServerVersionAsync(cancellationToken).ConfigureAwait(false); } public override void AttachWithTrustedAuth(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey) { throw new NotSupportedException("Trusted Auth isn't supported on < FB2.1."); } public override ValueTask AttachWithTrustedAuthAsync(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey, CancellationToken cancellationToken = default) { throw new NotSupportedException("Trusted Auth isn't supported on < FB2.1."); } public override void Detach() { if (TransactionCount > 0) { throw IscException.ForErrorCodeIntParam(IscCodes.isc_open_trans, TransactionCount); } try { CloseEventManager(); var detach = _handle != -1; if (detach) { Xdr.Write(IscCodes.op_detach); Xdr.Write(_handle); } Xdr.Write(IscCodes.op_disconnect); Xdr.Flush(); if (detach) { ReadResponse(); } CloseConnection(); } catch (IOException ex) { try { CloseConnection(); } catch (IOException) { } throw IscException.ForIOException(ex); } finally { _connection = null; _eventManager = null; ServerVersion = null; _handle = -1; WarningMessage = null; TransactionCount = 0; } } public override async ValueTask DetachAsync(CancellationToken cancellationToken = default) { if (TransactionCount > 0) { throw IscException.ForErrorCodeIntParam(IscCodes.isc_open_trans, TransactionCount); } try { await CloseEventManagerAsync(cancellationToken).ConfigureAwait(false); var detach = _handle != -1; if (detach) { await Xdr.WriteAsync(IscCodes.op_detach, cancellationToken).ConfigureAwait(false); await Xdr.WriteAsync(_handle, cancellationToken).ConfigureAwait(false); } await Xdr.WriteAsync(IscCodes.op_disconnect, cancellationToken).ConfigureAwait(false); await Xdr.FlushAsync(cancellationToken).ConfigureAwait(false); if (detach) { await ReadResponseAsync(cancellationToken).ConfigureAwait(false); } await CloseConnectionAsync(cancellationToken).ConfigureAwait(false); } catch (IOException ex) { try { await 
CloseConnectionAsync(cancellationToken).ConfigureAwait(false); } catch (IOException) { } throw IscException.ForIOException(ex); } finally { _connection = null; _eventManager = null; ServerVersion = null; _handle = -1; WarningMessage = null; TransactionCount = 0; } } protected internal void SafelyDetach() { try { Detach(); } catch { } } protected internal async ValueTask SafelyDetachAsync(CancellationToken cancellationToken = default) { try { await DetachAsync(cancellationToken).ConfigureAwait(false); } catch { } } #endregion #region Database Methods public override void CreateDatabase(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey) { try { SendCreateToBuffer(dpb, database); Xdr.Flush(); ProcessCreateResponse((GenericResponse)ReadResponse()); } catch (IOException ex) { throw IscException.ForIOException(ex); } } public override async ValueTask CreateDatabaseAsync(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey, CancellationToken cancellationToken = default) { try { await SendCreateToBufferAsync(dpb, database, cancellationToken).ConfigureAwait(false); await Xdr.FlushAsync(cancellationToken).ConfigureAwait(false); await ProcessCreateResponseAsync((GenericResponse)await ReadResponseAsync(cancellationToken).ConfigureAwait(false), cancellationToken).ConfigureAwait(false); } catch (IOException ex) { throw IscException.ForIOException(ex); } } protected virtual void SendCreateToBuffer(DatabaseParameterBufferBase dpb, string database) { Xdr.Write(IscCodes.op_create); Xdr.Write(DatabaseObjectId); if (!string.IsNullOrEmpty(AuthBlock.Password)) { dpb.Append(IscCodes.isc_dpb_password, AuthBlock.Password); } Xdr.WriteBuffer(dpb.Encoding.GetBytes(database)); Xdr.WriteBuffer(dpb.ToArray()); } protected virtual async ValueTask SendCreateToBufferAsync(DatabaseParameterBufferBase dpb, string database, CancellationToken cancellationToken = default) { await Xdr.WriteAsync(IscCodes.op_create, cancellationToken).ConfigureAwait(false); await 
Xdr.WriteAsync(DatabaseObjectId, cancellationToken).ConfigureAwait(false); if (!string.IsNullOrEmpty(AuthBlock.Password)) { dpb.Append(IscCodes.isc_dpb_password, AuthBlock.Password); } await Xdr.WriteBufferAsync(dpb.Encoding.GetBytes(database), cancellationToken).ConfigureAwait(false); await Xdr.WriteBufferAsync(dpb.ToArray(), cancellationToken).ConfigureAwait(false); } protected void ProcessCreateResponse(GenericResponse response) { _handle = response.ObjectHandle; } protected ValueTask ProcessCreateResponseAsync(GenericResponse response, CancellationToken cancellationToken = default) { _handle = response.ObjectHandle; return ValueTask.CompletedTask; } public override void CreateDatabaseWithTrustedAuth(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey) { throw new NotSupportedException("Trusted Auth isn't supported on < FB2.1."); } public override ValueTask CreateDatabaseWithTrustedAuthAsync(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey, CancellationToken cancellationToken = default) { throw new NotSupportedException("Trusted Auth isn't supported on < FB2.1."); } public override void DropDatabase() { try { Xdr.Write(IscCodes.op_drop_database); Xdr.Write(_handle); Xdr.Flush(); ReadResponse(); _handle = -1; } catch (IOException ex) { throw IscException.ForIOException(ex); } } public override async ValueTask DropDatabaseAsync(CancellationToken cancellationToken = default) { try { await Xdr.WriteAsync(IscCodes.op_drop_database, cancellationToken).ConfigureAwait(false); await Xdr.WriteAsync(_handle, cancellationToken).ConfigureAwait(false); await Xdr.FlushAsync(cancellationToken).ConfigureAwait(false); await ReadResponseAsync(cancellationToken).ConfigureAwait(false); _handle = -1; } catch (IOException ex) { throw IscException.ForIOException(ex); } } #endregion #region Auxiliary Connection Methods public virtual (int auxHandle, string ipAddress, int portNumber, int timeout) ConnectionRequest() { try { 
Xdr.Write(IscCodes.op_connect_request);
		Xdr.Write(IscCodes.P_REQ_async);
		Xdr.Write(_handle);
		Xdr.Write(PartnerIdentification);
		Xdr.Flush();
		ReadOperation();
		var auxHandle = Xdr.ReadInt32();
		// 8 bytes of padding precede the response length.
		var garbage1 = new byte[8];
		Xdr.ReadBytes(garbage1, 8);
		var respLen = Xdr.ReadInt32();
		// Round the length up to the XDR 4-byte boundary.
		respLen += respLen % 4;
		// The payload is a sockaddr_in: family (2 bytes), port (2 bytes, network order), address (4 bytes), padding.
		var sin_family = new byte[2];
		Xdr.ReadBytes(sin_family, 2);
		respLen -= 2;
		var sin_port = new byte[2];
		Xdr.ReadBytes(sin_port, 2);
		var portNumber = (ushort)IPAddress.NetworkToHostOrder(BitConverter.ToInt16(sin_port, 0));
		respLen -= 2;
		// * The address returned by the server may be incorrect if it is behind a NAT box
		// * so we must use the address that was used to connect the main socket, not the
		// * address reported by the server.
		var sin_addr = new byte[4];
		Xdr.ReadBytes(sin_addr, 4);
		var ipAddress = _connection.IPAddress.ToString();
		respLen -= 4;
		// Consume whatever remains of the declared payload.
		var garbage2 = new byte[respLen];
		Xdr.ReadBytes(garbage2, respLen);
		Xdr.ReadStatusVector();
		return (auxHandle, ipAddress, portNumber, _connection.Timeout);
	}
	catch (IOException ex)
	{
		throw IscException.ForIOException(ex);
	}
}

// Async counterpart of ConnectionRequest.
public virtual async ValueTask<(int auxHandle, string ipAddress, int portNumber, int timeout)> ConnectionRequestAsync(CancellationToken cancellationToken = default)
{
	try
	{
		await Xdr.WriteAsync(IscCodes.op_connect_request, cancellationToken).ConfigureAwait(false);
		await Xdr.WriteAsync(IscCodes.P_REQ_async, cancellationToken).ConfigureAwait(false);
		await Xdr.WriteAsync(_handle, cancellationToken).ConfigureAwait(false);
		await Xdr.WriteAsync(PartnerIdentification, cancellationToken).ConfigureAwait(false);
		await Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
		await ReadOperationAsync(cancellationToken).ConfigureAwait(false);
		var auxHandle = await Xdr.ReadInt32Async(cancellationToken).ConfigureAwait(false);
		var garbage1 = new byte[8];
		await Xdr.ReadBytesAsync(garbage1, 8, cancellationToken).ConfigureAwait(false);
		var respLen = await Xdr.ReadInt32Async(cancellationToken).ConfigureAwait(false);
		respLen += respLen % 4;
		var sin_family = new byte[2];
		await Xdr.ReadBytesAsync(sin_family, 2, cancellationToken).ConfigureAwait(false);
		respLen -= 2;
		var sin_port = new byte[2];
		await Xdr.ReadBytesAsync(sin_port, 2, cancellationToken).ConfigureAwait(false);
		var portNumber = (ushort)IPAddress.NetworkToHostOrder(BitConverter.ToInt16(sin_port, 0));
		respLen -= 2;
		// * The address returned by the server may be incorrect if it is behind a NAT box
		// * so we must use the address that was used to connect the main socket, not the
		// * address reported by the server.
		var sin_addr = new byte[4];
		await Xdr.ReadBytesAsync(sin_addr, 4, cancellationToken).ConfigureAwait(false);
		var ipAddress = _connection.IPAddress.ToString();
		respLen -= 4;
		var garbage2 = new byte[respLen];
		await Xdr.ReadBytesAsync(garbage2, respLen, cancellationToken).ConfigureAwait(false);
		await Xdr.ReadStatusVectorAsync(cancellationToken).ConfigureAwait(false);
		return (auxHandle, ipAddress, portNumber, _connection.Timeout);
	}
	catch (IOException ex)
	{
		throw IscException.ForIOException(ex);
	}
}

#endregion

#region Connection Methods

// Closes the underlying network connection.
public void CloseConnection()
{
	_connection.Disconnect();
}

// Async counterpart of CloseConnection.
public ValueTask CloseConnectionAsync(CancellationToken cancellationToken = default)
{
	return _connection.DisconnectAsync(cancellationToken);
}

#endregion

#region Remote Events Methods

// Shuts down the auxiliary event channel, if one is open.
public override void CloseEventManager()
{
	if (_eventManager != null)
	{
		_eventManager.Close();
		_eventManager = null;
	}
}

// Async counterpart of CloseEventManager.
public override async ValueTask CloseEventManagerAsync(CancellationToken cancellationToken = default)
{
	if (_eventManager != null)
	{
		await _eventManager.CloseAsync(cancellationToken).ConfigureAwait(false);
		_eventManager = null;
	}
}

// Registers interest in the events described by remoteEvent (op_que_events).
public override void QueueEvents(RemoteEvent remoteEvent)
{
	try
	{
		// Lazily open the auxiliary channel on first use.
		if (_eventManager == null)
		{
			var (auxHandle, ipAddress, portNumber, timeout) = ConnectionRequest();
			_eventManager = new GdsEventManager(auxHandle, ipAddress, portNumber, timeout);
			_eventManager.Open();
			// Fire-and-forget: the listener task runs until the channel is closed.
			var dummy =
_eventManager.StartWaitingForEvents(remoteEvent);
		}
		remoteEvent.LocalId++;
		// Build the event parameter buffer describing the requested events.
		var epb = remoteEvent.BuildEpb();
		var epbData = epb.ToArray();
		Xdr.Write(IscCodes.op_que_events);
		Xdr.Write(_handle);
		Xdr.WriteBuffer(epbData);
		// AST routine address/argument are meaningless over the wire; placeholders only.
		Xdr.Write(AddressOfAstRoutine);
		Xdr.Write(ArgumentToAstRoutine);
		Xdr.Write(remoteEvent.LocalId);
		Xdr.Flush();
		var response = (GenericResponse)ReadResponse();
		// The server assigns the id used later to cancel this event request.
		remoteEvent.RemoteId = response.ObjectHandle;
	}
	catch (IOException ex)
	{
		throw IscException.ForIOException(ex);
	}
}

// Async counterpart of QueueEvents.
public override async ValueTask QueueEventsAsync(RemoteEvent remoteEvent, CancellationToken cancellationToken = default)
{
	try
	{
		if (_eventManager == null)
		{
			var (auxHandle, ipAddress, portNumber, timeout) = await ConnectionRequestAsync(cancellationToken).ConfigureAwait(false);
			_eventManager = new GdsEventManager(auxHandle, ipAddress, portNumber, timeout);
			await _eventManager.OpenAsync(cancellationToken).ConfigureAwait(false);
			// Fire-and-forget: the listener task runs until the channel is closed.
			var dummy = _eventManager.StartWaitingForEvents(remoteEvent);
		}
		remoteEvent.LocalId++;
		var epb = remoteEvent.BuildEpb();
		var epbData = epb.ToArray();
		await Xdr.WriteAsync(IscCodes.op_que_events, cancellationToken).ConfigureAwait(false);
		await Xdr.WriteAsync(_handle, cancellationToken).ConfigureAwait(false);
		await Xdr.WriteBufferAsync(epbData, cancellationToken).ConfigureAwait(false);
		await Xdr.WriteAsync(AddressOfAstRoutine, cancellationToken).ConfigureAwait(false);
		await Xdr.WriteAsync(ArgumentToAstRoutine, cancellationToken).ConfigureAwait(false);
		await Xdr.WriteAsync(remoteEvent.LocalId, cancellationToken).ConfigureAwait(false);
		await Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
		var response = (GenericResponse)await ReadResponseAsync(cancellationToken).ConfigureAwait(false);
		remoteEvent.RemoteId = response.ObjectHandle;
	}
	catch (IOException ex)
	{
		throw IscException.ForIOException(ex);
	}
}

// Cancels a previously queued event request (op_cancel_events).
public override void CancelEvents(RemoteEvent events)
{
	try
	{
		Xdr.Write(IscCodes.op_cancel_events);
		Xdr.Write(_handle);
		Xdr.Write(events.LocalId);
		Xdr.Flush();
		ReadResponse();
	}
	catch (IOException ex)
	{
		throw IscException.ForIOException(ex);
	}
}

// Async counterpart of CancelEvents.
public override async ValueTask CancelEventsAsync(RemoteEvent events, CancellationToken cancellationToken = default)
{
	try
	{
		await Xdr.WriteAsync(IscCodes.op_cancel_events, cancellationToken).ConfigureAwait(false);
		await Xdr.WriteAsync(_handle, cancellationToken).ConfigureAwait(false);
		await Xdr.WriteAsync(events.LocalId, cancellationToken).ConfigureAwait(false);
		await Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
		await ReadResponseAsync(cancellationToken).ConfigureAwait(false);
	}
	catch (IOException ex)
	{
		throw IscException.ForIOException(ex);
	}
}

#endregion

#region Transaction Methods

// Starts a new transaction with the supplied TPB.
public override TransactionBase BeginTransaction(TransactionParameterBuffer tpb)
{
	var transaction = new GdsTransaction(this);
	transaction.BeginTransaction(tpb);
	return transaction;
}

// Async counterpart of BeginTransaction.
// The <TransactionBase> return-type argument was lost during extraction and is restored here;
// without it the method cannot return the transaction.
public override async ValueTask<TransactionBase> BeginTransactionAsync(TransactionParameterBuffer tpb, CancellationToken cancellationToken = default)
{
	var transaction = new GdsTransaction(this);
	await transaction.BeginTransactionAsync(tpb, cancellationToken).ConfigureAwait(false);
	return transaction;
}

#endregion

#region Cancel Methods

// Operation cancellation needs protocol 12 (FB 2.5) or newer.
public override void CancelOperation(short kind)
{
	throw new NotSupportedException("Cancel Operation isn't supported on < FB2.5.");
}

// Async counterpart of CancelOperation.
public override ValueTask CancelOperationAsync(short kind, CancellationToken cancellationToken = default)
{
	throw new NotSupportedException("Cancel Operation isn't supported on < FB2.5.");
}

#endregion

#region Statement Creation Methods

// Creates a statement that is not yet bound to a transaction.
public override StatementBase CreateStatement()
{
	return new GdsStatement(this);
}

// Creates a statement bound to the given transaction.
public override StatementBase CreateStatement(TransactionBase transaction)
{
	return new GdsStatement(this, (GdsTransaction)transaction);
}

#endregion

#region Parameter Buffers

// DPB flavor 1 is the only one understood by this protocol version.
public override DatabaseParameterBufferBase CreateDatabaseParameterBuffer()
{
	return new DatabaseParameterBuffer1(ParameterBufferEncoding);
}

public override
EventParameterBuffer CreateEventParameterBuffer()
{
	return new EventParameterBuffer(Charset.Encoding);
}

public override TransactionParameterBuffer CreateTransactionParameterBuffer()
{
	return new TransactionParameterBuffer(Charset.Encoding);
}

#endregion

#region Database Information Methods

// Queries database info items using the default buffer size.
// (The <object> / <List<object>> / <int> / <IResponse> generic arguments in this region were
// lost during extraction and are restored below.)
public override List<object> GetDatabaseInfo(byte[] items)
{
	return GetDatabaseInfo(items, IscCodes.DEFAULT_MAX_BUFFER_SIZE);
}

public override ValueTask<List<object>> GetDatabaseInfoAsync(byte[] items, CancellationToken cancellationToken = default)
{
	return GetDatabaseInfoAsync(items, IscCodes.DEFAULT_MAX_BUFFER_SIZE, cancellationToken);
}

// Queries database info items into a buffer of the given size and parses the reply.
public override List<object> GetDatabaseInfo(byte[] items, int bufferLength)
{
	var buffer = new byte[bufferLength];
	DatabaseInfo(items, buffer, buffer.Length);
	return IscHelper.ParseDatabaseInfo(buffer, Charset);
}

public override async ValueTask<List<object>> GetDatabaseInfoAsync(byte[] items, int bufferLength, CancellationToken cancellationToken = default)
{
	var buffer = new byte[bufferLength];
	await DatabaseInfoAsync(items, buffer, buffer.Length, cancellationToken).ConfigureAwait(false);
	return IscHelper.ParseDatabaseInfo(buffer, Charset);
}

#endregion

#region Release Object

// Releases a server-side object identified by id using the given opcode.
public virtual void ReleaseObject(int op, int id)
{
	try
	{
		SendReleaseObjectToBuffer(op, id);
		Xdr.Flush();
		ProcessReleaseObjectResponse(ReadResponse());
	}
	catch (IOException ex)
	{
		throw IscException.ForIOException(ex);
	}
}

// Async counterpart of ReleaseObject.
public virtual async ValueTask ReleaseObjectAsync(int op, int id, CancellationToken cancellationToken = default)
{
	try
	{
		await SendReleaseObjectToBufferAsync(op, id, cancellationToken).ConfigureAwait(false);
		await Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
		await ProcessReleaseObjectResponseAsync(await ReadResponseAsync(cancellationToken).ConfigureAwait(false), cancellationToken).ConfigureAwait(false);
	}
	catch (IOException ex)
	{
		throw IscException.ForIOException(ex);
	}
}

protected virtual void SendReleaseObjectToBuffer(int op, int id)
{
	Xdr.Write(op);
	Xdr.Write(id);
}

protected virtual async ValueTask SendReleaseObjectToBufferAsync(int op, int id, CancellationToken cancellationToken = default)
{
	await Xdr.WriteAsync(op, cancellationToken).ConfigureAwait(false);
	await Xdr.WriteAsync(id, cancellationToken).ConfigureAwait(false);
}

// No extra processing needed on this protocol version; newer versions override this.
protected virtual void ProcessReleaseObjectResponse(IResponse response)
{ }

protected virtual ValueTask ProcessReleaseObjectResponseAsync(IResponse response, CancellationToken cancellationToken = default)
{
	return ValueTask.CompletedTask;
}

#endregion

#region Response Methods

// Reads the next operation code from the wire.
public virtual int ReadOperation()
{
	return Xdr.ReadOperation();
}

public virtual ValueTask<int> ReadOperationAsync(CancellationToken cancellationToken = default)
{
	return Xdr.ReadOperationAsync(cancellationToken);
}

// Reads one response and throws if it carries an error status.
public virtual IResponse ReadResponse()
{
	var response = ReadSingleResponse();
	response.HandleResponseException();
	return response;
}

public virtual async ValueTask<IResponse> ReadResponseAsync(CancellationToken cancellationToken = default)
{
	var response = await ReadSingleResponseAsync(cancellationToken).ConfigureAwait(false);
	response.HandleResponseException();
	return response;
}

// Same as above, but for an operation code that was already consumed.
public virtual IResponse ReadResponse(int operation)
{
	var response = ReadSingleResponse(operation);
	response.HandleResponseException();
	return response;
}

public virtual async ValueTask<IResponse> ReadResponseAsync(int operation, CancellationToken cancellationToken = default)
{
	var response = await ReadSingleResponseAsync(operation, cancellationToken).ConfigureAwait(false);
	response.HandleResponseException();
	return response;
}

// Drains the given number of pending responses, ignoring server-side errors,
// to keep the wire in sync after a failed multi-request exchange.
public void SafeFinishFetching(int numberOfResponses)
{
	while (numberOfResponses > 0)
	{
		numberOfResponses--;
		try
		{
			ReadResponse();
		}
		catch (IscException)
		{ }
	}
}

public async ValueTask SafeFinishFetchingAsync(int numberOfResponses, CancellationToken cancellationToken = default)
{
	while (numberOfResponses > 0)
	{
		numberOfResponses--;
		try
		{
			await ReadResponseAsync(cancellationToken).ConfigureAwait(false);
		}
		catch (IscException)
		{ }
	}
}

#endregion

#region
Protected Methods

protected IResponse ReadSingleResponse()
{
	return ReadSingleResponse(ReadOperation());
}

// The <IResponse> return-type arguments here were lost during extraction and are restored;
// callers need the parsed response.
protected async ValueTask<IResponse> ReadSingleResponseAsync(CancellationToken cancellationToken = default)
{
	return await ReadSingleResponseAsync(await ReadOperationAsync(cancellationToken).ConfigureAwait(false), cancellationToken).ConfigureAwait(false);
}

// Parses one response for the given operation and attaches any pending warning callback.
protected virtual IResponse ReadSingleResponse(int operation)
{
	var response = _connection.ProcessOperation(operation);
	response.HandleResponseWarning(WarningMessage);
	return response;
}

protected virtual async ValueTask<IResponse> ReadSingleResponseAsync(int operation, CancellationToken cancellationToken = default)
{
	var response = await _connection.ProcessOperationAsync(operation, cancellationToken).ConfigureAwait(false);
	response.HandleResponseWarning(WarningMessage);
	return response;
}

#endregion

#region Private Methods

// Executes op_info_database and copies at most bufferLength bytes of the reply into buffer.
private void DatabaseInfo(byte[] items, byte[] buffer, int bufferLength)
{
	try
	{
		Xdr.Write(IscCodes.op_info_database);
		Xdr.Write(_handle);
		Xdr.Write(Incarnation);
		Xdr.WriteBuffer(items, items.Length);
		Xdr.Write(bufferLength);
		Xdr.Flush();
		var response = (GenericResponse)ReadResponse();
		// Copy at most bufferLength bytes; the server may return less.
		var responseLength = bufferLength;
		if (response.Data.Length < bufferLength)
		{
			responseLength = response.Data.Length;
		}
		Buffer.BlockCopy(response.Data, 0, buffer, 0, responseLength);
	}
	catch (IOException ex)
	{
		throw IscException.ForIOException(ex);
	}
}

// Async counterpart of DatabaseInfo.
private async ValueTask DatabaseInfoAsync(byte[] items, byte[] buffer, int bufferLength, CancellationToken cancellationToken = default)
{
	try
	{
		await Xdr.WriteAsync(IscCodes.op_info_database, cancellationToken).ConfigureAwait(false);
		await Xdr.WriteAsync(_handle, cancellationToken).ConfigureAwait(false);
		await Xdr.WriteAsync(Incarnation, cancellationToken).ConfigureAwait(false);
		await Xdr.WriteBufferAsync(items, items.Length, cancellationToken).ConfigureAwait(false);
		await Xdr.WriteAsync(bufferLength, cancellationToken).ConfigureAwait(false);
		await Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
		var response = (GenericResponse)await ReadResponseAsync(cancellationToken).ConfigureAwait(false);
		var responseLength = bufferLength;
		if (response.Data.Length < bufferLength)
		{
			responseLength = response.Data.Length;
		}
		Buffer.BlockCopy(response.Data, 0, buffer, 0, responseLength);
	}
	catch (IOException ex)
	{
		throw IscException.ForIOException(ex);
	}
}

#endregion
}

================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/Version10/GdsEventManager.cs ================================================
/*
 *	The contents of this file are subject to the Initial
 *	Developer's Public License Version 1.0 (the "License");
 *	you may not use this file except in compliance with the
 *	License. You may obtain a copy of the License at
 *	https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *	Software distributed under the License is distributed on
 *	an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *	express or implied. See the License for the specific
 *	language governing rights and limitations under the License.
 *
 *	All Rights Reserved.
*/

//$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net)

using System;
using System.Diagnostics;
using System.Threading;
using System.Threading.Tasks;
using FirebirdSql.Data.Common;

namespace FirebirdSql.Data.Client.Managed.Version10;

// Listens on the auxiliary (event) connection and dispatches op_event notifications.
internal class GdsEventManager
{
	bool _closing;          // set (with Volatile) when Close/CloseAsync starts tearing the channel down
	int _handle;            // auxiliary object handle returned by op_connect_request
	string _ipAddress;
	int _portNumber;
	int _timeout;
	GdsDatabase _database;  // database wrapper over the auxiliary connection, used for XDR access

	public GdsEventManager(int handle, string ipAddress, int portNumber, int timeout)
	{
		_closing = false;
		_handle = handle;
		_ipAddress = ipAddress;
		_portNumber = portNumber;
		_timeout = timeout;
	}

	// Opens the auxiliary socket and wraps it in a GdsDatabase.
	public void Open()
	{
		var connection = new GdsConnection(_ipAddress, _portNumber, _timeout);
		connection.Connect();
		_database = new GdsDatabase(connection);
	}

	// Async counterpart of Open.
	public async ValueTask OpenAsync(CancellationToken cancellationToken = default)
	{
		var connection = new GdsConnection(_ipAddress, _portNumber, _timeout);
		await connection.ConnectAsync(cancellationToken).ConfigureAwait(false);
		_database = new GdsDatabase(connection);
	}

	// this is a special method that's not awaited
	public async Task StartWaitingForEvents(RemoteEvent remoteEvent)
	{
		while (true)
		{
			try
			{
				var operation = await _database.ReadOperationAsync(CancellationToken.None).ConfigureAwait(false);
				switch (operation)
				{
					case IscCodes.op_event:
						// The packet carries handle, counts buffer, AST info and event id;
						// every field must be consumed even though only the buffer is used.
						var dbHandle = await _database.Xdr.ReadInt32Async(CancellationToken.None).ConfigureAwait(false);
						var buffer = await _database.Xdr.ReadBufferAsync(CancellationToken.None).ConfigureAwait(false);
						var ast = new byte[8];
						await _database.Xdr.ReadBytesAsync(ast, 8, CancellationToken.None).ConfigureAwait(false);
						var eventId = await _database.Xdr.ReadInt32Async(CancellationToken.None).ConfigureAwait(false);
						// Update the counts and re-queue so the subscription stays alive.
						remoteEvent.EventCounts(buffer);
						await remoteEvent.Database.QueueEventsAsync(remoteEvent, CancellationToken.None).ConfigureAwait(false);
						break;
					default:
						Debug.Assert(false);
						break;
				}
			}
			catch (Exception) when (Volatile.Read(ref _closing))
			{
				// Expected failure while the channel is being shut down; exit quietly.
				return;
			}
			catch (Exception ex)
			{
				// Any other failure is reported to the subscriber and ends the loop.
				remoteEvent.EventError(ex);
				break;
			}
		}
	}

	// Marks the manager as closing and drops the auxiliary connection.
	public void Close()
	{
		Volatile.Write(ref _closing, true);
		_database.CloseConnection();
	}

	// Async counterpart of Close.
	public ValueTask CloseAsync(CancellationToken cancellationToken = default)
	{
		Volatile.Write(ref _closing, true);
		return _database.CloseConnectionAsync(cancellationToken);
	}
}

================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/Version10/GdsServiceManager.cs ================================================
/*
 *	The contents of this file are subject to the Initial
 *	Developer's Public License Version 1.0 (the "License");
 *	you may not use this file except in compliance with the
 *	License. You may obtain a copy of the License at
 *	https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *	Software distributed under the License is distributed on
 *	an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *	express or implied. See the License for the specific
 *	language governing rights and limitations under the License.
 *
 *	All Rights Reserved.
*/

//$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net)

using System;
using System.IO;
using System.Threading;
using System.Threading.Tasks;
using FirebirdSql.Data.Common;

namespace FirebirdSql.Data.Client.Managed.Version10;

// Service manager speaking wire protocol 10: attach/detach, start and query service requests.
internal class GdsServiceManager : ServiceManagerBase
{
	#region Fields

	private GdsConnection _connection;
	private GdsDatabase _database;  // used purely for its XDR stream and response parsing

	#endregion

	#region Properties

	// Protocol 10 SPBs use the connection charset, not UTF8.
	public override bool UseUtf8ParameterBuffer => false;

	public GdsConnection Connection
	{
		get { return _connection; }
	}

	public GdsDatabase Database
	{
		get { return _database; }
	}

	#endregion

	#region Constructors

	public GdsServiceManager(GdsConnection connection)
		: base(connection.Charset)
	{
		_connection = connection;
		_database = CreateDatabase(_connection);
		RewireWarningMessage();
	}

	#endregion

	#region Methods

	// Attaches to the service manager (op_service_attach) and stores the returned handle.
	public override void Attach(ServiceParameterBufferBase spb, string dataSource, int port, string service, byte[] cryptKey)
	{
		try
		{
			SendAttachToBuffer(spb, service);
			_database.Xdr.Flush();
			ProcessAttachResponse((GenericResponse)_database.ReadResponse());
		}
		catch (IOException ex)
		{
			// The channel is unusable after an I/O failure; tear it down before reporting.
			_database.Detach();
			throw IscException.ForIOException(ex);
		}
	}

	// Async counterpart of Attach.
	public override async ValueTask AttachAsync(ServiceParameterBufferBase spb, string dataSource, int port, string service, byte[] cryptKey, CancellationToken cancellationToken = default)
	{
		try
		{
			await SendAttachToBufferAsync(spb, service, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
			await ProcessAttachResponseAsync((GenericResponse)await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false), cancellationToken).ConfigureAwait(false);
		}
		catch (IOException ex)
		{
			await _database.DetachAsync(cancellationToken).ConfigureAwait(false);
			throw IscException.ForIOException(ex);
		}
	}

	// Writes the op_service_attach packet: opcode, object id, service name and the SPB.
	protected virtual void SendAttachToBuffer(ServiceParameterBufferBase spb, string service)
	{
		_database.Xdr.Write(IscCodes.op_service_attach);
		_database.Xdr.Write(GdsDatabase.DatabaseObjectId);
		_database.Xdr.Write(service);
		_database.Xdr.WriteBuffer(spb.ToArray());
	}

	// Async counterpart of SendAttachToBuffer.
	protected virtual async ValueTask SendAttachToBufferAsync(ServiceParameterBufferBase spb, string service, CancellationToken cancellationToken = default)
	{
		await _database.Xdr.WriteAsync(IscCodes.op_service_attach, cancellationToken).ConfigureAwait(false);
		await _database.Xdr.WriteAsync(GdsDatabase.DatabaseObjectId, cancellationToken).ConfigureAwait(false);
		await _database.Xdr.WriteAsync(service, cancellationToken).ConfigureAwait(false);
		await _database.Xdr.WriteBufferAsync(spb.ToArray(), cancellationToken).ConfigureAwait(false);
	}

	// The attach response carries the service handle.
	protected virtual void ProcessAttachResponse(GenericResponse response)
	{
		Handle = response.ObjectHandle;
	}

	// Async counterpart of ProcessAttachResponse (completes synchronously).
	protected virtual ValueTask ProcessAttachResponseAsync(GenericResponse response, CancellationToken cancellationToken = default)
	{
		Handle = response.ObjectHandle;
		return ValueTask.CompletedTask;
	}

	// Detaches from the service manager and always disconnects, even when the detach write fails.
	public override void Detach()
	{
		try
		{
			_database.Xdr.Write(IscCodes.op_service_detach);
			_database.Xdr.Write(Handle);
			_database.Xdr.Write(IscCodes.op_disconnect);
			_database.Xdr.Flush();
			Handle = 0;
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
		finally
		{
			try
			{
				_connection.Disconnect();
			}
			catch (IOException ex)
			{
				throw IscException.ForIOException(ex);
			}
			finally
			{
				// The manager is unusable from here on.
				_database = null;
				_connection = null;
			}
		}
	}

	// Async counterpart of Detach.
	public override async ValueTask DetachAsync(CancellationToken cancellationToken = default)
	{
		try
		{
			await _database.Xdr.WriteAsync(IscCodes.op_service_detach, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.WriteAsync(Handle, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.WriteAsync(IscCodes.op_disconnect, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
			Handle = 0;
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
		finally
		{
			try
			{
				await
_connection.DisconnectAsync(cancellationToken).ConfigureAwait(false);
			}
			catch (IOException ex)
			{
				throw IscException.ForIOException(ex);
			}
			finally
			{
				// The manager is unusable from here on.
				_database = null;
				_connection = null;
			}
		}
	}

	// Starts a service task described by the SPB (op_service_start).
	public override void Start(ServiceParameterBufferBase spb)
	{
		try
		{
			_database.Xdr.Write(IscCodes.op_service_start);
			_database.Xdr.Write(Handle);
			_database.Xdr.Write(0);
			_database.Xdr.WriteBuffer(spb.ToArray(), spb.Length);
			_database.Xdr.Flush();
			// The original wrapped this call in a redundant `catch (IscException) { throw; }`;
			// the rethrow added nothing, so the response is now read directly.
			_database.ReadResponse();
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
	}

	// Async counterpart of Start.
	public override async ValueTask StartAsync(ServiceParameterBufferBase spb, CancellationToken cancellationToken = default)
	{
		try
		{
			await _database.Xdr.WriteAsync(IscCodes.op_service_start, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.WriteAsync(Handle, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.WriteAsync(0, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.WriteBufferAsync(spb.ToArray(), spb.Length, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
			// Redundant catch-rethrow removed here as well (see Start).
			await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false);
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
	}

	// Queries the service (op_service_info) and copies the reply into buffer.
	public override void Query(ServiceParameterBufferBase spb, int requestLength, byte[] requestBuffer, int bufferLength, byte[] buffer)
	{
		try
		{
			_database.Xdr.Write(IscCodes.op_service_info);
			_database.Xdr.Write(Handle);
			_database.Xdr.Write(GdsDatabase.Incarnation);
			_database.Xdr.WriteBuffer(spb.ToArray(), spb.Length);
			_database.Xdr.WriteBuffer(requestBuffer, requestLength);
			_database.Xdr.Write(bufferLength);
			_database.Xdr.Flush();
			var response = (GenericResponse)_database.ReadResponse();
			// Copy at most bufferLength bytes; the server may return less.
			var responseLength = bufferLength;
			if (response.Data.Length < bufferLength)
			{
				responseLength = response.Data.Length;
			}
			Buffer.BlockCopy(response.Data, 0, buffer, 0, responseLength);
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
	}

	// Async counterpart of Query.
	public override async ValueTask QueryAsync(ServiceParameterBufferBase spb, int requestLength, byte[] requestBuffer, int bufferLength, byte[] buffer, CancellationToken cancellationToken = default)
	{
		try
		{
			await _database.Xdr.WriteAsync(IscCodes.op_service_info, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.WriteAsync(Handle, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.WriteAsync(GdsDatabase.Incarnation, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.WriteBufferAsync(spb.ToArray(), spb.Length, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.WriteBufferAsync(requestBuffer, requestLength, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.WriteAsync(bufferLength, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
			var response = (GenericResponse)await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false);
			var responseLength = bufferLength;
			if (response.Data.Length < bufferLength)
			{
				responseLength = response.Data.Length;
			}
			Buffer.BlockCopy(response.Data, 0, buffer, 0, responseLength);
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
	}

	public override ServiceParameterBufferBase CreateServiceParameterBuffer()
	{
		return new ServiceParameterBuffer2(Database.ParameterBufferEncoding);
	}

	// Overridden by newer protocol versions to create their own database wrapper.
	protected virtual GdsDatabase CreateDatabase(GdsConnection connection)
	{
		return new GdsDatabase(connection);
	}

	// Forwards warnings raised by the database wrapper to this manager's subscribers.
	private void RewireWarningMessage()
	{
		_database.WarningMessage = ex => WarningMessage?.Invoke(ex);
	}

	#endregion
}

================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/Version10/GdsStatement.cs ================================================
/*
 *	The contents of this file are subject to the Initial
 *	Developer's Public License Version 1.0 (the "License");
 *	you may not use this file except in
compliance with the
 *	License. You may obtain a copy of the License at
 *	https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *	Software distributed under the License is distributed on
 *	an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *	express or implied. See the License for the specific
 *	language governing rights and limitations under the License.
 *
 *	All Rights Reserved.
 */

//$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net)

using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using FirebirdSql.Data.Common;

namespace FirebirdSql.Data.Client.Managed.Version10;

// Statement implementation for wire protocol 10 (allocate/prepare/execute/fetch).
// The <DbValue[]> and <ArrayBase> generic arguments in this class were lost during
// extraction and are restored below.
internal class GdsStatement : StatementBase
{
	#region Fields

	protected int _handle;
	private bool _disposed;
	protected GdsDatabase _database;
	private GdsTransaction _transaction;
	protected Descriptor _parameters;
	protected Descriptor _fields;
	protected bool _allRowsFetched;
	private Queue<DbValue[]> _rows;   // rows buffered ahead of the reader
	private int _fetchSize;

	#endregion

	#region Properties

	public override DatabaseBase Database
	{
		get { return _database; }
	}

	public override TransactionBase Transaction
	{
		get { return _transaction; }
		set
		{
			if (_transaction != value)
			{
				// Detach the update handler from the previous transaction before switching.
				if (TransactionUpdate != null && _transaction != null)
				{
					_transaction.Update -= TransactionUpdate;
					TransactionUpdate = null;
				}
				if (value == null)
				{
					_transaction = null;
				}
				else
				{
					_transaction = (GdsTransaction)value;
					TransactionUpdate = new EventHandler(TransactionUpdated);
					_transaction.Update += TransactionUpdate;
				}
			}
		}
	}

	public override Descriptor Parameters
	{
		get { return _parameters; }
		set { _parameters = value; }
	}

	public override Descriptor Fields
	{
		get { return _fields; }
	}

	public override int FetchSize
	{
		get { return _fetchSize; }
		set { _fetchSize = value; }
	}

	public int Handle
	{
		get { return _handle; }
	}

	#endregion

	#region Constructors

	public GdsStatement(GdsDatabase database)
		: this(database, null)
	{ }

	public GdsStatement(GdsDatabase database, GdsTransaction transaction)
	{
		_handle = IscCodes.INVALID_OBJECT;
		_fetchSize = 200;
		_rows = new Queue<DbValue[]>();
		OutputParameters = new Queue<DbValue[]>();
		_database = database;
		if (transaction != null)
		{
			Transaction = transaction;
		}
	}

	#endregion

	#region Dispose2

	public override void Dispose2()
	{
		if (!_disposed)
		{
			_disposed = true;
			Release();
			Clear();
			_rows = null;
			OutputParameters = null;
			_database = null;
			_fields = null;
			_parameters = null;
			_transaction = null;
			_allRowsFetched = false;
			_handle = 0;
			_fetchSize = 0;
			base.Dispose2();
		}
	}

	public override async ValueTask Dispose2Async(CancellationToken cancellationToken = default)
	{
		if (!_disposed)
		{
			_disposed = true;
			await ReleaseAsync(cancellationToken).ConfigureAwait(false);
			Clear();
			_rows = null;
			OutputParameters = null;
			_database = null;
			_fields = null;
			_parameters = null;
			_transaction = null;
			_allRowsFetched = false;
			_handle = 0;
			_fetchSize = 0;
			await base.Dispose2Async(cancellationToken).ConfigureAwait(false);
		}
	}

	#endregion

	#region Blob Creation Metods

	public override BlobBase CreateBlob()
	{
		return new GdsBlob(_database, _transaction);
	}

	public override BlobBase CreateBlob(long blobId)
	{
		return new GdsBlob(_database, _transaction, blobId);
	}

	#endregion

	#region Array Creation Methods

	public override ArrayBase CreateArray(ArrayDesc descriptor)
	{
		var array = new GdsArray(descriptor);
		return array;
	}

	public override ValueTask<ArrayBase> CreateArrayAsync(ArrayDesc descriptor, CancellationToken cancellationToken = default)
	{
		var array = new GdsArray(descriptor);
		return ValueTask.FromResult<ArrayBase>(array);
	}

	public override ArrayBase CreateArray(string tableName, string fieldName)
	{
		var array = new GdsArray(_database, _transaction, tableName, fieldName);
		array.Initialize();
		return array;
	}

	public override async ValueTask<ArrayBase> CreateArrayAsync(string tableName, string fieldName, CancellationToken cancellationToken = default)
	{
		var array = new GdsArray(_database, _transaction, tableName, fieldName);
		await
array.InitializeAsync(cancellationToken).ConfigureAwait(false);
		return array;
	}

	public override ArrayBase CreateArray(long handle, string tableName, string fieldName)
	{
		var array = new GdsArray(_database, _transaction, handle, tableName, fieldName);
		array.Initialize();
		return array;
	}

	// The <ArrayBase> return-type argument was lost during extraction and is restored here.
	public override async ValueTask<ArrayBase> CreateArrayAsync(long handle, string tableName, string fieldName, CancellationToken cancellationToken = default)
	{
		var array = new GdsArray(_database, _transaction, handle, tableName, fieldName);
		await array.InitializeAsync(cancellationToken).ConfigureAwait(false);
		return array;
	}

	#endregion

	#region Batch Creation Methods

	// Batching needs protocol 16 (FB 4) or newer.
	public override BatchBase CreateBatch()
	{
		throw new NotSupportedException("Batching is not supported on this Firebird version.");
	}

	public override BatchParameterBuffer CreateBatchParameterBuffer()
	{
		throw new NotSupportedException("Batching is not supported on this Firebird version.");
	}

	#endregion

	#region Methods

	// Allocates the statement on first use, prepares commandText and caches the statement type.
	public override void Prepare(string commandText)
	{
		ClearAll();
		try
		{
			if (State == StatementState.Deallocated)
			{
				SendAllocateToBuffer();
				_database.Xdr.Flush();
				ProcessAllocateResponse((GenericResponse)_database.ReadResponse());
			}
			SendPrepareToBuffer(commandText);
			_database.Xdr.Flush();
			ProcessPrepareResponse((GenericResponse)_database.ReadResponse());
			// Ask the server what kind of statement was just prepared.
			SendInfoSqlToBuffer(StatementTypeInfoItems, IscCodes.STATEMENT_TYPE_BUFFER_SIZE);
			_database.Xdr.Flush();
			StatementType = ProcessStatementTypeInfoBuffer(ProcessInfoSqlResponse((GenericResponse)_database.ReadResponse()));
			State = StatementState.Prepared;
		}
		catch (IOException ex)
		{
			// An I/O failure right after allocation marks the statement as Error; otherwise keep the state.
			State = State == StatementState.Allocated ? StatementState.Error : State;
			throw IscException.ForIOException(ex);
		}
	}

	// Async counterpart of Prepare.
	public override async ValueTask PrepareAsync(string commandText, CancellationToken cancellationToken = default)
	{
		ClearAll();
		try
		{
			if (State == StatementState.Deallocated)
			{
				await SendAllocateToBufferAsync(cancellationToken).ConfigureAwait(false);
				await _database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
				await ProcessAllocateResponseAsync((GenericResponse)await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false), cancellationToken).ConfigureAwait(false);
			}
			await SendPrepareToBufferAsync(commandText, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
			await ProcessPrepareResponseAsync((GenericResponse)await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false), cancellationToken).ConfigureAwait(false);
			await SendInfoSqlToBufferAsync(StatementTypeInfoItems, IscCodes.STATEMENT_TYPE_BUFFER_SIZE, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
			StatementType = ProcessStatementTypeInfoBuffer(await ProcessInfoSqlResponseAsync((GenericResponse)await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false), cancellationToken).ConfigureAwait(false));
			State = StatementState.Prepared;
		}
		catch (IOException ex)
		{
			State = State == StatementState.Allocated ?
StatementState.Error : State; throw IscException.ForIOException(ex); } } public override void Execute(int timeout, IDescriptorFiller descriptorFiller) { EnsureNotDeallocated(); Clear(); try { SendExecuteToBuffer(timeout, descriptorFiller); _database.Xdr.Flush(); if (StatementType == DbStatementType.StoredProcedure) { ProcessStoredProcedureExecuteResponse((SqlResponse)_database.ReadResponse()); } var executeResponse = (GenericResponse)_database.ReadResponse(); ProcessExecuteResponse(executeResponse); if (DoRecordsAffected) { SendInfoSqlToBuffer(RowsAffectedInfoItems, IscCodes.ROWS_AFFECTED_BUFFER_SIZE); _database.Xdr.Flush(); RecordsAffected = ProcessRecordsAffectedBuffer(ProcessInfoSqlResponse((GenericResponse)_database.ReadResponse())); } else { RecordsAffected = -1; } State = StatementState.Executed; } catch (IOException ex) { State = StatementState.Error; throw IscException.ForIOException(ex); } } public override async ValueTask ExecuteAsync(int timeout, IDescriptorFiller descriptorFiller, CancellationToken cancellationToken = default) { EnsureNotDeallocated(); Clear(); try { await SendExecuteToBufferAsync(timeout, descriptorFiller, cancellationToken).ConfigureAwait(false); await _database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false); if (StatementType == DbStatementType.StoredProcedure) { await ProcessStoredProcedureExecuteResponseAsync((SqlResponse)await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false), cancellationToken).ConfigureAwait(false); } var executeResponse = (GenericResponse)await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false); await ProcessExecuteResponseAsync(executeResponse, cancellationToken).ConfigureAwait(false); if (DoRecordsAffected) { await SendInfoSqlToBufferAsync(RowsAffectedInfoItems, IscCodes.ROWS_AFFECTED_BUFFER_SIZE, cancellationToken).ConfigureAwait(false); await _database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false); RecordsAffected = ProcessRecordsAffectedBuffer(await 
ProcessInfoSqlResponseAsync((GenericResponse)await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false), cancellationToken).ConfigureAwait(false));
		}
		else
		{
			RecordsAffected = -1;
		}

		State = StatementState.Executed;
	}
	catch (IOException ex)
	{
		State = StatementState.Error;
		throw IscException.ForIOException(ex);
	}
}

// Returns the next data row, or null when there is nothing (more) to fetch.
// Stored procedures hand out their output parameters as a single pseudo-row;
// select statements request rows from the server in batches of _fetchSize via
// op_fetch and buffer them in _rows.
public override DbValue[] Fetch()
{
	EnsureNotDeallocated();

	if (StatementType == DbStatementType.StoredProcedure && !_allRowsFetched)
	{
		_allRowsFetched = true;
		return GetOutputParameters();
	}
	else if (StatementType == DbStatementType.Insert && _allRowsFetched)
	{
		return null;
	}
	else if (StatementType != DbStatementType.Select && StatementType != DbStatementType.SelectForUpdate)
	{
		return null;
	}

	if (!_allRowsFetched && _rows.Count == 0)
	{
		try
		{
			_database.Xdr.Write(IscCodes.op_fetch);
			_database.Xdr.Write(_handle);
			_database.Xdr.WriteBuffer(_fields.ToBlr().Data);
			_database.Xdr.Write(0); // p_sqldata_message_number
			_database.Xdr.Write(_fetchSize); // p_sqldata_messages
			_database.Xdr.Flush();

			var operation = _database.ReadOperation();
			if (operation == IscCodes.op_fetch_response)
			{
				// The first response reuses the operation code already read above.
				var hasOperation = true;
				while (!_allRowsFetched)
				{
					var response = hasOperation ? _database.ReadResponse(operation) : _database.ReadResponse();
					hasOperation = false;
					if (response is FetchResponse fetchResponse)
					{
						if (fetchResponse.Count > 0 && fetchResponse.Status == 0)
						{
							_rows.Enqueue(ReadRow());
						}
						else if (fetchResponse.Status == 100)
						{
							// Status 100 marks end of cursor.
							_allRowsFetched = true;
						}
						else
						{
							break;
						}
					}
					else
					{
						break;
					}
				}
			}
			else
			{
				_database.ReadResponse(operation);
			}
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
	}

	if (_rows != null && _rows.Count > 0)
	{
		return _rows.Dequeue();
	}
	else
	{
		_rows.Clear();
		return null;
	}
}
// Async counterpart of Fetch.
// NOTE(review): return type restored to ValueTask<DbValue[]> (generic argument was lost in
// extraction); it must match the sync Fetch() returning DbValue[].
public override async ValueTask<DbValue[]> FetchAsync(CancellationToken cancellationToken = default)
{
	EnsureNotDeallocated();

	if (StatementType == DbStatementType.StoredProcedure && !_allRowsFetched)
	{
		_allRowsFetched = true;
		return GetOutputParameters();
	}
	else if (StatementType == DbStatementType.Insert && _allRowsFetched)
	{
		return null;
	}
	else if (StatementType != DbStatementType.Select && StatementType != DbStatementType.SelectForUpdate)
	{
		return null;
	}

	if (!_allRowsFetched && _rows.Count == 0)
	{
		try
		{
			await _database.Xdr.WriteAsync(IscCodes.op_fetch, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.WriteAsync(_handle, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.WriteBufferAsync(_fields.ToBlr().Data, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.WriteAsync(0, cancellationToken).ConfigureAwait(false); // p_sqldata_message_number
			await _database.Xdr.WriteAsync(_fetchSize, cancellationToken).ConfigureAwait(false); // p_sqldata_messages
			await _database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);

			var operation = await _database.ReadOperationAsync(cancellationToken).ConfigureAwait(false);
			if (operation == IscCodes.op_fetch_response)
			{
				var hasOperation = true;
				while (!_allRowsFetched)
				{
					var response = hasOperation ? await _database.ReadResponseAsync(operation, cancellationToken).ConfigureAwait(false) : await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false);
					hasOperation = false;
					if (response is FetchResponse fetchResponse)
					{
						if (fetchResponse.Count > 0 && fetchResponse.Status == 0)
						{
							_rows.Enqueue(await ReadRowAsync(cancellationToken).ConfigureAwait(false));
						}
						else if (fetchResponse.Status == 100)
						{
							_allRowsFetched = true;
						}
						else
						{
							break;
						}
					}
					else
					{
						break;
					}
				}
			}
			else
			{
				await _database.ReadResponseAsync(operation, cancellationToken).ConfigureAwait(false);
			}
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
	}

	if (_rows != null && _rows.Count > 0)
	{
		return _rows.Dequeue();
	}
	else
	{
		_rows.Clear();
		return null;
	}
}

#endregion

#region Protected Methods

#region op_prepare methods

// Buffers (does not flush) the op_prepare_statement packet.
protected void SendPrepareToBuffer(string commandText)
{
	_database.Xdr.Write(IscCodes.op_prepare_statement);
	_database.Xdr.Write(_transaction.Handle);
	_database.Xdr.Write(_handle);
	_database.Xdr.Write((int)_database.Dialect);
	_database.Xdr.Write(commandText);
	_database.Xdr.WriteBuffer(DescribeInfoAndBindInfoItems, DescribeInfoAndBindInfoItems.Length);
	_database.Xdr.Write(IscCodes.PREPARE_INFO_BUFFER_SIZE);
}
// Async counterpart of SendPrepareToBuffer; the trailing await is completed on the following source line.
protected async ValueTask SendPrepareToBufferAsync(string commandText, CancellationToken cancellationToken = default)
{
	await _database.Xdr.WriteAsync(IscCodes.op_prepare_statement, cancellationToken).ConfigureAwait(false);
	await _database.Xdr.WriteAsync(_transaction.Handle, cancellationToken).ConfigureAwait(false);
	await _database.Xdr.WriteAsync(_handle, cancellationToken).ConfigureAwait(false);
	await _database.Xdr.WriteAsync((int)_database.Dialect, cancellationToken).ConfigureAwait(false);
	await _database.Xdr.WriteAsync(commandText, cancellationToken).ConfigureAwait(false);
	await _database.Xdr.WriteBufferAsync(DescribeInfoAndBindInfoItems, DescribeInfoAndBindInfoItems.Length, cancellationToken).ConfigureAwait(false);
	await
_database.Xdr.WriteAsync(IscCodes.PREPARE_INFO_BUFFER_SIZE, cancellationToken).ConfigureAwait(false);
}

// Parses the describe/bind info from op_prepare_statement into the output-field
// (_fields) and input-parameter (_parameters) descriptors.
protected void ProcessPrepareResponse(GenericResponse response)
{
	var descriptors = ParseSqlInfo(response.Data, DescribeInfoAndBindInfoItems, new Descriptor[] { null, null });
	_fields = descriptors[0];
	_parameters = descriptors[1];
}
// Async counterpart of ProcessPrepareResponse.
protected async ValueTask ProcessPrepareResponseAsync(GenericResponse response, CancellationToken cancellationToken = default)
{
	var descriptors = await ParseSqlInfoAsync(response.Data, DescribeInfoAndBindInfoItems, new Descriptor[] { null, null }, cancellationToken).ConfigureAwait(false);
	_fields = descriptors[0];
	_parameters = descriptors[1];
}

#endregion

#region op_info_sql methods

// Sends an op_info_sql request and returns the raw info buffer from the response.
protected override byte[] GetSqlInfo(byte[] items, int bufferLength)
{
	DoInfoSqlPacket(items, bufferLength);
	_database.Xdr.Flush();
	return ProcessInfoSqlResponse((GenericResponse)_database.ReadResponse());
}
// Async counterpart of GetSqlInfo.
// NOTE(review): return type restored to ValueTask<byte[]> (generic argument was lost in
// extraction); it must match the sync overload returning byte[].
protected override async ValueTask<byte[]> GetSqlInfoAsync(byte[] items, int bufferLength, CancellationToken cancellationToken = default)
{
	await DoInfoSqlPacketAsync(items, bufferLength, cancellationToken).ConfigureAwait(false);
	await _database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
	return await ProcessInfoSqlResponseAsync((GenericResponse)await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false), cancellationToken).ConfigureAwait(false);
}

// Buffers the op_info_sql packet, translating I/O failures to IscException.
protected void DoInfoSqlPacket(byte[] items, int bufferLength)
{
	try
	{
		SendInfoSqlToBuffer(items, bufferLength);
	}
	catch (IOException ex)
	{
		throw IscException.ForIOException(ex);
	}
}
protected async ValueTask DoInfoSqlPacketAsync(byte[] items, int bufferLength, CancellationToken cancellationToken = default)
{
	try
	{
		await SendInfoSqlToBufferAsync(items, bufferLength, cancellationToken).ConfigureAwait(false);
	}
	catch (IOException ex)
	{
		throw IscException.ForIOException(ex);
	}
}

// Writes the op_info_sql packet body (not flushed here).
protected void SendInfoSqlToBuffer(byte[] items, int bufferLength)
{
	_database.Xdr.Write(IscCodes.op_info_sql);
	_database.Xdr.Write(_handle);
	_database.Xdr.Write(0);
	_database.Xdr.WriteBuffer(items, items.Length);
	_database.Xdr.Write(bufferLength);
}
protected async ValueTask SendInfoSqlToBufferAsync(byte[] items, int bufferLength, CancellationToken cancellationToken = default)
{
	await _database.Xdr.WriteAsync(IscCodes.op_info_sql, cancellationToken).ConfigureAwait(false);
	await _database.Xdr.WriteAsync(_handle, cancellationToken).ConfigureAwait(false);
	await _database.Xdr.WriteAsync(0, cancellationToken).ConfigureAwait(false);
	await _database.Xdr.WriteBufferAsync(items, items.Length, cancellationToken).ConfigureAwait(false);
	await _database.Xdr.WriteAsync(bufferLength, cancellationToken).ConfigureAwait(false);
}

protected byte[] ProcessInfoSqlResponse(GenericResponse response)
{
	Debug.Assert(response.Data != null && response.Data.Length > 0);
	return response.Data;
}
// NOTE(review): return type restored to ValueTask<byte[]> (generic argument was lost in
// extraction); the method wraps response.Data via ValueTask.FromResult.
protected ValueTask<byte[]> ProcessInfoSqlResponseAsync(GenericResponse response, CancellationToken cancellationToken = default)
{
	Debug.Assert(response.Data != null && response.Data.Length > 0);
	return ValueTask.FromResult(response.Data);
}

#endregion

#region op_free_statement methods

// Releases server-side statement resources (DSQL_close / DSQL_drop).
protected override void Free(int option)
{
	if (FreeNotNeeded(option))
		return;

	DoFreePacket(option);
	ProcessFreeResponse(_database.ReadResponse());
}
protected override async ValueTask FreeAsync(int option, CancellationToken cancellationToken = default)
{
	if (FreeNotNeeded(option))
		return;

	await DoFreePacketAsync(option, cancellationToken).ConfigureAwait(false);
	await ProcessFreeResponseAsync(await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false), cancellationToken).ConfigureAwait(false);
}
protected bool FreeNotNeeded(int option)
{
	// does not seem to be possible or necessary to close an execute procedure statement
	if (StatementType == DbStatementType.StoredProcedure && option == IscCodes.DSQL_close)
	{
		return true;
	}
	else
	{
		return false;
	}
}
// Writes and flushes the op_free_statement packet; DSQL_drop also discards the cached descriptors.
protected void DoFreePacket(int option)
{
	try
	{
		_database.Xdr.Write(IscCodes.op_free_statement);
		_database.Xdr.Write(_handle);
		_database.Xdr.Write(option);
		_database.Xdr.Flush();

		if (option == IscCodes.DSQL_drop)
		{
			_parameters = null;
			_fields = null;
		}

		Clear();
	}
	catch (IOException ex)
	{
		State = StatementState.Error;
		throw IscException.ForIOException(ex);
	}
}
protected async ValueTask DoFreePacketAsync(int option, CancellationToken cancellationToken = default)
{
	try
	{
		await _database.Xdr.WriteAsync(IscCodes.op_free_statement, cancellationToken).ConfigureAwait(false);
		await _database.Xdr.WriteAsync(_handle, cancellationToken).ConfigureAwait(false);
		await _database.Xdr.WriteAsync(option, cancellationToken).ConfigureAwait(false);
		await _database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);

		if (option == IscCodes.DSQL_drop)
		{
			_parameters = null;
			_fields = null;
		}

		Clear();
	}
	catch (IOException ex)
	{
		State = StatementState.Error;
		throw IscException.ForIOException(ex);
	}
}
// op_free_statement responses carry no payload that needs processing.
protected void ProcessFreeResponse(IResponse response)
{
}
protected ValueTask ProcessFreeResponseAsync(IResponse response, CancellationToken cancellationToken = default)
{
	return ValueTask.CompletedTask;
}

#endregion

#region op_allocate_statement methods

// Buffers the op_allocate_statement packet for this connection.
protected void SendAllocateToBuffer()
{
	_database.Xdr.Write(IscCodes.op_allocate_statement);
	_database.Xdr.Write(_database.Handle);
}
protected async ValueTask SendAllocateToBufferAsync(CancellationToken cancellationToken = default)
{
	await _database.Xdr.WriteAsync(IscCodes.op_allocate_statement, cancellationToken).ConfigureAwait(false);
	await _database.Xdr.WriteAsync(_database.Handle, cancellationToken).ConfigureAwait(false);
}
// Stores the server-assigned statement handle and resets per-statement state.
protected void ProcessAllocateResponse(GenericResponse response)
{
	_handle = response.ObjectHandle;
	_allRowsFetched = false;
	State = StatementState.Allocated;
	StatementType = DbStatementType.None;
}
protected ValueTask ProcessAllocateResponseAsync(GenericResponse response, CancellationToken cancellationToken = default)
{
	_handle = response.ObjectHandle;
_allRowsFetched = false;
	State = StatementState.Allocated;
	StatementType = DbStatementType.None;
	return ValueTask.CompletedTask;
}

#endregion

#region op_execute/op_execute2 methods

// Buffers the op_execute (or op_execute2 for stored procedures) packet, including
// the BLR-encoded parameter format (from _parameters.ToBlr()) and the packed
// parameter values produced by GetParameterData.
protected virtual void SendExecuteToBuffer(int timeout, IDescriptorFiller descriptorFiller)
{
	// this may throw error, so it needs to be before any writing
	var parametersData = GetParameterData(descriptorFiller, 0);

	if (StatementType == DbStatementType.StoredProcedure)
	{
		_database.Xdr.Write(IscCodes.op_execute2);
	}
	else
	{
		_database.Xdr.Write(IscCodes.op_execute);
	}

	_database.Xdr.Write(_handle);
	_database.Xdr.Write(_transaction.Handle);

	if (_parameters != null)
	{
		_database.Xdr.WriteBuffer(_parameters.ToBlr().Data);
		_database.Xdr.Write(0); // Message number
		_database.Xdr.Write(1); // Number of messages
		_database.Xdr.WriteBytes(parametersData, parametersData.Length);
	}
	else
	{
		_database.Xdr.WriteBuffer(null);
		_database.Xdr.Write(0);
		_database.Xdr.Write(0);
	}

	// op_execute2 additionally declares the expected output message format.
	if (StatementType == DbStatementType.StoredProcedure)
	{
		_database.Xdr.WriteBuffer(_fields?.ToBlr().Data);
		_database.Xdr.Write(0); // Output message number
	}
}
// Async counterpart of SendExecuteToBuffer.
protected virtual async ValueTask SendExecuteToBufferAsync(int timeout, IDescriptorFiller descriptorFiller, CancellationToken cancellationToken = default)
{
	// this may throw error, so it needs to be before any writing
	var parametersData = await GetParameterDataAsync(descriptorFiller, 0, cancellationToken).ConfigureAwait(false);

	if (StatementType == DbStatementType.StoredProcedure)
	{
		await _database.Xdr.WriteAsync(IscCodes.op_execute2, cancellationToken).ConfigureAwait(false);
	}
	else
	{
		await _database.Xdr.WriteAsync(IscCodes.op_execute, cancellationToken).ConfigureAwait(false);
	}

	await _database.Xdr.WriteAsync(_handle, cancellationToken).ConfigureAwait(false);
	await _database.Xdr.WriteAsync(_transaction.Handle, cancellationToken).ConfigureAwait(false);

	if (_parameters != null)
	{
		await _database.Xdr.WriteBufferAsync(_parameters.ToBlr().Data, cancellationToken).ConfigureAwait(false);
		await _database.Xdr.WriteAsync(0, cancellationToken).ConfigureAwait(false); // Message number
		await _database.Xdr.WriteAsync(1, cancellationToken).ConfigureAwait(false); // Number of messages
		await _database.Xdr.WriteBytesAsync(parametersData, parametersData.Length, cancellationToken).ConfigureAwait(false);
	}
	else
	{
		await _database.Xdr.WriteBufferAsync(null, cancellationToken).ConfigureAwait(false);
		await _database.Xdr.WriteAsync(0, cancellationToken).ConfigureAwait(false);
		await _database.Xdr.WriteAsync(0, cancellationToken).ConfigureAwait(false);
	}

	if (StatementType == DbStatementType.StoredProcedure)
	{
		await _database.Xdr.WriteBufferAsync(_fields?.ToBlr().Data, cancellationToken).ConfigureAwait(false);
		await _database.Xdr.WriteAsync(0, cancellationToken).ConfigureAwait(false); // Output message number
	}
}
// op_execute responses carry no payload that needs processing.
protected void ProcessExecuteResponse(GenericResponse response)
{
}
protected ValueTask ProcessExecuteResponseAsync(GenericResponse response, CancellationToken cancellationToken = default)
{
	return ValueTask.CompletedTask;
}
// Reads the stored procedure output row announced by the op_execute2 SqlResponse.
protected void ProcessStoredProcedureExecuteResponse(SqlResponse response)
{
	try
	{
		if (response.Count > 0)
		{
			OutputParameters.Enqueue(ReadRow());
		}
	}
	catch (IOException ex)
	{
		throw IscException.ForIOException(ex);
	}
}
protected async ValueTask ProcessStoredProcedureExecuteResponseAsync(SqlResponse response, CancellationToken cancellationToken = default)
{
	try
	{
		if (response.Count > 0)
		{
			OutputParameters.Enqueue(await ReadRowAsync(cancellationToken).ConfigureAwait(false));
		}
	}
	catch (IOException ex)
	{
		throw IscException.ForIOException(ex);
	}
}

#endregion

// Unhooks the transaction-update handler and marks the statement Closed.
protected override void TransactionUpdated(object sender, EventArgs e)
{
	if (Transaction != null && TransactionUpdate != null)
	{
		Transaction.Update -= TransactionUpdate;
	}

	State = StatementState.Closed;
	TransactionUpdate = null;
	_allRowsFetched = false;
}

// Thin wrapper over ParseTruncSqlInfo.
protected Descriptor[] ParseSqlInfo(byte[] info, byte[] items, Descriptor[] rowDescs)
{
	return ParseTruncSqlInfo(info, items, rowDescs);
}
// Thin wrapper over ParseTruncSqlInfoAsync.
// NOTE(review): return type restored to ValueTask<Descriptor[]> (generic argument was lost in
// extraction); it must match the sync ParseSqlInfo returning Descriptor[].
protected ValueTask<Descriptor[]> ParseSqlInfoAsync(byte[] info, byte[] items, Descriptor[] rowDescs, CancellationToken cancellationToken = default)
{
	return ParseTruncSqlInfoAsync(info, items, rowDescs, cancellationToken);
}
// Walks the isc_info_sql describe/bind buffer and fills rowDescs (index 0: select
// items, index 1: bind items). When the server truncates the buffer
// (isc_info_truncated), the request is rebuilt with isc_info_sql_sqlda_start set
// to the count of already-processed items and re-issued via GetSqlInfo.
protected Descriptor[] ParseTruncSqlInfo(byte[] info, byte[] items, Descriptor[] rowDescs)
{
	var currentPosition = 0;
	var currentDescriptorIndex = -1;
	var currentItemIndex = 0;
	while (info[currentPosition] != IscCodes.isc_info_end)
	{
		byte item;
		while ((item = info[currentPosition++]) != IscCodes.isc_info_sql_describe_end)
		{
			switch (item)
			{
				case IscCodes.isc_info_truncated:
					currentItemIndex--;

					// NOTE(review): element type restored to List<byte> (generic argument was
					// lost in extraction); the list holds info-item bytes passed to GetSqlInfo.
					var newItems = new List<byte>(items.Length);
					var part = 0;
					var chock = 0;
					for (var i = 0; i < items.Length; i++)
					{
						if (items[i] == IscCodes.isc_info_sql_describe_end)
						{
							// Prefix each SQLDA section with a 2-byte "start at item" marker.
							newItems.Insert(chock, IscCodes.isc_info_sql_sqlda_start);
							newItems.Insert(chock + 1, 2);
							var processedItems = (rowDescs[part] != null ? rowDescs[part].Count : (short)0);
							newItems.Insert(chock + 2, (byte)((part == currentDescriptorIndex ? currentItemIndex : processedItems) & 255));
							newItems.Insert(chock + 3, (byte)((part == currentDescriptorIndex ? currentItemIndex : processedItems) >> 8));
							part++;
							chock = i + 4 + 1;
						}
						newItems.Add(items[i]);
					}

					info = GetSqlInfo(newItems.ToArray(), info.Length);
					currentPosition = 0;
					currentDescriptorIndex = -1;
					goto Break;

				case IscCodes.isc_info_sql_select:
				case IscCodes.isc_info_sql_bind:
					currentDescriptorIndex++;

					if (info[currentPosition] == IscCodes.isc_info_truncated)
						break;

					currentPosition++;
					var len = (int)IscHelper.VaxInteger(info, currentPosition, 2);
					currentPosition += 2;
					if (rowDescs[currentDescriptorIndex] == null)
					{
						var n = IscHelper.VaxInteger(info, currentPosition, len);
						rowDescs[currentDescriptorIndex] = new Descriptor((short)n);
						if (n == 0)
						{
							currentPosition += len;
							goto Break;
						}
					}
					currentPosition += len;
					break;

				case IscCodes.isc_info_sql_sqlda_seq:
					len = (int)IscHelper.VaxInteger(info, currentPosition, 2);
					currentPosition += 2;
					currentItemIndex = (int)IscHelper.VaxInteger(info, currentPosition, len);
					currentPosition += len;
					break;

				// The remaining items describe the column at currentItemIndex (1-based).
				case IscCodes.isc_info_sql_type:
					len = (int)IscHelper.VaxInteger(info, currentPosition, 2);
					currentPosition += 2;
					rowDescs[currentDescriptorIndex][currentItemIndex - 1].DataType = (short)IscHelper.VaxInteger(info, currentPosition, len);
					currentPosition += len;
					break;

				case IscCodes.isc_info_sql_sub_type:
					len = (int)IscHelper.VaxInteger(info, currentPosition, 2);
					currentPosition += 2;
					rowDescs[currentDescriptorIndex][currentItemIndex - 1].SubType = (short)IscHelper.VaxInteger(info, currentPosition, len);
					currentPosition += len;
					break;

				case IscCodes.isc_info_sql_scale:
					len = (int)IscHelper.VaxInteger(info, currentPosition, 2);
					currentPosition += 2;
					rowDescs[currentDescriptorIndex][currentItemIndex - 1].NumericScale = (short)IscHelper.VaxInteger(info, currentPosition, len);
					currentPosition += len;
					break;

				case IscCodes.isc_info_sql_length:
					len = (int)IscHelper.VaxInteger(info, currentPosition, 2);
					currentPosition += 2;
					rowDescs[currentDescriptorIndex][currentItemIndex - 1].Length = (short)IscHelper.VaxInteger(info, currentPosition, len);
					currentPosition += len;
					break;

				case IscCodes.isc_info_sql_field:
					len = (int)IscHelper.VaxInteger(info, currentPosition, 2);
					currentPosition += 2;
					rowDescs[currentDescriptorIndex][currentItemIndex - 1].Name = _database.Charset.GetString(info, currentPosition, len);
					currentPosition += len;
					break;

				case IscCodes.isc_info_sql_relation:
					len = (int)IscHelper.VaxInteger(info, currentPosition, 2);
					currentPosition += 2;
					rowDescs[currentDescriptorIndex][currentItemIndex - 1].Relation = _database.Charset.GetString(info, currentPosition, len);
					currentPosition += len;
					break;

				case IscCodes.isc_info_sql_owner:
					len = (int)IscHelper.VaxInteger(info, currentPosition, 2);
					currentPosition += 2;
					rowDescs[currentDescriptorIndex][currentItemIndex - 1].Owner = _database.Charset.GetString(info, currentPosition, len);
					currentPosition += len;
					break;

				case IscCodes.isc_info_sql_alias:
					len = (int)IscHelper.VaxInteger(info, currentPosition, 2);
					currentPosition += 2;
					rowDescs[currentDescriptorIndex][currentItemIndex - 1].Alias = _database.Charset.GetString(info, currentPosition, len);
					currentPosition += len;
					break;

				default:
					throw IscException.ForErrorCode(IscCodes.isc_dsql_sqlda_err);
			}
		}
		// just to get out of the loop
		Break:
		{ }
	}
	return rowDescs;
}
// Async counterpart of ParseTruncSqlInfo.
// NOTE(review): return type restored to ValueTask<Descriptor[]> (generic argument was lost in extraction).
protected async ValueTask<Descriptor[]> ParseTruncSqlInfoAsync(byte[] info, byte[] items, Descriptor[] rowDescs, CancellationToken cancellationToken = default)
{
	var currentPosition = 0;
	var currentDescriptorIndex = -1;
	var currentItemIndex = 0;
	while (info[currentPosition] != IscCodes.isc_info_end)
	{
		byte item;
		while ((item = info[currentPosition++]) != IscCodes.isc_info_sql_describe_end)
		{
			switch (item)
			{
				case IscCodes.isc_info_truncated:
					currentItemIndex--;

					// NOTE(review): element type restored to List<byte>, as in the sync variant above.
					var newItems = new List<byte>(items.Length);
					var part = 0;
					var chock = 0;
					for (var i = 0; i < items.Length; i++)
					{
						if (items[i] == IscCodes.isc_info_sql_describe_end)
						{
							newItems.Insert(chock, IscCodes.isc_info_sql_sqlda_start);
							newItems.Insert(chock + 1, 2);
							var processedItems = (rowDescs[part] != null ?
rowDescs[part].Count : (short)0);
							newItems.Insert(chock + 2, (byte)((part == currentDescriptorIndex ? currentItemIndex : processedItems) & 255));
							newItems.Insert(chock + 3, (byte)((part == currentDescriptorIndex ? currentItemIndex : processedItems) >> 8));
							part++;
							chock = i + 4 + 1;
						}
						newItems.Add(items[i]);
					}

					info = await GetSqlInfoAsync(newItems.ToArray(), info.Length, cancellationToken).ConfigureAwait(false);
					currentPosition = 0;
					currentDescriptorIndex = -1;
					goto Break;

				case IscCodes.isc_info_sql_select:
				case IscCodes.isc_info_sql_bind:
					currentDescriptorIndex++;

					if (info[currentPosition] == IscCodes.isc_info_truncated)
						break;

					currentPosition++;
					var len = (int)IscHelper.VaxInteger(info, currentPosition, 2);
					currentPosition += 2;
					if (rowDescs[currentDescriptorIndex] == null)
					{
						var n = IscHelper.VaxInteger(info, currentPosition, len);
						rowDescs[currentDescriptorIndex] = new Descriptor((short)n);
						if (n == 0)
						{
							currentPosition += len;
							goto Break;
						}
					}
					currentPosition += len;
					break;

				case IscCodes.isc_info_sql_sqlda_seq:
					len = (int)IscHelper.VaxInteger(info, currentPosition, 2);
					currentPosition += 2;
					currentItemIndex = (int)IscHelper.VaxInteger(info, currentPosition, len);
					currentPosition += len;
					break;

				// The remaining items describe the column at currentItemIndex (1-based).
				case IscCodes.isc_info_sql_type:
					len = (int)IscHelper.VaxInteger(info, currentPosition, 2);
					currentPosition += 2;
					rowDescs[currentDescriptorIndex][currentItemIndex - 1].DataType = (short)IscHelper.VaxInteger(info, currentPosition, len);
					currentPosition += len;
					break;

				case IscCodes.isc_info_sql_sub_type:
					len = (int)IscHelper.VaxInteger(info, currentPosition, 2);
					currentPosition += 2;
					rowDescs[currentDescriptorIndex][currentItemIndex - 1].SubType = (short)IscHelper.VaxInteger(info, currentPosition, len);
					currentPosition += len;
					break;

				case IscCodes.isc_info_sql_scale:
					len = (int)IscHelper.VaxInteger(info, currentPosition, 2);
					currentPosition += 2;
					rowDescs[currentDescriptorIndex][currentItemIndex - 1].NumericScale = (short)IscHelper.VaxInteger(info, currentPosition, len);
					currentPosition += len;
					break;

				case IscCodes.isc_info_sql_length:
					len = (int)IscHelper.VaxInteger(info, currentPosition, 2);
					currentPosition += 2;
					rowDescs[currentDescriptorIndex][currentItemIndex - 1].Length = (short)IscHelper.VaxInteger(info, currentPosition, len);
					currentPosition += len;
					break;

				case IscCodes.isc_info_sql_field:
					len = (int)IscHelper.VaxInteger(info, currentPosition, 2);
					currentPosition += 2;
					rowDescs[currentDescriptorIndex][currentItemIndex - 1].Name = _database.Charset.GetString(info, currentPosition, len);
					currentPosition += len;
					break;

				case IscCodes.isc_info_sql_relation:
					len = (int)IscHelper.VaxInteger(info, currentPosition, 2);
					currentPosition += 2;
					rowDescs[currentDescriptorIndex][currentItemIndex - 1].Relation = _database.Charset.GetString(info, currentPosition, len);
					currentPosition += len;
					break;

				case IscCodes.isc_info_sql_owner:
					len = (int)IscHelper.VaxInteger(info, currentPosition, 2);
					currentPosition += 2;
					rowDescs[currentDescriptorIndex][currentItemIndex - 1].Owner = _database.Charset.GetString(info, currentPosition, len);
					currentPosition += len;
					break;

				case IscCodes.isc_info_sql_alias:
					len = (int)IscHelper.VaxInteger(info, currentPosition, 2);
					currentPosition += 2;
					rowDescs[currentDescriptorIndex][currentItemIndex - 1].Alias = _database.Charset.GetString(info, currentPosition, len);
					currentPosition += len;
					break;

				default:
					throw IscException.ForErrorCode(IscCodes.isc_dsql_sqlda_err);
			}
		}
		// just to get out of the loop
		Break:
		{ }
	}
	return rowDescs;
}

// Serializes the current parameter values into an XDR byte stream
// (each value followed by its null flag). Returns null when the statement
// has no parameters.
protected virtual byte[] WriteParameters()
{
	if (_parameters == null)
		return null;

	using (var ms = new MemoryStream(256))
	{
		var xdr = new XdrReaderWriter(new DataProviderStreamWrapper(ms), _database.Charset);
		for (var i = 0; i < _parameters.Count; i++)
		{
			var field = _parameters[i];
			try
			{
				WriteRawParameter(xdr, field);
				xdr.Write(field.NullFlag);
			}
			catch (IOException ex)
			{
				throw IscException.ForIOException(ex);
			}
		}
		xdr.Flush();
		return ms.ToArray();
	}
}
// Async counterpart of WriteParameters; the method name and parameter list continue on the following source line.
// NOTE(review): return type restored to ValueTask<byte[]> (generic argument was lost in extraction);
// it must match the sync WriteParameters returning byte[].
protected virtual async ValueTask<byte[]>
WriteParametersAsync(CancellationToken cancellationToken = default)
{
	// Serializes parameter values (value then null flag) into an XDR byte stream;
	// null when the statement has no parameters.
	if (_parameters == null)
		return null;

	using (var ms = new MemoryStream(256))
	{
		var xdr = new XdrReaderWriter(new DataProviderStreamWrapper(ms), _database.Charset);
		for (var i = 0; i < _parameters.Count; i++)
		{
			var field = _parameters[i];
			try
			{
				await WriteRawParameterAsync(xdr, field, cancellationToken).ConfigureAwait(false);
				await xdr.WriteAsync(field.NullFlag, cancellationToken).ConfigureAwait(false);
			}
			catch (IOException ex)
			{
				throw IscException.ForIOException(ex);
			}
		}
		await xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
		return ms.ToArray();
	}
}

// Writes a single non-null parameter value in XDR wire format according to its DbDataType.
// CHAR/VARCHAR values are length-checked against the declared field size and raise
// isc_string_truncation when they do not fit.
protected void WriteRawParameter(IXdrWriter xdr, DbField field)
{
	if (field.DbDataType != DbDataType.Null)
	{
		field.FixNull();

		switch (field.DbDataType)
		{
			case DbDataType.Char:
				if (field.Charset.IsOctetsCharset)
				{
					xdr.WriteOpaque(field.DbValue.GetBinary(), field.Length);
				}
				else if (field.Charset.IsNoneCharset)
				{
					var bvalue = field.Charset.GetBytes(field.DbValue.GetString());
					if (bvalue.Length > field.Length)
					{
						throw IscException.ForErrorCodes(new[] { IscCodes.isc_arith_except, IscCodes.isc_string_truncation });
					}
					xdr.WriteOpaque(bvalue, field.Length);
				}
				else
				{
					var svalue = field.DbValue.GetString();
					if ((field.Length % field.Charset.BytesPerCharacter) == 0 && svalue.EnumerateRunesToChars().Count() > field.CharCount)
					{
						throw IscException.ForErrorCodes(new[] { IscCodes.isc_arith_except, IscCodes.isc_string_truncation });
					}
					xdr.WriteOpaque(field.Charset.GetBytes(svalue), field.Length);
				}
				break;

			case DbDataType.VarChar:
				if (field.Charset.IsOctetsCharset)
				{
					xdr.WriteBuffer(field.DbValue.GetBinary());
				}
				else if (field.Charset.IsNoneCharset)
				{
					var bvalue = field.Charset.GetBytes(field.DbValue.GetString());
					if (bvalue.Length > field.Length)
					{
						throw IscException.ForErrorCodes(new[] { IscCodes.isc_arith_except, IscCodes.isc_string_truncation });
					}
					xdr.WriteBuffer(bvalue);
				}
				else
				{
					var svalue = field.DbValue.GetString();
					if ((field.Length % field.Charset.BytesPerCharacter) == 0 && svalue.EnumerateRunesToChars().Count() > field.CharCount)
					{
						throw IscException.ForErrorCodes(new[] { IscCodes.isc_arith_except, IscCodes.isc_string_truncation });
					}
					xdr.WriteBuffer(field.Charset.GetBytes(svalue));
				}
				break;

			case DbDataType.SmallInt:
				xdr.Write(field.DbValue.GetInt16());
				break;

			case DbDataType.Integer:
				xdr.Write(field.DbValue.GetInt32());
				break;

			// Array/Binary/Text handles and BigInt are all transmitted as 64-bit values.
			case DbDataType.BigInt:
			case DbDataType.Array:
			case DbDataType.Binary:
			case DbDataType.Text:
				xdr.Write(field.DbValue.GetInt64());
				break;

			case DbDataType.Decimal:
			case DbDataType.Numeric:
				xdr.Write(field.DbValue.GetDecimal(), field.DataType, field.NumericScale);
				break;

			case DbDataType.Float:
				xdr.Write(field.DbValue.GetFloat());
				break;

			case DbDataType.Guid:
				xdr.Write(field.DbValue.GetGuid(), field.SqlType);
				break;

			case DbDataType.Double:
				xdr.Write(field.DbValue.GetDouble());
				break;

			case DbDataType.Date:
				xdr.Write(field.DbValue.GetDate());
				break;

			case DbDataType.Time:
				xdr.Write(field.DbValue.GetTime());
				break;

			case DbDataType.TimeStamp:
				xdr.Write(field.DbValue.GetDate());
				xdr.Write(field.DbValue.GetTime());
				break;

			case DbDataType.Boolean:
				xdr.Write(field.DbValue.GetBoolean());
				break;

			case DbDataType.TimeStampTZ:
				xdr.Write(field.DbValue.GetDate());
				xdr.Write(field.DbValue.GetTime());
				xdr.Write(field.DbValue.GetTimeZoneId());
				break;

			// "Ex" time zone variants append an extra zero short after the zone id.
			case DbDataType.TimeStampTZEx:
				xdr.Write(field.DbValue.GetDate());
				xdr.Write(field.DbValue.GetTime());
				xdr.Write(field.DbValue.GetTimeZoneId());
				xdr.Write((short)0);
				break;

			case DbDataType.TimeTZ:
				xdr.Write(field.DbValue.GetTime());
				xdr.Write(field.DbValue.GetTimeZoneId());
				break;

			case DbDataType.TimeTZEx:
				xdr.Write(field.DbValue.GetTime());
				xdr.Write(field.DbValue.GetTimeZoneId());
				xdr.Write((short)0);
				break;

			case DbDataType.Dec16:
				xdr.Write(field.DbValue.GetDecFloat(), 16);
				break;

			case DbDataType.Dec34:
				xdr.Write(field.DbValue.GetDecFloat(), 34);
				break;

			case DbDataType.Int128:
				xdr.Write(field.DbValue.GetInt128());
				break;

			default:
				throw
IscException.ForStrParam($"Unknown SQL data type: {field.DataType}.");
		}
	}
}

// Async counterpart of WriteRawParameter: writes a single non-null parameter value
// in XDR wire format according to its DbDataType, with the same CHAR/VARCHAR
// truncation checks.
protected async ValueTask WriteRawParameterAsync(IXdrWriter xdr, DbField field, CancellationToken cancellationToken = default)
{
	if (field.DbDataType != DbDataType.Null)
	{
		field.FixNull();

		switch (field.DbDataType)
		{
			case DbDataType.Char:
				if (field.Charset.IsOctetsCharset)
				{
					await xdr.WriteOpaqueAsync(await field.DbValue.GetBinaryAsync(cancellationToken).ConfigureAwait(false), field.Length, cancellationToken).ConfigureAwait(false);
				}
				else if (field.Charset.IsNoneCharset)
				{
					var bvalue = field.Charset.GetBytes(await field.DbValue.GetStringAsync(cancellationToken).ConfigureAwait(false));
					if (bvalue.Length > field.Length)
					{
						throw IscException.ForErrorCodes(new[] { IscCodes.isc_arith_except, IscCodes.isc_string_truncation });
					}
					await xdr.WriteOpaqueAsync(bvalue, field.Length, cancellationToken).ConfigureAwait(false);
				}
				else
				{
					var svalue = await field.DbValue.GetStringAsync(cancellationToken).ConfigureAwait(false);
					if ((field.Length % field.Charset.BytesPerCharacter) == 0 && svalue.EnumerateRunesToChars().Count() > field.CharCount)
					{
						throw IscException.ForErrorCodes(new[] { IscCodes.isc_arith_except, IscCodes.isc_string_truncation });
					}
					await xdr.WriteOpaqueAsync(field.Charset.GetBytes(svalue), field.Length, cancellationToken).ConfigureAwait(false);
				}
				break;

			case DbDataType.VarChar:
				if (field.Charset.IsOctetsCharset)
				{
					await xdr.WriteBufferAsync(await field.DbValue.GetBinaryAsync(cancellationToken).ConfigureAwait(false), cancellationToken).ConfigureAwait(false);
				}
				else if (field.Charset.IsNoneCharset)
				{
					var bvalue = field.Charset.GetBytes(await field.DbValue.GetStringAsync(cancellationToken).ConfigureAwait(false));
					if (bvalue.Length > field.Length)
					{
						throw IscException.ForErrorCodes(new[] { IscCodes.isc_arith_except, IscCodes.isc_string_truncation });
					}
					await xdr.WriteBufferAsync(bvalue, cancellationToken).ConfigureAwait(false);
				}
				else
				{
					var svalue = await field.DbValue.GetStringAsync(cancellationToken).ConfigureAwait(false);
					if ((field.Length % field.Charset.BytesPerCharacter) == 0 && svalue.EnumerateRunesToChars().Count() > field.CharCount)
					{
						throw IscException.ForErrorCodes(new[] { IscCodes.isc_arith_except, IscCodes.isc_string_truncation });
					}
					await xdr.WriteBufferAsync(field.Charset.GetBytes(svalue), cancellationToken).ConfigureAwait(false);
				}
				break;

			case DbDataType.SmallInt:
				await xdr.WriteAsync(field.DbValue.GetInt16(), cancellationToken).ConfigureAwait(false);
				break;

			case DbDataType.Integer:
				await xdr.WriteAsync(field.DbValue.GetInt32(), cancellationToken).ConfigureAwait(false);
				break;

			// Array/Binary/Text handles and BigInt are all transmitted as 64-bit values.
			case DbDataType.BigInt:
			case DbDataType.Array:
			case DbDataType.Binary:
			case DbDataType.Text:
				await xdr.WriteAsync(field.DbValue.GetInt64(), cancellationToken).ConfigureAwait(false);
				break;

			case DbDataType.Decimal:
			case DbDataType.Numeric:
				await xdr.WriteAsync(field.DbValue.GetDecimal(), field.DataType, field.NumericScale, cancellationToken).ConfigureAwait(false);
				break;

			case DbDataType.Float:
				await xdr.WriteAsync(field.DbValue.GetFloat(), cancellationToken).ConfigureAwait(false);
				break;

			case DbDataType.Guid:
				await xdr.WriteAsync(field.DbValue.GetGuid(), field.SqlType, cancellationToken).ConfigureAwait(false);
				break;

			case DbDataType.Double:
				await xdr.WriteAsync(field.DbValue.GetDouble(), cancellationToken).ConfigureAwait(false);
				break;

			case DbDataType.Date:
				await xdr.WriteAsync(field.DbValue.GetDate(), cancellationToken).ConfigureAwait(false);
				break;

			case DbDataType.Time:
				await xdr.WriteAsync(field.DbValue.GetTime(), cancellationToken).ConfigureAwait(false);
				break;

			case DbDataType.TimeStamp:
				await xdr.WriteAsync(field.DbValue.GetDate(), cancellationToken).ConfigureAwait(false);
				await xdr.WriteAsync(field.DbValue.GetTime(), cancellationToken).ConfigureAwait(false);
				break;

			case DbDataType.Boolean:
				await xdr.WriteAsync(field.DbValue.GetBoolean(), cancellationToken).ConfigureAwait(false);
				break;

			case DbDataType.TimeStampTZ:
				await
xdr.WriteAsync(field.DbValue.GetDate(), cancellationToken).ConfigureAwait(false); await xdr.WriteAsync(field.DbValue.GetTime(), cancellationToken).ConfigureAwait(false); await xdr.WriteAsync(field.DbValue.GetTimeZoneId(), cancellationToken).ConfigureAwait(false); break; case DbDataType.TimeStampTZEx: await xdr.WriteAsync(field.DbValue.GetDate(), cancellationToken).ConfigureAwait(false); await xdr.WriteAsync(field.DbValue.GetTime(), cancellationToken).ConfigureAwait(false); await xdr.WriteAsync(field.DbValue.GetTimeZoneId(), cancellationToken).ConfigureAwait(false); await xdr.WriteAsync((short)0, cancellationToken).ConfigureAwait(false); break; case DbDataType.TimeTZ: await xdr.WriteAsync(field.DbValue.GetTime(), cancellationToken).ConfigureAwait(false); await xdr.WriteAsync(field.DbValue.GetTimeZoneId(), cancellationToken).ConfigureAwait(false); break; case DbDataType.TimeTZEx: await xdr.WriteAsync(field.DbValue.GetTime(), cancellationToken).ConfigureAwait(false); await xdr.WriteAsync(field.DbValue.GetTimeZoneId(), cancellationToken).ConfigureAwait(false); await xdr.WriteAsync((short)0, cancellationToken).ConfigureAwait(false); break; case DbDataType.Dec16: await xdr.WriteAsync(field.DbValue.GetDecFloat(), 16, cancellationToken).ConfigureAwait(false); break; case DbDataType.Dec34: await xdr.WriteAsync(field.DbValue.GetDecFloat(), 34, cancellationToken).ConfigureAwait(false); break; case DbDataType.Int128: await xdr.WriteAsync(field.DbValue.GetInt128(), cancellationToken).ConfigureAwait(false); break; default: throw IscException.ForStrParam($"Unknown SQL data type: {field.DataType}."); } } } protected object ReadRawValue(IXdrReader xdr, DbField field) { var innerCharset = !_database.Charset.IsNoneCharset ? 
_database.Charset : field.Charset; switch (field.DbDataType) { case DbDataType.Char: if (field.Charset.IsOctetsCharset) { return xdr.ReadOpaque(field.Length); } else { var s = xdr.ReadString(innerCharset, field.Length); var runes = s.EnumerateRunesToChars().ToList(); if ((field.Length % field.Charset.BytesPerCharacter) == 0 && runes.Count > field.CharCount) { return new string([.. runes.Take(field.CharCount).SelectMany(x => x)]); } else { return s; } } case DbDataType.VarChar: if (field.Charset.IsOctetsCharset) { return xdr.ReadBuffer(); } else { return xdr.ReadString(innerCharset); } case DbDataType.SmallInt: return xdr.ReadInt16(); case DbDataType.Integer: return xdr.ReadInt32(); case DbDataType.Array: case DbDataType.Binary: case DbDataType.Text: case DbDataType.BigInt: return xdr.ReadInt64(); case DbDataType.Decimal: case DbDataType.Numeric: return xdr.ReadDecimal(field.DataType, field.NumericScale); case DbDataType.Float: return xdr.ReadSingle(); case DbDataType.Guid: return xdr.ReadGuid(field.SqlType); case DbDataType.Double: return xdr.ReadDouble(); case DbDataType.Date: return xdr.ReadDate(); case DbDataType.Time: return xdr.ReadTime(); case DbDataType.TimeStamp: return xdr.ReadDateTime(); case DbDataType.Boolean: return xdr.ReadBoolean(); case DbDataType.TimeStampTZ: return xdr.ReadZonedDateTime(false); case DbDataType.TimeStampTZEx: return xdr.ReadZonedDateTime(true); case DbDataType.TimeTZ: return xdr.ReadZonedTime(false); case DbDataType.TimeTZEx: return xdr.ReadZonedTime(true); case DbDataType.Dec16: return xdr.ReadDec16(); case DbDataType.Dec34: return xdr.ReadDec34(); case DbDataType.Int128: return xdr.ReadInt128(); default: throw TypeHelper.InvalidDataType((int)field.DbDataType); } } protected async ValueTask ReadRawValueAsync(IXdrReader xdr, DbField field, CancellationToken cancellationToken = default) { var innerCharset = !_database.Charset.IsNoneCharset ? 
_database.Charset : field.Charset;
	switch (field.DbDataType)
	{
		case DbDataType.Char:
			if (field.Charset.IsOctetsCharset)
			{
				return await xdr.ReadOpaqueAsync(field.Length, cancellationToken).ConfigureAwait(false);
			}
			else
			{
				var s = await xdr.ReadStringAsync(innerCharset, field.Length, cancellationToken).ConfigureAwait(false);
				var runes = s.EnumerateRunesToChars().ToList();
				// Trim trailing padding at character (rune) granularity when the
				// declared byte length is an exact multiple of bytes-per-character.
				if ((field.Length % field.Charset.BytesPerCharacter) == 0 && runes.Count > field.CharCount)
				{
					return new string([.. runes.Take(field.CharCount).SelectMany(x => x)]);
				}
				else
				{
					return s;
				}
			}
		case DbDataType.VarChar:
			if (field.Charset.IsOctetsCharset)
			{
				return await xdr.ReadBufferAsync(cancellationToken).ConfigureAwait(false);
			}
			else
			{
				return await xdr.ReadStringAsync(innerCharset, cancellationToken).ConfigureAwait(false);
			}
		case DbDataType.SmallInt:
			return await xdr.ReadInt16Async(cancellationToken).ConfigureAwait(false);
		case DbDataType.Integer:
			return await xdr.ReadInt32Async(cancellationToken).ConfigureAwait(false);
		// Array/Binary/Text arrive as 64-bit values (presumably ids — TODO confirm).
		case DbDataType.Array:
		case DbDataType.Binary:
		case DbDataType.Text:
		case DbDataType.BigInt:
			return await xdr.ReadInt64Async(cancellationToken).ConfigureAwait(false);
		case DbDataType.Decimal:
		case DbDataType.Numeric:
			return await xdr.ReadDecimalAsync(field.DataType, field.NumericScale, cancellationToken).ConfigureAwait(false);
		case DbDataType.Float:
			return await xdr.ReadSingleAsync(cancellationToken).ConfigureAwait(false);
		case DbDataType.Guid:
			return await xdr.ReadGuidAsync(field.SqlType, cancellationToken).ConfigureAwait(false);
		case DbDataType.Double:
			return await xdr.ReadDoubleAsync(cancellationToken).ConfigureAwait(false);
		case DbDataType.Date:
			return await xdr.ReadDateAsync(cancellationToken).ConfigureAwait(false);
		case DbDataType.Time:
			return await xdr.ReadTimeAsync(cancellationToken).ConfigureAwait(false);
		case DbDataType.TimeStamp:
			return await xdr.ReadDateTimeAsync(cancellationToken).ConfigureAwait(false);
		case DbDataType.Boolean:
			return await xdr.ReadBooleanAsync(cancellationToken).ConfigureAwait(false);
		case DbDataType.TimeStampTZ:
			return await xdr.ReadZonedDateTimeAsync(false, cancellationToken).ConfigureAwait(false);
		case DbDataType.TimeStampTZEx:
			return await xdr.ReadZonedDateTimeAsync(true, cancellationToken).ConfigureAwait(false);
		case DbDataType.TimeTZ:
			return await xdr.ReadZonedTimeAsync(false, cancellationToken).ConfigureAwait(false);
		case DbDataType.TimeTZEx:
			return await xdr.ReadZonedTimeAsync(true, cancellationToken).ConfigureAwait(false);
		case DbDataType.Dec16:
			return await xdr.ReadDec16Async(cancellationToken).ConfigureAwait(false);
		case DbDataType.Dec34:
			return await xdr.ReadDec34Async(cancellationToken).ConfigureAwait(false);
		case DbDataType.Int128:
			return await xdr.ReadInt128Async(cancellationToken).ConfigureAwait(false);
		default:
			throw TypeHelper.InvalidDataType((int)field.DbDataType);
	}
}

/// <summary>
/// Clears any fetched rows and output parameters and resets the fetch-complete flag.
/// </summary>
protected void Clear()
{
	if (_rows != null && _rows.Count > 0)
	{
		_rows.Clear();
	}
	if (OutputParameters != null && OutputParameters.Count > 0)
	{
		OutputParameters.Clear();
	}
	_allRowsFetched = false;
}

/// <summary>
/// Clears fetched state and additionally drops the parameter and field descriptors.
/// </summary>
protected void ClearAll()
{
	Clear();
	_parameters = null;
	_fields = null;
}

/// <summary>
/// Reads one row: for each field a raw value followed by a 32-bit null indicator
/// (-1 = NULL, 0 = value present; anything else is a protocol error).
/// </summary>
protected virtual DbValue[] ReadRow()
{
	var row = new DbValue[_fields.Count];
	try
	{
		for (var i = 0; i < _fields.Count; i++)
		{
			var value = ReadRawValue(_database.Xdr, _fields[i]);
			var sqlInd = _database.Xdr.ReadInt32();
			if (sqlInd == -1)
			{
				row[i] = new DbValue(this, _fields[i], null);
			}
			else if (sqlInd == 0)
			{
				row[i] = new DbValue(this, _fields[i], value);
			}
			else
			{
				throw IscException.ForStrParam($"Invalid {nameof(sqlInd)} value: {sqlInd}.");
			}
		}
	}
	catch (IOException ex)
	{
		throw IscException.ForIOException(ex);
	}
	return row;
}

/// <summary>
/// Async counterpart of <c>ReadRow</c>.
/// NOTE(review): return type reconstructed as <c>ValueTask&lt;DbValue[]&gt;</c> — the extracted
/// text had the generic argument stripped; the sync counterpart returns <c>DbValue[]</c>.
/// (Method continues on the next source line.)
/// </summary>
protected virtual async ValueTask<DbValue[]> ReadRowAsync(CancellationToken cancellationToken = default)
{
	var row = new DbValue[_fields.Count];
	try
	{
		for (var i = 0; i < _fields.Count; i++)
		{
			var value = await ReadRawValueAsync(_database.Xdr, _fields[i], cancellationToken).ConfigureAwait(false);
			var sqlInd = await
_database.Xdr.ReadInt32Async(cancellationToken).ConfigureAwait(false); if (sqlInd == -1) { row[i] = new DbValue(this, _fields[i], null); } else if (sqlInd == 0) { row[i] = new DbValue(this, _fields[i], value); } else { throw IscException.ForStrParam($"Invalid {nameof(sqlInd)} value: {sqlInd}."); } } } catch (IOException ex) { throw IscException.ForIOException(ex); } return row; } #endregion #region Protected Internal Methods protected internal byte[] GetParameterData(IDescriptorFiller descriptorFiller, int index) { descriptorFiller.Fill(_parameters, index); return WriteParameters(); } protected internal async ValueTask GetParameterDataAsync(IDescriptorFiller descriptorFiller, int index, CancellationToken cancellationToken = default) { await descriptorFiller.FillAsync(_parameters, index, cancellationToken).ConfigureAwait(false); return WriteParameters(); } #endregion } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/Version10/GdsTransaction.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. 
*/

//$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net)

using System;
using System.Collections.Generic;
using System.IO;
using System.Threading;
using System.Threading.Tasks;
using FirebirdSql.Data.Common;

namespace FirebirdSql.Data.Client.Managed.Version10;

/// <summary>
/// Wire-protocol (v10) implementation of a Firebird transaction. Each operation
/// writes an opcode plus arguments to the XDR stream, flushes, and reads the
/// server response; IO failures are surfaced as <c>IscException</c>.
/// NOTE(review): the generic type arguments on the <c>GetTransactionInfo*</c>
/// members were stripped in the extracted text and are reconstructed here as
/// <c>List&lt;object&gt;</c> (the result of <c>IscHelper.ParseTransactionInfo</c>).
/// </summary>
internal class GdsTransaction : TransactionBase
{
	#region Fields

	private int _handle;
	private bool _disposed;
	private GdsDatabase _database;

	#endregion

	#region Properties

	public override int Handle
	{
		get { return _handle; }
	}

	#endregion

	#region Constructors

	public GdsTransaction(GdsDatabase database)
	{
		_database = database;
		State = TransactionState.NoTransaction;
	}

	#endregion

	#region Dispose2

	public override void Dispose2()
	{
		if (!_disposed)
		{
			_disposed = true;
			// Roll back anything still open before releasing the handle.
			if (State != TransactionState.NoTransaction)
			{
				Rollback();
			}
			_database = null;
			_handle = 0;
			State = TransactionState.NoTransaction;
			base.Dispose2();
		}
	}

	public override async ValueTask Dispose2Async(CancellationToken cancellationToken = default)
	{
		if (!_disposed)
		{
			_disposed = true;
			if (State != TransactionState.NoTransaction)
			{
				await RollbackAsync(cancellationToken).ConfigureAwait(false);
			}
			_database = null;
			_handle = 0;
			State = TransactionState.NoTransaction;
			await base.Dispose2Async(cancellationToken).ConfigureAwait(false);
		}
	}

	#endregion

	#region Methods

	/// <summary>Starts a transaction with the given TPB; stores the server-assigned handle.</summary>
	public override void BeginTransaction(TransactionParameterBuffer tpb)
	{
		if (State != TransactionState.NoTransaction)
		{
			throw new InvalidOperationException();
		}
		try
		{
			_database.Xdr.Write(IscCodes.op_transaction);
			_database.Xdr.Write(_database.Handle);
			_database.Xdr.WriteBuffer(tpb.ToArray());
			_database.Xdr.Flush();
			var response = (GenericResponse)_database.ReadResponse();
			_database.TransactionCount++;
			_handle = response.ObjectHandle;
			State = TransactionState.Active;
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
	}

	public override async ValueTask BeginTransactionAsync(TransactionParameterBuffer tpb, CancellationToken cancellationToken = default)
	{
		if (State != TransactionState.NoTransaction)
		{
			throw new InvalidOperationException();
		}
		try
		{
			await _database.Xdr.WriteAsync(IscCodes.op_transaction, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.WriteAsync(_database.Handle, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.WriteBufferAsync(tpb.ToArray(), cancellationToken).ConfigureAwait(false);
			await _database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
			var response = (GenericResponse)await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false);
			_database.TransactionCount++;
			_handle = response.ObjectHandle;
			State = TransactionState.Active;
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
	}

	/// <summary>Commits and ends the transaction (notifies listeners via OnUpdate).</summary>
	public override void Commit()
	{
		EnsureActiveTransactionState();
		try
		{
			_database.Xdr.Write(IscCodes.op_commit);
			_database.Xdr.Write(_handle);
			_database.Xdr.Flush();
			_database.ReadResponse();
			_database.TransactionCount--;
			OnUpdate(EventArgs.Empty);
			State = TransactionState.NoTransaction;
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
	}

	public override async ValueTask CommitAsync(CancellationToken cancellationToken = default)
	{
		EnsureActiveTransactionState();
		try
		{
			await _database.Xdr.WriteAsync(IscCodes.op_commit, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.WriteAsync(_handle, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
			await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false);
			_database.TransactionCount--;
			OnUpdate(EventArgs.Empty);
			State = TransactionState.NoTransaction;
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
	}

	/// <summary>Rolls back and ends the transaction (notifies listeners via OnUpdate).</summary>
	public override void Rollback()
	{
		EnsureActiveTransactionState();
		try
		{
			_database.Xdr.Write(IscCodes.op_rollback);
			_database.Xdr.Write(_handle);
			_database.Xdr.Flush();
			_database.ReadResponse();
			_database.TransactionCount--;
			OnUpdate(EventArgs.Empty);
			State = TransactionState.NoTransaction;
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
	}

	public override async ValueTask RollbackAsync(CancellationToken cancellationToken = default)
	{
		EnsureActiveTransactionState();
		try
		{
			await _database.Xdr.WriteAsync(IscCodes.op_rollback, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.WriteAsync(_handle, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
			await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false);
			_database.TransactionCount--;
			OnUpdate(EventArgs.Empty);
			State = TransactionState.NoTransaction;
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
	}

	/// <summary>Commits while keeping the transaction context open (stays Active).</summary>
	public override void CommitRetaining()
	{
		EnsureActiveTransactionState();
		try
		{
			_database.Xdr.Write(IscCodes.op_commit_retaining);
			_database.Xdr.Write(_handle);
			_database.Xdr.Flush();
			_database.ReadResponse();
			State = TransactionState.Active;
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
	}

	public override async ValueTask CommitRetainingAsync(CancellationToken cancellationToken = default)
	{
		EnsureActiveTransactionState();
		try
		{
			await _database.Xdr.WriteAsync(IscCodes.op_commit_retaining, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.WriteAsync(_handle, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
			await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false);
			State = TransactionState.Active;
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
	}

	/// <summary>Rolls back while keeping the transaction context open (stays Active).</summary>
	public override void RollbackRetaining()
	{
		EnsureActiveTransactionState();
		try
		{
			_database.Xdr.Write(IscCodes.op_rollback_retaining);
			_database.Xdr.Write(_handle);
			_database.Xdr.Flush();
			_database.ReadResponse();
			State = TransactionState.Active;
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
	}

	public override async ValueTask RollbackRetainingAsync(CancellationToken cancellationToken = default)
	{
		EnsureActiveTransactionState();
		try
		{
			await _database.Xdr.WriteAsync(IscCodes.op_rollback_retaining, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.WriteAsync(_handle, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
			await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false);
			State = TransactionState.Active;
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
	}

	/// <summary>First phase of two-phase commit (op_prepare).</summary>
	public override void Prepare()
	{
		EnsureActiveTransactionState();
		try
		{
			// State is reset before the wire exchange so a failure does not leave
			// the transaction looking Active.
			State = TransactionState.NoTransaction;
			_database.Xdr.Write(IscCodes.op_prepare);
			_database.Xdr.Write(_handle);
			_database.Xdr.Flush();
			_database.ReadResponse();
			State = TransactionState.Prepared;
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
	}

	public override async ValueTask PrepareAsync(CancellationToken cancellationToken = default)
	{
		EnsureActiveTransactionState();
		try
		{
			State = TransactionState.NoTransaction;
			await _database.Xdr.WriteAsync(IscCodes.op_prepare, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.WriteAsync(_handle, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
			await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false);
			State = TransactionState.Prepared;
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
	}

	/// <summary>First phase of two-phase commit with recovery data (op_prepare2).</summary>
	public override void Prepare(byte[] buffer)
	{
		EnsureActiveTransactionState();
		try
		{
			State = TransactionState.NoTransaction;
			_database.Xdr.Write(IscCodes.op_prepare2);
			_database.Xdr.Write(_handle);
			_database.Xdr.WriteBuffer(buffer, buffer.Length);
			_database.Xdr.Flush();
			_database.ReadResponse();
			State = TransactionState.Prepared;
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
	}

	public override async ValueTask PrepareAsync(byte[] buffer, CancellationToken cancellationToken = default)
	{
		EnsureActiveTransactionState();
		try
		{
			State = TransactionState.NoTransaction;
			await _database.Xdr.WriteAsync(IscCodes.op_prepare2, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.WriteAsync(_handle, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.WriteBufferAsync(buffer, buffer.Length, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
			await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false);
			State = TransactionState.Prepared;
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
	}

	public override List<object> GetTransactionInfo(byte[] items)
	{
		return GetTransactionInfo(items, IscCodes.DEFAULT_MAX_BUFFER_SIZE);
	}

	public override ValueTask<List<object>> GetTransactionInfoAsync(byte[] items, CancellationToken cancellationToken = default)
	{
		return GetTransactionInfoAsync(items, IscCodes.DEFAULT_MAX_BUFFER_SIZE, cancellationToken);
	}

	public override List<object> GetTransactionInfo(byte[] items, int bufferLength)
	{
		var buffer = new byte[bufferLength];
		DatabaseInfo(items, buffer, buffer.Length);
		return IscHelper.ParseTransactionInfo(buffer, _database.Charset);
	}

	public override async ValueTask<List<object>> GetTransactionInfoAsync(byte[] items, int bufferLength, CancellationToken cancellationToken = default)
	{
		var buffer = new byte[bufferLength];
		await DatabaseInfoAsync(items, buffer, buffer.Length, cancellationToken).ConfigureAwait(false);
		return IscHelper.ParseTransactionInfo(buffer, _database.Charset);
	}

	#endregion

	#region Private Methods

	// Sends op_info_transaction and copies the (possibly shorter) response into buffer.
	private void DatabaseInfo(byte[] items, byte[] buffer, int bufferLength)
	{
		try
		{
			_database.Xdr.Write(IscCodes.op_info_transaction);
			_database.Xdr.Write(_handle);
			_database.Xdr.Write(GdsDatabase.Incarnation);
			_database.Xdr.WriteBuffer(items, items.Length);
			_database.Xdr.Write(bufferLength);
			_database.Xdr.Flush();
			var response = (GenericResponse)_database.ReadResponse();
			var responseLength = bufferLength;
			if (response.Data.Length < bufferLength)
			{
				responseLength = response.Data.Length;
			}
			Buffer.BlockCopy(response.Data, 0, buffer, 0, responseLength);
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
	}

	private async ValueTask DatabaseInfoAsync(byte[] items, byte[] buffer, int bufferLength, CancellationToken cancellationToken = default)
	{
		try
		{
			await _database.Xdr.WriteAsync(IscCodes.op_info_transaction, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.WriteAsync(_handle, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.WriteAsync(GdsDatabase.Incarnation, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.WriteBufferAsync(items, items.Length, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.WriteAsync(bufferLength, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
			var response = (GenericResponse)await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false);
			var responseLength = bufferLength;
			if (response.Data.Length < bufferLength)
			{
				responseLength = response.Data.Length;
			}
			Buffer.BlockCopy(response.Data, 0, buffer, 0, responseLength);
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
	}

	#endregion
}


================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/Version11/AuthResponse.cs
================================================
/*
 *	The contents of this file are subject to the Initial
 *	Developer's Public License Version 1.0 (the "License");
 *	you may not use this file except in compliance with the
 *	License. You may obtain a copy of the License at
 *	https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *	Software distributed under the License is distributed on
 *	an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *	express or implied. See the License for the specific
 *	language governing rights and limitations under the License.
 *
 *	All Rights Reserved.
*/ //$Authors = Vladimir Bodecek, Jiri Cincura (jiri@cincura.net) using System; using FirebirdSql.Data.Client.Managed; namespace FirebirdSql.Data.Client.Managed.Version11; internal class AuthResponse : IResponse { public byte[] Data { get; } public AuthResponse(byte[] data) { Data = data; } } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/Version11/GdsDatabase.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. 
*/

//$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net), Vladimir Bodecek

using System;
using System.Collections.Generic;
using System.IO;
using System.Threading;
using System.Threading.Tasks;
using FirebirdSql.Data.Client.Managed.Sspi;
using FirebirdSql.Data.Common;

namespace FirebirdSql.Data.Client.Managed.Version11;

/// <summary>
/// Wire-protocol v11 database: adds trusted (SSPI) authentication and deferred
/// ("lazy") packet processing on top of v10.
/// NOTE(review): stripped generic type arguments reconstructed throughout —
/// the deferred-packet tuple delegate types are grounded by the call sites
/// <c>method(response)</c> and <c>await methodAsync(response, cancellationToken)</c>.
/// </summary>
internal class GdsDatabase : Version10.GdsDatabase
{
	// Deferred response handlers; exactly one tuple item is non-null per entry
	// (sync handler or async handler).
	private readonly Queue<(Action<IResponse>, Func<IResponse, CancellationToken, ValueTask>)> _deferredPackets;

	public GdsDatabase(GdsConnection connection)
		: base(connection)
	{
		_deferredPackets = new Queue<(Action<IResponse>, Func<IResponse, CancellationToken, ValueTask>)>();
	}

	public override StatementBase CreateStatement()
	{
		return new GdsStatement(this);
	}

	public override StatementBase CreateStatement(TransactionBase transaction)
	{
		return new GdsStatement(this, (Version10.GdsTransaction)transaction);
	}

	/// <summary>
	/// Attaches using SSPI trusted authentication; detaches safely on failure.
	/// </summary>
	public override void AttachWithTrustedAuth(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey)
	{
		try
		{
			using (var sspiHelper = new SspiHelper())
			{
				var authData = sspiHelper.InitializeClientSecurity();
				SendTrustedAuthToBuffer(dpb, authData);
				SendAttachToBuffer(dpb, database);
				Xdr.Flush();
				var response = ReadResponse();
				response = ProcessTrustedAuthResponse(sspiHelper, response);
				ProcessAttachResponse((GenericResponse)response);
			}
		}
		catch (IscException)
		{
			SafelyDetach();
			throw;
		}
		catch (IOException ex)
		{
			SafelyDetach();
			throw IscException.ForIOException(ex);
		}
		AfterAttachActions();
	}

	public override async ValueTask AttachWithTrustedAuthAsync(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey, CancellationToken cancellationToken = default)
	{
		try
		{
			using (var sspiHelper = new SspiHelper())
			{
				var authData = sspiHelper.InitializeClientSecurity();
				await SendTrustedAuthToBufferAsync(dpb, authData, cancellationToken).ConfigureAwait(false);
				await SendAttachToBufferAsync(dpb, database, cancellationToken).ConfigureAwait(false);
				await Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
				var response = await ReadResponseAsync(cancellationToken).ConfigureAwait(false);
				response = await ProcessTrustedAuthResponseAsync(sspiHelper, response, cancellationToken).ConfigureAwait(false);
				await ProcessAttachResponseAsync((GenericResponse)response, cancellationToken).ConfigureAwait(false);
			}
		}
		catch (IscException)
		{
			await SafelyDetachAsync(cancellationToken).ConfigureAwait(false);
			throw;
		}
		catch (IOException ex)
		{
			await SafelyDetachAsync(cancellationToken).ConfigureAwait(false);
			throw IscException.ForIOException(ex);
		}
		await AfterAttachActionsAsync(cancellationToken).ConfigureAwait(false);
	}

	protected virtual void SendTrustedAuthToBuffer(DatabaseParameterBufferBase dpb, byte[] authData)
	{
		dpb.Append(IscCodes.isc_dpb_trusted_auth, authData);
	}

	protected virtual ValueTask SendTrustedAuthToBufferAsync(DatabaseParameterBufferBase dpb, byte[] authData, CancellationToken cancellationToken = default)
	{
		dpb.Append(IscCodes.isc_dpb_trusted_auth, authData);
		return ValueTask.CompletedTask;
	}

	/// <summary>
	/// Runs the SSPI round-trip loop: while the server keeps answering with
	/// <c>AuthResponse</c>, send the next client token via op_trusted_auth.
	/// Returns the first non-auth response.
	/// </summary>
	protected IResponse ProcessTrustedAuthResponse(SspiHelper sspiHelper, IResponse response)
	{
		while (response is AuthResponse authResponse)
		{
			var authData = sspiHelper.GetClientSecurity(authResponse.Data);
			Xdr.Write(IscCodes.op_trusted_auth);
			Xdr.WriteBuffer(authData);
			Xdr.Flush();
			response = ReadResponse();
		}
		return response;
	}

	protected async ValueTask<IResponse> ProcessTrustedAuthResponseAsync(SspiHelper sspiHelper, IResponse response, CancellationToken cancellationToken = default)
	{
		while (response is AuthResponse authResponse)
		{
			var authData = sspiHelper.GetClientSecurity(authResponse.Data);
			await Xdr.WriteAsync(IscCodes.op_trusted_auth, cancellationToken).ConfigureAwait(false);
			await Xdr.WriteBufferAsync(authData, cancellationToken).ConfigureAwait(false);
			await Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
			response = await ReadResponseAsync(cancellationToken).ConfigureAwait(false);
		}
		return response;
	}

	public override void CreateDatabaseWithTrustedAuth(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey)
	{
		using (var sspiHelper = new SspiHelper())
		{
			var authData = sspiHelper.InitializeClientSecurity();
			SendTrustedAuthToBuffer(dpb, authData);
			SendCreateToBuffer(dpb, database);
			Xdr.Flush();
			var response = ReadResponse();
			response = ProcessTrustedAuthResponse(sspiHelper, response);
			ProcessCreateResponse((GenericResponse)response);
		}
	}

	public override async ValueTask CreateDatabaseWithTrustedAuthAsync(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey, CancellationToken cancellationToken = default)
	{
		using (var sspiHelper = new SspiHelper())
		{
			var authData = sspiHelper.InitializeClientSecurity();
			await SendTrustedAuthToBufferAsync(dpb, authData, cancellationToken).ConfigureAwait(false);
			await SendCreateToBufferAsync(dpb, database, cancellationToken).ConfigureAwait(false);
			await Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
			var response = await ReadResponseAsync(cancellationToken).ConfigureAwait(false);
			response = await ProcessTrustedAuthResponseAsync(sspiHelper, response, cancellationToken).ConfigureAwait(false);
			await ProcessCreateResponseAsync((GenericResponse)response, cancellationToken).ConfigureAwait(false);
		}
	}

	/// <summary>
	/// v11 lazy release: the request is buffered and its response is consumed
	/// later, the next time an operation is read (see ReadOperation).
	/// </summary>
	public override void ReleaseObject(int op, int id)
	{
		try
		{
			SendReleaseObjectToBuffer(op, id);
			AppendDeferredPacket(ProcessReleaseObjectResponse);
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
	}

	public override async ValueTask ReleaseObjectAsync(int op, int id, CancellationToken cancellationToken = default)
	{
		try
		{
			await SendReleaseObjectToBufferAsync(op, id, cancellationToken).ConfigureAwait(false);
			AppendDeferredPacket(ProcessReleaseObjectResponseAsync);
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
	}

	public override async ValueTask<int> ReadOperationAsync(CancellationToken cancellationToken = default)
	{
		// Drain deferred responses first so they are not misread as the next operation.
		await ProcessDeferredPacketsAsync(cancellationToken).ConfigureAwait(false);
		return await base.ReadOperationAsync(cancellationToken).ConfigureAwait(false);
	}

	public override int ReadOperation()
	{
		ProcessDeferredPackets();
		return base.ReadOperation();
	}

	public void AppendDeferredPacket(Action<IResponse> packet)
	{
		_deferredPackets.Enqueue((packet, null));
	}

	public void AppendDeferredPacket(Func<IResponse, CancellationToken, ValueTask> packet)
	{
		_deferredPackets.Enqueue((null, packet));
	}

	private void ProcessDeferredPackets()
	{
		if (_deferredPackets.Count > 0)
		{
			// copy it to local collection and clear to not get same processing when the method is hit again from ReadSingleResponse
			var methods = _deferredPackets.ToArray();
			_deferredPackets.Clear();
			foreach (var (method, methodAsync) in methods)
			{
				var response = ReadSingleResponse();
				if (method != null)
				{
					method(response);
					continue;
				}
				if (methodAsync != null)
				{
					methodAsync(response, CancellationToken.None).GetAwaiter().GetResult();
				}
			}
		}
	}

	private async ValueTask ProcessDeferredPacketsAsync(CancellationToken cancellationToken = default)
	{
		if (_deferredPackets.Count > 0)
		{
			// copy it to local collection and clear to not get same processing when the method is hit again from ReadSingleResponse
			var methods = _deferredPackets.ToArray();
			_deferredPackets.Clear();
			foreach (var (method, methodAsync) in methods)
			{
				var response = await ReadSingleResponseAsync(cancellationToken).ConfigureAwait(false);
				if (method != null)
				{
					method(response);
					continue;
				}
				if (methodAsync != null)
				{
					await methodAsync(response, cancellationToken).ConfigureAwait(false);
				}
			}
		}
	}
}


================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/Version11/GdsServiceManager.cs
================================================
/*
 *	The contents of this file are subject to the Initial
 *	Developer's Public License Version 1.0 (the "License");
 *	you may not use this file except in compliance with the
 *	License. You may obtain a copy of the License at
 *	https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *	Software distributed under the License is distributed on
 *	an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *	express or implied.
See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. */ //$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net) using FirebirdSql.Data.Common; namespace FirebirdSql.Data.Client.Managed.Version11; internal class GdsServiceManager : Version10.GdsServiceManager { public GdsServiceManager(GdsConnection connection) : base(connection) { } protected override Version10.GdsDatabase CreateDatabase(GdsConnection connection) { return new GdsDatabase(connection); } } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/Version11/GdsStatement.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. 
*/

//$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net)

using System;
using System.IO;
using System.Threading;
using System.Threading.Tasks;
using FirebirdSql.Data.Client.Managed.Version10;
using FirebirdSql.Data.Common;

namespace FirebirdSql.Data.Client.Managed.Version11;

/// <summary>
/// Statement implementation for wire protocol version 11. Pipelines requests:
/// allocate/prepare/info packets (and execute/info packets) are buffered and
/// flushed in a single network round trip, and free-statement packets are
/// deferred via <c>AppendDeferredPacket</c> instead of being sent eagerly.
/// </summary>
internal class GdsStatement : Version10.GdsStatement
{
	#region Constructors

	public GdsStatement(GdsDatabase database)
		: base(database)
	{ }

	public GdsStatement(GdsDatabase database, Version10.GdsTransaction transaction)
		: base(database, transaction)
	{ }

	#endregion

	#region Overriden Methods

	/// <summary>
	/// Prepares <paramref name="commandText"/> in one round trip: the optional
	/// allocate packet, the prepare packet and the statement-type info packet
	/// are buffered together and flushed once, then the matching responses are
	/// read back in send order.
	/// </summary>
	/// <param name="commandText">SQL text to prepare.</param>
	public override void Prepare(string commandText)
	{
		ClearAll();
		try
		{
			// Tracks how many responses are still owed on the wire; if reading
			// one of them throws, SafeFinishFetching drains the remainder so
			// the connection stays usable.
			var numberOfResponses = 0;
			if (State == StatementState.Deallocated)
			{
				SendAllocateToBuffer();
				numberOfResponses++;
			}
			SendPrepareToBuffer(commandText);
			numberOfResponses++;
			SendInfoSqlToBuffer(StatementTypeInfoItems, IscCodes.STATEMENT_TYPE_BUFFER_SIZE);
			numberOfResponses++;
			_database.Xdr.Flush();
			try
			{
				GenericResponse allocateResponse = null;
				if (State == StatementState.Deallocated)
				{
					numberOfResponses--;
					allocateResponse = (GenericResponse)_database.ReadResponse();
				}
				numberOfResponses--;
				var prepareResponse = (GenericResponse)_database.ReadResponse();
				// Note: the STMT_DEFER_EXECUTE flag carried in
				// prepareResponse.ObjectHandle was previously extracted into an
				// unused local; it is not acted upon by this implementation.
				numberOfResponses--;
				var statementTypeResponse = (GenericResponse)_database.ReadResponse();
				if (allocateResponse != null)
				{
					ProcessAllocateResponse(allocateResponse);
				}
				ProcessPrepareResponse(prepareResponse);
				StatementType = ProcessStatementTypeInfoBuffer(ProcessInfoSqlResponse(statementTypeResponse));
			}
			finally
			{
				(Database as GdsDatabase).SafeFinishFetching(numberOfResponses);
			}
			State = StatementState.Prepared;
		}
		catch (IOException ex)
		{
			// A statement that never got past Allocated is poisoned by an I/O
			// failure; otherwise the current state is preserved.
			State = State == StatementState.Allocated ? StatementState.Error : State;
			throw IscException.ForIOException(ex);
		}
	}

	/// <summary>
	/// Asynchronous counterpart of <see cref="Prepare"/>; identical wire
	/// sequencing.
	/// </summary>
	public override async ValueTask PrepareAsync(string commandText, CancellationToken cancellationToken = default)
	{
		ClearAll();
		try
		{
			var numberOfResponses = 0;
			if (State == StatementState.Deallocated)
			{
				await SendAllocateToBufferAsync(cancellationToken).ConfigureAwait(false);
				numberOfResponses++;
			}
			await SendPrepareToBufferAsync(commandText, cancellationToken).ConfigureAwait(false);
			numberOfResponses++;
			await SendInfoSqlToBufferAsync(StatementTypeInfoItems, IscCodes.STATEMENT_TYPE_BUFFER_SIZE, cancellationToken).ConfigureAwait(false);
			numberOfResponses++;
			await _database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
			try
			{
				GenericResponse allocateResponse = null;
				if (State == StatementState.Deallocated)
				{
					numberOfResponses--;
					allocateResponse = (GenericResponse)await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false);
				}
				numberOfResponses--;
				var prepareResponse = (GenericResponse)await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false);
				// STMT_DEFER_EXECUTE flag in prepareResponse.ObjectHandle is
				// not used (see sync variant).
				numberOfResponses--;
				var statementTypeResponse = (GenericResponse)await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false);
				if (allocateResponse != null)
				{
					await ProcessAllocateResponseAsync(allocateResponse, cancellationToken).ConfigureAwait(false);
				}
				await ProcessPrepareResponseAsync(prepareResponse, cancellationToken).ConfigureAwait(false);
				StatementType = ProcessStatementTypeInfoBuffer(await ProcessInfoSqlResponseAsync(statementTypeResponse, cancellationToken).ConfigureAwait(false));
			}
			finally
			{
				await (Database as GdsDatabase).SafeFinishFetchingAsync(numberOfResponses, cancellationToken).ConfigureAwait(false);
			}
			State = StatementState.Prepared;
		}
		catch (IOException ex)
		{
			State = State == StatementState.Allocated ? StatementState.Error : State;
			throw IscException.ForIOException(ex);
		}
	}

	/// <summary>
	/// Executes the prepared statement. The execute packet and (optionally)
	/// the rows-affected info packet are flushed together and both responses
	/// read afterwards — one round trip.
	/// </summary>
	/// <param name="timeout">Statement timeout passed to the execute packet.</param>
	/// <param name="descriptorFiller">Supplies parameter values for the execute packet.</param>
	public override void Execute(int timeout, IDescriptorFiller descriptorFiller)
	{
		EnsureNotDeallocated();
		Clear();
		try
		{
			RecordsAffected = -1;
			SendExecuteToBuffer(timeout, descriptorFiller);
			var readRowsAffectedResponse = false;
			if (DoRecordsAffected)
			{
				SendInfoSqlToBuffer(RowsAffectedInfoItems, IscCodes.ROWS_AFFECTED_BUFFER_SIZE);
				readRowsAffectedResponse = true;
			}
			_database.Xdr.Flush();
			// Stored procedures produce an extra SqlResponse before the
			// generic execute response.
			var numberOfResponses = (StatementType == DbStatementType.StoredProcedure ? 1 : 0) + 1 + (readRowsAffectedResponse ? 1 : 0);
			try
			{
				SqlResponse sqlStoredProcedureResponse = null;
				if (StatementType == DbStatementType.StoredProcedure)
				{
					numberOfResponses--;
					sqlStoredProcedureResponse = (SqlResponse)_database.ReadResponse();
					ProcessStoredProcedureExecuteResponse(sqlStoredProcedureResponse);
				}
				numberOfResponses--;
				var executeResponse = (GenericResponse)_database.ReadResponse();
				GenericResponse rowsAffectedResponse = null;
				if (readRowsAffectedResponse)
				{
					numberOfResponses--;
					rowsAffectedResponse = (GenericResponse)_database.ReadResponse();
				}
				ProcessExecuteResponse(executeResponse);
				if (readRowsAffectedResponse)
				{
					RecordsAffected = ProcessRecordsAffectedBuffer(ProcessInfoSqlResponse(rowsAffectedResponse));
				}
			}
			finally
			{
				(Database as GdsDatabase).SafeFinishFetching(numberOfResponses);
			}
			State = StatementState.Executed;
		}
		catch (IOException ex)
		{
			State = StatementState.Error;
			throw IscException.ForIOException(ex);
		}
	}

	/// <summary>
	/// Asynchronous counterpart of <see cref="Execute"/>; identical wire
	/// sequencing.
	/// </summary>
	public override async ValueTask ExecuteAsync(int timeout, IDescriptorFiller descriptorFiller, CancellationToken cancellationToken = default)
	{
		EnsureNotDeallocated();
		Clear();
		try
		{
			RecordsAffected = -1;
			await SendExecuteToBufferAsync(timeout, descriptorFiller, cancellationToken).ConfigureAwait(false);
			var readRowsAffectedResponse = false;
			if (DoRecordsAffected)
			{
				await SendInfoSqlToBufferAsync(RowsAffectedInfoItems, IscCodes.ROWS_AFFECTED_BUFFER_SIZE, cancellationToken).ConfigureAwait(false);
				readRowsAffectedResponse = true;
			}
			await _database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
			var numberOfResponses = (StatementType == DbStatementType.StoredProcedure ? 1 : 0) + 1 + (readRowsAffectedResponse ? 1 : 0);
			try
			{
				SqlResponse sqlStoredProcedureResponse = null;
				if (StatementType == DbStatementType.StoredProcedure)
				{
					numberOfResponses--;
					sqlStoredProcedureResponse = (SqlResponse)await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false);
					await ProcessStoredProcedureExecuteResponseAsync(sqlStoredProcedureResponse, cancellationToken).ConfigureAwait(false);
				}
				numberOfResponses--;
				var executeResponse = (GenericResponse)await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false);
				GenericResponse rowsAffectedResponse = null;
				if (readRowsAffectedResponse)
				{
					numberOfResponses--;
					rowsAffectedResponse = (GenericResponse)await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false);
				}
				await ProcessExecuteResponseAsync(executeResponse, cancellationToken).ConfigureAwait(false);
				if (readRowsAffectedResponse)
				{
					RecordsAffected = ProcessRecordsAffectedBuffer(await ProcessInfoSqlResponseAsync(rowsAffectedResponse, cancellationToken).ConfigureAwait(false));
				}
			}
			finally
			{
				await (Database as GdsDatabase).SafeFinishFetchingAsync(numberOfResponses, cancellationToken).ConfigureAwait(false);
			}
			State = StatementState.Executed;
		}
		catch (IOException ex)
		{
			State = StatementState.Error;
			throw IscException.ForIOException(ex);
		}
	}

	#endregion

	#region Protected methods

	/// <summary>
	/// Frees the statement lazily: the free packet is written but its response
	/// is registered as a deferred packet to be consumed later, avoiding an
	/// immediate round trip.
	/// </summary>
	protected override void Free(int option)
	{
		if (FreeNotNeeded(option))
			return;
		DoFreePacket(option);
		(Database as GdsDatabase).AppendDeferredPacket(ProcessFreeResponse);
	}

	/// <summary>
	/// Asynchronous counterpart of <see cref="Free"/>.
	/// </summary>
	protected override async ValueTask FreeAsync(int option, CancellationToken cancellationToken = default)
	{
		if (FreeNotNeeded(option))
			return;
		await DoFreePacketAsync(option, cancellationToken).ConfigureAwait(false);
		(Database as GdsDatabase).AppendDeferredPacket(ProcessFreeResponseAsync);
	}
#endregion
}

================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/Version12/GdsDatabase.cs
================================================
/*
 *	The contents of this file are subject to the Initial
 *	Developer's Public License Version 1.0 (the "License");
 *	you may not use this file except in compliance with the
 *	License. You may obtain a copy of the License at
 *	https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *	Software distributed under the License is distributed on
 *	an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *	express or implied. See the License for the specific
 *	language governing rights and limitations under the License.
 *
 *	All Rights Reserved.
 */

//$Authors = Jiri Cincura (jiri@cincura.net)

using System.IO;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using FirebirdSql.Data.Common;

namespace FirebirdSql.Data.Client.Managed.Version12;

/// <summary>
/// Database attachment for wire protocol version 12. Adds the out-of-band
/// op_cancel operation and switches parameter buffers to UTF-8 encoding.
/// </summary>
internal class GdsDatabase : Version11.GdsDatabase
{
	// Protocol 12 encodes parameter buffer (DPB/SPB) contents as UTF-8.
	public override bool UseUtf8ParameterBuffer => true;

	public GdsDatabase(GdsConnection connection)
		: base(connection)
	{ }

	public override StatementBase CreateStatement()
	{
		return new GdsStatement(this);
	}

	public override StatementBase CreateStatement(TransactionBase transaction)
	{
		return new GdsStatement(this, (Version10.GdsTransaction)transaction);
	}

	/// <summary>
	/// Sends an op_cancel packet of the given <paramref name="kind"/>.
	/// The server does not reply to this packet, so nothing is read back.
	/// </summary>
	public override void CancelOperation(short kind)
	{
		try
		{
			SendCancelOperationToBuffer(kind);
			Xdr.Flush();
			// no response, this is out-of-band
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
	}

	/// <summary>
	/// Asynchronous counterpart of <see cref="CancelOperation"/>.
	/// </summary>
	public override async ValueTask CancelOperationAsync(short kind, CancellationToken cancellationToken = default)
	{
		try
		{
			await SendCancelOperationToBufferAsync(kind, cancellationToken).ConfigureAwait(false);
			await Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
			// no response, this is out-of-band
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
	}

	// Writes the op_cancel opcode followed by the cancel kind.
	protected void SendCancelOperationToBuffer(short kind)
	{
		Xdr.Write(IscCodes.op_cancel);
		Xdr.Write(kind);
	}

	// Consistency fix: this previously declared `int kind` while the sync
	// variant takes `short`; the only caller (CancelOperationAsync) passes a
	// short, and the value is widened for the 4-byte XDR integer write either
	// way, so aligning the signatures does not change wire behavior. The
	// method is non-virtual, so no overrides are affected.
	protected async ValueTask SendCancelOperationToBufferAsync(short kind, CancellationToken cancellationToken = default)
	{
		await Xdr.WriteAsync(IscCodes.op_cancel, cancellationToken).ConfigureAwait(false);
		await Xdr.WriteAsync(kind, cancellationToken).ConfigureAwait(false);
	}
}

================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/Version12/GdsServiceManager.cs
================================================
/*
 *	The contents of this file are subject to the Initial
 *	Developer's Public License Version 1.0 (the "License");
 *	you may not use this file except in compliance with the
 *	License. You may obtain a copy of the License at
 *	https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *	Software distributed under the License is distributed on
 *	an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *	express or implied. See the License for the specific
 *	language governing rights and limitations under the License.
 *
 *	All Rights Reserved.
 */

//$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net)

using FirebirdSql.Data.Common;

namespace FirebirdSql.Data.Client.Managed.Version12;

/// <summary>
/// Service manager for wire protocol version 12; only specializes the
/// database factory to the protocol-12 implementation.
/// </summary>
internal class GdsServiceManager : Version11.GdsServiceManager
{
	public GdsServiceManager(GdsConnection connection)
		: base(connection)
	{ }

	protected override Version10.GdsDatabase CreateDatabase(GdsConnection connection)
	{
		return new GdsDatabase(connection);
	}
}

================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/Version12/GdsStatement.cs
================================================
/*
 *	The contents of this file are subject to the Initial
 *	Developer's Public License Version 1.0 (the "License");
 *	you may not use this file except in compliance with the
 *	License. You may obtain a copy of the License at
 *	https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 * Software distributed under the License is distributed on
 * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 * express or implied. See the License for the specific
 * language governing rights and limitations under the License.
 *
 * All Rights Reserved.
 */

//$Authors = Jiri Cincura (jiri@cincura.net)

using System;
using System.IO;
using System.Threading;
using System.Threading.Tasks;
using FirebirdSql.Data.Common;

namespace FirebirdSql.Data.Client.Managed.Version12;

/// <summary>
/// Statement implementation for wire protocol version 12. Unlike the
/// protocol-11 statement, the rows-affected info request is sent in a second,
/// separate round trip after the execute response has been processed, so the
/// server gets a window in which an out-of-band op_cancel can take effect.
/// </summary>
internal class GdsStatement : Version11.GdsStatement
{
	#region Constructors

	public GdsStatement(GdsDatabase database)
		: base(database)
	{ }

	public GdsStatement(GdsDatabase database, Version10.GdsTransaction transaction)
		: base(database, transaction)
	{ }

	#endregion

	#region Overriden Methods

	/// <summary>
	/// Executes the prepared statement. First round trip: execute packet
	/// (plus a stored-procedure SqlResponse when applicable); second round
	/// trip (only when DoRecordsAffected): the rows-affected info request.
	/// </summary>
	public override void Execute(int timeout, IDescriptorFiller descriptorFiller)
	{
		EnsureNotDeallocated();
		Clear();
		try
		{
			RecordsAffected = -1;
			SendExecuteToBuffer(timeout, descriptorFiller);
			_database.Xdr.Flush();
			// Count of responses still owed on the wire; SafeFinishFetching
			// drains any remainder if a read throws.
			var numberOfResponses = (StatementType == DbStatementType.StoredProcedure ? 1 : 0) + 1;
			try
			{
				SqlResponse sqlStoredProcedureResponse = null;
				if (StatementType == DbStatementType.StoredProcedure)
				{
					numberOfResponses--;
					sqlStoredProcedureResponse = (SqlResponse)_database.ReadResponse();
					ProcessStoredProcedureExecuteResponse(sqlStoredProcedureResponse);
				}
				numberOfResponses--;
				var executeResponse = (GenericResponse)_database.ReadResponse();
				ProcessExecuteResponse(executeResponse);
			}
			finally
			{
				(Database as GdsDatabase).SafeFinishFetching(numberOfResponses);
			}
			// we need to split this in two, to allow server handle op_cancel properly
			if (DoRecordsAffected)
			{
				SendInfoSqlToBuffer(RowsAffectedInfoItems, IscCodes.ROWS_AFFECTED_BUFFER_SIZE);
				_database.Xdr.Flush();
				numberOfResponses = 1;
				try
				{
					numberOfResponses--;
					var rowsAffectedResponse = (GenericResponse)_database.ReadResponse();
					RecordsAffected = ProcessRecordsAffectedBuffer(ProcessInfoSqlResponse(rowsAffectedResponse));
				}
				finally
				{
					(Database as GdsDatabase).SafeFinishFetching(numberOfResponses);
				}
			}
			State = StatementState.Executed;
		}
		catch (IOException ex)
		{
			State = StatementState.Error;
			throw IscException.ForIOException(ex);
		}
	}

	/// <summary>
	/// Asynchronous counterpart of <see cref="Execute"/>; identical wire
	/// sequencing (two separate flushes).
	/// </summary>
	public override async ValueTask ExecuteAsync(int timeout, IDescriptorFiller descriptorFiller, CancellationToken cancellationToken = default)
	{
		EnsureNotDeallocated();
		Clear();
		try
		{
			RecordsAffected = -1;
			await SendExecuteToBufferAsync(timeout, descriptorFiller, cancellationToken).ConfigureAwait(false);
			await _database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
			var numberOfResponses = (StatementType == DbStatementType.StoredProcedure ? 1 : 0) + 1;
			try
			{
				SqlResponse sqlStoredProcedureResponse = null;
				if (StatementType == DbStatementType.StoredProcedure)
				{
					numberOfResponses--;
					sqlStoredProcedureResponse = (SqlResponse)await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false);
					await ProcessStoredProcedureExecuteResponseAsync(sqlStoredProcedureResponse, cancellationToken).ConfigureAwait(false);
				}
				numberOfResponses--;
				var executeResponse = (GenericResponse)await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false);
				await ProcessExecuteResponseAsync(executeResponse, cancellationToken).ConfigureAwait(false);
			}
			finally
			{
				await (Database as GdsDatabase).SafeFinishFetchingAsync(numberOfResponses, cancellationToken).ConfigureAwait(false);
			}
			// we need to split this in two, to allow server handle op_cancel properly
			if (DoRecordsAffected)
			{
				await SendInfoSqlToBufferAsync(RowsAffectedInfoItems, IscCodes.ROWS_AFFECTED_BUFFER_SIZE, cancellationToken).ConfigureAwait(false);
				await _database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
				numberOfResponses = 1;
				try
				{
					numberOfResponses--;
					var rowsAffectedResponse = (GenericResponse)await _database.ReadResponseAsync(cancellationToken).ConfigureAwait(false);
					RecordsAffected = ProcessRecordsAffectedBuffer(await ProcessInfoSqlResponseAsync(rowsAffectedResponse, cancellationToken).ConfigureAwait(false));
				}
				finally
				{
					await (Database as GdsDatabase).SafeFinishFetchingAsync(numberOfResponses, cancellationToken).ConfigureAwait(false);
				}
			}
			State = StatementState.Executed;
		}
		catch (IOException ex)
		{
			State = StatementState.Error;
			throw IscException.ForIOException(ex);
		}
	}

	#endregion
}

================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/Version13/ContAuthResponse.cs
================================================
/*
 *	The contents of this file are subject to the Initial
 *	Developer's Public License Version 1.0 (the "License");
 *	you may not use this file except in compliance with the
* License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. */ //$Authors = Jiri Cincura (jiri@cincura.net) using System; namespace FirebirdSql.Data.Client.Managed.Version13; internal class ContAuthResponse : IResponse { public byte[] ServerData { get; } public string AcceptPluginName { get; } public bool IsAuthenticated { get; } public byte[] ServerKeys { get; } public ContAuthResponse(byte[] serverData, string acceptPluginName, bool isAuthenticated, byte[] serverKeys) { ServerData = serverData; AcceptPluginName = acceptPluginName; IsAuthenticated = isAuthenticated; ServerKeys = serverKeys; } } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/Version13/CryptKeyCallbackResponse.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. 
*/ //$Authors = Jiri Cincura (jiri@cincura.net) using System; using FirebirdSql.Data.Client.Managed; namespace FirebirdSql.Data.Client.Managed.Version13; internal class CryptKeyCallbackResponse : IResponse { public byte[] Data { get; } public CryptKeyCallbackResponse(byte[] data) { Data = data; } } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/Version13/GdsDatabase.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. 
*/

//$Authors = Hajime Nakagami, Jiri Cincura (jiri@cincura.net)

using System.IO;
using System.Linq;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using FirebirdSql.Data.Common;

namespace FirebirdSql.Data.Client.Managed.Version13;

/// <summary>
/// Database attachment for wire protocol version 13. Adds the multi-round
/// authentication handshake (op_cont_auth), the encryption-key callback
/// (op_crypt_key_callback) and wire encryption negotiation.
/// </summary>
internal class GdsDatabase : Version12.GdsDatabase
{
	public GdsDatabase(GdsConnection connection)
		: base(connection)
	{ }

	/// <summary>
	/// Attaches to the database. If the server answers with ContAuthResponse,
	/// the auth handshake loops (cont-auth rounds interleaved with optional
	/// crypt-key callbacks) until a GenericResponse arrives; a non-empty
	/// response payload then triggers wire-crypt negotiation. On failure the
	/// connection is detached before the exception propagates.
	/// </summary>
	public override void Attach(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey)
	{
		try
		{
			SendAttachToBuffer(dpb, database);
			Xdr.Flush();
			var response = ReadResponse();
			if (response is ContAuthResponse)
			{
				// Keep exchanging cont-auth packets until the server stops
				// asking for more rounds.
				while (response is ContAuthResponse contAuthResponse)
				{
					AuthBlock.Start(contAuthResponse.ServerData, contAuthResponse.AcceptPluginName, contAuthResponse.IsAuthenticated, contAuthResponse.ServerKeys);
					AuthBlock.SendContAuthToBuffer();
					Xdr.Flush();
					response = AuthBlock.ProcessContAuthResponse();
					response = ProcessCryptCallbackResponseIfNeeded(response, cryptKey);
				}
				var genericResponse = (GenericResponse)response;
				ProcessAttachResponse(genericResponse);
				// Non-empty payload means the server provided key data, so
				// negotiate wire encryption.
				if (genericResponse.Data.Any())
				{
					AuthBlock.SendWireCryptToBuffer();
					Xdr.Flush();
					AuthBlock.ProcessWireCryptResponse();
				}
			}
			else
			{
				response = ProcessCryptCallbackResponseIfNeeded(response, cryptKey);
				ProcessAttachResponse((GenericResponse)response);
				AuthBlock.Complete();
			}
			// Throws if the negotiated wire-crypt state violates the
			// configured WireCrypt option for protocol 13.
			AuthBlock.WireCryptValidate(IscCodes.PROTOCOL_VERSION13);
		}
		catch (IscException)
		{
			SafelyDetach();
			throw;
		}
		catch (IOException ex)
		{
			SafelyDetach();
			throw IscException.ForIOException(ex);
		}
		AfterAttachActions();
	}

	/// <summary>
	/// Asynchronous counterpart of <see cref="Attach"/>; identical handshake
	/// sequencing.
	/// </summary>
	public override async ValueTask AttachAsync(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey, CancellationToken cancellationToken = default)
	{
		try
		{
			await SendAttachToBufferAsync(dpb, database, cancellationToken).ConfigureAwait(false);
			await Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
			var response = await ReadResponseAsync(cancellationToken).ConfigureAwait(false);
			if (response is ContAuthResponse)
			{
				while (response is ContAuthResponse contAuthResponse)
				{
					AuthBlock.Start(contAuthResponse.ServerData, contAuthResponse.AcceptPluginName, contAuthResponse.IsAuthenticated, contAuthResponse.ServerKeys);
					await AuthBlock.SendContAuthToBufferAsync(cancellationToken).ConfigureAwait(false);
					await Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
					response = await AuthBlock.ProcessContAuthResponseAsync(cancellationToken).ConfigureAwait(false);
					response = await ProcessCryptCallbackResponseIfNeededAsync(response, cryptKey, cancellationToken).ConfigureAwait(false);
				}
				var genericResponse = (GenericResponse)response;
				await ProcessAttachResponseAsync(genericResponse, cancellationToken).ConfigureAwait(false);
				if (genericResponse.Data.Any())
				{
					await AuthBlock.SendWireCryptToBufferAsync(cancellationToken).ConfigureAwait(false);
					await Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
					await AuthBlock.ProcessWireCryptResponseAsync(cancellationToken).ConfigureAwait(false);
				}
			}
			else
			{
				response = await ProcessCryptCallbackResponseIfNeededAsync(response, cryptKey, cancellationToken).ConfigureAwait(false);
				await ProcessAttachResponseAsync((GenericResponse)response, cancellationToken).ConfigureAwait(false);
				AuthBlock.Complete();
			}
			AuthBlock.WireCryptValidate(IscCodes.PROTOCOL_VERSION13);
		}
		catch (IscException)
		{
			await SafelyDetachAsync(cancellationToken).ConfigureAwait(false);
			throw;
		}
		catch (IOException ex)
		{
			await SafelyDetachAsync(cancellationToken).ConfigureAwait(false);
			throw IscException.ForIOException(ex);
		}
		await AfterAttachActionsAsync(cancellationToken).ConfigureAwait(false);
	}

	/// <summary>
	/// Writes the op_attach packet. Auth material is embedded into the DPB:
	/// when no client auth data is available yet, the accepted plugin name and
	/// the public client data are sent instead.
	/// </summary>
	protected override void SendAttachToBuffer(DatabaseParameterBufferBase dpb, string database)
	{
		Xdr.Write(IscCodes.op_attach);
		Xdr.Write(DatabaseObjectId);
		if (!AuthBlock.HasClientData)
		{
			dpb.Append(IscCodes.isc_dpb_auth_plugin_name, AuthBlock.AcceptPluginName);
			dpb.Append(IscCodes.isc_dpb_specific_auth_data, AuthBlock.PublicClientData);
		}
		else
		{
			dpb.Append(IscCodes.isc_dpb_specific_auth_data, AuthBlock.ClientData);
		}
		Xdr.WriteBuffer(dpb.Encoding.GetBytes(database));
		Xdr.WriteBuffer(dpb.ToArray());
	}

	/// <summary>
	/// Asynchronous counterpart of <see cref="SendAttachToBuffer"/>.
	/// </summary>
	protected override async ValueTask SendAttachToBufferAsync(DatabaseParameterBufferBase dpb, string database, CancellationToken cancellationToken = default)
	{
		await Xdr.WriteAsync(IscCodes.op_attach, cancellationToken).ConfigureAwait(false);
		await Xdr.WriteAsync(DatabaseObjectId, cancellationToken).ConfigureAwait(false);
		if (!AuthBlock.HasClientData)
		{
			dpb.Append(IscCodes.isc_dpb_auth_plugin_name, AuthBlock.AcceptPluginName);
			dpb.Append(IscCodes.isc_dpb_specific_auth_data, AuthBlock.PublicClientData);
		}
		else
		{
			dpb.Append(IscCodes.isc_dpb_specific_auth_data, AuthBlock.ClientData);
		}
		await Xdr.WriteBufferAsync(dpb.Encoding.GetBytes(database), cancellationToken).ConfigureAwait(false);
		await Xdr.WriteBufferAsync(dpb.ToArray(), cancellationToken).ConfigureAwait(false);
	}

	/// <summary>
	/// Creates a database using the same cont-auth/crypt-callback handshake as
	/// <see cref="Attach"/>, but without wire-crypt validation or detach on
	/// IscException (no attachment exists yet to detach).
	/// </summary>
	public override void CreateDatabase(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey)
	{
		try
		{
			SendCreateToBuffer(dpb, database);
			Xdr.Flush();
			var response = ReadResponse();
			if (response is ContAuthResponse)
			{
				while (response is ContAuthResponse contAuthResponse)
				{
					AuthBlock.Start(contAuthResponse.ServerData, contAuthResponse.AcceptPluginName, contAuthResponse.IsAuthenticated, contAuthResponse.ServerKeys);
					AuthBlock.SendContAuthToBuffer();
					Xdr.Flush();
					response = AuthBlock.ProcessContAuthResponse();
					response = ProcessCryptCallbackResponseIfNeeded(response, cryptKey);
				}
				var genericResponse = (GenericResponse)response;
				ProcessCreateResponse(genericResponse);
				if (genericResponse.Data.Any())
				{
					AuthBlock.SendWireCryptToBuffer();
					Xdr.Flush();
					AuthBlock.ProcessWireCryptResponse();
				}
			}
			else
			{
				response = ProcessCryptCallbackResponseIfNeeded(response, cryptKey);
				ProcessCreateResponse((GenericResponse)response);
				AuthBlock.Complete();
			}
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
	}

	/// <summary>
	/// Asynchronous counterpart of <see cref="CreateDatabase"/>.
	/// </summary>
	public override async ValueTask CreateDatabaseAsync(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey, CancellationToken cancellationToken = default)
	{
		try
		{
			await SendCreateToBufferAsync(dpb, database, cancellationToken).ConfigureAwait(false);
			await Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
			var response = await ReadResponseAsync(cancellationToken).ConfigureAwait(false);
			if (response is ContAuthResponse)
			{
				while (response is ContAuthResponse contAuthResponse)
				{
					AuthBlock.Start(contAuthResponse.ServerData, contAuthResponse.AcceptPluginName, contAuthResponse.IsAuthenticated, contAuthResponse.ServerKeys);
					await AuthBlock.SendContAuthToBufferAsync(cancellationToken).ConfigureAwait(false);
					await Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
					response = await AuthBlock.ProcessContAuthResponseAsync(cancellationToken).ConfigureAwait(false);
					response = await ProcessCryptCallbackResponseIfNeededAsync(response, cryptKey, cancellationToken).ConfigureAwait(false);
				}
				var genericResponse = (GenericResponse)response;
				await ProcessCreateResponseAsync(genericResponse, cancellationToken).ConfigureAwait(false);
				if (genericResponse.Data.Any())
				{
					await AuthBlock.SendWireCryptToBufferAsync(cancellationToken).ConfigureAwait(false);
					await Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
					await AuthBlock.ProcessWireCryptResponseAsync(cancellationToken).ConfigureAwait(false);
				}
			}
			else
			{
				response = await ProcessCryptCallbackResponseIfNeededAsync(response, cryptKey, cancellationToken).ConfigureAwait(false);
				await ProcessCreateResponseAsync((GenericResponse)response, cancellationToken).ConfigureAwait(false);
				AuthBlock.Complete();
			}
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
	}

	/// <summary>
	/// Writes the op_create packet; embeds auth material into the DPB in the
	/// same way as <see cref="SendAttachToBuffer"/>.
	/// </summary>
	protected override void SendCreateToBuffer(DatabaseParameterBufferBase dpb, string database)
	{
		Xdr.Write(IscCodes.op_create);
		Xdr.Write(DatabaseObjectId);
		if (!AuthBlock.HasClientData)
		{
			dpb.Append(IscCodes.isc_dpb_auth_plugin_name, AuthBlock.AcceptPluginName);
			dpb.Append(IscCodes.isc_dpb_specific_auth_data, AuthBlock.PublicClientData);
		}
		else
		{
			dpb.Append(IscCodes.isc_dpb_specific_auth_data, AuthBlock.ClientData);
		}
		Xdr.WriteBuffer(dpb.Encoding.GetBytes(database));
		Xdr.WriteBuffer(dpb.ToArray());
	}

	/// <summary>
	/// Asynchronous counterpart of <see cref="SendCreateToBuffer"/>.
	/// </summary>
	protected override async ValueTask SendCreateToBufferAsync(DatabaseParameterBufferBase dpb, string database, CancellationToken cancellationToken = default)
	{
		await Xdr.WriteAsync(IscCodes.op_create, cancellationToken).ConfigureAwait(false);
		await Xdr.WriteAsync(DatabaseObjectId, cancellationToken).ConfigureAwait(false);
		if (!AuthBlock.HasClientData)
		{
			dpb.Append(IscCodes.isc_dpb_auth_plugin_name, AuthBlock.AcceptPluginName);
			dpb.Append(IscCodes.isc_dpb_specific_auth_data, AuthBlock.PublicClientData);
		}
		else
		{
			dpb.Append(IscCodes.isc_dpb_specific_auth_data, AuthBlock.ClientData);
		}
		await Xdr.WriteBufferAsync(dpb.Encoding.GetBytes(database), cancellationToken).ConfigureAwait(false);
		await Xdr.WriteBufferAsync(dpb.ToArray(), cancellationToken).ConfigureAwait(false);
	}

	// In protocol 13 trusted auth is folded into the regular handshake, so
	// these simply delegate to the plain attach/create paths.
	public override void AttachWithTrustedAuth(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey)
	{
		Attach(dpb, database, cryptKey);
	}

	public override ValueTask AttachWithTrustedAuthAsync(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey, CancellationToken cancellationToken = default)
	{
		return AttachAsync(dpb, database, cryptKey, cancellationToken);
	}

	public override void CreateDatabaseWithTrustedAuth(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey)
	{
		CreateDatabase(dpb, database, cryptKey);
	}

	public override ValueTask CreateDatabaseWithTrustedAuthAsync(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey, CancellationToken cancellationToken = default)
	{
		return CreateDatabaseAsync(dpb, database, cryptKey, cancellationToken);
	}

	/// <summary>
	/// Answers any pending op_crypt_key_callback requests by sending
	/// <paramref name="cryptKey"/> back, looping until a non-callback response
	/// arrives; returns that response.
	/// </summary>
	protected internal virtual IResponse ProcessCryptCallbackResponseIfNeeded(IResponse response, byte[] cryptKey)
	{
		while (response is CryptKeyCallbackResponse)
		{
			Xdr.Write(IscCodes.op_crypt_key_callback);
			Xdr.WriteBuffer(cryptKey);
			Xdr.Flush();
			response = ReadResponse();
		}
		return response;
	}

	/// <summary>
	/// Asynchronous counterpart of <see cref="ProcessCryptCallbackResponseIfNeeded"/>.
	/// </summary>
	protected internal virtual async ValueTask<IResponse> ProcessCryptCallbackResponseIfNeededAsync(IResponse response, byte[] cryptKey, CancellationToken cancellationToken = default)
	{
		while (response is CryptKeyCallbackResponse)
		{
			await Xdr.WriteAsync(IscCodes.op_crypt_key_callback, cancellationToken).ConfigureAwait(false);
			await Xdr.WriteBufferAsync(cryptKey, cancellationToken).ConfigureAwait(false);
			await Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
			response = await ReadResponseAsync(cancellationToken).ConfigureAwait(false);
		}
		return response;
	}

	public override StatementBase CreateStatement()
	{
		return new GdsStatement(this);
	}

	public override StatementBase CreateStatement(TransactionBase transaction)
	{
		return new GdsStatement(this, (Version10.GdsTransaction)transaction);
	}

	// Protocol 13 uses the version-2 DPB format.
	public override DatabaseParameterBufferBase CreateDatabaseParameterBuffer()
	{
		return new DatabaseParameterBuffer2(ParameterBufferEncoding);
	}
}

================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/Version13/GdsServiceManager.cs
================================================
/*
 *	The contents of this file are subject to the Initial
 *	Developer's Public License Version 1.0 (the "License");
 *	you may not use this file except in compliance with the
 *	License. You may obtain a copy of the License at
 *	https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *	Software distributed under the License is distributed on
 *	an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *	express or implied. See the License for the specific
 *	language governing rights and limitations under the License.
 *
 *	All Rights Reserved.
*/ //$Authors = Jiri Cincura (jiri@cincura.net) using System.IO; using System.Linq; using System.Threading; using System.Threading.Tasks; using FirebirdSql.Data.Common; namespace FirebirdSql.Data.Client.Managed.Version13; internal class GdsServiceManager : Version12.GdsServiceManager { public GdsServiceManager(GdsConnection connection) : base(connection) { } public override bool UseUtf8ParameterBuffer => true; public override void Attach(ServiceParameterBufferBase spb, string dataSource, int port, string service, byte[] cryptKey) { try { SendAttachToBuffer(spb, service); Database.Xdr.Flush(); var response = Database.ReadResponse(); if (response is ContAuthResponse) { while (response is ContAuthResponse contAuthResponse) { Connection.AuthBlock.Start(contAuthResponse.ServerData, contAuthResponse.AcceptPluginName, contAuthResponse.IsAuthenticated, contAuthResponse.ServerKeys); Connection.AuthBlock.SendContAuthToBuffer(); Database.Xdr.Flush(); response = Connection.AuthBlock.ProcessContAuthResponse(); response = (Database as GdsDatabase).ProcessCryptCallbackResponseIfNeeded(response, cryptKey); } var genericResponse = (GenericResponse)response; base.ProcessAttachResponse(genericResponse); if (genericResponse.Data.Any()) { Database.AuthBlock.SendWireCryptToBuffer(); Database.Xdr.Flush(); Database.AuthBlock.ProcessWireCryptResponse(); } } else { response = (Database as GdsDatabase).ProcessCryptCallbackResponseIfNeeded(response, cryptKey); ProcessAttachResponse((GenericResponse)response); Database.AuthBlock.Complete(); } } catch (IscException) { Database.SafelyDetach(); throw; } catch (IOException ex) { Database.SafelyDetach(); throw IscException.ForIOException(ex); } } public override async ValueTask AttachAsync(ServiceParameterBufferBase spb, string dataSource, int port, string service, byte[] cryptKey, CancellationToken cancellationToken = default) { try { await SendAttachToBufferAsync(spb, service, cancellationToken).ConfigureAwait(false); await 
Database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false); var response = await Database.ReadResponseAsync(cancellationToken).ConfigureAwait(false); if (response is ContAuthResponse) { while (response is ContAuthResponse contAuthResponse) { Connection.AuthBlock.Start(contAuthResponse.ServerData, contAuthResponse.AcceptPluginName, contAuthResponse.IsAuthenticated, contAuthResponse.ServerKeys); await Connection.AuthBlock.SendContAuthToBufferAsync(cancellationToken).ConfigureAwait(false); await Database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false); response = await Connection.AuthBlock.ProcessContAuthResponseAsync(cancellationToken).ConfigureAwait(false); response = await (Database as GdsDatabase).ProcessCryptCallbackResponseIfNeededAsync(response, cryptKey, cancellationToken).ConfigureAwait(false); } var genericResponse = (GenericResponse)response; await base.ProcessAttachResponseAsync(genericResponse, cancellationToken).ConfigureAwait(false); if (genericResponse.Data.Any()) { await Database.AuthBlock.SendWireCryptToBufferAsync(cancellationToken).ConfigureAwait(false); await Database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false); await Database.AuthBlock.ProcessWireCryptResponseAsync(cancellationToken).ConfigureAwait(false); } } else { response = await (Database as GdsDatabase).ProcessCryptCallbackResponseIfNeededAsync(response, cryptKey, cancellationToken).ConfigureAwait(false); await ProcessAttachResponseAsync((GenericResponse)response, cancellationToken).ConfigureAwait(false); Database.AuthBlock.Complete(); } } catch (IscException) { await Database.SafelyDetachAsync(cancellationToken).ConfigureAwait(false); throw; } catch (IOException ex) { await Database.SafelyDetachAsync(cancellationToken).ConfigureAwait(false); throw IscException.ForIOException(ex); } } public override ServiceParameterBufferBase CreateServiceParameterBuffer() { return new ServiceParameterBuffer3(Database.ParameterBufferEncoding); } protected override 
/// <summary>
/// Statement for wire protocol 13. Protocol 13 changes the row/parameter wire format:
/// NULL flags are sent once up front as a packed bitmap, and NULL columns/parameters
/// carry no data bytes at all (unlike earlier protocols).
/// </summary>
internal class GdsStatement : Version12.GdsStatement
{
	#region Constructors

	public GdsStatement(GdsDatabase database)
		: base(database)
	{ }

	public GdsStatement(GdsDatabase database, Version10.GdsTransaction transaction)
		: base(database, transaction)
	{ }

	#endregion

	#region Overriden Methods

	/// <summary>
	/// Serializes the parameters into a standalone XDR buffer: first the NULL bitmap
	/// (one bit per parameter, LSB-first within each byte), then the raw values of
	/// non-NULL parameters only. Returns null when there are no parameters.
	/// </summary>
	protected override byte[] WriteParameters()
	{
		if (_parameters == null)
			return null;

		using (var ms = new MemoryStream(256))
		{
			try
			{
				var xdr = new XdrReaderWriter(new DataProviderStreamWrapper(ms), _database.Charset);

				// Bit i is set when parameter i is NULL.
				var bits = new BitArray(_parameters.Count);
				for (var i = 0; i < _parameters.Count; i++)
				{
					var field = _parameters[i];
					bits.Set(i, field.DbValue.IsDBNull());
				}
				// Pack the bits LSB-first: each byte is built by 8 right-shifts that
				// insert the current bit at the MSB, so bit i lands at position i % 8.
				var buffer = new byte[(int)Math.Ceiling(_parameters.Count / 8d)];
				for (var i = 0; i < buffer.Length * 8; i++)
				{
					var index = i / 8;
					// LSB
					buffer[index] = (byte)((buffer[index] >> 1) | (bits.Length > i && bits[i] ? 1 << 7 : 0));
				}
				xdr.WriteOpaque(buffer);

				// NULL parameters contribute no payload bytes in protocol 13.
				for (var i = 0; i < _parameters.Count; i++)
				{
					var field = _parameters[i];
					if (field.DbValue.IsDBNull())
					{
						continue;
					}
					WriteRawParameter(xdr, field);
				}

				xdr.Flush();
				return ms.ToArray();
			}
			catch (IOException ex)
			{
				throw IscException.ForIOException(ex);
			}
		}
	}

	/// <summary>
	/// Async counterpart of <see cref="WriteParameters"/>; identical wire layout.
	/// </summary>
	protected override async ValueTask<byte[]> WriteParametersAsync(CancellationToken cancellationToken = default)
	{
		if (_parameters == null)
			return null;

		using (var ms = new MemoryStream(256))
		{
			try
			{
				var xdr = new XdrReaderWriter(new DataProviderStreamWrapper(ms), _database.Charset);

				var bits = new BitArray(_parameters.Count);
				for (var i = 0; i < _parameters.Count; i++)
				{
					var field = _parameters[i];
					bits.Set(i, field.DbValue.IsDBNull());
				}
				var buffer = new byte[(int)Math.Ceiling(_parameters.Count / 8d)];
				for (var i = 0; i < buffer.Length * 8; i++)
				{
					var index = i / 8;
					// LSB
					buffer[index] = (byte)((buffer[index] >> 1) | (bits.Length > i && bits[i] ? 1 << 7 : 0));
				}
				await xdr.WriteOpaqueAsync(buffer, cancellationToken).ConfigureAwait(false);

				for (var i = 0; i < _parameters.Count; i++)
				{
					var field = _parameters[i];
					if (field.DbValue.IsDBNull())
					{
						continue;
					}
					await WriteRawParameterAsync(xdr, field, cancellationToken).ConfigureAwait(false);
				}

				await xdr.FlushAsync(cancellationToken).ConfigureAwait(false);
				return ms.ToArray();
			}
			catch (IOException ex)
			{
				throw IscException.ForIOException(ex);
			}
		}
	}

	/// <summary>
	/// Reads one row: the NULL bitmap first, then raw values for non-NULL columns.
	/// NULL columns produce a DbValue with a null payload and consume no wire bytes.
	/// </summary>
	protected override DbValue[] ReadRow()
	{
		var row = new DbValue[_fields.Count];
		try
		{
			if (_fields.Count > 0)
			{
				var nullBytes = _database.Xdr.ReadOpaque((int)Math.Ceiling(_fields.Count / 8d));
				var nullBits = new BitArray(nullBytes);
				for (var i = 0; i < _fields.Count; i++)
				{
					if (nullBits.Get(i))
					{
						row[i] = new DbValue(this, _fields[i], null);
					}
					else
					{
						var value = ReadRawValue(_database.Xdr, _fields[i]);
						row[i] = new DbValue(this, _fields[i], value);
					}
				}
			}
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
		return row;
	}

	/// <summary>
	/// Async counterpart of <see cref="ReadRow"/>.
	/// </summary>
	protected override async ValueTask<DbValue[]> ReadRowAsync(CancellationToken cancellationToken = default)
	{
		var row = new DbValue[_fields.Count];
		try
		{
			if (_fields.Count > 0)
			{
				var nullBytes = await _database.Xdr.ReadOpaqueAsync((int)Math.Ceiling(_fields.Count / 8d), cancellationToken).ConfigureAwait(false);
				var nullBits = new BitArray(nullBytes);
				for (var i = 0; i < _fields.Count; i++)
				{
					if (nullBits.Get(i))
					{
						row[i] = new DbValue(this, _fields[i], null);
					}
					else
					{
						var value = await ReadRawValueAsync(_database.Xdr, _fields[i], cancellationToken).ConfigureAwait(false);
						row[i] = new DbValue(this, _fields[i], value);
					}
				}
			}
		}
		catch (IOException ex)
		{
			throw IscException.ForIOException(ex);
		}
		return row;
	}

	#endregion
}
ReadRowAsync(CancellationToken cancellationToken = default) { var row = new DbValue[_fields.Count]; try { if (_fields.Count > 0) { var nullBytes = await _database.Xdr.ReadOpaqueAsync((int)Math.Ceiling(_fields.Count / 8d), cancellationToken).ConfigureAwait(false); var nullBits = new BitArray(nullBytes); for (var i = 0; i < _fields.Count; i++) { if (nullBits.Get(i)) { row[i] = new DbValue(this, _fields[i], null); } else { var value = await ReadRawValueAsync(_database.Xdr, _fields[i], cancellationToken).ConfigureAwait(false); row[i] = new DbValue(this, _fields[i], value); } } } } catch (IOException ex) { throw IscException.ForIOException(ex); } return row; } #endregion } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/Version13/WireCryptOption.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. */ //$Authors = Jiri Cincura (jiri@cincura.net) namespace FirebirdSql.Data.Client.Managed.Version13; internal enum WireCryptOption { Disabled, Enabled, Required, } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/Version15/CryptKeyCallbackResponse.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. 
You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. */ //$Authors = Jiri Cincura (jiri@cincura.net) using System; using FirebirdSql.Data.Client.Managed; namespace FirebirdSql.Data.Client.Managed.Version15; internal class CryptKeyCallbackResponse : IResponse { public byte[] Data { get; } public int Size { get; } public CryptKeyCallbackResponse(byte[] data, int size) { Data = data; Size = size; } } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/Version15/GdsDatabase.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. 
*/ //$Authors = Jiri Cincura (jiri@cincura.net) using System.IO; using System.Linq; using System.Text; using System.Threading; using System.Threading.Tasks; using FirebirdSql.Data.Common; namespace FirebirdSql.Data.Client.Managed.Version15; internal class GdsDatabase : Version13.GdsDatabase { public GdsDatabase(GdsConnection connection) : base(connection) { } protected internal override IResponse ProcessCryptCallbackResponseIfNeeded(IResponse response, byte[] cryptKey) { while (response is CryptKeyCallbackResponse cryptKeyCallbackResponse) { Xdr.Write(IscCodes.op_crypt_key_callback); Xdr.WriteBuffer(cryptKey); Xdr.Write(cryptKeyCallbackResponse.Size); Xdr.Flush(); response = ReadResponse(); } return response; } protected internal override async ValueTask ProcessCryptCallbackResponseIfNeededAsync(IResponse response, byte[] cryptKey, CancellationToken cancellationToken = default) { while (response is CryptKeyCallbackResponse cryptKeyCallbackResponse) { await Xdr.WriteAsync(IscCodes.op_crypt_key_callback, cancellationToken).ConfigureAwait(false); await Xdr.WriteBufferAsync(cryptKey, cancellationToken).ConfigureAwait(false); await Xdr.WriteAsync(cryptKeyCallbackResponse.Size).ConfigureAwait(false); await Xdr.FlushAsync(cancellationToken).ConfigureAwait(false); response = await ReadResponseAsync(cancellationToken).ConfigureAwait(false); } return response; } public override StatementBase CreateStatement() { return new GdsStatement(this); } public override StatementBase CreateStatement(TransactionBase transaction) { return new GdsStatement(this, (Version10.GdsTransaction)transaction); } } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/Version15/GdsServiceManager.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. 
You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. */ //$Authors = Jiri Cincura (jiri@cincura.net) using System.IO; using System.Linq; using System.Threading; using System.Threading.Tasks; using FirebirdSql.Data.Common; namespace FirebirdSql.Data.Client.Managed.Version15; internal class GdsServiceManager : Version13.GdsServiceManager { public GdsServiceManager(GdsConnection connection) : base(connection) { } protected override Version10.GdsDatabase CreateDatabase(GdsConnection connection) { return new GdsDatabase(connection); } } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/Version15/GdsStatement.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. 
*/ //$Authors = Jiri Cincura (jiri@cincura.net) using System; using System.Collections; using System.IO; using System.Threading; using System.Threading.Tasks; using FirebirdSql.Data.Common; namespace FirebirdSql.Data.Client.Managed.Version15; internal class GdsStatement : Version13.GdsStatement { public GdsStatement(GdsDatabase database) : base(database) { } public GdsStatement(GdsDatabase database, Version10.GdsTransaction transaction) : base(database, transaction) { } } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/Version16/BatchCompletionStateResponse.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. 
*/ //$Authors = Jiri Cincura (jiri@cincura.net) using FirebirdSql.Data.Common; namespace FirebirdSql.Data.Client.Managed.Version16; internal class BatchCompletionStateResponse : IResponse { public short StatementHandle { get; } public int ProcessedMessages { get; } public int[] UpdatedRecordsPerMessage { get; } public (int, IscException)[] DetailedErrors { get; } public int[] AdditionalErrorsPerMessage { get; } public BatchCompletionStateResponse(short statementHandle, int processedMessages, int[] updatedRecordsPerMessage, (int, IscException)[] detailedErrors, int[] errorsPerMessage) { StatementHandle = statementHandle; ProcessedMessages = processedMessages; UpdatedRecordsPerMessage = updatedRecordsPerMessage; DetailedErrors = detailedErrors; AdditionalErrorsPerMessage = errorsPerMessage; } } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/Version16/GdsBatch.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. 
/// <summary>
/// Batch execution support for wire protocol 16. Issues the op_batch_create /
/// op_batch_msg / op_batch_exec sequence in a single flush and converts the
/// resulting completion state into per-message ExecuteResultItems.
/// </summary>
internal class GdsBatch : BatchBase
{
	// Statement the batch belongs to; also supplies parameter metadata.
	protected GdsStatement _statement;

	public override StatementBase Statement => _statement;

	public GdsDatabase Database => (GdsDatabase)_statement.Database;

	public GdsBatch(GdsStatement statement)
	{
		_statement = statement;
	}

	/// <summary>
	/// Executes the batch: creates it, sends all message payloads, executes it,
	/// and reads the three responses. Returns one result item per processed message.
	/// </summary>
	public override ExecuteResultItem[] Execute(int count, IDescriptorFiller descriptorFiller)
	{
		// this may throw error, so it needs to be before any writing
		var parametersData = GetParametersData(count, descriptorFiller);

		Database.Xdr.Write(IscCodes.op_batch_create);
		Database.Xdr.Write(_statement.Handle); // p_batch_statement
		var blr = _statement.Parameters.ToBlr();
		Database.Xdr.WriteBuffer(blr.Data); // p_batch_blr
		Database.Xdr.Write(blr.Length); // p_batch_msglen
		// Batch options: record counts, multi-error mode, and buffer size limit.
		var pb = _statement.CreateBatchParameterBuffer();
		if (_statement.ReturnRecordsAffected)
		{
			pb.Append(IscCodes.Batch.TAG_RECORD_COUNTS, 1);
		}
		if (MultiError)
		{
			pb.Append(IscCodes.Batch.TAG_MULTIERROR, 1);
		}
		pb.Append(IscCodes.Batch.TAG_BUFFER_BYTES_SIZE, BatchBufferSize);
		Database.Xdr.WriteBuffer(pb.ToArray()); // p_batch_pb

		Database.Xdr.Write(IscCodes.op_batch_msg);
		Database.Xdr.Write(_statement.Handle); // p_batch_statement
		Database.Xdr.Write(parametersData.Length); // p_batch_messages
		foreach (var item in parametersData)
		{
			Database.Xdr.WriteOpaque(item, item.Length); // p_batch_data
		}

		Database.Xdr.Write(IscCodes.op_batch_exec);
		Database.Xdr.Write(_statement.Handle); // p_batch_statement
		Database.Xdr.Write(_statement.Transaction.Handle); // p_batch_transaction;

		Database.Xdr.Flush();

		// Three responses are expected (create, msg, exec); the countdown lets the
		// finally block drain any responses not yet consumed after a failure.
		var numberOfResponses = 3;
		try
		{
			numberOfResponses--;
			var batchCreateResponse = Database.ReadResponse();
			numberOfResponses--;
			var batchMsgResponse = Database.ReadResponse();
			numberOfResponses--;
			var batchExecResponse = (BatchCompletionStateResponse)Database.ReadResponse();
			return BuildResult(batchExecResponse);
		}
		finally
		{
			Database.SafeFinishFetching(numberOfResponses);
		}
	}

	/// <summary>
	/// Async counterpart of <see cref="Execute"/>; identical wire sequence.
	/// </summary>
	public override async ValueTask<ExecuteResultItem[]> ExecuteAsync(int count, IDescriptorFiller descriptorFiller, CancellationToken cancellationToken = default)
	{
		// this may throw error, so it needs to be before any writing
		var parametersData = await GetParametersDataAsync(count, descriptorFiller, cancellationToken).ConfigureAwait(false);

		await Database.Xdr.WriteAsync(IscCodes.op_batch_create, cancellationToken).ConfigureAwait(false);
		await Database.Xdr.WriteAsync(_statement.Handle, cancellationToken).ConfigureAwait(false); // p_batch_statement
		var blr = _statement.Parameters.ToBlr();
		await Database.Xdr.WriteBufferAsync(blr.Data, cancellationToken).ConfigureAwait(false); // p_batch_blr
		await Database.Xdr.WriteAsync(blr.Length, cancellationToken).ConfigureAwait(false); // p_batch_msglen
		var pb = _statement.CreateBatchParameterBuffer();
		if (_statement.ReturnRecordsAffected)
		{
			pb.Append(IscCodes.Batch.TAG_RECORD_COUNTS, 1);
		}
		if (MultiError)
		{
			pb.Append(IscCodes.Batch.TAG_MULTIERROR, 1);
		}
		pb.Append(IscCodes.Batch.TAG_BUFFER_BYTES_SIZE, BatchBufferSize);
		await Database.Xdr.WriteBufferAsync(pb.ToArray(), cancellationToken).ConfigureAwait(false); // p_batch_pb

		await Database.Xdr.WriteAsync(IscCodes.op_batch_msg, cancellationToken).ConfigureAwait(false);
		await Database.Xdr.WriteAsync(_statement.Handle, cancellationToken).ConfigureAwait(false); // p_batch_statement
		await Database.Xdr.WriteAsync(parametersData.Length, cancellationToken).ConfigureAwait(false); // p_batch_messages
		foreach (var item in parametersData)
		{
			await Database.Xdr.WriteOpaqueAsync(item, item.Length, cancellationToken).ConfigureAwait(false); // p_batch_data
		}

		await Database.Xdr.WriteAsync(IscCodes.op_batch_exec, cancellationToken).ConfigureAwait(false);
		await Database.Xdr.WriteAsync(_statement.Handle, cancellationToken).ConfigureAwait(false); // p_batch_statement
		await Database.Xdr.WriteAsync(_statement.Transaction.Handle, cancellationToken).ConfigureAwait(false); // p_batch_transaction;

		await Database.Xdr.FlushAsync(cancellationToken).ConfigureAwait(false);

		var numberOfResponses = 3;
		try
		{
			numberOfResponses--;
			var batchCreateResponse = await Database.ReadResponseAsync(cancellationToken).ConfigureAwait(false);
			numberOfResponses--;
			var batchMsgResponse = await Database.ReadResponseAsync(cancellationToken).ConfigureAwait(false);
			numberOfResponses--;
			var batchExecResponse = (BatchCompletionStateResponse)await Database.ReadResponseAsync(cancellationToken).ConfigureAwait(false);
			return BuildResult(batchExecResponse);
		}
		finally
		{
			await Database.SafeFinishFetchingAsync(numberOfResponses, cancellationToken).ConfigureAwait(false);
		}
	}

	/// <summary>
	/// Total serialized size in bytes of the first <paramref name="count"/> messages.
	/// </summary>
	public override int ComputeBatchSize(int count, IDescriptorFiller descriptorFiller)
	{
		var parametersData = GetParametersData(count, descriptorFiller);
		return parametersData.Sum(x => x.Length);
	}

	public override async ValueTask<int> ComputeBatchSizeAsync(int count, IDescriptorFiller descriptorFiller, CancellationToken cancellationToken = default)
	{
		var parametersData = await GetParametersDataAsync(count, descriptorFiller, cancellationToken).ConfigureAwait(false);
		return parametersData.Sum(x => x.Length);
	}

	/// <summary>
	/// Queues an op_batch_rls; the response is processed as a deferred packet
	/// rather than read immediately.
	/// </summary>
	public override void Release()
	{
		Database.Xdr.Write(IscCodes.op_batch_rls);
		Database.Xdr.Write(_statement.Handle);
		Database.AppendDeferredPacket(ProcessReleaseResponse);
	}

	public override async ValueTask ReleaseAsync(CancellationToken cancellationToken = default)
	{
		await Database.Xdr.WriteAsync(IscCodes.op_batch_rls, cancellationToken).ConfigureAwait(false);
		await Database.Xdr.WriteAsync(_statement.Handle, cancellationToken).ConfigureAwait(false);
		Database.AppendDeferredPacket(ProcessReleaseResponseAsync);
	}

	// Release responses carry nothing the client needs; intentionally a no-op.
	protected void ProcessReleaseResponse(IResponse response)
	{
	}

	protected ValueTask ProcessReleaseResponseAsync(IResponse response, CancellationToken cancellationToken = default)
	{
		return ValueTask.CompletedTask;
	}

	/// <summary>
	/// Maps the completion state to one ExecuteResultItem per processed message:
	/// detailed errors take precedence, then summary-only errors, else success.
	/// Messages beyond the reported update counts get RecordsAffected = -1.
	/// </summary>
	protected ExecuteResultItem[] BuildResult(BatchCompletionStateResponse response)
	{
		var detailedErrors = response.DetailedErrors.ToDictionary(x => x.Item1, x => x.Item2);
		var additionalErrorsPerMessage = response.AdditionalErrorsPerMessage.ToHashSet();
		var result = new ExecuteResultItem[response.ProcessedMessages];
		for (var i = 0; i < result.Length; i++)
		{
			var recordsAffected = i < response.UpdatedRecordsPerMessage.Length
				? response.UpdatedRecordsPerMessage[i]
				: -1;
			if (detailedErrors.TryGetValue(i, out var exception))
			{
				result[i] = new ExecuteResultItem()
				{
					RecordsAffected = recordsAffected,
					IsError = true,
					Exception = exception,
				};
			}
			else if (additionalErrorsPerMessage.Contains(i))
			{
				result[i] = new ExecuteResultItem()
				{
					RecordsAffected = recordsAffected,
					IsError = true,
					Exception = null,
				};
			}
			else
			{
				result[i] = new ExecuteResultItem()
				{
					RecordsAffected = recordsAffected,
					IsError = false,
					Exception = null,
				};
			}
		}
		return result;
	}

	// Serializes each message's parameters via the statement; index i = message i.
	protected byte[][] GetParametersData(int count, IDescriptorFiller descriptorFiller)
	{
		var parametersData = new byte[count][];
		for (var i = 0; i < parametersData.Length; i++)
		{
			parametersData[i] = _statement.GetParameterData(descriptorFiller, i);
		}
		return parametersData;
	}

	protected async ValueTask<byte[][]> GetParametersDataAsync(int count, IDescriptorFiller descriptorFiller, CancellationToken cancellationToken = default)
	{
		var parametersData = new byte[count][];
		for (var i = 0; i < parametersData.Length; i++)
		{
			parametersData[i] = await _statement.GetParameterDataAsync(descriptorFiller, i, cancellationToken).ConfigureAwait(false);
		}
		return parametersData;
	}
}
You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. */ //$Authors = Jiri Cincura (jiri@cincura.net) using System.IO; using System.Linq; using System.Text; using System.Threading; using System.Threading.Tasks; using FirebirdSql.Data.Common; namespace FirebirdSql.Data.Client.Managed.Version16; internal class GdsDatabase : Version15.GdsDatabase { public GdsDatabase(GdsConnection connection) : base(connection) { } protected internal override IResponse ProcessCryptCallbackResponseIfNeeded(IResponse response, byte[] cryptKey) { while (response is Version15.CryptKeyCallbackResponse cryptKeyCallbackResponse) { Xdr.Write(IscCodes.op_crypt_key_callback); Xdr.WriteBuffer(cryptKey); Xdr.Write(cryptKeyCallbackResponse.Size); Xdr.Flush(); response = ReadResponse(); } return response; } protected internal override async ValueTask ProcessCryptCallbackResponseIfNeededAsync(IResponse response, byte[] cryptKey, CancellationToken cancellationToken = default) { while (response is Version15.CryptKeyCallbackResponse cryptKeyCallbackResponse) { await Xdr.WriteAsync(IscCodes.op_crypt_key_callback, cancellationToken).ConfigureAwait(false); await Xdr.WriteBufferAsync(cryptKey, cancellationToken).ConfigureAwait(false); await Xdr.WriteAsync(cryptKeyCallbackResponse.Size).ConfigureAwait(false); await Xdr.FlushAsync(cancellationToken).ConfigureAwait(false); response = await ReadResponseAsync(cancellationToken).ConfigureAwait(false); } return response; } public override StatementBase CreateStatement() { return new GdsStatement(this); } public override StatementBase CreateStatement(TransactionBase transaction) { return new GdsStatement(this, (Version10.GdsTransaction)transaction); } } 
================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/Version16/GdsServiceManager.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. */ //$Authors = Jiri Cincura (jiri@cincura.net) using System.IO; using System.Linq; using System.Threading; using System.Threading.Tasks; using FirebirdSql.Data.Common; namespace FirebirdSql.Data.Client.Managed.Version16; internal class GdsServiceManager : Version15.GdsServiceManager { public GdsServiceManager(GdsConnection connection) : base(connection) { } protected override Version10.GdsDatabase CreateDatabase(GdsConnection connection) { return new GdsDatabase(connection); } } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/Version16/GdsStatement.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. 
*/ //$Authors = Jiri Cincura (jiri@cincura.net) using System; using System.Threading; using System.Threading.Tasks; using FirebirdSql.Data.Common; namespace FirebirdSql.Data.Client.Managed.Version16; internal class GdsStatement : Version15.GdsStatement { public GdsStatement(GdsDatabase database) : base(database) { } public GdsStatement(GdsDatabase database, Version10.GdsTransaction transaction) : base(database, transaction) { } protected override void SendExecuteToBuffer(int timeout, IDescriptorFiller descriptorFiller) { base.SendExecuteToBuffer(timeout, descriptorFiller); _database.Xdr.Write(timeout); } protected override async ValueTask SendExecuteToBufferAsync(int timeout, IDescriptorFiller descriptorFiller, CancellationToken cancellationToken = default) { await base.SendExecuteToBufferAsync(timeout, descriptorFiller, cancellationToken).ConfigureAwait(false); await _database.Xdr.WriteAsync(timeout, cancellationToken).ConfigureAwait(false); } public override BatchBase CreateBatch() { return new GdsBatch(this); } public override BatchParameterBuffer CreateBatchParameterBuffer() { return new BatchParameterBuffer(Database.Charset.Encoding); } } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Managed/XdrReaderWriter.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. 
*/ //$Authors = Jiri Cincura (jiri@cincura.net) using System; using System.Diagnostics; using System.IO; using System.Linq; using System.Numerics; using System.Threading; using System.Threading.Tasks; using FirebirdSql.Data.Common; using FirebirdSql.Data.Types; namespace FirebirdSql.Data.Client.Managed; sealed class XdrReaderWriter : IXdrReader, IXdrWriter { readonly IDataProvider _dataProvider; readonly Charset _charset; byte[] _smallBuffer; public XdrReaderWriter(IDataProvider dataProvider, Charset charset) { _dataProvider = dataProvider; _charset = charset; _smallBuffer = new byte[8]; } public XdrReaderWriter(IDataProvider dataProvider) : this(dataProvider, Charset.DefaultCharset) { } #region Read public byte[] ReadBytes(byte[] buffer, int count) { if (count > 0) { var toRead = count; var currentlyRead = -1; while (toRead > 0 && currentlyRead != 0) { toRead -= (currentlyRead = _dataProvider.Read(buffer, count - toRead, toRead)); } if (currentlyRead == 0) { if (_dataProvider is ITracksIOFailure tracksIOFailure) { tracksIOFailure.IOFailed = true; } throw new IOException($"Missing {toRead} bytes to fill total {count}."); } } return buffer; } public async ValueTask ReadBytesAsync(byte[] buffer, int count, CancellationToken cancellationToken = default) { if (count > 0) { var toRead = count; var currentlyRead = -1; while (toRead > 0 && currentlyRead != 0) { toRead -= (currentlyRead = await _dataProvider.ReadAsync(buffer, count - toRead, toRead, cancellationToken).ConfigureAwait(false)); } if (currentlyRead == 0) { if (_dataProvider is ITracksIOFailure tracksIOFailure) { tracksIOFailure.IOFailed = true; } throw new IOException($"Missing {toRead} bytes to fill total {count}."); } } return buffer; } public byte[] ReadOpaque(int length) { var buffer = new byte[length]; ReadBytes(buffer, length); ReadPad((4 - length) & 3); return buffer; } public async ValueTask ReadOpaqueAsync(int length, CancellationToken cancellationToken = default) { var buffer = new byte[length]; 
await ReadBytesAsync(buffer, length, cancellationToken).ConfigureAwait(false); await ReadPadAsync((4 - length) & 3, cancellationToken).ConfigureAwait(false); return buffer; } public byte[] ReadBuffer() { return ReadOpaque((ushort)ReadInt32()); } public async ValueTask ReadBufferAsync(CancellationToken cancellationToken = default) { return await ReadOpaqueAsync((ushort)await ReadInt32Async(cancellationToken).ConfigureAwait(false), cancellationToken).ConfigureAwait(false); } public string ReadString() { return ReadString(_charset); } public ValueTask ReadStringAsync(CancellationToken cancellationToken = default) { return ReadStringAsync(_charset, cancellationToken); } public string ReadString(int length) { return ReadString(_charset, length); } public ValueTask ReadStringAsync(int length, CancellationToken cancellationToken = default) { return ReadStringAsync(_charset, length, cancellationToken); } public string ReadString(Charset charset) { return ReadString(charset, ReadInt32()); } public async ValueTask ReadStringAsync(Charset charset, CancellationToken cancellationToken = default) { return await ReadStringAsync(charset, await ReadInt32Async(cancellationToken).ConfigureAwait(false), cancellationToken).ConfigureAwait(false); } public string ReadString(Charset charset, int length) { var buffer = ReadOpaque(length); return charset.GetString(buffer, 0, buffer.Length); } public async ValueTask ReadStringAsync(Charset charset, int length, CancellationToken cancellationToken = default) { var buffer = await ReadOpaqueAsync(length, cancellationToken).ConfigureAwait(false); return charset.GetString(buffer, 0, buffer.Length); } public short ReadInt16() { return Convert.ToInt16(ReadInt32()); } public async ValueTask ReadInt16Async(CancellationToken cancellationToken = default) { return Convert.ToInt16(await ReadInt32Async(cancellationToken).ConfigureAwait(false)); } public int ReadInt32() { ReadBytes(_smallBuffer, 4); return TypeDecoder.DecodeInt32(_smallBuffer); } public 
async ValueTask ReadInt32Async(CancellationToken cancellationToken = default) { await ReadBytesAsync(_smallBuffer, 4, cancellationToken).ConfigureAwait(false); return TypeDecoder.DecodeInt32(_smallBuffer); } public long ReadInt64() { ReadBytes(_smallBuffer, 8); return TypeDecoder.DecodeInt64(_smallBuffer); } public async ValueTask ReadInt64Async(CancellationToken cancellationToken = default) { await ReadBytesAsync(_smallBuffer, 8, cancellationToken).ConfigureAwait(false); return TypeDecoder.DecodeInt64(_smallBuffer); } public Guid ReadGuid(int sqlType) { if (sqlType == IscCodes.SQL_VARYING) { return TypeDecoder.DecodeGuid(ReadBuffer()); } else { return TypeDecoder.DecodeGuid(ReadOpaque(16)); } } public async ValueTask ReadGuidAsync(int sqlType, CancellationToken cancellationToken = default) { if (sqlType == IscCodes.SQL_VARYING) { return TypeDecoder.DecodeGuid(await ReadBufferAsync(cancellationToken).ConfigureAwait(false)); } else { return TypeDecoder.DecodeGuid(await ReadOpaqueAsync(16, cancellationToken).ConfigureAwait(false)); } } public float ReadSingle() { return BitConverter.ToSingle(BitConverter.GetBytes(ReadInt32()), 0); } public async ValueTask ReadSingleAsync(CancellationToken cancellationToken = default) { return BitConverter.ToSingle(BitConverter.GetBytes(await ReadInt32Async(cancellationToken).ConfigureAwait(false)), 0); } public double ReadDouble() { return BitConverter.ToDouble(BitConverter.GetBytes(ReadInt64()), 0); } public async ValueTask ReadDoubleAsync(CancellationToken cancellationToken = default) { return BitConverter.ToDouble(BitConverter.GetBytes(await ReadInt64Async(cancellationToken).ConfigureAwait(false)), 0); } public DateTime ReadDateTime() { var date = ReadDate(); var time = ReadTime(); return date.Add(time); } public async ValueTask ReadDateTimeAsync(CancellationToken cancellationToken = default) { var date = await ReadDateAsync(cancellationToken).ConfigureAwait(false); var time = await 
ReadTimeAsync(cancellationToken).ConfigureAwait(false); return date.Add(time); } public DateTime ReadDate() { return TypeDecoder.DecodeDate(ReadInt32()); } public async ValueTask ReadDateAsync(CancellationToken cancellationToken = default) { return TypeDecoder.DecodeDate(await ReadInt32Async(cancellationToken).ConfigureAwait(false)); } public TimeSpan ReadTime() { return TypeDecoder.DecodeTime(ReadInt32()); } public async ValueTask ReadTimeAsync(CancellationToken cancellationToken = default) { return TypeDecoder.DecodeTime(await ReadInt32Async(cancellationToken).ConfigureAwait(false)); } public decimal ReadDecimal(int type, int scale) { switch (type & ~1) { case IscCodes.SQL_SHORT: return TypeDecoder.DecodeDecimal(ReadInt16(), scale, type); case IscCodes.SQL_LONG: return TypeDecoder.DecodeDecimal(ReadInt32(), scale, type); case IscCodes.SQL_QUAD: case IscCodes.SQL_INT64: return TypeDecoder.DecodeDecimal(ReadInt64(), scale, type); case IscCodes.SQL_DOUBLE: case IscCodes.SQL_D_FLOAT: return TypeDecoder.DecodeDecimal(ReadDouble(), scale, type); case IscCodes.SQL_INT128: return TypeDecoder.DecodeDecimal(ReadInt128(), scale, type); default: throw new ArgumentOutOfRangeException(nameof(type), $"{nameof(type)}={type}"); } } public async ValueTask ReadDecimalAsync(int type, int scale, CancellationToken cancellationToken = default) { switch (type & ~1) { case IscCodes.SQL_SHORT: return TypeDecoder.DecodeDecimal(await ReadInt16Async(cancellationToken).ConfigureAwait(false), scale, type); case IscCodes.SQL_LONG: return TypeDecoder.DecodeDecimal(await ReadInt32Async(cancellationToken).ConfigureAwait(false), scale, type); case IscCodes.SQL_QUAD: case IscCodes.SQL_INT64: return TypeDecoder.DecodeDecimal(await ReadInt64Async(cancellationToken).ConfigureAwait(false), scale, type); case IscCodes.SQL_DOUBLE: case IscCodes.SQL_D_FLOAT: return TypeDecoder.DecodeDecimal(await ReadDoubleAsync(cancellationToken).ConfigureAwait(false), scale, type); case IscCodes.SQL_INT128: return 
TypeDecoder.DecodeDecimal(await ReadInt128Async(cancellationToken).ConfigureAwait(false), scale, type); default: throw new ArgumentOutOfRangeException(nameof(type), $"{nameof(type)}={type}"); } } public bool ReadBoolean() { return TypeDecoder.DecodeBoolean(ReadOpaque(1)); } public async ValueTask ReadBooleanAsync(CancellationToken cancellationToken = default) { return TypeDecoder.DecodeBoolean(await ReadOpaqueAsync(1, cancellationToken).ConfigureAwait(false)); } public FbZonedDateTime ReadZonedDateTime(bool isExtended) { var dt = ReadDateTime(); dt = DateTime.SpecifyKind(dt, DateTimeKind.Utc); return TypeHelper.CreateZonedDateTime(dt, (ushort)ReadInt16(), isExtended ? ReadInt16() : (short?)null); } public async ValueTask ReadZonedDateTimeAsync(bool isExtended, CancellationToken cancellationToken = default) { var dt = await ReadDateTimeAsync(cancellationToken).ConfigureAwait(false); dt = DateTime.SpecifyKind(dt, DateTimeKind.Utc); return TypeHelper.CreateZonedDateTime(dt, (ushort)await ReadInt16Async(cancellationToken).ConfigureAwait(false), isExtended ? await ReadInt16Async(cancellationToken).ConfigureAwait(false) : (short?)null); } public FbZonedTime ReadZonedTime(bool isExtended) { return TypeHelper.CreateZonedTime(ReadTime(), (ushort)ReadInt16(), isExtended ? ReadInt16() : (short?)null); } public async ValueTask ReadZonedTimeAsync(bool isExtended, CancellationToken cancellationToken = default) { return TypeHelper.CreateZonedTime(await ReadTimeAsync(cancellationToken).ConfigureAwait(false), (ushort)await ReadInt16Async(cancellationToken).ConfigureAwait(false), isExtended ? 
await ReadInt16Async(cancellationToken).ConfigureAwait(false) : (short?)null); } public FbDecFloat ReadDec16() { return TypeDecoder.DecodeDec16(ReadOpaque(8)); } public async ValueTask ReadDec16Async(CancellationToken cancellationToken = default) { return TypeDecoder.DecodeDec16(await ReadOpaqueAsync(8, cancellationToken).ConfigureAwait(false)); } public FbDecFloat ReadDec34() { return TypeDecoder.DecodeDec34(ReadOpaque(16)); } public async ValueTask ReadDec34Async(CancellationToken cancellationToken = default) { return TypeDecoder.DecodeDec34(await ReadOpaqueAsync(16, cancellationToken).ConfigureAwait(false)); } public BigInteger ReadInt128() { return TypeDecoder.DecodeInt128(ReadOpaque(16)); } public async ValueTask ReadInt128Async(CancellationToken cancellationToken = default) { return TypeDecoder.DecodeInt128(await ReadOpaqueAsync(16, cancellationToken).ConfigureAwait(false)); } public IscException ReadStatusVector() { IscException exception = null; var eof = false; while (!eof) { var arg = ReadInt32(); switch (arg) { case IscCodes.isc_arg_gds: default: var er = ReadInt32(); if (er != 0) { if (exception == null) { exception = IscException.ForBuilding(); } exception.Errors.Add(new IscError(arg, er)); } break; case IscCodes.isc_arg_end: exception?.BuildExceptionData(); eof = true; break; case IscCodes.isc_arg_interpreted: case IscCodes.isc_arg_string: case IscCodes.isc_arg_sql_state: exception.Errors.Add(new IscError(arg, ReadString())); break; case IscCodes.isc_arg_number: exception.Errors.Add(new IscError(arg, ReadInt32())); break; } } return exception; } public async ValueTask ReadStatusVectorAsync(CancellationToken cancellationToken = default) { IscException exception = null; var eof = false; while (!eof) { var arg = await ReadInt32Async(cancellationToken).ConfigureAwait(false); switch (arg) { case IscCodes.isc_arg_gds: default: var er = await ReadInt32Async(cancellationToken).ConfigureAwait(false); if (er != 0) { if (exception == null) { exception = 
IscException.ForBuilding(); } exception.Errors.Add(new IscError(arg, er)); } break; case IscCodes.isc_arg_end: exception?.BuildExceptionData(); eof = true; break; case IscCodes.isc_arg_interpreted: case IscCodes.isc_arg_string: case IscCodes.isc_arg_sql_state: exception.Errors.Add(new IscError(arg, await ReadStringAsync(cancellationToken).ConfigureAwait(false))); break; case IscCodes.isc_arg_number: exception.Errors.Add(new IscError(arg, await ReadInt32Async(cancellationToken).ConfigureAwait(false))); break; } } return exception; } /* loop as long as we are receiving dummy packets, just * throwing them away--note that if we are a server we won't * be receiving them, but it is better to check for them at * this level rather than try to catch them in all places where * this routine is called */ public int ReadOperation() { int operation; do { operation = ReadInt32(); } while (operation == IscCodes.op_dummy); return operation; } public async ValueTask ReadOperationAsync(CancellationToken cancellationToken = default) { int operation; do { operation = await ReadInt32Async(cancellationToken).ConfigureAwait(false); } while (operation == IscCodes.op_dummy); return operation; } #endregion #region Write public void Flush() { _dataProvider.Flush(); } public ValueTask FlushAsync(CancellationToken cancellationToken = default) { return _dataProvider.FlushAsync(cancellationToken); } public void WriteBytes(byte[] buffer, int count) { _dataProvider.Write(buffer, 0, count); } public ValueTask WriteBytesAsync(byte[] buffer, int count, CancellationToken cancellationToken = default) { return _dataProvider.WriteAsync(buffer, 0, count, cancellationToken); } public void WriteOpaque(byte[] buffer) { WriteOpaque(buffer, buffer.Length); } public ValueTask WriteOpaqueAsync(byte[] buffer, CancellationToken cancellationToken = default) { return WriteOpaqueAsync(buffer, buffer.Length, cancellationToken); } public void WriteOpaque(byte[] buffer, int length) { if (buffer != null && length > 0) { 
_dataProvider.Write(buffer, 0, buffer.Length); WriteFill(length - buffer.Length); WritePad((4 - length) & 3); } } public async ValueTask WriteOpaqueAsync(byte[] buffer, int length, CancellationToken cancellationToken = default) { if (buffer != null && length > 0) { await _dataProvider.WriteAsync(buffer, 0, buffer.Length, cancellationToken).ConfigureAwait(false); await WriteFillAsync(length - buffer.Length, cancellationToken).ConfigureAwait(false); await WritePadAsync((4 - length) & 3, cancellationToken).ConfigureAwait(false); } } public void WriteBuffer(byte[] buffer) { WriteBuffer(buffer, buffer?.Length ?? 0); } public ValueTask WriteBufferAsync(byte[] buffer, CancellationToken cancellationToken = default) { return WriteBufferAsync(buffer, buffer?.Length ?? 0, cancellationToken); } public void WriteBuffer(byte[] buffer, int length) { Write(length); if (buffer != null && length > 0) { _dataProvider.Write(buffer, 0, length); WritePad((4 - length) & 3); } } public async ValueTask WriteBufferAsync(byte[] buffer, int length, CancellationToken cancellationToken = default) { await WriteAsync(length, cancellationToken).ConfigureAwait(false); if (buffer != null && length > 0) { await _dataProvider.WriteAsync(buffer, 0, length, cancellationToken).ConfigureAwait(false); await WritePadAsync((4 - length) & 3, cancellationToken).ConfigureAwait(false); } } public void WriteBlobBuffer(byte[] buffer) { var length = buffer.Length; // 2 for short for buffer length if (length > short.MaxValue) throw new IOException("Blob buffer too big."); Write(length + 2); Write(length + 2); //bizarre but true! 
three copies of the length _dataProvider.Write(new[] { (byte)((length >> 0) & 0xff), (byte)((length >> 8) & 0xff) }, 0, 2); _dataProvider.Write(buffer, 0, length); WritePad((4 - length + 2) & 3); } public async ValueTask WriteBlobBufferAsync(byte[] buffer, CancellationToken cancellationToken = default) { var length = buffer.Length; // 2 for short for buffer length if (length > short.MaxValue) throw new IOException("Blob buffer too big."); await WriteAsync(length + 2, cancellationToken).ConfigureAwait(false); await WriteAsync(length + 2, cancellationToken).ConfigureAwait(false); //bizarre but true! three copies of the length await _dataProvider.WriteAsync(new[] { (byte)((length >> 0) & 0xff), (byte)((length >> 8) & 0xff) }, 0, 2, cancellationToken).ConfigureAwait(false); await _dataProvider.WriteAsync(buffer, 0, length, cancellationToken).ConfigureAwait(false); await WritePadAsync((4 - length + 2) & 3, cancellationToken).ConfigureAwait(false); } public void WriteTyped(int type, byte[] buffer) { int length; if (buffer == null) { Write(1); _dataProvider.Write(new[] { (byte)type }, 0, 1); length = 1; } else { length = buffer.Length + 1; Write(length); _dataProvider.Write(new[] { (byte)type }, 0, 1); _dataProvider.Write(buffer, 0, buffer.Length); } WritePad((4 - length) & 3); } public async ValueTask WriteTypedAsync(int type, byte[] buffer, CancellationToken cancellationToken = default) { int length; if (buffer == null) { await WriteAsync(1, cancellationToken).ConfigureAwait(false); await _dataProvider.WriteAsync(new[] { (byte)type }, 0, 1, cancellationToken).ConfigureAwait(false); length = 1; } else { length = buffer.Length + 1; await WriteAsync(length, cancellationToken).ConfigureAwait(false); await _dataProvider.WriteAsync(new[] { (byte)type }, 0, 1, cancellationToken).ConfigureAwait(false); await _dataProvider.WriteAsync(buffer, 0, buffer.Length, cancellationToken).ConfigureAwait(false); } await WritePadAsync((4 - length) & 3, 
cancellationToken).ConfigureAwait(false); } public void Write(string value) { var buffer = _charset.GetBytes(value); WriteBuffer(buffer, buffer.Length); } public ValueTask WriteAsync(string value, CancellationToken cancellationToken = default) { var buffer = _charset.GetBytes(value); return WriteBufferAsync(buffer, buffer.Length, cancellationToken); } public void Write(short value) { Write((int)value); } public ValueTask WriteAsync(short value, CancellationToken cancellationToken = default) { return WriteAsync((int)value, cancellationToken); } public void Write(int value) { _dataProvider.Write(TypeEncoder.EncodeInt32(value), 0, 4); } public ValueTask WriteAsync(int value, CancellationToken cancellationToken = default) { return _dataProvider.WriteAsync(TypeEncoder.EncodeInt32(value), 0, 4, cancellationToken); } public void Write(long value) { _dataProvider.Write(TypeEncoder.EncodeInt64(value), 0, 8); } public ValueTask WriteAsync(long value, CancellationToken cancellationToken = default) { return _dataProvider.WriteAsync(TypeEncoder.EncodeInt64(value), 0, 8, cancellationToken); } public void Write(float value) { var buffer = BitConverter.GetBytes(value); Write(BitConverter.ToInt32(buffer, 0)); } public ValueTask WriteAsync(float value, CancellationToken cancellationToken = default) { var buffer = BitConverter.GetBytes(value); return WriteAsync(BitConverter.ToInt32(buffer, 0), cancellationToken); } public void Write(double value) { var buffer = BitConverter.GetBytes(value); Write(BitConverter.ToInt64(buffer, 0)); } public ValueTask WriteAsync(double value, CancellationToken cancellationToken = default) { var buffer = BitConverter.GetBytes(value); return WriteAsync(BitConverter.ToInt64(buffer, 0), cancellationToken); } public void Write(decimal value, int type, int scale) { var numeric = TypeEncoder.EncodeDecimal(value, scale, type); switch (type & ~1) { case IscCodes.SQL_SHORT: Write((short)numeric); break; case IscCodes.SQL_LONG: Write((int)numeric); break; case 
IscCodes.SQL_QUAD: case IscCodes.SQL_INT64: Write((long)numeric); break; case IscCodes.SQL_DOUBLE: case IscCodes.SQL_D_FLOAT: Write((double)numeric); break; case IscCodes.SQL_INT128: Write((BigInteger)numeric); break; default: throw new ArgumentOutOfRangeException(nameof(type), $"{nameof(type)}={type}"); } } public ValueTask WriteAsync(decimal value, int type, int scale, CancellationToken cancellationToken = default) { var numeric = TypeEncoder.EncodeDecimal(value, scale, type); switch (type & ~1) { case IscCodes.SQL_SHORT: return WriteAsync((short)numeric, cancellationToken); case IscCodes.SQL_LONG: return WriteAsync((int)numeric, cancellationToken); case IscCodes.SQL_QUAD: case IscCodes.SQL_INT64: return WriteAsync((long)numeric, cancellationToken); case IscCodes.SQL_DOUBLE: case IscCodes.SQL_D_FLOAT: return WriteAsync((double)numeric, cancellationToken); case IscCodes.SQL_INT128: return WriteAsync((BigInteger)numeric, cancellationToken); default: throw new ArgumentOutOfRangeException(nameof(type), $"{nameof(type)}={type}"); } } public void Write(bool value) { WriteOpaque(TypeEncoder.EncodeBoolean(value)); } public ValueTask WriteAsync(bool value, CancellationToken cancellationToken = default) { return WriteOpaqueAsync(TypeEncoder.EncodeBoolean(value), cancellationToken); } public void Write(DateTime value) { WriteDate(value); WriteTime(TypeHelper.DateTimeTimeToTimeSpan(value)); } public async ValueTask WriteAsync(DateTime value, CancellationToken cancellationToken = default) { await WriteDateAsync(value, cancellationToken).ConfigureAwait(false); await WriteTimeAsync(TypeHelper.DateTimeTimeToTimeSpan(value), cancellationToken).ConfigureAwait(false); } public void Write(Guid value, int sqlType) { var bytes = TypeEncoder.EncodeGuid(value); if (sqlType == IscCodes.SQL_VARYING) { WriteBuffer(bytes); } else { WriteOpaque(bytes); } } public ValueTask WriteAsync(Guid value, int sqlType, CancellationToken cancellationToken = default) { var bytes = 
TypeEncoder.EncodeGuid(value);
		if (sqlType == IscCodes.SQL_VARYING)
		{
			return WriteBufferAsync(bytes, cancellationToken);
		}
		else
		{
			return WriteOpaqueAsync(bytes, cancellationToken);
		}
	}

	// Writes a DECFLOAT value as a fixed-size opaque block; 'size' is the declared
	// precision (16 or 34), mapping to the Dec16/Dec34 encodings.
	public void Write(FbDecFloat value, int size)
	{
		WriteOpaque(size switch
		{
			16 => TypeEncoder.EncodeDec16(value),
			34 => TypeEncoder.EncodeDec34(value),
			_ => throw new ArgumentOutOfRangeException(),
		});
	}
	public ValueTask WriteAsync(FbDecFloat value, int size, CancellationToken cancellationToken = default)
	{
		return WriteOpaqueAsync(size switch
		{
			16 => TypeEncoder.EncodeDec16(value),
			34 => TypeEncoder.EncodeDec34(value),
			_ => throw new ArgumentOutOfRangeException(),
		}, cancellationToken);
	}

	// Writes an INT128 value as a 16-byte opaque block.
	public void Write(BigInteger value)
	{
		// Fixed: this synchronous overload previously called WriteOpaqueAsync and
		// discarded the resulting ValueTask, so on a truly asynchronous data provider
		// the write could complete out of order with subsequent synchronous writes.
		// Use the synchronous WriteOpaque, matching every other sync overload here.
		WriteOpaque(TypeEncoder.EncodeInt128(value));
	}
	public ValueTask WriteAsync(BigInteger value, CancellationToken cancellationToken = default)
	{
		return WriteOpaqueAsync(TypeEncoder.EncodeInt128(value), cancellationToken);
	}

	// Writes only the date part of a DateTime as an encoded int32.
	public void WriteDate(DateTime value)
	{
		Write(TypeEncoder.EncodeDate(Convert.ToDateTime(value)));
	}
	public ValueTask WriteDateAsync(DateTime value, CancellationToken cancellationToken = default)
	{
		return WriteAsync(TypeEncoder.EncodeDate(Convert.ToDateTime(value)), cancellationToken);
	}

	// Writes a time-of-day value as an encoded int32.
	public void WriteTime(TimeSpan value)
	{
		Write(TypeEncoder.EncodeTime(value));
	}
	public ValueTask WriteTimeAsync(TimeSpan value, CancellationToken cancellationToken = default)
	{
		return WriteAsync(TypeEncoder.EncodeTime(value), cancellationToken);
	}

	#endregion

	#region Pad + Fill

	// Zero bytes used for 4-byte XDR alignment padding.
	static readonly byte[] PadArray = new byte[] { 0, 0, 0, 0 };
	void WritePad(int length)
	{
		_dataProvider.Write(PadArray, 0, length);
	}
	ValueTask WritePadAsync(int length, CancellationToken cancellationToken = default)
	{
		return _dataProvider.WriteAsync(PadArray, 0, length, cancellationToken);
	}

	// Consumes up to 3 alignment bytes into the scratch buffer (values are discarded).
	void ReadPad(int length)
	{
		Debug.Assert(length < _smallBuffer.Length);
		ReadBytes(_smallBuffer, length);
	}
	async ValueTask ReadPadAsync(int length, CancellationToken cancellationToken = default)
	{
Debug.Assert(length < _smallBuffer.Length); await ReadBytesAsync(_smallBuffer, length, cancellationToken).ConfigureAwait(false); } static readonly byte[] FillArray = Enumerable.Repeat((byte)32, 32767).ToArray(); void WriteFill(int length) { _dataProvider.Write(FillArray, 0, length); } ValueTask WriteFillAsync(int length, CancellationToken cancellationToken = default) { return _dataProvider.WriteAsync(FillArray, 0, length, cancellationToken); } #endregion } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Native/FbClientFactory.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. */ //$Authors = Dean Harding, Jiri Cincura (jiri@cincura.net) using System; using System.Collections.Generic; using System.Linq; using System.Reflection; using System.Reflection.Emit; using System.Runtime.InteropServices; using System.Threading; using FirebirdSql.Data.Client.Native.Handles; using FirebirdSql.Data.Common; namespace FirebirdSql.Data.Client.Native; /// /// This class generates a dynamic class that implements the interface and /// calls the native methods in a given "fbembed.dll" (though you can name it anything you like). /// internal static class FbClientFactory { private static readonly string DefaultDllName = "fbembed"; /// /// Because generating the class at runtime is expensive, we cache it here based on the name /// specified. 
/// private static readonly Dictionary cache; private static readonly ReaderWriterLockSlim cacheLock; private static readonly HashSet injectionTypes; /// /// Static constructor sets up member variables. /// static FbClientFactory() { cache = new Dictionary(); cacheLock = new ReaderWriterLockSlim(LockRecursionPolicy.NoRecursion); injectionTypes = new HashSet(typeof(FbClientFactory).Assembly.GetTypes() .Where(x => !x.IsAbstract && !x.IsInterface) .Where(x => typeof(IFirebirdHandle).IsAssignableFrom(x)) .Select(x => x.MakeByRefType())); } /// /// Dynamically generates a class that will load the "fbembed.dll" given in dllName, and that /// also implements , which you can use to call the library. /// /// The name of the DLL to load (e.g. "fbembed", "C:\path\to\fbembed.dll", etc) /// A class that implements and calls into the native library you specify. public static IFbClient Create(string dllName) { if (string.IsNullOrEmpty(dllName)) { dllName = DefaultDllName; } cacheLock.EnterUpgradeableReadLock(); try { if (cache.TryGetValue(dllName, out var result)) { return result; } else { cacheLock.EnterWriteLock(); try { result = BuildFbClient(dllName); cache.Add(dllName, result); ShutdownHelper.RegisterFbClientShutdown(() => NativeHelpers.CallIfExists(nameof(IFbClient.fb_shutdown), () => result.fb_shutdown(0, 0))); return result; } finally { cacheLock.ExitWriteLock(); } } } finally { cacheLock.ExitUpgradeableReadLock(); } } /// /// This method does the "heavy-lifting" of actually generating a dynamic class that implements /// , and calls the native library specified to do the actual work. /// /// The name of the libarary to use, as passed into the /// that is dynamically generated. /// An implementation of . /// /// Note: To be completly generic, we actually reflect through /// to get the methods and parameters to generate. 
/// private static IFbClient BuildFbClient(string dllName) { // Get the initial TypeBuilder, with a "blank" class definition var tb = CreateTypeBuilder(dllName); // It needs to implement IFbClient, obviously! tb.AddInterfaceImplementation(typeof(IFbClient)); // Now, go through all the methods in IFbClient and generate the corresponding methods // in our dynamic type. foreach (var mi in typeof(IFbClient).GetMethods()) { GenerateMethod(tb, mi, dllName); } // Finally, create and return an instance of the type itself. Woot! return CreateInstance(tb); } /// /// Generates a method on our for the specified /// /// The we're generating our type with. /// The which represents the "template" method. /// The path to the DLL that we'll put in the . private static void GenerateMethod(TypeBuilder tb, MethodInfo mi, string dllName) { // These are all the parameters in our method var pis = new List(mi.GetParameters()); // We need to keep the parameter types and attributes in a separate array. var ptypes = new Type[pis.Count]; var attrs = new ParameterAttributes[pis.Count]; for (var i = 0; i < pis.Count; i++) { ptypes[i] = pis[i].ParameterType; attrs[i] = pis[i].Attributes; } // We actually need to create TWO methods - one for the interface implementation, and one for the // P/Invoke declaration. We'll create the P/Invoke definition first. var smb = tb.DefineMethod( mi.Name, // The name is the same as the interface name // P/Invoke methods need special attributes... MethodAttributes.Static | MethodAttributes.Private | MethodAttributes.HideBySig, mi.ReturnType, ptypes); // Get the type of the DllImportAttribute, which we'll attach to this method var diaType = typeof(DllImportAttribute); // Create a CustomAttributeBuilder for the DLLImportAttribute, specifying the constructor that takes a string argument. 
var ctor = diaType.GetConstructor(new Type[] { typeof(string) }); var cab = new CustomAttributeBuilder(ctor, new object[] { dllName }); // Assign the DllImport attribute to the smb smb.SetCustomAttribute(cab); // Also, any attributes on the actual parameters need to be copied to the P/Invoke declaration as well. for (var i = 0; i < attrs.Length; i++) { smb.DefineParameter(i + 1, attrs[i], pis[i].Name); } // Now create the interface implementation method var mb = tb.DefineMethod( "IFbClient." + mi.Name, // We use the standard "Interface.Method" to do an explicit interface implementation MethodAttributes.Private | MethodAttributes.HideBySig | MethodAttributes.NewSlot | MethodAttributes.Virtual | MethodAttributes.Final, mi.ReturnType, ptypes); // Also, any attributes on the actual parameters need to be copied to the P/Invoke declaration as well. for (var i = 0; i < attrs.Length; i++) { mb.DefineParameter(i + 1, attrs[i], pis[i].Name); } // We need to generate a little IL here to actually call the P/Invoke declaration. 
Luckily for us, since we're just // going to pass our parameters to the P/Invoke method as-is, we don't need to muck with the eval stack ;-) var il = mb.GetILGenerator(); for (var i = 1; i <= pis.Count; i++) { EmitLdarg(il, i); } il.EmitCall(OpCodes.Call, smb, null); EmitClientInjectionToFirebirdHandleOjects(mi.ReturnType, pis, il); il.Emit(OpCodes.Ret); // Define the fact that our IFbClient.Method is the explicit interface implementation of that method tb.DefineMethodOverride(mb, mi); } private static void EmitClientInjectionToFirebirdHandleOjects( Type returnType, List pis, ILGenerator il) { var injectProperties = pis.Select(x => x.ParameterType).Intersect(injectionTypes).Any(); if (injectProperties) { il.DeclareLocal(returnType); il.Emit(OpCodes.Stloc_0); for (var i = 0; i < pis.Count; i++) { if (injectionTypes.Contains(pis[i].ParameterType)) { EmitLdarg(il, i + 1); il.Emit(OpCodes.Ldind_Ref); il.Emit(OpCodes.Ldarg_0); il.Emit(OpCodes.Callvirt, typeof(IFirebirdHandle).GetMethod("SetClient")); il.Emit(OpCodes.Nop); } } il.Emit(OpCodes.Ldloc_0); } } private static void EmitLdarg(ILGenerator il, int i) { if (i == 1) { il.Emit(OpCodes.Ldarg_1); } else if (i == 2) { il.Emit(OpCodes.Ldarg_2); } else if (i == 3) { il.Emit(OpCodes.Ldarg_3); } else { il.Emit(OpCodes.Ldarg_S, (short)i); } } /// /// Creates an instance of the type itself and returns it. Cool!! /// /// The that we created our type with. /// An instance of our type, cast as an . private static IFbClient CreateInstance(TypeBuilder tb) { var t = tb.CreateTypeInfo().AsType(); return (IFbClient)Activator.CreateInstance(t); } /// /// Creates the assembly and module into which we'll generate our class, and returns /// a we can use for building up our type. /// /// The "base name" to use for the name of the assembly and mode. /// A which we can use for building our type. /// /// Notice that we actually generate a new assembly for every different dllName that is /// passed into . 
This might be inefficient, but since we're mostly /// only ever going to have one (or maybe two) different dllNames, it's not a big deal. /// private static TypeBuilder CreateTypeBuilder(string baseName) { baseName = SanitizeBaseName(baseName); // Generate a name for our assembly, based on the name of the DLL. var assemblyName = new AssemblyName(); assemblyName.Name = baseName + "_Assembly"; // We create the dynamic assembly in our current AppDomain var assemblyBuilder = AssemblyBuilder.DefineDynamicAssembly(assemblyName, AssemblyBuilderAccess.Run); // Generate the actual module (which is the DLL itself) var moduleBuilder = assemblyBuilder.DefineDynamicModule(baseName + "_Module"); // Add our type to the module. return moduleBuilder.DefineType(baseName + "_Class", TypeAttributes.Class); } /// /// Because the baseName could include things like '\' and '/' - which are not legal /// type names, we need to "sanitize" the name and make it acceptable. /// /// The "base name" which we'll make sure contains only legal /// identfier characters. /// A new string that is a value type name. private static string SanitizeBaseName(string baseName) { // Note: We could actually go through and replace invalid characters with different // characters, and so on, but that's too much work. Besides, you never really see the // dynamic type name (expect maybe in a stack trace). If you really don't like this method, // you're free to change it ;) return "FB_" + Math.Abs(baseName.GetHashCode()); } } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Native/FesArray.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. 
* * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. */ //$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net) using System; using System.Globalization; using System.IO; using System.Reflection; using System.Threading; using System.Threading.Tasks; using FirebirdSql.Data.Client.Native.Marshalers; using FirebirdSql.Data.Common; namespace FirebirdSql.Data.Client.Native; internal sealed class FesArray : ArrayBase { #region Fields private long _handle; private FesDatabase _database; private FesTransaction _transaction; private IntPtr[] _statusVector; #endregion #region Properties public override long Handle { get { return _handle; } set { _handle = value; } } public override DatabaseBase Database { get { return _database; } set { _database = (FesDatabase)value; } } public override TransactionBase Transaction { get { return _transaction; } set { _transaction = (FesTransaction)value; } } #endregion #region Constructors public FesArray(ArrayDesc descriptor) : base(descriptor) { _statusVector = new IntPtr[IscCodes.ISC_STATUS_LENGTH]; } public FesArray(FesDatabase database, FesTransaction transaction, string tableName, string fieldName) : this(database, transaction, -1, tableName, fieldName) { } public FesArray(FesDatabase database, FesTransaction transaction, long handle, string tableName, string fieldName) : base(tableName, fieldName) { _database = database; _transaction = transaction; _handle = handle; _statusVector = new IntPtr[IscCodes.ISC_STATUS_LENGTH]; } #endregion #region Methods public override byte[] GetSlice(int sliceLength) { ClearStatusVector(); var dbHandle = _database.HandlePtr; var trHandle = _transaction.HandlePtr; var arrayDesc = ArrayDescMarshaler.MarshalManagedToNative(Descriptor); var buffer = new byte[sliceLength]; 
_database.FbClient.isc_array_get_slice( _statusVector, ref dbHandle, ref trHandle, ref _handle, arrayDesc, buffer, ref sliceLength); ArrayDescMarshaler.CleanUpNativeData(ref arrayDesc); _database.ProcessStatusVector(_statusVector); return buffer; } public override ValueTask GetSliceAsync(int sliceLength, CancellationToken cancellationToken = default) { ClearStatusVector(); var dbHandle = _database.HandlePtr; var trHandle = _transaction.HandlePtr; var arrayDesc = ArrayDescMarshaler.MarshalManagedToNative(Descriptor); var buffer = new byte[sliceLength]; _database.FbClient.isc_array_get_slice( _statusVector, ref dbHandle, ref trHandle, ref _handle, arrayDesc, buffer, ref sliceLength); ArrayDescMarshaler.CleanUpNativeData(ref arrayDesc); _database.ProcessStatusVector(_statusVector); return ValueTask.FromResult(buffer); } public override void PutSlice(Array sourceArray, int sliceLength) { ClearStatusVector(); var dbHandle = _database.HandlePtr; var trHandle = _transaction.HandlePtr; var arrayDesc = ArrayDescMarshaler.MarshalManagedToNative(Descriptor); var systemType = GetSystemType(); var buffer = new byte[sliceLength]; if (systemType.GetTypeInfo().IsPrimitive) { Buffer.BlockCopy(sourceArray, 0, buffer, 0, buffer.Length); } else { buffer = EncodeSlice(Descriptor, sourceArray, sliceLength); } _database.FbClient.isc_array_put_slice( _statusVector, ref dbHandle, ref trHandle, ref _handle, arrayDesc, buffer, ref sliceLength); ArrayDescMarshaler.CleanUpNativeData(ref arrayDesc); _database.ProcessStatusVector(_statusVector); } public override ValueTask PutSliceAsync(Array sourceArray, int sliceLength, CancellationToken cancellationToken = default) { ClearStatusVector(); var dbHandle = _database.HandlePtr; var trHandle = _transaction.HandlePtr; var arrayDesc = ArrayDescMarshaler.MarshalManagedToNative(Descriptor); var systemType = GetSystemType(); var buffer = new byte[sliceLength]; if (systemType.GetTypeInfo().IsPrimitive) { Buffer.BlockCopy(sourceArray, 0, buffer, 0, 
buffer.Length); } else { buffer = EncodeSlice(Descriptor, sourceArray, sliceLength); } _database.FbClient.isc_array_put_slice( _statusVector, ref dbHandle, ref trHandle, ref _handle, arrayDesc, buffer, ref sliceLength); ArrayDescMarshaler.CleanUpNativeData(ref arrayDesc); _database.ProcessStatusVector(_statusVector); return ValueTask.CompletedTask; } #endregion #region Protected Methods protected override Array DecodeSlice(byte[] slice) { Array sliceData = null; var slicePosition = 0; var type = 0; var dbType = DbDataType.Array; var systemType = GetSystemType(); var charset = _database.Charset; var lengths = new int[Descriptor.Dimensions]; var lowerBounds = new int[Descriptor.Dimensions]; for (var i = 0; i < Descriptor.Dimensions; i++) { lowerBounds[i] = Descriptor.Bounds[i].LowerBound; lengths[i] = Descriptor.Bounds[i].UpperBound; if (lowerBounds[i] == 0) { lengths[i]++; } } sliceData = Array.CreateInstance(systemType, lengths, lowerBounds); var tempData = Array.CreateInstance(systemType, sliceData.Length); type = TypeHelper.GetSqlTypeFromBlrType(Descriptor.DataType); dbType = TypeHelper.GetDbDataTypeFromBlrType(Descriptor.DataType, 0, Descriptor.Scale); int itemLength = Descriptor.Length; for (var i = 0; i < tempData.Length; i++) { if (slicePosition >= slice.Length) { break; } switch (dbType) { case DbDataType.Char: tempData.SetValue(charset.GetString(slice, slicePosition, itemLength), i); break; case DbDataType.VarChar: { var index = slicePosition; var count = 0; while (slice[index++] != 0) { count++; } tempData.SetValue(charset.GetString(slice, slicePosition, count), i); slicePosition += 2; } break; case DbDataType.SmallInt: tempData.SetValue(BitConverter.ToInt16(slice, slicePosition), i); break; case DbDataType.Integer: tempData.SetValue(BitConverter.ToInt32(slice, slicePosition), i); break; case DbDataType.BigInt: tempData.SetValue(BitConverter.ToInt64(slice, slicePosition), i); break; case DbDataType.Decimal: case DbDataType.Numeric: { object evalue = null; 
switch (type) { case IscCodes.SQL_SHORT: evalue = BitConverter.ToInt16(slice, slicePosition); break; case IscCodes.SQL_LONG: evalue = BitConverter.ToInt32(slice, slicePosition); break; case IscCodes.SQL_QUAD: case IscCodes.SQL_INT64: evalue = BitConverter.ToInt64(slice, slicePosition); break; } var dvalue = TypeDecoder.DecodeDecimal(evalue, Descriptor.Scale, type); tempData.SetValue(dvalue, i); } break; case DbDataType.Double: tempData.SetValue(BitConverter.ToDouble(slice, slicePosition), i); break; case DbDataType.Float: tempData.SetValue(BitConverter.ToSingle(slice, slicePosition), i); break; case DbDataType.Date: { var idate = BitConverter.ToInt32(slice, slicePosition); var date = TypeDecoder.DecodeDate(idate); tempData.SetValue(date, i); } break; case DbDataType.Time: { var itime = BitConverter.ToInt32(slice, slicePosition); var time = TypeDecoder.DecodeTime(itime); tempData.SetValue(time, i); } break; case DbDataType.TimeStamp: { var idate = BitConverter.ToInt32(slice, slicePosition); var itime = BitConverter.ToInt32(slice, slicePosition + 4); var date = TypeDecoder.DecodeDate(idate); var time = TypeDecoder.DecodeTime(itime); var timestamp = date.Add(time); tempData.SetValue(timestamp, i); } break; } slicePosition += itemLength; } if (systemType.GetTypeInfo().IsPrimitive) { // For primitive types we can use System.Buffer to copy generated data to destination array Buffer.BlockCopy(tempData, 0, sliceData, 0, Buffer.ByteLength(tempData)); } else { sliceData = tempData; } return sliceData; } protected override ValueTask DecodeSliceAsync(byte[] slice, CancellationToken cancellationToken = default) { Array sliceData = null; var slicePosition = 0; var type = 0; var dbType = DbDataType.Array; var systemType = GetSystemType(); var charset = _database.Charset; var lengths = new int[Descriptor.Dimensions]; var lowerBounds = new int[Descriptor.Dimensions]; for (var i = 0; i < Descriptor.Dimensions; i++) { lowerBounds[i] = Descriptor.Bounds[i].LowerBound; lengths[i] = 
Descriptor.Bounds[i].UpperBound; if (lowerBounds[i] == 0) { lengths[i]++; } } sliceData = Array.CreateInstance(systemType, lengths, lowerBounds); var tempData = Array.CreateInstance(systemType, sliceData.Length); type = TypeHelper.GetSqlTypeFromBlrType(Descriptor.DataType); dbType = TypeHelper.GetDbDataTypeFromBlrType(Descriptor.DataType, 0, Descriptor.Scale); int itemLength = Descriptor.Length; for (var i = 0; i < tempData.Length; i++) { if (slicePosition >= slice.Length) { break; } switch (dbType) { case DbDataType.Char: tempData.SetValue(charset.GetString(slice, slicePosition, itemLength), i); break; case DbDataType.VarChar: { var index = slicePosition; var count = 0; while (slice[index++] != 0) { count++; } tempData.SetValue(charset.GetString(slice, slicePosition, count), i); slicePosition += 2; } break; case DbDataType.SmallInt: tempData.SetValue(BitConverter.ToInt16(slice, slicePosition), i); break; case DbDataType.Integer: tempData.SetValue(BitConverter.ToInt32(slice, slicePosition), i); break; case DbDataType.BigInt: tempData.SetValue(BitConverter.ToInt64(slice, slicePosition), i); break; case DbDataType.Decimal: case DbDataType.Numeric: { object evalue = null; switch (type) { case IscCodes.SQL_SHORT: evalue = BitConverter.ToInt16(slice, slicePosition); break; case IscCodes.SQL_LONG: evalue = BitConverter.ToInt32(slice, slicePosition); break; case IscCodes.SQL_QUAD: case IscCodes.SQL_INT64: evalue = BitConverter.ToInt64(slice, slicePosition); break; } var dvalue = TypeDecoder.DecodeDecimal(evalue, Descriptor.Scale, type); tempData.SetValue(dvalue, i); } break; case DbDataType.Double: tempData.SetValue(BitConverter.ToDouble(slice, slicePosition), i); break; case DbDataType.Float: tempData.SetValue(BitConverter.ToSingle(slice, slicePosition), i); break; case DbDataType.Date: { var idate = BitConverter.ToInt32(slice, slicePosition); var date = TypeDecoder.DecodeDate(idate); tempData.SetValue(date, i); } break; case DbDataType.Time: { var itime = 
BitConverter.ToInt32(slice, slicePosition); var time = TypeDecoder.DecodeTime(itime); tempData.SetValue(time, i); } break; case DbDataType.TimeStamp: { var idate = BitConverter.ToInt32(slice, slicePosition); var itime = BitConverter.ToInt32(slice, slicePosition + 4); var date = TypeDecoder.DecodeDate(idate); var time = TypeDecoder.DecodeTime(itime); var timestamp = date.Add(time); tempData.SetValue(timestamp, i); } break; } slicePosition += itemLength; } if (systemType.GetTypeInfo().IsPrimitive) { // For primitive types we can use System.Buffer to copy generated data to destination array Buffer.BlockCopy(tempData, 0, sliceData, 0, Buffer.ByteLength(tempData)); } else { sliceData = tempData; } return ValueTask.FromResult(sliceData); } #endregion #region Private Metods private void ClearStatusVector() { Array.Clear(_statusVector, 0, _statusVector.Length); } private byte[] EncodeSlice(ArrayDesc desc, Array sourceArray, int length) { using (var ms = new MemoryStream()) { using (var writer = new BinaryWriter(ms)) { var charset = _database.Charset; var dbType = DbDataType.Array; var subType = (Descriptor.Scale < 0) ? 2 : 0; var type = 0; type = TypeHelper.GetSqlTypeFromBlrType(Descriptor.DataType); dbType = TypeHelper.GetDbDataTypeFromBlrType(Descriptor.DataType, subType, Descriptor.Scale); foreach (var source in sourceArray) { switch (dbType) { case DbDataType.Char: { var value = source != null ? (string)source : string.Empty; var buffer = charset.GetBytes(value); writer.Write(buffer); if (desc.Length > buffer.Length) { for (var j = buffer.Length; j < desc.Length; j++) { writer.Write((byte)32); } } } break; case DbDataType.VarChar: { var value = source != null ? 
(string)source : string.Empty; var buffer = charset.GetBytes(value); writer.Write(buffer); if (desc.Length > buffer.Length) { for (var j = buffer.Length; j < desc.Length; j++) { writer.Write((byte)0); } } writer.Write((short)0); } break; case DbDataType.SmallInt: writer.Write((short)source); break; case DbDataType.Integer: writer.Write((int)source); break; case DbDataType.BigInt: writer.Write((long)source); break; case DbDataType.Float: writer.Write((float)source); break; case DbDataType.Double: writer.Write((double)source); break; case DbDataType.Numeric: case DbDataType.Decimal: { var numeric = TypeEncoder.EncodeDecimal((decimal)source, desc.Scale, type); switch (type) { case IscCodes.SQL_SHORT: writer.Write((short)numeric); break; case IscCodes.SQL_LONG: writer.Write((int)numeric); break; case IscCodes.SQL_QUAD: case IscCodes.SQL_INT64: writer.Write((long)numeric); break; } } break; case DbDataType.Date: writer.Write(TypeEncoder.EncodeDate(Convert.ToDateTime(source, CultureInfo.CurrentCulture.DateTimeFormat))); break; case DbDataType.Time: writer.Write(TypeEncoder.EncodeTime((TimeSpan)source)); break; case DbDataType.TimeStamp: var dt = Convert.ToDateTime(source, CultureInfo.CurrentCulture.DateTimeFormat); writer.Write(TypeEncoder.EncodeDate(dt)); writer.Write(TypeEncoder.EncodeTime(TypeHelper.DateTimeTimeToTimeSpan(dt))); break; default: throw TypeHelper.InvalidDataType((int)dbType); } } writer.Flush(); return ms.ToArray(); } } } #endregion } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Native/FesBlob.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. 
* * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. */ //$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net) using System; using System.IO; using System.Threading; using System.Threading.Tasks; using FirebirdSql.Data.Client.Native.Handles; using FirebirdSql.Data.Common; namespace FirebirdSql.Data.Client.Native; internal sealed class FesBlob : BlobBase { #region Fields private FesDatabase _database; private IntPtr[] _statusVector; private BlobHandle _blobHandle; #endregion #region Properties public override DatabaseBase Database { get { return _database; } } public override int Handle { get { return _blobHandle.DangerousGetHandle().AsInt(); } } #endregion #region Constructors public FesBlob(FesDatabase database, FesTransaction transaction) : this(database, transaction, 0) { } public FesBlob(FesDatabase database, FesTransaction transaction, long blobId) : base(database) { _database = database; _transaction = transaction; _position = 0; _blobHandle = new BlobHandle(); _blobId = blobId; _statusVector = new IntPtr[IscCodes.ISC_STATUS_LENGTH]; } #endregion #region Protected Methods public override void Create() { ClearStatusVector(); var dbHandle = _database.HandlePtr; var trHandle = ((FesTransaction)_transaction).HandlePtr; _database.FbClient.isc_create_blob2( _statusVector, ref dbHandle, ref trHandle, ref _blobHandle, ref _blobId, 0, new byte[0]); _database.ProcessStatusVector(_statusVector); _isOpen = true; RblAddValue(IscCodes.RBL_create); } public override ValueTask CreateAsync(CancellationToken cancellationToken = default) { ClearStatusVector(); var dbHandle = _database.HandlePtr; var trHandle = ((FesTransaction)_transaction).HandlePtr; _database.FbClient.isc_create_blob2( _statusVector, ref dbHandle, ref trHandle, ref _blobHandle, ref _blobId, 
0, new byte[0]); _database.ProcessStatusVector(_statusVector); _isOpen = true; RblAddValue(IscCodes.RBL_create); return ValueTask.CompletedTask; } public override void Open() { ClearStatusVector(); var dbHandle = _database.HandlePtr; var trHandle = ((FesTransaction)_transaction).HandlePtr; _database.FbClient.isc_open_blob2( _statusVector, ref dbHandle, ref trHandle, ref _blobHandle, ref _blobId, 0, new byte[0]); _database.ProcessStatusVector(_statusVector); _isOpen = true; } public override ValueTask OpenAsync(CancellationToken cancellationToken = default) { ClearStatusVector(); var dbHandle = _database.HandlePtr; var trHandle = ((FesTransaction)_transaction).HandlePtr; _database.FbClient.isc_open_blob2( _statusVector, ref dbHandle, ref trHandle, ref _blobHandle, ref _blobId, 0, new byte[0]); _database.ProcessStatusVector(_statusVector); _isOpen = true; return ValueTask.CompletedTask; } public override int GetLength() { ClearStatusVector(); var buffer = new byte[20]; _database.FbClient.isc_blob_info( _statusVector, ref _blobHandle, 1, new byte[] { IscCodes.isc_info_blob_total_length }, (short)buffer.Length, buffer); _database.ProcessStatusVector(_statusVector); var length = IscHelper.VaxInteger(buffer, 1, 2); var size = IscHelper.VaxInteger(buffer, 3, (int)length); return (int)size; } public override ValueTask GetLengthAsync(CancellationToken cancellationToken = default) { ClearStatusVector(); var buffer = new byte[20]; _database.FbClient.isc_blob_info( _statusVector, ref _blobHandle, 1, new byte[] { IscCodes.isc_info_blob_total_length }, (short)buffer.Length, buffer); _database.ProcessStatusVector(_statusVector); var length = IscHelper.VaxInteger(buffer, 1, 2); var size = IscHelper.VaxInteger(buffer, 3, (int)length); return ValueTask.FromResult((int)size); } public override void GetSegment(Stream stream) { var requested = (short)SegmentSize; short segmentLength = 0; ClearStatusVector(); var tmp = new byte[requested]; var status = 
_database.FbClient.isc_get_segment( _statusVector, ref _blobHandle, ref segmentLength, requested, tmp); RblRemoveValue(IscCodes.RBL_segment); if (_statusVector[1] == new IntPtr(IscCodes.isc_segstr_eof)) { RblAddValue(IscCodes.RBL_eof_pending); return; } else { if (status == IntPtr.Zero || _statusVector[1] == new IntPtr(IscCodes.isc_segment)) { RblAddValue(IscCodes.RBL_segment); } else { _database.ProcessStatusVector(_statusVector); } } stream.Write(tmp, 0, segmentLength); } public override ValueTask GetSegmentAsync(Stream stream, CancellationToken cancellationToken = default) { var requested = (short)SegmentSize; short segmentLength = 0; ClearStatusVector(); var tmp = new byte[requested]; var status = _database.FbClient.isc_get_segment( _statusVector, ref _blobHandle, ref segmentLength, requested, tmp); RblRemoveValue(IscCodes.RBL_segment); if (_statusVector[1] == new IntPtr(IscCodes.isc_segstr_eof)) { RblAddValue(IscCodes.RBL_eof_pending); return ValueTask.CompletedTask; } else { if (status == IntPtr.Zero || _statusVector[1] == new IntPtr(IscCodes.isc_segment)) { RblAddValue(IscCodes.RBL_segment); } else { _database.ProcessStatusVector(_statusVector); } } stream.Write(tmp, 0, segmentLength); return ValueTask.CompletedTask; } public override byte[] GetSegment() { var requested = (short)(SegmentSize - 2); short segmentLength = 0; ClearStatusVector(); var tmp = new byte[requested]; var status = _database.FbClient.isc_get_segment( _statusVector, ref _blobHandle, ref segmentLength, requested, tmp); RblRemoveValue(IscCodes.RBL_segment); if (_statusVector[1] == new IntPtr(IscCodes.isc_segstr_eof)) { RblAddValue(IscCodes.RBL_eof_pending); return Array.Empty(); } if (status == IntPtr.Zero || _statusVector[1] == new IntPtr(IscCodes.isc_segment)) { RblAddValue(IscCodes.RBL_segment); } else { _database.ProcessStatusVector(_statusVector); } var actualSegment = tmp; if (actualSegment.Length != segmentLength) { tmp = new byte[segmentLength]; Array.Copy(actualSegment, tmp, 
segmentLength); actualSegment = tmp; } return actualSegment; } public override ValueTask GetSegmentAsync(CancellationToken cancellationToken = default) { var requested = (short)SegmentSize; short segmentLength = 0; ClearStatusVector(); var tmp = new byte[requested]; var status = _database.FbClient.isc_get_segment( _statusVector, ref _blobHandle, ref segmentLength, requested, tmp); RblRemoveValue(IscCodes.RBL_segment); if (_statusVector[1] == new IntPtr(IscCodes.isc_segstr_eof)) { RblAddValue(IscCodes.RBL_eof_pending); return ValueTask.FromResult(Array.Empty()); } else { if (status == IntPtr.Zero || _statusVector[1] == new IntPtr(IscCodes.isc_segment)) { RblAddValue(IscCodes.RBL_segment); } else { _database.ProcessStatusVector(_statusVector); } } var actualSegment = tmp; if (actualSegment.Length != segmentLength) { tmp = new byte[segmentLength]; Array.Copy(actualSegment, tmp, segmentLength); actualSegment = tmp; } return ValueTask.FromResult(actualSegment); } public override void PutSegment(byte[] buffer) { ClearStatusVector(); _database.FbClient.isc_put_segment( _statusVector, ref _blobHandle, (short)buffer.Length, buffer); _database.ProcessStatusVector(_statusVector); } public override ValueTask PutSegmentAsync(byte[] buffer, CancellationToken cancellationToken = default) { ClearStatusVector(); _database.FbClient.isc_put_segment( _statusVector, ref _blobHandle, (short)buffer.Length, buffer); _database.ProcessStatusVector(_statusVector); return ValueTask.CompletedTask; } public override void Seek(int position, int seekOperation) { ClearStatusVector(); var resultingPosition = 0; _database.FbClient.isc_seek_blob( _statusVector, ref _blobHandle, (short)seekOperation, position, ref resultingPosition); _database.ProcessStatusVector(_statusVector); } public override ValueTask SeekAsync(int position, int seekOperation, CancellationToken cancellationToken = default) { ClearStatusVector(); var resultingPosition = 0; _database.FbClient.isc_seek_blob( _statusVector, ref 
_blobHandle, (short)seekOperation, position, ref resultingPosition); _database.ProcessStatusVector(_statusVector); return ValueTask.CompletedTask; } public override void Close() { ClearStatusVector(); _database.FbClient.isc_close_blob(_statusVector, ref _blobHandle); _database.ProcessStatusVector(_statusVector); } public override ValueTask CloseAsync(CancellationToken cancellationToken = default) { ClearStatusVector(); _database.FbClient.isc_close_blob(_statusVector, ref _blobHandle); _database.ProcessStatusVector(_statusVector); return ValueTask.CompletedTask; } public override void Cancel() { ClearStatusVector(); _database.FbClient.isc_cancel_blob(_statusVector, ref _blobHandle); _database.ProcessStatusVector(_statusVector); } public override ValueTask CancelAsync(CancellationToken cancellationToken = default) { ClearStatusVector(); _database.FbClient.isc_cancel_blob(_statusVector, ref _blobHandle); _database.ProcessStatusVector(_statusVector); return ValueTask.CompletedTask; } #endregion #region Private Methods private void ClearStatusVector() { Array.Clear(_statusVector, 0, _statusVector.Length); } #endregion } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Native/FesConnection.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. 
*/ //$Authors = Jiri Cincura (jiri@cincura.net) using System; using System.Text; using System.Text.RegularExpressions; namespace FirebirdSql.Data.Client.Native; internal sealed class FesConnection { private FesConnection() { } public static Version GetClientVersion(IFbClient fbClient) { var sb = new StringBuilder(64); fbClient.isc_get_client_version(sb); var version = sb.ToString(); var m = Regex.Match(version, @"Firebird (\d+.\d+)"); if (!m.Success) return null; return new Version(m.Groups[1].Value); } } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Native/FesDatabase.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. 
*/ //$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net) using System; using System.Collections.Generic; using System.Threading; using System.Threading.Tasks; using FirebirdSql.Data.Client.Native.Handles; using FirebirdSql.Data.Common; namespace FirebirdSql.Data.Client.Native; internal sealed class FesDatabase : DatabaseBase { #region Fields private static readonly Version Version25 = new Version(2, 5); private readonly IFbClient _fbClient; private readonly Version _fbClientVersion; private DatabaseHandle _handle; private IntPtr[] _statusVector; #endregion #region Properties public override bool UseUtf8ParameterBuffer => _fbClientVersion >= Version25; public override int Handle => _handle.DangerousGetHandle().AsInt(); public override bool HasRemoteEventSupport => false; public override bool ConnectionBroken => false; public IFbClient FbClient => _fbClient; public Version FbClientVersion => _fbClientVersion; public DatabaseHandle HandlePtr => _handle; #endregion #region Constructors public FesDatabase(string dllName, Charset charset, int packetSize, short dialect) : base(charset, packetSize, dialect) { _fbClient = FbClientFactory.Create(dllName); _fbClientVersion = FesConnection.GetClientVersion(_fbClient); _handle = new DatabaseHandle(); _statusVector = new IntPtr[IscCodes.ISC_STATUS_LENGTH]; } #endregion #region Database Methods public override void CreateDatabase(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey) { CheckCryptKeyForSupport(cryptKey); var databaseBuffer = dpb.Encoding.GetBytes(database); StatusVectorHelper.ClearStatusVector(_statusVector); _fbClient.isc_create_database( _statusVector, (short)databaseBuffer.Length, databaseBuffer, ref _handle, dpb.Length, dpb.ToArray(), 0); ProcessStatusVector(Charset.DefaultCharset); } public override ValueTask CreateDatabaseAsync(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey, CancellationToken cancellationToken = default) { CheckCryptKeyForSupport(cryptKey); var 
databaseBuffer = dpb.Encoding.GetBytes(database); StatusVectorHelper.ClearStatusVector(_statusVector); _fbClient.isc_create_database( _statusVector, (short)databaseBuffer.Length, databaseBuffer, ref _handle, dpb.Length, dpb.ToArray(), 0); ProcessStatusVector(Charset.DefaultCharset); return ValueTask.CompletedTask; } public override void CreateDatabaseWithTrustedAuth(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey) { throw new NotSupportedException("Trusted Auth isn't supported on Firebird Embedded."); } public override ValueTask CreateDatabaseWithTrustedAuthAsync(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey, CancellationToken cancellationToken = default) { throw new NotSupportedException("Trusted Auth isn't supported on Firebird Embedded."); } public override void DropDatabase() { StatusVectorHelper.ClearStatusVector(_statusVector); _fbClient.isc_drop_database(_statusVector, ref _handle); ProcessStatusVector(); _handle.Dispose(); } public override ValueTask DropDatabaseAsync(CancellationToken cancellationToken = default) { StatusVectorHelper.ClearStatusVector(_statusVector); _fbClient.isc_drop_database(_statusVector, ref _handle); ProcessStatusVector(); _handle.Dispose(); return ValueTask.CompletedTask; } #endregion #region Remote Events Methods public override void CloseEventManager() { throw new NotSupportedException(); } public override ValueTask CloseEventManagerAsync(CancellationToken cancellationToken = default) { throw new NotSupportedException(); } public override void QueueEvents(RemoteEvent events) { throw new NotSupportedException(); } public override ValueTask QueueEventsAsync(RemoteEvent events, CancellationToken cancellationToken = default) { throw new NotSupportedException(); } public override void CancelEvents(RemoteEvent events) { throw new NotSupportedException(); } public override ValueTask CancelEventsAsync(RemoteEvent events, CancellationToken cancellationToken = default) { throw new 
NotSupportedException(); } #endregion #region Methods public override void Attach(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey) { CheckCryptKeyForSupport(cryptKey); var databaseBuffer = dpb.Encoding.GetBytes(database); StatusVectorHelper.ClearStatusVector(_statusVector); _fbClient.isc_attach_database( _statusVector, (short)databaseBuffer.Length, databaseBuffer, ref _handle, dpb.Length, dpb.ToArray()); ProcessStatusVector(Charset.DefaultCharset); ServerVersion = GetServerVersion(); } public override async ValueTask AttachAsync(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey, CancellationToken cancellationToken = default) { CheckCryptKeyForSupport(cryptKey); var databaseBuffer = dpb.Encoding.GetBytes(database); StatusVectorHelper.ClearStatusVector(_statusVector); _fbClient.isc_attach_database( _statusVector, (short)databaseBuffer.Length, databaseBuffer, ref _handle, dpb.Length, dpb.ToArray()); ProcessStatusVector(Charset.DefaultCharset); ServerVersion = await GetServerVersionAsync(cancellationToken).ConfigureAwait(false); } public override void AttachWithTrustedAuth(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey) { throw new NotSupportedException("Trusted Auth isn't supported on Firebird Embedded."); } public override ValueTask AttachWithTrustedAuthAsync(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey, CancellationToken cancellationToken = default) { throw new NotSupportedException("Trusted Auth isn't supported on Firebird Embedded."); } public override void Detach() { if (TransactionCount > 0) { throw IscException.ForErrorCodeIntParam(IscCodes.isc_open_trans, TransactionCount); } if (!_handle.IsInvalid) { StatusVectorHelper.ClearStatusVector(_statusVector); _fbClient.isc_detach_database(_statusVector, ref _handle); ProcessStatusVector(); _handle.Dispose(); } WarningMessage = null; ServerVersion = null; _statusVector = null; TransactionCount = 0; } public override ValueTask 
DetachAsync(CancellationToken cancellationToken = default) { if (TransactionCount > 0) { throw IscException.ForErrorCodeIntParam(IscCodes.isc_open_trans, TransactionCount); } if (!_handle.IsInvalid) { StatusVectorHelper.ClearStatusVector(_statusVector); _fbClient.isc_detach_database(_statusVector, ref _handle); ProcessStatusVector(); _handle.Dispose(); } WarningMessage = null; ServerVersion = null; _statusVector = null; TransactionCount = 0; return ValueTask.CompletedTask; } #endregion #region Transaction Methods public override TransactionBase BeginTransaction(TransactionParameterBuffer tpb) { var transaction = new FesTransaction(this); transaction.BeginTransaction(tpb); return transaction; } public override async ValueTask BeginTransactionAsync(TransactionParameterBuffer tpb, CancellationToken cancellationToken = default) { var transaction = new FesTransaction(this); await transaction.BeginTransactionAsync(tpb, cancellationToken).ConfigureAwait(false); return transaction; } #endregion #region Cancel Methods public override void CancelOperation(short kind) { var localStatusVector = new IntPtr[IscCodes.ISC_STATUS_LENGTH]; _fbClient.fb_cancel_operation(localStatusVector, ref _handle, (ushort)kind); try { ProcessStatusVector(localStatusVector); } catch (IscException ex) when (ex.ErrorCode == IscCodes.isc_nothing_to_cancel) { } } public override ValueTask CancelOperationAsync(short kind, CancellationToken cancellationToken = default) { var localStatusVector = new IntPtr[IscCodes.ISC_STATUS_LENGTH]; _fbClient.fb_cancel_operation(localStatusVector, ref _handle, (ushort)kind); try { ProcessStatusVector(localStatusVector); } catch (IscException ex) when (ex.ErrorCode == IscCodes.isc_nothing_to_cancel) { } return ValueTask.CompletedTask; } #endregion #region Statement Creation Methods public override StatementBase CreateStatement() { return new FesStatement(this); } public override StatementBase CreateStatement(TransactionBase transaction) { return new FesStatement(this, 
transaction as FesTransaction); } #endregion #region Parameter Buffers public override DatabaseParameterBufferBase CreateDatabaseParameterBuffer() { return new DatabaseParameterBuffer1(ParameterBufferEncoding); } public override EventParameterBuffer CreateEventParameterBuffer() { return new EventParameterBuffer(Charset.Encoding); } public override TransactionParameterBuffer CreateTransactionParameterBuffer() { return new TransactionParameterBuffer(Charset.Encoding); } #endregion #region Database Information Methods public override List GetDatabaseInfo(byte[] items) { return GetDatabaseInfo(items, IscCodes.DEFAULT_MAX_BUFFER_SIZE); } public override ValueTask> GetDatabaseInfoAsync(byte[] items, CancellationToken cancellationToken = default) { return GetDatabaseInfoAsync(items, IscCodes.DEFAULT_MAX_BUFFER_SIZE, cancellationToken); } public override List GetDatabaseInfo(byte[] items, int bufferLength) { var buffer = new byte[bufferLength]; DatabaseInfo(items, buffer, buffer.Length); return IscHelper.ParseDatabaseInfo(buffer, Charset); } public override ValueTask> GetDatabaseInfoAsync(byte[] items, int bufferLength, CancellationToken cancellationToken = default) { var buffer = new byte[bufferLength]; DatabaseInfo(items, buffer, buffer.Length); return ValueTask.FromResult(IscHelper.ParseDatabaseInfo(buffer, Charset)); } #endregion #region Internal Methods internal void ProcessStatusVector(IntPtr[] statusVector) { StatusVectorHelper.ProcessStatusVector(statusVector, Charset, WarningMessage); } #endregion #region Private Methods private void DatabaseInfo(byte[] items, byte[] buffer, int bufferLength) { StatusVectorHelper.ClearStatusVector(_statusVector); _fbClient.isc_database_info( _statusVector, ref _handle, (short)items.Length, items, (short)bufferLength, buffer); ProcessStatusVector(); } private void ProcessStatusVector() { StatusVectorHelper.ProcessStatusVector(_statusVector, Charset, WarningMessage); } private void ProcessStatusVector(Charset charset) { 
StatusVectorHelper.ProcessStatusVector(_statusVector, charset, WarningMessage); } #endregion #region Internal Static Methods internal static void CheckCryptKeyForSupport(byte[] cryptKey) { // ICryptKeyCallbackImpl would have to be passed from C# for 'cryptKey' passing if (cryptKey?.Length > 0) throw new NotSupportedException("Passing Encryption Key isn't, yet, supported on Firebird Embedded."); } #endregion } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Native/FesServiceManager.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. 
 */

//$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net)

using System;
using System.Threading;
using System.Threading.Tasks;
using FirebirdSql.Data.Common;

namespace FirebirdSql.Data.Client.Native;

// Service manager implementation backed by the native fbclient library
// (isc_service_* entry points) used for Firebird Embedded / local client.
internal sealed class FesServiceManager : ServiceManagerBase
{
	#region Fields

	// First client version whose parameter buffers are UTF8 encoded
	// (see UseUtf8ParameterBuffer below).
	private static readonly Version Version30 = new Version(3, 0);

	private readonly IFbClient _fbClient;
	private readonly Version _fbClientVersion;
	// Reused ISC status vector; cleared before every native call.
	private IntPtr[] _statusVector;

	#endregion

	#region Properties

	public override bool UseUtf8ParameterBuffer => _fbClientVersion >= Version30;

	#endregion

	#region Constructors

	// Loads the native client library 'dllName' and queries its version once.
	public FesServiceManager(string dllName, Charset charset)
		: base(charset)
	{
		_fbClient = FbClientFactory.Create(dllName);
		_fbClientVersion = FesConnection.GetClientVersion(_fbClient);
		_statusVector = new IntPtr[IscCodes.ISC_STATUS_LENGTH];
	}

	#endregion

	#region Methods

	// Attaches to the service manager. 'dataSource' and 'port' are ignored
	// here: the native client resolves the target from 'service' alone.
	public override void Attach(ServiceParameterBufferBase spb, string dataSource, int port, string service, byte[] cryptKey)
	{
		// Embedded cannot pass an encryption key (see FesDatabase).
		FesDatabase.CheckCryptKeyForSupport(cryptKey);

		StatusVectorHelper.ClearStatusVector(_statusVector);

		var svcHandle = Handle;

		// NOTE(review): service.Length is a char count passed where the API
		// expects a byte length — verify behavior for non-ASCII service names.
		_fbClient.isc_service_attach(
			_statusVector,
			(short)service.Length,
			service,
			ref svcHandle,
			spb.Length,
			spb.ToArray());

		// Default charset here; the manager's charset is not applicable yet.
		ProcessStatusVector(Charset.DefaultCharset);

		Handle = svcHandle;
	}

	// Async counterpart of Attach. The native call itself is synchronous, so
	// the work completes inline and a completed ValueTask is returned.
	public override ValueTask AttachAsync(ServiceParameterBufferBase spb, string dataSource, int port, string service, byte[] cryptKey, CancellationToken cancellationToken = default)
	{
		FesDatabase.CheckCryptKeyForSupport(cryptKey);

		StatusVectorHelper.ClearStatusVector(_statusVector);

		var svcHandle = Handle;

		_fbClient.isc_service_attach(
			_statusVector,
			(short)service.Length,
			service,
			ref svcHandle,
			spb.Length,
			spb.ToArray());

		ProcessStatusVector(Charset.DefaultCharset);

		Handle = svcHandle;

		return ValueTask.CompletedTask;
	}

	// Detaches from the service manager and stores the (zeroed) handle back.
	public override void Detach()
	{
		StatusVectorHelper.ClearStatusVector(_statusVector);

		var svcHandle = Handle;

		_fbClient.isc_service_detach(_statusVector, ref svcHandle);

		ProcessStatusVector();

		Handle = svcHandle;
	}

	// Async counterpart of Detach; runs synchronously (see AttachAsync).
	public override ValueTask DetachAsync(CancellationToken cancellationToken = default)
	{
		StatusVectorHelper.ClearStatusVector(_statusVector);

		var svcHandle = Handle;

		_fbClient.isc_service_detach(_statusVector, ref svcHandle);

		ProcessStatusVector();

		Handle = svcHandle;

		return ValueTask.CompletedTask;
	}

	// Starts a service task described by 'spb' (e.g. backup/restore).
	public override void Start(ServiceParameterBufferBase spb)
	{
		StatusVectorHelper.ClearStatusVector(_statusVector);

		var svcHandle = Handle;
		// Reserved argument required by the isc_service_start signature.
		var reserved = 0;

		_fbClient.isc_service_start(
			_statusVector,
			ref svcHandle,
			ref reserved,
			spb.Length,
			spb.ToArray());

		ProcessStatusVector();
	}

	// Async counterpart of Start; runs synchronously (see AttachAsync).
	public override ValueTask StartAsync(ServiceParameterBufferBase spb, CancellationToken cancellationToken = default)
	{
		StatusVectorHelper.ClearStatusVector(_statusVector);

		var svcHandle = Handle;
		var reserved = 0;

		_fbClient.isc_service_start(
			_statusVector,
			ref svcHandle,
			ref reserved,
			spb.Length,
			spb.ToArray());

		ProcessStatusVector();

		return ValueTask.CompletedTask;
	}

	// Queries the running service; the reply is written into 'buffer'.
	public override void Query(ServiceParameterBufferBase spb, int requestLength, byte[] requestBuffer, int bufferLength, byte[] buffer)
	{
		StatusVectorHelper.ClearStatusVector(_statusVector);

		var svcHandle = Handle;
		var reserved = 0;

		// NOTE(review): the bufferLength parameter is ignored; buffer.Length
		// is passed to the native call instead — confirm this is intended.
		_fbClient.isc_service_query(
			_statusVector,
			ref svcHandle,
			ref reserved,
			spb.Length,
			spb.ToArray(),
			(short)requestLength,
			requestBuffer,
			(short)buffer.Length,
			buffer);

		ProcessStatusVector();
	}

	// Async counterpart of Query; runs synchronously (see AttachAsync).
	public override ValueTask QueryAsync(ServiceParameterBufferBase spb, int requestLength, byte[] requestBuffer, int bufferLength, byte[] buffer, CancellationToken cancellationToken = default)
	{
		StatusVectorHelper.ClearStatusVector(_statusVector);

		var svcHandle = Handle;
		var reserved = 0;

		_fbClient.isc_service_query(
			_statusVector,
			ref svcHandle,
			ref reserved,
			spb.Length,
			spb.ToArray(),
			(short)requestLength,
			requestBuffer,
			(short)buffer.Length,
			buffer);

		ProcessStatusVector();

		return ValueTask.CompletedTask;
	}

	// SPB flavor 2 carries the encoding derived from UseUtf8ParameterBuffer.
	public override ServiceParameterBufferBase CreateServiceParameterBuffer()
	{
		return new ServiceParameterBuffer2(ParameterBufferEncoding);
	}

	#endregion

	#region Private Methods

	// Throws on error, surfaces warnings via WarningMessage (manager charset).
	private void ProcessStatusVector()
	{
		StatusVectorHelper.ProcessStatusVector(_statusVector, Charset, WarningMessage);
	}

	// Same, with an explicit charset for calls made before Charset applies.
	private void ProcessStatusVector(Charset charset)
	{
		StatusVectorHelper.ProcessStatusVector(_statusVector, charset, WarningMessage);
	}

	#endregion
}

================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Client/Native/FesStatement.cs
================================================
/*
 *	The contents of this file are subject to the Initial
 *	Developer's Public License Version 1.0 (the "License");
 *	you may not use this file except in compliance with the
 *	License. You may obtain a copy of the License at
 *	https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *	Software distributed under the License is distributed on
 *	an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *	express or implied. See the License for the specific
 *	language governing rights and limitations under the License.
 *
 *	All Rights Reserved.
 */

//$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net)

using System;
using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;
using FirebirdSql.Data.Client.Native.Handles;
using FirebirdSql.Data.Client.Native.Marshalers;
using FirebirdSql.Data.Common;

namespace FirebirdSql.Data.Client.Native;

// DSQL statement implementation on top of the native fbclient library.
// Data crosses the managed/native boundary as XSQLDA structures handled by
// XsqldaMarshaler; every native call goes through the reused _statusVector.
// NOTE(review): generic type arguments appear to have been stripped from this
// extract (e.g. 'new Queue()', 'ValueTask CreateArrayAsync', 'ValueTask
// FetchAsync') — verify against the upstream sources.
internal sealed class FesStatement : StatementBase
{
	#region Fields

	private StatementHandle _handle;
	private bool _disposed;
	private FesDatabase _database;
	private FesTransaction _transaction;
	// Input parameter descriptors (bind variables).
	private Descriptor _parameters;
	// Output column descriptors of the prepared statement.
	private Descriptor _fields;
	private bool _allRowsFetched;
	// Reused ISC status vector; cleared before every native call.
	private IntPtr[] _statusVector;
	// Native XSQLDA cached across Fetch calls; freed on Release/Close/EOF.
	private IntPtr _fetchSqlDa;

	#endregion

	#region Properties

	public override DatabaseBase Database
	{
		get { return _database; }
	}

	// Setting the transaction (un)subscribes this statement from the
	// transaction's Update event so it can invalidate itself on commit/rollback.
	public override TransactionBase Transaction
	{
		get { return _transaction; }
		set
		{
			if (_transaction != value)
			{
				if (TransactionUpdate != null && _transaction != null)
				{
					_transaction.Update -= TransactionUpdate;
					TransactionUpdate = null;
				}

				if (value == null)
				{
					_transaction = null;
				}
				else
				{
					_transaction = (FesTransaction)value;
					TransactionUpdate = new EventHandler(TransactionUpdated);
					_transaction.Update += TransactionUpdate;
				}
			}
		}
	}

	public override Descriptor Parameters
	{
		get { return _parameters; }
		set { _parameters = value; }
	}

	public override Descriptor Fields
	{
		get { return _fields; }
	}

	// Fixed fetch size; the setter is intentionally a no-op.
	public override int FetchSize
	{
		get { return 200; }
		set { }
	}

	#endregion

	#region Constructors

	public FesStatement(FesDatabase database)
		: this(database, null)
	{
	}

	public FesStatement(FesDatabase database, FesTransaction transaction)
	{
		_database = database;
		_handle = new StatementHandle();
		OutputParameters = new Queue();
		_statusVector = new IntPtr[IscCodes.ISC_STATUS_LENGTH];
		_fetchSqlDa = IntPtr.Zero;

		if (transaction != null)
		{
			Transaction = transaction;
		}
	}

	#endregion

	#region Dispose2

	// Releases native resources and drops all managed references once.
	public override void Dispose2()
	{
		if (!_disposed)
		{
			_disposed = true;
			Release();
			Clear();
			_database = null;
			_fields = null;
			_parameters = null;
			_transaction = null;
			OutputParameters = null;
			_statusVector = null;
			_allRowsFetched = false;
			_handle.Dispose();
			// FetchSize setter is a no-op; kept for symmetry with the base class.
			FetchSize = 0;
			base.Dispose2();
		}
	}

	public override async ValueTask Dispose2Async(CancellationToken cancellationToken = default)
	{
		if (!_disposed)
		{
			_disposed = true;
			await ReleaseAsync(cancellationToken).ConfigureAwait(false);
			Clear();
			_database = null;
			_fields = null;
			_parameters = null;
			_transaction = null;
			OutputParameters = null;
			_statusVector = null;
			_allRowsFetched = false;
			_handle.Dispose();
			FetchSize = 0;
			await base.Dispose2Async(cancellationToken).ConfigureAwait(false);
		}
	}

	#endregion

	#region Blob Creation Metods

	public override BlobBase CreateBlob()
	{
		return new FesBlob(_database, _transaction);
	}

	public override BlobBase CreateBlob(long blobId)
	{
		return new FesBlob(_database, _transaction, blobId);
	}

	#endregion

	#region Array Creation Methods

	public override ArrayBase CreateArray(ArrayDesc descriptor)
	{
		var array = new FesArray(descriptor);
		return array;
	}

	public override ValueTask CreateArrayAsync(ArrayDesc descriptor, CancellationToken cancellationToken = default)
	{
		var array = new FesArray(descriptor);
		return ValueTask.FromResult(array);
	}

	public override ArrayBase CreateArray(string tableName, string fieldName)
	{
		var array = new FesArray(_database, _transaction, tableName, fieldName);
		array.Initialize();
		return array;
	}

	public override async ValueTask CreateArrayAsync(string tableName, string fieldName, CancellationToken cancellationToken = default)
	{
		var array = new FesArray(_database, _transaction, tableName, fieldName);
		await array.InitializeAsync(cancellationToken).ConfigureAwait(false);
		return array;
	}

	public override ArrayBase CreateArray(long handle, string tableName, string fieldName)
	{
		var array = new FesArray(_database, _transaction, handle, tableName, fieldName);
		array.Initialize();
		return array;
	}

	public override async ValueTask CreateArrayAsync(long handle, string tableName, string fieldName, CancellationToken cancellationToken = default)
	{
		var array = new FesArray(_database, _transaction, handle, tableName, fieldName);
		await array.InitializeAsync(cancellationToken).ConfigureAwait(false);
		return array;
	}

	public override BatchBase CreateBatch()
	{
		throw new NotSupportedException("Batching isn't, yet, supported on Firebird Embedded.");
	}

	public override BatchParameterBuffer CreateBatchParameterBuffer()
	{
		throw new NotSupportedException("Batching isn't, yet, supported on Firebird Embedded.");
	}

	#endregion

	#region Methods

	// Release/Close free the cached fetch XSQLDA before deferring to the base
	// class, so no native memory outlives the statement/cursor.
	public override void Release()
	{
		XsqldaMarshaler.CleanUpNativeData(ref _fetchSqlDa);

		base.Release();
	}

	public override ValueTask ReleaseAsync(CancellationToken cancellationToken = default)
	{
		XsqldaMarshaler.CleanUpNativeData(ref _fetchSqlDa);

		return base.ReleaseAsync(cancellationToken);
	}

	public override void Close()
	{
		XsqldaMarshaler.CleanUpNativeData(ref _fetchSqlDa);

		base.Close();
	}

	public override ValueTask CloseAsync(CancellationToken cancellationToken = default)
	{
		XsqldaMarshaler.CleanUpNativeData(ref _fetchSqlDa);

		return base.CloseAsync(cancellationToken);
	}

	// Prepares 'commandText': allocates a handle if needed, runs
	// isc_dsql_prepare, then (re)describes output columns and parameters.
	public override void Prepare(string commandText)
	{
		ClearAll();

		ClearStatusVector();

		if (State == StatementState.Deallocated)
		{
			Allocate();
		}

		// Start with a single-entry descriptor; re-describe below when the
		// statement actually has more columns than that.
		_fields = new Descriptor(1);

		var sqlda = XsqldaMarshaler.MarshalManagedToNative(_database.Charset, _fields);
		var trHandle = _transaction.HandlePtr;

		var buffer = _database.Charset.GetBytes(commandText);

		_database.FbClient.isc_dsql_prepare(
			_statusVector,
			ref trHandle,
			ref _handle,
			(short)buffer.Length,
			buffer,
			_database.Dialect,
			sqlda);

		var descriptor = XsqldaMarshaler.MarshalNativeToManaged(_database.Charset, sqlda);

		XsqldaMarshaler.CleanUpNativeData(ref sqlda);

		_database.ProcessStatusVector(_statusVector);

		_fields = descriptor;

		if (_fields.ActualCount > 0 && _fields.ActualCount != _fields.Count)
		{
			// More columns than the initial descriptor could hold.
			Describe();
		}
		else
		{
			if (_fields.ActualCount == 0)
			{
				_fields = new Descriptor(0);
			}
		}

		_fields.ResetValues();

		DescribeParameters();

		StatementType = GetStatementType();

		State = StatementState.Prepared;
	}

	// Async counterpart of Prepare; only GetStatementTypeAsync is awaited,
	// the native calls themselves are synchronous.
	public override async ValueTask PrepareAsync(string commandText, CancellationToken cancellationToken = default)
	{
		ClearAll();

		ClearStatusVector();

		if (State == StatementState.Deallocated)
		{
			Allocate();
		}

		_fields = new Descriptor(1);

		var sqlda = XsqldaMarshaler.MarshalManagedToNative(_database.Charset, _fields);
		var trHandle = _transaction.HandlePtr;

		var buffer = _database.Charset.GetBytes(commandText);

		_database.FbClient.isc_dsql_prepare(
			_statusVector,
			ref trHandle,
			ref _handle,
			(short)buffer.Length,
			buffer,
			_database.Dialect,
			sqlda);

		var descriptor = XsqldaMarshaler.MarshalNativeToManaged(_database.Charset, sqlda);

		XsqldaMarshaler.CleanUpNativeData(ref sqlda);

		_database.ProcessStatusVector(_statusVector);

		_fields = descriptor;

		if (_fields.ActualCount > 0 && _fields.ActualCount != _fields.Count)
		{
			Describe();
		}
		else
		{
			if (_fields.ActualCount == 0)
			{
				_fields = new Descriptor(0);
			}
		}

		_fields.ResetValues();

		DescribeParameters();

		StatementType = await GetStatementTypeAsync(cancellationToken).ConfigureAwait(false);

		State = StatementState.Prepared;
	}

	// Executes the prepared statement via isc_dsql_execute2. For stored
	// procedures the output XSQLDA is read back and queued as output
	// parameters.
	public override void Execute(int timeout, IDescriptorFiller descriptorFiller)
	{
		EnsureNotDeallocated();

		descriptorFiller.Fill(_parameters, 0);

		ClearStatusVector();

		// fb_dsql_set_timeout only exists on newer client libraries.
		NativeHelpers.CallIfExists(
			nameof(IFbClient.fb_dsql_set_timeout),
			() =>
			{
				_database.FbClient.fb_dsql_set_timeout(_statusVector, ref _handle, (uint)timeout);
				_database.ProcessStatusVector(_statusVector);
			});

		ClearStatusVector();

		var inSqlda = IntPtr.Zero;
		var outSqlda = IntPtr.Zero;

		if (_parameters != null)
		{
			inSqlda = XsqldaMarshaler.MarshalManagedToNative(_database.Charset, _parameters);
		}
		if (StatementType == DbStatementType.StoredProcedure)
		{
			Fields.ResetValues();
			outSqlda = XsqldaMarshaler.MarshalManagedToNative(_database.Charset, _fields);
		}

		var trHandle = _transaction.HandlePtr;

		_database.FbClient.isc_dsql_execute2(
			_statusVector,
			ref trHandle,
			ref _handle,
			IscCodes.SQLDA_VERSION1,
			inSqlda,
			outSqlda);

		if (outSqlda != IntPtr.Zero)
		{
			var descriptor = XsqldaMarshaler.MarshalNativeToManaged(_database.Charset, outSqlda, true);

			var values = new DbValue[descriptor.Count];

			for (var i = 0; i < values.Length; i++)
			{
				var d = descriptor[i];
				var value = d.DbValue.GetValue();
				values[i] = new DbValue(this, d, value);
			}

			OutputParameters.Enqueue(values);
		}

		XsqldaMarshaler.CleanUpNativeData(ref inSqlda);
		XsqldaMarshaler.CleanUpNativeData(ref outSqlda);

		_database.ProcessStatusVector(_statusVector);

		if (DoRecordsAffected)
		{
			RecordsAffected = GetRecordsAffected();
		}
		else
		{
			RecordsAffected = -1;
		}

		State = StatementState.Executed;
	}

	// Async counterpart of Execute; awaits value materialization and the
	// records-affected query, native calls remain synchronous.
	public override async ValueTask ExecuteAsync(int timeout, IDescriptorFiller descriptorFiller, CancellationToken cancellationToken = default)
	{
		EnsureNotDeallocated();

		await descriptorFiller.FillAsync(_parameters, 0, cancellationToken).ConfigureAwait(false);

		ClearStatusVector();

		NativeHelpers.CallIfExists(
			nameof(IFbClient.fb_dsql_set_timeout),
			() =>
			{
				_database.FbClient.fb_dsql_set_timeout(_statusVector, ref _handle, (uint)timeout);
				_database.ProcessStatusVector(_statusVector);
			});

		ClearStatusVector();

		var inSqlda = IntPtr.Zero;
		var outSqlda = IntPtr.Zero;

		if (_parameters != null)
		{
			inSqlda = XsqldaMarshaler.MarshalManagedToNative(_database.Charset, _parameters);
		}
		if (StatementType == DbStatementType.StoredProcedure)
		{
			Fields.ResetValues();
			outSqlda = XsqldaMarshaler.MarshalManagedToNative(_database.Charset, _fields);
		}

		var trHandle = _transaction.HandlePtr;

		_database.FbClient.isc_dsql_execute2(
			_statusVector,
			ref trHandle,
			ref _handle,
			IscCodes.SQLDA_VERSION1,
			inSqlda,
			outSqlda);

		if (outSqlda != IntPtr.Zero)
		{
			var descriptor = XsqldaMarshaler.MarshalNativeToManaged(_database.Charset, outSqlda, true);

			var values = new DbValue[descriptor.Count];

			for (var i = 0; i < values.Length; i++)
			{
				var d = descriptor[i];
				var value = await d.DbValue.GetValueAsync(cancellationToken).ConfigureAwait(false);
				values[i] = new DbValue(this, d, value);
			}

			OutputParameters.Enqueue(values);
		}

		XsqldaMarshaler.CleanUpNativeData(ref inSqlda);
		XsqldaMarshaler.CleanUpNativeData(ref outSqlda);

		_database.ProcessStatusVector(_statusVector);

		if (DoRecordsAffected)
		{
			RecordsAffected = await GetRecordsAffectedAsync(cancellationToken).ConfigureAwait(false);
		}
		else
		{
			RecordsAffected = -1;
		}

		State = StatementState.Executed;
	}

	// Fetches the next row, or null at end of data. Stored-procedure output is
	// delivered once via GetOutputParameters; only SELECTs actually fetch.
	public override DbValue[] Fetch()
	{
		EnsureNotDeallocated();

		if (StatementType == DbStatementType.StoredProcedure && !_allRowsFetched)
		{
			_allRowsFetched = true;
			return GetOutputParameters();
		}
		else if (StatementType == DbStatementType.Insert && _allRowsFetched)
		{
			return null;
		}
		else if (StatementType != DbStatementType.Select && StatementType != DbStatementType.SelectForUpdate)
		{
			return null;
		}

		if (_allRowsFetched)
		{
			return null;
		}

		_fields.ResetValues();

		// Lazily build the native XSQLDA once and reuse it for every row.
		if (_fetchSqlDa == IntPtr.Zero)
		{
			_fetchSqlDa = XsqldaMarshaler.MarshalManagedToNative(_database.Charset, _fields);
		}

		ClearStatusVector();

		var status = _database.FbClient.isc_dsql_fetch(_statusVector, ref _handle, IscCodes.SQLDA_VERSION1, _fetchSqlDa);

		// Return code 100 is the ISC end-of-cursor indicator.
		if (status == new IntPtr(100))
		{
			_allRowsFetched = true;

			XsqldaMarshaler.CleanUpNativeData(ref _fetchSqlDa);

			return null;
		}
		else
		{
			var rowDesc = XsqldaMarshaler.MarshalNativeToManaged(_database.Charset, _fetchSqlDa, true);

			if (_fields.Count == rowDesc.Count)
			{
				// Carry array handles forward; they are not part of the row data.
				for (var i = 0; i < _fields.Count; i++)
				{
					if (_fields[i].IsArray() && _fields[i].ArrayHandle != null)
					{
						rowDesc[i].ArrayHandle = _fields[i].ArrayHandle;
					}
				}
			}

			_fields = rowDesc;

			_database.ProcessStatusVector(_statusVector);

			var row = new DbValue[_fields.ActualCount];
			for (var i = 0; i < row.Length; i++)
			{
				var d = _fields[i];
				var value = d.DbValue.GetValue();
				row[i] = new DbValue(this, d, value);
			}
			return row;
		}
	}

	// Async counterpart of Fetch; only value materialization is awaited.
	public override async ValueTask FetchAsync(CancellationToken cancellationToken = default)
	{
		EnsureNotDeallocated();

		if (StatementType == DbStatementType.StoredProcedure && !_allRowsFetched)
		{
			_allRowsFetched = true;
			return GetOutputParameters();
		}
		else if (StatementType == DbStatementType.Insert && _allRowsFetched)
		{
			return null;
		}
		else if (StatementType != DbStatementType.Select && StatementType != DbStatementType.SelectForUpdate)
		{
			return null;
		}

		if (_allRowsFetched)
		{
			return null;
		}

		_fields.ResetValues();

		if (_fetchSqlDa == IntPtr.Zero)
		{
			_fetchSqlDa = XsqldaMarshaler.MarshalManagedToNative(_database.Charset, _fields);
		}

		ClearStatusVector();

		var status = _database.FbClient.isc_dsql_fetch(_statusVector, ref _handle, IscCodes.SQLDA_VERSION1, _fetchSqlDa);

		if (status == new IntPtr(100))
		{
			_allRowsFetched = true;

			XsqldaMarshaler.CleanUpNativeData(ref _fetchSqlDa);

			return null;
		}
		else
		{
			var rowDesc = XsqldaMarshaler.MarshalNativeToManaged(_database.Charset, _fetchSqlDa, true);

			if (_fields.Count == rowDesc.Count)
			{
				for (var i = 0; i < _fields.Count; i++)
				{
					if (_fields[i].IsArray() && _fields[i].ArrayHandle != null)
					{
						rowDesc[i].ArrayHandle = _fields[i].ArrayHandle;
					}
				}
			}

			_fields = rowDesc;

			_database.ProcessStatusVector(_statusVector);

			var row = new DbValue[_fields.ActualCount];
			for (var i = 0; i < row.Length; i++)
			{
				var d = _fields[i];
				var value = await d.DbValue.GetValueAsync(cancellationToken).ConfigureAwait(false);
				row[i] = new DbValue(this, d, value);
			}
			return row;
		}
	}

	#endregion

	#region Protected Methods

	// Frees the statement with the given DSQL option (close cursor or drop).
	protected override void Free(int option)
	{
		// Does not seem to be possible or necessary to close
		// an execute procedure statement.
		if (StatementType == DbStatementType.StoredProcedure && option == IscCodes.DSQL_close)
		{
			return;
		}

		ClearStatusVector();

		_database.FbClient.isc_dsql_free_statement(
			_statusVector,
			ref _handle,
			(short)option);

		if (option == IscCodes.DSQL_drop)
		{
			_parameters = null;
			_fields = null;
		}

		Clear();
		_allRowsFetched = false;

		_database.ProcessStatusVector(_statusVector);
	}

	// Async counterpart of Free; runs synchronously.
	protected override ValueTask FreeAsync(int option, CancellationToken cancellationToken = default)
	{
		// Does not seem to be possible or necessary to close
		// an execute procedure statement.
		if (StatementType == DbStatementType.StoredProcedure && option == IscCodes.DSQL_close)
		{
			return ValueTask.CompletedTask;
		}

		ClearStatusVector();

		_database.FbClient.isc_dsql_free_statement(
			_statusVector,
			ref _handle,
			(short)option);

		if (option == IscCodes.DSQL_drop)
		{
			_parameters = null;
			_fields = null;
		}

		Clear();
		_allRowsFetched = false;

		_database.ProcessStatusVector(_statusVector);

		return ValueTask.CompletedTask;
	}

	// Invalidates this statement when its transaction commits or rolls back.
	protected override void TransactionUpdated(object sender, EventArgs e)
	{
		if (Transaction != null && TransactionUpdate != null)
		{
			Transaction.Update -= TransactionUpdate;
		}

		Clear();

		State = StatementState.Closed;
		TransactionUpdate = null;
		_allRowsFetched = false;
	}

	// Raw isc_dsql_sql_info query; caller parses the returned clumplets.
	protected override byte[] GetSqlInfo(byte[] items, int bufferLength)
	{
		ClearStatusVector();

		var buffer = new byte[bufferLength];

		_database.FbClient.isc_dsql_sql_info(
			_statusVector,
			ref _handle,
			(short)items.Length,
			items,
			(short)bufferLength,
			buffer);

		_database.ProcessStatusVector(_statusVector);

		return buffer;
	}

	protected override ValueTask GetSqlInfoAsync(byte[] items, int bufferLength, CancellationToken cancellationToken = default)
	{
		ClearStatusVector();

		var buffer = new byte[bufferLength];

		_database.FbClient.isc_dsql_sql_info(
			_statusVector,
			ref _handle,
			(short)items.Length,
			items,
			(short)bufferLength,
			buffer);

		_database.ProcessStatusVector(_statusVector);

		return ValueTask.FromResult(buffer);
	}

	#endregion

	#region Private Methods

	private void ClearStatusVector()
	{
		Array.Clear(_statusVector, 0, _statusVector.Length);
	}

	// Discards any queued stored-procedure output.
	private void Clear()
	{
		OutputParameters?.Clear();
	}

	private void ClearAll()
	{
		Clear();

		_parameters = null;
		_fields = null;
	}

	// Allocates the native statement handle against the current database.
	private void Allocate()
	{
		ClearStatusVector();

		var dbHandle = _database.HandlePtr;

		_database.FbClient.isc_dsql_allocate_statement(
			_statusVector,
			ref dbHandle,
			ref _handle);

		_database.ProcessStatusVector(_statusVector);

		_allRowsFetched = false;
		State = StatementState.Allocated;
		StatementType = DbStatementType.None;
	}

	// Re-describes output columns once the real column count is known.
	private void Describe()
	{
		ClearStatusVector();

		_fields = new Descriptor(_fields.ActualCount);

		var sqlda = XsqldaMarshaler.MarshalManagedToNative(_database.Charset, _fields);

		_database.FbClient.isc_dsql_describe(
			_statusVector,
			ref _handle,
			IscCodes.SQLDA_VERSION1,
			sqlda);

		var descriptor = XsqldaMarshaler.MarshalNativeToManaged(_database.Charset, sqlda);

		XsqldaMarshaler.CleanUpNativeData(ref sqlda);

		_database.ProcessStatusVector(_statusVector);

		_fields = descriptor;
	}

	// Describes bind parameters, growing the descriptor and re-describing
	// when the first single-entry attempt is too small.
	private void DescribeParameters()
	{
		ClearStatusVector();

		_parameters = new Descriptor(1);

		var sqlda = XsqldaMarshaler.MarshalManagedToNative(_database.Charset, _parameters);

		_database.FbClient.isc_dsql_describe_bind(
			_statusVector,
			ref _handle,
			IscCodes.SQLDA_VERSION1,
			sqlda);

		var descriptor = XsqldaMarshaler.MarshalNativeToManaged(_database.Charset, sqlda);

		_database.ProcessStatusVector(_statusVector);

		if (descriptor.ActualCount != 0 && descriptor.Count != descriptor.ActualCount)
		{
			var n = descriptor.ActualCount;

			descriptor = new Descriptor(n);

			XsqldaMarshaler.CleanUpNativeData(ref sqlda);

			sqlda = XsqldaMarshaler.MarshalManagedToNative(_database.Charset, descriptor);

			_database.FbClient.isc_dsql_describe_bind(
				_statusVector,
				ref _handle,
				IscCodes.SQLDA_VERSION1,
				sqlda);

			descriptor = XsqldaMarshaler.MarshalNativeToManaged(_database.Charset, sqlda);

			XsqldaMarshaler.CleanUpNativeData(ref sqlda);

			_database.ProcessStatusVector(_statusVector);
		}
		else
		{
			if (descriptor.ActualCount == 0)
			{
				descriptor = new Descriptor(0);
			}
		}

		if (sqlda != IntPtr.Zero)
		{
			XsqldaMarshaler.CleanUpNativeData(ref sqlda);
		}

		_parameters = descriptor;
	}

	#endregion
}

================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Client/Native/FesTransaction.cs
================================================
/*
 *	The contents of this file are subject to the Initial
 *	Developer's Public License Version 1.0 (the "License");
 *	you may not use this file except in compliance with the
 *	License.
 *	You may obtain a copy of the License at
 *	https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *	Software distributed under the License is distributed on
 *	an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *	express or implied. See the License for the specific
 *	language governing rights and limitations under the License.
 *
 *	All Rights Reserved.
 */

//$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net)

using System;
using System.IO;
using System.Data;
using System.Runtime.InteropServices;
using FirebirdSql.Data.Common;
using FirebirdSql.Data.Client.Native.Handles;
using System.Threading.Tasks;
using System.Threading;
using System.Net;
using System.Collections.Generic;

namespace FirebirdSql.Data.Client.Native;

// Transaction implementation on top of the native fbclient library.
// Starts transactions through isc_start_multiple with a manually marshaled
// TEB (transaction existence block).
internal sealed class FesTransaction : TransactionBase
{
	#region Inner Structs

	// Native TEB layout expected by isc_start_multiple: pointer to the
	// database handle, TPB length, pointer to the TPB bytes.
	[StructLayout(LayoutKind.Sequential)]
	struct IscTeb
	{
		public IntPtr dbb_ptr;
		public int tpb_len;
		public IntPtr tpb_ptr;
	}

	#endregion

	#region Fields

	private TransactionHandle _handle;
	private FesDatabase _database;
	private bool _disposed;
	// Reused ISC status vector; cleared before every native call.
	private IntPtr[] _statusVector;

	#endregion

	#region Properties

	// Raw native handle value as an int (for TEB/database bookkeeping).
	public override int Handle
	{
		get { return _handle.DangerousGetHandle().AsInt(); }
	}

	// SafeHandle wrapper used by statements when calling the native API.
	public TransactionHandle HandlePtr
	{
		get { return _handle; }
	}

	#endregion

	#region Constructors

	public FesTransaction(FesDatabase database)
	{
		_database = database;
		_handle = new TransactionHandle();
		State = TransactionState.NoTransaction;
		_statusVector = new IntPtr[IscCodes.ISC_STATUS_LENGTH];
	}

	#endregion

	#region Dispose2

	// Rolls back any open transaction before releasing resources.
	public override void Dispose2()
	{
		if (!_disposed)
		{
			_disposed = true;
			if (State != TransactionState.NoTransaction)
			{
				Rollback();
			}
			_database = null;
			_handle.Dispose();
			State = TransactionState.NoTransaction;
			_statusVector = null;
			base.Dispose2();
		}
	}

	public override async ValueTask Dispose2Async(CancellationToken cancellationToken = default)
	{
		if (!_disposed)
		{
			_disposed = true;
			if (State != TransactionState.NoTransaction)
			{
				await RollbackAsync(cancellationToken).ConfigureAwait(false);
			}
			_database = null;
			_handle.Dispose();
			State = TransactionState.NoTransaction;
			_statusVector = null;
			await base.Dispose2Async(cancellationToken).ConfigureAwait(false);
		}
	}

	#endregion

	#region Methods

	// Starts a transaction with the given TPB. The TEB and the TPB bytes are
	// copied to unmanaged memory for the call and always freed in 'finally'.
	public override void BeginTransaction(TransactionParameterBuffer tpb)
	{
		if (State != TransactionState.NoTransaction)
		{
			throw new InvalidOperationException();
		}

		var teb = new IscTeb();
		var tebData = IntPtr.Zero;

		try
		{
			ClearStatusVector();

			// The TEB stores a pointer to a 32-bit copy of the db handle.
			teb.dbb_ptr = Marshal.AllocHGlobal(4);
			Marshal.WriteInt32(teb.dbb_ptr, _database.Handle);

			teb.tpb_len = tpb.Length;
			teb.tpb_ptr = Marshal.AllocHGlobal(tpb.Length);
			Marshal.Copy(tpb.ToArray(), 0, teb.tpb_ptr, tpb.Length);

			// NOTE(review): generic type arguments appear stripped by text
			// extraction here — presumably Marshal.SizeOf<IscTeb>(); verify
			// against the upstream sources.
			var size = Marshal.SizeOf();
			tebData = Marshal.AllocHGlobal(size);

			Marshal.StructureToPtr(teb, tebData, true);

			_database.FbClient.isc_start_multiple(
				_statusVector,
				ref _handle,
				1,
				tebData);

			_database.ProcessStatusVector(_statusVector);

			State = TransactionState.Active;

			_database.TransactionCount++;
		}
		finally
		{
			if (teb.dbb_ptr != IntPtr.Zero)
			{
				Marshal.FreeHGlobal(teb.dbb_ptr);
			}
			if (teb.tpb_ptr != IntPtr.Zero)
			{
				Marshal.FreeHGlobal(teb.tpb_ptr);
			}
			if (tebData != IntPtr.Zero)
			{
				// NOTE(review): likely Marshal.DestroyStructure<IscTeb>(tebData)
				// upstream (generic argument stripped by extraction).
				Marshal.DestroyStructure(tebData);
				Marshal.FreeHGlobal(tebData);
			}
		}
	}

	// Async counterpart of BeginTransaction; runs synchronously and returns a
	// completed ValueTask (the native call has no async form).
	public override ValueTask BeginTransactionAsync(TransactionParameterBuffer tpb, CancellationToken cancellationToken = default)
	{
		if (State != TransactionState.NoTransaction)
		{
			throw new InvalidOperationException();
		}

		var teb = new IscTeb();
		var tebData = IntPtr.Zero;

		try
		{
			ClearStatusVector();

			teb.dbb_ptr = Marshal.AllocHGlobal(4);
			Marshal.WriteInt32(teb.dbb_ptr, _database.Handle);

			teb.tpb_len = tpb.Length;
			teb.tpb_ptr = Marshal.AllocHGlobal(tpb.Length);
			Marshal.Copy(tpb.ToArray(), 0, teb.tpb_ptr, tpb.Length);

			var size = Marshal.SizeOf();
			tebData = Marshal.AllocHGlobal(size);

			Marshal.StructureToPtr(teb, tebData, true);

			_database.FbClient.isc_start_multiple(
				_statusVector,
				ref _handle,
				1,
				tebData);

			_database.ProcessStatusVector(_statusVector);

			State = TransactionState.Active;

			_database.TransactionCount++;
		}
		finally
		{
			if (teb.dbb_ptr != IntPtr.Zero)
			{
				Marshal.FreeHGlobal(teb.dbb_ptr);
			}
			if (teb.tpb_ptr != IntPtr.Zero)
			{
				Marshal.FreeHGlobal(teb.tpb_ptr);
			}
			if (tebData != IntPtr.Zero)
			{
				Marshal.DestroyStructure(tebData);
				Marshal.FreeHGlobal(tebData);
			}
		}

		return ValueTask.CompletedTask;
	}

	// Commits and ends the transaction; notifies statements via OnUpdate.
	public override void Commit()
	{
		EnsureActiveTransactionState();

		ClearStatusVector();

		_database.FbClient.isc_commit_transaction(_statusVector, ref _handle);

		_database.ProcessStatusVector(_statusVector);

		_database.TransactionCount--;

		OnUpdate(EventArgs.Empty);

		State = TransactionState.NoTransaction;
	}

	public override ValueTask CommitAsync(CancellationToken cancellationToken = default)
	{
		EnsureActiveTransactionState();

		ClearStatusVector();

		_database.FbClient.isc_commit_transaction(_statusVector, ref _handle);

		_database.ProcessStatusVector(_statusVector);

		_database.TransactionCount--;

		OnUpdate(EventArgs.Empty);

		State = TransactionState.NoTransaction;

		return ValueTask.CompletedTask;
	}

	// Rolls back and ends the transaction; notifies statements via OnUpdate.
	public override void Rollback()
	{
		EnsureActiveTransactionState();

		ClearStatusVector();

		_database.FbClient.isc_rollback_transaction(_statusVector, ref _handle);

		_database.ProcessStatusVector(_statusVector);

		_database.TransactionCount--;

		OnUpdate(EventArgs.Empty);

		State = TransactionState.NoTransaction;
	}

	public override ValueTask RollbackAsync(CancellationToken cancellationToken = default)
	{
		EnsureActiveTransactionState();

		ClearStatusVector();

		_database.FbClient.isc_rollback_transaction(_statusVector, ref _handle);

		_database.ProcessStatusVector(_statusVector);

		_database.TransactionCount--;

		OnUpdate(EventArgs.Empty);

		State = TransactionState.NoTransaction;

		return ValueTask.CompletedTask;
	}

	// Commits while keeping the transaction context open (retaining).
	public override void CommitRetaining()
	{
		EnsureActiveTransactionState();

		ClearStatusVector();

		_database.FbClient.isc_commit_retaining(_statusVector, ref _handle);

		_database.ProcessStatusVector(_statusVector);

		State = TransactionState.Active;
	}

	public override ValueTask CommitRetainingAsync(CancellationToken cancellationToken = default)
	{
		EnsureActiveTransactionState();

		ClearStatusVector();

		_database.FbClient.isc_commit_retaining(_statusVector, ref _handle);

		_database.ProcessStatusVector(_statusVector);

		State = TransactionState.Active;

		return ValueTask.CompletedTask;
	}

	// Rolls back while keeping the transaction context open (retaining).
	public override void RollbackRetaining()
	{
		EnsureActiveTransactionState();

		ClearStatusVector();

		_database.FbClient.isc_rollback_retaining(_statusVector, ref _handle);

		_database.ProcessStatusVector(_statusVector);

		State = TransactionState.Active;
	}

	public override ValueTask RollbackRetainingAsync(CancellationToken cancellationToken = default)
	{
		EnsureActiveTransactionState();

		ClearStatusVector();

		_database.FbClient.isc_rollback_retaining(_statusVector, ref _handle);

		_database.ProcessStatusVector(_statusVector);

		State = TransactionState.Active;

		return ValueTask.CompletedTask;
	}

	// Two-phase-commit prepare is intentionally a no-op for the native client.
	public override void Prepare()
	{ }

	public override ValueTask PrepareAsync(CancellationToken cancellationToken = default)
	{
		return ValueTask.CompletedTask;
	}

	public override void Prepare(byte[] buffer)
	{ }

	public override ValueTask PrepareAsync(byte[] buffer, CancellationToken cancellationToken = default)
	{
		return ValueTask.CompletedTask;
	}

	// NOTE(review): generic return types below appear stripped by extraction
	// ('List', 'ValueTask>') — verify against the upstream sources.
	public override List GetTransactionInfo(byte[] items)
	{
		return GetTransactionInfo(items, IscCodes.DEFAULT_MAX_BUFFER_SIZE);
	}

	public override ValueTask> GetTransactionInfoAsync(byte[] items, CancellationToken cancellationToken = default)
	{
		return GetTransactionInfoAsync(items, IscCodes.DEFAULT_MAX_BUFFER_SIZE, cancellationToken);
	}

	// Queries isc_transaction_info and parses the clumplet reply.
	public override List GetTransactionInfo(byte[] items, int bufferLength)
	{
		var buffer = new byte[bufferLength];
		TransactionInfo(items, buffer, buffer.Length);
		return IscHelper.ParseTransactionInfo(buffer, _database.Charset);
	}

	public override ValueTask> GetTransactionInfoAsync(byte[] items, int bufferLength, CancellationToken cancellationToken = default)
	{
		var buffer = new byte[bufferLength];
		TransactionInfo(items, buffer, buffer.Length);
		return ValueTask.FromResult(IscHelper.ParseTransactionInfo(buffer, _database.Charset));
	}

	#endregion

	#region Private Methods

	// Raw isc_transaction_info call; the reply is written into 'buffer'.
	private void TransactionInfo(byte[] items, byte[] buffer, int bufferLength)
	{
		StatusVectorHelper.ClearStatusVector(_statusVector);

		_database.FbClient.isc_transaction_info(
			_statusVector,
			ref _handle,
			(short)items.Length,
			items,
			(short)bufferLength,
			buffer);

		ProcessStatusVector();
	}

	private void ClearStatusVector()
	{
		Array.Clear(_statusVector, 0, _statusVector.Length);
	}

	// Throws on error, surfaces warnings via the database's WarningMessage.
	private void ProcessStatusVector()
	{
		StatusVectorHelper.ProcessStatusVector(_statusVector, _database.Charset, _database.WarningMessage);
	}

	private void ProcessStatusVector(Charset charset)
	{
		StatusVectorHelper.ProcessStatusVector(_statusVector, charset, _database.WarningMessage);
	}

	#endregion
}

================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Client/Native/Handles/BlobHandle.cs
================================================
/*
 *	The contents of this file are subject to the Initial
 *	Developer's Public License Version 1.0 (the "License");
 *	you may not use this file except in compliance with the
 *	License. You may obtain a copy of the License at
 *	https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *	Software distributed under the License is distributed on
 *	an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *	express or implied. See the License for the specific
 *	language governing rights and limitations under the License.
 *
 *	All Rights Reserved.
*/ //$Authors = Hennadii Zabula, Jiri Cincura (jiri@cincura.net) using System; using System.Diagnostics.Contracts; using FirebirdSql.Data.Common; namespace FirebirdSql.Data.Client.Native.Handles; // public visibility added, because auto-generated assembly can't work with internal types public class BlobHandle : FirebirdHandle { protected override bool ReleaseHandle() { Contract.Requires(FbClient != null); if (IsClosed) { return true; } var statusVector = new IntPtr[IscCodes.ISC_STATUS_LENGTH]; var @ref = this; FbClient.isc_close_blob(statusVector, ref @ref); handle = @ref.handle; var exception = StatusVectorHelper.ParseStatusVector(statusVector, Charset.DefaultCharset); return exception == null || exception.IsWarning; } } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Native/Handles/DatabaseHandle.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. 
*/ //$Authors = Hennadii Zabula, Jiri Cincura (jiri@cincura.net) using System; using System.Diagnostics.Contracts; using FirebirdSql.Data.Common; namespace FirebirdSql.Data.Client.Native.Handles; // public visibility added, because auto-generated assembly can't work with internal types public class DatabaseHandle : FirebirdHandle { protected override bool ReleaseHandle() { Contract.Requires(FbClient != null); if (IsClosed) { return true; } var statusVector = new IntPtr[IscCodes.ISC_STATUS_LENGTH]; var @ref = this; FbClient.isc_detach_database(statusVector, ref @ref); handle = @ref.handle; var exception = StatusVectorHelper.ParseStatusVector(statusVector, Charset.DefaultCharset); return exception == null || exception.IsWarning; } } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Native/Handles/FirebirdHandle.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. 
*/
//$Authors = Hennadii Zabula

using System;
using System.Diagnostics.Contracts;
using System.Runtime.InteropServices;

namespace FirebirdSql.Data.Client.Native.Handles;

// public visibility added, because auto-generated assembly can't work with internal types
// Base SafeHandle for all native Firebird handle types. Owns the native handle
// (ownsHandle: true), so derived classes release it in their ReleaseHandle override.
public abstract class FirebirdHandle : SafeHandle, IFirebirdHandle
{
    // Client library that created this handle; set once via SetClient.
    private IFbClient _fbClient;

    protected FirebirdHandle()
        : base(IntPtr.Zero, true)
    { }

    // Method added because we can't inject IFbClient in ctor
    public void SetClient(IFbClient fbClient)
    {
        Contract.Requires(_fbClient == null);
        Contract.Requires(fbClient != null);
        Contract.Ensures(_fbClient != null);

        _fbClient = fbClient;
    }

    // Used by derived ReleaseHandle implementations to call back into the
    // same client library instance that produced the handle.
    public IFbClient FbClient => _fbClient;

    // Firebird uses zero as the "no handle" value.
    public override bool IsInvalid => handle == IntPtr.Zero;
}

================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Client/Native/Handles/IFirebirdHandle.cs
================================================
/*
 *    The contents of this file are subject to the Initial
 *    Developer's Public License Version 1.0 (the "License");
 *    you may not use this file except in compliance with the
 *    License. You may obtain a copy of the License at
 *    https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *    Software distributed under the License is distributed on
 *    an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *    express or implied. See the License for the specific
 *    language governing rights and limitations under the License.
 *
 *    All Rights Reserved.
*/
//$Authors = Hennadii Zabula

using System;

namespace FirebirdSql.Data.Client.Native.Handles;

// public visibility added, because auto-generated assembly can't work with internal types
// Implemented by every native handle wrapper so the dynamically generated client
// proxy can associate the handle with its owning client library after construction.
public interface IFirebirdHandle
{
    // Associates the handle with the IFbClient that created it
    // (cannot be done in the ctor; see FirebirdHandle.SetClient).
    void SetClient(IFbClient fbClient);
}

================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Client/Native/Handles/StatementHandle.cs
================================================
/*
 *    The contents of this file are subject to the Initial
 *    Developer's Public License Version 1.0 (the "License");
 *    you may not use this file except in compliance with the
 *    License. You may obtain a copy of the License at
 *    https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *    Software distributed under the License is distributed on
 *    an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *    express or implied. See the License for the specific
 *    language governing rights and limitations under the License.
 *
 *    All Rights Reserved.
 */
//$Authors = Hennadii Zabula, Jiri Cincura (jiri@cincura.net)

using System;
using System.Diagnostics.Contracts;
using FirebirdSql.Data.Common;

namespace FirebirdSql.Data.Client.Native.Handles;

// public visibility added, because auto-generated assembly can't work with internal types
// SafeHandle wrapper over a native statement handle; released with DSQL_drop,
// i.e. the statement is fully dropped (not merely closed).
public class StatementHandle : FirebirdHandle
{
    // Invoked by the runtime (possibly on the finalizer thread) to free the
    // native statement. Returns true when the release succeeded or produced
    // only a warning.
    protected override bool ReleaseHandle()
    {
        Contract.Requires(FbClient != null);

        if (IsClosed)
        {
            return true;
        }

        var statusVector = new IntPtr[IscCodes.ISC_STATUS_LENGTH];

        // The native call takes the handle by ref and zeroes it on success;
        // copy the updated value back into SafeHandle.handle.
        var @ref = this;
        FbClient.isc_dsql_free_statement(statusVector, ref @ref, IscCodes.DSQL_drop);
        handle = @ref.handle;

        var exception = StatusVectorHelper.ParseStatusVector(statusVector, Charset.DefaultCharset);
        // A warning in the status vector does not count as a failed release.
        return exception == null || exception.IsWarning;
    }
}

================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Client/Native/Handles/TransactionHandle.cs
================================================
/*
 *    The contents of this file are subject to the
 *    Initial
 *    Developer's Public License Version 1.0 (the "License");
 *    you may not use this file except in compliance with the
 *    License. You may obtain a copy of the License at
 *    https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *    Software distributed under the License is distributed on
 *    an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *    express or implied. See the License for the specific
 *    language governing rights and limitations under the License.
 *
 *    All Rights Reserved.
 */
//$Authors = Hennadii Zabula, Jiri Cincura (jiri@cincura.net)

using System;
using System.Diagnostics.Contracts;
using FirebirdSql.Data.Common;

namespace FirebirdSql.Data.Client.Native.Handles;

// public visibility added, because auto-generated assembly can't work with internal types
// SafeHandle wrapper over a native transaction handle. A transaction still open
// at release time is rolled back (isc_rollback_transaction) — the safe default
// when the managed owner never committed.
public class TransactionHandle : FirebirdHandle
{
    // Invoked by the runtime (possibly on the finalizer thread) to roll back
    // and free the native transaction. Returns true when the release succeeded
    // or produced only a warning.
    protected override bool ReleaseHandle()
    {
        Contract.Requires(FbClient != null);

        if (IsClosed)
        {
            return true;
        }

        var statusVector = new IntPtr[IscCodes.ISC_STATUS_LENGTH];

        // The native call takes the handle by ref and zeroes it on success;
        // copy the updated value back into SafeHandle.handle.
        var @ref = this;
        FbClient.isc_rollback_transaction(statusVector, ref @ref);
        handle = @ref.handle;

        var exception = StatusVectorHelper.ParseStatusVector(statusVector, Charset.DefaultCharset);
        // A warning in the status vector does not count as a failed release.
        return exception == null || exception.IsWarning;
    }
}

================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Client/Native/IFbClient.cs
================================================
/*
 *    The contents of this file are subject to the Initial
 *    Developer's Public License Version 1.0 (the "License");
 *    you may not use this file except in compliance with the
 *    License. You may obtain a copy of the License at
 *    https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *    Software distributed under the License is distributed on
 *    an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *    express or implied. See the License for the specific
 *    language governing rights and limitations under the License.
 *
 *    All Rights Reserved.
*/
//$Authors = Carlos Guzman Alvarez, Dean Harding, Jiri Cincura (jiri@cincura.net)

using System;
using System.Runtime.InteropServices;
using System.Text;
using FirebirdSql.Data.Client.Native.Handles;

namespace FirebirdSql.Data.Client.Native;

/// <summary>
/// This is the interface that the dynamically-generated class uses to call the native library.
/// Each connection can specify different client library to use even on the same OS.
/// IFbClient and FbClientFactory classes are implemented to support this feature.
/// Public visibility added, because auto-generated assembly can't work with internal types.
/// All isc_* functions return a pointer-sized status code; detailed errors are reported
/// through the first parameter, the status vector (an array of ISC_STATUS values).
/// </summary>
public interface IFbClient
{
#pragma warning disable IDE1006

    // --- Array functions ---

    IntPtr isc_array_get_slice(
        [In, Out] IntPtr[] statusVector,
        [MarshalAs(UnmanagedType.I4)] ref DatabaseHandle dbHandle,
        [MarshalAs(UnmanagedType.I4)] ref TransactionHandle trHandle,
        ref long arrayId,
        IntPtr desc,
        byte[] destArray,
        ref int sliceLength);

    IntPtr isc_array_put_slice(
        [In, Out] IntPtr[] statusVector,
        [MarshalAs(UnmanagedType.I4)] ref DatabaseHandle dbHandle,
        [MarshalAs(UnmanagedType.I4)] ref TransactionHandle trHandle,
        ref long arrayId,
        IntPtr desc,
        byte[] sourceArray,
        ref int sliceLength);

    // --- Blob functions ---

    IntPtr isc_create_blob2(
        [In, Out] IntPtr[] statusVector,
        [MarshalAs(UnmanagedType.I4)] ref DatabaseHandle dbHandle,
        [MarshalAs(UnmanagedType.I4)] ref TransactionHandle trHandle,
        [MarshalAs(UnmanagedType.I4)] ref BlobHandle blobHandle,
        ref long blobId,
        short bpbLength,
        byte[] bpbAddress);

    IntPtr isc_open_blob2(
        [In, Out] IntPtr[] statusVector,
        [MarshalAs(UnmanagedType.I4)] ref DatabaseHandle dbHandle,
        [MarshalAs(UnmanagedType.I4)] ref TransactionHandle trHandle,
        [MarshalAs(UnmanagedType.I4)] ref BlobHandle blobHandle,
        ref long blobId,
        short bpbLength,
        byte[] bpbAddress);

    IntPtr isc_blob_info(
        [In, Out] IntPtr[] statusVector,
        ref BlobHandle blobHandle,
        short itemListBufferLength,
        byte[] itemListBuffer,
        short resultBufferLength,
        byte[] resultBuffer);

    IntPtr isc_get_segment(
        [In, Out] IntPtr[] statusVector,
        [MarshalAs(UnmanagedType.I4)] ref BlobHandle blobHandle,
        ref short actualSegLength,
        short segBufferLength,
        byte[] segBuffer);

    IntPtr isc_put_segment(
        [In, Out] IntPtr[] statusVector,
        [MarshalAs(UnmanagedType.I4)] ref BlobHandle blobHandle,
        short segBufferLength,
        byte[] segBuffer);

    IntPtr isc_seek_blob(
        [In, Out] IntPtr[] statusVector,
        [MarshalAs(UnmanagedType.I4)] ref BlobHandle blobHandle,
        short mode,
        int offset,
        ref int resultingBlobPosition);

    IntPtr isc_cancel_blob(
        [In, Out] IntPtr[] statusVector,
        [MarshalAs(UnmanagedType.I4)] ref BlobHandle blobHandle);

    IntPtr isc_close_blob(
        [In, Out] IntPtr[] statusVector,
        [MarshalAs(UnmanagedType.I4)] ref BlobHandle blobHandle);

    // --- Database (attachment) functions ---

    IntPtr isc_attach_database(
        [In, Out] IntPtr[] statusVector,
        short dbNameLength,
        byte[] dbName,
        [MarshalAs(UnmanagedType.I4)] ref DatabaseHandle dbHandle,
        short parmBufferLength,
        byte[] parmBuffer);

    IntPtr isc_detach_database(
        [In, Out] IntPtr[] statusVector,
        [MarshalAs(UnmanagedType.I4)] ref DatabaseHandle dbHandle);

    IntPtr isc_database_info(
        [In, Out] IntPtr[] statusVector,
        [MarshalAs(UnmanagedType.I4)] ref DatabaseHandle dbHandle,
        short itemListBufferLength,
        byte[] itemListBuffer,
        short resultBufferLength,
        byte[] resultBuffer);

    IntPtr isc_create_database(
        [In, Out] IntPtr[] statusVector,
        short dbNameLength,
        byte[] dbName,
        [MarshalAs(UnmanagedType.I4)] ref DatabaseHandle dbHandle,
        short parmBufferLength,
        byte[] parmBuffer,
        short dbType);

    IntPtr isc_drop_database(
        [In, Out] IntPtr[] statusVector,
        [MarshalAs(UnmanagedType.I4)] ref DatabaseHandle dbHandle);

    // --- Transaction functions ---

    IntPtr isc_start_multiple(
        [In, Out] IntPtr[] statusVector,
        [MarshalAs(UnmanagedType.I4)] ref TransactionHandle trHandle,
        short dbHandleCount,
        IntPtr tebVectorAddress);

    IntPtr isc_commit_transaction(
        [In, Out] IntPtr[] statusVector,
        [MarshalAs(UnmanagedType.I4)] ref TransactionHandle trHandle);

    IntPtr isc_commit_retaining(
        [In, Out] IntPtr[] statusVector,
        [MarshalAs(UnmanagedType.I4)] ref TransactionHandle trHandle);

    IntPtr isc_rollback_transaction(
        [In, Out] IntPtr[] statusVector,
        [MarshalAs(UnmanagedType.I4)] ref TransactionHandle trHandle);

    IntPtr isc_rollback_retaining(
        [In, Out] IntPtr[] statusVector,
        [MarshalAs(UnmanagedType.I4)] ref TransactionHandle trHandle);

    // --- Library/connection control ---

    IntPtr fb_shutdown(
        uint timeout,
        int reason);

    IntPtr fb_cancel_operation(
        [In, Out] IntPtr[] statusVector,
        [MarshalAs(UnmanagedType.I4)] ref DatabaseHandle dbHandle,
        ushort option);

    // --- DSQL (statement) functions ---

    IntPtr isc_dsql_allocate_statement(
        [In, Out] IntPtr[] statusVector,
        [MarshalAs(UnmanagedType.I4)] ref DatabaseHandle dbHandle,
        [MarshalAs(UnmanagedType.I4)] ref StatementHandle stmtHandle);

    IntPtr isc_dsql_describe(
        [In, Out] IntPtr[] statusVector,
        [MarshalAs(UnmanagedType.I4)] ref StatementHandle stmtHandle,
        short daVersion,
        IntPtr xsqlda);

    IntPtr isc_dsql_describe_bind(
        [In, Out] IntPtr[] statusVector,
        [MarshalAs(UnmanagedType.I4)] ref StatementHandle stmtHandle,
        short daVersion,
        IntPtr xsqlda);

    IntPtr isc_dsql_prepare(
        [In, Out] IntPtr[] statusVector,
        [MarshalAs(UnmanagedType.I4)] ref TransactionHandle trHandle,
        [MarshalAs(UnmanagedType.I4)] ref StatementHandle stmtHandle,
        short length,
        byte[] statement,
        short dialect,
        IntPtr xsqlda);

    IntPtr isc_dsql_execute(
        [In, Out] IntPtr[] statusVector,
        [MarshalAs(UnmanagedType.I4)] ref TransactionHandle trHandle,
        [MarshalAs(UnmanagedType.I4)] ref StatementHandle stmtHandle,
        short daVersion,
        IntPtr xsqlda);

    IntPtr isc_dsql_execute2(
        [In, Out] IntPtr[] statusVector,
        [MarshalAs(UnmanagedType.I4)] ref TransactionHandle trHandle,
        [MarshalAs(UnmanagedType.I4)] ref StatementHandle stmtHandle,
        short da_version,
        IntPtr inXsqlda,
        IntPtr outXsqlda);

    IntPtr isc_dsql_fetch(
        [In, Out] IntPtr[] statusVector,
        [MarshalAs(UnmanagedType.I4)] ref StatementHandle stmtHandle,
        short daVersion,
        IntPtr xsqlda);

    IntPtr isc_dsql_free_statement(
        [In, Out] IntPtr[] statusVector,
        [MarshalAs(UnmanagedType.I4)] ref StatementHandle stmtHandle,
        short option);

    IntPtr isc_dsql_sql_info(
        [In, Out] IntPtr[] statusVector,
        [MarshalAs(UnmanagedType.I4)] ref StatementHandle stmtHandle,
        short itemsLength,
        byte[] items,
        short bufferLength,
        byte[] buffer);

    // --- Services API (service handles are plain ints, not SafeHandles) ---

    IntPtr isc_service_attach(
        [In, Out] IntPtr[] statusVector,
        short serviceLength,
        string service,
        ref int svcHandle,
        short spbLength,
        byte[] spb);

    IntPtr isc_service_start(
        [In, Out] IntPtr[] statusVector,
        ref int svcHandle,
        ref int reserved,
        short spbLength,
        byte[] spb);

    IntPtr isc_service_detach(
        [In, Out] IntPtr[] statusVector,
        ref int svcHandle);

    IntPtr isc_service_query(
        [In, Out] IntPtr[] statusVector,
        ref int svcHandle,
        ref int reserved,
        short sendSpbLength,
        byte[] sendSpb,
        short requestSpbLength,
        byte[] requestSpb,
        short bufferLength,
        byte[] buffer);

    // --- Misc ---

    IntPtr fb_dsql_set_timeout(
        [In, Out] IntPtr[] statusVector,
        [MarshalAs(UnmanagedType.I4)] ref StatementHandle stmtHandle,
        uint timeout);

    void isc_get_client_version(
        [MarshalAs(UnmanagedType.LPStr)] StringBuilder version);

    IntPtr isc_transaction_info(
        [In, Out] IntPtr[] statusVector,
        [MarshalAs(UnmanagedType.I4)] ref TransactionHandle trHandle,
        short itemListBufferLength,
        byte[] itemListBuffer,
        short resultBufferLength,
        byte[] resultBuffer);

#pragma warning restore IDE1006
}

================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Client/Native/Marshalers/ArrayBoundMarshal.cs
================================================
/*
 *    The contents of this file are subject to the Initial
 *    Developer's Public License Version 1.0 (the "License");
 *    you may not use this file except in compliance with the
 *    License. You may obtain a copy of the License at
 *    https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *    Software distributed under the License is distributed on
 *    an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *    express or implied. See the License for the specific
 *    language governing rights and limitations under the License.
 *
 *    All Rights Reserved.
*/
//$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net)

using System.Runtime.InteropServices;

namespace FirebirdSql.Data.Client.Native.Marshalers;

// Native layout of ISC_ARRAY_BOUND: one lower/upper pair per array dimension.
[StructLayout(LayoutKind.Sequential)]
internal struct ArrayBoundMarshal
{
    public short LowerBound;
    public short UpperBound;
}

================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Client/Native/Marshalers/ArrayDescMarshal.cs
================================================
/*
 *    The contents of this file are subject to the Initial
 *    Developer's Public License Version 1.0 (the "License");
 *    you may not use this file except in compliance with the
 *    License. You may obtain a copy of the License at
 *    https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *    Software distributed under the License is distributed on
 *    an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *    express or implied. See the License for the specific
 *    language governing rights and limitations under the License.
 *
 *    All Rights Reserved.
 */
//$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net)

using System.Runtime.InteropServices;

namespace FirebirdSql.Data.Client.Native.Marshalers;

// Native layout of ISC_ARRAY_DESC; the variable-length tail of bounds
// (ArrayBoundMarshal entries) is laid out immediately after this header.
[StructLayout(LayoutKind.Sequential)]
internal struct ArrayDescMarshal
{
    public byte DataType;
    public byte Scale;
    public short Length;
    [MarshalAs(UnmanagedType.ByValTStr, SizeConst = 32)]
    public string FieldName;
    [MarshalAs(UnmanagedType.ByValTStr, SizeConst = 32)]
    public string RelationName;
    public short Dimensions;
    public short Flags;

    // Size in bytes of a descriptor followed by n bound entries; also usable
    // as the offset of the n-th bound entry within such a block.
    // Fix: the generic type arguments were missing — the parameterless
    // Marshal.SizeOf() overload does not exist.
    public static int ComputeLength(int n)
    {
        return Marshal.SizeOf<ArrayDescMarshal>() + n * Marshal.SizeOf<ArrayBoundMarshal>();
    }
}

================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Client/Native/Marshalers/ArrayDescMarshaler.cs
================================================
/*
 *    The contents of this file are subject to the Initial
 *    Developer's Public License Version 1.0 (the "License");
 *    you may not use this file except in compliance with the
 *    License.
You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. */ //$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net) using System; using System.Runtime.InteropServices; using FirebirdSql.Data.Common; namespace FirebirdSql.Data.Client.Native.Marshalers; internal static class ArrayDescMarshaler { public static void CleanUpNativeData(ref IntPtr pNativeData) { if (pNativeData != IntPtr.Zero) { Marshal.DestroyStructure(pNativeData); for (var i = 0; i < 16; i++) { Marshal.DestroyStructure(pNativeData + ArrayDescMarshal.ComputeLength(i)); } Marshal.FreeHGlobal(pNativeData); pNativeData = IntPtr.Zero; } } public static IntPtr MarshalManagedToNative(ArrayDesc descriptor) { var arrayDesc = new ArrayDescMarshal(); arrayDesc.DataType = descriptor.DataType; arrayDesc.Scale = (byte)descriptor.Scale; arrayDesc.Length = descriptor.Length; arrayDesc.FieldName = descriptor.FieldName; arrayDesc.RelationName = descriptor.RelationName; arrayDesc.Dimensions = descriptor.Dimensions; arrayDesc.Flags = descriptor.Flags; var arrayBounds = new ArrayBoundMarshal[descriptor.Bounds.Length]; for (var i = 0; i < descriptor.Dimensions; i++) { arrayBounds[i].LowerBound = (short)descriptor.Bounds[i].LowerBound; arrayBounds[i].UpperBound = (short)descriptor.Bounds[i].UpperBound; } return MarshalManagedToNative(arrayDesc, arrayBounds); } public static IntPtr MarshalManagedToNative(ArrayDescMarshal arrayDesc, ArrayBoundMarshal[] arrayBounds) { var size = ArrayDescMarshal.ComputeLength(arrayBounds.Length); var ptr = Marshal.AllocHGlobal(size); Marshal.StructureToPtr(arrayDesc, ptr, true); for (var i = 0; i < arrayBounds.Length; i++) { Marshal.StructureToPtr(arrayBounds[i], ptr + 
ArrayDescMarshal.ComputeLength(i), true); } return ptr; } } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Native/Marshalers/XSQLDA.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. */ //$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net) using System.Runtime.InteropServices; namespace FirebirdSql.Data.Client.Native.Marshalers; [StructLayout(LayoutKind.Sequential)] internal struct XSQLDA { public short version; [MarshalAs(UnmanagedType.ByValTStr, SizeConst = 8)] public string sqldaid; public int sqldabc; public short sqln; public short sqld; } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Client/Native/Marshalers/XSQLVAR.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. 
*/
//$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net)

using System;
using System.Runtime.InteropServices;

namespace FirebirdSql.Data.Client.Native.Marshalers;

// Native layout of one XSQLVAR entry. Field order and the 32-byte name buffers
// mirror the client library's struct and must not be changed.
[StructLayout(LayoutKind.Sequential)]
internal class XSQLVAR
{
    public short sqltype;      // SQL data type; low bit set means nullable
    public short sqlscale;     // scale for numeric/decimal types
    public short sqlsubtype;
    public short sqllen;       // data length in bytes
    public IntPtr sqldata;     // unmanaged buffer holding the value
    public IntPtr sqlind;      // unmanaged short: null indicator (-1 = NULL)
    public short sqlname_length;
    [MarshalAs(UnmanagedType.ByValArray, SizeConst = 32)]
    public byte[] sqlname;
    public short relname_length;
    [MarshalAs(UnmanagedType.ByValArray, SizeConst = 32)]
    public byte[] relname;
    public short ownername_length;
    [MarshalAs(UnmanagedType.ByValArray, SizeConst = 32)]
    public byte[] ownername;
    public short aliasname_length;
    [MarshalAs(UnmanagedType.ByValArray, SizeConst = 32)]
    public byte[] aliasname;
}

================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Client/Native/Marshalers/XsqldaMarshaler.cs
================================================
/*
 *    The contents of this file are subject to the Initial
 *    Developer's Public License Version 1.0 (the "License");
 *    you may not use this file except in compliance with the
 *    License. You may obtain a copy of the License at
 *    https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *    Software distributed under the License is distributed on
 *    an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *    express or implied. See the License for the specific
 *    language governing rights and limitations under the License.
 *
 *    All Rights Reserved.
*/
//$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net), Hennadii Zabula

using System;
using System.Runtime.InteropServices;
using System.IO;
using FirebirdSql.Data.Common;
using System.Threading.Tasks;

namespace FirebirdSql.Data.Client.Native.Marshalers;

// Converts between the managed Descriptor and the native XSQLDA block
// (XSQLDA header followed by XSQLVAR entries in one unmanaged allocation).
// Fix throughout: the generic type arguments of Marshal.SizeOf / PtrToStructure /
// DestroyStructure were missing — those parameterless overloads do not exist.
internal static class XsqldaMarshaler
{
    private static int SizeOfXSQLDA = Marshal.SizeOf<XSQLDA>();
    private static int SizeOfXSQLVAR = Marshal.SizeOf<XSQLVAR>();

    // Frees the sqldata/sqlind buffers of every entry, destroys the marshaled
    // structures, and releases the block; pNativeData is zeroed to prevent double-free.
    public static void CleanUpNativeData(ref IntPtr pNativeData)
    {
        if (pNativeData != IntPtr.Zero)
        {
            var xsqlda = Marshal.PtrToStructure<XSQLDA>(pNativeData);
            Marshal.DestroyStructure<XSQLDA>(pNativeData);
            for (var i = 0; i < xsqlda.sqln; i++)
            {
                var ptr = IntPtr.Add(pNativeData, ComputeLength(i));
                // Only the pointer fields are needed here, so skip the rest.
                var sqlvar = new XSQLVAR();
                MarshalXSQLVARNativeToManaged(ptr, sqlvar, true);
                if (sqlvar.sqldata != IntPtr.Zero)
                {
                    Marshal.FreeHGlobal(sqlvar.sqldata);
                    sqlvar.sqldata = IntPtr.Zero;
                }
                if (sqlvar.sqlind != IntPtr.Zero)
                {
                    Marshal.FreeHGlobal(sqlvar.sqlind);
                    sqlvar.sqlind = IntPtr.Zero;
                }
                Marshal.DestroyStructure<XSQLVAR>(ptr);
            }
            Marshal.FreeHGlobal(pNativeData);
            pNativeData = IntPtr.Zero;
        }
    }

    // Builds a native XSQLDA block from the managed descriptor, allocating
    // sqldata/sqlind buffers for every entry. Release with CleanUpNativeData.
    public static IntPtr MarshalManagedToNative(Charset charset, Descriptor descriptor)
    {
        var xsqlda = new XSQLDA
        {
            version = descriptor.Version,
            sqln = descriptor.Count,
            sqld = descriptor.ActualCount
        };
        var xsqlvar = new XSQLVAR[descriptor.Count];
        for (var i = 0; i < xsqlvar.Length; i++)
        {
            xsqlvar[i] = new XSQLVAR
            {
                sqltype = descriptor[i].DataType,
                sqlscale = descriptor[i].NumericScale,
                sqlsubtype = descriptor[i].SubType,
                sqllen = descriptor[i].Length
            };
            if (descriptor[i].HasDataType() && descriptor[i].DbDataType != DbDataType.Null)
            {
                var buffer = descriptor[i].DbValue.GetBytes();
                xsqlvar[i].sqldata = Marshal.AllocHGlobal(buffer.Length);
                Marshal.Copy(buffer, 0, xsqlvar[i].sqldata, buffer.Length);
            }
            else
            {
                // Keep a valid (freeable) pointer even for NULL values.
                xsqlvar[i].sqldata = Marshal.AllocHGlobal(0);
            }
            // Null indicator is a native short.
            xsqlvar[i].sqlind = Marshal.AllocHGlobal(Marshal.SizeOf<short>());
            Marshal.WriteInt16(xsqlvar[i].sqlind, descriptor[i].NullFlag);

            xsqlvar[i].sqlname = GetStringBuffer(charset, descriptor[i].Name);
            xsqlvar[i].sqlname_length = (short)descriptor[i].Name.Length;
            xsqlvar[i].relname = GetStringBuffer(charset, descriptor[i].Relation);
            xsqlvar[i].relname_length = (short)descriptor[i].Relation.Length;
            xsqlvar[i].ownername = GetStringBuffer(charset, descriptor[i].Owner);
            xsqlvar[i].ownername_length = (short)descriptor[i].Owner.Length;
            xsqlvar[i].aliasname = GetStringBuffer(charset, descriptor[i].Alias);
            xsqlvar[i].aliasname_length = (short)descriptor[i].Alias.Length;
        }
        return MarshalManagedToNative(xsqlda, xsqlvar);
    }

    // Writes the header and entries into one unmanaged allocation.
    public static IntPtr MarshalManagedToNative(XSQLDA xsqlda, XSQLVAR[] xsqlvar)
    {
        var size = ComputeLength(xsqlda.sqln);
        var ptr = Marshal.AllocHGlobal(size);
        Marshal.StructureToPtr(xsqlda, ptr, true);
        for (var i = 0; i < xsqlvar.Length; i++)
        {
            var offset = ComputeLength(i);
            Marshal.StructureToPtr(xsqlvar[i], IntPtr.Add(ptr, offset), true);
        }
        return ptr;
    }

    public static Descriptor MarshalNativeToManaged(Charset charset, IntPtr pNativeData)
    {
        return MarshalNativeToManaged(charset, pNativeData, false);
    }

    // Reads a native XSQLDA block back into a managed Descriptor.
    // When fetching, non-NULL values are copied out of sqldata as raw bytes.
    public static Descriptor MarshalNativeToManaged(Charset charset, IntPtr pNativeData, bool fetching)
    {
        var xsqlda = Marshal.PtrToStructure<XSQLDA>(pNativeData);
        var descriptor = new Descriptor(xsqlda.sqln) { ActualCount = xsqlda.sqld };
        // One scratch XSQLVAR is reused for every entry.
        var xsqlvar = new XSQLVAR();
        for (var i = 0; i < xsqlda.sqln; i++)
        {
            var ptr = IntPtr.Add(pNativeData, ComputeLength(i));
            MarshalXSQLVARNativeToManaged(ptr, xsqlvar);

            descriptor[i].DataType = xsqlvar.sqltype;
            descriptor[i].NumericScale = xsqlvar.sqlscale;
            descriptor[i].SubType = xsqlvar.sqlsubtype;
            descriptor[i].Length = xsqlvar.sqllen;
            descriptor[i].NullFlag = xsqlvar.sqlind == IntPtr.Zero
                ? (short)0
                : Marshal.ReadInt16(xsqlvar.sqlind);

            if (fetching)
            {
                // -1 means SQL NULL; leave the value unset in that case.
                if (descriptor[i].NullFlag != -1)
                {
                    descriptor[i].SetValue(GetBytes(xsqlvar));
                }
            }
            descriptor[i].Name = GetString(charset, xsqlvar.sqlname, xsqlvar.sqlname_length);
            descriptor[i].Relation = GetString(charset, xsqlvar.relname, xsqlvar.relname_length);
            descriptor[i].Owner = GetString(charset, xsqlvar.ownername, xsqlvar.ownername_length);
            descriptor[i].Alias = GetString(charset, xsqlvar.aliasname, xsqlvar.aliasname_length);
        }
        return descriptor;
    }

    // Reads one native XSQLVAR field by field (layout must match the XSQLVAR class).
    // With onlyPointers, only sqldata/sqlind are read — used during cleanup.
    private static void MarshalXSQLVARNativeToManaged(IntPtr ptr, XSQLVAR xsqlvar, bool onlyPointers = false)
    {
        unsafe
        {
            using (var reader = new BinaryReader(new UnmanagedMemoryStream((byte*)ptr.ToPointer(), SizeOfXSQLVAR)))
            {
                if (!onlyPointers) xsqlvar.sqltype = reader.ReadInt16(); else reader.BaseStream.Position += sizeof(short);
                if (!onlyPointers) xsqlvar.sqlscale = reader.ReadInt16(); else reader.BaseStream.Position += sizeof(short);
                if (!onlyPointers) xsqlvar.sqlsubtype = reader.ReadInt16(); else reader.BaseStream.Position += sizeof(short);
                if (!onlyPointers) xsqlvar.sqllen = reader.ReadInt16(); else reader.BaseStream.Position += sizeof(short);
                xsqlvar.sqldata = reader.ReadIntPtr();
                xsqlvar.sqlind = reader.ReadIntPtr();
                if (!onlyPointers) xsqlvar.sqlname_length = reader.ReadInt16(); else reader.BaseStream.Position += sizeof(short);
                if (!onlyPointers) xsqlvar.sqlname = reader.ReadBytes(32); else reader.BaseStream.Position += 32;
                if (!onlyPointers) xsqlvar.relname_length = reader.ReadInt16(); else reader.BaseStream.Position += sizeof(short);
                if (!onlyPointers) xsqlvar.relname = reader.ReadBytes(32); else reader.BaseStream.Position += 32;
                if (!onlyPointers) xsqlvar.ownername_length = reader.ReadInt16(); else reader.BaseStream.Position += sizeof(short);
                if (!onlyPointers) xsqlvar.ownername = reader.ReadBytes(32); else reader.BaseStream.Position += 32;
                if (!onlyPointers) xsqlvar.aliasname_length = reader.ReadInt16(); else reader.BaseStream.Position += sizeof(short);
                if (!onlyPointers) xsqlvar.aliasname = reader.ReadBytes(32); else reader.BaseStream.Position += 32;
            }
        }
    }

    // Offset of the n-th XSQLVAR within the block (header + n entries).
    private static int ComputeLength(int n)
    {
        var length = (SizeOfXSQLDA + n * SizeOfXSQLVAR);
        // Extra padding after the header on 64-bit platforms.
        if (IntPtr.Size == 8)
        {
            length += 4;
        }
        return length;
    }

    // Copies the raw value bytes out of sqldata according to the SQL type.
    private static byte[] GetBytes(XSQLVAR xsqlvar)
    {
        if (xsqlvar.sqllen == 0 || xsqlvar.sqldata == IntPtr.Zero)
        {
            return null;
        }
        // Strip the nullable flag bit.
        var type = xsqlvar.sqltype & ~1;
        switch (type)
        {
            case IscCodes.SQL_VARYING:
                {
                    // VARYING: a 2-byte length prefix precedes the data.
                    var buffer = new byte[Marshal.ReadInt16(xsqlvar.sqldata)];
                    var tmp = IntPtr.Add(xsqlvar.sqldata, 2);
                    Marshal.Copy(tmp, buffer, 0, buffer.Length);
                    return buffer;
                }
            case IscCodes.SQL_TEXT:
            case IscCodes.SQL_SHORT:
            case IscCodes.SQL_LONG:
            case IscCodes.SQL_FLOAT:
            case IscCodes.SQL_DOUBLE:
            case IscCodes.SQL_D_FLOAT:
            case IscCodes.SQL_QUAD:
            case IscCodes.SQL_INT64:
            case IscCodes.SQL_BLOB:
            case IscCodes.SQL_ARRAY:
            case IscCodes.SQL_TIMESTAMP:
            case IscCodes.SQL_TYPE_TIME:
            case IscCodes.SQL_TYPE_DATE:
            case IscCodes.SQL_BOOLEAN:
            case IscCodes.SQL_TIMESTAMP_TZ:
            case IscCodes.SQL_TIMESTAMP_TZ_EX:
            case IscCodes.SQL_TIME_TZ:
            case IscCodes.SQL_TIME_TZ_EX:
            case IscCodes.SQL_DEC16:
            case IscCodes.SQL_DEC34:
            case IscCodes.SQL_INT128:
                {
                    // Fixed-size types: copy sqllen bytes verbatim.
                    var buffer = new byte[xsqlvar.sqllen];
                    Marshal.Copy(xsqlvar.sqldata, buffer, 0, buffer.Length);
                    return buffer;
                }
            default:
                throw TypeHelper.InvalidDataType(type);
        }
    }

    // Encodes a string into the fixed 32-byte name buffer used by XSQLVAR.
    private static byte[] GetStringBuffer(Charset charset, string value)
    {
        var buffer = new byte[32];
        charset.GetBytes(value, 0, value.Length, buffer, 0);
        return buffer;
    }

    private static string GetString(Charset charset, byte[] buffer)
    {
        var value = charset.GetString(buffer);
        return value.TrimEnd('\0', ' ');
    }

    private static string GetString(Charset charset, byte[] buffer, short bufferLength)
    {
        return charset.GetString(buffer, 0, bufferLength);
    }
}

================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Client/Native/StatusVectorHelper.cs
================================================
/*
 *    The contents of this
file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. */ //$Authors = Jiri Cincura (jiri@cincura.net) using System; using System.Collections.Generic; using System.Runtime.InteropServices; using FirebirdSql.Data.Common; namespace FirebirdSql.Data.Client.Native { static class StatusVectorHelper { public static void ProcessStatusVector(IntPtr[] statusVector, Charset charset, Action warningMessage) { var ex = ParseStatusVector(statusVector, charset); if (ex != null) { if (ex.IsWarning) { warningMessage?.Invoke(ex); } else { throw ex; } } } public static void ClearStatusVector(IntPtr[] statusVector) { Array.Clear(statusVector, 0, statusVector.Length); } public static IscException ParseStatusVector(IntPtr[] statusVector, Charset charset) { IscException exception = null; var eof = false; for (var i = 0; i < statusVector.Length;) { var arg = statusVector[i++]; switch (arg.AsInt()) { case IscCodes.isc_arg_gds: default: var er = statusVector[i++]; if (er != IntPtr.Zero) { if (exception == null) { exception = IscException.ForBuilding(); } exception.Errors.Add(new IscError(arg.AsInt(), er.AsInt())); } break; case IscCodes.isc_arg_end: exception?.BuildExceptionData(); eof = true; break; case IscCodes.isc_arg_interpreted: case IscCodes.isc_arg_string: { var ptr = statusVector[i++]; var buffer = ReadStringData(ptr); var value = charset.GetString(buffer); exception.Errors.Add(new IscError(arg.AsInt(), value)); } break; case IscCodes.isc_arg_cstring: { i++; var ptr = statusVector[i++]; var buffer = 
ReadStringData(ptr); var value = charset.GetString(buffer); exception.Errors.Add(new IscError(arg.AsInt(), value)); } break; case IscCodes.isc_arg_win32: case IscCodes.isc_arg_number: exception.Errors.Add(new IscError(arg.AsInt(), statusVector[i++].AsInt())); break; case IscCodes.isc_arg_sql_state: { var ptr = statusVector[i++]; var buffer = ReadStringData(ptr); var value = charset.GetString(buffer); exception.Errors.Add(new IscError(arg.AsInt(), value)); } break; } if (eof) { break; } } return exception; } private static byte[] ReadStringData(IntPtr ptr) { var buffer = new List(); var offset = 0; while (true) { var b = Marshal.ReadByte(ptr, offset); if (b == 0) break; buffer.Add(b); offset++; } return buffer.ToArray(); } } } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Common/ArrayBase.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. 
*/

//$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net)

using System;
using System.Text;
using System.Globalization;
using System.Threading.Tasks;
using System.Threading;

namespace FirebirdSql.Data.Common;

/// <summary>
/// Base class for reading and writing Firebird ARRAY column slices.
/// Loads the array descriptor (element type, scale, length, dimensions,
/// bounds) from the RDB$ system tables and converts between .NET
/// <see cref="Array"/> instances and wire-format slices.
/// </summary>
internal abstract class ArrayBase
{
	#region Fields

	private ArrayDesc _descriptor;
	private string _tableName;
	private string _fieldName;
	private string _rdbFieldName;

	#endregion

	#region Properties

	// Descriptor as loaded from the system tables (via Initialize/LookupDesc)
	// or supplied by the caller through the constructor.
	public ArrayDesc Descriptor => _descriptor;

	#endregion

	#region Abstract Properties

	public abstract long Handle { get; set; }
	public abstract DatabaseBase Database { get; set; }
	public abstract TransactionBase Transaction { get; set; }

	#endregion

	#region Constructors

	protected ArrayBase(ArrayDesc descriptor)
	{
		_tableName = descriptor.RelationName;
		_fieldName = descriptor.FieldName;
		_descriptor = descriptor;
	}

	protected ArrayBase(string tableName, string fieldName)
	{
		_tableName = tableName;
		_fieldName = fieldName;
		_rdbFieldName = string.Empty;
	}

	#endregion

	#region Methods

	/// <summary>
	/// Loads descriptor and dimension bounds from the system tables.
	/// Must be called before Read/Write when constructed from table/field names.
	/// </summary>
	public void Initialize()
	{
		LookupBounds();
	}

	public ValueTask InitializeAsync(CancellationToken cancellationToken = default)
	{
		return LookupBoundsAsync(cancellationToken);
	}

	/// <summary>Fetches the whole array slice and decodes it into a .NET array.</summary>
	public Array Read()
	{
		var slice = GetSlice(GetSliceLength(true));
		return DecodeSlice(slice);
	}

	// Restored generic return type (ValueTask<Array>): the method returns the
	// decoded array, so the non-generic ValueTask form could not compile.
	public async ValueTask<Array> ReadAsync(CancellationToken cancellationToken = default)
	{
		var slice = await GetSliceAsync(GetSliceLength(true), cancellationToken).ConfigureAwait(false);
		return await DecodeSliceAsync(slice, cancellationToken).ConfigureAwait(false);
	}

	/// <summary>Encodes <paramref name="sourceArray"/> and writes it as a slice.</summary>
	public void Write(Array sourceArray)
	{
		SetDesc(sourceArray);
		PutSlice(sourceArray, GetSliceLength(false));
	}

	public async ValueTask WriteAsync(Array sourceArray, CancellationToken cancellationToken = default)
	{
		SetDesc(sourceArray);
		await PutSliceAsync(sourceArray, GetSliceLength(false), cancellationToken).ConfigureAwait(false);
	}

	/// <summary>
	/// Updates the descriptor's dimensions and upper bounds to match
	/// <paramref name="sourceArray"/>, keeping the existing lower bounds.
	/// </summary>
	public void SetDesc(Array sourceArray)
	{
		_descriptor.Dimensions = (short)sourceArray.Rank;

		for (var i = 0; i < sourceArray.Rank; i++)
		{
			var lb = _descriptor.Bounds[i].LowerBound;
			var ub = sourceArray.GetLength(i) - 1 + lb;

			_descriptor.Bounds[i].UpperBound = ub;
		}
	}

	private void LookupBounds()
	{
		LookupDesc();
		var lookup = Database.CreateStatement(Transaction);
		try
		{
			lookup.Prepare(GetArrayBounds());
			lookup.Execute(0, EmptyDescriptorFiller.Instance);

			// 16 is the Firebird maximum number of array dimensions.
			_descriptor.Bounds = new ArrayBound[16];
			DbValue[] values;
			var i = 0;
			while ((values = lookup.Fetch()) != null)
			{
				_descriptor.Bounds[i].LowerBound = values[0].GetInt32();
				_descriptor.Bounds[i].UpperBound = values[1].GetInt32();
				i++;
			}
		}
		finally
		{
			lookup.Dispose2();
		}
	}

	private async ValueTask LookupBoundsAsync(CancellationToken cancellationToken = default)
	{
		await LookupDescAsync(cancellationToken).ConfigureAwait(false);
		var lookup = Database.CreateStatement(Transaction);
		try
		{
			await lookup.PrepareAsync(GetArrayBounds(), cancellationToken).ConfigureAwait(false);
			await lookup.ExecuteAsync(0, EmptyDescriptorFiller.Instance, cancellationToken).ConfigureAwait(false);

			// 16 is the Firebird maximum number of array dimensions.
			_descriptor.Bounds = new ArrayBound[16];
			DbValue[] values;
			var i = 0;
			while ((values = await lookup.FetchAsync(cancellationToken).ConfigureAwait(false)) != null)
			{
				_descriptor.Bounds[i].LowerBound = values[0].GetInt32();
				_descriptor.Bounds[i].UpperBound = values[1].GetInt32();
				i++;
			}
		}
		finally
		{
			await lookup.Dispose2Async(cancellationToken).ConfigureAwait(false);
		}
	}

	private void LookupDesc()
	{
		var lookup = Database.CreateStatement(Transaction);
		try
		{
			lookup.Prepare(GetArrayDesc());
			lookup.Execute(0, EmptyDescriptorFiller.Instance);

			_descriptor = new ArrayDesc();
			var values = lookup.Fetch();
			if (values != null && values.Length > 0)
			{
				_descriptor.RelationName = _tableName;
				_descriptor.FieldName = _fieldName;
				_descriptor.DataType = values[0].GetByte();
				_descriptor.Scale = values[1].GetInt16();
				_descriptor.Length = values[2].GetInt16();
				_descriptor.Dimensions = values[3].GetInt16();
				_descriptor.Flags = 0;

				_rdbFieldName = (values[4].GetString()).Trim();
			}
			else
			{
				// No matching row in RDB$RELATION_FIELDS/RDB$FIELDS.
				throw new InvalidOperationException();
			}
		}
		finally
		{
			lookup.Dispose2();
		}
	}

	private async ValueTask LookupDescAsync(CancellationToken cancellationToken = default)
	{
		var lookup = Database.CreateStatement(Transaction);
		try
		{
			await lookup.PrepareAsync(GetArrayDesc(), cancellationToken).ConfigureAwait(false);
			await lookup.ExecuteAsync(0, EmptyDescriptorFiller.Instance, cancellationToken).ConfigureAwait(false);

			_descriptor = new ArrayDesc();
			var values = await lookup.FetchAsync(cancellationToken).ConfigureAwait(false);
			if (values != null && values.Length > 0)
			{
				_descriptor.RelationName = _tableName;
				_descriptor.FieldName = _fieldName;
				_descriptor.DataType = values[0].GetByte();
				_descriptor.Scale = values[1].GetInt16();
				_descriptor.Length = values[2].GetInt16();
				_descriptor.Dimensions = values[3].GetInt16();
				_descriptor.Flags = 0;

				_rdbFieldName = (await values[4].GetStringAsync(cancellationToken).ConfigureAwait(false)).Trim();
			}
			else
			{
				// No matching row in RDB$RELATION_FIELDS/RDB$FIELDS.
				throw new InvalidOperationException();
			}
		}
		finally
		{
			await lookup.Dispose2Async(cancellationToken).ConfigureAwait(false);
		}
	}

	#endregion

	#region Protected Methods

	/// <summary>
	/// Computes the slice byte length from the descriptor: element count
	/// (product of dimension extents) times element length; VARCHAR-style
	/// elements carry an extra 2-byte length prefix each.
	/// </summary>
	protected int GetSliceLength(bool read)
	{
		var elements = 1;
		for (var i = 0; i < _descriptor.Dimensions; i++)
		{
			var bound = _descriptor.Bounds[i];
			elements *= bound.UpperBound - bound.LowerBound + 1;
		}

		var length = elements * _descriptor.Length;

		switch (_descriptor.DataType)
		{
			case IscCodes.blr_varying:
			case IscCodes.blr_varying2:
				length += elements * 2;
				break;
		}

		return length;
	}

	protected Type GetSystemType()
	{
		return TypeHelper.GetTypeFromBlrType(_descriptor.DataType, default, _descriptor.Scale);
	}

	#endregion

	#region Abstract Methods

	public abstract byte[] GetSlice(int slice_length);
	// Restored generic return type: the async variant yields the slice bytes.
	public abstract ValueTask<byte[]> GetSliceAsync(int slice_length, CancellationToken cancellationToken = default);
	public abstract void PutSlice(Array source_array, int slice_length);
	public abstract ValueTask PutSliceAsync(Array source_array, int slice_length, CancellationToken cancellationToken = default);

	#endregion

	#region Protected Abstract Methods

	protected abstract Array DecodeSlice(byte[] slice);
	// Restored generic return type: the async variant yields the decoded array.
	protected abstract ValueTask<Array> DecodeSliceAsync(byte[] slice, CancellationToken cancellationToken = default);

	#endregion

	#region Private Methods

	// Builds the metadata query for the array column's descriptor.
	// NOTE(review): table/field names are interpolated directly into the SQL;
	// they come from driver metadata rather than end users, but names
	// containing a quote would still break the statement.
	private string GetArrayDesc()
	{
		var sql = new StringBuilder();

		sql.Append(
			"SELECT Y.RDB$FIELD_TYPE, Y.RDB$FIELD_SCALE, Y.RDB$FIELD_LENGTH, Y.RDB$DIMENSIONS, X.RDB$FIELD_SOURCE " +
			"FROM RDB$RELATION_FIELDS X, RDB$FIELDS Y " +
			"WHERE X.RDB$FIELD_SOURCE = Y.RDB$FIELD_NAME ");

		if (_tableName != null && _tableName.Length != 0)
		{
			sql.AppendFormat(" AND X.RDB$RELATION_NAME = '{0}'", _tableName);
		}

		if (_fieldName != null && _fieldName.Length != 0)
		{
			sql.AppendFormat(" AND X.RDB$FIELD_NAME = '{0}'", _fieldName);
		}

		return sql.ToString();
	}

	// Builds the query for the per-dimension bounds.
	// NOTE(review): the guard checks _fieldName but interpolates _rdbFieldName
	// (the RDB$FIELD_SOURCE resolved by LookupDesc) — presumably intentional;
	// confirm before changing.
	private string GetArrayBounds()
	{
		var sql = new StringBuilder();

		sql.Append("SELECT X.RDB$LOWER_BOUND, X.RDB$UPPER_BOUND FROM RDB$FIELD_DIMENSIONS X ");

		if (_fieldName != null && _fieldName.Length != 0)
		{
			sql.AppendFormat("WHERE X.RDB$FIELD_NAME = '{0}'", _rdbFieldName);
		}

		sql.Append(" ORDER BY X.RDB$DIMENSION");

		return sql.ToString();
	}

	#endregion
}


================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Common/ArrayBound.cs
================================================
/*
 *	The contents of this file are subject to the Initial
 *	Developer's Public License Version 1.0 (the "License");
 *	you may not use this file except in compliance with the
 *	License. You may obtain a copy of the License at
 *	https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *	Software distributed under the License is distributed on
 *	an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *	express or implied. See the License for the specific
 *	language governing rights and limitations under the License.
 *
 *	All Rights Reserved.
 */

//$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net)

using System;
using System.Runtime.InteropServices;

namespace FirebirdSql.Data.Common;

// Lower/upper bound of one dimension of a Firebird ARRAY column slice.
[StructLayout(LayoutKind.Auto)]
internal struct ArrayBound
{
	public int LowerBound { get; set; }
	public int UpperBound { get; set; }
}

================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Common/ArrayDesc.cs
================================================
/*
 *	The contents of this file are subject to the Initial
 *	Developer's Public License Version 1.0 (the "License");
 *	you may not use this file except in compliance with the
 *	License. You may obtain a copy of the License at
 *	https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *	Software distributed under the License is distributed on
 *	an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *	express or implied. See the License for the specific
 *	language governing rights and limitations under the License.
 *
 *	All Rights Reserved.
 */

//$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net)

using System;
using System.Runtime.InteropServices;

namespace FirebirdSql.Data.Common;

// Descriptor of a Firebird ARRAY column: element BLR type, scale, byte
// length, owning relation/field names, dimension count and per-dimension
// bounds. Mirrors the native ISC_ARRAY_DESC structure.
[StructLayout(LayoutKind.Auto)]
internal struct ArrayDesc
{
	public byte DataType { get; set; }
	public short Scale { get; set; }
	public short Length { get; set; }
	public string FieldName { get; set; }
	public string RelationName { get; set; }
	public short Dimensions { get; set; }
	// Specifies whether the array is to be accessed in
	// row-major or column-major order.
	public short Flags { get; set; }
	public ArrayBound[] Bounds { get; set; }
}

================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Common/BatchBase.cs
================================================
/*
 *	The contents of this file are subject to the Initial
 *	Developer's Public License Version 1.0 (the "License");
 *	you may not use this file except in compliance with the
 *	License.
You may obtain a copy of the License at
 *	https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *	Software distributed under the License is distributed on
 *	an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *	express or implied. See the License for the specific
 *	language governing rights and limitations under the License.
 *
 *	All Rights Reserved.
 */

//$Authors = Jiri Cincura (jiri@cincura.net)

using System.Threading;
using System.Threading.Tasks;

namespace FirebirdSql.Data.Common;

/// <summary>
/// Base class for server-side statement batching (Firebird 4+ wire batch API).
/// </summary>
internal abstract class BatchBase
{
	// Statement whose parameter sets this batch executes.
	public abstract StatementBase Statement { get; }

	// Wire options forwarded to the server's batch configuration.
	public bool MultiError { get; set; }
	public int BatchBufferSize { get; set; }

	/// <summary>Per-row outcome of a batch execution.</summary>
	public class ExecuteResultItem
	{
		public int RecordsAffected { get; set; }
		public bool IsError { get; set; }
		public IscException Exception { get; set; }
	}

	public abstract ExecuteResultItem[] Execute(int count, IDescriptorFiller descriptorFiller);
	// Restored generic return type: async variant yields the per-row results.
	public abstract ValueTask<ExecuteResultItem[]> ExecuteAsync(int count, IDescriptorFiller descriptorFiller, CancellationToken cancellationToken = default);
	public abstract int ComputeBatchSize(int count, IDescriptorFiller descriptorFiller);
	// Restored generic return type: async variant yields the computed size.
	public abstract ValueTask<int> ComputeBatchSizeAsync(int count, IDescriptorFiller descriptorFiller, CancellationToken cancellationToken = default);
	public abstract void Release();
	public abstract ValueTask ReleaseAsync(CancellationToken cancellationToken = default);
}

================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Common/BatchParameterBuffer.cs
================================================
/*
 *	The contents of this file are subject to the Initial
 *	Developer's Public License Version 1.0 (the "License");
 *	you may not use this file except in compliance with the
 *	License. You may obtain a copy of the License at
 *	https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
* * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. */ //$Authors = Jiri Cincura (jiri@cincura.net) using System.Text; namespace FirebirdSql.Data.Common; internal sealed class BatchParameterBuffer : DatabaseParameterBufferBase { public BatchParameterBuffer(Encoding encoding) : base(IscCodes.Batch.VERSION1, encoding) { } public override void Append(int type, byte value) { WriteByte(type); Write(1); Write(value); } public override void Append(int type, short value) { WriteByte(type); Write(2); Write(value); } public override void Append(int type, int value) { WriteByte(type); Write(4); Write(value); } public override void Append(int type, byte[] buffer) { WriteByte(type); Write(buffer.Length); Write(buffer); } } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Common/BinaryEncoding.cs ================================================ /* * BinaryEncoding handler for .Net. This class implements * a symmetric encoding that will convert string to byte[] * and byte[] to string without any character set * transliteration. * * The contents of this file were written by jimb * at connectedsw.com on Dec 9, 2004. It is placed in * the Public Domain and may be used as you see fit. */ using System; using System.Text; namespace FirebirdSql.Data.Common; internal class BinaryEncoding : Encoding { public static string BytesToString(byte[] byteArray) { // This code isn't great because it requires a double copy, // but it requires unsafe code to solve the problem efficiently. 
var charArray = new char[byteArray.GetLength(0)]; Array.Copy(byteArray, charArray, byteArray.Length); return new string(charArray); } static void Validate(object data, int dataLength, int index, int count) { if (data == null) { throw new ArgumentNullException(); } if (index < 0 || count < 0 || dataLength - index < count) { throw new ArgumentOutOfRangeException(); } } public override int GetByteCount(char[] chars, int index, int count) { Validate(chars, chars.Length, index, count); return count; } public override int GetByteCount(string chars) { return chars.Length; } public override int GetBytes(char[] chars, int charIndex, int charCount, byte[] bytes, int index) { Validate(chars, chars.Length, charIndex, charCount); if (index < 0 || index > bytes.Length) { throw new ArgumentOutOfRangeException(); } if (bytes.Length - index < charCount) { throw new ArgumentException(); } var charEnd = charIndex + charCount; while (charIndex < charEnd) { bytes[index++] = (byte)chars[charIndex++]; } return charCount; } public override int GetBytes(string chars, int charIndex, int charCount, byte[] bytes, int index) { Validate(chars, chars.Length, charIndex, charCount); if (index < 0 || index > bytes.Length) { throw new ArgumentOutOfRangeException(); } if (bytes.Length - index < charCount) { throw new ArgumentException(); } var charEnd = charIndex + charCount; while (charIndex < charEnd) { bytes[index++] = (byte)chars[charIndex++]; } return charCount; } public override int GetCharCount(byte[] bytes, int index, int count) { Validate(bytes, bytes.Length, index, count); return (count); } public override int GetChars(byte[] bytes, int index, int count, char[] chars, int charIndex) { Validate(bytes, bytes.Length, index, count); if (charIndex < 0 || charIndex > chars.Length) { throw new ArgumentOutOfRangeException(); } if (chars.Length - charIndex < count) { throw new ArgumentException(); } var byteEnd = index + count; while (index < byteEnd) { chars[charIndex++] = (char)bytes[index++]; } 
return count; } public override string GetString(byte[] bytes) { return BytesToString(bytes); } public override string GetString(byte[] bytes, int index, int count) { Validate(bytes, bytes.Length, index, count); return BytesToString(bytes); } public override int GetMaxByteCount(int charCount) { return charCount; } public override int GetMaxCharCount(int count) { return count; } } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Common/BlobBase.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. 
*/

//$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net)

using System;
using System.IO;
using System.Threading;
using System.Threading.Tasks;

namespace FirebirdSql.Data.Common;

/// <summary>
/// Base class for BLOB access: segment-based read/write of a blob identified
/// by <see cref="Id"/>, with charset-aware string helpers. On any failure
/// during a read or write the blob is cancelled before the exception
/// propagates.
/// </summary>
internal abstract class BlobBase
{
	// RBL_* state flags reported by the server (e.g. RBL_eof_pending).
	private int _rblFlags;
	private Charset _charset;
	private int _segmentSize;

	protected long _blobId;
	protected bool _isOpen;
	protected int _position;
	protected TransactionBase _transaction;

	public abstract int Handle { get; }
	public long Id => _blobId;
	public bool EOF => (_rblFlags & IscCodes.RBL_eof_pending) != 0;
	public bool IsOpen => _isOpen;
	public int SegmentSize => _segmentSize;
	public int Position => _position;

	public abstract DatabaseBase Database { get; }

	protected BlobBase(DatabaseBase db)
	{
		// Segment size follows the connection's packet size.
		_segmentSize = db.PacketSize;
		_charset = db.Charset;
	}

	/// <summary>Reads the whole blob and decodes it with the connection charset.</summary>
	public string ReadString()
	{
		var buffer = Read();
		return _charset.GetString(buffer, 0, buffer.Length);
	}

	// Restored generic return type (ValueTask<string>): yields the decoded text.
	public async ValueTask<string> ReadStringAsync(CancellationToken cancellationToken = default)
	{
		var buffer = await ReadAsync(cancellationToken).ConfigureAwait(false);
		return _charset.GetString(buffer, 0, buffer.Length);
	}

	/// <summary>Reads the whole blob segment by segment into a byte array.</summary>
	public byte[] Read()
	{
		using (var ms = new MemoryStream())
		{
			try
			{
				Open();

				while (!EOF)
				{
					GetSegment(ms);
				}

				Close();
			}
			catch
			{
				// Cancel the blob and rethrow the exception
				Cancel();

				throw;
			}

			return ms.ToArray();
		}
	}

	// Restored generic return type (ValueTask<byte[]>): yields the blob contents.
	public async ValueTask<byte[]> ReadAsync(CancellationToken cancellationToken = default)
	{
		using (var ms = new MemoryStream())
		{
			try
			{
				await OpenAsync(cancellationToken).ConfigureAwait(false);

				while (!EOF)
				{
					await GetSegmentAsync(ms, cancellationToken).ConfigureAwait(false);
				}

				await CloseAsync(cancellationToken).ConfigureAwait(false);
			}
			catch
			{
				// Cancel the blob and rethrow the exception
				await CancelAsync(cancellationToken).ConfigureAwait(false);

				throw;
			}

			return ms.ToArray();
		}
	}

	/// <summary>Encodes <paramref name="data"/> with the connection charset and writes it.</summary>
	public void Write(string data)
	{
		Write(_charset.GetBytes(data));
	}

	public ValueTask WriteAsync(string data, CancellationToken cancellationToken = default)
	{
		return WriteAsync(_charset.GetBytes(data), cancellationToken);
	}

	public void Write(byte[] buffer)
	{
		Write(buffer, 0, buffer.Length);
	}

	public ValueTask WriteAsync(byte[] buffer, CancellationToken cancellationToken = default)
	{
		return WriteAsync(buffer, 0, buffer.Length, cancellationToken);
	}

	/// <summary>
	/// Creates the blob and writes <paramref name="count"/> bytes starting at
	/// <paramref name="index"/>, split into segments of at most SegmentSize.
	/// </summary>
	public void Write(byte[] buffer, int index, int count)
	{
		try
		{
			Create();

			var length = count;
			var offset = index;
			var chunk = length >= _segmentSize ? _segmentSize : length;
			var tmpBuffer = new byte[chunk];
			while (length > 0)
			{
				if (chunk > length)
				{
					// Last, shorter segment needs a buffer of exact size.
					chunk = length;
					tmpBuffer = new byte[chunk];
				}

				Array.Copy(buffer, offset, tmpBuffer, 0, chunk);
				PutSegment(tmpBuffer);

				offset += chunk;
				length -= chunk;
			}

			Close();
		}
		catch
		{
			// Cancel the blob and rethrow the exception
			Cancel();

			throw;
		}
	}

	public async ValueTask WriteAsync(byte[] buffer, int index, int count, CancellationToken cancellationToken = default)
	{
		try
		{
			await CreateAsync(cancellationToken).ConfigureAwait(false);

			var length = count;
			var offset = index;
			var chunk = length >= _segmentSize ? _segmentSize : length;
			var tmpBuffer = new byte[chunk];
			while (length > 0)
			{
				if (chunk > length)
				{
					// Last, shorter segment needs a buffer of exact size.
					chunk = length;
					tmpBuffer = new byte[chunk];
				}

				Array.Copy(buffer, offset, tmpBuffer, 0, chunk);
				await PutSegmentAsync(tmpBuffer, cancellationToken).ConfigureAwait(false);

				offset += chunk;
				length -= chunk;
			}

			await CloseAsync(cancellationToken).ConfigureAwait(false);
		}
		catch
		{
			// Cancel the blob and rethrow the exception
			await CancelAsync(cancellationToken).ConfigureAwait(false);

			throw;
		}
	}

	public abstract void Create();
	public abstract ValueTask CreateAsync(CancellationToken cancellationToken = default);
	public abstract void Open();
	public abstract ValueTask OpenAsync(CancellationToken cancellationToken = default);
	public abstract int GetLength();
	// Restored generic return type: yields the blob length.
	public abstract ValueTask<int> GetLengthAsync(CancellationToken cancellationToken = default);
	public abstract byte[] GetSegment();
	// Restored generic return type: yields the next segment's bytes.
	public abstract ValueTask<byte[]> GetSegmentAsync(CancellationToken cancellationToken = default);
	public abstract void GetSegment(Stream stream);
	public abstract ValueTask GetSegmentAsync(Stream stream, CancellationToken cancellationToken = default);
	public abstract void PutSegment(byte[] buffer);
	public abstract ValueTask PutSegmentAsync(byte[] buffer, CancellationToken cancellationToken = default);
	public abstract void Seek(int offset, int seekMode);
	public abstract ValueTask SeekAsync(int offset, int seekMode, CancellationToken cancellationToken = default);
	public abstract void Close();
	public abstract ValueTask CloseAsync(CancellationToken cancellationToken = default);
	public abstract void Cancel();
	public abstract ValueTask CancelAsync(CancellationToken cancellationToken = default);

	protected void RblAddValue(int rblValue)
	{
		_rblFlags |= rblValue;
	}

	protected void RblRemoveValue(int rblValue)
	{
		_rblFlags &= ~rblValue;
	}
}

================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Common/BlobParameterBuffer.cs
================================================
/*
 *	The
contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. */ //$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net) using System.Text; namespace FirebirdSql.Data.Common; internal sealed class BlobParameterBuffer : ParameterBuffer { public BlobParameterBuffer(Encoding encoding) { Encoding = encoding; } public Encoding Encoding { get; } } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Common/BlobStream.cs ================================================ using System; using System.IO; using System.Threading; using System.Threading.Tasks; namespace FirebirdSql.Data.Common; public sealed class BlobStream : Stream { private readonly BlobBase _blobHandle; private int _position; private byte[] _currentSegment; private int _segmentPosition; private int Available => _currentSegment?.Length - _segmentPosition ?? 
0; internal BlobStream(BlobBase blob) { _blobHandle = blob; _position = 0; } public override long Position { get => _position; set => Seek(value, SeekOrigin.Begin); } public override long Length { get { if (!_blobHandle.IsOpen) _blobHandle.Open(); return _blobHandle.GetLength(); } } public override void Flush() { } public override int Read(byte[] buffer, int offset, int count) { ValidateBufferSize(buffer, offset, count); if (!_blobHandle.IsOpen) _blobHandle.Open(); var copied = 0; var remainingBufferSize = buffer.Length - offset; do { if (remainingBufferSize == 0) break; if (Available > 0) { var toCopy = Math.Min(Available, remainingBufferSize); Array.Copy(_currentSegment, _segmentPosition, buffer, offset + copied, toCopy); copied += toCopy; _segmentPosition += toCopy; remainingBufferSize -= toCopy; _position += toCopy; } if (_blobHandle.EOF) break; if (Available == 0) { _currentSegment = _blobHandle.GetSegment(); _segmentPosition = 0; } } while (copied < count); return copied; } public override async Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) { ValidateBufferSize(buffer, offset, count); if (!_blobHandle.IsOpen) await _blobHandle.OpenAsync(cancellationToken).ConfigureAwait(false); var copied = 0; var remainingBufferSize = buffer.Length - offset; do { if (remainingBufferSize == 0) break; if (Available > 0) { var toCopy = Math.Min(Available, remainingBufferSize); Array.Copy(_currentSegment, _segmentPosition, buffer, offset + copied, toCopy); copied += toCopy; _segmentPosition += toCopy; remainingBufferSize -= toCopy; _position += toCopy; } if (_blobHandle.EOF) break; if (Available == 0) { _currentSegment = await _blobHandle.GetSegmentAsync(cancellationToken).ConfigureAwait(false); _segmentPosition = 0; } } while (copied < count); return copied; } public override long Seek(long offset, SeekOrigin origin) { if (!_blobHandle.IsOpen) _blobHandle.Open(); var seekMode = origin switch { SeekOrigin.Begin => 
IscCodes.isc_blb_seek_from_head, SeekOrigin.Current => IscCodes.isc_blb_seek_relative, SeekOrigin.End => IscCodes.isc_blb_seek_from_tail, _ => throw new ArgumentOutOfRangeException(nameof(origin)) }; _blobHandle.Seek((int)offset, seekMode); return _position = _blobHandle.Position; } public override void SetLength(long value) { throw new NotSupportedException(); } public override void Write(byte[] buffer, int offset, int count) { try { if (!_blobHandle.IsOpen) _blobHandle.Create(); var chunk = count >= _blobHandle.SegmentSize ? _blobHandle.SegmentSize : count; var tmpBuffer = new byte[chunk]; while (count > 0) { if (chunk > count) { chunk = count; tmpBuffer = new byte[chunk]; } Array.Copy(buffer, offset, tmpBuffer, 0, chunk); _blobHandle.PutSegment(tmpBuffer); offset += chunk; count -= chunk; _position += chunk; } } catch { _blobHandle.Cancel(); throw; } } public override async Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) { try { if (!_blobHandle.IsOpen) await _blobHandle.CreateAsync(cancellationToken).ConfigureAwait(false); var chunk = count >= _blobHandle.SegmentSize ? 
_blobHandle.SegmentSize : count; var tmpBuffer = new byte[chunk]; while (count > 0) { if (chunk > count) { chunk = count; tmpBuffer = new byte[chunk]; } Array.Copy(buffer, offset, tmpBuffer, 0, chunk); await _blobHandle.PutSegmentAsync(tmpBuffer, cancellationToken).ConfigureAwait(false); offset += chunk; count -= chunk; _position += chunk; } } catch { await _blobHandle.CancelAsync(cancellationToken).ConfigureAwait(false); throw; } } public override bool CanRead => true; public override bool CanSeek => true; public override bool CanWrite => true; protected override void Dispose(bool disposing) { _blobHandle.Close(); } public override ValueTask DisposeAsync() { return _blobHandle.CloseAsync(); } private static void ValidateBufferSize(byte[] buffer, int offset, int count) { if (buffer is null) throw new ArgumentNullException(nameof(buffer)); if (buffer.Length < offset + count) throw new InvalidOperationException(); } } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Common/Charset.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. 
*/ //$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net) using System; using System.Collections.Generic; using System.Linq; using System.Text; namespace FirebirdSql.Data.Common; internal sealed class Charset { internal const string Octets = "OCTETS"; internal const string None = "NONE"; private readonly static Dictionary charsetsById; private readonly static Dictionary charsetsByName; static Charset() { var charsets = GetSupportedCharsets(); charsetsById = charsets.ToDictionary(x => x.Identifier); charsetsByName = charsets.ToDictionary(x => x.Name, StringComparer.CurrentCultureIgnoreCase); } public static Charset DefaultCharset => charsetsByName[None]; public static bool TryGetById(int id, out Charset charset) => charsetsById.TryGetValue(id, out charset); public static bool TryGetByName(string name, out Charset charset) => charsetsByName.TryGetValue(name, out charset); private static List GetSupportedCharsets() { var charsets = new List(); charsets.Add(new Charset(0, None, 1, None)); charsets.Add(new Charset(1, Octets, 1, Octets)); charsets.Add(new Charset(2, "ASCII", 1, "ascii")); charsets.Add(new Charset(3, "UNICODE_FSS", 3, "UTF-8")); charsets.Add(new Charset(4, "UTF8", 4, "UTF-8")); TryAddCharset(charsets, () => new Charset(5, "SJIS_0208", 2, "shift_jis")); TryAddCharset(charsets, () => new Charset(6, "EUCJ_0208", 2, "euc-jp")); TryAddCharset(charsets, () => new Charset(7, "ISO2022-JP", 2, "iso-2022-jp")); TryAddCharset(charsets, () => new Charset(10, "DOS437", 1, "IBM437")); TryAddCharset(charsets, () => new Charset(11, "DOS850", 1, "ibm850")); TryAddCharset(charsets, () => new Charset(12, "DOS865", 1, "IBM865")); TryAddCharset(charsets, () => new Charset(13, "DOS860", 1, "IBM860")); TryAddCharset(charsets, () => new Charset(14, "DOS863", 1, "IBM863")); TryAddCharset(charsets, () => new Charset(21, "ISO8859_1", 1, "iso-8859-1")); TryAddCharset(charsets, () => new Charset(22, "ISO8859_2", 1, "iso-8859-2")); TryAddCharset(charsets, () => new 
Charset(44, "KSC_5601", 2, "ks_c_5601-1987")); TryAddCharset(charsets, () => new Charset(47, "DOS861", 1, "ibm861")); TryAddCharset(charsets, () => new Charset(51, "WIN1250", 1, "windows-1250")); TryAddCharset(charsets, () => new Charset(52, "WIN1251", 1, "windows-1251")); TryAddCharset(charsets, () => new Charset(53, "WIN1252", 1, "windows-1252")); TryAddCharset(charsets, () => new Charset(54, "WIN1253", 1, "windows-1253")); TryAddCharset(charsets, () => new Charset(55, "WIN1254", 1, "windows-1254")); TryAddCharset(charsets, () => new Charset(56, "BIG_5", 2, "big5")); TryAddCharset(charsets, () => new Charset(57, "GB_2312", 2, "gb2312")); TryAddCharset(charsets, () => new Charset(58, "WIN1255", 1, "windows-1255")); TryAddCharset(charsets, () => new Charset(59, "WIN1256", 1, "windows-1256")); TryAddCharset(charsets, () => new Charset(60, "WIN1257", 1, "windows-1257")); //TryAddCharset(charsets, () => new Charset(61, "UTF16", 4, "utf-16")); //TryAddCharset(charsets, () => new Charset(62, "UTF32", 4, "utf-32")); TryAddCharset(charsets, () => new Charset(63, "KOI8R", 2, "koi8-r")); TryAddCharset(charsets, () => new Charset(64, "KOI8U", 2, "koi8-u")); TryAddCharset(charsets, () => new Charset(65, "TIS620", 1, "tis-620")); return charsets; } private static void TryAddCharset(List charsets, Func charsetCreator) { try { charsets.Add(charsetCreator()); } catch { } } public int Identifier { get; } public string Name { get; } public string SystemName { get; private set; } public int BytesPerCharacter { get; } public Encoding Encoding { get; } public bool IsOctetsCharset { get; } public bool IsNoneCharset { get; } public Charset(int id, string name, int bytesPerCharacter, string systemName) { Identifier = id; Name = name; BytesPerCharacter = bytesPerCharacter; SystemName = systemName; IsNoneCharset = false; IsOctetsCharset = false; switch (SystemName) { case None: Encoding = Encoding.GetANSIEncoding(); IsNoneCharset = true; break; case Octets: Encoding = new BinaryEncoding(); 
IsOctetsCharset = true; break; default: Encoding = Encoding.GetEncoding(SystemName); break; } } public byte[] GetBytes(string s) { return Encoding.GetBytes(s); } public int GetBytes(string s, int charIndex, int charCount, byte[] bytes, int byteIndex) { return Encoding.GetBytes(s, charIndex, charCount, bytes, byteIndex); } public string GetString(byte[] buffer) { return Encoding.GetString(buffer); } public string GetString(byte[] buffer, int index, int count) { return Encoding.GetString(buffer, index, count); } } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Common/ConnectionPoolLifetimeHelper.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. You may obtain a copy of the License at * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt. * * Software distributed under the License is distributed on * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either * express or implied. See the License for the specific * language governing rights and limitations under the License. * * All Rights Reserved. */ //$Authors = @realic, Jiri Cincura (jiri@cincura.net) using System; namespace FirebirdSql.Data.Common; internal static class ConnectionPoolLifetimeHelper { internal static bool IsAlive(long connectionLifetime, long created, long now) { if (connectionLifetime == 0) return true; return (now - created) < (connectionLifetime * 1000); } } ================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Common/ConnectionString.cs ================================================ /* * The contents of this file are subject to the Initial * Developer's Public License Version 1.0 (the "License"); * you may not use this file except in compliance with the * License. 
/// <summary>
/// Parses, validates and exposes the options of a Firebird connection string.
/// Fix: restores the generic type arguments and regex named capture groups that
/// were stripped from this text (e.g. <c>Dictionary&lt;string, object&gt;</c>,
/// <c>(?&lt;host&gt;...)</c>) — without them the file does not compile.
/// </summary>
internal sealed class ConnectionString
{
	#region Constants

	// Default values for every supported option.
	internal const string DefaultValueDataSource = "";
	internal const int DefaultValuePortNumber = 3050;
	internal const string DefaultValueUserId = "";
	internal const string DefaultValuePassword = "";
	internal const string DefaultValueRoleName = "";
	internal const string DefaultValueCatalog = "";
	internal const string DefaultValueCharacterSet = "UTF8";
	internal const int DefaultValueDialect = 3;
	internal const int DefaultValuePacketSize = 8192;
	internal const bool DefaultValuePooling = true;
	internal const int DefaultValueConnectionLifetime = 0;
	internal const int DefaultValueMinPoolSize = 0;
	internal const int DefaultValueMaxPoolSize = 100;
	internal const int DefaultValueConnectionTimeout = 15;
	internal const int DefaultValueFetchSize = 200;
	internal const FbServerType DefaultValueServerType = FbServerType.Default;
	internal const IsolationLevel DefaultValueIsolationLevel = IsolationLevel.ReadCommitted;
	internal const bool DefaultValueRecordsAffected = true;
	internal const bool DefaultValueEnlist = true;
	internal const string DefaultValueClientLibrary = "fbembed";
	internal const int DefaultValueDbCachePages = 0;
	internal const bool DefaultValueNoDbTriggers = false;
	internal const bool DefaultValueNoGarbageCollect = false;
	internal const bool DefaultValueCompression = false;
	internal const byte[] DefaultValueCryptKey = null;
	internal const FbWireCrypt DefaultValueWireCrypt = FbWireCrypt.Enabled;
	internal const string DefaultValueApplicationName = "";
	internal const int DefaultValueCommandTimeout = 0;
	internal const int DefaultValueParallelWorkers = 0;

	// Canonical (lower-case) keyword for each option.
	internal const string DefaultKeyUserId = "user id";
	internal const string DefaultKeyPortNumber = "port number";
	internal const string DefaultKeyDataSource = "data source";
	internal const string DefaultKeyPassword = "password";
	internal const string DefaultKeyRoleName = "role name";
	internal const string DefaultKeyCatalog = "initial catalog";
	internal const string DefaultKeyCharacterSet = "character set";
	internal const string DefaultKeyDialect = "dialect";
	internal const string DefaultKeyPacketSize = "packet size";
	internal const string DefaultKeyPooling = "pooling";
	internal const string DefaultKeyConnectionLifetime = "connection lifetime";
	internal const string DefaultKeyMinPoolSize = "min pool size";
	internal const string DefaultKeyMaxPoolSize = "max pool size";
	internal const string DefaultKeyConnectionTimeout = "connection timeout";
	internal const string DefaultKeyFetchSize = "fetch size";
	internal const string DefaultKeyServerType = "server type";
	internal const string DefaultKeyIsolationLevel = "isolation level";
	internal const string DefaultKeyRecordsAffected = "records affected";
	internal const string DefaultKeyEnlist = "enlist";
	internal const string DefaultKeyClientLibrary = "client library";
	internal const string DefaultKeyDbCachePages = "cache pages";
	internal const string DefaultKeyNoDbTriggers = "no db triggers";
	internal const string DefaultKeyNoGarbageCollect = "no garbage collect";
	internal const string DefaultKeyCompression = "compression";
	internal const string DefaultKeyCryptKey = "crypt key";
	internal const string DefaultKeyWireCrypt = "wire crypt";
	internal const string DefaultKeyApplicationName = "application name";
	internal const string DefaultKeyCommandTimeout = "command timeout";
	internal const string DefaultKeyParallelWorkers = "parallel workers";

	#endregion

	#region Static Fields

	// Maps every accepted spelling (case-insensitive) to its canonical keyword.
	internal static readonly IDictionary<string, string> Synonyms = new Dictionary<string, string>(StringComparer.OrdinalIgnoreCase)
	{
		{ DefaultKeyDataSource, DefaultKeyDataSource },
		{ "datasource", DefaultKeyDataSource },
		{ "server", DefaultKeyDataSource },
		{ "host", DefaultKeyDataSource },
		{ "port", DefaultKeyPortNumber },
		{ DefaultKeyPortNumber, DefaultKeyPortNumber },
		{ "database", DefaultKeyCatalog },
		{ DefaultKeyCatalog, DefaultKeyCatalog },
		{ DefaultKeyUserId, DefaultKeyUserId },
		{ "userid", DefaultKeyUserId },
		{ "uid", DefaultKeyUserId },
		{ "user", DefaultKeyUserId },
		{ "user name", DefaultKeyUserId },
		{ "username", DefaultKeyUserId },
		{ DefaultKeyPassword, DefaultKeyPassword },
		{ "user password", DefaultKeyPassword },
		{ "userpassword", DefaultKeyPassword },
		{ DefaultKeyDialect, DefaultKeyDialect },
		{ DefaultKeyPooling, DefaultKeyPooling },
		{ DefaultKeyMaxPoolSize, DefaultKeyMaxPoolSize },
		{ "maxpoolsize", DefaultKeyMaxPoolSize },
		{ DefaultKeyMinPoolSize, DefaultKeyMinPoolSize },
		{ "minpoolsize", DefaultKeyMinPoolSize },
		{ DefaultKeyCharacterSet, DefaultKeyCharacterSet },
		{ "charset", DefaultKeyCharacterSet },
		{ DefaultKeyConnectionLifetime, DefaultKeyConnectionLifetime },
		{ "connectionlifetime", DefaultKeyConnectionLifetime },
		{ "timeout", DefaultKeyConnectionTimeout },
		{ DefaultKeyConnectionTimeout, DefaultKeyConnectionTimeout },
		{ "connectiontimeout", DefaultKeyConnectionTimeout },
		{ DefaultKeyPacketSize, DefaultKeyPacketSize },
		{ "packetsize", DefaultKeyPacketSize },
		{ "role", DefaultKeyRoleName },
		{ DefaultKeyRoleName, DefaultKeyRoleName },
		{ DefaultKeyFetchSize, DefaultKeyFetchSize },
		{ "fetchsize", DefaultKeyFetchSize },
		{ DefaultKeyServerType, DefaultKeyServerType },
		{ "servertype", DefaultKeyServerType },
		{ DefaultKeyIsolationLevel, DefaultKeyIsolationLevel },
		{ "isolationlevel", DefaultKeyIsolationLevel },
		{ DefaultKeyRecordsAffected, DefaultKeyRecordsAffected },
		{ DefaultKeyEnlist, DefaultKeyEnlist },
		{ "clientlibrary", DefaultKeyClientLibrary },
		{ DefaultKeyClientLibrary, DefaultKeyClientLibrary },
		{ DefaultKeyDbCachePages, DefaultKeyDbCachePages },
		{ "cachepages", DefaultKeyDbCachePages },
		{ "pagebuffers", DefaultKeyDbCachePages },
		{ "page buffers", DefaultKeyDbCachePages },
		{ DefaultKeyNoDbTriggers, DefaultKeyNoDbTriggers },
		{ "nodbtriggers", DefaultKeyNoDbTriggers },
		{ "no dbtriggers", DefaultKeyNoDbTriggers },
		{ "no database triggers", DefaultKeyNoDbTriggers },
		{ "nodatabasetriggers", DefaultKeyNoDbTriggers },
		{ DefaultKeyNoGarbageCollect, DefaultKeyNoGarbageCollect },
		{ "nogarbagecollect", DefaultKeyNoGarbageCollect },
		{ DefaultKeyCompression, DefaultKeyCompression },
		{ "wire compression", DefaultKeyCompression },
		{ DefaultKeyCryptKey, DefaultKeyCryptKey },
		{ "cryptkey", DefaultKeyCryptKey },
		{ DefaultKeyWireCrypt, DefaultKeyWireCrypt },
		{ "wirecrypt", DefaultKeyWireCrypt },
		{ DefaultKeyApplicationName, DefaultKeyApplicationName },
		{ "applicationname", DefaultKeyApplicationName },
		{ "app", DefaultKeyApplicationName },
		{ DefaultKeyCommandTimeout, DefaultKeyCommandTimeout },
		{ "commandtimeout", DefaultKeyCommandTimeout },
		{ DefaultKeyParallelWorkers, DefaultKeyParallelWorkers },
		{ "parallelworkers", DefaultKeyParallelWorkers },
		{ "parallel", DefaultKeyParallelWorkers },
	};

	// Canonical keyword -> default value (typed); seeds every new instance.
	internal static readonly IDictionary<string, object> DefaultValues = new Dictionary<string, object>(StringComparer.Ordinal)
	{
		{ DefaultKeyDataSource, DefaultValueDataSource },
		{ DefaultKeyPortNumber, DefaultValuePortNumber },
		{ DefaultKeyUserId, DefaultValueUserId },
		{ DefaultKeyPassword, DefaultValuePassword },
		{ DefaultKeyRoleName, DefaultValueRoleName },
		{ DefaultKeyCatalog, DefaultValueCatalog },
		{ DefaultKeyCharacterSet, DefaultValueCharacterSet },
		{ DefaultKeyDialect, DefaultValueDialect },
		{ DefaultKeyPacketSize, DefaultValuePacketSize },
		{ DefaultKeyPooling, DefaultValuePooling },
		{ DefaultKeyConnectionLifetime, DefaultValueConnectionLifetime },
		{ DefaultKeyMinPoolSize, DefaultValueMinPoolSize },
		{ DefaultKeyMaxPoolSize, DefaultValueMaxPoolSize },
		{ DefaultKeyConnectionTimeout, DefaultValueConnectionTimeout },
		{ DefaultKeyFetchSize, DefaultValueFetchSize },
		{ DefaultKeyServerType, DefaultValueServerType },
		{ DefaultKeyIsolationLevel, DefaultValueIsolationLevel },
		{ DefaultKeyRecordsAffected, DefaultValueRecordsAffected },
		{ DefaultKeyEnlist, DefaultValueEnlist },
		{ DefaultKeyClientLibrary, DefaultValueClientLibrary },
		{ DefaultKeyDbCachePages, DefaultValueDbCachePages },
		{ DefaultKeyNoDbTriggers, DefaultValueNoDbTriggers },
		{ DefaultKeyNoGarbageCollect, DefaultValueNoGarbageCollect },
		{ DefaultKeyCompression, DefaultValueCompression },
		{ DefaultKeyCryptKey, DefaultValueCryptKey },
		{ DefaultKeyWireCrypt, DefaultValueWireCrypt },
		{ DefaultKeyApplicationName, DefaultValueApplicationName },
		{ DefaultKeyCommandTimeout, DefaultValueCommandTimeout },
		{ DefaultKeyParallelWorkers, DefaultValueParallelWorkers },
	};

	#endregion

	#region Fields

	// Current option values keyed by canonical keyword; values are the
	// parsed/typed representations (string, int, enum, byte[], ...).
	private Dictionary<string, object> _options;

	#endregion

	#region Properties

	public string UserID => GetString(DefaultKeyUserId, _options.TryGetValue);
	public string Password => GetString(DefaultKeyPassword, _options.TryGetValue);
	public string DataSource => GetString(DefaultKeyDataSource, _options.TryGetValue);
	public int Port => GetInt32(DefaultKeyPortNumber, _options.TryGetValue);
	// |DataDirectory| placeholders are expanded on every read.
	public string Database => ExpandDataDirectory(GetString(DefaultKeyCatalog, _options.TryGetValue));
	public int PacketSize => GetInt32(DefaultKeyPacketSize, _options.TryGetValue);
	public string Role => GetString(DefaultKeyRoleName, _options.TryGetValue);
	public short Dialect => GetInt16(DefaultKeyDialect, _options.TryGetValue);
	public string Charset => GetString(DefaultKeyCharacterSet, _options.TryGetValue);
	public int ConnectionTimeout => GetInt32(DefaultKeyConnectionTimeout, _options.TryGetValue);
	public bool Pooling => GetBoolean(DefaultKeyPooling, _options.TryGetValue);
	public long ConnectionLifetime => GetInt64(DefaultKeyConnectionLifetime, _options.TryGetValue);
	public int MinPoolSize => GetInt32(DefaultKeyMinPoolSize, _options.TryGetValue);
	public int MaxPoolSize => GetInt32(DefaultKeyMaxPoolSize, _options.TryGetValue);
	public int FetchSize => GetInt32(DefaultKeyFetchSize, _options.TryGetValue);
	public FbServerType ServerType => GetServerType(DefaultKeyServerType, _options.TryGetValue);
	public IsolationLevel IsolationLevel => GetIsolationLevel(DefaultKeyIsolationLevel, _options.TryGetValue);
	public bool ReturnRecordsAffected => GetBoolean(DefaultKeyRecordsAffected, _options.TryGetValue);
	public bool Enlist => GetBoolean(DefaultKeyEnlist, _options.TryGetValue);
	public string ClientLibrary => GetString(DefaultKeyClientLibrary, _options.TryGetValue);
	public int DbCachePages => GetInt32(DefaultKeyDbCachePages, _options.TryGetValue);
	public bool NoDatabaseTriggers => GetBoolean(DefaultKeyNoDbTriggers, _options.TryGetValue);
	public bool NoGarbageCollect => GetBoolean(DefaultKeyNoGarbageCollect, _options.TryGetValue);
	public bool Compression => GetBoolean(DefaultKeyCompression, _options.TryGetValue);
	public byte[] CryptKey => GetBytes(DefaultKeyCryptKey, _options.TryGetValue);
	public FbWireCrypt WireCrypt => GetWireCrypt(DefaultKeyWireCrypt, _options.TryGetValue);
	public string ApplicationName => GetString(DefaultKeyApplicationName, _options.TryGetValue);
	public int CommandTimeout => GetInt32(DefaultKeyCommandTimeout, _options.TryGetValue);
	public int ParallelWorkers => GetInt32(DefaultKeyParallelWorkers, _options.TryGetValue);

	#endregion

	#region Internal Properties

	/// <summary>
	/// Canonical form of the connection string: options sorted ordinally by key,
	/// nulls skipped, values quoted only when they contain ';'. Suitable as a
	/// stable identity (e.g. pool key).
	/// </summary>
	internal string NormalizedConnectionString
	{
		get
		{
			return string.Join(";", _options
				.OrderBy(x => x.Key, StringComparer.Ordinal)
				.Where(x => x.Value != null)
				.Select(x => string.Format("{0}={1}", x.Key, WrapValueIfNeeded(x.Value.ToString()))));
		}
	}

	#endregion

	#region Constructors

	/// <summary>Creates an instance with all options at their defaults.</summary>
	public ConnectionString()
	{
		SetDefaultOptions();
	}

	/// <summary>Creates an instance and loads <paramref name="connectionString"/> over the defaults.</summary>
	public ConnectionString(string connectionString)
		: this()
	{
		Load(connectionString);
	}

	#endregion

	#region Methods

	/// <summary>
	/// Throws <see cref="ArgumentException"/> when a required option is missing
	/// or an option value is out of its valid range.
	/// </summary>
	public void Validate()
	{
		if ((string.IsNullOrEmpty(Database))
			|| (string.IsNullOrEmpty(DataSource) && ServerType != FbServerType.Embedded)
			|| (string.IsNullOrEmpty(Charset)))
		{
			throw new ArgumentException("An invalid connection string argument has been supplied or a required connection string argument has not been supplied.");
		}
		if (Port <= 0 || Port > 65535)
		{
			throw new ArgumentException("Incorrect port.");
		}
		if (MinPoolSize > MaxPoolSize)
		{
			throw new ArgumentException("Incorrect pool size.");
		}
		if (Dialect < 1 || Dialect > 3)
		{
			throw new ArgumentException("Incorrect database dialect it should be 1, 2, or 3.");
		}
		if (PacketSize < 512 || PacketSize > 32767)
		{
			throw new ArgumentException(string.Format(CultureInfo.CurrentCulture, "'Packet Size' value of {0} is not valid.{1}The value should be an integer >= 512 and <= 32767.", PacketSize, Environment.NewLine));
		}
		if (DbCachePages < 0)
		{
			throw new ArgumentException(string.Format(CultureInfo.CurrentCulture, "'Cache Pages' value of {0} is not valid.{1}The value should be an integer >= 0.", DbCachePages, Environment.NewLine));
		}
		if (Pooling && NoDatabaseTriggers)
		{
			throw new ArgumentException("Cannot use Pooling and NoDatabaseTriggers together.");
		}
		if (ParallelWorkers < 0)
		{
			throw new ArgumentException(string.Format(CultureInfo.CurrentCulture, "'Parallel Workers' value of {0} is not valid.{1}The value should be an integer >= 0.", ParallelWorkers, Environment.NewLine));
		}
	}

	#endregion

	#region Private Methods

	// Parses key=value pairs (values may be quoted with " or '), resolves
	// synonyms to canonical keywords, converts enum/byte[] values, and finally
	// lets a composite "database" value override data source/port/catalog.
	private void Load(string connectionString)
	{
		const string KeyPairsRegex = "(([\\w\\s\\d]*)\\s*?=\\s*?\"([^\"]*)\"|([\\w\\s\\d]*)\\s*?=\\s*?'([^']*)'|([\\w\\s\\d]*)\\s*?=\\s*?([^\"';][^;]*))";

		if (!string.IsNullOrEmpty(connectionString))
		{
			var keyPairs = Regex.Matches(connectionString, KeyPairsRegex);
			foreach (Match keyPair in keyPairs)
			{
				if (keyPair.Groups.Count == 8)
				{
					var values = new string[]
					{
						(keyPair.Groups[2].Success ? keyPair.Groups[2].Value
							: keyPair.Groups[4].Success ? keyPair.Groups[4].Value
							: keyPair.Groups[6].Success ? keyPair.Groups[6].Value
							: string.Empty)
						.Trim().ToLowerInvariant(),
						(keyPair.Groups[3].Success ? keyPair.Groups[3].Value
							: keyPair.Groups[5].Success ? keyPair.Groups[5].Value
							: keyPair.Groups[7].Success ? keyPair.Groups[7].Value
							: string.Empty)
						.Trim()
					};

					if (values.Length == 2 && !string.IsNullOrEmpty(values[0]) && !string.IsNullOrEmpty(values[1]))
					{
						if (Synonyms.TryGetValue(values[0], out var key))
						{
							switch (key)
							{
								case DefaultKeyServerType:
									_options[key] = ParseEnum<FbServerType>(values[1], DefaultKeyServerType);
									break;
								case DefaultKeyIsolationLevel:
									_options[key] = ParseEnum<IsolationLevel>(values[1], DefaultKeyIsolationLevel);
									break;
								case DefaultKeyCryptKey:
									// Crypt key travels base64-encoded in the connection string.
									var cryptKey = default(byte[]);
									try
									{
										cryptKey = Convert.FromBase64String(values[1]);
									}
									catch
									{
										throw NotSupported(DefaultKeyCryptKey);
									}
									_options[key] = cryptKey;
									break;
								case DefaultKeyWireCrypt:
									_options[key] = ParseEnum<FbWireCrypt>(values[1], DefaultKeyWireCrypt);
									break;
								default:
									_options[key] = values[1];
									break;
							}
						}
					}
				}
			}

			if (!string.IsNullOrEmpty(Database))
			{
				ParseConnectionInfo(Database);
			}
		}
	}

	private void SetDefaultOptions()
	{
		_options = new Dictionary<string, object>(DefaultValues);
	}

	// Splits a composite "database" value (URL style, //host/db style, or
	// legacy host:db style) into catalog, data source and port.
	// It is expected the hostname to be at least 2 characters to prevent
	// possible ambiguity (DNET-892).
	private void ParseConnectionInfo(string connectionInfo)
	{
		connectionInfo = connectionInfo.Trim();

		{
			// URL style inet://[hostv6]:port/database
			var match = Regex.Match(connectionInfo, "^inet://\\[(?<host>[A-Za-z0-9:]{2,})\\]:(?<port>\\d+)/(?<database>.+)$");
			if (match.Success)
			{
				_options[DefaultKeyCatalog] = match.Groups["database"].Value;
				_options[DefaultKeyDataSource] = match.Groups["host"].Value;
				_options[DefaultKeyPortNumber] = int.Parse(match.Groups["port"].Value, CultureInfo.InvariantCulture);
				return;
			}
		}
		{
			// URL style inet://host:port/database
			var match = Regex.Match(connectionInfo, "^inet://(?<host>[A-Za-z0-9\\.-]{2,}):(?<port>\\d+)/(?<database>.+)$");
			if (match.Success)
			{
				_options[DefaultKeyCatalog] = match.Groups["database"].Value;
				_options[DefaultKeyDataSource] = match.Groups["host"].Value;
				_options[DefaultKeyPortNumber] = int.Parse(match.Groups["port"].Value, CultureInfo.InvariantCulture);
				return;
			}
		}
		{
			// URL style inet://host/database
			var match = Regex.Match(connectionInfo, "^inet://(?<host>[A-Za-z0-9\\.:-]{2,})/(?<database>.+)$");
			if (match.Success)
			{
				_options[DefaultKeyCatalog] = match.Groups["database"].Value;
				_options[DefaultKeyDataSource] = match.Groups["host"].Value;
				return;
			}
		}
		{
			// URL style inet:///database
			var match = Regex.Match(connectionInfo, "^inet:///(?<database>.+)$");
			if (match.Success)
			{
				_options[DefaultKeyCatalog] = match.Groups["database"].Value;
				_options[DefaultKeyDataSource] = "localhost";
				return;
			}
		}
		{
			// new style //[hostv6]:port/database
			var match = Regex.Match(connectionInfo, "^//\\[(?<host>[A-Za-z0-9:]{2,})\\]:(?<port>\\d+)/(?<database>.+)$");
			if (match.Success)
			{
				_options[DefaultKeyCatalog] = match.Groups["database"].Value;
				_options[DefaultKeyDataSource] = match.Groups["host"].Value;
				_options[DefaultKeyPortNumber] = int.Parse(match.Groups["port"].Value, CultureInfo.InvariantCulture);
				return;
			}
		}
		{
			// new style //host:port/database
			var match = Regex.Match(connectionInfo, "^//(?<host>[A-Za-z0-9\\.-]{2,}):(?<port>\\d+)/(?<database>.+)$");
			if (match.Success)
			{
				_options[DefaultKeyCatalog] = match.Groups["database"].Value;
				_options[DefaultKeyDataSource] = match.Groups["host"].Value;
				_options[DefaultKeyPortNumber] = int.Parse(match.Groups["port"].Value, CultureInfo.InvariantCulture);
				return;
			}
		}
		{
			// new style //host/database
			var match = Regex.Match(connectionInfo, "^//(?<host>[A-Za-z0-9\\.:-]{2,})/(?<database>.+)$");
			if (match.Success)
			{
				_options[DefaultKeyCatalog] = match.Groups["database"].Value;
				_options[DefaultKeyDataSource] = match.Groups["host"].Value;
				return;
			}
		}
		{
			// old style host:X:\database (Windows drive-letter path after host)
			var match = Regex.Match(connectionInfo, "^(?<host>[A-Za-z0-9\\.:-]{2,}):(?<database>[A-Za-z]:\\\\.+)$");
			if (match.Success)
			{
				_options[DefaultKeyCatalog] = match.Groups["database"].Value;
				_options[DefaultKeyDataSource] = match.Groups["host"].Value;
				return;
			}
		}
		{
			// old style host/port:database
			var match = Regex.Match(connectionInfo, "^(?<host>[A-Za-z0-9\\.:-]{2,})/(?<port>\\d+):(?<database>.+)$");
			if (match.Success)
			{
				_options[DefaultKeyCatalog] = match.Groups["database"].Value;
				_options[DefaultKeyDataSource] = match.Groups["host"].Value;
				_options[DefaultKeyPortNumber] = int.Parse(match.Groups["port"].Value, CultureInfo.InvariantCulture);
				return;
			}
		}
		{
			// old style host:database
			var match = Regex.Match(connectionInfo, "^(?<host>[A-Za-z0-9\\.:-]{2,}):(?<database>.+)$");
			if (match.Success)
			{
				_options[DefaultKeyCatalog] = match.Groups["database"].Value;
				_options[DefaultKeyDataSource] = match.Groups["host"].Value;
				return;
			}
		}
		// No recognized shape: the whole value is the database path.
		_options[DefaultKeyCatalog] = connectionInfo;
	}

	#endregion

	#region Internal Static Methods

	// Lookup shape shared with FbConnectionStringBuilder-style callers.
	internal delegate bool TryGetValueDelegate(string key, out object value);

	internal static short GetInt16(string key, TryGetValueDelegate tryGetValue, short defaultValue = default)
	{
		return tryGetValue(key, out var value)
			? Convert.ToInt16(value, CultureInfo.InvariantCulture)
			: defaultValue;
	}

	internal static int GetInt32(string key, TryGetValueDelegate tryGetValue, int defaultValue = default)
	{
		return tryGetValue(key, out var value)
			? Convert.ToInt32(value, CultureInfo.InvariantCulture)
			: defaultValue;
	}

	internal static long GetInt64(string key, TryGetValueDelegate tryGetValue, long defaultValue = default)
	{
		return tryGetValue(key, out var value)
			? Convert.ToInt64(value, CultureInfo.InvariantCulture)
			: defaultValue;
	}

	internal static string GetString(string key, TryGetValueDelegate tryGetValue, string defaultValue = default)
	{
		return tryGetValue(key, out var value)
			? Convert.ToString(value, CultureInfo.InvariantCulture)
			: defaultValue;
	}

	internal static bool GetBoolean(string key, TryGetValueDelegate tryGetValue, bool defaultValue = default)
	{
		return tryGetValue(key, out var value)
			? Convert.ToBoolean(value, CultureInfo.InvariantCulture)
			: defaultValue;
	}

	internal static byte[] GetBytes(string key, TryGetValueDelegate tryGetValue, byte[] defaultValue = default)
	{
		return tryGetValue(key, out var value)
			? (byte[])value
			: defaultValue;
	}

	internal static FbServerType GetServerType(string key, TryGetValueDelegate tryGetValue, FbServerType defaultValue = default)
	{
		return tryGetValue(key, out var value)
			? (FbServerType)value
			: defaultValue;
	}

	internal static IsolationLevel GetIsolationLevel(string key, TryGetValueDelegate tryGetValue, IsolationLevel defaultValue = default)
	{
		return tryGetValue(key, out var value)
			? (IsolationLevel)value
			: defaultValue;
	}

	internal static FbWireCrypt GetWireCrypt(string key, TryGetValueDelegate tryGetValue, FbWireCrypt defaultValue = default)
	{
		return tryGetValue(key, out var value)
			? (FbWireCrypt)value
			: defaultValue;
	}

	#endregion

	#region Private Static Methods

	// Replaces a leading |DataDirectory| token (with optional trailing
	// separator) by the AppDomain's "DataDirectory" location.
	private static string ExpandDataDirectory(string s)
	{
		const string DataDirectoryKeyword = "|DataDirectory|";
		if (s == null)
			return s;

		var dataDirectoryLocation = (string)AppDomain.CurrentDomain.GetData("DataDirectory") ?? string.Empty;
		var pattern = string.Format("{0}{1}?", Regex.Escape(DataDirectoryKeyword), Regex.Escape(Path.DirectorySeparatorChar.ToString()));
		return Regex.Replace(s, pattern, dataDirectoryLocation + Path.DirectorySeparatorChar, RegexOptions.IgnoreCase | RegexOptions.CultureInvariant);
	}

	// Case-insensitive enum parse; throws NotSupportedException naming the
	// offending keyword on failure.
	private static TEnum ParseEnum<TEnum>(string value, string name) where TEnum : struct
	{
		if (!Enum.TryParse<TEnum>(value, true, out var result))
			throw NotSupported(name);
		return result;
	}

	private static Exception NotSupported(string name) => new NotSupportedException($"Not supported '{name}'.");

	// Values containing ';' must be quoted so the normalized string re-parses.
	private static string WrapValueIfNeeded(string value)
	{
		if (value != null && value.Contains(';'))
			return "'" + value + "'";
		return value;
	}

	#endregion
}
/// <summary>
/// Base contract for a Firebird database attachment (GDS/managed/native
/// implementations). Fix: restores generic type arguments stripped from this
/// text (<c>ValueTask&lt;TransactionBase&gt;</c>, <c>List&lt;object&gt;</c>,
/// <c>ValueTask&lt;List&lt;object&gt;&gt;</c>, <c>ValueTask&lt;string&gt;</c>)
/// — without them the declarations do not compile.
/// </summary>
internal abstract class DatabaseBase
{
	// NOTE(review): generic argument reconstructed from usage elsewhere in the
	// provider (warning callbacks carry the Firebird status) — confirm.
	public Action<IscException> WarningMessage { get; set; }

	public abstract bool UseUtf8ParameterBuffer { get; }
	// Parameter buffers use UTF8 on newer protocols, ANSI otherwise.
	public Encoding ParameterBufferEncoding => UseUtf8ParameterBuffer ? Encoding.UTF8 : Encoding.GetANSIEncoding();

	public abstract int Handle { get; }
	public Charset Charset { get; }
	public int PacketSize { get; }
	public short Dialect { get; }
	public int TransactionCount { get; set; }
	public string ServerVersion { get; protected set; }
	public abstract bool HasRemoteEventSupport { get; }
	public abstract bool ConnectionBroken { get; }

	public DatabaseBase(Charset charset, int packetSize, short dialect)
	{
		Charset = charset;
		PacketSize = packetSize;
		Dialect = dialect;
	}

	// Attachment lifecycle (sync + async pairs).
	public abstract void Attach(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey);
	public abstract ValueTask AttachAsync(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey, CancellationToken cancellationToken = default);
	public abstract void AttachWithTrustedAuth(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey);
	public abstract ValueTask AttachWithTrustedAuthAsync(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey, CancellationToken cancellationToken = default);
	public abstract void Detach();
	public abstract ValueTask DetachAsync(CancellationToken cancellationToken = default);

	// Database create/drop.
	public abstract void CreateDatabase(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey);
	public abstract ValueTask CreateDatabaseAsync(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey, CancellationToken cancellationToken = default);
	public abstract void CreateDatabaseWithTrustedAuth(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey);
	public abstract ValueTask CreateDatabaseWithTrustedAuthAsync(DatabaseParameterBufferBase dpb, string database, byte[] cryptKey, CancellationToken cancellationToken = default);
	public abstract void DropDatabase();
	public abstract ValueTask DropDatabaseAsync(CancellationToken cancellationToken = default);

	// Transactions and statements.
	public abstract TransactionBase BeginTransaction(TransactionParameterBuffer tpb);
	public abstract ValueTask<TransactionBase> BeginTransactionAsync(TransactionParameterBuffer tpb, CancellationToken cancellationToken = default);
	public abstract StatementBase CreateStatement();
	public abstract StatementBase CreateStatement(TransactionBase transaction);

	// Parameter buffer factories (protocol-version specific).
	public abstract DatabaseParameterBufferBase CreateDatabaseParameterBuffer();
	public abstract EventParameterBuffer CreateEventParameterBuffer();
	public abstract TransactionParameterBuffer CreateTransactionParameterBuffer();

	// isc_database_info queries.
	public abstract List<object> GetDatabaseInfo(byte[] items);
	public abstract ValueTask<List<object>> GetDatabaseInfoAsync(byte[] items, CancellationToken cancellationToken = default);
	public abstract List<object> GetDatabaseInfo(byte[] items, int bufferLength);
	public abstract ValueTask<List<object>> GetDatabaseInfoAsync(byte[] items, int bufferLength, CancellationToken cancellationToken = default);

	// Remote event notifications.
	public abstract void CloseEventManager();
	public abstract ValueTask CloseEventManagerAsync(CancellationToken cancellationToken = default);
	public abstract void QueueEvents(RemoteEvent events);
	public abstract ValueTask QueueEventsAsync(RemoteEvent events, CancellationToken cancellationToken = default);
	public abstract void CancelEvents(RemoteEvent events);
	public abstract ValueTask CancelEventsAsync(RemoteEvent events, CancellationToken cancellationToken = default);

	public abstract void CancelOperation(short kind);
	public abstract ValueTask CancelOperationAsync(short kind, CancellationToken cancellationToken = default);

	/// <summary>Fetches the server version string via isc_info_firebird_version.</summary>
	public string GetServerVersion()
	{
		var items = new byte[]
		{
			IscCodes.isc_info_firebird_version,
			IscCodes.isc_info_end
		};
		var info = GetDatabaseInfo(items, IscCodes.BUFFER_SIZE_256);
		// The version string is the last item in the parsed info response.
		return (string)info[info.Count - 1];
	}

	/// <summary>Async variant of <see cref="GetServerVersion"/>.</summary>
	public async ValueTask<string> GetServerVersionAsync(CancellationToken cancellationToken = default)
	{
		var items = new byte[]
		{
			IscCodes.isc_info_firebird_version,
			IscCodes.isc_info_end
		};
		var info = await GetDatabaseInfoAsync(items, IscCodes.BUFFER_SIZE_256, cancellationToken).ConfigureAwait(false);
		return (string)info[info.Count - 1];
	}
}
// Database parameter buffer (DPB) writer for the version-1 layout
// (isc_dpb_version1): each clump is encoded as
// <type: 1 byte> <length: 1 byte> <payload>.
internal sealed class DatabaseParameterBuffer1 : DatabaseParameterBufferBase
{
	// Begins the buffer with the isc_dpb_version1 tag; string payloads use the
	// supplied encoding (handled by the base class).
	public DatabaseParameterBuffer1(Encoding encoding)
		: base(IscCodes.isc_dpb_version1, encoding)
	{ }

	// Appends a one-byte clump: type tag, length = 1, then the value.
	public override void Append(int type, byte value)
	{
		WriteByte(type);
		WriteByte(1);
		Write(value);
	}

	// Appends a two-byte clump: type tag, length = 2, then the value.
	public override void Append(int type, short value)
	{
		WriteByte(type);
		WriteByte(2);
		Write(value);
	}

	// Appends a four-byte clump: type tag, length = 4, then the value.
	public override void Append(int type, int value)
	{
		WriteByte(type);
		WriteByte(4);
		Write(value);
	}

	// Appends a variable-length clump: type tag, length, then the raw bytes.
	// NOTE(review): the length goes through WriteByte, so payloads longer than
	// 255 bytes would be truncated to a single length byte — presumably v1 DPB
	// clumps are kept short by callers; confirm WriteByte semantics.
	public override void Append(int type, byte[] buffer)
	{
		WriteByte(type);
		WriteByte(buffer.Length);
		Write(buffer);
	}
}
// Database parameter buffer (DPB) writer for the version-2 layout
// (isc_dpb_version2). Unlike the v1 buffer, the clump length is emitted via
// Write(int) rather than a single byte, so it is not limited to 255.
// NOTE(review): the exact wire width of the length field is determined by
// ParameterBuffer.Write, which is not visible here — confirm.
internal sealed class DatabaseParameterBuffer2 : DatabaseParameterBufferBase
{
	// Begins the buffer with the isc_dpb_version2 tag; string payloads use the
	// supplied encoding (handled by the base class).
	public DatabaseParameterBuffer2(Encoding encoding)
		: base(IscCodes.isc_dpb_version2, encoding)
	{ }

	// Appends a one-byte clump: type tag, length = 1, then the value.
	public override void Append(int type, byte value)
	{
		WriteByte(type);
		Write(1);
		Write(value);
	}

	// Appends a two-byte clump: type tag, length = 2, then the value.
	public override void Append(int type, short value)
	{
		WriteByte(type);
		Write(2);
		Write(value);
	}

	// Appends a four-byte clump: type tag, length = 4, then the value.
	public override void Append(int type, int value)
	{
		WriteByte(type);
		Write(4);
		Write(value);
	}

	// Appends a variable-length clump: type tag, length, then the raw bytes.
	public override void Append(int type, byte[] buffer)
	{
		WriteByte(type);
		Write(buffer.Length);
		Write(buffer);
	}
}
// Common base for the versioned database parameter buffer (DPB) writers.
// Stores the text encoding used for string clumps and emits the DPB version
// tag as the first byte of the buffer.
internal abstract class DatabaseParameterBufferBase : ParameterBuffer
{
	// Writes the version tag (e.g. isc_dpb_version1/2) immediately, so every
	// derived buffer starts with it before any clumps are appended.
	public DatabaseParameterBufferBase(int version, Encoding encoding)
	{
		Encoding = encoding;
		Append(version);
	}

	// Clump encoding (type/length layout) differs per DPB version, so the
	// typed appends are implemented by the derived classes.
	public abstract void Append(int type, byte value);
	public abstract void Append(int type, short value);
	public abstract void Append(int type, int value);
	public abstract void Append(int type, byte[] buffer);

	// Strings are appended as bytes in the buffer's configured encoding.
	public void Append(int type, string content) => Append(type, Encoding.GetBytes(content));

	// Encoding used for all string clumps in this buffer.
	public Encoding Encoding { get; }
}
// Provider-internal data type classification for Firebird SQL types,
// derived from the wire-level sqltype/subtype/scale (see TypeHelper).
internal enum DbDataType
{
	Array,
	BigInt,
	Binary,
	Boolean,
	Char,
	Date,
	Decimal,
	Double,
	Float,
	Guid,
	Integer,
	Numeric,
	SmallInt,
	Text,
	Time,
	TimeStamp,
	VarChar,
	// Time-zone-aware timestamp/time variants ("Ex" carries an explicit offset).
	TimeStampTZ,
	TimeStampTZEx,
	TimeTZ,
	TimeTZEx,
	// IEEE 754-2008 decimal floating point (DECFLOAT 16/34 digits).
	Dec16,
	Dec34,
	Int128,
	Null,
}
*/ //$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net) using System; using System.Linq; using System.Numerics; using FirebirdSql.Data.Types; namespace FirebirdSql.Data.Common; internal sealed class DbField { #region Fields private short _dataType; private short _numericScale; private short _subType; private short _length; private short _nullFlag; private string _name; private string _relation; private string _owner; private string _alias; private int _charCount; private DbValue _dbValue; private Charset _charset; private ArrayBase _arrayHandle; #endregion #region Properties public DbDataType DbDataType { get { return TypeHelper.GetDbDataTypeFromSqlType(SqlType, SubType, NumericScale, Length, Charset); } } public int SqlType { get { return _dataType & ~1; } } public short DataType { get { return _dataType; } set { _dataType = value; } } public short NumericScale { get { return _numericScale; } set { _numericScale = value; } } public short SubType { get { return _subType; } set { _subType = value; if (IsCharacter()) { // Bits 0-7 of sqlsubtype is charset_id (127 is a special value - // current attachment charset). // Bits 8-17 hold collation_id for this value. var cs = BitConverter.GetBytes(value); _charset = Charset.TryGetById(cs[0], out var charset) ? 
charset : Charset.DefaultCharset; } } }

	// Declared field length in bytes; setting it refreshes the cached character
	// count for character fields (bytes / bytes-per-character of the charset).
	public short Length
	{
		get { return _length; }
		set
		{
			_length = value;
			if (IsCharacter())
			{
				_charCount = _length / _charset.BytesPerCharacter;
			}
		}
	}

	// Wire NULL indicator; -1 marks the value as NULL (see SetValue/FixNull).
	public short NullFlag
	{
		get { return _nullFlag; }
		set { _nullFlag = value; }
	}

	// Column name; stored trimmed.
	public string Name
	{
		get { return _name; }
		set { _name = value.Trim(); }
	}

	// Relation (table) name; stored trimmed.
	public string Relation
	{
		get { return _relation; }
		set { _relation = value.Trim(); }
	}

	// Owner name; stored trimmed.
	public string Owner
	{
		get { return _owner; }
		set { _owner = value.Trim(); }
	}

	// Column alias; stored trimmed.
	public string Alias
	{
		get { return _alias; }
		set { _alias = value.Trim(); }
	}

	// Character set used to decode/encode character data for this field.
	public Charset Charset
	{
		get { return _charset; }
	}

	// Cached character count; -1 until Length is assigned for a character field.
	public int CharCount
	{
		get { return _charCount; }
	}

	// Array handle; only meaningful for array fields (EnsureArray throws otherwise).
	public ArrayBase ArrayHandle
	{
		get
		{
			EnsureArray();
			return _arrayHandle;
		}
		set
		{
			EnsureArray();
			_arrayHandle = value;
		}
	}

	// Holder of the current CLR value for this field.
	public DbValue DbValue
	{
		get { return _dbValue; }
	}

	#endregion

	#region Constructors

	public DbField()
	{
		_charCount = -1;
		_name = string.Empty;
		_relation = string.Empty;
		_owner = string.Empty;
		_alias = string.Empty;
		_dbValue = new DbValue(this, DBNull.Value);
	}

	#endregion

	#region Methods

	// True for any numeric type (integers, fixed-point, floating-point).
	// A data type of 0 means "not set yet" and always yields false.
	public bool IsNumeric()
	{
		if (_dataType == 0)
		{
			return false;
		}
		switch (DbDataType)
		{
			case DbDataType.SmallInt:
			case DbDataType.Integer:
			case DbDataType.BigInt:
			case DbDataType.Numeric:
			case DbDataType.Decimal:
			case DbDataType.Float:
			case DbDataType.Double:
				return true;
			default:
				return false;
		}
	}

	// True only for fixed-point types (NUMERIC/DECIMAL).
	public bool IsDecimal()
	{
		if (_dataType == 0)
		{
			return false;
		}
		switch (DbDataType)
		{
			case DbDataType.Numeric:
			case DbDataType.Decimal:
				return true;
			default:
				return false;
		}
	}

	// True for BLOB-backed types (binary and text BLOBs).
	public bool IsLong()
	{
		if (_dataType == 0)
		{
			return false;
		}
		switch (DbDataType)
		{
			case DbDataType.Binary:
			case DbDataType.Text:
				return true;
			default:
				return false;
		}
	}

	// True for character data (CHAR, VARCHAR and text BLOBs).
	public bool IsCharacter()
	{
		if (_dataType == 0)
		{
			return false;
		}
		switch (DbDataType)
		{
			case DbDataType.Char:
			case DbDataType.VarChar:
			case DbDataType.Text:
				return true;
			default:
				return false;
		}
	}

	public bool IsArray()
	{
		if (_dataType == 0)
		{
			return false;
		}
		switch (DbDataType)
		{
			case DbDataType.Array:
				return true;
			default:
				return false;
		}
	}

	// True when the column alias differs from the column name.
	public bool IsAliased()
	{
		return (Name != Alias) ? true : false;
	}

	// Reported size: int.MaxValue for BLOBs, character count for character
	// fields, otherwise the byte length.
	public int GetSize()
	{
		if (IsLong())
		{
			return int.MaxValue;
		}
		else
		{
			if (IsCharacter())
			{
				return CharCount;
			}
			else
			{
				return Length;
			}
		}
	}

	// The lowest bit of the SQL type code signals nullability.
	public bool AllowDBNull()
	{
		return ((DataType & 1) == 1);
	}

	// Decodes the raw value buffer for this field (layout depends on SqlType)
	// and stores the resulting CLR value into DbValue. A null buffer or a
	// NullFlag of -1 stores DBNull.
	public void SetValue(byte[] buffer)
	{
		if (buffer == null || NullFlag == -1)
		{
			DbValue.SetValue(DBNull.Value);
		}
		else
		{
			switch (SqlType)
			{
				case IscCodes.SQL_TEXT:
				case IscCodes.SQL_VARYING:
					if (DbDataType == DbDataType.Guid)
					{
						DbValue.SetValue(TypeDecoder.DecodeGuid(buffer));
					}
					else
					{
						if (Charset.IsOctetsCharset)
						{
							// OCTETS data is kept as raw bytes.
							DbValue.SetValue(buffer);
						}
						else
						{
							var s = Charset.GetString(buffer, 0, buffer.Length);
							var runes = s.EnumerateRunesToChars().ToList();
							// Trim over-long decodings back to the declared character count
							// (only when byte length is an exact multiple of the charset width).
							if ((Length % Charset.BytesPerCharacter) == 0 &&
								runes.Count > CharCount)
							{
								s = new string([.. runes.Take(CharCount).SelectMany(x => x)]);
							}
							DbValue.SetValue(s);
						}
					}
					break;

				case IscCodes.SQL_SHORT:
					if (_numericScale < 0)
					{
						// Negative scale: fixed-point NUMERIC/DECIMAL stored as int16.
						DbValue.SetValue(TypeDecoder.DecodeDecimal(
							BitConverter.ToInt16(buffer, 0),
							_numericScale,
							_dataType));
					}
					else
					{
						DbValue.SetValue(BitConverter.ToInt16(buffer, 0));
					}
					break;

				case IscCodes.SQL_LONG:
					if (_numericScale < 0)
					{
						DbValue.SetValue(TypeDecoder.DecodeDecimal(
							BitConverter.ToInt32(buffer, 0),
							_numericScale,
							_dataType));
					}
					else
					{
						DbValue.SetValue(BitConverter.ToInt32(buffer, 0));
					}
					break;

				case IscCodes.SQL_FLOAT:
					DbValue.SetValue(BitConverter.ToSingle(buffer, 0));
					break;

				case IscCodes.SQL_DOUBLE:
				case IscCodes.SQL_D_FLOAT:
					DbValue.SetValue(BitConverter.ToDouble(buffer, 0));
					break;

				case IscCodes.SQL_QUAD:
				case IscCodes.SQL_INT64:
				case IscCodes.SQL_BLOB:
				case IscCodes.SQL_ARRAY:
					// BLOB/array values arrive as 64-bit handles; INT64/QUAD as plain int64.
					if (_numericScale < 0)
					{
						DbValue.SetValue(TypeDecoder.DecodeDecimal(
							BitConverter.ToInt64(buffer, 0),
							_numericScale,
							_dataType));
					}
					else
					{
						DbValue.SetValue(BitConverter.ToInt64(buffer, 0));
					}
					break;

				case IscCodes.SQL_TIMESTAMP:
					{
						// 4 bytes encoded date + 4 bytes encoded time.
						var date = TypeDecoder.DecodeDate(BitConverter.ToInt32(buffer, 0));
						var time = TypeDecoder.DecodeTime(BitConverter.ToInt32(buffer, 4));
						DbValue.SetValue(date.Add(time));
						break;
					}

				case IscCodes.SQL_TYPE_TIME:
					DbValue.SetValue(TypeDecoder.DecodeTime(BitConverter.ToInt32(buffer, 0)));
					break;

				case IscCodes.SQL_TYPE_DATE:
					DbValue.SetValue(TypeDecoder.DecodeDate(BitConverter.ToInt32(buffer, 0)));
					break;

				case IscCodes.SQL_BOOLEAN:
					DbValue.SetValue(TypeDecoder.DecodeBoolean(buffer));
					break;

				case IscCodes.SQL_TIMESTAMP_TZ:
					{
						// 4 bytes date + 4 bytes time (stamp is UTC) + 2 bytes time zone id.
						var date = TypeDecoder.DecodeDate(BitConverter.ToInt32(buffer, 0));
						var time = TypeDecoder.DecodeTime(BitConverter.ToInt32(buffer, 4));
						var tzId = BitConverter.ToUInt16(buffer, 8);
						var dt = DateTime.SpecifyKind(date.Add(time), DateTimeKind.Utc);
						DbValue.SetValue(TypeHelper.CreateZonedDateTime(dt, tzId, null));
						break;
					}

				case IscCodes.SQL_TIMESTAMP_TZ_EX:
					{
						// Extended layout adds a 2-byte offset after the time zone id.
						var date = TypeDecoder.DecodeDate(BitConverter.ToInt32(buffer, 0));
						var time = TypeDecoder.DecodeTime(BitConverter.ToInt32(buffer, 4));
						var tzId = BitConverter.ToUInt16(buffer, 8);
						var offset = BitConverter.ToInt16(buffer, 10);
						var dt = DateTime.SpecifyKind(date.Add(time), DateTimeKind.Utc);
						DbValue.SetValue(TypeHelper.CreateZonedDateTime(dt, tzId, offset));
						break;
					}

				case IscCodes.SQL_TIME_TZ:
					{
						// 4 bytes time + 2 bytes time zone id.
						var time = TypeDecoder.DecodeTime(BitConverter.ToInt32(buffer, 0));
						var tzId = BitConverter.ToUInt16(buffer, 4);
						DbValue.SetValue(TypeHelper.CreateZonedTime(time, tzId, null));
						break;
					}

				case IscCodes.SQL_TIME_TZ_EX:
					{
						// 4 bytes time + 2 bytes time zone id + 2 bytes offset.
						var time = TypeDecoder.DecodeTime(BitConverter.ToInt32(buffer, 0));
						var tzId = BitConverter.ToUInt16(buffer, 4);
						var offset = BitConverter.ToInt16(buffer, 6);
						DbValue.SetValue(TypeHelper.CreateZonedTime(time, tzId, offset));
						break;
					}

				case IscCodes.SQL_DEC16:
					DbValue.SetValue(DecimalCodec.DecFloat16.ParseBytes(buffer));
					break;

				case IscCodes.SQL_DEC34:
					DbValue.SetValue(DecimalCodec.DecFloat34.ParseBytes(buffer));
					break;

				case IscCodes.SQL_INT128:
					if (_numericScale < 0)
					{
						DbValue.SetValue(TypeDecoder.DecodeDecimal(
							Int128Helper.GetInt128(buffer),
							_numericScale,
							_dataType));
					}
					else
					{
						DbValue.SetValue(Int128Helper.GetInt128(buffer));
					}
					break;

				default:
					throw TypeHelper.InvalidDataType(SqlType);
			}
		}
	}

	// Replaces a DBNull value with a type-appropriate default when the wire
	// null flag says NULL, so downstream code always sees a typed value.
	public void FixNull()
	{
		if (NullFlag == -1 && _dbValue.IsDBNull())
		{
			switch (DbDataType)
			{
				case DbDataType.Char:
				case DbDataType.VarChar:
					DbValue.SetValue(string.Empty);
					break;

				case DbDataType.Guid:
					DbValue.SetValue(Guid.Empty);
					break;

				case DbDataType.SmallInt:
					DbValue.SetValue((short)0);
					break;

				case DbDataType.Integer:
					DbValue.SetValue((int)0);
					break;

				case DbDataType.BigInt:
				case DbDataType.Binary:
				case DbDataType.Array:
				case DbDataType.Text:
					// BLOB/array fields carry 64-bit handles; use a zero handle.
					DbValue.SetValue((long)0);
					break;

				case DbDataType.Numeric:
				case DbDataType.Decimal:
					DbValue.SetValue((decimal)0);
					break;

				case DbDataType.Float:
					DbValue.SetValue((float)0);
					break;

				case DbDataType.Double:
					DbValue.SetValue((double)0);
					break;

				case DbDataType.Date:
				case DbDataType.TimeStamp:
					DbValue.SetValue(DateTime.UnixEpoch);
					break;

				case DbDataType.Time:
					DbValue.SetValue(TimeSpan.Zero);
					break;

				case DbDataType.Boolean:
					DbValue.SetValue(false);
					break;

				case DbDataType.TimeStampTZ:
				case DbDataType.TimeStampTZEx:
					DbValue.SetValue(new FbZonedDateTime(DateTime.UnixEpoch, TimeZoneMapping.DefaultTimeZoneName));
					break;

				case DbDataType.TimeTZ:
				case DbDataType.TimeTZEx:
					DbValue.SetValue(new FbZonedTime(TimeSpan.Zero, TimeZoneMapping.DefaultTimeZoneName));
					break;

				case DbDataType.Dec16:
				case DbDataType.Dec34:
					DbValue.SetValue(new FbDecFloat(0, 0));
					break;

				case DbDataType.Int128:
					DbValue.SetValue((BigInteger)0);
					break;

				default:
					throw IscException.ForStrParam($"Unknown sql data type: {DataType}.");
			}
		}
	}

	// CLR type corresponding to this field's DbDataType.
	public Type GetSystemType()
	{
		return TypeHelper.GetTypeFromDbDataType(DbDataType);
	}

	// True once the server has assigned a SQL type to this field.
	public bool HasDataType()
	{
		return _dataType != 0;
	}

	#endregion

	#region Private Methods

	// Guard used by ArrayHandle; only array fields may carry an array handle.
	private void EnsureArray()
	{
		if (!IsArray())
			throw IscException.ForStrParam("Field is not an array type.");
	}

	#endregion
}
================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Common/DbStatementType.cs ================================================
/*
 *	The contents of this file are subject to the Initial
 *	Developer's Public License Version 1.0 (the "License");
 *	you may not use this file except in compliance with the
 *	License. You may obtain a copy of the License at
 *	https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *	Software distributed under the License is distributed on
 *	an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *	express or implied. See the License for the specific
 *	language governing rights and limitations under the License.
 *
 *	All Rights Reserved.
 */

//$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net)

using System;

namespace FirebirdSql.Data.Common;

// Statement type as reported by the server; each member maps directly to the
// corresponding isc_info_sql_stmt_* information code (None = not determined).
internal enum DbStatementType : int
{
	None = 0,
	Select = IscCodes.isc_info_sql_stmt_select,
	Insert = IscCodes.isc_info_sql_stmt_insert,
	Update = IscCodes.isc_info_sql_stmt_update,
	Delete = IscCodes.isc_info_sql_stmt_delete,
	DDL = IscCodes.isc_info_sql_stmt_ddl,
	GetSegment = IscCodes.isc_info_sql_stmt_get_segment,
	PutSegment = IscCodes.isc_info_sql_stmt_put_segment,
	StoredProcedure = IscCodes.isc_info_sql_stmt_exec_procedure,
	StartTrans = IscCodes.isc_info_sql_stmt_start_trans,
	Commit = IscCodes.isc_info_sql_stmt_commit,
	Rollback = IscCodes.isc_info_sql_stmt_rollback,
	SelectForUpdate = IscCodes.isc_info_sql_stmt_select_for_upd,
	SetGenerator = IscCodes.isc_info_sql_stmt_set_generator,
	SavePoint = IscCodes.isc_info_sql_stmt_savepoint
}

================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Common/DbValue.cs ================================================
/*
 *	The contents of this file are subject to the Initial
 *	Developer's Public License Version 1.0 (the "License");
 *	you may not use this file except in compliance with the
 *	License.
You may obtain a copy of the License at
 *	https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *	Software distributed under the License is distributed on
 *	an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *	express or implied. See the License for the specific
 *	language governing rights and limitations under the License.
 *
 *	All Rights Reserved.
 */

//$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net)

using System;
using System.Globalization;
using System.Linq;
using System.Numerics;
using System.Threading;
using System.Threading.Tasks;
using FirebirdSql.Data.FirebirdClient;
using FirebirdSql.Data.Types;

namespace FirebirdSql.Data.Common;

// Wraps a single value belonging to a DbField and converts it between CLR
// types and the Firebird wire representation. When constructed with a
// StatementBase, BLOB/CLOB/array ids can be materialized through it; without
// one, BLOB-backed fields surface their 64-bit handle instead.
// FIX: the async methods below were declared as non-generic ValueTask while
// returning values; the generic type arguments (ValueTask<T>) are restored.
internal sealed class DbValue
{
	private StatementBase _statement;
	private DbField _field;
	private object _value;

	// Field metadata this value belongs to.
	public DbField Field
	{
		get { return _field; }
	}

	public DbValue(DbField field, object value)
	{
		_field = field;
		_value = value ?? DBNull.Value;
	}

	public DbValue(StatementBase statement, DbField field, object value)
	{
		_statement = statement;
		_field = field;
		_value = value ?? DBNull.Value;
	}

	public bool IsDBNull()
	{
		return TypeHelper.IsDBNull(_value);
	}

	// Returns the CLR value. BLOB/array fields return their 64-bit id when no
	// statement is available; with a statement the actual data is fetched.
	public object GetValue()
	{
		if (IsDBNull())
		{
			return DBNull.Value;
		}
		switch (_field.DbDataType)
		{
			case DbDataType.Text:
				if (_statement == null)
				{
					return GetInt64();
				}
				else
				{
					return GetString();
				}
			case DbDataType.Binary:
				if (_statement == null)
				{
					return GetInt64();
				}
				else
				{
					return GetBinary();
				}
			case DbDataType.Array:
				if (_statement == null)
				{
					return GetInt64();
				}
				else
				{
					return GetArray();
				}
			default:
				return _value;
		}
	}

	// Async counterpart of GetValue.
	public async ValueTask<object> GetValueAsync(CancellationToken cancellationToken = default)
	{
		if (IsDBNull())
		{
			return DBNull.Value;
		}
		switch (_field.DbDataType)
		{
			case DbDataType.Text:
				if (_statement == null)
				{
					return GetInt64();
				}
				else
				{
					return await GetStringAsync(cancellationToken).ConfigureAwait(false);
				}
			case DbDataType.Binary:
				if (_statement == null)
				{
					return GetInt64();
				}
				else
				{
					return await GetBinaryAsync(cancellationToken).ConfigureAwait(false);
				}
			case DbDataType.Array:
				if (_statement == null)
				{
					return GetInt64();
				}
				else
				{
					return await GetArrayAsync(cancellationToken).ConfigureAwait(false);
				}
			default:
				return _value;
		}
	}

	public void SetValue(object value)
	{
		_value = value;
	}

	// Materializes a CLOB id on demand, decodes byte[] payloads using the
	// field charset; anything else falls back to ToString().
	public string GetString()
	{
		if (Field.DbDataType == DbDataType.Text && _value is long l)
		{
			_value = GetClobData(l);
		}
		if (_value is byte[] bytes)
		{
			return Field.Charset.GetString(bytes);
		}
		return _value.ToString();
	}

	public async ValueTask<string> GetStringAsync(CancellationToken cancellationToken = default)
	{
		if (Field.DbDataType == DbDataType.Text && _value is long l)
		{
			_value = await GetClobDataAsync(l, cancellationToken).ConfigureAwait(false);
		}
		if (_value is byte[] bytes)
		{
			return Field.Charset.GetString(bytes);
		}
		return _value.ToString();
	}

	public char GetChar()
	{
		return Convert.ToChar(_value, CultureInfo.CurrentCulture);
	}

	public bool GetBoolean()
	{
		return Convert.ToBoolean(_value, CultureInfo.InvariantCulture);
	}

	public byte GetByte()
	{
		// BigInteger has no IConvertible path; cast it explicitly.
		return _value switch
		{
			BigInteger bi => (byte)bi,
			_ => Convert.ToByte(_value, CultureInfo.InvariantCulture),
		};
	}

	public short GetInt16()
	{
		return _value switch
		{
			BigInteger bi => (short)bi,
			_ => Convert.ToInt16(_value, CultureInfo.InvariantCulture),
		};
	}

	public int GetInt32()
	{
		return _value switch
		{
			BigInteger bi => (int)bi,
			_ => Convert.ToInt32(_value, CultureInfo.InvariantCulture),
		};
	}

	public long GetInt64()
	{
		return _value switch
		{
			BigInteger bi => (long)bi,
			_ => Convert.ToInt64(_value, CultureInfo.InvariantCulture),
		};
	}

	public decimal GetDecimal()
	{
		return Convert.ToDecimal(_value, CultureInfo.InvariantCulture);
	}

	public float GetFloat()
	{
		return Convert.ToSingle(_value, CultureInfo.InvariantCulture);
	}

	public Guid GetGuid()
	{
		return _value switch
		{
			Guid guid => guid,
			byte[] bytes => TypeDecoder.DecodeGuid(bytes),
			_ => throw new InvalidOperationException($"Incorrect {nameof(Guid)} value."),
		};
	}

	public double GetDouble()
	{
		return Convert.ToDouble(_value, CultureInfo.InvariantCulture);
	}

	public DateTime GetDateTime()
	{
		return _value switch
		{
			DateTimeOffset dto => dto.DateTime,
			FbZonedDateTime zdt => zdt.DateTime,
			_ => Convert.ToDateTime(_value, CultureInfo.CurrentCulture.DateTimeFormat),
		};
	}

	public TimeSpan GetTimeSpan()
	{
		return (TimeSpan)_value;
	}

	public FbDecFloat GetDecFloat()
	{
		return (FbDecFloat)_value;
	}

	public BigInteger GetInt128()
	{
		return _value switch
		{
			byte b => b,
			short s => s,
			int i => i,
			long l => l,
			_ => (BigInteger)_value,
		};
	}

	public FbZonedDateTime GetZonedDateTime()
	{
		return (FbZonedDateTime)_value;
	}

	public FbZonedTime GetZonedTime()
	{
		return (FbZonedTime)_value;
	}

	// Materializes an array handle on demand; result is cached in _value.
	public Array GetArray()
	{
		if (_value is long l)
		{
			_value = GetArrayData(l);
		}
		return (Array)_value;
	}

	public async ValueTask<Array> GetArrayAsync(CancellationToken cancellationToken = default)
	{
		if (_value is long l)
		{
			_value = await GetArrayDataAsync(l, cancellationToken).ConfigureAwait(false);
		}
		return (Array)_value;
	}

	// Materializes a BLOB id on demand; a Guid value is encoded to its bytes.
	public byte[] GetBinary()
	{
		if (_value is long l)
		{
			_value = GetBlobData(l);
		}
		if (_value is Guid guid)
		{
			return TypeEncoder.EncodeGuid(guid);
		}
		return (byte[])_value;
	}

	public async ValueTask<byte[]> GetBinaryAsync(CancellationToken cancellationToken = default)
	{
		if (_value is long l)
		{
			_value = await GetBlobDataAsync(l, cancellationToken).ConfigureAwait(false);
		}
		if (_value is Guid guid)
		{
			return TypeEncoder.EncodeGuid(guid);
		}
		return (byte[])_value;
	}

	// Streaming access to a BLOB; only valid while the value is still an id.
	public BlobStream GetBinaryStream()
	{
		if (_value is not long l)
			throw new NotSupportedException();
		return GetBlobStream(l);
	}

	public ValueTask<BlobStream> GetBinaryStreamAsync(CancellationToken cancellationToken = default)
	{
		if (_value is not long l)
			throw new NotSupportedException();
		return GetBlobStreamAsync(l, cancellationToken);
	}

	// Encoded (wire) date value.
	public int GetDate()
	{
		return _value switch
		{
			DateOnly @do => TypeEncoder.EncodeDate(@do),
			_ => TypeEncoder.EncodeDate(GetDateTime()),
		};
	}

	// Encoded (wire) time value.
	public int GetTime()
	{
		return _value switch
		{
			TimeSpan ts => TypeEncoder.EncodeTime(ts),
			FbZonedTime zt => TypeEncoder.EncodeTime(zt.Time),
			TimeOnly to => TypeEncoder.EncodeTime(to),
			_ => TypeEncoder.EncodeTime(TypeHelper.DateTimeTimeToTimeSpan(GetDateTime())),
		};
	}

	// Numeric time zone id for zoned date-time/time values.
	public ushort GetTimeZoneId()
	{
		{
			if (_value is FbZonedDateTime zdt && TimeZoneMapping.TryGetByName(zdt.TimeZone, out var id))
			{
				return id;
			}
		}
		{
			if (_value is FbZonedTime zt && TimeZoneMapping.TryGetByName(zt.TimeZone, out var id))
			{
				return id;
			}
		}
		throw new InvalidOperationException($"Incorrect time zone value.");
	}

	// Encodes the current value into its wire byte layout for Field's type.
	// DBNull yields a zero-filled buffer of the declared length.
	public byte[] GetBytes()
	{
		if (IsDBNull())
		{
			int length = _field.Length;
			if (Field.SqlType == IscCodes.SQL_VARYING)
			{
				// Add two bytes more for store value length
				length += 2;
			}
			return new byte[length];
		}
		switch (Field.DbDataType)
		{
			case DbDataType.Char:
				{
					var buffer = new byte[Field.Length];
					byte[] bytes;
					if (Field.Charset.IsOctetsCharset)
					{
						bytes = GetBinary();
					}
					else if (Field.Charset.IsNoneCharset)
					{
						var bvalue = Field.Charset.GetBytes(GetString());
						if (bvalue.Length > Field.Length)
						{
							throw IscException.ForErrorCodes(new[] { IscCodes.isc_arith_except, IscCodes.isc_string_truncation });
						}
						bytes = bvalue;
					}
					else
					{
						var svalue = GetString();
						if ((Field.Length % Field.Charset.BytesPerCharacter) == 0 &&
							svalue.EnumerateRunesToChars().Count() > Field.CharCount)
						{
							throw IscException.ForErrorCodes(new[] { IscCodes.isc_arith_except, IscCodes.isc_string_truncation });
						}
						bytes = Field.Charset.GetBytes(svalue);
					}
					// CHAR fields are space-padded to the declared length.
					for (var i = 0; i < buffer.Length; i++)
					{
						buffer[i] = (byte)' ';
					}
					Buffer.BlockCopy(bytes, 0, buffer, 0, bytes.Length);
					return buffer;
				}

			case DbDataType.VarChar:
				{
					// VARCHAR carries a 2-byte length prefix.
					var buffer = new byte[Field.Length + 2];
					byte[] bytes;
					if (Field.Charset.IsOctetsCharset)
					{
						bytes = GetBinary();
					}
					else if (Field.Charset.IsNoneCharset)
					{
						var bvalue = Field.Charset.GetBytes(GetString());
						if (bvalue.Length > Field.Length)
						{
							throw IscException.ForErrorCodes(new[] { IscCodes.isc_arith_except, IscCodes.isc_string_truncation });
						}
						bytes = bvalue;
					}
					else
					{
						var svalue = GetString();
						if ((Field.Length % Field.Charset.BytesPerCharacter) == 0 &&
							svalue.EnumerateRunesToChars().Count() > Field.CharCount)
						{
							throw IscException.ForErrorCodes(new[] { IscCodes.isc_arith_except, IscCodes.isc_string_truncation });
						}
						bytes = Field.Charset.GetBytes(svalue);
					}
					Buffer.BlockCopy(BitConverter.GetBytes((short)bytes.Length), 0, buffer, 0, 2);
					Buffer.BlockCopy(bytes, 0, buffer, 2, bytes.Length);
					return buffer;
				}

			case DbDataType.Numeric:
			case DbDataType.Decimal:
				return GetNumericBytes();

			case DbDataType.SmallInt:
				return BitConverter.GetBytes(GetInt16());

			case DbDataType.Integer:
				return BitConverter.GetBytes(GetInt32());

			case DbDataType.Array:
			case DbDataType.Binary:
			case DbDataType.Text:
			case DbDataType.BigInt:
				// BLOB/array columns are written as their 64-bit handle.
				return BitConverter.GetBytes(GetInt64());

			case DbDataType.Float:
				return BitConverter.GetBytes(GetFloat());

			case DbDataType.Double:
				return BitConverter.GetBytes(GetDouble());

			case DbDataType.Date:
				return BitConverter.GetBytes(GetDate());

			case DbDataType.Time:
				return BitConverter.GetBytes(GetTime());

			case DbDataType.TimeStamp:
				{
					var dt = GetDateTime();
					var date = BitConverter.GetBytes(TypeEncoder.EncodeDate(dt));
					var time = BitConverter.GetBytes(TypeEncoder.EncodeTime(TypeHelper.DateTimeTimeToTimeSpan(dt)));
					var result = new byte[8];
					Buffer.BlockCopy(date, 0, result, 0, date.Length);
					Buffer.BlockCopy(time, 0, result, 4, time.Length);
					return result;
				}

			case DbDataType.Guid:
				{
					var bytes = TypeEncoder.EncodeGuid(GetGuid());
					byte[] buffer;
					if (Field.SqlType == IscCodes.SQL_VARYING)
					{
						buffer = new byte[bytes.Length + 2];
						Buffer.BlockCopy(BitConverter.GetBytes((short)bytes.Length), 0, buffer, 0, 2);
						Buffer.BlockCopy(bytes, 0, buffer, 2, bytes.Length);
					}
					else
					{
						buffer = new byte[bytes.Length];
						Buffer.BlockCopy(bytes, 0, buffer, 0, bytes.Length);
					}
					return buffer;
				}

			case DbDataType.Boolean:
				return BitConverter.GetBytes(GetBoolean());

			case DbDataType.TimeStampTZ:
				{
					// 4 bytes date + 4 bytes time + 2 bytes time zone id.
					var dt = GetDateTime();
					var date = BitConverter.GetBytes(TypeEncoder.EncodeDate(dt));
					var time = BitConverter.GetBytes(TypeEncoder.EncodeTime(TypeHelper.DateTimeTimeToTimeSpan(dt)));
					var tzId = BitConverter.GetBytes(GetTimeZoneId());
					var result = new byte[10];
					Buffer.BlockCopy(date, 0, result, 0, date.Length);
					Buffer.BlockCopy(time, 0, result, 4, time.Length);
					Buffer.BlockCopy(tzId, 0, result, 8, tzId.Length);
					return result;
				}

			case DbDataType.TimeStampTZEx:
				{
					// Extended layout: trailing 2-byte offset is sent as zero.
					var dt = GetDateTime();
					var date = BitConverter.GetBytes(TypeEncoder.EncodeDate(dt));
					var time = BitConverter.GetBytes(TypeEncoder.EncodeTime(TypeHelper.DateTimeTimeToTimeSpan(dt)));
					var tzId = BitConverter.GetBytes(GetTimeZoneId());
					var offset = new byte[] { 0, 0 };
					var result = new byte[12];
					Buffer.BlockCopy(date, 0, result, 0, date.Length);
					Buffer.BlockCopy(time, 0, result, 4, time.Length);
					Buffer.BlockCopy(tzId, 0, result, 8, tzId.Length);
					Buffer.BlockCopy(offset, 0, result, 10, offset.Length);
					return result;
				}

			case DbDataType.TimeTZ:
				{
					var time = BitConverter.GetBytes(GetTime());
					var tzId = BitConverter.GetBytes(GetTimeZoneId());
					var result = new byte[6];
					Buffer.BlockCopy(time, 0, result, 0, time.Length);
					Buffer.BlockCopy(tzId, 0, result, 4, tzId.Length);
					return result;
				}

			case DbDataType.TimeTZEx:
				{
					var time = BitConverter.GetBytes(GetTime());
					var tzId = BitConverter.GetBytes(GetTimeZoneId());
					var offset = new byte[] { 0, 0 };
					var result = new byte[8];
					Buffer.BlockCopy(time, 0, result, 0, time.Length);
					Buffer.BlockCopy(tzId, 0, result, 4, tzId.Length);
					Buffer.BlockCopy(offset, 0, result, 6, offset.Length);
					return result;
				}

			case DbDataType.Dec16:
				return DecimalCodec.DecFloat16.EncodeDecimal(GetDecFloat());

			case DbDataType.Dec34:
				return DecimalCodec.DecFloat34.EncodeDecimal(GetDecFloat());

			case DbDataType.Int128:
				return Int128Helper.GetBytes(GetInt128());

			default:
				throw TypeHelper.InvalidDataType((int)Field.DbDataType);
		}
	}

	// Async counterpart of GetBytes (awaits BLOB/CLOB materialization).
	public async ValueTask<byte[]> GetBytesAsync(CancellationToken cancellationToken = default)
	{
		if (IsDBNull())
		{
			int length = _field.Length;
			if (Field.SqlType == IscCodes.SQL_VARYING)
			{
				// Add two bytes more for store value length
				length += 2;
			}
			return new byte[length];
		}
		switch (Field.DbDataType)
		{
			case DbDataType.Char:
				{
					var buffer = new byte[Field.Length];
					byte[] bytes;
					if (Field.Charset.IsOctetsCharset)
					{
						bytes = await GetBinaryAsync(cancellationToken).ConfigureAwait(false);
					}
					else if (Field.Charset.IsNoneCharset)
					{
						var bvalue = Field.Charset.GetBytes(await GetStringAsync(cancellationToken).ConfigureAwait(false));
						if (bvalue.Length > Field.Length)
						{
							throw IscException.ForErrorCodes(new[] { IscCodes.isc_arith_except, IscCodes.isc_string_truncation });
						}
						bytes = bvalue;
					}
					else
					{
						var svalue = await GetStringAsync(cancellationToken).ConfigureAwait(false);
						if ((Field.Length % Field.Charset.BytesPerCharacter) == 0 &&
							svalue.EnumerateRunesToChars().Count() > Field.CharCount)
						{
							throw IscException.ForErrorCodes(new[] { IscCodes.isc_arith_except, IscCodes.isc_string_truncation });
						}
						bytes = Field.Charset.GetBytes(svalue);
					}
					// CHAR fields are space-padded to the declared length.
					for (var i = 0; i < buffer.Length; i++)
					{
						buffer[i] = (byte)' ';
					}
					Buffer.BlockCopy(bytes, 0, buffer, 0, bytes.Length);
					return buffer;
				}

			case DbDataType.VarChar:
				{
					// VARCHAR carries a 2-byte length prefix.
					var buffer = new byte[Field.Length + 2];
					byte[] bytes;
					if (Field.Charset.IsOctetsCharset)
					{
						bytes = await GetBinaryAsync(cancellationToken).ConfigureAwait(false);
					}
					else if (Field.Charset.IsNoneCharset)
					{
						var bvalue = Field.Charset.GetBytes(await GetStringAsync(cancellationToken).ConfigureAwait(false));
						if (bvalue.Length > Field.Length)
						{
							throw IscException.ForErrorCodes(new[] { IscCodes.isc_arith_except, IscCodes.isc_string_truncation });
						}
						bytes = bvalue;
					}
					else
					{
						var svalue = await GetStringAsync(cancellationToken).ConfigureAwait(false);
						if ((Field.Length % Field.Charset.BytesPerCharacter) == 0 &&
							svalue.EnumerateRunesToChars().Count() > Field.CharCount)
						{
							throw IscException.ForErrorCodes(new[] { IscCodes.isc_arith_except, IscCodes.isc_string_truncation });
						}
						bytes = Field.Charset.GetBytes(svalue);
					}
					Buffer.BlockCopy(BitConverter.GetBytes((short)bytes.Length), 0, buffer, 0, 2);
					Buffer.BlockCopy(bytes, 0, buffer, 2, bytes.Length);
					return buffer;
				}

			case DbDataType.Numeric:
			case DbDataType.Decimal:
				return GetNumericBytes();

			case DbDataType.SmallInt:
				return BitConverter.GetBytes(GetInt16());

			case DbDataType.Integer:
				return BitConverter.GetBytes(GetInt32());

			case DbDataType.Array:
			case DbDataType.Binary:
			case DbDataType.Text:
			case DbDataType.BigInt:
				// BLOB/array columns are written as their 64-bit handle.
				return BitConverter.GetBytes(GetInt64());

			case DbDataType.Float:
				return BitConverter.GetBytes(GetFloat());

			case DbDataType.Double:
				return BitConverter.GetBytes(GetDouble());

			case DbDataType.Date:
				return BitConverter.GetBytes(GetDate());

			case DbDataType.Time:
				return BitConverter.GetBytes(GetTime());

			case DbDataType.TimeStamp:
				{
					var dt = GetDateTime();
					var date = BitConverter.GetBytes(TypeEncoder.EncodeDate(dt));
					var time = BitConverter.GetBytes(TypeEncoder.EncodeTime(TypeHelper.DateTimeTimeToTimeSpan(dt)));
					var result = new byte[8];
					Buffer.BlockCopy(date, 0, result, 0, date.Length);
					Buffer.BlockCopy(time, 0, result, 4, time.Length);
					return result;
				}

			case DbDataType.Guid:
				{
					var bytes = TypeEncoder.EncodeGuid(GetGuid());
					byte[] buffer;
					if (Field.SqlType == IscCodes.SQL_VARYING)
					{
						buffer = new byte[bytes.Length + 2];
						Buffer.BlockCopy(BitConverter.GetBytes((short)bytes.Length), 0, buffer, 0, 2);
						Buffer.BlockCopy(bytes, 0, buffer, 2, bytes.Length);
					}
					else
					{
						buffer = new byte[bytes.Length];
						Buffer.BlockCopy(bytes, 0, buffer, 0, bytes.Length);
					}
					return buffer;
				}

			case DbDataType.Boolean:
				return BitConverter.GetBytes(GetBoolean());

			case DbDataType.TimeStampTZ:
				{
					var dt = GetDateTime();
					var date = BitConverter.GetBytes(TypeEncoder.EncodeDate(dt));
					var time = BitConverter.GetBytes(TypeEncoder.EncodeTime(TypeHelper.DateTimeTimeToTimeSpan(dt)));
					var tzId = BitConverter.GetBytes(GetTimeZoneId());
					var result = new byte[10];
					Buffer.BlockCopy(date, 0, result, 0, date.Length);
					Buffer.BlockCopy(time, 0, result, 4, time.Length);
					Buffer.BlockCopy(tzId, 0, result, 8, tzId.Length);
					return result;
				}

			case DbDataType.TimeStampTZEx:
				{
					var dt = GetDateTime();
					var date = BitConverter.GetBytes(TypeEncoder.EncodeDate(dt));
					var time = BitConverter.GetBytes(TypeEncoder.EncodeTime(TypeHelper.DateTimeTimeToTimeSpan(dt)));
					var tzId = BitConverter.GetBytes(GetTimeZoneId());
					var offset = new byte[] { 0, 0 };
					var result = new byte[12];
					Buffer.BlockCopy(date, 0, result, 0, date.Length);
					Buffer.BlockCopy(time, 0, result, 4, time.Length);
					Buffer.BlockCopy(tzId, 0, result, 8, tzId.Length);
					Buffer.BlockCopy(offset, 0, result, 10, offset.Length);
					return result;
				}

			case DbDataType.TimeTZ:
				{
					var time = BitConverter.GetBytes(GetTime());
					var tzId = BitConverter.GetBytes(GetTimeZoneId());
					var result = new byte[6];
					Buffer.BlockCopy(time, 0, result, 0, time.Length);
					Buffer.BlockCopy(tzId, 0, result, 4, tzId.Length);
					return result;
				}

			case DbDataType.TimeTZEx:
				{
					var time = BitConverter.GetBytes(GetTime());
					var tzId = BitConverter.GetBytes(GetTimeZoneId());
					var offset = new byte[] { 0, 0 };
					var result = new byte[8];
					Buffer.BlockCopy(time, 0, result, 0, time.Length);
					Buffer.BlockCopy(tzId, 0, result, 4, tzId.Length);
					Buffer.BlockCopy(offset, 0, result, 6, offset.Length);
					return result;
				}

			case DbDataType.Dec16:
				return DecimalCodec.DecFloat16.EncodeDecimal(GetDecFloat());

			case DbDataType.Dec34:
				return DecimalCodec.DecFloat34.EncodeDecimal(GetDecFloat());

			case DbDataType.Int128:
				return Int128Helper.GetBytes(GetInt128());

			default:
				throw TypeHelper.InvalidDataType((int)Field.DbDataType);
		}
	}

	// Encodes a NUMERIC/DECIMAL value into the storage type dictated by SqlType.
	private byte[] GetNumericBytes()
	{
		var value = GetDecimal();
		var numeric = TypeEncoder.EncodeDecimal(value, Field.NumericScale, Field.DataType);
		switch (_field.SqlType)
		{
			case IscCodes.SQL_SHORT:
				return BitConverter.GetBytes((short)numeric);
			case IscCodes.SQL_LONG:
				return BitConverter.GetBytes((int)numeric);
			case IscCodes.SQL_QUAD:
			case IscCodes.SQL_INT64:
				return BitConverter.GetBytes((long)numeric);
			case IscCodes.SQL_DOUBLE:
			case IscCodes.SQL_D_FLOAT:
				return BitConverter.GetBytes((double)numeric);
			case IscCodes.SQL_INT128:
				return Int128Helper.GetBytes((BigInteger)numeric);
			default:
				return null;
		}
	}

	private string GetClobData(long blobId)
	{
		var clob = _statement.CreateBlob(blobId);
		return clob.ReadString();
	}

	private ValueTask<string> GetClobDataAsync(long blobId, CancellationToken cancellationToken = default)
	{
		var clob = _statement.CreateBlob(blobId);
		return clob.ReadStringAsync(cancellationToken);
	}

	private byte[] GetBlobData(long blobId)
	{
		var blob = _statement.CreateBlob(blobId);
		return blob.Read();
	}

	private ValueTask<byte[]> GetBlobDataAsync(long blobId, CancellationToken cancellationToken = default)
	{
		var blob = _statement.CreateBlob(blobId);
		return blob.ReadAsync(cancellationToken);
	}

	private BlobStream GetBlobStream(long blobId)
	{
		var blob = _statement.CreateBlob(blobId);
		return new BlobStream(blob);
	}

	private ValueTask<BlobStream> GetBlobStreamAsync(long blobId, CancellationToken cancellationToken = default)
	{
		var blob = _statement.CreateBlob(blobId);
		return ValueTask.FromResult(new BlobStream(blob));
	}

	private Array GetArrayData(long handle)
	{
		if (_field.ArrayHandle == null)
		{
			// Descriptor is fetched once and cached on the field.
			_field.ArrayHandle = _statement.CreateArray(handle, Field.Relation, Field.Name);
		}
		var gdsArray = _statement.CreateArray(_field.ArrayHandle.Descriptor);
		gdsArray.Handle = handle;
		gdsArray.Database = _statement.Database;
		gdsArray.Transaction = _statement.Transaction;
		return gdsArray.Read();
	}

	private async ValueTask<Array> GetArrayDataAsync(long handle, CancellationToken cancellationToken = default)
	{
		if (_field.ArrayHandle == null)
		{
			_field.ArrayHandle = await _statement.CreateArrayAsync(handle, Field.Relation, Field.Name, cancellationToken).ConfigureAwait(false);
		}
		var gdsArray = await _statement.CreateArrayAsync(_field.ArrayHandle.Descriptor, cancellationToken).ConfigureAwait(false);
		gdsArray.Handle = handle;
		gdsArray.Database = _statement.Database;
		gdsArray.Transaction = _statement.Transaction;
		return await gdsArray.ReadAsync(cancellationToken).ConfigureAwait(false);
	}
}

================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Common/DecimalCodec.cs ================================================
/*
 *	The contents of this file are subject to the Initial
 *	Developer's Public License Version 1.0 (the "License");
 *	you may not use this file except in compliance with the
 *	License. You may obtain a copy of the License at
 *	https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *	Software distributed under the License is distributed on
 *	an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *	express or implied. See the License for the specific
 *	language governing rights and limitations under the License.
 *
 *	All Rights Reserved.
*/ //$Authors = Jiri Cincura (jiri@cincura.net) using System; using System.Diagnostics; using FirebirdSql.Data.Types; namespace FirebirdSql.Data.Common; // based on Jaybird's implementation class DecimalCodec { class DecimalFormat { const int SignBits = 1; const int CombinationBits = 5; const int BitsPerGroup = DenselyPackedDecimalCodec.BitsPerGroup; const int DigitsPerGroup = DenselyPackedDecimalCodec.DigitsPerGroup; public DecimalFormat(int formatBitLength, int coefficientDigits) { FormatBitLength = formatBitLength; CoefficientDigits = coefficientDigits; FormatByteLength = FormatBitLength / 8; CoefficientContinuationBits = BitsPerGroup * (CoefficientDigits - 1) / DigitsPerGroup; ExponentContinuationBits = FormatBitLength - SignBits - CombinationBits - CoefficientContinuationBits; ELimit = 3 * (1 << ExponentContinuationBits) - 1; EMin = -ELimit / 2; ExponentBias = -EMin + CoefficientDigits - 1; } public int FormatBitLength { get; } public int CoefficientDigits { get; } public int FormatByteLength { get; } public int CoefficientContinuationBits { get; } public int ExponentContinuationBits { get; } public int ELimit { get; } public int EMin { get; } public int ExponentBias { get; } public void ValidateByteLength(byte[] decBytes) { if (decBytes.Length != FormatByteLength) { throw new ArgumentException(nameof(decBytes), $"{nameof(decBytes)} argument must be {FormatByteLength} bytes."); } } public int BiasedExponent(int unbiasedExponent) { return unbiasedExponent + ExponentBias; } public int UnbiasedExponent(int biasedExponent) { return biasedExponent - ExponentBias; } } // Byte pattern that signals that the combination field contains 1 bit of the first digit (for value 8 or 9). 
const int Combination2 = 0b0_11000_00; const int NegativeBit = 0b1000_0000; const int NegativeSignum = DenselyPackedDecimalCodec.NegativeSignum; const byte TypeMask = 0b0_11111_10; const byte Infinity0 = 0b0_11110_00; const byte Infinity2 = 0b0_11110_10; const byte NaNQuiet = 0b0_11111_00; const byte NaNSignal = 0b0_11111_10; readonly DecimalFormat _decimalFormat; readonly DenselyPackedDecimalCodec _coefficientCoder; public DecimalCodec(int formatBitLength, int coefficientDigits) { _decimalFormat = new DecimalFormat(formatBitLength, coefficientDigits); _coefficientCoder = new DenselyPackedDecimalCodec(coefficientDigits); } public static DecimalCodec DecFloat16 { get; } = new DecimalCodec(64, 16); public static DecimalCodec DecFloat34 { get; } = new DecimalCodec(128, 34); // Parse an IEEE-754 decimal format to a FbDecFloat. public FbDecFloat ParseBytes(byte[] decBytes) { // this (and related) code works with BE if (BitConverter.IsLittleEndian) { Array.Reverse(decBytes); } _decimalFormat.ValidateByteLength(decBytes); var firstByte = decBytes[0] & 0xff; var signum = -1 * (firstByte >>> 7) | 1; var decimalType = DecimalTypeFromFirstByte(firstByte); switch (decimalType) { case DecimalType.Infinity: return signum == NegativeSignum ? FbDecFloat.NegativeInfinity : FbDecFloat.PositiveInfinity; case DecimalType.NaN: return signum == NegativeSignum ? FbDecFloat.NegativeNaN : FbDecFloat.PositiveNaN; case DecimalType.SignalingNaN: return signum == NegativeSignum ? 
FbDecFloat.NegativeSignalingNaN : FbDecFloat.PositiveSignalingNaN; case DecimalType.Finite: { // NOTE: get exponent MSB from combination field and first 2 bits of exponent continuation in one go int exponentMSB; int firstDigit; if ((firstByte & Combination2) != Combination2) { exponentMSB = (firstByte >>> 3) & 0b01100 | (firstByte & 0b011); firstDigit = (firstByte >>> 2) & 0b0111; } else { exponentMSB = (firstByte >>> 1) & 0b01100 | (firstByte & 0b011); firstDigit = 0b01000 | ((firstByte >>> 2) & 0b01); } var exponentBitsRemaining = _decimalFormat.ExponentContinuationBits - 2; Debug.Assert(exponentBitsRemaining == _decimalFormat.FormatBitLength - 8 - _decimalFormat.CoefficientContinuationBits, $"Unexpected exponent remaining length {exponentBitsRemaining}."); var exponent = _decimalFormat.UnbiasedExponent(DecodeExponent(decBytes, exponentMSB, exponentBitsRemaining)); var coefficient = _coefficientCoder.DecodeValue(signum, firstDigit, decBytes); return new FbDecFloat(DecimalType.Finite, signum == NegativeSignum, coefficient, exponent); } default: throw new ArgumentOutOfRangeException(); } } // Encodes a FbDecFloat to its IEEE-754 format. 
public byte[] EncodeDecimal(FbDecFloat @decimal)
{
	var decBytes = new byte[_decimalFormat.FormatByteLength];
	if (@decimal.Negative)
	{
		decBytes[0] = NegativeBit;
	}
	if (@decimal.Type == DecimalType.Finite)
	{
		EncodeFinite(@decimal, decBytes);
	}
	else
	{
		// infinities and NaNs carry only the sign bit and a special-value pattern
		decBytes[0] |= GetSpecialBits(@decimal.Type);
	}
	// this (and related) code works with BE
	if (BitConverter.IsLittleEndian)
	{
		Array.Reverse(decBytes);
	}
	return decBytes;
}

// Encodes the coefficient and biased exponent of a finite value into decBytes
// (big-endian layout; the buffer is reversed later by the caller if needed).
void EncodeFinite(FbDecFloat @decimal, byte[] decBytes)
{
	var biasedExponent = _decimalFormat.BiasedExponent(@decimal.Exponent);
	var coefficient = @decimal.Coefficient;
	// writes all but the most significant coefficient digit; that digit is
	// folded into the combination field of the first byte below
	var mostSignificantDigit = _coefficientCoder.EncodeValue(coefficient, decBytes);
	var expMSB = biasedExponent >>> _decimalFormat.ExponentContinuationBits;
	var expTwoBitCont = (biasedExponent >>> _decimalFormat.ExponentContinuationBits - 2) & 0b011;
	if (mostSignificantDigit <= 7)
	{
		decBytes[0] |= (byte)((expMSB << 5) | (mostSignificantDigit << 2) | expTwoBitCont);
	}
	else
	{
		// first digit 8 or 9 uses the 11xxx form of the combination field
		decBytes[0] |= (byte)(Combination2 | (expMSB << 3) | ((mostSignificantDigit & 0b01) << 2) | expTwoBitCont);
	}
	EncodeExponentContinuation(decBytes, biasedExponent, _decimalFormat.ExponentContinuationBits - 2);
}

// Writes the low expBitsRemaining bits of the biased exponent into the bytes
// following the first byte; any partial trailing bits share their byte with
// the start of the coefficient continuation (hence the |= there).
static void EncodeExponentContinuation(byte[] decBytes, int expAndBias, int expBitsRemaining)
{
	var expByteIndex = 1;
	while (expBitsRemaining > 8)
	{
		decBytes[expByteIndex++] = (byte)(expAndBias >>> expBitsRemaining - 8);
		expBitsRemaining -= 8;
	}
	if (expBitsRemaining > 0)
	{
		decBytes[expByteIndex] |= (byte)(expAndBias << 8 - expBitsRemaining);
	}
}

// Reads the biased exponent: exponentMSB (already extracted from the first
// byte) followed by exponentBitsRemaining bits taken from decBytes[1..].
static int DecodeExponent(byte[] decBytes, int exponentMSB, int exponentBitsRemaining)
{
	var exponent = exponentMSB;
	var byteIndex = 1;
	while (exponentBitsRemaining > 8)
	{
		exponent = (exponent << 8) | (decBytes[byteIndex] & 0xFF);
		exponentBitsRemaining -= 8;
		byteIndex += 1;
	}
	if (exponentBitsRemaining > 0)
	{
		exponent = (exponent << exponentBitsRemaining) | ((decBytes[byteIndex] & 0xFF) >>> (8 - exponentBitsRemaining));
	}
	return exponent;
}

// Classifies the value from the combination bits of the first byte.
static DecimalType DecimalTypeFromFirstByte(int firstByte)
{
	return (firstByte & TypeMask) switch
	{
		Infinity0 => DecimalType.Infinity,
		Infinity2 => DecimalType.Infinity,
		NaNQuiet => DecimalType.NaN,
		NaNSignal => DecimalType.SignalingNaN,
		_ => DecimalType.Finite,
	};
}

// First-byte bit pattern for a non-finite value; Finite has no such pattern.
static byte GetSpecialBits(DecimalType decimalType)
{
	return decimalType switch
	{
		DecimalType.Finite => throw new InvalidOperationException($"{nameof(DecimalType)} {nameof(DecimalType.Finite)} has no special bits."),
		DecimalType.Infinity => Infinity0,
		DecimalType.NaN => NaNQuiet,
		DecimalType.SignalingNaN => NaNSignal,
		_ => throw new ArgumentOutOfRangeException(),
	};
}
}

================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Common/DecimalShiftHelper.cs ================================================
/*
 * The contents of this file are subject to the Initial
 * Developer's Public License Version 1.0 (the "License");
 * you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 * Software distributed under the License is distributed on
 * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 * express or implied. See the License for the specific
 * language governing rights and limitations under the License.
 *
 * All Rights Reserved.
 */

//$Authors = Jiri Cincura (jiri@cincura.net)

namespace FirebirdSql.Data.Common;

// Helpers for moving the decimal point of a System.Decimal by whole digit positions.
internal static class DecimalShiftHelper
{
	// Divides d by 10, shift times (moves the decimal point shift places to the left).
	public static decimal ShiftDecimalLeft(decimal d, int shift)
	{
		while (shift-- > 0)
		{
			d /= 10;
		}
		return d;
	}

	// Multiplies d by 10, shift times (moves the decimal point shift places to the right).
	// NOTE(review): throws OverflowException when the result exceeds the decimal range.
	public static decimal ShiftDecimalRight(decimal d, int shift)
	{
		while (shift-- > 0)
		{
			d *= 10;
		}
		return d;
	}
}

================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Common/DecimalType.cs ================================================
/*
 * The contents of this file are subject to the Initial
 * Developer's Public License Version 1.0 (the "License");
 * you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 * Software distributed under the License is distributed on
 * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 * express or implied. See the License for the specific
 * language governing rights and limitations under the License.
 *
 * All Rights Reserved.
 */

//$Authors = Jiri Cincura (jiri@cincura.net)

namespace FirebirdSql.Data.Common;

// Classification of a DECFLOAT value (finite number, infinity, quiet NaN, signaling NaN).
public enum DecimalType
{
	Finite,
	Infinity,
	NaN,
	SignalingNaN,
}

================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Common/DenselyPackedDecimalCodec.cs ================================================
/*
 * The contents of this file are subject to the Initial
 * Developer's Public License Version 1.0 (the "License");
 * you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 * Software distributed under the License is distributed on
 * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 * express or implied. See the License for the specific
 * language governing rights and limitations under the License.
 *
 * All Rights Reserved.
*/ //$Authors = Jiri Cincura (jiri@cincura.net) using System; using System.Diagnostics; using System.Numerics; namespace FirebirdSql.Data.Common; // based on Jaybird's implementation class DenselyPackedDecimalCodec { internal const int DigitsPerGroup = 3; internal const int BitsPerGroup = 10; internal const int NegativeSignum = -1; const int BitPerByte = 8; static readonly BigInteger OneThousand = new BigInteger(1000); //@formatter:off // Generated using org.firebirdsql.decimal.generator.GenerateLookupTable static readonly char[] DPDGroupBits2Digits = ( "000" + "001" + "002" + "003" + "004" + "005" + "006" + "007" + "008" + "009" + "080" + "081" + "800" + "801" + "880" + "881" + "010" + "011" + "012" + "013" + "014" + "015" + "016" + "017" + "018" + "019" + "090" + "091" + "810" + "811" + "890" + "891" + "020" + "021" + "022" + "023" + "024" + "025" + "026" + "027" + "028" + "029" + "082" + "083" + "820" + "821" + "808" + "809" + "030" + "031" + "032" + "033" + "034" + "035" + "036" + "037" + "038" + "039" + "092" + "093" + "830" + "831" + "818" + "819" + "040" + "041" + "042" + "043" + "044" + "045" + "046" + "047" + "048" + "049" + "084" + "085" + "840" + "841" + "088" + "089" + "050" + "051" + "052" + "053" + "054" + "055" + "056" + "057" + "058" + "059" + "094" + "095" + "850" + "851" + "098" + "099" + "060" + "061" + "062" + "063" + "064" + "065" + "066" + "067" + "068" + "069" + "086" + "087" + "860" + "861" + "888" + "889" + "070" + "071" + "072" + "073" + "074" + "075" + "076" + "077" + "078" + "079" + "096" + "097" + "870" + "871" + "898" + "899" + "100" + "101" + "102" + "103" + "104" + "105" + "106" + "107" + "108" + "109" + "180" + "181" + "900" + "901" + "980" + "981" + "110" + "111" + "112" + "113" + "114" + "115" + "116" + "117" + "118" + "119" + "190" + "191" + "910" + "911" + "990" + "991" + "120" + "121" + "122" + "123" + "124" + "125" + "126" + "127" + "128" + "129" + "182" + "183" + "920" + "921" + "908" + "909" + "130" + "131" + "132" + "133" + 
"134" + "135" + "136" + "137" + "138" + "139" + "192" + "193" + "930" + "931" + "918" + "919" + "140" + "141" + "142" + "143" + "144" + "145" + "146" + "147" + "148" + "149" + "184" + "185" + "940" + "941" + "188" + "189" + "150" + "151" + "152" + "153" + "154" + "155" + "156" + "157" + "158" + "159" + "194" + "195" + "950" + "951" + "198" + "199" + "160" + "161" + "162" + "163" + "164" + "165" + "166" + "167" + "168" + "169" + "186" + "187" + "960" + "961" + "988" + "989" + "170" + "171" + "172" + "173" + "174" + "175" + "176" + "177" + "178" + "179" + "196" + "197" + "970" + "971" + "998" + "999" + "200" + "201" + "202" + "203" + "204" + "205" + "206" + "207" + "208" + "209" + "280" + "281" + "802" + "803" + "882" + "883" + "210" + "211" + "212" + "213" + "214" + "215" + "216" + "217" + "218" + "219" + "290" + "291" + "812" + "813" + "892" + "893" + "220" + "221" + "222" + "223" + "224" + "225" + "226" + "227" + "228" + "229" + "282" + "283" + "822" + "823" + "828" + "829" + "230" + "231" + "232" + "233" + "234" + "235" + "236" + "237" + "238" + "239" + "292" + "293" + "832" + "833" + "838" + "839" + "240" + "241" + "242" + "243" + "244" + "245" + "246" + "247" + "248" + "249" + "284" + "285" + "842" + "843" + "288" + "289" + "250" + "251" + "252" + "253" + "254" + "255" + "256" + "257" + "258" + "259" + "294" + "295" + "852" + "853" + "298" + "299" + "260" + "261" + "262" + "263" + "264" + "265" + "266" + "267" + "268" + "269" + "286" + "287" + "862" + "863" + "888" + "889" + "270" + "271" + "272" + "273" + "274" + "275" + "276" + "277" + "278" + "279" + "296" + "297" + "872" + "873" + "898" + "899" + "300" + "301" + "302" + "303" + "304" + "305" + "306" + "307" + "308" + "309" + "380" + "381" + "902" + "903" + "982" + "983" + "310" + "311" + "312" + "313" + "314" + "315" + "316" + "317" + "318" + "319" + "390" + "391" + "912" + "913" + "992" + "993" + "320" + "321" + "322" + "323" + "324" + "325" + "326" + "327" + "328" + "329" + "382" + "383" + "922" + "923" + 
"928" + "929" + "330" + "331" + "332" + "333" + "334" + "335" + "336" + "337" + "338" + "339" + "392" + "393" + "932" + "933" + "938" + "939" + "340" + "341" + "342" + "343" + "344" + "345" + "346" + "347" + "348" + "349" + "384" + "385" + "942" + "943" + "388" + "389" + "350" + "351" + "352" + "353" + "354" + "355" + "356" + "357" + "358" + "359" + "394" + "395" + "952" + "953" + "398" + "399" + "360" + "361" + "362" + "363" + "364" + "365" + "366" + "367" + "368" + "369" + "386" + "387" + "962" + "963" + "988" + "989" + "370" + "371" + "372" + "373" + "374" + "375" + "376" + "377" + "378" + "379" + "396" + "397" + "972" + "973" + "998" + "999" + "400" + "401" + "402" + "403" + "404" + "405" + "406" + "407" + "408" + "409" + "480" + "481" + "804" + "805" + "884" + "885" + "410" + "411" + "412" + "413" + "414" + "415" + "416" + "417" + "418" + "419" + "490" + "491" + "814" + "815" + "894" + "895" + "420" + "421" + "422" + "423" + "424" + "425" + "426" + "427" + "428" + "429" + "482" + "483" + "824" + "825" + "848" + "849" + "430" + "431" + "432" + "433" + "434" + "435" + "436" + "437" + "438" + "439" + "492" + "493" + "834" + "835" + "858" + "859" + "440" + "441" + "442" + "443" + "444" + "445" + "446" + "447" + "448" + "449" + "484" + "485" + "844" + "845" + "488" + "489" + "450" + "451" + "452" + "453" + "454" + "455" + "456" + "457" + "458" + "459" + "494" + "495" + "854" + "855" + "498" + "499" + "460" + "461" + "462" + "463" + "464" + "465" + "466" + "467" + "468" + "469" + "486" + "487" + "864" + "865" + "888" + "889" + "470" + "471" + "472" + "473" + "474" + "475" + "476" + "477" + "478" + "479" + "496" + "497" + "874" + "875" + "898" + "899" + "500" + "501" + "502" + "503" + "504" + "505" + "506" + "507" + "508" + "509" + "580" + "581" + "904" + "905" + "984" + "985" + "510" + "511" + "512" + "513" + "514" + "515" + "516" + "517" + "518" + "519" + "590" + "591" + "914" + "915" + "994" + "995" + "520" + "521" + "522" + "523" + "524" + "525" + "526" + "527" + 
"528" + "529" + "582" + "583" + "924" + "925" + "948" + "949" + "530" + "531" + "532" + "533" + "534" + "535" + "536" + "537" + "538" + "539" + "592" + "593" + "934" + "935" + "958" + "959" + "540" + "541" + "542" + "543" + "544" + "545" + "546" + "547" + "548" + "549" + "584" + "585" + "944" + "945" + "588" + "589" + "550" + "551" + "552" + "553" + "554" + "555" + "556" + "557" + "558" + "559" + "594" + "595" + "954" + "955" + "598" + "599" + "560" + "561" + "562" + "563" + "564" + "565" + "566" + "567" + "568" + "569" + "586" + "587" + "964" + "965" + "988" + "989" + "570" + "571" + "572" + "573" + "574" + "575" + "576" + "577" + "578" + "579" + "596" + "597" + "974" + "975" + "998" + "999" + "600" + "601" + "602" + "603" + "604" + "605" + "606" + "607" + "608" + "609" + "680" + "681" + "806" + "807" + "886" + "887" + "610" + "611" + "612" + "613" + "614" + "615" + "616" + "617" + "618" + "619" + "690" + "691" + "816" + "817" + "896" + "897" + "620" + "621" + "622" + "623" + "624" + "625" + "626" + "627" + "628" + "629" + "682" + "683" + "826" + "827" + "868" + "869" + "630" + "631" + "632" + "633" + "634" + "635" + "636" + "637" + "638" + "639" + "692" + "693" + "836" + "837" + "878" + "879" + "640" + "641" + "642" + "643" + "644" + "645" + "646" + "647" + "648" + "649" + "684" + "685" + "846" + "847" + "688" + "689" + "650" + "651" + "652" + "653" + "654" + "655" + "656" + "657" + "658" + "659" + "694" + "695" + "856" + "857" + "698" + "699" + "660" + "661" + "662" + "663" + "664" + "665" + "666" + "667" + "668" + "669" + "686" + "687" + "866" + "867" + "888" + "889" + "670" + "671" + "672" + "673" + "674" + "675" + "676" + "677" + "678" + "679" + "696" + "697" + "876" + "877" + "898" + "899" + "700" + "701" + "702" + "703" + "704" + "705" + "706" + "707" + "708" + "709" + "780" + "781" + "906" + "907" + "986" + "987" + "710" + "711" + "712" + "713" + "714" + "715" + "716" + "717" + "718" + "719" + "790" + "791" + "916" + "917" + "996" + "997" + "720" + "721" + 
"722" + "723" + "724" + "725" + "726" + "727" + "728" + "729" + "782" + "783" + "926" + "927" + "968" + "969" + "730" + "731" + "732" + "733" + "734" + "735" + "736" + "737" + "738" + "739" + "792" + "793" + "936" + "937" + "978" + "979" + "740" + "741" + "742" + "743" + "744" + "745" + "746" + "747" + "748" + "749" + "784" + "785" + "946" + "947" + "788" + "789" + "750" + "751" + "752" + "753" + "754" + "755" + "756" + "757" + "758" + "759" + "794" + "795" + "956" + "957" + "798" + "799" + "760" + "761" + "762" + "763" + "764" + "765" + "766" + "767" + "768" + "769" + "786" + "787" + "966" + "967" + "988" + "989" + "770" + "771" + "772" + "773" + "774" + "775" + "776" + "777" + "778" + "779" + "796" + "797" + "976" + "977" + "998" + "999" ).ToCharArray(); // from ICU decNumber decDPD.h static readonly int[] Bin2DPD = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 10, 11, 42, 43, 74, 75, 106, 107, 78, 79, 26, 27, 58, 59, 90, 91, 122, 123, 94, 95, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 138, 139, 170, 171, 202, 203, 234, 235, 206, 207, 154, 155, 186, 187, 218, 219, 250, 251, 222, 223, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 336, 
337, 338, 339, 340, 341, 342, 343, 344, 345, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 266, 267, 298, 299, 330, 331, 362, 363, 334, 335, 282, 283, 314, 315, 346, 347, 378, 379, 350, 351, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 480, 481, 482, 483, 484, 485, 486, 487, 488, 489, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505, 394, 395, 426, 427, 458, 459, 490, 491, 462, 463, 410, 411, 442, 443, 474, 475, 506, 507, 478, 479, 512, 513, 514, 515, 516, 517, 518, 519, 520, 521, 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569, 576, 577, 578, 579, 580, 581, 582, 583, 584, 585, 592, 593, 594, 595, 596, 597, 598, 599, 600, 601, 608, 609, 610, 611, 612, 613, 614, 615, 616, 617, 624, 625, 626, 627, 628, 629, 630, 631, 632, 633, 522, 523, 554, 555, 586, 587, 618, 619, 590, 591, 538, 539, 570, 571, 602, 603, 634, 635, 606, 607, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 656, 657, 658, 659, 660, 661, 662, 663, 664, 665, 672, 673, 674, 675, 676, 677, 678, 679, 680, 681, 688, 689, 690, 691, 692, 693, 694, 695, 696, 697, 704, 705, 706, 707, 708, 709, 710, 711, 712, 713, 720, 721, 722, 723, 724, 725, 726, 727, 728, 729, 736, 737, 738, 739, 740, 741, 742, 743, 744, 745, 752, 753, 754, 755, 756, 757, 758, 759, 760, 761, 650, 651, 682, 683, 714, 715, 746, 747, 718, 719, 666, 667, 698, 699, 730, 731, 762, 763, 734, 735, 768, 769, 770, 771, 772, 773, 774, 775, 776, 777, 784, 785, 786, 787, 788, 789, 790, 791, 792, 793, 800, 801, 802, 803, 804, 805, 806, 807, 808, 809, 816, 817, 818, 819, 820, 821, 822, 823, 824, 825, 832, 833, 834, 835, 836, 837, 838, 839, 840, 841, 848, 
849, 850, 851, 852, 853, 854, 855, 856, 857, 864, 865, 866, 867, 868, 869, 870, 871, 872, 873, 880, 881, 882, 883, 884, 885, 886, 887, 888, 889, 778, 779, 810, 811, 842, 843, 874, 875, 846, 847, 794, 795, 826, 827, 858, 859, 890, 891, 862, 863, 896, 897, 898, 899, 900, 901, 902, 903, 904, 905, 912, 913, 914, 915, 916, 917, 918, 919, 920, 921, 928, 929, 930, 931, 932, 933, 934, 935, 936, 937, 944, 945, 946, 947, 948, 949, 950, 951, 952, 953, 960, 961, 962, 963, 964, 965, 966, 967, 968, 969, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 992, 993, 994, 995, 996, 997, 998, 999, 1000, 1001, 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, 906, 907, 938, 939, 970, 971, 1002, 1003, 974, 975, 922, 923, 954, 955, 986, 987, 1018, 1019, 990, 991, 12, 13, 268, 269, 524, 525, 780, 781, 46, 47, 28, 29, 284, 285, 540, 541, 796, 797, 62, 63, 44, 45, 300, 301, 556, 557, 812, 813, 302, 303, 60, 61, 316, 317, 572, 573, 828, 829, 318, 319, 76, 77, 332, 333, 588, 589, 844, 845, 558, 559, 92, 93, 348, 349, 604, 605, 860, 861, 574, 575, 108, 109, 364, 365, 620, 621, 876, 877, 814, 815, 124, 125, 380, 381, 636, 637, 892, 893, 830, 831, 14, 15, 270, 271, 526, 527, 782, 783, 110, 111, 30, 31, 286, 287, 542, 543, 798, 799, 126, 127, 140, 141, 396, 397, 652, 653, 908, 909, 174, 175, 156, 157, 412, 413, 668, 669, 924, 925, 190, 191, 172, 173, 428, 429, 684, 685, 940, 941, 430, 431, 188, 189, 444, 445, 700, 701, 956, 957, 446, 447, 204, 205, 460, 461, 716, 717, 972, 973, 686, 687, 220, 221, 476, 477, 732, 733, 988, 989, 702, 703, 236, 237, 492, 493, 748, 749, 1004, 1005, 942, 943, 252, 253, 508, 509, 764, 765, 1020, 1021, 958, 959, 142, 143, 398, 399, 654, 655, 910, 911, 238, 239, 158, 159, 414, 415, 670, 671, 926, 927, 254, 255 }; //@formatter:on readonly int _numberOfDigits; readonly int _digitGroups; // Creates a densely packed decimal coder for the specified number of digits. 
// Current implementation only supports decoding and encoding `n * 3 + 1` number of digits with
// `n > 0`, where the most significant digit is provided by the caller during decoding.
public DenselyPackedDecimalCodec(int numberOfDigits)
{
	if (numberOfDigits / DigitsPerGroup <= 0 || numberOfDigits % DigitsPerGroup != 1)
		throw new ArgumentOutOfRangeException(nameof(numberOfDigits), $"{nameof(numberOfDigits)} must be of form n * 3 + 1 with n > 0, was {numberOfDigits}.");
	_numberOfDigits = numberOfDigits;
	_digitGroups = numberOfDigits / DigitsPerGroup;
}

// Decodes a densely packed decimal from a byte array to a BigInteger.
// Digits are read from the end of the array to the front.
public BigInteger DecodeValue(int signum, int firstDigit, byte[] decBytes)
{
	return DecodeValue(signum, firstDigit, decBytes, decBytes.Length - 1);
}

// Decodes a densely packed decimal from a byte array to a BigInteger.
// Digits are read from `lsbIndex` of the array to the front; `firstDigit`
// (0-9) is the most significant digit, supplied by the caller.
public BigInteger DecodeValue(int signum, int firstDigit, byte[] decBytes, int lsbIndex)
{
	if (firstDigit < 0 || firstDigit > 9)
		throw new ArgumentOutOfRangeException(nameof(firstDigit), $"{nameof(firstDigit)} must be in range 0 <= firstDigit <= 9, was {firstDigit}.");
	ValidateLsbIndex(lsbIndex, decBytes.Length);
	return DecodeValue0(signum, firstDigit, decBytes, lsbIndex);
}

// Encodes a BigInteger to a densely packed decimal in a byte array.
// Digits are written from the end of the array to the front. The most significant digit is not encoded
// into the array, but instead returned to the caller.
public int EncodeValue(BigInteger value, byte[] decBytes)
{
	return EncodeValue(BigInteger.Abs(value), decBytes, decBytes.Length - 1);
}

// Encodes a BigInteger to a densely packed decimal in a byte array.
// Digits are written from `lsbIndex` of the array to the front. The most significant digit is not encoded
// into the array, but instead returned to the caller.
public int EncodeValue(BigInteger value, byte[] decBytes, int lsbIndex)
{
	ValidateLsbIndex(lsbIndex, decBytes.Length);
	return EncodeValue0(BigInteger.Abs(value), decBytes, lsbIndex);
}

// Core decode: expands each 10-bit DPD group to 3 decimal digit characters
// via the lookup table, prepends the caller-supplied first digit, and parses
// the result into a BigInteger.
BigInteger DecodeValue0(int signum, int firstDigit, byte[] decBytes, int lsbIndex)
{
	var digitChars = CreateZeroedCharArray();
	for (var digitGroup = 0; digitGroup < _digitGroups; digitGroup++)
	{
		// Each digit group is 10 bits in two bytes in the array as [.., second, first, ..],
		// moving to the left for next digit groups. If there are unconsumed bits in the second byte,
		// the second byte becomes the first byte of the next group.
		var digitBitsFromEnd = digitGroup * BitsPerGroup;
		var firstByteBitOffset = digitBitsFromEnd % BitPerByte;
		var firstByteIndex = lsbIndex - digitBitsFromEnd / BitPerByte;
		var dpdGroupBits = 0x3FF & (
			(decBytes[firstByteIndex] & 0xFF) >>> firstByteBitOffset
			| decBytes[firstByteIndex - 1] << BitPerByte - firstByteBitOffset);
		if (dpdGroupBits != 0)
		{
			// group bits 0 would decode to "000", which the array already holds
			Array.Copy(DPDGroupBits2Digits, dpdGroupBits * DigitsPerGroup, digitChars, digitChars.Length - (digitGroup + 1) * DigitsPerGroup, DigitsPerGroup);
		}
	}
	if (firstDigit != 0)
	{
		// index 0 is reserved for a possible sign; the most significant digit goes to index 1
		// (direct char arithmetic instead of char.Parse(firstDigit.ToString());
		// firstDigit is validated to 0-9 by the public DecodeValue overload)
		digitChars[1] = (char)('0' + firstDigit);
	}
	return ToBigInteger(signum, digitChars);
}

// Core encode: peels 3 digits at a time off the value, packs each group of
// 1000 into 10 DPD bits, and ORs them into decBytes from lsbIndex backwards.
// Returns the remaining most significant digit (0-9).
int EncodeValue0(BigInteger value, byte[] decBytes, int lsbIndex)
{
	var remainingValue = value;
	for (var digitGroup = 0; digitGroup < _digitGroups; digitGroup++)
	{
		// Each digit group is 10 bits in two bytes in the array as [.., second, first, ..],
		// moving to the left for next digit groups. If there are unconsumed bits in the second byte,
		// the second byte becomes the first byte of the next group.
		var digitBitsFromEnd = digitGroup * BitsPerGroup;
		var firstByteBitOffset = digitBitsFromEnd % BitPerByte;
		var firstByteIndex = lsbIndex - digitBitsFromEnd / BitPerByte;
		remainingValue = BigInteger.DivRem(remainingValue, OneThousand, out var remainder);
		var currentGroup = Bin2DPD[(int)remainder];
		decBytes[firstByteIndex] = (byte)(decBytes[firstByteIndex] | (currentGroup << firstByteBitOffset));
		decBytes[firstByteIndex - 1] = (byte)(decBytes[firstByteIndex - 1] | (currentGroup >>> BitPerByte - firstByteBitOffset));
	}
	var mostSignificantDigit = (int)remainingValue;
	Debug.Assert(0 <= mostSignificantDigit && mostSignificantDigit <= 9, $"{nameof(mostSignificantDigit)} out of range, was {mostSignificantDigit}.");
	return mostSignificantDigit;
}

// Character buffer of '0's: one slot per digit plus one leading slot for the sign.
char[] CreateZeroedCharArray()
{
	var digitChars = new char[_numberOfDigits + 1];
	for (var i = 0; i < digitChars.Length; i++)
	{
		digitChars[i] = '0';
	}
	return digitChars;
}

// Checks that lsbIndex is inside the array and leaves room for all digit groups.
void ValidateLsbIndex(int lsbIndex, int decBytesLength)
{
	if (lsbIndex < 0 || lsbIndex >= decBytesLength)
	{
		throw new IndexOutOfRangeException($"{nameof(lsbIndex)} must be within array {nameof(decBytesLength)} with length of {decBytesLength}, was {lsbIndex}.");
	}
	if ((lsbIndex + 1) * BitPerByte < BitsPerGroup * _digitGroups)
	{
		throw new ArgumentException($"Need at least {(BitsPerGroup * _digitGroups + 7) / BitPerByte} bytes for value, have {lsbIndex + 1} (lsbIndex = {lsbIndex})");
	}
}

// Parses the digit characters into a BigInteger, applying the sign in place
// of the last leading zero when negative.
static BigInteger ToBigInteger(int signum, char[] digitChars)
{
	var digitCharIndex = FindFirstNonZero(digitChars);
	if (digitCharIndex == -1)
	{
		// All zeroes
		return BigInteger.Zero;
	}
	if (signum == NegativeSignum)
	{
		digitChars[--digitCharIndex] = '-';
	}
	var s = new string(digitChars, digitCharIndex, digitChars.Length - digitCharIndex);
	return BigInteger.Parse(s);
}

// Index of the first non-'0' character, or -1 when all characters are '0'.
static int FindFirstNonZero(char[] digitChars)
{
	for (var index = 0; index < digitChars.Length; index++)
	{
		if (digitChars[index] != '0')
		{
			return index;
		}
	}
	return -1;
}
}

================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Common/Descriptor.cs ================================================
/*
 * The contents of this file are subject to the Initial
 * Developer's Public License Version 1.0 (the "License");
 * you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 * Software distributed under the License is distributed on
 * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 * express or implied. See the License for the specific
 * language governing rights and limitations under the License.
 *
 * All Rights Reserved.
 */

//$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net)

using System;
using System.IO;

namespace FirebirdSql.Data.Common;

/// <summary>
/// Descriptor of query input and output parameters.
/// </summary>
/// <remarks>
/// This is similar to the XSQLDA structure described
/// in the Interbase 6.0 API docs.
/// </remarks>
internal sealed class Descriptor
{
	#region Fields

	private short _version;
	private short _count;
	private short _actualCount;
	private DbField[] _fields;

	#endregion

	#region Properties

	// SQLDA version of this descriptor.
	public short Version
	{
		get { return _version; }
	}

	// Number of allocated fields.
	public short Count
	{
		get { return _count; }
	}

	// Number of fields actually in use; writable so callers can shrink it.
	public short ActualCount
	{
		get { return _actualCount; }
		set { _actualCount = value; }
	}

	#endregion

	#region Indexers

	public DbField this[int index]
	{
		get { return _fields[index]; }
	}

	#endregion

	#region Constructors

	private Descriptor()
	{ }

	// Creates a descriptor with n freshly allocated, empty fields.
	public Descriptor(short n)
		: this()
	{
		_version = IscCodes.SQLDA_VERSION1;
		_count = n;
		_actualCount = n;
		_fields = new DbField[n];
		for (var i = 0; i < n; i++)
		{
			_fields[i] = new DbField();
		}
	}

	#endregion

	#region Methods

	// Clears the value of every field.
	public void ResetValues()
	{
		for (var i = 0; i < _fields.Length; i++)
		{
			_fields[i].SetValue(null);
		}
	}

	// BLR bytes describing this descriptor plus the aligned length of the data message.
	internal sealed class BlrData
	{
		public byte[] Data { get; }
		public int Length { get; }

		public BlrData(byte[] data, int length)
		{
			Data = data;
			Length = length;
		}
	}

	// Builds the BLR (Binary Language Representation) message describing all fields
	// and computes the total aligned byte length of the corresponding data message.
	public BlrData ToBlr()
	{
		using (var blr = new MemoryStream(256))
		{
			var length = 0;
			blr.WriteByte(IscCodes.blr_version5);
			blr.WriteByte(IscCodes.blr_begin);
			blr.WriteByte(IscCodes.blr_message);
			blr.WriteByte(0);
			// each field is followed by a null-indicator short, hence Count * 2 entries
			var par_count = Count * 2;
			blr.WriteByte((byte)(par_count & 255));
			blr.WriteByte((byte)(par_count >> 8));
			for (var i = 0; i < _fields.Length; i++)
			{
				var dtype = _fields[i].SqlType;
				int len = _fields[i].Length;
				// Per type: write the blr type code (plus length/scale operands where
				// required) and accumulate the aligned data-message length.
				switch (dtype)
				{
					case IscCodes.SQL_VARYING:
						blr.WriteByte(IscCodes.blr_varying);
						blr.WriteByte((byte)(len & 255));
						blr.WriteByte((byte)(len >> 8));
						length = TypeHelper.BlrAlign(length, 2);
						// data bytes plus the 2-byte length prefix
						length += len + 2;
						break;
					case IscCodes.SQL_TEXT:
						blr.WriteByte(IscCodes.blr_text);
						blr.WriteByte((byte)(len & 255));
						blr.WriteByte((byte)(len >> 8));
						// no align
						length += len;
						break;
					case IscCodes.SQL_DOUBLE:
						blr.WriteByte(IscCodes.blr_double);
						length = TypeHelper.BlrAlign(length, 8);
						length += 8;
						break;
					case IscCodes.SQL_FLOAT:
						blr.WriteByte(IscCodes.blr_float);
						length = TypeHelper.BlrAlign(length, 4);
						length += 4;
						break;
					case IscCodes.SQL_D_FLOAT:
						blr.WriteByte(IscCodes.blr_d_float);
						length = TypeHelper.BlrAlign(length, 8);
						length += 8;
						break;
					case IscCodes.SQL_TYPE_DATE:
						blr.WriteByte(IscCodes.blr_sql_date);
						length = TypeHelper.BlrAlign(length, 4);
						length += 4;
						break;
					case IscCodes.SQL_TYPE_TIME:
						blr.WriteByte(IscCodes.blr_sql_time);
						length = TypeHelper.BlrAlign(length, 4);
						length += 4;
						break;
					case IscCodes.SQL_TIMESTAMP:
						blr.WriteByte(IscCodes.blr_timestamp);
						// 8 bytes of data, aligned at 4 (two 32-bit halves)
						length = TypeHelper.BlrAlign(length, 4);
						length += 8;
						break;
					case IscCodes.SQL_BLOB:
						blr.WriteByte(IscCodes.blr_quad);
						blr.WriteByte(0);
						length = TypeHelper.BlrAlign(length, 4);
						length += 8;
						break;
					case IscCodes.SQL_ARRAY:
						blr.WriteByte(IscCodes.blr_quad);
						blr.WriteByte(0);
						length = TypeHelper.BlrAlign(length, 4);
						length += 8;
						break;
					case IscCodes.SQL_LONG:
						blr.WriteByte(IscCodes.blr_long);
						blr.WriteByte((byte)_fields[i].NumericScale);
						length = TypeHelper.BlrAlign(length, 4);
						length += 4;
						break;
					case IscCodes.SQL_SHORT:
						blr.WriteByte(IscCodes.blr_short);
						blr.WriteByte((byte)_fields[i].NumericScale);
						length = TypeHelper.BlrAlign(length, 2);
						length += 2;
						break;
					case IscCodes.SQL_INT64:
						blr.WriteByte(IscCodes.blr_int64);
						blr.WriteByte((byte)_fields[i].NumericScale);
						length = TypeHelper.BlrAlign(length, 8);
						length += 8;
						break;
					case IscCodes.SQL_QUAD:
						blr.WriteByte(IscCodes.blr_quad);
						blr.WriteByte((byte)_fields[i].NumericScale);
						length = TypeHelper.BlrAlign(length, 4);
						length += 8;
						break;
					case IscCodes.SQL_BOOLEAN:
						blr.WriteByte(IscCodes.blr_bool);
						length = TypeHelper.BlrAlign(length, 1);
						length += 1;
						break;
					case IscCodes.SQL_TIMESTAMP_TZ_EX:
						blr.WriteByte(IscCodes.blr_ex_timestamp_tz);
						length = TypeHelper.BlrAlign(length, 4);
						length += 12;
						break;
					case IscCodes.SQL_TIMESTAMP_TZ:
						blr.WriteByte(IscCodes.blr_timestamp_tz);
						length = TypeHelper.BlrAlign(length, 4);
						length += 10;
						break;
					case IscCodes.SQL_TIME_TZ:
						blr.WriteByte(IscCodes.blr_sql_time_tz);
						length = TypeHelper.BlrAlign(length, 4);
						length += 6;
						break;
					case IscCodes.SQL_TIME_TZ_EX:
						blr.WriteByte(IscCodes.blr_ex_time_tz);
						length = TypeHelper.BlrAlign(length, 4);
						length += 8;
						break;
					case IscCodes.SQL_DEC16:
						blr.WriteByte(IscCodes.blr_dec64);
						length = TypeHelper.BlrAlign(length, 8);
						length += 8;
						break;
					case IscCodes.SQL_DEC34:
						blr.WriteByte(IscCodes.blr_dec128);
						length = TypeHelper.BlrAlign(length, 8);
						length += 16;
						break;
					case IscCodes.SQL_INT128:
						blr.WriteByte(IscCodes.blr_int128);
						blr.WriteByte((byte)_fields[i].NumericScale);
						length = TypeHelper.BlrAlign(length, 8);
						length += 16;
						break;
					case IscCodes.SQL_NULL:
						blr.WriteByte(IscCodes.blr_text);
						blr.WriteByte((byte)(len & 255));
						blr.WriteByte((byte)(len >> 8));
						// no align
						length += len;
						break;
				}
				// null indicator for the field
				blr.WriteByte(IscCodes.blr_short);
				blr.WriteByte(0);
				length = TypeHelper.BlrAlign(length, 2);
				length += 2;
			}
			blr.WriteByte(IscCodes.blr_end);
			blr.WriteByte(IscCodes.blr_eoc);
			return new BlrData(blr.ToArray(), length);
		}
	}

	#endregion
}

================================================ FILE:
src/FirebirdSql.Data.FirebirdClient/Common/EmptyDescriptorFiller.cs ================================================
/*
 * The contents of this file are subject to the Initial
 * Developer's Public License Version 1.0 (the "License");
 * you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 * Software distributed under the License is distributed on
 * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 * express or implied. See the License for the specific
 * language governing rights and limitations under the License.
 *
 * All Rights Reserved.
 */

//$Authors = Jiri Cincura (jiri@cincura.net)

using System.Threading;
using System.Threading.Tasks;

namespace FirebirdSql.Data.Common;

// No-op IDescriptorFiller for statements that have no parameter values to set.
internal sealed class EmptyDescriptorFiller : IDescriptorFiller
{
	// Shared singleton; the class holds no state.
	public static readonly EmptyDescriptorFiller Instance = new EmptyDescriptorFiller();

	private EmptyDescriptorFiller()
	{ }

	// Intentionally does nothing.
	public void Fill(Descriptor descriptor, int index) { }

	// Intentionally does nothing; completes synchronously.
	public ValueTask FillAsync(Descriptor descriptor, int index, CancellationToken cancellationToken = default)
	{
		return ValueTask.CompletedTask;
	}
}

================================================ FILE: src/FirebirdSql.Data.FirebirdClient/Common/EventParameterBuffer.cs ================================================
/*
 * The contents of this file are subject to the Initial
 * Developer's Public License Version 1.0 (the "License");
 * you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 * https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 * Software distributed under the License is distributed on
 * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 * express or implied. See the License for the specific
 * language governing rights and limitations under the License.
 *
 * All Rights Reserved.
*/

//$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net)

using System.Text;

namespace FirebirdSql.Data.Common;

/// <summary>
/// Parameter buffer for event registration: each appended item is a
/// length-prefixed event name followed by its current counter value.
/// </summary>
internal sealed class EventParameterBuffer : ParameterBuffer
{
	/// <summary>Encoding used to serialize event names passed as strings.</summary>
	public Encoding Encoding { get; }

	public EventParameterBuffer(Encoding encoding)
	{
		Encoding = encoding;
	}

	/// <summary>Appends raw event-name bytes followed by the counter value.</summary>
	public void Append(byte[] content, int actualCount)
	{
		// Length prefix first, then the name bytes, then the counter.
		WriteByte(content.Length);
		Write(content);
		Write(actualCount);
	}

	/// <summary>Encodes <paramref name="content"/> with <see cref="Encoding"/> and appends it.</summary>
	public void Append(string content, int actualCount)
		=> Append(Encoding.GetBytes(content), actualCount);
}

================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Common/ExplicitCancellation.cs
================================================
/*
 *	The contents of this file are subject to the Initial
 *	Developer's Public License Version 1.0 (the "License");
 *	you may not use this file except in compliance with the
 *	License. You may obtain a copy of the License at
 *	https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *	Software distributed under the License is distributed on
 *	an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *	express or implied. See the License for the specific
 *	language governing rights and limitations under the License.
 *
 *	All Rights Reserved.
*/

//$Authors = Jiri Cincura (jiri@cincura.net)

using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Threading;

namespace FirebirdSql.Data.Common;

/// <summary>
/// Scoped helper that wires a caller-supplied cancel action to a
/// <see cref="CancellationToken"/>; disposing the returned scope unregisters it.
/// </summary>
internal static class ExplicitCancellation
{
	/// <summary>
	/// Registers <paramref name="explicitCancel"/> on <paramref name="cancellationToken"/>.
	/// If the token is already cancelled, the action runs immediately and
	/// <see cref="OperationCanceledException"/> is thrown.
	/// </summary>
	public static ExplicitCancel Enter(CancellationToken cancellationToken, Action explicitCancel)
	{
		if (!cancellationToken.IsCancellationRequested)
		{
			return new ExplicitCancel(cancellationToken.Register(explicitCancel));
		}
		explicitCancel();
		throw new OperationCanceledException(cancellationToken);
	}

	[MethodImpl(MethodImplOptions.AggressiveInlining)]
	static void ExitExplicitCancel(CancellationTokenRegistration registration)
		=> registration.Dispose();

	/// <summary>Disposable scope; dispose to stop listening for cancellation.</summary>
	[StructLayout(LayoutKind.Auto)]
	internal readonly struct ExplicitCancel : IDisposable
	{
		readonly CancellationTokenRegistration _registration;

		public ExplicitCancel(CancellationTokenRegistration cancellationTokenRegistration)
		{
			_registration = cancellationTokenRegistration;
		}

		public void Dispose()
			=> ExitExplicitCancel(_registration);

		// Inside the scope, downstream calls are handed a token that never fires.
		public readonly CancellationToken CancellationToken => CancellationToken.None;
	}
}

================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Common/Extensions.cs
================================================
/*
 *	The contents of this file are subject to the Initial
 *	Developer's Public License Version 1.0 (the "License");
 *	you may not use this file except in compliance with the
 *	License. You may obtain a copy of the License at
 *	https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *	Software distributed under the License is distributed on
 *	an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *	express or implied. See the License for the specific
 *	language governing rights and limitations under the License.
 *
 *	All Rights Reserved.
*/

//$Authors = Jiri Cincura (jiri@cincura.net)

using System;
using System.Collections.Generic;
using System.Globalization;
using System.IO;
using System.Linq;
using System.Text;

namespace FirebirdSql.Data.Common;

/// <summary>Small extension members shared across the provider.</summary>
// NOTE(review): the generic type arguments in this file were dropped during
// extraction (e.g. "IEnumerable>"); restored below as IEnumerable<IEnumerable<T>>,
// extension<T>(T[]), and IEnumerable<char[]>.
internal static class Extensions
{
	extension(IntPtr ptr)
	{
		/// <summary>Narrows the pointer value to a 32-bit int (truncates on 64-bit).</summary>
		public int AsInt()
		{
			return (int)ptr.ToInt64();
		}
	}
	extension(BinaryReader binaryReader)
	{
		/// <summary>Reads a pointer-sized integer (4 or 8 bytes depending on platform).</summary>
		public IntPtr ReadIntPtr()
		{
			if (IntPtr.Size == sizeof(int))
			{
				return new IntPtr(binaryReader.ReadInt32());
			}
			else if (IntPtr.Size == sizeof(long))
			{
				return new IntPtr(binaryReader.ReadInt64());
			}
			else
			{
				throw new NotSupportedException();
			}
		}
	}
	extension(byte[] b)
	{
		/// <summary>Uppercase hexadecimal representation of the bytes.</summary>
		public string ToHexString()
		{
			return Convert.ToHexString(b);
		}
	}
	extension<T>(T[] array)
	{
		/// <summary>Splits the array into consecutive chunks of at most <paramref name="size"/> items; the last chunk may be shorter.</summary>
		public IEnumerable<IEnumerable<T>> Split(int size)
		{
			// float division so a partial trailing chunk still produces one more iteration
			for (var i = 0; i < (float)array.Length / size; i++)
			{
				yield return array.Skip(i * size).Take(size);
			}
		}
	}
	extension(string s)
	{
		/// <summary>Enumerates the string rune by rune, yielding each rune's UTF-16 char sequence (1 or 2 chars).</summary>
		public IEnumerable<char[]> EnumerateRunesToChars()
		{
			if (s == null)
				throw new ArgumentNullException(nameof(s));
			return s.EnumerateRunes().Select(r =>
			{
				var result = new char[r.Utf16SequenceLength];
				r.EncodeToUtf16(result);
				return result;
			});
		}
	}
	extension(Encoding)
	{
		/// <summary>Current culture's ANSI code page encoding; falls back to <see cref="Encoding.Default"/> when unavailable.</summary>
		public static Encoding GetANSIEncoding()
		{
			try
			{
				return Encoding.GetEncoding(CultureInfo.CurrentCulture.TextInfo.ANSICodePage);
			}
			catch (Exception)
			{
				return Encoding.Default;
			}
		}
	}
}

================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Common/IDescriptorFiller.cs
================================================
/*
 *	The contents of this file are subject to the Initial
 *	Developer's Public License Version 1.0 (the "License");
 *	you may not use this file except in compliance with the
 *	License. You may obtain a copy of the License at
 *	https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *	Software distributed under the License is distributed on
 *	an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *	express or implied.
*	See the License for the specific
 *	language governing rights and limitations under the License.
 *
 *	All Rights Reserved.
 */

//$Authors = Jiri Cincura (jiri@cincura.net)

using System.Threading;
using System.Threading.Tasks;

namespace FirebirdSql.Data.Common;

/// <summary>
/// Supplies parameter values into a Descriptor, one row at a time, either
/// synchronously or asynchronously.
/// </summary>
internal interface IDescriptorFiller
{
	void Fill(Descriptor descriptor, int index);
	ValueTask FillAsync(Descriptor descriptor, int index, CancellationToken cancellationToken = default);
}

================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Common/InfoValuesConverter.cs
================================================
/*
 *	The contents of this file are subject to the Initial
 *	Developer's Public License Version 1.0 (the "License");
 *	you may not use this file except in compliance with the
 *	License. You may obtain a copy of the License at
 *	https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *	Software distributed under the License is distributed on
 *	an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *	express or implied. See the License for the specific
 *	language governing rights and limitations under the License.
 *
 *	All Rights Reserved.
 */

//$Authors = Jiri Cincura (jiri@cincura.net)

using System;

namespace FirebirdSql.Data.Common;

internal static class InfoValuesHelper
{
	/// <summary>
	/// Converts an info value to <typeparamref name="T"/>: IConvertible values go
	/// through <see cref="Convert.ChangeType(object, Type)"/>, anything else is cast directly.
	/// </summary>
	// NOTE(review): the generic parameter was dropped in extraction; restored as <T>.
	public static T ConvertValue<T>(object value)
		=> value is IConvertible ? (T)Convert.ChangeType(value, typeof(T)) : (T)value;
}

================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Common/Int128Helper.cs
================================================
/*
 *	The contents of this file are subject to the Initial
 *	Developer's Public License Version 1.0 (the "License");
 *	you may not use this file except in compliance with the
 *	License. You may obtain a copy of the License at
 *	https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
*
 *	Software distributed under the License is distributed on
 *	an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *	express or implied. See the License for the specific
 *	language governing rights and limitations under the License.
 *
 *	All Rights Reserved.
 */

//$Authors = Jiri Cincura (jiri@cincura.net)

using System;
using System.Diagnostics;
using System.Numerics;

namespace FirebirdSql.Data.Common;

/// <summary>
/// Conversions between the 16-byte little-endian two's-complement INT128 wire
/// representation and <see cref="BigInteger"/>.
/// </summary>
internal static class Int128Helper
{
	/// <summary>
	/// Builds a BigInteger from a 16-byte little-endian buffer.
	/// NOTE: on big-endian platforms the caller's array is reversed in place.
	/// </summary>
	public static BigInteger GetInt128(byte[] value)
	{
		Debug.Assert(value.Length == 16);
		if (!BitConverter.IsLittleEndian)
		{
			Array.Reverse(value);
		}
		return new BigInteger(value);
	}

	/// <summary>
	/// Serializes <paramref name="value"/> into exactly 16 bytes, sign-extended.
	/// </summary>
	/// <exception cref="ArgumentOutOfRangeException">The value needs more than 128 bits.</exception>
	public static byte[] GetBytes(BigInteger value)
	{
		var result = value.ToByteArray();
		if (result.Length > 16)
		{
			// Fixed: the message used to be passed as the paramName argument of the
			// single-string ctor, showing up as a bogus parameter name.
			throw new ArgumentOutOfRangeException(nameof(value), "Value too big for Int128.");
		}
		if (result.Length < 16)
		{
			// Sign-extend: negative values pad the high bytes with 0xFF, others with 0x00.
			var tmp = new byte[16];
			if (value.Sign == -1)
			{
				Array.Fill(tmp, (byte)255);
			}
			Buffer.BlockCopy(result, 0, tmp, 0, result.Length);
			result = tmp;
		}
		if (!BitConverter.IsLittleEndian)
		{
			Array.Reverse(result);
		}
		return result;
	}
}

================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Common/IscCodes.cs
================================================
/*
 *	The contents of this file are subject to the Initial
 *	Developer's Public License Version 1.0 (the "License");
 *	you may not use this file except in compliance with the
 *	License. You may obtain a copy of the License at
 *	https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *	Software distributed under the License is distributed on
 *	an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *	express or implied. See the License for the specific
 *	language governing rights and limitations under the License.
 *
 *	All Rights Reserved.
*/ //$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net) // This file was originally ported from Jaybird using System; namespace FirebirdSql.Data.Common; internal static class IscCodes { #region General public const int SQLDA_VERSION1 = 1; public const int SQL_DIALECT_V5 = 1; public const int SQL_DIALECT_V6_TRANSITION = 2; public const int SQL_DIALECT_V6 = 3; public const int SQL_DIALECT_CURRENT = SQL_DIALECT_V6; public const int DSQL_close = 1; public const int DSQL_drop = 2; public const int ARRAY_DESC_COLUMN_MAJOR = 1; /* Set for FORTRAN */ public const int ISC_STATUS_LENGTH = 20; public const ushort INVALID_OBJECT = 0xFFFF; #endregion #region Buffer sizes public const int BUFFER_SIZE_128 = 128; public const int BUFFER_SIZE_256 = 256; public const int BUFFER_SIZE_32K = 32768; public const int DEFAULT_MAX_BUFFER_SIZE = 8192; public const int ROWS_AFFECTED_BUFFER_SIZE = 34; public const int STATEMENT_TYPE_BUFFER_SIZE = 8; public const int PREPARE_INFO_BUFFER_SIZE = 32768; #endregion #region Protocol Codes public const int GenericAchitectureClient = 1; public const int CONNECT_VERSION2 = 2; public const int CONNECT_VERSION3 = 3; public const int PROTOCOL_VERSION3 = 3; public const int PROTOCOL_VERSION4 = 4; public const int PROTOCOL_VERSION5 = 5; public const int PROTOCOL_VERSION6 = 6; public const int PROTOCOL_VERSION7 = 7; public const int PROTOCOL_VERSION8 = 8; public const int PROTOCOL_VERSION9 = 9; public const int PROTOCOL_VERSION10 = 10; public const int FB_PROTOCOL_FLAG = 0x8000; public const int FB_PROTOCOL_MASK = ~FB_PROTOCOL_FLAG; public const int PROTOCOL_VERSION11 = FB_PROTOCOL_FLAG | 11; public const int PROTOCOL_VERSION12 = FB_PROTOCOL_FLAG | 12; public const int PROTOCOL_VERSION13 = FB_PROTOCOL_FLAG | 13; public const int PROTOCOL_VERSION15 = FB_PROTOCOL_FLAG | 15; public const int PROTOCOL_VERSION16 = FB_PROTOCOL_FLAG | 16; public const int p_cnct_min_type = 0; public const int ptype_rpc = 2; public const int ptype_batch_send = 3; 
public const int ptype_out_of_band = 4; public const int ptype_lazy_send = 5; public const int pflag_compress = 0x100; public const int WIRE_CRYPT_DISABLED = 0; public const int WIRE_CRYPT_ENABLED = 1; public const int WIRE_CRYPT_REQUIRED = 2; #endregion #region Statement Flags public const int STMT_DEFER_EXECUTE = 4; #endregion #region Server Class public const int isc_info_db_class_classic_access = 13; public const int isc_info_db_class_server_access = 14; #endregion #region Operation Codes // Operation (packet) types public const int op_void = 0; // Packet has been voided public const int op_connect = 1; // Connect to remote server public const int op_exit = 2; // Remote end has exitted public const int op_accept = 3; // Server accepts connection public const int op_reject = 4; // Server rejects connection public const int op_protocol = 5; // Protocol selection public const int op_disconnect = 6; // Connect is going away public const int op_credit = 7; // Grant (buffer) credits public const int op_continuation = 8; // Continuation packet public const int op_response = 9; // Generic response block // Page server operations public const int op_open_file = 10; // Open file for page service public const int op_create_file = 11; // Create file for page service public const int op_close_file = 12; // Close file for page service public const int op_read_page = 13; // optionally lock and read page public const int op_write_page = 14; // write page and optionally release lock public const int op_lock = 15; // sieze lock public const int op_convert_lock = 16; // convert existing lock public const int op_release_lock = 17; // release existing lock public const int op_blocking = 18; // blocking lock message // Full context server operations public const int op_attach = 19; // Attach database public const int op_create = 20; // Create database public const int op_detach = 21; // Detach database public const int op_compile = 22; // Request based operations public const int 
op_start = 23; public const int op_start_and_send = 24; public const int op_send = 25; public const int op_receive = 26; public const int op_unwind = 27; public const int op_release = 28; public const int op_transaction = 29; // Transaction operations public const int op_commit = 30; public const int op_rollback = 31; public const int op_prepare = 32; public const int op_reconnect = 33; public const int op_create_blob = 34; // Blob operations // public const int op_open_blob = 35; public const int op_get_segment = 36; public const int op_put_segment = 37; public const int op_cancel_blob = 38; public const int op_close_blob = 39; public const int op_info_database = 40; // Information services public const int op_info_request = 41; public const int op_info_transaction = 42; public const int op_info_blob = 43; public const int op_batch_segments = 44; // Put a bunch of blob segments public const int op_mgr_set_affinity = 45; // Establish server affinity public const int op_mgr_clear_affinity = 46; // Break server affinity public const int op_mgr_report = 47; // Report on server public const int op_que_events = 48; // Que event notification request public const int op_cancel_events = 49; // Cancel event notification request public const int op_commit_retaining = 50; // Commit retaining (what else) public const int op_prepare2 = 51; // Message form of prepare public const int op_event = 52; // Completed event request (asynchronous) public const int op_connect_request = 53; // Request to establish connection public const int op_aux_connect = 54; // Establish auxiliary connection public const int op_ddl = 55; // DDL call public const int op_open_blob2 = 56; public const int op_create_blob2 = 57; public const int op_get_slice = 58; public const int op_put_slice = 59; public const int op_slice = 60; // Successful response to public const int op_get_slice public const int op_seek_blob = 61; // Blob seek operation // DSQL operations // public const int op_allocate_statement = 
62; // allocate a statment handle public const int op_execute = 63; // execute a prepared statement public const int op_exec_immediate = 64; // execute a statement public const int op_fetch = 65; // fetch a record public const int op_fetch_response = 66; // response for record fetch public const int op_free_statement = 67; // free a statement public const int op_prepare_statement = 68; // prepare a statement public const int op_set_cursor = 69; // set a cursor name public const int op_info_sql = 70; public const int op_dummy = 71; // dummy packet to detect loss of client public const int op_response_piggyback = 72; // response block for piggybacked messages public const int op_start_and_receive = 73; public const int op_start_send_and_receive = 74; public const int op_exec_immediate2 = 75; // execute an immediate statement with msgs public const int op_execute2 = 76; // execute a statement with msgs public const int op_insert = 77; public const int op_sql_response = 78; // response from execute; exec immed; insert public const int op_transact = 79; public const int op_transact_response = 80; public const int op_drop_database = 81; public const int op_service_attach = 82; public const int op_service_detach = 83; public const int op_service_info = 84; public const int op_service_start = 85; public const int op_rollback_retaining = 86; // Two following opcode are used in vulcan. // No plans to implement them completely for a while, but to // support protocol 11, where they are used, have them here. 
public const int op_update_account_info = 87; public const int op_authenticate_user = 88; public const int op_partial = 89; // packet is not complete - delay processing public const int op_trusted_auth = 90; public const int op_cancel = 91; public const int op_cont_auth = 92; public const int op_ping = 93; public const int op_accept_data = 94; public const int op_abort_aux_connection = 95; public const int op_crypt = 96; public const int op_crypt_key_callback = 97; public const int op_cond_accept = 98; public const int op_batch_create = 99; public const int op_batch_msg = 100; public const int op_batch_exec = 101; public const int op_batch_rls = 102; public const int op_batch_cs = 103; public const int op_batch_regblob = 104; public const int op_batch_blob_stream = 105; public const int op_batch_set_bpb = 106; public const int op_repl_data = 107; public const int op_repl_req = 108; public const int op_batch_cancel = 109; #endregion #region Database Parameter Block public const int isc_dpb_version1 = 1; public const int isc_dpb_version2 = 2; public const int isc_dpb_cdd_pathname = 1; public const int isc_dpb_allocation = 2; public const int isc_dpb_journal = 3; public const int isc_dpb_page_size = 4; public const int isc_dpb_num_buffers = 5; public const int isc_dpb_buffer_length = 6; public const int isc_dpb_debug = 7; public const int isc_dpb_garbage_collect = 8; public const int isc_dpb_verify = 9; public const int isc_dpb_sweep = 10; public const int isc_dpb_enable_journal = 11; public const int isc_dpb_disable_journal = 12; public const int isc_dpb_dbkey_scope = 13; public const int isc_dpb_number_of_users = 14; public const int isc_dpb_trace = 15; public const int isc_dpb_no_garbage_collect = 16; public const int isc_dpb_damaged = 17; public const int isc_dpb_license = 18; public const int isc_dpb_sys_user_name = 19; public const int isc_dpb_encrypt_key = 20; public const int isc_dpb_activate_shadow = 21; public const int isc_dpb_sweep_interval = 22; public 
const int isc_dpb_delete_shadow = 23; public const int isc_dpb_force_write = 24; public const int isc_dpb_begin_log = 25; public const int isc_dpb_quit_log = 26; public const int isc_dpb_no_reserve = 27; public const int isc_dpb_user_name = 28; public const int isc_dpb_password = 29; public const int isc_dpb_password_enc = 30; public const int isc_dpb_sys_user_name_enc = 31; public const int isc_dpb_interp = 32; public const int isc_dpb_online_dump = 33; public const int isc_dpb_old_file_size = 34; public const int isc_dpb_old_num_files = 35; public const int isc_dpb_old_file = 36; public const int isc_dpb_old_start_page = 37; public const int isc_dpb_old_start_seqno = 38; public const int isc_dpb_old_start_file = 39; public const int isc_dpb_drop_walfile = 40; public const int isc_dpb_old_dump_id = 41; public const int isc_dpb_wal_backup_dir = 42; public const int isc_dpb_wal_chkptlen = 43; public const int isc_dpb_wal_numbufs = 44; public const int isc_dpb_wal_bufsize = 45; public const int isc_dpb_wal_grp_cmt_wait = 46; public const int isc_dpb_lc_messages = 47; public const int isc_dpb_lc_ctype = 48; public const int isc_dpb_cache_manager = 49; public const int isc_dpb_shutdown = 50; public const int isc_dpb_online = 51; public const int isc_dpb_shutdown_delay = 52; public const int isc_dpb_reserved = 53; public const int isc_dpb_overwrite = 54; public const int isc_dpb_sec_attach = 55; public const int isc_dpb_disable_wal = 56; public const int isc_dpb_connect_timeout = 57; public const int isc_dpb_dummy_packet_interval = 58; public const int isc_dpb_gbak_attach = 59; public const int isc_dpb_sql_role_name = 60; public const int isc_dpb_set_page_buffers = 61; public const int isc_dpb_working_directory = 62; public const int isc_dpb_sql_dialect = 63; public const int isc_dpb_set_db_readonly = 64; public const int isc_dpb_set_db_sql_dialect = 65; public const int isc_dpb_gfix_attach = 66; public const int isc_dpb_gstat_attach = 67; public const int 
isc_dpb_set_db_charset = 68; public const int isc_dpb_gsec_attach = 69; public const int isc_dpb_address_path = 70; public const int isc_dpb_process_id = 71; public const int isc_dpb_no_db_triggers = 72; public const int isc_dpb_trusted_auth = 73; public const int isc_dpb_process_name = 74; public const int isc_dpb_trusted_role = 75; public const int isc_dpb_org_filename = 76; public const int isc_dpb_utf8_filename = 77; public const int isc_dpb_ext_call_depth = 78; public const int isc_dpb_auth_block = 79; public const int isc_dpb_client_version = 80; public const int isc_dpb_remote_protocol = 81; public const int isc_dpb_host_name = 82; public const int isc_dpb_os_user = 83; public const int isc_dpb_specific_auth_data = 84; public const int isc_dpb_auth_plugin_list = 85; public const int isc_dpb_auth_plugin_name = 86; public const int isc_dpb_config = 87; public const int isc_dpb_nolinger = 88; public const int isc_dpb_reset_icu = 89; public const int isc_dpb_map_attach = 90; public const int isc_dpb_session_time_zone = 91; public const int isc_dpb_set_db_replica = 92; public const int isc_dpb_set_bind = 93; public const int isc_dpb_decfloat_round = 94; public const int isc_dpb_decfloat_traps = 95; public const int isc_dpb_clear_map = 96; public const int isc_dpb_parallel_workers = 100; public const int isc_dpb_worker_attach = 101; #endregion #region Transaction Parameter Block public const int isc_tpb_version1 = 1; public const int isc_tpb_version3 = 3; public const int isc_tpb_consistency = 1; public const int isc_tpb_concurrency = 2; public const int isc_tpb_shared = 3; public const int isc_tpb_protected = 4; public const int isc_tpb_exclusive = 5; public const int isc_tpb_wait = 6; public const int isc_tpb_nowait = 7; public const int isc_tpb_read = 8; public const int isc_tpb_write = 9; public const int isc_tpb_lock_read = 10; public const int isc_tpb_lock_write = 11; public const int isc_tpb_verb_time = 12; public const int isc_tpb_commit_time = 13; public 
const int isc_tpb_ignore_limbo = 14; public const int isc_tpb_read_committed = 15; public const int isc_tpb_autocommit = 16; public const int isc_tpb_rec_version = 17; public const int isc_tpb_no_rec_version = 18; public const int isc_tpb_restart_requests = 19; public const int isc_tpb_no_auto_undo = 20; public const int isc_tpb_lock_timeout = 21; public const int isc_tpb_read_consistency = 22; public const int isc_tpb_at_snapshot_number = 23; #endregion #region Services Parameter Block public const int isc_spb_version1 = 1; public const int isc_spb_current_version = 2; public const int isc_spb_version = isc_spb_current_version; public const int isc_spb_version3 = 3; public const int isc_spb_user_name = isc_dpb_user_name; public const int isc_spb_sys_user_name = isc_dpb_sys_user_name; public const int isc_spb_sys_user_name_enc = isc_dpb_sys_user_name_enc; public const int isc_spb_password = isc_dpb_password; public const int isc_spb_password_enc = isc_dpb_password_enc; public const int isc_spb_command_line = 105; public const int isc_spb_dbname = 106; public const int isc_spb_verbose = 107; public const int isc_spb_options = 108; public const int isc_spb_address_path = 109; public const int isc_spb_process_id = 110; public const int isc_spb_trusted_auth = 111; public const int isc_spb_process_name = 112; public const int isc_spb_trusted_role = 113; public const int isc_spb_verbint = 114; public const int isc_spb_auth_block = 115; public const int isc_spb_auth_plugin_name = 116; public const int isc_spb_auth_plugin_list = 117; public const int isc_spb_utf8_filename = 118; public const int isc_spb_client_version = 119; public const int isc_spb_remote_protocol = 120; public const int isc_spb_host_name = 121; public const int isc_spb_os_user = 122; public const int isc_spb_config = 123; public const int isc_spb_expected_db = 124; public const int isc_spb_connect_timeout = isc_dpb_connect_timeout; public const int isc_spb_dummy_packet_interval = 
isc_dpb_dummy_packet_interval; public const int isc_spb_sql_role_name = isc_dpb_sql_role_name; public const int isc_spb_specific_auth_data = isc_spb_trusted_auth; public const int isc_spb_num_att = 5; public const int isc_spb_num_db = 6; #endregion #region Services Actions public const int isc_action_svc_backup = 1; /* Starts database backup process on the server */ public const int isc_action_svc_restore = 2; /* Starts database restore process on the server */ public const int isc_action_svc_repair = 3; /* Starts database repair process on the server */ public const int isc_action_svc_add_user = 4; /* Adds a new user to the security database */ public const int isc_action_svc_delete_user = 5; /* Deletes a user record from the security database */ public const int isc_action_svc_modify_user = 6; /* Modifies a user record in the security database */ public const int isc_action_svc_display_user = 7; /* Displays a user record from the security database */ public const int isc_action_svc_properties = 8; /* Sets database properties */ public const int isc_action_svc_add_license = 9; /* Adds a license to the license file */ public const int isc_action_svc_remove_license = 10; /* Removes a license from the license file */ public const int isc_action_svc_db_stats = 11; /* Retrieves database statistics */ public const int isc_action_svc_get_ib_log = 12; /* Retrieves the InterBase log file from the server */ public const int isc_action_svc_get_fb_log = 12; /* Retrieves the Firebird log file from the server */ public const int isc_action_svc_nbak = 20; /* Incremental nbackup */ public const int isc_action_svc_nrest = 21; /* Incremental database restore */ public const int isc_action_svc_trace_start = 22; // Start trace session public const int isc_action_svc_trace_stop = 23; // Stop trace session public const int isc_action_svc_trace_suspend = 24; // Suspend trace session public const int isc_action_svc_trace_resume = 25; // Resume trace session public const int 
isc_action_svc_trace_list = 26; // List existing sessions public const int isc_action_svc_set_mapping = 27; // Set auto admins mapping in security database public const int isc_action_svc_drop_mapping = 28; // Drop auto admins mapping in security database public const int isc_action_svc_display_user_adm = 29; // Displays user(s) from security database with admin info public const int isc_action_svc_validate = 30; // Starts database online validation public const int isc_action_svc_nfix = 31; // Fixup database after file system copy #endregion #region Services Information public const int isc_info_svc_svr_db_info = 50; /* Retrieves the number of attachments and databases */ public const int isc_info_svc_get_license = 51; /* Retrieves all license keys and IDs from the license file */ public const int isc_info_svc_get_license_mask = 52; /* Retrieves a bitmask representing licensed options on the server */ public const int isc_info_svc_get_config = 53; /* Retrieves the parameters and values for IB_CONFIG */ public const int isc_info_svc_version = 54; /* Retrieves the version of the services manager */ public const int isc_info_svc_server_version = 55; /* Retrieves the version of the InterBase server */ public const int isc_info_svc_implementation = 56; /* Retrieves the implementation of the InterBase server */ public const int isc_info_svc_capabilities = 57; /* Retrieves a bitmask representing the server's capabilities */ public const int isc_info_svc_user_dbpath = 58; /* Retrieves the path to the security database in use by the server */ public const int isc_info_svc_get_env = 59; /* Retrieves the setting of $INTERBASE */ public const int isc_info_svc_get_env_lock = 60; /* Retrieves the setting of $INTERBASE_LCK */ public const int isc_info_svc_get_env_msg = 61; /* Retrieves the setting of $INTERBASE_MSG */ public const int isc_info_svc_line = 62; /* Retrieves 1 line of service output per call */ public const int isc_info_svc_to_eof = 63; /* Retrieves as much of the 
server output as will fit in the supplied buffer */ public const int isc_info_svc_timeout = 64; /* Sets / signifies a timeout value for reading service information */ public const int isc_info_svc_get_licensed_users = 65; /* Retrieves the number of users licensed for accessing the server */ public const int isc_info_svc_limbo_trans = 66; /* Retrieve the limbo transactions */ public const int isc_info_svc_running = 67; /* Checks to see if a service is running on an attachment */ public const int isc_info_svc_get_users = 68; /* Returns the user information from isc_action_svc_display_users */ public const int isc_info_svc_stdin = 78; /* Returns size of data, needed as stdin for service */ #endregion #region Services Properties public const int isc_spb_prp_page_buffers = 5; public const int isc_spb_prp_sweep_interval = 6; public const int isc_spb_prp_shutdown_db = 7; public const int isc_spb_prp_deny_new_attachments = 9; public const int isc_spb_prp_deny_new_transactions = 10; public const int isc_spb_prp_reserve_space = 11; public const int isc_spb_prp_write_mode = 12; public const int isc_spb_prp_access_mode = 13; public const int isc_spb_prp_set_sql_dialect = 14; public const int isc_spb_prp_force_shutdown = 41; public const int isc_spb_prp_attachments_shutdown = 42; public const int isc_spb_prp_transactions_shutdown = 43; public const int isc_spb_prp_shutdown_mode = 44; public const int isc_spb_prp_online_mode = 45; public const int isc_spb_prp_sm_normal = 0; public const int isc_spb_prp_sm_multi = 1; public const int isc_spb_prp_sm_single = 2; public const int isc_spb_prp_sm_full = 3; // RESERVE_SPACE_PARAMETERS public const int isc_spb_prp_res_use_full = 35; public const int isc_spb_prp_res = 36; // WRITE_MODE_PARAMETERS public const int isc_spb_prp_wm_async = 37; public const int isc_spb_prp_wm_sync = 38; // ACCESS_MODE_PARAMETERS public const int isc_spb_prp_am_readonly = 39; public const int isc_spb_prp_am_readwrite = 40; // Option Flags public const int 
isc_spb_prp_activate = 0x0100; public const int isc_spb_prp_db_online = 0x0200; public const int isc_spb_prp_nolinger = 0x0400; #endregion #region Backup Service public const int isc_spb_bkp_file = 5; public const int isc_spb_bkp_factor = 6; public const int isc_spb_bkp_length = 7; public const int isc_spb_bkp_skip_data = 8; public const int isc_spb_bkp_stat = 15; public const int isc_spb_bkp_keyholder = 16; public const int isc_spb_bkp_keyname = 17; public const int isc_spb_bkp_crypt = 18; public const int isc_spb_bkp_include_data = 19; public const int isc_spb_bkp_parallel_workers = 21; public const int isc_spb_bkp_ignore_checksums = 0x01; public const int isc_spb_bkp_ignore_limbo = 0x02; public const int isc_spb_bkp_metadata_only = 0x04; public const int isc_spb_bkp_no_garbage_collect = 0x08; public const int isc_spb_bkp_old_descriptions = 0x10; public const int isc_spb_bkp_non_transportable = 0x20; public const int isc_spb_bkp_convert = 0x40; public const int isc_spb_bkp_expand = 0x80; public const int isc_spb_bkp_no_triggers = 0x8000; public const int isc_spb_bkp_zip = 0x010000; public const int isc_spb_bkp_direct_io = 0x020000; #endregion #region Restore Service public const int isc_spb_res_skip_data = isc_spb_bkp_skip_data; public const int isc_spb_res_include_data = isc_spb_bkp_include_data; public const int isc_spb_res_buffers = 9; public const int isc_spb_res_page_size = 10; public const int isc_spb_res_length = 11; public const int isc_spb_res_access_mode = 12; public const int isc_spb_res_fix_fss_data = 13; public const int isc_spb_res_fix_fss_metadata = 14; public const int isc_spb_res_keyholder = isc_spb_bkp_keyholder; public const int isc_spb_res_keyname = isc_spb_bkp_keyname; public const int isc_spb_res_crypt = isc_spb_bkp_crypt; public const int isc_spb_res_stat = isc_spb_bkp_stat; public const int isc_spb_res_parallel_workers = isc_spb_bkp_parallel_workers; public const int isc_spb_res_metadata_only = isc_spb_bkp_metadata_only; public const int 
isc_spb_res_deactivate_idx = 0x0100; public const int isc_spb_res_no_shadow = 0x0200; public const int isc_spb_res_no_validity = 0x0400; public const int isc_spb_res_one_at_a_time = 0x0800; public const int isc_spb_res_replace = 0x1000; public const int isc_spb_res_create = 0x2000; public const int isc_spb_res_use_all_space = 0x4000; public const int isc_spb_res_direct_io = isc_spb_bkp_direct_io; public const int isc_spb_res_replica_mode = 20; public const int isc_spb_res_am_readonly = isc_spb_prp_am_readonly; public const int isc_spb_res_am_readwrite = isc_spb_prp_am_readwrite; #endregion #region Validate Service public const int isc_spb_val_tab_incl = 1; // include filter based on regular expression public const int isc_spb_val_tab_excl = 2; // exclude filter based on regular expression public const int isc_spb_val_idx_incl = 3; // regexp of indices to validate public const int isc_spb_val_idx_excl = 4; // regexp of indices to NOT validate public const int isc_spb_val_lock_timeout = 5; // how long to wait for table lock #endregion #region Repair Service public const int isc_spb_rpr_commit_trans = 15; public const int isc_spb_rpr_rollback_trans = 34; public const int isc_spb_rpr_recover_two_phase = 17; public const int isc_spb_tra_id = 18; public const int isc_spb_single_tra_id = 19; public const int isc_spb_multi_tra_id = 20; public const int isc_spb_tra_state = 21; public const int isc_spb_tra_state_limbo = 22; public const int isc_spb_tra_state_commit = 23; public const int isc_spb_tra_state_rollback = 24; public const int isc_spb_tra_state_unknown = 25; public const int isc_spb_tra_host_site = 26; public const int isc_spb_tra_remote_site = 27; public const int isc_spb_tra_db_path = 28; public const int isc_spb_tra_advise = 29; public const int isc_spb_tra_advise_commit = 30; public const int isc_spb_tra_advise_rollback = 31; public const int isc_spb_tra_advise_unknown = 33; public const int isc_spb_tra_id_64 = 46; public const int isc_spb_single_tra_id_64 = 
47; public const int isc_spb_multi_tra_id_64 = 48; public const int isc_spb_rpr_commit_trans_64 = 49; public const int isc_spb_rpr_rollback_trans_64 = 50; public const int isc_spb_rpr_recover_two_phase_64 = 51; public const int isc_spb_rpr_par_workers = 52; public const int isc_spb_rpr_validate_db = 0x01; public const int isc_spb_rpr_sweep_db = 0x02; public const int isc_spb_rpr_mend_db = 0x04; public const int isc_spb_rpr_list_limbo_trans = 0x08; public const int isc_spb_rpr_check_db = 0x10; public const int isc_spb_rpr_ignore_checksum = 0x20; public const int isc_spb_rpr_kill_shadows = 0x40; public const int isc_spb_rpr_full = 0x80; public const int isc_spb_rpr_icu = 0x0800; #endregion #region Security Service public const int isc_spb_sec_userid = 5; public const int isc_spb_sec_groupid = 6; public const int isc_spb_sec_username = 7; public const int isc_spb_sec_password = 8; public const int isc_spb_sec_groupname = 9; public const int isc_spb_sec_firstname = 10; public const int isc_spb_sec_middlename = 11; public const int isc_spb_sec_lastname = 12; #endregion #region NBackup Service public const int isc_spb_nbk_level = 5; public const int isc_spb_nbk_file = 6; public const int isc_spb_nbk_direct = 7; public const int isc_spb_nbk_no_triggers = 0x01; #endregion #region Trace Service public const int isc_spb_trc_id = 1; public const int isc_spb_trc_name = 2; public const int isc_spb_trc_cfg = 3; #endregion #region Configuration Keys public const int ISCCFG_LOCKMEM_KEY = 0; public const int ISCCFG_LOCKSEM_KEY = 1; public const int ISCCFG_LOCKSIG_KEY = 2; public const int ISCCFG_EVNTMEM_KEY = 3; public const int ISCCFG_DBCACHE_KEY = 4; public const int ISCCFG_PRIORITY_KEY = 5; public const int ISCCFG_IPCMAP_KEY = 6; public const int ISCCFG_MEMMIN_KEY = 7; public const int ISCCFG_MEMMAX_KEY = 8; public const int ISCCFG_LOCKORDER_KEY = 9; public const int ISCCFG_ANYLOCKMEM_KEY = 10; public const int ISCCFG_ANYLOCKSEM_KEY = 11; public const int ISCCFG_ANYLOCKSIG_KEY = 
12; public const int ISCCFG_ANYEVNTMEM_KEY = 13; public const int ISCCFG_LOCKHASH_KEY = 14; public const int ISCCFG_DEADLOCK_KEY = 15; public const int ISCCFG_LOCKSPIN_KEY = 16; public const int ISCCFG_CONN_TIMEOUT_KEY = 17; public const int ISCCFG_DUMMY_INTRVL_KEY = 18; public const int ISCCFG_TRACE_POOLS_KEY = 19; /* Internal Use only */ public const int ISCCFG_REMOTE_BUFFER_KEY = 20; #endregion #region Common Structural Codes public const int isc_info_end = 1; public const int isc_info_truncated = 2; public const int isc_info_error = 3; public const int isc_info_data_not_ready = 4; public const int isc_info_flag_end = 127; #endregion #region SQL Information public const int isc_info_sql_select = 4; public const int isc_info_sql_bind = 5; public const int isc_info_sql_num_variables = 6; public const int isc_info_sql_describe_vars = 7; public const int isc_info_sql_describe_end = 8; public const int isc_info_sql_sqlda_seq = 9; public const int isc_info_sql_message_seq = 10; public const int isc_info_sql_type = 11; public const int isc_info_sql_sub_type = 12; public const int isc_info_sql_scale = 13; public const int isc_info_sql_length = 14; public const int isc_info_sql_null_ind = 15; public const int isc_info_sql_field = 16; public const int isc_info_sql_relation = 17; public const int isc_info_sql_owner = 18; public const int isc_info_sql_alias = 19; public const int isc_info_sql_sqlda_start = 20; public const int isc_info_sql_stmt_type = 21; public const int isc_info_sql_get_plan = 22; public const int isc_info_sql_records = 23; public const int isc_info_sql_batch_fetch = 24; public const int isc_info_sql_relation_alias = 25; public const int isc_info_sql_explain_plan = 26; public const int isc_info_sql_stmt_flags = 27; #endregion #region SQL Information Return Values public const int isc_info_sql_stmt_select = 1; public const int isc_info_sql_stmt_insert = 2; public const int isc_info_sql_stmt_update = 3; public const int isc_info_sql_stmt_delete = 4; public 
const int isc_info_sql_stmt_ddl = 5; public const int isc_info_sql_stmt_get_segment = 6; public const int isc_info_sql_stmt_put_segment = 7; public const int isc_info_sql_stmt_exec_procedure = 8; public const int isc_info_sql_stmt_start_trans = 9; public const int isc_info_sql_stmt_commit = 10; public const int isc_info_sql_stmt_rollback = 11; public const int isc_info_sql_stmt_select_for_upd = 12; public const int isc_info_sql_stmt_set_generator = 13; public const int isc_info_sql_stmt_savepoint = 14; #endregion #region Database Information public const int isc_info_db_id = 4; public const int isc_info_reads = 5; public const int isc_info_writes = 6; public const int isc_info_fetches = 7; public const int isc_info_marks = 8; public const int isc_info_implementation = 11; public const int isc_info_isc_version = 12; public const int isc_info_base_level = 13; public const int isc_info_page_size = 14; public const int isc_info_num_buffers = 15; public const int isc_info_limbo = 16; public const int isc_info_current_memory = 17; public const int isc_info_max_memory = 18; public const int isc_info_window_turns = 19; public const int isc_info_license = 20; public const int isc_info_allocation = 21; public const int isc_info_attachment_id = 22; public const int isc_info_read_seq_count = 23; public const int isc_info_read_idx_count = 24; public const int isc_info_insert_count = 25; public const int isc_info_update_count = 26; public const int isc_info_delete_count = 27; public const int isc_info_backout_count = 28; public const int isc_info_purge_count = 29; public const int isc_info_expunge_count = 30; public const int isc_info_sweep_interval = 31; public const int isc_info_ods_version = 32; public const int isc_info_ods_minor_version = 33; public const int isc_info_no_reserve = 34; public const int isc_info_logfile = 35; public const int isc_info_cur_logfile_name = 36; public const int isc_info_cur_log_part_offset = 37; public const int isc_info_num_wal_buffers = 38; 
public const int isc_info_wal_buffer_size = 39; public const int isc_info_wal_ckpt_length = 40; public const int isc_info_wal_cur_ckpt_interval = 41; public const int isc_info_wal_prv_ckpt_fname = 42; public const int isc_info_wal_prv_ckpt_poffset = 43; public const int isc_info_wal_recv_ckpt_fname = 44; public const int isc_info_wal_recv_ckpt_poffset = 45; public const int isc_info_wal_grpc_wait_usecs = 47; public const int isc_info_wal_num_io = 48; public const int isc_info_wal_avg_io_size = 49; public const int isc_info_wal_num_commits = 50; public const int isc_info_wal_avg_grpc_size = 51; public const int isc_info_forced_writes = 52; public const int isc_info_user_names = 53; public const int isc_info_page_errors = 54; public const int isc_info_record_errors = 55; public const int isc_info_bpage_errors = 56; public const int isc_info_dpage_errors = 57; public const int isc_info_ipage_errors = 58; public const int isc_info_ppage_errors = 59; public const int isc_info_tpage_errors = 60; public const int isc_info_set_page_buffers = 61; public const int isc_info_db_sql_dialect = 62; public const int isc_info_db_read_only = 63; public const int isc_info_db_size_in_pages = 64; public const int frb_info_att_charset = 101; public const int isc_info_db_class = 102; public const int isc_info_firebird_version = 103; public const int isc_info_oldest_transaction = 104; public const int isc_info_oldest_active = 105; public const int isc_info_oldest_snapshot = 106; public const int isc_info_next_transaction = 107; public const int isc_info_db_provider = 108; public const int isc_info_active_transactions = 109; public const int isc_info_active_tran_count = 110; public const int isc_info_creation_date = 111; public const int isc_info_db_file_size = 112; public const int fb_info_page_contents = 113; public const int fb_info_implementation = 114; public const int fb_info_page_warns = 115; public const int fb_info_record_warns = 116; public const int fb_info_bpage_warns = 117; 
public const int fb_info_dpage_warns = 118; public const int fb_info_ipage_warns = 119; public const int fb_info_ppage_warns = 120; public const int fb_info_tpage_warns = 121; public const int fb_info_pip_errors = 122; public const int fb_info_pip_warns = 123; public const int fb_info_pages_used = 124; public const int fb_info_pages_free = 125; public const int fb_info_ses_idle_timeout_db = 129; public const int fb_info_ses_idle_timeout_att = 130; public const int fb_info_ses_idle_timeout_run = 131; public const int fb_info_conn_flags = 132; public const int fb_info_crypt_key = 133; public const int fb_info_crypt_state = 134; public const int fb_info_statement_timeout_db = 135; public const int fb_info_statement_timeout_att = 136; public const int fb_info_protocol_version = 137; public const int fb_info_crypt_plugin = 138; public const int fb_info_creation_timestamp_tz = 139; public const int fb_info_wire_crypt = 140; public const int fb_info_features = 141; public const int fb_info_next_attachment = 142; public const int fb_info_next_statement = 143; public const int fb_info_db_guid = 144; public const int fb_info_db_file_id = 145; public const int fb_info_replica_mode = 146; public const int fb_info_username = 147; public const int fb_info_sqlrole = 148; #endregion #region Information Request public const int isc_info_number_messages = 4; public const int isc_info_max_message = 5; public const int isc_info_max_send = 6; public const int isc_info_max_receive = 7; public const int isc_info_state = 8; public const int isc_info_message_number = 9; public const int isc_info_message_size = 10; public const int isc_info_request_cost = 11; public const int isc_info_access_path = 12; public const int isc_info_req_select_count = 13; public const int isc_info_req_insert_count = 14; public const int isc_info_req_update_count = 15; public const int isc_info_req_delete_count = 16; #endregion #region Array Slice Description Language public const int isc_sdl_version1 = 1; public 
const int isc_sdl_eoc = 255; public const int isc_sdl_relation = 2; public const int isc_sdl_rid = 3; public const int isc_sdl_field = 4; public const int isc_sdl_fid = 5; public const int isc_sdl_struct = 6; public const int isc_sdl_variable = 7; public const int isc_sdl_scalar = 8; public const int isc_sdl_tiny_integer = 9; public const int isc_sdl_short_integer = 10; public const int isc_sdl_long_integer = 11; public const int isc_sdl_literal = 12; public const int isc_sdl_add = 13; public const int isc_sdl_subtract = 14; public const int isc_sdl_multiply = 15; public const int isc_sdl_divide = 16; public const int isc_sdl_negate = 17; public const int isc_sdl_eql = 18; public const int isc_sdl_neq = 19; public const int isc_sdl_gtr = 20; public const int isc_sdl_geq = 21; public const int isc_sdl_lss = 22; public const int isc_sdl_leq = 23; public const int isc_sdl_and = 24; public const int isc_sdl_or = 25; public const int isc_sdl_not = 26; public const int isc_sdl_while = 27; public const int isc_sdl_assignment = 28; public const int isc_sdl_label = 29; public const int isc_sdl_leave = 30; public const int isc_sdl_begin = 31; public const int isc_sdl_end = 32; public const int isc_sdl_do3 = 33; public const int isc_sdl_do2 = 34; public const int isc_sdl_do1 = 35; public const int isc_sdl_element = 36; #endregion #region Blob Parameter Block public const int isc_bpb_version1 = 1; public const int isc_bpb_source_type = 1; public const int isc_bpb_target_type = 2; public const int isc_bpb_type = 3; public const int isc_bpb_source_interp = 4; public const int isc_bpb_target_interp = 5; public const int isc_bpb_filter_parameter = 6; public const int isc_bpb_type_segmented = 0; public const int isc_bpb_type_stream = 1; public const int RBL_eof = 1; public const int RBL_segment = 2; public const int RBL_eof_pending = 4; public const int RBL_create = 8; public const int isc_blb_seek_from_head = 0; public const int isc_blb_seek_relative = 1; public const int 
isc_blb_seek_from_tail = 2; #endregion #region Blob Information public const int isc_info_blob_num_segments = 4; public const int isc_info_blob_max_segment = 5; public const int isc_info_blob_total_length = 6; public const int isc_info_blob_type = 7; #endregion #region Event Codes public const int P_REQ_async = 1; // Auxiliary asynchronous port public const int EPB_version1 = 1; #endregion #region ISC Error codes public const int isc_facility = 20; public const int isc_err_base = 335544320; public const int isc_err_factor = 1; public const int isc_arg_end = 0; // end of argument list public const int isc_arg_gds = 1; // generic DSRI status value public const int isc_arg_string = 2; // string argument public const int isc_arg_cstring = 3; // count & string argument public const int isc_arg_number = 4; // numeric argument (long) public const int isc_arg_interpreted = 5; // interpreted status code (string) public const int isc_arg_vms = 6; // VAX/VMS status code (long) public const int isc_arg_unix = 7; // UNIX error code public const int isc_arg_domain = 8; // Apollo/Domain error code public const int isc_arg_dos = 9; // MSDOS/OS2 error code public const int isc_arg_mpexl = 10; // HP MPE/XL error code public const int isc_arg_mpexl_ipc = 11; // HP MPE/XL IPC error code public const int isc_arg_next_mach = 15; // NeXT/Mach error code public const int isc_arg_netware = 16; // NetWare error code public const int isc_arg_win32 = 17; // Win32 error code public const int isc_arg_warning = 18; // warning argument public const int isc_arg_sql_state = 19; // SQLSTATE public const int isc_open_trans = 335544357; public const int isc_segment = 335544366; public const int isc_segstr_eof = 335544367; public const int isc_connect_reject = 335544421; public const int isc_invalid_dimension = 335544458; public const int isc_tra_state = 335544468; public const int isc_except = 335544517; public const int isc_dsql_sqlda_err = 335544583; public const int isc_network_error = 335544721; 
public const int isc_net_read_err = 335544726; public const int isc_net_write_err = 335544727; public const int isc_stack_trace = 335544842; public const int isc_except2 = 335544848; public const int isc_arith_except = 335544321; public const int isc_string_truncation = 335544914; public const int isc_formatted_exception = 335545016; public const int isc_wirecrypt_incompatible = 335545064; public const int isc_cancelled = 335544794; public const int isc_nothing_to_cancel = 335544933; #endregion #region BLR Codes public const int blr_version5 = 5; public const int blr_begin = 2; public const int blr_message = 4; public const int blr_eoc = 76; public const int blr_end = 255; public const int blr_text = 14; public const int blr_text2 = 15; public const int blr_short = 7; public const int blr_long = 8; public const int blr_quad = 9; public const int blr_int64 = 16; public const int blr_float = 10; public const int blr_double = 27; public const int blr_d_float = 11; public const int blr_timestamp = 35; public const int blr_varying = 37; public const int blr_varying2 = 38; public const int blr_blob = 261; public const int blr_cstring = 40; public const int blr_cstring2 = 41; public const int blr_blob_id = 45; public const int blr_sql_date = 12; public const int blr_sql_time = 13; public const int blr_bool = 23; public const int blr_dec64 = 24; public const int blr_dec128 = 25; public const int blr_int128 = 26; public const int blr_sql_time_tz = 28; public const int blr_timestamp_tz = 29; public const int blr_ex_time_tz = 30; public const int blr_ex_timestamp_tz = 31; public const int blr_null = 45; #endregion #region DataType Definitions public const int SQL_TEXT = 452; public const int SQL_VARYING = 448; public const int SQL_SHORT = 500; public const int SQL_LONG = 496; public const int SQL_FLOAT = 482; public const int SQL_DOUBLE = 480; public const int SQL_D_FLOAT = 530; public const int SQL_TIMESTAMP = 510; public const int SQL_BLOB = 520; public const int SQL_ARRAY 
= 540; public const int SQL_QUAD = 550; public const int SQL_TYPE_TIME = 560; public const int SQL_TYPE_DATE = 570; public const int SQL_INT64 = 580; public const int SQL_TIMESTAMP_TZ_EX = 32748; public const int SQL_TIME_TZ_EX = 32750; public const int SQL_INT128 = 32752; public const int SQL_TIMESTAMP_TZ = 32754; public const int SQL_TIME_TZ = 32756; public const int SQL_DEC16 = 32760; public const int SQL_DEC34 = 32762; public const int SQL_BOOLEAN = 32764; public const int SQL_NULL = 32766; // Historical alias for pre V6 applications public const int SQL_DATE = SQL_TIMESTAMP; #endregion #region Cancel types public const int fb_cancel_disable = 1; public const int fb_cancel_enable = 2; public const int fb_cancel_raise = 3; public const int fb_cancel_abort = 4; #endregion #region User identification data public const int CNCT_user = 1; public const int CNCT_passwd = 2; public const int CNCT_host = 4; public const int CNCT_group = 5; public const int CNCT_user_verification = 6; public const int CNCT_specific_data = 7; public const int CNCT_plugin_name = 8; public const int CNCT_login = 9; public const int CNCT_plugin_list = 10; public const int CNCT_client_crypt = 11; #endregion #region Transaction information items public const int isc_info_tra_id = 4; public const int isc_info_tra_oldest_interesting = 5; public const int isc_info_tra_oldest_snapshot = 6; public const int isc_info_tra_oldest_active = 7; public const int isc_info_tra_isolation = 8; public const int isc_info_tra_access = 9; public const int isc_info_tra_lock_timeout = 10; public const int fb_info_tra_dbpath = 11; public const int fb_info_tra_snapshot_number = 12; // isc_info_tra_isolation responses public const int isc_info_tra_consistency = 1; public const int isc_info_tra_concurrency = 2; public const int isc_info_tra_read_committed = 3; // isc_info_tra_read_committed options public const int isc_info_tra_no_rec_version = 0; public const int isc_info_tra_rec_version = 1; public const int 
isc_info_tra_read_consistency = 2;
// isc_info_tra_access responses
public const int isc_info_tra_readonly = 0;
public const int isc_info_tra_readwrite = 1;
#endregion

/// <summary>
/// Constants for the Firebird 4 batch API: batch parameter block (BPB) tags
/// and the values accepted by <c>TAG_BLOB_POLICY</c>.
/// </summary>
public static class Batch
{
	// BPB version marker.
	public const int VERSION1 = 1;
	// Batch parameter block tags.
	public const int TAG_MULTIERROR = 1;
	public const int TAG_RECORD_COUNTS = 2;
	public const int TAG_BUFFER_BYTES_SIZE = 3;
	public const int TAG_BLOB_POLICY = 4;
	public const int TAG_DETAILED_ERRORS = 5;
	// TAG_BLOB_POLICY values.
	public const int BLOB_NONE = 0;
	public const int BLOB_ID_ENGINE = 1;
	public const int BLOB_ID_USER = 2;
	public const int BLOB_STREAM = 3;
	// Alignment used for in-stream blob segment headers.
	public const int BLOB_SEGHDR_ALIGN = 2;
}
}
================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Common/IscError.cs
================================================
/*
 *	The contents of this file are subject to the Initial
 *	Developer's Public License Version 1.0 (the "License");
 *	you may not use this file except in compliance with the
 *	License. You may obtain a copy of the License at
 *	https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *	Software distributed under the License is distributed on
 *	an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *	express or implied. See the License for the specific
 *	language governing rights and limitations under the License.
 *
 *	All Rights Reserved.
*/
//$Authors = Carlos Guzman Alvarez, Jiri Cincura (jiri@cincura.net)

using System;
using System.Globalization;

namespace FirebirdSql.Data.Common;

/// <summary>
/// One entry of a Firebird status vector: an error code, or a
/// string/numeric argument attached to the preceding code, discriminated
/// by the <see cref="Type"/> (isc_arg_*) value.
/// </summary>
[Serializable]
internal sealed class IscError
{
	private string _strParam;

	// Interpreted message text, assigned after the vector is decoded.
	public string Message { get; set; }

	// Numeric payload of this entry (error code or numeric argument).
	public int ErrorCode { get; }

	// isc_arg_* discriminator for this entry.
	public int Type { get; }

	// Printable payload of the entry; empty when the entry type carries
	// no textual representation.
	public string StrParam =>
		Type switch
		{
			IscCodes.isc_arg_interpreted or
			IscCodes.isc_arg_string or
			IscCodes.isc_arg_cstring or
			IscCodes.isc_arg_sql_state => _strParam,
			IscCodes.isc_arg_number => ErrorCode.ToString(CultureInfo.InvariantCulture),
			_ => string.Empty,
		};

	// True when this entry is an argument for a preceding code rather
	// than a standalone status value. (isc_arg_sql_state is deliberately
	// not included here, matching the original behavior.)
	public bool IsArgument =>
		Type is IscCodes.isc_arg_interpreted
			or IscCodes.isc_arg_string
			or IscCodes.isc_arg_cstring
			or IscCodes.isc_arg_number;

	// True when the entry marks a warning instead of an error.
	public bool IsWarning => Type == IscCodes.isc_arg_warning;

	// Error-code-only entry; Type defaults to 0 (isc_arg_end).
	internal IscError(int errorCode)
	{
		ErrorCode = errorCode;
	}

	// Typed entry carrying a string payload.
	internal IscError(int type, string strParam)
	{
		Type = type;
		_strParam = strParam;
	}

	// Typed entry carrying a numeric payload.
	internal IscError(int type, int errorCode)
	{
		Type = type;
		ErrorCode = errorCode;
	}
}
================================================
FILE: src/FirebirdSql.Data.FirebirdClient/Common/IscErrorMessages.cs
================================================
/*
 *	The contents of this file are subject to the Initial
 *	Developer's Public License Version 1.0 (the "License");
 *	you may not use this file except in compliance with the
 *	License. You may obtain a copy of the License at
 *	https://github.com/FirebirdSQL/NETProvider/raw/master/license.txt.
 *
 *	Software distributed under the License is distributed on
 *	an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
 *	express or implied. See the License for the specific
 *	language governing rights and limitations under the License.
 *
 *	All Rights Reserved.
*/ //$Authors = Jiri Cincura (jiri@cincura.net) using System.Collections.Generic; namespace FirebirdSql.Data.Common; internal static class IscErrorMessages { static Dictionary _messages = new Dictionary() { {335544320, ""}, {335544321, "arithmetic exception, numeric overflow, or string truncation"}, /* arith_except */ {335544322, "invalid database key"}, /* bad_dbkey */ {335544323, "file {0} is not a valid database"}, /* bad_db_format */ {335544324, "invalid database handle (no active connection)"}, /* bad_db_handle */ {335544325, "bad parameters on attach or create database"}, /* bad_dpb_content */ {335544326, "unrecognized database parameter block"}, /* bad_dpb_form */ {335544327, "invalid request handle"}, /* bad_req_handle */ {335544328, "invalid BLOB handle"}, /* bad_segstr_handle */ {335544329, "invalid BLOB ID"}, /* bad_segstr_id */ {335544330, "invalid parameter in transaction parameter block"}, /* bad_tpb_content */ {335544331, "invalid format for transaction parameter block"}, /* bad_tpb_form */ {335544332, "invalid transaction handle (expecting explicit transaction start)"}, /* bad_trans_handle */ {335544333, "internal Firebird consistency check ({0})"}, /* bug_check */ {335544334, "conversion error from string \"{0}\""}, /* convert_error */ {335544335, "database file appears corrupt ({0})"}, /* db_corrupt */ {335544336, "deadlock"}, /* deadlock */ {335544337, "attempt to start more than {0} transactions"}, /* excess_trans */ {335544338, "no match for first value expression"}, /* from_no_match */ {335544339, "information type inappropriate for object specified"}, /* infinap */ {335544340, "no information of this type available for object specified"}, /* infona */ {335544341, "unknown information item"}, /* infunk */ {335544342, "action cancelled by trigger ({0}) to preserve data integrity"}, /* integ_fail */ {335544343, "invalid request BLR at offset {0}"}, /* invalid_blr */ {335544344, "I/O error during \"{0}\" operation for file \"{1}\""}, /* io_error 
*/ {335544345, "lock conflict on no wait transaction"}, /* lock_conflict */ {335544346, "corrupt system table"}, /* metadata_corrupt */ {335544347, "validation error for column {0}, value \"{1}\""}, /* not_valid */ {335544348, "no current record for fetch operation"}, /* no_cur_rec */ {335544349, "attempt to store duplicate value (visible to active transactions) in unique index \"{0}\""}, /* no_dup */ {335544350, "program attempted to exit without finishing database"}, /* no_finish */ {335544351, "unsuccessful metadata update"}, /* no_meta_update */ {335544352, "no permission for {0} access to {1} {2}"}, /* no_priv */ {335544353, "transaction is not in limbo"}, /* no_recon */ {335544354, "invalid database key"}, /* no_record */ {335544355, "BLOB was not closed"}, /* no_segstr_close */ {335544356, "metadata is obsolete"}, /* obsolete_metadata */ {335544357, "cannot disconnect database with open transactions ({0} active)"}, /* open_trans */ {335544358, "message length error (encountered {0}, expected {1})"}, /* port_len */ {335544359, "attempted update of read-only column {0}"}, /* read_only_field */ {335544360, "attempted update of read-only table"}, /* read_only_rel */ {335544361, "attempted update during read-only transaction"}, /* read_only_trans */ {335544362, "cannot update read-only view {0}"}, /* read_only_view */ {335544363, "no transaction for request"}, /* req_no_trans */ {335544364, "request synchronization error"}, /* req_sync */ {335544365, "request referenced an unavailable database"}, /* req_wrong_db */ {335544366, "segment buffer length shorter than expected"}, /* segment */ {335544367, "attempted retrieval of more segments than exist"}, /* segstr_eof */ {335544368, "attempted invalid operation on a BLOB"}, /* segstr_no_op */ {335544369, "attempted read of a new, open BLOB"}, /* segstr_no_read */ {335544370, "attempted action on BLOB outside transaction"}, /* segstr_no_trans */ {335544371, "attempted write to read-only BLOB"}, /* segstr_no_write */ 
{335544372, "attempted reference to BLOB in unavailable database"}, /* segstr_wrong_db */ {335544373, "operating system directive {0} failed"}, /* sys_request */ {335544374, "attempt to fetch past the last record in a record stream"}, /* stream_eof */ {335544375, "unavailable database"}, /* unavailable */ {335544376, "table {0} was omitted from the transaction reserving list"}, /* unres_rel */ {335544377, "request includes a DSRI extension not supported in this implementation"}, /* uns_ext */ {335544378, "feature is not supported"}, /* wish_list */ {335544379, "unsupported on-disk structure for file {0}; found {1}.{2}, support {3}.{4}"}, /* wrong_ods */ {335544380, "wrong number of arguments on call"}, /* wronumarg */ {335544381, "Implementation limit exceeded"}, /* imp_exc */ {335544382, "{0}"}, /* random */ {335544383, "unrecoverable conflict with limbo transaction {0}"}, /* fatal_conflict */ {335544384, "internal error"}, /* badblk */ {335544385, "internal error"}, /* invpoolcl */ {335544386, "too many requests"}, /* nopoolids */ {335544387, "internal error"}, /* relbadblk */ {335544388, "block size exceeds implementation restriction"}, /* blktoobig */ {335544389, "buffer exhausted"}, /* bufexh */ {335544390, "BLR syntax error: expected {0} at offset {1}, encountered {2}"}, /* syntaxerr */ {335544391, "buffer in use"}, /* bufinuse */ {335544392, "internal error"}, /* bdbincon */ {335544393, "request in use"}, /* reqinuse */ {335544394, "incompatible version of on-disk structure"}, /* badodsver */ {335544395, "table {0} is not defined"}, /* relnotdef */ {335544396, "column {0} is not defined in table {1}"}, /* fldnotdef */ {335544397, "internal error"}, /* dirtypage */ {335544398, "internal error"}, /* waifortra */ {335544399, "internal error"}, /* doubleloc */ {335544400, "internal error"}, /* nodnotfnd */ {335544401, "internal error"}, /* dupnodfnd */ {335544402, "internal error"}, /* locnotmar */ {335544403, "page {0} is of wrong type (expected {1}, found 
{2})"}, /* badpagtyp */ {335544404, "database corrupted"}, /* corrupt */ {335544405, "checksum error on database page {0}"}, /* badpage */ {335544406, "index is broken"}, /* badindex */ {335544407, "database handle not zero"}, /* dbbnotzer */ {335544408, "transaction handle not zero"}, /* tranotzer */ {335544409, "transaction--request mismatch (synchronization error)"}, /* trareqmis */ {335544410, "bad handle count"}, /* badhndcnt */ {335544411, "wrong version of transaction parameter block"}, /* wrotpbver */ {335544412, "unsupported BLR version (expected {0}, encountered {1})"}, /* wroblrver */ {335544413, "wrong version of database parameter block"}, /* wrodpbver */ {335544414, "BLOB and array data types are not supported for {0} operation"}, /* blobnotsup */ {335544415, "database corrupted"}, /* badrelation */ {335544416, "internal error"}, /* nodetach */ {335544417, "internal error"}, /* notremote */ {335544418, "transaction in limbo"}, /* trainlim */ {335544419, "transaction not in limbo"}, /* notinlim */ {335544420, "transaction outstanding"}, /* traoutsta */ {335544421, "connection rejected by remote interface"}, /* connect_reject */ {335544422, "internal error"}, /* dbfile */ {335544423, "internal error"}, /* orphan */ {335544424, "no lock manager available"}, /* no_lock_mgr */ {335544425, "context already in use (BLR error)"}, /* ctxinuse */ {335544426, "context not defined (BLR error)"}, /* ctxnotdef */ {335544427, "data operation not supported"}, /* datnotsup */ {335544428, "undefined message number"}, /* badmsgnum */ {335544429, "undefined parameter number"}, /* badparnum */ {335544430, "unable to allocate memory from operating system"}, /* virmemexh */ {335544431, "blocking signal has been received"}, /* blocking_signal */ {335544432, "lock manager error"}, /* lockmanerr */ {335544433, "communication error with journal \"{0}\""}, /* journerr */ {335544434, "key size exceeds implementation restriction for index \"{0}\""}, /* keytoobig */ {335544435, 
"null segment of UNIQUE KEY"}, /* nullsegkey */ {335544436, "SQL error code = {0}"}, /* sqlerr */ {335544437, "wrong DYN version"}, /* wrodynver */ {335544438, "function {0} is not defined"}, /* funnotdef */ {335544439, "function {0} could not be matched"}, /* funmismat */ {335544440, ""}, /* bad_msg_vec */ {335544441, "database detach completed with errors"}, /* bad_detach */ {335544442, "database system cannot read argument {0}"}, /* noargacc_read */ {335544443, "database system cannot write argument {0}"}, /* noargacc_write */ {335544444, "operation not supported"}, /* read_only */ {335544445, "{0} extension error"}, /* ext_err */ {335544446, "not updatable"}, /* non_updatable */ {335544447, "no rollback performed"}, /* no_rollback */ {335544448, ""}, /* bad_sec_info */ {335544449, ""}, /* invalid_sec_info */ {335544450, "{0}"}, /* misc_interpreted */ {335544451, "update conflicts with concurrent update"}, /* update_conflict */ {335544452, "product {0} is not licensed"}, /* unlicensed */ {335544453, "object {0} is in use"}, /* obj_in_use */ {335544454, "filter not found to convert type {0} to type {1}"}, /* nofilter */ {335544455, "cannot attach active shadow file"}, /* shadow_accessed */ {335544456, "invalid slice description language at offset {0}"}, /* invalid_sdl */ {335544457, "subscript out of bounds"}, /* out_of_bounds */ {335544458, "column not array or invalid dimensions (expected {0}, encountered {1})"}, /* invalid_dimension */ {335544459, "record from transaction {0} is stuck in limbo"}, /* rec_in_limbo */ {335544460, "a file in manual shadow {0} is unavailable"}, /* shadow_missing */ {335544461, "secondary server attachments cannot validate databases"}, /* cant_validate */ {335544462, "secondary server attachments cannot start journaling"}, /* cant_start_journal */ {335544463, "generator {0} is not defined"}, /* gennotdef */ {335544464, "secondary server attachments cannot start logging"}, /* cant_start_logging */ {335544465, "invalid BLOB type for 
operation"}, /* bad_segstr_type */ {335544466, "violation of FOREIGN KEY constraint \"{0}\" on table \"{1}\""}, /* foreign_key */ {335544467, "minor version too high found {0} expected {1}"}, /* high_minor */ {335544468, "transaction {0} is {1}"}, /* tra_state */ {335544469, "transaction marked invalid and cannot be committed"}, /* trans_invalid */ {335544470, "cache buffer for page {0} invalid"}, /* buf_invalid */ {335544471, "there is no index in table {0} with id {1}"}, /* indexnotdefined */ {335544472, "Your user name and password are not defined. Ask your database administrator to set up a Firebird login."}, /* login */ {335544473, "invalid bookmark handle"}, /* invalid_bookmark */ {335544474, "invalid lock level {0}"}, /* bad_lock_level */ {335544475, "lock on table {0} conflicts with existing lock"}, /* relation_lock */ {335544476, "requested record lock conflicts with existing lock"}, /* record_lock */ {335544477, "maximum indexes per table ({0}) exceeded"}, /* max_idx */ {335544478, "enable journal for database before starting online dump"}, /* jrn_enable */ {335544479, "online dump failure. Retry dump"}, /* old_failure */ {335544480, "an online dump is already in progress"}, /* old_in_progress */ {335544481, "no more disk/tape space. 
Cannot continue online dump"}, /* old_no_space */ {335544482, "journaling allowed only if database has Write-ahead Log"}, /* no_wal_no_jrn */ {335544483, "maximum number of online dump files that can be specified is 16"}, /* num_old_files */ {335544484, "error in opening Write-ahead Log file during recovery"}, /* wal_file_open */ {335544485, "invalid statement handle"}, /* bad_stmt_handle */ {335544486, "Write-ahead log subsystem failure"}, /* wal_failure */ {335544487, "WAL Writer error"}, /* walw_err */ {335544488, "Log file header of {0} too small"}, /* logh_small */ {335544489, "Invalid version of log file {0}"}, /* logh_inv_version */ {335544490, "Log file {0} not latest in the chain but open flag still set"}, /* logh_open_flag */ {335544491, "Log file {0} not closed properly; database recovery may be required"}, /* logh_open_flag2 */ {335544492, "Database name in the log file {0} is different"}, /* logh_diff_dbname */ {335544493, "Unexpected end of log file {0} at offset {1}"}, /* logf_unexpected_eof */ {335544494, "Incomplete log record at offset {0} in log file {1}"}, /* logr_incomplete */ {335544495, "Log record header too small at offset {0} in log file {1}"}, /* logr_header_small */ {335544496, "Log block too small at offset {0} in log file {1}"}, /* logb_small */ {335544497, "Illegal attempt to attach to an uninitialized WAL segment for {0}"}, /* wal_illegal_attach */ {335544498, "Invalid WAL parameter block option {0}"}, /* wal_invalid_wpb */ {335544499, "Cannot roll over to the next log file {0}"}, /* wal_err_rollover */ {335544500, "database does not use Write-ahead Log"}, /* no_wal */ {335544501, "cannot drop log file when journaling is enabled"}, /* drop_wal */ {335544502, "reference to invalid stream number"}, /* stream_not_defined */ {335544503, "WAL subsystem encountered error"}, /* wal_subsys_error */ {335544504, "WAL subsystem corrupted"}, /* wal_subsys_corrupt */ {335544505, "must specify archive file when enabling long term journal for 
databases with round-robin log files"}, /* no_archive */ {335544506, "database {0} shutdown in progress"}, /* shutinprog */ {335544507, "refresh range number {0} already in use"}, /* range_in_use */ {335544508, "refresh range number {0} not found"}, /* range_not_found */ {335544509, "CHARACTER SET {0} is not defined"}, /* charset_not_found */ {335544510, "lock time-out on wait transaction"}, /* lock_timeout */ {335544511, "procedure {0} is not defined"}, /* prcnotdef */ {335544512, "Input parameter mismatch for procedure {0}"}, /* prcmismat */ {335544513, "Database {0}: WAL subsystem bug for pid {1}\n{2}"}, /* wal_bugcheck */ {335544514, "Could not expand the WAL segment for database {0}"}, /* wal_cant_expand */ {335544515, "status code {0} unknown"}, /* codnotdef */ {335544516, "exception {0} not defined"}, /* xcpnotdef */ {335544517, "exception {0}"}, /* except */ {335544518, "restart shared cache manager"}, /* cache_restart */ {335544519, "invalid lock handle"}, /* bad_lock_handle */ {335544520, "long-term journaling already enabled"}, /* jrn_present */ {335544521, "Unable to roll over please see Firebird log."}, /* wal_err_rollover2 */ {335544522, "WAL I/O error. Please see Firebird log."}, /* wal_err_logwrite */ {335544523, "WAL writer - Journal server communication error. Please see Firebird log."}, /* wal_err_jrn_comm */ {335544524, "WAL buffers cannot be increased. Please see Firebird log."}, /* wal_err_expansion */ {335544525, "WAL setup error. 
Please see Firebird log."}, /* wal_err_setup */ {335544526, "obsolete"}, /* wal_err_ww_sync */ {335544527, "Cannot start WAL writer for the database {0}"}, /* wal_err_ww_start */ {335544528, "database {0} shutdown"}, /* shutdown */ {335544529, "cannot modify an existing user privilege"}, /* existing_priv_mod */ {335544530, "Cannot delete PRIMARY KEY being used in FOREIGN KEY definition."}, /* primary_key_ref */ {335544531, "Column used in a PRIMARY constraint must be NOT NULL."}, /* primary_key_notnull */ {335544532, "Name of Referential Constraint not defined in constraints table."}, /* ref_cnstrnt_notfound */ {335544533, "Non-existent PRIMARY or UNIQUE KEY specified for FOREIGN KEY."}, /* foreign_key_notfound */ {335544534, "Cannot update constraints (RDB$REF_CONSTRAINTS)."}, /* ref_cnstrnt_update */ {335544535, "Cannot update constraints (RDB$CHECK_CONSTRAINTS)."}, /* check_cnstrnt_update */ {335544536, "Cannot delete CHECK constraint entry (RDB$CHECK_CONSTRAINTS)"}, /* check_cnstrnt_del */ {335544537, "Cannot delete index segment used by an Integrity Constraint"}, /* integ_index_seg_del */ {335544538, "Cannot update index segment used by an Integrity Constraint"}, /* integ_index_seg_mod */ {335544539, "Cannot delete index used by an Integrity Constraint"}, /* integ_index_del */ {335544540, "Cannot modify index used by an Integrity Constraint"}, /* integ_index_mod */ {335544541, "Cannot delete trigger used by a CHECK Constraint"}, /* check_trig_del */ {335544542, "Cannot update trigger used by a CHECK Constraint"}, /* check_trig_update */ {335544543, "Cannot delete column being used in an Integrity Constraint."}, /* cnstrnt_fld_del */ {335544544, "Cannot rename column being used in an Integrity Constraint."}, /* cnstrnt_fld_rename */ {335544545, "Cannot update constraints (RDB$RELATION_CONSTRAINTS)."}, /* rel_cnstrnt_update */ {335544546, "Cannot define constraints on views"}, /* constaint_on_view */ {335544547, "internal Firebird consistency check (invalid 
RDB$CONSTRAINT_TYPE)"}, /* invld_cnstrnt_type */ {335544548, "Attempt to define a second PRIMARY KEY for the same table"}, /* primary_key_exists */ {335544549, "cannot modify or erase a system trigger"}, /* systrig_update */ {335544550, "only the owner of a table may reassign ownership"}, /* not_rel_owner */ {335544551, "could not find object for GRANT"}, /* grant_obj_notfound */ {335544552, "could not find column for GRANT"}, /* grant_fld_notfound */ {335544553, "user does not have GRANT privileges for operation"}, /* grant_nopriv */ {335544554, "object has non-SQL security class defined"}, /* nonsql_security_rel */ {335544555, "column has non-SQL security class defined"}, /* nonsql_security_fld */ {335544556, "Write-ahead Log without shared cache configuration not allowed"}, /* wal_cache_err */ {335544557, "database shutdown unsuccessful"}, /* shutfail */ {335544558, "Operation violates CHECK constraint {0} on view or table {1}"}, /* check_constraint */ {335544559, "invalid service handle"}, /* bad_svc_handle */ {335544560, "database {0} shutdown in {1} seconds"}, /* shutwarn */ {335544561, "wrong version of service parameter block"}, /* wrospbver */ {335544562, "unrecognized service parameter block"}, /* bad_spb_form */ {335544563, "service {0} is not defined"}, /* svcnotdef */ {335544564, "long-term journaling not enabled"}, /* no_jrn */ {335544565, "Cannot transliterate character between character sets"}, /* transliteration_failed */ {335544566, "WAL defined; Cache Manager must be started first"}, /* start_cm_for_wal */ {335544567, "Overflow log specification required for round-robin log"}, /* wal_ovflow_log_required */ {335544568, "Implementation of text subtype {0} not located."}, /* text_subtype */ {335544569, "Dynamic SQL Error"}, /* dsql_error */ {335544570, "Invalid command"}, /* dsql_command_err */ {335544571, "Data type for constant unknown"}, /* dsql_constant_err */ {335544572, "Invalid cursor reference"}, /* dsql_cursor_err */ {335544573, "Data type 
unknown"}, /* dsql_datatype_err */ {335544574, "Invalid cursor declaration"}, /* dsql_decl_err */ {335544575, "Cursor {0} is not updatable"}, /* dsql_cursor_update_err */ {335544576, "Attempt to reopen an open cursor"}, /* dsql_cursor_open_err */ {335544577, "Attempt to reclose a closed cursor"}, /* dsql_cursor_close_err */ {335544578, "Column unknown"}, /* dsql_field_err */ {335544579, "Internal error"}, /* dsql_internal_err */ {335544580, "Table unknown"}, /* dsql_relation_err */ {335544581, "Procedure unknown"}, /* dsql_procedure_err */ {335544582, "Request unknown"}, /* dsql_request_err */ {335544583, "SQLDA error"}, /* dsql_sqlda_err */ {335544584, "Count of read-write columns does not equal count of values"}, /* dsql_var_count_err */ {335544585, "Invalid statement handle"}, /* dsql_stmt_handle */ {335544586, "Function unknown"}, /* dsql_function_err */ {335544587, "Column is not a BLOB"}, /* dsql_blob_err */ {335544588, "COLLATION {0} for CHARACTER SET {1} is not defined"}, /* collation_not_found */ {335544589, "COLLATION {0} is not valid for specified CHARACTER SET"}, /* collation_not_for_charset */ {335544590, "Option specified more than once"}, /* dsql_dup_option */ {335544591, "Unknown transaction option"}, /* dsql_tran_err */ {335544592, "Invalid array reference"}, /* dsql_invalid_array */ {335544593, "Array declared with too many dimensions"}, /* dsql_max_arr_dim_exceeded */ {335544594, "Illegal array dimension range"}, /* dsql_arr_range_error */ {335544595, "Trigger unknown"}, /* dsql_trigger_err */ {335544596, "Subselect illegal in this context"}, /* dsql_subselect_err */ {335544597, "Cannot prepare a CREATE DATABASE/SCHEMA statement"}, /* dsql_crdb_prepare_err */ {335544598, "must specify column name for view select expression"}, /* specify_field_err */ {335544599, "number of columns does not match select list"}, /* num_field_err */ {335544600, "Only simple column names permitted for VIEW WITH CHECK OPTION"}, /* col_name_err */ {335544601, "No WHERE 
clause for VIEW WITH CHECK OPTION"}, /* where_err */ {335544602, "Only one table allowed for VIEW WITH CHECK OPTION"}, /* table_view_err */ {335544603, "DISTINCT, GROUP or HAVING not permitted for VIEW WITH CHECK OPTION"}, /* distinct_err */ {335544604, "FOREIGN KEY column count does not match PRIMARY KEY"}, /* key_field_count_err */ {335544605, "No subqueries permitted for VIEW WITH CHECK OPTION"}, /* subquery_err */ {335544606, "expression evaluation not supported"}, /* expression_eval_err */ {335544607, "gen.c: node not supported"}, /* node_err */ {335544608, "Unexpected end of command"}, /* command_end_err */ {335544609, "INDEX {0}"}, /* index_name */ {335544610, "EXCEPTION {0}"}, /* exception_name */ {335544611, "COLUMN {0}"}, /* field_name */ {335544612, "Token unknown"}, /* token_err */ {335544613, "union not supported"}, /* union_err */ {335544614, "Unsupported DSQL construct"}, /* dsql_construct_err */ {335544615, "column used with aggregate"}, /* field_aggregate_err */ {335544616, "invalid column reference"}, /* field_ref_err */ {335544617, "invalid ORDER BY clause"}, /* order_by_err */ {335544618, "Return mode by value not allowed for this data type"}, /* return_mode_err */ {335544619, "External functions cannot have more than 10 parameters"}, /* extern_func_err */ {335544620, "alias {0} conflicts with an alias in the same statement"}, /* alias_conflict_err */ {335544621, "alias {0} conflicts with a procedure in the same statement"}, /* procedure_conflict_error */ {335544622, "alias {0} conflicts with a table in the same statement"}, /* relation_conflict_err */ {335544623, "Illegal use of keyword VALUE"}, /* dsql_domain_err */ {335544624, "segment count of 0 defined for index {0}"}, /* idx_seg_err */ {335544625, "A node name is not permitted in a secondary, shadow, cache or log file name"}, /* node_name_err */ {335544626, "TABLE {0}"}, /* table_name */ {335544627, "PROCEDURE {0}"}, /* proc_name */ {335544628, "cannot create index {0}"}, /* idx_create_err 
*/ {335544629, "Write-ahead Log with shadowing configuration not allowed"}, /* wal_shadow_err */ {335544630, "there are {0} dependencies"}, /* dependency */ {335544631, "too many keys defined for index {0}"}, /* idx_key_err */ {335544632, "Preceding file did not specify length, so {0} must include starting page number"}, /* dsql_file_length_err */ {335544633, "Shadow number must be a positive integer"}, /* dsql_shadow_number_err */ {335544634, "Token unknown - line {0}, column {1}"}, /* dsql_token_unk_err */ {335544635, "there is no alias or table named {0} at this scope level"}, /* dsql_no_relation_alias */ {335544636, "there is no index {0} for table {1}"}, /* indexname */ {335544637, "table or procedure {0} is not referenced in plan"}, /* no_stream_plan */ {335544638, "table or procedure {0} is referenced more than once in plan; use aliases to distinguish"}, /* stream_twice */ {335544639, "table or procedure {0} is referenced in the plan but not the from list"}, /* stream_not_found */ {335544640, "Invalid use of CHARACTER SET or COLLATE"}, /* collation_requires_text */ {335544641, "Specified domain or source column {0} does not exist"}, /* dsql_domain_not_found */ {335544642, "index {0} cannot be used in the specified plan"}, /* index_unused */ {335544643, "the table {0} is referenced twice; use aliases to differentiate"}, /* dsql_self_join */ {335544644, "attempt to fetch before the first record in a record stream"}, /* stream_bof */ {335544645, "the current position is on a crack"}, /* stream_crack */ {335544646, "database or file exists"}, /* db_or_file_exists */ {335544647, "invalid comparison operator for find operation"}, /* invalid_operator */ {335544648, "Connection lost to pipe server"}, /* conn_lost */ {335544649, "bad checksum"}, /* bad_checksum */ {335544650, "wrong page type"}, /* page_type_err */ {335544651, "Cannot insert because the file is readonly or is on a read only medium."}, /* ext_readonly_err */ {335544652, "multiple rows in singleton 
select"}, /* sing_select_err */ {335544653, "cannot attach to password database"}, /* psw_attach */ {335544654, "cannot start transaction for password database"}, /* psw_start_trans */ {335544655, "invalid direction for find operation"}, /* invalid_direction */ {335544656, "variable {0} conflicts with parameter in same procedure"}, /* dsql_var_conflict */ {335544657, "Array/BLOB/DATE data types not allowed in arithmetic"}, /* dsql_no_blob_array */ {335544658, "{0} is not a valid base table of the specified view"}, /* dsql_base_table */ {335544659, "table or procedure {0} is referenced twice in view; use an alias to distinguish"}, /* duplicate_base_table */ {335544660, "view {0} has more than one base table; use aliases to distinguish"}, /* view_alias */ {335544661, "cannot add index, index root page is full."}, /* index_root_page_full */ {335544662, "BLOB SUB_TYPE {0} is not defined"}, /* dsql_blob_type_unknown */ {335544663, "Too many concurrent executions of the same request"}, /* req_max_clones_exceeded */ {335544664, "duplicate specification of {0} - not supported"}, /* dsql_duplicate_spec */ {335544665, "violation of PRIMARY or UNIQUE KEY constraint \"{0}\" on table \"{1}\""}, /* unique_key_violation */ {335544666, "server version too old to support all CREATE DATABASE options"}, /* srvr_version_too_old */ {335544667, "drop database completed with errors"}, /* drdb_completed_with_errs */ {335544668, "procedure {0} does not return any values"}, /* dsql_procedure_use_err */ {335544669, "count of column list and variable list do not match"}, /* dsql_count_mismatch */ {335544670, "attempt to index BLOB column in index {0}"}, /* blob_idx_err */ {335544671, "attempt to index array column in index {0}"}, /* array_idx_err */ {335544672, "too few key columns found for index {0} (incorrect column name?)"}, /* key_field_err */ {335544673, "cannot delete"}, /* no_delete */ {335544674, "last column in a table cannot be deleted"}, /* del_last_field */ {335544675, "sort 
error"}, /* sort_err */ {335544676, "sort error: not enough memory"}, /* sort_mem_err */ {335544677, "too many versions"}, /* version_err */ {335544678, "invalid key position"}, /* inval_key_posn */ {335544679, "segments not allowed in expression index {0}"}, /* no_segments_err */ {335544680, "sort error: corruption in data structure"}, /* crrp_data_err */ {335544681, "new record size of {0} bytes is too big"}, /* rec_size_err */ {335544682, "Inappropriate self-reference of column"}, /* dsql_field_ref */ {335544683, "request depth exceeded. (Recursive definition?)"}, /* req_depth_exceeded */ {335544684, "cannot access column {0} in view {1}"}, /* no_field_access */ {335544685, "dbkey not available for multi-table views"}, /* no_dbkey */ {335544686, "journal file wrong format"}, /* jrn_format_err */ {335544687, "intermediate journal file full"}, /* jrn_file_full */ {335544688, "The prepare statement identifies a prepare statement with an open cursor"}, /* dsql_open_cursor_request */ {335544689, "Firebird error"}, /* ib_error */ {335544690, "Cache redefined"}, /* cache_redef */ {335544691, "Insufficient memory to allocate page buffer cache"}, /* cache_too_small */ {335544692, "Log redefined"}, /* log_redef */ {335544693, "Log size too small"}, /* log_too_small */ {335544694, "Log partition size too small"}, /* partition_too_small */ {335544695, "Partitions not supported in series of log file specification"}, /* partition_not_supp */ {335544696, "Total length of a partitioned log must be specified"}, /* log_length_spec */ {335544697, "Precision must be from 1 to 18"}, /* precision_err */ {335544698, "Scale must be between zero and precision"}, /* scale_nogt */ {335544699, "Short integer expected"}, /* expec_short */ {335544700, "Long integer expected"}, /* expec_long */ {335544701, "Unsigned short integer expected"}, /* expec_ushort */ {335544702, "Invalid ESCAPE sequence"}, /* escape_invalid */ {335544703, "service {0} does not have an associated executable"}, /* 
svcnoexe */ {335544704, "Failed to locate host machine."}, /* net_lookup_err */ {335544705, "Undefined service {0}/{1}."}, /* service_unknown */ {335544706, "The specified name was not found in the hosts file or Domain Name Services."}, /* host_unknown */ {335544707, "user does not have GRANT privileges on base table/view for operation"}, /* grant_nopriv_on_base */ {335544708, "Ambiguous column reference."}, /* dyn_fld_ambiguous */ {335544709, "Invalid aggregate reference"}, /* dsql_agg_ref_err */ {335544710, "navigational stream {0} references a view with more than one base table"}, /* complex_view */ {335544711, "Attempt to execute an unprepared dynamic SQL statement."}, /* unprepared_stmt */ {335544712, "Positive value expected"}, /* expec_positive */ {335544713, "Incorrect values within SQLDA structure"}, /* dsql_sqlda_value_err */ {335544714, "invalid blob id"}, /* invalid_array_id */ {335544715, "Operation not supported for EXTERNAL FILE table {0}"}, /* extfile_uns_op */ {335544716, "Service is currently busy: {0}"}, /* svc_in_use */ {335544717, "stack size insufficent to execute current request"}, /* err_stack_limit */ {335544718, "Invalid key for find operation"}, /* invalid_key */ {335544719, "Error initializing the network software."}, /* net_init_error */ {335544720, "Unable to load required library {0}."}, /* loadlib_failure */ {335544721, "Unable to complete network request to host \"{0}\"."}, /* network_error */ {335544722, "Failed to establish a connection."}, /* net_connect_err */ {335544723, "Error while listening for an incoming connection."}, /* net_connect_listen_err */ {335544724, "Failed to establish a secondary connection for event processing."}, /* net_event_connect_err */ {335544725, "Error while listening for an incoming event connection request."}, /* net_event_listen_err */ {335544726, "Error reading data from the connection."}, /* net_read_err */ {335544727, "Error writing data to the connection."}, /* net_write_err */ {335544728, 
"Cannot deactivate index used by an integrity constraint"}, /* integ_index_deactivate */ {335544729, "Cannot deactivate index used by a PRIMARY/UNIQUE constraint"}, /* integ_deactivate_primary */ {335544730, "Client/Server Express not supported in this release"}, /* cse_not_supported */ {335544731, ""}, /* tra_must_sweep */ {335544732, "Access to databases on file servers is not supported."}, /* unsupported_network_drive */ {335544733, "Error while trying to create file"}, /* io_create_err */ {335544734, "Error while trying to open file"}, /* io_open_err */ {335544735, "Error while trying to close file"}, /* io_close_err */ {335544736, "Error while trying to read from file"}, /* io_read_err */ {335544737, "Error while trying to write to file"}, /* io_write_err */ {335544738, "Error while trying to delete file"}, /* io_delete_err */ {335544739, "Error while trying to access file"}, /* io_access_err */ {335544740, "A fatal exception occurred during the execution of a user defined function."}, /* udf_exception */ {335544741, "connection lost to database"}, /* lost_db_connection */ {335544742, "User cannot write to RDB$USER_PRIVILEGES"}, /* no_write_user_priv */ {335544743, "token size exceeds limit"}, /* token_too_long */ {335544744, "Maximum user count exceeded. Contact your database administrator."}, /* max_att_exceeded */ {335544745, "Your login {0} is same as one of the SQL role name. Ask your database administrator to set up a valid Firebird login."}, /* login_same_as_role_name */ {335544746, "\"REFERENCES table\" without \"(column)\" requires PRIMARY KEY on referenced table"}, /* reftable_requires_pk */ {335544747, "The username entered is too long. Maximum length is 31 bytes."}, /* usrname_too_long */ {335544748, "The password specified is too long. 
Maximum length is 8 bytes."}, /* password_too_long */ {335544749, "A username is required for this operation."}, /* usrname_required */ {335544750, "A password is required for this operation"}, /* password_required */ {335544751, "The network protocol specified is invalid"}, /* bad_protocol */ {335544752, "A duplicate user name was found in the security database"}, /* dup_usrname_found */ {335544753, "The user name specified was not found in the security database"}, /* usrname_not_found */ {335544754, "An error occurred while attempting to add the user."}, /* error_adding_sec_record */ {335544755, "An error occurred while attempting to modify the user record."}, /* error_modifying_sec_record */ {335544756, "An error occurred while attempting to delete the user record."}, /* error_deleting_sec_record */ {335544757, "An error occurred while updating the security database."}, /* error_updating_sec_db */ {335544758, "sort record size of {0} bytes is too big"}, /* sort_rec_size_err */ {335544759, "can not define a not null column with NULL as default value"}, /* bad_default_value */ {335544760, "invalid clause --- '{0}'"}, /* invalid_clause */ {335544761, "too many open handles to database"}, /* too_many_handles */ {335544762, "size of optimizer block exceeded"}, /* optimizer_blk_exc */ {335544763, "a string constant is delimited by double quotes"}, /* invalid_string_constant */ {335544764, "DATE must be changed to TIMESTAMP"}, /* transitional_date */ {335544765, "attempted update on read-only database"}, /* read_only_database */ {335544766, "SQL dialect {0} is not supported in this database"}, /* must_be_dialect_2_and_up */ {335544767, "A fatal exception occurred during the execution of a blob filter."}, /* blob_filter_exception */ {335544768, "Access violation. The code attempted to access a virtual address without privilege to do so."}, /* exception_access_violation */ {335544769, "Datatype misalignment. 
The attempted to read or write a value that was not stored on a memory boundary."}, /* exception_datatype_missalignment */ {335544770, "Array bounds exceeded. The code attempted to access an array element that is out of bounds."}, /* exception_array_bounds_exceeded */ {335544771, "Float denormal operand. One of the floating-point operands is too small to represent a standard float value."}, /* exception_float_denormal_operand */ {335544772, "Floating-point divide by zero. The code attempted to divide a floating-point value by zero."}, /* exception_float_divide_by_zero */ {335544773, "Floating-point inexact result. The result of a floating-point operation cannot be represented as a decimal fraction."}, /* exception_float_inexact_result */ {335544774, "Floating-point invalid operand. An indeterminant error occurred during a floating-point operation."}, /* exception_float_invalid_operand */ {335544775, "Floating-point overflow. The exponent of a floating-point operation is greater than the magnitude allowed."}, /* exception_float_overflow */ {335544776, "Floating-point stack check. The stack overflowed or underflowed as the result of a floating-point operation."}, /* exception_float_stack_check */ {335544777, "Floating-point underflow. The exponent of a floating-point operation is less than the magnitude allowed."}, /* exception_float_underflow */ {335544778, "Integer divide by zero. The code attempted to divide an integer value by an integer divisor of zero."}, /* exception_integer_divide_by_zero */ {335544779, "Integer overflow. The result of an integer operation caused the most significant bit of the result to carry."}, /* exception_integer_overflow */ {335544780, "An exception occurred that does not have a description. Exception number {0}."}, /* exception_unknown */ {335544781, "Stack overflow. The resource requirements of the runtime stack have exceeded the memory available to it."}, /* exception_stack_overflow */ {335544782, "Segmentation Fault. 
The code attempted to access memory without privileges."}, /* exception_sigsegv */ {335544783, "Illegal Instruction. The Code attempted to perform an illegal operation."}, /* exception_sigill */ {335544784, "Bus Error. The Code caused a system bus error."}, /* exception_sigbus */ {335544785, "Floating Point Error. The Code caused an Arithmetic Exception or a floating point exception."}, /* exception_sigfpe */ {335544786, "Cannot delete rows from external files."}, /* ext_file_delete */ {335544787, "Cannot update rows in external files."}, /* ext_file_modify */ {335544788, "Unable to perform operation"}, /* adm_task_denied */ {335544789, "Specified EXTRACT part does not exist in input datatype"}, /* extract_input_mismatch */ {335544790, "Service {0} requires SYSDBA permissions. Reattach to the Service Manager using the SYSDBA account."}, /* insufficient_svc_privileges */ {335544791, "The file {0} is currently in use by another process. Try again later."}, /* file_in_use */ {335544792, "Cannot attach to services manager"}, /* service_att_err */ {335544793, "Metadata update statement is not allowed by the current database SQL dialect {0}"}, /* ddl_not_allowed_by_db_sql_dial */ {335544794, "operation was cancelled"}, /* cancelled */ {335544795, "unexpected item in service parameter block, expected {0}"}, /* unexp_spb_form */ {335544796, "Client SQL dialect {0} does not support reference to {1} datatype"}, /* sql_dialect_datatype_unsupport */ {335544797, "user name and password are required while attaching to the services manager"}, /* svcnouser */ {335544798, "You created an indirect dependency on uncommitted metadata. You must roll back the current transaction."}, /* depend_on_uncommitted_rel */ {335544799, "The service name was not specified."}, /* svc_name_missing */ {335544800, "Too many Contexts of Relation/Procedure/Views. 
Maximum allowed is 256"}, /* too_many_contexts */ {335544801, "data type not supported for arithmetic"}, /* datype_notsup */ {335544802, "Database dialect being changed from 3 to 1"}, /* dialect_reset_warning */ {335544803, "Database dialect not changed."}, /* dialect_not_changed */ {335544804, "Unable to create database {0}"}, /* database_create_failed */ {335544805, "Database dialect {0} is not a valid dialect."}, /* inv_dialect_specified */ {335544806, "Valid database dialects are {0}."}, /* valid_db_dialects */ {335544807, "SQL warning code = {0}"}, /* sqlwarn */ {335544808, "DATE data type is now called TIMESTAMP"}, /* dtype_renamed */ {335544809, "Function {0} is in {1}, which is not in a permitted directory for external functions."}, /* extern_func_dir_error */ {335544810, "value exceeds the range for valid dates"}, /* date_range_exceeded */ {335544811, "passed client dialect {0} is not a valid dialect."}, /* inv_client_dialect_specified */ {335544812, "Valid client dialects are {0}."}, /* valid_client_dialects */ {335544813, "Unsupported field type specified in BETWEEN predicate."}, /* optimizer_between_err */ {335544814, "Services functionality will be supported in a later version of the product"}, /* service_not_supported */ {335544815, "GENERATOR {0}"}, /* generator_name */ {335544816, "Function {0}"}, /* udf_name */ {335544817, "Invalid parameter to FETCH or FIRST. Only integers >= 0 are allowed."}, /* bad_limit_param */ {335544818, "Invalid parameter to OFFSET or SKIP. Only integers >= 0 are allowed."}, /* bad_skip_param */ {335544819, "File exceeded maximum size of 2GB. 
Add another database file or use a 64 bit I/O version of Firebird."}, /* io_32bit_exceeded_err */ {335544820, "Unable to find savepoint with name {0} in transaction context"}, /* invalid_savepoint */ {335544821, "Invalid column position used in the {0} clause"}, /* dsql_column_pos_err */ {335544822, "Cannot use an aggregate or window function in a WHERE clause, use HAVING (for aggregate only) instead"}, /* dsql_agg_where_err */ {335544823, "Cannot use an aggregate or window function in a GROUP BY clause"}, /* dsql_agg_group_err */ {335544824, "Invalid expression in the {0} (not contained in either an aggregate function or the GROUP BY clause)"}, /* dsql_agg_column_err */ {335544825, "Invalid expression in the {0} (neither an aggregate function nor a part of the GROUP BY clause)"}, /* dsql_agg_having_err */ {335544826, "Nested aggregate and window functions are not allowed"}, /* dsql_agg_nested_err */ {335544827, "Invalid argument in EXECUTE STATEMENT - cannot convert to string"}, /* exec_sql_invalid_arg */ {335544828, "Wrong request type in EXECUTE STATEMENT '{0}'"}, /* exec_sql_invalid_req */ {335544829, "Variable type (position {0}) in EXECUTE STATEMENT '{1}' INTO does not match returned column type"}, /* exec_sql_invalid_var */ {335544830, "Too many recursion levels of EXECUTE STATEMENT"}, /* exec_sql_max_call_exceeded */ {335544831, "Use of {0} at location {1} is not allowed by server configuration"}, /* conf_access_denied */ {335544832, "Cannot change difference file name while database is in backup mode"}, /* wrong_backup_state */ {335544833, "Physical backup is not allowed while Write-Ahead Log is in use"}, /* wal_backup_err */ {335544834, "Cursor is not open"}, /* cursor_not_open */ {335544835, "Target shutdown mode is invalid for database \"{0}\""}, /* bad_shutdown_mode */ {335544836, "Concatenation overflow. Resulting string cannot exceed 32765 bytes in length."}, /* concat_overflow */ {335544837, "Invalid offset parameter {0} to SUBSTRING. 
Only positive integers are allowed."}, /* bad_substring_offset */ {335544838, "Foreign key reference target does not exist"}, /* foreign_key_target_doesnt_exist */ {335544839, "Foreign key references are present for the record"}, /* foreign_key_references_present */ {335544840, "cannot update"}, /* no_update */ {335544841, "Cursor is already open"}, /* cursor_already_open */ {335544842, "{0}"}, /* stack_trace */ {335544843, "Context variable '{0}' is not found in namespace '{1}'"}, /* ctx_var_not_found */ {335544844, "Invalid namespace name '{0}' passed to {1}"}, /* ctx_namespace_invalid */ {335544845, "Too many context variables"}, /* ctx_too_big */ {335544846, "Invalid argument passed to {0}"}, /* ctx_bad_argument */ {335544847, "BLR syntax error. Identifier {0}... is too long"}, /* identifier_too_long */ {335544848, "exception {0}"}, /* except2 */ {335544849, "Malformed string"}, /* malformed_string */ {335544850, "Output parameter mismatch for procedure {0}"}, /* prc_out_param_mismatch */ {335544851, "Unexpected end of command - line {0}, column {1}"}, /* command_end_err2 */ {335544852, "partner index segment no {0} has incompatible data type"}, /* partner_idx_incompat_type */ {335544853, "Invalid length parameter {0} to SUBSTRING. 
Negative integers are not allowed."}, /* bad_substring_length */ {335544854, "CHARACTER SET {0} is not installed"}, /* charset_not_installed */ {335544855, "COLLATION {0} for CHARACTER SET {1} is not installed"}, /* collation_not_installed */ {335544856, "connection shutdown"}, /* att_shutdown */ {335544857, "Maximum BLOB size exceeded"}, /* blobtoobig */ {335544858, "Can't have relation with only computed fields or constraints"}, /* must_have_phys_field */ {335544859, "Time precision exceeds allowed range (0-{0})"}, /* invalid_time_precision */ {335544860, "Unsupported conversion to target type BLOB (subtype {0})"}, /* blob_convert_error */ {335544861, "Unsupported conversion to target type ARRAY"}, /* array_convert_error */ {335544862, "Stream does not support record locking"}, /* record_lock_not_supp */ {335544863, "Cannot create foreign key constraint {0}. Partner index does not exist or is inactive."}, /* partner_idx_not_found */ {335544864, "Transactions count exceeded. Perform backup and restore to make database operable again"}, /* tra_num_exc */ {335544865, "Column has been unexpectedly deleted"}, /* field_disappeared */ {335544866, "{0} cannot depend on {1}"}, /* met_wrong_gtt_scope */ {335544867, "Blob sub_types bigger than 1 (text) are for internal use only"}, /* subtype_for_internal_use */ {335544868, "Procedure {0} is not selectable (it does not contain a SUSPEND statement)"}, /* illegal_prc_type */ {335544869, "Datatype {0} is not supported for sorting operation"}, /* invalid_sort_datatype */ {335544870, "COLLATION {0}"}, /* collation_name */ {335544871, "DOMAIN {0}"}, /* domain_name */ {335544872, "domain {0} is not defined"}, /* domnotdef */ {335544873, "Array data type can use up to {0} dimensions"}, /* array_max_dimensions */ {335544874, "A multi database transaction cannot span more than {0} databases"}, /* max_db_per_trans_allowed */ {335544875, "Bad debug info format"}, /* bad_debug_format */ {335544876, "Error while parsing procedure {0}'s 
BLR"}, /* bad_proc_BLR */ {335544877, "index key too big"}, /* key_too_big */ {335544878, "concurrent transaction number is {0}"}, /* concurrent_transaction */ {335544879, "validation error for variable {0}, value \"{1}\""}, /* not_valid_for_var */ {335544880, "validation error for {0}, value \"{1}\""}, /* not_valid_for */ {335544881, "Difference file name should be set explicitly for database on raw device"}, /* need_difference */ {335544882, "Login name too long ({0} characters, maximum allowed {1})"}, /* long_login */ {335544883, "column {0} is not defined in procedure {1}"}, /* fldnotdef2 */ {335544884, "Invalid SIMILAR TO pattern"}, /* invalid_similar_pattern */ {335544885, "Invalid TEB format"}, /* bad_teb_form */ {335544886, "Found more than one transaction isolation in TPB"}, /* tpb_multiple_txn_isolation */ {335544887, "Table reservation lock type {0} requires table name before in TPB"}, /* tpb_reserv_before_table */ {335544888, "Found more than one {0} specification in TPB"}, /* tpb_multiple_spec */ {335544889, "Option {0} requires READ COMMITTED isolation in TPB"}, /* tpb_option_without_rc */ {335544890, "Option {0} is not valid if {1} was used previously in TPB"}, /* tpb_conflicting_options */ {335544891, "Table name length missing after table reservation {0} in TPB"}, /* tpb_reserv_missing_tlen */ {335544892, "Table name length {0} is too long after table reservation {1} in TPB"}, /* tpb_reserv_long_tlen */ {335544893, "Table name length {0} without table name after table reservation {1} in TPB"}, /* tpb_reserv_missing_tname */ {335544894, "Table name length {0} goes beyond the remaining TPB size after table reservation {1}"}, /* tpb_reserv_corrup_tlen */ {335544895, "Table name length is zero after table reservation {0} in TPB"}, /* tpb_reserv_null_tlen */ {335544896, "Table or view {0} not defined in system tables after table reservation {1} in TPB"}, /* tpb_reserv_relnotfound */ {335544897, "Base table or view {0} for view {1} not defined in system 
tables after table reservation {2} in TPB"}, /* tpb_reserv_baserelnotfound */ {335544898, "Option length missing after option {0} in TPB"}, /* tpb_missing_len */ {335544899, "Option length {0} without value after option {1} in TPB"}, /* tpb_missing_value */ {335544900, "Option length {0} goes beyond the remaining TPB size after option {1}"}, /* tpb_corrupt_len */ {335544901, "Option length is zero after table reservation {0} in TPB"}, /* tpb_null_len */ {335544902, "Option length {0} exceeds the range for option {1} in TPB"}, /* tpb_overflow_len */ {335544903, "Option value {0} is invalid for the option {1} in TPB"}, /* tpb_invalid_value */ {335544904, "Preserving previous table reservation {0} for table {1}, stronger than new {2} in TPB"}, /* tpb_reserv_stronger_wng */ {335544905, "Table reservation {0} for table {1} already specified and is stronger than new {2} in TPB"}, /* tpb_reserv_stronger */ {335544906, "Table reservation reached maximum recursion of {0} when expanding views in TPB"}, /* tpb_reserv_max_recursion */ {335544907, "Table reservation in TPB cannot be applied to {0} because it's a virtual table"}, /* tpb_reserv_virtualtbl */ {335544908, "Table reservation in TPB cannot be applied to {0} because it's a system table"}, /* tpb_reserv_systbl */ {335544909, "Table reservation {0} or {1} in TPB cannot be applied to {2} because it's a temporary table"}, /* tpb_reserv_temptbl */ {335544910, "Cannot set the transaction in read only mode after a table reservation isc_tpb_lock_write in TPB"}, /* tpb_readtxn_after_writelock */ {335544911, "Cannot take a table reservation isc_tpb_lock_write in TPB because the transaction is in read only mode"}, /* tpb_writelock_after_readtxn */ {335544912, "value exceeds the range for a valid time"}, /* time_range_exceeded */ {335544913, "value exceeds the range for valid timestamps"}, /* datetime_range_exceeded */ {335544914, "string right truncation"}, /* string_truncation */ {335544915, "blob truncation when converting to 
a string: length limit exceeded"}, /* blob_truncation */ {335544916, "numeric value is out of range"}, /* numeric_out_of_range */ {335544917, "Firebird shutdown is still in progress after the specified timeout"}, /* shutdown_timeout */ {335544918, "Attachment handle is busy"}, /* att_handle_busy */ {335544919, "Bad written UDF detected: pointer returned in FREE_IT function was not allocated by ib_util_malloc"}, /* bad_udf_freeit */ {335544920, "External Data Source provider '{0}' not found"}, /* eds_provider_not_found */ {335544921, "Execute statement error at {0} :\n{1}Data source : {2}"}, /* eds_connection */ {335544922, "Execute statement preprocess SQL error"}, /* eds_preprocess */ {335544923, "Statement expected"}, /* eds_stmt_expected */ {335544924, "Parameter name expected"}, /* eds_prm_name_expected */ {335544925, "Unclosed comment found near '{0}'"}, /* eds_unclosed_comment */ {335544926, "Execute statement error at {0} :\n{1}Statement : {2}\nData source : {3}"}, /* eds_statement */ {335544927, "Input parameters mismatch"}, /* eds_input_prm_mismatch */ {335544928, "Output parameters mismatch"}, /* eds_output_prm_mismatch */ {335544929, "Input parameter '{0}' have no value set"}, /* eds_input_prm_not_set */ {335544930, "BLR stream length {0} exceeds implementation limit {1}"}, /* too_big_blr */ {335544931, "Monitoring table space exhausted"}, /* montabexh */ {335544932, "module name or entrypoint could not be found"}, /* modnotfound */ {335544933, "nothing to cancel"}, /* nothing_to_cancel */ {335544934, "ib_util library has not been loaded to deallocate memory returned by FREE_IT function"}, /* ibutil_not_loaded */ {335544935, "Cannot have circular dependencies with computed fields"}, /* circular_computed */ {335544936, "Security database error"}, /* psw_db_error */ {335544937, "Invalid data type in DATE/TIME/TIMESTAMP addition or subtraction in add_datettime()"}, /* invalid_type_datetime_op */ {335544938, "Only a TIME value can be added to a DATE value"}, 
/* onlycan_add_timetodate */ {335544939, "Only a DATE value can be added to a TIME value"}, /* onlycan_add_datetotime */ {335544940, "TIMESTAMP values can be subtracted only from another TIMESTAMP value"}, /* onlycansub_tstampfromtstamp */ {335544941, "Only one operand can be of type TIMESTAMP"}, /* onlyoneop_mustbe_tstamp */ {335544942, "Only HOUR, MINUTE, SECOND and MILLISECOND can be extracted from TIME values"}, /* invalid_extractpart_time */ {335544943, "HOUR, MINUTE, SECOND and MILLISECOND cannot be extracted from DATE values"}, /* invalid_extractpart_date */ {335544944, "Invalid argument for EXTRACT() not being of DATE/TIME/TIMESTAMP type"}, /* invalidarg_extract */ {335544945, "Arguments for {0} must be integral types or NUMERIC/DECIMAL without scale"}, /* sysf_argmustbe_exact */ {335544946, "First argument for {0} must be integral type or floating point type"}, /* sysf_argmustbe_exact_or_fp */ {335544947, "Human readable UUID argument for {0} must be of string type"}, /* sysf_argviolates_uuidtype */ {335544948, "Human readable UUID argument for {1} must be of exact length {0}"}, /* sysf_argviolates_uuidlen */ {335544949, "Human readable UUID argument for {2} must have \"-\" at position {1} instead of \"{0}\""}, /* sysf_argviolates_uuidfmt */ {335544950, "Human readable UUID argument for {2} must have hex digit at position {1} instead of \"{0}\""}, /* sysf_argviolates_guidigits */ {335544951, "Only HOUR, MINUTE, SECOND and MILLISECOND can be added to TIME values in {0}"}, /* sysf_invalid_addpart_time */ {335544952, "Invalid data type in addition of part to DATE/TIME/TIMESTAMP in {0}"}, /* sysf_invalid_add_datetime */ {335544953, "Invalid part {0} to be added to a DATE/TIME/TIMESTAMP value in {1}"}, /* sysf_invalid_addpart_dtime */ {335544954, "Expected DATE/TIME/TIMESTAMP type in evlDateAdd() result"}, /* sysf_invalid_add_dtime_rc */ {335544955, "Expected DATE/TIME/TIMESTAMP type as first and second argument to {0}"}, /* sysf_invalid_diff_dtime */ 
{335544956, "The result of TIME- in {0} cannot be expressed in YEAR, MONTH, DAY or WEEK"}, /* sysf_invalid_timediff */ {335544957, "The result of TIME-TIMESTAMP or TIMESTAMP-TIME in {0} cannot be expressed in HOUR, MINUTE, SECOND or MILLISECOND"}, /* sysf_invalid_tstamptimediff */ {335544958, "The result of DATE-TIME or TIME-DATE in {0} cannot be expressed in HOUR, MINUTE, SECOND and MILLISECOND"}, /* sysf_invalid_datetimediff */ {335544959, "Invalid part {0} to express the difference between two DATE/TIME/TIMESTAMP values in {1}"}, /* sysf_invalid_diffpart */ {335544960, "Argument for {0} must be positive"}, /* sysf_argmustbe_positive */ {335544961, "Base for {0} must be positive"}, /* sysf_basemustbe_positive */ {335544962, "Argument #{0} for {1} must be zero or positive"}, /* sysf_argnmustbe_nonneg */ {335544963, "Argument #{0} for {1} must be positive"}, /* sysf_argnmustbe_positive */ {335544964, "Base for {0} cannot be zero if exponent is negative"}, /* sysf_invalid_zeropowneg */ {335544965, "Base for {0} cannot be negative if exponent is not an integral value"}, /* sysf_invalid_negpowfp */ {335544966, "The numeric scale must be between -128 and 127 in {0}"}, /* sysf_invalid_scale */ {335544967, "Argument for {0} must be zero or positive"}, /* sysf_argmustbe_nonneg */ {335544968, "Binary UUID argument for {0} must be of string type"}, /* sysf_binuuid_mustbe_str */ {335544969, "Binary UUID argument for {1} must use {0} bytes"}, /* sysf_binuuid_wrongsize */ {335544970, "Missing required item {0} in service parameter block"}, /* missing_required_spb */ {335544971, "{0} server is shutdown"}, /* net_server_shutdown */ {335544972, "Invalid connection string"}, /* bad_conn_str */ {335544973, "Unrecognized events block"}, /* bad_epb_form */ {335544974, "Could not start first worker thread - shutdown server"}, /* no_threads */ {335544975, "Timeout occurred while waiting for a secondary connection for event processing"}, /* net_event_connect_timeout */ {335544976, 
"Argument for {0} must be different than zero"}, /* sysf_argmustbe_nonzero */ {335544977, "Argument for {0} must be in the range [-1, 1]"}, /* sysf_argmustbe_range_inc1_1 */ {335544978, "Argument for {0} must be greater or equal than one"}, /* sysf_argmustbe_gteq_one */ {335544979, "Argument for {0} must be in the range ]-1, 1["}, /* sysf_argmustbe_range_exc1_1 */ {335544980, "Incorrect parameters provided to internal function {0}"}, /* internal_rejected_params */ {335544981, "Floating point overflow in built-in function {0}"}, /* sysf_fp_overflow */ {335544982, "Floating point overflow in result from UDF {0}"}, /* udf_fp_overflow */ {335544983, "Invalid floating point value returned by UDF {0}"}, /* udf_fp_nan */ {335544984, "Shared memory area is probably already created by another engine instance in another Windows session"}, /* instance_conflict */ {335544985, "No free space found in temporary directories"}, /* out_of_temp_space */ {335544986, "Explicit transaction control is not allowed"}, /* eds_expl_tran_ctrl */ {335544987, "Use of TRUSTED switches in spb_command_line is prohibited"}, /* no_trusted_spb */ {335544988, "PACKAGE {0}"}, /* package_name */ {335544989, "Cannot make field {0} of table {1} NOT NULL because there are NULLs present"}, /* cannot_make_not_null */ {335544990, "Feature {0} is not supported anymore"}, /* feature_removed */ {335544991, "VIEW {0}"}, /* view_name */ {335544992, "Can not access lock files directory {0}"}, /* lock_dir_access */ {335544993, "Fetch option {0} is invalid for a non-scrollable cursor"}, /* invalid_fetch_option */ {335544994, "Error while parsing function {0}'s BLR"}, /* bad_fun_BLR */ {335544995, "Cannot execute function {0} of the unimplemented package {1}"}, /* func_pack_not_implemented */ {335544996, "Cannot execute procedure {0} of the unimplemented package {1}"}, /* proc_pack_not_implemented */ {335544997, "External function {0} not returned by the external engine plugin {1}"}, /* eem_func_not_returned */ 
{335544998, "External procedure {0} not returned by the external engine plugin {1}"}, /* eem_proc_not_returned */ {335544999, "External trigger {0} not returned by the external engine plugin {1}"}, /* eem_trig_not_returned */ {335545000, "Incompatible plugin version {0} for external engine {1}"}, /* eem_bad_plugin_ver */ {335545001, "External engine {0} not found"}, /* eem_engine_notfound */ {335545002, "Attachment is in use"}, /* attachment_in_use */ {335545003, "Transaction is in use"}, /* transaction_in_use */ {335545004, "Error loading plugin {0}"}, /* pman_cannot_load_plugin */ {335545005, "Loadable module {0} not found"}, /* pman_module_notfound */ {335545006, "Standard plugin entrypoint does not exist in module {0}"}, /* pman_entrypoint_notfound */ {335545007, "Module {0} exists but can not be loaded"}, /* pman_module_bad */ {335545008, "Module {0} does not contain plugin {1} type {2}"}, /* pman_plugin_notfound */ {335545009, "Invalid usage of context namespace DDL_TRIGGER"}, /* sysf_invalid_trig_namespace */ {335545010, "Value is NULL but isNull parameter was not informed"}, /* unexpected_null */ {335545011, "Type {0} is incompatible with BLOB"}, /* type_notcompat_blob */ {335545012, "Invalid date"}, /* invalid_date_val */ {335545013, "Invalid time"}, /* invalid_time_val */ {335545014, "Invalid timestamp"}, /* invalid_timestamp_val */ {335545015, "Invalid index {0} in function {1}"}, /* invalid_index_val */ {335545016, "{0}"}, /* formatted_exception */ {335545017, "Asynchronous call is already running for this attachment"}, /* async_active */ {335545018, "Function {0} is private to package {1}"}, /* private_function */ {335545019, "Procedure {0} is private to package {1}"}, /* private_procedure */ {335545020, "Request can't access new records in relation {0} and should be recompiled"}, /* request_outdated */ {335545021, "invalid events id (handle)"}, /* bad_events_handle */ {335545022, "Cannot copy statement {0}"}, /* cannot_copy_stmt */ {335545023, 
"Invalid usage of boolean expression"}, /* invalid_boolean_usage */ {335545024, "Arguments for {0} cannot both be zero"}, /* sysf_argscant_both_be_zero */ {335545025, "missing service ID in spb"}, /* spb_no_id */ {335545026, "External BLR message mismatch: invalid null descriptor at field {0}"}, /* ee_blr_mismatch_null */ {335545027, "External BLR message mismatch: length = {0}, expected {1}"}, /* ee_blr_mismatch_length */ {335545028, "Subscript {0} out of bounds [{1}, {2}]"}, /* ss_out_of_bounds */ {335545029, "Install incomplete. To complete security database initialization please CREATE USER. For details read doc/README.security_database.txt."}, /* missing_data_structures */ {335545030, "{0} operation is not allowed for system table {1}"}, /* protect_sys_tab */ {335545031, "Libtommath error code {0} in function {1}"}, /* libtommath_generic */ {335545032, "unsupported BLR version (expected between {0} and {1}, encountered {2})"}, /* wroblrver2 */ {335545033, "expected length {0}, actual {1}"}, /* trunc_limits */ {335545034, "Wrong info requested in isc_svc_query() for anonymous service"}, /* info_access */ {335545035, "No isc_info_svc_stdin in user request, but service thread requested stdin data"}, /* svc_no_stdin */ {335545036, "Start request for anonymous service is impossible"}, /* svc_start_failed */ {335545037, "All services except for getting server log require switches"}, /* svc_no_switches */ {335545038, "Size of stdin data is more than was requested from client"}, /* svc_bad_size */ {335545039, "Crypt plugin {0} failed to load"}, /* no_crypt_plugin */ {335545040, "Length of crypt plugin name should not exceed {0} bytes"}, /* cp_name_too_long */ {335545041, "Crypt failed - already crypting database"}, /* cp_process_active */ {335545042, "Crypt failed - database is already in requested state"}, /* cp_already_crypted */ {335545043, "Missing crypt plugin, but page appears encrypted"}, /* decrypt_error */ {335545044, "No providers loaded"}, /* no_providers 
*/ {335545045, "NULL data with non-zero SPB length"}, /* null_spb */ {335545046, "Maximum ({0}) number of arguments exceeded for function {1}"}, /* max_args_exceeded */ {335545047, "External BLR message mismatch: names count = {0}, blr count = {1}"}, /* ee_blr_mismatch_names_count */ {335545048, "External BLR message mismatch: name {0} not found"}, /* ee_blr_mismatch_name_not_found */ {335545049, "Invalid resultset interface"}, /* bad_result_set */ {335545050, "Message length passed from user application does not match set of columns"}, /* wrong_message_length */ {335545051, "Resultset is missing output format information"}, /* no_output_format */ {335545052, "Message metadata not ready - item {0} is not finished"}, /* item_finish */ {335545053, "Missing configuration file: {0}"}, /* miss_config */ {335545054, "{0}: illegal line <{1}>"}, /* conf_line */ {335545055, "Invalid include operator in {0} for <{1}>"}, /* conf_include */ {335545056, "Include depth too big"}, /* include_depth */ {335545057, "File to include not found"}, /* include_miss */ {335545058, "Only the owner can change the ownership"}, /* protect_ownership */ {335545059, "undefined variable number"}, /* badvarnum */ {335545060, "Missing security context for {0}"}, /* sec_context */ {335545061, "Missing segment {0} in multisegment connect block parameter"}, /* multi_segment */ {335545062, "Different logins in connect and attach packets - client library error"}, /* login_changed */ {335545063, "Exceeded exchange limit during authentication handshake"}, /* auth_handshake_limit */ {335545064, "Incompatible wire encryption levels requested on client and server"}, /* wirecrypt_incompatible */ {335545065, "Client attempted to attach unencrypted but wire encryption is required"}, /* miss_wirecrypt */ {335545066, "Client attempted to start wire encryption using unknown key {0}"}, /* wirecrypt_key */ {335545067, "Client attempted to start wire encryption using unsupported plugin {0}"}, /* wirecrypt_plugin */ 
{335545068, "Error getting security database name from configuration file"}, /* secdb_name */ {335545069, "Client authentication plugin is missing required data from server"}, /* auth_data */ {335545070, "Client authentication plugin expected {1} bytes of {2} from server, got {0}"}, /* auth_datalength */ {335545071, "Attempt to get information about an unprepared dynamic SQL statement."}, /* info_unprepared_stmt */ {335545072, "Problematic key value is {0}"}, /* idx_key_value */ {335545073, "Cannot select virtual table {0} for update WITH LOCK"}, /* forupdate_virtualtbl */ {335545074, "Cannot select system table {0} for update WITH LOCK"}, /* forupdate_systbl */ {335545075, "Cannot select temporary table {0} for update WITH LOCK"}, /* forupdate_temptbl */ {335545076, "System {0} {1} cannot be modified"}, /* cant_modify_sysobj */ {335545077, "Server misconfigured - contact administrator please"}, /* server_misconfigured */ {335545078, "Deprecated backward compatibility ALTER ROLE ... SET/DROP AUTO ADMIN mapping may be used only for RDB$ADMIN role"}, /* alter_role */ {335545079, "Mapping {0} already exists"}, /* map_already_exists */ {335545080, "Mapping {0} does not exist"}, /* map_not_exists */ {335545081, "{0} failed when loading mapping cache"}, /* map_load */ {335545082, "Invalid name <*> in authentication block"}, /* map_aster */ {335545083, "Multiple maps found for {0}"}, /* map_multi */ {335545084, "Undefined mapping result - more than one different results found"}, /* map_undefined */ {335545085, "Incompatible mode of attachment to damaged database"}, /* baddpb_damaged_mode */ {335545086, "Attempt to set in database number of buffers which is out of acceptable range [{0}:{1}]"}, /* baddpb_buffers_range */ {335545087, "Attempt to temporarily set number of buffers less than {0}"}, /* baddpb_temp_buffers */ {335545088, "Global mapping is not available when database {0} is not present"}, /* map_nodb */ {335545089, "Global mapping is not available when table 
RDB$MAP is not present in database {0}"}, /* map_notable */ {335545090, "Your attachment has no trusted role"}, /* miss_trusted_role */ {335545091, "Role {0} is invalid or unavailable"}, /* set_invalid_role */ {335545092, "Cursor {0} is not positioned in a valid record"}, /* cursor_not_positioned */ {335545093, "Duplicated user attribute {0}"}, /* dup_attribute */ {335545094, "There is no privilege for this operation"}, /* dyn_no_priv */ {335545095, "Using GRANT OPTION on {0} not allowed"}, /* dsql_cant_grant_option */ {335545096, "read conflicts with concurrent update"}, /* read_conflict */ {335545097, "{0} failed when working with CREATE DATABASE grants"}, /* crdb_load */ {335545098, "CREATE DATABASE grants check is not possible when database {0} is not present"}, /* crdb_nodb */ {335545099, "CREATE DATABASE grants check is not possible when table RDB$DB_CREATORS is not present in database {0}"}, /* crdb_notable */ {335545100, "Interface {2} version too old: expected {0}, found {1}"}, /* interface_version_too_old */ {335545101, "Input parameter mismatch for function {0}"}, /* fun_param_mismatch */ {335545102, "Error during savepoint backout - transaction invalidated"}, /* savepoint_backout_err */ {335545103, "Domain used in the PRIMARY KEY constraint of table {0} must be NOT NULL"}, /* domain_primary_key_notnull */ {335545104, "CHARACTER SET {0} cannot be used as a attachment character set"}, /* invalid_attachment_charset */ {335545105, "Some database(s) were shutdown when trying to read mapping data"}, /* map_down */ {335545106, "Error occurred during login, please check server firebird.log for details"}, /* login_error */ {335545107, "Database already opened with engine instance, incompatible with current"}, /* already_opened */ {335545108, "Invalid crypt key {0}"}, /* bad_crypt_key */ {335545109, "Page requires encryption but crypt plugin is missing"}, /* encrypt_error */ {335545110, "Maximum index depth ({0} levels) is reached"}, /* max_idx_depth */ 
{335545111, "System privilege {0} does not exist"}, /* wrong_prvlg */ {335545112, "System privilege {0} is missing"}, /* miss_prvlg */ {335545113, "Invalid or missing checksum of encrypted database"}, /* crypt_checksum */ {335545114, "You must have SYSDBA rights at this server"}, /* not_dba */ {335545115, "Cannot open cursor for non-SELECT statement"}, /* no_cursor */ {335545116, "If specifies {0}, then shall not specify {1}"}, /* dsql_window_incompat_frames */ {335545117, "RANGE based window with {PRECEDING | FOLLOWING} cannot have ORDER BY with more than one value"}, /* dsql_window_range_multi_key */ {335545118, "RANGE based window with PRECEDING/FOLLOWING must have a single ORDER BY key of numerical, date, time or timestamp types"}, /* dsql_window_range_inv_key_type */ {335545119, "Window RANGE/ROWS PRECEDING/FOLLOWING value must be of a numerical type"}, /* dsql_window_frame_value_inv_type */ {335545120, "Invalid PRECEDING or FOLLOWING offset in window function: cannot be negative"}, /* window_frame_value_invalid */ {335545121, "Window {0} not found"}, /* dsql_window_not_found */ {335545122, "Cannot use PARTITION BY clause while overriding the window {0}"}, /* dsql_window_cant_overr_part */ {335545123, "Cannot use ORDER BY clause while overriding the window {0} which already has an ORDER BY clause"}, /* dsql_window_cant_overr_order */ {335545124, "Cannot override the window {0} because it has a frame clause. Tip: it can be used without parenthesis in OVER"}, /* dsql_window_cant_overr_frame */ {335545125, "Duplicate window definition for {0}"}, /* dsql_window_duplicate */ {335545126, "SQL statement is too long. 
Maximum size is {0} bytes."}, /* sql_too_long */ {335545127, "Config level timeout expired."}, /* cfg_stmt_timeout */ {335545128, "Attachment level timeout expired."}, /* att_stmt_timeout */ {335545129, "Statement level timeout expired."}, /* req_stmt_timeout */ {335545130, "Killed by database administrator."}, /* att_shut_killed */ {335545131, "Idle timeout expired."}, /* att_shut_idle */ {335545132, "Database is shutdown."}, /* att_shut_db_down */ {335545133, "Engine is shutdown."}, /* att_shut_engine */ {335545134, "OVERRIDING clause can be used only when an identity column is present in the INSERT's field list for table/view {0}"}, /* overriding_without_identity */ {335545135, "OVERRIDING SYSTEM VALUE can be used only for identity column defined as 'GENERATED ALWAYS' in INSERT for table/view {0}"}, /* overriding_system_invalid */ {335545136, "OVERRIDING USER VALUE can be used only for identity column defined as 'GENERATED BY DEFAULT' in INSERT for table/view {0}"}, /* overriding_user_invalid */ {335545137, "OVERRIDING clause should be used when an identity column defined as 'GENERATED ALWAYS' is present in the INSERT's field list for table table/view {0}"}, /* overriding_missing */ {335545138, "DecFloat precision must be 16 or 34"}, /* decprecision_err */ {335545139, "Decimal float divide by zero. The code attempted to divide a DECFLOAT value by zero."}, /* decfloat_divide_by_zero */ {335545140, "Decimal float inexact result. The result of an operation cannot be represented as a decimal fraction."}, /* decfloat_inexact_result */ {335545141, "Decimal float invalid operation. An indeterminant error occurred during an operation."}, /* decfloat_invalid_operation */ {335545142, "Decimal float overflow. The exponent of a result is greater than the magnitude allowed."}, /* decfloat_overflow */ {335545143, "Decimal float underflow. 
The exponent of a result is less than the magnitude allowed."}, /* decfloat_underflow */ {335545144, "Sub-function {0} has not been defined"}, /* subfunc_notdef */ {335545145, "Sub-procedure {0} has not been defined"}, /* subproc_notdef */ {335545146, "Sub-function {0} has a signature mismatch with its forward declaration"}, /* subfunc_signat */ {335545147, "Sub-procedure {0} has a signature mismatch with its forward declaration"}, /* subproc_signat */ {335545148, "Default values for parameters are not allowed in definition of the previously declared sub-function {0}"}, /* subfunc_defvaldecl */ {335545149, "Default values for parameters are not allowed in definition of the previously declared sub-procedure {0}"}, /* subproc_defvaldecl */ {335545150, "Sub-function {0} was declared but not implemented"}, /* subfunc_not_impl */ {335545151, "Sub-procedure {0} was declared but not implemented"}, /* subproc_not_impl */ {335545152, "Invalid HASH algorithm {0}"}, /* sysf_invalid_hash_algorithm */ {335545153, "Expression evaluation error for index \"{0}\" on table \"{1}\""}, /* expression_eval_index */ {335545154, "Invalid decfloat trap state {0}"}, /* invalid_decfloat_trap */ {335545155, "Invalid decfloat rounding mode {0}"}, /* invalid_decfloat_round */ {335545156, "Invalid part {0} to calculate the {0} of a DATE/TIMESTAMP"}, /* sysf_invalid_first_last_part */ {335545157, "Expected DATE/TIMESTAMP value in {0}"}, /* sysf_invalid_date_timestamp */ {335545158, "Precision must be from {0} to {1}"}, /* precision_err2 */ {335545159, "invalid batch handle"}, /* bad_batch_handle */ {335545160, "Bad international character in tag {0}"}, /* intl_char */ {335545161, "Null data in parameters block with non-zero length"}, /* null_block */ {335545162, "Items working with running service and getting generic server information should not be mixed in single info block"}, /* mixed_info */ {335545163, "Unknown information item, code {0}"}, /* unknown_info */ {335545164, "Wrong version of 
blob parameters block {0}, should be {1}"}, /* bpb_version */ {335545165, "User management plugin is missing or failed to load"}, /* user_manager */ {335545166, "Missing entrypoint {0} in ICU library"}, /* icu_entrypoint */ {335545167, "Could not find acceptable ICU library"}, /* icu_library */ {335545168, "Name {0} not found in system MetadataBuilder"}, /* metadata_name */ {335545169, "Parse to tokens error"}, /* tokens_parse */ {335545170, "Error opening international conversion descriptor from {0} to {1}"}, /* iconv_open */ {335545171, "Message {0} is out of range, only {1} messages in batch"}, /* batch_compl_range */ {335545172, "Detailed error info for message {0} is missing in batch"}, /* batch_compl_detail */ {335545173, "Compression stream init error {0}"}, /* deflate_init */ {335545174, "Decompression stream init error {0}"}, /* inflate_init */ {335545175, "Segment size ({0}) should not exceed 65535 (64K - 1) when using segmented blob"}, /* big_segment */ {335545176, "Invalid blob policy in the batch for {0}() call"}, /* batch_policy */ {335545177, "Can't change default BPB after adding any data to batch"}, /* batch_defbpb */ {335545178, "Unexpected info buffer structure querying for server batch parameters"}, /* batch_align */ {335545179, "Duplicated segment {0} in multisegment connect block parameter"}, /* multi_segment_dup */ {335545180, "Plugin not supported by network protocol"}, /* non_plugin_protocol */ {335545181, "Error parsing message format"}, /* message_format */ {335545182, "Wrong version of batch parameters block {0}, should be {1}"}, /* batch_param_version */ {335545183, "Message size ({0}) in batch exceeds internal buffer size ({1})"}, /* batch_msg_long */ {335545184, "Batch already opened for this statement"}, /* batch_open */ {335545185, "Invalid type of statement used in batch"}, /* batch_type */ {335545186, "Statement used in batch must have parameters"}, /* batch_param */ {335545187, "There are no blobs in associated with batch 
statement"}, /* batch_blobs */ {335545188, "appendBlobData() is used to append data to last blob but no such blob was added to the batch"}, /* batch_blob_append */ {335545189, "Portions of data, passed as blob stream, should have size multiple to the alignment required for blobs"}, /* batch_stream_align */ {335545190, "Repeated blob id {0} in registerBlob()"}, /* batch_rpt_blob */ {335545191, "Blob buffer format error"}, /* batch_blob_buf */ {335545192, "Unusable (too small) data remained in {0} buffer"}, /* batch_small_data */ {335545193, "Blob continuation should not contain BPB"}, /* batch_cont_bpb */ {335545194, "Size of BPB ({0}) greater than remaining data ({1})"}, /* batch_big_bpb */ {335545195, "Size of segment ({0}) greater than current BLOB data ({1})"}, /* batch_big_segment */ {335545196, "Size of segment ({0}) greater than available data ({1})"}, /* batch_big_seg2 */ {335545197, "Unknown blob ID {0} in the batch message"}, /* batch_blob_id */ {335545198, "Internal buffer overflow - batch too big"}, /* batch_too_big */ {335545199, "Numeric literal too long"}, /* num_literal */ {335545200, "Error using events in mapping shared memory: {0}"}, /* map_event */ {335545201, "Global mapping memory overflow"}, /* map_overflow */ {335545202, "Header page overflow - too many clumplets on it"}, /* hdr_overflow */ {335545203, "No matching client/server authentication plugins configured for execute statement in embedded datasource"}, /* vld_plugins */ {335545204, "Missing database encryption key for your attachment"}, /* db_crypt_key */ {335545205, "Key holder plugin {0} failed to load"}, /* no_keyholder_plugin */ {335545206, "Cannot reset user session"}, /* ses_reset_err */ {335545207, "There are open transactions ({0} active)"}, /* ses_reset_open_trans */ {335545208, "Session was reset with warning(s)"}, /* ses_reset_warn */ {335545209, "Transaction is rolled back due to session reset, all changes are lost"}, /* ses_reset_tran_rollback */ {335545210, "Plugin 
{0}:"}, /* plugin_name */ {335545211, "PARAMETER {0}"}, /* parameter_name */ {335545212, "Starting page number for file {0} must be {1} or greater"}, /* file_starting_page_err */ {335545213, "Invalid time zone offset: {0} - must use format +/-hours:minutes and be between -14:00 and +14:00"}, /* invalid_timezone_offset */ {335545214, "Invalid time zone region: {0}"}, /* invalid_timezone_region */ {335545215, "Invalid time zone ID: {0}"}, /* invalid_timezone_id */ {335545216, "Wrong base64 text length {0}, should be multiple of 4"}, /* tom_decode64len */ {335545217, "Invalid first parameter datatype - need string or blob"}, /* tom_strblob */ {335545218, "Error registering {0} - probably bad tomcrypt library"}, /* tom_reg */ {335545219, "Unknown crypt algorithm {0} in USING clause"}, /* tom_algorithm */ {335545220, "Should specify mode parameter for symmetric cipher"}, /* tom_mode_miss */ {335545221, "Unknown symmetric crypt mode specified"}, /* tom_mode_bad */ {335545222, "Mode parameter makes no sense for chosen cipher"}, /* tom_no_mode */ {335545223, "Should specify initialization vector (IV) for chosen cipher and/or mode"}, /* tom_iv_miss */ {335545224, "Initialization vector (IV) makes no sense for chosen cipher and/or mode"}, /* tom_no_iv */ {335545225, "Invalid counter endianess {0}"}, /* tom_ctrtype_bad */ {335545226, "Counter endianess parameter is not used in mode {0}"}, /* tom_no_ctrtype */ {335545227, "Too big counter value {0}, maximum {1} can be used"}, /* tom_ctr_big */ {335545228, "Counter length/value parameter is not used with {0} {1}"}, /* tom_no_ctr */ {335545229, "Invalid initialization vector (IV) length {0}, need {1}"}, /* tom_iv_length */ {335545230, "TomCrypt library error: {0}"}, /* tom_error */ {335545231, "Starting PRNG yarrow"}, /* tom_yarrow_start */ {335545232, "Setting up PRNG yarrow"}, /* tom_yarrow_setup */ {335545233, "Initializing {0} mode"}, /* tom_init_mode */ {335545234, "Encrypting in {0} mode"}, /* tom_crypt_mode */ {335545235, 
"Decrypting in {0} mode"}, /* tom_decrypt_mode */ {335545236, "Initializing cipher {0}"}, /* tom_init_cip */ {335545237, "Encrypting using cipher {0}"}, /* tom_crypt_cip */ {335545238, "Decrypting using cipher {0}"}, /* tom_decrypt_cip */ {335545239, "Setting initialization vector (IV) for {0}"}, /* tom_setup_cip */ {335545240, "Invalid initialization vector (IV) length {0}, need 8 or 12"}, /* tom_setup_chacha */ {335545241, "Encoding {0}"}, /* tom_encode */ {335545242, "Decoding {0}"}, /* tom_decode */ {335545243, "Importing RSA key"}, /* tom_rsa_import */ {335545244, "Invalid OAEP packet"}, /* tom_oaep */ {335545245, "Unknown hash algorithm {0}"}, /* tom_hash_bad */ {335545246, "Making RSA key"}, /* tom_rsa_make */ {335545247, "Exporting {0} RSA key"}, /* tom_rsa_export */ {335545248, "RSA-signing data"}, /* tom_rsa_sign */ {335545249, "Verifying RSA-signed data"}, /* tom_rsa_verify */ {335545250, "Invalid key length {0}, need 16 or 32"}, /* tom_chacha_key */ {335545251, "invalid replicator handle"}, /* bad_repl_handle */ {335545252, "Transaction's base snapshot number does not exist"}, /* tra_snapshot_does_not_exist */ {335545253, "Input parameter '{0}' is not used in SQL query text"}, /* eds_input_prm_not_used */ {335545254, "Effective user is {0}"}, /* effective_user */ {335545255, "Invalid time zone bind mode {0}"}, /* invalid_time_zone_bind */ {335545256, "Invalid decfloat bind mode {0}"}, /* invalid_decfloat_bind */ {335545257, "Invalid hex text length {0}, should be multiple of 2"}, /* odd_hex_len */ {335545258, "Invalid hex digit {0} at position {1}"}, /* invalid_hex_digit */ {335545259, "Error processing isc_dpb_set_bind clumplet \"{0}\""}, /* bind_err */ {335545260, "The following statement failed: {0}"}, /* bind_statement */ {335545261, "Can not convert {0} to {1}"}, /* bind_convert */ {335545262, "cannot update old BLOB"}, /* cannot_update_old_blob */ {335545263, "cannot read from new BLOB"}, /* cannot_read_new_blob */ {335545264, "No permission for 
CREATE {0} operation"}, /* dyn_no_create_priv */ {335545265, "SUSPEND could not be used without RETURNS clause in PROCEDURE or EXECUTE BLOCK"}, /* suspend_without_returns */ {335545266, "String truncated warning due to the following reason"}, /* truncate_warn */ {335545267, "Monitoring data does not fit into the field"}, /* truncate_monitor */ {335545268, "Engine data does not fit into return value of system function"}, /* truncate_context */ {335545269, "Multiple source records cannot match the same target during MERGE"}, /* merge_dup_update */ {335545270, "RDB$PAGES written by non-system transaction, DB appears to be damaged"}, /* wrong_page */ {335545271, "Replication error"}, /* repl_error */ {335545272, "Reset of user session failed. Connection is shut down."}, /* ses_reset_failed */ {335545273, "File size is less than expected"}, /* block_size */ {335545274, "Invalid key length {0}, need >{1}"}, /* tom_key_length */ {335545275, "Invalid information arguments"}, /* inf_invalid_args */ {335545276, "Empty or NULL parameter {0} is not accepted"}, /* sysf_invalid_null_empty */ {335545277, "Undefined local table number {0}"}, /* bad_loctab_num */ {335545278, "Invalid text <{0}> after quoted string"}, /* quoted_str_bad */ {335545279, "Missing terminating quote <{0}> in the end of quoted string"}, /* quoted_str_miss */ {335545280, "{0}: inconsistent shared memory type/version; found {1}, expected {2}"}, /* wrong_shmem_ver */ {335545281, "{0}-bit engine can't open database already opened by {1}-bit engine"}, /* wrong_shmem_bitness */ {335545282, "Procedures cannot specify access type other than NATURAL in the plan"}, /* wrong_proc_plan */ {335545283, "Invalid RDB$BLOB_UTIL handle"}, /* invalid_blob_util_handle */ {335545284, "Invalid temporary BLOB ID"}, /* bad_temp_blob_id */ {335545285, "ODS upgrade failed while adding new system {0}"}, /* ods_upgrade_err */ {335545286, "Wrong parallel workers value {0}, valid range are from 1 to {1}"}, /* bad_par_workers */ 
{335545287, "Definition of index expression is not found for index {0}"}, /* idx_expr_not_found */ {335545288, "Definition of index condition is not found for index {0}"}, /* idx_cond_not_found */ {335740929, "data base file name ({0}) already given"}, /* gfix_db_name */ {335740930, "invalid switch {0}"}, /* gfix_invalid_sw */ {335740931, "gfix version {0}"}, /* gfix_version */ {335740932, "incompatible switch combination"}, /* gfix_incmp_sw */ {335740933, "replay log pathname required"}, /* gfix_replay_req */ {335740934, "number of page buffers for cache required"}, /* gfix_pgbuf_req */ {335740935, "numeric value required"}, /* gfix_val_req */ {335740936, "positive numeric value required"}, /* gfix_pval_req */ {335740937, "number of transactions per sweep required"}, /* gfix_trn_req */ {335740938, "transaction number or \"all\" required"}, /* gfix_trn_all_req */ {335740939, "\"sync\" or \"async\" required"}, /* gfix_sync_req */ {335740940, "\"full\" or \"reserve\" required"}, /* gfix_full_req */ {335740941, "user name required"}, /* gfix_usrname_req */ {335740942, "password required"}, /* gfix_pass_req */ {335740943, "subsystem name"}, /* gfix_subs_name */ {335740944, "\"wal\" required"}, /* gfix_wal_req */ {335740945, "number of seconds required"}, /* gfix_sec_req */ {335740946, "numeric value between 0 and 32767 inclusive required"}, /* gfix_nval_req */ {335740947, "must specify type of shutdown"}, /* gfix_type_shut */ {335740948, "please retry, specifying an option"}, /* gfix_retry */ {335740949, "plausible options are:"}, /* gfix_opt */ {335740950, "\\n Options can be abbreviated to the unparenthesized characters"}, /* gfix_qualifiers */ {335740951, "please retry, giving a database name"}, /* gfix_retry_db */ {335740952, "Summary of validation errors"}, /* gfix_summary */ {335740953, " -ac(tivate_shadow) activate shadow file for database usage"}, /* gfix_opt_active */ {335740954, " -at(tach) shutdown new database attachments"}, /* gfix_opt_attach */ 
{335740955, "\t-begin_log\tbegin logging for replay utility"}, /* gfix_opt_begin_log */ {335740956, " -b(uffers) set page buffers "}, /* gfix_opt_buffers */ {335740957, " -co(mmit) commit transaction "}, /* gfix_opt_commit */ {335740958, " -ca(che) shutdown cache manager"}, /* gfix_opt_cache */ {335740959, "\t-disable\tdisable WAL"}, /* gfix_opt_disable */ {335740960, " -fu(ll) validate record fragments (-v)"}, /* gfix_opt_full */ {335740961, " -fo(rce_shutdown) force database shutdown"}, /* gfix_opt_force */ {335740962, " -h(ousekeeping) set sweep interval "}, /* gfix_opt_housekeep */ {335740963, " -i(gnore) ignore checksum errors"}, /* gfix_opt_ignore */ {335740964, " -k(ill_shadow) kill all unavailable shadow files"}, /* gfix_opt_kill */ {335740965, " -l(ist) show limbo transactions"}, /* gfix_opt_list */ {335740966, " -me(nd) prepare corrupt database for backup"}, /* gfix_opt_mend */ {335740967, " -n(o_update) read-only validation (-v)"}, /* gfix_opt_no_update */ {335740968, " -o(nline) database online "}, /* gfix_opt_online */ {335740969, " -pr(ompt) prompt for commit/rollback (-l)"}, /* gfix_opt_prompt */ {335740970, " -pa(ssword) default password"}, /* gfix_opt_password */ {335740971, "\t-quit_log\tquit logging for replay utility"}, /* gfix_opt_quit_log */ {335740972, " -r(ollback) rollback transaction "}, /* gfix_opt_rollback */ {335740973, " -sw(eep) force garbage collection"}, /* gfix_opt_sweep */ {335740974, " -sh(utdown) shutdown "}, /* gfix_opt_shut */ {335740975, " -tw(o_phase) perform automated two-phase recovery"}, /* gfix_opt_two_phase */ {335740976, " -tra(nsaction) shutdown transaction startup"}, /* gfix_opt_tran */ {335740977, " -u(se) use full or reserve space for versions"}, /* gfix_opt_use */ {335740978, " -user default user name"}, /* gfix_opt_user */ {335740979, " -v(alidate) validate database structure"}, /* gfix_opt_validate */ {335740980, " -w(rite) write synchronously or asynchronously"}, /* gfix_opt_write */ {335740981, " -x set debug 
on"}, /* gfix_opt_x */ {335740982, " -z print software version number"}, /* gfix_opt_z */ {335740983, "\\n\tNumber of record level errors\t: {0}"}, /* gfix_rec_err */ {335740984, "\tNumber of Blob page errors\t: {0}"}, /* gfix_blob_err */ {335740985, "\tNumber of data page errors\t: {0}"}, /* gfix_data_err */ {335740986, "\tNumber of index page errors\t: {0}"}, /* gfix_index_err */ {335740987, "\tNumber of pointer page errors\t: {0}"}, /* gfix_pointer_err */ {335740988, "\tNumber of transaction page errors\t: {0}"}, /* gfix_trn_err */ {335740989, "\tNumber of database page errors\t: {0}"}, /* gfix_db_err */ {335740990, "bad block type"}, /* gfix_bad_block */ {335740991, "internal block exceeds maximum size"}, /* gfix_exceed_max */ {335740992, "corrupt pool"}, /* gfix_corrupt_pool */ {335740993, "virtual memory exhausted"}, /* gfix_mem_exhausted */ {335740994, "bad pool id"}, /* gfix_bad_pool */ {335740995, "Transaction state {0} not in valid range."}, /* gfix_trn_not_valid */ {335740996, "ATTACH_DATABASE: attempted attach of {0},"}, /* gfix_dbg_attach */ {335740997, " failed"}, /* gfix_dbg_failed */ {335740998, " succeeded"}, /* gfix_dbg_success */ {335740999, "Transaction {0} is in limbo."}, /* gfix_trn_limbo */ {335741000, "More limbo transactions than fit. 
Try again"}, /* gfix_try_again */ {335741001, "Unrecognized info item {0}"}, /* gfix_unrec_item */ {335741002, "A commit of transaction {0} will violate two-phase commit."}, /* gfix_commit_violate */ {335741003, "A rollback of transaction {0} is needed to preserve two-phase commit."}, /* gfix_preserve */ {335741004, "Transaction {0} has already been partially committed."}, /* gfix_part_commit */ {335741005, "A rollback of this transaction will violate two-phase commit."}, /* gfix_rback_violate */ {335741006, "Transaction {0} has been partially committed."}, /* gfix_part_commit2 */ {335741007, "A commit is necessary to preserve the two-phase commit."}, /* gfix_commit_pres */ {335741008, "Insufficient information is available to determine"}, /* gfix_insuff_info */ {335741009, "a proper action for transaction {0}."}, /* gfix_action */ {335741010, "Transaction {0}: All subtransactions have been prepared."}, /* gfix_all_prep */ {335741011, "Either commit or rollback is possible."}, /* gfix_comm_rback */ {335741012, "unexpected end of input"}, /* gfix_unexp_eoi */ {335741013, "Commit, rollback, or neither (c, r, or n)?"}, /* gfix_ask */ {335741014, "Could not reattach to database for transaction {0}."}, /* gfix_reattach_failed */ {335741015, "Original path: {0}"}, /* gfix_org_path */ {335741016, "Enter a valid path:"}, /* gfix_enter_path */ {335741017, "Attach unsuccessful."}, /* gfix_att_unsucc */ {335741018, "failed to reconnect to a transaction in database {0}"}, /* gfix_recon_fail */ {335741019, "Transaction {0}:"}, /* gfix_trn2 */ {335741020, " Multidatabase transaction:"}, /* gfix_mdb_trn */ {335741021, " Host Site: {0}"}, /* gfix_host_site */ {335741022, " Transaction {0}"}, /* gfix_trn */ {335741023, "has been prepared."}, /* gfix_prepared */ {335741024, "has been committed."}, /* gfix_committed */ {335741025, "has been rolled back."}, /* gfix_rolled_back */ {335741026, "is not available."}, /* gfix_not_available */ {335741027, "is not found, assumed not 
prepared."}, /* gfix_not_prepared */ {335741028, "is not found, assumed to be committed."}, /* gfix_be_committed */ {335741029, " Remote Site: {0}"}, /* gfix_rmt_site */ {335741030, " Database Path: {0}"}, /* gfix_db_path */ {335741031, " Automated recovery would commit this transaction."}, /* gfix_auto_comm */ {335741032, " Automated recovery would rollback this transaction."}, /* gfix_auto_rback */ {335741033, "Warning: Multidatabase transaction is in inconsistent state for recovery."}, /* gfix_warning */ {335741034, "Transaction {0} was committed, but prior ones were rolled back."}, /* gfix_trn_was_comm */ {335741035, "Transaction {0} was rolled back, but prior ones were committed."}, /* gfix_trn_was_rback */ {335741036, "Transaction description item unknown"}, /* gfix_trn_unknown */ {335741037, " -mo(de) read_only or read_write database"}, /* gfix_opt_mode */ {335741038, "\"read_only\" or \"read_write\" required"}, /* gfix_mode_req */ {335741039, " -sq(l_dialect) set database dialect n"}, /* gfix_opt_SQL_dialect */ {335741040, "database SQL dialect must be one of '{0}'"}, /* gfix_SQL_dialect */ {335741041, "dialect number required"}, /* gfix_dialect_req */ {335741042, "positive or zero numeric value required"}, /* gfix_pzval_req */ {335741043, " -tru(sted) use trusted authentication"}, /* gfix_opt_trusted */ {335741044, "could not open password file {0}, errno {1}"}, {335741045, "could not read password file {0}, errno {1}"}, {335741046, "empty password file {0}"}, {335741047, " -fe(tch_password) fetch password from file"}, {335741048, "usage: gfix [options] "}, {335741049, " -nol(inger) close database ignoring linger setting for it"}, /* gfix_opt_nolinger */ {335741050, "\tNumber of inventory page errors\t: {0}"}, /* gfix_pip_err */ {335741051, "\tNumber of record level warnings\t: {0}"}, /* gfix_rec_warn */ {335741052, "\tNumber of blob page warnings\t: {0}"}, /* gfix_blob_warn */ {335741053, "\tNumber of data page warnings\t: {0}"}, /* gfix_data_warn */ 
{335741054, "\tNumber of index page warnings\t: {0}"}, /* gfix_index_warn */ {335741055, "\tNumber of pointer page warnings\t: {0}"}, /* gfix_pointer_warn */ {335741056, "\tNumber of transaction page warnings\t: {0}"}, /* gfix_trn_warn */ {335741057, "\tNumber of database page warnings\t: {0}"}, /* gfix_db_warn */ {335741058, "\tNumber of inventory page warnings\t: {0}"}, /* gfix_pip_warn */ {335741059, " -icu fix database to be usable with present ICU version"}, /* gfix_opt_icu */ {335741060, " -role set SQL role name"}, /* gfix_opt_role */ {335741061, "SQL role name required"}, /* gfix_role_req */ {335741062, " -repl(ica) replica mode "}, /* gfix_opt_repl */ {335741063, "replica mode (none / read_only / read_write) required"}, /* gfix_repl_mode_req */ {335741064, " -par(allel) parallel workers (-sweep, -icu)"}, /* gfix_opt_parallel */ {335741065, " -up(grade) upgrade database ODS"}, /* gfix_opt_upgrade */ {336003074, "Cannot SELECT RDB$DB_KEY from a stored procedure."}, /* dsql_dbkey_from_non_table */ {336003075, "Precision 10 to 18 changed from DOUBLE PRECISION in SQL dialect 1 to 64-bit scaled integer in SQL dialect 3"}, /* dsql_transitional_numeric */ {336003076, "Use of {0} expression that returns different results in dialect 1 and dialect 3"}, /* dsql_dialect_warning_expr */ {336003077, "Database SQL dialect {0} does not support reference to {1} datatype"}, /* sql_db_dialect_dtype_unsupport */ {336003078, ""}, {336003079, "DB dialect {0} and client dialect {1} conflict with respect to numeric precision {2}."}, /* sql_dialect_conflict_num */ {336003080, "WARNING: Numeric literal {0} is interpreted as a floating-point"}, /* dsql_warning_number_ambiguous */ {336003081, "value in SQL dialect 1, but as an exact numeric value in SQL dialect 3."}, /* dsql_warning_number_ambiguous1 */ {336003082, "WARNING: NUMERIC and DECIMAL fields with precision 10 or greater are stored"}, /* dsql_warn_precision_ambiguous */ {336003083, "as approximate floating-point values in SQL 
dialect 1, but as 64-bit"}, /* dsql_warn_precision_ambiguous1 */ {336003084, "integers in SQL dialect 3."}, /* dsql_warn_precision_ambiguous2 */ {336003085, "Ambiguous field name between {0} and {1}"}, /* dsql_ambiguous_field_name */ {336003086, "External function should have return position between 1 and {0}"}, /* dsql_udf_return_pos_err */ {336003087, "Label {0} {1} in the current scope"}, /* dsql_invalid_label */ {336003088, "Datatypes {0}are not comparable in expression {1}"}, /* dsql_datatypes_not_comparable */ {336003089, "Empty cursor name is not allowed"}, /* dsql_cursor_invalid */ {336003090, "Statement already has a cursor {0} assigned"}, /* dsql_cursor_redefined */ {336003091, "Cursor {0} is not found in the current context"}, /* dsql_cursor_not_found */ {336003092, "Cursor {0} already exists in the current context"}, /* dsql_cursor_exists */ {336003093, "Relation {0} is ambiguous in cursor {1}"}, /* dsql_cursor_rel_ambiguous */ {336003094, "Relation {0} is not found in cursor {1}"}, /* dsql_cursor_rel_not_found */ {336003095, "Cursor is not open"}, /* dsql_cursor_not_open */ {336003096, "Data type {0} is not supported for EXTERNAL TABLES. 
Relation '{1}', field '{2}'"}, /* dsql_type_not_supp_ext_tab */ {336003097, "Feature not supported on ODS version older than {0}.{1}"}, /* dsql_feature_not_supported_ods */ {336003098, "Primary key required on table {0}"}, /* primary_key_required */ {336003099, "UPDATE OR INSERT field list does not match primary key of table {0}"}, /* upd_ins_doesnt_match_pk */ {336003100, "UPDATE OR INSERT field list does not match MATCHING clause"}, /* upd_ins_doesnt_match_matching */ {336003101, "UPDATE OR INSERT without MATCHING could not be used with views based on more than one table"}, /* upd_ins_with_complex_view */ {336003102, "Incompatible trigger type"}, /* dsql_incompatible_trigger_type */ {336003103, "Database trigger type can't be changed"}, /* dsql_db_trigger_type_cant_change */ {336003104, "To be used with RDB$RECORD_VERSION, {0} must be a table or a view of single table"}, /* dsql_record_version_table */ {336003105, "SQLDA version expected between {0} and {1}, found {2}"}, /* dsql_invalid_sqlda_version */ {336003106, "at SQLVAR index {0}"}, /* dsql_sqlvar_index */ {336003107, "empty pointer to NULL indicator variable"}, /* dsql_no_sqlind */ {336003108, "empty pointer to data"}, /* dsql_no_sqldata */ {336003109, "No SQLDA for input values provided"}, /* dsql_no_input_sqlda */ {336003110, "No SQLDA for output values provided"}, /* dsql_no_output_sqlda */ {336003111, "Wrong number of parameters (expected {0}, got {1})"}, /* dsql_wrong_param_num */ {336003112, "Invalid DROP SQL SECURITY clause"}, /* dsql_invalid_drop_ss_clause */ {336003113, "UPDATE OR INSERT value for field {0}, part of the implicit or explicit MATCHING clause, cannot be DEFAULT"}, /* upd_ins_cannot_default */ {336068609, "ODS version not supported by DYN"}, {336068610, "unsupported DYN verb"}, {336068611, "STORE RDB$FIELD_DIMENSIONS failed"}, {336068612, "unsupported DYN verb"}, {336068613, "{0}"}, {336068614, "unsupported DYN verb"}, {336068615, "DEFINE BLOB FILTER failed"}, {336068616, "DEFINE 
GENERATOR failed"}, {336068617, "DEFINE GENERATOR unexpected DYN verb"}, {336068618, "DEFINE FUNCTION failed"}, {336068619, "unsupported DYN verb"}, {336068620, "DEFINE FUNCTION ARGUMENT failed"}, {336068621, "STORE RDB$FIELDS failed"}, {336068622, "No table specified for index"}, {336068623, "STORE RDB$INDEX_SEGMENTS failed"}, {336068624, "unsupported DYN verb"}, {336068625, "PRIMARY KEY column lookup failed"}, {336068626, "could not find UNIQUE or PRIMARY KEY constraint in table {0} with specified columns"}, {336068627, "PRIMARY KEY lookup failed"}, {336068628, "could not find PRIMARY KEY index in specified table {0}"}, {336068629, "STORE RDB$INDICES failed"}, {336068630, "STORE RDB$FIELDS failed"}, {336068631, "STORE RDB$RELATION_FIELDS failed"}, {336068632, "STORE RDB$RELATIONS failed"}, {336068633, "STORE RDB$USER_PRIVILEGES failed defining a table"}, {336068634, "unsupported DYN verb"}, {336068635, "STORE RDB$RELATIONS failed"}, {336068636, "STORE RDB$FIELDS failed"}, {336068637, "STORE RDB$RELATION_FIELDS failed"}, {336068638, "unsupported DYN verb"}, {336068639, "DEFINE TRIGGER failed"}, {336068640, "unsupported DYN verb"}, {336068641, "DEFINE TRIGGER MESSAGE failed"}, {336068642, "STORE RDB$VIEW_RELATIONS failed"}, {336068643, "ERASE RDB$FIELDS failed"}, {336068644, "ERASE BLOB FILTER failed"}, {336068645, "BLOB Filter {0} not found"}, /* dyn_filter_not_found */ {336068646, "unsupported DYN verb"}, {336068647, "ERASE RDB$FUNCTION_ARGUMENTS failed"}, {336068648, "ERASE RDB$FUNCTIONS failed"}, {336068649, "Function {0} not found"}, /* dyn_func_not_found */ {336068650, "unsupported DYN verb"}, {336068651, "Domain {0} is used in table {1} (local name {2}) and cannot be dropped"}, {336068652, "ERASE RDB$FIELDS failed"}, {336068653, "ERASE RDB$FIELDS failed"}, {336068654, "Column not found"}, {336068655, "ERASE RDB$INDICES failed"}, {336068656, "Index not found"}, /* dyn_index_not_found */ {336068657, "ERASE RDB$INDEX_SEGMENTS failed"}, {336068658, "No segments 
found for index"}, {336068659, "No table specified in ERASE RFR"}, {336068660, "Column {0} from table {1} is referenced in view {2}"}, {336068661, "ERASE RDB$RELATION_FIELDS failed"}, {336068662, "View {0} not found"}, /* dyn_view_not_found */ {336068663, "Column not found for table"}, {336068664, "ERASE RDB$INDEX_SEGMENTS failed"}, {336068665, "ERASE RDB$INDICES failed"}, {336068666, "ERASE RDB$RELATION_FIELDS failed"}, {336068667, "ERASE RDB$VIEW_RELATIONS failed"}, {336068668, "ERASE RDB$RELATIONS failed"}, {336068669, "Table not found"}, {336068670, "ERASE RDB$USER_PRIVILEGES failed"}, {336068671, "ERASE RDB$FILES failed"}, {336068672, "unsupported DYN verb"}, {336068673, "ERASE RDB$TRIGGER_MESSAGES failed"}, {336068674, "ERASE RDB$TRIGGERS failed"}, {336068675, "Trigger not found"}, {336068676, "MODIFY RDB$VIEW_RELATIONS failed"}, {336068677, "unsupported DYN verb"}, {336068678, "TRIGGER NAME expected"}, {336068679, "ERASE TRIGGER MESSAGE failed"}, {336068680, "Trigger Message not found"}, {336068681, "unsupported DYN verb"}, {336068682, "ERASE RDB$SECURITY_CLASSES failed"}, {336068683, "Security class not found"}, {336068684, "unsupported DYN verb"}, {336068685, "SELECT RDB$USER_PRIVILEGES failed in grant"}, {336068686, "SELECT RDB$USER_PRIVILEGES failed in grant"}, {336068687, "STORE RDB$USER_PRIVILEGES failed in grant"}, {336068688, "Specified domain or source column does not exist"}, {336068689, "Generation of column name failed"}, {336068690, "Generation of index name failed"}, {336068691, "Generation of trigger name failed"}, {336068692, "MODIFY DATABASE failed"}, {336068693, "MODIFY RDB$CHARACTER_SETS failed"}, {336068694, "MODIFY RDB$COLLATIONS failed"}, {336068695, "MODIFY RDB$FIELDS failed"}, {336068696, "MODIFY RDB$BLOB_FILTERS failed"}, {336068697, "Domain not found"}, /* dyn_domain_not_found */ {336068698, "unsupported DYN verb"}, {336068699, "MODIFY RDB$INDICES failed"}, {336068700, "MODIFY RDB$FUNCTIONS failed"}, {336068701, "Index column not 
found"}, {336068702, "MODIFY RDB$GENERATORS failed"}, {336068703, "MODIFY RDB$RELATION_FIELDS failed"}, {336068704, "Local column {0} not found"}, {336068705, "add EXTERNAL FILE not allowed"}, {336068706, "drop EXTERNAL FILE not allowed"}, {336068707, "MODIFY RDB$RELATIONS failed"}, {336068708, "MODIFY RDB$PROCEDURE_PARAMETERS failed"}, {336068709, "Table column not found"}, {336068710, "MODIFY TRIGGER failed"}, {336068711, "TRIGGER NAME expected"}, {336068712, "unsupported DYN verb"}, {336068713, "MODIFY TRIGGER MESSAGE failed"}, {336068714, "Create metadata BLOB failed"}, {336068715, "Write metadata BLOB failed"}, {336068716, "Close metadata BLOB failed"}, {336068717, "Triggers created automatically cannot be modified"}, /* dyn_cant_modify_auto_trig */ {336068718, "unsupported DYN verb"}, {336068719, "ERASE RDB$USER_PRIVILEGES failed in revoke(1)"}, {336068720, "Access to RDB$USER_PRIVILEGES failed in revoke(2)"}, {336068721, "ERASE RDB$USER_PRIVILEGES failed in revoke (3)"}, {336068722, "Access to RDB$USER_PRIVILEGES failed in revoke (4)"}, {336068723, "CREATE VIEW failed"}, {336068724, " attempt to index BLOB column in INDEX {0}"}, {336068725, " attempt to index array column in index {0}"}, {336068726, "key size too big for index {0}"}, {336068727, "no keys for index {0}"}, {336068728, "Unknown columns in index {0}"}, {336068729, "STORE RDB$RELATION_CONSTRAINTS failed"}, {336068730, "STORE RDB$CHECK_CONSTRAINTS failed"}, {336068731, "Column: {0} not defined as NOT NULL - cannot be used in PRIMARY KEY constraint definition"}, {336068732, "A column name is repeated in the definition of constraint: {0}"}, {336068733, "Integrity Constraint lookup failed"}, {336068734, "Same set of columns cannot be used in more than one PRIMARY KEY and/or UNIQUE constraint definition"}, {336068735, "STORE RDB$REF_CONSTRAINTS failed"}, {336068736, "No table specified in delete_constraint"}, {336068737, "ERASE RDB$RELATION_CONSTRAINTS failed"}, {336068738, "CONSTRAINT {0} does not 
exist."}, {336068739, "Generation of constraint name failed"}, {336068740, "Table {0} already exists"}, /* dyn_dup_table */ {336068741, "Number of referencing columns do not equal number of referenced columns"}, {336068742, "STORE RDB$PROCEDURES failed"}, {336068743, "Procedure {0} already exists"}, /* dyn_dup_procedure */ {336068744, "STORE RDB$PROCEDURE_PARAMETERS failed"}, {336068745, "Store into system table {0} failed"}, {336068746, "ERASE RDB$PROCEDURE_PARAMETERS failed"}, {336068747, "ERASE RDB$PROCEDURES failed"}, {336068748, "Procedure {0} not found"}, /* dyn_proc_not_found */ {336068749, "MODIFY RDB$PROCEDURES failed"}, {336068750, "DEFINE EXCEPTION failed"}, {336068751, "ERASE EXCEPTION failed"}, {336068752, "Exception not found"}, /* dyn_exception_not_found */ {336068753, "MODIFY EXCEPTION failed"}, {336068754, "Parameter {0} in procedure {1} not found"}, /* dyn_proc_param_not_found */ {336068755, "Trigger {0} not found"}, /* dyn_trig_not_found */ {336068756, "Only one data type change to the domain {0} allowed at a time"}, {336068757, "Only one data type change to the field {0} allowed at a time"}, {336068758, "STORE RDB$FILES failed"}, {336068759, "Character set {0} not found"}, /* dyn_charset_not_found */ {336068760, "Collation {0} not found"}, /* dyn_collation_not_found */ {336068761, "ERASE RDB$LOG_FILES failed"}, {336068762, "STORE RDB$LOG_FILES failed"}, {336068763, "Role {0} not found"}, /* dyn_role_not_found */ {336068764, "Difference file lookup failed"}, {336068765, "DEFINE SHADOW failed"}, {336068766, "MODIFY RDB$ROLES failed"}, {336068767, "Name longer than database column size"}, /* dyn_name_longer */ {336068768, "\"Only one constraint allowed for a domain\""}, {336068770, "Looking up column position failed"}, {336068771, "A node name is not permitted in a table with external file definition"}, {336068772, "Shadow lookup failed"}, {336068773, "Shadow {0} already exists"}, {336068774, "Cannot add file with the same name as the database or 
added files"}, {336068775, "no grant option for privilege {0} on column {1} of table/view {2}"}, {336068776, "no grant option for privilege {0} on column {1} of base table/view {2}"}, {336068777, "no grant option for privilege {0} on table/view {1} (for column {2})"}, {336068778, "no grant option for privilege {0} on base table/view {1} (for column {2})"}, {336068779, "no {0} privilege with grant option on table/view {1} (for column {2})"}, {336068780, "no {0} privilege with grant option on base table/view {1} (for column {2})"}, {336068781, "no grant option for privilege {0} on table/view {1}"}, {336068782, "no {0} privilege with grant option on table/view {1}"}, {336068783, "table/view {0} does not exist"}, {336068784, "column {0} does not exist in table/view {1}"}, /* dyn_column_does_not_exist */ {336068785, "Can not alter a view"}, {336068786, "EXTERNAL FILE table not supported in this context"}, {336068787, "attempt to index COMPUTED BY column in INDEX {0}"}, {336068788, "Table Name lookup failed"}, {336068789, "attempt to index a view"}, {336068790, "SELECT RDB$RELATIONS failed in grant"}, {336068791, "SELECT RDB$RELATION_FIELDS failed in grant"}, {336068792, "SELECT RDB$RELATIONS/RDB$OWNER_NAME failed in grant"}, {336068793, "SELECT RDB$USER_PRIVILEGES failed in grant"}, {336068794, "SELECT RDB$VIEW_RELATIONS/RDB$RELATION_FIELDS/... 
failed in grant"}, {336068795, "column {0} from table {1} is referenced in index {2}"}, {336068796, "SQL role {0} does not exist"}, /* dyn_role_does_not_exist */ {336068797, "user {0} has no grant admin option on SQL role {1}"}, /* dyn_no_grant_admin_opt */ {336068798, "user {0} is not a member of SQL role {1}"}, /* dyn_user_not_role_member */ {336068799, "{0} is not the owner of SQL role {1}"}, /* dyn_delete_role_failed */ {336068800, "{0} is a SQL role and not a user"}, /* dyn_grant_role_to_user */ {336068801, "user name {0} could not be used for SQL role"}, /* dyn_inv_sql_role_name */ {336068802, "SQL role {0} already exists"}, /* dyn_dup_sql_role */ {336068803, "keyword {0} can not be used as a SQL role name"}, /* dyn_kywd_spec_for_role */ {336068804, "SQL roles are not supported in on older versions of the database. A backup and restore of the database is required."}, /* dyn_roles_not_supported */ {336068812, "Cannot rename domain {0} to {1}. A domain with that name already exists."}, /* dyn_domain_name_exists */ {336068813, "Cannot rename column {0} to {1}. A column with that name already exists in table {2}."}, /* dyn_field_name_exists */ {336068814, "Column {0} from table {1} is referenced in {2}"}, /* dyn_dependency_exists */ {336068815, "Cannot change datatype for column {0}. Changing datatype is not supported for BLOB or ARRAY columns."}, /* dyn_dtype_invalid */ {336068816, "New size specified for column {0} must be at least {1} characters."}, /* dyn_char_fld_too_small */ {336068817, "Cannot change datatype for {0}. 
Conversion from base type {1} to {2} is not supported."}, /* dyn_invalid_dtype_conversion */ {336068818, "Cannot change datatype for column {0} from a character type to a non-character type."}, /* dyn_dtype_conv_invalid */ {336068819, "unable to allocate memory from the operating system"}, /* dyn_virmemexh */ {336068820, "Zero length identifiers are not allowed"}, /* dyn_zero_len_id */ {336068821, "ERASE RDB$GENERATORS failed"}, /* del_gen_fail */ {336068822, "Sequence {0} not found"}, /* dyn_gen_not_found */ {336068823, "Difference file is not defined"}, {336068824, "Difference file is already defined"}, {336068825, "Database is already in the physical backup mode"}, {336068826, "Database is not in the physical backup mode"}, {336068827, "DEFINE COLLATION failed"}, {336068828, "CREATE COLLATION statement is not supported in older versions of the database. A backup and restore is required."}, {336068829, "Maximum number of collations per character set exceeded"}, /* max_coll_per_charset */ {336068830, "Invalid collation attributes"}, /* invalid_coll_attr */ {336068831, "Collation {0} not installed for character set {1}"}, {336068832, "Cannot use the internal domain {0} as new type for field {1}"}, {336068833, "Default value is not allowed for array type in field {0}"}, {336068834, "Default value is not allowed for array type in domain {0}"}, {336068835, "DYN_UTIL_is_array failed for domain {0}"}, {336068836, "DYN_UTIL_copy_domain failed for domain {0}"}, {336068837, "Local column {0} doesn't have a default"}, {336068838, "Local column {0} default belongs to domain {1}"}, {336068839, "File name is invalid"}, {336068840, "{0} cannot reference {1}"}, /* dyn_wrong_gtt_scope */ {336068841, "Local column {0} is computed, cannot set a default value"}, {336068842, "ERASE RDB$COLLATIONS failed"}, /* del_coll_fail */ {336068843, "Collation {0} is used in table {1} (field name {2}) and cannot be dropped"}, /* dyn_coll_used_table */ {336068844, "Collation {0} is used in domain 
{1} and cannot be dropped"}, /* dyn_coll_used_domain */ {336068845, "Cannot delete system collation"}, /* dyn_cannot_del_syscoll */ {336068846, "Cannot delete default collation of CHARACTER SET {0}"}, /* dyn_cannot_del_def_coll */ {336068847, "Domain {0} is used in procedure {1} (parameter name {2}) and cannot be dropped"}, {336068848, "Field {0} cannot be used twice in index {1}"}, {336068849, "Table {0} not found"}, /* dyn_table_not_found */ {336068850, "attempt to reference a view ({0}) in a foreign key"}, {336068851, "Collation {0} is used in procedure {1} (parameter name {2}) and cannot be dropped"}, /* dyn_coll_used_procedure */ {336068852, "New scale specified for column {0} must be at most {1}."}, /* dyn_scale_too_big */ {336068853, "New precision specified for column {0} must be at least {1}."}, /* dyn_precision_too_small */ {336068854, "{0} is not grantor of {1} on {2} to {3}."}, {336068855, "Warning: {0} on {1} is not granted to {2}."}, /* dyn_miss_priv_warning */ {336068856, "Feature '{0}' is not supported in ODS {1}.{2}"}, /* dyn_ods_not_supp_feature */ {336068857, "Cannot add or remove COMPUTED from column {0}"}, /* dyn_cannot_addrem_computed */ {336068858, "Password should not be empty string"}, /* dyn_no_empty_pw */ {336068859, "Index {0} already exists"}, /* dyn_dup_index */ {336068860, "Only {0} or user with privilege USE_GRANTED_BY_CLAUSE can use GRANTED BY clause"}, /* dyn_locksmith_use_granted */ {336068861, "Exception {0} already exists"}, /* dyn_dup_exception */ {336068862, "Sequence {0} already exists"}, /* dyn_dup_generator */ {336068863, "ERASE RDB$USER_PRIVILEGES failed in REVOKE ALL ON ALL"}, {336068864, "Package {0} not found"}, /* dyn_package_not_found */ {336068865, "Schema {0} not found"}, /* dyn_schema_not_found */ {336068866, "Cannot ALTER or DROP system procedure {0}"}, /* dyn_cannot_mod_sysproc */ {336068867, "Cannot ALTER or DROP system trigger {0}"}, /* dyn_cannot_mod_systrig */ {336068868, "Cannot ALTER or DROP system function 
{0}"}, /* dyn_cannot_mod_sysfunc */ {336068869, "Invalid DDL statement for procedure {0}"}, /* dyn_invalid_ddl_proc */ {336068870, "Invalid DDL statement for trigger {0}"}, /* dyn_invalid_ddl_trig */ {336068871, "Function {0} has not been defined on the package body {1}"}, /* dyn_funcnotdef_package */ {336068872, "Procedure {0} has not been defined on the package body {1}"}, /* dyn_procnotdef_package */ {336068873, "Function {0} has a signature mismatch on package body {1}"}, /* dyn_funcsignat_package */ {336068874, "Procedure {0} has a signature mismatch on package body {1}"}, /* dyn_procsignat_package */ {336068875, "Default values for parameters are not allowed in the definition of a previously declared packaged procedure {0}.{1}"}, /* dyn_defvaldecl_package_proc */ {336068876, "Function {0} already exists"}, /* dyn_dup_function */ {336068877, "Package body {0} already exists"}, /* dyn_package_body_exists */ {336068878, "Invalid DDL statement for function {0}"}, /* dyn_invalid_ddl_func */ {336068879, "Cannot alter new style function {0} with ALTER EXTERNAL FUNCTION. Use ALTER FUNCTION instead."}, /* dyn_newfc_oldsyntax */ {336068880, "Cannot delete system generator {0}"}, {336068881, "Identity column {0} of table {1} must be of exact number type with zero scale"}, {336068882, "Identity column {0} of table {1} cannot be changed to NULLable"}, {336068883, "Identity column {0} of table {1} cannot have default value"}, {336068884, "Domain {0} must be of exact number type with zero scale because it's used in an identity column"}, {336068885, "Generation of generator name failed"}, {336068886, "Parameter {0} in function {1} not found"}, /* dyn_func_param_not_found */ {336068887, "Parameter {0} of routine {1} not found"}, /* dyn_routine_param_not_found */ {336068888, "Parameter {0} of routine {1} is ambiguous (found in both procedures and functions). 
Use a specifier keyword."}, /* dyn_routine_param_ambiguous */ {336068889, "Collation {0} is used in function {1} (parameter name {2}) and cannot be dropped"}, /* dyn_coll_used_function */ {336068890, "Domain {0} is used in function {1} (parameter name {2}) and cannot be dropped"}, /* dyn_domain_used_function */ {336068891, "ALTER USER requires at least one clause to be specified"}, /* dyn_alter_user_no_clause */ {336068892, "Cannot delete system SQL role {0}"}, {336068893, "Column {0} is not an identity column"}, {336068894, "Duplicate {0} {1}"}, /* dyn_duplicate_package_item */ {336068895, "System {0} {1} cannot be modified"}, /* dyn_cant_modify_sysobj */ {336068896, "INCREMENT BY 0 is an illegal option for sequence {0}"}, /* dyn_cant_use_zero_increment */ {336068897, "Can't use {0} in FOREIGN KEY constraint"}, /* dyn_cant_use_in_foreignkey */ {336068898, "Default values for parameters are not allowed in the definition of a previously declared packaged function {0}.{1}"}, /* dyn_defvaldecl_package_func */ {336068899, "Password must be specified when creating user"}, /* dyn_create_user_no_password */ {336068900, "role {0} can not be granted to role {1}"}, /* dyn_cyclic_role */ {336068901, "DROP SYSTEM PRIVILEGES should not be used in CREATE ROLE operator"}, {336068902, "Access to SYSTEM PRIVILEGES in ROLES denied to {0}"}, {336068903, "Only {0}, DB owner {1} or user with privilege USE_GRANTED_BY_CLAUSE can use GRANTED BY clause"}, {336068904, "INCREMENT BY 0 is an illegal option for identity column {0} of table {1}"}, /* dyn_cant_use_zero_inc_ident */ {336068905, "Concurrent ALTER DATABASE is not supported"}, /* dyn_concur_alter_database */ {336068906, "Incompatible ALTER DATABASE clauses: '{0}' and '{1}'"}, /* dyn_incompat_alter_database */ {336068907, "no {0} privilege with grant option on DDL {1}"}, /* dyn_no_ddl_grant_opt_priv */ {336068908, "no {0} privilege with grant option on object {1}"}, /* dyn_no_grant_opt_priv */ {336068909, "Function {0} does not 
exist"}, /* dyn_func_not_exist */ {336068910, "Procedure {0} does not exist"}, /* dyn_proc_not_exist */ {336068911, "Package {0} does not exist"}, /* dyn_pack_not_exist */ {336068912, "Trigger {0} does not exist"}, /* dyn_trig_not_exist */ {336068913, "View {0} does not exist"}, /* dyn_view_not_exist */ {336068914, "Table {0} does not exist"}, /* dyn_rel_not_exist */ {336068915, "Exception {0} does not exist"}, /* dyn_exc_not_exist */ {336068916, "Generator/Sequence {0} does not exist"}, /* dyn_gen_not_exist */ {336068917, "Field {0} of table {1} does not exist"}, /* dyn_fld_not_exist */ {336330752, "could not locate appropriate error message"}, {336330753, "found unknown switch"}, /* gbak_unknown_switch */ {336330754, "page size parameter missing"}, /* gbak_page_size_missing */ {336330755, "Page size specified ({0}) greater than limit (32768 bytes)"}, /* gbak_page_size_toobig */ {336330756, "redirect location for output is not specified"}, /* gbak_redir_ouput_missing */ {336330757, "conflicting switches for backup/restore"}, /* gbak_switches_conflict */ {336330758, "device type {0} not known"}, /* gbak_unknown_device */ {336330759, "protection is not there yet"}, /* gbak_no_protection */ {336330760, "page size is allowed only on restore or create"}, /* gbak_page_size_not_allowed */ {336330761, "multiple sources or destinations specified"}, /* gbak_multi_source_dest */ {336330762, "requires both input and output filenames"}, /* gbak_filename_missing */ {336330763, "input and output have the same name. Disallowed."}, /* gbak_dup_inout_names */ {336330764, "expected page size, encountered \"{0}\""}, /* gbak_inv_page_size */ {336330765, "REPLACE specified, but the first file {0} is a database"}, /* gbak_db_specified */ {336330766, "database {0} already exists. 
To replace it, use the -REP switch"}, /* gbak_db_exists */ {336330767, "device type not specified"}, /* gbak_unk_device */ {336330768, "cannot create APOLLO tape descriptor file {0}"}, {336330769, "cannot set APOLLO tape descriptor attribute for {0}"}, {336330770, "cannot create APOLLO cartridge descriptor file {0}"}, {336330771, "cannot close APOLLO tape descriptor file {0}"}, {336330772, "gds_$blob_info failed"}, /* gbak_blob_info_failed */ {336330773, "do not understand BLOB INFO item {0}"}, /* gbak_unk_blob_item */ {336330774, "gds_$get_segment failed"}, /* gbak_get_seg_failed */ {336330775, "gds_$close_blob failed"}, /* gbak_close_blob_failed */ {336330776, "gds_$open_blob failed"}, /* gbak_open_blob_failed */ {336330777, "Failed in put_blr_gen_id"}, /* gbak_put_blr_gen_id_failed */ {336330778, "data type {0} not understood"}, /* gbak_unk_type */ {336330779, "gds_$compile_request failed"}, /* gbak_comp_req_failed */ {336330780, "gds_$start_request failed"}, /* gbak_start_req_failed */ {336330781, "gds_$receive failed"}, /* gbak_rec_failed */ {336330782, "gds_$release_request failed"}, /* gbak_rel_req_failed */ {336330783, "gds_$database_info failed"}, /* gbak_db_info_failed */ {336330784, "Expected database description record"}, /* gbak_no_db_desc */ {336330785, "failed to create database {0}"}, /* gbak_db_create_failed */ {336330786, "RESTORE: decompression length error"}, /* gbak_decomp_len_error */ {336330787, "cannot find table {0}"}, /* gbak_tbl_missing */ {336330788, "Cannot find column for BLOB"}, /* gbak_blob_col_missing */ {336330789, "gds_$create_blob failed"}, /* gbak_create_blob_failed */ {336330790, "gds_$put_segment failed"}, /* gbak_put_seg_failed */ {336330791, "expected record length"}, /* gbak_rec_len_exp */ {336330792, "wrong length record, expected {0} encountered {1}"}, /* gbak_inv_rec_len */ {336330793, "expected data attribute"}, /* gbak_exp_data_type */ {336330794, "Failed in store_blr_gen_id"}, /* gbak_gen_id_failed */ {336330795, "do 
not recognize record type {0}"}, /* gbak_unk_rec_type */ {336330796, "Expected backup version 1..10. Found {0}"}, /* gbak_inv_bkup_ver */ {336330797, "expected backup description record"}, /* gbak_missing_bkup_desc */ {336330798, "string truncated"}, /* gbak_string_trunc */ {336330799, "warning -- record could not be restored"}, /* gbak_cant_rest_record */ {336330800, "gds_$send failed"}, /* gbak_send_failed */ {336330801, "no table name for data"}, /* gbak_no_tbl_name */ {336330802, "unexpected end of file on backup file"}, /* gbak_unexp_eof */ {336330803, "database format {0} is too old to restore to"}, /* gbak_db_format_too_old */ {336330804, "array dimension for column {0} is invalid"}, /* gbak_inv_array_dim */ {336330805, "expected array version number {0} but instead found {1}"}, {336330806, "expected array dimension {0} but instead found {1}"}, {336330807, "Expected XDR record length"}, /* gbak_xdr_len_expected */ {336330808, "Unexpected I/O error while {0} backup file"}, {336330809, "adding file {0}, starting at page {1}"}, {336330810, "array"}, {336330811, "backup"}, {336330812, " {0}B(ACKUP_DATABASE) backup database to file"}, {336330813, "\t\tbackup file is compressed"}, {336330814, " {0}D(EVICE) backup file device type on APOLLO (CT or MT)"}, {336330815, " {0}M(ETA_DATA) backup or restore metadata only"}, {336330816, "blob"}, {336330817, "cannot open backup file {0}"}, /* gbak_open_bkup_error */ {336330818, "cannot open status and error output file {0}"}, /* gbak_open_error */ {336330819, "closing file, committing, and finishing"}, {336330820, "committing metadata"}, {336330821, "commit failed on table {0}"}, {336330822, "committing secondary files"}, {336330823, "creating index {0}"}, {336330824, "committing data for table {0}"}, {336330825, " {0}C(REATE_DATABASE) create database from backup file (restore)"}, {336330826, "created database {0}, page_size {1} bytes"}, {336330827, "creating file {0}"}, {336330828, "creating indexes"}, {336330829, 
"database {0} has a page size of {1} bytes."}, {336330830, " {0}I(NACTIVE) deactivate indexes during restore"}, {336330831, "do not understand BLOB INFO item {0}"}, {336330832, "do not recognize {0} attribute {1} -- continuing"}, {336330833, "error accessing BLOB column {0} -- continuing"}, {336330834, "Exiting before completion due to errors"}, {336330835, "Exiting before completion due to errors"}, {336330836, "column"}, {336330837, "file"}, {336330838, "file length"}, {336330839, "filter"}, {336330840, "finishing, closing, and going home"}, {336330841, "function"}, {336330842, "function argument"}, {336330843, "gbak version {0}"}, {336330844, "domain"}, {336330845, "index"}, {336330846, "trigger {0} is invalid"}, {336330847, "legal switches are:"}, {336330848, "length given for initial file ({0}) is less than minimum ({1})"}, {336330849, " {0}E(XPAND) no data compression"}, {336330850, " {0}L(IMBO) ignore transactions in limbo"}, {336330851, " {0}O(NE_AT_A_TIME) restore one table at a time"}, {336330852, "opened file {0}"}, {336330853, " {0}P(AGE_SIZE) override default page size"}, {336330854, "page size"}, {336330855, "page size specified ({0} bytes) rounded up to {1} bytes"}, {336330856, " {0}Z print version number"}, {336330857, "privilege"}, {336330858, " {0} records ignored"}, {336330859, " {0} records restored"}, {336330860, "{0} records written"}, {336330861, " {0}Y redirect/suppress status message output"}, {336330862, "Reducing the database page size from {0} bytes to {1} bytes"}, {336330863, "table"}, {336330864, " {0}REP(LACE_DATABASE) replace database from backup file (restore)"}, {336330865, " {0}V(ERIFY) report each action taken"}, {336330866, "restore failed for record in table {0}"}, {336330867, " restoring column {0}"}, {336330868, " restoring file {0}"}, {336330869, " restoring filter {0}"}, {336330870, "restoring function {0}"}, {336330871, " restoring argument for function {0}"}, {336330872, " restoring gen id value of: {0}"}, {336330873, 
"restoring domain {0}"}, {336330874, " restoring index {0}"}, {336330875, " restoring privilege for user {0}"}, {336330876, "restoring data for table {0}"}, {336330877, "restoring security class {0}"}, {336330878, " restoring trigger {0}"}, {336330879, " restoring trigger message for {0}"}, {336330880, " restoring type {0} for column {1}"}, {336330881, "started transaction"}, {336330882, "starting transaction"}, {336330883, "security class"}, {336330884, "switches can be abbreviated to the unparenthesized characters"}, {336330885, "transportable backup -- data in XDR format"}, {336330886, "trigger"}, {336330887, "trigger message"}, {336330888, "trigger type"}, {336330889, "unknown switch \"{0}\""}, {336330890, "validation error on column in table {0}"}, {336330891, " Version(s) for database \"{0}\""}, {336330892, "view"}, {336330893, " writing argument for function {0}"}, {336330894, " writing data for table {0}"}, {336330895, " writing gen id of: {0}"}, {336330896, " writing column {0}"}, {336330897, " writing filter {0}"}, {336330898, "writing filters"}, {336330899, " writing function {0}"}, {336330900, "writing functions"}, {336330901, " writing domain {0}"}, {336330902, "writing domains"}, {336330903, " writing index {0}"}, {336330904, " writing privilege for user {0}"}, {336330905, " writing table {0}"}, {336330906, "writing tables"}, {336330907, " writing security class {0}"}, {336330908, " writing trigger {0}"}, {336330909, " writing trigger message for {0}"}, {336330910, "writing trigger messages"}, {336330911, "writing triggers"}, {336330912, " writing type {0} for column {1}"}, {336330913, "writing types"}, {336330914, "writing shadow files"}, {336330915, " writing shadow file {0}"}, {336330916, "writing id generators"}, {336330917, " writing generator {0} value {1}"}, {336330918, "readied database {0} for backup"}, {336330919, "restoring table {0}"}, {336330920, "type"}, {336330921, "gbak:"}, {336330922, "committing metadata for table {0}"}, {336330923, 
"error committing metadata for table {0}"}, {336330924, " {0}K(ILL) restore without creating shadows"}, {336330925, "cannot commit index {0}"}, {336330926, "cannot commit files"}, {336330927, " {0}T(RANSPORTABLE) transportable backup -- data in XDR format"}, {336330928, "closing file, committing, and finishing. {0} bytes written"}, {336330929, " {0}G(ARBAGE_COLLECT) inhibit garbage collection"}, {336330930, " {0}IG(NORE) ignore bad checksums"}, {336330931, "\tcolumn {0} used in index {1} seems to have vanished"}, {336330932, "index {0} omitted because {1} of the expected {2} keys were found"}, {336330933, " {0}FA(CTOR) blocking factor"}, {336330934, "blocking factor parameter missing"}, /* gbak_missing_block_fac */ {336330935, "expected blocking factor, encountered \"{0}\""}, /* gbak_inv_block_fac */ {336330936, "a blocking factor may not be used in conjunction with device CT"}, /* gbak_block_fac_specified */ {336330937, "restoring generator {0} value: {1}"}, {336330938, " {0}OL(D_DESCRIPTIONS) save old style metadata descriptions"}, {336330939, " {0}N(O_VALIDITY) do not restore database validity conditions"}, {336330940, "user name parameter missing"}, /* gbak_missing_username */ {336330941, "password parameter missing"}, /* gbak_missing_password */ {336330942, " {0}PAS(SWORD) Firebird password"}, {336330943, " {0}USER Firebird user name"}, {336330944, "writing stored procedures"}, {336330945, "writing stored procedure {0}"}, {336330946, "writing parameter {0} for stored procedure"}, {336330947, "restoring stored procedure {0}"}, {336330948, " restoring parameter {0} for stored procedure"}, {336330949, "writing exceptions"}, {336330950, "writing exception {0}"}, {336330951, "restoring exception {0}"}, {336330952, " missing parameter for the number of bytes to be skipped"}, /* gbak_missing_skipped_bytes */ {336330953, "expected number of bytes to be skipped, encountered \"{0}\""}, /* gbak_inv_skipped_bytes */ {336330954, "adjusting an invalid decompression length 
from {0} to {1}"}, {336330955, "skipped {0} bytes after reading a bad attribute {1}"}, {336330956, " {0}S(KIP_BAD_DATA) skip number of bytes after reading bad data"}, {336330957, "skipped {0} bytes looking for next valid attribute, encountered attribute {1}"}, {336330958, "writing table constraints"}, {336330959, "writing constraint {0}"}, {336330960, "table constraint"}, {336330961, "writing referential constraints"}, {336330962, "writing check constraints"}, {336330963, "writing character sets"}, /* msgVerbose_write_charsets */ {336330964, "writing collations"}, /* msgVerbose_write_collations */ {336330965, "character set"}, /* gbak_err_restore_charset */ {336330966, "writing character set {0}"}, /* msgVerbose_restore_charset */ {336330967, "collation"}, /* gbak_err_restore_collation */ {336330968, "writing collation {0}"}, /* msgVerbose_restore_collation */ {336330972, "Unexpected I/O error while reading from backup file"}, /* gbak_read_error */ {336330973, "Unexpected I/O error while writing to backup file"}, /* gbak_write_error */ {336330974, "\n\nCould not open file name \"{0}\""}, {336330975, "\n\nCould not write to file \"{0}\""}, {336330976, "\n\nCould not read from file \"{0}\""}, {336330977, "Done with volume #{0}, \"{1}\""}, {336330978, "\tPress return to reopen that file, or type a new\n\tname followed by return to open a different file."}, {336330979, "Type a file name to open and hit return"}, {336330980, " Name: "}, {336330981, "\n\nERROR: Backup incomplete"}, {336330982, "Expected backup start time {0}, found {1}"}, {336330983, "Expected backup database {0}, found {1}"}, {336330984, "Expected volume number {0}, found volume {1}"}, {336330985, "could not drop database {0} (no privilege or database might be in use)"}, /* gbak_db_in_use */ {336330986, "Skipped bad security class entry: {0}"}, {336330987, "Unknown V3 SUB_TYPE: {0} in FIELD: {1}."}, {336330988, "Converted V3 sub_type: {0} to character_set_id: {1} and collate_id: {2}."}, {336330989, 
"Converted V3 scale: {0} to character_set_id: {1} and callate_id: {2}."}, {336330990, "System memory exhausted"}, /* gbak_sysmemex */ {336330991, " {0}NT Non-Transportable backup file format"}, {336330992, "Index \"{0}\" failed to activate because:"}, {336330993, " The unique index has duplicate values or NULLs."}, {336330994, " Delete or Update duplicate values or NULLs, and activate index with"}, {336330995, " ALTER INDEX \"{0}\" ACTIVE;"}, {336330996, " Not enough disk space to create the sort file for an index."}, {336330997, " Set the TMP environment variable to a directory on a filesystem that does have enough space, and activate index with"}, {336330998, "Database is not online due to failure to activate one or more indices."}, {336330999, "Run gfix -online to bring database online without active indices."}, {336331000, "writing SQL roles"}, /* write_role_1 */ {336331001, " writing SQL role: {0}"}, /* write_role_2 */ {336331002, "SQL role"}, /* gbak_restore_role_failed */ {336331003, " restoring SQL role: {0}"}, /* restore_role */ {336331004, " {0}RO(LE) Firebird SQL role"}, /* gbak_role_op */ {336331005, "SQL role parameter missing"}, /* gbak_role_op_missing */ {336331006, " {0}CO(NVERT) backup external files as tables"}, /* gbak_convert_ext_tables */ {336331007, "gbak: WARNING:"}, /* gbak_warning */ {336331008, "gbak: ERROR:"}, /* gbak_error */ {336331009, " {0}BU(FFERS) override page buffers default"}, /* gbak_page_buffers */ {336331010, "page buffers parameter missing"}, /* gbak_page_buffers_missing */ {336331011, "expected page buffers, encountered \"{0}\""}, /* gbak_page_buffers_wrong_param */ {336331012, "page buffers is allowed only on restore or create"}, /* gbak_page_buffers_restore */ {336331013, "Starting with volume #{0}, \"{1}\""}, {336331014, "size specification either missing or incorrect for file {0}"}, /* gbak_inv_size */ {336331015, "file {0} out of sequence"}, /* gbak_file_outof_sequence */ {336331016, "can't join -- one of the files 
missing"}, /* gbak_join_file_missing */ {336331017, " standard input is not supported when using join operation"}, /* gbak_stdin_not_supptd */ {336331018, "standard output is not supported when using split operation or in verbose mode"}, /* gbak_stdout_not_supptd */ {336331019, "backup file {0} might be corrupt"}, /* gbak_bkup_corrupt */ {336331020, "database file specification missing"}, /* gbak_unk_db_file_spec */ {336331021, "can't write a header record to file {0}"}, /* gbak_hdr_write_failed */ {336331022, "free disk space exhausted"}, /* gbak_disk_space_ex */ {336331023, "file size given ({0}) is less than minimum allowed ({1})"}, /* gbak_size_lt_min */ {336331024, "Warning -- free disk space exhausted for file {0}, the rest of the bytes ({1}) will be written to file {2}"}, {336331025, "service name parameter missing"}, /* gbak_svc_name_missing */ {336331026, "Cannot restore over current database, must be SYSDBA or owner of the existing database."}, /* gbak_not_ownr */ {336331027, ""}, {336331028, " {0}USE_(ALL_SPACE) do not reserve space for record versions"}, {336331029, " {0}SE(RVICE) use services manager"}, {336331030, " {0}MO(DE) \"read_only\" or \"read_write\" access"}, /* gbak_opt_mode */ {336331031, "\"read_only\" or \"read_write\" required"}, /* gbak_mode_req */ {336331032, "setting database to read-only access"}, {336331033, "just data ignore all constraints etc."}, /* gbak_just_data */ {336331034, "restoring data only ignoring foreign key, unique, not null & other constraints"}, /* gbak_data_only */ {336331035, "closing file, committing, and finishing. 
{0} bytes written"}, {336331036, " {0}R(ECREATE_DATABASE) [O(VERWRITE)] create (or replace if OVERWRITE used)\\n\t\t\t\tdatabase from backup file (restore)"}, {336331037, " activating and creating deferred index {0}"}, /* gbak_activating_idx */ {336331038, "check constraint"}, {336331039, "exception"}, {336331040, "array dimensions"}, {336331041, "generator"}, {336331042, "procedure"}, {336331043, "procedure parameter"}, {336331044, "referential constraint"}, {336331045, "type (in RDB$TYPES)"}, {336331046, " {0}NOD(BTRIGGERS) do not run database triggers"}, {336331047, " {0}TRU(STED) use trusted authentication"}, {336331048, "writing names mapping"}, /* write_map_1 */ {336331049, " writing map for {0}"}, /* write_map_2 */ {336331050, " restoring map for {0}"}, /* get_map_1 */ {336331051, "name mapping"}, /* get_map_2 */ {336331052, "cannot restore arbitrary mapping"}, /* get_map_3 */ {336331053, "restoring names mapping"}, /* get_map_4 */ {336331054, " {0}FIX_FSS_D(ATA) fix malformed UNICODE_FSS data"}, {336331055, " {0}FIX_FSS_M(ETADATA) fix malformed UNICODE_FSS metadata"}, {336331056, "Character set parameter missing"}, {336331057, "Character set {0} not found"}, {336331058, " {0}FE(TCH_PASSWORD) fetch password from file"}, {336331059, "too many passwords provided"}, {336331060, "could not open password file {0}, errno {1}"}, {336331061, "could not read password file {0}, errno {1}"}, {336331062, "empty password file {0}"}, {336331063, "Attribute {0} was already processed for exception {1}"}, {336331064, "Skipping attribute {0} because the message already exists for exception {1}"}, {336331065, "Trying to recover from unexpected attribute {0} due to wrong message length for exception {1}"}, {336331066, "Attribute not specified for storing text bigger than 255 bytes"}, {336331067, "Unable to store text bigger than 65536 bytes"}, {336331068, "Failed while adjusting the security class name"}, {336331069, "Usage:"}, {336331070, " gbak -b [backup options] [general 
options]"}, {336331071, " gbak -c [restore options] [general options]"}, {336331072, " = | ... (size in db pages)"}, {336331073, " = | ... (size in bytes = n[K|M|G])"}, {336331074, " -recreate overwrite and -replace can be used instead of -c"}, {336331075, "backup options are:"}, {336331076, "restore options are:"}, {336331077, "general options are:"}, {336331078, "verbose interval value parameter missing"}, /* gbak_missing_interval */ {336331079, "verbose interval value cannot be smaller than {0}"}, /* gbak_wrong_interval */ {336331080, " {0}VERBI(NT) verbose information with explicit interval"}, {336331081, "verify (verbose) and verbint options are mutually exclusive"}, /* gbak_verify_verbint */ {336331082, "option -{0} is allowed only on restore or create"}, /* gbak_option_only_restore */ {336331083, "option -{0} is allowed only on backup"}, /* gbak_option_only_backup */ {336331084, "options -{0} and -{1} are mutually exclusive"}, /* gbak_option_conflict */ {336331085, "parameter for option -{0} was already specified with value \"{1}\""}, /* gbak_param_conflict */ {336331086, "option -{0} was already specified"}, /* gbak_option_repeated */ {336331087, "writing package {0}"}, {336331088, "writing packages"}, {336331089, "restoring package {0}"}, {336331090, "package"}, {336331091, "dependency depth greater than {0} for view {1}"}, /* gbak_max_dbkey_recursion */ {336331092, "value greater than {0} when calculating length of rdb$db_key for view {1}"}, /* gbak_max_dbkey_length */ {336331093, "Invalid metadata detected. Use -FIX_FSS_METADATA option."}, /* gbak_invalid_metadata */ {336331094, "Invalid data detected. Use -FIX_FSS_DATA option."}, /* gbak_invalid_data */ {336331095, "text for attribute {0} is too large in {1}, truncating to {2} bytes"}, {336331096, "Expected backup version {1}..{2}. 
Found {0}"}, /* gbak_inv_bkup_ver2 */ {336331097, " writing view {0}"}, {336331098, " table {0} is a view"}, {336331099, "writing security classes"}, {336331100, "database format {0} is too old to backup"}, /* gbak_db_format_too_old2 */ {336331101, "backup version is {0}"}, {336331102, "adjusting system generators"}, {336331103, "Error closing database, but backup file is OK"}, {336331104, "database"}, {336331105, "required mapping attributes are missing in backup file"}, {336331106, "missing regular expression to skip tables"}, {336331107, " {0}SKIP_D(ATA) skip data for table"}, {336331108, "regular expression to skip tables was already set"}, {336331109, "adjusting views dbkey length"}, {336331110, "updating ownership of packages, procedures and tables"}, {336331111, "adding missing privileges"}, {336331112, "adjusting the ONLINE and FORCED WRITES flags"}, {336331113, " {0}ST(ATISTICS) TDRW show statistics:"}, {336331114, " T time from start"}, {336331115, " D delta time"}, {336331116, " R page reads"}, {336331117, " W page writes"}, {336331118, "statistics parameter missing"}, /* gbak_missing_perf */ {336331119, "wrong char \"{0}\" at statistics parameter"}, /* gbak_wrong_perf */ {336331120, "too many chars at statistics parameter"}, /* gbak_too_long_perf */ {336331121, "total statistics"}, {336331122, "could not append BLOB data to batch"}, {336331123, "could not start batch when restoring table {0}, trying old way"}, {336331124, " {0}KEYNAME name of a key to be used for encryption"}, {336331125, " {0}CRYPT crypt plugin name"}, {336331126, " {0}ZIP backup file is in zip compressed format"}, {336331127, "Keyname parameter missing"}, {336331128, "Key holder parameter missing but backup file is encrypted"}, {336331129, "CryptPlugin parameter missing"}, {336331130, "Unknown crypt plugin name - use -CRYPT switch"}, {336331131, "Inflate error {0}"}, {336331132, "Deflate error {0}"}, {336331133, "Key holder parameter missing"}, {336331134, " {0}KEYHOLDER name of a key 
holder plugin"}, {336331135, "Decompression stream init error {0}"}, {336331136, "Compression stream init error {0}"}, {336331137, "Invalid reply from getInfo() when waiting for DB encryption"}, {336331138, "Problems with just created database encryption"}, {336331139, "Skipped trigger {0} on system table {1}"}, {336331140, " {0}INCLUDE(_DATA) backup data of table(s)"}, {336331141, "missing regular expression to include tables"}, {336331142, "regular expression to include tables was already set"}, {336331143, "writing database create grants"}, {336331144, " database create grant for {0}"}, {336331145, " restoring database create grant for {0}"}, {336331146, "restoring database create grants"}, {336331147, "database create grant"}, {336331148, "writing publications"}, {336331149, " writing publication {0}"}, {336331150, " writing publication for table {0}"}, {336331151, "restoring publication {0}"}, {336331152, "publication"}, {336331153, "restoring publication for table {0}"}, {336331154, "publication for table"}, {336331155, " {0}REPLICA \"none\", \"read_only\" or \"read_write\" replica mode"}, /* gbak_opt_replica */ {336331156, "\"none\", \"read_only\" or \"read_write\" required"}, /* gbak_replica_req */ {336331157, "could not access batch parameters"}, {336331158, " {0}PAR(ALLEL) parallel workers"}, {336331159, "parallel workers parameter missing"}, /* gbak_missing_prl_wrks */ {336331160, "expected parallel workers, encountered \"{0}\""}, /* gbak_inv_prl_wrks */ {336331161, " {0}D(IRECT_IO) direct IO for backup file(s)"}, {336331162, "use up to {0} parallel workers"}, {336396289, "Firebird error"}, {336396362, "Rollback not performed"}, {336396364, "Connection error"}, {336396365, "Connection not established"}, {336396366, "Connection authorization failure."}, {336396375, "deadlock"}, {336396376, "Unsuccessful execution caused by deadlock."}, {336396377, "record from transaction {0} is stuck in limbo"}, {336396379, "operation completed with errors"}, {336396382, 
"the SQL statement cannot be executed"}, {336396384, "Unsuccessful execution caused by an unavailable resource."}, {336396386, "Unsuccessful execution caused by a system error that precludes successful execution of subsequent statements"}, {336396387, "Unsuccessful execution caused by system error that does not preclude successful execution of subsequent statements"}, {336396446, "Wrong numeric type"}, {336396447, "too many versions"}, {336396448, "intermediate journal file full"}, {336396449, "journal file wrong format"}, {336396450, "database {0} shutdown in {1} seconds"}, {336396451, "restart shared cache manager"}, {336396452, "exception {0}"}, {336396453, "bad checksum"}, {336396454, "refresh range number {0} not found"}, {336396455, "expression evaluation not supported"}, {336396456, "FOREIGN KEY column count does not match PRIMARY KEY"}, {336396457, "Attempt to define a second PRIMARY KEY for the same table"}, {336396458, "column used with aggregate"}, {336396459, "invalid column reference"}, {336396460, "invalid key position"}, {336396461, "invalid direction for find operation"}, {336396462, "Invalid statement handle"}, {336396463, "invalid lock handle"}, {336396464, "invalid lock level {0}"}, {336396465, "invalid bookmark handle"}, {336396468, "wrong or obsolete version"}, {336396471, "The INSERT, UPDATE, DELETE, DDL or authorization statement cannot be executed because the transaction is inquiry only"}, {336396472, "external file could not be opened for output"}, {336396477, "multiple rows in singleton select"}, {336396478, "No subqueries permitted for VIEW WITH CHECK OPTION"}, {336396479, "DISTINCT, GROUP or HAVING not permitted for VIEW WITH CHECK OPTION"}, {336396480, "Only one table allowed for VIEW WITH CHECK OPTION"}, {336396481, "No WHERE clause for VIEW WITH CHECK OPTION"}, {336396482, "Only simple column names permitted for VIEW WITH CHECK OPTION"}, {336396484, "An error was found in the application program input parameters for the SQL 
statement."}, {336396485, "Invalid insert or update value(s): object columns are constrained - no 2 table rows can have duplicate column values"}, {336396486, "Arithmetic overflow or division by zero has occurred."}, {336396594, "cannot access column {0} in view {1}"}, {336396595, "Too many concurrent executions of the same request"}, {336396596, "maximum indexes per table ({0}) exceeded"}, {336396597, "new record size of {0} bytes is too big"}, {336396598, "segments not allowed in expression index {0}"}, {336396599, "wrong page type"}, {336396603, "invalid ARRAY or BLOB operation"}, {336396611, "{0} extension error"}, {336396624, "key size exceeds implementation restriction for index \"{0}\""}, {336396625, "definition error for index {0}"}, {336396628, "cannot create index"}, {336396651, "duplicate specification of {0} - not supported"}, {336396663, "The insert failed because a column definition includes validation constraints."}, {336396670, "Cannot delete object referenced by another object"}, {336396671, "Cannot modify object referenced by another object"}, {336396672, "Object is referenced by another object"}, {336396673, "lock on conflicts with existing lock"}, {336396681, "This operation is not defined for system tables."}, {336396683, "Inappropriate self-reference of column"}, {336396684, "Illegal array dimension range"}, {336396687, "database or file exists"}, {336396688, "sort error: corruption in data structure"}, {336396689, "node not supported"}, {336396690, "Shadow number must be a positive integer"}, {336396691, "Preceding file did not specify length, so {0} must include starting page number"}, {336396692, "illegal operation when at beginning of stream"}, {336396693, "the current position is on a crack"}, {336396735, "cannot modify an existing user privilege"}, {336396736, "user does not have the privilege to perform operation"}, {336396737, "This user does not have privilege to perform this operation on this object."}, {336396756, "transaction 
marked invalid by I/O error"}, {336396757, "Cannot prepare a CREATE DATABASE/SCHEMA statement"}, {336396758, "violation of FOREIGN KEY constraint \"{0}\""}, {336396769, "The prepare statement identifies a prepare statement with an open cursor"}, {336396770, "Unknown statement or request"}, {336396778, "Attempt to update non-updatable cursor"}, {336396780, "The cursor identified in the UPDATE or DELETE statement is not positioned on a row."}, {336396784, "Unknown cursor"}, {336396786, "The cursor identified in an OPEN statement is already open."}, {336396787, "The cursor identified in a FETCH or CLOSE statement is not open."}, {336396875, "Overflow occurred during data type conversion."}, {336396881, "null segment of UNIQUE KEY"}, {336396882, "subscript out of bounds"}, {336396886, "data operation not supported"}, {336396887, "invalid comparison operator for find operation"}, {336396974, "Cannot transliterate character between character sets"}, {336396975, "count of column list and variable list do not match"}, {336396985, "Incompatible column/host variable data type"}, {336396991, "Operation violates CHECK constraint {0} on view or table"}, {336396992, "internal Firebird consistency check (invalid RDB$CONSTRAINT_TYPE)"}, {336396993, "Cannot update constraints (RDB$RELATION_CONSTRAINTS)."}, {336396994, "Cannot delete CHECK constraint entry (RDB$CHECK_CONSTRAINTS)"}, {336396995, "Cannot update constraints (RDB$CHECK_CONSTRAINTS)."}, {336396996, "Cannot update constraints (RDB$REF_CONSTRAINTS)."}, {336396997, "Column used in a PRIMARY constraint must be NOT NULL."}, {336397004, "index {0} cannot be used in the specified plan"}, {336397005, "table {0} is referenced in the plan but not the from list"}, {336397006, "the table {0} is referenced twice; use aliases to differentiate"}, {336397007, "table {0} is not referenced in plan"}, {336397027, "Log file specification partition error"}, {336397028, "Cache or Log redefined"}, {336397029, "Write-ahead Log with shadowing 
configuration not allowed"}, {336397030, "Overflow log specification required for round-robin log"}, {336397031, "WAL defined; Cache Manager must be started first"}, {336397033, "Write-ahead Log without shared cache configuration not allowed"}, {336397034, "Cannot start WAL writer for the database {0}"}, {336397035, "WAL writer synchronization error for the database {0}"}, {336397036, "WAL setup error. Please see Firebird log."}, {336397037, "WAL buffers cannot be increased. Please see Firebird log."}, {336397038, "WAL writer - Journal server communication error. Please see Firebird log."}, {336397039, "WAL I/O error. Please see Firebird log."}, {336397040, "Unable to roll over; please see Firebird log."}, {336397041, "obsolete"}, {336397042, "obsolete"}, {336397043, "obsolete"}, {336397044, "obsolete"}, {336397045, "database does not use Write-ahead Log"}, {336397046, "Cannot roll over to the next log file {0}"}, {336397047, "obsolete"}, {336397048, "obsolete"}, {336397049, "Cache or Log size too small"}, {336397050, "Log record header too small at offset {0} in log file {1}"}, {336397051, "Incomplete log record at offset {0} in log file {1}"}, {336397052, "Unexpected end of log file {0} at offset {1}"}, {336397053, "Database name in the log file {0} is different"}, {336397054, "Log file {0} not closed properly; database recovery may be required"}, {336397055, "Log file {0} not latest in the chain but open flag still set"}, {336397056, "Invalid version of log file {0}"}, {336397057, "Log file header of {0} too small"}, {336397058, "obsolete"}, {336397069, "table {0} is not defined"}, {336397080, "invalid ORDER BY clause"}, {336397082, "Column does not belong to referenced table"}, {336397083, "column {0} is not defined in table {1}"}, {336397084, "Undefined name"}, {336397085, "Ambiguous column reference."}, {336397116, "function {0} is not defined"}, {336397117, "Invalid data type, length, or value"}, {336397118, "Invalid number of arguments"}, {336397126, "dbkey 
not available for multi-table views"}, {336397130, "number of columns does not match select list"}, {336397131, "must specify column name for view select expression"}, {336397133, "{0} is not a valid base table of the specified view"}, {336397137, "This column cannot be updated because it is derived from an SQL function or expression."}, {336397138, "The object of the INSERT, DELETE or UPDATE statement is a view for which the requested operation is not permitted."}, {336397183, "Invalid String"}, {336397184, "Invalid token"}, {336397185, "Invalid numeric literal"}, {336397203, "An error occurred while trying to update the security database"}, {336397204, "non-SQL security class defined"}, {336397205, "ODS versions before ODS{0} are not supported"}, /* dsql_too_old_ods */ {336397206, "Table {0} does not exist"}, /* dsql_table_not_found */ {336397207, "View {0} does not exist"}, /* dsql_view_not_found */ {336397208, "At line {0}, column {1}"}, /* dsql_line_col_error */ {336397209, "At unknown line and column"}, /* dsql_unknown_pos */ {336397210, "Column {0} cannot be repeated in {1} statement"}, /* dsql_no_dup_name */ {336397211, "Too many values (more than {0}) in member list to match against"}, /* dsql_too_many_values */ {336397212, "Array and BLOB data types not allowed in computed field"}, /* dsql_no_array_computed */ {336397213, "Implicit domain name {0} not allowed in user created domain"}, /* dsql_implicit_domain_name */ {336397214, "scalar operator used on field {0} which is not an array"}, /* dsql_only_can_subscript_array */ {336397215, "cannot sort on more than 255 items"}, /* dsql_max_sort_items */ {336397216, "cannot group on more than 255 items"}, /* dsql_max_group_items */ {336397217, "Cannot include the same field ({0}.{1}) twice in the ORDER BY clause with conflicting sorting options"}, /* dsql_conflicting_sort_field */ {336397218, "column list from derived table {0} has more columns than the number of items in its SELECT statement"}, /* 
dsql_derived_table_more_columns */ {336397219, "column list from derived table {0} has less columns than the number of items in its SELECT statement"}, /* dsql_derived_table_less_columns */ {336397220, "no column name specified for column number {0} in derived table {1}"}, /* dsql_derived_field_unnamed */ {336397221, "column {0} was specified multiple times for derived table {1}"}, /* dsql_derived_field_dup_name */ {336397222, "Internal dsql error: alias type expected by pass1_expand_select_node"}, /* dsql_derived_alias_select */ {336397223, "Internal dsql error: alias type expected by pass1_field"}, /* dsql_derived_alias_field */ {336397224, "Internal dsql error: column position out of range in pass1_union_auto_cast"}, /* dsql_auto_field_bad_pos */ {336397225, "Recursive CTE member ({0}) can refer itself only in FROM clause"}, /* dsql_cte_wrong_reference */ {336397226, "CTE '{0}' has cyclic dependencies"}, /* dsql_cte_cycle */ {336397227, "Recursive member of CTE can't be member of an outer join"}, /* dsql_cte_outer_join */ {336397228, "Recursive member of CTE can't reference itself more than once"}, /* dsql_cte_mult_references */ {336397229, "Recursive CTE ({0}) must be an UNION"}, /* dsql_cte_not_a_union */ {336397230, "CTE '{0}' defined non-recursive member after recursive"}, /* dsql_cte_nonrecurs_after_recurs */ {336397231, "Recursive member of CTE '{0}' has {1} clause"}, /* dsql_cte_wrong_clause */ {336397232, "Recursive members of CTE ({0}) must be linked with another members via UNION ALL"}, /* dsql_cte_union_all */ {336397233, "Non-recursive member is missing in CTE '{0}'"}, /* dsql_cte_miss_nonrecursive */ {336397234, "WITH clause can't be nested"}, /* dsql_cte_nested_with */ {336397235, "column {0} appears more than once in USING clause"}, /* dsql_col_more_than_once_using */ {336397236, "feature is not supported in dialect {0}"}, /* dsql_unsupp_feature_dialect */ {336397237, "CTE \"{0}\" is not used in query"}, /* dsql_cte_not_used */ {336397238, "column 
{0} appears more than once in ALTER VIEW"}, /* dsql_col_more_than_once_view */ {336397239, "{0} is not supported inside IN AUTONOMOUS TRANSACTION block"}, /* dsql_unsupported_in_auto_trans */ {336397240, "Unknown node type {0} in dsql/GEN_expr"}, /* dsql_eval_unknode */ {336397241, "Argument for {0} in dialect 1 must be string or numeric"}, /* dsql_agg_wrongarg */ {336397242, "Argument for {0} in dialect 3 must be numeric"}, /* dsql_agg2_wrongarg */ {336397243, "Strings cannot be added to or subtracted from DATE or TIME types"}, /* dsql_nodateortime_pm_string */ {336397244, "Invalid data type for subtraction involving DATE, TIME or TIMESTAMP types"}, /* dsql_invalid_datetime_subtract */ {336397245, "Adding two DATE values or two TIME values is not allowed"}, /* dsql_invalid_dateortime_add */ {336397246, "DATE value cannot be subtracted from the provided data type"}, /* dsql_invalid_type_minus_date */ {336397247, "Strings cannot be added or subtracted in dialect 3"}, /* dsql_nostring_addsub_dial3 */ {336397248, "Invalid data type for addition or subtraction in dialect 3"}, /* dsql_invalid_type_addsub_dial3 */ {336397249, "Invalid data type for multiplication in dialect 1"}, /* dsql_invalid_type_multip_dial1 */ {336397250, "Strings cannot be multiplied in dialect 3"}, /* dsql_nostring_multip_dial3 */ {336397251, "Invalid data type for multiplication in dialect 3"}, /* dsql_invalid_type_multip_dial3 */ {336397252, "Division in dialect 1 must be between numeric data types"}, /* dsql_mustuse_numeric_div_dial1 */ {336397253, "Strings cannot be divided in dialect 3"}, /* dsql_nostring_div_dial3 */ {336397254, "Invalid data type for division in dialect 3"}, /* dsql_invalid_type_div_dial3 */ {336397255, "Strings cannot be negated (applied the minus operator) in dialect 3"}, /* dsql_nostring_neg_dial3 */ {336397256, "Invalid data type for negation (minus operator)"}, /* dsql_invalid_type_neg */ {336397257, "Cannot have more than 255 items in DISTINCT / UNION DISTINCT list"}, 
/* dsql_max_distinct_items */ {336397258, "ALTER CHARACTER SET {0} failed"}, /* dsql_alter_charset_failed */ {336397259, "COMMENT ON {0} failed"}, /* dsql_comment_on_failed */ {336397260, "CREATE FUNCTION {0} failed"}, /* dsql_create_func_failed */ {336397261, "ALTER FUNCTION {0} failed"}, /* dsql_alter_func_failed */ {336397262, "CREATE OR ALTER FUNCTION {0} failed"}, /* dsql_create_alter_func_failed */ {336397263, "DROP FUNCTION {0} failed"}, /* dsql_drop_func_failed */ {336397264, "RECREATE FUNCTION {0} failed"}, /* dsql_recreate_func_failed */ {336397265, "CREATE PROCEDURE {0} failed"}, /* dsql_create_proc_failed */ {336397266, "ALTER PROCEDURE {0} failed"}, /* dsql_alter_proc_failed */ {336397267, "CREATE OR ALTER PROCEDURE {0} failed"}, /* dsql_create_alter_proc_failed */ {336397268, "DROP PROCEDURE {0} failed"}, /* dsql_drop_proc_failed */ {336397269, "RECREATE PROCEDURE {0} failed"}, /* dsql_recreate_proc_failed */ {336397270, "CREATE TRIGGER {0} failed"}, /* dsql_create_trigger_failed */ {336397271, "ALTER TRIGGER {0} failed"}, /* dsql_alter_trigger_failed */ {336397272, "CREATE OR ALTER TRIGGER {0} failed"}, /* dsql_create_alter_trigger_failed */ {336397273, "DROP TRIGGER {0} failed"}, /* dsql_drop_trigger_failed */ {336397274, "RECREATE TRIGGER {0} failed"}, /* dsql_recreate_trigger_failed */ {336397275, "CREATE COLLATION {0} failed"}, /* dsql_create_collation_failed */ {336397276, "DROP COLLATION {0} failed"}, /* dsql_drop_collation_failed */ {336397277, "CREATE DOMAIN {0} failed"}, /* dsql_create_domain_failed */ {336397278, "ALTER DOMAIN {0} failed"}, /* dsql_alter_domain_failed */ {336397279, "DROP DOMAIN {0} failed"}, /* dsql_drop_domain_failed */ {336397280, "CREATE EXCEPTION {0} failed"}, /* dsql_create_except_failed */ {336397281, "ALTER EXCEPTION {0} failed"}, /* dsql_alter_except_failed */ {336397282, "CREATE OR ALTER EXCEPTION {0} failed"}, /* dsql_create_alter_except_failed */ {336397283, "RECREATE EXCEPTION {0} failed"}, /* 
dsql_recreate_except_failed */ {336397284, "DROP EXCEPTION {0} failed"}, /* dsql_drop_except_failed */ {336397285, "CREATE SEQUENCE {0} failed"}, /* dsql_create_sequence_failed */ {336397286, "CREATE TABLE {0} failed"}, /* dsql_create_table_failed */ {336397287, "ALTER TABLE {0} failed"}, /* dsql_alter_table_failed */ {336397288, "DROP TABLE {0} failed"}, /* dsql_drop_table_failed */ {336397289, "RECREATE TABLE {0} failed"}, /* dsql_recreate_table_failed */ {336397290, "CREATE PACKAGE {0} failed"}, /* dsql_create_pack_failed */ {336397291, "ALTER PACKAGE {0} failed"}, /* dsql_alter_pack_failed */ {336397292, "CREATE OR ALTER PACKAGE {0} failed"}, /* dsql_create_alter_pack_failed */ {336397293, "DROP PACKAGE {0} failed"}, /* dsql_drop_pack_failed */ {336397294, "RECREATE PACKAGE {0} failed"}, /* dsql_recreate_pack_failed */ {336397295, "CREATE PACKAGE BODY {0} failed"}, /* dsql_create_pack_body_failed */ {336397296, "DROP PACKAGE BODY {0} failed"}, /* dsql_drop_pack_body_failed */ {336397297, "RECREATE PACKAGE BODY {0} failed"}, /* dsql_recreate_pack_body_failed */ {336397298, "CREATE VIEW {0} failed"}, /* dsql_create_view_failed */ {336397299, "ALTER VIEW {0} failed"}, /* dsql_alter_view_failed */ {336397300, "CREATE OR ALTER VIEW {0} failed"}, /* dsql_create_alter_view_failed */ {336397301, "RECREATE VIEW {0} failed"}, /* dsql_recreate_view_failed */ {336397302, "DROP VIEW {0} failed"}, /* dsql_drop_view_failed */ {336397303, "DROP SEQUENCE {0} failed"}, /* dsql_drop_sequence_failed */ {336397304, "RECREATE SEQUENCE {0} failed"}, /* dsql_recreate_sequence_failed */ {336397305, "DROP INDEX {0} failed"}, /* dsql_drop_index_failed */ {336397306, "DROP FILTER {0} failed"}, /* dsql_drop_filter_failed */ {336397307, "DROP SHADOW {0} failed"}, /* dsql_drop_shadow_failed */ {336397308, "DROP ROLE {0} failed"}, /* dsql_drop_role_failed */ {336397309, "DROP USER {0} failed"}, /* dsql_drop_user_failed */ {336397310, "CREATE ROLE {0} failed"}, /* dsql_create_role_failed */ 
{336397311, "ALTER ROLE {0} failed"}, /* dsql_alter_role_failed */ {336397312, "ALTER INDEX {0} failed"}, /* dsql_alter_index_failed */ {336397313, "ALTER DATABASE failed"}, /* dsql_alter_database_failed */ {336397314, "CREATE SHADOW {0} failed"}, /* dsql_create_shadow_failed */ {336397315, "DECLARE FILTER {0} failed"}, /* dsql_create_filter_failed */ {336397316, "CREATE INDEX {0} failed"}, /* dsql_create_index_failed */ {336397317, "CREATE USER {0} failed"}, /* dsql_create_user_failed */ {336397318, "ALTER USER {0} failed"}, /* dsql_alter_user_failed */ {336397319, "GRANT failed"}, /* dsql_grant_failed */ {336397320, "REVOKE failed"}, /* dsql_revoke_failed */ {336397321, "Recursive member of CTE cannot use aggregate or window function"}, /* dsql_cte_recursive_aggregate */ {336397322, "{1} MAPPING {0} failed"}, /* dsql_mapping_failed */ {336397323, "ALTER SEQUENCE {0} failed"}, /* dsql_alter_sequence_failed */ {336397324, "CREATE GENERATOR {0} failed"}, /* dsql_create_generator_failed */ {336397325, "SET GENERATOR {0} failed"}, /* dsql_set_generator_failed */ {336397326, "WITH LOCK can be used only with a single physical table"}, /* dsql_wlock_simple */ {336397327, "FIRST/SKIP cannot be used with OFFSET/FETCH or ROWS"}, /* dsql_firstskip_rows */ {336397328, "WITH LOCK cannot be used with aggregates"}, /* dsql_wlock_aggregates */ {336397329, "WITH LOCK cannot be used with {0}"}, /* dsql_wlock_conflict */ {336397330, "Number of arguments ({0}) exceeds the maximum ({1}) number of EXCEPTION USING arguments"}, /* dsql_max_exception_arguments */ {336397331, "String literal with {0} bytes exceeds the maximum length of {1} bytes"}, /* dsql_string_byte_length */ {336397332, "String literal with {0} characters exceeds the maximum length of {1} characters for the {2} character set"}, /* dsql_string_char_length */ {336397333, "Too many BEGIN...END nesting. 
Maximum level is {0}"}, /* dsql_max_nesting */ {336397334, "RECREATE USER {0} failed"}, /* dsql_recreate_user_failed */ {336461924, "Row not found for fetch, update or delete, or the result of a query is an empty table."}, {336461925, "segment buffer length shorter than expected"}, {336462125, "Datatype needs modification"}, {336462436, "Duplicate column or domain name found."}, {336527507, "invalid block type encountered"}, {336527508, "wrong packet type"}, {336527509, "cannot map page"}, {336527510, "request to allocate invalid block type"}, {336527511, "request to allocate block type larger than maximum size"}, {336527512, "memory pool free list is invalid"}, {336527513, "invalid pool id encountered"}, {336527514, "attempt to release free block"}, {336527515, "attempt to release block overlapping following free block"}, {336527516, "attempt to release block overlapping prior free block"}, {336527517, "cannot sort on a field that does not exist"}, {336527518, "database file not available"}, {336527519, "cannot assert logical lock"}, {336527520, "wrong ACL version"}, {336527521, "shadow block not found"}, {336527522, "shadow lock not synchronized properly"}, {336527523, "root file name not listed for shadow"}, {336527524, "failed to remove symbol from hash table"}, {336527525, "cannot find tip page"}, {336527526, "invalid rsb type"}, {336527527, "invalid SEND request"}, {336527528, "looper: action not yet implemented"}, {336527529, "return data type not supported"}, {336527530, "unexpected reply from journal server"}, {336527531, "journal server is incompatible version"}, {336527532, "journal server refused connection"}, {336527533, "referenced index description not found"}, {336527534, "index key too big"}, {336527535, "partner index description not found"}, {336527536, "bad difference record"}, {336527537, "applied differences will not fit in record"}, {336527538, "record length inconsistent"}, {336527539, "decompression overran buffer"}, {336527540, "cannot 
reposition for update after sort for RMS"}, {336527541, "external access type not implemented"}, {336527542, "differences record too long"}, {336527543, "wrong record length"}, {336527544, "limbo impossible"}, {336527545, "wrong record version"}, {336527546, "record disappeared"}, {336527547, "cannot delete system tables"}, {336527548, "cannot update erased record"}, {336527549, "comparison not supported for specified data types"}, {336527550, "conversion not supported for specified data types"}, {336527551, "conversion error"}, {336527552, "overflow during conversion"}, {336527553, "null or invalid array"}, {336527554, "BLOB not found"}, {336527555, "cannot update old BLOB"}, {336527556, "relation for array not known"}, {336527557, "field for array not known"}, {336527558, "array subscript computation error"}, {336527559, "expected field node"}, {336527560, "invalid BLOB ID"}, {336527561, "cannot find BLOB page"}, {336527562, "unknown data type"}, {336527563, "shadow block not found for extend file"}, {336527564, "index inconsistent"}, {336527565, "index bucket overfilled"}, {336527566, "exceeded index level"}, {336527567, "page already in use"}, {336527568, "page not accessed for write"}, {336527569, "attempt to release page not acquired"}, {336527570, "page in use during flush"}, {336527571, "attempt to remove page from dirty page list when not there"}, {336527572, "CCH_precedence: block marked"}, {336527573, "insufficient cache size"}, {336527574, "no cache buffers available for reuse"}, {336527575, "page {0}, page type {1} lock conversion denied"}, {336527576, "page {0}, page type {1} lock denied"}, {336527577, "buffer marked for update"}, {336527578, "CCH: {0}, status = {1} (218)"}, {336527579, "request of unknown resource"}, {336527580, "release of unknown resource"}, {336527581, "(CMP) copy: cannot remap"}, {336527582, "bad BLR -- invalid stream"}, {336527583, "argument of scalar operation must be an array"}, {336527584, "quad word arithmetic not 
supported"}, {336527585, "data type not supported for arithmetic"}, {336527586, "request size limit exceeded"}, {336527587, "cannot access field {0} in view {1}"}, {336527588, "cannot access field in view {0}"}, {336527589, "EVL_assign_to: invalid operation"}, {336527590, "EVL_bitmap: invalid operation"}, {336527591, "EVL_boolean: invalid operation"}, {336527592, "EVL_expr: invalid operation"}, {336527593, "eval_statistical: invalid operation"}, {336527594, "Unimplemented conversion, FAO directive O,Z,S"}, {336527595, "Unimplemented conversion, FAO directive X,U"}, {336527596, "Error parsing RDB FAO msg string"}, {336527597, "Error parsing RDB FAO msg str"}, {336527598, "unknown parameter in RdB status vector"}, {336527599, "Firebird status vector inconsistent"}, {336527600, "Firebird/RdB message parameter inconsistency"}, {336527601, "error parsing RDB FAO message string"}, {336527602, "unimplemented FAO directive"}, {336527603, "missing pointer page in DPM_data_pages"}, {336527604, "Fragment does not exist"}, {336527605, "pointer page disappeared in DPM_delete"}, {336527606, "pointer page lost from DPM_delete_relation"}, {336527607, "missing pointer page in DPM_dump"}, {336527608, "cannot find record fragment"}, {336527609, "pointer page vanished from DPM_next"}, {336527610, "temporary page buffer too small"}, {336527611, "damaged data page"}, {336527612, "header fragment length changed"}, {336527613, "pointer page vanished from extend_relation"}, {336527614, "pointer page vanished from relation list in locate_space"}, {336527615, "cannot find free space"}, {336527616, "pointer page vanished from mark_full"}, {336527617, "bad record in RDB$PAGES"}, {336527618, "page slot not empty"}, {336527619, "bad pointer page"}, {336527620, "index unexpectedly deleted"}, {336527621, "scalar operator used on field which is not an array"}, {336527622, "active"}, {336527623, "committed"}, {336527624, "rolled back"}, {336527625, "in an ill-defined state"}, {336527626, "next 
transaction older than oldest active transaction"}, {336527627, "next transaction older than oldest transaction"}, {336527628, "buffer marked during cache unwind"}, {336527629, "error in recovery! database corrupted"}, {336527630, "error in recovery! wrong data page record"}, {336527631, "error in recovery! no space on data page"}, {336527632, "error in recovery! wrong header page record"}, {336527633, "error in recovery! wrong generator page record"}, {336527634, "error in recovery! wrong b-tree page record"}, {336527635, "error in recovery! wrong page inventory page record"}, {336527636, "error in recovery! wrong pointer page record"}, {336527637, "error in recovery! wrong index root page record"}, {336527638, "error in recovery! wrong transaction page record"}, {336527639, "error in recovery! out of sequence log record encountered"}, {336527640, "error in recovery! unknown page type"}, {336527641, "error in recovery! unknown record type"}, {336527642, "journal server cannot archive to specified archive directory"}, {336527643, "checksum error in log record when reading from log file"}, {336527644, "cannot restore singleton select data"}, {336527645, "lock not found in internal lock manager"}, {336527646, "size of opt block exceeded"}, {336527647, "Too many savepoints"}, {336527648, "garbage collect record disappeared"}, {336527649, "Unknown BLOB FILTER ACTION_"}, {336527650, "error during savepoint backout"}, /* savepoint_error */ {336527651, "cannot find record back version"}, {336527652, "Illegal user_type."}, {336527653, "bad ACL"}, {336527654, "inconsistent LATCH_mark release"}, {336527655, "inconsistent LATCH_mark call"}, {336527656, "inconsistent latch downgrade call"}, {336527657, "bdb is unexpectedly marked"}, {336527658, "missing exclusive latch"}, {336527659, "exceeded maximum number of shared latches on a bdb"}, {336527660, "can't find shared latch"}, {336527661, "Non-zero use_count of a buffer in the empty que"}, /* cache_non_zero_use_count */ 
{336527662, "Unexpected page change from latching"}, /* unexpected_page_change */ {336527663, "Invalid expression for evaluation"}, {336527664, "RDB$FLAGS for trigger {0} in RDB$TRIGGERS is corrupted"}, /* rdb$triggers_rdb$flags_corrupt */ {336527665, "Blobs accounting is inconsistent"}, {336527666, "Found array data type with more than 16 dimensions"}, {336658432, "Statement failed, SQLSTATE = {0}"}, /* GEN_ERR */ {336658433, "usage: isql [options] []"}, /* USAGE */ {336658434, "Unknown switch: {0}"}, /* SWITCH */ {336658435, "Use CONNECT or CREATE DATABASE to specify a database"}, /* NO_DB */ {336658436, "Unable to open {0}"}, /* FILE_OPEN_ERR */ {336658437, "Commit current transaction (y/n)?"}, /* COMMIT_PROMPT */ {336658438, "Committing."}, /* COMMIT_MSG */ {336658439, "Rolling back work."}, /* ROLLBACK_MSG */ {336658440, "Command error: {0}"}, /* CMD_ERR */ {336658441, "Enter data or NULL for each column. RETURN to end."}, /* ADD_PROMPT */ {336658442, "ISQL Version: {0}"}, /* VERSION */ {336658443, "\t-a(ll) extract metadata incl. 
legacy non-SQL tables"}, /* USAGE_ALL */ {336658444, "Number of DB pages allocated = {0}"}, /* NUMBER_PAGES */ {336658445, "Sweep interval = {0}"}, /* SWEEP_INTERV */ {336658446, "Number of wal buffers = {0}"}, /* NUM_WAL_BUFF */ {336658447, "Wal buffer size = {0}"}, /* WAL_BUFF_SIZE */ {336658448, "Check point length = {0}"}, /* CKPT_LENGTH */ {336658449, "Check point interval = {0}"}, /* CKPT_INTERV */ {336658450, "Wal group commit wait = {0}"}, /* WAL_GRPC_WAIT */ {336658451, "Base level = {0}"}, /* BASE_LEVEL */ {336658452, "Transaction in limbo = {0}"}, /* LIMBO */ {336658453, "Frontend commands:"}, /* HLP_FRONTEND */ {336658454, "BLOBVIEW -- view BLOB in text editor"}, /* HLP_BLOBED */ {336658455, "BLOBDUMP -- dump BLOB to a file"}, /* HLP_BLOBDMP */ {336658456, "EDIT [] -- edit SQL script file and execute"}, /* HLP_EDIT */ {336658457, "INput -- take input from the named SQL file"}, /* HLP_INPUT */ {336658458, "OUTput [] -- write output to named file"}, /* HLP_OUTPUT */ {336658459, "SHELL -- execute Operating System command in sub-shell"}, /* HLP_SHELL */ {336658460, "HELP -- display this menu"}, /* HLP_HELP */ {336658461, "Set commands:"}, /* HLP_SETCOM */ {336658462, " SET -- display current SET options"}, /* HLP_SET */ {336658463, " SET AUTOddl -- toggle autocommit of DDL statements"}, /* HLP_SETAUTO */ {336658464, " SET BLOB [ALL|] -- display BLOBS of subtype or ALL"}, /* HLP_SETBLOB */ {336658465, " SET COUNT -- toggle count of selected rows on/off"}, /* HLP_SETCOUNT */ {336658466, " SET ECHO -- toggle command echo on/off"}, /* HLP_SETECHO */ {336658467, " SET STATs -- toggle display of performance statistics"}, /* HLP_SETSTAT */ {336658468, " SET TERM -- change statement terminator string"}, /* HLP_SETTERM */ {336658469, "SHOW [] -- display system information"}, /* HLP_SHOW */ {336658470, " = CHECK, COLLATION, DATABASE, DOMAIN, EXCEPTION, FILTER, FUNCTION,"}, /* HLP_OBJTYPE */ {336658471, "EXIT -- exit and commit changes"}, /* HLP_EXIT */ {336658472, 
"QUIT -- exit and roll back changes"}, /* HLP_QUIT */ {336658473, "All commands may be abbreviated to letters in CAPitals"}, /* HLP_ALL */ {336658474, "\tSET SCHema/DB -- changes current database"}, /* HLP_SETSCHEMA */ {336658475, "Yes"}, /* YES_ANS */ {336658476, "Current memory = !c\nDelta memory = !d\nMax memory = !x\nElapsed time = !e sec\n"}, /* REPORT1 */ {336658477, "Cpu = !u sec\nBuffers = !b\nReads = !r\nWrites = !w\nFetches = !f"}, /* REPORT2 */ {336658478, "BLOB display set to subtype {0}. This BLOB: subtype = {1}"}, /* BLOB_SUBTYPE */ {336658479, "BLOB: {0}, type 'edit' or filename to load>"}, /* BLOB_PROMPT */ {336658480, "Enter {0} as Y/M/D>"}, /* DATE_PROMPT */ {336658481, "Enter {0}>"}, /* NAME_PROMPT */ {336658482, "Bad date {0}"}, /* DATE_ERR */ {336658483, "CON> "}, /* CON_PROMPT */ {336658484, " SET LIST -- toggle column or table display format"}, /* HLP_SETLIST */ {336658485, "{0} not found"}, /* NOT_FOUND */ {336658486, "Errors occurred (possibly duplicate domains) in creating {0} in {1}"}, /* COPY_ERR */ {336658487, "Server version too old to support the isql command"}, /* SERVER_TOO_OLD */ {336658488, "Records affected: {0}"}, /* REC_COUNT */ {336658489, "Unlicensed for database \"{0}\""}, /* UNLICENSED */ {336658490, " SET WIDTH [] -- set/unset print width to for column "}, /* HLP_SETWIDTH */ {336658491, " SET PLAN -- toggle display of query access plan"}, /* HLP_SETPLAN */ {336658492, " SET TIME -- toggle display of timestamp with DATE values"}, /* HLP_SETTIME */ {336658493, "EDIT -- edit current command buffer and execute"}, /* HLP_EDIT2 */ {336658494, "OUTput -- return output to stdout"}, /* HLP_OUTPUT2 */ {336658495, " SET NAMES -- set name of runtime character set"}, /* HLP_SETNAMES */ {336658496, " GENERATOR, GRANT, INDEX, PACKAGE, PROCEDURE, ROLE, SQL DIALECT,"}, /* HLP_OBJTYPE2 */ {336658497, " SET BLOB -- turn off BLOB display"}, /* HLP_SETBLOB2 */ {336658498, "SET