Repository: AugustNagro/magnum Branch: master Commit: 280136415ead Files: 126 Total size: 285.6 KB Directory structure: gitextract_tcy7nv57/ ├── .github/ │ └── workflows/ │ └── ci.yml ├── .gitignore ├── .scalafmt.conf ├── LICENSE ├── README.md ├── build.sbt ├── magnum/ │ └── src/ │ ├── main/ │ │ └── scala/ │ │ └── com/ │ │ └── augustnagro/ │ │ └── magnum/ │ │ ├── BatchUpdateResult.scala │ │ ├── ClickhouseDbType.scala │ │ ├── ColumnName.scala │ │ ├── ColumnNames.scala │ │ ├── DbCodec.scala │ │ ├── DbCon.scala │ │ ├── DbTx.scala │ │ ├── DbType.scala │ │ ├── DerivingUtil.scala │ │ ├── Frag.scala │ │ ├── FragWriter.scala │ │ ├── H2DbType.scala │ │ ├── Id.scala │ │ ├── ImmutableRepo.scala │ │ ├── MySqlDbType.scala │ │ ├── NullOrder.scala │ │ ├── OracleDbType.scala │ │ ├── PostgresDbType.scala │ │ ├── Query.scala │ │ ├── Repo.scala │ │ ├── RepoDefaults.scala │ │ ├── ResultSetIterator.scala │ │ ├── Returning.scala │ │ ├── Seek.scala │ │ ├── SeekDir.scala │ │ ├── Sort.scala │ │ ├── SortOrder.scala │ │ ├── Spec.scala │ │ ├── SpecImpl.scala │ │ ├── SqlException.scala │ │ ├── SqlExceptionEvent.scala │ │ ├── SqlLiteral.scala │ │ ├── SqlLogger.scala │ │ ├── SqlName.scala │ │ ├── SqlNameMapper.scala │ │ ├── SqlSuccessEvent.scala │ │ ├── SqliteDbType.scala │ │ ├── Table.scala │ │ ├── TableExprs.scala │ │ ├── TableInfo.scala │ │ ├── Transactor.scala │ │ ├── UUIDCodec.scala │ │ ├── Update.scala │ │ └── util.scala │ └── test/ │ ├── resources/ │ │ ├── clickhouse/ │ │ │ ├── big-dec.sql │ │ │ ├── car.sql │ │ │ ├── my-time.sql │ │ │ ├── no-id.sql │ │ │ └── person.sql │ │ ├── h2/ │ │ │ ├── big-dec.sql │ │ │ ├── car.sql │ │ │ ├── my-time.sql │ │ │ ├── my-user.sql │ │ │ ├── no-id.sql │ │ │ └── person.sql │ │ ├── mysql/ │ │ │ ├── big-dec.sql │ │ │ ├── car.sql │ │ │ ├── my-time.sql │ │ │ ├── my-user.sql │ │ │ ├── no-id.sql │ │ │ └── person.sql │ │ └── pg/ │ │ ├── big-dec.sql │ │ ├── car.sql │ │ ├── my-time.sql │ │ ├── my-user.sql │ │ ├── no-id.sql │ │ └── person.sql │ └── scala/ │ ├── 
ClickHouseTests.scala │ ├── EffectiveSubsetTests.scala │ ├── H2Tests.scala │ ├── MySqlTests.scala │ ├── OracleTests.scala │ ├── PgTests.scala │ ├── SqliteTests.scala │ ├── opaques.scala │ └── shared/ │ ├── BigDecTests.scala │ ├── Color.scala │ ├── DateTimeTests.scala │ ├── EmbeddedFragTests.scala │ ├── EntityCreatorTests.scala │ ├── ImmutableRepoTests.scala │ ├── MultilineFragTests.scala │ ├── NoIdTests.scala │ ├── OptionalProductTests.scala │ ├── RepoTests.scala │ ├── SharedTests.scala │ ├── SpecTests.scala │ ├── SqlNameTests.scala │ ├── TableInfoTests.scala │ └── TupleTests.scala ├── magnum-pg/ │ └── src/ │ ├── main/ │ │ └── scala/ │ │ └── com/ │ │ └── augustnagro/ │ │ └── magnum/ │ │ └── pg/ │ │ ├── PgCodec.scala │ │ ├── SqlArrayCodec.scala │ │ ├── enums/ │ │ │ ├── PgEnumDbCodec.scala │ │ │ ├── PgEnumToScalaEnumSqlArrayCodec.scala │ │ │ └── PgStringToScalaEnumSqlArrayCodec.scala │ │ ├── json/ │ │ │ ├── JsonBDbCodec.scala │ │ │ └── JsonDbCodec.scala │ │ └── xml/ │ │ └── XmlDbCodec.scala │ └── test/ │ ├── resources/ │ │ ├── pg-car.sql │ │ ├── pg-service-list.sql │ │ └── pg-user.sql │ └── scala/ │ ├── CirceJsonBDbCodec.scala │ ├── CirceJsonDbCodec.scala │ ├── Color.scala │ ├── LastService.scala │ ├── MagCar.scala │ ├── MagUser.scala │ ├── MyJsonB.scala │ ├── MyXml.scala │ └── PgCodecTests.scala ├── magnum-zio/ │ └── src/ │ ├── main/ │ │ └── scala/ │ │ └── com/ │ │ └── augustnagro/ │ │ └── magnum/ │ │ └── magzio/ │ │ └── TransactorZIO.scala │ └── test/ │ ├── resources/ │ │ └── pg/ │ │ ├── big-dec.sql │ │ ├── car.sql │ │ ├── my-user.sql │ │ ├── no-id.sql │ │ └── person.sql │ └── scala/ │ └── com/ │ └── augustnagro/ │ └── magnum/ │ └── magzio/ │ ├── ImmutableRepoZioTests.scala │ └── PgZioTests.scala └── project/ ├── build.properties └── plugins.sbt ================================================ FILE CONTENTS ================================================ ================================================ FILE: .github/workflows/ci.yml 
================================================ name: CI on: push: branches: ["master"] pull_request: branches: ["master"] permissions: contents: read jobs: ci: runs-on: ubuntu-latest strategy: fail-fast: false steps: - name: Checkout current branch uses: actions/checkout@v6.0.2 - name: Setup Java uses: actions/setup-java@v5.2.0 with: distribution: temurin java-version: 25 check-latest: true - name: Setup sbt uses: sbt/setup-sbt@v1 - name: Cache scala dependencies uses: coursier/cache-action@v8 - name: Run tests run: sbt 'scalafmtCheckAll; test' ================================================ FILE: .gitignore ================================================ .settings .DS_Store *.iml .idea target nbproject nb-configuration.xml .class .log .jar .war .ear .zip .tar.gz .rar hs_err_pid* *.log .bsp *.sc *.db *metals* .bloop .vscode ================================================ FILE: .scalafmt.conf ================================================ version = 3.8.4-RC3 runner.dialect = scala3 rewrite.scala3.insertEndMarkerMinLines = 20 rewrite.scala3.removeEndMarkerMaxLines = 19 binPack.parentConstructors = Oneline ================================================ FILE: LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: README.md ================================================ ## Magnum [![Latest version](https://index.scala-lang.org/augustnagro/magnum/magnum/latest.svg?color=orange)](https://index.scala-lang.org/augustnagro/magnum/magnum) Yet another database client for Scala. No dependencies, high productivity. * [Installing](#installing) * [ScalaDoc](#scaladoc) * [Documentation](#documentation) * [`connect` creates a database connection](#connect-creates-a-database-connection) * [`transact` creates a database transaction](#transact-creates-a-database-transaction) * [Type-safe Transaction & Connection Management](#type-safe-transaction--connection-management) * [Customizing Transactions](#customizing-transactions) * [Sql Interpolator, Frag, Query, Update, Returning](#sql-interpolator-frag-query-and-update) * [Batch Updates](#batch-updates) * [Immutable Repositories](#immutable-repositories) * [Repositories](#repositories) * [Database generated columns](#database-generated-columns) * [Specifications](#specifications) * [Scala 3 Enum & NewType Support](#scala-3-enum--newtype-support) * [`DbCodec`: Typeclass for JDBC reading & writing](#dbcodec-typeclass-for-jdbc-reading--writing) * [Future-Proof Queries](#future-proof-queries) * [Splicing Literal Values into Frags](#splicing-literal-values-into-frags) * [Postgres Module](#postgres-module) * [Logging](#logging-sql-queries) * 
[Integrations](#integrations) * [ZIO](#zio) * [Motivation](#motivation) * [Feature List And Database Support](#feature-list) * [Talks and Blogs](#talks-and-blogs) * [Frequently Asked Questions](#frequently-asked-questions) ## Installing ``` "com.augustnagro" %% "magnum" % "1.3.0" ``` Magnum requires Scala >= 3.3.0 You must also install the JDBC driver for your database, for example: ``` "org.postgresql" % "postgresql" % "" ``` And for performance, a JDBC connection pool like [HikariCP](https://github.com/brettwooldridge/HikariCP) ## ScalaDoc https://javadoc.io/doc/com.augustnagro/magnum_3 ## Documentation ### `connect` creates a database connection. `connect` takes two parameters; the database Transactor, and a context function with a given `DbCon` connection. For example: ```scala import com.augustnagro.magnum.* val dataSource: javax.sql.DataSource = ??? val xa = Transactor(dataSource) val users: Vector[User] = connect(xa): sql"SELECT * FROM user".query[User].run() ``` ### `transact` creates a database transaction. Like `connect`, `transact` accepts a Transactor and context function. The context function provides a `DbTx` instance. If the function throws, the transaction will be rolled back. ```scala // update is rolled back transact(xa): sql"UPDATE user SET first_name = $firstName WHERE id = $id".update.run() thisMethodThrows() ``` ### Type-safe Transaction & Connection Management Annotate transactional methods with `using DbTx`, and connections with `using DbCon`. Since `DbTx <: DbCon`, it's impossible to call a method with the wrong context. For example, this compiles: ```scala def runUpdateAndGetUsers()(using DbTx): Vector[User] = userRepo.deleteById(1L) getUsers def getUsers(using DbCon): Vector[User] = sql"SELECT * FROM user".query.run() ``` But not this: ```scala def runSomeQueries(using DbCon): Vector[User] = runUpdateAndGetUsers() ``` ### Customizing transactions `Transactor` lets you customize the transaction (or connection) behavior. 
```scala val xa = Transactor( dataSource = ???, sqlLogger = SqlLogger.logSlowQueries(500.milliseconds), connectionConfig = con => con.setTransactionIsolation(Connection.TRANSACTION_REPEATABLE_READ) ) transact(xa): sql"SELECT id from myUser".query[Long].run() ``` ### Sql Interpolator, Frag, Query, and Update The `sql` interpolator can express any SQL expression, returning a `Frag` sql fragment. You can interpolate values without the risk of SQL-injection attacks. ```scala val firstNameOpt = Some("John") val twoDaysAgo = OffsetDateTime.now.minusDays(2) val frag: Frag = sql""" SELECT id, last_name FROM user WHERE first_name = $firstNameOpt AND created <= $twoDaysAgo """ ``` Frags can be turned into queries with the `query[T](using DbCodec[T])` method: ```scala val query = frag.query[(Long, String)] // Query[(Long, String)] ``` Or updates via `update` ```scala val update: Update = sql"UPDATE user SET first_name = 'Buddha' WHERE id = 3".update ``` Or an update with a `RETURNING` clause via `returning`: ```scala val updateReturning: Returning = sql""" UPDATE user SET first_name = 'Buddha' WHERE last_name = 'Harper' RETURNING id """.returning[Long] ``` All are executed via `run()(using DbCon)`: ```scala transact(xa): val tuples: Vector[(Long, String)] = query.run() val updatedRows: Int = update.run() val updatedIds: Vector[Long] = updateReturning.run() ``` ### Batch Updates Batch updates are supported via `batchUpdate` method in package `com.augustnagro.magnum`. ```scala connect(xa): val users: Iterable[User] = ??? val updateResult: BatchUpdateResult = batchUpdate(users): user => sql"...".update ``` `batchUpdate` returns a `BatchUpdateResult` enum, which is `Success(numRowsUpdated)` or `SuccessNoInfo` otherwise. 
### Immutable Repositories The `ImmutableRepo` class auto-generates the following methods at compile-time: ```scala def count(using DbCon): Long def existsById(id: ID)(using DbCon): Boolean def findAll(using DbCon): Vector[E] def findAll(spec: Spec[E])(using DbCon): Vector[E] def findById(id: ID)(using DbCon): Option[E] def findAllById(ids: Iterable[ID])(using DbCon): Vector[E] ``` Here's an example: ```scala @Table(PostgresDbType, SqlNameMapper.CamelToSnakeCase) case class User( @Id id: Long, firstName: Option[String], lastName: String, created: OffsetDateTime ) derives DbCodec val userRepo = ImmutableRepo[User, Long] transact(xa): val cnt = userRepo.count val userOpt = userRepo.findById(2L) ``` Importantly, class User is annotated with `@Table`, which defines the table's database type. The annotation optionally specifies the name-mapping between scala fields and column names. You can also use the `@SqlName` annotation on individual fields. Finally, The table must `derive DbCodec`, or otherwise provide an implicit DbCodec instance. The optional `@Id` annotation denotes the table's primary key. Not setting `@Id` will default to using the first field. If there is no logical id, then remove the annotation and use Null in the ID type parameter of Repositories (see next). It is a best practice to extend ImmutableRepo to encapsulate your SQL in repositories. This way, it's easier to maintain since they're grouped together. 
```scala class UserRepo extends ImmutableRepo[User, Long]: def firstNamesForLast(lastName: String)(using DbCon): Vector[String] = sql""" SELECT DISTINCT first_name FROM user WHERE last_name = $lastName """.query[String].run() // other User-related queries here ``` ### Repositories The `Repo` class auto-generates the following methods at compile-time: ```scala def count(using DbCon): Long def existsById(id: ID)(using DbCon): Boolean def findAll(using DbCon): Vector[E] def findAll(spec: Spec[E])(using DbCon): Vector[E] def findById(id: ID)(using DbCon): Option[E] def findAllById(ids: Iterable[ID])(using DbCon): Vector[E] def delete(entity: E)(using DbCon): Unit def deleteById(id: ID)(using DbCon): Unit def truncate()(using DbCon): Unit def deleteAll(entities: Iterable[E])(using DbCon): BatchUpdateResult def deleteAllById(ids: Iterable[ID])(using DbCon): BatchUpdateResult def insert(entityCreator: EC)(using DbCon): Unit def insertAll(entityCreators: Iterable[EC])(using DbCon): Unit def insertReturning(entityCreator: EC)(using DbCon): E def insertAllReturning(entityCreators: Iterable[EC])(using DbCon): Vector[E] def update(entity: E)(using DbCon): Unit def updateAll(entities: Iterable[E])(using DbCon): BatchUpdateResult ``` Here's an example: ```scala @Table(PostgresDbType, SqlNameMapper.CamelToSnakeCase) case class User( @Id id: Long, firstName: Option[String], lastName: String, created: OffsetDateTime ) derives DbCodec val userRepo = Repo[User, User, Long] val countAfterUpdate = transact(xa): userRepo.deleteById(2L) userRepo.count ``` It is a best practice to encapsulate your SQL in repositories. ```scala class UserRepo extends Repo[User, User, Long] ``` Also note that Repo extends ImmutableRepo. Some databases cannot support every method, and will throw UnsupportedOperationException. ### Database generated columns It is often the case that database columns are auto-generated, for example, primary key IDs. This is why the Repo class has 3 type parameters. 
The first defines the Entity-Creator, which should omit any fields that are auto-generated. The entity-creator class must be an 'effective' subclass of the entity class, but it does not have to subclass the entity. This is verified at compile time. The second type parameter is the Entity class, and the final is for the ID. If the Entity does not have a logical ID, use Null. ```scala case class UserCreator( firstName: Option[String], lastName: String, ) derives DbCodec @Table(PostgresDbType, SqlNameMapper.CamelToSnakeCase) case class User( @Id id: Long, firstName: Option[String], lastName: String, created: OffsetDateTime ) derives DbCodec val userRepo = Repo[UserCreator, User, Long] val newUser: User = transact(xa): userRepo.insertReturning( UserCreator(Some("Adam"), "Smith") ) ``` ### Specifications Specifications help you write safe, dynamic queries. An example use-case would be a search results page that allows users to sort and filter the paginated data. 1. If you need to perform joins to get the data needed, first create a database view. 2. Next, create an entity class that derives DbCodec. 3. Finally, use the Spec class to create a specification. Here's an example: ```scala val partialName = "Ja%" val lastNameOpt = Option("Brown") val searchDate = OffsetDateTime.now.minusDays(2) val idPosition = 42L val spec = Spec[User] .where(sql"first_name ILIKE $partialName") .where(lastNameOpt.map(ln => sql"last_name = $ln").getOrElse(sql"")) .where(sql"created >= $searchDate") .seek("id", SeekDir.Gt, idPosition, SortOrder.Asc) .limit(10) val users: Vector[User] = userRepo.findAll(spec) ``` Note that both [seek pagination](https://blog.jooq.org/faster-sql-paging-with-jooq-using-the-seek-method/) and offset pagination is supported. ### Scala 3 Enum & NewType Support Magnum supports Scala 3 enums (non-adt) fully, by default writing & reading them as Strings. 
For example, ```scala @Table(PostgresDbType, SqlNameMapper.CamelToUpperSnakeCase) enum Color derives DbCodec: case Red, Green, Blue @Table(PostgresDbType, SqlNameMapper.CamelToSnakeCase) case class User( @Id id: Long, firstName: Option[String], lastName: String, created: OffsetDateTime, favoriteColor: Color ) derives DbCodec ``` NewTypes and Opaque Type Alias can cause issues with derivation since given DbCodecs are not available. A simple way to provide them is using DbCodec.bimap: ```scala opaque type MyId = Long object MyId: def apply(id: Long): MyId = require(id >= 0) id extension (myId: MyId) def underlying: Long = myId given DbCodec[MyId] = DbCodec[Long].biMap(MyId.apply, _.underlying) transact(xa): val id = MyId(123L) sql"UPDATE my_table SET x = true WHERE id = $id".update.run() ``` ### `DbCodec`: Typeclass for JDBC reading & writing DbCodec is a Typeclass for JDBC reading & writing. Built-in DbCodecs are provided for many types, including primitives, dates, Options, and Tuples. You can derive DbCodecs by adding `derives DbCodec` to your case class or enum. ```scala val rs: ResultSet = ??? val ints: Vector[Int] = DbCodec[Int].read(rs) val ps: PreparedStatement = ??? DbCodec[Int].writeSingle(22, ps) ``` ### Defining your own DbCodecs To modify the JDBC mappings, implement a given DbCodec instance as you would for any Typeclass. ### Future-Proof Queries A common problem when writing SQL queries is that they're difficult to refactor. When a column or table name changes you have to do a global find & replace. And if you miss a query, it's discovered at runtime. There's also lots of repetition when writing SQL. Magnum's repositories help scrap the boilerplate, but writing `SELECT a, b, c, d, ...` for a large table quickly gets tiring. To help with this, Magnum offers a `TableInfo` class to enable 'future-proof' queries. An important caveat is that these queries are harder to copy/paste into SQL editors like PgAdmin or DbBeaver. 
Here's some examples: ```scala import com.augustnagro.magnum.* case class UserCreator(firstName: String, age: Int) derives DbCodec @Table(PostgresDbType, SqlNameMapper.CamelToSnakeCase) case class User(id: Long, firstName: String, age: Int) derives DbCodec object User: val Table = TableInfo[UserCreator, User, Long] def allUsers(using DbCon): Vector[User] = val u = User.Table // equiv to // SELECT id, first_name, age FROM user sql"SELECT ${u.all} FROM $u".query[User].run() def firstNamesForLast(lastName: String)(using DbCon): Vector[String] = val u = User.Table // equiv to // SELECT DISTINCT first_name FROM user WHERE last_name = ? sql""" SELECT DISTINCT ${u.firstName} FROM $u WHERE ${u.lastName} = $lastName """.query[String].run() def insertOrIgnore(creator: UserCreator)(using DbCon): Unit = val u = User.Table // equiv to // INSERT OR IGNORE INTO user (first_name, age) VALUES (?, ?) sql"INSERT OR IGNORE INTO $u ${u.insertCols} VALUES ($creator)".update.run() ``` It's important that `val Table = TableInfo[X, Y, Z]` is not explicitly typed, otherwise its structural typing will be destroyed. In the case of multiple joins, you can use `TableInfo.alias(String)` to prevent name conflicts: ```scala val c = TableInfo[Car].alias("c") val p = TableInfo[Person].alias("p") sql""" SELECT ${c.all}, ${p.firstName} FROM $c JOIN $p ON ${p.id} = ${c.personId} """.query.run() ``` ### Splicing Literal Values into Frags To splice Strings directly into `sql` statements, you can interpolate `SqlLiteral` values. For example, ```scala val table = SqlLiteral("beans") sql"select * from $table" ``` This feature should be used sparingly and never with untrusted input. ### Postgres Module The Postgres Module adds support for [Geometric Types](https://www.postgresql.org/docs/current/datatype-geometric.html) and [Arrays](https://www.postgresql.org/docs/current/arrays.html). Postgres Arrays can be decoded into Scala List/Vector/IArray, etc; multi-dimensionality is also supported. 
``` "com.augustnagro" %% "magnumpg" % "1.3.0" ``` Example: Insert into a table with a `point[]` type column. With table: ```sql create table my_geo ( id bigint primary key, pnts point[] not null ); ``` ```scala import org.postgresql.geometric.* import com.augustnagro.magnum.* import com.augustnagro.magnum.pg.PgCodec.given @Table(PostgresDbType) case class MyGeo(@Id id: Long, pnts: IArray[PGpoint]) derives DbCodec val dataSource: javax.sql.DataSource = ??? val xa = Transactor(dataSource) val myGeoRepo = Repo[MyGeo, MyGeo, Long] transact(xa): myGeoRepo.insert(MyGeo(1L, IArray(PGpoint(1, 1), PGPoint(2, 2)))) ``` The import of `PgCodec.given` is required to bring Geo/Array DbCodecs into scope. #### Arrays of Enums The `pg` module supports arrays of simple (non-ADT) enums. If you want to map an array of [Postgres enums](https://www.postgresql.org/docs/current/datatype-enum.html) to a sequence of Scala enums, use the following import when deriving the DbCodec: ```scala import com.augustnagro.magnum.pg.PgCodec.given import com.augustnagro.magnum.pg.enums.PgEnumToScalaEnumSqlArrayCodec // in postgres: `create type Color as enum ('Red', 'Green', 'Blue');` enum Color derives DbCodec: case Red, Green, Blue @Table(PostgresDbType) case class Car(@Id id: Long, colors: Vector[Color]) derives DbCodec ``` If instead your Postgres type is an array of varchar or text, use the following import: ```scala import com.augustnagro.magnum.pg.enums.PgStringToScalaEnumSqlArrayCodec ``` ### Logging SQL queries If you set the java.util Logging level to DEBUG, all SQL queries will be logged. Setting to TRACE will log SQL queries and their parameters. #### Logging Slow Queries You can log slow queries by using the `Transactor` class in conjunction with `SqlLogger.logSlowQueries(FiniteDuration)`. See [Customizing Transactions](#customizing-transactions) for an example. You can also implement your own SqlLogger subclass as desired. 
## Integrations ### ZIO Magnum provides a fine layer of integration with ZIO. The `magnum-zio` module provides an implementation of the `connect` and `transact` utils that return a ZIO effect. To use the ZIO integration, add the following dependency: ```scala "com.augustnagro" %% "magnumzio" % "x.x.x" ``` and import these utils in your code with: ```scala import com.augustnagro.magnum.magzio.* ``` ## Motivation Historically, database clients on the JVM fall into three categories. * Object Oriented Repositories (Spring-Data, Hibernate) * Functional DSLs (JOOQ, Slick, quill, zio-sql) * SQL String interpolators (Anorm, doobie, plain jdbc) Magnum is a Scala 3 library combining aspects of all three, providing a typesafe and refactorable SQL interface, which can express all SQL expressions, on all JDBC-supported databases. Like in Zoolander (the movie), Magnum represents a 'new look' for Database access in Scala. ## Feature List * Supports any database with a JDBC driver, including Postgres, MySql, Oracle, ClickHouse, H2, and Sqlite * Efficient `sql" "` interpolator * Purely-functional API * Common queries (like insert, update, delete) generated at compile time * Difficult to hit [N+1 query problem](https://stackoverflow.com/questions/97197/what-is-the-n1-selects-problem-in-orm-object-relational-mapping) * Type-safe Transactions * Supports database-generated columns * Easy to use, Loom-ready API (no Futures or Effect Systems) * Easy to define entities. Easy to implement DB support & codecs for custom types. * Scales to complex SQL queries * Specifications for building dynamic queries, such as table filters with pagination * Supports high-performance [Seek pagination](https://blog.jooq.org/faster-sql-paging-with-jooq-using-the-seek-method/) * Performant batch-queries ## Developing The tests are written using TestContainers, which requires Docker be installed. 
## Talks and Blogs * Scala Days 2023: [slides](/Magnum-Slides-to-Share.pdf), [talk](https://www.youtube.com/watch?v=iKNRS5b1zAY) ## Frequently Asked Questions #### Does Magnum support nested entities like: ```scala @Table(H2DbType, SqlNameMapper.CamelToSnakeCase) case class Company( name: String, address: Address, ) derives DbCodec case class Address( street: String, city: String, zipCode: String, country: String ) derives DbCodec ``` NO; Magnum only supports deriving flat entity class structures. This keeps things simple and makes it obvious how the Scala entity class maps to the SQL table. We may add support for SQL UDTs (user defined types) in the future; however at the time of writing, UDTs are not well-supported by JDBC drivers. You could also express the above example using a foreign key to an Address table, like so: ```scala @Table(H2DbType, SqlNameMapper.CamelToSnakeCase) case class Company( name: String, addressId: AddressId, ) derives DbCodec opaque type AddressId = Long object AddressId: def apply(id: Long): AddressId = id extension (id: AddressId) def underlying: Long = id given DbCodec[AddressId] = DbCodec[Long].biMap(AddressId.apply, _.underlying) @Table(H2DbType, SqlNameMapper.CamelToSnakeCase) case class Address( @Id id: AddressId, street: String, city: String, zipCode: String, country: String ) derives DbCodec ``` #### UUID DbCodec doesn't work for my database Some databases directly support the UUID type; these include Postgres, Clickhouse, and H2. When using the built-in `DbCodec[UUID]`, defined in `DbCodec.scala`, serialization and deserialization of `java.util.UUID` will work as expected. Other databases like MySql, Oracle, and Sqlite, however, do not natively support UUID columns. Users have to choose an alternate datatype to store the UUID: most commonly `varchar(36)` or `binary(16)`. 
The JDBC drivers for these databases do not support direct serialization and deserialization of `java.util.UUID`, therefore the default `DbCodec[UUID]` will not be sufficient. Instead, import the appropriate codec from `com.augustnagro.magnum.UUIDCodec`. For example, ```scala import com.augustnagro.magnum.* import com.augustnagro.magnum.UUIDCodec.VarCharUUIDCodec import java.util.UUID @Table(MySqlDbType) case class Person(@Id id: Long, name: String, tracking_id: Option[UUID]) derives DbCodec ``` ## Todo * JSON / XML support * Support MSSql * Cats Effect & ZIO modules * Explicit Nulls support
================================================ FILE: build.sbt ================================================
// Project-wide settings shared by every module (magnum, magnum-pg, magnum-zio).
ThisBuild / organization := "com.augustnagro"
ThisBuild / version := "2.0.0-SNAPSHOT"
ThisBuild / versionScheme := Some("early-semver")
ThisBuild / scalaVersion := "3.3.7"
ThisBuild / scalacOptions ++= Seq("-deprecation")
ThisBuild / homepage := Some(url("https://github.com/AugustNagro/magnum"))
ThisBuild / licenses += (
  "Apache-2.0",
  url(
    "https://opensource.org/licenses/Apache-2.0"
  )
)
ThisBuild / scmInfo := Some(
  ScmInfo(
    url("https://github.com/AugustNagro/magnum"),
    "scm:git:git@github.com:augustnagro/magnum.git",
    Some("scm:git:git@github.com:augustnagro/magnum.git")
  )
)
ThisBuild / developers := List(
  Developer(
    id = "augustnagro@gmail.com",
    name = "August Nagro",
    email = "augustnagro@gmail.com",
    url = url("https://augustnagro.com")
  )
)
ThisBuild / publishMavenStyle := true
ThisBuild / pomIncludeRepository := { _ => false }
// Snapshots go to the Sonatype Central snapshots repo; releases use local staging.
ThisBuild / publishTo := {
  val centralSnapshots =
    "https://central.sonatype.com/repository/maven-snapshots/"
  if (isSnapshot.value) Some("central-snapshots" at centralSnapshots)
  else localStaging.value
}
// The aggregate root itself is never published; each module opts back in below.
ThisBuild / publish / skip := true

addCommandAlias("fmt", "scalafmtAll")

// Shared dependency versions (tests only, except the provided pg driver below).
val testcontainersVersion = "0.44.1"
val circeVersion = "0.14.10"
val munitVersion = "1.1.0"
val postgresDriverVersion = "42.7.4"

lazy val root = project
  .in(file("."))
  .aggregate(magnum, magnumPg, magnumZio)

// Core module: DbCodec, Repo, sql interpolator. Test deps cover every
// supported database via TestContainers (Docker required to run tests).
lazy val magnum = project
  .in(file("magnum"))
  .settings(
    publish / skip := false,
    libraryDependencies ++= Seq(
      "org.scalameta" %% "munit" % munitVersion % Test,
      "com.dimafeng" %% "testcontainers-scala-munit" % testcontainersVersion % Test,
      "com.dimafeng" %% "testcontainers-scala-postgresql" % testcontainersVersion % Test,
      "org.postgresql" % "postgresql" % postgresDriverVersion % Test,
      "com.dimafeng" %% "testcontainers-scala-mysql" % testcontainersVersion % Test,
      "com.mysql" % "mysql-connector-j" % "9.0.0" % Test,
      "com.h2database" % "h2" % "2.3.232" % Test,
      "com.dimafeng" %% "testcontainers-scala-oracle-xe" % testcontainersVersion % Test,
      "com.oracle.database.jdbc" % "ojdbc11" % "21.9.0.0" % Test,
      "com.dimafeng" %% "testcontainers-scala-clickhouse" % testcontainersVersion % Test,
      "com.clickhouse" % "clickhouse-jdbc" % "0.6.0" % Test classifier "http",
      "org.xerial" % "sqlite-jdbc" % "3.46.1.3" % Test
    )
  )

// Postgres extensions (arrays, enums, json/xml codecs). The pg driver is
// "provided": users supply their own driver version at runtime.
lazy val magnumPg = project
  .in(file("magnum-pg"))
  .dependsOn(magnum)
  .settings(
    publish / skip := false,
    libraryDependencies ++= Seq(
      "org.postgresql" % "postgresql" % postgresDriverVersion % "provided",
      "org.scalameta" %% "munit" % munitVersion % Test,
      "com.dimafeng" %% "testcontainers-scala-munit" % testcontainersVersion % Test,
      "com.dimafeng" %% "testcontainers-scala-postgresql" % testcontainersVersion % Test,
      "io.circe" %% "circe-core" % circeVersion % Test,
      "io.circe" %% "circe-parser" % circeVersion % Test,
      "org.scala-lang.modules" %% "scala-xml" % "2.3.0" % Test
    )
  )

// ZIO integration. ZIO itself is Provided so downstream users pick the version.
lazy val magnumZio = project
  .in(file("magnum-zio"))
  .dependsOn(magnum)
  .settings(
    publish / skip := false,
    libraryDependencies ++= Seq(
      "dev.zio" %% "zio" % "2.1.24" % Provided,
      "org.scalameta" %% "munit" % munitVersion % Test,
      "com.dimafeng" %% "testcontainers-scala-munit" % testcontainersVersion % Test,
      "com.dimafeng" %% "testcontainers-scala-postgresql" % testcontainersVersion % Test,
      "org.postgresql" % "postgresql" % postgresDriverVersion % Test
    )
  )
================================================ FILE: magnum/src/main/scala/com/augustnagro/magnum/BatchUpdateResult.scala ================================================
package com.augustnagro.magnum

import scala.util.boundary

/** The total number of rows updated, or SuccessNoInfo if unknown. */
enum BatchUpdateResult:
  case Success(rowsUpdated: Long)
  case SuccessNoInfo

================================================ FILE: magnum/src/main/scala/com/augustnagro/magnum/ClickhouseDbType.scala ================================================
package com.augustnagro.magnum

import java.sql.{Connection, PreparedStatement, ResultSet, Statement}
import java.time.OffsetDateTime
import java.util.StringJoiner
import scala.collection.View
import scala.deriving.Mirror
import scala.reflect.ClassTag
import scala.util.{Failure, Success, Using, boundary}

/** DbType for ClickHouse. Because ClickHouse has no generated keys, the
  * entity-creator type EC must be the same class as the entity type E (enforced
  * at construction via a runtime `require`). `findAllById`, `update`, and
  * `updateAll` are unsupported and throw UnsupportedOperationException.
  */
object ClickhouseDbType extends DbType:
  /** Builds the generated repository methods (count, findById, insert, ...)
    * for one table.
    *
    * @param tableNameSql
    *   table name as it appears in SQL
    * @param eElemNamesSql
    *   column names (SQL form), indexed in entity-field order
    * @param idIndex
    *   position of the @Id field within the entity's product elements
    */
  def buildRepoDefaults[EC, E, ID](
      tableNameSql: String,
      eElemNames: Seq[String],
      eElemNamesSql: Seq[String],
      eElemCodecs: Seq[DbCodec[?]],
      ecElemNames: Seq[String],
      ecElemNamesSql: Seq[String],
      idIndex: Int
  )(using
      eCodec: DbCodec[E],
      ecCodec: DbCodec[EC],
      idCodec: DbCodec[ID],
      eClassTag: ClassTag[E],
      ecClassTag: ClassTag[EC],
      idClassTag: ClassTag[ID]
  ): RepoDefaults[EC, E, ID] =
    // ClickHouse cannot return generated keys, so insertReturning simply echoes
    // the EC value back as an E. That only works when EC and E are one class.
    require(
      eClassTag.runtimeClass == ecClassTag.runtimeClass,
      "ClickHouse does not support generated keys, so EC must equal E"
    )
    val idName = eElemNamesSql(idIndex)
    val selectKeys = eElemNamesSql.mkString(", ")
    val ecInsertKeys = ecElemNamesSql.mkString("(", ", ", ")")
    // SQL strings (and, where parameter-free, pre-built Frags) are constructed
    // once here and reused by every call on the returned RepoDefaults.
    val countSql = s"SELECT count(*) FROM $tableNameSql"
    val countQuery = Frag(countSql, Vector.empty, FragWriter.empty).query[Long]
    val existsByIdSql =
      s"SELECT 1 FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}"
    val findAllSql = s"SELECT $selectKeys FROM $tableNameSql"
    val findAllQuery = Frag(findAllSql, Vector.empty, FragWriter.empty).query[E]
    val findByIdSql =
      s"SELECT $selectKeys FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}"
    val deleteByIdSql =
      s"DELETE FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}"
    val truncateSql = s"TRUNCATE TABLE $tableNameSql"
    val truncateUpdate =
      Frag(truncateSql, Vector.empty, FragWriter.empty).update
    val insertSql =
      s"INSERT INTO $tableNameSql $ecInsertKeys VALUES (${ecCodec.queryRepr})"

    // Writes a single id parameter and returns the next parameter position.
    def idWriter(id: ID): FragWriter = (ps, pos) =>
      idCodec.writeSingle(id, ps, pos)
      pos + idCodec.cols.length

    new RepoDefaults[EC, E, ID]:
      def count(using con: DbCon): Long = countQuery.run().head

      def existsById(id: ID)(using DbCon): Boolean =
        Frag(existsByIdSql, IArray(id), idWriter(id))
          .query[Int]
          .run()
          .nonEmpty

      def findAll(using DbCon): Vector[E] = findAllQuery.run()

      def findAll(spec: Spec[E])(using DbCon): Vector[E] =
        SpecImpl.Default.findAll(spec, tableNameSql)

      def findById(id: ID)(using DbCon): Option[E] =
        Frag(findByIdSql, IArray(id), idWriter(id))
          .query[E]
          .run()
          .headOption

      // Not supported for ClickHouse.
      def findAllById(ids: Iterable[ID])(using DbCon): Vector[E] =
        throw UnsupportedOperationException()

      // Extracts the id from the entity by product-element position.
      def delete(entity: E)(using DbCon): Unit =
        deleteById(
          entity
            .asInstanceOf[Product]
            .productElement(idIndex)
            .asInstanceOf[ID]
        )

      def deleteById(id: ID)(using DbCon): Unit =
        Frag(deleteByIdSql, IArray(id), idWriter(id)).update.run()

      def truncate()(using DbCon): Unit = truncateUpdate.run()

      def deleteAll(entities: Iterable[E])(using DbCon): BatchUpdateResult =
        deleteAllById(
          entities.map(e =>
            e.asInstanceOf[Product].productElement(idIndex).asInstanceOf[ID]
          )
        )

      // handleQuery / timed / batchUpdateResult are helpers defined elsewhere
      // in this package (util.scala); presumably they wrap SQL-exception
      // handling and logging — not visible here, so behavior is per those
      // helpers.
      def deleteAllById(ids: Iterable[ID])(using
          con: DbCon
      ): BatchUpdateResult =
        handleQuery(deleteByIdSql, ids):
          Using(con.connection.prepareStatement(deleteByIdSql)): ps =>
            idCodec.write(ids, ps)
            timed(batchUpdateResult(ps.executeBatch()))

      def insert(entityCreator: EC)(using con: DbCon): Unit =
        handleQuery(insertSql, entityCreator):
          Using(con.connection.prepareStatement(insertSql)): ps =>
            ecCodec.writeSingle(entityCreator, ps)
            timed(ps.executeUpdate())

      def insertAll(entityCreators: Iterable[EC])(using con: DbCon): Unit =
        handleQuery(insertSql, entityCreators):
          Using(con.connection.prepareStatement(insertSql)): ps =>
            ecCodec.write(entityCreators, ps)
            timed(batchUpdateResult(ps.executeBatch()))

      // Safe cast: the require above guarantees EC and E are the same class.
      def insertReturning(entityCreator: EC)(using con: DbCon): E =
        handleQuery(insertSql, entityCreator):
          Using(con.connection.prepareStatement(insertSql)): ps =>
            ecCodec.writeSingle(entityCreator, ps)
            timed:
              ps.executeUpdate()
              entityCreator.asInstanceOf[E]

      def insertAllReturning(
          entityCreators: Iterable[EC]
      )(using con: DbCon): Vector[E] =
        handleQuery(insertSql, entityCreators):
          Using(con.connection.prepareStatement(insertSql)): ps =>
            ecCodec.write(entityCreators, ps)
            timed:
              batchUpdateResult(ps.executeBatch())
              entityCreators.toVector.asInstanceOf[Vector[E]]

      // Not supported for ClickHouse.
      def update(entity: E)(using DbCon): Unit =
        throw UnsupportedOperationException()

      // Not supported for ClickHouse.
      def updateAll(entities: Iterable[E])(using
          con: DbCon
      ): BatchUpdateResult =
        throw UnsupportedOperationException()
    end new
  end buildRepoDefaults
end ClickhouseDbType

================================================ FILE: magnum/src/main/scala/com/augustnagro/magnum/ColumnName.scala ================================================
package com.augustnagro.magnum

/** Represents an entity column. Can be interpolated in sql"" expressions */
class ColumnName(
    val scalaName: String,
    val sqlName: String,
    val queryRepr: String
) extends SqlLiteral

================================================ FILE: magnum/src/main/scala/com/augustnagro/magnum/ColumnNames.scala ================================================
package com.augustnagro.magnum

/** A grouping of schema names, which may be interpolated in sql"" expressions.
  * @param queryRepr
  *   The query representation. For example, "myColA, myColB"
  * @param columnNames
  *   The column names.
  */
class ColumnNames(val queryRepr: String, val columnNames: IArray[ColumnName])
    extends SqlLiteral

================================================ FILE: magnum/src/main/scala/com/augustnagro/magnum/DbCodec.scala ================================================
package com.augustnagro.magnum

import java.net.URL
import java.sql.{JDBCType, PreparedStatement, ResultSet, Types}
import java.time.{
  Instant,
  LocalDate,
  LocalDateTime,
  LocalTime,
  OffsetDateTime,
  ZoneId,
  ZoneOffset
}
import java.util.UUID
import scala.annotation.implicitNotFound
import scala.deriving.Mirror
import scala.compiletime.{
  constValue,
  constValueTuple,
  erasedValue,
  error,
  summonAll,
  summonFrom,
  summonInline
}
import scala.quoted.*
import scala.reflect.ClassTag
import scala.util.boundary

/** Typeclass for JDBC reading & writing. */
trait DbCodec[E]:
  self =>

  /** Syntax used when querying the db. For example,
    *
    * DbCodec[Int].queryRepr == "?"
    *
    * DbCodec[(String, Boolean)].queryRepr = "(?, ?)"
    *
    * case class User(id: Long, name: String) derives DbCodec
    * DbCodec[User].queryRepr = "? ?"
    */
  def queryRepr: String

  /** The `java.sql.Types` constant for every "?" in `queryRepr`. For mapping
    * database-specific types, Types.JAVA_OBJECT is recommended.
    */
  def cols: IArray[Int]

  /** Read an E from the ResultSet starting at position `pos` and ending after
    * reading `cols` number of columns. Make sure the ResultSet is in a valid
    * state (ie, ResultSet::next has been called).
    */
  def readSingle(resultSet: ResultSet, pos: Int): E

  /** Build an E from the ResultSet starting at position 1 and ending after
    * reading `cols` number of columns. Make sure the ResultSet is in a valid
    * state (ie, ResultSet::next has been called).
    */
  def readSingle(resultSet: ResultSet): E = readSingle(resultSet, 1)

  /** Read an Option[E] from the ResultSet starting at position `pos` and ending
    * after reading `cols` number of columns. Make sure the ResultSet is in a
    * valid state (ie, ResultSet::next has been called).
    */
  def readSingleOption(resultSet: ResultSet, pos: Int): Option[E]

  /** Build every row in the ResultSet into a sequence of E. The ResultSet
    * should be in its initial position before calling (ie, ResultSet::next not
    * called).
    */
  def read(resultSet: ResultSet): Vector[E] =
    val res = Vector.newBuilder[E]
    while resultSet.next() do res += readSingle(resultSet)
    res.result()

  /** Write the entity to the PreparedStatement starting at position `pos` */
  def writeSingle(entity: E, ps: PreparedStatement, pos: Int): Unit

  /** Write the entity to the resultSet starting at position 1 */
  def writeSingle(entity: E, ps: PreparedStatement): Unit =
    writeSingle(entity, ps, 1)

  /** Writes multiple entities to the preparedStatement via
    * PreparedStatement::addBatch
    */
  def write(entities: Iterable[E], ps: PreparedStatement): Unit =
    for e <- entities do
      writeSingle(e, ps)
      ps.addBatch()

  /** Derives a DbCodec[E2] from this codec by mapping values in both
    * directions. Column metadata (`cols`, `queryRepr`) is inherited unchanged.
    */
  def biMap[E2](to: E => E2, from: E2 => E): DbCodec[E2] =
    new DbCodec[E2]:
      val cols: IArray[Int] = self.cols
      def readSingle(rs: ResultSet, pos: Int): E2 =
        to(self.readSingle(rs, pos))
      def readSingleOption(rs: ResultSet, pos: Int): Option[E2] =
        self.readSingleOption(rs, pos).map(to)
      def writeSingle(e: E2, ps: PreparedStatement, pos: Int): Unit =
        self.writeSingle(from(e), ps, pos)
      def queryRepr: String = self.queryRepr
end DbCodec

object DbCodec:

  /** Summons the DbCodec instance for E. */
  inline def apply[E](using codec: DbCodec[E]): DbCodec[E] = codec

  // Fallback codec: reads/writes values as plain JDBC objects.
  given AnyCodec: DbCodec[Any] with
    val cols: IArray[Int] = IArray(Types.JAVA_OBJECT)
    def readSingle(rs: ResultSet, pos: Int): Any = rs.getObject(pos)
    def readSingleOption(rs: ResultSet, pos: Int): Option[Any] =
      Option(rs.getObject(pos))
    def writeSingle(a: Any, ps: PreparedStatement, pos: Int): Unit =
      ps.setObject(pos, a)
    def queryRepr: String = "?"
/** Codec for `String` columns (VARCHAR). Null reads become None via
  * `Option(...)`.
  */
given StringCodec: DbCodec[String] with
  val queryRepr: String = "?"
  val cols: IArray[Int] = IArray(Types.VARCHAR)
  def readSingle(rs: ResultSet, pos: Int): String = rs.getString(pos)
  def readSingleOption(rs: ResultSet, pos: Int): Option[String] =
    Option(rs.getString(pos))
  def writeSingle(value: String, ps: PreparedStatement, pos: Int): Unit =
    ps.setString(pos, value)

/** Codec for `Boolean` columns. SQL NULL is detected through `readOptImpl`
  * (ResultSet.wasNull), since getBoolean returns false for NULL.
  */
given BooleanCodec: DbCodec[Boolean] with
  val queryRepr: String = "?"
  val cols: IArray[Int] = IArray(Types.BOOLEAN)
  def readSingle(rs: ResultSet, pos: Int): Boolean = rs.getBoolean(pos)
  def readSingleOption(rs: ResultSet, pos: Int): Option[Boolean] =
    readOptImpl(this, rs, pos)
  def writeSingle(value: Boolean, ps: PreparedStatement, pos: Int): Unit =
    ps.setBoolean(pos, value)

/** Codec for `Byte` columns (TINYINT). */
given ByteCodec: DbCodec[Byte] with
  val queryRepr: String = "?"
  val cols: IArray[Int] = IArray(Types.TINYINT)
  def readSingle(rs: ResultSet, pos: Int): Byte = rs.getByte(pos)
  def readSingleOption(rs: ResultSet, pos: Int): Option[Byte] =
    readOptImpl(this, rs, pos)
  def writeSingle(value: Byte, ps: PreparedStatement, pos: Int): Unit =
    ps.setByte(pos, value)

/** Codec for `Short` columns (SMALLINT). */
given ShortCodec: DbCodec[Short] with
  val queryRepr: String = "?"
  val cols: IArray[Int] = IArray(Types.SMALLINT)
  def readSingle(rs: ResultSet, pos: Int): Short = rs.getShort(pos)
  def readSingleOption(rs: ResultSet, pos: Int): Option[Short] =
    readOptImpl(this, rs, pos)
  def writeSingle(value: Short, ps: PreparedStatement, pos: Int): Unit =
    ps.setShort(pos, value)

/** Codec for `Int` columns (INTEGER). */
given IntCodec: DbCodec[Int] with
  val queryRepr: String = "?"
  val cols: IArray[Int] = IArray(Types.INTEGER)
  def readSingle(rs: ResultSet, pos: Int): Int = rs.getInt(pos)
  def readSingleOption(rs: ResultSet, pos: Int): Option[Int] =
    readOptImpl(this, rs, pos)
  def writeSingle(value: Int, ps: PreparedStatement, pos: Int): Unit =
    ps.setInt(pos, value)
/** Codec for `Long` columns (BIGINT). */
given LongCodec: DbCodec[Long] with
  val queryRepr: String = "?"
  val cols: IArray[Int] = IArray(Types.BIGINT)
  def readSingle(rs: ResultSet, pos: Int): Long = rs.getLong(pos)
  def readSingleOption(rs: ResultSet, pos: Int): Option[Long] =
    readOptImpl(this, rs, pos)
  def writeSingle(value: Long, ps: PreparedStatement, pos: Int): Unit =
    ps.setLong(pos, value)

/** Codec for `Float` columns (REAL). */
given FloatCodec: DbCodec[Float] with
  val queryRepr: String = "?"
  val cols: IArray[Int] = IArray(Types.REAL)
  def readSingle(rs: ResultSet, pos: Int): Float = rs.getFloat(pos)
  def readSingleOption(rs: ResultSet, pos: Int): Option[Float] =
    readOptImpl(this, rs, pos)
  def writeSingle(value: Float, ps: PreparedStatement, pos: Int): Unit =
    ps.setFloat(pos, value)

/** Codec for `Double` columns (DOUBLE). */
given DoubleCodec: DbCodec[Double] with
  val queryRepr: String = "?"
  val cols: IArray[Int] = IArray(Types.DOUBLE)
  def readSingle(rs: ResultSet, pos: Int): Double = rs.getDouble(pos)
  def readSingleOption(rs: ResultSet, pos: Int): Option[Double] =
    readOptImpl(this, rs, pos)
  def writeSingle(value: Double, ps: PreparedStatement, pos: Int): Unit =
    ps.setDouble(pos, value)

/** Codec for mutable byte-array columns (BINARY). */
given ByteArrayCodec: DbCodec[Array[Byte]] with
  val queryRepr: String = "?"
  val cols: IArray[Int] = IArray(Types.BINARY)
  def readSingle(rs: ResultSet, pos: Int): Array[Byte] = rs.getBytes(pos)
  def readSingleOption(rs: ResultSet, pos: Int): Option[Array[Byte]] =
    Option(rs.getBytes(pos))
  def writeSingle(value: Array[Byte], ps: PreparedStatement, pos: Int): Unit =
    ps.setBytes(pos, value)

/** Codec for immutable byte-array columns; delegates to ByteArrayCodec and
  * converts with IArray.unsafeFromArray (no copy on read).
  */
given ByteIArrayCodec: DbCodec[IArray[Byte]] with
  val queryRepr: String = "?"
  val cols: IArray[Int] = IArray(Types.BINARY)
  def readSingle(rs: ResultSet, pos: Int): IArray[Byte] =
    IArray.unsafeFromArray(rs.getBytes(pos))
  def readSingleOption(rs: ResultSet, pos: Int): Option[IArray[Byte]] =
    ByteArrayCodec.readSingleOption(rs, pos).map(IArray.unsafeFromArray)
  def writeSingle(
      value: IArray[Byte],
      ps: PreparedStatement,
      pos: Int
  ): Unit =
    // Copies to a mutable array, since JDBC setBytes requires Array[Byte].
    ps.setBytes(pos, IArray.genericWrapArray(value).toArray)
/** Codec for `java.sql.Date` columns (DATE). */
given SqlDateCodec: DbCodec[java.sql.Date] with
  val queryRepr: String = "?"
  val cols: IArray[Int] = IArray(Types.DATE)
  def readSingle(rs: ResultSet, pos: Int): java.sql.Date = rs.getDate(pos)
  def readSingleOption(rs: ResultSet, pos: Int): Option[java.sql.Date] =
    Option(rs.getDate(pos))
  def writeSingle(
      value: java.sql.Date,
      ps: PreparedStatement,
      pos: Int
  ): Unit =
    ps.setDate(pos, value)

/** Codec for `java.sql.Time` columns (TIME). */
given SqlTimeCodec: DbCodec[java.sql.Time] with
  val queryRepr: String = "?"
  val cols: IArray[Int] = IArray(Types.TIME)
  def readSingle(rs: ResultSet, pos: Int): java.sql.Time = rs.getTime(pos)
  def readSingleOption(rs: ResultSet, pos: Int): Option[java.sql.Time] =
    Option(rs.getTime(pos))
  def writeSingle(
      value: java.sql.Time,
      ps: PreparedStatement,
      pos: Int
  ): Unit =
    ps.setTime(pos, value)

/** Codec for `java.sql.Timestamp` columns (TIMESTAMP). */
given SqlTimestampCodec: DbCodec[java.sql.Timestamp] with
  val queryRepr: String = "?"
  val cols: IArray[Int] = IArray(Types.TIMESTAMP)
  def readSingle(rs: ResultSet, pos: Int): java.sql.Timestamp =
    rs.getTimestamp(pos)
  def readSingleOption(rs: ResultSet, pos: Int): Option[java.sql.Timestamp] =
    Option(rs.getTimestamp(pos))
  def writeSingle(
      value: java.sql.Timestamp,
      ps: PreparedStatement,
      pos: Int
  ): Unit =
    ps.setTimestamp(pos, value)

/** Codec for `OffsetDateTime` columns (TIMESTAMP WITH TIME ZONE), using the
  * JDBC 4.2 getObject/setObject java.time mapping.
  */
given OffsetDateTimeCodec: DbCodec[OffsetDateTime] with
  val queryRepr: String = "?"
  val cols: IArray[Int] = IArray(Types.TIMESTAMP_WITH_TIMEZONE)
  def readSingle(rs: ResultSet, pos: Int): OffsetDateTime =
    rs.getObject(pos, classOf[OffsetDateTime])
  def readSingleOption(rs: ResultSet, pos: Int): Option[OffsetDateTime] =
    readOptImpl(this, rs, pos)
  def writeSingle(
      value: OffsetDateTime,
      ps: PreparedStatement,
      pos: Int
  ): Unit =
    ps.setObject(pos, value)
/** Codec for `Instant`, stored as an OffsetDateTime at UTC. */
given InstantCodec: DbCodec[Instant] =
  OffsetDateTimeCodec.biMap(
    odt => odt.toInstant,
    instant => instant.atOffset(ZoneOffset.UTC)
  )

/** Codec for `LocalDate` columns (DATE), via the JDBC 4.2 java.time mapping. */
given LocalDateCodec: DbCodec[LocalDate] with
  val queryRepr: String = "?"
  val cols: IArray[Int] = IArray(Types.DATE)
  def readSingle(rs: ResultSet, pos: Int): LocalDate =
    rs.getObject(pos, classOf[LocalDate])
  def readSingleOption(rs: ResultSet, pos: Int): Option[LocalDate] =
    readOptImpl(this, rs, pos)
  def writeSingle(value: LocalDate, ps: PreparedStatement, pos: Int): Unit =
    ps.setObject(pos, value)

/** Codec for `LocalTime` columns (TIME). */
given LocalTimeCodec: DbCodec[LocalTime] with
  val queryRepr: String = "?"
  val cols: IArray[Int] = IArray(Types.TIME)
  def readSingle(rs: ResultSet, pos: Int): LocalTime =
    rs.getObject(pos, classOf[LocalTime])
  def readSingleOption(rs: ResultSet, pos: Int): Option[LocalTime] =
    readOptImpl(this, rs, pos)
  def writeSingle(value: LocalTime, ps: PreparedStatement, pos: Int): Unit =
    ps.setObject(pos, value)

/** Codec for `LocalDateTime` columns (TIMESTAMP). */
given LocalDateTimeCodec: DbCodec[LocalDateTime] with
  val queryRepr: String = "?"
  val cols: IArray[Int] = IArray(Types.TIMESTAMP)
  def readSingle(rs: ResultSet, pos: Int): LocalDateTime =
    rs.getObject(pos, classOf[LocalDateTime])
  def readSingleOption(rs: ResultSet, pos: Int): Option[LocalDateTime] =
    readOptImpl(this, rs, pos)
  def writeSingle(value: LocalDateTime, ps: PreparedStatement, pos: Int): Unit =
    ps.setObject(pos, value)

/** Codec for `ZoneId`, stored as its String id. */
given ZoneIdCodec: DbCodec[ZoneId] =
  StringCodec.biMap(str => ZoneId.of(str), zone => zone.toString)

/** Codec for `java.sql.Ref` columns (REF). */
given SqlRefCodec: DbCodec[java.sql.Ref] with
  val queryRepr: String = "?"
  val cols: IArray[Int] = IArray(Types.REF)
  def readSingle(rs: ResultSet, pos: Int): java.sql.Ref = rs.getRef(pos)
  def readSingleOption(rs: ResultSet, pos: Int): Option[java.sql.Ref] =
    readOptImpl(this, rs, pos)
  def writeSingle(value: java.sql.Ref, ps: PreparedStatement, pos: Int): Unit =
    ps.setRef(pos, value)
/** Codec for `java.sql.Blob` columns (BLOB). */
given SqlBlobCodec: DbCodec[java.sql.Blob] with
  val queryRepr: String = "?"
  val cols: IArray[Int] = IArray(Types.BLOB)
  def readSingle(rs: ResultSet, pos: Int): java.sql.Blob = rs.getBlob(pos)
  def readSingleOption(rs: ResultSet, pos: Int): Option[java.sql.Blob] =
    readOptImpl(this, rs, pos)
  def writeSingle(
      value: java.sql.Blob,
      ps: PreparedStatement,
      pos: Int
  ): Unit =
    ps.setBlob(pos, value)

/** Codec for `java.sql.Clob` columns (CLOB). */
given SqlClobCodec: DbCodec[java.sql.Clob] with
  val queryRepr: String = "?"
  val cols: IArray[Int] = IArray(Types.CLOB)
  def readSingle(rs: ResultSet, pos: Int): java.sql.Clob = rs.getClob(pos)
  def readSingleOption(rs: ResultSet, pos: Int): Option[java.sql.Clob] =
    readOptImpl(this, rs, pos)
  def writeSingle(
      value: java.sql.Clob,
      ps: PreparedStatement,
      pos: Int
  ): Unit =
    ps.setClob(pos, value)

/** Codec for `java.net.URL`, stored in VARCHAR columns. */
given URLCodec: DbCodec[URL] with
  val queryRepr: String = "?"
  val cols: IArray[Int] = IArray(Types.VARCHAR)
  def readSingle(rs: ResultSet, pos: Int): URL = rs.getURL(pos)
  def readSingleOption(rs: ResultSet, pos: Int): Option[URL] =
    Option(rs.getURL(pos))
  def writeSingle(value: URL, ps: PreparedStatement, pos: Int): Unit =
    ps.setURL(pos, value)

/** Codec for `java.sql.RowId` columns (ROWID). */
given RowIdCodec: DbCodec[java.sql.RowId] with
  val queryRepr: String = "?"
  val cols: IArray[Int] = IArray(Types.ROWID)
  def readSingle(rs: ResultSet, pos: Int): java.sql.RowId = rs.getRowId(pos)
  def readSingleOption(rs: ResultSet, pos: Int): Option[java.sql.RowId] =
    Option(rs.getRowId(pos))
  def writeSingle(
      value: java.sql.RowId,
      ps: PreparedStatement,
      pos: Int
  ): Unit =
    ps.setRowId(pos, value)

/** Codec for `java.sql.NClob` columns (NCLOB). */
given SqlNClobCodec: DbCodec[java.sql.NClob] with
  val queryRepr: String = "?"
  val cols: IArray[Int] = IArray(Types.NCLOB)
  def readSingle(rs: ResultSet, pos: Int): java.sql.NClob = rs.getNClob(pos)
  def readSingleOption(rs: ResultSet, pos: Int): Option[java.sql.NClob] =
    readOptImpl(this, rs, pos)
  def writeSingle(
      value: java.sql.NClob,
      ps: PreparedStatement,
      pos: Int
  ): Unit =
    ps.setNClob(pos, value)
given SqlXmlCodec: DbCodec[java.sql.SQLXML] with val cols: IArray[Int] = IArray(Types.SQLXML) def readSingle(rs: ResultSet, pos: Int): java.sql.SQLXML = rs.getSQLXML(pos) def readSingleOption(rs: ResultSet, pos: Int): Option[java.sql.SQLXML] = readOptImpl(this, rs, pos) def writeSingle(s: java.sql.SQLXML, ps: PreparedStatement, pos: Int): Unit = ps.setSQLXML(pos, s) def queryRepr: String = "?" given JavaBigDecimalCodec: DbCodec[java.math.BigDecimal] with val cols: IArray[Int] = IArray(Types.NUMERIC) def readSingle(rs: ResultSet, pos: Int): java.math.BigDecimal = rs.getBigDecimal(pos) def readSingleOption( rs: ResultSet, pos: Int ): Option[java.math.BigDecimal] = Option(rs.getBigDecimal(pos)) def writeSingle( bd: java.math.BigDecimal, ps: PreparedStatement, pos: Int ): Unit = ps.setBigDecimal(pos, bd) def queryRepr: String = "?" given ScalaBigDecimalCodec: DbCodec[scala.math.BigDecimal] with val cols: IArray[Int] = IArray(Types.NUMERIC) def readSingle(rs: ResultSet, pos: Int): scala.math.BigDecimal = scala.math.BigDecimal(rs.getBigDecimal(pos)) def readSingleOption(rs: ResultSet, pos: Int): Option[BigDecimal] = JavaBigDecimalCodec .readSingleOption(rs, pos) .map(scala.math.BigDecimal.apply) def writeSingle( bd: scala.math.BigDecimal, ps: PreparedStatement, pos: Int ): Unit = ps.setBigDecimal(pos, bd.underlying) def queryRepr: String = "?" given UUIDCodec: DbCodec[UUID] with def queryRepr: String = "?" 
val cols: IArray[Int] = IArray(Types.OTHER) def readSingle(rs: ResultSet, pos: Int): UUID = rs.getObject(pos, classOf[UUID]) def readSingleOption(rs: ResultSet, pos: Int): Option[UUID] = val res = rs.getObject(pos, classOf[UUID]) if rs.wasNull then None else Some(res) def writeSingle(entity: UUID, ps: PreparedStatement, pos: Int): Unit = ps.setObject(pos, entity) given OptionCodec[A](using codec: DbCodec[A]): DbCodec[Option[A]] with def cols: IArray[Int] = codec.cols def readSingle(rs: ResultSet, pos: Int): Option[A] = codec.readSingleOption(rs, pos) def readSingleOption(rs: ResultSet, pos: Int): Option[Option[A]] = Some(codec.readSingleOption(rs, pos)) def writeSingle(opt: Option[A], ps: PreparedStatement, pos: Int): Unit = opt match case Some(a) => codec.writeSingle(a, ps, pos) case None => for i <- cols.indices do ps.setNull(pos + i, cols(i)) def queryRepr: String = codec.queryRepr given SomeCodec[A](using codec: DbCodec[A]): DbCodec[Some[A]] with def cols: IArray[Int] = codec.cols def readSingle(rs: ResultSet, pos: Int): Some[A] = Some(codec.readSingle(rs, pos)) def readSingleOption(rs: ResultSet, pos: Int): Option[Some[A]] = codec.readSingleOption(rs, pos).map(Some.apply) def writeSingle(s: Some[A], ps: PreparedStatement, pos: Int): Unit = codec.writeSingle(s.get, ps, pos) def queryRepr: String = codec.queryRepr given Tuple2Codec[A, B](using aCodec: DbCodec[A], bCodec: DbCodec[B] ): DbCodec[(A, B)] with val cols: IArray[Int] = IArray.concat(aCodec.cols, bCodec.cols) def readSingle(rs: ResultSet, pos: Int): (A, B) = ( aCodec.readSingle(rs, pos), bCodec.readSingle(rs, pos + aCodec.cols.length) ) def readSingleOption(rs: ResultSet, pos: Int): Option[(A, B)] = val a = aCodec.readSingleOption(rs, pos) val b = bCodec.readSingleOption(rs, pos + aCodec.cols.length) (a, b) match case (Some(a), Some(b)) => Some((a, b)) case _ => None def writeSingle(tup: (A, B), ps: PreparedStatement, pos: Int): Unit = aCodec.writeSingle(tup._1, ps, pos) bCodec.writeSingle(tup._2, ps, 
pos + aCodec.cols.length) val queryRepr: String = s"(${aCodec.queryRepr}, ${bCodec.queryRepr})" given Tuple3Codec[A, B, C](using aCodec: DbCodec[A], bCodec: DbCodec[B], cCodec: DbCodec[C] ): DbCodec[(A, B, C)] with val cols: IArray[Int] = IArray.concat(aCodec.cols, bCodec.cols, cCodec.cols) def readSingle(rs: ResultSet, pos: Int): (A, B, C) = var i = pos val a = aCodec.readSingle(rs, i) i += aCodec.cols.length val b = bCodec.readSingle(rs, i) i += bCodec.cols.length val c = cCodec.readSingle(rs, i) (a, b, c) def readSingleOption(rs: ResultSet, pos: Int): Option[(A, B, C)] = var i = pos val a = aCodec.readSingleOption(rs, i) i += aCodec.cols.length val b = bCodec.readSingleOption(rs, i) i += bCodec.cols.length val c = cCodec.readSingleOption(rs, i) (a, b, c) match case (Some(a), Some(b), Some(c)) => Some((a, b, c)) case _ => None def writeSingle(tup: (A, B, C), ps: PreparedStatement, pos: Int): Unit = var i = pos aCodec.writeSingle(tup._1, ps, i) i += aCodec.cols.length bCodec.writeSingle(tup._2, ps, i) i += bCodec.cols.length cCodec.writeSingle(tup._3, ps, i) val queryRepr: String = s"(${aCodec.queryRepr}, ${bCodec.queryRepr}, ${cCodec.queryRepr})" end Tuple3Codec given Tuple4Codec[A, B, C, D](using aCodec: DbCodec[A], bCodec: DbCodec[B], cCodec: DbCodec[C], dCodec: DbCodec[D] ): DbCodec[(A, B, C, D)] with val cols: IArray[Int] = IArray.concat(aCodec.cols, bCodec.cols, cCodec.cols, dCodec.cols) def readSingle(rs: ResultSet, pos: Int): (A, B, C, D) = var i = pos val a = aCodec.readSingle(rs, i) i += aCodec.cols.length val b = bCodec.readSingle(rs, i) i += bCodec.cols.length val c = cCodec.readSingle(rs, i) i += cCodec.cols.length val d = dCodec.readSingle(rs, i) (a, b, c, d) def readSingleOption(rs: ResultSet, pos: Int): Option[(A, B, C, D)] = var i = pos val a = aCodec.readSingleOption(rs, i) i += aCodec.cols.length val b = bCodec.readSingleOption(rs, i) i += bCodec.cols.length val c = cCodec.readSingleOption(rs, i) i += cCodec.cols.length val d = 
// (Continuation of Tuple4Codec.readSingleOption — the `val d =` binding begins
// on the preceding line; a, b, c were read by the earlier codecs.)
      dCodec.readSingleOption(rs, i)
      // The tuple is Some only when every component read a non-null value.
      (a, b, c, d) match
        case (Some(a), Some(b), Some(c), Some(d)) => Some((a, b, c, d))
        case _ => None
    def writeSingle(tup: (A, B, C, D), ps: PreparedStatement, pos: Int): Unit =
      // Each codec may span several columns; advance by cols.length each time.
      var i = pos
      aCodec.writeSingle(tup._1, ps, i)
      i += aCodec.cols.length
      bCodec.writeSingle(tup._2, ps, i)
      i += bCodec.cols.length
      cCodec.writeSingle(tup._3, ps, i)
      i += cCodec.cols.length
      dCodec.writeSingle(tup._4, ps, i)
    val queryRepr: String =
      s"(${aCodec.queryRepr}, ${bCodec.queryRepr}, ${cCodec.queryRepr}, ${dCodec.queryRepr})"
  end Tuple4Codec

  /** Derives a DbCodec for a tuple of any arity via tupleNCodecImpl. */
  inline given TupleNCodec[T <: Tuple]: DbCodec[T] = ${ tupleNCodecImpl[T] }

  // Recursively summons a DbCodec for each element type of tuple T,
  // accumulating the codec expressions in `res`. Aborts compilation when an
  // element type has no DbCodec instance in scope.
  private def codecExprs[T <: Tuple: Type](
      res: Vector[Expr[DbCodec[?]]] = Vector.empty
  )(using Quotes): Expr[IArray[DbCodec[?]]] =
    import quotes.reflect.*
    Type.of[T] match
      case '[EmptyTuple] => '{ IArray.from(${ Expr.ofSeq(res) }) }
      case '[t *: ts] =>
        val tCodec = Expr.summon[DbCodec[t]].getOrElse {
          report.errorAndAbort(s"No DbCodec found for type ${Type.show[t]}")
        }
        codecExprs[ts](res :+ tCodec)

  // Macro implementation backing TupleNCodec: builds a DbCodec that reads and
  // writes tuple elements left-to-right, advancing the JDBC position by each
  // element codec's column count.
  def tupleNCodecImpl[T <: Tuple: Type](using Quotes): Expr[DbCodec[T]] =
    import quotes.reflect.*
    Type.of[T] match
      case '[EmptyTuple] =>
        report.errorAndAbort("Cannot derive DbCodec for EmptyTuple")
      case '[t *: ts] =>
        val tCodecsExpr = codecExprs[t *: ts]()
        '{
          new DbCodec[t *: ts] {
            val tCodecs = ${ tCodecsExpr }
            val cols: IArray[Int] = tCodecs.flatMap(codec => codec.cols)
            def readSingle(rs: ResultSet, pos: Int): t *: ts =
              val tupleSize = constValue[Tuple.Size[t *: ts]]
              val result = Array.ofDim[Any](tupleSize)
              var tupleIdx = 0
              var psIdx = pos
              while tupleIdx < tupleSize do
                val codec = tCodecs(tupleIdx)
                result(tupleIdx) = codec.readSingle(rs, psIdx)
                tupleIdx += 1
                psIdx += codec.cols.length
              Tuple.fromArray(result).asInstanceOf[t *: ts]
            def readSingleOption(rs: ResultSet, pos: Int): Option[t *: ts] =
              // boundary/break short-circuits to None on the first NULL element.
              boundary:
                val tupleSize = constValue[Tuple.Size[t *: ts]]
                val res = Array.ofDim[Any](tupleSize)
                var tupleIdx = 0
                var psIdx = pos
                while tupleIdx < tupleSize do
                  val codec = tCodecs(tupleIdx)
                  codec.readSingleOption(rs, psIdx) match
                    case Some(value) => res(tupleIdx) = value
                    case None => boundary.break(Option.empty)
                  tupleIdx += 1
                  psIdx += codec.cols.length
                Some(Tuple.fromArray(res)).asInstanceOf[Option[t *: ts]]
            def writeSingle(e: t *: ts, ps: PreparedStatement, pos: Int): Unit =
              val tupleSize = constValue[Tuple.Size[t *: ts]]
              var tupleIdx = 0
              var psIdx = pos
              while tupleIdx < tupleSize do
                val codec = tCodecs(tupleIdx)
                codec
                  .asInstanceOf[DbCodec[Any]]
                  .writeSingle(e.productElement(tupleIdx), ps, psIdx)
                tupleIdx += 1
                psIdx += codec.cols.length
            val queryRepr: String =
              tCodecs.map(_.queryRepr).mkString("(", ", ", ")")
          }.asInstanceOf[DbCodec[T]]
        }
    end match
  end tupleNCodecImpl

  // Reads one value and converts JDBC's `wasNull` sentinel into an Option.
  private inline def readOptImpl[A](
      codec: DbCodec[A],
      resultSet: ResultSet,
      pos: Int
  ): Option[A] =
    val res = codec.readSingle(resultSet, pos)
    if resultSet.wasNull then None else Some(res)

  /** Derives a DbCodec for a case class (product) or simple enum (sum). */
  inline def derived[E: Mirror.Of]: DbCodec[E] = ${ dbCodecImpl[E] }

  private def dbCodecImpl[E: Type](using Quotes): Expr[DbCodec[E]] =
    import quotes.reflect.*
    val mirror = Expr.summon[Mirror.Of[E]].getOrElse {
      report.errorAndAbort(
        "Can only derive DbCodec for case classes, sealed traits or enums (products and sums)."
      )
    }
    mirror match
      // Product case: delegate to the productRead*/productWrite* macro helpers.
      case '{
            $mp: Mirror.ProductOf[E] { type MirroredElemTypes = mets }
          } =>
        val colsExpr = buildColsExpr[mets]()
        '{
          new DbCodec[E] {
            val cols: IArray[Int] = $colsExpr
            def readSingle(rs: ResultSet, pos: Int): E =
              ${ productReadSingle[E, mets]('{ rs }, mp, Vector.empty, '{ pos }) }
            def readSingleOption(rs: ResultSet, pos: Int): Option[E] =
              ${ productReadOption[E, mets]('{ rs }, mp, Vector.empty, '{ pos }) }
            def writeSingle(e: E, ps: PreparedStatement, pos: Int): Unit =
              ${ productWriteSingle[E, mets]('{ e }, '{ ps }, '{ pos }, '{ 0 }) }
            val queryRepr: String = ${ productQueryRepr[mets]() }
          }
        }
      // Sum case: stored as a single VARCHAR column, mapped via the enum's
      // (possibly @SqlName-overridden) case names.
      case '{
            $ms: Mirror.SumOf[E] {
              type MirroredElemTypes = mets
              type MirroredElemLabels = mels
              type MirroredLabel = mel
            }
          } =>
        val nameMapExpr = DerivingUtil.buildSqlNameMapForEnum[E, mels, mets]
        val melExpr = Expr(Type.valueOfConstant[mel].get.toString)
        '{
          new DbCodec[E] {
            val nameMap: Seq[(String, E)] = $nameMapExpr
            val cols: IArray[Int] = IArray(Types.VARCHAR)
            def readSingle(rs: ResultSet, pos: Int): E =
              val str = rs.getString(pos)
              nameMap.find((name, _) => name == str) match
                case Some((_, v)) => v
                case None =>
                  throw IllegalArgumentException(
                    str + " not convertible to " + $melExpr
                  )
            def readSingleOption(rs: ResultSet, pos: Int): Option[E] =
              Option(rs.getString(pos)).map(str =>
                nameMap.find((name, _) => name == str) match
                  case Some((_, v)) => v
                  case None =>
                    throw IllegalArgumentException(
                      str + " not convertible to " + $melExpr
                    )
              )
            def writeSingle(entity: E, ps: PreparedStatement, pos: Int): Unit =
              nameMap.find((_, v) => v == entity) match
                case Some((k, _)) => ps.setString(pos, k)
                case None =>
                  throw IllegalArgumentException(
                    entity.toString + " not convertible to " + $melExpr
                  )
            def queryRepr: String = "?"
// Close the quoted DbCodec instantiation from the sum case above.
          }
        }
    end match
  end dbCodecImpl

  // Builds the placeholder representation for a product's columns by
  // concatenating each element codec's queryRepr (a plain "?" when no codec
  // instance exists and ResultSet::getObject is used instead).
  private def productQueryRepr[Mets: Type](
      elemReprs: Vector[Expr[String]] = Vector.empty
  )(using Quotes): Expr[String] =
    import quotes.reflect.*
    Type.of[Mets] match
      case '[met *: metTail] =>
        Expr.summon[DbCodec[met]] match
          case Some(codec) =>
            productQueryRepr[metTail](elemReprs :+ '{ $codec.queryRepr })
          case None =>
            productQueryRepr[metTail](elemReprs :+ '{ "?" })
      case '[EmptyTuple] =>
        val seqExpr = Expr.ofSeq(elemReprs)
        '{ $seqExpr.mkString(", ") }

  // Concatenates the JDBC column-type arrays of every element codec.
  // Aborts compilation when an element type has no DbCodec instance.
  private def buildColsExpr[Mets: Type](
      res: Vector[Expr[IArray[Int]]] = Vector.empty
  )(using Quotes): Expr[IArray[Int]] =
    import quotes.reflect.*
    Type.of[Mets] match
      case '[met *: metTail] =>
        val metCodec = Expr.summon[DbCodec[met]].getOrElse {
          val metType = TypeRepr.of[met].show
          report.errorAndAbort(
            s"Cannot find a DbCodec instance for $metType! Provide one or derive it."
          )
        }
        val newCols = '{ $metCodec.cols }
        buildColsExpr[metTail](res :+ newCols)
      case '[EmptyTuple] =>
        '{
          val iArrays: Seq[IArray[Int]] = ${ Expr.ofSeq(res) }
          IArray.concat(iArrays*)
        }

  // Generates the readSingle body for a product type: reads each element in
  // field order, preferring the element's DbCodec and falling back to
  // ResultSet::getObject with the element's ClassTag.
  private def productReadSingle[E: Type, Mets: Type](
      rs: Expr[ResultSet],
      m: Expr[Mirror.ProductOf[E]],
      res: Vector[Expr[Any]],
      pos: Expr[Int]
  )(using Quotes): Expr[E] =
    import quotes.reflect.*
    Type.of[Mets] match
      case '[met *: metTail] =>
        Expr.summon[DbCodec[met]] match
          case Some(codecExpr) =>
            '{
              val posValue = $pos
              val codec = $codecExpr
              val metValue = codec.readSingle($rs, posValue)
              val newPos = posValue + codec.cols.length
              ${
                productReadSingle[E, metTail](
                  rs,
                  m,
                  res :+ '{ metValue },
                  '{ newPos }
                )
              }
            }
          case None =>
            Expr.summon[ClassTag[met]] match
              case Some(clsTagExpr) =>
                report.info(
                  s"Could not find DbCodec for ${TypeRepr.of[met].show}. Defaulting to ResultSet::[get|set]Object"
                )
                '{
                  val posValue = $pos
                  val metValue = $rs.getObject(
                    posValue,
                    $clsTagExpr.runtimeClass.asInstanceOf[Class[met]]
                  )
                  val newPos = posValue + 1
                  ${
                    productReadSingle[E, metTail](
                      rs,
                      m,
                      res :+ '{ metValue },
                      '{ newPos }
                    )
                  }
                }
              case None =>
                report.errorAndAbort(
                  // Fix: the literal was missing the `s` interpolator prefix,
                  // so the message printed "${TypeRepr.of[met].show}" verbatim.
                  s"Could not find DbCodec or ClassTag for ${TypeRepr.of[met].show}"
                )
      case '[EmptyTuple] =>
        '{
          val product = ${ Expr.ofTupleFromSeq(res) }
          $m.fromProduct(product)
        }
    end match
  end productReadSingle

  // Like productReadSingle, but short-circuits to None when any element
  // column is NULL.
  private def productReadOption[E: Type, Mets: Type](
      rs: Expr[ResultSet],
      m: Expr[Mirror.ProductOf[E]],
      res: Vector[Expr[Any]],
      pos: Expr[Int]
  )(using Quotes): Expr[Option[E]] =
    import quotes.reflect.*
    Type.of[Mets] match
      case '[met *: metTail] =>
        Expr.summon[DbCodec[met]] match
          case Some(codecExpr) =>
            '{
              val posValue = $pos
              val codec = $codecExpr
              codec.readSingleOption($rs, posValue) match
                case Some(metValue) =>
                  val newPos = posValue + codec.cols.length
                  ${
                    productReadOption[E, metTail](
                      rs,
                      m,
                      res :+ '{ metValue },
                      '{ newPos }
                    )
                  }
                case None => None
            }
          case None =>
            Expr.summon[ClassTag[met]] match
              case Some(clsTagExpr) =>
                report.info(
                  s"Could not find DbCodec for ${TypeRepr.of[met].show}. Defaulting to ResultSet::[get|set]Object"
                )
                '{
                  val posValue = $pos
                  val metValue = $rs.getObject(
                    posValue,
                    $clsTagExpr.runtimeClass.asInstanceOf[Class[met]]
                  )
                  if $rs.wasNull then None
                  else
                    val newPos = posValue + 1
                    ${
                      productReadOption[E, metTail](
                        rs,
                        m,
                        res :+ '{ metValue },
                        '{ newPos }
                      )
                    }
                }
              case None =>
                report.errorAndAbort(
                  // Fix: missing `s` interpolator prefix (same defect as in
                  // productReadSingle).
                  s"Could not find DbCodec or ClassTag for ${TypeRepr.of[met].show}"
                )
      case '[EmptyTuple] =>
        '{
          val product = ${ Expr.ofTupleFromSeq(res) }
          Some($m.fromProduct(product))
        }
    end match
  end productReadOption

  // Generates the writeSingle body for a product type: writes each element in
  // order via its DbCodec, falling back to PreparedStatement::setObject.
  // `pos` tracks the JDBC parameter index; `i` tracks the product element
  // index (they diverge when a codec spans multiple columns).
  private def productWriteSingle[E: Type, Mets: Type](
      e: Expr[E],
      ps: Expr[PreparedStatement],
      pos: Expr[Int],
      i: Expr[Int]
  )(using Quotes): Expr[Unit] =
    import quotes.reflect.*
    Type.of[Mets] match
      case '[met *: metTail] =>
        Expr.summon[DbCodec[met]] match
          case Some(codecExpr) =>
            '{
              val iValue = $i
              val posValue = $pos
              val metValue = $e
                .asInstanceOf[Product]
                .productElement(iValue)
                .asInstanceOf[met]
              val codec = $codecExpr
              codec.writeSingle(metValue, $ps, posValue)
              val newPos = posValue + $codecExpr.cols.length
              val newI = iValue + 1
              ${ productWriteSingle[E, metTail](e, ps, '{ newPos }, '{ newI }) }
            }
          case None =>
            '{
              val iValue = $i
              val posValue = $pos
              val metValue = $e
                .asInstanceOf[Product]
                .productElement(iValue)
              $ps.setObject(posValue, metValue)
              val newPos = posValue + 1
              val newI = iValue + 1
              ${ productWriteSingle[E, metTail](e, ps, '{ newPos }, '{ newI }) }
            }
      case '[EmptyTuple] => '{}
    end match
  end productWriteSingle
end DbCodec

================================================ FILE: magnum/src/main/scala/com/augustnagro/magnum/DbCon.scala ================================================

package com.augustnagro.magnum

import java.sql.Connection

/** Simple wrapper around java.sql.Connection.
See
  * `com.augustnagro.magnum.connect` and `transact`
  */
class DbCon private[magnum] (
    val connection: Connection,
    val sqlLogger: SqlLogger
)

================================================ FILE: magnum/src/main/scala/com/augustnagro/magnum/DbTx.scala ================================================

package com.augustnagro.magnum

import java.sql.Connection
import scala.util.Using

/** Represents a transactional [[DbCon]] */
class DbTx private[magnum] (connection: Connection, sqlLogger: SqlLogger)
    extends DbCon(connection, sqlLogger)

================================================ FILE: magnum/src/main/scala/com/augustnagro/magnum/DbType.scala ================================================

package com.augustnagro.magnum

import scala.reflect.ClassTag
import scala.deriving.Mirror

/** Factory for Repo default methods */
trait DbType:
  // Builds the default CRUD implementations for one table, given the
  // SQL-mapped table/column names, the per-column codecs, and the index of
  // the id column within the entity's fields.
  def buildRepoDefaults[EC, E, ID](
      tableNameSql: String,
      eElemNames: Seq[String],
      eElemNamesSql: Seq[String],
      eElemCodecs: Seq[DbCodec[?]],
      ecElemNames: Seq[String],
      ecElemNamesSql: Seq[String],
      idIndex: Int
  )(using
      eCodec: DbCodec[E],
      ecCodec: DbCodec[EC],
      idCodec: DbCodec[ID],
      eClassTag: ClassTag[E],
      ecClassTag: ClassTag[EC],
      idClassTag: ClassTag[ID]
  ): RepoDefaults[EC, E, ID]

================================================ FILE: magnum/src/main/scala/com/augustnagro/magnum/DerivingUtil.scala ================================================

package com.augustnagro.magnum

import scala.deriving.Mirror
import scala.compiletime.{
  constValue,
  constValueTuple,
  erasedValue,
  error,
  summonFrom,
  summonInline
}
import scala.quoted.*
import scala.reflect.ClassTag

/** Not useful for typical user code; provided to help implement custom DbCodecs
  * and associated typeclasses
  */
object DerivingUtil:

  /** For a Simple (non-ADT) enum type E, constructs a sequence of mappings from
    * sql string representation to enum value.
    For example,
    *
    * {{{
    * @Table(PostgresDbType, SqlNameMapper.CamelToUpperSnakeCase)
    * enum Color { case Red, @SqlName("greeeeeen") Green, Blue }
    * }}}
    *
    * Results in
    *
    * {{{
    * Seq("Red" -> Color.Red, "greeeeeen" -> Color.Green, "Blue" -> Color.Blue)
    * }}}
    *
    * Will produce a compile error if the enum is not simple (non-adt).
    *
    * @tparam E
    *   the enum type, like Color
    * @tparam Mels
    *   enum Mirror's MirroredElemLabels
    * @tparam Mets
    *   enum Mirror's MirroredElemTypes
    */
  def buildSqlNameMapForEnum[
      E: Type,
      Mels: Type,
      Mets: Type
  ](using q: Quotes): Expr[Seq[(String, E)]] =
    import q.reflect.*
    val tableAnnot = TypeRepr.of[Table].typeSymbol
    // Use the @Table annotation's name mapper when present, else SameCase.
    val defaultNameMapper: Expr[SqlNameMapper] =
      TypeRepr
        .of[E]
        .typeSymbol
        .getAnnotation(tableAnnot) match
        case Some(term) =>
          val tableExpr = term.asExprOf[Table]
          '{ $tableExpr.nameMapper }
        case None => '{ SqlNameMapper.SameCase }
    val sumValueExprs: Vector[Expr[E]] = sumValues[E, Mets]()
    val scalaNames = getScalaNames[Mels]()
    val sqlNameAnnot = TypeRepr.of[SqlName].typeSymbol
    val enumCaseSymbols = TypeRepr.of[E].typeSymbol.children
    // Per enum case: an explicit @SqlName overrides the default name mapping.
    val sqlNameExprs: Vector[Expr[(String, E)]] = scalaNames
      .zip(sumValueExprs)
      .map((scalaName, sumExpr) =>
        val nameAnnot = enumCaseSymbols
          .find(sym => sym.name == scalaName && sym.hasAnnotation(sqlNameAnnot))
          .flatMap(sym => sym.getAnnotation(sqlNameAnnot))
        nameAnnot match
          case Some(term) =>
            val sqlNameExpr: Expr[SqlName] = term.asExprOf[SqlName]
            '{ ($sqlNameExpr.name.toString, $sumExpr) }
          case None =>
            val scalaNameExpr = Expr(scalaName)
            '{ ($defaultNameMapper.toColumnName($scalaNameExpr), $sumExpr) }
      )
    Expr.ofSeq(sqlNameExprs)
  end buildSqlNameMapForEnum

  // Collects the constant label strings from a Mirror's MirroredElemLabels.
  private def getScalaNames[Mels: Type](res: Vector[String] = Vector.empty)(
      using Quotes
  ): Vector[String] =
    import quotes.reflect.*
    Type.of[Mels] match
      case '[mel *: melTail] =>
        val melString = Type.valueOfConstant[mel].get.toString
        getScalaNames[melTail](res :+ melString)
      case '[EmptyTuple] => res

  // Builds the singleton enum case values; aborts for parameterized (ADT)
  // cases, which are not representable as a single string column.
  private def sumValues[E: Type, Mets: Type](
      res:
Vector[Expr[E]] = Vector.empty
  )(using Quotes): Vector[Expr[E]] =
    import quotes.reflect.*
    Type.of[Mets] match
      case '[met *: metTail] =>
        val expr = Expr.summon[Mirror.ProductOf[met]] match
          case Some(m) if isSingleton[met] =>
            '{ $m.fromProduct(EmptyTuple).asInstanceOf[E] }
          case _ =>
            report.errorAndAbort("Can only derive simple (non-adt) enums")
        sumValues[E, metTail](res :+ expr)
      case '[EmptyTuple] => res

  // True when T is a case with zero fields (a simple enum case).
  private def isSingleton[T: Type](using Quotes): Boolean =
    import quotes.reflect.*
    Expr.summon[Mirror.ProductOf[T]] match
      case Some('{
            $mp: Mirror.ProductOf[T] { type MirroredElemTypes = mets }
          }) =>
        tupleArity[mets]() == 0
      case _ => false

  private def tupleArity[T: Type](res: Int = 0)(using Quotes): Int =
    import quotes.reflect.*
    Type.of[T] match
      case '[x *: xs] => tupleArity[xs](res + 1)
      case '[EmptyTuple] => res

  /** Finds the first SqlName annotation on type T */
  def sqlTableNameAnnot[T: Type](using Quotes): Option[Expr[SqlName]] =
    import quotes.reflect._
    val annot = TypeRepr.of[SqlName]
    TypeRepr
      .of[T]
      .typeSymbol
      .annotations
      .find(_.tpe =:= annot)
      .map(term => term.asExprOf[SqlName])

  /** Finds the first Table annotation on type T */
  def tableAnnot[T: Type](using Quotes): Option[Expr[Table]] =
    import quotes.reflect.*
    val annot = TypeRepr.of[Table]
    TypeRepr
      .of[T]
      .typeSymbol
      .annotations
      .find(_.tpe =:= annot)
      .map(term => term.asExprOf[Table])
end DerivingUtil

================================================ FILE: magnum/src/main/scala/com/augustnagro/magnum/Frag.scala ================================================

package com.augustnagro.magnum

import java.lang.System.Logger.Level
import java.sql.{PreparedStatement, ResultSet, Statement}
import scala.collection.immutable.ArraySeq
import scala.util.{Failure, Success, Using}

/** Sql fragment */
class Frag(
    val sqlString: String,
    val params: Seq[Any],
    val writer: FragWriter
):
  def query[E](using reader: DbCodec[E]): Query[E] = Query(this, reader)
  def update: Update = Update(this)

  /** For databases like Postgres
that support RETURNING statements via
    * `getResultSet`
    */
  def returning[E](using reader: DbCodec[E]): Returning[E] =
    Returning(this, reader, Vector.empty)

  /** For databases that support RETURNING statements via `getGeneratedKeys` */
  def returningKeys[E](colName: String, xs: String*)(using
      reader: DbCodec[E]
  ): Returning[E] =
    Returning(this, reader, colName +: xs)

  /** For databases that support RETURNING statements via `getGeneratedKeys` */
  def returningKeys[E](colName: ColumnName, xs: ColumnName*)(using
      reader: DbCodec[E]
  ): Returning[E] =
    Returning(this, reader, (colName +: xs).map(_.queryRepr))

  /** For databases that support RETURNING statements via `getGeneratedKeys` */
  def returningKeys[E](colNames: ColumnNames)(using
      reader: DbCodec[E]
  ): Returning[E] =
    Returning(this, reader, colNames.columnNames.map(_.queryRepr))

  /** Strips leading whitespace characters followed by a specified char from the
    * beginning of each line in this {@link Frag} .
    *
    * This method is useful when you want to format SQL strings in a more
    * readable multi-line way within your code.
    *
    * @param marginChar
    *   the character that indicates the margin.
    * @return
    *   a new {@link Frag} instance with the modified `sqlString`.
    */
  def stripMargin(marginChar: Char): Frag =
    Frag(sqlString.stripMargin(marginChar), params, writer)

  /** Strips leading whitespace characters followed by a vertical bar (`|`) from
    * the beginning of each line in this {@link Frag} .
    *
    * This method is useful when you want to format SQL strings in a more
    * readable multi-line way within your code.
    *
    * @return
    *   a new {@link Frag} instance with the modified `sqlString`.
    */
  def stripMargin: Frag = stripMargin('|')
end Frag

================================================ FILE: magnum/src/main/scala/com/augustnagro/magnum/FragWriter.scala ================================================

package com.augustnagro.magnum

import java.sql.PreparedStatement

trait FragWriter:
  /** Writes a Frag's values to `ps`, starting at position `pos`.
Returns the new
    * position.
    */
  def write(ps: PreparedStatement, pos: Int): Int

object FragWriter:
  // Writer for parameterless fragments: writes nothing, position unchanged.
  val empty: FragWriter = (_, pos) => pos

================================================ FILE: magnum/src/main/scala/com/augustnagro/magnum/H2DbType.scala ================================================

package com.augustnagro.magnum

import java.sql.{Connection, JDBCType, PreparedStatement, ResultSet, Statement}
import java.time.OffsetDateTime
import scala.collection.View
import scala.deriving.Mirror
import scala.reflect.ClassTag
import scala.util.{Failure, Success, Using}

// Repo defaults for H2. All SQL strings are pre-built once per repo.
object H2DbType extends DbType:
  def buildRepoDefaults[EC, E, ID](
      tableNameSql: String,
      eElemNames: Seq[String],
      eElemNamesSql: Seq[String],
      eElemCodecs: Seq[DbCodec[?]],
      ecElemNames: Seq[String],
      ecElemNamesSql: Seq[String],
      idIndex: Int
  )(using
      eCodec: DbCodec[E],
      ecCodec: DbCodec[EC],
      idCodec: DbCodec[ID],
      eClassTag: ClassTag[E],
      ecClassTag: ClassTag[EC],
      idClassTag: ClassTag[ID]
  ): RepoDefaults[EC, E, ID] =
    val idName = eElemNamesSql(idIndex)
    val selectKeys = eElemNamesSql.mkString(", ")
    val ecInsertKeys = ecElemNamesSql.mkString("(", ", ", ")")
    // SET clause without the id column; id is bound last in the WHERE clause.
    val updateKeys: String = eElemNamesSql
      .lazyZip(eElemCodecs)
      .map((sqlName, codec) => sqlName + " = " + codec.queryRepr)
      .patch(idIndex, Seq.empty, 1)
      .mkString(", ")
    val updateCodecs = eElemCodecs
      .patch(idIndex, Seq.empty, 1)
      .appended(idCodec)
      .asInstanceOf[Seq[DbCodec[Any]]]
    val insertGenKeys: Array[String] = Array.from(eElemNamesSql)
    val countSql = s"SELECT count(*) FROM $tableNameSql"
    val countQuery = Frag(countSql, Vector.empty, FragWriter.empty).query[Long]
    val existsByIdSql =
      s"SELECT 1 FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}"
    val findAllSql = s"SELECT * FROM $tableNameSql"
    val findAllQuery = Frag(findAllSql, Vector.empty, FragWriter.empty).query[E]
    val findByIdSql =
      s"SELECT * FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}"
    val findAllByIdSql =
      s"SELECT * FROM $tableNameSql WHERE $idName = ANY(?)"
    val deleteByIdSql =
s"DELETE FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}"
    val truncateSql = s"TRUNCATE TABLE $tableNameSql"
    val truncateUpdate =
      Frag(truncateSql, Vector.empty, FragWriter.empty).update
    val insertSql =
      s"INSERT INTO $tableNameSql $ecInsertKeys VALUES (${ecCodec.queryRepr})"
    val updateSql =
      s"UPDATE $tableNameSql SET $updateKeys WHERE $idName = ${idCodec.queryRepr}"
    // findAllById cannot build a single SQL array for multi-column ids.
    val compositeId = idCodec.cols.distinct.size != 1
    val idFirstTypeName = JDBCType.valueOf(idCodec.cols.head).getName

    def idWriter(id: ID): FragWriter = (ps, pos) =>
      idCodec.writeSingle(id, ps, pos)
      pos + idCodec.cols.length

    new RepoDefaults[EC, E, ID]:
      def count(using con: DbCon): Long = countQuery.run().head

      def existsById(id: ID)(using DbCon): Boolean =
        Frag(existsByIdSql, IArray(id), idWriter(id))
          .query[Int]
          .run()
          .nonEmpty

      def findAll(using DbCon): Vector[E] = findAllQuery.run()

      def findAll(spec: Spec[E])(using DbCon): Vector[E] =
        SpecImpl.Default.findAll(spec, tableNameSql)

      def findById(id: ID)(using DbCon): Option[E] =
        Frag(findByIdSql, IArray(id), idWriter(id))
          .query[E]
          .run()
          .headOption

      def findAllById(ids: Iterable[ID])(using DbCon): Vector[E] =
        if compositeId then
          throw UnsupportedOperationException(
            "Composite ids unsupported for findAllById."
          )
        val idsArray = Array.from[Any](ids)
        Frag(
          findAllByIdSql,
          IArray(idsArray),
          (ps, pos) =>
            val sqlArray =
              ps.getConnection.createArrayOf(idFirstTypeName, idsArray)
            ps.setArray(pos, sqlArray)
            pos + 1
        ).query[E].run()
//      // h2 doesn't support setObject(..)
with primitive arrays,
//      // so we need to convert to Array[Object]
//      val builder = Array.newBuilder[Object]
//      if ids.knownSize > -1 then builder.sizeHint(ids.knownSize)
//      for id <- ids do builder += id.asInstanceOf[Object]
//      Sql(findAllByIdSql, Vector(builder.result())).run

      def delete(entity: E)(using DbCon): Unit =
        deleteById(
          entity
            .asInstanceOf[Product]
            .productElement(idIndex)
            .asInstanceOf[ID]
        )

      def deleteById(id: ID)(using DbCon): Unit =
        Frag(deleteByIdSql, IArray(id), idWriter(id)).update.run()

      def truncate()(using DbCon): Unit =
        truncateUpdate.run()

      def deleteAll(entities: Iterable[E])(using DbCon): BatchUpdateResult =
        deleteAllById(
          entities.map(e =>
            e.asInstanceOf[Product].productElement(idIndex).asInstanceOf[ID]
          )
        )

      def deleteAllById(ids: Iterable[ID])(using
          con: DbCon
      ): BatchUpdateResult =
        handleQuery(deleteByIdSql, ids):
          Using(con.connection.prepareStatement(deleteByIdSql)): ps =>
            idCodec.write(ids, ps)
            timed(batchUpdateResult(ps.executeBatch()))

      def insert(entityCreator: EC)(using con: DbCon): Unit =
        handleQuery(insertSql, entityCreator):
          Using(con.connection.prepareStatement(insertSql)): ps =>
            ecCodec.writeSingle(entityCreator, ps)
            timed(ps.executeUpdate())

      def insertAll(entityCreators: Iterable[EC])(using con: DbCon): Unit =
        handleQuery(insertSql, entityCreators):
          Using(con.connection.prepareStatement(insertSql)): ps =>
            ecCodec.write(entityCreators, ps)
            timed(batchUpdateResult(ps.executeBatch()))

      def insertReturning(entityCreator: EC)(using con: DbCon): E =
        handleQuery(insertSql, entityCreator):
          Using.Manager: use =>
            val ps =
              use(con.connection.prepareStatement(insertSql, insertGenKeys))
            ecCodec.writeSingle(entityCreator, ps)
            timed:
              ps.executeUpdate()
              val rs = use(ps.getGeneratedKeys)
              rs.next()
              eCodec.readSingle(rs)

      def insertAllReturning(
          entityCreators: Iterable[EC]
      )(using con: DbCon): Vector[E] =
        handleQuery(insertSql, entityCreators):
          Using.Manager: use =>
            val ps =
              use(con.connection.prepareStatement(insertSql, insertGenKeys))
ecCodec.write(entityCreators, ps)
            timed:
              batchUpdateResult(ps.executeBatch())
              val rs = use(ps.getGeneratedKeys)
              eCodec.read(rs)

      def update(entity: E)(using con: DbCon): Unit =
        handleQuery(updateSql, entity):
          Using(con.connection.prepareStatement(updateSql)): ps =>
            val entityValues: Vector[Any] = entity
              .asInstanceOf[Product]
              .productIterator
              .toVector
            // put ID at the end
            val updateValues = entityValues
              .patch(idIndex, Vector.empty, 1)
              .appended(entityValues(idIndex))
            var pos = 1
            for (field, codec) <- updateValues.lazyZip(updateCodecs) do
              codec.writeSingle(field, ps, pos)
              pos += codec.cols.length
            timed(ps.executeUpdate())

      def updateAll(entities: Iterable[E])(using
          con: DbCon
      ): BatchUpdateResult =
        handleQuery(updateSql, entities):
          Using(con.connection.prepareStatement(updateSql)): ps =>
            for entity <- entities do
              val entityValues: Vector[Any] = entity
                .asInstanceOf[Product]
                .productIterator
                .toVector
              // put ID at the end
              val updateValues = entityValues
                .patch(idIndex, Vector.empty, 1)
                .appended(entityValues(idIndex))
              var pos = 1
              for (field, codec) <- updateValues.lazyZip(updateCodecs) do
                codec.writeSingle(field, ps, pos)
                pos += codec.cols.length
              ps.addBatch()
            timed(batchUpdateResult(ps.executeBatch()))
    end new
  end buildRepoDefaults
end H2DbType

================================================ FILE: magnum/src/main/scala/com/augustnagro/magnum/Id.scala ================================================

package com.augustnagro.magnum

import scala.annotation.StaticAnnotation

class Id extends StaticAnnotation

================================================ FILE: magnum/src/main/scala/com/augustnagro/magnum/ImmutableRepo.scala ================================================

package com.augustnagro.magnum

import java.sql.ResultSet
import javax.sql.DataSource
import scala.util.{Try, Using}

/** Repository supporting read-only queries. When entity `E` does not have an
  * id, use `Null` for the `Id` type.
* @tparam E
  *   database entity class
  * @tparam ID
  *   id type of E
  */
open class ImmutableRepo[E, ID](using defaults: RepoDefaults[?, E, ID]):

  /** Count of all entities */
  def count(using DbCon): Long = defaults.count

  /** Returns true if an E exists with the given id */
  def existsById(id: ID)(using DbCon): Boolean = defaults.existsById(id)

  /** Returns all entity values */
  def findAll(using DbCon): Vector[E] = defaults.findAll

  /** Find all entities matching the specification. See the scaladoc of [[Spec]]
    * for more details
    */
  def findAll(spec: Spec[E])(using DbCon): Vector[E] = defaults.findAll(spec)

  /** Returns Some(entity) if a matching E is found */
  def findById(id: ID)(using DbCon): Option[E] = defaults.findById(id)

  /** Find all entities having ids in the Iterable. If an Id is not found, no
    * error is thrown.
    */
  def findAllById(ids: Iterable[ID])(using DbCon): Vector[E] =
    defaults.findAllById(ids)
end ImmutableRepo

================================================ FILE: magnum/src/main/scala/com/augustnagro/magnum/MySqlDbType.scala ================================================

package com.augustnagro.magnum

import java.sql.{Connection, PreparedStatement, ResultSet, Statement}
import java.time.OffsetDateTime
import scala.collection.View
import scala.deriving.Mirror
import scala.reflect.ClassTag
import scala.util.{Failure, Success, Using}

// Repo defaults for MySQL, with a SpecImpl overriding the sort and
// offset/limit SQL to MySQL's dialect.
object MySqlDbType extends DbType:
  private val specImpl = new SpecImpl:
    override def sortSql(sort: Sort): String =
      val column = sort.column
      // MySQL has no NULLS FIRST/LAST; emulate via an IS [NOT] NULL prefix key.
      val nullSort = sort.nullOrder match
        case NullOrder.Default => ""
        case NullOrder.First => s"$column IS NOT NULL, "
        case NullOrder.Last => s"$column IS NULL, "
        case _ => throw UnsupportedOperationException()
      val dir = sort.direction match
        case SortOrder.Default => ""
        case SortOrder.Asc => " ASC"
        case SortOrder.Desc => " DESC"
        case _ => throw UnsupportedOperationException()
      nullSort + column + dir

    override def offsetLimitSql(
        offset: Option[Long],
        limit: Option[Int]
    ): Option[String] =
      (offset,
limit) match
        case (Some(o), Some(l)) => Some(s"LIMIT $o, $l")
        // MySQL requires a limit with offset; use Long.MaxValue as "no limit".
        case (Some(o), None) => Some(s"LIMIT $o, ${Long.MaxValue}")
        case (None, Some(l)) => Some(s"LIMIT $l")
        case (None, None) => None

  def buildRepoDefaults[EC, E, ID](
      tableNameSql: String,
      eElemNames: Seq[String],
      eElemNamesSql: Seq[String],
      eElemCodecs: Seq[DbCodec[?]],
      ecElemNames: Seq[String],
      ecElemNamesSql: Seq[String],
      idIndex: Int
  )(using
      eCodec: DbCodec[E],
      ecCodec: DbCodec[EC],
      idCodec: DbCodec[ID],
      eClassTag: ClassTag[E],
      ecClassTag: ClassTag[EC],
      idClassTag: ClassTag[ID]
  ): RepoDefaults[EC, E, ID] =
    val idName = eElemNamesSql(idIndex)
    val selectKeys = eElemNamesSql.mkString(", ")
    val ecInsertKeys = ecElemNamesSql.mkString("(", ", ", ")")
    val insertGenKeys = Array(idName)
    // SET clause without the id column; id is bound last in the WHERE clause.
    val updateKeys: String = eElemNamesSql
      .lazyZip(eElemCodecs)
      .map((sqlName, codec) => sqlName + " = " + codec.queryRepr)
      .patch(idIndex, Seq.empty, 1)
      .mkString(", ")
    val updateCodecs = eElemCodecs
      .patch(idIndex, Seq.empty, 1)
      .appended(idCodec)
      .asInstanceOf[Seq[DbCodec[Any]]]
    val countSql = s"SELECT count(*) FROM $tableNameSql"
    val countQuery = Frag(countSql, Vector.empty, FragWriter.empty).query[Long]
    val existsByIdSql =
      s"SELECT 1 FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}"
    val findAllSql = s"SELECT * FROM $tableNameSql"
    val findAllQuery = Frag(findAllSql, Vector.empty, FragWriter.empty).query[E]
    val findByIdSql =
      s"SELECT * FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}"
    val deleteByIdSql =
      s"DELETE FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}"
    val truncateSql = s"TRUNCATE TABLE $tableNameSql"
    val truncateUpdate =
      Frag(truncateSql, Vector.empty, FragWriter.empty).update
    val insertSql =
      s"INSERT INTO $tableNameSql $ecInsertKeys VALUES (${ecCodec.queryRepr})"
    val updateSql =
      s"UPDATE $tableNameSql SET $updateKeys WHERE $idName = ${idCodec.queryRepr}"
    val insertAndFindByIdSql = insertSql + "\n" + findByIdSql

    def idWriter(id: ID): FragWriter = (ps, pos) =>
      idCodec.writeSingle(id, ps, pos)
      pos +
idCodec.cols.length

    new RepoDefaults[EC, E, ID]:
      def count(using con: DbCon): Long = countQuery.run().head

      def existsById(id: ID)(using DbCon): Boolean =
        Frag(existsByIdSql, IArray(id), idWriter(id))
          .query[Int]
          .run()
          .nonEmpty

      def findAll(using DbCon): Vector[E] = findAllQuery.run()

      def findAll(spec: Spec[E])(using DbCon): Vector[E] =
        specImpl.findAll(spec, tableNameSql)

      def findById(id: ID)(using DbCon): Option[E] =
        Frag(findByIdSql, IArray(id), idWriter(id))
          .query[E]
          .run()
          .headOption

      def findAllById(ids: Iterable[ID])(using DbCon): Vector[E] =
        throw UnsupportedOperationException(
          "MySql does not support 'ANY' keyword, and does not support long IN parameter lists. Use findById in a loop instead."
        )

      def delete(entity: E)(using DbCon): Unit =
        deleteById(
          entity
            .asInstanceOf[Product]
            .productElement(idIndex)
            .asInstanceOf[ID]
        )

      def deleteById(id: ID)(using DbCon): Unit =
        Frag(deleteByIdSql, IArray(id), idWriter(id)).update
          .run()

      def truncate()(using DbCon): Unit =
        truncateUpdate.run()

      def deleteAll(entities: Iterable[E])(using DbCon): BatchUpdateResult =
        deleteAllById(
          entities.map(e =>
            e.asInstanceOf[Product].productElement(idIndex).asInstanceOf[ID]
          )
        )

      def deleteAllById(ids: Iterable[ID])(using
          con: DbCon
      ): BatchUpdateResult =
        handleQuery(deleteByIdSql, ids):
          Using(con.connection.prepareStatement(deleteByIdSql)): ps =>
            idCodec.write(ids, ps)
            timed(batchUpdateResult(ps.executeBatch()))

      def insert(entityCreator: EC)(using con: DbCon): Unit =
        handleQuery(insertSql, entityCreator):
          Using(con.connection.prepareStatement(insertSql)): ps =>
            ecCodec.writeSingle(entityCreator, ps)
            timed(ps.executeUpdate())

      def insertAll(entityCreators: Iterable[EC])(using con: DbCon): Unit =
        handleQuery(insertSql, entityCreators):
          Using(con.connection.prepareStatement(insertSql)): ps =>
            ecCodec.write(entityCreators, ps)
            timed(batchUpdateResult(ps.executeBatch()))

      def insertReturning(entityCreator: EC)(using con: DbCon): E =
        // unfortunately, mysql only will return auto_incremented
keys.
        // it doesn't return default columns, and adding other columns to
        // the insertGenKeys array doesn't change this behavior.
        throw UnsupportedOperationException()

      def insertAllReturning(
          entityCreators: Iterable[EC]
      )(using con: DbCon): Vector[E] =
        throw UnsupportedOperationException()

      def update(entity: E)(using con: DbCon): Unit =
        handleQuery(updateSql, entity):
          Using(con.connection.prepareStatement(updateSql)): ps =>
            val entityValues: Vector[Any] = entity
              .asInstanceOf[Product]
              .productIterator
              .toVector
            // put ID at the end
            val updateValues = entityValues
              .patch(idIndex, Vector.empty, 1)
              .appended(entityValues(idIndex))
            var pos = 1
            for (field, codec) <- updateValues.lazyZip(updateCodecs) do
              codec.writeSingle(field, ps, pos)
              pos += codec.cols.length
            timed(ps.executeUpdate())

      def updateAll(entities: Iterable[E])(using
          con: DbCon
      ): BatchUpdateResult =
        handleQuery(updateSql, entities):
          Using(con.connection.prepareStatement(updateSql)): ps =>
            for entity <- entities do
              val entityValues: Vector[Any] = entity
                .asInstanceOf[Product]
                .productIterator
                .toVector
              // put ID at the end
              val updateValues = entityValues
                .patch(idIndex, Vector.empty, 1)
                .appended(entityValues(idIndex))
              var pos = 1
              for (field, codec) <- updateValues.lazyZip(updateCodecs) do
                codec.writeSingle(field, ps, pos)
                pos += codec.cols.length
              ps.addBatch()
            timed(batchUpdateResult(ps.executeBatch()))
    end new
  end buildRepoDefaults
end MySqlDbType

================================================ FILE: magnum/src/main/scala/com/augustnagro/magnum/NullOrder.scala ================================================

package com.augustnagro.magnum

trait NullOrder

object NullOrder:
  case object Default extends NullOrder
  case object First extends NullOrder
  case object Last extends NullOrder

================================================ FILE: magnum/src/main/scala/com/augustnagro/magnum/OracleDbType.scala ================================================

package com.augustnagro.magnum

import java.sql.{Connection,
PreparedStatement, ResultSet, Statement}
import java.time.OffsetDateTime
import scala.collection.View
import scala.deriving.Mirror
import scala.reflect.ClassTag
import scala.util.{Failure, Success, Using}

// Repo defaults for Oracle, with a SpecImpl overriding offset/limit to
// Oracle's OFFSET ... ROWS / FETCH NEXT ... ROWS ONLY syntax.
object OracleDbType extends DbType:
  private val specImpl = new SpecImpl:
    override def offsetLimitSql(
        offset: Option[Long],
        limit: Option[Int]
    ): Option[String] =
      (offset, limit) match
        case (Some(o), Some(l)) =>
          Some(s"OFFSET $o ROWS FETCH NEXT $l ROWS ONLY")
        case (Some(o), None) => Some(s"OFFSET $o ROWS")
        case (None, Some(l)) => Some(s"FETCH NEXT $l ROWS ONLY")
        case (None, None) => None

  def buildRepoDefaults[EC, E, ID](
      tableNameSql: String,
      eElemNames: Seq[String],
      eElemNamesSql: Seq[String],
      eElemCodecs: Seq[DbCodec[?]],
      ecElemNames: Seq[String],
      ecElemNamesSql: Seq[String],
      idIndex: Int
  )(using
      eCodec: DbCodec[E],
      ecCodec: DbCodec[EC],
      idCodec: DbCodec[ID],
      eClassTag: ClassTag[E],
      ecClassTag: ClassTag[EC],
      idClassTag: ClassTag[ID]
  ): RepoDefaults[EC, E, ID] =
    val idName = eElemNamesSql(idIndex)
    val selectKeys = eElemNamesSql.mkString(", ")
    val ecInsertKeys = ecElemNamesSql.mkString("(", ", ", ")")
    // SET clause without the id column; id is bound last in the WHERE clause.
    val updateKeys: String = eElemNamesSql
      .lazyZip(eElemCodecs)
      .map((sqlName, codec) => sqlName + " = " + codec.queryRepr)
      .patch(idIndex, Seq.empty, 1)
      .mkString(", ")
    val updateCodecs = eElemCodecs
      .patch(idIndex, Seq.empty, 1)
      .appended(idCodec)
      .asInstanceOf[Seq[DbCodec[Any]]]
    val insertGenKeys = Array.from(eElemNamesSql)
    val countSql = s"SELECT count(*) FROM $tableNameSql"
    val countQuery = Frag(countSql, Vector.empty, FragWriter.empty).query[Long]
    val existsByIdSql =
      s"SELECT 1 FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}"
    val findAllSql = s"SELECT * FROM $tableNameSql"
    val findAllQuery = Frag(findAllSql, Vector.empty, FragWriter.empty).query[E]
    val findByIdSql =
      s"SELECT * FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}"
    val deleteByIdSql =
      s"DELETE FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}"
    val truncateSql = s"TRUNCATE TABLE
$tableNameSql"
    val truncateUpdate =
      Frag(truncateSql, Vector.empty, FragWriter.empty).update
    val insertSql =
      s"INSERT INTO $tableNameSql $ecInsertKeys VALUES (${ecCodec.queryRepr})"
    val updateSql =
      s"UPDATE $tableNameSql SET $updateKeys WHERE $idName = ${idCodec.queryRepr}"

    def idWriter(id: ID): FragWriter = (ps, pos) =>
      idCodec.writeSingle(id, ps, pos)
      pos + idCodec.cols.length

    new RepoDefaults[EC, E, ID]:
      def count(using con: DbCon): Long = countQuery.run().head

      def existsById(id: ID)(using DbCon): Boolean =
        Frag(existsByIdSql, IArray(id), idWriter(id))
          .query[Int]
          .run()
          .nonEmpty

      def findAll(using DbCon): Vector[E] = findAllQuery.run()

      def findAll(spec: Spec[E])(using DbCon): Vector[E] =
        specImpl.findAll(spec, tableNameSql)

      def findById(id: ID)(using DbCon): Option[E] =
        Frag(findByIdSql, IArray(id), idWriter(id))
          .query[E]
          .run()
          .headOption

      def findAllById(ids: Iterable[ID])(using DbCon): Vector[E] =
        throw UnsupportedOperationException(
          "Oracle does not support SQL arrays, and does not support long IN parameter lists. Use findById in a loop instead."
)

      def delete(entity: E)(using DbCon): Unit =
        deleteById(
          entity
            .asInstanceOf[Product]
            .productElement(idIndex)
            .asInstanceOf[ID]
        )

      def deleteById(id: ID)(using DbCon): Unit =
        Frag(deleteByIdSql, IArray(id), idWriter(id)).update
          .run()

      def truncate()(using DbCon): Unit =
        truncateUpdate.run()

      def deleteAll(entities: Iterable[E])(using DbCon): BatchUpdateResult =
        deleteAllById(
          entities.map(e =>
            e.asInstanceOf[Product].productElement(idIndex).asInstanceOf[ID]
          )
        )

      def deleteAllById(ids: Iterable[ID])(using
          con: DbCon
      ): BatchUpdateResult =
        handleQuery(deleteByIdSql, ids):
          Using(con.connection.prepareStatement(deleteByIdSql)): ps =>
            idCodec.write(ids, ps)
            timed(batchUpdateResult(ps.executeBatch()))

      def insert(entityCreator: EC)(using con: DbCon): Unit =
        handleQuery(insertSql, entityCreator):
          Using(con.connection.prepareStatement(insertSql)): ps =>
            ecCodec.writeSingle(entityCreator, ps)
            timed(ps.executeUpdate())

      def insertAll(entityCreators: Iterable[EC])(using con: DbCon): Unit =
        handleQuery(insertSql, entityCreators):
          Using(con.connection.prepareStatement(insertSql)): ps =>
            ecCodec.write(entityCreators, ps)
            timed(batchUpdateResult(ps.executeBatch()))

      def insertReturning(entityCreator: EC)(using con: DbCon): E =
        handleQuery(insertSql, entityCreator):
          Using.Manager: use =>
            val ps =
              use(con.connection.prepareStatement(insertSql, insertGenKeys))
            ecCodec.writeSingle(entityCreator, ps)
            timed:
              ps.executeUpdate()
              val rs = use(ps.getGeneratedKeys)
              rs.next()
              eCodec.readSingle(rs)

      def insertAllReturning(
          entityCreators: Iterable[EC]
      )(using con: DbCon): Vector[E] =
        // oracle jdbc does not support batch RETURNING
        entityCreators.map(insertReturning).toVector

      def update(entity: E)(using con: DbCon): Unit =
        handleQuery(updateSql, entity):
          Using(con.connection.prepareStatement(updateSql)): ps =>
            val entityValues: Vector[Any] = entity
              .asInstanceOf[Product]
              .productIterator
              .toVector
            // put ID at the end
            val updateValues = entityValues
              .patch(idIndex, Vector.empty, 1)
.appended(entityValues(idIndex))
            var pos = 1
            for (field, codec) <- updateValues.lazyZip(updateCodecs) do
              codec.writeSingle(field, ps, pos)
              pos += codec.cols.length
            timed(ps.executeUpdate())

      def updateAll(entities: Iterable[E])(using
          con: DbCon
      ): BatchUpdateResult =
        handleQuery(updateSql, entities):
          Using(con.connection.prepareStatement(updateSql)): ps =>
            for entity <- entities do
              val entityValues: Vector[Any] = entity
                .asInstanceOf[Product]
                .productIterator
                .toVector
              // put ID at the end
              val updateValues = entityValues
                .patch(idIndex, Vector.empty, 1)
                .appended(entityValues(idIndex))
              var pos = 1
              for (field, codec) <- updateValues.lazyZip(updateCodecs) do
                codec.writeSingle(field, ps, pos)
                pos += codec.cols.length
              ps.addBatch()
            timed(batchUpdateResult(ps.executeBatch()))
    end new
  end buildRepoDefaults
end OracleDbType

================================================ FILE: magnum/src/main/scala/com/augustnagro/magnum/PostgresDbType.scala ================================================

package com.augustnagro.magnum

import java.sql.{Connection, JDBCType, PreparedStatement, ResultSet, Statement}
import java.time.OffsetDateTime
import scala.collection.View
import scala.deriving.Mirror
import scala.reflect.ClassTag
import scala.util.{Failure, Success, Using}
import java.util.StringJoiner

object PostgresDbType extends DbType:
  def buildRepoDefaults[EC, E, ID](
      tableNameSql: String,
      eElemNames: Seq[String],
      eElemNamesSql: Seq[String],
      eElemCodecs: Seq[DbCodec[?]],
      ecElemNames: Seq[String],
      ecElemNamesSql: Seq[String],
      idIndex: Int
  )(using
      eCodec: DbCodec[E],
      ecCodec: DbCodec[EC],
      idCodec: DbCodec[ID],
      eClassTag: ClassTag[E],
      ecClassTag: ClassTag[EC],
      idClassTag: ClassTag[ID]
  ): RepoDefaults[EC, E, ID] =
    val idName = eElemNamesSql(idIndex)
    val selectKeys = eElemNamesSql.mkString(", ")
    val ecInsertKeys = ecElemNamesSql.mkString("(", ", ", ")")
    val updateKeys: String = eElemNamesSql
      .lazyZip(eElemCodecs)
      .map((sqlName, codec) => sqlName + " = " + codec.queryRepr)
.patch(idIndex, Seq.empty, 1) .mkString(", ") val updateCodecs = eElemCodecs .patch(idIndex, Seq.empty, 1) .appended(idCodec) .asInstanceOf[Seq[DbCodec[Any]]] val countSql = s"SELECT count(*) FROM $tableNameSql" val countQuery = Frag(countSql, Vector.empty, FragWriter.empty).query[Long] val existsByIdSql = s"SELECT 1 FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}" val findAllSql = s"SELECT $selectKeys FROM $tableNameSql" val findAllQuery = Frag(findAllSql, Vector.empty, FragWriter.empty).query[E] val findByIdSql = s"SELECT $selectKeys FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}" val findAllByIdSql = s"SELECT $selectKeys FROM $tableNameSql WHERE $idName = ANY(?)" val deleteByIdSql = s"DELETE FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}" val truncateSql = s"TRUNCATE TABLE $tableNameSql" val truncateUpdate = Frag(truncateSql, Vector.empty, FragWriter.empty).update val insertSql = s"INSERT INTO $tableNameSql $ecInsertKeys VALUES (${ecCodec.queryRepr})" val updateSql = s"UPDATE $tableNameSql SET $updateKeys WHERE $idName = ${idCodec.queryRepr}" val compositeId = idCodec.cols.distinct.size != 1 val idFirstTypeName = JDBCType.valueOf(idCodec.cols.head).getName def idWriter(id: ID): FragWriter = (ps, pos) => idCodec.writeSingle(id, ps, pos) pos + idCodec.cols.length new RepoDefaults[EC, E, ID]: def count(using con: DbCon): Long = countQuery.run().head def existsById(id: ID)(using DbCon): Boolean = Frag(existsByIdSql, IArray(id), idWriter(id)) .query[Int] .run() .nonEmpty def findAll(using DbCon): Vector[E] = findAllQuery.run() def findAll(spec: Spec[E])(using DbCon): Vector[E] = SpecImpl.Default.findAll(spec, tableNameSql) def findById(id: ID)(using DbCon): Option[E] = Frag(findByIdSql, IArray(id), idWriter(id)) .query[E] .run() .headOption def findAllById(ids: Iterable[ID])(using DbCon): Vector[E] = if compositeId then throw UnsupportedOperationException( "Composite ids unsupported for findAllById." 
) val idsArray = Array.from[Any](ids) Frag( findAllByIdSql, IArray(idsArray), (ps, pos) => val sqlArray = ps.getConnection.createArrayOf(idFirstTypeName, idsArray) ps.setArray(pos, sqlArray) pos + 1 ).query[E].run() def delete(entity: E)(using DbCon): Unit = deleteById( entity .asInstanceOf[Product] .productElement(idIndex) .asInstanceOf[ID] ) def deleteById(id: ID)(using DbCon): Unit = Frag(deleteByIdSql, IArray(id), idWriter(id)).update .run() def truncate()(using DbCon): Unit = truncateUpdate.run() def deleteAll(entities: Iterable[E])(using DbCon): BatchUpdateResult = deleteAllById( entities.map(e => e.asInstanceOf[Product].productElement(idIndex).asInstanceOf[ID] ) ) def deleteAllById(ids: Iterable[ID])(using con: DbCon ): BatchUpdateResult = handleQuery(deleteByIdSql, ids): Using(con.connection.prepareStatement(deleteByIdSql)): ps => idCodec.write(ids, ps) timed(batchUpdateResult(ps.executeBatch())) def insert(entityCreator: EC)(using con: DbCon): Unit = handleQuery(insertSql, entityCreator): Using(con.connection.prepareStatement(insertSql)): ps => ecCodec.writeSingle(entityCreator, ps) timed(ps.executeUpdate()) def insertAll(entityCreators: Iterable[EC])(using con: DbCon): Unit = handleQuery(insertSql, entityCreators): Using(con.connection.prepareStatement(insertSql)): ps => ecCodec.write(entityCreators, ps) timed(batchUpdateResult(ps.executeBatch())) def insertReturning(entityCreator: EC)(using con: DbCon): E = handleQuery(insertSql, entityCreator): Using.Manager: use => val ps = use( con.connection .prepareStatement(insertSql, Statement.RETURN_GENERATED_KEYS) ) ecCodec.writeSingle(entityCreator, ps) timed: ps.executeUpdate() val rs = use(ps.getGeneratedKeys) rs.next() eCodec.readSingle(rs) def insertAllReturning( entityCreators: Iterable[EC] )(using con: DbCon): Vector[E] = handleQuery(insertSql, entityCreators): Using.Manager: use => val ps = use( con.connection .prepareStatement(insertSql, Statement.RETURN_GENERATED_KEYS) ) ecCodec.write(entityCreators, 
ps) timed: batchUpdateResult(ps.executeBatch()) val rs = use(ps.getGeneratedKeys) eCodec.read(rs) def update(entity: E)(using con: DbCon): Unit = handleQuery(updateSql, entity): Using(con.connection.prepareStatement(updateSql)): ps => val entityValues: Vector[Any] = entity .asInstanceOf[Product] .productIterator .toVector // put ID at the end val updateValues = entityValues .patch(idIndex, Vector.empty, 1) .appended(entityValues(idIndex)) var pos = 1 for (field, codec) <- updateValues.lazyZip(updateCodecs) do codec.writeSingle(field, ps, pos) pos += codec.cols.length timed(ps.executeUpdate()) def updateAll(entities: Iterable[E])(using con: DbCon ): BatchUpdateResult = handleQuery(updateSql, entities): Using(con.connection.prepareStatement(updateSql)): ps => for entity <- entities do val entityValues: Vector[Any] = entity .asInstanceOf[Product] .productIterator .toVector // put ID at the end val updateValues = entityValues .patch(idIndex, Vector.empty, 1) .appended(entityValues(idIndex)) var pos = 1 for (field, codec) <- updateValues.lazyZip(updateCodecs) do codec.writeSingle(field, ps, pos) pos += codec.cols.length ps.addBatch() timed(batchUpdateResult(ps.executeBatch())) end new end buildRepoDefaults end PostgresDbType ================================================ FILE: magnum/src/main/scala/com/augustnagro/magnum/Query.scala ================================================ package com.augustnagro.magnum import java.util.concurrent.TimeUnit import scala.concurrent.duration.FiniteDuration import scala.util.Using.Manager import scala.util.control.NonFatal import scala.util.{Failure, Success, Try, Using} class Query[E] private[magnum] (val frag: Frag, reader: DbCodec[E]): def run()(using con: DbCon): Vector[E] = handleQuery(frag.sqlString, frag.params): Using.Manager: use => val ps = use(con.connection.prepareStatement(frag.sqlString)) frag.writer.write(ps, 1) timed: val rs = use(ps.executeQuery()) reader.read(rs) /** Streaming [[Iterator]]. 
Set [[fetchSize]] to give the JDBC driver a hint * as to how many rows to fetch per request */ def iterator( fetchSize: Int = 0 )(using con: DbCon, use: Manager): Iterator[E] = handleQuery(frag.sqlString, frag.params): Try: val ps = use(con.connection.prepareStatement(frag.sqlString)) ps.setFetchSize(fetchSize) frag.writer.write(ps, 1) timed: val rs = use(ps.executeQuery()) ResultSetIterator(rs, frag, reader, con.sqlLogger) end Query ================================================ FILE: magnum/src/main/scala/com/augustnagro/magnum/Repo.scala ================================================ package com.augustnagro.magnum import javax.sql.DataSource /** A read & write data repository * * @tparam EC * 'Entity Creator', which should have all fields of E minus those * auto-generated by the database. Can be the same type as E. * @tparam E * database entity class * @tparam ID * id type of E */ open class Repo[EC, E, ID](using defaults: RepoDefaults[EC, E, ID]) extends ImmutableRepo[E, ID]: /** Deletes an entity using its id */ def delete(entity: E)(using DbCon): Unit = defaults.delete(entity) /** Deletes an entity using its id */ def deleteById(id: ID)(using DbCon): Unit = defaults.deleteById(id) /** Deletes ALL entities */ def truncate()(using DbCon): Unit = defaults.truncate() /** Delete all provided entities */ def deleteAll(entities: Iterable[E])(using DbCon): BatchUpdateResult = defaults.deleteAll(entities) /** Deletes all entities with an Iterable of ids */ def deleteAllById(ids: Iterable[ID])(using DbCon): BatchUpdateResult = defaults.deleteAllById(ids) /** Insert and return entity E */ def insert(entityCreator: EC)(using DbCon): Unit = defaults.insert(entityCreator) /** Insert and return all new entities */ def insertAll(entityCreators: Iterable[EC])(using DbCon): Unit = defaults.insertAll(entityCreators) def insertReturning(entityCreator: EC)(using DbCon): E = defaults.insertReturning(entityCreator) def insertAllReturning(entityCreators: Iterable[EC])(using 
DbCon): Vector[E] = defaults.insertAllReturning(entityCreators) /** Update the entity */ def update(entity: E)(using DbCon): Unit = defaults.update(entity) /** Update all entities */ def updateAll(entities: Iterable[E])(using DbCon): BatchUpdateResult = defaults.updateAll(entities) end Repo ================================================ FILE: magnum/src/main/scala/com/augustnagro/magnum/RepoDefaults.scala ================================================ package com.augustnagro.magnum import scala.compiletime.* import scala.deriving.* import scala.quoted.* import scala.reflect.ClassTag trait RepoDefaults[EC, E, ID]: def count(using DbCon): Long def existsById(id: ID)(using DbCon): Boolean def findAll(using DbCon): Vector[E] def findAll(spec: Spec[E])(using DbCon): Vector[E] def findById(id: ID)(using DbCon): Option[E] def findAllById(ids: Iterable[ID])(using DbCon): Vector[E] def delete(entity: E)(using DbCon): Unit def deleteById(id: ID)(using DbCon): Unit def truncate()(using DbCon): Unit def deleteAll(entities: Iterable[E])(using DbCon): BatchUpdateResult def deleteAllById(ids: Iterable[ID])(using DbCon): BatchUpdateResult def insert(entityCreator: EC)(using DbCon): Unit def insertAll(entityCreators: Iterable[EC])(using DbCon): Unit def insertReturning(entityCreator: EC)(using DbCon): E def insertAllReturning(entityCreators: Iterable[EC])(using DbCon): Vector[E] def update(entity: E)(using DbCon): Unit def updateAll(entities: Iterable[E])(using DbCon): BatchUpdateResult object RepoDefaults: inline given genImmutableRepo[E: DbCodec: Mirror.Of, ID] : RepoDefaults[E, E, ID] = genRepo[E, E, ID] inline given genRepo[ EC: DbCodec: Mirror.Of, E: DbCodec: Mirror.Of, ID ]: RepoDefaults[EC, E, ID] = ${ genImpl[EC, E, ID] } private def genImpl[EC: Type, E: Type, ID: Type](using Quotes ): Expr[RepoDefaults[EC, E, ID]] = import quotes.reflect.* val exprs = tableExprs[EC, E, ID] val eElemCodecs = getEElemCodecs[E] val eCodec = Expr.summon[DbCodec[E]].get val ecCodec = 
Expr.summon[DbCodec[EC]].get val idCodec = if TypeRepr.of[ID] =:= TypeRepr.of[Null] then '{ DbCodec.AnyCodec.asInstanceOf[DbCodec[ID]] } else Expr.summon[DbCodec[ID]].get val eClassTag = Expr.summon[ClassTag[E]].get val ecClassTag = Expr.summon[ClassTag[EC]].get val idClassTag = if TypeRepr.of[ID] =:= TypeRepr.of[Null] then '{ ClassTag.Any.asInstanceOf[ClassTag[ID]] } else Expr.summon[ClassTag[ID]].get '{ ${ exprs.tableAnnot }.dbType.buildRepoDefaults[EC, E, ID]( ${ exprs.tableNameSql }, ${ Expr(exprs.eElemNames) }, ${ Expr.ofSeq(exprs.eElemNamesSql) }, $eElemCodecs, ${ Expr(exprs.ecElemNames) }, ${ Expr.ofSeq(exprs.ecElemNamesSql) }, ${ exprs.idIndex } )(using $eCodec, $ecCodec, $idCodec, $eClassTag, $ecClassTag, $idClassTag ) } end genImpl private def getEElemCodecs[E: Type](using Quotes): Expr[Seq[DbCodec[?]]] = import quotes.reflect.* Expr.summon[Mirror.ProductOf[E]] match case Some('{ $m: Mirror.ProductOf[E] { type MirroredElemTypes = mets } }) => getProductCodecs[mets]() case _ => val sumCodec = Expr.summon[DbCodec[E]].get '{ Seq($sumCodec) } private def getProductCodecs[Mets: Type]( res: Vector[Expr[DbCodec[?]]] = Vector.empty )(using Quotes): Expr[Seq[DbCodec[?]]] = Type.of[Mets] match case '[met *: metTail] => Expr.summon[DbCodec[met]] match case Some(codec) => getProductCodecs[metTail](res :+ codec) case None => getProductCodecs[metTail](res :+ '{ DbCodec.AnyCodec }) case '[EmptyTuple] => Expr.ofSeq(res) end RepoDefaults ================================================ FILE: magnum/src/main/scala/com/augustnagro/magnum/ResultSetIterator.scala ================================================ package com.augustnagro.magnum import java.sql.ResultSet import scala.util.control.NonFatal private class ResultSetIterator[E]( rs: ResultSet, frag: Frag, reader: DbCodec[E], sqlLogger: SqlLogger ) extends Iterator[E] { private var rsHasNext: Boolean = try rs.next() catch case NonFatal(t) => throw SqlException( sqlLogger.exceptionMsg( SqlExceptionEvent(frag.sqlString, 
frag.params, t) ), t ) override def hasNext: Boolean = rsHasNext override def next(): E = if !rsHasNext then throw IllegalStateException("ResultSet is empty") try val e = reader.readSingle(rs) rsHasNext = rs.next() e catch case NonFatal(t) => throw SqlException( sqlLogger.exceptionMsg( SqlExceptionEvent(frag.sqlString, frag.params, t) ), t ) } ================================================ FILE: magnum/src/main/scala/com/augustnagro/magnum/Returning.scala ================================================ package com.augustnagro.magnum import scala.util.{Failure, Success, Try, Using} import Using.Manager import java.sql.Statement import java.sql.ResultSet class Returning[E] private[magnum] ( val frag: Frag, reader: DbCodec[E], keyColumns: Iterable[String] ): def run()(using con: DbCon): Vector[E] = withResultSet(reader.read) /** Streaming [[Iterator]]. Set [[fetchSize]] to give the JDBC driver a hint * as to how many rows to fetch per request */ def iterator( fetchSize: Int = 0 )(using con: DbCon, use: Manager): Iterator[E] = withResultSet(ResultSetIterator(_, frag, reader, con.sqlLogger)) private def withResultSet[A](f: ResultSet => A)(using con: DbCon): A = handleQuery(frag.sqlString, frag.params): Manager: use => if keyColumns.isEmpty then val ps = use(con.connection.prepareStatement(frag.sqlString)) frag.writer.write(ps, 1) timed: val hasResults = ps.execute() if hasResults then val rs = use(ps.getResultSet) f(rs) else throw UnsupportedOperationException( "No results for RETURNING clause" ) else val ps = use( con.connection.prepareStatement(frag.sqlString, keyColumns.toArray) ) frag.writer.write(ps, 1) timed: ps.execute() val rs = use(ps.getGeneratedKeys) f(rs) end Returning ================================================ FILE: magnum/src/main/scala/com/augustnagro/magnum/Seek.scala ================================================ package com.augustnagro.magnum class Seek private[magnum] ( val column: String, val seekDirection: SeekDir, val value: Any, val 
columnSort: SortOrder, val nullOrder: NullOrder, val codec: DbCodec[?] ) ================================================ FILE: magnum/src/main/scala/com/augustnagro/magnum/SeekDir.scala ================================================ package com.augustnagro.magnum trait SeekDir object SeekDir: case object Gt extends SeekDir case object Lt extends SeekDir ================================================ FILE: magnum/src/main/scala/com/augustnagro/magnum/Sort.scala ================================================ package com.augustnagro.magnum class Sort private[magnum] ( val column: String, val direction: SortOrder, val nullOrder: NullOrder ) ================================================ FILE: magnum/src/main/scala/com/augustnagro/magnum/SortOrder.scala ================================================ package com.augustnagro.magnum trait SortOrder object SortOrder: case object Default extends SortOrder case object Asc extends SortOrder case object Desc extends SortOrder ================================================ FILE: magnum/src/main/scala/com/augustnagro/magnum/Spec.scala ================================================ package com.augustnagro.magnum import java.util.StringJoiner class Spec[E] private ( val prefix: Option[Frag], val predicates: Vector[Frag], val limit: Option[Int], val offset: Option[Long], val sorts: Vector[Sort], val seeks: Vector[Seek] ): def prefix(sql: Frag): Spec[E] = new Spec(Some(sql), predicates, limit, offset, sorts, seeks) def where(sql: Frag): Spec[E] = new Spec(prefix, predicates :+ sql, limit, offset, sorts, seeks) def orderBy( column: String, direction: SortOrder = SortOrder.Default, nullOrder: NullOrder = NullOrder.Default ): Spec[E] = val sort = Sort(column, direction, nullOrder) new Spec(prefix, predicates, limit, offset, sorts :+ sort, seeks) def limit(limit: Int): Spec[E] = new Spec(prefix, predicates, Some(limit), offset, sorts, seeks) def offset(offset: Long): Spec[E] = new Spec(prefix, predicates, limit, 
Some(offset), sorts, seeks) def seek[V]( column: String, seekDirection: SeekDir, value: V, columnSort: SortOrder, nullOrder: NullOrder = NullOrder.Default )(using codec: DbCodec[V]): Spec[E] = val seek = Seek(column, seekDirection, value, columnSort, nullOrder, codec) new Spec(prefix, predicates, limit, offset, sorts, seeks :+ seek) end Spec object Spec: def apply[E]: Spec[E] = new Spec(None, Vector.empty, None, None, Vector.empty, Vector.empty) ================================================ FILE: magnum/src/main/scala/com/augustnagro/magnum/SpecImpl.scala ================================================ package com.augustnagro.magnum import java.util.StringJoiner private trait SpecImpl: def sortSql(sort: Sort): String = val dir = sort.direction match case SortOrder.Default => "" case SortOrder.Asc => " ASC" case SortOrder.Desc => " DESC" case _ => throw UnsupportedOperationException() val nullOrder = sort.nullOrder match case NullOrder.Default => "" case NullOrder.First => " NULLS FIRST" case NullOrder.Last => " NULLS LAST" case _ => throw UnsupportedOperationException() sort.column + dir + nullOrder def offsetLimitSql(offset: Option[Long], limit: Option[Int]): Option[String] = (offset, limit) match case (Some(o), Some(l)) => Some(s"OFFSET $o LIMIT $l") case (Some(o), None) => Some(s"OFFSET $o") case (None, Some(l)) => Some(s"LIMIT $l") case (None, None) => None def seekSql(seek: Seek): String = val seekDir = seek.seekDirection match case SeekDir.Gt => ">" case SeekDir.Lt => "<" case _ => throw UnsupportedOperationException() s"${seek.column} $seekDir ?" 
def findAll[E: DbCodec](spec: Spec[E], tableNameSql: String)(using DbCon ): Vector[E] = val whereClause = StringJoiner(" AND ", "WHERE ", "").setEmptyValue("") val allParams = Vector.newBuilder[Any] val tableNameLiteral = SqlLiteral(tableNameSql) val prefixFrag = spec.prefix.getOrElse(sql"SELECT * FROM $tableNameLiteral") allParams ++= prefixFrag.params val seekPredicates = spec.seeks.map(seek => val codec = seek.codec.asInstanceOf[DbCodec[Any]] Frag( seekSql(seek), Vector(seek.value), (ps, pos) => codec.writeSingle(seek.value, ps, pos) pos + codec.cols.length ) ) val whereFrags = (spec.predicates ++ seekPredicates).filter(_.sqlString.nonEmpty) for frag <- whereFrags do whereClause.add("(" + frag.sqlString + ")") allParams ++= frag.params val seekSorts = spec.seeks.map(seek => Sort(seek.column, seek.columnSort, seek.nullOrder)) val orderByClause = StringJoiner(", ", "ORDER BY ", "").setEmptyValue("") for sort <- spec.sorts ++ seekSorts do orderByClause.add(sortSql(sort)) val finalSj = StringJoiner(" ") if prefixFrag.sqlString.nonEmpty then finalSj.add(prefixFrag.sqlString) val whereClauseStr = whereClause.toString if whereClauseStr.nonEmpty then finalSj.add(whereClauseStr) val orderByClauseStr = orderByClause.toString if orderByClauseStr.nonEmpty then finalSj.add(orderByClauseStr) for offsetLimit <- offsetLimitSql(spec.offset, spec.limit) do finalSj.add(offsetLimit) val allFrags = prefixFrag +: whereFrags val fragWriter: FragWriter = (ps, startingPos) => allFrags.foldLeft(startingPos)((pos, frag) => frag.writer.write(ps, pos)) Frag(finalSj.toString, allParams.result(), fragWriter) .query[E] .run() end findAll end SpecImpl private object SpecImpl: object Default extends SpecImpl ================================================ FILE: magnum/src/main/scala/com/augustnagro/magnum/SqlException.scala ================================================ package com.augustnagro.magnum class SqlException private[magnum] (message: String, cause: Throwable = null) extends 
RuntimeException(message, cause) ================================================ FILE: magnum/src/main/scala/com/augustnagro/magnum/SqlExceptionEvent.scala ================================================ package com.augustnagro.magnum /** Metadata for a exceptional SQL statement. */ class SqlExceptionEvent private[magnum] ( /** The SQL string */ val sql: String, anyParams: Any, /** Cause of the exception */ val cause: Throwable ): /** The parameters used when executing. The type is `Iterator[Iterator[Any]]` * to support logging batched updates. For example, * {{{ * repo.insert(User(a, b, c)) // provides Iterator(Iterator(a, b, c)) * repo.insertAll(List(User(a, b, c), User(d, e, f))) // provides Iterator(Iterator(a, b, c), Iterator(d, e, f)) * }}} */ def params: Iterator[Iterator[Any]] = parseParams(anyParams) ================================================ FILE: magnum/src/main/scala/com/augustnagro/magnum/SqlLiteral.scala ================================================ package com.augustnagro.magnum /** A SQL string that is interpolated directly into a sql"" query (and not as a * PreparedStatement parameter) * * For example, * * {{{ * val myQaSchema = SqlLiteral("db_qa") * sql"SELECT * FROM $myQaSchema.table_name" * }}} * * Generates the SQL: * {{{ * "SELECT * FROM db_qa.table_name" * }}} */ trait SqlLiteral: def queryRepr: String object SqlLiteral: def apply(s: String): SqlLiteral = new SqlLiteral: def queryRepr: String = s ================================================ FILE: magnum/src/main/scala/com/augustnagro/magnum/SqlLogger.scala ================================================ package com.augustnagro.magnum import java.lang.System.Logger.Level import scala.concurrent.duration.FiniteDuration /** Trait to provide logging of Magnum SQL statements. */ trait SqlLogger: /** Log a successful SQL statement execution. If a query fails a * [[SqlException]] will be thrown, and this logger will not be triggered. 
*/ def log(successEvent: SqlSuccessEvent): Unit /** Constructs the exception message for [[SqlException]]s */ def exceptionMsg(exceptionEvent: SqlExceptionEvent): String object SqlLogger: object NoOp extends SqlLogger: override def log(successEvent: SqlSuccessEvent): Unit = () override def exceptionMsg(exceptionEvent: SqlExceptionEvent): String = exceptionEvent.cause.getMessage object Default extends SqlLogger: override def log(successEvent: SqlSuccessEvent): Unit = if Log.isLoggable(Level.TRACE) then Log.log( Level.TRACE, s"""Executed Query in ${successEvent.execTime}: |${successEvent.sql} | |With values: |${paramsString(successEvent.params)} |""".stripMargin ) else if Log.isLoggable(Level.DEBUG) then Log.log( Level.DEBUG, s"""Executed Query in ${successEvent.execTime}: |${successEvent.sql} |""".stripMargin ) override def exceptionMsg(exceptionEvent: SqlExceptionEvent): String = if Log.isLoggable(System.Logger.Level.TRACE) then s"""Error executing query: |${exceptionEvent.sql} |With message: |${exceptionEvent.cause.getMessage} |And values: |${paramsString(exceptionEvent.params)} |""".stripMargin else s"""Error executing query: |${exceptionEvent.sql} |With message: |${exceptionEvent.cause} |""".stripMargin end Default def logSlowQueries(slowerThan: FiniteDuration): SqlLogger = new: override def log(logEvent: SqlSuccessEvent): Unit = if logEvent.execTime > slowerThan then if Log.isLoggable(Level.TRACE) then Log.log( Level.WARNING, s"""Executed SLOW Query in ${logEvent.execTime}: |${logEvent.sql} | |With values: |${paramsString(logEvent.params)} |""".stripMargin ) else if Log.isLoggable(Level.WARNING) then Log.log( Level.WARNING, s"""Executed SLOW Query in ${logEvent.execTime}: |${logEvent.sql} |""".stripMargin ) end if else Default.log(logEvent) override def exceptionMsg(exceptionEvent: SqlExceptionEvent): String = Default.exceptionMsg(exceptionEvent) end SqlLogger ================================================ FILE: 
// magnum/src/main/scala/com/augustnagro/magnum/SqlName.scala
// ================================================
package com.augustnagro.magnum

import scala.annotation.StaticAnnotation

class SqlName(val name: String) extends StaticAnnotation

// ================================================
// FILE: magnum/src/main/scala/com/augustnagro/magnum/SqlNameMapper.scala
// ================================================
package com.augustnagro.magnum

/** Mapping from scala terms to sql terms */
trait SqlNameMapper:
  def toColumnName(scalaName: String): String
  def toTableName(scalaName: String): String

object SqlNameMapper:

  /** Converts camelCase scala names to snake_case */
  object CamelToSnakeCase extends SqlNameMapper:
    def toColumnName(scalaName: String): String = toCase(scalaName)
    def toTableName(scalaName: String): String = toCase(scalaName)

    // First char is lower-cased; each subsequent uppercase char becomes
    // '_' + its lowercase form.
    private def toCase(scalaName: String): String =
      val sb = StringBuilder()
      sb.append(scalaName.head.toLower)
      for c <- scalaName.view.drop(1) do
        if c.isUpper then sb.append('_').append(c.toLower)
        else sb.append(c)
      sb.result()

  /** Converts camelCase scala names to UPPER_SNAKE_CASE */
  object CamelToUpperSnakeCase extends SqlNameMapper:
    def toColumnName(scalaName: String): String = toCase(scalaName)
    def toTableName(scalaName: String): String = toCase(scalaName)

    // First char is upper-cased; each subsequent uppercase char becomes
    // '_' + itself, and lowercase chars are upper-cased.
    private def toCase(scalaName: String): String =
      val sb = StringBuilder()
      sb.append(scalaName.head.toUpper)
      for c <- scalaName.view.drop(1) do
        if c.isUpper then sb.append('_').append(c)
        else sb.append(c.toUpper)
      sb.result()

  /** SqlNameMapper that keeps the same case as the provided scala names */
  object SameCase extends SqlNameMapper:
    def toColumnName(scalaName: String): String = scalaName
    def toTableName(scalaName: String): String = scalaName
end SqlNameMapper

// ================================================
// FILE: magnum/src/main/scala/com/augustnagro/magnum/SqlSuccessEvent.scala
// ================================================
package com.augustnagro.magnum

import
scala.concurrent.duration.FiniteDuration

/** Metadata for a successfully executed SQL statement. */
class SqlSuccessEvent private[magnum] (
    /** The SQL String */
    val sql: String,
    anyParams: Any,
    /** Time taken to execute the query, fetch data, and build the results.
      * Does not include time to construct the preparedStatement. For streaming
      * methods like `Query.iterator`, sqlExecTime is only calculated for the
      * first fetch.
      */
    val execTime: FiniteDuration
):
  /** The parameters used when executing. The type is `Iterator[Iterator[Any]]`
    * to support logging batched updates. For example,
    * {{{
    * repo.insert(User(a, b, c)) // provides Iterator(Iterator(a, b, c))
    * repo.insertAll(List(User(a, b, c), User(d, e, f))) // provides Iterator(Iterator(a, b, c), Iterator(d, e, f))
    * }}}
    */
  def params: Iterator[Iterator[Any]] = parseParams(anyParams)

// ================================================
// FILE: magnum/src/main/scala/com/augustnagro/magnum/SqliteDbType.scala
// ================================================
package com.augustnagro.magnum

import java.sql.{Connection, PreparedStatement, ResultSet, Statement}
import java.time.OffsetDateTime
import scala.collection.View
import scala.deriving.Mirror
import scala.reflect.ClassTag
import scala.util.{Failure, Success, Using}

object SqliteDbType extends DbType:

  // Sqlite pagination uses LIMIT <offset>, <limit>; an offset alone needs a
  // LIMIT, so Long.MaxValue stands in for "no limit".
  private val specImpl = new SpecImpl:
    override def offsetLimitSql(
        offset: Option[Long],
        limit: Option[Int]
    ): Option[String] =
      (offset, limit) match
        case (Some(o), Some(l)) => Some(s"LIMIT $o, $l")
        case (Some(o), None)    => Some(s"LIMIT $o, ${Long.MaxValue}")
        case (None, Some(l))    => Some(s"LIMIT $l")
        case (None, None)       => None

  /** Builds the default repository operations for Sqlite. All SQL strings are
    * precomputed once, when the repo is derived.
    */
  def buildRepoDefaults[EC, E, ID](
      tableNameSql: String,
      eElemNames: Seq[String],
      eElemNamesSql: Seq[String],
      eElemCodecs: Seq[DbCodec[?]],
      ecElemNames: Seq[String],
      ecElemNamesSql: Seq[String],
      idIndex: Int
  )(using
      eCodec: DbCodec[E],
      ecCodec: DbCodec[EC],
      idCodec: DbCodec[ID],
      eClassTag: ClassTag[E],
      ecClassTag: ClassTag[EC],
      idClassTag: ClassTag[ID]
  ): RepoDefaults[EC, E, ID] =
    val idName = eElemNamesSql(idIndex)
    val selectKeys = eElemNamesSql.mkString(", ")
    val ecInsertKeys = ecElemNamesSql.mkString("(", ", ", ")")
    // SET clause for updates; the id column is excluded and bound last.
    val updateKeys: String = eElemNamesSql
      .lazyZip(eElemCodecs)
      .map((sqlName, codec) => sqlName + " = " + codec.queryRepr)
      .patch(idIndex, Seq.empty, 1)
      .mkString(", ")
    val updateCodecs = eElemCodecs
      .patch(idIndex, Seq.empty, 1)
      .appended(idCodec)
      .asInstanceOf[Seq[DbCodec[Any]]]
    // NOTE: the old `insertGenKeys` array was removed — it was never used,
    // since insertReturning is unsupported on Sqlite (see below).
    val countSql = s"SELECT count(*) FROM $tableNameSql"
    val countQuery = Frag(countSql, Vector.empty, FragWriter.empty).query[Long]
    val existsByIdSql =
      s"SELECT 1 FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}"
    // Select explicit columns (not '*') so the ResultSet column order always
    // matches eCodec's field order, consistent with PostgresDbType.
    val findAllSql = s"SELECT $selectKeys FROM $tableNameSql"
    val findAllQuery = Frag(findAllSql, Vector.empty, FragWriter.empty).query[E]
    val findByIdSql =
      s"SELECT $selectKeys FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}"
    val deleteByIdSql =
      s"DELETE FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}"
    // Sqlite has no TRUNCATE statement; unqualified DELETE is the equivalent.
    val truncateSql = s"DELETE FROM $tableNameSql"
    val truncateUpdate =
      Frag(truncateSql, Vector.empty, FragWriter.empty).update
    val insertSql =
      s"INSERT INTO $tableNameSql $ecInsertKeys VALUES (${ecCodec.queryRepr})"
    val updateSql =
      s"UPDATE $tableNameSql SET $updateKeys WHERE $idName = ${idCodec.queryRepr}"

    def idWriter(id: ID): FragWriter = (ps, pos) =>
      idCodec.writeSingle(id, ps, pos)
      pos + idCodec.cols.length

    new RepoDefaults[EC, E, ID]:
      def count(using con: DbCon): Long = countQuery.run().head

      def existsById(id: ID)(using DbCon): Boolean =
        Frag(existsByIdSql, IArray(id), idWriter(id))
          .query[Int]
          .run()
          .nonEmpty

      def findAll(using DbCon): Vector[E] = findAllQuery.run()

      def findAll(spec: Spec[E])(using DbCon): Vector[E] =
        specImpl.findAll(spec, tableNameSql)

      def findById(id: ID)(using DbCon): Option[E] =
        Frag(findByIdSql, IArray(id), idWriter(id))
          .query[E]
          .run()
          .headOption

      def findAllById(ids: Iterable[ID])(using DbCon): Vector[E] =
        throw UnsupportedOperationException(
          "Sqlite does not support 'ANY' keyword, and does not support long IN parameter lists. Use findById in a loop instead."
        )

      def delete(entity: E)(using DbCon): Unit =
        deleteById(
          entity
            .asInstanceOf[Product]
            .productElement(idIndex)
            .asInstanceOf[ID]
        )

      def deleteById(id: ID)(using DbCon): Unit =
        Frag(deleteByIdSql, IArray(id), idWriter(id)).update
          .run()

      def truncate()(using DbCon): Unit =
        truncateUpdate.run()

      def deleteAll(entities: Iterable[E])(using DbCon): BatchUpdateResult =
        deleteAllById(
          entities.map(e =>
            e.asInstanceOf[Product].productElement(idIndex).asInstanceOf[ID]
          )
        )

      def deleteAllById(ids: Iterable[ID])(using
          con: DbCon
      ): BatchUpdateResult =
        handleQuery(deleteByIdSql, ids):
          Using(con.connection.prepareStatement(deleteByIdSql)): ps =>
            idCodec.write(ids, ps)
            timed(batchUpdateResult(ps.executeBatch()))

      def insert(entityCreator: EC)(using con: DbCon): Unit =
        handleQuery(insertSql, entityCreator):
          Using(con.connection.prepareStatement(insertSql)): ps =>
            ecCodec.writeSingle(entityCreator, ps)
            timed(ps.executeUpdate())

      def insertAll(entityCreators: Iterable[EC])(using con: DbCon): Unit =
        handleQuery(insertSql, entityCreators):
          Using(con.connection.prepareStatement(insertSql)): ps =>
            ecCodec.write(entityCreators, ps)
            timed(batchUpdateResult(ps.executeBatch()))

      // https://github.com/AugustNagro/magnum/issues/87#issuecomment-2591823574
      def insertReturning(entityCreator: EC)(using con: DbCon): E =
        throw UnsupportedOperationException()

      // https://github.com/AugustNagro/magnum/issues/87#issuecomment-2591823574
      def insertAllReturning(
          entityCreators: Iterable[EC]
      )(using con: DbCon): Vector[E] =
        throw UnsupportedOperationException()

      def update(entity: E)(using con: DbCon): Unit =
        handleQuery(updateSql, entity):
          Using(con.connection.prepareStatement(updateSql)): ps =>
            val entityValues: Vector[Any] = entity
              .asInstanceOf[Product]
              .productIterator
              .toVector
            // put ID at the end
            val updateValues = entityValues
              .patch(idIndex, Vector.empty, 1)
              .appended(entityValues(idIndex))
            var pos = 1
            for (field, codec) <- updateValues.lazyZip(updateCodecs) do
              codec.writeSingle(field, ps, pos)
              pos += codec.cols.length
            timed(ps.executeUpdate())

      def updateAll(entities: Iterable[E])(using
          con: DbCon
      ): BatchUpdateResult =
        handleQuery(updateSql, entities):
          Using(con.connection.prepareStatement(updateSql)): ps =>
            for entity <- entities do
              val entityValues: Vector[Any] = entity
                .asInstanceOf[Product]
                .productIterator
                .toVector
              // put ID at the end
              val updateValues = entityValues
                .patch(idIndex, Vector.empty, 1)
                .appended(entityValues(idIndex))
              var pos = 1
              for (field, codec) <- updateValues.lazyZip(updateCodecs) do
                codec.writeSingle(field, ps, pos)
                pos += codec.cols.length
              ps.addBatch()
            timed(batchUpdateResult(ps.executeBatch()))
    end new
  end buildRepoDefaults
end SqliteDbType

// ================================================
// FILE: magnum/src/main/scala/com/augustnagro/magnum/Table.scala
// ================================================
package com.augustnagro.magnum

import scala.annotation.StaticAnnotation

class Table(
    val dbType: DbType,
    val nameMapper: SqlNameMapper = SqlNameMapper.SameCase
) extends StaticAnnotation

// ================================================
// FILE: magnum/src/main/scala/com/augustnagro/magnum/TableExprs.scala
// ================================================
package com.augustnagro.magnum

import scala.quoted.*

private case class TableExprs(
    tableAnnot: Expr[Table],
    tableNameScala: Expr[String],
    tableNameSql: Expr[String],
    eElemNames: Seq[String],
    eElemNamesSql: Seq[Expr[String]],
    ecElemNames: List[String],
    ecElemNamesSql: Seq[Expr[String]],
    idIndex: Expr[Int]
)

// ================================================
// FILE: magnum/src/main/scala/com/augustnagro/magnum/TableInfo.scala
// ================================================
package com.augustnagro.magnum

import scala.deriving.*
import scala.compiletime.*
import scala.quoted.*

/** Metadata about a Table, which can be interpolated in sql"" expressions
  *
  *
  * For example,
  *
  * {{{
  * @Table(PostgresDbType, SqlNameMapper.CamelToSnakeCase)
  * case class User(@Id id: Long, firstName: String)
  * derives DbCodec
  *
  * val u = TableInfo[User, User, Long].alias("u")
  *
  * sql"SELECT ${u.firstName} FROM $u".sqlString ==
  * "SELECT u.first_name FROM user u"
  * }}}
  */
class TableInfo[EC, E, ID](
    val all: ColumnNames,
    val insertColumns: ColumnNames,
    val alias: Option[String],
    val queryRepr: String,
    val idColumn: Option[ColumnName],
    private[magnum] val table: String,
    private[magnum] val eClassName: String
) extends Selectable, SqlLiteral:

  // Selectable hook: resolves `tableInfo.someField` to its ColumnName.
  def selectDynamic(scalaName: String): ColumnName =
    all.columnNames.find(_.scalaName == scalaName).get

  /** Returns a copy of this TableInfo whose column queryReprs are qualified
    * with `tableAlias`.
    */
  def alias(tableAlias: String): this.type =
    require(tableAlias.nonEmpty, "custom tableAlias cannot be empty")
    val queryRepr = table + " " + tableAlias
    val allSchemaNames = all.columnNames.map(cn =>
      val sqlName = cn.sqlName
      ColumnName(
        scalaName = cn.scalaName,
        sqlName = sqlName,
        queryRepr = tableAlias + "." + sqlName
      )
    )
    val allQueryRepr = allSchemaNames.map(_.queryRepr).mkString(", ")
    val allCols = ColumnNames(allQueryRepr, allSchemaNames)
    // Re-resolve the id column against the newly-qualified names.
    val newIdColumn = idColumn.flatMap(oldId =>
      allSchemaNames.find(_.scalaName == oldId.scalaName)
    )
    new TableInfo[EC, E, ID](
      all = allCols,
      insertColumns = insertColumns,
      alias = Some(tableAlias),
      queryRepr = queryRepr,
      idColumn = newIdColumn,
      table = table,
      eClassName = eClassName
    ).asInstanceOf[this.type]
  end alias
end TableInfo

object TableInfo:
  transparent inline def apply[EC: Mirror.Of, E: Mirror.Of, ID] =
    ${ dbSchemaImpl[EC, E, ID] }

  // Macro: builds a TableInfo whose type is refined with one ColumnName
  // member per entity field, enabling `tableInfo.fieldName` selection.
  private def dbSchemaImpl[EC: Type, E: Type, ID: Type](using
      Quotes
  ): Expr[Any] =
    import quotes.reflect.*
    val exprs = tableExprs[EC, E, ID]
    val refinement = exprs.eElemNames
      .foldLeft(TypeRepr.of[TableInfo[EC, E, ID]])((typeRepr, elemName) =>
        Refinement(typeRepr, elemName, TypeRepr.of[ColumnName])
      )
    val allColumnsExpr = Expr.ofSeq(
      exprs.eElemNames
        .lazyZip(exprs.eElemNamesSql)
        .map((elemName, elemNameSqlExpr) =>
          '{
            val elemNameSql = $elemNameSqlExpr
            ColumnName(${ Expr(elemName) }, elemNameSql, elemNameSql)
          }
        )
    )
    val insertColumnsExpr = Expr.ofSeq(
      exprs.ecElemNames
        .lazyZip(exprs.ecElemNamesSql)
        .map((elemName, elemNameSqlExpr) =>
          '{
            val elemNameSql = $elemNameSqlExpr
            ColumnName(${ Expr(elemName) }, elemNameSql, elemNameSql)
          }
        )
    )
    // ID =:= Null encodes "table has no id column".
    val idIdx =
      if TypeRepr.of[ID] =:= TypeRepr.of[Null] then '{ None }
      else '{ Some(${ exprs.idIndex }) }
    refinement.asType match
      case '[tpe] =>
        '{
          val allColumns = IArray.from($allColumnsExpr)
          val allQueryRepr = allColumns.map(_.queryRepr).mkString(", ")
          val allCols = ColumnNames(allQueryRepr, allColumns)
          val insertColumns = IArray.from($insertColumnsExpr)
          val insertQueryRepr =
            insertColumns.map(_.queryRepr).mkString("(", ", ", ")")
          val insertCols = ColumnNames(insertQueryRepr, insertColumns)
          val idColumn = $idIdx.map(idx => allColumns(idx))
          val tableName = ${ exprs.tableNameSql }
          new TableInfo[EC, E, ID](
            all = allCols,
            insertColumns = insertCols,
            alias = None,
            table = tableName,
            queryRepr = tableName,
            idColumn = idColumn,
            eClassName = ${ exprs.tableNameScala }
          ).asInstanceOf[tpe]
        }
    end match
  end dbSchemaImpl
end TableInfo

================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/Transactor.scala
================================================
package com.augustnagro.magnum

import java.sql.Connection
import javax.sql.DataSource
import scala.util.Using

/** Entry point for borrowing JDBC connections and running transactions. */
class Transactor private (
    dataSource: DataSource,
    sqlLogger: SqlLogger = SqlLogger.Default,
    connectionConfig: Connection => Unit = con => ()
):
  def withSqlLogger(sqlLogger: SqlLogger): Transactor =
    new Transactor(dataSource, sqlLogger, connectionConfig)

  def withConnectionConfig(connectionConfig: Connection => Unit): Transactor =
    new Transactor(dataSource, sqlLogger, connectionConfig)

  /** Runs `f` with a borrowed connection (no explicit transaction). */
  def connect[T](f: DbCon ?=> T): T =
    Using.resource(dataSource.getConnection): con =>
      connectionConfig(con)
      f(using DbCon(con, sqlLogger))

  /** Runs `f` in a transaction: commits on success, rolls back on failure. */
  def transact[T](f: DbTx ?=> T): T =
    Using.resource(dataSource.getConnection): con =>
      connectionConfig(con)
      con.setAutoCommit(false)
      try
        val res = f(using DbTx(con, sqlLogger))
        con.commit()
        res
      catch
        case t =>
          // Roll back, preserving the original failure if rollback also
          // throws. NOTE(review): this catches ALL Throwables (incl. fatal
          // errors) so the transaction never commits partially; auto-commit
          // is not restored before the connection is closed/returned —
          // presumably the pool resets it. TODO confirm.
          try con.rollback()
          catch { case t2 => t.addSuppressed(t2) }
          throw t
end Transactor

object Transactor:
  def apply(
      dataSource: DataSource,
      sqlLogger: SqlLogger,
      connectionConfig: Connection => Unit
  ): Transactor =
    new Transactor(dataSource, sqlLogger, connectionConfig)

  def apply(dataSource: DataSource, sqlLogger: SqlLogger): Transactor =
    new Transactor(dataSource, sqlLogger, _ => ())

  def apply(
      dataSource: DataSource,
      connectionConfig: Connection => Unit
  ): Transactor =
    new Transactor(dataSource, SqlLogger.Default, connectionConfig)

  def apply(dataSource: DataSource): Transactor =
    new Transactor(dataSource, SqlLogger.Default, _ => ())
end Transactor

================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/UUIDCodec.scala
================================================
package com.augustnagro.magnum

import java.sql.{PreparedStatement, ResultSet, Types}
import java.util.UUID

/** Opt-in codec for databases that store UUIDs in VARCHAR columns. */
object UUIDCodec:
  given VarCharUUIDCodec: DbCodec[UUID] with
    def queryRepr: String = "?"
    val cols: IArray[Int] = IArray(Types.VARCHAR)
    def readSingle(rs: ResultSet, pos: Int): UUID =
      UUID.fromString(rs.getString(pos))
    // Null-safe read: a NULL column yields None.
    def readSingleOption(rs: ResultSet, pos: Int): Option[UUID] =
      Option(rs.getString(pos)).map(UUID.fromString)
    def writeSingle(entity: UUID, ps: PreparedStatement, pos: Int): Unit =
      ps.setString(pos, entity.toString)

================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/Update.scala
================================================
package com.augustnagro.magnum

import java.util.concurrent.TimeUnit
import scala.concurrent.duration.FiniteDuration
import scala.util.{Failure, Success, Using}

/** A DML statement (INSERT/UPDATE/DELETE) built from a Frag. */
class Update private[magnum] (val frag: Frag):

  /** Exactly like [[java.sql.PreparedStatement]].executeUpdate */
  def run()(using con: DbCon): Int =
    handleQuery(frag.sqlString, frag.params):
      Using(con.connection.prepareStatement(frag.sqlString)): ps =>
        frag.writer.write(ps, 1)
        timed(ps.executeUpdate())

================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/util.scala
================================================
package com.augustnagro.magnum

import com.augustnagro.magnum.SqlException

import java.lang.System.Logger.Level
import java.sql.{Connection, PreparedStatement, ResultSet, Statement}
import java.util.StringJoiner
import java.util.concurrent.TimeUnit
import javax.sql.DataSource
import scala.collection.mutable as m
import scala.util.{Failure, Success, Try, Using, boundary}
import scala.deriving.Mirror
import scala.compiletime.{
  constValue,
  constValueTuple,
  erasedValue,
  error,
  summonInline
}
import scala.compiletime.ops.any.==
import scala.compiletime.ops.boolean.&&
import scala.concurrent.duration.FiniteDuration
import scala.reflect.ClassTag
import scala.quoted.*

/** Runs `f` with a connection borrowed from the transactor. */
def connect[T](transactor: Transactor)(f: DbCon ?=> T): T =
  transactor.connect(f)

def connect[T](dataSource: DataSource)(f: DbCon ?=> T): T =
  Transactor(dataSource).connect(f)

/** Runs `f` in a transaction managed by the transactor. */
def transact[T](transactor:
    Transactor)(f: DbTx ?=> T): T =
  transactor.transact(f)

def transact[T](dataSource: DataSource)(f: DbTx ?=> T): T =
  Transactor(dataSource).transact(f)

def transact[T](dataSource: DataSource, connectionConfig: Connection => Unit)(
    f: DbTx ?=> T
): T =
  val transactor =
    Transactor(dataSource = dataSource, connectionConfig = connectionConfig)
  transactor.transact(f)

/** sql"" string interpolator, producing a Frag. */
extension (inline sc: StringContext)
  inline def sql(inline args: Any*): Frag =
    ${ sqlImpl('{ sc }, '{ args }) }

// Macro entry point for the sql interpolator: computes the final query
// string, the flattened runtime parameters, and a FragWriter that binds them.
private def sqlImpl(sc: Expr[StringContext], args: Expr[Seq[Any]])(using
    Quotes
): Expr[Frag] =
  import quotes.reflect.*
  val allArgsExprs: Seq[Expr[Any]] = args match
    case Varargs(ae) => ae
//  val stringExprs: Seq[Expr[String]] = sc match
//    case '{ StringContext(${ Varargs(strings) }: _*) } => strings

  '{
    val args: Seq[Any] = ${ Expr.ofSeq(allArgsExprs) }
    val sqlQueryReprs: Vector[String] = ${
      queryReprs(allArgsExprs, '{ args }, '{ Vector.newBuilder })
    }
    val queryExpr: String = $sc.s(sqlQueryReprs: _*)
    val flattenedArgs: Vector[Any] = ${
      flattenedArgsExpr(allArgsExprs, '{ args }, '{ Vector.newBuilder })
    }
    val writer: FragWriter = (ps: PreparedStatement, pos: Int) => {
      ${ sqlWriter('{ ps }, '{ pos }, '{ args }, allArgsExprs) }
    }
    Frag(queryExpr, flattenedArgs, writer)
  }
end sqlImpl

// Collects runtime parameter values: nested Frag params are spliced in,
// SqlLiterals are skipped (they render into the query text, not bound).
private def flattenedArgsExpr(
    argsExprs: Seq[Expr[Any]],
    allArgs: Expr[Seq[Any]],
    builder: Expr[m.Builder[Any, Vector[Any]]],
    i: Int = 0
)(using Quotes): Expr[Vector[Any]] =
  argsExprs match
    case '{ $arg: SqlLiteral } +: tail =>
      flattenedArgsExpr(tail, allArgs, builder, i + 1)
    case '{ $arg: Frag } +: tail =>
      val newBuilder = '{
        $builder ++= $allArgs(${ Expr(i) }).asInstanceOf[Frag].params
      }
      flattenedArgsExpr(tail, allArgs, newBuilder, i + 1)
    case '{ $arg: tp } +: tail =>
      val newBuilder = '{ $builder += $allArgs(${ Expr(i) }) }
      flattenedArgsExpr(tail, allArgs, newBuilder, i + 1)
    case Seq() => '{ $builder.result() }

// Computes, per interpolated arg, the text spliced into the SQL string:
// literal text for SqlLiteral, the nested SQL for Frag, otherwise the
// codec's placeholder representation.
private def queryReprs(
    argsExprs: Seq[Expr[Any]],
    allArgs: Expr[Seq[Any]],
    builder: Expr[m.Builder[String, Vector[String]]],
    i: Int = 0
)(using Quotes): Expr[Vector[String]] =
  argsExprs match
    case '{ $arg: SqlLiteral } +: tail =>
      val newBuilder = '{
        $builder += $allArgs(${ Expr(i) }).asInstanceOf[SqlLiteral].queryRepr
      }
      queryReprs(tail, allArgs, newBuilder, i + 1)
    case '{ $arg: Frag } +: tail =>
      val newBuilder = '{
        $builder += $allArgs(${ Expr(i) }).asInstanceOf[Frag].sqlString
      }
      queryReprs(tail, allArgs, newBuilder, i + 1)
    case '{ $arg: tp } +: tail =>
      val codecExpr = summonWriter[tp]
      val newBuilder = '{ $builder += $codecExpr.queryRepr }
      queryReprs(tail, allArgs, newBuilder, i + 1)
    case Seq() => '{ $builder.result() }

// Generates the PreparedStatement writer: threads the parameter position
// through every arg, delegating to nested Frag writers and DbCodecs.
private def sqlWriter(
    psExpr: Expr[PreparedStatement],
    posExpr: Expr[Int],
    args: Expr[Seq[Any]],
    argsExprs: Seq[Expr[Any]],
    i: Int = 0
)(using Quotes): Expr[Int] =
  import quotes.reflect.*
  argsExprs match
    case '{ $arg: SqlLiteral } +: tail =>
      sqlWriter(psExpr, posExpr, args, tail, i + 1)
    case '{ $arg: Frag } +: tail =>
      '{
        val frag = $args(${ Expr(i) }).asInstanceOf[Frag]
        val pos = $posExpr
        val newPos = frag.writer.write($psExpr, pos)
        ${ sqlWriter(psExpr, '{ newPos }, args, tail, i + 1) }
      }
    case '{ $arg: tp } +: tail =>
      val codecExpr = summonWriter[tp]
      '{
        val argValue = $args(${ Expr(i) }).asInstanceOf[tp]
        val pos = $posExpr
        val codec = $codecExpr
        codec.writeSingle(argValue, $psExpr, pos)
        val newPos = pos + codec.cols.length
        ${ sqlWriter(psExpr, '{ newPos }, args, tail, i + 1) }
      }
    case Seq() => posExpr
  end match
end sqlWriter

// Summons a DbCodec for T, retrying with the widened type; falls back to
// DbCodec.AnyCodec (PreparedStatement::setObject) with a compiler info note.
private def summonWriter[T: Type](using Quotes): Expr[DbCodec[T]] =
  import quotes.reflect.*
  Expr
    .summon[DbCodec[T]]
    .orElse(
      TypeRepr.of[T].widen.asType match
        case '[tpe] =>
          Expr
            .summon[DbCodec[tpe]]
            .map(codec => '{ $codec.asInstanceOf[DbCodec[T]] })
    )
    .getOrElse:
      report.info(
        s"Could not find given DbCodec for ${TypeRepr.of[T].show}. Using PreparedStatement::setObject instead."
      )
      '{ DbCodec.AnyCodec.asInstanceOf[DbCodec[T]] }

/** Runs the Updates derived from `values` as one batched PreparedStatement.
  * All generated fragments must share the same SQL string.
  */
def batchUpdate[T](values: Iterable[T])(f: T => Update)(using
    con: DbCon
): BatchUpdateResult =
  val it = values.iterator
  if !it.hasNext then return BatchUpdateResult.Success(0)
  val firstUpdate = f(it.next())
  val firstFrag = firstUpdate.frag
  Using.Manager(use =>
    val ps = use(con.connection.prepareStatement(firstFrag.sqlString))
    firstFrag.writer.write(ps, 1)
    ps.addBatch()
    while it.hasNext do
      val frag = f(it.next()).frag
      assert(
        frag.sqlString == firstFrag.sqlString,
        "all queries must be the same for batch PreparedStatement"
      )
      frag.writer.write(ps, 1)
      ps.addBatch()
    batchUpdateResult(ps.executeBatch())
  ) match
    case Success(res) => res
    case Failure(t) =>
      throw SqlException(
        con.sqlLogger.exceptionMsg(
          SqlExceptionEvent(firstFrag.sqlString, firstFrag.params, t)
        ),
        t
      )
  end match
end batchUpdate

private val Log = System.getLogger("com.augustnagro.magnum")

// Normalizes logged params into Iterator[Iterator[Any]]
// (see SqlSuccessEvent.params).
private def parseParams(params: Any): Iterator[Iterator[Any]] =
  params match
    case p: Product => Iterator(p.productIterator)
    case it: Iterable[?]
=> it.headOption match case Some(h: Product) => it.asInstanceOf[Iterable[Product]] .iterator .map(_.productIterator) case _ => Iterator(it.iterator) case x => Iterator(Iterator(x)) private def paramsString(params: Iterator[Iterator[Any]]): String = params.map(_.mkString("(", ", ", ")")).mkString("", ",\n", "\n") private def timed[T](f: => T): (T, FiniteDuration) = val start = System.currentTimeMillis() val res = f val execTime = FiniteDuration( System.currentTimeMillis() - start, TimeUnit.MILLISECONDS ) (res, execTime) private def batchUpdateResult(updateCounts: Array[Int]): BatchUpdateResult = boundary: val updatedRows = updateCounts.foldLeft(0L)((res, c) => c match case rowCount if rowCount >= 0 => res + rowCount case Statement.SUCCESS_NO_INFO => boundary.break(BatchUpdateResult.SuccessNoInfo) case errorCode => throw RuntimeException(s"Received JDBC error code $errorCode") ) BatchUpdateResult.Success(updatedRows) private def assertECIsSubsetOfE[EC: Type, E: Type](using Quotes): Unit = import quotes.reflect.* val eRepr = TypeRepr.of[E] val ecRepr = TypeRepr.of[EC] val eFields = eRepr.typeSymbol.caseFields val ecFields = ecRepr.typeSymbol.caseFields for ecField <- ecFields do if !eFields.exists(f => f.name == ecField.name && f.signature.resultSig == ecField.signature.resultSig ) then report.error( s"""${ecRepr.show} must be an effective subset of ${eRepr.show}. |Are there any fields on ${ecRepr.show} you forgot to update on ${eRepr.show}? 
           |""".stripMargin
      )

// Extracts all compile-time table metadata shared by the Repo and TableInfo
// derivation macros.
private def tableExprs[EC: Type, E: Type, ID: Type](using
    Quotes
): TableExprs =
  import quotes.reflect.*
  assertECIsSubsetOfE[EC, E]
  val idIndex = idAnnotIndex[E]
  val table: Expr[Table] = DerivingUtil.tableAnnot[E] match
    case Some(table) => table
    case None =>
      report.errorAndAbort(
        s"${TypeRepr.of[E].show} must have @Table annotation"
      )
  val nameMapper: Expr[SqlNameMapper] = '{ $table.nameMapper }
  Expr.summon[Mirror.Of[E]] match
    case Some('{
          $eMirror: Mirror.Of[E] {
            type MirroredLabel = eLabel
            type MirroredElemLabels = eMels
          }
        }) =>
      Expr.summon[Mirror.Of[EC]] match
        case Some('{
              $ecMirror: Mirror.Of[EC] {
                type MirroredElemLabels = ecMels
              }
            }) =>
          val tableNameScala = Type.valueOfConstant[eLabel].get.toString
          val tableNameScalaExpr = Expr(tableNameScala)
          // @SqlName on the class wins over the configured name mapper.
          val tableNameSql = DerivingUtil.sqlTableNameAnnot[E] match
            case Some(sqlName) => '{ $sqlName.name }
            case None => '{ $nameMapper.toTableName($tableNameScalaExpr) }
          val eElemNames = elemNames[eMels]()
          val eElemNamesSql = eElemNames.map(elemName =>
            sqlNameAnnot[E](elemName) match
              case Some(sqlName) => '{ $sqlName.name }
              case None => '{ $nameMapper.toColumnName(${ Expr(elemName) }) }
          )
          val ecElemNames = elemNames[ecMels]()
          // NOTE(review): EC column names are resolved against E's @SqlName
          // annotations — EC is an effective subset of E (checked above).
          val ecElemNamesSql = ecElemNames.map(elemName =>
            sqlNameAnnot[E](elemName) match
              case Some(sqlName) => '{ $sqlName.name }
              case None => '{ $nameMapper.toColumnName(${ Expr(elemName) }) }
          )
          TableExprs(
            table,
            tableNameScalaExpr,
            tableNameSql,
            eElemNames,
            eElemNamesSql,
            ecElemNames,
            ecElemNamesSql,
            idIndex
          )
        case _ =>
          report.errorAndAbort(
            s"A Mirror is required to derive RepoDefaults for ${TypeRepr.of[EC].show}"
          )
    case _ =>
      report.errorAndAbort(
        s"A Mirror is required to derive RepoDefaults for ${TypeRepr.of[E].show}"
      )
  end match
end tableExprs

// Index of the @Id-annotated constructor param; defaults to 0 when absent.
private def idAnnotIndex[E: Type](using q: Quotes): Expr[Int] =
  import q.reflect.*
  val idAnnot = TypeRepr.of[Id].typeSymbol
  val index = TypeRepr
    .of[E]
    .typeSymbol
    .primaryConstructor
    .paramSymss
    .head
    .indexWhere(sym => sym.hasAnnotation(idAnnot)) match
    case -1 => 0
    case x  => x
  Expr(index)

// Recursively reads the tuple of mirrored element label types into strings.
private def elemNames[Mels: Type](res: List[String] = Nil)(using
    Quotes
): List[String] =
  import quotes.reflect.*
  Type.of[Mels] match
    case '[mel *: melTail] =>
      val melString = Type.valueOfConstant[mel].get.toString
      elemNames[melTail](melString :: res)
    case '[EmptyTuple] => res.reverse

// Finds the @SqlName annotation on T's constructor param `elemName`, if any.
private def sqlNameAnnot[T: Type](elemName: String)(using
    Quotes
): Option[Expr[SqlName]] =
  import quotes.reflect.*
  val annot = TypeRepr.of[SqlName].typeSymbol
  TypeRepr
    .of[T]
    .typeSymbol
    .primaryConstructor
    .paramSymss
    .head
    .find(sym => sym.name == elemName && sym.hasAnnotation(annot))
    .flatMap(sym => sym.getAnnotation(annot))
    .map(term => term.asExprOf[SqlName])

// Logs a success event, or wraps the failure in SqlException, via the
// connection's SqlLogger.
private def handleQuery[A](sql: String, params: Any)(
    attempt: Try[(A, FiniteDuration)]
)(using con: DbCon): A =
  attempt match
    case Success((res, execTime)) =>
      con.sqlLogger.log(SqlSuccessEvent(sql, params, execTime))
      res
    case Failure(t) =>
      val msg = con.sqlLogger.exceptionMsg(SqlExceptionEvent(sql, params, t))
      throw SqlException(msg, t)

================================================
FILE: magnum/src/test/resources/clickhouse/big-dec.sql
================================================
-- Fixture: nullable Int256 column for BigDecimal codec tests.
drop table if exists big_dec;
create table big_dec
(
    id Int64 NOT NULL,
    my_big_dec Nullable(Int256)
) ENGINE = MergeTree()
ORDER BY id;

insert into big_dec values
(1, 123),
(2, null);

================================================
FILE: magnum/src/test/resources/clickhouse/car.sql
================================================
-- Fixture: car table with Enum color column, ordered by created.
drop table if exists car;
CREATE TABLE car
(
    model String NOT NULL,
    id Int64 NOT NULL,
    top_speed Int32 NOT NULL,
    vin Nullable(Int32),
    color Enum('Red', 'Green', 'Blue'),
    created DateTime NOT NULL
) ENGINE = MergeTree()
ORDER BY created;

INSERT INTO car (model, id, top_speed, vin, color, created) VALUES
('McLaren Senna', 1, 208, 123, 'Red', toDateTime('2024-11-24 22:17:30', 'UTC')),
('Ferrari F8 Tributo', 2, 212, 124, 'Green', toDateTime('2024-11-24 22:17:31', 'UTC')),
('Aston Martin Superleggera', 3, 211, null, 'Blue', toDateTime('2024-11-24 22:17:32', 'UTC'));

================================================
FILE: magnum/src/test/resources/clickhouse/my-time.sql
================================================
-- Fixture: date/time codec round-trip columns.
drop table if exists my_time;
create table my_time
(
    a DateTime not null,
    b Date not null,
    c String not null,
    d DateTime not null
) engine = MergeTree()
order by a;

insert into my_time values
(toDateTime('2025-03-30 21:19:23'), toDate('2025-03-30'), '05:20:04', toDateTime('2025-04-02 20:16:38')),
(toDateTime('2025-03-31 21:19:23'), toDate('2025-03-31'), '05:30:04', toDateTime('2025-04-02T20:17:38'));

================================================
FILE: magnum/src/test/resources/clickhouse/no-id.sql
================================================
-- Fixture: table without a primary id column.
drop table if exists no_id;
CREATE TABLE no_id
(
    created_at DateTime NOT NULL,
    user_name String NOT NULL,
    user_action String NOT NULL
) ENGINE = MergeTree()
ORDER BY created_at;

INSERT INTO no_id VALUES
(timestamp '1997-08-15', 'Josh', 'clicked a button'),
(timestamp '1997-08-16', 'Danny', 'opened a toaster'),
(timestamp '1997-08-17', 'Greg', 'ran some QA tests');

================================================
FILE: magnum/src/test/resources/clickhouse/person.sql
================================================
-- Fixture: person table with nullable first_name and social_id.
drop table if exists person;
create table person
(
    id Int64 not null,
    first_name Nullable(String),
    last_name String not null,
    is_admin Bool not null,
    created DateTime not null,
    social_id Nullable(UUID)
) engine = MergeTree()
order by created;

insert into person values
(1, 'George', 'Washington', true, toDateTime('2023-03-05 02:26:00'), toUUID('d06443a6-3efb-46c4-a66a-a80a8a9a5388')),
(2, 'Alexander', 'Hamilton', true, toDateTime('2023-03-05 02:27:00'), toUUID('529b6c6d-7228-4da5-81d7-13b706f78ddb')),
(3, 'John', 'Adams', true, toDateTime('2023-03-05 02:28:00'), null),
(4, 'Benjamin', 'Franklin', true, toDateTime('2023-03-05 02:29:00'), null),
(5, 'John', 'Jay', true, toDateTime('2023-03-05 02:30:00'), null),
(6, 'Thomas', 'Jefferson', true, toDateTime('2023-03-05 02:31:00'), null),
(7, 'James', 'Madison', true, toDateTime('2023-03-05 02:32:00'), null),
(8, null, 'Nagro', false, toDateTime('2023-03-05 02:33:00'), null);

================================================
FILE: magnum/src/test/resources/h2/big-dec.sql
================================================
drop table if exists big_dec cascade;
create table big_dec
(
    id int auto_increment primary key,
    my_big_dec numeric
);

insert into big_dec values
(1, 123),
(2, null);

================================================
FILE: magnum/src/test/resources/h2/car.sql
================================================
-- Fixture: id is auto_increment, so inserts omit it.
drop table if exists car;
create table car
(
    model varchar(50) not null,
    id bigint auto_increment primary key,
    top_speed int not null,
    vin int,
    color enum('Red', 'Green', 'Blue'),
    created timestamp with time zone not null
);

insert into car (model, top_speed, vin, color, created) values
('McLaren Senna', 208, 123, 'Red', '2024-11-24T22:17:30.000000000Z'),
('Ferrari F8 Tributo', 212, 124, 'Green', '2024-11-24T22:17:31.000000000Z'),
('Aston Martin Superleggera', 211, null, 'Blue', '2024-11-24T22:17:32.000000000Z');

================================================
FILE: magnum/src/test/resources/h2/my-time.sql
================================================
drop table if exists my_time cascade;
create table my_time
(
    a timestamp with time zone not null,
    b date not null,
    c time not null,
    d timestamp not null
);

insert into my_time values
('2025-03-30T21:19:23Z', '2025-03-30', '05:20:04', '2025-04-02T20:16:38'),
('2025-03-31T21:19:23Z', '2025-03-31', '05:30:04', '2025-04-02T20:17:38');

================================================
FILE: magnum/src/test/resources/h2/my-user.sql
================================================
drop table if exists my_user cascade;
create table my_user
(
    first_name text not null,
    id bigint auto_increment primary key
);

insert into my_user (first_name) values
('George'),
('Alexander'),
('John');

================================================
FILE: magnum/src/test/resources/h2/no-id.sql
================================================
drop table if exists no_id;
create table no_id
(
    created_at timestamp with time zone default now() not null,
    user_name varchar not null,
    user_action varchar not null
);

insert into no_id values
(timestamp '1997-08-15', 'Josh', 'clicked a button'),
(timestamp '1997-08-16', 'Danny', 'opened a toaster'),
(timestamp '1997-08-17', 'Greg', 'ran some QA tests');

================================================
FILE: magnum/src/test/resources/h2/person.sql
================================================
drop table if exists person cascade;
create table person
(
    id bigint primary key,
    first_name varchar(50),
    last_name varchar(50) not null,
    is_admin boolean not null,
    created timestamp with time zone,
    social_id UUID
);

insert into person (id, first_name, last_name, is_admin, created, social_id) values
(1, 'George', 'Washington', true, now(), 'd06443a6-3efb-46c4-a66a-a80a8a9a5388'),
(2, 'Alexander', 'Hamilton', true, now(), '529b6c6d-7228-4da5-81d7-13b706f78ddb'),
(3, 'John', 'Adams', true, now(), null),
(4, 'Benjamin', 'Franklin', true, now(), null),
(5, 'John', 'Jay', true, now(), null),
(6, 'Thomas', 'Jefferson', true, now(), null),
(7, 'James', 'Madison', true, now(), null),
(8, null, 'Nagro', false, now(), null);

================================================
FILE: magnum/src/test/resources/mysql/big-dec.sql
================================================
drop table if exists big_dec cascade;
create table big_dec
(
    id int primary key,
    my_big_dec numeric
);

insert into big_dec values
(1, 123),
(2, null);

================================================
FILE: magnum/src/test/resources/mysql/car.sql
================================================
drop table if exists car;
create table car
(
    model varchar(50) not null,
    id bigint primary key,
    top_speed int not null,
    vin int,
    color enum('Red', 'Green', 'Blue'),
    created datetime not null
);

insert into car (model, id, top_speed, vin, color, created) values
('McLaren Senna', 1, 208, 123, 'Red', '2024-11-24 22:17:30'),
('Ferrari F8 Tributo', 2, 212, 124, 'Green', '2024-11-24 22:17:31'),
('Aston Martin Superleggera', 3, 211, null, 'Blue', '2024-11-24 22:17:32');

================================================
FILE: magnum/src/test/resources/mysql/my-time.sql
================================================
drop table if exists my_time cascade;
create table my_time
(
    a timestamp not null,
    b date not null,
    c time not null,
    d datetime not null
);

insert into my_time values
('2025-03-30 21:19:23', '2025-03-30', '05:20:04', '2025-04-02 20:16:38'),
('2025-03-31 21:19:23', '2025-03-31', '05:30:04', '2025-04-02T20:17:38');

================================================
FILE: magnum/src/test/resources/mysql/my-user.sql
================================================
drop table if exists my_user cascade;
create table my_user
(
    first_name varchar(200) not null,
    id bigint auto_increment primary key
);

insert into my_user (first_name) values
('George'),
('Alexander'),
('John');

================================================
FILE: magnum/src/test/resources/mysql/no-id.sql
================================================
drop table if exists no_id;
create table no_id
(
    created_at datetime not null default now(),
    user_name varchar(200) not null,
    user_action varchar(200) not null
);

insert into no_id values
('1997-08-15', 'Josh', 'clicked a button'),
('1997-08-16', 'Danny', 'opened a toaster'),
('1997-08-17', 'Greg', 'ran some QA tests');

================================================
FILE: magnum/src/test/resources/mysql/person.sql
================================================
-- Fixture: social_id stored as varchar(36); used with VarCharUUIDCodec.
drop table if exists person cascade;
create table person
(
    id bigint primary key,
    first_name varchar(50),
    last_name varchar(50) not null,
    is_admin boolean not null,
    created datetime not null,
    social_id varchar(36)
);

insert into person (id, first_name, last_name, is_admin, created, social_id) values
(1, 'George', 'Washington', true, now(), 'd06443a6-3efb-46c4-a66a-a80a8a9a5388'),
(2, 'Alexander', 'Hamilton', true, now(), '529b6c6d-7228-4da5-81d7-13b706f78ddb'),
(3, 'John', 'Adams', true, now(), null),
(4, 'Benjamin', 'Franklin', true, now(), null),
(5, 'John', 'Jay', true, now(), null),
(6, 'Thomas', 'Jefferson', true, now(), null),
(7, 'James', 'Madison', true, now(), null),
(8, null, 'Nagro', false, now(), null);

================================================
FILE: magnum/src/test/resources/pg/big-dec.sql
================================================
drop table if exists big_dec cascade;
create table big_dec
(
    id int primary key,
    my_big_dec numeric
);

insert into big_dec values
(1, 123),
(2, null);

================================================
FILE: magnum/src/test/resources/pg/car.sql
================================================
DROP TABLE IF EXISTS car;
CREATE TABLE car
(
    model VARCHAR(50) NOT NULL,
    id bigint PRIMARY KEY,
    top_speed INT NOT NULL,
    vin INT,
    color TEXT NOT NULL CHECK (color IN ('Red', 'Green', 'Blue')),
    created TIMESTAMP WITH TIME ZONE NOT NULL
);

INSERT INTO car (model, id, top_speed, vin, color, created) VALUES
('McLaren Senna', 1, 208, 123, 'Red', '2024-11-24T22:17:30.000000000Z'::timestamptz),
('Ferrari F8 Tributo', 2, 212, 124, 'Green', '2024-11-24T22:17:31.000000000Z'::timestamptz),
('Aston Martin Superleggera', 3, 211, null, 'Blue', '2024-11-24T22:17:32.000000000Z'::timestamptz);

================================================
FILE: magnum/src/test/resources/pg/my-time.sql
================================================
drop table if exists my_time cascade;
create table my_time
(
    a timestamptz not null,
    b date not null,
    c time not null,
    d timestamp not null
);

insert into my_time values
('2025-03-30T21:19:23Z', '2025-03-30', '05:20:04', '2025-04-02T20:16:38'),
('2025-03-31T21:19:23Z', '2025-03-31', '05:30:04', '2025-04-02T20:17:38');

================================================
FILE: magnum/src/test/resources/pg/my-user.sql
================================================
drop table if exists my_user cascade;
create table my_user
(
    first_name text not null,
    id bigint primary key generated always as identity
);

insert into my_user (first_name) values
('George'),
('Alexander'),
('John');

================================================
FILE: magnum/src/test/resources/pg/no-id.sql
================================================
drop table if exists no_id;
create table no_id
(
    created_at timestamptz not null default now(),
    user_name text not null,
    user_action text not null
);

insert into no_id values
(timestamp '1997-08-15', 'Josh', 'clicked a button'),
(timestamp '1997-08-16', 'Danny', 'opened a toaster'),
(timestamp '1997-08-17', 'Greg', 'ran some QA tests');

================================================
FILE: magnum/src/test/resources/pg/person.sql
================================================
drop table if exists person cascade;
create table person
(
    id bigint primary key,
    first_name varchar(50),
    last_name varchar(50) not null,
    is_admin boolean not null,
    created timestamptz not null,
    social_id UUID
);

insert into person (id, first_name, last_name, is_admin, created, social_id) values
(1, 'George', 'Washington', true, now(), 'd06443a6-3efb-46c4-a66a-a80a8a9a5388'),
(2, 'Alexander', 'Hamilton', true, now(), '529b6c6d-7228-4da5-81d7-13b706f78ddb'),
(3, 'John', 'Adams', true, now(), null),
(4, 'Benjamin', 'Franklin', true, now(), null),
(5, 'John', 'Jay', true, now(), null),
(6, 'Thomas', 'Jefferson', true, now(), null),
(7, 'James', 'Madison', true, now(), null),
(8, null, 'Nagro', false, timestamp '1997-08-12', null);

================================================
FILE: magnum/src/test/scala/ClickHouseTests.scala
================================================
import com.augustnagro.magnum.*
import com.clickhouse.client.config.ClickHouseDefaults
import
com.clickhouse.jdbc.ClickHouseDataSource import com.dimafeng.testcontainers.ClickHouseContainer import com.dimafeng.testcontainers.munit.fixtures.TestContainersFixtures import munit.{AnyFixture, FunSuite, Location} import org.testcontainers.utility.DockerImageName import shared.* import java.nio.file.{Files, Path} import java.util.{Properties, UUID} import scala.util.Using class ClickHouseTests extends FunSuite, TestContainersFixtures: sharedTests(this, ClickhouseDbType, xa) test("only allows EC =:= E"): intercept[IllegalArgumentException]: case class UserCreator(name: String) derives DbCodec @Table(ClickhouseDbType) case class User(id: UUID, name: String) derives DbCodec val repo = Repo[UserCreator, User, UUID] val clickHouseContainer = ForAllContainerFixture( ClickHouseContainer .Def(dockerImageName = DockerImageName.parse("clickhouse/clickhouse-server:24.3.12.75") ) .createContainer() ) override def munitFixtures: Seq[AnyFixture[_]] = super.munitFixtures :+ clickHouseContainer def xa(): Transactor = val clickHouse = clickHouseContainer() val props = Properties() props.put(ClickHouseDefaults.USER.getKey, clickHouse.username) props.put(ClickHouseDefaults.PASSWORD.getKey, clickHouse.password) val ds = ClickHouseDataSource(clickHouse.jdbcUrl, props) val tableDDLs = Vector( "clickhouse/car.sql", "clickhouse/no-id.sql", "clickhouse/person.sql", "clickhouse/big-dec.sql", "clickhouse/my-time.sql" ).map(p => Files.readString(Path.of(getClass.getResource(p).toURI))) Using .Manager(use => val con = use(ds.getConnection) val stmt = use(con.createStatement) for ddl <- tableDDLs do stmt.execute(ddl) ) .get Transactor(ds) end xa end ClickHouseTests ================================================ FILE: magnum/src/test/scala/EffectiveSubsetTests.scala ================================================ import com.augustnagro.magnum.* import munit.FunSuite class EffectiveSubsetTests extends FunSuite: test("DbSchema macro error if EC not an effective subset of E"): case class 
PersonCreator(first: String, last: String)
    case class Person(id: Long, last: String) derives DbCodec
    // munit's compileErrors returns the compiler's error output as a String.
    // The original discarded that String, so the test passed even when the
    // snippet compiled cleanly. Assert the macro actually reported an error.
    assert(compileErrors("DbSchema[PersonCreator, Person, Long]").nonEmpty)

  test("Repo macro error if EC not an effective subset of E"):
    case class PersonCreator(first: String, last: String)
    case class Person(id: Long, last: String) derives DbCodec
    assert(compileErrors("Repo[PersonCreator, Person, Long]").nonEmpty)

================================================ FILE: magnum/src/test/scala/H2Tests.scala ================================================

import com.augustnagro.magnum.*
import munit.FunSuite
import org.h2.jdbcx.JdbcDataSource
import shared.*

import java.nio.file.{Files, Path}
import scala.util.Using
import scala.util.Using.Manager

/** Runs the shared test suite against a file-backed embedded H2 database. */
class H2Tests extends FunSuite:

  sharedTests(this, H2DbType, xa)

  lazy val h2DbPath = Files.createTempDirectory(null).toAbsolutePath

  /** Builds a Transactor over the H2 database at [[h2DbPath]], (re)creating
    * the fixture tables from the /h2 resource DDL scripts first.
    */
  def xa(): Transactor =
    val ds = JdbcDataSource()
    ds.setURL("jdbc:h2:" + h2DbPath)
    ds.setUser("sa")
    ds.setPassword("")
    val tableDDLs = Vector(
      "/h2/car.sql",
      "/h2/person.sql",
      "/h2/my-user.sql",
      "/h2/no-id.sql",
      "/h2/big-dec.sql",
      "/h2/my-time.sql"
    ).map(p => Files.readString(Path.of(getClass.getResource(p).toURI)))
    Manager(use =>
      val con = use(ds.getConnection)
      val stmt = use(con.createStatement)
      for ddl <- tableDDLs do stmt.execute(ddl)
    // .get was missing: Manager returns a Try, so a failed DDL script was
    // silently swallowed and tests later failed with confusing errors.
    // MySqlTests and PgTests already call .get here; fail fast the same way.
    ).get
    Transactor(ds)
end H2Tests

================================================ FILE: magnum/src/test/scala/MySqlTests.scala ================================================

import com.augustnagro.magnum.*
import com.augustnagro.magnum.UUIDCodec.VarCharUUIDCodec
import com.dimafeng.testcontainers.MySQLContainer
import com.dimafeng.testcontainers.munit.fixtures.TestContainersFixtures
import com.mysql.cj.jdbc.MysqlDataSource
import munit.{AnyFixture, FunSuite, Location}
import org.testcontainers.utility.DockerImageName
import shared.*

import java.nio.file.{Files, Path}
import scala.util.Using
import scala.util.Using.Manager

class MySqlTests extends FunSuite, TestContainersFixtures:

  sharedTests(this,
MySqlDbType, xa) val mySqlContainer = ForAllContainerFixture( MySQLContainer .Def(dockerImageName = DockerImageName.parse("mysql:8.0.32")) .createContainer() ) override def munitFixtures: Seq[AnyFixture[_]] = super.munitFixtures :+ mySqlContainer def xa(): Transactor = val mySql = mySqlContainer() val ds = MysqlDataSource() ds.setURL(mySql.jdbcUrl) ds.setUser(mySql.username) ds.setPassword(mySql.password) ds.setAllowMultiQueries(true) ds.setServerTimezone("UTC") val tableDDLs = Vector( "/mysql/car.sql", "/mysql/person.sql", "/mysql/my-user.sql", "/mysql/no-id.sql", "/mysql/big-dec.sql", "/mysql/my-time.sql" ).map(p => Files.readString(Path.of(getClass.getResource(p).toURI))) Manager(use => val con = use(ds.getConnection) val stmt = use(con.createStatement()) for ddl <- tableDDLs do stmt.execute(ddl) ).get Transactor(ds) end xa end MySqlTests ================================================ FILE: magnum/src/test/scala/OracleTests.scala ================================================ import com.augustnagro.magnum.* import com.augustnagro.magnum.UUIDCodec.VarCharUUIDCodec import com.dimafeng.testcontainers.OracleContainer import com.dimafeng.testcontainers.munit.fixtures.TestContainersFixtures import munit.{AnyFixture, FunSuite} import oracle.jdbc.datasource.impl.OracleDataSource import org.testcontainers.utility.DockerImageName import shared.* import java.sql.Statement import java.time.LocalTime import scala.util.Using class OracleTests extends FunSuite, TestContainersFixtures: given DbCodec[Boolean] = DbCodec[String].biMap(_ == "Y", b => if b then "Y" else "N") given DbCodec[LocalTime] = DbCodec[String].biMap(LocalTime.parse, _.toString) sharedTests(this, OracleDbType, xa) val oracleContainer = ForAllContainerFixture( OracleContainer .Def(dockerImageName = DockerImageName.parse( "gvenzl/oracle-xe:21.3.0" ) ) .createContainer() ) override def munitFixtures: Seq[AnyFixture[_]] = super.munitFixtures :+ oracleContainer def xa(): Transactor = val oracle = 
oracleContainer() val ds = OracleDataSource() ds.setURL(oracle.jdbcUrl) ds.setUser(oracle.username) ds.setPassword(oracle.password) // oracle doesn't support drop if exists, // or multi-statement queries Using .Manager(use => val con = use(ds.getConnection()) val stmt = use(con.createStatement()) try stmt.execute("drop table car") catch case _ => () stmt.execute( """create table car ( | model varchar2(50) not null, | id number primary key, | top_speed number not null, | vin number, | color varchar2(50) not null check (color in ('Red', 'Green', 'Blue')), | created timestamp not null |)""".stripMargin ) stmt.execute( """insert into car (model, id, top_speed, vin, color, created) |values ('McLaren Senna', 1, 208, 123, 'Red', timestamp '2024-11-24 22:17:30')""".stripMargin ) stmt.execute( """insert into car (model, id, top_speed, vin, color, created) |values ('Ferrari F8 Tributo', 2, 212, 124, 'Green', timestamp '2024-11-24 22:17:31')""".stripMargin ) stmt.execute( """insert into car (model, id, top_speed, vin, color, created) |values ('Aston Martin Superleggera', 3, 211, null, 'Blue', timestamp '2024-11-24 22:17:32')""".stripMargin ) try stmt.execute("drop table person") catch case _ => () stmt.execute( """create table person ( | id number primary key, | first_name varchar2(50), | last_name varchar2(50) not null, | is_admin varchar2(1) not null, | created timestamp not null, | social_id varchar2(36) |)""".stripMargin ) stmt.execute( """insert into person (id, first_name, last_name, is_admin, created, social_id) values |(1, 'George', 'Washington', 'Y', current_timestamp, 'd06443a6-3efb-46c4-a66a-a80a8a9a5388')""".stripMargin ) stmt.execute( """insert into person (id, first_name, last_name, is_admin, created, social_id) values |(2, 'Alexander', 'Hamilton', 'Y', current_timestamp, '529b6c6d-7228-4da5-81d7-13b706f78ddb')""".stripMargin ) stmt.execute( """insert into person (id, first_name, last_name, is_admin, created, social_id) values |(3, 'John', 'Adams', 'Y', 
current_timestamp, null)""".stripMargin ) stmt.execute( """insert into person (id, first_name, last_name, is_admin, created, social_id) values |(4, 'Benjamin', 'Franklin', 'Y', current_timestamp, null)""".stripMargin ) stmt.execute( """insert into person (id, first_name, last_name, is_admin, created, social_id) values |(5, 'John', 'Jay', 'Y', current_timestamp, null)""".stripMargin ) stmt.execute( """insert into person (id, first_name, last_name, is_admin, created, social_id) values |(6, 'Thomas', 'Jefferson', 'Y', current_timestamp, null)""".stripMargin ) stmt.execute( """insert into person (id, first_name, last_name, is_admin, created, social_id) values |(7, 'James', 'Madison', 'Y', current_timestamp, null)""".stripMargin ) stmt.execute( """insert into person (id, first_name, last_name, is_admin, created, social_id) values |(8, null, 'Nagro', 'N', current_timestamp, null)""".stripMargin ) try stmt.execute("drop table my_user") catch case _ => () stmt.execute( """create table my_user ( | first_name varchar2(200) not null, | id number generated always as identity, | primary key (id) |) |""".stripMargin ) stmt.execute("""insert into my_user (first_name) values ('George')""") stmt.execute( """insert into my_user (first_name) values ('Alexander')""" ) stmt.execute("""insert into my_user (first_name) values ('John')""") try stmt.execute("drop table no_id") catch case _ => () stmt.execute( """create table no_id ( | created_at timestamp not null, | user_name varchar2(200) not null, | user_action varchar2(200) not null |) |""".stripMargin ) stmt.execute( """insert into no_id (created_at, user_name, user_action) values |(timestamp '1997-08-15 00:00:00', 'Josh', 'clicked a button')""".stripMargin ) stmt.execute( """insert into no_id (created_at, user_name, user_action) values |(timestamp '1997-08-16 00:00:00', 'Danny', 'opened a toaster')""".stripMargin ) stmt.execute( """insert into no_id (created_at, user_name, user_action) values |(timestamp '1997-08-17 00:00:00', 
'Greg', 'ran some QA tests')""".stripMargin ) try stmt.execute("drop table big_dec") catch case _ => () stmt.execute( """create table big_dec ( | id number primary key, | my_big_dec numeric |)""".stripMargin ) stmt.execute("insert into big_dec (id, my_big_dec) values (1, 123)") stmt.execute("insert into big_dec (id, my_big_dec) values (2, null)") try stmt.execute("drop table my_time") catch case _ => () stmt.execute( """create table my_time ( | a timestamp with local time zone not null, | b date not null, | c VARCHAR2(100) not null, | d timestamp not null |) |""".stripMargin ) stmt.execute( "insert into my_time values (timestamp '2025-03-30 21:19:23 -00:00', date '2025-03-30', '05:20:04', timestamp '2025-04-02 20:16:38')" ) stmt.execute( "insert into my_time values (timestamp '2025-03-31 21:19:23 -00:00', date '2025-03-31', '05:30:04', timestamp '2025-04-02 20:17:38')" ) ) .get Transactor(ds) end xa end OracleTests ================================================ FILE: magnum/src/test/scala/PgTests.scala ================================================ import com.augustnagro.magnum.* import com.dimafeng.testcontainers.PostgreSQLContainer import com.dimafeng.testcontainers.munit.fixtures.TestContainersFixtures import munit.{AnyFixture, FunSuite, Location} import org.postgresql.ds.PGSimpleDataSource import org.testcontainers.utility.DockerImageName import shared.* import java.nio.file.{Files, Path} import scala.util.Using import scala.util.Using.Manager class PgTests extends FunSuite, TestContainersFixtures: sharedTests(this, PostgresDbType, xa) val pgContainer = ForAllContainerFixture( PostgreSQLContainer .Def(dockerImageName = DockerImageName.parse("postgres:17.0")) .createContainer() ) override def munitFixtures: Seq[AnyFixture[_]] = super.munitFixtures :+ pgContainer def xa(): Transactor = val ds = PGSimpleDataSource() val pg = pgContainer() ds.setUrl(pg.jdbcUrl) ds.setUser(pg.username) ds.setPassword(pg.password) val tableDDLs = Vector( "/pg/car.sql", 
"/pg/person.sql", "/pg/my-user.sql", "/pg/no-id.sql", "/pg/big-dec.sql", "/pg/my-time.sql" ).map(p => Files.readString(Path.of(getClass.getResource(p).toURI))) Manager(use => val con = use(ds.getConnection) val stmt = use(con.createStatement) for ddl <- tableDDLs do stmt.execute(ddl) ).get Transactor(ds) end xa end PgTests ================================================ FILE: magnum/src/test/scala/SqliteTests.scala ================================================ import com.augustnagro.magnum.* import com.augustnagro.magnum.UUIDCodec.VarCharUUIDCodec import munit.FunSuite import org.sqlite.SQLiteDataSource import shared.* import java.nio.file.Files import java.time.{LocalDate, LocalDateTime, LocalTime, OffsetDateTime} import java.util.UUID import scala.util.Using import scala.util.Using.Manager class SqliteTests extends FunSuite: given DbCodec[OffsetDateTime] = DbCodec[String].biMap(OffsetDateTime.parse, _.toString) given DbCodec[LocalDate] = DbCodec[String].biMap(LocalDate.parse, _.toString) given DbCodec[UUID] = DbCodec[String].biMap(UUID.fromString, _.toString) given DbCodec[Boolean] = DbCodec[Int].biMap(_ != 0, b => if b then 1 else 0) given DbCodec[BigDecimal] = DbCodec[String].biMap(BigDecimal.apply, _.toString()) given DbCodec[LocalTime] = DbCodec[String].biMap(LocalTime.parse, _.toString) given DbCodec[LocalDateTime] = DbCodec[String].biMap(LocalDateTime.parse, _.toString) sharedTests(this, SqliteDbType, xa) lazy val sqliteDbPath = Files.createTempFile(null, ".db").toAbsolutePath def xa(): Transactor = val ds = SQLiteDataSource() ds.setUrl("jdbc:sqlite:" + sqliteDbPath) Manager(use => val con = use(ds.getConnection) val stmt = use(con.createStatement) stmt.execute("drop table if exists car") stmt.execute( """create table car ( | model text not null, | id integer primary key, | top_speed integer not null, | vin integer, | color text check (color in ('Red', 'Green', 'Blue')) not null, | created text not null |)""".stripMargin ) stmt.execute( """insert into 
car (model, id, top_speed, vin, color, created) values |('McLaren Senna', 1, 208, 123, 'Red', '2024-11-24T22:17:30.000000000Z'), |('Ferrari F8 Tributo', 2, 212, 124, 'Green', '2024-11-24T22:17:31.000000000Z'), |('Aston Martin Superleggera', 3, 211, null, 'Blue', '2024-11-24T22:17:32.000000000Z')""".stripMargin ) stmt.execute("drop table if exists person") stmt.execute( """create table person ( | id integer primary key, | first_name text, | last_name text not null, | is_admin integer not null, | created text not null, | social_id varchar(36) |)""".stripMargin ) stmt.execute( """insert into person (id, first_name, last_name, is_admin, created, social_id) values |(1, 'George', 'Washington', true, '2024-11-24T22:17:30.000000000Z', 'd06443a6-3efb-46c4-a66a-a80a8a9a5388'), |(2, 'Alexander', 'Hamilton', true, '2024-11-24T22:17:30.000000000Z', '529b6c6d-7228-4da5-81d7-13b706f78ddb'), |(3, 'John', 'Adams', true, '2024-11-24T22:17:30.000000000Z', null), |(4, 'Benjamin', 'Franklin', true, '2024-11-24T22:17:30.000000000Z', null), |(5, 'John', 'Jay', true, '2024-11-24T22:17:30.000000000Z', null), |(6, 'Thomas', 'Jefferson', true, '2024-11-24T22:17:30.000000000Z', null), |(7, 'James', 'Madison', true, '2024-11-24T22:17:30.000000000Z', null), |(8, null, 'Nagro', false, '2024-11-24T22:17:30.000000000Z', null)""".stripMargin ) stmt.execute("drop table if exists my_user") stmt.execute( """create table my_user ( | first_name text not null, | id integer primary key |)""".stripMargin ) stmt.execute( """insert into my_user (first_name) values |('George'), |('Alexander'), |('John')""".stripMargin ) stmt.execute("drop table if exists no_id") stmt.execute( """create table no_id ( | created_at text not null, | user_name text not null, | user_action text not null |)""".stripMargin ) stmt.execute( """insert into no_id values |('2024-11-24T22:17:30.000000000Z', 'Josh', 'clicked a button'), |('2024-11-24T22:17:30.000000000Z', 'Danny', 'opened a toaster'), |('2024-11-24T22:17:30.000000000Z', 
'Greg', 'ran some QA tests');""".stripMargin ) stmt.execute("drop table if exists big_dec") stmt.execute( """create table big_dec ( | id integer primary key, | my_big_dec text |)""".stripMargin ) stmt.execute( """insert into big_dec values |(1, '123'), |(2, null)""".stripMargin ) stmt.execute("drop table if exists my_time") stmt.execute( """create table my_time ( | a text not null, | b text not null, | c text not null, | d text not null |)""".stripMargin ) stmt.execute( """insert into my_time values |('2025-03-30T21:19:23Z', '2025-03-30', '05:20:04', '2025-04-02T20:16:38'), |('2025-03-31T21:19:23Z', '2025-03-31', '05:30:04', '2025-04-02T20:17:38')""".stripMargin ) ).get Transactor(ds) end xa end SqliteTests ================================================ FILE: magnum/src/test/scala/opaques.scala ================================================ import com.augustnagro.magnum.* object opaques: opaque type Age = Int object Age: def apply(value: Int): Age = value extension (opaque: Age) def value: Int = opaque given DbCodec[opaques.Age] = DbCodec.IntCodec.biMap(opaques.Age(_), _.value) ================================================ FILE: magnum/src/test/scala/shared/BigDecTests.scala ================================================ package shared import com.augustnagro.magnum.* import munit.{FunSuite, Location} def bigDecTests(suite: FunSuite, dbType: DbType, xa: () => Transactor)(using Location, DbCodec[BigDecimal] ): Unit = import suite.* @Table(dbType, SqlNameMapper.CamelToSnakeCase) case class BigDec(id: Int, myBigDec: Option[BigDecimal]) derives DbCodec val bigDecRepo = Repo[BigDec, BigDec, Int] test("option of bigdecimal"): xa().transact: val bigDec1 = bigDecRepo.findById(1).get assert(bigDec1.myBigDec == Some(BigDecimal(123))) val bigDec2 = bigDecRepo.findById(2).get assert(bigDec2.myBigDec == None) ================================================ FILE: magnum/src/test/scala/shared/Color.scala ================================================ package shared 
import com.augustnagro.magnum.DbCodec

enum Color derives DbCodec:
  case Red, Green, Blue

================================================ FILE: magnum/src/test/scala/shared/DateTimeTests.scala ================================================

package shared

import com.augustnagro.magnum.*
import munit.{FunSuite, Location}

import java.sql.Connection
import java.time.{
  LocalDate,
  LocalDateTime,
  LocalTime,
  OffsetDateTime,
  ZoneOffset
}
import scala.util.Using

/** Shared tests verifying that every JDBC 4.2 date/time type round-trips
  * through the given DbType via its DbCodec instances.
  */
def dateTimeTests(suite: FunSuite, dbType: DbType, xa: () => Transactor)(using
    Location,
    DbCodec[OffsetDateTime],
    DbCodec[LocalDate],
    DbCodec[LocalTime]
): Unit =
  import suite.*

  @Table(dbType, SqlNameMapper.CamelToSnakeCase)
  case class MyTime(
      a: OffsetDateTime,
      b: LocalDate,
      c: LocalTime,
      d: LocalDateTime
  ) derives DbCodec

  val myTimeRepo = Repo[MyTime, MyTime, Null]

  // Local builder so the fixture rows below read as plain string literals.
  def row(a: String, b: String, c: String, d: String): MyTime =
    MyTime(
      a = OffsetDateTime.parse(a),
      b = LocalDate.parse(b),
      c = LocalTime.parse(c),
      d = LocalDateTime.parse(d)
    )

  // Must match the rows seeded by the my-time.sql fixture scripts.
  val all = Vector(
    row("2025-03-30T21:19:23Z", "2025-03-30", "05:20:04", "2025-04-02T20:16:38"),
    row("2025-03-31T21:19:23Z", "2025-03-31", "05:30:04", "2025-04-02T20:17:38")
  )

  test("can read all JDBC 4.2 time types"):
    // https://jcp.org/aboutJava/communityprocess/maintenance/jsr221/JDBC4.2MR-Oct232013.pdf (table B-4)
    // https://jdbc.postgresql.org/documentation/query/
    xa().connect:
      assertEquals(myTimeRepo.findAll, all)

  test("can write all JDBC 4.2 time types"):
    xa().connect:
      val newTime =
        row("2025-04-05T21:18:23Z", "2025-04-01", "05:20:04", "2025-04-02T20:17:38")
      myTimeRepo.insert(newTime)
      // newTime's `a` sorts after both fixture rows, so ordering by `a`
      // yields the fixture rows followed by the inserted one.
      val res = sql"SELECT * FROM my_time ORDER BY a".query[MyTime].run()
      assertEquals(all :+ newTime, res)
end dateTimeTests

================================================ FILE: magnum/src/test/scala/shared/EmbeddedFragTests.scala ================================================
package shared import com.augustnagro.magnum.* import munit.{FunSuite, Location} import java.util.UUID def embeddedFragTests(suite: FunSuite, dbType: DbType, xa: () => Transactor)( using Location ): Unit = import suite.* test("embed Frag into Frag"): def findPersonCnt(filter: Frag)(using DbCon): Int = val x = sql"id != ${util.Random.nextInt(20) + 20}" sql"SELECT count(*) FROM person WHERE $filter AND $x" .query[Int] .run() .head val isAdminFrag = if dbType == OracleDbType then sql"is_admin = 'Y'" else sql"is_admin = true" xa().connect: val johnCnt = findPersonCnt(sql"$isAdminFrag AND first_name = 'John'") assert(johnCnt == 2) test("embedded frag param exprs should be evaluated only once"): object Holder: var uuid: UUID = _ def set(uuid: UUID): UUID = this.uuid = uuid uuid val frag = sql"select * from person where ${sql"social_id = ${Holder.set(UUID.randomUUID)}"}" assert(frag.params.size == 1) assert(frag.params.head == Holder.uuid) end embeddedFragTests ================================================ FILE: magnum/src/test/scala/shared/EntityCreatorTests.scala ================================================ package shared import com.augustnagro.magnum.* import com.augustnagro.magnum.SqlException import munit.{FunSuite, Location} import scala.util.Using def entityCreatorTests(suite: FunSuite, dbType: DbType, xa: () => Transactor)( using Location ): Unit = import suite.* if dbType == ClickhouseDbType then return case class MyUserCreator(firstName: String) derives DbCodec @Table(dbType, SqlNameMapper.CamelToSnakeCase) case class MyUser(firstName: String, id: Long) derives DbCodec val userRepo = Repo[MyUserCreator, MyUser, Long] val user = TableInfo[MyUserCreator, MyUser, Long] test("insert EntityCreator"): xa().connect: userRepo.insert(MyUserCreator("Ash")) userRepo.insert(MyUserCreator("Steve")) assert(userRepo.count == 5L) assert(userRepo.findAll.map(_.firstName).contains("Steve")) test("insertReturning EntityCreator"): assume(dbType != MySqlDbType) assume(dbType 
!= SqliteDbType) xa().connect: val user = userRepo.insertReturning(MyUserCreator("Ash")) assert(user.firstName == "Ash") test("insertAllReturning EntityCreator"): assume(dbType != MySqlDbType) assume(dbType != SqliteDbType) xa().connect: val newUsers = Vector( MyUserCreator("Ash"), MyUserCreator("Steve"), MyUserCreator("Josh") ) val users = userRepo.insertAllReturning(newUsers) assert(userRepo.count == 6L) assert(users.size == 3) assert(users.last.firstName == newUsers.last.firstName) test("insert invalid EntityCreator"): intercept[SqlException]: xa().connect: val invalidUser = MyUserCreator(null) userRepo.insert(invalidUser) test("insertAll EntityCreator"): xa().connect: val newUsers = Vector( MyUserCreator("Ash"), MyUserCreator("Steve"), MyUserCreator("Josh") ) userRepo.insertAll(newUsers) assert(userRepo.count == 6L) assert( userRepo.findAll.map(_.firstName).contains(newUsers.last.firstName) ) test("custom insert EntityCreator"): xa().connect: val u = MyUserCreator("Ash") val update = sql"insert into $user ${user.insertColumns} values ($u)".update assertNoDiff( update.frag.sqlString, "insert into my_user (first_name) values (?)" ) val rowsInserted = update.run() assert(rowsInserted == 1) assert(userRepo.count == 4L) assert(userRepo.findAll.exists(_.firstName == "Ash")) test("custom update EntityCreator"): xa().connect: val u = userRepo.findAll.head val newName = "Ash" val update = sql"update $user set ${user.firstName} = $newName where ${user.id} = ${u.id}".update assertNoDiff( update.frag.sqlString, "update my_user set first_name = ? where id = ?" 
) val rowsUpdated = update.run() assert(rowsUpdated == 1) assert(userRepo.findAll.exists(_.firstName == "Ash")) test(".returning iterator"): assume(dbType != MySqlDbType) assume(dbType != SqliteDbType) xa().connect: Using.Manager(implicit use => val it = if dbType == H2DbType then sql"INSERT INTO $user ${user.insertColumns} VALUES ('Bob')" .returningKeys[Long](user.id) .iterator() else sql"INSERT INTO $user ${user.insertColumns} VALUES ('Bob') RETURNING ${user.id}" .returning[Long] .iterator() assert(it.size == 1) ) end entityCreatorTests ================================================ FILE: magnum/src/test/scala/shared/ImmutableRepoTests.scala ================================================ package shared import com.augustnagro.magnum.* import munit.{FunSuite, Location} import java.sql.{Connection, PreparedStatement, ResultSet} import java.time.{OffsetDateTime, ZoneOffset} import scala.util.Using def immutableRepoTests(suite: FunSuite, dbType: DbType, xa: () => Transactor)( using Location, DbCodec[OffsetDateTime] ): Unit = import suite.* @Table(dbType, SqlNameMapper.CamelToSnakeCase) case class Car( model: String, @Id id: Long, topSpeed: Int, @SqlName("vin") vinNumber: Option[Int], color: Color, created: OffsetDateTime ) derives DbCodec val carRepo = ImmutableRepo[Car, Long] val car = TableInfo[Car, Car, Long] val allCars = Vector( Car( model = "McLaren Senna", id = 1L, topSpeed = 208, vinNumber = Some(123), color = Color.Red, created = OffsetDateTime.parse("2024-11-24T22:17:30.000000000Z") ), Car( model = "Ferrari F8 Tributo", id = 2L, topSpeed = 212, vinNumber = Some(124), color = Color.Green, created = OffsetDateTime.parse("2024-11-24T22:17:31.000000000Z") ), Car( model = "Aston Martin Superleggera", id = 3L, topSpeed = 211, vinNumber = None, color = Color.Blue, created = OffsetDateTime.parse("2024-11-24T22:17:32.000000000Z") ) ) test("count"): xa().connect: assert(carRepo.count == 3L) test("existsById"): xa().connect: assert(carRepo.existsById(3L)) 
assert(!carRepo.existsById(4L)) test("findAll"): val cars = xa().connect: carRepo.findAll assert(cars == allCars) test("findById"): xa().connect: assert(carRepo.findById(3L).get == allCars.last) assert(carRepo.findById(4L) == None) test("findAllByIds"): assume(dbType != ClickhouseDbType) assume(dbType != MySqlDbType) assume(dbType != OracleDbType) assume(dbType != SqliteDbType) xa().connect: val ids = carRepo.findAllById(Vector(1L, 3L)).map(_.id) assert(ids == Vector(1L, 3L)) test("serializable transaction"): xa() .withConnectionConfig(withSerializable) .transact: assert(carRepo.count == 3L) def withSerializable(con: Connection): Unit = con.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE) test("select query"): xa().connect: val minSpeed: Int = 210 val query = sql"select ${car.all} from $car where ${car.topSpeed} > $minSpeed" .query[Car] assertNoDiff( query.frag.sqlString, "select model, id, top_speed, vin, color, created from car where top_speed > ?" ) assert(query.frag.params == Vector(minSpeed)) assert(query.run() == allCars.tail) test("select query with aliasing"): xa().connect: val minSpeed = 210 val cAlias = car.alias("c") val query = sql"select ${cAlias.all} from $cAlias where ${cAlias.topSpeed} > $minSpeed" .query[Car] assertNoDiff( query.frag.sqlString, "select c.model, c.id, c.top_speed, c.vin, c.color, c.created from car c where c.top_speed > ?" 
) assert(query.frag.params == Vector(minSpeed)) assert(query.run() == allCars.tail) test("select via option"): xa().connect: val vin = Option(124) val cars = sql"select * from car where vin = $vin" .query[Car] .run() assert(cars == allCars.filter(_.vinNumber == vin)) test("tuple select"): xa().connect: val tuples = sql"select model, color from car where id = 2" .query[(String, Color)] .run() assert(tuples == Vector(allCars(1).model -> allCars(1).color)) test("reads null int as None and not Some(0)"): xa().connect: assert(carRepo.findById(3L).get.vinNumber == None) test("created timestamps should match"): xa().connect: assert(carRepo.findAll.map(_.created) == allCars.map(_.created)) test(".query iterator"): xa().connect: Using.Manager(implicit use => val it = sql"SELECT * FROM car".query[Car].iterator() assert(it.map(_.id).size == 3) ) test("sql interpolator selects right DbCodec"): case class Coord(x: Double, y: Double) given DbCodec[Coord] with def cols: IArray[Int] = IArray(java.sql.Types.BINARY) def queryRepr: String = "MyCoord(?)" def readSingle(rs: ResultSet, pos: Int): Coord = ??? def readSingleOption(rs: ResultSet, pos: Int): Option[Coord] = ??? def writeSingle(coord: Coord, ps: PreparedStatement, pos: Int): Unit = ??? 
val myCoord = Coord(1, 2) val query = sql"SELECT * FROM test WHERE coord = $myCoord" assert(query.sqlString.contains("MyCoord(?)")) end immutableRepoTests ================================================ FILE: magnum/src/test/scala/shared/MultilineFragTests.scala ================================================ package shared import com.augustnagro.magnum.* import munit.{FunSuite, Location} def multilineFragTests(suite: FunSuite, dbType: DbType, xa: () => Transactor)( using Location ): Unit = import suite.* def testSelectPersonFrag(frag: Frag): Unit = xa().connect: val res = frag.query[Int].run() assert(res.size == 1) assert(frag.sqlString == "SELECT count(*)\nFROM person\nWHERE id = ?") assert(frag.params.size == 1) def personId = util.Random.nextInt(8) + 1 test("multiline Frag"): testSelectPersonFrag( frag = sql"""SELECT count(*) FROM person WHERE id = $personId""" ) test("multiline Frag with stripMargin"): testSelectPersonFrag( frag = sql"""SELECT count(*) |FROM person |WHERE id = $personId""".stripMargin ) test("multiline Frag with stripMargin('*')"): testSelectPersonFrag( frag = sql"""SELECT count(*) *FROM person *WHERE id = $personId""".stripMargin('*') ) end multilineFragTests ================================================ FILE: magnum/src/test/scala/shared/NoIdTests.scala ================================================ package shared import com.augustnagro.magnum.* import munit.{FunSuite, Location} import java.time.OffsetDateTime def noIdTests(suite: FunSuite, dbType: DbType, xa: () => Transactor)(using Location, DbCodec[OffsetDateTime] ): Unit = import suite.* @Table(dbType, SqlNameMapper.CamelToSnakeCase) case class NoId( createdAt: OffsetDateTime, userName: String, userAction: String ) derives DbCodec val noIdRepo = Repo[NoId, NoId, Null]() val noIdTableInfo = TableInfo[NoId, NoId, Null] test("insert NoId entities"): xa().connect: val entity = NoId(OffsetDateTime.now, "Dan", "Fishing") noIdRepo.insert(entity) assert(noIdRepo.findAll.exists(_.userName 
== "Dan")) test("select NoId usernames via TableInfo"): xa().connect: val userNames = sql"SELECT ${noIdTableInfo.userName} FROM $noIdTableInfo" .query[String] .run() assert(userNames.size == 3) end noIdTests ================================================ FILE: magnum/src/test/scala/shared/OptionalProductTests.scala ================================================ package shared import com.augustnagro.magnum.* import munit.{FunSuite, Location} import java.time.OffsetDateTime def optionalProductTests( suite: FunSuite, dbType: DbType, xa: () => Transactor )(using Location, DbCodec[BigDecimal], DbCodec[OffsetDateTime]): Unit = import suite.* @Table(dbType, SqlNameMapper.CamelToSnakeCase) case class Car( model: String, @Id id: Long, topSpeed: Int, @SqlName("vin") vinNumber: Option[Int], color: Color, created: OffsetDateTime ) derives DbCodec @Table(dbType, SqlNameMapper.CamelToSnakeCase) case class BigDec(id: Int, myBigDec: Option[BigDecimal]) derives DbCodec test("left join with optional product type"): assume(dbType != ClickhouseDbType) xa().connect: val res = sql"select * from car c left join big_dec bd on bd.id = c.id" .query[(Car, Option[BigDec])] .run() assert(res.exists((_, bigDec) => bigDec.isEmpty)) end optionalProductTests ================================================ FILE: magnum/src/test/scala/shared/RepoTests.scala ================================================ package shared import com.augustnagro.magnum.* import com.augustnagro.magnum.{BatchUpdateResult, SqlException, ColumnNames} import munit.FunSuite import java.time.OffsetDateTime import java.util.UUID def repoTests(suite: FunSuite, dbType: DbType, xa: () => Transactor)(using munit.Location, DbCodec[UUID], DbCodec[Boolean], DbCodec[OffsetDateTime] ): Unit = import suite.* @Table(dbType, SqlNameMapper.CamelToSnakeCase) case class Person( id: Long, firstName: Option[String], lastName: String, isAdmin: Boolean, created: OffsetDateTime, socialId: Option[UUID] ) derives DbCodec val personRepo = 
Repo[Person, Person, Long] val person = TableInfo[Person, Person, Long] test("delete"): xa().connect: val p = personRepo.findById(1L).get personRepo.delete(p) assert(personRepo.findById(1L) == None) test("delete invalid"): xa().connect: personRepo.delete( Person(999L, None, "", false, OffsetDateTime.now, None) ) assert(8L == personRepo.count) test("deleteById"): xa().connect: personRepo.deleteById(1L) personRepo.deleteById(2L) personRepo.deleteById(1L) assert(personRepo.findAll.size == 6) test("deleteAll"): xa().connect: val p1 = personRepo.findById(1L).get val p2 = p1.copy(id = 2L) val p3 = p1.copy(id = 999L) val expectedRowsUpdate = dbType match case ClickhouseDbType => 3 case _ => 2 val res = personRepo.deleteAll(Vector(p1, p2, p3)) assert(res == BatchUpdateResult.Success(expectedRowsUpdate)) assert(6L == personRepo.count) test("deleteAllById"): xa().connect: val expectedRowsUpdate = dbType match case ClickhouseDbType => 3 case _ => 2 val res = personRepo.deleteAllById(Vector(1L, 2L, 1L)) assert(res == BatchUpdateResult.Success(expectedRowsUpdate)) assert(6L == personRepo.count) test("truncate"): xa().connect: personRepo.truncate() assert(personRepo.count == 0L) test("insert"): xa().connect: personRepo.insert( Person( id = 9L, firstName = Some("John"), lastName = "Smith", isAdmin = false, socialId = Some(UUID.randomUUID), created = OffsetDateTime.now ) ) personRepo.insert( Person( id = 10L, firstName = None, lastName = "Prince", isAdmin = true, socialId = None, created = OffsetDateTime.now ) ) assert(personRepo.count == 10L) assert(personRepo.findAll.map(_.lastName).contains("Smith")) test("insertReturning"): assume(dbType != MySqlDbType) assume(dbType != SqliteDbType) xa().connect: val person = personRepo.insertReturning( Person( id = 9L, firstName = Some("John"), lastName = "Smith", isAdmin = false, socialId = None, created = OffsetDateTime.now ) ) assert(person.lastName == "Smith") test("insertAllReturning"): assume(dbType != MySqlDbType) assume(dbType != 
SqliteDbType) xa().connect: val newPc = Vector( Person( id = 9L, firstName = Some("Chandler"), lastName = "Johnsored", isAdmin = true, socialId = Some(UUID.randomUUID()), created = OffsetDateTime.now ), Person( id = 10L, firstName = None, lastName = "Odysseus", isAdmin = false, socialId = None, created = OffsetDateTime.now ), Person( id = 11L, firstName = Some("Jorge"), lastName = "Masvidal", isAdmin = true, socialId = None, created = OffsetDateTime.now ) ) val people = personRepo.insertAllReturning(newPc) assert(personRepo.count == 11L) assert(people.size == 3) assert(people.last.lastName == newPc.last.lastName) test("insert invalid"): intercept[SqlException]: xa().connect: val invalidP = Person(9L, None, null, false, OffsetDateTime.now, None) personRepo.insert(invalidP) test("update"): assume(dbType != ClickhouseDbType) xa().connect: val p = personRepo.findById(1L).get val updated = p.copy(firstName = None, isAdmin = false) personRepo.update(updated) assert(personRepo.findById(1L).get == updated) test("update invalid"): assume(dbType != ClickhouseDbType) intercept[SqlException]: xa().connect: val p = personRepo.findById(1L).get val updated = p.copy(lastName = null) personRepo.update(updated) test("insertAll"): xa().connect: val newPeople = Vector( Person( id = 9L, firstName = Some("Chandler"), lastName = "Johnsored", isAdmin = true, socialId = Some(UUID.randomUUID()), created = OffsetDateTime.now ), Person( id = 10L, firstName = None, lastName = "Odysseus", isAdmin = false, socialId = None, created = OffsetDateTime.now ), Person( id = 11L, firstName = Some("Jorge"), lastName = "Masvidal", isAdmin = true, socialId = None, created = OffsetDateTime.now ) ) personRepo.insertAll(newPeople) assert(personRepo.count == 11L) assert( personRepo.findAll.map(_.lastName).contains(newPeople.last.lastName) ) test("updateAll"): assume(dbType != ClickhouseDbType) xa().connect: val newPeople = Vector( personRepo.findById(1L).get.copy(lastName = "Peterson"), 
personRepo.findById(2L).get.copy(lastName = "Moreno") ) val res = personRepo.updateAll(newPeople) assert(res == BatchUpdateResult.Success(2)) assert(personRepo.findById(1L).get == newPeople(0)) assert(personRepo.findById(2L).get == newPeople(1)) test("transact"): assume(dbType != ClickhouseDbType) val count = xa().transact: val p = Person( id = 9L, firstName = Some("Chandler"), lastName = "Brown", isAdmin = false, created = OffsetDateTime.now, socialId = None ) personRepo.insert(p) personRepo.count assert(count == 9L) test("transact failed"): assume(dbType != ClickhouseDbType) val transactor = xa() val p = Person( id = 9L, firstName = Some("Chandler"), lastName = "Brown", isAdmin = false, created = OffsetDateTime.now, socialId = None ) try transactor.transact: personRepo.insert(p) throw RuntimeException() fail("should not reach") catch case _: Exception => transactor.transact: assert(personRepo.count == 8L) test("custom insert"): xa().connect: val p = Person( id = 9L, firstName = Some("Chandler"), lastName = "Brown", isAdmin = false, socialId = None, created = OffsetDateTime.now ) val update = sql"insert into $person ${person.insertColumns} values ($p)".update assertNoDiff( update.frag.sqlString, "insert into person (id, first_name, last_name, is_admin, created, social_id) values (?, ?, ?, ?, ?, ?)" ) val rowsInserted = update.run() assert(rowsInserted == 1) assert(personRepo.count == 9L) assert( personRepo.findAll.exists(fetched => fetched.firstName == p.firstName && fetched.lastName == p.lastName && fetched.isAdmin == p.isAdmin ) ) test("custom update"): xa().connect: val p = Person( id = 9L, firstName = Some("Chandler"), lastName = "Brown", isAdmin = false, socialId = Some(UUID.randomUUID()), created = OffsetDateTime.now ) personRepo.insert(p) val newIsAdmin = true val update = sql"update $person set ${person.isAdmin} = $newIsAdmin where ${person.id} = ${p.id}".update assertNoDiff( update.frag.sqlString, "update person set is_admin = ? where id = ?" 
) val rowsUpdated = update.run() assert(rowsUpdated == 1) assert(personRepo.findById(p.id).get.isAdmin == true) test("custom returning a single column"): assume(dbType != ClickhouseDbType) assume(dbType != MySqlDbType) assume(dbType != SqliteDbType) xa().connect: val personId = if dbType == H2DbType then sql"""insert into person (id, first_name, last_name, created, is_admin) values (9, 'Arton', 'Senna', now(), true) """ .returningKeys[Long]("id") .run() .head else if dbType == OracleDbType then sql"""insert into person (id, first_name, last_name, created, is_admin) values (9, 'Arton', 'Senna', current_timestamp, 'Y')""" .returningKeys[Long]("id") .run() .head else sql"""insert into person (id, first_name, last_name, created, is_admin) values (9, 'Arton', 'Senna', now(), 'Y') RETURNING id """.returning[Long].run().head assert(personRepo.findById(personId).get.lastName == "Senna") test("custom returning multiple columns"): assume(dbType != ClickhouseDbType) assume(dbType != MySqlDbType) assume(dbType != SqliteDbType) assume(dbType != OracleDbType) xa().connect: val cols = if dbType == H2DbType then sql"""insert into person (id, first_name, last_name, created, is_admin) values (9, 'Arton', 'Senna', now(), true), (10, 'Demo', 'User', now(), false) """ .returningKeys[(Long, OffsetDateTime)]( person.id, person.created ) .run() else sql"""insert into person (id, first_name, last_name, created, is_admin) values (9, 'Arton', 'Senna', now(), true), (10, 'Demo', 'User', now(), false) RETURNING id, created """.returning[(Long, OffsetDateTime)].run() val newLastNames = cols.map((id, _) => personRepo.findById(id).get.lastName) assert(newLastNames == Vector("Senna", "User")) test("custom returning with no rows updated"): assume(dbType != ClickhouseDbType) assume(dbType != MySqlDbType) assume(dbType != SqliteDbType) xa().connect: val personIds = if dbType == H2DbType || dbType == OracleDbType then sql"update person set first_name = 'xxx' where last_name = 'Not Here'" 
.returningKeys[Long](ColumnNames("id", IArray(person.id)))
            .run()
        else
          sql"update person set first_name = 'xxx' where last_name = 'Not Here' returning id"
            .returning[Long]
            .run()
      assert(personIds.isEmpty)

  // Databases lacking RETURNING-on-UPDATE support are excluded; H2/Oracle use
  // the generated-keys path (returningKeys), others the RETURNING clause.
  test("returning non primary key column"):
    assume(dbType != ClickhouseDbType)
    assume(dbType != MySqlDbType)
    assume(dbType != SqliteDbType)
    xa().connect:
      val personFirstNames =
        if dbType == H2DbType || dbType == OracleDbType then
          sql"update person set last_name = 'xxx'"
            .returningKeys[String](person.firstName)
            .run()
        else
          sql"update person set last_name = 'xxx' returning first_name"
            .returning[String]
            .run()
      assert(personFirstNames.nonEmpty)
end repoTests

================================================
FILE: magnum/src/test/scala/shared/SharedTests.scala
================================================
package shared

import com.augustnagro.magnum.*
import munit.{FunSuite, Location}

import java.time.{LocalTime, OffsetDateTime}
import java.util.UUID

// Entry point invoked by each database-specific suite (Pg, MySql, H2, ...):
// registers every shared test group against the given DbType and Transactor.
// The `using` DbCodec instances let each DB supply its own type mappings.
def sharedTests(suite: FunSuite, dbType: DbType, xa: () => Transactor)(using
    Location,
    DbCodec[UUID],
    DbCodec[Boolean],
    DbCodec[OffsetDateTime],
    DbCodec[BigDecimal],
    DbCodec[LocalTime]
): Unit =
  immutableRepoTests(suite, dbType, xa)
  repoTests(suite, dbType, xa)
  entityCreatorTests(suite, dbType, xa)
  specTests(suite, dbType, xa)
  sqlNameTests(suite, dbType, xa)
  noIdTests(suite, dbType, xa)
  embeddedFragTests(suite, dbType, xa)
  multilineFragTests(suite, dbType, xa)
  bigDecTests(suite, dbType, xa)
  optionalProductTests(suite, dbType, xa)
  dateTimeTests(suite, dbType, xa)
  tupleTests(suite, dbType, xa)
  tableInfoTests(suite, dbType, xa)
end sharedTests

================================================
FILE: magnum/src/test/scala/shared/SpecTests.scala
================================================
package shared

import com.augustnagro.magnum.*
import munit.{FunSuite, Location}

import java.time.{OffsetDateTime, ZoneOffset}

// Opaque wrapper over Long, used below to exercise DbCodec.biMap in Specs.
opaque type CarId = Long
object CarId:
  def apply(value: Long): CarId = value
  extension (opaque: CarId) def
value: Long = opaque given DbCodec[CarId] = DbCodec.LongCodec.biMap(CarId.apply, _.value) def specTests(suite: FunSuite, dbType: DbType, xa: () => Transactor)(using Location, DbCodec[OffsetDateTime] ): Unit = import suite.* @Table(dbType, SqlNameMapper.CamelToSnakeCase) case class Car( model: String, @Id id: Long, topSpeed: Int, @SqlName("vin") vinNumber: Option[Int], color: Color, created: OffsetDateTime ) derives DbCodec val carRepo = ImmutableRepo[Car, Long] val car = TableInfo[Car, Car, Long] val allCars = Vector( Car( model = "McLaren Senna", id = 1L, topSpeed = 208, vinNumber = Some(123), color = Color.Red, created = OffsetDateTime.parse("2024-11-24T22:17:30.000000000Z") ), Car( model = "Ferrari F8 Tributo", id = 2L, topSpeed = 212, vinNumber = Some(124), color = Color.Green, created = OffsetDateTime.parse("2024-11-24T22:17:31.000000000Z") ), Car( model = "Aston Martin Superleggera", id = 3L, topSpeed = 211, vinNumber = None, color = Color.Blue, created = OffsetDateTime.parse("2024-11-24T22:17:32.000000000Z") ) ) test("like"): xa().transact: val model = "Ferr%" val spec = Spec[Car].where(sql"model LIKE $model") assert(carRepo.findAll(spec) == Vector(allCars(1))) test("select all"): xa().transact: val spec = Spec[Car] assert(carRepo.findAll(spec) == allCars) test("empty predicate"): xa().transact: val spec = Spec[Car].where(sql"") assert(carRepo.findAll(spec) == allCars) test("predicate having param at end"): xa().transact: val id = CarId(2L) val spec = Spec[Car].where(sql"$id < id") assert(carRepo.findAll(spec) == Vector(allCars.last)) test("AND in where predicate"): xa().transact: val color = Color.Red val model = "MCLAREN SENNA" val spec = Spec[Car].where(sql"color = $color AND $model = upper(model)") assert(carRepo.findAll(spec) == Vector(allCars.head)) test("multiple where parameters"): xa().transact: val color = Color.Red val model = "MCLAREN SENNA" val spec = Spec[Car] .where(sql"color = $color") .where(sql"$model = upper(model)") 
assert(carRepo.findAll(spec) == Vector(allCars.head)) test("orderBy"): xa().transact: val spec = Spec[Car].orderBy("top_speed") assert(carRepo.findAll(spec) == allCars.sortBy(_.topSpeed)) test("orderBy null with sort order and null order"): xa().transact: val spec = Spec[Car] .orderBy("vin", SortOrder.Desc, NullOrder.First) assert(carRepo.findAll(spec) == allCars.reverse) test("limit"): xa().transact: val spec = Spec[Car].limit(2) assert(carRepo.findAll(spec).size == 2) test("offset"): xa().transact: val spec = Spec[Car].offset(1) assert(carRepo.findAll(spec) == allCars.tail) test("seek"): xa().transact: val spec = Spec[Car].seek("id", SeekDir.Gt, 2, SortOrder.Asc) assert(carRepo.findAll(spec).size == 1) test("seek multiple"): xa().transact: val spec = Spec[Car] .seek("id", SeekDir.Lt, 3, SortOrder.Asc) .seek("top_speed", SeekDir.Gt, 210, SortOrder.Asc) assert(carRepo.findAll(spec) == Vector(allCars(1))) test("everything"): xa().transact: val idOpt = Option.empty[CarId] val speed = 210 val spec = Spec[Car] .where(idOpt.map(id => sql"id = $id").getOrElse(sql"")) .where(sql"top_speed > $speed") .orderBy("model", SortOrder.Desc) .limit(1) .seek("vin", SeekDir.Gt, 1, SortOrder.Asc, NullOrder.Last) assert(carRepo.findAll(spec) == Vector(allCars(1))) test("prefix"): xa().transact: val c = car.alias("c") val color = Color.Red val spec = Spec[Car] .prefix(sql"SELECT ${c.all} FROM $c") .where(sql"${c.color} = $color") assert(carRepo.findAll(spec) == Vector(allCars.head)) test("prefix with embedded sql"): xa().transact: val c = car.alias("c") val color = Color.Red val selectPart = sql"SELECT ${c.all}" val fromPart = sql"FROM $c" val spec = Spec[Car] .prefix(sql"$selectPart $fromPart") .where(sql"${c.color} = $color") assert(carRepo.findAll(spec) == Vector(allCars.head)) end specTests ================================================ FILE: magnum/src/test/scala/shared/SqlNameTests.scala ================================================ package shared import 
com.augustnagro.magnum.* import munit.{FunSuite, Location} import java.time.OffsetDateTime def sqlNameTests(suite: FunSuite, dbType: DbType, xa: () => Transactor)(using Location ): Unit = import suite.* @SqlName("car") @Table(dbType, SqlNameMapper.CamelToSnakeCase) case class CustomCar( model: String, @Id id: Long, topSpeed: Int, @SqlName("vin") vinNumber: Option[Int], color: Color, created: OffsetDateTime ) derives DbCodec val customCarRepo = Repo[CustomCar, CustomCar, Long] test("count with manual table name"): val count = connect(xa())(customCarRepo.count) assert(count == 3L) end sqlNameTests ================================================ FILE: magnum/src/test/scala/shared/TableInfoTests.scala ================================================ package shared import com.augustnagro.magnum.* import munit.FunSuite import java.time.OffsetDateTime import java.util.UUID def tableInfoTests(suite: FunSuite, dbType: DbType, xa: () => Transactor)(using munit.Location, DbCodec[UUID], DbCodec[Boolean], DbCodec[OffsetDateTime] ): Unit = import suite.* @Table(dbType, SqlNameMapper.CamelToSnakeCase) case class Person( id: Long, firstName: Option[String], lastName: String, isAdmin: Boolean, created: OffsetDateTime, socialId: Option[UUID] ) derives DbCodec val person = TableInfo[Person, Person, Long] @Table(dbType, SqlNameMapper.CamelToSnakeCase) case class Car( model: String, @Id id: Long, topSpeed: Int, @SqlName("vin") vinNumber: Option[Int], color: Color, created: OffsetDateTime ) derives DbCodec val car = TableInfo[Car, Car, Long] @Table(dbType, SqlNameMapper.CamelToSnakeCase) case class NoId( createdAt: OffsetDateTime, userName: String, userAction: String ) derives DbCodec val noIdTableInfo = TableInfo[NoId, NoId, Null] test("NoId TableInfo.idColumn == None"): assert(noIdTableInfo.idColumn == None) test("can use TableInfo.idColumn to scrap boilerplate"): extension [EC: DbCodec, E, ID](table: TableInfo[EC, E, ID]) def onConflictDoUpdate(entityCreator: EC): Update = val 
updatedCols = table.all.columnNames .filterNot(col => table.idColumn.exists(_.scalaName == col.scalaName)) .map(col => sql"$col = EXCLUDED.$col") .reduceLeft((a, b) => sql"$a, $b") sql"""INSERT INTO $table ${table.insertColumns} VALUES ($entityCreator) |ON CONFLICT DO UPDATE SET $updatedCols |""".stripMargin.update assertEquals( person .onConflictDoUpdate( Person(1, None, "Smith", false, OffsetDateTime.now, None) ) .frag .sqlString, """INSERT INTO person (id, first_name, last_name, is_admin, created, social_id) VALUES (?, ?, ?, ?, ?, ?) |ON CONFLICT DO UPDATE SET first_name = EXCLUDED.first_name, last_name = EXCLUDED.last_name, is_admin = EXCLUDED.is_admin, created = EXCLUDED.created, social_id = EXCLUDED.social_id |""".stripMargin ) assertEquals( car .onConflictDoUpdate( Car("Tesla", 2L, 123, None, Color.Red, OffsetDateTime.now) ) .frag .sqlString, """INSERT INTO car (model, id, top_speed, vin, color, created) VALUES (?, ?, ?, ?, ?, ?) |ON CONFLICT DO UPDATE SET model = EXCLUDED.model, top_speed = EXCLUDED.top_speed, vin = EXCLUDED.vin, color = EXCLUDED.color, created = EXCLUDED.created |""".stripMargin ) assertEquals( noIdTableInfo .onConflictDoUpdate(NoId(OffsetDateTime.now, "abc", "def")) .frag .sqlString, """INSERT INTO no_id (created_at, user_name, user_action) VALUES (?, ?, ?) 
|ON CONFLICT DO UPDATE SET created_at = EXCLUDED.created_at, user_name = EXCLUDED.user_name, user_action = EXCLUDED.user_action |""".stripMargin ) end tableInfoTests ================================================ FILE: magnum/src/test/scala/shared/TupleTests.scala ================================================ package shared import com.augustnagro.magnum.* import munit.{FunSuite, Location} import java.sql.{PreparedStatement, ResultSet} import java.time.OffsetDateTime def tupleTests(suite: FunSuite, dbType: DbType, xa: () => Transactor)(using Location, DbCodec[OffsetDateTime] ): Unit = import suite.* @Table(dbType, SqlNameMapper.CamelToSnakeCase) case class Car( model: String, @Id id: Long, topSpeed: Int, @SqlName("vin") vinNumber: Option[Int], color: Color, created: OffsetDateTime ) derives DbCodec val car = TableInfo[Car, Car, Long] val car1 = Car( model = "Ferrari F8 Tributo", id = 2L, topSpeed = 212, vinNumber = Some(124), color = Color.Green, created = OffsetDateTime.parse("2024-11-24T22:17:31.000000000Z") ) test("large tuple support does not override hand-rolled Tuple[2-4] codecs"): val tuple2ACodec = summon[DbCodec[(String, Color)]] val tuple2BCodec = summon[DbCodec[(String, Int)]] assert(tuple2ACodec.getClass == tuple2BCodec.getClass) val tuple5ACodec = summon[DbCodec[(String, Color, Int, Long, Option[Int])]] assert(tuple5ACodec.getClass != tuple2ACodec.getClass) val tuple5BCodec = summon[DbCodec[(Int, Int, Int, Long, Option[Int])]] assert(tuple5BCodec.getClass != tuple5ACodec.getClass) test("large tuple select"): val tuple = xa().connect: sql"select model, color, top_speed, id, vin from car where id = 2" .query[(String, Color, Int, Long, Option[Int])] .run() .head assert( tuple == (car1.model, car1.color, car1.topSpeed, car1.id, car1.vinNumber) ) test("large tuple select option"): assume(dbType != OracleDbType) val tupleA = xa().connect: sql"select model, color, top_speed, id, vin from car where id = 1" .query[Option[(String, Color, Int, Long, 
Option[Int])]] .run() .head assert(tupleA.isDefined) val someTuple = xa().connect: sql"select 1, 1, 1, 1, 1, 1" .query[Option[(Int, Int, Int, Int, Int, Int)]] .run() .head assert(someTuple.isDefined) val noneTuple = xa().connect: sql"select 1, 1, 1, 1, null, 1" .query[Option[(Int, Int, Int, Int, Int, Int)]] .run() .head assert(noneTuple.isEmpty) val optionTupleOption = xa().connect: sql"select 1, 1, 1, 1, null, 1" .query[Option[(Int, Int, Int, Int, Option[Int], Int)]] .run() .head assert(optionTupleOption.isDefined) test("large tuple write"): val tup = ("Ford Edge", 4L, 101, Option(12345), Color.Red, OffsetDateTime.now) val tupWithSome = ("Ford Edge", 5L, 101, Some(12345), Color.Red, OffsetDateTime.now) xa().connect: sql"insert into $car ${car.insertColumns} values $tup".update.run() sql"insert into $car ${car.insertColumns} values $tupWithSome".update .run() val res = sql"select * from $car where ${car.id} = 4".query[Car].run().head assert(res.color == Color.Red) test("large tuple in large tuple"): assume(dbType != OracleDbType) xa().connect: val tuple = sql"select 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12" .query[(Int, Int, (Int, Int, Int, Int, Int, Int), Int, Int, Int, Int)] .run() assert(tuple.nonEmpty) end tupleTests ================================================ FILE: magnum-pg/src/main/scala/com/augustnagro/magnum/pg/PgCodec.scala ================================================ package com.augustnagro.magnum.pg import com.augustnagro.magnum.DbCodec import org.postgresql.geometric.{ PGbox, PGcircle, PGline, PGlseg, PGpath, PGpoint, PGpolygon } import org.postgresql.util.PGInterval import java.sql import java.sql.{JDBCType, PreparedStatement, ResultSet, Types} import scala.reflect.ClassTag import scala.collection.{IterableFactory, mutable as m} import scala.compiletime.* object PgCodec: inline given ArrayCodec[A](using aCodec: DbCodec[A], aArrayCodec: SqlArrayCodec[A], cTag: ClassTag[Array[A]] ): DbCodec[Array[A]] = inline erasedValue[A] match // 
https://jdbc.postgresql.org/documentation/server-prepare/#arrays case _: Short | _: java.lang.Short | _: Int | _: java.lang.Integer | _: Long | _: java.lang.Long | _: Float | _: java.lang.Float | _: Double | _: java.lang.Double | _: Boolean | _: java.lang.Boolean | _: String | _: IArray[Byte] | _: Array[Byte] => arrayFastPath(aCodec, aArrayCodec, cTag) case _ => arraySlowPath(aCodec, aArrayCodec, cTag) inline given IArrayCodec[A](using aCodec: DbCodec[A], aArrayCodec: SqlArrayCodec[A], cTag: ClassTag[IArray[A]] ): DbCodec[IArray[A]] = inline erasedValue[A] match // https://jdbc.postgresql.org/documentation/server-prepare/#arrays case _: Short | _: java.lang.Short | _: Int | _: java.lang.Integer | _: Long | _: java.lang.Long | _: Float | _: java.lang.Float | _: Double | _: java.lang.Double | _: Boolean | _: java.lang.Boolean | _: String | _: IArray[Byte] | _: Array[Byte] => iArrayFastPath(aCodec, aArrayCodec, cTag) case _ => iArraySlowPath(aCodec, aArrayCodec, cTag) given SeqCodec[A](using aCodec: DbCodec[A], aArrayCodec: SqlArrayCodec[A] ): DbCodec[Seq[A]] = new DbCodec[Seq[A]]: require(aCodec.cols.length == 1) def queryRepr: String = "?" val cols: IArray[Int] = aCodec.cols def readSingle(resultSet: ResultSet, pos: Int): Seq[A] = readCImpl(Seq, resultSet, pos) def readSingleOption(resultSet: ResultSet, pos: Int): Option[Seq[A]] = readCOptImpl(Seq, resultSet, pos) def writeSingle(entity: Seq[A], ps: PreparedStatement, pos: Int): Unit = writeCImpl(entity, ps, pos) given ListCodec[A](using aCodec: DbCodec[A], aArrayCodec: SqlArrayCodec[A] ): DbCodec[List[A]] = new DbCodec[List[A]]: require(aCodec.cols.length == 1) def queryRepr: String = "?" 
val cols: IArray[Int] = aCodec.cols def readSingle(resultSet: ResultSet, pos: Int): List[A] = readCImpl(List, resultSet, pos) def readSingleOption(resultSet: ResultSet, pos: Int): Option[List[A]] = readCOptImpl(List, resultSet, pos) def writeSingle(entity: List[A], ps: PreparedStatement, pos: Int): Unit = writeCImpl(entity, ps, pos) given VectorCodec[A](using aCodec: DbCodec[A], aArrayCodec: SqlArrayCodec[A] ): DbCodec[Vector[A]] = new DbCodec[Vector[A]]: require(aCodec.cols.length == 1) def queryRepr: String = "?" val cols: IArray[Int] = aCodec.cols def readSingle(resultSet: ResultSet, pos: Int): Vector[A] = readCImpl(Vector, resultSet, pos) def readSingleOption(resultSet: ResultSet, pos: Int): Option[Vector[A]] = readCOptImpl(Vector, resultSet, pos) def writeSingle(entity: Vector[A], ps: PreparedStatement, pos: Int): Unit = writeCImpl(entity, ps, pos) given BufferCodec[A](using aCodec: DbCodec[A], aArrayCodec: SqlArrayCodec[A] ): DbCodec[m.Buffer[A]] = new DbCodec[m.Buffer[A]]: require(aCodec.cols.length == 1) private val jdbcTypeName = JDBCType.valueOf(aCodec.cols.head).getName def queryRepr: String = "?" val cols: IArray[Int] = aCodec.cols def readSingle(resultSet: ResultSet, pos: Int): m.Buffer[A] = readCImpl(m.Buffer, resultSet, pos) def readSingleOption(resultSet: ResultSet, pos: Int): Option[m.Buffer[A]] = readCOptImpl(m.Buffer, resultSet, pos) def writeSingle( entity: m.Buffer[A], ps: PreparedStatement, pos: Int ): Unit = writeCImpl(entity, ps, pos) given PgBoxCodec: DbCodec[PGbox] with def queryRepr: String = "?" val cols: IArray[Int] = IArray(Types.JAVA_OBJECT) def readSingle(resultSet: ResultSet, pos: Int): PGbox = resultSet.getObject(pos, classOf[PGbox]) def readSingleOption(resultSet: ResultSet, pos: Int): Option[PGbox] = readOptImpl(this, resultSet, pos) def writeSingle(entity: PGbox, ps: PreparedStatement, pos: Int): Unit = ps.setObject(pos, entity) given PgCircleCodec: DbCodec[PGcircle] with def queryRepr: String = "?" 
val cols: IArray[Int] = IArray(Types.JAVA_OBJECT)
    def readSingle(resultSet: ResultSet, pos: Int): PGcircle =
      resultSet.getObject(pos, classOf[PGcircle])
    def readSingleOption(resultSet: ResultSet, pos: Int): Option[PGcircle] =
      readOptImpl(this, resultSet, pos)
    def writeSingle(entity: PGcircle, ps: PreparedStatement, pos: Int): Unit =
      ps.setObject(pos, entity)

  // Codec for Postgres INTERVAL values via the pgjdbc PGInterval class.
  // Reads with getObject(pos, classOf[...]), writes with setObject, and
  // delegates null detection to readOptImpl (ResultSet.wasNull).
  given PgIntervalCodec: DbCodec[PGInterval] with
    def queryRepr: String = "?"
    val cols: IArray[Int] = IArray(Types.JAVA_OBJECT)
    def readSingle(resultSet: ResultSet, pos: Int): PGInterval =
      resultSet.getObject(pos, classOf[PGInterval])
    def readSingleOption(resultSet: ResultSet, pos: Int): Option[PGInterval] =
      readOptImpl(this, resultSet, pos)
    def writeSingle(entity: PGInterval, ps: PreparedStatement, pos: Int): Unit =
      ps.setObject(pos, entity)

  // Codec for the Postgres LINE geometric type (pgjdbc PGline).
  given PgLineCodec: DbCodec[PGline] with
    def queryRepr: String = "?"
    val cols: IArray[Int] = IArray(Types.JAVA_OBJECT)
    def readSingle(resultSet: ResultSet, pos: Int): PGline =
      resultSet.getObject(pos, classOf[PGline])
    def readSingleOption(resultSet: ResultSet, pos: Int): Option[PGline] =
      readOptImpl(this, resultSet, pos)
    def writeSingle(entity: PGline, ps: PreparedStatement, pos: Int): Unit =
      ps.setObject(pos, entity)

  // Codec for the Postgres LSEG (line segment) type (pgjdbc PGlseg).
  given PgLSegCodec: DbCodec[PGlseg] with
    def queryRepr: String = "?"
    val cols: IArray[Int] = IArray(Types.JAVA_OBJECT)
    def readSingle(resultSet: ResultSet, pos: Int): PGlseg =
      resultSet.getObject(pos, classOf[PGlseg])
    def readSingleOption(resultSet: ResultSet, pos: Int): Option[PGlseg] =
      readOptImpl(this, resultSet, pos)
    def writeSingle(entity: PGlseg, ps: PreparedStatement, pos: Int): Unit =
      ps.setObject(pos, entity)

  // Codec for the Postgres PATH geometric type (pgjdbc PGpath).
  given PgPathCodec: DbCodec[PGpath] with
    def queryRepr: String = "?"
val cols: IArray[Int] = IArray(Types.JAVA_OBJECT)
    def readSingle(resultSet: ResultSet, pos: Int): PGpath =
      resultSet.getObject(pos, classOf[PGpath])
    def readSingleOption(resultSet: ResultSet, pos: Int): Option[PGpath] =
      readOptImpl(this, resultSet, pos)
    def writeSingle(entity: PGpath, ps: PreparedStatement, pos: Int): Unit =
      ps.setObject(pos, entity)

  // Codec for the Postgres POINT geometric type (pgjdbc PGpoint).
  given PgPointCodec: DbCodec[PGpoint] with
    def queryRepr: String = "?"
    val cols: IArray[Int] = IArray(Types.JAVA_OBJECT)
    def readSingle(resultSet: ResultSet, pos: Int): PGpoint =
      resultSet.getObject(pos, classOf[PGpoint])
    def readSingleOption(resultSet: ResultSet, pos: Int): Option[PGpoint] =
      readOptImpl(this, resultSet, pos)
    def writeSingle(entity: PGpoint, ps: PreparedStatement, pos: Int): Unit =
      ps.setObject(pos, entity)

  // Codec for the Postgres POLYGON geometric type (pgjdbc PGpolygon).
  given PgPolygonCodec: DbCodec[PGpolygon] with
    def queryRepr: String = "?"
    val cols: IArray[Int] = IArray(Types.JAVA_OBJECT)
    def readSingle(resultSet: ResultSet, pos: Int): PGpolygon =
      resultSet.getObject(pos, classOf[PGpolygon])
    def readSingleOption(resultSet: ResultSet, pos: Int): Option[PGpolygon] =
      readOptImpl(this, resultSet, pos)
    def writeSingle(entity: PGpolygon, ps: PreparedStatement, pos: Int): Unit =
      ps.setObject(pos, entity)

  // IArray fast path: selected above for element types the pg driver can
  // bind directly (numeric primitives, String, byte arrays), so writeSingle
  // uses a plain setObject instead of per-element conversion.
  private def iArrayFastPath[A](
      aCodec: DbCodec[A],
      aArrayCodec: SqlArrayCodec[A],
      cTag: ClassTag[IArray[A]]
  ): DbCodec[IArray[A]] = new DbCodec[IArray[A]]:
    // Array columns occupy a single result-set position.
    require(aCodec.cols.length == 1)
    def queryRepr: String = "?"
val cols: IArray[Int] = aCodec.cols def readSingle(resultSet: ResultSet, pos: Int): IArray[A] = val jdbcArray = resultSet.getArray(pos) try val arr = aArrayCodec.readArray(jdbcArray.getArray) IArray.unsafeFromArray(arr) finally jdbcArray.free() def readSingleOption(resultSet: ResultSet, pos: Int): Option[IArray[A]] = val jdbcArray = resultSet.getArray(pos) if resultSet.wasNull then None else try val arr = aArrayCodec.readArray(jdbcArray.getArray) Some(IArray.unsafeFromArray(arr)) finally jdbcArray.free() def writeSingle(entity: IArray[A], ps: PreparedStatement, pos: Int): Unit = ps.setObject(pos, entity) private def iArraySlowPath[A]( aCodec: DbCodec[A], aArrayCodec: SqlArrayCodec[A], cTag: ClassTag[IArray[A]] ): DbCodec[IArray[A]] = new DbCodec[IArray[A]]: require(aCodec.cols.length == 1) def queryRepr: String = "?" val cols: IArray[Int] = aCodec.cols def readSingle(resultSet: ResultSet, pos: Int): IArray[A] = val jdbcArray = resultSet.getArray(pos) try val arr = aArrayCodec.readArray(jdbcArray.getArray) IArray.unsafeFromArray(arr) finally jdbcArray.free() def readSingleOption(resultSet: ResultSet, pos: Int): Option[IArray[A]] = val jdbcArray = resultSet.getArray(pos) if resultSet.wasNull then None else try val arr = aArrayCodec.readArray(jdbcArray.getArray) Some(IArray.unsafeFromArray(arr)) finally jdbcArray.free() def writeSingle(entity: IArray[A], ps: PreparedStatement, pos: Int): Unit = val arr = entity.iterator.map(aArrayCodec.toArrayObj).toArray val jdbcArr = ps.getConnection.createArrayOf(aArrayCodec.jdbcTypeName, arr) ps.setArray(pos, jdbcArr) private def arrayFastPath[A]( aCodec: DbCodec[A], aArrayCodec: SqlArrayCodec[A], cTag: ClassTag[Array[A]] ): DbCodec[Array[A]] = new DbCodec[Array[A]]: require(aCodec.cols.length == 1) def queryRepr: String = "?" 
val cols: IArray[Int] = aCodec.cols def readSingle(resultSet: ResultSet, pos: Int): Array[A] = val jdbcArray = resultSet.getArray(pos) try aArrayCodec.readArray(jdbcArray.getArray) finally jdbcArray.free() def readSingleOption(resultSet: ResultSet, pos: Int): Option[Array[A]] = val jdbcArray = resultSet.getArray(pos) if resultSet.wasNull then None else try Some(aArrayCodec.readArray(jdbcArray.getArray)) finally jdbcArray.free() def writeSingle(entity: Array[A], ps: PreparedStatement, pos: Int): Unit = ps.setObject(pos, entity) private def arraySlowPath[A]( aCodec: DbCodec[A], aArrayCodec: SqlArrayCodec[A], cTag: ClassTag[Array[A]] ): DbCodec[Array[A]] = new DbCodec[Array[A]]: require(aCodec.cols.length == 1) def queryRepr: String = "?" val cols: IArray[Int] = aCodec.cols def readSingle(resultSet: ResultSet, pos: Int): Array[A] = val jdbcArray = resultSet.getArray(pos) try aArrayCodec.readArray(jdbcArray.getArray) finally jdbcArray.free() def readSingleOption(resultSet: ResultSet, pos: Int): Option[Array[A]] = val jdbcArray = resultSet.getArray(pos) if resultSet.wasNull then None else try Some(aArrayCodec.readArray(jdbcArray.getArray)) finally jdbcArray.free() def writeSingle(entity: Array[A], ps: PreparedStatement, pos: Int): Unit = val arr = entity.iterator.map(aArrayCodec.toArrayObj).toArray val jdbcArr = ps.getConnection.createArrayOf(aArrayCodec.jdbcTypeName, arr) ps.setArray(pos, jdbcArr) private inline def readCImpl[C[_], A]( factory: IterableFactory[C], resultSet: ResultSet, pos: Int )(using arrayCodec: SqlArrayCodec[A]): C[A] = val jdbcArray = resultSet.getArray(pos) try val arr = arrayCodec.readArray(jdbcArray.getArray) factory.from(arr) finally jdbcArray.free() private inline def readCOptImpl[C[_], A]( factory: IterableFactory[C], resultSet: ResultSet, pos: Int )(using arrayCodec: SqlArrayCodec[A]): Option[C[A]] = val jdbcArray = resultSet.getArray(pos) if resultSet.wasNull then None else try val arr = arrayCodec.readArray(jdbcArray.getArray) 
Some(factory.from(arr)) finally jdbcArray.free() private inline def writeCImpl[C[_], A]( entity: Iterable[A], ps: PreparedStatement, pos: Int )(using arrayCodec: SqlArrayCodec[A]): Unit = val arr = entity.iterator.map(arrayCodec.toArrayObj).toArray val jdbcArr = ps.getConnection.createArrayOf(arrayCodec.jdbcTypeName, arr) ps.setArray(pos, jdbcArr) private inline def readOptImpl[A]( codec: DbCodec[A], resultSet: ResultSet, pos: Int ): Option[A] = val res = codec.readSingle(resultSet, pos) if resultSet.wasNull then None else Some(res) end PgCodec ================================================ FILE: magnum-pg/src/main/scala/com/augustnagro/magnum/pg/SqlArrayCodec.scala ================================================ package com.augustnagro.magnum.pg import java.sql import java.sql.JDBCType import java.time.{OffsetDateTime, ZoneOffset} import java.util.UUID import scala.reflect.ClassTag import scala.collection.mutable as m import org.postgresql.core.Oid /** Typeclass for converting between raw JDBC Object arrays and type A */ trait SqlArrayCodec[A]: def jdbcTypeName: String /** Converts the raw JDBC array to an IArray[A] */ def readArray(array: Object): Array[A] /** Maps entity A to an object for use in JDBC Array */ def toArrayObj(entity: A): Object object SqlArrayCodec: given AnySqlArrayCodec: SqlArrayCodec[Any] with val jdbcTypeName: String = JDBCType.JAVA_OBJECT.getName def readArray(array: Object): Array[Any] = array.asInstanceOf[Array[Any]] def toArrayObj(entity: Any): Object = entity.asInstanceOf[Object] given StringSqlArrayCodec: SqlArrayCodec[String] with val jdbcTypeName: String = JDBCType.VARCHAR.getName def readArray(array: Object): Array[String] = array.asInstanceOf[Array[String]] def toArrayObj(entity: String): Object = entity given BooleanSqlArrayCodec: SqlArrayCodec[Boolean] with val jdbcTypeName: String = JDBCType.BOOLEAN.getName def readArray(array: Object): Array[Boolean] = array match case boxed: Array[java.lang.Boolean] => 
boxed.map(Boolean.unbox) case primitive: Array[Boolean] => primitive def toArrayObj(entity: Boolean): Object = Boolean.box(entity) given ByteSqlArrayCodec: SqlArrayCodec[Byte] with val jdbcTypeName: String = JDBCType.TINYINT.getName def readArray(array: Object): Array[Byte] = array match case boxed: Array[java.lang.Byte] => boxed.map(Byte.unbox) case primitive: Array[Byte] => primitive def toArrayObj(entity: Byte): Object = Byte.box(entity) given ShortSqlArrayCodec: SqlArrayCodec[Short] with val jdbcTypeName: String = JDBCType.SMALLINT.getName def readArray(array: Object): Array[Short] = array match case boxed: Array[java.lang.Short] => boxed.map(Short.unbox) case primitive: Array[Short] => primitive def toArrayObj(entity: Short): Object = Short.box(entity) given IntSqlArrayCodec: SqlArrayCodec[Int] with val jdbcTypeName: String = JDBCType.INTEGER.getName def readArray(array: Object): Array[Int] = array match case boxed: Array[Integer] => boxed.map(Int.unbox) case primitive: Array[Int] => primitive def toArrayObj(entity: Int): Object = Int.box(entity) given LongSqlArrayCodec: SqlArrayCodec[Long] with val jdbcTypeName: String = JDBCType.BIGINT.getName def readArray(array: Object): Array[Long] = array match case boxed: Array[java.lang.Long] => boxed.map(Long.unbox) case primitive: Array[Long] => primitive def toArrayObj(entity: Long): Object = Long.box(entity) given FloatSqlArrayCodec: SqlArrayCodec[Float] with val jdbcTypeName: String = JDBCType.REAL.getName def readArray(array: Object): Array[Float] = array match case boxed: Array[java.lang.Float] => boxed.map(Float.unbox) case primitive: Array[Float] => primitive def toArrayObj(entity: Float): Object = Float.box(entity) given DoubleSqlArrayCodec: SqlArrayCodec[Double] with val jdbcTypeName: String = JDBCType.DOUBLE.getName def readArray(array: Object): Array[Double] = array match case boxed: Array[java.lang.Double] => boxed.map(Double.unbox) case primitive: Array[Double] => primitive def toArrayObj(entity: 
Double): Object = Double.box(entity) given SqlDateSqlArrayCodec: SqlArrayCodec[sql.Date] with val jdbcTypeName: String = JDBCType.DATE.getName def readArray(array: Object): Array[sql.Date] = array.asInstanceOf[Array[sql.Date]] def toArrayObj(entity: sql.Date): Object = entity given SqlTimeSqlArrayCodec: SqlArrayCodec[sql.Time] with val jdbcTypeName: String = JDBCType.TIME.getName def readArray(array: Object): Array[sql.Time] = array.asInstanceOf[Array[sql.Time]] def toArrayObj(entity: sql.Time): Object = entity given SqlTimestampSqlArrayCodec: SqlArrayCodec[sql.Timestamp] with val jdbcTypeName: String = JDBCType.TIMESTAMP.getName def readArray(array: Object): Array[sql.Timestamp] = array.asInstanceOf[Array[sql.Timestamp]] def toArrayObj(entity: sql.Timestamp): Object = entity given OffsetDateTimeSqlArrayCodec: SqlArrayCodec[OffsetDateTime] with val jdbcTypeName: String = JDBCType.TIMESTAMP.getName def readArray(array: Object): Array[OffsetDateTime] = array .asInstanceOf[Array[sql.Timestamp]] .map(_.toInstant.atOffset(ZoneOffset.UTC)) def toArrayObj(entity: OffsetDateTime): Object = sql.Timestamp.from(entity.toInstant) given UUIDSqlArrayCodec: SqlArrayCodec[UUID] with val jdbcTypeName: String = Oid.toString(Oid.UUID) def readArray(array: Object): Array[UUID] = array.asInstanceOf[Array[UUID]] def toArrayObj(entity: UUID): Object = entity given ArraySqlArrayCodec[A](using aCodec: SqlArrayCodec[A], cTag: ClassTag[Array[A]] ): SqlArrayCodec[Array[A]] with def jdbcTypeName: String = aCodec.jdbcTypeName def readArray(array: Object): Array[Array[A]] = val objArr = array.asInstanceOf[Array[Object]] objArr.map(aCodec.readArray) def toArrayObj(entity: Array[A]): Object = entity.iterator.map(aCodec.toArrayObj).toArray given IArraySqlArrayCodec[A](using aCodec: SqlArrayCodec[A], cTag: ClassTag[IArray[A]] ): SqlArrayCodec[IArray[A]] with def jdbcTypeName: String = aCodec.jdbcTypeName def readArray(array: Object): Array[IArray[A]] = val objArray = 
array.asInstanceOf[Array[Object]] objArray.map(obj => IArray.unsafeFromArray(aCodec.readArray(obj))) def toArrayObj(entity: IArray[A]): Object = entity.iterator.map(aCodec.toArrayObj).toArray given SeqSqlArrayCodec[A](using aCodec: SqlArrayCodec[A] ): SqlArrayCodec[Seq[A]] with def jdbcTypeName: String = aCodec.jdbcTypeName def readArray(array: Object): Array[Seq[A]] = val objArray = array.asInstanceOf[Array[Object]] objArray.map(obj => Seq.from(aCodec.readArray(obj))) def toArrayObj(entity: Seq[A]): Object = entity.iterator.map(aCodec.toArrayObj).toArray given ListSqlArrayCodec[A](using aCodec: SqlArrayCodec[A] ): SqlArrayCodec[List[A]] with def jdbcTypeName: String = aCodec.jdbcTypeName def readArray(array: Object): Array[List[A]] = val objArray = array.asInstanceOf[Array[Object]] objArray.map(obj => List.from(aCodec.readArray(obj))) def toArrayObj(entity: List[A]): Object = entity.iterator.map(aCodec.toArrayObj).toArray given VectorSqlArrayCodec[A](using aCodec: SqlArrayCodec[A] ): SqlArrayCodec[Vector[A]] with def jdbcTypeName: String = aCodec.jdbcTypeName def readArray(array: Object): Array[Vector[A]] = val objArr = array.asInstanceOf[Array[Object]] objArr.map(obj => Vector.from(aCodec.readArray(obj))) def toArrayObj(entity: Vector[A]): Object = entity.iterator.map(aCodec.toArrayObj).toArray given BufferSqlArrayCodec[A](using aCodec: SqlArrayCodec[A] ): SqlArrayCodec[m.Buffer[A]] with def jdbcTypeName: String = aCodec.jdbcTypeName def readArray(array: Object): Array[m.Buffer[A]] = val objArray = array.asInstanceOf[Array[Object]] objArray.map(obj => m.Buffer.from(aCodec.readArray(obj))) def toArrayObj(entity: m.Buffer[A]): Object = entity.iterator.map(aCodec.toArrayObj).toArray end SqlArrayCodec ================================================ FILE: magnum-pg/src/main/scala/com/augustnagro/magnum/pg/enums/PgEnumDbCodec.scala ================================================ package com.augustnagro.magnum.pg.enums import com.augustnagro.magnum.{DbCodec, 
DerivingUtil} import java.sql.{JDBCType, PreparedStatement, ResultSet, Types} import scala.deriving.Mirror import scala.compiletime.{ constValue, constValueTuple, erasedValue, error, summonFrom, summonInline } import scala.quoted.* import scala.reflect.ClassTag inline given PgEnumDbCodec[A <: scala.reflect.Enum: Mirror.SumOf]: DbCodec[A] = ${ pgEnumDbCodecImpl[A] } private def pgEnumDbCodecImpl[A: Type](using Quotes): Expr[DbCodec[A]] = import quotes.reflect.* val mirror = Expr.summon[Mirror.SumOf[A]].getOrElse { report.errorAndAbort( "Can only derive PgEnumDbCodec for simple (non ADT) Enums" ) } mirror match case '{ $ms: Mirror.SumOf[A] { type MirroredElemTypes = mets type MirroredElemLabels = mels type MirroredLabel = mel } } => val nameMapExpr = DerivingUtil.buildSqlNameMapForEnum[A, mels, mets] val melExpr = Expr(Type.valueOfConstant[mel].get.toString) val sqlTypeNameExpr: Expr[String] = DerivingUtil .sqlTableNameAnnot[A] .map(sqlNameExpr => '{ $sqlNameExpr.name }) .orElse( DerivingUtil .tableAnnot[A] .map(tableExpr => '{ $tableExpr.nameMapper.toTableName($melExpr) } ) ) .getOrElse(melExpr) '{ new DbCodec[A] { val nameMap: Seq[(String, A)] = $nameMapExpr val cols: IArray[Int] = IArray(Types.VARCHAR) def readSingle(rs: ResultSet, pos: Int): A = val str = rs.getString(pos) nameMap.find((name, _) => name == str) match case Some((_, v)) => v case None => throw IllegalArgumentException( str + " not convertible to " + $melExpr ) def readSingleOption(rs: ResultSet, pos: Int): Option[A] = Option(rs.getString(pos)).map(str => nameMap.find((name, _) => name == str) match case Some((_, v)) => v case None => throw IllegalArgumentException( str + " not convertible to " + $melExpr ) ) def writeSingle(entity: A, ps: PreparedStatement, pos: Int): Unit = nameMap.find((_, v) => v == entity) match case Some((k, _)) => ps.setString(pos, k) case None => throw IllegalArgumentException( entity.toString + " not convertible to " + $melExpr ) def queryRepr: String = "?::" + ${ 
sqlTypeNameExpr } } } end match end pgEnumDbCodecImpl ================================================ FILE: magnum-pg/src/main/scala/com/augustnagro/magnum/pg/enums/PgEnumToScalaEnumSqlArrayCodec.scala ================================================ package com.augustnagro.magnum.pg.enums import com.augustnagro.magnum.DerivingUtil import com.augustnagro.magnum.pg.SqlArrayCodec import java.sql.JDBCType import scala.deriving.Mirror import scala.compiletime.{ constValue, constValueTuple, erasedValue, error, summonFrom, summonInline } import scala.quoted.* import scala.reflect.ClassTag /** Supports mapping between Postgres Enums and Scala Enums in multi-dimensional * arrays, such as between * {{{create type Color as enum ('Red', 'Green', 'Blue');}}} and * {{{ * enum Color: * case Red, Green, Blue * }}} */ inline given PgEnumToScalaEnumSqlArrayCodec[ A <: scala.reflect.Enum: Mirror.SumOf: ClassTag ]: SqlArrayCodec[A] = ${ pgEnumToScalaEnumSqlArrayCodecImpl[A] } private def pgEnumToScalaEnumSqlArrayCodecImpl[A: Type](using Quotes ): Expr[SqlArrayCodec[A]] = import quotes.reflect.* val mirror = Expr.summon[Mirror.SumOf[A]].getOrElse { report.errorAndAbort( "Can only derive SqlArrayCodec for simple (non ADT) Enums" ) } mirror match case '{ $ms: Mirror.SumOf[A] { type MirroredElemTypes = mets type MirroredElemLabels = mels type MirroredLabel = mel } } => val nameMapExpr = DerivingUtil.buildSqlNameMapForEnum[A, mels, mets] val melExpr = Expr(Type.valueOfConstant[mel].get.toString) val classTagExpr = Expr.summon[ClassTag[A]].get val sqlTypeNameExpr: Expr[String] = DerivingUtil .sqlTableNameAnnot[A] .map(sqlNameExpr => '{ $sqlNameExpr.name }) .orElse( DerivingUtil .tableAnnot[A] .map(tableExpr => '{ $tableExpr.nameMapper.toTableName($melExpr) } ) ) .getOrElse(melExpr) '{ new SqlArrayCodec[A]: val nameMap = $nameMapExpr val ct = $classTagExpr val jdbcTypeName: String = $sqlTypeNameExpr def readArray(array: Object): Array[A] = array .asInstanceOf[Array[String]] .map(enumName 
=> nameMap.find((k, _) => k == enumName) match case Some((_, v)) => v case None => throw IllegalArgumentException( enumName + " not convertible to " + $melExpr ) )(using ct) def toArrayObj(entity: A): Object = nameMap.find((_, v) => v == entity) match case Some((k, _)) => k case None => throw IllegalArgumentException( entity.toString + " not convertible to " + $melExpr ) } end match end pgEnumToScalaEnumSqlArrayCodecImpl ================================================ FILE: magnum-pg/src/main/scala/com/augustnagro/magnum/pg/enums/PgStringToScalaEnumSqlArrayCodec.scala ================================================ package com.augustnagro.magnum.pg.enums import com.augustnagro.magnum.DerivingUtil import com.augustnagro.magnum.pg.SqlArrayCodec import java.sql.JDBCType import scala.deriving.Mirror import scala.compiletime.{ constValue, constValueTuple, erasedValue, error, summonFrom, summonInline } import scala.quoted.* import scala.reflect.ClassTag /** Supports mapping between Postgres Strings (Varchar, text, etc) and Scala * Enums in multi-dimensional arrays. 
*/ inline given PgStringToScalaEnumSqlArrayCodec[ A <: scala.reflect.Enum: Mirror.SumOf: ClassTag ]: SqlArrayCodec[A] = ${ pgStringToScalaEnumSqlArrayCodecImpl[A] } private def pgStringToScalaEnumSqlArrayCodecImpl[A: Type](using Quotes ): Expr[SqlArrayCodec[A]] = import quotes.reflect.* val mirror = Expr.summon[Mirror.SumOf[A]].getOrElse { report.errorAndAbort( "Can only derive SqlArrayCodec for simple (non ADT) Enums" ) } mirror match case '{ $ms: Mirror.SumOf[A] { type MirroredElemTypes = mets type MirroredElemLabels = mels type MirroredLabel = mel } } => val nameMapExpr = DerivingUtil.buildSqlNameMapForEnum[A, mels, mets] val melExpr = Expr(Type.valueOfConstant[mel].get.toString) val classTagExpr = Expr.summon[ClassTag[A]].get '{ new SqlArrayCodec[A]: val nameMap = $nameMapExpr val ct = $classTagExpr val jdbcTypeName: String = JDBCType.VARCHAR.getName def readArray(array: Object): Array[A] = array .asInstanceOf[Array[String]] .map(enumName => nameMap.find((k, _) => k == enumName) match case Some((_, v)) => v case None => throw IllegalArgumentException( enumName + " not convertible to " + $melExpr ) )(using ct) def toArrayObj(entity: A): Object = nameMap.find((_, v) => v == entity) match case Some((k, _)) => k case None => throw IllegalArgumentException( entity.toString + " not convertible to " + $melExpr ) } end match end pgStringToScalaEnumSqlArrayCodecImpl ================================================ FILE: magnum-pg/src/main/scala/com/augustnagro/magnum/pg/json/JsonBDbCodec.scala ================================================ package com.augustnagro.magnum.pg.json import com.augustnagro.magnum.DbCodec import org.postgresql.util.PGobject import java.sql.{PreparedStatement, ResultSet, Types} trait JsonBDbCodec[A] extends DbCodec[A]: def encode(a: A): String def decode(json: String): A override def queryRepr: String = "?" 
// Single column of JDBC type OTHER: jsonb has no standard JDBC type and
// travels as a PGobject.
override val cols: IArray[Int] = IArray(Types.OTHER)
// Reads the jsonb column in its string form and delegates to the
// user-supplied decoder. Non-Option read: a NULL column passes null to decode.
override def readSingle(resultSet: ResultSet, pos: Int): A =
  decode(resultSet.getString(pos))
// Null-safe read: a NULL column yields None instead of invoking decode(null).
override def readSingleOption(resultSet: ResultSet, pos: Int): Option[A] =
  val rawJson = resultSet.getString(pos)
  if rawJson == null then None
  else Some(decode(rawJson))
// Writes by wrapping the encoded string in a PGobject tagged as "jsonb".
override def writeSingle(entity: A, ps: PreparedStatement, pos: Int): Unit =
  val jsonObject = PGobject()
  jsonObject.setType("jsonb")
  jsonObject.setValue(encode(entity))
  ps.setObject(pos, jsonObject)
end JsonBDbCodec
================================================ FILE: magnum-pg/src/main/scala/com/augustnagro/magnum/pg/json/JsonDbCodec.scala ================================================
package com.augustnagro.magnum.pg.json

import com.augustnagro.magnum.DbCodec
import org.postgresql.util.PGobject

import java.sql.{PreparedStatement, ResultSet, Types}

/** DbCodec for Postgres `json` columns. Implementors supply the string-level
  * encode/decode; I/O goes through PGobject. Mirrors JsonBDbCodec, which
  * targets `jsonb` instead.
  */
trait JsonDbCodec[A] extends DbCodec[A]:
  // Serializes A to its JSON string representation.
  def encode(a: A): String
  // Parses a JSON string into A; implementations may throw on malformed input.
  def decode(json: String): A
  override def queryRepr: String = "?"
// Single column of JDBC type OTHER: json is transferred as a PGobject.
override val cols: IArray[Int] = IArray(Types.OTHER)
// Reads the json column as a string and delegates to the user-supplied decoder.
override def readSingle(resultSet: ResultSet, pos: Int): A =
  decode(resultSet.getString(pos))
// Null-safe read: a NULL column yields None instead of invoking decode(null).
override def readSingleOption(resultSet: ResultSet, pos: Int): Option[A] =
  val rawJson = resultSet.getString(pos)
  if rawJson == null then None
  else Some(decode(rawJson))
// Writes by wrapping the encoded string in a PGobject tagged as "json".
override def writeSingle(entity: A, ps: PreparedStatement, pos: Int): Unit =
  val jsonObject = PGobject()
  jsonObject.setType("json")
  jsonObject.setValue(encode(entity))
  ps.setObject(pos, jsonObject)
end JsonDbCodec
================================================ FILE: magnum-pg/src/main/scala/com/augustnagro/magnum/pg/xml/XmlDbCodec.scala ================================================
package com.augustnagro.magnum.pg.xml

import com.augustnagro.magnum.DbCodec
import org.postgresql.util.PGobject

import java.sql.{PreparedStatement, ResultSet, Types}

/** DbCodec for Postgres `xml` columns. Implementors supply the string-level
  * encode/decode; I/O goes through a PGobject tagged as "xml".
  */
trait XmlDbCodec[A] extends DbCodec[A]:
  // Serializes A to its XML string representation.
  def encode(a: A): String
  // Parses an XML string into A; implementations may throw on malformed input.
  def decode(xml: String): A
  override def queryRepr: String = "?"
  // Declared as SQLXML for metadata purposes, though reads use getString.
  override val cols: IArray[Int] = IArray(Types.SQLXML)
  override def readSingle(resultSet: ResultSet, pos: Int): A =
    decode(resultSet.getString(pos))
  // Null-safe read: NULL column maps to None.
  override def readSingleOption(resultSet: ResultSet, pos: Int): Option[A] =
    val xmlString = resultSet.getString(pos)
    if xmlString == null then None
    else Some(decode(xmlString))
  override def writeSingle(entity: A, ps: PreparedStatement, pos: Int): Unit =
    val xmlObject = PGobject()
    xmlObject.setType("xml")
    xmlObject.setValue(encode(entity))
    ps.setObject(pos, xmlObject)
end XmlDbCodec
================================================ FILE: magnum-pg/src/test/resources/pg-car.sql ================================================
drop table if exists mag_car;
create table mag_car (
    id bigint primary key,
    text_colors text[] not null,
    text_color_map text[][] not null,
    last_service json,
    my_json_b jsonb,
    my_xml xml
);
insert into mag_car values
(1, '{"red_orange", "Greenish"}', '{{"red_orange", "red_orange"}, {"Greenish", "Greenish"}}',
'{"mechanic": "Bob", "date": "2024-05-04"}', '{"a": [1, 2, 3], "b": "hello world"}', 'blue'), (2, '{"Greenish", "blue"}', '{{"red_orange", "Greenish"}, {"Greenish", "blue"}}', null, null, null); ================================================ FILE: magnum-pg/src/test/resources/pg-service-list.sql ================================================ drop table if exists mag_service_list; create table mag_service_list ( id bigint primary key generated by default as identity, service json, created timestamptz default now() ); ================================================ FILE: magnum-pg/src/test/resources/pg-user.sql ================================================ drop table if exists mag_user; drop type if exists colour; create type Colour as enum ('red_orange', 'Greenish', 'blue'); create table mag_user ( id bigint primary key, name text not null, friends text[] not null, matrix integer[][] not null, test integer[] not null, dates timestamptz[] not null, bx box not null, c circle not null, iv interval not null, l line not null, lSeg lseg not null, p path not null, pnt point not null, poly polygon not null, colors Colour[] not null, colorMap Colour[][] not null, color Colour not null, idUuid uuid not null, uuids uuid[] not null ); insert into mag_user values (1, 'Abby', '{"Jane", "Mary"}', '{{1, 2}, {3, 4}, {5, 6}}', '{1}', '{"2023-07-30T12:21:36Z", "2023-07-30T12:21:37Z"}', '(1, 2, 3, 4)', '<(1, 2), 3>', '1 hour', '{1, 1, 1}', '1, 1, 2, 2', '[(1, 1), (2, 2)]', '(1, 1)', '((0, 0), (-1, 1), (1, 1))', '{"red_orange", "Greenish"}', '{{"red_orange", "red_orange"}, {"Greenish", "Greenish"}}', 'blue', '00000000-0000-0000-0000-000000000001', '{"00000000-0000-0001-0000-000000000000", "00000000-0000-0001-0000-000000000001"}'), (2, 'Jacob', '{"Grace", "Aubrey"}', '{{7, 8}, {9, 10}}', '{}', '{}', '(5, 6, 7, 8)', '<(4, 5), 6>', '2 days', '{2, 2, 2}', '2, 2, 3, 3', '[(2, 2), (3, 3)]', '(2, 2)', '((0, 0), (-1, -1), (1, -1))', '{"Greenish", "blue"}', '{{"red_orange", "Greenish"}, 
{"Greenish", "blue"}}', 'blue', '00000000-0000-0000-0000-000000000002', '{"00000000-0000-0002-0000-000000000000"}'); ================================================ FILE: magnum-pg/src/test/scala/CirceJsonBDbCodec.scala ================================================ import com.augustnagro.magnum.pg.json.JsonBDbCodec import io.circe.{Codec, Decoder, Encoder, JsonObject} import io.circe.parser.{decode as circeDecode, *} import io.circe.syntax.* trait CirceJsonBDbCodec[A] extends JsonBDbCodec[A] object CirceJsonBDbCodec: def derived[A: Encoder: Decoder]: CirceJsonBDbCodec[A] = new: def encode(a: A): String = a.asJson.toString def decode(json: String): A = circeDecode[A](json) match case Right(a) => a case Left(err) => throw err ================================================ FILE: magnum-pg/src/test/scala/CirceJsonDbCodec.scala ================================================ import com.augustnagro.magnum.pg.json.JsonDbCodec import io.circe.{Codec, Decoder, Encoder, JsonObject} import io.circe.parser.{decode as circeDecode, *} import io.circe.syntax.* trait CirceJsonDbCodec[A] extends JsonDbCodec[A] object CirceJsonDbCodec: def derived[A: Encoder: Decoder]: CirceJsonDbCodec[A] = new: def encode(a: A): String = a.asJson.toString def decode(json: String): A = circeDecode[A](json) match case Right(a) => a case Left(err) => throw err ================================================ FILE: magnum-pg/src/test/scala/Color.scala ================================================ import com.augustnagro.magnum.{ DbCodec, PostgresDbType, SqlName, SqlNameMapper, Table } @SqlName("colour") @Table(PostgresDbType, SqlNameMapper.CamelToSnakeCase) enum Color derives DbCodec: case RedOrange @SqlName("Greenish") case Green case Blue ================================================ FILE: magnum-pg/src/test/scala/LastService.scala ================================================ import com.augustnagro.magnum.pg.json.JsonDbCodec import io.circe.Codec import java.time.LocalDate case class 
LastService(mechanic: String, date: LocalDate) derives Codec.AsObject, CirceJsonDbCodec ================================================ FILE: magnum-pg/src/test/scala/MagCar.scala ================================================ import com.augustnagro.magnum.{ DbCodec, Id, PostgresDbType, SqlNameMapper, Table } import com.augustnagro.magnum.pg.PgCodec.given import com.augustnagro.magnum.pg.enums.PgStringToScalaEnumSqlArrayCodec @Table(PostgresDbType, SqlNameMapper.CamelToSnakeCase) case class MagCar( @Id id: Long, textColors: Seq[Color], textColorMap: Vector[List[Color]], lastService: Option[LastService], myJsonB: Option[MyJsonB], myXml: Option[MyXml] ) derives DbCodec ================================================ FILE: magnum-pg/src/test/scala/MagUser.scala ================================================ import com.augustnagro.magnum.{DbCodec, Id, PostgresDbType, SqlName, Table} import com.augustnagro.magnum.pg.PgCodec.given import com.augustnagro.magnum.pg.enums.PgEnumToScalaEnumSqlArrayCodec import com.augustnagro.magnum.pg.enums.PgEnumDbCodec import org.postgresql.geometric.{ PGbox, PGcircle, PGline, PGlseg, PGpath, PGpoint, PGpolygon } import org.postgresql.util.PGInterval import java.time.OffsetDateTime import java.util.Objects import java.util.UUID @Table(PostgresDbType) @SqlName("mag_user") case class MagUser( @Id id: Long, name: String, friends: Vector[String], matrix: IArray[IArray[Int]], test: IArray[Int], dates: IArray[OffsetDateTime], bx: PGbox, c: PGcircle, iv: PGInterval, l: PGline, lSeg: PGlseg, p: PGpath, pnt: PGpoint, poly: PGpolygon, colors: List[Color], colorMap: List[Vector[Color]], color: Color, idUuid: UUID, uuids: List[UUID] ) derives DbCodec: override def equals(obj: Any): Boolean = obj match case u: MagUser => id == u.id && name == u.name && friends == u.friends && Objects.deepEquals(matrix, u.matrix) && Objects.deepEquals(test, u.test) && Objects.deepEquals(dates, u.dates) && bx == u.bx && c == u.c && iv == u.iv && l == u.l && lSeg 
== u.lSeg && p == u.p && pnt == u.pnt && poly == u.poly && colors == u.colors && colorMap == u.colorMap && color == u.color && idUuid == u.idUuid && uuids == u.uuids case _ => false end MagUser ================================================ FILE: magnum-pg/src/test/scala/MyJsonB.scala ================================================ import io.circe.Codec case class MyJsonB(a: Vector[Int], b: String) derives Codec.AsObject, CirceJsonBDbCodec ================================================ FILE: magnum-pg/src/test/scala/MyXml.scala ================================================ import com.augustnagro.magnum.DbCodec import com.augustnagro.magnum.pg.xml.XmlDbCodec import scala.xml.{Document, XML, Elem} case class MyXml(elem: Elem) object MyXml: given XmlDbCodec[MyXml] with def encode(a: MyXml): String = a.elem.toString def decode(xml: String): MyXml = MyXml(XML.loadString(xml)) ================================================ FILE: magnum-pg/src/test/scala/PgCodecTests.scala ================================================ import com.dimafeng.testcontainers.PostgreSQLContainer import com.dimafeng.testcontainers.munit.fixtures.TestContainersFixtures import munit.{AnyFixture, FunSuite} import org.testcontainers.utility.DockerImageName import org.postgresql.ds.PGSimpleDataSource import org.postgresql.geometric.* import com.augustnagro.magnum.* import com.augustnagro.magnum.pg.PgCodec.given import com.augustnagro.magnum.pg.enums.PgEnumToScalaEnumSqlArrayCodec import org.postgresql.util.PGInterval import java.nio.file.{Files, Path} import java.time.{LocalDate, OffsetDateTime, ZoneOffset} import java.util.Objects import java.util.UUID import javax.sql.DataSource import scala.util.Using.Manager class PgCodecTests extends FunSuite, TestContainersFixtures: val userRepo = Repo[MagUser, MagUser, Long] val allUsers = Vector( MagUser( id = 1L, name = "Abby", friends = Vector("Jane", "Mary"), matrix = IArray(IArray(1, 2), IArray(3, 4), IArray(5, 6)), test = IArray(1), dates = 
IArray( OffsetDateTime.parse("2023-07-30T12:21:36Z"), OffsetDateTime.parse("2023-07-30T12:21:37Z") ), bx = PGbox(1, 2, 3, 4), c = PGcircle(1, 2, 3), iv = PGInterval("1 hour"), l = PGline(1, 1, 1), lSeg = PGlseg(1, 1, 2, 2), p = PGpath(Array(PGpoint(1, 1), PGpoint(2, 2)), true), pnt = PGpoint(1, 1), poly = PGpolygon(Array(PGpoint(0, 0), PGpoint(-1, 1), PGpoint(1, 1))), colors = List(Color.RedOrange, Color.Green), colorMap = List( Vector(Color.RedOrange, Color.RedOrange), Vector(Color.Green, Color.Green) ), color = Color.Blue, idUuid = UUID.fromString("00000000-0000-0000-0000-000000000001"), uuids = List( UUID.fromString("00000000-0000-0001-0000-000000000000"), UUID.fromString("00000000-0000-0001-0000-000000000001") ) ), MagUser( id = 2L, name = "Jacob", friends = Vector("Grace", "Aubrey"), matrix = IArray(IArray(7, 8), IArray(9, 10)), test = IArray.emptyIntIArray, dates = IArray.empty, bx = PGbox(5, 6, 7, 8), c = PGcircle(4, 5, 6), iv = PGInterval("2 days"), l = PGline(2, 2, 2), lSeg = PGlseg(2, 2, 3, 3), p = PGpath(Array(PGpoint(2, 2), PGpoint(3, 3)), true), pnt = PGpoint(2, 2), poly = PGpolygon(Array(PGpoint(0, 0), PGpoint(-1, -1), PGpoint(1, -1))), colors = List(Color.Green, Color.Blue), colorMap = List( Vector(Color.RedOrange, Color.Green), Vector(Color.Green, Color.Blue) ), color = Color.Blue, idUuid = UUID.fromString("00000000-0000-0000-0000-000000000002"), uuids = List( UUID.fromString("00000000-0000-0002-0000-000000000000") ) ) ) val carRepo = Repo[MagCar, MagCar, Long] val allCars = Vector( MagCar( id = 1, textColors = Seq(Color.RedOrange, Color.Green), textColorMap = Vector( List(Color.RedOrange, Color.RedOrange), List(Color.Green, Color.Green) ), lastService = Some(LastService("Bob", LocalDate.of(2024, 5, 4))), myJsonB = Some(MyJsonB(Vector(1, 2, 3), "hello world")), myXml = Some(MyXml(blue)) ), MagCar( id = 2, textColors = Seq(Color.Green, Color.Blue), textColorMap = Vector( List(Color.RedOrange, Color.Green), List(Color.Green, Color.Blue) ), lastService 
= None, myJsonB = None, myXml = None ) ) test("select all MagUser"): connect(ds()): assert(userRepo.findAll == allUsers) test("select all MagCar"): connect(ds()): assert(carRepo.findAll == allCars) test("insert MagUser"): connect(ds()): val u = MagUser( id = 3L, name = "Matt", friends = Vector.empty, matrix = IArray(IArray(1, 2), IArray(3, 4)), test = IArray(4), dates = IArray(OffsetDateTime.parse("2023-07-30T13:57:29.059335Z")), bx = PGbox(1, 2, 3, 4), c = PGcircle(1, 1, 1), iv = PGInterval("1 minute"), l = PGline(3, 4, 5), lSeg = PGlseg(0, 0, -1, -1), p = PGpath(Array(PGpoint(3, 3), PGpoint(4, 4)), true), pnt = PGpoint(3, 4), poly = PGpolygon(Array(PGpoint(0, 0), PGpoint(-1, 1), PGpoint(1, 1))), colors = List(Color.Blue), colorMap = List(Vector(Color.Blue), Vector(Color.Green)), color = Color.Green, idUuid = UUID.fromString("00000000-0000-0000-0000-000000000003"), uuids = List( UUID.fromString("00000000-0000-0003-0000-000000000000"), UUID.fromString("00000000-0000-0003-0000-000000000001") ) ) userRepo.insert(u) val dbU = userRepo.findById(3L).get assert(dbU == u) test("select MagUser where uuid in set"): connect(ds()): val ids = Vector( UUID.fromString("00000000-0000-0000-0000-000000000001"), UUID.fromString("00000000-0000-0000-0000-000000000002") ) val users = sql"SELECT * FROM mag_user WHERE idUuid = ANY($ids)" .query[MagUser] .run() assert(users == allUsers) test("insert MagCar"): connect(ds()): val c = MagCar( id = 3L, textColors = Vector(Color.RedOrange, Color.RedOrange), textColorMap = Vector( List(Color.RedOrange, Color.RedOrange), List(Color.RedOrange, Color.RedOrange) ), lastService = Some(LastService("James", LocalDate.of(1970, 4, 22))), myJsonB = None, myXml = None ) carRepo.insert(c) val dbC = carRepo.findById(3L).get assert(dbC == c) test("update MagUser arrays"): connect(ds()): val newMatrix = IArray(IArray(0, 0), IArray(0, 9)) sql"UPDATE mag_user SET matrix = $newMatrix WHERE id = 2".update .run() val newUser = userRepo.findById(2L).get 
assert(Objects.deepEquals(newUser.matrix, newMatrix)) test("update MagCar arrays"): connect(ds()): val newTextColorMap = Vector(List(Color.Blue, Color.Blue), List(Color.Blue, Color.Blue)) sql"UPDATE mag_car SET text_color_map = $newTextColorMap WHERE id = 2".update .run() val newCar = carRepo.findById(2L).get assert(newCar.textColorMap == newTextColorMap) test("MagCar xml string values"): connect(ds()): val found = sql"SELECT my_xml FROM mag_car" .query[Option[MyXml]] .run() .flatten .map(_.elem.toString) val expected = allCars.flatMap(_.myXml).map(_.elem.toString) assert(found == expected) test("where = ANY()"): connect(ds()): val ids = Vector(1L, 2L) val cars = sql"SELECT * FROM mag_car WHERE id = ANY($ids)".query[MagCar].run() assert(cars == allCars) test("insert MagServiceList interpolated"): connect(ds()): val service = LastService("James", LocalDate.of(1970, 4, 22)) val frag = sql"INSERT INTO mag_service_list (service) VALUES ($service)" assertEquals( frag.sqlString, "INSERT INTO mag_service_list (service) VALUES (?)" ) frag.update.run() assertEquals( sql"SELECT service FROM mag_service_list".query[LastService].run().head, service ) val pgContainer = ForAllContainerFixture( PostgreSQLContainer .Def(dockerImageName = DockerImageName.parse("postgres:17.0")) .createContainer() ) override def munitFixtures: Seq[AnyFixture[_]] = super.munitFixtures :+ pgContainer def ds(): DataSource = val ds = PGSimpleDataSource() val pg = pgContainer() ds.setUrl(pg.jdbcUrl) ds.setUser(pg.username) ds.setPassword(pg.password) val userSql = Files.readString(Path.of(getClass.getResource("/pg-user.sql").toURI)) val carSql = Files.readString(Path.of(getClass.getResource("/pg-car.sql").toURI)) val serviceListSql = Files.readString( Path.of(getClass.getResource("/pg-service-list.sql").toURI) ) Manager { use => val con = use(ds.getConnection) val stmt = use(con.createStatement) stmt.execute(userSql) stmt.execute(carSql) stmt.execute(serviceListSql) }.get ds end ds end PgCodecTests 
================================================ FILE: magnum-zio/src/main/scala/com/augustnagro/magnum/magzio/TransactorZIO.scala ================================================
package com.augustnagro.magnum.magzio

import com.augustnagro.magnum.{DbCon, DbTx, SqlException, SqlLogger}
import zio.{Task, Trace, UIO, URLayer, ZIO, ZLayer}

import java.sql.Connection
import javax.sql.DataSource
import scala.util.control.NonFatal

/** ZIO-friendly Transactor: wraps a DataSource and runs magnum's DbCon/DbTx
  * context functions inside blocking, resource-safe ZIO effects. Construct via
  * the `layer` factories in the companion object.
  */
class TransactorZIO private (
    dataSource: DataSource,
    sqlLogger: SqlLogger,
    connectionConfig: Connection => Unit
):
  /** Returns a copy of this transactor using the given SqlLogger. */
  def withSqlLogger(sqlLogger: SqlLogger): TransactorZIO =
    new TransactorZIO(
      dataSource,
      sqlLogger,
      connectionConfig
    )

  /** Returns a copy of this transactor that applies `connectionConfig` to
    * every Connection before use.
    */
  def withConnectionConfig(
      connectionConfig: Connection => Unit
  ): TransactorZIO =
    new TransactorZIO(
      dataSource,
      sqlLogger,
      connectionConfig
    )

  /** Runs `f` with a fresh auto-committing Connection. The connection is
    * acquired/released via acquireReleaseWith, on the blocking executor.
    */
  def connect[A](f: DbCon ?=> A)(using Trace): Task[A] =
    ZIO.blocking(
      ZIO.acquireReleaseWith(acquireConnection)(releaseConnection)(cn =>
        ZIO.attempt {
          connectionConfig(cn)
          f(using DbCon(cn, sqlLogger))
        }
      )
    )

  /** Runs `f` inside a transaction: autoCommit is disabled, `f` is run, and
    * the transaction is committed; on NonFatal failure it is rolled back and
    * the original error rethrown. The effect body is uninterruptible so a ZIO
    * interrupt cannot leave the transaction half-finished.
    */
  def transact[A](f: DbTx ?=> A)(using Trace): Task[A] =
    ZIO.blocking(
      ZIO.acquireReleaseWith(acquireConnection)(releaseConnection)(cn =>
        ZIO.attempt {
          connectionConfig(cn)
          // NOTE(review): autoCommit is not restored afterwards; the
          // connection is closed by releaseConnection, but a pooling
          // DataSource that doesn't reset state would observe this — confirm.
          cn.setAutoCommit(false)
          try
            val res = f(using DbTx(cn, sqlLogger))
            cn.commit()
            res
          catch
            case NonFatal(t) =>
              // Bare `case t2 =>` intentionally catches any rollback failure
              // (including fatal ones) and attaches it as suppressed, so the
              // original error `t` is what propagates.
              try cn.rollback()
              catch { case t2 => t.addSuppressed(t2) }
              throw t
        }.uninterruptible
      )
    )

  // Acquires a Connection from the DataSource, normalizing failures to
  // SqlException.
  private def acquireConnection(using Trace): Task[Connection] =
    ZIO
      .attempt(dataSource.getConnection())
      .mapError(t => SqlException("Unable to acquire DB Connection", t))

  // Closes the Connection; a null connection (failed acquire) is a no-op.
  // Failure to close is treated as a defect (orDieWith), not a typed error.
  private def releaseConnection(con: Connection)(using Trace): UIO[Unit] =
    if con eq null then ZIO.unit
    else
      ZIO
        .attempt(con.close())
        .orDieWith(t =>
          SqlException("Unable to close DB Connection, will die", t)
        )
end TransactorZIO

object TransactorZIO:
  // Default connection customizer: leaves the Connection untouched.
  private val noOpConnectionConfig: Connection => Unit = _ => ()

  /** Construct a TransactorZIO
    *
    * @param sqlLogger
    *   Logging configuration
    * @param connectionConfig
    *   Customize the underlying JDBC Connections
    */
  def layer(
      sqlLogger: SqlLogger,
      connectionConfig: Connection => Unit
  ): URLayer[DataSource, TransactorZIO] =
    ZLayer
      .service[DataSource]
      .project(ds =>
        TransactorZIO(
          dataSource = ds,
          sqlLogger = sqlLogger,
          connectionConfig = connectionConfig
        )
      )

  /** Construct a TransactorZIO
    *
    * @param sqlLogger
    *   Logging configuration
    */
  def layer(sqlLogger: SqlLogger): URLayer[DataSource, TransactorZIO] =
    layer(
      sqlLogger = sqlLogger,
      connectionConfig = noOpConnectionConfig
    )

  /** Construct a TransactorZIO */
  def layer: URLayer[DataSource, TransactorZIO] =
    layer(
      sqlLogger = SqlLogger.Default,
      connectionConfig = noOpConnectionConfig
    )

  /** Construct a TransactorZIO
    *
    * @param connectionConfig
    *   Customize the underlying JDBC Connections
    */
  def layer(
      connectionConfig: Connection => Unit
  ): URLayer[DataSource, TransactorZIO] =
    layer(
      sqlLogger = SqlLogger.Default,
      connectionConfig = connectionConfig
    )
end TransactorZIO
================================================ FILE: magnum-zio/src/test/resources/pg/big-dec.sql ================================================
drop table if exists big_dec cascade;
create table big_dec (
    id int primary key,
    my_big_dec numeric
);
insert into big_dec values
(1, 123),
(2, null);
================================================ FILE: magnum-zio/src/test/resources/pg/car.sql ================================================
DROP TABLE IF EXISTS car;
CREATE TABLE car (
    model VARCHAR(50) NOT NULL,
    id bigint PRIMARY KEY,
    top_speed INT NOT NULL,
    vin INT,
    color TEXT NOT NULL CHECK (color IN ('Red', 'Green', 'Blue')),
    created TIMESTAMP WITH TIME ZONE NOT NULL
);
INSERT INTO car (model, id, top_speed, vin, color, created) VALUES
('McLaren Senna', 1, 208, 123, 'Red', '2024-11-24T22:17:30.000000000Z'::timestamptz),
('Ferrari F8 Tributo', 2, 212, 124, 'Green', '2024-11-24T22:17:31.000000000Z'::timestamptz),
('Aston Martin Superleggera', 3, 211, null, 'Blue', '2024-11-24T22:17:32.000000000Z'::timestamptz);
================================================ FILE: magnum-zio/src/test/resources/pg/my-user.sql ================================================ drop table if exists my_user cascade; create table my_user ( first_name text not null, id bigint primary key generated always as identity ); insert into my_user (first_name) values ('George'), ('Alexander'), ('John'); ================================================ FILE: magnum-zio/src/test/resources/pg/no-id.sql ================================================ drop table if exists no_id; create table no_id ( created_at timestamptz not null default now(), user_name text not null, user_action text not null ); insert into no_id values (timestamp '1997-08-15', 'Josh', 'clicked a button'), (timestamp '1997-08-16', 'Danny', 'opened a toaster'), (timestamp '1997-08-17', 'Greg', 'ran some QA tests'); ================================================ FILE: magnum-zio/src/test/resources/pg/person.sql ================================================ drop table if exists person cascade; create table person ( id bigint primary key, first_name varchar(50), last_name varchar(50) not null, is_admin boolean not null, created timestamptz not null, social_id UUID ); insert into person (id, first_name, last_name, is_admin, created, social_id) values (1, 'George', 'Washington', true, now(), 'd06443a6-3efb-46c4-a66a-a80a8a9a5388'), (2, 'Alexander', 'Hamilton', true, now(), '529b6c6d-7228-4da5-81d7-13b706f78ddb'), (3, 'John', 'Adams', true, now(), null), (4, 'Benjamin', 'Franklin', true, now(), null), (5, 'John', 'Jay', true, now(), null), (6, 'Thomas', 'Jefferson', true, now(), null), (7, 'James', 'Madison', true, now(), null), (8, null, 'Nagro', false, timestamp '1997-08-12', null); ================================================ FILE: magnum-zio/src/test/scala/com/augustnagro/magnum/magzio/ImmutableRepoZioTests.scala ================================================ package com.augustnagro.magnum.magzio import com.augustnagro.magnum.* 
import munit.{FunSuite, Location} import zio.* import java.sql.Connection import java.time.OffsetDateTime import scala.util.{Success, Using} /* Registers the shared ImmutableRepo test cases on the given munit suite; each test runs its effect through the supplied TransactorZIO. */ def immutableRepoZioTests( suite: FunSuite, dbType: DbType, xa: () => TransactorZIO )(using Location, DbCodec[OffsetDateTime] ): Unit = import suite.* val runtime: Runtime[Any] = zio.Runtime.default /* Synchronously runs a ZIO effect on the default runtime, rethrowing any failure. */ def runIO[A](io: ZIO[Any, Throwable, A]): A = Unsafe.unsafe { implicit unsafe => runtime.unsafe.run(io).getOrThrow() } enum Color derives DbCodec: case Red, Green, Blue @Table(dbType, SqlNameMapper.CamelToSnakeCase) case class Car( model: String, @Id id: Long, topSpeed: Int, @SqlName("vin") vinNumber: Option[Int], color: Color, created: OffsetDateTime ) derives DbCodec val carRepo = ImmutableRepo[Car, Long] val car = TableInfo[Car, Car, Long] /* Expected contents of the car table; mirrors pg/car.sql. */ val allCars = Vector( Car( model = "McLaren Senna", id = 1L, topSpeed = 208, vinNumber = Some(123), color = Color.Red, created = OffsetDateTime.parse("2024-11-24T22:17:30.000000000Z") ), Car( model = "Ferrari F8 Tributo", id = 2L, topSpeed = 212, vinNumber = Some(124), color = Color.Green, created = OffsetDateTime.parse("2024-11-24T22:17:31.000000000Z") ), Car( model = "Aston Martin Superleggera", id = 3L, topSpeed = 211, vinNumber = None, color = Color.Blue, created = OffsetDateTime.parse("2024-11-24T22:17:32.000000000Z") ) ) test("count"): val count = runIO: xa().connect: carRepo.count assert(count == 3L) test("existsById"): val (exists3, exists4) = runIO: xa().connect: carRepo.existsById(3L) -> carRepo.existsById(4L) assert(exists3) assert(!exists4) test("findAll"): val cars = runIO: xa().connect: carRepo.findAll assert(cars == allCars) test("findById"): val (exists3, exists4) = runIO: xa().connect: carRepo.findById(3L) -> carRepo.findById(4L) assert(exists3.get == allCars.last) assert(exists4 == None) test("findAllByIds"): /* NOTE(review): findAllById is presumably unsupported on these DB types — confirm */ assume(dbType != ClickhouseDbType) assume(dbType != MySqlDbType) assume(dbType != OracleDbType) assume(dbType != SqliteDbType) val ids = runIO: xa().connect:
carRepo.findAllById(Vector(1L, 3L)).map(_.id) assert(ids == Vector(1L, 3L)) test("serializable transaction"): val count = runIO: xa() .withConnectionConfig(withSerializable) .transact: carRepo.count assert(count == 3L) /* Connection customizer used by the serializable-transaction test above. */ def withSerializable(con: Connection): Unit = con.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE) /* Checks generated SQL text, bound params, and decoded rows. */ test("select query"): val minSpeed: Int = 210 val query = sql"select ${car.all} from $car where ${car.topSpeed} > $minSpeed" .query[Car] val result = runIO: xa().connect: query.run() assertNoDiff( query.frag.sqlString, "select model, id, top_speed, vin, color, created from car where top_speed > ?" ) assert(query.frag.params == Vector(minSpeed)) assert(result == allCars.tail) test("select query with aliasing"): val minSpeed = 210 val cAlias = car.alias("c") val query = sql"select ${cAlias.all} from $cAlias where ${cAlias.topSpeed} > $minSpeed" .query[Car] val result = runIO: xa().connect: query.run() assertNoDiff( query.frag.sqlString, "select c.model, c.id, c.top_speed, c.vin, c.color, c.created from car c where c.top_speed > ?"
) assert(query.frag.params == Vector(minSpeed)) assert(result == allCars.tail) test("select via option"): val vin = Option(124) val cars = runIO: xa().connect: sql"select * from car where vin = $vin" .query[Car] .run() assert(cars == allCars.filter(_.vinNumber == vin)) test("tuple select"): val tuples = runIO: xa().connect: sql"select model, color from car where id = 2" .query[(String, Color)] .run() assert(tuples == Vector(allCars(1).model -> allCars(1).color)) test("large tuple support does not override hand-rolled Tuple[2-4] codecs"): val tuple2ACodec = summon[DbCodec[(String, Color)]] val tuple2BCodec = summon[DbCodec[(String, Int)]] assert(tuple2ACodec.getClass == tuple2BCodec.getClass) val tuple5ACodec = summon[DbCodec[(String, Color, Int, Long, Option[Int])]] assert(tuple5ACodec.getClass != tuple2ACodec.getClass) val tuple5BCodec = summon[DbCodec[(Int, Int, Int, Long, Option[Int])]] assert(tuple5BCodec.getClass != tuple5ACodec.getClass) test("large tuple select"): val tuple = runIO: xa().connect: sql"select model, color, top_speed, id, vin from car where id = 2" .query[(String, Color, Int, Long, Option[Int])] .run() .head val c = allCars(1) assert(tuple == (c.model, c.color, c.topSpeed, c.id, c.vinNumber)) test("reads null int as None and not Some(0)"): val maybeCar = runIO: xa().connect: carRepo.findById(3L) assert(maybeCar.get.vinNumber == None) /* BUG FIX: the local result was previously named allCars, shadowing the fixture and making the assertion compare the query result with itself (vacuously true). Compare the DB-loaded timestamps against the expected fixture instead. */ test("created timestamps should match"): val dbCars = runIO: xa().connect: carRepo.findAll assert(dbCars.map(_.created) == allCars.map(_.created)) test(".query iterator"): /* Using.Manager releases the iterator's underlying resources when the scope closes */ val carsCount = runIO: xa().connect: Using.Manager(implicit use => val it = sql"SELECT * FROM car".query[Car].iterator() it.map(_.id).size ) assert(carsCount == Success(3)) end immutableRepoZioTests ================================================ FILE: magnum-zio/src/test/scala/com/augustnagro/magnum/magzio/PgZioTests.scala ================================================ package com.augustnagro.magnum.magzio import com.augustnagro.magnum.*
import com.dimafeng.testcontainers.PostgreSQLContainer import com.dimafeng.testcontainers.munit.fixtures.TestContainersFixtures import munit.{AnyFixture, FunSuite, Location} import org.postgresql.ds.PGSimpleDataSource import org.testcontainers.utility.DockerImageName import zio.{Scope, Unsafe, ZLayer} import java.nio.file.{Files, Path} import scala.util.Using import scala.util.Using.Manager /* Postgres integration suite: runs the shared immutable-repo tests against a Testcontainers Postgres 17 instance. */ class PgZioTests extends FunSuite, TestContainersFixtures: immutableRepoZioTests(this, PostgresDbType, xa) val pgContainer = ForAllContainerFixture( PostgreSQLContainer .Def(dockerImageName = DockerImageName.parse("postgres:17.0")) .createContainer() ) override def munitFixtures: Seq[AnyFixture[_]] = super.munitFixtures :+ pgContainer /* Builds a TransactorZIO backed by the container: configures the datasource from container credentials, (re)runs every DDL fixture, then unsafely constructs the layer. */ def xa(): TransactorZIO = val ds = PGSimpleDataSource() val pg = pgContainer() ds.setUrl(pg.jdbcUrl) ds.setUser(pg.username) ds.setPassword(pg.password) val tableDDLs = Vector( "/pg/car.sql", "/pg/person.sql", "/pg/my-user.sql", "/pg/no-id.sql", "/pg/big-dec.sql" ).map(p => Files.readString(Path.of(getClass.getResource(p).toURI))) Manager(use => val con = use(ds.getConnection) val stmt = use(con.createStatement) for ddl <- tableDDLs do stmt.execute(ddl) ).get // todo unsafe Unsafe.unsafe { implicit unsafe => zio.Runtime.default.unsafe .run( TransactorZIO.layer .build(Scope.global) .map(_.get) .provide( ZLayer.succeed(ds) ++ zio.Runtime.enableLoomBasedBlockingExecutor ) ) .getOrThrow() } end PgZioTests ================================================ FILE: project/build.properties ================================================ sbt.version=1.12.8 ================================================ FILE: project/plugins.sbt ================================================ addSbtPlugin("org.scalameta" % "sbt-scalafmt" % "2.5.2") addSbtPlugin("com.github.sbt" % "sbt-pgp" % "2.3.0")