Full Code of AugustNagro/magnum for AI

master 280136415ead cached
126 files
285.6 KB
81.6k tokens
31 symbols
1 requests
Download .txt
Showing preview only (321K chars total). Download the full file or copy to clipboard to get everything.
Repository: AugustNagro/magnum
Branch: master
Commit: 280136415ead
Files: 126
Total size: 285.6 KB

Directory structure:
gitextract_tcy7nv57/

├── .github/
│   └── workflows/
│       └── ci.yml
├── .gitignore
├── .scalafmt.conf
├── LICENSE
├── README.md
├── build.sbt
├── magnum/
│   └── src/
│       ├── main/
│       │   └── scala/
│       │       └── com/
│       │           └── augustnagro/
│       │               └── magnum/
│       │                   ├── BatchUpdateResult.scala
│       │                   ├── ClickhouseDbType.scala
│       │                   ├── ColumnName.scala
│       │                   ├── ColumnNames.scala
│       │                   ├── DbCodec.scala
│       │                   ├── DbCon.scala
│       │                   ├── DbTx.scala
│       │                   ├── DbType.scala
│       │                   ├── DerivingUtil.scala
│       │                   ├── Frag.scala
│       │                   ├── FragWriter.scala
│       │                   ├── H2DbType.scala
│       │                   ├── Id.scala
│       │                   ├── ImmutableRepo.scala
│       │                   ├── MySqlDbType.scala
│       │                   ├── NullOrder.scala
│       │                   ├── OracleDbType.scala
│       │                   ├── PostgresDbType.scala
│       │                   ├── Query.scala
│       │                   ├── Repo.scala
│       │                   ├── RepoDefaults.scala
│       │                   ├── ResultSetIterator.scala
│       │                   ├── Returning.scala
│       │                   ├── Seek.scala
│       │                   ├── SeekDir.scala
│       │                   ├── Sort.scala
│       │                   ├── SortOrder.scala
│       │                   ├── Spec.scala
│       │                   ├── SpecImpl.scala
│       │                   ├── SqlException.scala
│       │                   ├── SqlExceptionEvent.scala
│       │                   ├── SqlLiteral.scala
│       │                   ├── SqlLogger.scala
│       │                   ├── SqlName.scala
│       │                   ├── SqlNameMapper.scala
│       │                   ├── SqlSuccessEvent.scala
│       │                   ├── SqliteDbType.scala
│       │                   ├── Table.scala
│       │                   ├── TableExprs.scala
│       │                   ├── TableInfo.scala
│       │                   ├── Transactor.scala
│       │                   ├── UUIDCodec.scala
│       │                   ├── Update.scala
│       │                   └── util.scala
│       └── test/
│           ├── resources/
│           │   ├── clickhouse/
│           │   │   ├── big-dec.sql
│           │   │   ├── car.sql
│           │   │   ├── my-time.sql
│           │   │   ├── no-id.sql
│           │   │   └── person.sql
│           │   ├── h2/
│           │   │   ├── big-dec.sql
│           │   │   ├── car.sql
│           │   │   ├── my-time.sql
│           │   │   ├── my-user.sql
│           │   │   ├── no-id.sql
│           │   │   └── person.sql
│           │   ├── mysql/
│           │   │   ├── big-dec.sql
│           │   │   ├── car.sql
│           │   │   ├── my-time.sql
│           │   │   ├── my-user.sql
│           │   │   ├── no-id.sql
│           │   │   └── person.sql
│           │   └── pg/
│           │       ├── big-dec.sql
│           │       ├── car.sql
│           │       ├── my-time.sql
│           │       ├── my-user.sql
│           │       ├── no-id.sql
│           │       └── person.sql
│           └── scala/
│               ├── ClickHouseTests.scala
│               ├── EffectiveSubsetTests.scala
│               ├── H2Tests.scala
│               ├── MySqlTests.scala
│               ├── OracleTests.scala
│               ├── PgTests.scala
│               ├── SqliteTests.scala
│               ├── opaques.scala
│               └── shared/
│                   ├── BigDecTests.scala
│                   ├── Color.scala
│                   ├── DateTimeTests.scala
│                   ├── EmbeddedFragTests.scala
│                   ├── EntityCreatorTests.scala
│                   ├── ImmutableRepoTests.scala
│                   ├── MultilineFragTests.scala
│                   ├── NoIdTests.scala
│                   ├── OptionalProductTests.scala
│                   ├── RepoTests.scala
│                   ├── SharedTests.scala
│                   ├── SpecTests.scala
│                   ├── SqlNameTests.scala
│                   ├── TableInfoTests.scala
│                   └── TupleTests.scala
├── magnum-pg/
│   └── src/
│       ├── main/
│       │   └── scala/
│       │       └── com/
│       │           └── augustnagro/
│       │               └── magnum/
│       │                   └── pg/
│       │                       ├── PgCodec.scala
│       │                       ├── SqlArrayCodec.scala
│       │                       ├── enums/
│       │                       │   ├── PgEnumDbCodec.scala
│       │                       │   ├── PgEnumToScalaEnumSqlArrayCodec.scala
│       │                       │   └── PgStringToScalaEnumSqlArrayCodec.scala
│       │                       ├── json/
│       │                       │   ├── JsonBDbCodec.scala
│       │                       │   └── JsonDbCodec.scala
│       │                       └── xml/
│       │                           └── XmlDbCodec.scala
│       └── test/
│           ├── resources/
│           │   ├── pg-car.sql
│           │   ├── pg-service-list.sql
│           │   └── pg-user.sql
│           └── scala/
│               ├── CirceJsonBDbCodec.scala
│               ├── CirceJsonDbCodec.scala
│               ├── Color.scala
│               ├── LastService.scala
│               ├── MagCar.scala
│               ├── MagUser.scala
│               ├── MyJsonB.scala
│               ├── MyXml.scala
│               └── PgCodecTests.scala
├── magnum-zio/
│   └── src/
│       ├── main/
│       │   └── scala/
│       │       └── com/
│       │           └── augustnagro/
│       │               └── magnum/
│       │                   └── magzio/
│       │                       └── TransactorZIO.scala
│       └── test/
│           ├── resources/
│           │   └── pg/
│           │       ├── big-dec.sql
│           │       ├── car.sql
│           │       ├── my-user.sql
│           │       ├── no-id.sql
│           │       └── person.sql
│           └── scala/
│               └── com/
│                   └── augustnagro/
│                       └── magnum/
│                           └── magzio/
│                               ├── ImmutableRepoZioTests.scala
│                               └── PgZioTests.scala
└── project/
    ├── build.properties
    └── plugins.sbt

================================================
FILE CONTENTS
================================================

================================================
FILE: .github/workflows/ci.yml
================================================
name: CI

on:
  push:
    branches: ["master"]
  pull_request:
    branches: ["master"]

permissions:
  contents: read

jobs:
  ci:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
    steps:
      - name: Checkout current branch
        uses: actions/checkout@v6.0.2
      - name: Setup Java
        uses: actions/setup-java@v5.2.0
        with:
          distribution: temurin
          java-version: 25
          check-latest: true
      - name: Setup sbt
        uses: sbt/setup-sbt@v1
      - name: Cache scala dependencies
        uses: coursier/cache-action@v8
      - name: Run tests
        run: sbt 'scalafmtCheckAll; test'


================================================
FILE: .gitignore
================================================
.settings
.DS_Store

*.iml
.idea
target
nbproject
nb-configuration.xml

.class
.log
.jar
.war
.ear
.zip
.tar.gz
.rar

hs_err_pid*

*.log

.bsp

*.sc
*.db

*metals*
.bloop
.vscode

================================================
FILE: .scalafmt.conf
================================================
version = 3.8.4-RC3
runner.dialect = scala3
rewrite.scala3.insertEndMarkerMinLines = 20
rewrite.scala3.removeEndMarkerMaxLines = 19
binPack.parentConstructors = Oneline


================================================
FILE: LICENSE
================================================

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!)  The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

================================================
FILE: README.md
================================================
## Magnum

[![Latest version](https://index.scala-lang.org/augustnagro/magnum/magnum/latest.svg?color=orange)](https://index.scala-lang.org/augustnagro/magnum/magnum)

Yet another database client for Scala. No dependencies, high productivity.

* [Installing](#installing)
* [ScalaDoc](#scaladoc)
* [Documentation](#documentation)
  * [`connect` creates a database connection](#connect-creates-a-database-connection)
  * [`transact` creates a database transaction](#transact-creates-a-database-transaction)
  * [Type-safe Transaction & Connection Management](#type-safe-transaction--connection-management)
  * [Customizing Transactions](#customizing-transactions)
  * [Sql Interpolator, Frag, Query, Update, Returning](#sql-interpolator-frag-query-and-update)
  * [Batch Updates](#batch-updates)
  * [Immutable Repositories](#immutable-repositories)
  * [Repositories](#repositories)
  * [Database generated columns](#database-generated-columns)
  * [Specifications](#specifications)
  * [Scala 3 Enum & NewType Support](#scala-3-enum--newtype-support)
  * [`DbCodec`: Typeclass for JDBC reading & writing](#dbcodec-typeclass-for-jdbc-reading--writing)
  * [Future-Proof Queries](#future-proof-queries)
  * [Splicing Literal Values into Frags](#splicing-literal-values-into-frags)
  * [Postgres Module](#postgres-module)
  * [Logging](#logging-sql-queries)
* [Integrations](#integrations)
  * [ZIO](#zio) 
* [Motivation](#motivation)
* [Feature List And Database Support](#feature-list)
* [Talks and Blogs](#talks-and-blogs)
* [Frequently Asked Questions](#frequently-asked-questions)

## Installing

```
"com.augustnagro" %% "magnum" % "1.3.0"
```

Magnum requires Scala >= 3.3.0

You must also install the JDBC driver for your database, for example:

```
"org.postgresql" % "postgresql" % "<version>"
```

And for performance, a JDBC connection pool like [HikariCP](https://github.com/brettwooldridge/HikariCP)

## ScalaDoc

https://javadoc.io/doc/com.augustnagro/magnum_3

## Documentation

### `connect` creates a database connection.

`connect` takes two parameters: the database Transactor
and a context function with a given `DbCon` connection.
For example:

```scala
import com.augustnagro.magnum.*

val dataSource: javax.sql.DataSource = ???
val xa = Transactor(dataSource)

val users: Vector[User] = connect(xa):
  sql"SELECT * FROM user".query[User].run()
```

### `transact` creates a database transaction.

Like `connect`, `transact` accepts a Transactor and context function.
The context function provides a `DbTx` instance.
If the function throws, the transaction will be rolled back.

```scala
// update is rolled back
transact(xa):
  sql"UPDATE user SET first_name = $firstName WHERE id = $id".update.run()
  thisMethodThrows()
```

### Type-safe Transaction & Connection Management

Annotate transactional methods with `using DbTx`, and connections with `using DbCon`.

Since `DbTx <: DbCon`, it's impossible to call a method with the wrong context.

For example, this compiles:

```scala
def runUpdateAndGetUsers()(using DbTx): Vector[User] =
  userRepo.deleteById(1L)
  getUsers

def getUsers(using DbCon): Vector[User] =
  sql"SELECT * FROM user".query[User].run()
```

But not this:

```scala
def runSomeQueries(using DbCon): Vector[User] =
  runUpdateAndGetUsers()
```

### Customizing transactions

`Transactor` lets you customize the transaction (or connection) behavior.

```scala
val xa = Transactor(
  dataSource = ???,
  sqlLogger = SqlLogger.logSlowQueries(500.milliseconds),
  connectionConfig = con =>
    con.setTransactionIsolation(Connection.TRANSACTION_REPEATABLE_READ)
)

transact(xa):
  sql"SELECT id from myUser".query[Long].run()
```

### Sql Interpolator, Frag, Query, and Update

The `sql` interpolator can express any SQL expression, returning a `Frag` sql fragment. You can interpolate values without the risk of SQL-injection attacks.

```scala
val firstNameOpt = Some("John")
val twoDaysAgo = OffsetDateTime.now.minusDays(2)

val frag: Frag =
  sql"""
    SELECT id, last_name FROM user
    WHERE first_name = $firstNameOpt
    AND created <= $twoDaysAgo
    """
```

Frags can be turned into queries with the `query[T](using DbCodec[T])` method:

```scala
val query = frag.query[(Long, String)] // Query[(Long, String)]
```

Or updates via `update`

```scala
val update: Update =
  sql"UPDATE user SET first_name = 'Buddha' WHERE id = 3".update
```

Or an update with a `RETURNING` clause via `returning`:

```scala
val updateReturning: Returning =
  sql"""
     UPDATE user SET first_name = 'Buddha'
     WHERE last_name = 'Harper'
     RETURNING id
     """.returning[Long]
```

All are executed via `run()(using DbCon)`:

```scala
transact(xa):
  val tuples: Vector[(Long, String)] = query.run()
  val updatedRows: Int = update.run()
  val updatedIds: Vector[Long] = updateReturning.run()
```

### Batch Updates

Batch updates are supported via `batchUpdate` method in package `com.augustnagro.magnum`.

```scala
connect(xa):
  val users: Iterable[User] = ???
  val updateResult: BatchUpdateResult =
    batchUpdate(users): user =>
      sql"...".update
```

`batchUpdate` returns a `BatchUpdateResult` enum, which is `Success(numRowsUpdated)` when the driver reports the number of affected rows, or `SuccessNoInfo` otherwise.

### Immutable Repositories

The `ImmutableRepo` class auto-generates the following methods at compile-time:

```scala
  def count(using DbCon): Long
  def existsById(id: ID)(using DbCon): Boolean
  def findAll(using DbCon): Vector[E]
  def findAll(spec: Spec[E])(using DbCon): Vector[E]
  def findById(id: ID)(using DbCon): Option[E]
  def findAllById(ids: Iterable[ID])(using DbCon): Vector[E]
```

Here's an example:

```scala
@Table(PostgresDbType, SqlNameMapper.CamelToSnakeCase)
case class User(
  @Id id: Long,
  firstName: Option[String],
  lastName: String,
  created: OffsetDateTime
) derives DbCodec

val userRepo = ImmutableRepo[User, Long]

transact(xa):
  val cnt = userRepo.count
  val userOpt = userRepo.findById(2L)
```

Importantly, class User is annotated with `@Table`, which defines the table's database type. The annotation optionally specifies the name-mapping between Scala fields and column names. You can also use the `@SqlName` annotation on individual fields. Finally, the table must `derive DbCodec`, or otherwise provide an implicit DbCodec instance.

The optional `@Id` annotation denotes the table's primary key. Not setting `@Id` will default to using the first field. If there is no logical id, then remove the annotation and use Null in the ID type parameter of Repositories (see next).

It is a best practice to extend ImmutableRepo to encapsulate your SQL in repositories. This way, it's easier to maintain since they're grouped together.

```scala
class UserRepo extends ImmutableRepo[User, Long]:
  def firstNamesForLast(lastName: String)(using DbCon): Vector[String] =
    sql"""
      SELECT DISTINCT first_name
      FROM user
      WHERE last_name = $lastName
      """.query[String].run()
        
  // other User-related queries here
```

### Repositories

The `Repo` class auto-generates the following methods at compile-time:

```scala
  def count(using DbCon): Long
  def existsById(id: ID)(using DbCon): Boolean
  def findAll(using DbCon): Vector[E]
  def findAll(spec: Spec[E])(using DbCon): Vector[E]
  def findById(id: ID)(using DbCon): Option[E]
  def findAllById(ids: Iterable[ID])(using DbCon): Vector[E]
  
  def delete(entity: E)(using DbCon): Unit
  def deleteById(id: ID)(using DbCon): Unit
  def truncate()(using DbCon): Unit
  def deleteAll(entities: Iterable[E])(using DbCon): BatchUpdateResult
  def deleteAllById(ids: Iterable[ID])(using DbCon): BatchUpdateResult
  def insert(entityCreator: EC)(using DbCon): Unit
  def insertAll(entityCreators: Iterable[EC])(using DbCon): Unit
  def insertReturning(entityCreator: EC)(using DbCon): E
  def insertAllReturning(entityCreators: Iterable[EC])(using DbCon): Vector[E]
  def update(entity: E)(using DbCon): Unit
  def updateAll(entities: Iterable[E])(using DbCon): BatchUpdateResult
```

Here's an example:

```scala
@Table(PostgresDbType, SqlNameMapper.CamelToSnakeCase)
case class User(
  @Id id: Long,
  firstName: Option[String],
  lastName: String,
  created: OffsetDateTime
) derives DbCodec

val userRepo = Repo[User, User, Long]

val countAfterUpdate = transact(xa):
  userRepo.deleteById(2L)
  userRepo.count
```

It is a best practice to encapsulate your SQL in repositories.

```scala
class UserRepo extends Repo[User, User, Long]
```

Also note that Repo extends ImmutableRepo. Some databases cannot support every method, and will throw UnsupportedOperationException.

### Database generated columns

It is often the case that database columns are auto-generated, for example, primary key IDs. This is why the Repo class has 3 type parameters. 

The first defines the Entity-Creator, which should omit any fields that are auto-generated. The entity-creator class must be an 'effective' subclass of the entity class, but it does not have to subclass the entity. This is verified at compile time.

The second type parameter is the Entity class, and the final is for the ID. If the Entity does not have a logical ID, use Null.

```scala
case class UserCreator(
  firstName: Option[String],
  lastName: String,
) derives DbCodec

@Table(PostgresDbType, SqlNameMapper.CamelToSnakeCase)
case class User(
  @Id id: Long,
  firstName: Option[String],
  lastName: String,
  created: OffsetDateTime
) derives DbCodec

val userRepo = Repo[UserCreator, User, Long]

val newUser: User = transact(xa):
  userRepo.insertReturning(
    UserCreator(Some("Adam"), "Smith")
  )
```

### Specifications

Specifications help you write safe, dynamic queries.
An example use-case would be a search results page that allows users to sort and filter the paginated data.

1. If you need to perform joins to get the data needed, first create a database view.
2. Next, create an entity class that derives DbCodec.
3. Finally, use the Spec class to create a specification.

Here's an example:

```scala
val partialName = "Ja%"
val lastNameOpt = Option("Brown")
val searchDate = OffsetDateTime.now.minusDays(2)
val idPosition = 42L

val spec = Spec[User]
  .where(sql"first_name ILIKE $partialName")
  .where(lastNameOpt.map(ln => sql"last_name = $ln").getOrElse(sql""))
  .where(sql"created >= $searchDate")
  .seek("id", SeekDir.Gt, idPosition, SortOrder.Asc)
  .limit(10)

val users: Vector[User] = userRepo.findAll(spec)
```

Note that both [seek pagination](https://blog.jooq.org/faster-sql-paging-with-jooq-using-the-seek-method/) and offset pagination are supported.

### Scala 3 Enum & NewType Support

Magnum supports Scala 3 enums (non-adt) fully, by default writing & reading them as Strings. For example,

```scala
@Table(PostgresDbType, SqlNameMapper.CamelToUpperSnakeCase)
enum Color derives DbCodec:
  case Red, Green, Blue

@Table(PostgresDbType, SqlNameMapper.CamelToSnakeCase)
case class User(
  @Id id: Long,
  firstName: Option[String],
  lastName: String,
  created: OffsetDateTime,
  favoriteColor: Color
) derives DbCodec
```

NewTypes and opaque type aliases can cause issues with derivation since given DbCodecs are not available. A simple way to provide them is using `DbCodec.biMap`:

```scala
opaque type MyId = Long

object MyId:
  def apply(id: Long): MyId =
    require(id >= 0)
    id

  extension (myId: MyId)
    def underlying: Long = myId

  given DbCodec[MyId] =
    DbCodec[Long].biMap(MyId.apply, _.underlying)

transact(xa):
  val id = MyId(123L)
  sql"UPDATE my_table SET x = true WHERE id = $id".update.run()
```

### `DbCodec`: Typeclass for JDBC reading & writing

DbCodec is a Typeclass for JDBC reading & writing.

Built-in DbCodecs are provided for many types, including primitives, dates, Options, and Tuples. You can derive DbCodecs by adding `derives DbCodec` to your case class or enum.

```scala
val rs: ResultSet = ???
val ints: Vector[Int] = DbCodec[Int].read(rs)

val ps: PreparedStatement = ???
DbCodec[Int].writeSingle(22, ps)
```

### Defining your own DbCodecs

To modify the JDBC mappings, implement a given DbCodec instance as you would for any Typeclass.

### Future-Proof Queries

A common problem when writing SQL queries is that they're difficult to refactor. When a column or table name changes you have to do a global find & replace. And if you miss a query, it's discovered at runtime.

There's also lots of repetition when writing SQL. Magnum's repositories help scrap the boilerplate, but writing `SELECT a, b, c, d, ...` for a large table quickly gets tiring.

To help with this, Magnum offers a `TableInfo` class to enable 'future-proof' queries. An important caveat is that these queries are harder to copy/paste into SQL editors like PgAdmin or DBeaver.

Here's some examples:

```scala
import com.augustnagro.magnum.*

case class UserCreator(firstName: String, age: Int) derives DbCodec

@Table(PostgresDbType, SqlNameMapper.CamelToSnakeCase)
case class User(id: Long, firstName: String, age: Int) derives DbCodec

object User:
  val Table = TableInfo[UserCreator, User, Long]

def allUsers(using DbCon): Vector[User] =
  val u = User.Table
  // equiv to 
  // SELECT id, first_name, age FROM user
  sql"SELECT ${u.all} FROM $u".query[User].run()

def firstNamesForLast(lastName: String)(using DbCon): Vector[String] =
  val u = User.Table
  // equiv to
  // SELECT DISTINCT first_name FROM user WHERE last_name = ?
  sql"""
    SELECT DISTINCT ${u.firstName} FROM $u
    WHERE ${u.lastName} = $lastName
  """.query[String].run()

def insertOrIgnore(creator: UserCreator)(using DbCon): Unit =
  val u = User.Table
  // equiv to
  // INSERT OR IGNORE INTO user (first_name, age) VALUES (?, ?)
  sql"INSERT OR IGNORE INTO $u ${u.insertCols} VALUES ($creator)".update.run()
```

It's important that `val Table = TableInfo[X, Y, Z]` is not explicitly typed, otherwise its structural typing will be destroyed.

In the case of multiple joins, you can use `TableInfo.alias(String)` to prevent name conflicts:

```scala
val c = TableInfo[Car].alias("c")
val p = TableInfo[Person].alias("p")

sql"""
   SELECT ${c.all}, ${p.firstName}
   FROM $c
   JOIN $p ON ${p.id} = ${c.personId}
   """.query.run()
```

### Splicing Literal Values into Frags

To splice Strings directly into `sql` statements, you can interpolate `SqlLiteral` values. For example,

```scala
val table = SqlLiteral("beans")
  
sql"select * from $table"
```

This feature should be used sparingly and never with untrusted input. 

### Postgres Module

The Postgres Module adds support for [Geometric Types](https://www.postgresql.org/docs/current/datatype-geometric.html) and [Arrays](https://www.postgresql.org/docs/current/arrays.html). Postgres Arrays can be decoded into Scala List/Vector/IArray, etc; multi-dimensionality is also supported.

```
"com.augustnagro" %% "magnumpg" % "1.3.0"
```

Example: Insert into a table with a `point[]` type column.

With table:

```sql
create table my_geo (
  id bigint primary key,
  pnts point[] not null
);
```

```scala
import org.postgresql.geometric.*
import com.augustnagro.magnum.*
import com.augustnagro.magnum.pg.PgCodec.given

@Table(PostgresDbType)
case class MyGeo(@Id id: Long, pnts: IArray[PGpoint]) derives DbCodec

val dataSource: javax.sql.DataSource = ???
val xa = Transactor(dataSource)

val myGeoRepo = Repo[MyGeo, MyGeo, Long]

transact(xa):
  myGeoRepo.insert(MyGeo(1L, IArray(PGpoint(1, 1), PGpoint(2, 2))))
```

The import of `PgCodec.given` is required to bring Geo/Array DbCodecs into scope.

#### Arrays of Enums

The `pg` module supports arrays of simple (non-ADT) enums.

If you want to map an array of [Postgres enums](https://www.postgresql.org/docs/current/datatype-enum.html) to a sequence of Scala enums, use the following import when deriving the DbCodec:

```scala
import com.augustnagro.magnum.pg.PgCodec.given
import com.augustnagro.magnum.pg.enums.PgEnumToScalaEnumSqlArrayCodec

// in postgres: `create type Color as enum ('Red', 'Green', 'Blue');`
enum Color derives DbCodec:
  case Red, Green, Blue

@Table(PostgresDbType)
case class Car(@Id id: Long, colors: Vector[Color]) derives DbCodec
```

If instead your Postgres type is an array of varchar or text, use the following import:

```scala
import com.augustnagro.magnum.pg.enums.PgStringToScalaEnumSqlArrayCodec
```

### Logging SQL queries

If you set the java.util Logging level to DEBUG, all SQL queries will be logged.
Setting to TRACE will log SQL queries and their parameters.

#### Logging Slow Queries

You can log slow queries by using the `Transactor` class in conjunction with `SqlLogger.logSlowQueries(FiniteDuration)`. See [Customizing Transactions](#customizing-transactions) for an example. You can also implement your own SqlLogger subclass as desired.

## Integrations

### ZIO

Magnum provides a thin layer of integration with ZIO.
The `magnum-zio` module provides implementations of the `connect` and `transact` utilities that return ZIO effects.

To use the ZIO integration, add the following dependency:
```scala
"com.augustnagro" %% "magnumzio" % "x.x.x"
```

and import these utils in your code with:
```scala
import com.augustnagro.magnum.magzio.*
```

## Motivation

Historically, database clients on the JVM fall into three categories.

* Object Oriented Repositories (Spring-Data, Hibernate)
* Functional DSLs (JOOQ, Slick, quill, zio-sql)
* SQL String interpolators (Anorm, doobie, plain jdbc)

Magnum is a Scala 3 library combining aspects of all three,
providing a typesafe and refactorable SQL interface,
which can express all SQL expressions, on all JDBC-supported databases.

Like in Zoolander (the movie), Magnum represents a 'new look' for Database access in Scala.

## Feature List

* Supports any database with a JDBC driver,
  including Postgres, MySql, Oracle, ClickHouse, H2, and Sqlite
* Efficient `sql" "` interpolator
* Purely-functional API
* Common queries (like insert, update, delete) generated at compile time
* Difficult to hit [N+1 query problem](https://stackoverflow.com/questions/97197/what-is-the-n1-selects-problem-in-orm-object-relational-mapping)
* Type-safe Transactions
* Supports database-generated columns
* Easy to use, Loom-ready API (no Futures or Effect Systems)
* Easy to define entities. Easy to implement DB support & codecs for custom types.
* Scales to complex SQL queries
* Specifications for building dynamic queries, such as table filters with pagination
* Supports high-performance [Seek pagination](https://blog.jooq.org/faster-sql-paging-with-jooq-using-the-seek-method/)
* Performant batch-queries

## Developing
The tests are written using TestContainers, which requires Docker be installed.

## Talks and Blogs

* Scala Days 2023: [slides](/Magnum-Slides-to-Share.pdf), [talk](https://www.youtube.com/watch?v=iKNRS5b1zAY)

## Frequently Asked Questions

#### Does Magnum support nested entities like:

```scala
@Table(H2DbType, SqlNameMapper.CamelToSnakeCase)
case class Company(
  name: String,
  address: Address,
  ) derives DbCodec

case class Address(
  street: String,
  city: String,
  zipCode: String,
  country: String
) derives DbCodec
```

NO; Magnum only supports deriving flat entity class structures. This keeps things simple and makes it obvious how the Scala entity class maps to the SQL table.

We may add support for SQL UDTs (user defined types) in the future; however at the time of writing, UDTs are not well-supported by JDBC drivers.

You could also express the above example using a foreign key to an Address table, like so:

```scala
@Table(H2DbType, SqlNameMapper.CamelToSnakeCase)
case class Company(
  name: String,
  addressId: AddressId,
) derives DbCodec

opaque type AddressId = Long
object AddressId:
  def apply(id: Long): AddressId = id
  extension (id: AddressId)
    def underlying: Long = id
  given DbCodec[AddressId] =
    DbCodec[Long].biMap(AddressId.apply, _.underlying)

@Table(H2DbType, SqlNameMapper.CamelToSnakeCase)
case class Address(
  @Id id: AddressId,
  street: String,
  city: String,
  zipCode: String,
  country: String
) derives DbCodec
```

#### UUID DbCodec doesn't work for my database

Some databases directly support the UUID type; these include Postgres, Clickhouse, and H2. When using the built-in `DbCodec[UUID]`, defined in `DbCodec.scala`, serialization and deserialization of `java.util.UUID` will work as expected.

Other databases like MySql, Oracle, and Sqlite, however, do not natively support UUID columns. Users have to choose an alternate datatype to store the UUID: most commonly `varchar(36)` or `binary(16)`. The JDBC drivers for these databases do not support direct serialization and deserialization of `java.util.UUID`, therefore the default `DbCodec[UUID]` will not be sufficient. Instead, import the appropriate codec from `com.augustnagro.magnum.UUIDCodec`. For example,

```scala
import com.augustnagro.magnum.*
import com.augustnagro.magnum.UUIDCodec.VarCharUUIDCodec
import java.util.UUID

@Table(MySqlDbType)
case class Person(@Id id: Long, name: String, tracking_id: Option[UUID]) derives DbCodec
```

## Todo
* JSON / XML support
* Support MSSql
* Cats Effect & ZIO modules
* Explicit Nulls support


================================================
FILE: build.sbt
================================================
// Coordinates and metadata shared by every module in the build.
ThisBuild / organization := "com.augustnagro"
ThisBuild / version := "2.0.0-SNAPSHOT"
ThisBuild / versionScheme := Some("early-semver")
ThisBuild / scalaVersion := "3.3.7"
ThisBuild / scalacOptions ++= Seq("-deprecation")
ThisBuild / homepage := Some(url("https://github.com/AugustNagro/magnum"))
ThisBuild / licenses += (
  "Apache-2.0",
  url(
    "https://opensource.org/licenses/Apache-2.0"
  )
)
ThisBuild / scmInfo := Some(
  ScmInfo(
    url("https://github.com/AugustNagro/magnum"),
    "scm:git:git@github.com:augustnagro/magnum.git",
    Some("scm:git:git@github.com:augustnagro/magnum.git")
  )
)
ThisBuild / developers := List(
  Developer(
    id = "augustnagro@gmail.com",
    name = "August Nagro",
    email = "augustnagro@gmail.com",
    url = url("https://augustnagro.com")
  )
)
// Publishing: snapshots go to Sonatype Central's snapshot repository,
// releases are staged locally first (sbt-sonatype style localStaging).
ThisBuild / publishMavenStyle := true
ThisBuild / pomIncludeRepository := { _ => false }
ThisBuild / publishTo := {
  val centralSnapshots =
    "https://central.sonatype.com/repository/maven-snapshots/"
  if (isSnapshot.value) Some("central-snapshots" at centralSnapshots)
  else localStaging.value
}
// Skip publishing by default; each publishable module opts back in below
// with `publish / skip := false`.
ThisBuild / publish / skip := true

addCommandAlias("fmt", "scalafmtAll")

// Dependency versions shared across modules.
val testcontainersVersion = "0.44.1"
val circeVersion = "0.14.10"
val munitVersion = "1.1.0"
val postgresDriverVersion = "42.7.4"

// Aggregate-only root project (not published; see publish/skip above).
lazy val root = project
  .in(file("."))
  .aggregate(magnum, magnumPg, magnumZio)

// Core library. All database drivers are Test-scoped: the published
// artifact depends only on JDBC, and tests run against real databases
// via Testcontainers (Docker required).
lazy val magnum = project
  .in(file("magnum"))
  .settings(
    publish / skip := false,
    libraryDependencies ++= Seq(
      "org.scalameta" %% "munit" % munitVersion % Test,
      "com.dimafeng" %% "testcontainers-scala-munit" % testcontainersVersion % Test,
      "com.dimafeng" %% "testcontainers-scala-postgresql" % testcontainersVersion % Test,
      "org.postgresql" % "postgresql" % postgresDriverVersion % Test,
      "com.dimafeng" %% "testcontainers-scala-mysql" % testcontainersVersion % Test,
      "com.mysql" % "mysql-connector-j" % "9.0.0" % Test,
      "com.h2database" % "h2" % "2.3.232" % Test,
      "com.dimafeng" %% "testcontainers-scala-oracle-xe" % testcontainersVersion % Test,
      "com.oracle.database.jdbc" % "ojdbc11" % "21.9.0.0" % Test,
      "com.dimafeng" %% "testcontainers-scala-clickhouse" % testcontainersVersion % Test,
      "com.clickhouse" % "clickhouse-jdbc" % "0.6.0" % Test classifier "http",
      "org.xerial" % "sqlite-jdbc" % "3.46.1.3" % Test
    )
  )

// Postgres extensions (geometric types, arrays). The Postgres driver is
// "provided": users supply their own driver version at runtime.
lazy val magnumPg = project
  .in(file("magnum-pg"))
  .dependsOn(magnum)
  .settings(
    publish / skip := false,
    libraryDependencies ++= Seq(
      "org.postgresql" % "postgresql" % postgresDriverVersion % "provided",
      "org.scalameta" %% "munit" % munitVersion % Test,
      "com.dimafeng" %% "testcontainers-scala-munit" % testcontainersVersion % Test,
      "com.dimafeng" %% "testcontainers-scala-postgresql" % testcontainersVersion % Test,
      "io.circe" %% "circe-core" % circeVersion % Test,
      "io.circe" %% "circe-parser" % circeVersion % Test,
      "org.scala-lang.modules" %% "scala-xml" % "2.3.0" % Test
    )
  )

// ZIO integration. ZIO itself is Provided so users pin their own version.
lazy val magnumZio = project
  .in(file("magnum-zio"))
  .dependsOn(magnum)
  .settings(
    publish / skip := false,
    libraryDependencies ++= Seq(
      "dev.zio" %% "zio" % "2.1.24" % Provided,
      "org.scalameta" %% "munit" % munitVersion % Test,
      "com.dimafeng" %% "testcontainers-scala-munit" % testcontainersVersion % Test,
      "com.dimafeng" %% "testcontainers-scala-postgresql" % testcontainersVersion % Test,
      "org.postgresql" % "postgresql" % postgresDriverVersion % Test
    )
  )


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/BatchUpdateResult.scala
================================================
package com.augustnagro.magnum

import scala.util.boundary

/** Outcome of a JDBC batch update: the total number of rows updated, or
  * [[SuccessNoInfo]] if the count is unknown (e.g. when the driver reports
  * `Statement.SUCCESS_NO_INFO` for a batch element).
  */
enum BatchUpdateResult:
  /** The batch completed and updated `rowsUpdated` rows in total. */
  case Success(rowsUpdated: Long)

  /** The batch completed, but the driver did not report a row count. */
  case SuccessNoInfo


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/ClickhouseDbType.scala
================================================
package com.augustnagro.magnum

import java.sql.{Connection, PreparedStatement, ResultSet, Statement}
import java.time.OffsetDateTime
import java.util.StringJoiner
import scala.collection.View
import scala.deriving.Mirror
import scala.reflect.ClassTag
import scala.util.{Failure, Success, Using, boundary}

/** [[DbType]] implementation for ClickHouse.
  *
  * ClickHouse has no generated keys, so the entity-creator type EC must be
  * the same runtime class as the entity type E (enforced by the `require`
  * below); insert-returning methods simply echo the creator back. Some
  * operations (findAllById, update, updateAll) are unsupported and throw
  * UnsupportedOperationException.
  */
object ClickhouseDbType extends DbType:
  def buildRepoDefaults[EC, E, ID](
      tableNameSql: String,
      eElemNames: Seq[String],
      eElemNamesSql: Seq[String],
      eElemCodecs: Seq[DbCodec[?]],
      ecElemNames: Seq[String],
      ecElemNamesSql: Seq[String],
      idIndex: Int
  )(using
      eCodec: DbCodec[E],
      ecCodec: DbCodec[EC],
      idCodec: DbCodec[ID],
      eClassTag: ClassTag[E],
      ecClassTag: ClassTag[EC],
      idClassTag: ClassTag[ID]
  ): RepoDefaults[EC, E, ID] =
    // insertReturning casts EC to E below, which is only sound if the two
    // are the same class.
    require(
      eClassTag.runtimeClass == ecClassTag.runtimeClass,
      "ClickHouse does not support generated keys, so EC must equal E"
    )
    val idName = eElemNamesSql(idIndex)
    val selectKeys = eElemNamesSql.mkString(", ")
    val ecInsertKeys = ecElemNamesSql.mkString("(", ", ", ")")

    // All SQL strings (and the parameterless Frags) are built once here and
    // reused by every call on the returned repository.
    val countSql = s"SELECT count(*) FROM $tableNameSql"
    val countQuery = Frag(countSql, Vector.empty, FragWriter.empty).query[Long]
    val existsByIdSql =
      s"SELECT 1 FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}"
    val findAllSql = s"SELECT $selectKeys FROM $tableNameSql"
    val findAllQuery = Frag(findAllSql, Vector.empty, FragWriter.empty).query[E]
    val findByIdSql =
      s"SELECT $selectKeys FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}"
    val deleteByIdSql =
      s"DELETE FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}"
    val truncateSql = s"TRUNCATE TABLE $tableNameSql"
    val truncateUpdate =
      Frag(truncateSql, Vector.empty, FragWriter.empty).update
    val insertSql =
      s"INSERT INTO $tableNameSql $ecInsertKeys VALUES (${ecCodec.queryRepr})"

    // Writes `id` at parameter index `pos` and returns the next free index
    // (ids may span multiple columns, hence `idCodec.cols.length`).
    def idWriter(id: ID): FragWriter = (ps, pos) =>
      idCodec.writeSingle(id, ps, pos)
      pos + idCodec.cols.length

    new RepoDefaults[EC, E, ID]:
      def count(using con: DbCon): Long = countQuery.run().head

      // A non-empty "SELECT 1 ... WHERE id = ?" result means the row exists.
      def existsById(id: ID)(using DbCon): Boolean =
        Frag(existsByIdSql, IArray(id), idWriter(id))
          .query[Int]
          .run()
          .nonEmpty

      def findAll(using DbCon): Vector[E] = findAllQuery.run()

      def findAll(spec: Spec[E])(using DbCon): Vector[E] =
        SpecImpl.Default.findAll(spec, tableNameSql)

      def findById(id: ID)(using DbCon): Option[E] =
        Frag(findByIdSql, IArray(id), idWriter(id))
          .query[E]
          .run()
          .headOption

      // Not supported for ClickHouse.
      def findAllById(ids: Iterable[ID])(using DbCon): Vector[E] =
        throw UnsupportedOperationException()

      // Extracts the id from the entity via its Product representation at
      // `idIndex`, then delegates to deleteById.
      def delete(entity: E)(using DbCon): Unit =
        deleteById(
          entity
            .asInstanceOf[Product]
            .productElement(idIndex)
            .asInstanceOf[ID]
        )

      def deleteById(id: ID)(using DbCon): Unit =
        Frag(deleteByIdSql, IArray(id), idWriter(id)).update
          .run()

      def truncate()(using DbCon): Unit =
        truncateUpdate.run()

      def deleteAll(entities: Iterable[E])(using DbCon): BatchUpdateResult =
        deleteAllById(
          entities.map(e =>
            e.asInstanceOf[Product].productElement(idIndex).asInstanceOf[ID]
          )
        )

      // NOTE(review): handleQuery, timed, and batchUpdateResult are package
      // helpers defined elsewhere in this module (logging/timing wrappers).
      def deleteAllById(ids: Iterable[ID])(using
          con: DbCon
      ): BatchUpdateResult =
        handleQuery(deleteByIdSql, ids):
          Using(con.connection.prepareStatement(deleteByIdSql)): ps =>
            idCodec.write(ids, ps)
            timed(batchUpdateResult(ps.executeBatch()))

      def insert(entityCreator: EC)(using con: DbCon): Unit =
        handleQuery(insertSql, entityCreator):
          Using(con.connection.prepareStatement(insertSql)): ps =>
            ecCodec.writeSingle(entityCreator, ps)
            timed(ps.executeUpdate())

      def insertAll(entityCreators: Iterable[EC])(using con: DbCon): Unit =
        handleQuery(insertSql, entityCreators):
          Using(con.connection.prepareStatement(insertSql)): ps =>
            ecCodec.write(entityCreators, ps)
            timed(batchUpdateResult(ps.executeBatch()))

      // No generated keys in ClickHouse: the inserted row is exactly the
      // creator, so it is returned as-is (EC == E per the require above).
      def insertReturning(entityCreator: EC)(using con: DbCon): E =
        handleQuery(insertSql, entityCreator):
          Using(con.connection.prepareStatement(insertSql)): ps =>
            ecCodec.writeSingle(entityCreator, ps)
            timed:
              ps.executeUpdate()
              entityCreator.asInstanceOf[E]

      def insertAllReturning(
          entityCreators: Iterable[EC]
      )(using con: DbCon): Vector[E] =
        handleQuery(insertSql, entityCreators):
          Using(con.connection.prepareStatement(insertSql)): ps =>
            ecCodec.write(entityCreators, ps)
            timed:
              batchUpdateResult(ps.executeBatch())
              entityCreators.toVector.asInstanceOf[Vector[E]]

      // Not supported for ClickHouse.
      def update(entity: E)(using DbCon): Unit =
        throw UnsupportedOperationException()

      // Not supported for ClickHouse.
      def updateAll(entities: Iterable[E])(using
          con: DbCon
      ): BatchUpdateResult =
        throw UnsupportedOperationException()

    end new
  end buildRepoDefaults
end ClickhouseDbType


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/ColumnName.scala
================================================
package com.augustnagro.magnum

/** Represents an entity column. Can be interpolated in sql"" expressions.
  *
  * @param scalaName
  *   The Scala field name, e.g. "firstName".
  * @param sqlName
  *   The column name in the database, e.g. "first_name".
  * @param queryRepr
  *   The text spliced into queries for this column (includes the table alias
  *   when one is set, e.g. "u.first_name").
  */
class ColumnName(
    val scalaName: String,
    val sqlName: String,
    val queryRepr: String
) extends SqlLiteral


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/ColumnNames.scala
================================================
package com.augustnagro.magnum

/** A grouping of schema names, which may be interpolated in sql""
  * expressions. Being a [[SqlLiteral]], the `queryRepr` is spliced into the
  * query text directly rather than bound as a parameter.
  *
  * @param queryRepr
  *   The query representation. For example, "myColA, myColB"
  * @param columnNames
  *   The column names.
  */
class ColumnNames(val queryRepr: String, val columnNames: IArray[ColumnName])
    extends SqlLiteral


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/DbCodec.scala
================================================
package com.augustnagro.magnum

import java.net.URL
import java.sql.{JDBCType, PreparedStatement, ResultSet, Types}
import java.time.{
  Instant,
  LocalDate,
  LocalDateTime,
  LocalTime,
  OffsetDateTime,
  ZoneId,
  ZoneOffset
}
import java.util.UUID
import scala.annotation.implicitNotFound
import scala.deriving.Mirror
import scala.compiletime.{
  constValue,
  constValueTuple,
  erasedValue,
  error,
  summonAll,
  summonFrom,
  summonInline
}
import scala.quoted.*
import scala.reflect.ClassTag
import scala.util.boundary

/** Typeclass describing how values of type `E` are read from a
  * [[java.sql.ResultSet]] and written to a [[java.sql.PreparedStatement]].
  */
trait DbCodec[E]:
  outer =>

  /** Placeholder syntax used when this value appears in a query. For example,
    * DbCodec[Int].queryRepr == "?", while multi-column codecs join one
    * placeholder per column (e.g. DbCodec[(String, Boolean)].queryRepr ==
    * "(?, ?)").
    */
  def queryRepr: String

  /** One `java.sql.Types` constant for every "?" in [[queryRepr]]. For
    * mapping database-specific types, Types.JAVA_OBJECT is recommended.
    */
  def cols: IArray[Int]

  /** Read an E starting at column `pos`, consuming `cols.length` columns. The
    * ResultSet must already be positioned on a row (ie, ResultSet::next has
    * been called).
    */
  def readSingle(resultSet: ResultSet, pos: Int): E

  /** Read an E starting at column 1. The ResultSet must already be positioned
    * on a row (ie, ResultSet::next has been called).
    */
  def readSingle(resultSet: ResultSet): E = readSingle(resultSet, 1)

  /** Read an Option[E] starting at column `pos`, consuming `cols.length`
    * columns. The ResultSet must already be positioned on a row.
    */
  def readSingleOption(resultSet: ResultSet, pos: Int): Option[E]

  /** Decode every row into a Vector[E]. The ResultSet should be in its
    * initial position before calling (ie, ResultSet::next not called).
    */
  def read(resultSet: ResultSet): Vector[E] =
    Iterator
      .continually(resultSet)
      .takeWhile(_.next())
      .map(readSingle(_))
      .toVector

  /** Write the entity to the PreparedStatement starting at parameter `pos` */
  def writeSingle(entity: E, ps: PreparedStatement, pos: Int): Unit

  /** Write the entity to the PreparedStatement starting at parameter 1 */
  def writeSingle(entity: E, ps: PreparedStatement): Unit =
    writeSingle(entity, ps, 1)

  /** Write each entity to the PreparedStatement and enqueue it via
    * PreparedStatement::addBatch
    */
  def write(entities: Iterable[E], ps: PreparedStatement): Unit =
    entities.foreach { entity =>
      writeSingle(entity, ps)
      ps.addBatch()
    }

  /** Derive a DbCodec[E2] from this codec via the isomorphism (`to`, `from`).
    * The column layout and query representation are unchanged.
    */
  def biMap[E2](to: E => E2, from: E2 => E): DbCodec[E2] =
    new DbCodec[E2]:
      def queryRepr: String = outer.queryRepr
      val cols: IArray[Int] = outer.cols
      def readSingle(rs: ResultSet, pos: Int): E2 =
        to(outer.readSingle(rs, pos))
      def readSingleOption(rs: ResultSet, pos: Int): Option[E2] =
        outer.readSingleOption(rs, pos).map(to)
      def writeSingle(value: E2, ps: PreparedStatement, pos: Int): Unit =
        outer.writeSingle(from(value), ps, pos)
end DbCodec

object DbCodec:

  /** Summons the given DbCodec[E] instance. */
  inline def apply[E](using codec: DbCodec[E]): DbCodec[E] = codec

  // Catch-all codec: defers entirely to the driver via getObject/setObject.
  given AnyCodec: DbCodec[Any] with
    val cols: IArray[Int] = IArray(Types.JAVA_OBJECT)
    def readSingle(rs: ResultSet, pos: Int): Any = rs.getObject(pos)
    def readSingleOption(rs: ResultSet, pos: Int): Option[Any] =
      Option(rs.getObject(pos))
    def writeSingle(a: Any, ps: PreparedStatement, pos: Int): Unit =
      ps.setObject(pos, a)
    def queryRepr: String = "?"

  // Primitive codecs. JDBC's primitive getters (getBoolean, getInt, ...)
  // return the zero value for SQL NULL, so their readSingleOption delegates
  // to readOptImpl (defined elsewhere in this file; presumably it consults
  // ResultSet::wasNull — confirm). Reference-typed getters like getString
  // return null for SQL NULL, so wrapping in Option(...) is sufficient.
  given StringCodec: DbCodec[String] with
    val cols: IArray[Int] = IArray(Types.VARCHAR)
    def readSingle(rs: ResultSet, pos: Int): String = rs.getString(pos)
    def readSingleOption(rs: ResultSet, pos: Int): Option[String] =
      Option(rs.getString(pos))
    def writeSingle(s: String, ps: PreparedStatement, pos: Int): Unit =
      ps.setString(pos, s)
    def queryRepr: String = "?"

  given BooleanCodec: DbCodec[Boolean] with
    val cols: IArray[Int] = IArray(Types.BOOLEAN)
    def readSingle(rs: ResultSet, pos: Int): Boolean = rs.getBoolean(pos)
    def readSingleOption(rs: ResultSet, pos: Int): Option[Boolean] =
      readOptImpl(this, rs, pos)
    def writeSingle(b: Boolean, ps: PreparedStatement, pos: Int): Unit =
      ps.setBoolean(pos, b)
    def queryRepr: String = "?"

  given ByteCodec: DbCodec[Byte] with
    val cols: IArray[Int] = IArray(Types.TINYINT)
    def readSingle(rs: ResultSet, pos: Int): Byte = rs.getByte(pos)
    def readSingleOption(rs: ResultSet, pos: Int): Option[Byte] =
      readOptImpl(this, rs, pos)
    def writeSingle(b: Byte, ps: PreparedStatement, pos: Int): Unit =
      ps.setByte(pos, b)
    def queryRepr: String = "?"

  given ShortCodec: DbCodec[Short] with
    val cols: IArray[Int] = IArray(Types.SMALLINT)
    def readSingle(rs: ResultSet, pos: Int): Short = rs.getShort(pos)
    def readSingleOption(rs: ResultSet, pos: Int): Option[Short] =
      readOptImpl(this, rs, pos)
    def writeSingle(s: Short, ps: PreparedStatement, pos: Int): Unit =
      ps.setShort(pos, s)
    def queryRepr: String = "?"

  given IntCodec: DbCodec[Int] with
    val cols: IArray[Int] = IArray(Types.INTEGER)
    def readSingle(rs: ResultSet, pos: Int): Int = rs.getInt(pos)
    def readSingleOption(rs: ResultSet, pos: Int): Option[Int] =
      readOptImpl(this, rs, pos)
    def writeSingle(i: Int, ps: PreparedStatement, pos: Int): Unit =
      ps.setInt(pos, i)
    def queryRepr: String = "?"

  given LongCodec: DbCodec[Long] with
    val cols: IArray[Int] = IArray(Types.BIGINT)
    def readSingle(rs: ResultSet, pos: Int): Long = rs.getLong(pos)
    def readSingleOption(rs: ResultSet, pos: Int): Option[Long] =
      readOptImpl(this, rs, pos)
    def writeSingle(l: Long, ps: PreparedStatement, pos: Int): Unit =
      ps.setLong(pos, l)
    def queryRepr: String = "?"

  given FloatCodec: DbCodec[Float] with
    val cols: IArray[Int] = IArray(Types.REAL)
    def readSingle(rs: ResultSet, pos: Int): Float = rs.getFloat(pos)
    def readSingleOption(rs: ResultSet, pos: Int): Option[Float] =
      readOptImpl(this, rs, pos)
    def writeSingle(f: Float, ps: PreparedStatement, pos: Int): Unit =
      ps.setFloat(pos, f)
    def queryRepr: String = "?"

  given DoubleCodec: DbCodec[Double] with
    val cols: IArray[Int] = IArray(Types.DOUBLE)
    def readSingle(rs: ResultSet, pos: Int): Double = rs.getDouble(pos)
    def readSingleOption(rs: ResultSet, pos: Int): Option[Double] =
      readOptImpl(this, rs, pos)
    def writeSingle(d: Double, ps: PreparedStatement, pos: Int): Unit =
      ps.setDouble(pos, d)
    def queryRepr: String = "?"

  // Binary codecs: getBytes returns null for SQL NULL, hence Option(...).
  given ByteArrayCodec: DbCodec[Array[Byte]] with
    val cols: IArray[Int] = IArray(Types.BINARY)
    def readSingle(rs: ResultSet, pos: Int): Array[Byte] = rs.getBytes(pos)
    def readSingleOption(rs: ResultSet, pos: Int): Option[Array[Byte]] =
      Option(rs.getBytes(pos))
    def writeSingle(bytes: Array[Byte], ps: PreparedStatement, pos: Int): Unit =
      ps.setBytes(pos, bytes)
    def queryRepr: String = "?"

  // Immutable-array variant; wraps/unwraps without copying on read
  // (IArray.unsafeFromArray) but copies on write to obtain a mutable array.
  given ByteIArrayCodec: DbCodec[IArray[Byte]] with
    val cols: IArray[Int] = IArray(Types.BINARY)
    def readSingle(rs: ResultSet, pos: Int): IArray[Byte] =
      IArray.unsafeFromArray(rs.getBytes(pos))
    def readSingleOption(rs: ResultSet, pos: Int): Option[IArray[Byte]] =
      ByteArrayCodec.readSingleOption(rs, pos).map(IArray.unsafeFromArray)
    def writeSingle(
        bytes: IArray[Byte],
        ps: PreparedStatement,
        pos: Int
    ): Unit =
      ps.setBytes(pos, IArray.genericWrapArray(bytes).toArray)
    def queryRepr: String = "?"

  // Legacy java.sql date/time codecs.
  given SqlDateCodec: DbCodec[java.sql.Date] with
    val cols: IArray[Int] = IArray(Types.DATE)
    def readSingle(rs: ResultSet, pos: Int): java.sql.Date = rs.getDate(pos)
    def readSingleOption(rs: ResultSet, pos: Int): Option[java.sql.Date] =
      Option(rs.getDate(pos))
    def writeSingle(
        date: java.sql.Date,
        ps: PreparedStatement,
        pos: Int
    ): Unit = ps.setDate(pos, date)
    def queryRepr: String = "?"

  given SqlTimeCodec: DbCodec[java.sql.Time] with
    val cols: IArray[Int] = IArray(Types.TIME)
    def readSingle(rs: ResultSet, pos: Int): java.sql.Time =
      rs.getTime(pos)
    def readSingleOption(rs: ResultSet, pos: Int): Option[java.sql.Time] =
      Option(rs.getTime(pos))
    def writeSingle(
        time: java.sql.Time,
        ps: PreparedStatement,
        pos: Int
    ): Unit = ps.setTime(pos, time)
    def queryRepr: String = "?"

  given SqlTimestampCodec: DbCodec[java.sql.Timestamp] with
    val cols: IArray[Int] = IArray(Types.TIMESTAMP)
    def readSingle(rs: ResultSet, pos: Int): java.sql.Timestamp =
      rs.getTimestamp(pos)
    def readSingleOption(rs: ResultSet, pos: Int): Option[java.sql.Timestamp] =
      Option(rs.getTimestamp(pos))
    def writeSingle(
        t: java.sql.Timestamp,
        ps: PreparedStatement,
        pos: Int
    ): Unit = ps.setTimestamp(pos, t)
    def queryRepr: String = "?"

  // java.time codecs, read via the JDBC 4.2 getObject(pos, Class) overload.
  given OffsetDateTimeCodec: DbCodec[OffsetDateTime] with
    val cols: IArray[Int] = IArray(Types.TIMESTAMP_WITH_TIMEZONE)
    def readSingle(rs: ResultSet, pos: Int): OffsetDateTime =
      rs.getObject(pos, classOf[OffsetDateTime])
    def readSingleOption(rs: ResultSet, pos: Int): Option[OffsetDateTime] =
      readOptImpl(this, rs, pos)
    def writeSingle(dt: OffsetDateTime, ps: PreparedStatement, pos: Int): Unit =
      ps.setObject(pos, dt)
    def queryRepr: String = "?"

  // Instants are stored as OffsetDateTime at UTC.
  given InstantCodec: DbCodec[Instant] =
    OffsetDateTimeCodec.biMap(_.toInstant, _.atOffset(ZoneOffset.UTC))

  given LocalDateCodec: DbCodec[LocalDate] with
    val cols: IArray[Int] = IArray(Types.DATE)
    def readSingle(rs: ResultSet, pos: Int): LocalDate =
      rs.getObject(pos, classOf[LocalDate])
    def readSingleOption(rs: ResultSet, pos: Int): Option[LocalDate] =
      readOptImpl(this, rs, pos)
    def writeSingle(ld: LocalDate, ps: PreparedStatement, pos: Int): Unit =
      ps.setObject(pos, ld)
    def queryRepr: String = "?"

  given LocalTimeCodec: DbCodec[LocalTime] with
    val cols: IArray[Int] = IArray(Types.TIME)
    def readSingle(rs: ResultSet, pos: Int): LocalTime =
      rs.getObject(pos, classOf[LocalTime])
    def readSingleOption(rs: ResultSet, pos: Int): Option[LocalTime] =
      readOptImpl(this, rs, pos)
    def writeSingle(lt: LocalTime, ps: PreparedStatement, pos: Int): Unit =
      ps.setObject(pos, lt)
    def queryRepr: String = "?"

  given LocalDateTimeCodec: DbCodec[LocalDateTime] with
    val cols: IArray[Int] = IArray(Types.TIMESTAMP)
    def readSingle(rs: ResultSet, pos: Int): LocalDateTime =
      rs.getObject(pos, classOf[LocalDateTime])
    def readSingleOption(rs: ResultSet, pos: Int): Option[LocalDateTime] =
      readOptImpl(this, rs, pos)
    def writeSingle(ldt: LocalDateTime, ps: PreparedStatement, pos: Int): Unit =
      ps.setObject(pos, ldt)
    def queryRepr: String = "?"

  // ZoneIds are stored as their String id (ZoneId.of / toString round-trip).
  given ZoneIdCodec: DbCodec[ZoneId] =
    StringCodec.biMap(ZoneId.of, _.toString)

  given SqlRefCodec: DbCodec[java.sql.Ref] with
    val cols: IArray[Int] = IArray(Types.REF)
    def readSingle(rs: ResultSet, pos: Int): java.sql.Ref = rs.getRef(pos)
    def readSingleOption(rs: ResultSet, pos: Int): Option[java.sql.Ref] =
      readOptImpl(this, rs, pos)
    def writeSingle(ref: java.sql.Ref, ps: PreparedStatement, pos: Int): Unit =
      ps.setRef(pos, ref)
    def queryRepr: String = "?"

  given SqlBlobCodec: DbCodec[java.sql.Blob] with
    val cols: IArray[Int] = IArray(Types.BLOB)
    def readSingle(rs: ResultSet, pos: Int): java.sql.Blob = rs.getBlob(pos)
    def readSingleOption(rs: ResultSet, pos: Int): Option[java.sql.Blob] =
      readOptImpl(this, rs, pos)
    def writeSingle(b: java.sql.Blob, ps: PreparedStatement, pos: Int): Unit =
      ps.setBlob(pos, b)
    def queryRepr: String = "?"

  given SqlClobCodec: DbCodec[java.sql.Clob] with
    val cols: IArray[Int] = IArray(Types.CLOB)
    def readSingle(rs: ResultSet, pos: Int): java.sql.Clob = rs.getClob(pos)
    def readSingleOption(rs: ResultSet, pos: Int): Option[java.sql.Clob] =
      readOptImpl(this, rs, pos)
    def writeSingle(c: java.sql.Clob, ps: PreparedStatement, pos: Int): Unit =
      ps.setClob(pos, c)
    def queryRepr: String = "?"

  given URLCodec: DbCodec[URL] with
    val cols: IArray[Int] = IArray(Types.VARCHAR)
    def readSingle(rs: ResultSet, pos: Int): URL = rs.getURL(pos)
    def readSingleOption(rs: ResultSet, pos: Int): Option[URL] =
      Option(rs.getURL(pos))
    def writeSingle(url: URL, ps: PreparedStatement, pos: Int): Unit =
      ps.setURL(pos, url)
    def queryRepr: String = "?"

  /** Codec for [[java.sql.RowId]] columns. */
  given RowIdCodec: DbCodec[java.sql.RowId] with
    val cols: IArray[Int] = IArray(Types.ROWID)
    def queryRepr: String = "?"
    def readSingle(rs: ResultSet, pos: Int): java.sql.RowId = rs.getRowId(pos)
    def readSingleOption(rs: ResultSet, pos: Int): Option[java.sql.RowId] =
      // getRowId returns null for SQL NULL
      Option(rs.getRowId(pos))
    def writeSingle(
        value: java.sql.RowId,
        ps: PreparedStatement,
        pos: Int
    ): Unit =
      ps.setRowId(pos, value)

  /** Codec for [[java.sql.NClob]] columns. */
  given SqlNClobCodec: DbCodec[java.sql.NClob] with
    val cols: IArray[Int] = IArray(Types.NCLOB)
    def queryRepr: String = "?"
    def readSingle(rs: ResultSet, pos: Int): java.sql.NClob = rs.getNClob(pos)
    def readSingleOption(rs: ResultSet, pos: Int): Option[java.sql.NClob] =
      readOptImpl(this, rs, pos)
    def writeSingle(value: java.sql.NClob, ps: PreparedStatement, pos: Int): Unit =
      ps.setNClob(pos, value)

  /** Codec for [[java.sql.SQLXML]] columns. */
  given SqlXmlCodec: DbCodec[java.sql.SQLXML] with
    val cols: IArray[Int] = IArray(Types.SQLXML)
    def queryRepr: String = "?"
    def readSingle(rs: ResultSet, pos: Int): java.sql.SQLXML = rs.getSQLXML(pos)
    def readSingleOption(rs: ResultSet, pos: Int): Option[java.sql.SQLXML] =
      readOptImpl(this, rs, pos)
    def writeSingle(value: java.sql.SQLXML, ps: PreparedStatement, pos: Int): Unit =
      ps.setSQLXML(pos, value)

  /** Codec for [[java.math.BigDecimal]], stored as SQL NUMERIC. */
  given JavaBigDecimalCodec: DbCodec[java.math.BigDecimal] with
    val cols: IArray[Int] = IArray(Types.NUMERIC)
    def queryRepr: String = "?"
    def readSingle(rs: ResultSet, pos: Int): java.math.BigDecimal =
      rs.getBigDecimal(pos)
    def readSingleOption(
        rs: ResultSet,
        pos: Int
    ): Option[java.math.BigDecimal] =
      // getBigDecimal returns null for SQL NULL
      Option(rs.getBigDecimal(pos))
    def writeSingle(
        value: java.math.BigDecimal,
        ps: PreparedStatement,
        pos: Int
    ): Unit =
      ps.setBigDecimal(pos, value)

  /** Codec for [[scala.math.BigDecimal]], delegating to JavaBigDecimalCodec
    * and wrapping/unwrapping the underlying java value.
    */
  given ScalaBigDecimalCodec: DbCodec[scala.math.BigDecimal] with
    val cols: IArray[Int] = IArray(Types.NUMERIC)
    def queryRepr: String = "?"
    def readSingle(rs: ResultSet, pos: Int): scala.math.BigDecimal =
      scala.math.BigDecimal(JavaBigDecimalCodec.readSingle(rs, pos))
    def readSingleOption(rs: ResultSet, pos: Int): Option[BigDecimal] =
      JavaBigDecimalCodec
        .readSingleOption(rs, pos)
        .map(jbd => scala.math.BigDecimal(jbd))
    def writeSingle(
        value: scala.math.BigDecimal,
        ps: PreparedStatement,
        pos: Int
    ): Unit =
      // bigDecimal is the wrapped java.math.BigDecimal
      ps.setBigDecimal(pos, value.bigDecimal)

  /** Codec for [[java.util.UUID]] columns.
    *
    * Uses `getObject(pos, classOf[UUID])` / `setObject`, which requires a
    * driver with native UUID object mapping (the column type is reported as
    * Types.OTHER, e.g. a Postgres `uuid` column — verify for other drivers).
    */
  given UUIDCodec: DbCodec[UUID] with
    val cols: IArray[Int] = IArray(Types.OTHER)
    def readSingle(rs: ResultSet, pos: Int): UUID =
      rs.getObject(pos, classOf[UUID])
    def readSingleOption(rs: ResultSet, pos: Int): Option[UUID] =
      // consistent with the other codecs here: read, then check wasNull
      readOptImpl(this, rs, pos)
    def writeSingle(entity: UUID, ps: PreparedStatement, pos: Int): Unit =
      ps.setObject(pos, entity)
    def queryRepr: String = "?"

  /** Codec for optional values. Reading a SQL NULL yields None; writing None
    * sets NULL on every underlying column of the element codec.
    */
  given OptionCodec[A](using codec: DbCodec[A]): DbCodec[Option[A]] with
    def cols: IArray[Int] = codec.cols
    def queryRepr: String = codec.queryRepr
    def readSingle(rs: ResultSet, pos: Int): Option[A] =
      codec.readSingleOption(rs, pos)
    def readSingleOption(rs: ResultSet, pos: Int): Option[Option[A]] =
      // a readable row always yields Some(..); SQL NULL becomes Some(None)
      Some(codec.readSingleOption(rs, pos))
    def writeSingle(opt: Option[A], ps: PreparedStatement, pos: Int): Unit =
      opt match
        case Some(value) =>
          codec.writeSingle(value, ps, pos)
        case None =>
          val colTypes = cols
          var i = 0
          while i < colTypes.length do
            ps.setNull(pos + i, colTypes(i))
            i += 1

  /** Codec for `Some[A]`; simply delegates to the codec for A. */
  given SomeCodec[A](using codec: DbCodec[A]): DbCodec[Some[A]] with
    def cols: IArray[Int] = codec.cols
    def queryRepr: String = codec.queryRepr
    def readSingle(rs: ResultSet, pos: Int): Some[A] =
      Some(codec.readSingle(rs, pos))
    def readSingleOption(rs: ResultSet, pos: Int): Option[Some[A]] =
      codec.readSingleOption(rs, pos).map(a => Some(a))
    def writeSingle(s: Some[A], ps: PreparedStatement, pos: Int): Unit =
      codec.writeSingle(s.value, ps, pos)

  /** Codec for a pair: element A occupies the first columns, element B the
    * columns immediately after.
    */
  given Tuple2Codec[A, B](using
      aCodec: DbCodec[A],
      bCodec: DbCodec[B]
  ): DbCodec[(A, B)] with
    // column offset of the second element, relative to the tuple start
    private val bOffset = aCodec.cols.length
    val cols: IArray[Int] = IArray.concat(aCodec.cols, bCodec.cols)
    val queryRepr: String = s"(${aCodec.queryRepr}, ${bCodec.queryRepr})"
    def readSingle(rs: ResultSet, pos: Int): (A, B) =
      val a = aCodec.readSingle(rs, pos)
      val b = bCodec.readSingle(rs, pos + bOffset)
      (a, b)
    def readSingleOption(rs: ResultSet, pos: Int): Option[(A, B)] =
      // both elements are read (consuming their columns) before combining
      val aOpt = aCodec.readSingleOption(rs, pos)
      val bOpt = bCodec.readSingleOption(rs, pos + bOffset)
      aOpt.zip(bOpt)
    def writeSingle(tup: (A, B), ps: PreparedStatement, pos: Int): Unit =
      aCodec.writeSingle(tup._1, ps, pos)
      bCodec.writeSingle(tup._2, ps, pos + bOffset)

  /** Codec for a 3-tuple; elements are laid out consecutively by column. */
  given Tuple3Codec[A, B, C](using
      aCodec: DbCodec[A],
      bCodec: DbCodec[B],
      cCodec: DbCodec[C]
  ): DbCodec[(A, B, C)] with
    // column offsets of the 2nd and 3rd elements relative to the tuple start
    private val bOffset = aCodec.cols.length
    private val cOffset = bOffset + bCodec.cols.length
    val cols: IArray[Int] =
      IArray.concat(aCodec.cols, bCodec.cols, cCodec.cols)
    val queryRepr: String =
      s"(${aCodec.queryRepr}, ${bCodec.queryRepr}, ${cCodec.queryRepr})"
    def readSingle(rs: ResultSet, pos: Int): (A, B, C) =
      (
        aCodec.readSingle(rs, pos),
        bCodec.readSingle(rs, pos + bOffset),
        cCodec.readSingle(rs, pos + cOffset)
      )
    def readSingleOption(rs: ResultSet, pos: Int): Option[(A, B, C)] =
      // every element is read (consuming its columns) before combining
      val aOpt = aCodec.readSingleOption(rs, pos)
      val bOpt = bCodec.readSingleOption(rs, pos + bOffset)
      val cOpt = cCodec.readSingleOption(rs, pos + cOffset)
      for a <- aOpt; b <- bOpt; c <- cOpt yield (a, b, c)
    def writeSingle(tup: (A, B, C), ps: PreparedStatement, pos: Int): Unit =
      aCodec.writeSingle(tup._1, ps, pos)
      bCodec.writeSingle(tup._2, ps, pos + bOffset)
      cCodec.writeSingle(tup._3, ps, pos + cOffset)
  end Tuple3Codec

  /** Codec for a 4-tuple; elements are laid out consecutively by column. */
  given Tuple4Codec[A, B, C, D](using
      aCodec: DbCodec[A],
      bCodec: DbCodec[B],
      cCodec: DbCodec[C],
      dCodec: DbCodec[D]
  ): DbCodec[(A, B, C, D)] with
    // column offsets of the 2nd..4th elements relative to the tuple start
    private val bOffset = aCodec.cols.length
    private val cOffset = bOffset + bCodec.cols.length
    private val dOffset = cOffset + cCodec.cols.length
    val cols: IArray[Int] =
      IArray.concat(aCodec.cols, bCodec.cols, cCodec.cols, dCodec.cols)
    val queryRepr: String =
      s"(${aCodec.queryRepr}, ${bCodec.queryRepr}, ${cCodec.queryRepr}, ${dCodec.queryRepr})"
    def readSingle(rs: ResultSet, pos: Int): (A, B, C, D) =
      (
        aCodec.readSingle(rs, pos),
        bCodec.readSingle(rs, pos + bOffset),
        cCodec.readSingle(rs, pos + cOffset),
        dCodec.readSingle(rs, pos + dOffset)
      )
    def readSingleOption(rs: ResultSet, pos: Int): Option[(A, B, C, D)] =
      // every element is read (consuming its columns) before combining
      val aOpt = aCodec.readSingleOption(rs, pos)
      val bOpt = bCodec.readSingleOption(rs, pos + bOffset)
      val cOpt = cCodec.readSingleOption(rs, pos + cOffset)
      val dOpt = dCodec.readSingleOption(rs, pos + dOffset)
      for a <- aOpt; b <- bOpt; c <- cOpt; d <- dOpt yield (a, b, c, d)
    def writeSingle(tup: (A, B, C, D), ps: PreparedStatement, pos: Int): Unit =
      aCodec.writeSingle(tup._1, ps, pos)
      bCodec.writeSingle(tup._2, ps, pos + bOffset)
      cCodec.writeSingle(tup._3, ps, pos + cOffset)
      dCodec.writeSingle(tup._4, ps, pos + dOffset)
  end Tuple4Codec

  /** DbCodec for arbitrary tuple arity, generated by the tupleNCodecImpl
    * macro from the element types' codecs.
    */
  inline given TupleNCodec[T <: Tuple]: DbCodec[T] = ${ tupleNCodecImpl[T] }

  /** Recursively summons a DbCodec for every element type of tuple T,
    * accumulating the codec expressions into an `IArray[DbCodec[?]]`
    * expression. Aborts compilation when an element type has no DbCodec.
    */
  private def codecExprs[T <: Tuple: Type](
      res: Vector[Expr[DbCodec[?]]] = Vector.empty
  )(using Quotes): Expr[IArray[DbCodec[?]]] =
    import quotes.reflect.*
    Type.of[T] match
      case '[EmptyTuple] => '{ IArray.from(${ Expr.ofSeq(res) }) }
      case '[t *: ts] =>
        val tCodec = Expr.summon[DbCodec[t]].getOrElse {
          report.errorAndAbort(s"No DbCodec found for type ${Type.show[t]}")
        }
        codecExprs[ts](res :+ tCodec)

  /** Macro implementation backing TupleNCodec.
    *
    * Generates a DbCodec that reads/writes tuple elements positionally,
    * advancing the column position by each element codec's `cols.length`.
    * Aborts compilation for EmptyTuple.
    */
  def tupleNCodecImpl[T <: Tuple: Type](using Quotes): Expr[DbCodec[T]] =
    import quotes.reflect.*
    Type.of[T] match
      case '[EmptyTuple] =>
        report.errorAndAbort("Cannot derive DbCodec for EmptyTuple")
      case '[t *: ts] =>
        val tCodecs = codecExprs[t *: ts]()
        '{
          new DbCodec[t *: ts] {
            val tCodecs = ${ tCodecsExpr }
            val cols: IArray[Int] =
              tCodecs.flatMap(codec => codec.cols)

            def readSingle(rs: ResultSet, pos: Int): t *: ts =
              val tupleSize = constValue[Tuple.Size[t *: ts]]
              val result = Array.ofDim[Any](tupleSize)
              var tupleIdx = 0
              var psIdx = pos
              while tupleIdx < tupleSize do
                val codec = tCodecs(tupleIdx)
                result(tupleIdx) = codec.readSingle(rs, psIdx)
                tupleIdx += 1
                psIdx += codec.cols.length
              Tuple.fromArray(result).asInstanceOf[t *: ts]

            def readSingleOption(rs: ResultSet, pos: Int): Option[t *: ts] =
              boundary:
                val tupleSize = constValue[Tuple.Size[t *: ts]]
                val res = Array.ofDim[Any](tupleSize)
                var tupleIdx = 0
                var psIdx = pos
                while tupleIdx < tupleSize do
                  val codec = tCodecs(tupleIdx)
                  codec.readSingleOption(rs, psIdx) match
                    case Some(value) => res(tupleIdx) = value
                    // any NULL element makes the whole tuple None
                    case None        => boundary.break(Option.empty)
                  tupleIdx += 1
                  psIdx += codec.cols.length
                Some(Tuple.fromArray(res)).asInstanceOf[Option[t *: ts]]

            def writeSingle(e: t *: ts, ps: PreparedStatement, pos: Int): Unit =
              val tupleSize = constValue[Tuple.Size[t *: ts]]
              var tupleIdx = 0
              var psIdx = pos
              while tupleIdx < tupleSize do
                val codec = tCodecs(tupleIdx)
                codec
                  .asInstanceOf[DbCodec[Any]]
                  .writeSingle(e.productElement(tupleIdx), ps, psIdx)
                tupleIdx += 1
                psIdx += codec.cols.length

            val queryRepr: String =
              tCodecs.map(_.queryRepr).mkString("(", ", ", ")")
          }.asInstanceOf[DbCodec[T]]
        }
    end match
  end tupleNCodecImpl

  /** Reads a value with `codec`, then uses `ResultSet.wasNull` to decide
    * whether the column was SQL NULL (needed because primitive getters
    * return 0/false rather than null for NULL columns).
    */
  private inline def readOptImpl[A](
      codec: DbCodec[A],
      resultSet: ResultSet,
      pos: Int
  ): Option[A] =
    val value = codec.readSingle(resultSet, pos)
    if resultSet.wasNull then None else Some(value)

  /** Derives a DbCodec for a product (case class) or sum (simple enum) type. */
  inline def derived[E: Mirror.Of]: DbCodec[E] =
    ${ dbCodecImpl[E] }

  /** Macro implementation backing `derived`.
    *
    * For a product (case class), generates a codec that reads/writes each
    * field in declaration order using the fields' own codecs (via
    * productReadSingle / productReadOption / productWriteSingle). For a sum
    * (simple enum), generates a codec that stores the enum's sql name (from
    * DerivingUtil.buildSqlNameMapForEnum) in a VARCHAR column. Aborts
    * compilation when no Mirror is available.
    */
  private def dbCodecImpl[E: Type](using Quotes): Expr[DbCodec[E]] =
    import quotes.reflect.*
    val mirror = Expr.summon[Mirror.Of[E]].getOrElse {
      report.errorAndAbort(
        "Can only derive DbCodec for case classes, sealed traits or enums (products and sums)."
      )
    }
    mirror match
      // product (case class) derivation
      case '{
            $mp: Mirror.ProductOf[E] {
              type MirroredElemTypes = mets
            }
          } =>
        val colsExpr = buildColsExpr[mets]()
        '{
          new DbCodec[E] {
            val cols: IArray[Int] = $colsExpr
            def readSingle(rs: ResultSet, pos: Int): E =
              ${
                productReadSingle[E, mets]('{ rs }, mp, Vector.empty, '{ pos })
              }
            def readSingleOption(rs: ResultSet, pos: Int): Option[E] =
              ${
                productReadOption[E, mets]('{ rs }, mp, Vector.empty, '{ pos })
              }
            def writeSingle(e: E, ps: PreparedStatement, pos: Int): Unit =
              ${
                productWriteSingle[E, mets]('{ e }, '{ ps }, '{ pos }, '{ 0 })
              }
            val queryRepr: String = ${ productQueryRepr[mets]() }
          }
        }
      // sum (simple enum) derivation: stored as a single string column
      case '{
            $ms: Mirror.SumOf[E] {
              type MirroredElemTypes = mets
              type MirroredElemLabels = mels
              type MirroredLabel = mel
            }
          } =>
        val nameMapExpr = DerivingUtil.buildSqlNameMapForEnum[E, mels, mets]
        val melExpr = Expr(Type.valueOfConstant[mel].get.toString)
        '{
          new DbCodec[E] {
            val nameMap: Seq[(String, E)] = $nameMapExpr
            val cols: IArray[Int] = IArray(Types.VARCHAR)
            def readSingle(rs: ResultSet, pos: Int): E =
              val str = rs.getString(pos)
              nameMap.find((name, _) => name == str) match
                case Some((_, v)) => v
                case None =>
                  throw IllegalArgumentException(
                    str + " not convertible to " + $melExpr
                  )
            def readSingleOption(rs: ResultSet, pos: Int): Option[E] =
              Option(rs.getString(pos)).map(str =>
                nameMap.find((name, _) => name == str) match
                  case Some((_, v)) => v
                  case None =>
                    throw IllegalArgumentException(
                      str + " not convertible to " + $melExpr
                    )
              )
            def writeSingle(entity: E, ps: PreparedStatement, pos: Int): Unit =
              nameMap.find((_, v) => v == entity) match
                case Some((k, _)) => ps.setString(pos, k)
                case None =>
                  throw IllegalArgumentException(
                    entity.toString + " not convertible to " + $melExpr
                  )
            def queryRepr: String = "?"
          }
        }
    end match
  end dbCodecImpl

  /** Builds the queryRepr (comma-separated placeholder list) for a product's
    * element types. An element with no DbCodec contributes a single "?",
    * mirroring the getObject/setObject fallback used when reading/writing.
    */
  private def productQueryRepr[Mets: Type](
      elemReprs: Vector[Expr[String]] = Vector.empty
  )(using Quotes): Expr[String] =
    import quotes.reflect.*
    Type.of[Mets] match
      case '[met *: metTail] =>
        Expr.summon[DbCodec[met]] match
          case Some(codec) =>
            productQueryRepr[metTail](elemReprs :+ '{ $codec.queryRepr })
          case None =>
            productQueryRepr[metTail](elemReprs :+ '{ "?" })
      case '[EmptyTuple] =>
        val seqExpr = Expr.ofSeq(elemReprs)
        '{ $seqExpr.mkString(", ") }

  /** Concatenates the `cols` arrays (JDBC type constants) of every element
    * type's DbCodec into a single IArray[Int] expression. Aborts compilation
    * when an element type has no DbCodec instance.
    */
  private def buildColsExpr[Mets: Type](
      res: Vector[Expr[IArray[Int]]] = Vector.empty
  )(using Quotes): Expr[IArray[Int]] =
    import quotes.reflect.*
    Type.of[Mets] match
      case '[met *: metTail] =>
        val metCodec = Expr.summon[DbCodec[met]].getOrElse {
          val metType = TypeRepr.of[met].show
          report.errorAndAbort(
            s"Cannot find a DbCodec instance for $metType! Provide one or derive it."
          )
        }
        val newCols = '{ $metCodec.cols }
        buildColsExpr[metTail](res :+ newCols)
      case '[EmptyTuple] =>
        '{
          val iArrays: Seq[IArray[Int]] = ${ Expr.ofSeq(res) }
          IArray.concat(iArrays*)
        }

  /** Builds the `readSingle` body for a product type: reads each element with
    * its DbCodec (advancing the column position by the codec's column count),
    * or falls back to `ResultSet.getObject(pos, Class)` advancing by 1 when
    * only a ClassTag is available, then constructs E via the mirror.
    *
    * Fix: the final error message previously lacked the `s` interpolator, so
    * the type name was emitted literally as "${TypeRepr.of[met].show}".
    */
  private def productReadSingle[E: Type, Mets: Type](
      rs: Expr[ResultSet],
      m: Expr[Mirror.ProductOf[E]],
      res: Vector[Expr[Any]],
      pos: Expr[Int]
  )(using Quotes): Expr[E] =
    import quotes.reflect.*
    Type.of[Mets] match
      case '[met *: metTail] =>
        Expr.summon[DbCodec[met]] match
          case Some(codecExpr) =>
            '{
              val posValue = $pos
              val codec = $codecExpr
              val metValue = codec.readSingle($rs, posValue)
              val newPos = posValue + codec.cols.length
              ${
                productReadSingle[E, metTail](
                  rs,
                  m,
                  res :+ '{ metValue },
                  '{ newPos }
                )
              }
            }
          case None =>
            Expr.summon[ClassTag[met]] match
              case Some(clsTagExpr) =>
                report.info(
                  s"Could not find DbCodec for ${TypeRepr.of[met].show}. Defaulting to ResultSet::[get|set]Object"
                )
                '{
                  val posValue = $pos
                  val metValue = $rs.getObject(
                    posValue,
                    $clsTagExpr.runtimeClass.asInstanceOf[Class[met]]
                  )
                  val newPos = posValue + 1
                  ${
                    productReadSingle[E, metTail](
                      rs,
                      m,
                      res :+ '{ metValue },
                      '{ newPos }
                    )
                  }
                }
              case None =>
                report.errorAndAbort(
                  s"Could not find DbCodec or ClassTag for ${TypeRepr.of[met].show}"
                )
      case '[EmptyTuple] =>
        '{
          val product = ${ Expr.ofTupleFromSeq(res) }
          $m.fromProduct(product)
        }
    end match
  end productReadSingle

  /** Builds the `readSingleOption` body for a product type: like
    * productReadSingle, but any NULL element short-circuits the whole result
    * to None (via readSingleOption / wasNull on the getObject fallback).
    *
    * Fix: the final error message previously lacked the `s` interpolator, so
    * the type name was emitted literally as "${TypeRepr.of[met].show}".
    */
  private def productReadOption[E: Type, Mets: Type](
      rs: Expr[ResultSet],
      m: Expr[Mirror.ProductOf[E]],
      res: Vector[Expr[Any]],
      pos: Expr[Int]
  )(using Quotes): Expr[Option[E]] =
    import quotes.reflect.*
    Type.of[Mets] match
      case '[met *: metTail] =>
        Expr.summon[DbCodec[met]] match
          case Some(codecExpr) =>
            '{
              val posValue = $pos
              val codec = $codecExpr
              codec.readSingleOption($rs, posValue) match
                case Some(metValue) =>
                  val newPos = posValue + codec.cols.length
                  ${
                    productReadOption[E, metTail](
                      rs,
                      m,
                      res :+ '{ metValue },
                      '{ newPos }
                    )
                  }
                case None => None
            }
          case None =>
            Expr.summon[ClassTag[met]] match
              case Some(clsTagExpr) =>
                report.info(
                  s"Could not find DbCodec for ${TypeRepr.of[met].show}. Defaulting to ResultSet::[get|set]Object"
                )
                '{
                  val posValue = $pos
                  val metValue = $rs.getObject(
                    posValue,
                    $clsTagExpr.runtimeClass.asInstanceOf[Class[met]]
                  )
                  if $rs.wasNull then None
                  else
                    val newPos = posValue + 1
                    ${
                      productReadOption[E, metTail](
                        rs,
                        m,
                        res :+ '{ metValue },
                        '{ newPos }
                      )
                    }
                }
              case None =>
                report.errorAndAbort(
                  s"Could not find DbCodec or ClassTag for ${TypeRepr.of[met].show}"
                )
      case '[EmptyTuple] =>
        '{
          val product = ${ Expr.ofTupleFromSeq(res) }
          Some($m.fromProduct(product))
        }
    end match
  end productReadOption

  /** Builds the `writeSingle` body for a product type: writes each product
    * element with its DbCodec, advancing the statement position by the
    * codec's column count. Elements without a codec fall back to
    * `PreparedStatement.setObject` and advance the position by 1.
    */
  private def productWriteSingle[E: Type, Mets: Type](
      e: Expr[E],
      ps: Expr[PreparedStatement],
      pos: Expr[Int],
      i: Expr[Int]
  )(using Quotes): Expr[Unit] =
    import quotes.reflect.*
    Type.of[Mets] match
      case '[met *: metTail] =>
        Expr.summon[DbCodec[met]] match
          case Some(codecExpr) =>
            '{
              val iValue = $i
              val posValue = $pos
              val metValue = $e
                .asInstanceOf[Product]
                .productElement(iValue)
                .asInstanceOf[met]
              val codec = $codecExpr
              codec.writeSingle(metValue, $ps, posValue)
              val newPos = posValue + $codecExpr.cols.length
              val newI = iValue + 1
              ${ productWriteSingle[E, metTail](e, ps, '{ newPos }, '{ newI }) }
            }
          case None =>
            // no codec available: write the raw value with setObject
            '{
              val iValue = $i
              val posValue = $pos
              val metValue = $e
                .asInstanceOf[Product]
                .productElement(iValue)
              $ps.setObject(posValue, metValue)
              val newPos = posValue + 1
              val newI = iValue + 1
              ${ productWriteSingle[E, metTail](e, ps, '{ newPos }, '{ newI }) }
            }
      case '[EmptyTuple] => '{}
    end match
  end productWriteSingle
end DbCodec


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/DbCon.scala
================================================
package com.augustnagro.magnum

import java.sql.Connection

/** Simple wrapper around a `java.sql.Connection` and its associated
  * [[SqlLogger]]. See `com.augustnagro.magnum.connect` and `transact`.
  * The constructor is `private[magnum]`; instances are created by the
  * library, not directly by users.
  */
class DbCon private[magnum] (
    val connection: Connection,
    val sqlLogger: SqlLogger
)


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/DbTx.scala
================================================
package com.augustnagro.magnum

import java.sql.Connection
import scala.util.Using

/** Represents a transactional [[DbCon]]. Same members as [[DbCon]]; the
  * subtype serves as evidence that the connection participates in a
  * transaction.
  */
class DbTx private[magnum] (connection: Connection, sqlLogger: SqlLogger)
    extends DbCon(connection, sqlLogger)


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/DbType.scala
================================================
package com.augustnagro.magnum

import scala.reflect.ClassTag
import scala.deriving.Mirror

/** Factory for Repo default methods. Implemented per database dialect
  * (e.g. H2DbType, PostgresDbType).
  */
trait DbType:
  /** Builds the [[RepoDefaults]] for entity-creator EC, entity E, and id ID.
    *
    * @param tableNameSql
    *   table name as written in sql statements
    * @param eElemNames
    *   entity field names (Scala names)
    * @param eElemNamesSql
    *   entity field names as sql column names, aligned with eElemNames
    * @param eElemCodecs
    *   codec for each entity field, aligned with eElemNames
    * @param ecElemNames
    *   entity-creator field names (Scala names)
    * @param ecElemNamesSql
    *   entity-creator field names as sql column names
    * @param idIndex
    *   index of the id field within the entity's fields
    */
  def buildRepoDefaults[EC, E, ID](
      tableNameSql: String,
      eElemNames: Seq[String],
      eElemNamesSql: Seq[String],
      eElemCodecs: Seq[DbCodec[?]],
      ecElemNames: Seq[String],
      ecElemNamesSql: Seq[String],
      idIndex: Int
  )(using
      eCodec: DbCodec[E],
      ecCodec: DbCodec[EC],
      idCodec: DbCodec[ID],
      eClassTag: ClassTag[E],
      ecClassTag: ClassTag[EC],
      idClassTag: ClassTag[ID]
  ): RepoDefaults[EC, E, ID]


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/DerivingUtil.scala
================================================
package com.augustnagro.magnum

import scala.deriving.Mirror
import scala.compiletime.{
  constValue,
  constValueTuple,
  erasedValue,
  error,
  summonFrom,
  summonInline
}
import scala.quoted.*
import scala.reflect.ClassTag

/** Not useful for typical user code; provided to help implement custom DbCodecs
  * and associated typeclasses
  */
object DerivingUtil:
  /** For a Simple (non-ADT) enum type E, constructs a sequence of mappings from
    * sql string representation to enum value. For example,
    *
    * {{{
    *   @Table(PostgresDbType, SqlNameMapper.CamelToUpperSnakeCase)
    *   enum Color { case Red, @SqlName("greeeeeen") Green, Blue }
    * }}}
    *
    * Results in
    *
    * {{{
    *   Seq("Red" -> Color.Red, "greeeeeen" -> Color.Green, "Blue" -> Color.Blue)
    * }}}
    *
    * Will produce a compile error if the enum is not simple (non-adt).
    *
    * @tparam E
    *   the enum type, like Color
    * @tparam Mels
    *   enum Mirror's MirroredElemLabels
    * @tparam Mets
    *   enum Mirror's MirroredElemTypes
    */
  def buildSqlNameMapForEnum[
      E: Type,
      Mels: Type,
      Mets: Type
  ](using q: Quotes): Expr[Seq[(String, E)]] =
    import q.reflect.*
    val tableAnnot = TypeRepr.of[Table].typeSymbol
    // name mapper from an optional @Table annotation on E; SameCase otherwise
    val defaultNameMapper: Expr[SqlNameMapper] =
      TypeRepr
        .of[E]
        .typeSymbol
        .getAnnotation(tableAnnot) match
        case Some(term) =>
          val tableExpr = term.asExprOf[Table]
          '{ $tableExpr.nameMapper }
        case None =>
          '{ SqlNameMapper.SameCase }

    val sumValueExprs: Vector[Expr[E]] = sumValues[E, Mets]()
    val scalaNames = getScalaNames[Mels]()

    val sqlNameAnnot = TypeRepr.of[SqlName].typeSymbol
    val enumCaseSymbols = TypeRepr.of[E].typeSymbol.children

    // prefer an explicit @SqlName on the enum case; otherwise apply the name
    // mapper to the case's Scala name
    val sqlNameExprs: Vector[Expr[(String, E)]] = scalaNames
      .zip(sumValueExprs)
      .map((scalaName, sumExpr) =>
        val nameAnnot = enumCaseSymbols
          .find(sym => sym.name == scalaName && sym.hasAnnotation(sqlNameAnnot))
          .flatMap(sym => sym.getAnnotation(sqlNameAnnot))
        nameAnnot match
          case Some(term) =>
            val sqlNameExpr: Expr[SqlName] = term.asExprOf[SqlName]
            '{ ($sqlNameExpr.name.toString, $sumExpr) }
          case None =>
            val scalaNameExpr = Expr(scalaName)
            '{ ($defaultNameMapper.toColumnName($scalaNameExpr), $sumExpr) }
      )
    Expr.ofSeq(sqlNameExprs)
  end buildSqlNameMapForEnum

  /** Collects the MirroredElemLabels of Mels as strings. */
  private def getScalaNames[Mels: Type](res: Vector[String] = Vector.empty)(
      using Quotes
  ): Vector[String] =
    import quotes.reflect.*
    Type.of[Mels] match
      case '[mel *: melTail] =>
        val melString = Type.valueOfConstant[mel].get.toString
        getScalaNames[melTail](res :+ melString)
      case '[EmptyTuple] => res

  /** One value expression per enum case; aborts compilation if any case has
    * fields (i.e. the enum is an ADT, not a simple enum).
    */
  private def sumValues[E: Type, Mets: Type](
      res: Vector[Expr[E]] = Vector.empty
  )(using Quotes): Vector[Expr[E]] =
    import quotes.reflect.*
    Type.of[Mets] match
      case '[met *: metTail] =>
        val expr = Expr.summon[Mirror.ProductOf[met]] match
          case Some(m) if isSingleton[met] =>
            '{ $m.fromProduct(EmptyTuple).asInstanceOf[E] }
          case _ =>
            report.errorAndAbort("Can only derive simple (non-adt) enums")
        sumValues[E, metTail](res :+ expr)
      case '[EmptyTuple] => res

  /** True when T is a field-less case (product arity 0). */
  private def isSingleton[T: Type](using Quotes): Boolean =
    import quotes.reflect.*
    Expr.summon[Mirror.ProductOf[T]] match
      case Some('{
            $mp: Mirror.ProductOf[T] {
              type MirroredElemTypes = mets
            }
          }) =>
        tupleArity[mets]() == 0
      case _ => false

  /** Arity of the tuple type T. */
  private def tupleArity[T: Type](res: Int = 0)(using Quotes): Int =
    import quotes.reflect.*
    Type.of[T] match
      case '[x *: xs]    => tupleArity[xs](res + 1)
      case '[EmptyTuple] => res

  /** Finds the first SqlName annotation on type T */
  def sqlTableNameAnnot[T: Type](using Quotes): Option[Expr[SqlName]] =
    import quotes.reflect._
    val annot = TypeRepr.of[SqlName]
    TypeRepr
      .of[T]
      .typeSymbol
      .annotations
      .find(_.tpe =:= annot)
      .map(term => term.asExprOf[SqlName])

  /** Finds the first Table annotation on type T */
  def tableAnnot[T: Type](using Quotes): Option[Expr[Table]] =
    import quotes.reflect.*
    val annot = TypeRepr.of[Table]
    TypeRepr
      .of[T]
      .typeSymbol
      .annotations
      .find(_.tpe =:= annot)
      .map(term => term.asExprOf[Table])

end DerivingUtil


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/Frag.scala
================================================
package com.augustnagro.magnum

import java.lang.System.Logger.Level
import java.sql.{PreparedStatement, ResultSet, Statement}
import scala.collection.immutable.ArraySeq
import scala.util.{Failure, Success, Using}

/** Sql fragment: a sql string, its interpolated parameters, and a
  * [[FragWriter]] that binds the parameters to a PreparedStatement.
  */
class Frag(
    val sqlString: String,
    val params: Seq[Any],
    val writer: FragWriter
):
  /** Builds a [[Query]] whose result rows are decoded as E. */
  def query[E](using reader: DbCodec[E]): Query[E] = Query(this, reader)

  /** Builds an [[Update]] from this fragment. */
  def update: Update = Update(this)

  /** For databases like Postgres that support RETURNING statements via
    * `getResultSet`
    */
  def returning[E](using reader: DbCodec[E]): Returning[E] =
    Returning(this, reader, Vector.empty)

  /** For databases that support RETURNING statements via `getGeneratedKeys`
    */
  def returningKeys[E](colName: String, xs: String*)(using
      reader: DbCodec[E]
  ): Returning[E] =
    Returning(this, reader, colName +: xs)

  /** For databases that support RETURNING statements via `getGeneratedKeys`
    */
  def returningKeys[E](colName: ColumnName, xs: ColumnName*)(using
      reader: DbCodec[E]
  ): Returning[E] =
    Returning(this, reader, (colName +: xs).map(_.queryRepr))

  /** For databases that support RETURNING statements via `getGeneratedKeys`
    */
  def returningKeys[E](colNames: ColumnNames)(using
      reader: DbCodec[E]
  ): Returning[E] =
    Returning(this, reader, colNames.columnNames.map(_.queryRepr))

  /** Strips leading whitespace characters followed by a specified char from the
    * beginning of each line in this {@link Frag}.
    *
    * This method is useful when you want to format SQL strings in a more
    * readable multi-line way within your code.
    *
    * @param marginChar
    *   the character that indicates the margin.
    * @return
    *   a new {@link Frag} instance with the modified `sqlString`.
    */
  def stripMargin(marginChar: Char): Frag =
    Frag(sqlString.stripMargin(marginChar), params, writer)

  /** Strips leading whitespace characters followed by a vertical bar (`|`) from
    * the beginning of each line in this {@link Frag}.
    *
    * This method is useful when you want to format SQL strings in a more
    * readable multi-line way within your code.
    *
    * @return
    *   a new {@link Frag} instance with the modified `sqlString`.
    */
  def stripMargin: Frag = stripMargin('|')

end Frag


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/FragWriter.scala
================================================
package com.augustnagro.magnum

import java.sql.PreparedStatement

trait FragWriter:
  /** Writes a Frag's values to `ps`, starting at position `pos`. Returns the
    * new position.
    */
  def write(ps: PreparedStatement, pos: Int): Int

object FragWriter:
  /** Writer with no parameters: leaves `ps` untouched and returns `pos`. */
  val empty: FragWriter = (_, pos) => pos


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/H2DbType.scala
================================================
package com.augustnagro.magnum

import java.sql.{Connection, JDBCType, PreparedStatement, ResultSet, Statement}
import java.time.OffsetDateTime
import scala.collection.View
import scala.deriving.Mirror
import scala.reflect.ClassTag
import scala.util.{Failure, Success, Using}

object H2DbType extends DbType:

  def buildRepoDefaults[EC, E, ID](
      tableNameSql: String,
      eElemNames: Seq[String],
      eElemNamesSql: Seq[String],
      eElemCodecs: Seq[DbCodec[?]],
      ecElemNames: Seq[String],
      ecElemNamesSql: Seq[String],
      idIndex: Int
  )(using
      eCodec: DbCodec[E],
      ecCodec: DbCodec[EC],
      idCodec: DbCodec[ID],
      eClassTag: ClassTag[E],
      ecClassTag: ClassTag[EC],
      idClassTag: ClassTag[ID]
  ): RepoDefaults[EC, E, ID] =
    val idName = eElemNamesSql(idIndex)
    val selectKeys = eElemNamesSql.mkString(", ")
    val ecInsertKeys = ecElemNamesSql.mkString("(", ", ", ")")

    val updateKeys: String = eElemNamesSql
      .lazyZip(eElemCodecs)
      .map((sqlName, codec) => sqlName + " = " + codec.queryRepr)
      .patch(idIndex, Seq.empty, 1)
      .mkString(", ")

    val updateCodecs = eElemCodecs
      .patch(idIndex, Seq.empty, 1)
      .appended(idCodec)
      .asInstanceOf[Seq[DbCodec[Any]]]

    val insertGenKeys: Array[String] = Array.from(eElemNamesSql)

    val countSql = s"SELECT count(*) FROM $tableNameSql"
    val countQuery = Frag(countSql, Vector.empty, FragWriter.empty).query[Long]
    val existsByIdSql =
      s"SELECT 1 FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}"
    val findAllSql = s"SELECT * FROM $tableNameSql"
    val findAllQuery = Frag(findAllSql, Vector.empty, FragWriter.empty).query[E]
    val findByIdSql =
      s"SELECT * FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}"
    val findAllByIdSql = s"SELECT * FROM $tableNameSql WHERE $idName = ANY(?)"
    val deleteByIdSql =
      s"DELETE FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}"
    val truncateSql = s"TRUNCATE TABLE $tableNameSql"
    val truncateUpdate =
      Frag(truncateSql, Vector.empty, FragWriter.empty).update
    val insertSql =
      s"INSERT INTO $tableNameSql $ecInsertKeys VALUES (${ecCodec.queryRepr})"
    val updateSql =
      s"UPDATE $tableNameSql SET $updateKeys WHERE $idName = ${idCodec.queryRepr}"

    val compositeId = idCodec.cols.distinct.size != 1
    val idFirstTypeName = JDBCType.valueOf(idCodec.cols.head).getName

    def idWriter(id: ID): FragWriter = (ps, pos) =>
      idCodec.writeSingle(id, ps, pos)
      pos + idCodec.cols.length

    // Default H2 repository: plain JDBC via Frag/Using; every statement is
    // prepared per call and closed before returning.
    new RepoDefaults[EC, E, ID]:
      def count(using con: DbCon): Long = countQuery.run().head

      // `SELECT 1 ... WHERE id = ?`; any returned row means the id exists.
      def existsById(id: ID)(using DbCon): Boolean =
        Frag(existsByIdSql, IArray(id), idWriter(id))
          .query[Int]
          .run()
          .nonEmpty

      def findAll(using DbCon): Vector[E] = findAllQuery.run()

      def findAll(spec: Spec[E])(using DbCon): Vector[E] =
        SpecImpl.Default.findAll(spec, tableNameSql)

      def findById(id: ID)(using DbCon): Option[E] =
        Frag(findByIdSql, IArray(id), idWriter(id))
          .query[E]
          .run()
          .headOption

      // Fetches all matching rows in one round trip: the ids are bound as a
      // single SQL array parameter for the `id = ANY(?)` query. Composite ids
      // cannot be encoded as one array, so they are rejected up front.
      def findAllById(ids: Iterable[ID])(using DbCon): Vector[E] =
        if compositeId then
          throw UnsupportedOperationException(
            "Composite ids unsupported for findAllById."
          )
        val idsArray = Array.from[Any](ids)
        Frag(
          findAllByIdSql,
          IArray(idsArray),
          (ps, pos) =>
            val sqlArray =
              ps.getConnection.createArrayOf(idFirstTypeName, idsArray)
            ps.setArray(pos, sqlArray)
            pos + 1
        ).query[E].run()
      // NOTE: H2 doesn't support setObject(..) with primitive arrays; if
      // createArrayOf were ever replaced with setObject, the ids would first
      // need boxing into an Array[Object].

      def delete(entity: E)(using DbCon): Unit =
        deleteById(
          entity
            .asInstanceOf[Product]
            .productElement(idIndex)
            .asInstanceOf[ID]
        )

      def deleteById(id: ID)(using DbCon): Unit =
        Frag(deleteByIdSql, IArray(id), idWriter(id)).update.run()

      def truncate()(using DbCon): Unit = truncateUpdate.run()

      def deleteAll(entities: Iterable[E])(using DbCon): BatchUpdateResult =
        deleteAllById(
          entities.map(e =>
            e.asInstanceOf[Product].productElement(idIndex).asInstanceOf[ID]
          )
        )

      // All deletes go out as one JDBC batch on a single prepared statement.
      def deleteAllById(ids: Iterable[ID])(using
          con: DbCon
      ): BatchUpdateResult =
        handleQuery(deleteByIdSql, ids):
          Using(con.connection.prepareStatement(deleteByIdSql)): ps =>
            idCodec.write(ids, ps)
            timed(batchUpdateResult(ps.executeBatch()))

      def insert(entityCreator: EC)(using con: DbCon): Unit =
        handleQuery(insertSql, entityCreator):
          Using(con.connection.prepareStatement(insertSql)): ps =>
            ecCodec.writeSingle(entityCreator, ps)
            timed(ps.executeUpdate())

      def insertAll(entityCreators: Iterable[EC])(using con: DbCon): Unit =
        handleQuery(insertSql, entityCreators):
          Using(con.connection.prepareStatement(insertSql)): ps =>
            ecCodec.write(entityCreators, ps)
            timed(batchUpdateResult(ps.executeBatch()))

      // Inserts, then reads the created row back from the driver's
      // generated-keys ResultSet.
      def insertReturning(entityCreator: EC)(using con: DbCon): E =
        handleQuery(insertSql, entityCreator):
          Using.Manager: use =>
            val ps =
              use(con.connection.prepareStatement(insertSql, insertGenKeys))
            ecCodec.writeSingle(entityCreator, ps)
            timed:
              ps.executeUpdate()
              val rs = use(ps.getGeneratedKeys)
              rs.next()
              eCodec.readSingle(rs)

      // Batch variant of insertReturning: one executeBatch, then all created
      // rows are read from the combined generated-keys ResultSet.
      def insertAllReturning(
          entityCreators: Iterable[EC]
      )(using con: DbCon): Vector[E] =
        handleQuery(insertSql, entityCreators):
          Using.Manager: use =>
            val ps =
              use(con.connection.prepareStatement(insertSql, insertGenKeys))
            ecCodec.write(entityCreators, ps)
            timed:
              batchUpdateResult(ps.executeBatch())
              val rs = use(ps.getGeneratedKeys)
              eCodec.read(rs)

      def update(entity: E)(using con: DbCon): Unit =
        handleQuery(updateSql, entity):
          Using(con.connection.prepareStatement(updateSql)): ps =>
            val entityValues: Vector[Any] = entity
              .asInstanceOf[Product]
              .productIterator
              .toVector
            // put ID at the end, matching updateCodecs / the WHERE clause
            val updateValues = entityValues
              .patch(idIndex, Vector.empty, 1)
              .appended(entityValues(idIndex))

            var pos = 1
            for (field, codec) <- updateValues.lazyZip(updateCodecs) do
              codec.writeSingle(field, ps, pos)
              pos += codec.cols.length
            timed(ps.executeUpdate())

      def updateAll(entities: Iterable[E])(using
          con: DbCon
      ): BatchUpdateResult =
        handleQuery(updateSql, entities):
          Using(con.connection.prepareStatement(updateSql)): ps =>
            for entity <- entities do
              val entityValues: Vector[Any] = entity
                .asInstanceOf[Product]
                .productIterator
                .toVector
              // put ID at the end, matching updateCodecs / the WHERE clause
              val updateValues = entityValues
                .patch(idIndex, Vector.empty, 1)
                .appended(entityValues(idIndex))

              var pos = 1
              for (field, codec) <- updateValues.lazyZip(updateCodecs) do
                codec.writeSingle(field, ps, pos)
                pos += codec.cols.length
              ps.addBatch()

            timed(batchUpdateResult(ps.executeBatch()))

    end new
  end buildRepoDefaults
end H2DbType


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/Id.scala
================================================
package com.augustnagro.magnum

import scala.annotation.StaticAnnotation

/** Annotation marking the id field of an entity case class. */
class Id extends StaticAnnotation


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/ImmutableRepo.scala
================================================
package com.augustnagro.magnum

import java.sql.ResultSet
import javax.sql.DataSource
import scala.util.{Try, Using}

/** Repository supporting read-only queries. When entity `E` does not have an
  * id, use `Null` for the `Id` type.
  *
  * All methods delegate to the derived [[RepoDefaults]] instance, so the
  * concrete SQL depends on the database's [[DbType]].
  *
  * @tparam E
  *   database entity class
  * @tparam ID
  *   id type of E
  */
open class ImmutableRepo[E, ID](using defaults: RepoDefaults[?, E, ID]):

  /** Count of all entities */
  def count(using DbCon): Long = defaults.count

  /** Returns true if an E exists with the given id */
  def existsById(id: ID)(using DbCon): Boolean = defaults.existsById(id)

  /** Returns all entity values */
  def findAll(using DbCon): Vector[E] = defaults.findAll

  /** Find all entities matching the specification. See the scaladoc of [[Spec]]
    * for more details
    */
  def findAll(spec: Spec[E])(using DbCon): Vector[E] = defaults.findAll(spec)

  /** Returns Some(entity) if a matching E is found */
  def findById(id: ID)(using DbCon): Option[E] = defaults.findById(id)

  /** Find all entities having ids in the Iterable. If an Id is not found, no
    * error is thrown.
    *
    * Note: some databases (MySql, Oracle) do not support this operation and
    * their RepoDefaults throw UnsupportedOperationException.
    */
  def findAllById(ids: Iterable[ID])(using DbCon): Vector[E] =
    defaults.findAllById(ids)

end ImmutableRepo


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/MySqlDbType.scala
================================================
package com.augustnagro.magnum

import java.sql.{Connection, PreparedStatement, ResultSet, Statement}
import java.time.OffsetDateTime
import scala.collection.View
import scala.deriving.Mirror
import scala.reflect.ClassTag
import scala.util.{Failure, Success, Using}

object MySqlDbType extends DbType:

  /** Spec rendering specialized for MySQL, which has neither NULLS
    * FIRST/LAST nor the standard OFFSET/FETCH clauses.
    */
  private val specImpl = new SpecImpl:
    override def sortSql(sort: Sort): String =
      val column = sort.column
      // Emulate null ordering with a leading boolean sort key.
      val nullSort = sort.nullOrder match
        case NullOrder.Default => ""
        case NullOrder.First   => s"$column IS NOT NULL, "
        case NullOrder.Last    => s"$column IS NULL, "
        case _                 => throw UnsupportedOperationException()
      val dir = sort.direction match
        case SortOrder.Default => ""
        case SortOrder.Asc     => " ASC"
        case SortOrder.Desc    => " DESC"
        case _                 => throw UnsupportedOperationException()
      nullSort + column + dir

    override def offsetLimitSql(
        offset: Option[Long],
        limit: Option[Int]
    ): Option[String] =
      (offset, limit) match
        case (Some(o), Some(l)) => Some(s"LIMIT $o, $l")
        // MySQL's offset form requires a limit; Long.MaxValue ~ "unbounded".
        case (Some(o), None)    => Some(s"LIMIT $o, ${Long.MaxValue}")
        case (None, Some(l))    => Some(s"LIMIT $l")
        case (None, None)       => None

  /** Builds the default Repo implementation for MySQL.
    *
    * findAllById, insertReturning and insertAllReturning are unsupported on
    * MySQL (see the respective methods below for the reasons).
    */
  def buildRepoDefaults[EC, E, ID](
      tableNameSql: String,
      eElemNames: Seq[String],
      eElemNamesSql: Seq[String],
      eElemCodecs: Seq[DbCodec[?]],
      ecElemNames: Seq[String],
      ecElemNamesSql: Seq[String],
      idIndex: Int
  )(using
      eCodec: DbCodec[E],
      ecCodec: DbCodec[EC],
      idCodec: DbCodec[ID],
      eClassTag: ClassTag[E],
      ecClassTag: ClassTag[EC],
      idClassTag: ClassTag[ID]
  ): RepoDefaults[EC, E, ID] =
    val idName = eElemNamesSql(idIndex)
    val ecInsertKeys = ecElemNamesSql.mkString("(", ", ", ")")

    // "col = ?" pairs for every non-id column, in declaration order.
    val updateKeys: String = eElemNamesSql
      .lazyZip(eElemCodecs)
      .map((sqlName, codec) => sqlName + " = " + codec.queryRepr)
      .patch(idIndex, Seq.empty, 1)
      .mkString(", ")

    // Codecs in updateKeys order, with the id codec appended for the
    // trailing WHERE-clause parameter.
    val updateCodecs = eElemCodecs
      .patch(idIndex, Seq.empty, 1)
      .appended(idCodec)
      .asInstanceOf[Seq[DbCodec[Any]]]

    val countSql = s"SELECT count(*) FROM $tableNameSql"
    val countQuery = Frag(countSql, Vector.empty, FragWriter.empty).query[Long]
    val existsByIdSql =
      s"SELECT 1 FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}"
    val findAllSql = s"SELECT * FROM $tableNameSql"
    val findAllQuery = Frag(findAllSql, Vector.empty, FragWriter.empty).query[E]
    val findByIdSql =
      s"SELECT * FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}"
    val deleteByIdSql =
      s"DELETE FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}"
    val truncateSql = s"TRUNCATE TABLE $tableNameSql"
    val truncateUpdate =
      Frag(truncateSql, Vector.empty, FragWriter.empty).update
    val insertSql =
      s"INSERT INTO $tableNameSql $ecInsertKeys VALUES (${ecCodec.queryRepr})"
    val updateSql =
      s"UPDATE $tableNameSql SET $updateKeys WHERE $idName = ${idCodec.queryRepr}"

    // Writes one id starting at `pos`; returns the next parameter position.
    def idWriter(id: ID): FragWriter = (ps, pos) =>
      idCodec.writeSingle(id, ps, pos)
      pos + idCodec.cols.length

    new RepoDefaults[EC, E, ID]:
      def count(using con: DbCon): Long = countQuery.run().head

      // `SELECT 1 ... WHERE id = ?`; any returned row means the id exists.
      def existsById(id: ID)(using DbCon): Boolean =
        Frag(existsByIdSql, IArray(id), idWriter(id))
          .query[Int]
          .run()
          .nonEmpty

      def findAll(using DbCon): Vector[E] = findAllQuery.run()

      def findAll(spec: Spec[E])(using DbCon): Vector[E] =
        specImpl.findAll(spec, tableNameSql)

      def findById(id: ID)(using DbCon): Option[E] =
        Frag(findByIdSql, IArray(id), idWriter(id))
          .query[E]
          .run()
          .headOption

      def findAllById(ids: Iterable[ID])(using DbCon): Vector[E] =
        throw UnsupportedOperationException(
          "MySql does not support 'ANY' keyword, and does not support long IN parameter lists. Use findById in a loop instead."
        )

      def delete(entity: E)(using DbCon): Unit =
        deleteById(
          entity
            .asInstanceOf[Product]
            .productElement(idIndex)
            .asInstanceOf[ID]
        )

      def deleteById(id: ID)(using DbCon): Unit =
        Frag(deleteByIdSql, IArray(id), idWriter(id)).update
          .run()

      def truncate()(using DbCon): Unit = truncateUpdate.run()

      def deleteAll(entities: Iterable[E])(using DbCon): BatchUpdateResult =
        deleteAllById(
          entities.map(e =>
            e.asInstanceOf[Product].productElement(idIndex).asInstanceOf[ID]
          )
        )

      // All deletes go out as one JDBC batch on a single prepared statement.
      def deleteAllById(ids: Iterable[ID])(using
          con: DbCon
      ): BatchUpdateResult =
        handleQuery(deleteByIdSql, ids):
          Using(con.connection.prepareStatement(deleteByIdSql)): ps =>
            idCodec.write(ids, ps)
            timed(batchUpdateResult(ps.executeBatch()))

      def insert(entityCreator: EC)(using con: DbCon): Unit =
        handleQuery(insertSql, entityCreator):
          Using(con.connection.prepareStatement(insertSql)): ps =>
            ecCodec.writeSingle(entityCreator, ps)
            timed(ps.executeUpdate())

      def insertAll(entityCreators: Iterable[EC])(using con: DbCon): Unit =
        handleQuery(insertSql, entityCreators):
          Using(con.connection.prepareStatement(insertSql)): ps =>
            ecCodec.write(entityCreators, ps)
            timed(batchUpdateResult(ps.executeBatch()))

      def insertReturning(entityCreator: EC)(using con: DbCon): E =
        // unfortunately, mysql only will return auto_incremented keys.
        // it doesn't return default columns, and adding other columns to
        // the insertGenKeys array doesn't change this behavior.
        throw UnsupportedOperationException()

      def insertAllReturning(
          entityCreators: Iterable[EC]
      )(using con: DbCon): Vector[E] =
        throw UnsupportedOperationException()

      def update(entity: E)(using con: DbCon): Unit =
        handleQuery(updateSql, entity):
          Using(con.connection.prepareStatement(updateSql)): ps =>
            val entityValues: Vector[Any] = entity
              .asInstanceOf[Product]
              .productIterator
              .toVector
            // put ID at the end, matching updateCodecs / the WHERE clause
            val updateValues = entityValues
              .patch(idIndex, Vector.empty, 1)
              .appended(entityValues(idIndex))

            var pos = 1
            for (field, codec) <- updateValues.lazyZip(updateCodecs) do
              codec.writeSingle(field, ps, pos)
              pos += codec.cols.length
            timed(ps.executeUpdate())

      def updateAll(entities: Iterable[E])(using
          con: DbCon
      ): BatchUpdateResult =
        handleQuery(updateSql, entities):
          Using(con.connection.prepareStatement(updateSql)): ps =>
            for entity <- entities do
              val entityValues: Vector[Any] = entity
                .asInstanceOf[Product]
                .productIterator
                .toVector
              // put ID at the end, matching updateCodecs / the WHERE clause
              val updateValues = entityValues
                .patch(idIndex, Vector.empty, 1)
                .appended(entityValues(idIndex))

              var pos = 1
              for (field, codec) <- updateValues.lazyZip(updateCodecs) do
                codec.writeSingle(field, ps, pos)
                pos += codec.cols.length
              ps.addBatch()

            timed(batchUpdateResult(ps.executeBatch()))
    end new
  end buildRepoDefaults
end MySqlDbType


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/NullOrder.scala
================================================
package com.augustnagro.magnum

/** Ordering of NULL values for a [[Sort]] column. */
trait NullOrder

object NullOrder:
  // Leave null placement to the database's default behavior.
  case object Default extends NullOrder
  // Sort null values before non-null values.
  case object First extends NullOrder
  // Sort null values after non-null values.
  case object Last extends NullOrder


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/OracleDbType.scala
================================================
package com.augustnagro.magnum

import java.sql.{Connection, PreparedStatement, ResultSet, Statement}
import java.time.OffsetDateTime
import scala.collection.View
import scala.deriving.Mirror
import scala.reflect.ClassTag
import scala.util.{Failure, Success, Using}

object OracleDbType extends DbType:

  /** Spec rendering for Oracle, using standard OFFSET/FETCH pagination. */
  private val specImpl = new SpecImpl:
    override def offsetLimitSql(
        offset: Option[Long],
        limit: Option[Int]
    ): Option[String] =
      (offset, limit) match
        case (Some(o), Some(l)) =>
          Some(s"OFFSET $o ROWS FETCH NEXT $l ROWS ONLY")
        case (Some(o), None) => Some(s"OFFSET $o ROWS")
        case (None, Some(l)) => Some(s"FETCH NEXT $l ROWS ONLY")
        case (None, None)    => None

  /** Builds the default Repo implementation for Oracle.
    *
    * findAllById is unsupported (no SQL arrays or long IN lists), and
    * insertAllReturning falls back to row-by-row inserts since the Oracle
    * JDBC driver does not support batched generated-key retrieval.
    */
  def buildRepoDefaults[EC, E, ID](
      tableNameSql: String,
      eElemNames: Seq[String],
      eElemNamesSql: Seq[String],
      eElemCodecs: Seq[DbCodec[?]],
      ecElemNames: Seq[String],
      ecElemNamesSql: Seq[String],
      idIndex: Int
  )(using
      eCodec: DbCodec[E],
      ecCodec: DbCodec[EC],
      idCodec: DbCodec[ID],
      eClassTag: ClassTag[E],
      ecClassTag: ClassTag[EC],
      idClassTag: ClassTag[ID]
  ): RepoDefaults[EC, E, ID] =
    val idName = eElemNamesSql(idIndex)
    val ecInsertKeys = ecElemNamesSql.mkString("(", ", ", ")")

    // "col = ?" pairs for every non-id column, in declaration order.
    val updateKeys: String = eElemNamesSql
      .lazyZip(eElemCodecs)
      .map((sqlName, codec) => sqlName + " = " + codec.queryRepr)
      .patch(idIndex, Seq.empty, 1)
      .mkString(", ")

    // Codecs in updateKeys order, with the id codec appended for the
    // trailing WHERE-clause parameter.
    val updateCodecs = eElemCodecs
      .patch(idIndex, Seq.empty, 1)
      .appended(idCodec)
      .asInstanceOf[Seq[DbCodec[Any]]]

    // Requesting every column as a generated key lets insertReturning read
    // the complete inserted row back from getGeneratedKeys.
    val insertGenKeys = Array.from(eElemNamesSql)

    val countSql = s"SELECT count(*) FROM $tableNameSql"
    val countQuery = Frag(countSql, Vector.empty, FragWriter.empty).query[Long]
    val existsByIdSql =
      s"SELECT 1 FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}"
    val findAllSql = s"SELECT * FROM $tableNameSql"
    val findAllQuery = Frag(findAllSql, Vector.empty, FragWriter.empty).query[E]
    val findByIdSql =
      s"SELECT * FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}"
    val deleteByIdSql =
      s"DELETE FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}"
    val truncateSql = s"TRUNCATE TABLE $tableNameSql"
    val truncateUpdate =
      Frag(truncateSql, Vector.empty, FragWriter.empty).update
    val insertSql =
      s"INSERT INTO $tableNameSql $ecInsertKeys VALUES (${ecCodec.queryRepr})"
    val updateSql =
      s"UPDATE $tableNameSql SET $updateKeys WHERE $idName = ${idCodec.queryRepr}"

    // Writes one id starting at `pos`; returns the next parameter position.
    def idWriter(id: ID): FragWriter = (ps, pos) =>
      idCodec.writeSingle(id, ps, pos)
      pos + idCodec.cols.length

    new RepoDefaults[EC, E, ID]:
      def count(using con: DbCon): Long = countQuery.run().head

      // `SELECT 1 ... WHERE id = ?`; any returned row means the id exists.
      def existsById(id: ID)(using DbCon): Boolean =
        Frag(existsByIdSql, IArray(id), idWriter(id))
          .query[Int]
          .run()
          .nonEmpty

      def findAll(using DbCon): Vector[E] = findAllQuery.run()

      def findAll(spec: Spec[E])(using DbCon): Vector[E] =
        specImpl.findAll(spec, tableNameSql)

      def findById(id: ID)(using DbCon): Option[E] =
        Frag(findByIdSql, IArray(id), idWriter(id))
          .query[E]
          .run()
          .headOption

      def findAllById(ids: Iterable[ID])(using DbCon): Vector[E] =
        throw UnsupportedOperationException(
          "Oracle does not support SQL arrays, and does not support long IN parameter lists. Use findById in a loop instead."
        )

      def delete(entity: E)(using DbCon): Unit =
        deleteById(
          entity
            .asInstanceOf[Product]
            .productElement(idIndex)
            .asInstanceOf[ID]
        )

      def deleteById(id: ID)(using DbCon): Unit =
        Frag(deleteByIdSql, IArray(id), idWriter(id)).update
          .run()

      def truncate()(using DbCon): Unit = truncateUpdate.run()

      def deleteAll(entities: Iterable[E])(using DbCon): BatchUpdateResult =
        deleteAllById(
          entities.map(e =>
            e.asInstanceOf[Product].productElement(idIndex).asInstanceOf[ID]
          )
        )

      // All deletes go out as one JDBC batch on a single prepared statement.
      def deleteAllById(ids: Iterable[ID])(using
          con: DbCon
      ): BatchUpdateResult =
        handleQuery(deleteByIdSql, ids):
          Using(con.connection.prepareStatement(deleteByIdSql)): ps =>
            idCodec.write(ids, ps)
            timed(batchUpdateResult(ps.executeBatch()))

      def insert(entityCreator: EC)(using con: DbCon): Unit =
        handleQuery(insertSql, entityCreator):
          Using(con.connection.prepareStatement(insertSql)): ps =>
            ecCodec.writeSingle(entityCreator, ps)
            timed(ps.executeUpdate())

      def insertAll(entityCreators: Iterable[EC])(using con: DbCon): Unit =
        handleQuery(insertSql, entityCreators):
          Using(con.connection.prepareStatement(insertSql)): ps =>
            ecCodec.write(entityCreators, ps)
            timed(batchUpdateResult(ps.executeBatch()))

      // Inserts, then reads the created row back from the generated-keys
      // ResultSet (all columns were requested via insertGenKeys).
      def insertReturning(entityCreator: EC)(using con: DbCon): E =
        handleQuery(insertSql, entityCreator):
          Using.Manager: use =>
            val ps =
              use(con.connection.prepareStatement(insertSql, insertGenKeys))
            ecCodec.writeSingle(entityCreator, ps)
            timed:
              ps.executeUpdate()
              val rs = use(ps.getGeneratedKeys)
              rs.next()
              eCodec.readSingle(rs)

      def insertAllReturning(
          entityCreators: Iterable[EC]
      )(using con: DbCon): Vector[E] =
        // oracle jdbc does not support batch RETURNING
        entityCreators.map(insertReturning).toVector

      def update(entity: E)(using con: DbCon): Unit =
        handleQuery(updateSql, entity):
          Using(con.connection.prepareStatement(updateSql)): ps =>
            val entityValues: Vector[Any] = entity
              .asInstanceOf[Product]
              .productIterator
              .toVector
            // put ID at the end, matching updateCodecs / the WHERE clause
            val updateValues = entityValues
              .patch(idIndex, Vector.empty, 1)
              .appended(entityValues(idIndex))

            var pos = 1
            for (field, codec) <- updateValues.lazyZip(updateCodecs) do
              codec.writeSingle(field, ps, pos)
              pos += codec.cols.length
            timed(ps.executeUpdate())

      def updateAll(entities: Iterable[E])(using
          con: DbCon
      ): BatchUpdateResult =
        handleQuery(updateSql, entities):
          Using(con.connection.prepareStatement(updateSql)): ps =>
            for entity <- entities do
              val entityValues: Vector[Any] = entity
                .asInstanceOf[Product]
                .productIterator
                .toVector
              // put ID at the end, matching updateCodecs / the WHERE clause
              val updateValues = entityValues
                .patch(idIndex, Vector.empty, 1)
                .appended(entityValues(idIndex))

              var pos = 1
              for (field, codec) <- updateValues.lazyZip(updateCodecs) do
                codec.writeSingle(field, ps, pos)
                pos += codec.cols.length
              ps.addBatch()

            timed(batchUpdateResult(ps.executeBatch()))
    end new
  end buildRepoDefaults
end OracleDbType


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/PostgresDbType.scala
================================================
package com.augustnagro.magnum

import java.sql.{Connection, JDBCType, PreparedStatement, ResultSet, Statement}
import java.time.OffsetDateTime
import scala.collection.View
import scala.deriving.Mirror
import scala.reflect.ClassTag
import scala.util.{Failure, Success, Using}
import java.util.StringJoiner

object PostgresDbType extends DbType:

  /** Builds the default Repo implementation for Postgres. Generated-key
    * support (Statement.RETURN_GENERATED_KEYS) allows insertReturning and
    * insertAllReturning to read the full entity row back.
    */
  def buildRepoDefaults[EC, E, ID](
      tableNameSql: String,
      eElemNames: Seq[String],
      eElemNamesSql: Seq[String],
      eElemCodecs: Seq[DbCodec[?]],
      ecElemNames: Seq[String],
      ecElemNamesSql: Seq[String],
      idIndex: Int
  )(using
      eCodec: DbCodec[E],
      ecCodec: DbCodec[EC],
      idCodec: DbCodec[ID],
      eClassTag: ClassTag[E],
      ecClassTag: ClassTag[EC],
      idClassTag: ClassTag[ID]
  ): RepoDefaults[EC, E, ID] =
    val idName = eElemNamesSql(idIndex)
    val selectKeys = eElemNamesSql.mkString(", ")
    val ecInsertKeys = ecElemNamesSql.mkString("(", ", ", ")")

    // "col = ?" pairs for every non-id column, in declaration order.
    val updateKeys: String = eElemNamesSql
      .lazyZip(eElemCodecs)
      .map((sqlName, codec) => sqlName + " = " + codec.queryRepr)
      .patch(idIndex, Seq.empty, 1)
      .mkString(", ")

    // Codecs in updateKeys order, with the id codec appended for the
    // trailing WHERE-clause parameter.
    val updateCodecs = eElemCodecs
      .patch(idIndex, Seq.empty, 1)
      .appended(idCodec)
      .asInstanceOf[Seq[DbCodec[Any]]]

    val countSql = s"SELECT count(*) FROM $tableNameSql"
    val countQuery = Frag(countSql, Vector.empty, FragWriter.empty).query[Long]
    val existsByIdSql =
      s"SELECT 1 FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}"
    val findAllSql = s"SELECT $selectKeys FROM $tableNameSql"
    val findAllQuery = Frag(findAllSql, Vector.empty, FragWriter.empty).query[E]
    val findByIdSql =
      s"SELECT $selectKeys FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}"
    // All ids are sent as one SQL array parameter via `= ANY(?)`.
    val findAllByIdSql =
      s"SELECT $selectKeys FROM $tableNameSql WHERE $idName = ANY(?)"
    val deleteByIdSql =
      s"DELETE FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}"
    val truncateSql = s"TRUNCATE TABLE $tableNameSql"
    val truncateUpdate =
      Frag(truncateSql, Vector.empty, FragWriter.empty).update
    val insertSql =
      s"INSERT INTO $tableNameSql $ecInsertKeys VALUES (${ecCodec.queryRepr})"
    val updateSql =
      s"UPDATE $tableNameSql SET $updateKeys WHERE $idName = ${idCodec.queryRepr}"

    // NOTE(review): "composite" is detected via the number of *distinct*
    // JDBC column types; a multi-column id whose columns share one type
    // would not be flagged — confirm this is intentional.
    val compositeId = idCodec.cols.distinct.size != 1
    // JDBC type name of the id's first column, used by createArrayOf below.
    val idFirstTypeName = JDBCType.valueOf(idCodec.cols.head).getName

    // Writes one id starting at `pos`; returns the next parameter position.
    def idWriter(id: ID): FragWriter = (ps, pos) =>
      idCodec.writeSingle(id, ps, pos)
      pos + idCodec.cols.length

    new RepoDefaults[EC, E, ID]:
      def count(using con: DbCon): Long = countQuery.run().head

      // `SELECT 1 ... WHERE id = ?`; any returned row means the id exists.
      def existsById(id: ID)(using DbCon): Boolean =
        Frag(existsByIdSql, IArray(id), idWriter(id))
          .query[Int]
          .run()
          .nonEmpty

      def findAll(using DbCon): Vector[E] = findAllQuery.run()

      def findAll(spec: Spec[E])(using DbCon): Vector[E] =
        SpecImpl.Default.findAll(spec, tableNameSql)

      def findById(id: ID)(using DbCon): Option[E] =
        Frag(findByIdSql, IArray(id), idWriter(id))
          .query[E]
          .run()
          .headOption

      // Fetches all matching rows in one round trip: the ids are bound as a
      // single SQL array parameter for the `id = ANY(?)` query. Composite ids
      // cannot be encoded as one array, so they are rejected up front.
      def findAllById(ids: Iterable[ID])(using DbCon): Vector[E] =
        if compositeId then
          throw UnsupportedOperationException(
            "Composite ids unsupported for findAllById."
          )
        val idsArray = Array.from[Any](ids)
        Frag(
          findAllByIdSql,
          IArray(idsArray),
          (ps, pos) =>
            val sqlArray =
              ps.getConnection.createArrayOf(idFirstTypeName, idsArray)
            ps.setArray(pos, sqlArray)
            pos + 1
        ).query[E].run()

      def delete(entity: E)(using DbCon): Unit =
        deleteById(
          entity
            .asInstanceOf[Product]
            .productElement(idIndex)
            .asInstanceOf[ID]
        )

      def deleteById(id: ID)(using DbCon): Unit =
        Frag(deleteByIdSql, IArray(id), idWriter(id)).update
          .run()

      def truncate()(using DbCon): Unit =
        truncateUpdate.run()

      def deleteAll(entities: Iterable[E])(using DbCon): BatchUpdateResult =
        deleteAllById(
          entities.map(e =>
            e.asInstanceOf[Product].productElement(idIndex).asInstanceOf[ID]
          )
        )

      // All deletes go out as one JDBC batch on a single prepared statement.
      def deleteAllById(ids: Iterable[ID])(using
          con: DbCon
      ): BatchUpdateResult =
        handleQuery(deleteByIdSql, ids):
          Using(con.connection.prepareStatement(deleteByIdSql)): ps =>
            idCodec.write(ids, ps)
            timed(batchUpdateResult(ps.executeBatch()))

      def insert(entityCreator: EC)(using con: DbCon): Unit =
        handleQuery(insertSql, entityCreator):
          Using(con.connection.prepareStatement(insertSql)): ps =>
            ecCodec.writeSingle(entityCreator, ps)
            timed(ps.executeUpdate())

      def insertAll(entityCreators: Iterable[EC])(using con: DbCon): Unit =
        handleQuery(insertSql, entityCreators):
          Using(con.connection.prepareStatement(insertSql)): ps =>
            ecCodec.write(entityCreators, ps)
            timed(batchUpdateResult(ps.executeBatch()))

      // Inserts, then reads the created row back from the driver's
      // generated-keys ResultSet.
      def insertReturning(entityCreator: EC)(using con: DbCon): E =
        handleQuery(insertSql, entityCreator):
          Using.Manager: use =>
            val ps = use(
              con.connection
                .prepareStatement(insertSql, Statement.RETURN_GENERATED_KEYS)
            )
            ecCodec.writeSingle(entityCreator, ps)
            timed:
              ps.executeUpdate()
              val rs = use(ps.getGeneratedKeys)
              rs.next()
              eCodec.readSingle(rs)

      // Batch variant of insertReturning: one executeBatch, then all created
      // rows are read from the combined generated-keys ResultSet.
      def insertAllReturning(
          entityCreators: Iterable[EC]
      )(using con: DbCon): Vector[E] =
        handleQuery(insertSql, entityCreators):
          Using.Manager: use =>
            val ps = use(
              con.connection
                .prepareStatement(insertSql, Statement.RETURN_GENERATED_KEYS)
            )
            ecCodec.write(entityCreators, ps)
            timed:
              batchUpdateResult(ps.executeBatch())
              val rs = use(ps.getGeneratedKeys)
              eCodec.read(rs)

      def update(entity: E)(using con: DbCon): Unit =
        handleQuery(updateSql, entity):
          Using(con.connection.prepareStatement(updateSql)): ps =>
            val entityValues: Vector[Any] = entity
              .asInstanceOf[Product]
              .productIterator
              .toVector
            // put ID at the end, matching updateCodecs / the WHERE clause
            val updateValues = entityValues
              .patch(idIndex, Vector.empty, 1)
              .appended(entityValues(idIndex))

            var pos = 1
            for (field, codec) <- updateValues.lazyZip(updateCodecs) do
              codec.writeSingle(field, ps, pos)
              pos += codec.cols.length
            timed(ps.executeUpdate())

      def updateAll(entities: Iterable[E])(using
          con: DbCon
      ): BatchUpdateResult =
        handleQuery(updateSql, entities):
          Using(con.connection.prepareStatement(updateSql)): ps =>
            for entity <- entities do
              val entityValues: Vector[Any] = entity
                .asInstanceOf[Product]
                .productIterator
                .toVector
              // put ID at the end, matching updateCodecs / the WHERE clause
              val updateValues = entityValues
                .patch(idIndex, Vector.empty, 1)
                .appended(entityValues(idIndex))

              var pos = 1
              for (field, codec) <- updateValues.lazyZip(updateCodecs) do
                codec.writeSingle(field, ps, pos)
                pos += codec.cols.length
              ps.addBatch()

            timed(batchUpdateResult(ps.executeBatch()))
    end new
  end buildRepoDefaults
end PostgresDbType


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/Query.scala
================================================
package com.augustnagro.magnum

import java.util.concurrent.TimeUnit
import scala.concurrent.duration.FiniteDuration
import scala.util.Using.Manager
import scala.util.control.NonFatal
import scala.util.{Failure, Success, Try, Using}

/** A runnable SELECT built from a [[Frag]] plus a row decoder. Obtained via
  * `Frag.query[E]`; not constructed directly by users.
  */
class Query[E] private[magnum] (val frag: Frag, reader: DbCodec[E]):

  /** Executes the query and materializes every row into a Vector. The
    * PreparedStatement and ResultSet are closed before returning.
    */
  def run()(using con: DbCon): Vector[E] =
    handleQuery(frag.sqlString, frag.params):
      Using.Manager: use =>
        val ps = use(con.connection.prepareStatement(frag.sqlString))
        frag.writer.write(ps, 1)
        timed:
          val rs = use(ps.executeQuery())
          reader.read(rs)

  /** Streaming [[Iterator]]. Set [[fetchSize]] to give the JDBC driver a hint
    * as to how many rows to fetch per request
    *
    * The statement and result set are registered with the caller-supplied
    * [[Manager]], so the returned iterator is only valid until that Manager
    * closes.
    */
  def iterator(
      fetchSize: Int = 0
  )(using con: DbCon, use: Manager): Iterator[E] =
    handleQuery(frag.sqlString, frag.params):
      Try:
        val ps = use(con.connection.prepareStatement(frag.sqlString))
        ps.setFetchSize(fetchSize)
        frag.writer.write(ps, 1)
        timed:
          val rs = use(ps.executeQuery())
          ResultSetIterator(rs, frag, reader, con.sqlLogger)

end Query


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/Repo.scala
================================================
package com.augustnagro.magnum

import javax.sql.DataSource

/** A read & write data repository
  *
  * @tparam EC
  *   'Entity Creator', which should have all fields of E minus those
  *   auto-generated by the database. Can be the same type as E.
  * @tparam E
  *   database entity class
  * @tparam ID
  *   id type of E
  */
open class Repo[EC, E, ID](using defaults: RepoDefaults[EC, E, ID])
    extends ImmutableRepo[E, ID]:

  /** Deletes the entity, matching on its id */
  def delete(entity: E)(using DbCon): Unit = defaults.delete(entity)

  /** Deletes the entity with the given id */
  def deleteById(id: ID)(using DbCon): Unit = defaults.deleteById(id)

  /** Deletes ALL entities */
  def truncate()(using DbCon): Unit = defaults.truncate()

  /** Deletes all provided entities in one batch */
  def deleteAll(entities: Iterable[E])(using DbCon): BatchUpdateResult =
    defaults.deleteAll(entities)

  /** Deletes all entities with an Iterable of ids */
  def deleteAllById(ids: Iterable[ID])(using DbCon): BatchUpdateResult =
    defaults.deleteAllById(ids)

  /** Inserts the entity. Use [[insertReturning]] if the persisted value is
    * needed.
    */
  def insert(entityCreator: EC)(using DbCon): Unit =
    defaults.insert(entityCreator)

  /** Inserts all entities. Use [[insertAllReturning]] if the persisted values
    * are needed.
    */
  def insertAll(entityCreators: Iterable[EC])(using DbCon): Unit =
    defaults.insertAll(entityCreators)

  /** Inserts the entity and returns the persisted value */
  def insertReturning(entityCreator: EC)(using DbCon): E =
    defaults.insertReturning(entityCreator)

  /** Inserts all entities and returns the persisted values */
  def insertAllReturning(entityCreators: Iterable[EC])(using DbCon): Vector[E] =
    defaults.insertAllReturning(entityCreators)

  /** Updates the entity, matching on its id */
  def update(entity: E)(using DbCon): Unit = defaults.update(entity)

  /** Updates all entities in one batch, matching on their ids */
  def updateAll(entities: Iterable[E])(using DbCon): BatchUpdateResult =
    defaults.updateAll(entities)

end Repo


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/RepoDefaults.scala
================================================
package com.augustnagro.magnum

import scala.compiletime.*
import scala.deriving.*
import scala.quoted.*
import scala.reflect.ClassTag

/** The per-table operations backing [[Repo]] and [[ImmutableRepo]]. Instances
  * are derived at compile time by the inline givens in the companion object,
  * using the entity's @Table annotation to pick the [[DbType]].
  */
trait RepoDefaults[EC, E, ID]:
  def count(using DbCon): Long
  def existsById(id: ID)(using DbCon): Boolean
  def findAll(using DbCon): Vector[E]
  def findAll(spec: Spec[E])(using DbCon): Vector[E]
  def findById(id: ID)(using DbCon): Option[E]
  def findAllById(ids: Iterable[ID])(using DbCon): Vector[E]
  def delete(entity: E)(using DbCon): Unit
  def deleteById(id: ID)(using DbCon): Unit
  def truncate()(using DbCon): Unit
  def deleteAll(entities: Iterable[E])(using DbCon): BatchUpdateResult
  def deleteAllById(ids: Iterable[ID])(using DbCon): BatchUpdateResult
  def insert(entityCreator: EC)(using DbCon): Unit
  def insertAll(entityCreators: Iterable[EC])(using DbCon): Unit
  def insertReturning(entityCreator: EC)(using DbCon): E
  def insertAllReturning(entityCreators: Iterable[EC])(using DbCon): Vector[E]
  def update(entity: E)(using DbCon): Unit
  def updateAll(entities: Iterable[E])(using DbCon): BatchUpdateResult

object RepoDefaults:

  /** Derivation for read-only repos: the entity-creator type is simply E */
  inline given genImmutableRepo[E: DbCodec: Mirror.Of, ID]
      : RepoDefaults[E, E, ID] =
    genRepo[E, E, ID]

  /** Macro-derived repo operations for a @Table-annotated entity */
  inline given genRepo[
      EC: DbCodec: Mirror.Of,
      E: DbCodec: Mirror.Of,
      ID
  ]: RepoDefaults[EC, E, ID] = ${ genImpl[EC, E, ID] }

  // Summons codecs & ClassTags for EC, E, and ID at compile time, then
  // delegates SQL construction to the DbType named in the @Table annotation.
  private def genImpl[EC: Type, E: Type, ID: Type](using
      Quotes
  ): Expr[RepoDefaults[EC, E, ID]] =
    import quotes.reflect.*
    val exprs = tableExprs[EC, E, ID]
    val eElemCodecs = getEElemCodecs[E]
    val eCodec = Expr.summon[DbCodec[E]].get
    val ecCodec = Expr.summon[DbCodec[EC]].get
    // ID =:= Null (presumably id-less tables) falls back to the permissive
    // AnyCodec / ClassTag.Any rather than failing implicit summoning.
    val idCodec =
      if TypeRepr.of[ID] =:= TypeRepr.of[Null] then
        '{ DbCodec.AnyCodec.asInstanceOf[DbCodec[ID]] }
      else Expr.summon[DbCodec[ID]].get
    val eClassTag = Expr.summon[ClassTag[E]].get
    val ecClassTag = Expr.summon[ClassTag[EC]].get
    val idClassTag =
      if TypeRepr.of[ID] =:= TypeRepr.of[Null] then
        '{ ClassTag.Any.asInstanceOf[ClassTag[ID]] }
      else Expr.summon[ClassTag[ID]].get
    '{
      ${ exprs.tableAnnot }.dbType.buildRepoDefaults[EC, E, ID](
        ${ exprs.tableNameSql },
        ${ Expr(exprs.eElemNames) },
        ${ Expr.ofSeq(exprs.eElemNamesSql) },
        $eElemCodecs,
        ${ Expr(exprs.ecElemNames) },
        ${ Expr.ofSeq(exprs.ecElemNamesSql) },
        ${ exprs.idIndex }
      )(using
        $eCodec,
        $ecCodec,
        $idCodec,
        $eClassTag,
        $ecClassTag,
        $idClassTag
      )
    }
  end genImpl

  // Product (case class) entities get one codec per field; other entities use
  // the single summoned codec for the whole row.
  private def getEElemCodecs[E: Type](using Quotes): Expr[Seq[DbCodec[?]]] =
    import quotes.reflect.*
    Expr.summon[Mirror.ProductOf[E]] match
      case Some('{
            $m: Mirror.ProductOf[E] {
              type MirroredElemTypes = mets
            }
          }) =>
        getProductCodecs[mets]()
      case _ =>
        val sumCodec = Expr.summon[DbCodec[E]].get
        '{ Seq($sumCodec) }

  // Recurses through the tuple of field types, summoning a DbCodec for each;
  // fields without a codec in scope fall back to DbCodec.AnyCodec.
  private def getProductCodecs[Mets: Type](
      res: Vector[Expr[DbCodec[?]]] = Vector.empty
  )(using Quotes): Expr[Seq[DbCodec[?]]] =
    Type.of[Mets] match
      case '[met *: metTail] =>
        Expr.summon[DbCodec[met]] match
          case Some(codec) => getProductCodecs[metTail](res :+ codec)
          case None => getProductCodecs[metTail](res :+ '{ DbCodec.AnyCodec })
      case '[EmptyTuple] => Expr.ofSeq(res)

end RepoDefaults


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/ResultSetIterator.scala
================================================
package com.augustnagro.magnum

import java.sql.ResultSet
import scala.util.control.NonFatal

/** Iterator over a JDBC [[ResultSet]], decoding each row with `reader`.
  * Failures while advancing or decoding are wrapped in a [[SqlException]]
  * whose message comes from the configured [[SqlLogger]].
  */
private class ResultSetIterator[E](
    rs: ResultSet,
    frag: Frag,
    reader: DbCodec[E],
    sqlLogger: SqlLogger
) extends Iterator[E]:

  // True while the cursor points at a readable row.
  private var rsHasNext: Boolean =
    try rs.next()
    catch case NonFatal(t) => throw wrap(t)

  override def hasNext: Boolean = rsHasNext

  override def next(): E =
    if !rsHasNext then throw IllegalStateException("ResultSet is empty")
    try
      val decoded = reader.readSingle(rs)
      rsHasNext = rs.next()
      decoded
    catch case NonFatal(t) => throw wrap(t)

  // Builds the SqlException raised on any cursor/decoding failure.
  private def wrap(t: Throwable): SqlException =
    SqlException(
      sqlLogger.exceptionMsg(
        SqlExceptionEvent(frag.sqlString, frag.params, t)
      ),
      t
    )

end ResultSetIterator


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/Returning.scala
================================================
package com.augustnagro.magnum

import scala.util.{Failure, Success, Try, Using}
import Using.Manager
import java.sql.Statement
import java.sql.ResultSet

/** A SQL statement yielding rows of type E, read either from the statement's
  * own result set (e.g. a native RETURNING clause) or, when `keyColumns` is
  * non-empty, from the JDBC generated keys.
  */
class Returning[E] private[magnum] (
    val frag: Frag,
    reader: DbCodec[E],
    keyColumns: Iterable[String]
):
  /** Executes the statement and materializes every returned row */
  def run()(using con: DbCon): Vector[E] =
    withResultSet(reader.read)

  /** Streaming [[Iterator]]. Set [[fetchSize]] to give the JDBC driver a hint
    * as to how many rows to fetch per request
    */
  def iterator(
      fetchSize: Int = 0
  )(using con: DbCon, use: Manager): Iterator[E] =
    // Bug fix: fetchSize was previously accepted but never forwarded to the
    // PreparedStatement (unlike Query.iterator).
    withResultSet(ResultSetIterator(_, frag, reader, con.sqlLogger), fetchSize)

  /** Executes `frag` and applies `f` to the resulting [[ResultSet]].
    *
    * With no `keyColumns`, the statement itself must produce a result set;
    * otherwise it is prepared with the key columns and rows are read back via
    * `getGeneratedKeys`. `fetchSize` is passed to `Statement.setFetchSize`;
    * the default 0 keeps the driver's own default.
    */
  private def withResultSet[A](f: ResultSet => A, fetchSize: Int = 0)(using
      con: DbCon
  ): A =
    handleQuery(frag.sqlString, frag.params):
      Manager: use =>
        if keyColumns.isEmpty then
          val ps = use(con.connection.prepareStatement(frag.sqlString))
          ps.setFetchSize(fetchSize)
          frag.writer.write(ps, 1)
          timed:
            val hasResults = ps.execute()
            if hasResults then
              val rs = use(ps.getResultSet)
              f(rs)
            else
              throw UnsupportedOperationException(
                "No results for RETURNING clause"
              )
        else
          val ps = use(
            con.connection.prepareStatement(frag.sqlString, keyColumns.toArray)
          )
          ps.setFetchSize(fetchSize)
          frag.writer.write(ps, 1)
          timed:
            ps.execute()
            val rs = use(ps.getGeneratedKeys)
            f(rs)

end Returning


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/Seek.scala
================================================
package com.augustnagro.magnum

/** One keyset-pagination condition, created via [[Spec.seek]].
  *
  * Rendered by SpecImpl as the predicate `column <seekDirection> ?`, with
  * `value` written to the PreparedStatement by `codec`; `columnSort` and
  * `nullOrder` supply the matching ORDER BY term.
  */
class Seek private[magnum] (
    val column: String,
    val seekDirection: SeekDir,
    val value: Any,
    val columnSort: SortOrder,
    val nullOrder: NullOrder,
    val codec: DbCodec[?]
)


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/SeekDir.scala
================================================
package com.augustnagro.magnum

/** Direction of a keyset-pagination comparison; see [[Spec.seek]] */
trait SeekDir

object SeekDir:
  /** Rendered as `>` in the seek predicate */
  case object Gt extends SeekDir
  /** Rendered as `<` in the seek predicate */
  case object Lt extends SeekDir


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/Sort.scala
================================================
package com.augustnagro.magnum

/** One ORDER BY term, created via [[Spec.orderBy]] */
class Sort private[magnum] (
    val column: String,
    val direction: SortOrder,
    val nullOrder: NullOrder
)


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/SortOrder.scala
================================================
package com.augustnagro.magnum

/** Sort direction of an ORDER BY term; see [[Spec.orderBy]] */
trait SortOrder

object SortOrder:
  /** No explicit direction; the database default applies */
  case object Default extends SortOrder
  case object Asc extends SortOrder
  case object Desc extends SortOrder


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/Spec.scala
================================================
package com.augustnagro.magnum

import java.util.StringJoiner

/** An immutable, composable description of a dynamic SELECT query. Every
  * method returns a new Spec; the receiver is never modified.
  */
class Spec[E] private (
    val prefix: Option[Frag],
    val predicates: Vector[Frag],
    val limit: Option[Int],
    val offset: Option[Long],
    val sorts: Vector[Sort],
    val seeks: Vector[Seek]
):

  /** Replaces the default `SELECT * FROM <table>` prefix */
  def prefix(sql: Frag): Spec[E] =
    new Spec(
      prefix = Some(sql),
      predicates = this.predicates,
      limit = this.limit,
      offset = this.offset,
      sorts = this.sorts,
      seeks = this.seeks
    )

  /** Adds a WHERE predicate, AND-ed with any existing predicates */
  def where(sql: Frag): Spec[E] =
    new Spec(
      prefix = this.prefix,
      predicates = this.predicates :+ sql,
      limit = this.limit,
      offset = this.offset,
      sorts = this.sorts,
      seeks = this.seeks
    )

  /** Appends an ORDER BY term */
  def orderBy(
      column: String,
      direction: SortOrder = SortOrder.Default,
      nullOrder: NullOrder = NullOrder.Default
  ): Spec[E] =
    new Spec(
      prefix = this.prefix,
      predicates = this.predicates,
      limit = this.limit,
      offset = this.offset,
      sorts = this.sorts :+ Sort(column, direction, nullOrder),
      seeks = this.seeks
    )

  /** Sets the maximum number of rows to return */
  def limit(limit: Int): Spec[E] =
    new Spec(
      prefix = this.prefix,
      predicates = this.predicates,
      limit = Some(limit),
      offset = this.offset,
      sorts = this.sorts,
      seeks = this.seeks
    )

  /** Sets the number of rows to skip */
  def offset(offset: Long): Spec[E] =
    new Spec(
      prefix = this.prefix,
      predicates = this.predicates,
      limit = this.limit,
      offset = Some(offset),
      sorts = this.sorts,
      seeks = this.seeks
    )

  /** Adds a keyset-pagination condition plus its matching sort term */
  def seek[V](
      column: String,
      seekDirection: SeekDir,
      value: V,
      columnSort: SortOrder,
      nullOrder: NullOrder = NullOrder.Default
  )(using codec: DbCodec[V]): Spec[E] =
    val newSeek = Seek(column, seekDirection, value, columnSort, nullOrder, codec)
    new Spec(
      prefix = this.prefix,
      predicates = this.predicates,
      limit = this.limit,
      offset = this.offset,
      sorts = this.sorts,
      seeks = this.seeks :+ newSeek
    )

end Spec

object Spec:
  /** An empty Spec: no predicates, sorts, seeks, limit, or offset */
  def apply[E]: Spec[E] =
    new Spec(None, Vector.empty, None, None, Vector.empty, Vector.empty)


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/SpecImpl.scala
================================================
package com.augustnagro.magnum

import java.util.StringJoiner

/** Shared SQL-building logic for [[Spec]]-based queries. Dialect objects
  * override the individual fragment-rendering methods where their syntax
  * differs (e.g. Sqlite pagination).
  */
private trait SpecImpl:
  /** Renders one ORDER BY term, e.g. `col ASC NULLS LAST` */
  def sortSql(sort: Sort): String =
    val dir = sort.direction match
      case SortOrder.Default => ""
      case SortOrder.Asc     => " ASC"
      case SortOrder.Desc    => " DESC"
      case _                 => throw UnsupportedOperationException()
    val nullOrder = sort.nullOrder match
      case NullOrder.Default => ""
      case NullOrder.First   => " NULLS FIRST"
      case NullOrder.Last    => " NULLS LAST"
      case _                 => throw UnsupportedOperationException()
    sort.column + dir + nullOrder

  /** Renders the pagination clause; None when neither bound is set */
  def offsetLimitSql(offset: Option[Long], limit: Option[Int]): Option[String] =
    (offset, limit) match
      case (Some(o), Some(l)) => Some(s"OFFSET $o LIMIT $l")
      case (Some(o), None)    => Some(s"OFFSET $o")
      case (None, Some(l))    => Some(s"LIMIT $l")
      case (None, None)       => None

  /** Renders a keyset-pagination predicate, e.g. `col > ?` */
  def seekSql(seek: Seek): String =
    val seekDir = seek.seekDirection match
      case SeekDir.Gt => ">"
      case SeekDir.Lt => "<"
      case _          => throw UnsupportedOperationException()
    s"${seek.column} $seekDir ?"

  /** Assembles and runs the full SELECT described by `spec` */
  def findAll[E: DbCodec](spec: Spec[E], tableNameSql: String)(using
      DbCon
  ): Vector[E] =
    val whereClause = StringJoiner(" AND ", "WHERE ", "").setEmptyValue("")

    val allParams = Vector.newBuilder[Any]

    val tableNameLiteral = SqlLiteral(tableNameSql)
    val prefixFrag = spec.prefix.getOrElse(sql"SELECT * FROM $tableNameLiteral")
    allParams ++= prefixFrag.params

    // Each seek becomes a parameterized predicate whose value is written by
    // the seek's own codec.
    val seekPredicates = spec.seeks.map(seek =>
      val codec = seek.codec.asInstanceOf[DbCodec[Any]]
      Frag(
        seekSql(seek),
        Vector(seek.value),
        (ps, pos) =>
          codec.writeSingle(seek.value, ps, pos)
          pos + codec.cols.length
      )
    )

    val whereFrags =
      (spec.predicates ++ seekPredicates).filter(_.sqlString.nonEmpty)
    for frag <- whereFrags do
      whereClause.add("(" + frag.sqlString + ")")
      allParams ++= frag.params

    // Every seek column also contributes an ORDER BY term, after user sorts.
    val seekSorts =
      spec.seeks.map(seek => Sort(seek.column, seek.columnSort, seek.nullOrder))
    val orderByClause =
      StringJoiner(", ", "ORDER BY ", "").setEmptyValue("")
    for sort <- spec.sorts ++ seekSorts do orderByClause.add(sortSql(sort))

    val finalSj = StringJoiner(" ")
    if prefixFrag.sqlString.nonEmpty then finalSj.add(prefixFrag.sqlString)
    val whereClauseStr = whereClause.toString
    if whereClauseStr.nonEmpty then finalSj.add(whereClauseStr)
    val orderByClauseStr = orderByClause.toString
    if orderByClauseStr.nonEmpty then finalSj.add(orderByClauseStr)

    for offsetLimit <- offsetLimitSql(spec.offset, spec.limit) do
      finalSj.add(offsetLimit)

    // Parameters are written in the order the fragments appear in the SQL:
    // prefix first, then each where/seek predicate.
    val allFrags = prefixFrag +: whereFrags
    val fragWriter: FragWriter = (ps, startingPos) =>
      allFrags.foldLeft(startingPos)((pos, frag) => frag.writer.write(ps, pos))

    Frag(finalSj.toString, allParams.result(), fragWriter)
      .query[E]
      .run()
  end findAll
end SpecImpl

private object SpecImpl:
  /** Standard-SQL implementation, used by dialects without overrides */
  object Default extends SpecImpl


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/SqlException.scala
================================================
package com.augustnagro.magnum

/** Thrown when executing a SQL statement fails. The message is produced by
  * the active [[SqlLogger]]'s `exceptionMsg`; the driver's original error is
  * the cause.
  */
class SqlException private[magnum] (message: String, cause: Throwable = null)
    extends RuntimeException(message, cause)


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/SqlExceptionEvent.scala
================================================
package com.augustnagro.magnum

/** Metadata for an exceptional SQL statement. */
class SqlExceptionEvent private[magnum] (
    /** The SQL string */
    val sql: String,
    // Untyped params (single statement or batch); normalized lazily by
    // parseParams when `params` is called.
    anyParams: Any,
    /** Cause of the exception */
    val cause: Throwable
):
  /** The parameters used when executing. The type is `Iterator[Iterator[Any]]`
    * to support logging batched updates. For example,
    * {{{
    *   repo.insert(User(a, b, c)) // provides Iterator(Iterator(a, b, c))
    *   repo.insertAll(List(User(a, b, c), User(d, e, f))) // provides Iterator(Iterator(a, b, c), Iterator(d, e, f))
    * }}}
    */
  def params: Iterator[Iterator[Any]] = parseParams(anyParams)


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/SqlLiteral.scala
================================================
package com.augustnagro.magnum

/** A SQL string that is interpolated directly into a sql"" query (and not as a
  * PreparedStatement parameter)
  *
  * For example,
  *
  * {{{
  *   val myQaSchema = SqlLiteral("db_qa")
  *   sql"SELECT * FROM $myQaSchema.table_name"
  * }}}
  *
  * Generates the SQL:
  * {{{
  *   "SELECT * FROM db_qa.table_name"
  * }}}
  */
trait SqlLiteral:
  def queryRepr: String

object SqlLiteral:
  // Simple wrapper rendering exactly the provided string.
  private final class Impl(s: String) extends SqlLiteral:
    override def queryRepr: String = s

  /** Creates a [[SqlLiteral]] that renders exactly `s` */
  def apply(s: String): SqlLiteral = Impl(s)


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/SqlLogger.scala
================================================
package com.augustnagro.magnum

import java.lang.System.Logger.Level
import scala.concurrent.duration.FiniteDuration

/** Trait to provide logging of Magnum SQL statements. The active logger is
  * obtained from the [[DbCon]] in use.
  */
trait SqlLogger:
  /** Log a successful SQL statement execution. If a query fails a
    * [[SqlException]] will be thrown, and this logger will not be triggered.
    */
  def log(successEvent: SqlSuccessEvent): Unit

  /** Constructs the exception message for [[SqlException]]s */
  def exceptionMsg(exceptionEvent: SqlExceptionEvent): String

object SqlLogger:
  /** Logger that emits nothing; the exception message is just the cause's
    * message.
    */
  object NoOp extends SqlLogger:
    override def log(successEvent: SqlSuccessEvent): Unit = ()
    override def exceptionMsg(exceptionEvent: SqlExceptionEvent): String =
      exceptionEvent.cause.getMessage

  /** Default logger: at TRACE the SQL and its parameter values are logged;
    * at DEBUG the SQL only.
    */
  object Default extends SqlLogger:
    override def log(successEvent: SqlSuccessEvent): Unit =
      if Log.isLoggable(Level.TRACE) then
        Log.log(
          Level.TRACE,
          s"""Executed Query in ${successEvent.execTime}:
             |${successEvent.sql}
             |
             |With values:
             |${paramsString(successEvent.params)}
             |""".stripMargin
        )
      else if Log.isLoggable(Level.DEBUG) then
        Log.log(
          Level.DEBUG,
          s"""Executed Query in ${successEvent.execTime}:
             |${successEvent.sql}
             |""".stripMargin
        )

    override def exceptionMsg(exceptionEvent: SqlExceptionEvent): String =
      if Log.isLoggable(System.Logger.Level.TRACE) then
        s"""Error executing query:
           |${exceptionEvent.sql}
           |With message:
           |${exceptionEvent.cause.getMessage}
           |And values:
           |${paramsString(exceptionEvent.params)}
           |""".stripMargin
      else s"""Error executing query:
              |${exceptionEvent.sql}
              |With message:
              |${exceptionEvent.cause}
              |""".stripMargin
  end Default

  /** Logger that WARN-logs queries slower than `slowerThan` and otherwise
    * delegates to [[Default]]. When TRACE is enabled the slow-query warning
    * also includes parameter values.
    */
  def logSlowQueries(slowerThan: FiniteDuration): SqlLogger = new:
    override def log(logEvent: SqlSuccessEvent): Unit =
      if logEvent.execTime > slowerThan then
        // TRACE enabled: include parameter values, but still log at WARNING.
        if Log.isLoggable(Level.TRACE) then
          Log.log(
            Level.WARNING,
            s"""Executed SLOW Query in ${logEvent.execTime}:
               |${logEvent.sql}
               |
               |With values:
               |${paramsString(logEvent.params)}
               |""".stripMargin
          )
        else if Log.isLoggable(Level.WARNING) then
          Log.log(
            Level.WARNING,
            s"""Executed SLOW Query in ${logEvent.execTime}:
               |${logEvent.sql}
               |""".stripMargin
          )
        end if
      else Default.log(logEvent)

    override def exceptionMsg(exceptionEvent: SqlExceptionEvent): String =
      Default.exceptionMsg(exceptionEvent)
end SqlLogger


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/SqlName.scala
================================================
package com.augustnagro.magnum

import scala.annotation.StaticAnnotation

/** Annotation carrying an explicit SQL name for the annotated class or field
  * (presumably taking precedence over the Table's [[SqlNameMapper]] — the
  * derivation logic lives elsewhere; verify in DerivingUtil).
  */
class SqlName(val name: String) extends StaticAnnotation


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/SqlNameMapper.scala
================================================
package com.augustnagro.magnum

/** Mapping from scala terms to sql terms */
trait SqlNameMapper:
  def toColumnName(scalaName: String): String
  def toTableName(scalaName: String): String

object SqlNameMapper:

  /** Converts camelCase scala names to snake_case */
  object CamelToSnakeCase extends SqlNameMapper:

    def toColumnName(scalaName: String): String = toCase(scalaName)

    def toTableName(scalaName: String): String = toCase(scalaName)

    // First char lower-cased; every later uppercase char becomes '_'
    // followed by its lowercase form.
    private def toCase(scalaName: String): String =
      scalaName.head.toLower +: scalaName.tail.flatMap(c =>
        if c.isUpper then "_" + c.toLower else c.toString
      )

  /** Converts camelCase scala names to UPPER_SNAKE_CASE */
  object CamelToUpperSnakeCase extends SqlNameMapper:
    def toColumnName(scalaName: String): String = toCase(scalaName)

    def toTableName(scalaName: String): String = toCase(scalaName)

    // First char upper-cased; every later uppercase char becomes '_' plus the
    // char itself, all other chars are upper-cased.
    private def toCase(scalaName: String): String =
      scalaName.head.toUpper +: scalaName.tail.flatMap(c =>
        if c.isUpper then "_" + c else c.toUpper.toString
      )

  /** SqlNameMapper that keeps the same case as the provided scala names */
  object SameCase extends SqlNameMapper:
    def toColumnName(scalaName: String): String = scalaName
    def toTableName(scalaName: String): String = scalaName
end SqlNameMapper


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/SqlSuccessEvent.scala
================================================
package com.augustnagro.magnum

import scala.concurrent.duration.FiniteDuration

/** Metadata for a successfully executed SQL statement. */
class SqlSuccessEvent private[magnum] (
    /** The SQL String */
    val sql: String,
    // Untyped params (single statement or batch); normalized lazily by
    // parseParams when `params` is called.
    anyParams: Any,
    /** Time taken to execute the query, fetch data, and build the results. Does
      * not include time to construct the preparedStatement. For streaming
      * methods like `Query.iterator`, sqlExecTime is only calculated for the
      * first fetch.
      */
    val execTime: FiniteDuration
):
  /** The parameters used when executing. The type is `Iterator[Iterator[Any]]`
    * to support logging batched updates. For example,
    * {{{
    *   repo.insert(User(a, b, c)) // provides Iterator(Iterator(a, b, c))
    *   repo.insertAll(List(User(a, b, c), User(d, e, f))) // provides Iterator(Iterator(a, b, c), Iterator(d, e, f))
    * }}}
    */
  def params: Iterator[Iterator[Any]] = parseParams(anyParams)


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/SqliteDbType.scala
================================================
package com.augustnagro.magnum

import java.sql.{Connection, PreparedStatement, ResultSet, Statement}
import java.time.OffsetDateTime
import scala.collection.View
import scala.deriving.Mirror
import scala.reflect.ClassTag
import scala.util.{Failure, Success, Using}

object SqliteDbType extends DbType:

  // Sqlite pagination is `LIMIT offset, count` rather than the standard
  // OFFSET/LIMIT keywords, so the default rendering is overridden.
  private val specImpl = new SpecImpl:
    override def offsetLimitSql(
        offset: Option[Long],
        limit: Option[Int]
    ): Option[String] =
      (offset, limit) match
        case (Some(o), Some(l)) => Some(s"LIMIT $o, $l")
        // Offset without a limit: Long.MaxValue serves as an unbounded count.
        case (Some(o), None)    => Some(s"LIMIT $o, ${Long.MaxValue}")
        case (None, Some(l))    => Some(s"LIMIT $l")
        case (None, None)       => None

  /** Builds the generated repository operations for a Sqlite table. All SQL
    * strings are precomputed once here; the returned RepoDefaults closes over
    * them.
    */
  def buildRepoDefaults[EC, E, ID](
      tableNameSql: String,
      eElemNames: Seq[String],
      eElemNamesSql: Seq[String],
      eElemCodecs: Seq[DbCodec[?]],
      ecElemNames: Seq[String],
      ecElemNamesSql: Seq[String],
      idIndex: Int
  )(using
      eCodec: DbCodec[E],
      ecCodec: DbCodec[EC],
      idCodec: DbCodec[ID],
      eClassTag: ClassTag[E],
      ecClassTag: ClassTag[EC],
      idClassTag: ClassTag[ID]
  ): RepoDefaults[EC, E, ID] =
    val idName = eElemNamesSql(idIndex)
    // NOTE(review): selectKeys appears unused (queries below use SELECT *);
    // candidate for removal.
    val selectKeys = eElemNamesSql.mkString(", ")
    val ecInsertKeys = ecElemNamesSql.mkString("(", ", ", ")")

    // `col1 = ?, col2 = ?, ...` for every column except the id.
    val updateKeys: String = eElemNamesSql
      .lazyZip(eElemCodecs)
      .map((sqlName, codec) => sqlName + " = " + codec.queryRepr)
      .patch(idIndex, Seq.empty, 1)
      .mkString(", ")

    // Codecs in UPDATE parameter order: SET columns first, then the id for
    // the WHERE clause.
    val updateCodecs = eElemCodecs
      .patch(idIndex, Seq.empty, 1)
      .appended(idCodec)
      .asInstanceOf[Seq[DbCodec[Any]]]

    // NOTE(review): unused — insertReturning is unsupported on Sqlite below;
    // candidate for removal.
    val insertGenKeys = eElemNamesSql.toArray

    val countSql = s"SELECT count(*) FROM $tableNameSql"
    val countQuery = Frag(countSql, Vector.empty, FragWriter.empty).query[Long]
    val existsByIdSql =
      s"SELECT 1 FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}"
    val findAllSql = s"SELECT * FROM $tableNameSql"
    val findAllQuery = Frag(findAllSql, Vector.empty, FragWriter.empty).query[E]
    val findByIdSql =
      s"SELECT * FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}"
    val deleteByIdSql =
      s"DELETE FROM $tableNameSql WHERE $idName = ${idCodec.queryRepr}"
    // Sqlite has no TRUNCATE statement; an unqualified DELETE is used instead.
    val truncateSql = s"DELETE FROM $tableNameSql"
    val truncateUpdate =
      Frag(truncateSql, Vector.empty, FragWriter.empty).update
    val insertSql =
      s"INSERT INTO $tableNameSql $ecInsertKeys VALUES (${ecCodec.queryRepr})"
    val updateSql =
      s"UPDATE $tableNameSql SET $updateKeys WHERE $idName = ${idCodec.queryRepr}"

    // FragWriter writing a single id value starting at `pos`.
    def idWriter(id: ID): FragWriter = (ps, pos) =>
      idCodec.writeSingle(id, ps, pos)
      pos + idCodec.cols.length

    new RepoDefaults[EC, E, ID]:
      def count(using con: DbCon): Long = countQuery.run().head

      def existsById(id: ID)(using DbCon): Boolean =
        Frag(existsByIdSql, IArray(id), idWriter(id))
          .query[Int]
          .run()
          .nonEmpty

      def findAll(using DbCon): Vector[E] = findAllQuery.run()

      def findAll(spec: Spec[E])(using DbCon): Vector[E] =
        specImpl.findAll(spec, tableNameSql)

      def findById(id: ID)(using DbCon): Option[E] =
        Frag(findByIdSql, IArray(id), idWriter(id))
          .query[E]
          .run()
          .headOption

      def findAllById(ids: Iterable[ID])(using DbCon): Vector[E] =
        throw UnsupportedOperationException(
          "Sqlite does not support 'ANY' keyword, and does not support long IN parameter lists. Use findById in a loop instead."
        )

      // The id is extracted from the entity via its Product element index.
      def delete(entity: E)(using DbCon): Unit =
        deleteById(
          entity
            .asInstanceOf[Product]
            .productElement(idIndex)
            .asInstanceOf[ID]
        )

      def deleteById(id: ID)(using DbCon): Unit =
        Frag(deleteByIdSql, IArray(id), idWriter(id)).update
          .run()

      def truncate()(using DbCon): Unit =
        truncateUpdate.run()

      def deleteAll(entities: Iterable[E])(using DbCon): BatchUpdateResult =
        deleteAllById(
          entities.map(e =>
            e.asInstanceOf[Product].productElement(idIndex).asInstanceOf[ID]
          )
        )

      def deleteAllById(ids: Iterable[ID])(using
          con: DbCon
      ): BatchUpdateResult =
        handleQuery(deleteByIdSql, ids):
          Using(con.connection.prepareStatement(deleteByIdSql)): ps =>
            idCodec.write(ids, ps)
            timed(batchUpdateResult(ps.executeBatch()))

      def insert(entityCreator: EC)(using con: DbCon): Unit =
        handleQuery(insertSql, entityCreator):
          Using(con.connection.prepareStatement(insertSql)): ps =>
            ecCodec.writeSingle(entityCreator, ps)
            timed(ps.executeUpdate())

      def insertAll(entityCreators: Iterable[EC])(using con: DbCon): Unit =
        handleQuery(insertSql, entityCreators):
          Using(con.connection.prepareStatement(insertSql)): ps =>
            ecCodec.write(entityCreators, ps)
            timed(batchUpdateResult(ps.executeBatch()))

      // https://github.com/AugustNagro/magnum/issues/87#issuecomment-2591823574
      def insertReturning(entityCreator: EC)(using con: DbCon): E =
        throw UnsupportedOperationException()

      // https://github.com/AugustNagro/magnum/issues/87#issuecomment-2591823574
      def insertAllReturning(
          entityCreators: Iterable[EC]
      )(using con: DbCon): Vector[E] =
        throw UnsupportedOperationException()

      def update(entity: E)(using con: DbCon): Unit =
        handleQuery(updateSql, entity):
          Using(con.connection.prepareStatement(updateSql)): ps =>
            val entityValues: Vector[Any] = entity
              .asInstanceOf[Product]
              .productIterator
              .toVector
            // put ID at the end, matching updateCodecs / updateSql order
            val updateValues = entityValues
              .patch(idIndex, Vector.empty, 1)
              .appended(entityValues(idIndex))

            var pos = 1
            for (field, codec) <- updateValues.lazyZip(updateCodecs) do
              codec.writeSingle(field, ps, pos)
              pos += codec.cols.length
            timed(ps.executeUpdate())

      def updateAll(entities: Iterable[E])(using
          con: DbCon
      ): BatchUpdateResult =
        handleQuery(updateSql, entities):
          Using(con.connection.prepareStatement(updateSql)): ps =>
            for entity <- entities do
              val entityValues: Vector[Any] = entity
                .asInstanceOf[Product]
                .productIterator
                .toVector
              // put ID at the end, matching updateCodecs / updateSql order
              val updateValues = entityValues
                .patch(idIndex, Vector.empty, 1)
                .appended(entityValues(idIndex))

              var pos = 1
              for (field, codec) <- updateValues.lazyZip(updateCodecs) do
                codec.writeSingle(field, ps, pos)
                pos += codec.cols.length
              ps.addBatch()

            timed(batchUpdateResult(ps.executeBatch()))
    end new
  end buildRepoDefaults
end SqliteDbType


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/Table.scala
================================================
package com.augustnagro.magnum

import scala.annotation.StaticAnnotation

/** Annotation marking an entity class as a database table.
  *
  * @param dbType
  *   the database dialect used to build the generated repository SQL
  * @param nameMapper
  *   maps scala class/field names to sql table/column names
  */
class Table(
    val dbType: DbType,
    val nameMapper: SqlNameMapper = SqlNameMapper.SameCase
) extends StaticAnnotation


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/TableExprs.scala
================================================
package com.augustnagro.magnum

import scala.quoted.*

/** Compile-time bundle of table metadata shared by the RepoDefaults and
  * TableInfo macros.
  *
  * @param tableAnnot
  *   the entity's @Table annotation instance
  * @param tableNameScala
  *   the entity class's scala name
  * @param tableNameSql
  *   the mapped sql table name
  * @param eElemNames
  *   scala names of E's fields
  * @param eElemNamesSql
  *   mapped sql names of E's fields
  * @param ecElemNames
  *   scala names of EC's fields
  * @param ecElemNamesSql
  *   mapped sql names of EC's fields
  * @param idIndex
  *   index of the id field within E's fields
  */
private case class TableExprs(
    tableAnnot: Expr[Table],
    tableNameScala: Expr[String],
    tableNameSql: Expr[String],
    eElemNames: Seq[String],
    eElemNamesSql: Seq[Expr[String]],
    ecElemNames: List[String],
    ecElemNamesSql: Seq[Expr[String]],
    idIndex: Expr[Int]
)


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/TableInfo.scala
================================================
package com.augustnagro.magnum

import scala.deriving.*
import scala.compiletime.*
import scala.quoted.*

/** Metadata about a Table, which can be interpolated in sql"" expressions
  *
  * For example,
  *
  * {{{
  *   @Table(PostgresDbType, SqlNameMapper.CamelToSnakeCase)
  *   case class User(@Id id: Long, firstName: String)
  *     derives DbCodec
  *
  *   val u = TableInfo[User, User, Long].alias("u")
  *
  *   sql"SELECT ${u.firstName} FROM $u".sqlString ==
  *     "SELECT u.first_name FROM user u"
  * }}}
  */
class TableInfo[EC, E, ID](
    val all: ColumnNames,
    val insertColumns: ColumnNames,
    val alias: Option[String],
    val queryRepr: String,
    val idColumn: Option[ColumnName],
    private[magnum] val table: String,
    private[magnum] val eClassName: String
) extends Selectable, SqlLiteral:

  /** Resolves a column by Scala field name. Backed by the structural
    * refinement produced in the companion's macro, so well-typed accesses
    * always find a match; `.get` throws only for unknown names.
    */
  def selectDynamic(scalaName: String): ColumnName =
    all.columnNames.find(_.scalaName == scalaName).get

  /** Returns a copy of this TableInfo whose columns are qualified with
    * `tableAlias` (e.g. `u.first_name`). `insertColumns` is left unqualified
    * because INSERT statements do not use table aliases.
    */
  def alias(tableAlias: String): this.type =
    require(tableAlias.nonEmpty, "custom tableAlias cannot be empty")
    val queryRepr = table + " " + tableAlias

    // Re-qualify every column's queryRepr with the alias prefix.
    val allSchemaNames = all.columnNames.map(cn =>
      val sqlName = cn.sqlName
      ColumnName(
        scalaName = cn.scalaName,
        sqlName = sqlName,
        queryRepr = tableAlias + "." + sqlName
      )
    )
    val allQueryRepr = allSchemaNames.map(_.queryRepr).mkString(", ")
    val allCols = ColumnNames(allQueryRepr, allSchemaNames)
    // Re-point the id column at its aliased counterpart, when one exists.
    val newIdColumn = idColumn.flatMap(oldId =>
      allSchemaNames.find(_.scalaName == oldId.scalaName)
    )

    // The cast is safe: the refinement members are resolved dynamically via
    // selectDynamic, which reads from the updated `all` columns.
    new TableInfo[EC, E, ID](
      all = allCols,
      insertColumns = insertColumns,
      alias = Some(tableAlias),
      queryRepr = queryRepr,
      idColumn = newIdColumn,
      table = table,
      eClassName = eClassName
    ).asInstanceOf[this.type]
  end alias

end TableInfo

object TableInfo:
  /** Derives a TableInfo for entity E (creator EC, id type ID). The result
    * type is refined with one ColumnName member per field of E, so columns
    * can be referenced as `tableInfo.fieldName` in sql"" interpolations.
    */
  transparent inline def apply[EC: Mirror.Of, E: Mirror.Of, ID] =
    ${ dbSchemaImpl[EC, E, ID] }

  private def dbSchemaImpl[EC: Type, E: Type, ID: Type](using
      Quotes
  ): Expr[Any] =
    import quotes.reflect.*
    val exprs = tableExprs[EC, E, ID]
    // Build a structural refinement with a ColumnName member for each field
    // of E; the members are served at runtime by TableInfo.selectDynamic.
    val refinement = exprs.eElemNames
      .foldLeft(TypeRepr.of[TableInfo[EC, E, ID]])((typeRepr, elemName) =>
        Refinement(typeRepr, elemName, TypeRepr.of[ColumnName])
      )

    // One ColumnName per field of E; the SQL name doubles as the queryRepr.
    val allColumnsExpr = Expr.ofSeq(
      exprs.eElemNames
        .lazyZip(exprs.eElemNamesSql)
        .map((elemName, elemNameSqlExpr) =>
          '{
            val elemNameSql = $elemNameSqlExpr
            ColumnName(${ Expr(elemName) }, elemNameSql, elemNameSql)
          }
        )
    )

    // Same, but restricted to EC's (insertable) fields.
    val insertColumnsExpr = Expr.ofSeq(
      exprs.ecElemNames
        .lazyZip(exprs.ecElemNamesSql)
        .map((elemName, elemNameSqlExpr) =>
          '{
            val elemNameSql = $elemNameSqlExpr
            ColumnName(${ Expr(elemName) }, elemNameSql, elemNameSql)
          }
        )
    )

    // ID =:= Null marks "this table has no id column".
    val idIdx =
      if TypeRepr.of[ID] =:= TypeRepr.of[Null] then '{ None }
      else '{ Some(${ exprs.idIndex }) }

    refinement.asType match
      case '[tpe] =>
        '{
          val allColumns = IArray.from($allColumnsExpr)
          val allQueryRepr = allColumns.map(_.queryRepr).mkString(", ")
          val allCols = ColumnNames(allQueryRepr, allColumns)

          val insertColumns = IArray.from($insertColumnsExpr)
          val insertQueryRepr =
            insertColumns.map(_.queryRepr).mkString("(", ", ", ")")
          val insertCols = ColumnNames(insertQueryRepr, insertColumns)
          val idColumn = $idIdx.map(idx => allColumns(idx))

          val tableName = ${ exprs.tableNameSql }
          // Cast to the refined type so field-name selection type-checks.
          new TableInfo[EC, E, ID](
            all = allCols,
            insertColumns = insertCols,
            alias = None,
            table = tableName,
            queryRepr = tableName,
            idColumn = idColumn,
            eClassName = ${ exprs.tableNameScala }
          ).asInstanceOf[tpe]
        }
    end match
  end dbSchemaImpl
end TableInfo


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/Transactor.scala
================================================
package com.augustnagro.magnum

import java.sql.Connection
import javax.sql.DataSource
import scala.util.Using

/** Creates DbCon / DbTx contexts from a JDBC DataSource.
  *
  * @param dataSource source of JDBC connections
  * @param sqlLogger logs executed SQL and failures
  * @param connectionConfig applied to every connection before use
  */
class Transactor private (
    dataSource: DataSource,
    sqlLogger: SqlLogger = SqlLogger.Default,
    connectionConfig: Connection => Unit = con => ()
):
  /** Copy of this Transactor using the given logger. */
  def withSqlLogger(sqlLogger: SqlLogger): Transactor =
    new Transactor(dataSource, sqlLogger, connectionConfig)

  /** Copy of this Transactor using the given connection configurator. */
  def withConnectionConfig(connectionConfig: Connection => Unit): Transactor =
    new Transactor(dataSource, sqlLogger, connectionConfig)

  /** Runs `f` with a connection (no explicit transaction management),
    * closing the connection afterwards.
    */
  def connect[T](f: DbCon ?=> T): T =
    Using.resource(dataSource.getConnection): con =>
      connectionConfig(con)
      f(using DbCon(con, sqlLogger))

  /** Runs `f` in a transaction: commits on success, rolls back on any
    * Throwable. A failure during rollback is attached to the original
    * exception as a suppressed exception, so neither error is lost.
    */
  def transact[T](f: DbTx ?=> T): T =
    Using.resource(dataSource.getConnection): con =>
      connectionConfig(con)
      con.setAutoCommit(false)
      try
        val res = f(using DbTx(con, sqlLogger))
        con.commit()
        res
      catch
        case t =>
          try con.rollback()
          catch { case t2 => t.addSuppressed(t2) }
          throw t
end Transactor

object Transactor:

  /** Builds a Transactor with a custom logger and per-connection setup. */
  def apply(
      dataSource: DataSource,
      sqlLogger: SqlLogger,
      connectionConfig: Connection => Unit
  ): Transactor =
    new Transactor(dataSource, sqlLogger, connectionConfig)

  /** Builds a Transactor with a custom logger and no connection setup. */
  def apply(dataSource: DataSource, sqlLogger: SqlLogger): Transactor =
    apply(dataSource, sqlLogger, _ => ())

  /** Builds a Transactor with per-connection setup and default logging. */
  def apply(
      dataSource: DataSource,
      connectionConfig: Connection => Unit
  ): Transactor =
    apply(dataSource, SqlLogger.Default, connectionConfig)

  /** Builds a Transactor with default logging and no connection setup. */
  def apply(dataSource: DataSource): Transactor =
    apply(dataSource, SqlLogger.Default, _ => ())

end Transactor


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/UUIDCodec.scala
================================================
package com.augustnagro.magnum

import java.sql.{PreparedStatement, ResultSet, Types}
import java.util.UUID

object UUIDCodec:
  /** DbCodec storing a UUID as its canonical string form in a VARCHAR
    * column. Not a default given; import explicitly where needed.
    */
  given VarCharUUIDCodec: DbCodec[UUID] with
    def queryRepr: String = "?"
    val cols: IArray[Int] = IArray(Types.VARCHAR)
    def writeSingle(entity: UUID, ps: PreparedStatement, pos: Int): Unit =
      ps.setString(pos, entity.toString)
    def readSingle(rs: ResultSet, pos: Int): UUID =
      UUID.fromString(rs.getString(pos))
    def readSingleOption(rs: ResultSet, pos: Int): Option[UUID] =
      rs.getString(pos) match
        case null => None
        case str  => Some(UUID.fromString(str))


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/Update.scala
================================================
package com.augustnagro.magnum

import java.util.concurrent.TimeUnit
import scala.concurrent.duration.FiniteDuration
import scala.util.{Failure, Success, Using}

/** A mutating SQL statement (INSERT / UPDATE / DELETE / DDL) built from a
  * Frag, executed against a DbCon.
  */
class Update private[magnum] (val frag: Frag):
  /** Exactly like [[java.sql.PreparedStatement]].executeUpdate */
  def run()(using con: DbCon): Int =
    // handleQuery logs success/failure and rethrows as SqlException.
    handleQuery(frag.sqlString, frag.params):
      Using(con.connection.prepareStatement(frag.sqlString)): ps =>
        frag.writer.write(ps, 1)
        timed(ps.executeUpdate())


================================================
FILE: magnum/src/main/scala/com/augustnagro/magnum/util.scala
================================================
package com.augustnagro.magnum

import com.augustnagro.magnum.SqlException

import java.lang.System.Logger.Level
import java.sql.{Connection, PreparedStatement, ResultSet, Statement}
import java.util.StringJoiner
import java.util.concurrent.TimeUnit
import javax.sql.DataSource
import scala.collection.mutable as m
import scala.util.{Failure, Success, Try, Using, boundary}
import scala.deriving.Mirror
import scala.compiletime.{
  constValue,
  constValueTuple,
  erasedValue,
  error,
  summonInline
}
import scala.compiletime.ops.any.==
import scala.compiletime.ops.boolean.&&
import scala.concurrent.duration.FiniteDuration
import scala.reflect.ClassTag
import scala.quoted.*

/** Opens a connection from the Transactor and runs `f` with it. */
def connect[T](transactor: Transactor)(f: DbCon ?=> T): T =
  transactor.connect(f)

/** Opens a connection from the DataSource (default settings) and runs `f`. */
def connect[T](dataSource: DataSource)(f: DbCon ?=> T): T =
  Transactor(dataSource).connect(f)

/** Runs `f` in a transaction managed by the Transactor. */
def transact[T](transactor: Transactor)(f: DbTx ?=> T): T =
  transactor.transact(f)

/** Runs `f` in a transaction using the DataSource with default settings. */
def transact[T](dataSource: DataSource)(f: DbTx ?=> T): T =
  Transactor(dataSource).transact(f)

/** Runs `f` in a transaction, applying `connectionConfig` to the connection
  * before use.
  */
def transact[T](dataSource: DataSource, connectionConfig: Connection => Unit)(
    f: DbTx ?=> T
): T =
  val transactor =
    Transactor(dataSource = dataSource, connectionConfig = connectionConfig)
  transactor.transact(f)

extension (inline sc: StringContext)
  /** Builds a [[Frag]] from an interpolated sql"..." string at compile time.
    * Interpolated values become `?` parameters, except SqlLiterals (spliced
    * as their queryRepr) and nested Frags (spliced inline).
    */
  inline def sql(inline args: Any*): Frag =
    ${ sqlImpl('{ sc }, '{ args }) }

/** Macro implementation backing the sql"" interpolator.
  *
  * Expands to code that builds a [[Frag]]: the interpolated query string
  * (with placeholders for parameters), the flattened parameter values (for
  * logging), and a [[FragWriter]] that binds the parameters to a
  * PreparedStatement at runtime.
  */
private def sqlImpl(sc: Expr[StringContext], args: Expr[Seq[Any]])(using
    Quotes
): Expr[Frag] =
  import quotes.reflect.*
  val allArgsExprs: Seq[Expr[Any]] = args match
    case Varargs(ae) => ae
    case _ =>
      // sql is an inline interpolator, so the varargs should always be
      // statically known; report cleanly instead of crashing the compiler
      // with a MatchError if they are not.
      report.errorAndAbort(
        "sql interpolator arguments must be statically known varargs"
      )

  '{
    val args: Seq[Any] = ${ Expr.ofSeq(allArgsExprs) }

    // Per-argument SQL snippets ("?" placeholders, literal reprs, or nested
    // Frag SQL) interpolated back into the StringContext.
    val sqlQueryReprs: Vector[String] = ${
      queryReprs(allArgsExprs, '{ args }, '{ Vector.newBuilder })
    }
    val queryExpr: String = $sc.s(sqlQueryReprs: _*)

    // Parameter values with nested Frag params spliced in; used for logging.
    val flattenedArgs: Vector[Any] = ${
      flattenedArgsExpr(allArgsExprs, '{ args }, '{ Vector.newBuilder })
    }

    val writer: FragWriter = (ps: PreparedStatement, pos: Int) => {
      ${ sqlWriter('{ ps }, '{ pos }, '{ args }, allArgsExprs) }
    }
    Frag(queryExpr, flattenedArgs, writer)
  }
end sqlImpl

/** Builds the runtime Vector of parameter values used for logging.
  *
  * Walks the interpolated arguments in order: SqlLiterals contribute no
  * parameters, nested Frags contribute all of their params, and any other
  * value is appended as a single parameter. The SqlLiteral/Frag cases must
  * come before the generic `tp` case, since quoted pattern matching is
  * first-match.
  */
private def flattenedArgsExpr(
    argsExprs: Seq[Expr[Any]],
    allArgs: Expr[Seq[Any]],
    builder: Expr[m.Builder[Any, Vector[Any]]],
    i: Int = 0
)(using Quotes): Expr[Vector[Any]] =
  argsExprs match
    case '{ $arg: SqlLiteral } +: tail =>
      flattenedArgsExpr(tail, allArgs, builder, i + 1)
    case '{ $arg: Frag } +: tail =>
      val newBuilder = '{
        $builder ++= $allArgs(${ Expr(i) }).asInstanceOf[Frag].params
      }
      flattenedArgsExpr(tail, allArgs, newBuilder, i + 1)
    case '{ $arg: tp } +: tail =>
      val newBuilder = '{ $builder += $allArgs(${ Expr(i) }) }
      flattenedArgsExpr(tail, allArgs, newBuilder, i + 1)
    case Seq() =>
      '{ $builder.result() }

/** Builds the per-argument SQL snippets interpolated into the query string:
  * a SqlLiteral's queryRepr, a nested Frag's full sqlString, or the
  * summoned DbCodec's placeholder (usually "?") for ordinary values.
  */
private def queryReprs(
    argsExprs: Seq[Expr[Any]],
    allArgs: Expr[Seq[Any]],
    builder: Expr[m.Builder[String, Vector[String]]],
    i: Int = 0
)(using Quotes): Expr[Vector[String]] =
  argsExprs match
    case '{ $arg: SqlLiteral } +: tail =>
      val newBuilder = '{
        $builder += $allArgs(${ Expr(i) }).asInstanceOf[SqlLiteral].queryRepr
      }
      queryReprs(tail, allArgs, newBuilder, i + 1)
    case '{ $arg: Frag } +: tail =>
      val newBuilder = '{
        $builder += $allArgs(${ Expr(i) }).asInstanceOf[Frag].sqlString
      }
      queryReprs(tail, allArgs, newBuilder, i + 1)
    case '{ $arg: tp } +: tail =>
      val codecExpr = summonWriter[tp]
      val newBuilder = '{ $builder += $codecExpr.queryRepr }
      queryReprs(tail, allArgs, newBuilder, i + 1)
    case Seq() =>
      '{ $builder.result() }

/** Generates the FragWriter body: binds each non-literal argument to the
  * PreparedStatement, threading the 1-based parameter position through
  * nested Frags and multi-column codecs. Returns an expression for the next
  * free position.
  */
private def sqlWriter(
    psExpr: Expr[PreparedStatement],
    posExpr: Expr[Int],
    args: Expr[Seq[Any]],
    argsExprs: Seq[Expr[Any]],
    i: Int = 0
)(using Quotes): Expr[Int] =
  import quotes.reflect.*
  argsExprs match
    case '{ $arg: SqlLiteral } +: tail =>
      // Literals are spliced into the SQL text, so they bind no parameters.
      sqlWriter(psExpr, posExpr, args, tail, i + 1)
    case '{ $arg: Frag } +: tail =>
      '{
        val frag = $args(${ Expr(i) }).asInstanceOf[Frag]
        val pos = $posExpr
        val newPos = frag.writer.write($psExpr, pos)
        ${ sqlWriter(psExpr, '{ newPos }, args, tail, i + 1) }
      }
    case '{ $arg: tp } +: tail =>
      val codecExpr = summonWriter[tp]
      '{
        val argValue = $args(${ Expr(i) }).asInstanceOf[tp]
        val pos = $posExpr
        val codec = $codecExpr
        codec.writeSingle(argValue, $psExpr, pos)
        // A codec may span several columns; advance by its width.
        val newPos = pos + codec.cols.length
        ${ sqlWriter(psExpr, '{ newPos }, args, tail, i + 1) }
      }
    case Seq() => posExpr
  end match
end sqlWriter

/** Summons a DbCodec[T], retrying with T's widened type (so singleton and
  * refined types resolve to their base codec). Falls back to
  * DbCodec.AnyCodec (PreparedStatement::setObject), emitting a compiler
  * info message rather than failing.
  */
private def summonWriter[T: Type](using Quotes): Expr[DbCodec[T]] =
  import quotes.reflect.*

  Expr
    .summon[DbCodec[T]]
    .orElse(
      TypeRepr.of[T].widen.asType match
        case '[tpe] =>
          Expr
            .summon[DbCodec[tpe]]
            .map(codec => '{ $codec.asInstanceOf[DbCodec[T]] })
    )
    .getOrElse:
      report.info(
        s"Could not find given DbCodec for ${TypeRepr.of[T].show}. Using PreparedStatement::setObject instead."
      )
      '{ DbCodec.AnyCodec.asInstanceOf[DbCodec[T]] }

/** Executes the Updates produced by `f` as one PreparedStatement batch.
  *
  * Every generated Update must share the exact same SQL; only parameters
  * may differ. An empty input returns Success(0) without touching the
  * database.
  *
  * @throws SqlException if executing the batch fails
  */
def batchUpdate[T](values: Iterable[T])(f: T => Update)(using
    con: DbCon
): BatchUpdateResult =
  val it = values.iterator
  if !it.hasNext then return BatchUpdateResult.Success(0)
  // The first Update supplies the SQL that the batch is prepared with.
  val firstUpdate = f(it.next())
  val firstFrag = firstUpdate.frag

  Using.Manager(use =>
    val ps = use(con.connection.prepareStatement(firstFrag.sqlString))
    firstFrag.writer.write(ps, 1)
    ps.addBatch()

    while it.hasNext do
      val frag = f(it.next()).frag
      assert(
        frag.sqlString == firstFrag.sqlString,
        "all queries must be the same for batch PreparedStatement"
      )
      frag.writer.write(ps, 1)
      ps.addBatch()
    batchUpdateResult(ps.executeBatch())
  ) match
    case Success(res) => res
    case Failure(t) =>
      // Wrap with the logger's message; only the first frag's params are
      // reported since later frags share the same SQL.
      throw SqlException(
        con.sqlLogger.exceptionMsg(
          SqlExceptionEvent(firstFrag.sqlString, firstFrag.params, t)
        ),
        t
      )
  end match
end batchUpdate

/** Package-wide JDK System.Logger. */
private val Log = System.getLogger("com.augustnagro.magnum")

/** Normalizes a logged parameter value into rows of values.
  *
  * A Product (tuple / case class) becomes one row of its fields; an
  * Iterable of Products becomes one row per element; any other Iterable
  * becomes a single row of its elements; anything else is a 1x1 row.
  */
private def parseParams(params: Any): Iterator[Iterator[Any]] =
  params match
    case product: Product => Iterator(product.productIterator)
    case iterable: Iterable[?] =>
      if iterable.headOption.exists(_.isInstanceOf[Product]) then
        iterable.iterator.map(_.asInstanceOf[Product].productIterator)
      else Iterator(iterable.iterator)
    case other => Iterator(Iterator(other))

/** Formats parameter rows for log output: one "(a, b)" group per row,
  * ",\n"-separated, with a trailing newline.
  */
private def paramsString(params: Iterator[Iterator[Any]]): String =
  val joiner = StringJoiner(",\n", "", "\n")
  for row <- params do joiner.add(row.mkString("(", ", ", ")"))
  joiner.toString

/** Runs `f`, returning its result together with the elapsed execution time.
  *
  * Uses System.nanoTime, which is monotonic, so the measurement cannot go
  * negative or jump when the system clock is adjusted (as
  * System.currentTimeMillis could).
  */
private def timed[T](f: => T): (T, FiniteDuration) =
  val start = System.nanoTime()
  val res = f
  val execTime = FiniteDuration(
    System.nanoTime() - start,
    TimeUnit.NANOSECONDS
  )
  (res, execTime)

/** Folds JDBC batch update counts into a BatchUpdateResult.
  *
  * Returns SuccessNoInfo as soon as any statement reports
  * Statement.SUCCESS_NO_INFO; throws on any other negative code
  * (e.g. Statement.EXECUTE_FAILED).
  */
private def batchUpdateResult(updateCounts: Array[Int]): BatchUpdateResult =
  var updatedRows = 0L
  var i = 0
  while i < updateCounts.length do
    updateCounts(i) match
      case rowCount if rowCount >= 0 =>
        updatedRows += rowCount
      case Statement.SUCCESS_NO_INFO =>
        return BatchUpdateResult.SuccessNoInfo
      case errorCode =>
        throw RuntimeException(s"Received JDBC error code $errorCode")
    i += 1
  BatchUpdateResult.Success(updatedRows)

/** Compile-time check that every case field of EC (matched by name and
  * result-type signature) also exists on E, reporting a compiler error for
  * each missing field. Used during Repo/TableInfo derivation.
  */
private def assertECIsSubsetOfE[EC: Type, E: Type](using Quotes): Unit =
  import quotes.reflect.*
  val eRepr = TypeRepr.of[E]
  val ecRepr = TypeRepr.of[EC]
  val eFields = eRepr.typeSymbol.caseFields
  val ecFields = ecRepr.typeSymbol.caseFields

  for ecField <- ecFields do
    if !eFields.exists(f =>
        f.name == ecField.name &&
          f.signature.resultSig == ecField.signature.resultSig
      )
    then
      report.error(
        s"""${ecRepr.show} must be an effective subset of ${eRepr.show}.
           |Are there any fields on ${ecRepr.show} you forgot to update on ${eRepr.show}?
           |""".stripMargin
      )

/** Gathers all compile-time table metadata (names, columns, id index) for
  * the derivation macros. Aborts compilation if E lacks a @Table annotation
  * or either type lacks a Mirror.
  */
private def tableExprs[EC: Type, E: Type, ID: Type](using
    Quotes
): TableExprs =
  import quotes.reflect.*
  assertECIsSubsetOfE[EC, E]

  val idIndex = idAnnotIndex[E]
  val table: Expr[Table] =
    DerivingUtil.tableAnnot[E] match
      case Some(table) => table
      case None =>
        report.errorAndAbort(
          s"${TypeRepr.of[E].show} must have @Table annotation"
        )
  val nameMapper: Expr[SqlNameMapper] = '{ $table.nameMapper }

  Expr.summon[Mirror.Of[E]] match
    case Some('{
          $eMirror: Mirror.Of[E] {
            type MirroredLabel = eLabel
            type MirroredElemLabels = eMels
          }
        }) =>
      Expr.summon[Mirror.Of[EC]] match
        case Some('{
              $ecMirror: Mirror.Of[EC] {
                type MirroredElemLabels = ecMels
              }
            }) =>
          val tableNameScala = Type.valueOfConstant[eLabel].get.toString
          val tableNameScalaExpr = Expr(tableNameScala)
          // An explicit @SqlName on the class wins over the name mapper.
          val tableNameSql = DerivingUtil.sqlTableNameAnnot[E] match
            case Some(sqlName) => '{ $sqlName.name }
            case None => '{ $nameMapper.toTableName($tableNameScalaExpr) }
          val eElemNames = elemNames[eMels]()
          // Per-field @SqlName annotations likewise override the mapper.
          val eElemNamesSql = eElemNames.map(elemName =>
            sqlNameAnnot[E](elemName) match
              case Some(sqlName) => '{ $sqlName.name }
              case None =>
                '{ $nameMapper.toColumnName(${ Expr(elemName) }) }
          )
          val ecElemNames = elemNames[ecMels]()
          // EC fields are a subset of E's, so annotations are looked up on E.
          val ecElemNamesSql = ecElemNames.map(elemName =>
            sqlNameAnnot[E](elemName) match
              case Some(sqlName) => '{ $sqlName.name }
              case None =>
                '{ $nameMapper.toColumnName(${ Expr(elemName) }) }
          )
          TableExprs(
            table,
            tableNameScalaExpr,
            tableNameSql,
            eElemNames,
            eElemNamesSql,
            ecElemNames,
            ecElemNamesSql,
            idIndex
          )
        case _ =>
          report.errorAndAbort(
            s"A Mirror is required to derive RepoDefaults for ${TypeRepr.of[EC].show}"
          )
    case _ =>
      report.errorAndAbort(
        s"A Mirror is required to derive RepoDefaults for ${TypeRepr.of[E].show}"
      )
  end match
end tableExprs

/** Index of the @Id-annotated field in E's primary constructor. Defaults to
  * 0 (the first field) when no field carries @Id.
  */
private def idAnnotIndex[E: Type](using q: Quotes): Expr[Int] =
  import q.reflect.*
  val idAnnot = TypeRepr.of[Id].typeSymbol
  val index = TypeRepr
    .of[E]
    .typeSymbol
    .primaryConstructor
    .paramSymss
    .head
    .indexWhere(sym => sym.hasAnnotation(idAnnot)) match
    case -1 => 0
    case x  => x
  Expr(index)

/** Extracts the labels of a Mirror's MirroredElemLabels tuple as runtime
  * strings, in declaration order (the accumulator is built reversed, then
  * reversed once at the end).
  */
private def elemNames[Mels: Type](res: List[String] = Nil)(using
    Quotes
): List[String] =
  import quotes.reflect.*
  Type.of[Mels] match
    case '[mel *: melTail] =>
      val melString = Type.valueOfConstant[mel].get.toString
      elemNames[melTail](melString :: res)
    case '[EmptyTuple] =>
      res.reverse

/** Looks up the @SqlName annotation on primary-constructor parameter
  * `elemName` of T, returning it as an expression when present.
  */
private def sqlNameAnnot[T: Type](elemName: String)(using
    Quotes
): Option[Expr[SqlName]] =
  import quotes.reflect.*
  val annot = TypeRepr.of[SqlName].typeSymbol
  TypeRepr
    .of[T]
    .typeSymbol
    .primaryConstructor
    .paramSymss
    .head
    .find(sym => sym.name == elemName && sym.hasAnnotation(annot))
    .flatMap(sym => sym.getAnnotation(annot))
    .map(term => term.asExprOf[SqlName])

/** Unwraps a timed query attempt: logs and returns the result on success;
  * on failure wraps the cause in a SqlException with the logger's message.
  */
private def handleQuery[A](sql: String, params: Any)(
    attempt: Try[(A, FiniteDuration)]
)(using con: DbCon): A =
  attempt match
    case Failure(t) =>
      throw SqlException(
        con.sqlLogger.exceptionMsg(SqlExceptionEvent(sql, params, t)),
        t
      )
    case Success((res, execTime)) =>
      con.sqlLogger.log(SqlSuccessEvent(sql, params, execTime))
      res


================================================
FILE: magnum/src/test/resources/clickhouse/big-dec.sql
================================================
-- Test fixture: big_dec table with a nullable wide-integer column (ClickHouse).
drop table if exists big_dec;

create table big_dec (
    id Int64 NOT NULL,
    my_big_dec Nullable(Int256)
)
ENGINE = MergeTree()
ORDER BY id;

insert into big_dec values
(1, 123),
(2, null);

================================================
FILE: magnum/src/test/resources/clickhouse/car.sql
================================================
-- Test fixture: car table with enum, nullable, and DateTime columns (ClickHouse).
drop table if exists car;

CREATE TABLE car (
    model String NOT NULL,
    id Int64 NOT NULL,
    top_speed Int32 NOT NULL,
    vin Nullable(Int32),
    color Enum('Red', 'Green', 'Blue'),
    created DateTime NOT NULL
)
ENGINE = MergeTree()
ORDER BY created;

INSERT INTO car (model, id, top_speed, vin, color, created) VALUES
('McLaren Senna', 1, 208, 123, 'Red', toDateTime('2024-11-24 22:17:30', 'UTC')),
('Ferrari F8 Tributo', 2, 212, 124, 'Green', toDateTime('2024-11-24 22:17:31', 'UTC')),
('Aston Martin Superleggera', 3, 211, null, 'Blue', toDateTime('2024-11-24 22:17:32', 'UTC'));

================================================
FILE: magnum/src/test/resources/clickhouse/my-time.sql
================================================
-- Test fixture: my_time table for time-type codec tests (ClickHouse).
-- NOTE(review): the final datetime uses a 'T' separator while the others use
-- a space; ClickHouse parses both, but confirm the mix is intentional.
drop table if exists my_time;

create table my_time (
  a DateTime not null,
  b Date not null,
  c String not null,
  d DateTime not null
)
engine = MergeTree()
order by a;

insert into my_time values
(toDateTime('2025-03-30 21:19:23'), toDate('2025-03-30'), '05:20:04', toDateTime('2025-04-02 20:16:38')),
(toDateTime('2025-03-31 21:19:23'), toDate('2025-03-31'), '05:30:04', toDateTime('2025-04-02T20:17:38'));


================================================
FILE: magnum/src/test/resources/clickhouse/no-id.sql
================================================
-- Test fixture: no_id table for entities without an id column (ClickHouse).
drop table if exists no_id;

CREATE TABLE no_id (
    created_at DateTime NOT NULL,
    user_name String NOT NULL,
    user_action String NOT NULL
)
ENGINE = MergeTree()
ORDER BY created_at;

INSERT INTO no_id VALUES
(timestamp '1997-08-15', 'Josh', 'clicked a button'),
(timestamp '1997-08-16', 'Danny', 'opened a toaster'),
(timestamp '1997-08-17', 'Greg', 'ran some QA tests');

================================================
FILE: magnum/src/test/resources/clickhouse/person.sql
================================================
-- Test fixture: person table with nullable String/UUID columns (ClickHouse).
drop table if exists person;

create table person (
    id Int64 not null,
    first_name Nullable(String),
    last_name String not null,
    is_admin Bool not null,
    created DateTime not null,
    social_id Nullable(UUID)
)
engine = MergeTree()
order by created;

insert into person values
(1, 'George', 'Washington', true, toDateTime('2023-03-05 02:26:00'), toUUID('d06443a6-3efb-46c4-a66a-a80a8a9a5388')),
(2, 'Alexander', 'Hamilton', true, toDateTime('2023-03-05 02:27:00'), toUUID('529b6c6d-7228-4da5-81d7-13b706f78ddb')),
(3, 'John', 'Adams', true, toDateTime('2023-03-05 02:28:00'), null),
(4, 'Benjamin', 'Franklin', true, toDateTime('2023-03-05 02:29:00'), null),
(5, 'John', 'Jay', true, toDateTime('2023-03-05 02:30:00'), null),
(6, 'Thomas', 'Jefferson', true, toDateTime('2023-03-05 02:31:00'), null),
(7, 'James', 'Madison', true, toDateTime('2023-03-05 02:32:00'), null),
(8, null, 'Nagro', false, toDateTime('2023-03-05 02:33:00'), null);

================================================
FILE: magnum/src/test/resources/h2/big-dec.sql
================================================
-- Test fixture: big_dec table with a nullable numeric column (H2).
drop table if exists big_dec cascade;

create table big_dec (
    id int auto_increment primary key,
    my_big_dec numeric
);

insert into big_dec values
(1, 123),
(2, null);

================================================
FILE: magnum/src/test/resources/h2/car.sql
================================================
-- Test fixture: car table with auto-increment id, enum, and tz timestamps (H2).
drop table if exists car;

create table car (
    model varchar(50) not null,
    id bigint auto_increment primary key,
    top_speed int not null,
    vin int,
    color enum('Red', 'Green', 'Blue'),
    created timestamp with time zone not null
);

insert into car (model, top_speed, vin, color, created) values
('McLaren Senna', 208, 123, 'Red', '2024-11-24T22:17:30.000000000Z'),
('Ferrari F8 Tributo', 212, 124, 'Green', '2024-11-24T22:17:31.000000000Z'),
('Aston Martin Superleggera', 211, null, 'Blue', '2024-11-24T22:17:32.000000000Z');


================================================
FILE: magnum/src/test/resources/h2/my-time.sql
================================================
-- Test fixture: my_time table for time-type codec tests (H2).
drop table if exists my_time cascade;

create table my_time (
    a timestamp with time zone not null,
    b date not null,
    c time not null,
    d timestamp not null
);

insert into my_time values
('2025-03-30T21:19:23Z', '2025-03-30', '05:20:04', '2025-04-02T20:16:38'),
('2025-03-31T21:19:23Z', '2025-03-31', '05:30:04', '2025-04-02T20:17:38');


================================================
FILE: magnum/src/test/resources/h2/my-user.sql
================================================
-- Test fixture: my_user table with a db-generated id (H2).
drop table if exists my_user cascade;

create table my_user (
    first_name text not null,
    id bigint auto_increment primary key
);

insert into my_user (first_name) values
('George'),
('Alexander'),
('John');


================================================
FILE: magnum/src/test/resources/h2/no-id.sql
================================================
-- Test fixture: no_id table for entities without an id column (H2).
drop table if exists no_id;

create table no_id (
    created_at timestamp with time zone default now() not null,
    user_name varchar not null,
    user_action varchar not null
);

insert into no_id values
(timestamp '1997-08-15', 'Josh', 'clicked a button'),
(timestamp '1997-08-16', 'Danny', 'opened a toaster'),
(timestamp '1997-08-17', 'Greg', 'ran some QA tests');


================================================
FILE: magnum/src/test/resources/h2/person.sql
================================================
-- Test fixture: person table with nullable varchar/UUID columns (H2).
drop table if exists person cascade;

create table person (
    id bigint primary key,
    first_name varchar(50),
    last_name varchar(50) not null,
    is_admin boolean not null,
    created timestamp with time zone,
    social_id UUID
);

insert into person (id, first_name, last_name, is_admin, created, social_id) values
(1, 'George', 'Washington', true, now(), 'd06443a6-3efb-46c4-a66a-a80a8a9a5388'),
(2, 'Alexander', 'Hamilton', true, now(), '529b6c6d-7228-4da5-81d7-13b706f78ddb'),
(3, 'John', 'Adams', true, now(), null),
(4, 'Benjamin', 'Franklin', true, now(), null),
(5, 'John', 'Jay', true, now(), null),
(6, 'Thomas', 'Jefferson', true, now(), null),
(7, 'James', 'Madison', true, now(), null),
(8, null, 'Nagro', false, now(), null);


================================================
FILE: magnum/src/test/resources/mysql/big-dec.sql
================================================
-- Test fixture: big_dec table with a nullable numeric column (MySQL).
drop table if exists big_dec cascade;

create table big_dec (
    id int primary key,
    my_big_dec numeric
);

insert into big_dec values
(1, 123),
(2, null);

================================================
FILE: magnum/src/test/resources/mysql/car.sql
================================================
-- Test fixture: car table with enum and datetime columns (MySQL).
drop table if exists car;

create table car (
    model varchar(50) not null,
    id bigint primary key,
    top_speed int not null,
    vin int,
    color enum('Red', 'Green', 'Blue'),
    created datetime not null
);

insert into car (model, id, top_speed, vin, color, created) values
('McLaren Senna', 1, 208, 123, 'Red', '2024-11-24 22:17:30'),
('Ferrari F8 Tributo', 2, 212, 124, 'Green', '2024-11-24 22:17:31'),
('Aston Martin Superleggera', 3, 211, null, 'Blue', '2024-11-24 22:17:32');


================================================
FILE: magnum/src/test/resources/mysql/my-time.sql
================================================
-- Test fixture: my_time table for time-type codec tests (MySQL).
-- NOTE(review): the final datetime uses a 'T' separator while the others use
-- a space; MySQL accepts both, but confirm the mix is intentional.
drop table if exists my_time cascade;

create table my_time (
  a timestamp not null,
  b date not null,
  c time not null,
  d datetime not null
);

insert into my_time values
('2025-03-30 21:19:23', '2025-03-30', '05:20:04', '2025-04-02 20:16:38'),
('2025-03-31 21:19:23', '2025-03-31', '05:30:04', '2025-04-02T20:17:38');

================================================
FILE: magnum/src/test/resources/mysql/my-user.sql
================================================
-- Test fixture: my_user table with a db-generated id (MySQL).
drop table if exists my_user cascade;

create table my_user (
    first_name varchar(200) not null,
    id bigint auto_increment primary key
);

insert into my_user (first_name) values
('George'),
('Alexander'),
('John');


================================================
FILE: magnum/src/test/resources/mysql/no-id.sql
================================================
-- Test fixture: no_id table for entities without an id column (MySQL).
drop table if exists no_id;

create table no_id (
    created_at datetime not null default now(),
    user_name varchar(200) not null,
    user_action varchar(200) not null
);

insert into no_id values
('1997-08-15', 'Josh', 'clicked a button'),
('1997-08-16', 'Danny', 'opened a toaster'),
('1997-08-17', 'Greg', 'ran some QA tests');


================================================
FILE: magnum/src/test/resources/mysql/person.sql
================================================
-- Test fixture: person table; UUIDs stored as varchar(36) since MySQL has
-- no native UUID column type here.
drop table if exists person cascade;

create table person (
    id bigint primary key,
    first_name varchar(50),
    last_name varchar(50) not null,
    is_admin boolean not null,
    created datetime not null,
    social_id varchar(36)
);

insert into person (id, first_name, last_name, is_admin, created, social_id) values
(1, 'George', 'Washington', true, now(), 'd06443a6-3efb-46c4-a66a-a80a8a9a5388'),
(2, 'Alexander', 'Hamilton', true, now(), '529b6c6d-7228-4da5-81d7-13b706f78ddb'),
(3, 'John', 'Adams', true, now(), null),
(4, 'Benjamin', 'Franklin', true, now(), null),
(5, 'John', 'Jay', true, now(), null),
(6, 'Thomas', 'Jefferson', true, now(), null),
(7, 'James', 'Madison', true, now(), null),
(8, null, 'Nagro', false, now(), null);


================================================
FILE: magnum/src/test/resources/pg/big-dec.sql
================================================
-- Test fixture: big_dec table with a nullable numeric column (Postgres).
drop table if exists big_dec cascade;

create table big_dec (
    id int primary key,
    my_big_dec numeric
);

insert into big_dec values
(1, 123),
(2, null);

================================================
FILE: magnum/src/test/resources/pg/car.sql
================================================
-- Test fixture: car table; enum emulated via a CHECK constraint (Postgres).
DROP TABLE IF EXISTS car;

CREATE TABLE car (
    model VARCHAR(50) NOT NULL,
    id bigint PRIMARY KEY,
    top_speed INT NOT NULL,
    vin INT,
    color TEXT NOT NULL CHECK (color IN ('Red', 'Green', 'Blue')),
    created TIMESTAMP WITH TIME ZONE NOT NULL
);

INSERT INTO car (model, id, top_speed, vin, color, created) VALUES
('McLaren Senna', 1, 208, 123, 'Red', '2024-11-24T22:17:30.000000000Z'::timestamptz),
('Ferrari F8 Tributo', 2, 212, 124, 'Green', '2024-11-24T22:17:31.000000000Z'::timestamptz),
('Aston Martin Superleggera', 3, 211, null, 'Blue', '2024-11-24T22:17:32.000000000Z'::timestamptz);


================================================
FILE: magnum/src/test/resources/pg/my-time.sql
================================================
-- Test fixture: my_time table for time-type codec tests (Postgres).
drop table if exists my_time cascade;

create table my_time (
  a timestamptz not null,
  b date not null,
  c time not null,
  d timestamp not null
);

insert into my_time values
('2025-03-30T21:19:23Z', '2025-03-30', '05:20:04', '2025-04-02T20:16:38'),
('2025-03-31T21:19:23Z', '2025-03-31', '05:30:04', '2025-04-02T20:17:38');

================================================
FILE: magnum/src/test/resources/pg/my-user.sql
================================================
-- Test fixture: my_user table with a db-generated identity id (Postgres).
drop table if exists my_user cascade;

create table my_user (
    first_name text not null,
    id bigint primary key generated always as identity
);

insert into my_user (first_name) values
('George'),
('Alexander'),
('John');


================================================
FILE: magnum/src/test/resources/pg/no-id.sql
================================================
-- Test fixture: no_id table for entities without an id column (Postgres).
drop table if exists no_id;

create table no_id (
    created_at timestamptz not null default now(),
    user_name text not null,
    user_action text not null
);

insert into no_id values
(timestamp '1997-08-15', 'Josh', 'clicked a button'),
(timestamp '1997-08-16', 'Danny', 'opened a toaster'),
(timestamp '1997-08-17', 'Greg', 'ran some QA tests');


================================================
FILE: magnum/src/test/resources/pg/person.sql
================================================
-- Test fixture: person table with nullable varchar/UUID columns (Postgres).
drop table if exists person cascade;

create table person (
    id bigint primary key,
    first_name varchar(50),
    last_name varchar(50) not null,
    is_admin boolean not null,
    created timestamptz not null,
    social_id UUID
);

insert into person (id, first_name, last_name, is_admin, created, social_id) values
(1, 'George', 'Washington', true, now(), 'd06443a6-3efb-46c4-a66a-a80a8a9a5388'),
(2, 'Alexander', 'Hamilton', true, now(), '529b6c6d-7228-4da5-81d7-13b706f78ddb'),
(3, 'John', 'Adams', true, now(), null),
(4, 'Benjamin', 'Franklin', true, now(), null),
(5, 'John', 'Jay', true, now(), null),
(6, 'Thomas', 'Jefferson', true, now(), null),
(7, 'James', 'Madison', true, now(), null),
(8, null, 'Nagro', false, timestamp '1997-08-12', null);


================================================
FILE: magnum/src/test/scala/ClickHouseTests.scala
================================================
import com.augustnagro.magnum.*
import com.clickhouse.client.config.ClickHouseDefaults
import com.clickhouse.jdbc.ClickHouseDataSource
import com.dimafeng.testcontainers.ClickHouseContainer
import com.dimafeng.testcontainers.munit.fixtures.TestContainersFixtures
import munit.{AnyFixture, FunSuite, Location}
import org.testcontainers.utility.DockerImageName
import shared.*

import java.nio.file.{Files, Path}
import java.util.{Properties, UUID}
import scala.util.Using

class ClickHouseTests extends FunSuite, TestContainersFixtures:

  sharedTests(this, ClickhouseDbType, xa)

  test("only allows EC =:= E"):
    intercept[IllegalArgumentException]:
      case class UserCreator(name: String) derives DbCodec
      @Table(ClickhouseDbType)
      case class User(id: UUID, name: String) derives DbCodec
      val repo = Repo[UserCreator, User, UUID]

  val clickHouseContainer = ForAllContainerFixture(
    ClickHouseContainer
      .Def(dockerImageName =
        DockerImageName.parse("clickhouse/clickhouse-server:24.3.12.75")
      )
      .createContainer()
  )

  override def munitFixtures: Seq[AnyFixture[_]] =
    super.munitFixtures :+ clickHouseContainer

  /** Builds a Transactor backed by the ClickHouse test container,
    * (re)creating the test tables first. Fails fast with a descriptive error
    * if a DDL resource is missing, instead of an opaque NullPointerException.
    */
  def xa(): Transactor =
    val clickHouse = clickHouseContainer()
    val props = Properties()
    props.put(ClickHouseDefaults.USER.getKey, clickHouse.username)
    props.put(ClickHouseDefaults.PASSWORD.getKey, clickHouse.password)
    val ds = ClickHouseDataSource(clickHouse.jdbcUrl, props)
    // Absolute resource paths, consistent with the H2/MySql/Pg test suites.
    // (This class is in the default package, so resolution is unchanged.)
    val tableDDLs = Vector(
      "/clickhouse/car.sql",
      "/clickhouse/no-id.sql",
      "/clickhouse/person.sql",
      "/clickhouse/big-dec.sql",
      "/clickhouse/my-time.sql"
    ).map { p =>
      val url = getClass.getResource(p)
      require(url != null, s"missing test resource: $p")
      Files.readString(Path.of(url.toURI))
    }
    Using
      .Manager(use =>
        val con = use(ds.getConnection)
        val stmt = use(con.createStatement)
        for ddl <- tableDDLs do stmt.execute(ddl)
      )
      .get // surface any setup failure instead of swallowing the Try
    Transactor(ds)
  end xa
end ClickHouseTests


================================================
FILE: magnum/src/test/scala/EffectiveSubsetTests.scala
================================================
import com.augustnagro.magnum.*
import munit.FunSuite

class EffectiveSubsetTests extends FunSuite:

  // munit's compileErrors returns the compiler error text as a String; the
  // original tests discarded it, so they passed even if the code compiled.
  // Asserting nonEmpty makes the tests actually verify a macro error occurs.

  test("DbSchema macro error if EC not an effective subset of E"):
    case class PersonCreator(first: String, last: String)
    case class Person(id: Long, last: String) derives DbCodec
    assert(compileErrors("DbSchema[PersonCreator, Person, Long]").nonEmpty)

  test("Repo macro error if EC not an effective subset of E"):
    case class PersonCreator(first: String, last: String)
    case class Person(id: Long, last: String) derives DbCodec
    assert(compileErrors("Repo[PersonCreator, Person, Long]").nonEmpty)


================================================
FILE: magnum/src/test/scala/H2Tests.scala
================================================
import com.augustnagro.magnum.*
import munit.FunSuite
import org.h2.jdbcx.JdbcDataSource
import shared.*

import java.nio.file.{Files, Path}
import scala.util.Using
import scala.util.Using.Manager

class H2Tests extends FunSuite:

  sharedTests(this, H2DbType, xa)

  // One temp-directory-backed H2 database shared across the suite.
  lazy val h2DbPath = Files.createTempDirectory(null).toAbsolutePath

  /** Builds a Transactor against the embedded H2 database, (re)creating the
    * test tables first.
    */
  def xa(): Transactor =
    val ds = JdbcDataSource()
    ds.setURL("jdbc:h2:" + h2DbPath)
    ds.setUser("sa")
    ds.setPassword("")
    val tableDDLs = Vector(
      "/h2/car.sql",
      "/h2/person.sql",
      "/h2/my-user.sql",
      "/h2/no-id.sql",
      "/h2/big-dec.sql",
      "/h2/my-time.sql"
    ).map(p => Files.readString(Path.of(getClass.getResource(p).toURI)))
    Manager(use =>
      val con = use(ds.getConnection)
      val stmt = use(con.createStatement)
      for ddl <- tableDDLs do stmt.execute(ddl)
    ).get // Manager returns a Try; without .get a failed DDL was silently swallowed
    Transactor(ds)

end H2Tests


================================================
FILE: magnum/src/test/scala/MySqlTests.scala
================================================
import com.augustnagro.magnum.*
import com.augustnagro.magnum.UUIDCodec.VarCharUUIDCodec
import com.dimafeng.testcontainers.MySQLContainer
import com.dimafeng.testcontainers.munit.fixtures.TestContainersFixtures
import com.mysql.cj.jdbc.MysqlDataSource
import munit.{AnyFixture, FunSuite, Location}
import org.testcontainers.utility.DockerImageName
import shared.*

import java.nio.file.{Files, Path}
import scala.util.Using
import scala.util.Using.Manager

class MySqlTests extends FunSuite, TestContainersFixtures:

  sharedTests(this, MySqlDbType, xa)

  val mySqlContainer = ForAllContainerFixture(
    MySQLContainer
      .Def(dockerImageName = DockerImageName.parse("mysql:8.0.32"))
      .createContainer()
  )

  override def munitFixtures: Seq[AnyFixture[_]] =
    super.munitFixtures :+ mySqlContainer

  /** Builds a Transactor against the MySQL test container, (re)creating the
    * test tables from the classpath DDL scripts first.
    */
  def xa(): Transactor =
    val container = mySqlContainer()
    val dataSource = MysqlDataSource()
    dataSource.setURL(container.jdbcUrl)
    dataSource.setUser(container.username)
    dataSource.setPassword(container.password)
    dataSource.setAllowMultiQueries(true)
    dataSource.setServerTimezone("UTC")
    val ddlScripts = Vector(
      "/mysql/car.sql",
      "/mysql/person.sql",
      "/mysql/my-user.sql",
      "/mysql/no-id.sql",
      "/mysql/big-dec.sql",
      "/mysql/my-time.sql"
    ).map { resource =>
      Files.readString(Path.of(getClass.getResource(resource).toURI))
    }
    Using.Manager { use =>
      val connection = use(dataSource.getConnection)
      val statement = use(connection.createStatement())
      ddlScripts.foreach(sql => statement.execute(sql))
    }.get
    Transactor(dataSource)
  end xa
end MySqlTests


================================================
FILE: magnum/src/test/scala/OracleTests.scala
================================================
import com.augustnagro.magnum.*
import com.augustnagro.magnum.UUIDCodec.VarCharUUIDCodec
import com.dimafeng.testcontainers.OracleContainer
import com.dimafeng.testcontainers.munit.fixtures.TestContainersFixtures
import munit.{AnyFixture, FunSuite}
import oracle.jdbc.datasource.impl.OracleDataSource
import org.testcontainers.utility.DockerImageName
import shared.*

import java.sql.Statement
import java.time.LocalTime
import scala.util.Using

class OracleTests extends FunSuite, TestContainersFixtures:

  // Oracle has no boolean column type; these tests persist 'Y'/'N' in a
  // varchar2(1) column and map it back to Boolean here.
  given DbCodec[Boolean] =
    DbCodec[String].biMap(_ == "Y", b => if b then "Y" else "N")

  // Oracle has no TIME column type; LocalTime round-trips through its
  // ISO-8601 string form stored in a varchar2 column.
  given DbCodec[LocalTime] =
    DbCodec[String].biMap(LocalTime.parse, _.toString)

  sharedTests(this, OracleDbType, xa)

  val oracleContainer = ForAllContainerFixture(
    OracleContainer
      .Def(dockerImageName =
        DockerImageName.parse(
          "gvenzl/oracle-xe:21.3.0"
        )
      )
      .createContainer()
  )

  override def munitFixtures: Seq[AnyFixture[_]] =
    super.munitFixtures :+ oracleContainer

  // Builds a Transactor against the Oracle XE test container. Unlike the
  // other suites, all DDL/seed statements are inlined here (no .sql
  // resources) and executed one at a time; each `drop table` is wrapped in a
  // catch-all because the table may not exist on the first run.
  def xa(): Transactor =
    val oracle = oracleContainer()
    val ds = OracleDataSource()
    ds.setURL(oracle.jdbcUrl)
    ds.setUser(oracle.username)
    ds.setPassword(oracle.password)
    // oracle doesn't support drop if exists,
    // or multi-statement queries
    Using
      .Manager(use =>
        val con = use(ds.getConnection())
        val stmt = use(con.createStatement())
        // --- car table + 3 seed rows ---
        try stmt.execute("drop table car")
        catch case _ => ()
        stmt.execute(
          """create table car (
          |  model varchar2(50) not null,
          |  id number primary key,
          |  top_speed number not null,
          |  vin number,
          |  color varchar2(50) not null check (color in ('Red', 'Green', 'Blue')),
          |  created timestamp not null
          |)""".stripMargin
        )
        stmt.execute(
          """insert into car (model, id, top_speed, vin, color, created)
          |values ('McLaren Senna', 1, 208, 123, 'Red', timestamp '2024-11-24 22:17:30')""".stripMargin
        )
        stmt.execute(
          """insert into car (model, id, top_speed, vin, color, created)
          |values ('Ferrari F8 Tributo', 2, 212, 124, 'Green', timestamp '2024-11-24 22:17:31')""".stripMargin
        )
        stmt.execute(
          """insert into car (model, id, top_speed, vin, color, created)
          |values ('Aston Martin Superleggera', 3, 211, null, 'Blue', timestamp '2024-11-24 22:17:32')""".stripMargin
        )
        // --- person table + 8 seed rows (is_admin stored as 'Y'/'N') ---
        try stmt.execute("drop table person")
        catch case _ => ()
        stmt.execute(
          """create table person (
          |    id number primary key,
          |    first_name varchar2(50),
          |    last_name varchar2(50) not null,
          |    is_admin varchar2(1) not null,
          |    created timestamp not null,
          |    social_id varchar2(36)
          |)""".stripMargin
        )
        stmt.execute(
          """insert into person (id, first_name, last_name, is_admin, created, social_id) values
          |(1, 'George', 'Washington', 'Y', current_timestamp, 'd06443a6-3efb-46c4-a66a-a80a8a9a5388')""".stripMargin
        )
        stmt.execute(
          """insert into person (id, first_name, last_name, is_admin, created, social_id) values
          |(2, 'Alexander', 'Hamilton', 'Y', current_timestamp, '529b6c6d-7228-4da5-81d7-13b706f78ddb')""".stripMargin
        )
        stmt.execute(
          """insert into person (id, first_name, last_name, is_admin, created, social_id) values
          |(3, 'John', 'Adams', 'Y', current_timestamp, null)""".stripMargin
        )
        stmt.execute(
          """insert into person (id, first_name, last_name, is_admin, created, social_id) values
          |(4, 'Benjamin', 'Franklin', 'Y', current_timestamp, null)""".stripMargin
        )
        stmt.execute(
          """insert into person (id, first_name, last_name, is_admin, created, social_id) values
          |(5, 'John', 'Jay', 'Y', current_timestamp, null)""".stripMargin
        )
        stmt.execute(
          """insert into person (id, first_name, last_name, is_admin, created, social_id) values
          |(6, 'Thomas', 'Jefferson', 'Y', current_timestamp, null)""".stripMargin
        )
        stmt.execute(
          """insert into person (id, first_name, last_name, is_admin, created, social_id) values
          |(7, 'James', 'Madison', 'Y', current_timestamp, null)""".stripMargin
        )
        stmt.execute(
          """insert into person (id, first_name, last_name, is_admin, created, social_id) values
          |(8, null, 'Nagro', 'N', current_timestamp, null)""".stripMargin
        )
        // --- my_user table with identity PK + 3 seed rows ---
        try stmt.execute("drop table my_user")
        catch case _ => ()
        stmt.execute(
          """create table my_user (
            |  first_name varchar2(200) not null,
            |  id number generated always as identity,
            |  primary key (id)
            |)
            |""".stripMargin
        )
        stmt.execute("""insert into my_user (first_name) values ('George')""")
        stmt.execute(
          """insert into my_user (first_name) values ('Alexander')"""
        )
        stmt.execute("""insert into my_user (first_name) values ('John')""")
        // --- no_id table (no primary key) + 3 seed rows ---
        try stmt.execute("drop table no_id")
        catch case _ => ()
        stmt.execute(
          """create table no_id (
            |  created_at timestamp not null,
            |  user_name varchar2(200) not null,
            |  user_action varchar2(200) not null
            |)
            |""".stripMargin
        )
        stmt.execute(
          """insert into no_id (created_at, user_name, user_action) values
            |(timestamp '1997-08-15 00:00:00', 'Josh', 'clicked a button')""".stripMargin
        )
        stmt.execute(
          """insert into no_id (created_at, user_name, user_action) values
            |(timestamp '1997-08-16 00:00:00', 'Danny', 'opened a toaster')""".stripMargin
        )
        stmt.execute(
          """insert into no_id (created_at, user_name, user_action) values
            |(timestamp '1997-08-17 00:00:00', 'Greg', 'ran some QA tests')""".stripMargin
        )
        // --- big_dec table with a nullable numeric + 2 seed rows ---
        try stmt.execute("drop table big_dec")
        catch case _ => ()
        stmt.execute(
          """create table big_dec (
            |  id number primary key,
            |  my_big_dec numeric
            |)""".stripMargin
        )
        stmt.execute("insert into big_dec (id, my_big_dec) values (1, 123)")
        stmt.execute("insert into big_dec (id, my_big_dec) values (2, null)")
        // --- my_time table: date/time column variants + 2 seed rows
        //     (column c holds a time-of-day string for the LocalTime codec) ---
        try stmt.execute("drop table my_time")
        catch case _ => ()
        stmt.execute(
          """create table my_time (
            |  a timestamp with local time zone not null,
            |  b date not null,
            |  c VARCHAR2(100) not null,
            |  d timestamp not null
            |)
            |""".stripMargin
        )
        stmt.execute(
          "insert into my_time values (timestamp '2025-03-30 21:19:23 -00:00', date '2025-03-30', '05:20:04', timestamp '2025-04-02 20:16:38')"
        )
        stmt.execute(
          "insert into my_time values (timestamp '2025-03-31 21:19:23 -00:00', date '2025-03-31', '05:30:04', timestamp '2025-04-02 20:17:38')"
        )
      )
      .get
    Transactor(ds)
  end xa
end OracleTests


================================================
FILE: magnum/src/test/scala/PgTests.scala
================================================
import com.augustnagro.magnum.*
import com.dimafeng.testcontainers.PostgreSQLContainer
import com.dimafeng.testcontainers.munit.fixtures.TestContainersFixtures
import munit.{AnyFixture, FunSuite, Location}
import org.postgresql.ds.PGSimpleDataSource
import org.testcontainers.utility.DockerImageName
import shared.*

import java.nio.file.{Files, Path}
import scala.util.Using
import scala.util.Using.Manager

class PgTests extends FunSuite, TestContainersFixtures:

  sharedTests(this, PostgresDbType, xa)

  val pgContainer = ForAllContainerFixture(
    PostgreSQLContainer
      .Def(dockerImageName = DockerImageName.parse("postgres:17.0"))
      .createContainer()
  )

  override def munitFixtures: Seq[AnyFixture[_]] =
    super.munitFixtures :+ pgContainer

  def xa(): Transactor =
    val ds = PGSimpleDataSource()
    va
Download .txt
gitextract_tcy7nv57/

├── .github/
│   └── workflows/
│       └── ci.yml
├── .gitignore
├── .scalafmt.conf
├── LICENSE
├── README.md
├── build.sbt
├── magnum/
│   └── src/
│       ├── main/
│       │   └── scala/
│       │       └── com/
│       │           └── augustnagro/
│       │               └── magnum/
│       │                   ├── BatchUpdateResult.scala
│       │                   ├── ClickhouseDbType.scala
│       │                   ├── ColumnName.scala
│       │                   ├── ColumnNames.scala
│       │                   ├── DbCodec.scala
│       │                   ├── DbCon.scala
│       │                   ├── DbTx.scala
│       │                   ├── DbType.scala
│       │                   ├── DerivingUtil.scala
│       │                   ├── Frag.scala
│       │                   ├── FragWriter.scala
│       │                   ├── H2DbType.scala
│       │                   ├── Id.scala
│       │                   ├── ImmutableRepo.scala
│       │                   ├── MySqlDbType.scala
│       │                   ├── NullOrder.scala
│       │                   ├── OracleDbType.scala
│       │                   ├── PostgresDbType.scala
│       │                   ├── Query.scala
│       │                   ├── Repo.scala
│       │                   ├── RepoDefaults.scala
│       │                   ├── ResultSetIterator.scala
│       │                   ├── Returning.scala
│       │                   ├── Seek.scala
│       │                   ├── SeekDir.scala
│       │                   ├── Sort.scala
│       │                   ├── SortOrder.scala
│       │                   ├── Spec.scala
│       │                   ├── SpecImpl.scala
│       │                   ├── SqlException.scala
│       │                   ├── SqlExceptionEvent.scala
│       │                   ├── SqlLiteral.scala
│       │                   ├── SqlLogger.scala
│       │                   ├── SqlName.scala
│       │                   ├── SqlNameMapper.scala
│       │                   ├── SqlSuccessEvent.scala
│       │                   ├── SqliteDbType.scala
│       │                   ├── Table.scala
│       │                   ├── TableExprs.scala
│       │                   ├── TableInfo.scala
│       │                   ├── Transactor.scala
│       │                   ├── UUIDCodec.scala
│       │                   ├── Update.scala
│       │                   └── util.scala
│       └── test/
│           ├── resources/
│           │   ├── clickhouse/
│           │   │   ├── big-dec.sql
│           │   │   ├── car.sql
│           │   │   ├── my-time.sql
│           │   │   ├── no-id.sql
│           │   │   └── person.sql
│           │   ├── h2/
│           │   │   ├── big-dec.sql
│           │   │   ├── car.sql
│           │   │   ├── my-time.sql
│           │   │   ├── my-user.sql
│           │   │   ├── no-id.sql
│           │   │   └── person.sql
│           │   ├── mysql/
│           │   │   ├── big-dec.sql
│           │   │   ├── car.sql
│           │   │   ├── my-time.sql
│           │   │   ├── my-user.sql
│           │   │   ├── no-id.sql
│           │   │   └── person.sql
│           │   └── pg/
│           │       ├── big-dec.sql
│           │       ├── car.sql
│           │       ├── my-time.sql
│           │       ├── my-user.sql
│           │       ├── no-id.sql
│           │       └── person.sql
│           └── scala/
│               ├── ClickHouseTests.scala
│               ├── EffectiveSubsetTests.scala
│               ├── H2Tests.scala
│               ├── MySqlTests.scala
│               ├── OracleTests.scala
│               ├── PgTests.scala
│               ├── SqliteTests.scala
│               ├── opaques.scala
│               └── shared/
│                   ├── BigDecTests.scala
│                   ├── Color.scala
│                   ├── DateTimeTests.scala
│                   ├── EmbeddedFragTests.scala
│                   ├── EntityCreatorTests.scala
│                   ├── ImmutableRepoTests.scala
│                   ├── MultilineFragTests.scala
│                   ├── NoIdTests.scala
│                   ├── OptionalProductTests.scala
│                   ├── RepoTests.scala
│                   ├── SharedTests.scala
│                   ├── SpecTests.scala
│                   ├── SqlNameTests.scala
│                   ├── TableInfoTests.scala
│                   └── TupleTests.scala
├── magnum-pg/
│   └── src/
│       ├── main/
│       │   └── scala/
│       │       └── com/
│       │           └── augustnagro/
│       │               └── magnum/
│       │                   └── pg/
│       │                       ├── PgCodec.scala
│       │                       ├── SqlArrayCodec.scala
│       │                       ├── enums/
│       │                       │   ├── PgEnumDbCodec.scala
│       │                       │   ├── PgEnumToScalaEnumSqlArrayCodec.scala
│       │                       │   └── PgStringToScalaEnumSqlArrayCodec.scala
│       │                       ├── json/
│       │                       │   ├── JsonBDbCodec.scala
│       │                       │   └── JsonDbCodec.scala
│       │                       └── xml/
│       │                           └── XmlDbCodec.scala
│       └── test/
│           ├── resources/
│           │   ├── pg-car.sql
│           │   ├── pg-service-list.sql
│           │   └── pg-user.sql
│           └── scala/
│               ├── CirceJsonBDbCodec.scala
│               ├── CirceJsonDbCodec.scala
│               ├── Color.scala
│               ├── LastService.scala
│               ├── MagCar.scala
│               ├── MagUser.scala
│               ├── MyJsonB.scala
│               ├── MyXml.scala
│               └── PgCodecTests.scala
├── magnum-zio/
│   └── src/
│       ├── main/
│       │   └── scala/
│       │       └── com/
│       │           └── augustnagro/
│       │               └── magnum/
│       │                   └── magzio/
│       │                       └── TransactorZIO.scala
│       └── test/
│           ├── resources/
│           │   └── pg/
│           │       ├── big-dec.sql
│           │       ├── car.sql
│           │       ├── my-user.sql
│           │       ├── no-id.sql
│           │       └── person.sql
│           └── scala/
│               └── com/
│                   └── augustnagro/
│                       └── magnum/
│                           └── magzio/
│                               ├── ImmutableRepoZioTests.scala
│                               └── PgZioTests.scala
└── project/
    ├── build.properties
    └── plugins.sbt
Download .txt
SYMBOL INDEX (31 symbols across 31 files)

FILE: magnum-pg/src/test/resources/pg-car.sql
  type mag_car (line 3) | create table mag_car (

FILE: magnum-pg/src/test/resources/pg-service-list.sql
  type mag_service_list (line 3) | create table mag_service_list (

FILE: magnum-pg/src/test/resources/pg-user.sql
  type mag_user (line 6) | create table mag_user (

FILE: magnum-zio/src/test/resources/pg/big-dec.sql
  type big_dec (line 3) | create table big_dec (

FILE: magnum-zio/src/test/resources/pg/car.sql
  type car (line 3) | CREATE TABLE car (

FILE: magnum-zio/src/test/resources/pg/my-user.sql
  type my_user (line 3) | create table my_user (

FILE: magnum-zio/src/test/resources/pg/no-id.sql
  type no_id (line 3) | create table no_id (

FILE: magnum-zio/src/test/resources/pg/person.sql
  type person (line 3) | create table person (

FILE: magnum/src/test/resources/clickhouse/big-dec.sql
  type big_dec (line 3) | create table big_dec (

FILE: magnum/src/test/resources/clickhouse/car.sql
  type car (line 3) | CREATE TABLE car (

FILE: magnum/src/test/resources/clickhouse/my-time.sql
  type my_time (line 3) | create table my_time (

FILE: magnum/src/test/resources/clickhouse/no-id.sql
  type no_id (line 3) | CREATE TABLE no_id (

FILE: magnum/src/test/resources/clickhouse/person.sql
  type person (line 3) | create table person (

FILE: magnum/src/test/resources/h2/big-dec.sql
  type big_dec (line 3) | create table big_dec (

FILE: magnum/src/test/resources/h2/car.sql
  type car (line 3) | create table car (

FILE: magnum/src/test/resources/h2/my-time.sql
  type my_time (line 3) | create table my_time (

FILE: magnum/src/test/resources/h2/my-user.sql
  type my_user (line 3) | create table my_user (

FILE: magnum/src/test/resources/h2/no-id.sql
  type no_id (line 3) | create table no_id (

FILE: magnum/src/test/resources/h2/person.sql
  type person (line 3) | create table person (

FILE: magnum/src/test/resources/mysql/big-dec.sql
  type big_dec (line 3) | create table big_dec (

FILE: magnum/src/test/resources/mysql/car.sql
  type car (line 3) | create table car (

FILE: magnum/src/test/resources/mysql/my-time.sql
  type my_time (line 3) | create table my_time (

FILE: magnum/src/test/resources/mysql/my-user.sql
  type my_user (line 3) | create table my_user (

FILE: magnum/src/test/resources/mysql/no-id.sql
  type no_id (line 3) | create table no_id (

FILE: magnum/src/test/resources/mysql/person.sql
  type person (line 3) | create table person (

FILE: magnum/src/test/resources/pg/big-dec.sql
  type big_dec (line 3) | create table big_dec (

FILE: magnum/src/test/resources/pg/car.sql
  type car (line 3) | CREATE TABLE car (

FILE: magnum/src/test/resources/pg/my-time.sql
  type my_time (line 3) | create table my_time (

FILE: magnum/src/test/resources/pg/my-user.sql
  type my_user (line 3) | create table my_user (

FILE: magnum/src/test/resources/pg/no-id.sql
  type no_id (line 3) | create table no_id (

FILE: magnum/src/test/resources/pg/person.sql
  type person (line 3) | create table person (
Condensed preview — 126 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (313K chars).
[
  {
    "path": ".github/workflows/ci.yml",
    "chars": 649,
    "preview": "name: CI\n\non:\n  push:\n    branches: [\"master\"]\n  pull_request:\n    branches: [\"master\"]\n\npermissions:\n  contents: read\n\n"
  },
  {
    "path": ".gitignore",
    "chars": 178,
    "preview": ".settings\n.DS_Store\n\n*.iml\n.idea\ntarget\nnbproject\nnb-configuration.xml\n\n.class\n.log\n.jar\n.war\n.ear\n.zip\n.tar.gz\n.rar\n\nhs"
  },
  {
    "path": ".scalafmt.conf",
    "chars": 169,
    "preview": "version = 3.8.4-RC3\nrunner.dialect = scala3\nrewrite.scala3.insertEndMarkerMinLines = 20\nrewrite.scala3.removeEndMarkerMa"
  },
  {
    "path": "LICENSE",
    "chars": 11357,
    "preview": "\n                                 Apache License\n                           Version 2.0, January 2004\n                  "
  },
  {
    "path": "README.md",
    "chars": 21152,
    "preview": "## Magnum\n\n[![Latest version](https://index.scala-lang.org/augustnagro/magnum/magnum/latest.svg?color=orange)](https://i"
  },
  {
    "path": "build.sbt",
    "chars": 3550,
    "preview": "ThisBuild / organization := \"com.augustnagro\"\nThisBuild / version := \"2.0.0-SNAPSHOT\"\nThisBuild / versionScheme := Some("
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/BatchUpdateResult.scala",
    "chars": 209,
    "preview": "package com.augustnagro.magnum\n\nimport scala.util.boundary\n\n/** The total number of rows updated, or SuccessNoInfo if un"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/ClickhouseDbType.scala",
    "chars": 5304,
    "preview": "package com.augustnagro.magnum\n\nimport java.sql.{Connection, PreparedStatement, ResultSet, Statement}\nimport java.time.O"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/ColumnName.scala",
    "chars": 226,
    "preview": "package com.augustnagro.magnum\n\n/** Represents an entity column. Can be interpolated in sql\"\" expressions */\nclass Colum"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/ColumnNames.scala",
    "chars": 348,
    "preview": "package com.augustnagro.magnum\n\n/** A grouping of schema names, which may be interpolated in sql\"\" expressions.\n  * @par"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/DbCodec.scala",
    "chars": 33568,
    "preview": "package com.augustnagro.magnum\n\nimport java.net.URL\nimport java.sql.{JDBCType, PreparedStatement, ResultSet, Types}\nimpo"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/DbCon.scala",
    "chars": 261,
    "preview": "package com.augustnagro.magnum\n\nimport java.sql.Connection\n\n/** Simple wrapper around java.sql.Connection. See\n  * `com."
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/DbTx.scala",
    "chars": 245,
    "preview": "package com.augustnagro.magnum\n\nimport java.sql.Connection\nimport scala.util.Using\n\n/** Represents a transactional [[DbC"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/DbType.scala",
    "chars": 611,
    "preview": "package com.augustnagro.magnum\n\nimport scala.reflect.ClassTag\nimport scala.deriving.Mirror\n\n/** Factory for Repo default"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/DerivingUtil.scala",
    "chars": 4559,
    "preview": "package com.augustnagro.magnum\n\nimport scala.deriving.Mirror\nimport scala.compiletime.{\n  constValue,\n  constValueTuple,"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/Frag.scala",
    "chars": 2330,
    "preview": "package com.augustnagro.magnum\n\nimport java.lang.System.Logger.Level\nimport java.sql.{PreparedStatement, ResultSet, Stat"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/FragWriter.scala",
    "chars": 300,
    "preview": "package com.augustnagro.magnum\n\nimport java.sql.PreparedStatement\n\ntrait FragWriter:\n  /** Writes a Frag's values to `ps"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/H2DbType.scala",
    "chars": 8071,
    "preview": "package com.augustnagro.magnum\n\nimport java.sql.{Connection, JDBCType, PreparedStatement, ResultSet, Statement}\nimport j"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/Id.scala",
    "chars": 108,
    "preview": "package com.augustnagro.magnum\n\nimport scala.annotation.StaticAnnotation\n\nclass Id extends StaticAnnotation\n"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/ImmutableRepo.scala",
    "chars": 1228,
    "preview": "package com.augustnagro.magnum\n\nimport java.sql.ResultSet\nimport javax.sql.DataSource\nimport scala.util.{Try, Using}\n\n/*"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/MySqlDbType.scala",
    "chars": 7809,
    "preview": "package com.augustnagro.magnum\n\nimport java.sql.{Connection, PreparedStatement, ResultSet, Statement}\nimport java.time.O"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/NullOrder.scala",
    "chars": 182,
    "preview": "package com.augustnagro.magnum\n\ntrait NullOrder\n\nobject NullOrder:\n  case object Default extends NullOrder\n  case object"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/OracleDbType.scala",
    "chars": 7380,
    "preview": "package com.augustnagro.magnum\n\nimport java.sql.{Connection, PreparedStatement, ResultSet, Statement}\nimport java.time.O"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/PostgresDbType.scala",
    "chars": 7838,
    "preview": "package com.augustnagro.magnum\n\nimport java.sql.{Connection, JDBCType, PreparedStatement, ResultSet, Statement}\nimport j"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/Query.scala",
    "chars": 1157,
    "preview": "package com.augustnagro.magnum\n\nimport java.util.concurrent.TimeUnit\nimport scala.concurrent.duration.FiniteDuration\nimp"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/Repo.scala",
    "chars": 1801,
    "preview": "package com.augustnagro.magnum\n\nimport javax.sql.DataSource\n\n/** A read & write data repository\n  *\n  * @tparam EC\n  *  "
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/RepoDefaults.scala",
    "chars": 3431,
    "preview": "package com.augustnagro.magnum\n\nimport scala.compiletime.*\nimport scala.deriving.*\nimport scala.quoted.*\nimport scala.re"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/ResultSetIterator.scala",
    "chars": 911,
    "preview": "package com.augustnagro.magnum\n\nimport java.sql.ResultSet\nimport scala.util.control.NonFatal\n\nprivate class ResultSetIte"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/Returning.scala",
    "chars": 1498,
    "preview": "package com.augustnagro.magnum\n\nimport scala.util.{Failure, Success, Try, Using}\nimport Using.Manager\nimport java.sql.St"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/Seek.scala",
    "chars": 226,
    "preview": "package com.augustnagro.magnum\n\nclass Seek private[magnum] (\n    val column: String,\n    val seekDirection: SeekDir,\n   "
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/SeekDir.scala",
    "chars": 129,
    "preview": "package com.augustnagro.magnum\n\ntrait SeekDir\n\nobject SeekDir:\n  case object Gt extends SeekDir\n  case object Lt extends"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/Sort.scala",
    "chars": 146,
    "preview": "package com.augustnagro.magnum\n\nclass Sort private[magnum] (\n    val column: String,\n    val direction: SortOrder,\n    v"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/SortOrder.scala",
    "chars": 180,
    "preview": "package com.augustnagro.magnum\n\ntrait SortOrder\n\nobject SortOrder:\n  case object Default extends SortOrder\n  case object"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/Spec.scala",
    "chars": 1426,
    "preview": "package com.augustnagro.magnum\n\nimport java.util.StringJoiner\n\nclass Spec[E] private (\n    val prefix: Option[Frag],\n   "
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/SpecImpl.scala",
    "chars": 3095,
    "preview": "package com.augustnagro.magnum\n\nimport java.util.StringJoiner\n\nprivate trait SpecImpl:\n  def sortSql(sort: Sort): String"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/SqlException.scala",
    "chars": 155,
    "preview": "package com.augustnagro.magnum\n\nclass SqlException private[magnum] (message: String, cause: Throwable = null)\n    extend"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/SqlExceptionEvent.scala",
    "chars": 670,
    "preview": "package com.augustnagro.magnum\n\n/** Metadata for a exceptional SQL statement. */\nclass SqlExceptionEvent private[magnum]"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/SqlLiteral.scala",
    "chars": 515,
    "preview": "package com.augustnagro.magnum\n\n/** A SQL string that is interpolated directly into a sql\"\" query (and not as a\n  * Prep"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/SqlLogger.scala",
    "chars": 2843,
    "preview": "package com.augustnagro.magnum\n\nimport java.lang.System.Logger.Level\nimport scala.concurrent.duration.FiniteDuration\n\n/*"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/SqlName.scala",
    "chars": 131,
    "preview": "package com.augustnagro.magnum\n\nimport scala.annotation.StaticAnnotation\n\nclass SqlName(val name: String) extends Static"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/SqlNameMapper.scala",
    "chars": 1501,
    "preview": "package com.augustnagro.magnum\n\n/** Mapping from scala terms to sql terms */\ntrait SqlNameMapper:\n  def toColumnName(sca"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/SqlSuccessEvent.scala",
    "chars": 965,
    "preview": "package com.augustnagro.magnum\n\nimport scala.concurrent.duration.FiniteDuration\n\n/** Metadata for a successfully execute"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/SqliteDbType.scala",
    "chars": 7111,
    "preview": "package com.augustnagro.magnum\n\nimport java.sql.{Connection, PreparedStatement, ResultSet, Statement}\nimport java.time.O"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/Table.scala",
    "chars": 197,
    "preview": "package com.augustnagro.magnum\n\nimport scala.annotation.StaticAnnotation\n\nclass Table(\n    val dbType: DbType,\n    val n"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/TableExprs.scala",
    "chars": 343,
    "preview": "package com.augustnagro.magnum\n\nimport scala.quoted.*\n\nprivate case class TableExprs(\n    tableAnnot: Expr[Table],\n    t"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/TableInfo.scala",
    "chars": 4000,
    "preview": "package com.augustnagro.magnum\n\nimport scala.deriving.*\nimport scala.compiletime.*\nimport scala.quoted.*\n\n/** Metadata a"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/Transactor.scala",
    "chars": 1703,
    "preview": "package com.augustnagro.magnum\n\nimport java.sql.Connection\nimport javax.sql.DataSource\nimport scala.util.Using\n\nclass Tr"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/UUIDCodec.scala",
    "chars": 582,
    "preview": "package com.augustnagro.magnum\n\nimport java.sql.{PreparedStatement, ResultSet, Types}\nimport java.util.UUID\n\nobject UUID"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/Update.scala",
    "chars": 494,
    "preview": "package com.augustnagro.magnum\n\nimport java.util.concurrent.TimeUnit\nimport scala.concurrent.duration.FiniteDuration\nimp"
  },
  {
    "path": "magnum/src/main/scala/com/augustnagro/magnum/util.scala",
    "chars": 12001,
    "preview": "package com.augustnagro.magnum\n\nimport com.augustnagro.magnum.SqlException\n\nimport java.lang.System.Logger.Level\nimport "
  },
  {
    "path": "magnum/src/test/resources/clickhouse/big-dec.sql",
    "chars": 193,
    "preview": "drop table if exists big_dec;\n\ncreate table big_dec (\n    id Int64 NOT NULL,\n    my_big_dec Nullable(Int256)\n)\nENGINE = "
  },
  {
    "path": "magnum/src/test/resources/clickhouse/car.sql",
    "chars": 593,
    "preview": "drop table if exists car;\n\nCREATE TABLE car (\n    model String NOT NULL,\n    id Int64 NOT NULL,\n    top_speed Int32 NOT "
  },
  {
    "path": "magnum/src/test/resources/clickhouse/my-time.sql",
    "chars": 414,
    "preview": "drop table if exists my_time;\n\ncreate table my_time (\n  a DateTime not null,\n  b Date not null,\n  c String not null,\n  d"
  },
  {
    "path": "magnum/src/test/resources/clickhouse/no-id.sql",
    "chars": 380,
    "preview": "drop table if exists no_id;\n\nCREATE TABLE no_id (\n    created_at DateTime NOT NULL,\n    user_name String NOT NULL,\n    u"
  },
  {
    "path": "magnum/src/test/resources/clickhouse/person.sql",
    "chars": 958,
    "preview": "drop table if exists person;\n\ncreate table person (\n    id Int64 not null,\n    first_name Nullable(String),\n    last_nam"
  },
  {
    "path": "magnum/src/test/resources/h2/big-dec.sql",
    "chars": 175,
    "preview": "drop table if exists big_dec cascade;\n\ncreate table big_dec (\n    id int auto_increment primary key,\n    my_big_dec nume"
  },
  {
    "path": "magnum/src/test/resources/h2/car.sql",
    "chars": 545,
    "preview": "drop table if exists car;\n\ncreate table car (\n    model varchar(50) not null,\n    id bigint auto_increment primary key,\n"
  },
  {
    "path": "magnum/src/test/resources/h2/my-time.sql",
    "chars": 351,
    "preview": "drop table if exists my_time cascade;\n\ncreate table my_time (\n    a timestamp with time zone not null,\n    b date not nu"
  },
  {
    "path": "magnum/src/test/resources/h2/my-user.sql",
    "chars": 214,
    "preview": "drop table if exists my_user cascade;\n\ncreate table my_user (\n    first_name text not null,\n    id bigint auto_increment"
  },
  {
    "path": "magnum/src/test/resources/h2/no-id.sql",
    "chars": 372,
    "preview": "drop table if exists no_id;\n\ncreate table no_id (\n    created_at timestamp with time zone default now() not null,\n    us"
  },
  {
    "path": "magnum/src/test/resources/h2/person.sql",
    "chars": 751,
    "preview": "drop table if exists person cascade;\n\ncreate table person (\n    id bigint primary key,\n    first_name varchar(50),\n    l"
  },
  {
    "path": "magnum/src/test/resources/mysql/big-dec.sql",
    "chars": 160,
    "preview": "drop table if exists big_dec cascade;\n\ncreate table big_dec (\n    id int primary key,\n    my_big_dec numeric\n);\n\ninsert "
  },
  {
    "path": "magnum/src/test/resources/mysql/car.sql",
    "chars": 494,
    "preview": "drop table if exists car;\n\ncreate table car (\n    model varchar(50) not null,\n    id bigint primary key,\n    top_speed i"
  },
  {
    "path": "magnum/src/test/resources/mysql/my-time.sql",
    "chars": 324,
    "preview": "drop table if exists my_time cascade;\n\ncreate table my_time (\n  a timestamp not null,\n  b date not null,\n  c time not nu"
  },
  {
    "path": "magnum/src/test/resources/mysql/my-user.sql",
    "chars": 222,
    "preview": "drop table if exists my_user cascade;\n\ncreate table my_user (\n    first_name varchar(200) not null,\n    id bigint auto_i"
  },
  {
    "path": "magnum/src/test/resources/mysql/no-id.sql",
    "chars": 336,
    "preview": "drop table if exists no_id;\n\ncreate table no_id (\n    created_at datetime not null default now(),\n    user_name varchar("
  },
  {
    "path": "magnum/src/test/resources/mysql/person.sql",
    "chars": 751,
    "preview": "drop table if exists person cascade;\n\ncreate table person (\n    id bigint primary key,\n    first_name varchar(50),\n    l"
  },
  {
    "path": "magnum/src/test/resources/pg/big-dec.sql",
    "chars": 160,
    "preview": "drop table if exists big_dec cascade;\n\ncreate table big_dec (\n    id int primary key,\n    my_big_dec numeric\n);\n\ninsert "
  },
  {
    "path": "magnum/src/test/resources/pg/car.sql",
    "chars": 609,
    "preview": "DROP TABLE IF EXISTS car;\n\nCREATE TABLE car (\n    model VARCHAR(50) NOT NULL,\n    id bigint PRIMARY KEY,\n    top_speed I"
  },
  {
    "path": "magnum/src/test/resources/pg/my-time.sql",
    "chars": 329,
    "preview": "drop table if exists my_time cascade;\n\ncreate table my_time (\n  a timestamptz not null,\n  b date not null,\n  c time not "
  },
  {
    "path": "magnum/src/test/resources/pg/my-user.sql",
    "chars": 228,
    "preview": "drop table if exists my_user cascade;\n\ncreate table my_user (\n    first_name text not null,\n    id bigint primary key ge"
  },
  {
    "path": "magnum/src/test/resources/pg/no-id.sql",
    "chars": 353,
    "preview": "drop table if exists no_id;\n\ncreate table no_id (\n    created_at timestamptz not null default now(),\n    user_name text "
  },
  {
    "path": "magnum/src/test/resources/pg/person.sql",
    "chars": 764,
    "preview": "drop table if exists person cascade;\n\ncreate table person (\n    id bigint primary key,\n    first_name varchar(50),\n    l"
  },
  {
    "path": "magnum/src/test/scala/ClickHouseTests.scala",
    "chars": 1950,
    "preview": "import com.augustnagro.magnum.*\nimport com.clickhouse.client.config.ClickHouseDefaults\nimport com.clickhouse.jdbc.ClickH"
  },
  {
    "path": "magnum/src/test/scala/EffectiveSubsetTests.scala",
    "chars": 586,
    "preview": "import com.augustnagro.magnum.*\nimport munit.FunSuite\n\nclass EffectiveSubsetTests extends FunSuite:\n\n  test(\"DbSchema ma"
  },
  {
    "path": "magnum/src/test/scala/H2Tests.scala",
    "chars": 900,
    "preview": "import com.augustnagro.magnum.*\nimport munit.FunSuite\nimport org.h2.jdbcx.JdbcDataSource\nimport shared.*\n\nimport java.ni"
  },
  {
    "path": "magnum/src/test/scala/MySqlTests.scala",
    "chars": 1527,
    "preview": "import com.augustnagro.magnum.*\nimport com.augustnagro.magnum.UUIDCodec.VarCharUUIDCodec\nimport com.dimafeng.testcontain"
  },
  {
    "path": "magnum/src/test/scala/OracleTests.scala",
    "chars": 7330,
    "preview": "import com.augustnagro.magnum.*\nimport com.augustnagro.magnum.UUIDCodec.VarCharUUIDCodec\nimport com.dimafeng.testcontain"
  },
  {
    "path": "magnum/src/test/scala/PgTests.scala",
    "chars": 1378,
    "preview": "import com.augustnagro.magnum.*\nimport com.dimafeng.testcontainers.PostgreSQLContainer\nimport com.dimafeng.testcontainer"
  },
  {
    "path": "magnum/src/test/scala/SqliteTests.scala",
    "chars": 5209,
    "preview": "import com.augustnagro.magnum.*\nimport com.augustnagro.magnum.UUIDCodec.VarCharUUIDCodec\nimport munit.FunSuite\nimport or"
  },
  {
    "path": "magnum/src/test/scala/opaques.scala",
    "chars": 266,
    "preview": "import com.augustnagro.magnum.*\n\nobject opaques:\n  opaque type Age = Int\n  object Age:\n    def apply(value: Int): Age = "
  },
  {
    "path": "magnum/src/test/scala/shared/BigDecTests.scala",
    "chars": 637,
    "preview": "package shared\n\nimport com.augustnagro.magnum.*\nimport munit.{FunSuite, Location}\n\ndef bigDecTests(suite: FunSuite, dbTy"
  },
  {
    "path": "magnum/src/test/scala/shared/Color.scala",
    "chars": 107,
    "preview": "package shared\n\nimport com.augustnagro.magnum.DbCodec\n\nenum Color derives DbCodec:\n  case Red, Green, Blue\n"
  },
  {
    "path": "magnum/src/test/scala/shared/DateTimeTests.scala",
    "chars": 1823,
    "preview": "package shared\n\nimport com.augustnagro.magnum.*\nimport munit.{FunSuite, Location}\n\nimport java.sql.Connection\nimport jav"
  },
  {
    "path": "magnum/src/test/scala/shared/EmbeddedFragTests.scala",
    "chars": 1114,
    "preview": "package shared\n\nimport com.augustnagro.magnum.*\nimport munit.{FunSuite, Location}\n\nimport java.util.UUID\n\ndef embeddedFr"
  },
  {
    "path": "magnum/src/test/scala/shared/EntityCreatorTests.scala",
    "chars": 3558,
    "preview": "package shared\n\nimport com.augustnagro.magnum.*\nimport com.augustnagro.magnum.SqlException\nimport munit.{FunSuite, Locat"
  },
  {
    "path": "magnum/src/test/scala/shared/ImmutableRepoTests.scala",
    "chars": 4784,
    "preview": "package shared\n\nimport com.augustnagro.magnum.*\nimport munit.{FunSuite, Location}\n\nimport java.sql.{Connection, Prepared"
  },
  {
    "path": "magnum/src/test/scala/shared/MultilineFragTests.scala",
    "chars": 1056,
    "preview": "package shared\n\nimport com.augustnagro.magnum.*\nimport munit.{FunSuite, Location}\n\ndef multilineFragTests(suite: FunSuit"
  },
  {
    "path": "magnum/src/test/scala/shared/NoIdTests.scala",
    "chars": 960,
    "preview": "package shared\n\nimport com.augustnagro.magnum.*\nimport munit.{FunSuite, Location}\n\nimport java.time.OffsetDateTime\n\ndef "
  },
  {
    "path": "magnum/src/test/scala/shared/OptionalProductTests.scala",
    "chars": 986,
    "preview": "package shared\n\nimport com.augustnagro.magnum.*\nimport munit.{FunSuite, Location}\n\nimport java.time.OffsetDateTime\n\ndef "
  },
  {
    "path": "magnum/src/test/scala/shared/RepoTests.scala",
    "chars": 11808,
    "preview": "package shared\n\nimport com.augustnagro.magnum.*\nimport com.augustnagro.magnum.{BatchUpdateResult, SqlException, ColumnNa"
  },
  {
    "path": "magnum/src/test/scala/shared/SharedTests.scala",
    "chars": 850,
    "preview": "package shared\n\nimport com.augustnagro.magnum.*\nimport munit.{FunSuite, Location}\n\nimport java.time.{LocalTime, OffsetDa"
  },
  {
    "path": "magnum/src/test/scala/shared/SpecTests.scala",
    "chars": 4714,
    "preview": "package shared\n\nimport com.augustnagro.magnum.*\nimport munit.{FunSuite, Location}\n\nimport java.time.{OffsetDateTime, Zon"
  },
  {
    "path": "magnum/src/test/scala/shared/SqlNameTests.scala",
    "chars": 693,
    "preview": "package shared\n\nimport com.augustnagro.magnum.*\nimport munit.{FunSuite, Location}\n\nimport java.time.OffsetDateTime\n\ndef "
  },
  {
    "path": "magnum/src/test/scala/shared/TableInfoTests.scala",
    "chars": 3160,
    "preview": "package shared\n\nimport com.augustnagro.magnum.*\nimport munit.FunSuite\n\nimport java.time.OffsetDateTime\nimport java.util."
  },
  {
    "path": "magnum/src/test/scala/shared/TupleTests.scala",
    "chars": 3348,
    "preview": "package shared\n\nimport com.augustnagro.magnum.*\nimport munit.{FunSuite, Location}\n\nimport java.sql.{PreparedStatement, R"
  },
  {
    "path": "magnum-pg/src/main/scala/com/augustnagro/magnum/pg/PgCodec.scala",
    "chars": 13231,
    "preview": "package com.augustnagro.magnum.pg\n\nimport com.augustnagro.magnum.DbCodec\nimport org.postgresql.geometric.{\n  PGbox,\n  PG"
  },
  {
    "path": "magnum-pg/src/main/scala/com/augustnagro/magnum/pg/SqlArrayCodec.scala",
    "chars": 7676,
    "preview": "package com.augustnagro.magnum.pg\n\nimport java.sql\nimport java.sql.JDBCType\nimport java.time.{OffsetDateTime, ZoneOffset"
  },
  {
    "path": "magnum-pg/src/main/scala/com/augustnagro/magnum/pg/enums/PgEnumDbCodec.scala",
    "chars": 2786,
    "preview": "package com.augustnagro.magnum.pg.enums\n\nimport com.augustnagro.magnum.{DbCodec, DerivingUtil}\n\nimport java.sql.{JDBCTyp"
  },
  {
    "path": "magnum-pg/src/main/scala/com/augustnagro/magnum/pg/enums/PgEnumToScalaEnumSqlArrayCodec.scala",
    "chars": 2789,
    "preview": "package com.augustnagro.magnum.pg.enums\n\nimport com.augustnagro.magnum.DerivingUtil\nimport com.augustnagro.magnum.pg.Sql"
  },
  {
    "path": "magnum-pg/src/main/scala/com/augustnagro/magnum/pg/enums/PgStringToScalaEnumSqlArrayCodec.scala",
    "chars": 2305,
    "preview": "package com.augustnagro.magnum.pg.enums\n\nimport com.augustnagro.magnum.DerivingUtil\nimport com.augustnagro.magnum.pg.Sql"
  },
  {
    "path": "magnum-pg/src/main/scala/com/augustnagro/magnum/pg/json/JsonBDbCodec.scala",
    "chars": 889,
    "preview": "package com.augustnagro.magnum.pg.json\n\nimport com.augustnagro.magnum.DbCodec\nimport org.postgresql.util.PGobject\n\nimpor"
  },
  {
    "path": "magnum-pg/src/main/scala/com/augustnagro/magnum/pg/json/JsonDbCodec.scala",
    "chars": 886,
    "preview": "package com.augustnagro.magnum.pg.json\n\nimport com.augustnagro.magnum.DbCodec\nimport org.postgresql.util.PGobject\n\nimpor"
  },
  {
    "path": "magnum-pg/src/main/scala/com/augustnagro/magnum/pg/xml/XmlDbCodec.scala",
    "chars": 884,
    "preview": "package com.augustnagro.magnum.pg.xml\n\nimport com.augustnagro.magnum.DbCodec\nimport org.postgresql.util.PGobject\n\nimport"
  },
  {
    "path": "magnum-pg/src/test/resources/pg-car.sql",
    "chars": 546,
    "preview": "drop table if exists mag_car;\n\ncreate table mag_car (\n    id bigint primary key,\n    text_colors text[] not null,\n    te"
  },
  {
    "path": "magnum-pg/src/test/resources/pg-service-list.sql",
    "chars": 191,
    "preview": "drop table if exists mag_service_list;\n\ncreate table mag_service_list (\n    id bigint primary key generated by default a"
  },
  {
    "path": "magnum-pg/src/test/resources/pg-user.sql",
    "chars": 1524,
    "preview": "drop table if exists mag_user;\ndrop type if exists colour;\n\ncreate type Colour as enum ('red_orange', 'Greenish', 'blue'"
  },
  {
    "path": "magnum-pg/src/test/scala/CirceJsonBDbCodec.scala",
    "chars": 493,
    "preview": "import com.augustnagro.magnum.pg.json.JsonBDbCodec\nimport io.circe.{Codec, Decoder, Encoder, JsonObject}\nimport io.circe"
  },
  {
    "path": "magnum-pg/src/test/scala/CirceJsonDbCodec.scala",
    "chars": 489,
    "preview": "import com.augustnagro.magnum.pg.json.JsonDbCodec\nimport io.circe.{Codec, Decoder, Encoder, JsonObject}\nimport io.circe."
  },
  {
    "path": "magnum-pg/src/test/scala/Color.scala",
    "chars": 265,
    "preview": "import com.augustnagro.magnum.{\n  DbCodec,\n  PostgresDbType,\n  SqlName,\n  SqlNameMapper,\n  Table\n}\n\n@SqlName(\"colour\")\n@"
  },
  {
    "path": "magnum-pg/src/test/scala/LastService.scala",
    "chars": 204,
    "preview": "import com.augustnagro.magnum.pg.json.JsonDbCodec\n\nimport io.circe.Codec\nimport java.time.LocalDate\n\ncase class LastServ"
  },
  {
    "path": "magnum-pg/src/test/scala/MagCar.scala",
    "chars": 484,
    "preview": "import com.augustnagro.magnum.{\n  DbCodec,\n  Id,\n  PostgresDbType,\n  SqlNameMapper,\n  Table\n}\nimport com.augustnagro.mag"
  },
  {
    "path": "magnum-pg/src/test/scala/MagUser.scala",
    "chars": 1544,
    "preview": "import com.augustnagro.magnum.{DbCodec, Id, PostgresDbType, SqlName, Table}\nimport com.augustnagro.magnum.pg.PgCodec.giv"
  },
  {
    "path": "magnum-pg/src/test/scala/MyJsonB.scala",
    "chars": 115,
    "preview": "import io.circe.Codec\n\ncase class MyJsonB(a: Vector[Int], b: String)\n    derives Codec.AsObject, CirceJsonBDbCodec\n"
  },
  {
    "path": "magnum-pg/src/test/scala/MyXml.scala",
    "chars": 317,
    "preview": "import com.augustnagro.magnum.DbCodec\nimport com.augustnagro.magnum.pg.xml.XmlDbCodec\n\nimport scala.xml.{Document, XML, "
  },
  {
    "path": "magnum-pg/src/test/scala/PgCodecTests.scala",
    "chars": 8293,
    "preview": "import com.dimafeng.testcontainers.PostgreSQLContainer\nimport com.dimafeng.testcontainers.munit.fixtures.TestContainersF"
  },
  {
    "path": "magnum-zio/src/main/scala/com/augustnagro/magnum/magzio/TransactorZIO.scala",
    "chars": 3367,
    "preview": "package com.augustnagro.magnum.magzio\n\nimport com.augustnagro.magnum.{DbCon, DbTx, SqlException, SqlLogger}\nimport zio.{"
  },
  {
    "path": "magnum-zio/src/test/resources/pg/big-dec.sql",
    "chars": 160,
    "preview": "drop table if exists big_dec cascade;\n\ncreate table big_dec (\n    id int primary key,\n    my_big_dec numeric\n);\n\ninsert "
  },
  {
    "path": "magnum-zio/src/test/resources/pg/car.sql",
    "chars": 609,
    "preview": "DROP TABLE IF EXISTS car;\n\nCREATE TABLE car (\n    model VARCHAR(50) NOT NULL,\n    id bigint PRIMARY KEY,\n    top_speed I"
  },
  {
    "path": "magnum-zio/src/test/resources/pg/my-user.sql",
    "chars": 228,
    "preview": "drop table if exists my_user cascade;\n\ncreate table my_user (\n    first_name text not null,\n    id bigint primary key ge"
  },
  {
    "path": "magnum-zio/src/test/resources/pg/no-id.sql",
    "chars": 353,
    "preview": "drop table if exists no_id;\n\ncreate table no_id (\n    created_at timestamptz not null default now(),\n    user_name text "
  },
  {
    "path": "magnum-zio/src/test/resources/pg/person.sql",
    "chars": 764,
    "preview": "drop table if exists person cascade;\n\ncreate table person (\n    id bigint primary key,\n    first_name varchar(50),\n    l"
  },
  {
    "path": "magnum-zio/src/test/scala/com/augustnagro/magnum/magzio/ImmutableRepoZioTests.scala",
    "chars": 5895,
    "preview": "package com.augustnagro.magnum.magzio\n\nimport com.augustnagro.magnum.*\nimport munit.{FunSuite, Location}\nimport zio.*\n\ni"
  },
  {
    "path": "magnum-zio/src/test/scala/com/augustnagro/magnum/magzio/PgZioTests.scala",
    "chars": 1757,
    "preview": "package com.augustnagro.magnum.magzio\n\nimport com.augustnagro.magnum.*\nimport com.dimafeng.testcontainers.PostgreSQLCont"
  },
  {
    "path": "project/build.properties",
    "chars": 19,
    "preview": "sbt.version=1.12.8\n"
  },
  {
    "path": "project/plugins.sbt",
    "chars": 110,
    "preview": "addSbtPlugin(\"org.scalameta\" % \"sbt-scalafmt\" % \"2.5.2\")\naddSbtPlugin(\"com.github.sbt\" % \"sbt-pgp\" % \"2.3.0\")\n"
  }
]

About this extraction

This page contains the full source code of the AugustNagro/magnum GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 126 files (285.6 KB), approximately 81.6k tokens, and a symbol index with 31 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard, or download it as a .txt file.

Extracted by GitExtract — a free GitHub-repo-to-text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!