Repository: tursodatabase/libsql-client-ts Branch: main Commit: 697ae59038dc Files: 144 Total size: 387.5 KB Directory structure: gitextract_x14w5we3/ ├── .github/ │ └── workflows/ │ ├── ci.yaml │ ├── pages.yaml │ └── publish.yml ├── .gitignore ├── .husky/ │ ├── install.mjs │ └── pre-commit ├── .lintstagedrc.json ├── .npmrc ├── .prettierignore ├── .prettierrc ├── CHANGELOG.md ├── CONTRIBUTING.md ├── LICENSE ├── examples/ │ ├── batch/ │ │ ├── .gitignore │ │ ├── README.md │ │ ├── index.mjs │ │ └── package.json │ ├── cloud-encryption/ │ │ ├── .gitignore │ │ ├── README.md │ │ ├── package.json │ │ ├── remote.mjs │ │ └── sync.mjs │ ├── encryption/ │ │ ├── .gitignore │ │ ├── README.md │ │ ├── index.mjs │ │ └── package.json │ ├── local/ │ │ ├── .gitignore │ │ ├── README.md │ │ ├── index.mjs │ │ └── package.json │ ├── memory/ │ │ ├── .gitignore │ │ ├── README.md │ │ ├── index.mjs │ │ └── package.json │ ├── ollama/ │ │ ├── .gitignore │ │ ├── README.md │ │ ├── index.mjs │ │ └── package.json │ ├── read-your-writes/ │ │ ├── package.json │ │ └── read_your_writes.js │ ├── remote/ │ │ ├── .gitignore │ │ ├── README.md │ │ ├── index.mjs │ │ └── package.json │ ├── sync/ │ │ ├── .gitignore │ │ ├── README.md │ │ ├── index.mjs │ │ └── package.json │ ├── transactions/ │ │ ├── .gitignore │ │ ├── README.md │ │ ├── index.mjs │ │ └── package.json │ └── vector/ │ ├── .gitignore │ ├── README.md │ ├── index.mjs │ └── package.json ├── package.json ├── packages/ │ ├── libsql-client/ │ │ ├── README.md │ │ ├── examples/ │ │ │ ├── example.js │ │ │ ├── package.json │ │ │ ├── shell.js │ │ │ ├── sync.js │ │ │ ├── sync_offline.js │ │ │ └── sync_vector.js │ │ ├── jest.config.js │ │ ├── package-cjs.json │ │ ├── package.json │ │ ├── smoke_test/ │ │ │ ├── vercel/ │ │ │ │ ├── .gitignore │ │ │ │ ├── app/ │ │ │ │ │ ├── .gitignore │ │ │ │ │ ├── api/ │ │ │ │ │ │ └── function.ts │ │ │ │ │ └── public/ │ │ │ │ │ └── index.html │ │ │ │ ├── package.json │ │ │ │ ├── test.js │ │ │ │ └── tsconfig.json │ │ │ └── workers/ 
│ │ │ ├── .gitignore │ │ │ ├── package.json │ │ │ ├── test.js │ │ │ ├── worker.js │ │ │ └── wrangler.toml │ │ ├── src/ │ │ │ ├── __tests__/ │ │ │ │ ├── client.test.ts │ │ │ │ ├── config.test.ts │ │ │ │ ├── helpers.ts │ │ │ │ ├── mocks/ │ │ │ │ │ ├── handlers.ts │ │ │ │ │ └── node.ts │ │ │ │ └── uri.test.ts │ │ │ ├── hrana.ts │ │ │ ├── http.ts │ │ │ ├── node.ts │ │ │ ├── sql_cache.ts │ │ │ ├── sqlite3.ts │ │ │ ├── web.ts │ │ │ └── ws.ts │ │ ├── tsconfig.base.json │ │ ├── tsconfig.build-cjs.json │ │ ├── tsconfig.build-esm.json │ │ ├── tsconfig.json │ │ └── typedoc.json │ ├── libsql-client-wasm/ │ │ ├── LICENSE │ │ ├── examples/ │ │ │ ├── browser/ │ │ │ │ ├── README.md │ │ │ │ ├── index.html │ │ │ │ ├── index.js │ │ │ │ └── package.json │ │ │ └── node/ │ │ │ ├── index.js │ │ │ └── package.json │ │ ├── jest.config.js │ │ ├── package-cjs.json │ │ ├── package.json │ │ ├── src/ │ │ │ └── wasm.ts │ │ ├── tsconfig.base.json │ │ ├── tsconfig.build-esm.json │ │ ├── tsconfig.json │ │ └── typedoc.json │ └── libsql-core/ │ ├── jest.config.js │ ├── package-cjs.json │ ├── package.json │ ├── src/ │ │ ├── api.ts │ │ ├── config.ts │ │ ├── uri.ts │ │ └── util.ts │ ├── tsconfig.base.json │ ├── tsconfig.build-cjs.json │ ├── tsconfig.build-esm.json │ ├── tsconfig.json │ └── typedoc.json └── testing/ ├── hrana-test-server/ │ ├── .gitignore │ ├── LICENSE │ ├── README.md │ ├── c3.py │ ├── from_proto.py │ ├── gen_sqlite3_error_map.py │ ├── proto/ │ │ ├── generate.sh │ │ ├── hrana/ │ │ │ ├── http_pb2.py │ │ │ └── ws_pb2.py │ │ ├── hrana.http.proto │ │ ├── hrana.proto │ │ ├── hrana.ws.proto │ │ └── hrana_pb2.py │ ├── requirements.txt │ ├── server_v1.py │ ├── server_v2.py │ ├── server_v3.py │ ├── sqlite3_error_map.py │ └── to_proto.py └── test.sh ================================================ FILE CONTENTS ================================================ ================================================ FILE: .github/workflows/ci.yaml ================================================ name: "CI" 
on: push: branches: ["main"] pull_request: jobs: "typecheck": name: "Typecheck" runs-on: ubuntu-latest timeout-minutes: 2 steps: - name: "Checkout this repo" uses: actions/checkout@v4 - name: "Setup Node.js" uses: actions/setup-node@v4 with: node-version: "18.x" - name: "Install node dependencies and build" run: "npm ci && npm run build" - name: "Typecheck" run: "npm run typecheck" "format-check": name: "Check formatting" runs-on: ubuntu-latest timeout-minutes: 2 steps: - name: "Checkout this repo" uses: actions/checkout@v4 - name: "Setup Node.js" uses: actions/setup-node@v4 with: node-version: "18.x" - name: "Install node dependencies" run: "npm ci" - name: "Check formatting" run: "npm run format:check" "test-against-sqld": name: "Tests against sqld" runs-on: ubuntu-latest timeout-minutes: 2 defaults: run: working-directory: ./packages/libsql-client env: { "NODE_OPTIONS": "--trace-warnings" } steps: - name: "Checkout this repo" uses: actions/checkout@v4 - name: "Setup Node.js" uses: actions/setup-node@v4 with: node-version: "18.x" - name: "Build core" run: "npm ci && npm run build" working-directory: ./packages/libsql-core - name: "Install npm dependencies" run: "npm ci" - name: "Build" run: "npm run build" - name: Run Docker container in the background run: docker run -d -p 8080:8080 -e SQLD_NODE=primary ghcr.io/tursodatabase/libsql-server:latest - name: Verify container is running run: docker ps - name: "Test against sqld" run: "npm test" env: { "URL": "http://localhost:8080", "SERVER": "sqld" } "wasm-test": name: "Build and test Wasm on Node.js" runs-on: ubuntu-latest timeout-minutes: 2 defaults: run: working-directory: ./packages/libsql-client-wasm env: { "NODE_OPTIONS": "--trace-warnings" } steps: - name: "Checkout this repo" uses: actions/checkout@v4 - name: "Setup Node.js" uses: actions/setup-node@v4 with: node-version: "18.x" cache-dependency-path: "packages/libsql-client-wasm" - name: "Build core" run: "npm ci && npm run build" working-directory: 
./packages/libsql-core - name: "Install npm dependencies" run: "npm ci" - name: "Build" run: "npm run build" - name: "Test example" run: "cd examples/node && npm i && node index.js" env: { "URL": "file:///tmp/example.db" } "node-test": name: "Build and test on Node.js" runs-on: ubuntu-latest timeout-minutes: 2 defaults: run: working-directory: ./packages/libsql-client env: { "NODE_OPTIONS": "--trace-warnings" } steps: - name: "Checkout this repo" uses: actions/checkout@v4 - name: "Setup Node.js" uses: actions/setup-node@v4 with: node-version: "18.x" - name: "Build core" run: "npm ci && npm run build" working-directory: ./packages/libsql-core - name: "Install npm dependencies" run: "npm ci" - name: "Setup Python" uses: actions/setup-python@v4 with: python-version: "3.10" - name: "Install pip dependencies" run: "pip install -r ../../testing/hrana-test-server/requirements.txt" - name: "Build" run: "npm run build" - name: "Test Hrana 1 over WebSocket" run: "python ../../testing/hrana-test-server/server_v1.py npm test" env: { "URL": "ws://localhost:8080", "SERVER": "test_v1" } - name: "Test Hrana 2 over WebSocket" run: "python ../../testing/hrana-test-server/server_v2.py npm test" env: { "URL": "ws://localhost:8080", "SERVER": "test_v2" } - name: "Test Hrana 2 over HTTP" run: "python ../../testing/hrana-test-server/server_v2.py npm test" env: { "URL": "http://localhost:8080", "SERVER": "test_v2" } # - name: "Test Hrana 3 over WebSocket" # run: "python ../../testing/hrana-test-server/server_v3.py npm test" # env: {"URL": "ws://localhost:8080", "SERVER": "test_v3"} # - name: "Test Hrana 3 over HTTP" # run: "python ../../testing/hrana-test-server/server_v3.py npm test" # env: {"URL": "http://localhost:8080", "SERVER": "test_v3"} - name: "Test local file" run: "npm test" env: { "URL": "file:///tmp/test.db" } - name: "Test example" run: "cd examples && npm i && node example.js" env: { "URL": "file:///tmp/example.db" } "workers-test": name: "Build and test with Cloudflare 
Workers" if: false runs-on: ubuntu-latest timeout-minutes: 2 defaults: run: working-directory: ./packages/libsql-client env: "CLOUDFLARE_API_TOKEN": "${{ secrets.CLOUDFLARE_API_TOKEN }}" "CLOUDFLARE_ACCOUNT_ID": "${{ secrets.CLOUDFLARE_ACCOUNT_ID }}" steps: - name: "Checkout this repo" uses: actions/checkout@v4 - name: "Setup Node.js" uses: actions/setup-node@v4 with: node-version: "lts/Hydrogen" - name: "Build core" run: "npm ci && npm run build" working-directory: ./packages/libsql-core - name: "Install npm dependencies" run: "npm ci" - name: "Setup Python" uses: actions/setup-python@v4 with: python-version: "3.10" - name: "Install pip dependencies" run: "pip install -r ../../testing/hrana-test-server/requirements.txt" - name: "Build" run: "npm run build" - name: "Install npm dependencies of the Workers test" run: "cd smoke_test/workers && npm link ../.." - name: "Local test with Hrana 1 over WebSocket" run: "cd smoke_test/workers && python ../../../../testing/hrana-test-server/server_v1.py node --dns-result-order=ipv4first test.js" env: { "LOCAL": "1", "URL": "ws://localhost:8080" } - name: "Local test with Hrana 2 over WebSocket" run: "cd smoke_test/workers && python ../../../../testing/hrana-test-server/server_v2.py node --dns-result-order=ipv4first test.js" env: { "LOCAL": "1", "URL": "ws://localhost:8080" } - name: "Local test with Hrana 2 over HTTP" run: "cd smoke_test/workers && python ../../../../testing/hrana-test-server/server_v2.py node --dns-result-order=ipv4first test.js" env: { "LOCAL": "1", "URL": "http://localhost:8080" } # - name: "Local test with Hrana 3 over WebSocket" # run: "cd smoke_test/workers && python ../../../../testing/hrana-test-server/server_v3.py node --dns-result-order=ipv4first test.js" # env: {"LOCAL": "1", "URL": "ws://localhost:8080"} # - name: "Local test with Hrana 3 over HTTP" # run: "cd smoke_test/workers && python ../../../../testing/hrana-test-server/server_v3.py node --dns-result-order=ipv4first test.js" # env: {"LOCAL": 
"1", "URL": "http://localhost:8080"} # - name: "Non-local test with Hrana 1 over WebSocket" # run: "cd smoke_test/workers && python ../../../../testing/hrana-test-server/server_v1.py node test.js" # env: {"LOCAL": "0", "URL": "ws://localhost:8080"} # - name: "Non-local test with Hrana 2 over WebSocket" # run: "cd smoke_test/workers && python ../../../../testing/hrana-test-server/server_v2.py node test.js" # env: {"LOCAL": "0", "URL": "ws://localhost:8080"} # - name: "Non-local test with Hrana 2 over HTTP" # run: "cd smoke_test/workers && python ../../../../testing/hrana-test-server/server_v2.py node test.js" # env: {"LOCAL": "0", "URL": "http://localhost:8080"} # - name: "Non-local test with Hrana 3 over WebSocket" # run: "cd smoke_test/workers && python ../../../../testing/hrana-test-server/server_v3.py node test.js" # env: {"LOCAL": "0", "URL": "ws://localhost:8080"} # - name: "Non-local test with Hrana 3 over HTTP" # run: "cd smoke_test/workers && python ../../../../testing/hrana-test-server/server_v3.py node test.js" # env: {"LOCAL": "0", "URL": "http://localhost:8080"} # "vercel-test": # name: "Build and test with Vercel Edge Functions" # runs-on: ubuntu-latest # env: # VERCEL_TOKEN: "${{ secrets.VERCEL_TOKEN }}" # VERCEL_PROJECT_NAME: "smoke-test" # steps: # - name: "Checkout this repo" # uses: actions/checkout@v4 # - name: "Setup Node.js" # uses: actions/setup-node@v4 # with: # node-version: "lts/Hydrogen" # cache: "npm" # - name: "Install npm dependencies" # run: "npm ci" # - name: "Checkout hrana-test-server" # uses: actions/checkout@v4 # with: # repository: "libsql/hrana-test-server" # path: "hrana-test-server" # - name: "Setup Python" # uses: actions/setup-python@v4 # with: # python-version: "3.10" # cache: "pip" # - name: "Install pip dependencies" # run: "pip install -r hrana-test-server/requirements.txt" # - name: "Build" # run: "npm run build" # - name: "Install npm dependencies of the Vercel test" # run: "cd smoke_test/vercel && npm install" # - 
name: "Test with Hrana 1 over WebSocket" # run: "cd smoke_test/vercel && python ../../hrana-test-server/server_v1.py node test.js" # env: {"URL": "ws://localhost:8080"} # - name: "Test with Hrana 2 over WebSocket" # run: "cd smoke_test/vercel && python ../../hrana-test-server/server_v2.py node test.js" # env: {"URL": "ws://localhost:8080"} # - name: "Test with Hrana 2 over HTTP" # run: "cd smoke_test/vercel && python ../../hrana-test-server/server_v2.py node test.js" # env: {"URL": "http://localhost:8080"} # - name: "Test with Hrana 3 over WebSocket" # run: "cd smoke_test/vercel && python ../../hrana-test-server/server_v3.py node test.js" # env: {"URL": "ws://localhost:8080"} # - name: "Test with Hrana 3 over HTTP" # run: "cd smoke_test/vercel && python ../../hrana-test-server/server_v3.py node test.js" # env: {"URL": "http://localhost:8080"} ================================================ FILE: .github/workflows/pages.yaml ================================================ name: "GitHub Pages" on: push: branches: ["main"] jobs: "build": name: "Build the docs" runs-on: ubuntu-latest defaults: run: working-directory: ./packages/libsql-client steps: - name: "Checkout this repo" uses: actions/checkout@v4 - name: "Setup Node.js" uses: actions/setup-node@v4 with: node-version: "${{ matrix.node-version }}" cache: "npm" - name: "Build core" run: "npm ci && npm run build" working-directory: ./packages/libsql-core - name: "Install npm dependencies" run: "npm ci" - name: "Build" run: "npm run typedoc" - name: "Upload GitHub Pages artifact" uses: actions/upload-pages-artifact@v3 id: deployment with: path: "./packages/libsql-client/docs" "deploy": name: "Deploy the docs to GitHub Pages" needs: "build" permissions: pages: write id-token: write environment: name: github-pages url: ${{ steps.deployment.outputs.page_url }} runs-on: ubuntu-latest steps: - name: "Deploy to GitHub Pages" id: deployment uses: actions/deploy-pages@v4 ================================================ 
FILE: .github/workflows/publish.yml ================================================ name: publish permissions: contents: read id-token: write on: push: tags: - v* jobs: publish-to-npm: name: "Publish new version to NPM" runs-on: ubuntu-latest timeout-minutes: 5 env: NODE_OPTIONS: "--trace-warnings" steps: - name: "Checkout this repo" uses: actions/checkout@v4 - name: "Setup Node.js" uses: actions/setup-node@v4 with: node-version: "20.x" - name: "Update npm" run: npm install -g npm@11 - name: "Build core" run: "npm ci && npm run build" working-directory: ./packages/libsql-core - name: "Publish core (pre-release)" if: contains(github.ref, '-pre') run: npm publish --tag next --provenance --access public working-directory: ./packages/libsql-core - name: "Publish core (latest)" if: "!contains(github.ref, '-pre')" run: npm publish --provenance --access public working-directory: ./packages/libsql-core - name: "Install npm dependencies (client)" run: "npm ci" working-directory: ./packages/libsql-client - name: "Publish client (pre-release)" if: contains(github.ref, '-pre') run: npm publish --tag next --provenance --access public working-directory: ./packages/libsql-client - name: "Publish client (latest)" if: "!contains(github.ref, '-pre')" run: npm publish --provenance --access public working-directory: ./packages/libsql-client - name: "Install npm dependencies (client wasm)" run: "npm ci" working-directory: ./packages/libsql-client-wasm - name: "Publish client-wasm (pre-release)" if: contains(github.ref, '-pre') run: npm publish --tag next --provenance --access public working-directory: ./packages/libsql-client-wasm - name: "Publish client-wasm (latest)" if: "!contains(github.ref, '-pre')" run: npm publish --provenance --access public working-directory: ./packages/libsql-client-wasm ================================================ FILE: .gitignore ================================================ node_modules /packages/*/lib-esm /packages/*/lib-cjs /docs *.tsbuildinfo 
Session.vim packages/libsql-client/hrana-test-server ================================================ FILE: .husky/install.mjs ================================================ // See https://typicode.github.io/husky/how-to.html#ci-server-and-docker // Skip Husky install in production and CI if (process.env.NODE_ENV === "production" || process.env.CI === "true") { process.exit(0); } const husky = (await import("husky")).default; console.log(husky()); ================================================ FILE: .husky/pre-commit ================================================ lint-staged ================================================ FILE: .lintstagedrc.json ================================================ { "*.{js,ts,json,md,yaml,yml}": "prettier --write" } ================================================ FILE: .npmrc ================================================ //registry.npmjs.org/:_authToken=${NPM_TOKEN} ================================================ FILE: .prettierignore ================================================ lib-cjs lib-esm ================================================ FILE: .prettierrc ================================================ { "tabWidth": 4 } ================================================ FILE: CHANGELOG.md ================================================ # Changelog ## 0.15.10 -- 2025-07-16 - Bump to latest `libsql` package. ## 0.15.9 -- 2025-06-09 - Bump to latest `libsql` package. ## 0.15.7 -- 2025-05-20 - Bump to latest `libsql` package. ## 0.15.6 -- 2025-05-14 - Bump to latest `libsql` package. ## 0.15.5 -- 2025-05-11 - Bump to latest `libsql` package. ## 0.15.4 -- 2025-04-15 - Bump to latest `libsql` package. ## 0.15.3 -- 2025-04-11 - Bump to latest `libsql` package. ## 0.15.2 -- 2025-04-01 - Bump to latest `libsql` package. ## 0.15.1 -- 2025-03-24 - Bump to latest `libsql` package. ## 0.15.0 -- 2025-03-17 - Bump to latest `libsql` package. 
## 0.15.0-pre.3 -- 2025-03-11 - Fix Bun complaint about duplicate "prepare" key in `package.json` ## 0.15.0-pre.2 -- 2024-02-11 - Bump to latest `libsql` package. ## 0.15.0-pre.1 -- 2024-11-15 - Initial support for offline writes. ## 0.12.0 -- 2024-09-16 - Upgrade `hrana-client-ts` to latest 0.7.0 version which has stable `isomorphic-fetch` implementation (see https://github.com/libsql/hrana-client-ts/pull/19) ## 0.11.0 -- 2024-09-13 - Upgrade `libsql-js` to latest 0.4.4 version which brings full vector search support for embedded replicas (see vector search documentation here: https://docs.turso.tech/features/ai-and-embeddings) ## 0.10.0 -- 2024-08-26 - Add a migrate() API that can be used to do migrations on both schema databases and regular databases. It is mostly dedicated to schema migration tools. ## 0.8.1 -- 2024-08-03 - Fix embedded replica sync WAL index path name , which caused "No such file or directory" for local sync in some cases ([#244](https://github.com/tursodatabase/libsql-client-ts/issues/244)). ## 0.8.0 -- 2024-07-30 - No changes from 0.8.0-pre.1. ## 0.8.0-pre.1 -- 2024-07-18 - Bump hrana client to 0.6.2. - Support `cache=private|shared` [query parameter](https://www.sqlite.org/uri.html#recognized_query_parameters) in the connection string to local SQLite (https://github.com/tursodatabase/libsql-client-ts/pull/220) - Fix bug in wasm experimental client which appears when transaction are used in local mode (https://github.com/tursodatabase/libsql-client-ts/pull/231) - Add `execute(sql, args)` overload to make the API similar to other SQLite SDKs ## 0.7.0 -- 2024-06-25 - Add configurable concurrency limit for parallel query execution (defaults to 20) to address socket hangup errors. ## 0.6.2 -- 2024-06-01 - Fix compatibility issue with libSQL server versions that don't have migrations endpoint. ## 0.6.1 -- 2024-05-30 - Add an option to `batch()` to wait for schema changes to finish when using shared schema. 
## 0.6.0 -- 2024-04-28 - Bump hrana client to 0.6.0, which uses native Node fetch(). Note that `@libsql/client` now requires Node 18 or later. ## 0.5.6 -- 2024-03-12 - Bump `libsql` package dependency to 0.3.10 that adds `wasm32` as supported CPU, which is needed for StackBlitz compatibility. ## 0.5.5 -- 2024-03-11 - Bump `@libsql/libsql-wasm-experimental"` dependency to 0.0.2, which fixes a broken sqlite3_get_autocommit() export. ## 0.5.4 -- 2024-03-11 - Bump `libsql` dependency to 0.3.9, which fixes symbol not found errors on Alpine. ## 0.5.3 -- 2024-03-06 - Add `syncInterval` config option to enable periodic sync. - Bump `libsql` dependency to 0.3.7, which switches default encryption cipher to aes256cbs. ## 0.5.2 -- 2024-02-24 - Disable SQL statemen tracing in Wasm. ## 0.5.1 -- 2024-02-19 - Update `libsql` package to 0.3.2, add `encryptionCipher` option, and switch default cipher to SQLCipher. ## 0.5.0 -- 2024-02-15 - Add a `encryptionKey` config option, which enables encryption at rest for local database files. ## 0.4.0 -- 2024-01-26 - Update hrana-client package to 0.5.6. - Add a `@libsql/client-wasm` package. - Fix Bun on Linux/arm64. ## 0.3.6 -- 2023-10-20 - Fix import problems on Cloudflare Workers. - Add `rawCode` property to errors for local databases. - Update the `libsql` package to version 0.1.28. ## 0.3.5 -- 2023-09-25 - Performance improvements for local database access by reusing connection in `Client`. - Embedded replica support. - Column introspection support via ResultSet.columnTypes property. ## 0.3.4 -- 2023-09-11 - Switch to Hrana 2 by default to let Hrana 3 cook some more. ## 0.3.3 -- 2023-09-11 - Updated `@libsql/hrana-client` to version 0.5.1, which has Bun support. - Switched to `libsql` package as a replacement for `better-sqlite3`. 
## 0.3.2 -- 2023-07-29 - Updated `@libsql/hrana-client` to version 0.5.0, which implements Hrana 3 - Dropped workarounds for broken WebSocket support in Miniflare 2 - Added a `@libsql/client/node` import for explicit Node.js-specific module ## 0.3.1 -- 2023-07-20 - Added `ResultSet.toJSON()` to provide better JSON serialization. ([#61](https://github.com/libsql/libsql-client-ts/pull/61)) - Added conditional exports to `package.json` that redirect the default import of `@libsql/client` to `@libsql/client/web` on a few supported edge platforms. ([#65](https://github.com/libsql/libsql-client-ts/pull/65)) - Added `Config.fetch` to support overriding the `fetch` implementation from `@libsql/isomorphic-fetch`. ([#66](https://github.com/libsql/libsql-client-ts/pull/66)) ## 0.3.0 -- 2023-07-07 - **Changed the order of parameters to `batch()`**, so that the transaction mode is passed as the second parameter. ([#57](https://github.com/libsql/libsql-client-ts/pull/57)) - **Changed the default transaction mode to `"deferred"`**. ([#57](https://github.com/libsql/libsql-client-ts/pull/57)) - Added `Client.protocol` property to find out which protocol the client uses ([#54](https://github.com/libsql/libsql-client-ts/pull/54)). ## 0.2.2 -- 2023-06-22 - Added `intMode` field to the `Config`, which chooses whether SQLite integers are represented as numbers, bigints or strings in JavaScript ([#51](https://github.com/libsql/libsql-client-ts/pull/51)). 
## 0.2.1 -- 2023-06-13 - Added `TransactionMode` argument to `batch()` and `transaction()` ([#46](https://github.com/libsql/libsql-client-ts/pull/46)) - Added `Client.executeMultiple()` and `Transaction.executeMultiple()` ([#49](https://github.com/libsql/libsql-client-ts/pull/49)) - Added `Transaction.batch()` ([#49](https://github.com/libsql/libsql-client-ts/pull/49)) - **Changed the default transaction mode** from `BEGIN DEFERRED` to `BEGIN IMMEDIATE` ## 0.2.0 -- 2023-06-07 - **Added support for interactive transactions over HTTP** by using `@libsql/hrana-client` version 0.4 ([#44](https://github.com/libsql/libsql-client-ts/pull/44)) - Added `?tls=0` query parameter to turn off TLS for `libsql:` URLs - Changed the `libsql:` URL to use HTTP instead of WebSockets - Changed the `Value` type to include `bigint` (so that we can add support for reading integers as bigints in the future, without breaking compatibility) - Removed the `./hrana` import, added `./ws` to import the WebSocket-only client ================================================ FILE: CONTRIBUTING.md ================================================ # Prerequisites - Have `npm` installed and in PATH - Have `git` installed and in PATH # Setting up the repository for contribution - Clone this repository: `git clone https://github.com/tursodatabase/libsql-client-ts` - Change the current working directory to the cloned repository: `cd libsql-client-ts` - Install dependencies: `npm i` - Change the current working directory to `libsql-core`'s workspace: `cd packages/libsql-core` - Built the core package: `npm run build` - Go back to the root directory to start making changes: `cd ../..` ================================================ FILE: LICENSE ================================================ MIT License Copyright (c) 2023 libSQL Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without 
restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: examples/batch/.gitignore ================================================ local.db ================================================ FILE: examples/batch/README.md ================================================ # Batch This example demonstrates how to use libSQL to execute a batch of SQL statements. ## Install Dependencies ```bash npm i ``` ## Running Execute the example: ```bash node index.mjs ``` This will setup a SQLite database, execute a batch of SQL statements, and then query the results. 
================================================ FILE: examples/batch/index.mjs ================================================ import { createClient } from "@libsql/client"; const client = createClient({ url: "file:local.db", }); await client.batch( [ "CREATE TABLE IF NOT EXISTS users (email TEXT)", { sql: "INSERT INTO users VALUES (?)", args: ["first@example.com"], }, { sql: "INSERT INTO users VALUES (?)", args: ["second@example.com"], }, { sql: "INSERT INTO users VALUES (?)", args: ["third@example.com"], }, ], "write", ); const result = await client.execute("SELECT * FROM users"); console.log("Users:", result.rows); ================================================ FILE: examples/batch/package.json ================================================ { "name": "batch", "version": "1.0.0", "main": "index.mjs", "author": "Giovanni Benussi", "license": "MIT", "dependencies": { "@libsql/client": "^0.14.0" } } ================================================ FILE: examples/cloud-encryption/.gitignore ================================================ local.db ================================================ FILE: examples/cloud-encryption/README.md ================================================ # Cloud Encryption These examples demonstrates how to use Turso Cloud encryption. 
Visit the documentation here - [Cloud Encryption](https://docs.turso.tech/cloud/encryption) ## Install Dependencies ```bash npm i ``` ## Running Execute the example which operates over remotely encrypted database: ```bash node remote.mjs ``` Cloud encryption also supports sync: ```bash node sync.mjs ``` ================================================ FILE: examples/cloud-encryption/package.json ================================================ { "name": "batch", "version": "1.0.0", "main": "remote.mjs", "author": "Turso Authors", "license": "MIT", "dependencies": { "@libsql/client": "^0.16.0" } } ================================================ FILE: examples/cloud-encryption/remote.mjs ================================================ import { createClient } from "@libsql/client"; const client = createClient({ url: process.env.TURSO_DATABASE_URL, authToken: process.env.TURSO_AUTH_TOKEN, remoteEncryptionKey: process.env.TURSO_REMOTE_ENCRYPTION_KEY, }); await client.batch( [ "CREATE TABLE IF NOT EXISTS users (email TEXT)", "INSERT INTO users VALUES ('first@example.com')", "INSERT INTO users VALUES ('second@example.com')", "INSERT INTO users VALUES ('third@example.com')", ], "write", ); const result = await client.execute("SELECT * FROM users"); console.log("Users:", result.rows); ================================================ FILE: examples/cloud-encryption/sync.mjs ================================================ import { createClient } from "@libsql/client"; const client = createClient({ url: "file:local.db", syncUrl: process.env.TURSO_DATABASE_URL, authToken: process.env.TURSO_AUTH_TOKEN, remoteEncryptionKey: process.env.TURSO_REMOTE_ENCRYPTION_KEY, }); await client.batch( [ "CREATE TABLE IF NOT EXISTS users (email TEXT)", "INSERT INTO users VALUES ('first@example.com')", "INSERT INTO users VALUES ('second@example.com')", "INSERT INTO users VALUES ('third@example.com')", ], "write", ); const result = await client.execute("SELECT * FROM users"); 
console.log("Users:", result.rows); ================================================ FILE: examples/encryption/.gitignore ================================================ encrypted.db ================================================ FILE: examples/encryption/README.md ================================================ # Encryption This example demonstrates how to create and use an encrypted SQLite database with libSQL. ## Install Dependencies ```bash npm i ``` ## Running Execute the example: ```bash node index.mjs ``` This will setup an encrypted SQLite database, execute a batch of SQL statements, and then query the results. ================================================ FILE: examples/encryption/index.mjs ================================================ import { createClient } from "@libsql/client"; // You should set the ENCRYPTION_KEY in a environment variable // For demo purposes, we're using a fixed key const encryptionKey = "my-safe-encryption-key"; const client = createClient({ url: "file:encrypted.db", encryptionKey, }); await client.batch( [ "CREATE TABLE IF NOT EXISTS users (email TEXT)", "INSERT INTO users VALUES ('first@example.com')", "INSERT INTO users VALUES ('second@example.com')", "INSERT INTO users VALUES ('third@example.com')", ], "write", ); const result = await client.execute("SELECT * FROM users"); console.log("Users:", result.rows); ================================================ FILE: examples/encryption/package.json ================================================ { "name": "batch", "version": "1.0.0", "main": "index.mjs", "author": "Giovanni Benussi", "license": "MIT", "dependencies": { "@libsql/client": "^0.14.0" } } ================================================ FILE: examples/local/.gitignore ================================================ local.db ================================================ FILE: examples/local/README.md ================================================ # Local This example demonstrates how to use libSQL with a 
local SQLite file. ## Install Dependencies ```bash npm i ``` ## Running Execute the example: ```bash node index.mjs ``` This will setup a local SQLite database, insert some data, and then query the results. ================================================ FILE: examples/local/index.mjs ================================================ import { createClient } from "@libsql/client"; const client = createClient({ url: "file:local.db", }); await client.batch( [ "CREATE TABLE IF NOT EXISTS users (email TEXT)", "INSERT INTO users VALUES ('first@example.com')", "INSERT INTO users VALUES ('second@example.com')", "INSERT INTO users VALUES ('third@example.com')", ], "write", ); const result = await client.execute("SELECT * FROM users"); console.log("Users:", result.rows); ================================================ FILE: examples/local/package.json ================================================ { "name": "batch", "version": "1.0.0", "main": "index.mjs", "author": "Giovanni Benussi", "license": "MIT", "dependencies": { "@libsql/client": "^0.14.0" } } ================================================ FILE: examples/memory/.gitignore ================================================ local.db ================================================ FILE: examples/memory/README.md ================================================ # Memory This example demonstrates how to use libsql with an in-memory SQLite database. ## Install Dependencies ```bash npm i ``` ## Running Execute the example: ```bash node index.mjs ``` This will create an in-memory SQLite database, insert some data, and then query the results. 
================================================ FILE: examples/memory/index.mjs ================================================ import { createClient } from "@libsql/client"; const client = createClient({ url: ":memory:", }); await client.batch( [ "CREATE TABLE users (email TEXT)", "INSERT INTO users VALUES ('first@example.com')", "INSERT INTO users VALUES ('second@example.com')", "INSERT INTO users VALUES ('third@example.com')", ], "write", ); const result = await client.execute("SELECT * FROM users"); console.log("Users:", result.rows); ================================================ FILE: examples/memory/package.json ================================================ { "name": "batch", "version": "1.0.0", "main": "index.mjs", "author": "Giovanni Benussi", "license": "MIT", "dependencies": { "@libsql/client": "^0.14.0" } } ================================================ FILE: examples/ollama/.gitignore ================================================ local.db ================================================ FILE: examples/ollama/README.md ================================================ # Ollama + Vector Search Example This example demonstrates how to use libSQL vector search with a local database and Ollama. ## Install Dependencies ```bash npm i ``` ## Install Ollama [Download Ollama](https://ollama.com/download) and install it. ## Running Make sure Ollama is running with the model `mistral`: ```bash ollama run mistral ``` Execute the example: ```bash node index.mjs ``` This will setup a local SQLite database, generate embeddings using Ollama, and insert the data with embeddings, and then query the results using the vector similarity search function. 
================================================ FILE: examples/ollama/index.mjs ================================================ import { createClient } from "@libsql/client"; import ollama from "ollama"; const client = createClient({ url: "file:local.db", }); await client.batch( [ "CREATE TABLE IF NOT EXISTS movies (id INTEGER PRIMARY KEY, title TEXT NOT NULL, description TEXT NOT NULL, embedding F32_BLOB(4096))", "CREATE INDEX IF NOT EXISTS movies_embedding_idx ON movies(libsql_vector_idx(embedding))", ], "write", ); async function getEmbedding(prompt) { const response = await ollama.embeddings({ model: "mistral", prompt, }); return response.embedding; } async function insertMovie(id, title, description) { const embedding = await getEmbedding(description); await client.execute({ sql: `INSERT OR REPLACE INTO movies (id, title, description, embedding) VALUES (?, ?, ?, vector(?))`, args: [id, title, description, JSON.stringify(embedding)], }); } async function insertMovieIfNotExists(id, title, description) { const existing = await client.execute({ sql: "SELECT id FROM movies WHERE id = ?", args: [id], }); if (existing.rows.length === 0) { await insertMovie(id, title, description); console.log(`Inserted: ${title} (ID: ${id})`); } else { console.log(`Movie already exists: ${title} (ID: ${id})`); } } async function findSimilarMovies(description, limit = 3) { const queryEmbedding = await getEmbedding(description); const results = await client.execute({ sql: ` WITH vector_scores AS ( SELECT DISTINCT id, title, description, 1 - vector_distance_cos(embedding, vector32(?)) AS similarity FROM movies ORDER BY similarity DESC LIMIT ? 
) SELECT id, title, description, similarity FROM vector_scores `, args: [JSON.stringify(queryEmbedding), limit], }); return results.rows; } try { const sampleMovies = [ { id: 1, title: "Inception", description: "A thief who enters the dreams of others to steal secrets from their subconscious.", }, { id: 2, title: "The Matrix", description: "A computer programmer discovers that reality as he knows it is a simulation created by machines.", }, { id: 3, title: "Interstellar", description: "Astronauts travel through a wormhole in search of a new habitable planet for humanity.", }, ]; for (const movie of sampleMovies) { await insertMovieIfNotExists(movie.id, movie.title, movie.description); } const query = "A sci-fi movie about virtual reality and artificial intelligence"; console.log("\nSearching for movies similar to:", query); const similarMovies = await findSimilarMovies(query); console.log("\nSimilar movies found:"); similarMovies.forEach((movie) => { console.log(`\nTitle: ${movie.title}`); console.log(`Description: ${movie.description}`); console.log(`Similarity: ${movie.similarity.toFixed(4)}`); }); } catch (error) { console.error("Error:", error); } ================================================ FILE: examples/ollama/package.json ================================================ { "name": "ollama", "private": true, "main": "index.mjs", "dependencies": { "@libsql/client": "^0.14.0", "ollama": "^0.5.11" } } ================================================ FILE: examples/read-your-writes/package.json ================================================ { "name": "read-your-writes", "version": "1.0.0", "main": "index.mjs", "author": "Levy Albuquerque", "license": "MIT", "dependencies": { "@libsql/client": "^0.14.0" } } ================================================ FILE: examples/read-your-writes/read_your_writes.js ================================================ import { createClient } from "@libsql/client"; const client = createClient({ url: "file:local.db", 
syncUrl: process.env.TURSO_DATABASE_URL, authToken: process.env.TURSO_AUTH_TOKEN, readYourWrites: false, }); await client.execute("DROP TABLE users"); await client.execute("CREATE TABLE IF NOT EXISTS users (email TEXT)"); await client.sync(); await client.execute("INSERT INTO users VALUES ('first@example.com')"); await client.execute("INSERT INTO users VALUES ('second@example.com')"); await client.execute("INSERT INTO users VALUES ('third@example.com')"); { // No users yet, since no sync has happened since the inserts const result = await client.execute("SELECT * FROM users"); console.log("Users:", result.rows); } { await client.sync(); // Users are visible now, since a sync has happened after the inserts const result = await client.execute("SELECT * FROM users"); console.log("Users:", result.rows); } ================================================ FILE: examples/remote/.gitignore ================================================ local.db ================================================ FILE: examples/remote/README.md ================================================ # Remote This example demonstrates how to use libSQL with a remote database. ## Install Dependencies ```bash npm i ``` ## Running Execute the example: ```bash TURSO_DATABASE_URL="..." TURSO_AUTH_TOKEN="..." node index.mjs ``` This will connect to a remote SQLite database, insert some data, and then query the results.
================================================ FILE: examples/remote/index.mjs ================================================ import { createClient } from "@libsql/client"; const client = createClient({ url: process.env.TURSO_DATABASE_URL, authToken: process.env.TURSO_AUTH_TOKEN, }); await client.batch( [ "CREATE TABLE IF NOT EXISTS users (email TEXT)", "INSERT INTO users VALUES ('first@example.com')", "INSERT INTO users VALUES ('second@example.com')", "INSERT INTO users VALUES ('third@example.com')", ], "write", ); const result = await client.execute("SELECT * FROM users"); console.log("Users:", result.rows); ================================================ FILE: examples/remote/package.json ================================================ { "name": "batch", "version": "1.0.0", "main": "index.mjs", "author": "Giovanni Benussi", "license": "MIT", "dependencies": { "@libsql/client": "^0.14.0" } } ================================================ FILE: examples/sync/.gitignore ================================================ local.db local.db-client_wal_index ================================================ FILE: examples/sync/README.md ================================================ # Sync This example demonstrates how to use libSQL with a synced database (local file synced with a remote database). ## Install Dependencies ```bash npm i ``` ## Running Execute the example: ```bash TURSO_DATABASE_URL="..." TURSO_AUTH_TOKEN="..." node index.mjs ``` This will connect to a remote SQLite database, insert some data, and then query the results. 
================================================ FILE: examples/sync/index.mjs ================================================ import { createClient } from "@libsql/client"; const client = createClient({ url: "file:local.db", syncUrl: process.env.TURSO_DATABASE_URL, authToken: process.env.TURSO_AUTH_TOKEN, }); await client.batch( [ "CREATE TABLE IF NOT EXISTS users (email TEXT)", "INSERT INTO users VALUES ('first@example.com')", "INSERT INTO users VALUES ('second@example.com')", "INSERT INTO users VALUES ('third@example.com')", ], "write", ); const result = await client.execute("SELECT * FROM users"); console.log("Users:", result.rows); ================================================ FILE: examples/sync/package.json ================================================ { "name": "sync", "version": "1.0.0", "main": "index.mjs", "author": "Giovanni Benussi", "license": "MIT", "dependencies": { "@libsql/client": "^0.14.0" } } ================================================ FILE: examples/transactions/.gitignore ================================================ local.db ================================================ FILE: examples/transactions/README.md ================================================ # Transactions This example demonstrates how to use transactions with libSQL. ## Install Dependencies ```bash npm i ``` ## Running Execute the example: ```bash node index.mjs ``` This example will: 1. Create a new table called `users`. 2. Start a transaction. 3. Insert multiple users within the transaction. 4. Demonstrate how to roll back a transaction. 5. Start another transaction. 6. Insert more users and commit the transaction. 7. Query and display the final state of the `users` table.
================================================ FILE: examples/transactions/index.mjs ================================================ import { createClient } from "@libsql/client"; const client = createClient({ url: "file:local.db", }); await client.batch( [ "DROP TABLE IF EXISTS users", "CREATE TABLE users (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT)", "INSERT INTO users (name) VALUES ('Iku Turso')", ], "write", ); const names = ["John Doe", "Mary Smith", "Alice Jones", "Mark Taylor"]; let transaction, secondTransaction; try { transaction = await client.transaction("write"); for (const name of names) { await transaction.execute({ sql: "INSERT INTO users (name) VALUES (?)", args: [name], }); } await transaction.rollback(); secondTransaction = await client.transaction("write"); for (const name of names) { await secondTransaction.execute({ sql: "INSERT INTO users (name) VALUES (?)", args: [name], }); } await secondTransaction.commit(); } catch (e) { console.error(e); await transaction?.rollback(); await secondTransaction?.rollback(); } const result = await client.execute("SELECT * FROM users"); console.log("Users:", result.rows); ================================================ FILE: examples/transactions/package.json ================================================ { "name": "transactions", "version": "1.0.0", "main": "index.mjs", "author": "Giovanni Benussi", "license": "MIT", "dependencies": { "@libsql/client": "^0.14.0" } } ================================================ FILE: examples/vector/.gitignore ================================================ local.db ================================================ FILE: examples/vector/README.md ================================================ # Vector This example demonstrates how to use libSQL vector search with a local database.
## Install Dependencies ```bash npm i ``` ## Running Execute the example: ```bash node index.mjs ``` This will setup a local SQLite database, insert some data, and then query the results using the vector similarity search function. ================================================ FILE: examples/vector/index.mjs ================================================ import { createClient } from "@libsql/client"; const client = createClient({ url: "file:local.db", }); await client.batch( [ "DROP TABLE IF EXISTS movies", "CREATE TABLE IF NOT EXISTS movies (title TEXT, year INT, embedding F32_BLOB(3))", "CREATE INDEX movies_idx ON movies (libsql_vector_idx(embedding))", "INSERT INTO movies (title, year, embedding) VALUES ('Napoleon', 2023, vector32('[1,2,3]')), ('Black Hawk Down', 2001, vector32('[10,11,12]')), ('Gladiator', 2000, vector32('[7,8,9]')), ('Blade Runner', 1982, vector32('[4,5,6]'))", ], "write", ); const result = await client.execute( "SELECT title, year FROM vector_top_k('movies_idx', '[4,5,6]', 3) JOIN movies ON movies.rowid = id", ); console.log("Movies:", result.rows); ================================================ FILE: examples/vector/package.json ================================================ { "name": "batch", "version": "1.0.0", "main": "index.mjs", "author": "Giovanni Benussi", "license": "MIT", "dependencies": { "@libsql/client": "^0.14.0" } } ================================================ FILE: package.json ================================================ { "workspaces": [ "packages/libsql-core", "packages/libsql-client", "packages/libsql-client-wasm" ], "dependencies": {}, "scripts": { "prepare": "node .husky/install.mjs", "build": "npm run build --workspaces", "test": "./testing/test.sh", "typecheck": "npm run typecheck --workspaces", "format:check": "npm run format:check --workspaces", "lint-staged": "lint-staged" }, "devDependencies": { "lint-staged": "^15.2.2", "husky": "^9.1.5" } } ================================================ FILE: 
packages/libsql-client/README.md ================================================

libSQL TypeScript

libSQL TypeScript

Databases for all TypeScript and JS multi-tenant apps.

Turso · Docs · Quickstart · SDK Reference · Blog & Tutorials

MIT License Discord Contributors Weekly downloads Examples

> **Looking for the Turso serverless package?** Check out [`@tursodatabase/serverless`](https://www.npmjs.com/package/@tursodatabase/serverless) — the lightest option with zero native dependencies, and will be the driver to later support concurrent writes. Use `@libsql/client` if you need a battle-tested driver today with ORM integration. ## Features - 🔌 Works offline with [Embedded Replicas](https://docs.turso.tech/features/embedded-replicas/introduction) - 🌎 Works with remote Turso databases - ✨ Works with Turso [AI & Vector Search](https://docs.turso.tech/features/ai-and-embeddings) - 🔐 Supports [encryption at rest](https://docs.turso.tech/libsql#encryption-at-rest) ## Install ```bash npm install @libsql/client ``` ## Quickstart The example below uses Embedded Replicas and syncs every minute from Turso. ```ts import { createClient } from "@libsql/client"; export const turso = createClient({ url: "file:local.db", syncUrl: process.env.TURSO_DATABASE_URL, authToken: process.env.TURSO_AUTH_TOKEN, syncInterval: 60000, }); await turso.batch( [ "CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT)", { sql: "INSERT INTO users(name) VALUES (?)", args: ["Iku"], }, ], "write", ); await turso.execute({ sql: "SELECT * FROM users WHERE id = ?", args: [1], }); ``` ## Examples | Example | Description | | ------------------------------------- | --------------------------------------------------------------------------------------- | | [local](examples/local) | Uses libsql with a local SQLite file. Creates database, inserts data, and queries. | | [remote](examples/remote) | Connects to a remote database. Requires environment variables for URL and auth token. | | [sync](examples/sync) | Demonstrates synchronization between local and remote databases. | | [batch](examples/batch) | Executes multiple SQL statements in a single batch operation. 
| | [transactions](examples/transactions) | Shows transaction usage: starting, performing operations, and committing/rolling back. | | [memory](examples/memory) | Uses an in-memory SQLite database for temporary storage or fast access. | | [vector](examples/vector) | Works with vector embeddings, storing and querying for similarity search. | | [encryption](examples/encryption) | Creates and uses an encrypted SQLite database, demonstrating setup and data operations. | | [ollama](examples/ollama) | Similarity search with Ollama and Mistral. | ## Documentation Visit our [official documentation](https://docs.turso.tech/sdk/ts). ## Support Join us [on Discord](https://tur.so/discord-ts) to get help using this SDK. Report security issues [via email](mailto:security@turso.tech). ## Contributors See the [contributing guide](CONTRIBUTING.md) to learn how to get involved. ![Contributors](https://contrib.nn.ci/api?repo=tursodatabase/libsql-client-ts) good first issue ================================================ FILE: packages/libsql-client/examples/example.js ================================================ import { createClient } from "@libsql/client"; async function example() { const config = { url: process.env.URL ?? 
"file:local.db", encryptionKey: process.env.ENCRYPTION_KEY, }; const db = createClient(config); await db.batch( [ "CREATE TABLE IF NOT EXISTS users (email TEXT)", "INSERT INTO users (email) VALUES ('alice@example.com')", "INSERT INTO users (email) VALUES ('bob@example.com')", ], "write", ); await db.batch( [ { sql: "INSERT INTO users (email) VALUES (?)", args: ["alice@example.com"], }, ["INSERT INTO users (email) VALUES (?)", ["bob@example.com"]], { sql: "INSERT INTO users (email) VALUES (:email)", args: { email: "charlie@example.com" }, }, ], "write", ); const rs = await db.execute("SELECT * FROM users"); console.log(rs); } await example(); ================================================ FILE: packages/libsql-client/examples/package.json ================================================ { "name": "libsql-examples", "type": "module", "private": true, "dependencies": { "@libsql/client": "..", "@libsql/core": "../../libsql-core", "readline-sync": "^1.4.10" } } ================================================ FILE: packages/libsql-client/examples/shell.js ================================================ import * as readline from "node:readline/promises"; import { stdin, stdout, argv } from "node:process"; import * as libsql from "@libsql/client"; async function main() { const url = argv[2]; if (!url) { console.error("Please specify database URL as command-line argument"); return; } const client = libsql.createClient({ url }); const rl = readline.createInterface({ input: stdin, output: stdout }); for (;;) { const sql = await rl.question("> "); let rs; try { rs = await client.execute(sql); } catch (e) { if (e instanceof libsql.LibsqlError) { console.error(e); continue; } throw e; } console.log(JSON.stringify(rs.columns)); for (const row of rs.rows) { console.log(JSON.stringify(Array.from(row))); } } } await main(); ================================================ FILE: packages/libsql-client/examples/sync.js ================================================ import { 
createClient } from "@libsql/client"; import reader from "readline-sync"; async function example() { const config = { url: process.env.URL ?? "file:local.db", syncUrl: process.env.SYNC_URL, authToken: process.env.AUTH_TOKEN, }; const db = createClient(config); await db.sync(); await db.execute( "CREATE TABLE IF NOT EXISTS guest_book_entries (comment TEXT)", ); const rep = await db.sync(); console.log("frames_synced: " + rep.frames_synced); const comment = reader.question("Enter your comment: "); await db.execute({ sql: "INSERT INTO guest_book_entries (comment) VALUES (?)", args: [comment], }); const rep2 = await db.sync(); console.log("frames_synced: " + rep2.frames_synced); console.log("Guest book entries:"); const rs = await db.execute("SELECT * FROM guest_book_entries"); for (const row of rs.rows) { console.log(" - " + row.comment); } } example(); ================================================ FILE: packages/libsql-client/examples/sync_offline.js ================================================ import { createClient } from "@libsql/client"; import reader from "readline-sync"; async function example() { const config = { url: process.env.URL ?? 
"file:local.db", syncUrl: process.env.SYNC_URL, authToken: process.env.AUTH_TOKEN, offline: true, }; const db = createClient(config); console.log("Syncing database ..."); await db.sync(); await db.execute( "CREATE TABLE IF NOT EXISTS guest_book_entries (comment TEXT)", ); const comment = reader.question("Enter your comment: "); await db.execute({ sql: "INSERT INTO guest_book_entries (comment) VALUES (?)", args: [comment], }); console.log("Syncing database ..."); const rep2 = await db.sync(); console.log("frames_synced: " + rep2.frames_synced); console.log("Guest book entries:"); const rs = await db.execute("SELECT * FROM guest_book_entries"); for (const row of rs.rows) { console.log(" - " + row.comment); } } example(); ================================================ FILE: packages/libsql-client/examples/sync_vector.js ================================================ import { createClient } from "@libsql/client"; import reader from "readline-sync"; async function example() { const config = { url: process.env.URL ?? "file:local.db", syncUrl: process.env.SYNC_URL, authToken: process.env.AUTH_TOKEN, }; const db = createClient(config); await db.sync(); await db.execute( "CREATE TABLE IF NOT EXISTS movies (title TEXT, embedding FLOAT32(4))", ); await db.execute( "CREATE INDEX IF NOT EXISTS movies_idx ON movies (libsql_vector_idx(embedding))", ); await db.sync(); const title = reader.question("Add movie (title): "); const embedding = reader.question( "Add movie (embedding, e.g. [1,2,3,4]): ", ); await db.execute({ sql: "INSERT INTO movies (title, embedding) VALUES (?, vector32(?))", args: [title, embedding], }); await db.sync(); const all = await db.execute( "SELECT title, vector_extract(embedding) as embedding FROM movies", ); console.info("all movies:"); for (const row of all.rows) { console.log(" - " + row.title + ": " + row.embedding); } const query = reader.question("KNN query (e.g. 
[1,2,3,4]): "); const nn = await db.execute({ sql: "SELECT title, vector_extract(embedding) as embedding FROM vector_top_k('movies_idx', vector32(?), 2) as knn JOIN movies ON knn.id = movies.rowid", args: [query], }); console.info("nearest neighbors:"); for (const row of nn.rows) { console.log(" - " + row.title + ": " + row.embedding); } } example(); ================================================ FILE: packages/libsql-client/jest.config.js ================================================ export default { preset: "ts-jest/presets/default-esm", moduleNameMapper: { "^(\\.{1,2}/.*)\\.js$": "$1", }, testMatch: ["**/__tests__/*.test.[jt]s"], }; ================================================ FILE: packages/libsql-client/package-cjs.json ================================================ { "type": "commonjs" } ================================================ FILE: packages/libsql-client/package.json ================================================ { "name": "@libsql/client", "version": "0.17.2", "keywords": [ "libsql", "database", "sqlite", "serverless", "vercel", "netlify", "lambda" ], "description": "libSQL driver for TypeScript and JavaScript", "repository": { "type": "git", "url": "git+https://github.com/tursodatabase/libsql-client-ts", "directory": "packages/libsql-client" }, "authors": [ "Jan Špaček ", "Pekka Enberg ", "Jan Plhak " ], "license": "MIT", "type": "module", "main": "lib-cjs/node.js", "types": "lib-esm/node.d.ts", "exports": { ".": { "types": "./lib-esm/node.d.ts", "import": { "workerd": "./lib-esm/web.js", "deno": "./lib-esm/node.js", "edge-light": "./lib-esm/web.js", "netlify": "./lib-esm/web.js", "node": "./lib-esm/node.js", "browser": "./lib-esm/web.js", "default": "./lib-esm/node.js" }, "require": "./lib-cjs/node.js" }, "./node": { "types": "./lib-esm/node.d.ts", "import": "./lib-esm/node.js", "require": "./lib-cjs/node.js" }, "./http": { "types": "./lib-esm/http.d.ts", "import": "./lib-esm/http.js", "require": "./lib-cjs/http.js" }, "./ws": { 
"types": "./lib-esm/ws.d.ts", "import": "./lib-esm/ws.js", "require": "./lib-cjs/ws.js" }, "./sqlite3": { "types": "./lib-esm/sqlite3.d.ts", "import": "./lib-esm/sqlite3.js", "require": "./lib-cjs/sqlite3.js" }, "./web": { "types": "./lib-esm/web.d.ts", "import": "./lib-esm/web.js", "require": "./lib-cjs/web.js" } }, "typesVersions": { "*": { ".": [ "./lib-esm/node.d.ts" ], "http": [ "./lib-esm/http.d.ts" ], "hrana": [ "./lib-esm/hrana.d.ts" ], "sqlite3": [ "./lib-esm/sqlite3.d.ts" ], "web": [ "./lib-esm/web.d.ts" ] } }, "files": [ "lib-cjs/**", "lib-esm/**", "README.md" ], "scripts": { "prepublishOnly": "npm run build", "prebuild": "rm -rf ./lib-cjs ./lib-esm", "build": "npm run build:cjs && npm run build:esm", "build:cjs": "tsc -p tsconfig.build-cjs.json", "build:esm": "tsc -p tsconfig.build-esm.json", "format:check": "prettier --check .", "postbuild": "cp package-cjs.json ./lib-cjs/package.json", "test": "jest --runInBand", "typecheck": "tsc --noEmit", "typedoc": "rm -rf ./docs && typedoc", "lint-staged": "lint-staged" }, "dependencies": { "@libsql/core": "^0.17.2", "@libsql/hrana-client": "^0.9.0", "js-base64": "^3.7.5", "libsql": "^0.5.28", "promise-limit": "^2.7.0" }, "devDependencies": { "@types/jest": "^29.2.5", "@types/node": "^18.15.5", "jest": "^29.3.1", "lint-staged": "^15.2.2", "msw": "^2.3.0", "prettier": "3.2.5", "ts-jest": "^29.0.5", "typedoc": "^0.23.28", "typescript": "^4.9.4" } } ================================================ FILE: packages/libsql-client/smoke_test/vercel/.gitignore ================================================ app/package.json package-lock.json *.tgz ================================================ FILE: packages/libsql-client/smoke_test/vercel/app/.gitignore ================================================ .vercel ================================================ FILE: packages/libsql-client/smoke_test/vercel/app/api/function.ts ================================================ import * as libsql from "@libsql/client"; 
export const config = { runtime: "edge", }; export default async function (request: Request) { function respond(status: number, responseBody: string) { return new Response(responseBody, { status, headers: [["content-type", "text/plain"]], }); } if (request.method !== "GET") { return respond(405, "Only GET method is supported"); } const url = new URL(request.url); const testCase = url.searchParams.get("test"); if (testCase === null) { return respond( 400, "Please specify the test case using the 'test' query parameter", ); } const testCaseFn = testCases[testCase]; if (testCaseFn === undefined) { return respond(404, "Unknown test case"); } let client; try { client = libsql.createClient({ url: process.env.CLIENT_URL! }); await testCaseFn(client); return respond(200, "Test passed"); } catch (e) { return respond(500, `Test failed\n${(e as Error).stack}`); } finally { if (client !== undefined) { client.close(); } } } const testCases: Record Promise> = { execute: async (client: libsql.Client): Promise => { const rs = await client.execute("SELECT 1+1 AS two"); assert(rs.columns.length === 1); assert(rs.columns[0] === "two"); assert(rs.rows.length === 1); assert(rs.rows[0].length === 1); assert(rs.rows[0][0] === 2.0); }, batch: async (client: libsql.Client): Promise => { const rss = await client.batch([ "DROP TABLE IF EXISTS t", "CREATE TABLE t (a, b)", "INSERT INTO t VALUES (1, 'one'), (2, 'two'), (3, 'three')", "SELECT * FROM t ORDER BY a", ]); assert(rss[0].columns.length === 0); assert(rss[0].rows.length === 0); assert(rss[1].columns.length === 0); assert(rss[1].rows.length === 0); assert(rss[2].columns.length === 0); assert(rss[2].rows.length === 0); assert(rss[3].columns.length === 2); assert(rss[3].columns[0] === "a"); assert(rss[3].columns[1] === "b"); assert(rss[3].rows.length === 3); assert(rss[3].rows[0][0] === 1); assert(rss[3].rows[0][1] === "one"); assert(rss[3].rows[1][0] === 2); assert(rss[3].rows[1][1] === "two"); assert(rss[3].rows[2][0] === 3); 
assert(rss[3].rows[2][1] === "three"); }, transaction: async (client: libsql.Client): Promise => { await client.batch([ "DROP TABLE IF EXISTS t", "CREATE TABLE t (a, b)", "INSERT INTO t VALUES (1, 'one'), (2, 'two'), (3, 'three')", ]); const txn = await client.transaction(); try { await txn.execute("INSERT INTO t VALUES (4, 'four')"); await txn.execute("DELETE FROM t WHERE a <= 2"); await txn.commit(); } finally { txn.close(); } const rs = await client.execute("SELECT COUNT(*) FROM t"); assert(rs.rows[0][0] === 2); }, }; function assert(value: unknown, message?: string) { if (!value) { throw new Error(message ?? "Assertion failed"); } } ================================================ FILE: packages/libsql-client/smoke_test/vercel/app/public/index.html ================================================

This is a smoke-test Vercel app for @libsql/client.

================================================ FILE: packages/libsql-client/smoke_test/vercel/package.json ================================================ { "name": "smoke-test", "dependencies": { "@types/node": "20.4.2", "localtunnel": "^2.0.2", "vercel": "^31.0.3", "typescript": "^4.9.4" } } ================================================ FILE: packages/libsql-client/smoke_test/vercel/test.js ================================================ "use strict"; const { spawn } = require("node:child_process"); const fs = require("node:fs"); const fetch = require("node-fetch"); const localtunnel = require("localtunnel"); function getEnv(name) { const value = process.env[name] ?? ""; if (!value) { throw new Error(`Please set the env variable ${name}`); } return value; } const vercelToken = getEnv("VERCEL_TOKEN"); const projectName = getEnv("VERCEL_PROJECT_NAME"); async function npm( subcommand, args, hiddenArgs = [], { capture = false } = {}, ) { console.info(`$ npm ${subcommand} ${args.join(" ")}`); const proc = spawn("npm", [subcommand, ...args, ...hiddenArgs], { stdio: ["ignore", capture ? 
"pipe" : "inherit", "inherit"], }); const exitPromise = new Promise((resolve, reject) => { proc.on("exit", (code, signal) => { if (signal !== null) { reject( new Error( `vercel command terminated due to signal: ${signal}`, ), ); } else if (code !== 0) { reject(new Error(`vercel command exited with code: ${code}`)); } else { resolve(); } }); }); const dataPromise = new Promise((resolve, reject) => { if (!capture) { return resolve(); } const stream = proc.stdout; stream.setEncoding("utf-8"); const chunks = []; stream.on("data", (chunk) => chunks.push(chunk)); stream.on("end", () => resolve(chunks.join(""))); stream.on("error", (e) => reject(e)); }); return exitPromise.then(() => dataPromise); } async function deployToVercel(clientUrlInsideVercel) { console.info("Building and deploying to Vercel..."); let tarballName = await npm("pack", ["../.."], [], { capture: true }); tarballName = tarballName.trim(); const appPackageJson = { dependencies: { "@libsql/client": `../${tarballName}`, }, }; fs.writeFileSync( "app/package.json", JSON.stringify(appPackageJson, null, 4), ); await npm( "exec", [ "--", "vercel", "link", "--yes", "--project", projectName, "--cwd", "app/", ], ["--token", vercelToken], ); await npm( "exec", [ "--", "vercel", "pull", "--yes", "--environment=preview", "--cwd", "app/", ], ["--token", vercelToken], ); await npm("exec", ["--", "vercel", "build", "--cwd", "app/"]); const deployUrl = await npm( "exec", [ "--", "vercel", "deploy", "--prebuilt", "--env", `CLIENT_URL=${clientUrlInsideVercel}`, "--cwd", "app/", ], ["--token", vercelToken, "--cwd", "app/"], { capture: true }, ); console.info(`Deployed Vercel project on ${deployUrl}`); return deployUrl; } const testCases = ["execute", "batch", "transaction"]; async function runTests(functionUrl) { let ok = true; for (const testCase of testCases) { if (!(await runTest(functionUrl, testCase))) { ok = false; } } return ok; } async function runTest(functionUrl, testCase) { const resp = await 
fetch(`${functionUrl}?test=${testCase}`); const respText = await resp.text(); const ok = resp.status === 200 && respText === "Test passed"; if (ok) { console.info(`TEST ${testCase}: passed`); } else { console.warn( `\nTEST ${testCase}: failed with status ${resp.status}\n${respText}\n`, ); } return ok; } async function main() { const url = new URL(process.env.URL ?? "ws://localhost:8080"); console.info(`Creating a tunnel to ${url}...`); const tunnel = await localtunnel({ port: url.port, // NOTE: if we specify `local_host`, `localtunnel` will try to rewrite the `Host` header in the // tunnelled HTTP requests. Unfortunately, they do it in a very silly way by converting the // tunnelled data to a string, thus corrupting the request body. //local_host: url.hostname, }); let clientUrlInsideVercel = new URL(tunnel.url); if (url.protocol === "http:") { clientUrlInsideVercel.protocol = "https:"; } else if (url.protocol === "ws:") { clientUrlInsideVercel.protocol = "wss:"; } else { clientUrlInsideVercel.protocol = url.protocol; } console.info(`Established a tunnel on ${clientUrlInsideVercel}`); let ok = false; try { const deployUrl = await deployToVercel(clientUrlInsideVercel); const functionUrl = new URL("api/function", deployUrl); ok = await runTests(functionUrl); if (ok) { console.log("All tests passed"); } else { console.error("Some tests failed"); } } finally { console.info("Closing the tunnel..."); await tunnel.close(); } process.exit(ok ? 
0 : 1); } main(); ================================================ FILE: packages/libsql-client/smoke_test/vercel/tsconfig.json ================================================ { "compilerOptions": { "target": "es5", "lib": ["dom", "esnext"], "module": "esnext", "moduleResolution": "node", "allowJs": true, "skipLibCheck": true, "strict": true, "noEmit": true, "esModuleInterop": true, "isolatedModules": true } } ================================================ FILE: packages/libsql-client/smoke_test/workers/.gitignore ================================================ package-lock.json ================================================ FILE: packages/libsql-client/smoke_test/workers/package.json ================================================ { "devDependencies": { "localtunnel": "^2.0.2", "wrangler": "^3.5.1" } } ================================================ FILE: packages/libsql-client/smoke_test/workers/test.js ================================================ "use strict"; const localtunnel = require("localtunnel"); const wrangler = require("wrangler"); const testCases = ["/execute", "/batch", "/transaction"]; async function main() { const local = !!parseInt(process.env.LOCAL ?? "1"); const url = new URL(process.env.URL ?? "ws://localhost:8080"); let clientUrlInsideWorker; let tunnel = undefined; if (local) { clientUrlInsideWorker = url; } else { console.info(`Creating an tunnel to ${url}...`); tunnel = await localtunnel({ port: url.port, // NOTE: if we specify `local_host`, `localtunnel` will try to rewrite the `Host` header in the // tunnelled HTTP requests. Unfortunately, they do it in a very silly way by converting the // tunnelled data to a string, thus corrupting the request body. 
//local_host: url.hostname, }); clientUrlInsideWorker = new URL(tunnel.url); if (url.protocol === "http:") { clientUrlInsideWorker.protocol = "https:"; } else if (url.protocol === "ws:") { clientUrlInsideWorker.protocol = "wss:"; } else { clientUrlInsideWorker.protocol = url.protocol; } console.info(`Established a tunnel on ${clientUrlInsideWorker}`); } let ok = false; try { ok = await runWorker(local, clientUrlInsideWorker); if (ok) { console.log("All tests passed"); } else { console.error("Some tests failed"); } } finally { if (tunnel !== undefined) { console.info("Closing tunnel..."); await tunnel.close(); } // TODO: wrangler keeps the program running: // https://github.com/cloudflare/workers-sdk/issues/2892 setTimeout(() => process.exit(ok ? 0 : 1), 200); } } async function runWorker(local, clientUrlInsideWorker) { console.info(`Creating a ${local ? "local" : "nonlocal"} Worker...`); const worker = await wrangler.unstable_dev("worker.js", { config: "wrangler.toml", logLevel: "info", local, vars: { CLIENT_URL: clientUrlInsideWorker.toString(), }, experimental: { disableExperimentalWarning: true, }, }); console.info(`Worker created on ${worker.address}:${worker.port}`); try { let ok = true; for (const testCase of testCases) { if (!(await runTest(worker, testCase))) { ok = false; } } return ok; } finally { console.info("Stopping Worker..."); await worker.stop(); } } async function runTest(worker, testCase) { const resp = await worker.fetch(testCase); const respText = await resp.text(); const ok = resp.status === 200 && respText === "Test passed"; if (ok) { console.info(`TEST ${testCase}: passed`); } else { console.warn( `\nTEST ${testCase}: failed with status ${resp.status}\n${respText}\n`, ); } return ok; } main(); ================================================ FILE: packages/libsql-client/smoke_test/workers/worker.js ================================================ import * as libsql from "@libsql/client"; export default { async fetch(request, env, ctx) { 
// Build a plain-text HTTP response with the given status code.
function respond(status, responseBody) {
    return new Response(responseBody, {
        status,
        headers: [["content-type", "text/plain"]],
    });
}
// This smoke-test Worker only accepts GET requests.
if (request.method !== "GET") {
    return respond(405, "Only GET method is supported");
}
const url = new URL(request.url);
if (url.pathname === "/") {
    return respond(
        200,
        "This is a smoke-test Worker for @libsql/client",
    );
}
// The URL path selects one of the test cases defined below.
const testCaseFn = testCases[url.pathname];
if (testCaseFn === undefined) {
    return respond(404, "Unknown test case");
}
let client;
try {
    // CLIENT_URL is injected as a Worker var by the test driver.
    client = libsql.createClient({ url: env.CLIENT_URL });
    await testCaseFn(client);
    return respond(200, "Test passed");
} catch (e) {
    return respond(500, `Test failed\n${e.stack}`);
} finally {
    // Always release the client, even when the test case threw.
    if (client !== undefined) {
        client.close();
    }
}
},
};

// Map of URL path -> test case; each case receives a fresh client.
const testCases = {
    "/execute": async (client) => {
        const rs = await client.execute("SELECT 1+1 AS two");
        assert(rs.columns.length === 1);
        assert(rs.columns[0] === "two");
        assert(rs.rows.length === 1);
        assert(rs.rows[0].length === 1);
        assert(rs.rows[0][0] === 2.0);
    },
    "/batch": async (client) => {
        const rss = await client.batch([
            "DROP TABLE IF EXISTS t",
            "CREATE TABLE t (a, b)",
            "INSERT INTO t VALUES (1, 'one'), (2, 'two'), (3, 'three')",
            "SELECT * FROM t ORDER BY a",
        ]);
        // The DDL/INSERT statements produce no columns and no rows.
        assert(rss[0].columns.length === 0);
        assert(rss[0].rows.length === 0);
        assert(rss[1].columns.length === 0);
        assert(rss[1].rows.length === 0);
        assert(rss[2].columns.length === 0);
        assert(rss[2].rows.length === 0);
        // The final SELECT returns the three inserted rows in order.
        assert(rss[3].columns.length === 2);
        assert(rss[3].columns[0] === "a");
        assert(rss[3].columns[1] === "b");
        assert(rss[3].rows.length === 3);
        assert(rss[3].rows[0][0] === 1);
        assert(rss[3].rows[0][1] === "one");
        assert(rss[3].rows[1][0] === 2);
        assert(rss[3].rows[1][1] === "two");
        assert(rss[3].rows[2][0] === 3);
        assert(rss[3].rows[2][1] === "three");
    },
    "/transaction": async (client) => {
        await client.batch([
            "DROP TABLE IF EXISTS t",
            "CREATE TABLE t (a, b)",
            "INSERT INTO t VALUES (1, 'one'), (2, 'two'), (3, 'three')",
        ]);
        const txn = await
client.transaction(); try { await txn.execute("INSERT INTO t VALUES (4, 'four')"); await txn.execute("DELETE FROM t WHERE a <= 2"); await txn.commit(); } finally { txn.close(); } const rs = await client.execute("SELECT COUNT(*) FROM t"); assert(rs.rows[0][0] === 2); }, }; function assert(value, message) { if (!value) { throw new Error(message ?? "Assertion failed"); } } ================================================ FILE: packages/libsql-client/smoke_test/workers/wrangler.toml ================================================ main = "worker.js" compatibility_date = "2023-05-15" [vars] CLIENT_URL = "ws://localhost:8080" ================================================ FILE: packages/libsql-client/src/__tests__/client.test.ts ================================================ import console from "node:console"; import { expect } from "@jest/globals"; import type { MatcherFunction } from "expect"; import type { Request, Response } from "@libsql/hrana-client"; import { fetch } from "@libsql/hrana-client"; import "./helpers.js"; import type * as libsql from "../node.js"; import { createClient } from "../node.js"; const config = { url: process.env.URL ?? "ws://localhost:8080", syncUrl: process.env.SYNC_URL, authToken: process.env.AUTH_TOKEN, }; const isWs = config.url.startsWith("ws:") || config.url.startsWith("wss:") || config.url.startsWith("libsql:"); const isHttp = config.url.startsWith("http:") || config.url.startsWith("https:"); const isFile = config.url.startsWith("file:"); // This allows us to skip tests based on the Hrana server that we are targeting: // - "test_v3" is the v3 test server in Python // - "test_v2" is the v2 test server in Python // - "test_v1" is the v1 test server in Python // - "sqld" is sqld const server = process.env.SERVER ?? 
"test_v3";
const isSqld = server === "sqld";
const hasHrana2 = server !== "test_v1";
const hasHrana3 =
    server !== "test_v1" && server !== "test_v2" && server !== "sqld";
const hasNetworkErrors =
    isWs &&
    (server === "test_v1" || server === "test_v2" || server === "test_v3");

// Wraps a test body with a client created from the shared `config`
// (optionally overridden by `extraConfig`), and guarantees the client is
// closed afterwards even if the body throws.
// NOTE: the extraction had stripped the generic type arguments
// (`Promise<void>`, `Partial<libsql.Config>`); restored here so the
// signatures are valid TypeScript again.
function withClient(
    f: (c: libsql.Client) => Promise<void>,
    extraConfig: Partial<libsql.Config> = {},
): () => Promise<void> {
    return async () => {
        const c = createClient({ ...config, ...extraConfig });
        try {
            await f(c);
        } finally {
            c.close();
        }
    };
}

// Like withClient(), but always targets a fresh in-memory database.
function withInMemoryClient(
    f: (c: libsql.Client) => Promise<void>,
): () => Promise<void> {
    return async () => {
        const c = createClient({ url: ":memory:" });
        try {
            await f(c);
        } finally {
            c.close();
        }
    };
}

describe("createClient()", () => {
    test("URL scheme not supported", () => {
        expect(() => createClient({ url: "ftp://localhost" })).toThrow(
            expect.toBeLibsqlError("URL_SCHEME_NOT_SUPPORTED", /"ftp:"/),
        );
    });
    test("URL param not supported", () => {
        expect(() => createClient({ url: "ws://localhost?foo=bar" })).toThrow(
            expect.toBeLibsqlError("URL_PARAM_NOT_SUPPORTED", /"foo"/),
        );
    });
    test("URL scheme incompatible with ?tls", () => {
        const urls = [
            "ws://localhost?tls=1",
            "wss://localhost?tls=0",
            "http://localhost?tls=1",
            "https://localhost?tls=0",
        ];
        for (const url of urls) {
            expect(() => createClient({ url })).toThrow(
                expect.toBeLibsqlError("URL_INVALID", /TLS/),
            );
        }
    });
    test("missing port in libsql URL with tls=0", () => {
        expect(() => createClient({ url: "libsql://localhost?tls=0" })).toThrow(
            expect.toBeLibsqlError("URL_INVALID", /port/),
        );
    });
    test("invalid value of tls query param", () => {
        expect(() =>
            createClient({ url: "libsql://localhost?tls=yes" }),
        ).toThrow(expect.toBeLibsqlError("URL_INVALID", /"tls".*"yes"/));
    });
    test("passing URL instead of config object", () => {
        // @ts-expect-error
        expect(() => createClient("ws://localhost")).toThrow(
            /as object, got string/,
        );
    });
    test("invalid value for `intMode`", () => {
        // @ts-expect-error
        expect(() => createClient({
...config, intMode: "foo" })).toThrow( /"foo"/, ); }); test("supports in-memory database", () => { expect(() => createClient({ url: ":memory:" })).not.toThrow(); }); }); describe("execute()", () => { test( "query a single value", withClient(async (c) => { const rs = await c.execute("SELECT 42"); expect(rs.columns.length).toStrictEqual(1); expect(rs.columnTypes.length).toStrictEqual(1); expect(rs.rows.length).toStrictEqual(1); expect(rs.rows[0].length).toStrictEqual(1); expect(rs.rows[0][0]).toStrictEqual(42); }), ); test( "query a single row", withClient(async (c) => { const rs = await c.execute( "SELECT 1 AS one, 'two' AS two, 0.5 AS three", ); expect(rs.columns).toStrictEqual(["one", "two", "three"]); expect(rs.columnTypes).toStrictEqual(["", "", ""]); expect(rs.rows.length).toStrictEqual(1); const r = rs.rows[0]; expect(r.length).toStrictEqual(3); expect(Array.from(r)).toStrictEqual([1, "two", 0.5]); expect(Object.entries(r)).toStrictEqual([ ["one", 1], ["two", "two"], ["three", 0.5], ]); }), ); test( "query multiple rows", withClient(async (c) => { const rs = await c.execute( "VALUES (1, 'one'), (2, 'two'), (3, 'three')", ); expect(rs.columns.length).toStrictEqual(2); expect(rs.columnTypes.length).toStrictEqual(2); expect(rs.rows.length).toStrictEqual(3); expect(Array.from(rs.rows[0])).toStrictEqual([1, "one"]); expect(Array.from(rs.rows[1])).toStrictEqual([2, "two"]); expect(Array.from(rs.rows[2])).toStrictEqual([3, "three"]); }), ); test( "statement that produces error", withClient(async (c) => { await expect(c.execute("SELECT foobar")).rejects.toBeLibsqlError(); }), ); test( "rowsAffected with INSERT", withClient(async (c) => { await c.batch( ["DROP TABLE IF EXISTS t", "CREATE TABLE t (a)"], "write", ); const rs = await c.execute("INSERT INTO t VALUES (1), (2)"); expect(rs.rowsAffected).toStrictEqual(2); }), ); test( "rowsAffected with DELETE", withClient(async (c) => { await c.batch( [ "DROP TABLE IF EXISTS t", "CREATE TABLE t (a)", "INSERT INTO t VALUES 
(1), (2), (3), (4), (5)", ], "write", ); const rs = await c.execute("DELETE FROM t WHERE a >= 3"); expect(rs.rowsAffected).toStrictEqual(3); }), ); test( "lastInsertRowid with INSERT", withClient(async (c) => { await c.batch( [ "DROP TABLE IF EXISTS t", "CREATE TABLE t (a)", "INSERT INTO t VALUES ('one'), ('two')", ], "write", ); const insertRs = await c.execute("INSERT INTO t VALUES ('three')"); expect(insertRs.lastInsertRowid).not.toBeUndefined(); const selectRs = await c.execute({ sql: "SELECT a FROM t WHERE ROWID = ?", args: [insertRs.lastInsertRowid!], }); expect(Array.from(selectRs.rows[0])).toStrictEqual(["three"]); }), ); test( "rows from INSERT RETURNING", withClient(async (c) => { await c.batch( ["DROP TABLE IF EXISTS t", "CREATE TABLE t (a)"], "write", ); const rs = await c.execute( "INSERT INTO t VALUES (1) RETURNING 42 AS x, 'foo' AS y", ); expect(rs.columns).toStrictEqual(["x", "y"]); expect(rs.columnTypes).toStrictEqual(["", ""]); expect(rs.rows.length).toStrictEqual(1); expect(Array.from(rs.rows[0])).toStrictEqual([42, "foo"]); }), ); (hasHrana2 ? 
test : test.skip)( "rowsAffected with WITH INSERT", withClient(async (c) => { await c.batch( [ "DROP TABLE IF EXISTS t", "CREATE TABLE t (a)", "INSERT INTO t VALUES (1), (2), (3)", ], "write", ); const rs = await c.execute(` WITH x(a) AS (SELECT 2*a FROM t) INSERT INTO t SELECT a+1 FROM x `); expect(rs.rowsAffected).toStrictEqual(3); }), ); test( "query a single value using an in memory database", withInMemoryClient(async (c) => { await c.batch( [ "DROP TABLE IF EXISTS t", "CREATE TABLE t (a)", "INSERT INTO t VALUES ('one'), ('two')", ], "write", ); const insertRs = await c.execute("INSERT INTO t VALUES ('three')"); expect(insertRs.lastInsertRowid).not.toBeUndefined(); const selectRs = await c.execute({ sql: "SELECT a FROM t WHERE ROWID = ?", args: [insertRs.lastInsertRowid!], }); expect(Array.from(selectRs.rows[0])).toStrictEqual(["three"]); }), ); // see issue https://github.com/tursodatabase/libsql/issues/1411 test( "execute transaction against in memory database with shared cache", withClient( async (c) => { await c.execute("CREATE TABLE t (a)"); const transaction = await c.transaction(); transaction.close(); await c.execute("SELECT * FROM t"); }, { url: "file::memory:?cache=shared" }, ), ); test( "execute transaction against in memory database with private cache", withClient( async (c) => { await c.execute("CREATE TABLE t (a)"); const transaction = await c.transaction(); transaction.close(); expect(() => c.execute("SELECT * FROM t")).rejects.toThrow(); }, { url: "file::memory:?cache=private" }, ), ); test( "execute transaction against in memory database with default cache", withClient( async (c) => { await c.execute("CREATE TABLE t (a)"); const transaction = await c.transaction(); transaction.close(); expect(() => c.execute("SELECT * FROM t")).rejects.toThrow(); }, { url: ":memory:" }, ), ); }); describe("values", () => { function testRoundtrip( name: string, passed: libsql.InValue, expected: libsql.Value, intMode?: libsql.IntMode, ): void { test( name, 
withClient( async (c) => { const rs = await c.execute({ sql: "SELECT ?", args: [passed], }); expect(rs.rows[0][0]).toStrictEqual(expected); }, { intMode }, ), ); } function testRoundtripError( name: string, passed: libsql.InValue, expectedError: unknown, intMode?: libsql.IntMode, ): void { test( name, withClient( async (c) => { await expect( c.execute({ sql: "SELECT ?", args: [passed], }), ).rejects.toBeInstanceOf(expectedError); }, { intMode }, ), ); } testRoundtrip("string", "boomerang", "boomerang"); testRoundtrip("string with weird characters", "a\n\r\t ", "a\n\r\t "); testRoundtrip( "string with unicode", "žluťoučký kůň úpěl ďábelské ódy", "žluťoučký kůň úpěl ďábelské ódy", ); describe("number", () => { const intModes: Array = ["number", "bigint", "string"]; for (const intMode of intModes) { testRoundtrip("zero", 0, 0, intMode); testRoundtrip("integer", -2023, -2023, intMode); testRoundtrip("float", 12.345, 12.345, intMode); testRoundtrip("large positive float", 1e18, 1e18, intMode); testRoundtrip("large negative float", -1e18, -1e18, intMode); testRoundtrip( "MAX_VALUE", Number.MAX_VALUE, Number.MAX_VALUE, intMode, ); testRoundtrip( "-MAX_VALUE", -Number.MAX_VALUE, -Number.MAX_VALUE, intMode, ); testRoundtrip( "MIN_VALUE", Number.MIN_VALUE, Number.MIN_VALUE, intMode, ); } }); describe("bigint", () => { describe("'number' int mode", () => { testRoundtrip("zero integer", 0n, 0, "number"); testRoundtrip("small integer", -42n, -42, "number"); testRoundtrip( "largest safe integer", 9007199254740991n, 9007199254740991, "number", ); testRoundtripError( "smallest unsafe integer", 9007199254740992n, RangeError, "number", ); testRoundtripError( "large unsafe integer", -1152921504594532842n, RangeError, "number", ); }); describe("'bigint' int mode", () => { testRoundtrip("zero integer", 0n, 0n, "bigint"); testRoundtrip("small integer", -42n, -42n, "bigint"); testRoundtrip( "large positive integer", 1152921504608088318n, 1152921504608088318n, "bigint", ); testRoundtrip( 
"large negative integer",
        -1152921504594532842n,
        -1152921504594532842n,
        "bigint",
    );
    testRoundtrip(
        "largest positive integer",
        9223372036854775807n,
        9223372036854775807n,
        "bigint",
    );
    testRoundtrip(
        "largest negative integer",
        -9223372036854775808n,
        -9223372036854775808n,
        "bigint",
    );
});
// In "string" int mode, integers come back as decimal strings.
describe("'string' int mode", () => {
    testRoundtrip("zero integer", 0n, "0", "string");
    testRoundtrip("small integer", -42n, "-42", "string");
    testRoundtrip(
        "large positive integer",
        1152921504608088318n,
        "1152921504608088318",
        "string",
    );
    testRoundtrip(
        "large negative integer",
        -1152921504594532842n,
        "-1152921504594532842",
        "string",
    );
    testRoundtrip(
        "largest positive integer",
        9223372036854775807n,
        "9223372036854775807",
        "string",
    );
    testRoundtrip(
        "largest negative integer",
        -9223372036854775808n,
        "-9223372036854775808",
        "string",
    );
});
});
// 256-byte blob with every byte value (XOR-scrambled so it is not 0..255).
const buf = new ArrayBuffer(256);
const array = new Uint8Array(buf);
for (let i = 0; i < 256; ++i) {
    array[i] = i ^ 0xab;
}
// Both ArrayBuffer and Uint8Array inputs round-trip as ArrayBuffer.
testRoundtrip("ArrayBuffer", buf, buf);
testRoundtrip("Uint8Array", array, buf);
testRoundtrip("null", null, null);
// Booleans are stored as integers, so the returned form follows intMode.
testRoundtrip("true", true, 1n, "bigint");
testRoundtrip("false", false, 0n, "bigint");
testRoundtrip("true", true, 1, "number");
testRoundtrip("false", false, 0, "number");
testRoundtrip("true", true, "1", "string");
testRoundtrip("false", false, "0", "string");
testRoundtrip("true", true, 1);
testRoundtrip("false", false, 0);
// Dates are stored as their millisecond epoch timestamp.
testRoundtrip(
    "Date",
    new Date("2023-01-02T12:34:56Z"),
    1672662896000,
    "bigint",
);
// @ts-expect-error
testRoundtripError("undefined produces error", undefined, TypeError);
testRoundtripError("NaN produces error", NaN, RangeError);
testRoundtripError("Infinity produces error", Infinity, RangeError);
// Outside the signed 64-bit range that SQLite can represent.
testRoundtripError(
    "large bigint produces error",
    -1267650600228229401496703205376n,
    RangeError,
);
test(
    "max 64-bit bigint",
    withClient(async (c) => {
        const rs = await c.execute({
            sql: "SELECT ?||''",
            args: [9223372036854775807n],
        });
expect(rs.rows[0][0]).toStrictEqual("9223372036854775807"); }), ); test( "min 64-bit bigint", withClient(async (c) => { const rs = await c.execute({ sql: "SELECT ?||''", args: [-9223372036854775808n], }); expect(rs.rows[0][0]).toStrictEqual("-9223372036854775808"); }), ); }); describe("ResultSet.toJSON()", () => { test( "simple result set", withClient(async (c) => { const rs = await c.execute("SELECT 1 AS a"); const json = rs.toJSON(); expect( json["lastInsertRowid"] === null || json["lastInsertRowid"] === "0", ).toBe(true); expect(json["columns"]).toStrictEqual(["a"]); expect(json["columnTypes"]).toStrictEqual([""]); expect(json["rows"]).toStrictEqual([[1]]); expect(json["rowsAffected"]).toStrictEqual(0); const str = JSON.stringify(rs); expect( str === '{"columns":["a"],"columnTypes":[""],"rows":[[1]],"rowsAffected":0,"lastInsertRowid":null}' || str === '{"columns":["a"],"columnTypes":[""],"rows":[[1]],"rowsAffected":0,"lastInsertRowid":"0"}', ).toBe(true); }), ); test( "lastInsertRowid", withClient(async (c) => { await c.execute("DROP TABLE IF EXISTS t"); await c.execute("CREATE TABLE t (id INTEGER PRIMARY KEY NOT NULL)"); const rs = await c.execute("INSERT INTO t VALUES (12345)"); expect(rs.toJSON()).toStrictEqual({ columns: [], columnTypes: [], rows: [], rowsAffected: 1, lastInsertRowid: "12345", }); }), ); test( "computed values", withClient(async (c) => { const rs = await c.execute( "SELECT 42 AS integer, 0.5 AS float, NULL AS \"null\", 'foo' AS text, X'626172' AS blob", ); const json = rs.toJSON(); expect(json["columns"]).toStrictEqual([ "integer", "float", "null", "text", "blob", ]); expect(json["columnTypes"]).toStrictEqual(["", "", "", "", ""]); expect(json["rows"]).toStrictEqual([ [42, 0.5, null, "foo", "YmFy"], ]); }), ); (hasHrana2 ? 
test : test.skip)( "row values", withClient(async (c) => { await c.execute("DROP TABLE IF EXISTS t"); await c.execute( "CREATE TABLE t (i INTEGER, f FLOAT, t TEXT, b BLOB)", ); await c.execute("INSERT INTO t VALUES (42, 0.5, 'foo', X'626172')"); const rs = await c.execute("SELECT i, f, t, b FROM t LIMIT 1"); const json = rs.toJSON(); expect(json["columns"]).toStrictEqual(["i", "f", "t", "b"]); expect(json["columnTypes"]).toStrictEqual([ "INTEGER", "FLOAT", "TEXT", "BLOB", ]); expect(json["rows"]).toStrictEqual([[42, 0.5, "foo", "YmFy"]]); }), ); test( "bigint row value", withClient( async (c) => { const rs = await c.execute("SELECT 42"); const json = rs.toJSON(); expect(json["rows"]).toStrictEqual([["42"]]); }, { intMode: "bigint" }, ), ); }); describe("arguments", () => { test( "? arguments", withClient(async (c) => { const rs = await c.execute({ sql: "SELECT ?, ?", args: ["one", "two"], }); expect(Array.from(rs.rows[0])).toStrictEqual(["one", "two"]); }), ); (!isFile ? test : test.skip)( "?NNN arguments", withClient(async (c) => { const rs = await c.execute({ sql: "SELECT ?2, ?3, ?1", args: ["one", "two", "three"], }); expect(Array.from(rs.rows[0])).toStrictEqual([ "two", "three", "one", ]); }), ); (!isFile ? test : test.skip)( "?NNN arguments with holes", withClient(async (c) => { const rs = await c.execute({ sql: "SELECT ?3, ?1", args: ["one", "two", "three"], }); expect(Array.from(rs.rows[0])).toStrictEqual(["three", "one"]); }), ); (!isFile ? test : test.skip)( "?NNN and ? 
arguments", withClient(async (c) => { const rs = await c.execute({ sql: "SELECT ?2, ?, ?3", args: ["one", "two", "three"], }); expect(Array.from(rs.rows[0])).toStrictEqual([ "two", "three", "three", ]); }), ); for (const sign of [":", "@", "$"]) { test( `${sign}AAAA arguments`, withClient(async (c) => { const rs = await c.execute({ sql: `SELECT ${sign}b, ${sign}a`, args: { a: "one", [`${sign}b`]: "two" }, }); expect(Array.from(rs.rows[0])).toStrictEqual(["two", "one"]); }), ); test( `${sign}AAAA arguments used multiple times`, withClient(async (c) => { const rs = await c.execute({ sql: `SELECT ${sign}b, ${sign}a, ${sign}b || ${sign}a`, args: { a: "one", [`${sign}b`]: "two" }, }); expect(Array.from(rs.rows[0])).toStrictEqual([ "two", "one", "twoone", ]); }), ); test( `${sign}AAAA arguments and ?NNN arguments`, withClient(async (c) => { const rs = await c.execute({ sql: `SELECT ${sign}b, ${sign}a, ?1`, args: { a: "one", [`${sign}b`]: "two" }, }); expect(Array.from(rs.rows[0])).toStrictEqual([ "two", "one", "two", ]); }), ); } }); describe("batch()", () => { test( "multiple queries", withClient(async (c) => { const rss = await c.batch( [ "SELECT 1+1", "SELECT 1 AS one, 2 AS two", { sql: "SELECT ?", args: ["boomerang"] }, { sql: "VALUES (?), (?)", args: ["big", "ben"] }, ], "read", ); expect(rss.length).toStrictEqual(4); const [rs0, rs1, rs2, rs3] = rss; expect(rs0.rows.length).toStrictEqual(1); expect(Array.from(rs0.rows[0])).toStrictEqual([2]); expect(rs1.rows.length).toStrictEqual(1); expect(Array.from(rs1.rows[0])).toStrictEqual([1, 2]); expect(rs2.rows.length).toStrictEqual(1); expect(Array.from(rs2.rows[0])).toStrictEqual(["boomerang"]); expect(rs3.rows.length).toStrictEqual(2); expect(Array.from(rs3.rows[0])).toStrictEqual(["big"]); expect(Array.from(rs3.rows[1])).toStrictEqual(["ben"]); }), ); test( "statements are executed sequentially", withClient(async (c) => { const rss = await c.batch( [ /* 0 */ "DROP TABLE IF EXISTS t", /* 1 */ "CREATE TABLE t (a, b)", /* 
2 */ "INSERT INTO t VALUES (1, 'one')", /* 3 */ "SELECT * FROM t ORDER BY a", /* 4 */ "INSERT INTO t VALUES (2, 'two')", /* 5 */ "SELECT * FROM t ORDER BY a", /* 6 */ "DROP TABLE t", ], "write", ); expect(rss.length).toStrictEqual(7); expect(rss[3].rows).toEqual([{ a: 1, b: "one" }]); expect(rss[5].rows).toEqual([ { a: 1, b: "one" }, { a: 2, b: "two" }, ]); }), ); test( "statements are executed in a transaction", withClient(async (c) => { await c.batch( [ "DROP TABLE IF EXISTS t1", "DROP TABLE IF EXISTS t2", "CREATE TABLE t1 (a)", "CREATE TABLE t2 (a)", ], "write", ); const n = 100; const promises = [] as Array; for (let i = 0; i < n; ++i) { const ii = i; promises.push( (async () => { const rss = await c.batch( [ { sql: "INSERT INTO t1 VALUES (?)", args: [ii], }, { sql: "INSERT INTO t2 VALUES (?)", args: [ii * 10], }, "SELECT SUM(a) FROM t1", "SELECT SUM(a) FROM t2", ], "write", ); const sum1 = rss[2].rows[0][0] as number; const sum2 = rss[3].rows[0][0] as number; expect(sum2).toStrictEqual(sum1 * 10); })(), ); } await Promise.all(promises); const rs1 = await c.execute("SELECT SUM(a) FROM t1"); expect(rs1.rows[0][0]).toStrictEqual((n * (n - 1)) / 2); const rs2 = await c.execute("SELECT SUM(a) FROM t2"); expect(rs2.rows[0][0]).toStrictEqual(((n * (n - 1)) / 2) * 10); }), 10000, ); test( "error in batch", withClient(async (c) => { await expect( c.batch(["SELECT 1+1", "SELECT foobar"], "read"), ).rejects.toBeLibsqlError(); }), ); test( "error in batch rolls back transaction", withClient(async (c) => { await c.execute("DROP TABLE IF EXISTS t"); await c.execute("CREATE TABLE t (a)"); await c.execute("INSERT INTO t VALUES ('one')"); await expect( c.batch( [ "INSERT INTO t VALUES ('two')", "SELECT foobar", "INSERT INTO t VALUES ('three')", ], "write", ), ).rejects.toBeLibsqlError(); const rs = await c.execute("SELECT COUNT(*) FROM t"); expect(rs.rows[0][0]).toStrictEqual(1); }), ); test( "batch error reports statement index - error at index 0", withClient(async (c) => { 
try { await c.batch( ["SELECT invalid_column", "SELECT 1", "SELECT 2"], "read", ); throw new Error("Expected batch to fail"); } catch (e: any) { expect(e.name).toBe("LibsqlBatchError"); expect(e.statementIndex).toBe(0); expect(e.code).toBeDefined(); } }), ); test( "batch error reports statement index - error at index 1", withClient(async (c) => { try { await c.batch( ["SELECT 1", "SELECT invalid_column", "SELECT 2"], "read", ); throw new Error("Expected batch to fail"); } catch (e: any) { expect(e.name).toBe("LibsqlBatchError"); expect(e.statementIndex).toBe(1); expect(e.code).toBeDefined(); } }), ); test( "batch error reports statement index - error at index 2", withClient(async (c) => { try { await c.batch( ["SELECT 1", "SELECT 2", "SELECT invalid_column"], "read", ); throw new Error("Expected batch to fail"); } catch (e: any) { expect(e.name).toBe("LibsqlBatchError"); expect(e.statementIndex).toBe(2); expect(e.code).toBeDefined(); } }), ); test( "batch error with write mode reports statement index", withClient(async (c) => { await c.execute("DROP TABLE IF EXISTS t"); await c.execute("CREATE TABLE t (a UNIQUE)"); await c.execute("INSERT INTO t VALUES (1)"); try { await c.batch( [ "INSERT INTO t VALUES (2)", "INSERT INTO t VALUES (3)", "INSERT INTO t VALUES (1)", // Duplicate, will fail "INSERT INTO t VALUES (4)", ], "write", ); throw new Error("Expected batch to fail"); } catch (e: any) { expect(e.name).toBe("LibsqlBatchError"); expect(e.statementIndex).toBe(2); expect(e.code).toBeDefined(); } // Verify rollback happened const rs = await c.execute("SELECT COUNT(*) FROM t"); expect(rs.rows[0][0]).toBe(1); }), ); test( "batch error in in-memory database reports statement index", withInMemoryClient(async (c) => { await c.execute("CREATE TABLE t (a)"); try { await c.batch( [ "INSERT INTO t VALUES (1)", "SELECT invalid_column FROM t", "INSERT INTO t VALUES (2)", ], "write", ); throw new Error("Expected batch to fail"); } catch (e: any) { 
expect(e.name).toBe("LibsqlBatchError"); expect(e.statementIndex).toBe(1); expect(e.code).toBeDefined(); } }), ); test( "batch with a lot of different statements", withClient(async (c) => { const stmts = [] as Array; for (let i = 0; i < 1000; ++i) { stmts.push(`SELECT ${i}`); } const rss = await c.batch(stmts, "read"); for (let i = 0; i < stmts.length; ++i) { expect(rss[i].rows[0][0]).toStrictEqual(i); } }), ); test( "batch with a lot of the same statements", withClient(async (c) => { const n = 20; const m = 200; const stmts = [] as Array; for (let i = 0; i < n; ++i) { for (let j = 0; j < m; ++j) { stmts.push({ sql: `SELECT ?, ${j}`, args: [i] }); } } const rss = await c.batch(stmts, "read"); for (let i = 0; i < n; ++i) { for (let j = 0; j < m; ++j) { const rs = rss[i * m + j]; expect(rs.rows[0][0]).toStrictEqual(i); expect(rs.rows[0][1]).toStrictEqual(j); } } }), ); test( "deferred batch", withClient(async (c) => { const rss = await c.batch( [ "SELECT 1+1", "DROP TABLE IF EXISTS t", "CREATE TABLE t (a)", "INSERT INTO t VALUES (21) RETURNING 2*a", ], "deferred", ); expect(rss.length).toStrictEqual(4); const [rs0, _rs1, _rs2, rs3] = rss; expect(rs0.rows.length).toStrictEqual(1); expect(Array.from(rs0.rows[0])).toStrictEqual([2]); expect(rs3.rows.length).toStrictEqual(1); expect(Array.from(rs3.rows[0])).toStrictEqual([42]); }), ); (hasHrana3 ? 
test : test.skip)( "ROLLBACK statement stops execution of batch", withClient(async (c) => { await c.execute("DROP TABLE IF EXISTS t"); await c.execute("CREATE TABLE t (a)"); await expect( c.batch( [ "INSERT INTO t VALUES (1), (2), (3)", "ROLLBACK", "INSERT INTO t VALUES (4), (5)", ], "write", ), ).rejects.toBeLibsqlError("TRANSACTION_CLOSED"); const rs = await c.execute("SELECT COUNT(*) FROM t"); expect(rs.rows[0][0]).toStrictEqual(0); }), ); }); describe("transaction()", () => { test( "query multiple rows", withClient(async (c) => { const txn = await c.transaction("read"); const rs = await txn.execute( "VALUES (1, 'one'), (2, 'two'), (3, 'three')", ); expect(rs.columns.length).toStrictEqual(2); expect(rs.columnTypes.length).toStrictEqual(2); expect(rs.rows.length).toStrictEqual(3); expect(Array.from(rs.rows[0])).toStrictEqual([1, "one"]); expect(Array.from(rs.rows[1])).toStrictEqual([2, "two"]); expect(Array.from(rs.rows[2])).toStrictEqual([3, "three"]); txn.close(); }), ); test( "commit()", withClient(async (c) => { await c.batch( ["DROP TABLE IF EXISTS t", "CREATE TABLE t (a)"], "write", ); const txn = await c.transaction("write"); await txn.execute("INSERT INTO t VALUES ('one')"); await txn.execute("INSERT INTO t VALUES ('two')"); expect(txn.closed).toStrictEqual(false); await txn.commit(); expect(txn.closed).toStrictEqual(true); const rs = await c.execute("SELECT COUNT(*) FROM t"); expect(rs.rows[0][0]).toStrictEqual(2); await expect(txn.execute("SELECT 1")).rejects.toBeLibsqlError( "TRANSACTION_CLOSED", ); }), ); test( "rollback()", withClient(async (c) => { await c.batch( ["DROP TABLE IF EXISTS t", "CREATE TABLE t (a)"], "write", ); const txn = await c.transaction("write"); await txn.execute("INSERT INTO t VALUES ('one')"); await txn.execute("INSERT INTO t VALUES ('two')"); expect(txn.closed).toStrictEqual(false); await txn.rollback(); expect(txn.closed).toStrictEqual(true); const rs = await c.execute("SELECT COUNT(*) FROM t"); 
expect(rs.rows[0][0]).toStrictEqual(0); await expect(txn.execute("SELECT 1")).rejects.toBeLibsqlError( "TRANSACTION_CLOSED", ); }), ); test( "close()", withClient(async (c) => { await c.batch( ["DROP TABLE IF EXISTS t", "CREATE TABLE t (a)"], "write", ); const txn = await c.transaction("write"); await txn.execute("INSERT INTO t VALUES ('one')"); expect(txn.closed).toStrictEqual(false); txn.close(); expect(txn.closed).toStrictEqual(true); const rs = await c.execute("SELECT COUNT(*) FROM t"); expect(rs.rows[0][0]).toStrictEqual(0); await expect(txn.execute("SELECT 1")).rejects.toBeLibsqlError( "TRANSACTION_CLOSED", ); }), ); test( "error does not rollback", withClient(async (c) => { await c.batch( ["DROP TABLE IF EXISTS t", "CREATE TABLE t (a)"], "write", ); const txn = await c.transaction("write"); await expect(txn.execute("SELECT foo")).rejects.toBeLibsqlError(); await txn.execute("INSERT INTO t VALUES ('one')"); await expect(txn.execute("SELECT bar")).rejects.toBeLibsqlError(); await txn.commit(); const rs = await c.execute("SELECT COUNT(*) FROM t"); expect(rs.rows[0][0]).toStrictEqual(1); }), ); (hasHrana3 ? test : test.skip)( "ROLLBACK statement stops execution of transaction", withClient(async (c) => { await c.execute("DROP TABLE IF EXISTS t"); await c.execute("CREATE TABLE t (a)"); const txn = await c.transaction("write"); const prom1 = txn.execute("INSERT INTO t VALUES (1), (2), (3)"); const prom2 = txn.execute("ROLLBACK"); const prom3 = txn.execute("INSERT INTO t VALUES (4), (5)"); await prom1; await prom2; await expect(prom3).rejects.toBeLibsqlError("TRANSACTION_CLOSED"); await expect(txn.commit()).rejects.toBeLibsqlError(); txn.close(); const rs = await c.execute("SELECT COUNT(*) FROM t"); expect(rs.rows[0][0]).toStrictEqual(0); }), ); (hasHrana3 ? 
test : test.skip)( "OR ROLLBACK statement stops execution of transaction", withClient(async (c) => { await c.execute("DROP TABLE IF EXISTS t"); await c.execute("CREATE TABLE t (a UNIQUE)"); const txn = await c.transaction("write"); const prom1 = txn.execute("INSERT INTO t VALUES (1), (2), (3)"); const prom2 = txn.execute("INSERT OR ROLLBACK INTO t VALUES (1)"); const prom3 = txn.execute("INSERT INTO t VALUES (4), (5)"); await prom1; await expect(prom2).rejects.toBeLibsqlError(); await expect(prom3).rejects.toBeLibsqlError("TRANSACTION_CLOSED"); await expect(txn.commit()).rejects.toBeLibsqlError(); txn.close(); const rs = await c.execute("SELECT COUNT(*) FROM t"); expect(rs.rows[0][0]).toStrictEqual(0); }), ); (hasHrana3 ? test : test.skip)( "OR ROLLBACK as the first statement stops execution of transaction", withClient(async (c) => { await c.execute("DROP TABLE IF EXISTS t"); await c.execute("CREATE TABLE t (a UNIQUE)"); await c.execute("INSERT INTO t VALUES (1), (2), (3)"); const txn = await c.transaction("write"); const prom1 = txn.execute("INSERT OR ROLLBACK INTO t VALUES (1)"); const prom2 = txn.execute("INSERT INTO t VALUES (4), (5)"); await expect(prom1).rejects.toBeLibsqlError(); await expect(prom2).rejects.toBeLibsqlError("TRANSACTION_CLOSED"); await expect(txn.commit()).rejects.toBeLibsqlError(); txn.close(); const rs = await c.execute("SELECT COUNT(*) FROM t"); expect(rs.rows[0][0]).toStrictEqual(3); }), ); test( "commit empty", withClient(async (c) => { const txn = await c.transaction("read"); await txn.commit(); }), ); test( "rollback empty", withClient(async (c) => { const txn = await c.transaction("read"); await txn.rollback(); }), ); describe("batch()", () => { test( "as the first operation on transaction", withClient(async (c) => { const txn = await c.transaction("write"); await txn.batch([ "DROP TABLE IF EXISTS t", "CREATE TABLE t (a)", { sql: "INSERT INTO t VALUES (?)", args: [1] }, { sql: "INSERT INTO t VALUES (?)", args: [2] }, { sql: "INSERT 
INTO t VALUES (?)", args: [4] }, ]); const rs = await txn.execute("SELECT SUM(a) FROM t"); expect(rs.rows[0][0]).toStrictEqual(7); txn.close(); }), ); test( "as the second operation on transaction", withClient(async (c) => { const txn = await c.transaction("write"); await txn.execute("DROP TABLE IF EXISTS t"); await txn.batch([ "CREATE TABLE t (a)", { sql: "INSERT INTO t VALUES (?)", args: [1] }, { sql: "INSERT INTO t VALUES (?)", args: [2] }, { sql: "INSERT INTO t VALUES (?)", args: [4] }, ]); const rs = await txn.execute("SELECT SUM(a) FROM t"); expect(rs.rows[0][0]).toStrictEqual(7); txn.close(); }), ); test( "after error, further statements are not executed", withClient(async (c) => { const txn = await c.transaction("write"); await expect( txn.batch([ "DROP TABLE IF EXISTS t", "CREATE TABLE t (a UNIQUE)", "INSERT INTO t VALUES (1), (2), (4)", "INSERT INTO t VALUES (1)", "INSERT INTO t VALUES (8), (16)", ]), ).rejects.toBeLibsqlError(); const rs = await txn.execute("SELECT SUM(a) FROM t"); expect(rs.rows[0][0]).toStrictEqual(7); await txn.commit(); }), ); test( "batch error reports statement index in transaction", withClient(async (c) => { const txn = await c.transaction("write"); try { await txn.batch([ "DROP TABLE IF EXISTS t", "CREATE TABLE t (a UNIQUE)", "INSERT INTO t VALUES (1), (2), (3)", "INSERT INTO t VALUES (1)", // Duplicate, will fail at index 3 "INSERT INTO t VALUES (4), (5)", ]); throw new Error("Expected batch to fail"); } catch (e: any) { expect(e.name).toBe("LibsqlBatchError"); expect(e.statementIndex).toBe(3); expect(e.code).toBeDefined(); } // Transaction should still be usable after batch error const rs = await txn.execute("SELECT SUM(a) FROM t"); expect(rs.rows[0][0]).toBe(6); await txn.commit(); }), ); test( "batch error reports statement index - error at first statement in transaction", withClient(async (c) => { const txn = await c.transaction("read"); try { await txn.batch([ "SELECT invalid_column", "SELECT 1", "SELECT 2", ]); throw new 
Error("Expected batch to fail"); } catch (e: any) { expect(e.name).toBe("LibsqlBatchError"); expect(e.statementIndex).toBe(0); expect(e.code).toBeDefined(); } txn.close(); }), ); test( "batch error reports statement index - error at middle statement in transaction", withClient(async (c) => { const txn = await c.transaction("read"); try { await txn.batch([ "SELECT 1", "SELECT 2", "SELECT invalid_column", "SELECT 3", ]); throw new Error("Expected batch to fail"); } catch (e: any) { expect(e.name).toBe("LibsqlBatchError"); expect(e.statementIndex).toBe(2); expect(e.code).toBeDefined(); } txn.close(); }), ); }); (hasHrana2 ? describe : describe.skip)("executeMultiple()", () => { test( "as the first operation on transaction", withClient(async (c) => { const txn = await c.transaction("write"); await txn.executeMultiple(` DROP TABLE IF EXISTS t; CREATE TABLE t (a); INSERT INTO t VALUES (1), (2), (4), (8); `); const rs = await txn.execute("SELECT SUM(a) FROM t"); expect(rs.rows[0][0]).toStrictEqual(15); txn.close(); }), ); test( "as the second operation on transaction", withClient(async (c) => { const txn = await c.transaction("write"); await txn.execute("DROP TABLE IF EXISTS t"); await txn.executeMultiple(` CREATE TABLE t (a); INSERT INTO t VALUES (1), (2), (4), (8); `); const rs = await txn.execute("SELECT SUM(a) FROM t"); expect(rs.rows[0][0]).toStrictEqual(15); txn.close(); }), ); test( "after error, further statements are not executed", withClient(async (c) => { const txn = await c.transaction("write"); await expect( txn.executeMultiple(` DROP TABLE IF EXISTS t; CREATE TABLE t (a UNIQUE); INSERT INTO t VALUES (1), (2), (4); INSERT INTO t VALUES (1); INSERT INTO t VALUES (8), (16); `), ).rejects.toBeLibsqlError(); const rs = await txn.execute("SELECT SUM(a) FROM t"); expect(rs.rows[0][0]).toStrictEqual(7); await txn.commit(); }), ); }); }); (hasHrana2 ? 
describe : describe.skip)("executeMultiple()", () => { test( "multiple statements", withClient(async (c) => { await c.executeMultiple(` DROP TABLE IF EXISTS t; CREATE TABLE t (a); INSERT INTO t VALUES (1), (2), (4), (8); `); const rs = await c.execute("SELECT SUM(a) FROM t"); expect(rs.rows[0][0]).toStrictEqual(15); }), ); test( "after an error, statements are not executed", withClient(async (c) => { await expect( c.executeMultiple(` DROP TABLE IF EXISTS t; CREATE TABLE t (a); INSERT INTO t VALUES (1), (2), (4); INSERT INTO t VALUES (foo()); INSERT INTO t VALUES (8), (16); `), ).rejects.toBeLibsqlError(); const rs = await c.execute("SELECT SUM(a) FROM t"); expect(rs.rows[0][0]).toStrictEqual(7); }), ); test( "manual transaction control statements", withClient(async (c) => { await c.executeMultiple(` DROP TABLE IF EXISTS t; CREATE TABLE t (a); BEGIN; INSERT INTO t VALUES (1), (2), (4); INSERT INTO t VALUES (8), (16); COMMIT; `); const rs = await c.execute("SELECT SUM(a) FROM t"); expect(rs.rows[0][0]).toStrictEqual(31); }), ); test( "error rolls back a manual transaction", withClient(async (c) => { await expect( c.executeMultiple(` DROP TABLE IF EXISTS t; CREATE TABLE t (a); INSERT INTO t VALUES (0); BEGIN; INSERT INTO t VALUES (1), (2), (4); INSERT INTO t VALUES (foo()); INSERT INTO t VALUES (8), (16); COMMIT; `), ).rejects.toBeLibsqlError(); const rs = await c.execute("SELECT SUM(a) FROM t"); expect(rs.rows[0][0]).toStrictEqual(0); }), ); }); (hasNetworkErrors ? 
describe : describe.skip)("network errors", () => { const testCases = [ { title: "WebSocket close", sql: ".close_ws" }, { title: "TCP close", sql: ".close_tcp" }, ]; for (const { title, sql } of testCases) { test( `${title} in execute()`, withClient(async (c) => { await expect(c.execute(sql)).rejects.toBeLibsqlError( "HRANA_WEBSOCKET_ERROR", ); expect((await c.execute("SELECT 42")).rows[0][0]).toStrictEqual( 42, ); }), ); test( `${title} in transaction()`, withClient(async (c) => { const txn = await c.transaction("read"); await expect(txn.execute(sql)).rejects.toBeLibsqlError( "HRANA_WEBSOCKET_ERROR", ); await expect(txn.commit()).rejects.toBeLibsqlError( "TRANSACTION_CLOSED", ); txn.close(); expect((await c.execute("SELECT 42")).rows[0][0]).toStrictEqual( 42, ); }), ); test( `${title} in batch()`, withClient(async (c) => { await expect( c.batch(["SELECT 42", sql, "SELECT 24"], "read"), ).rejects.toBeLibsqlError("HRANA_WEBSOCKET_ERROR"); expect((await c.execute("SELECT 42")).rows[0][0]).toStrictEqual( 42, ); }), ); } }); (isHttp ? test : test.skip)("custom fetch", async () => { let fetchCalledCount = 0; function customFetch(request: Request): Promise { fetchCalledCount += 1; return fetch(request); } const c = createClient({ ...config, fetch: customFetch }); try { const rs = await c.execute("SELECT 42"); expect(rs.rows[0][0]).toStrictEqual(42); expect(fetchCalledCount).toBeGreaterThan(0); } finally { c.close(); } }); (isFile ? test : test.skip)("raw error codes", async () => { const c = createClient(config); try { await expect(c.execute("NOT A VALID SQL")).rejects.toThrow( expect.toBeLibsqlError({ code: "SQLITE_ERROR", rawCode: 1 }), ); } finally { c.close(); } }); // Test to verify constraint error codes // - code: base error code (e.g., SQLITE_CONSTRAINT) - consistent across local and remote // - extendedCode: extended error code (e.g., SQLITE_CONSTRAINT_PRIMARYKEY) - available when supported (server !== "test_v1" ? 
describe : describe.skip)( "constraint error codes", () => { test( "PRIMARY KEY constraint violation", withClient(async (c) => { await c.execute("DROP TABLE IF EXISTS t_pk_test"); await c.execute( "CREATE TABLE t_pk_test (id INTEGER PRIMARY KEY, name TEXT)", ); await c.execute("INSERT INTO t_pk_test VALUES (1, 'first')"); try { await c.execute( "INSERT INTO t_pk_test VALUES (1, 'duplicate')", ); throw new Error("Expected PRIMARY KEY constraint error"); } catch (e: any) { expect(e.code).toBe("SQLITE_CONSTRAINT"); if (e.extendedCode !== undefined) { expect(e.extendedCode).toBe( "SQLITE_CONSTRAINT_PRIMARYKEY", ); } } }), ); test( "UNIQUE constraint violation", withClient(async (c) => { await c.execute("DROP TABLE IF EXISTS t_unique_test"); await c.execute( "CREATE TABLE t_unique_test (id INTEGER, name TEXT UNIQUE)", ); await c.execute( "INSERT INTO t_unique_test VALUES (1, 'unique_name')", ); try { await c.execute( "INSERT INTO t_unique_test VALUES (2, 'unique_name')", ); throw new Error("Expected UNIQUE constraint error"); } catch (e: any) { expect(e.code).toBe("SQLITE_CONSTRAINT"); if (e.extendedCode !== undefined) { expect(e.extendedCode).toBe("SQLITE_CONSTRAINT_UNIQUE"); } } }), ); }, ); (isSqld ? 
test : test.skip)("embedded replica test", async () => { const remote = createClient(config); const embedded = createClient({ ...config, url: "file:///tmp/local.db", syncUrl: config.url, }); await remote.execute("CREATE TABLE embedded(a)"); await embedded.sync(); let embedded1 = await embedded.execute("SELECT * FROM embedded"); expect(embedded1.columns).toStrictEqual(["a"]); expect(embedded1.rows.length).toStrictEqual(0); await remote.execute("INSERT INTO embedded VALUES (1), (2), (3)"); let embedded2 = await embedded.execute("SELECT * FROM embedded"); expect(embedded2.columns).toStrictEqual(["a"]); expect(embedded2.rows.length).toStrictEqual(0); let remote1 = await remote.execute("SELECT * FROM embedded"); expect(remote1.columns).toStrictEqual(["a"]); expect(remote1.rows.length).toStrictEqual(3); await embedded.sync(); let embedded3 = await embedded.execute("SELECT * FROM embedded"); expect(embedded3.columns).toStrictEqual(["a"]); expect(embedded3.rows.length).toStrictEqual(3); }); ================================================ FILE: packages/libsql-client/src/__tests__/config.test.ts ================================================ import { expect } from "@jest/globals"; import "./helpers.js"; import { expandConfig } from "@libsql/core/config"; import { IntMode } from "@libsql/hrana-client"; describe("expandConfig - default tls values", () => { const cases = [ { name: "file", preferHttp: true, config: { url: "file://local.db" }, tls: true, }, { name: "http", preferHttp: true, config: { url: "http://localhost" }, tls: false, }, { name: "http (tls in config)", preferHttp: true, config: { url: "http://localhost", tls: true }, tls: true, }, { name: "http (tls in query)", preferHttp: true, config: { url: "http://localhost?tls=1", tls: false }, tls: true, }, { name: "http (no tls in query)", preferHttp: true, config: { url: "http://localhost?tls=0", tls: true }, tls: false, }, { name: "http (no tls in query)", preferHttp: true, config: { url: 
"http://localhost?tls=0", tls: true }, tls: false, }, ]; for (const { name, config, preferHttp, tls } of cases) { test(name, () => { expect(expandConfig(config, preferHttp).tls).toEqual(tls); }); } }); describe("expandConfig - invalid arguments", () => { const cases = [ { name: "in-memory with unsupported query params", config: { url: "file::memory:?mode=memory" }, error: 'Unsupported URL query parameter "mode"', }, { name: "in-memory with tls param", config: { url: "file::memory:?tls=0" }, error: 'Unsupported URL query parameter "tls"', }, { name: "in-memory with authToken param", config: { url: "file::memory:?authToken=0" }, error: 'Unsupported URL query parameter "authToken"', }, { name: "invalid tls param value", config: { url: "libsql://localhost?tls=2" }, error: 'Unknown value for the "tls" query argument: "2". Supported values are: ["0", "1"]', }, { name: "invalid scheme", config: { url: "ftp://localhost" }, error: /The client supports only.*got "ftp:"/g, }, { name: "invalid intMode", config: { url: "file://localhost", intMode: "decimal" as IntMode }, error: /Invalid value for intMode.*got "decimal"/g, }, { name: "fragment in uri", config: { url: "file://localhost#fragment" }, error: "URL fragments are not supported", }, { name: "libsql, no tls, no port", config: { url: "libsql://localhost?tls=0" }, error: "must specify an explicit port", }, ]; for (const { name, config, error } of cases) { test(name, () => { try { expandConfig(config, false); throw new Error("expand command must fail"); } catch (e: any) { expect(e.message).toMatch(error); } }); } }); describe("expandConfig - parsing of valid arguments", () => { const cases = [ { name: "in-memory", config: { url: ":memory:" }, expanded: { scheme: "file", tls: false, intMode: "number", path: ":memory:", concurrency: 20, }, }, { name: "in-memory with params", config: { url: "file::memory:?cache=shared" }, expanded: { scheme: "file", tls: false, intMode: "number", path: ":memory:?cache=shared", concurrency: 20, 
}, }, { name: "simple local file", config: { url: "file://local.db" }, expanded: { scheme: "file", authority: { host: "local.db" }, tls: true, intMode: "number", path: "", concurrency: 20, }, }, { name: "wss with path & port", config: { url: "wss://localhost:8888/libsql/connect" }, expanded: { scheme: "wss", authority: { host: "localhost", port: 8888 }, tls: true, intMode: "number", path: "/libsql/connect", concurrency: 20, }, }, { name: "wss with user info", config: { url: "wss://user:password@localhost:8888/libsql/connect", concurrency: 20, }, expanded: { scheme: "wss", authority: { host: "localhost", port: 8888, userinfo: { username: "user", password: "password" }, }, tls: true, intMode: "number", path: "/libsql/connect", concurrency: 20, }, }, { name: "override tls=0", config: { url: "wss://localhost/libsql/connect?tls=0", tls: true }, expanded: { scheme: "wss", authority: { host: "localhost" }, tls: false, intMode: "number", path: "/libsql/connect", concurrency: 20, }, }, { name: "override tls=1", config: { url: "wss://localhost/libsql/connect?tls=1", tls: false }, expanded: { scheme: "wss", authority: { host: "localhost" }, tls: true, intMode: "number", path: "/libsql/connect", concurrency: 20, }, }, { name: "override auth token", config: { url: "wss://localhost/libsql/connect?authToken=new", authToken: "old", }, expanded: { authToken: "new", scheme: "wss", authority: { host: "localhost" }, tls: true, intMode: "number", path: "/libsql/connect", concurrency: 20, }, }, ]; for (const { name, config, expanded } of cases) { test(name, () => { expect(expandConfig(config, false)).toEqual(expanded); }); } }); ================================================ FILE: packages/libsql-client/src/__tests__/helpers.ts ================================================ import { expect } from "@jest/globals"; import type { MatcherFunction } from "expect"; import { LibsqlError } from "../node.js"; type CodeMatch = { code: string; rawCode: number; }; const toBeLibsqlError: 
MatcherFunction< [code?: string | CodeMatch, message?: RegExp] > = function (actual, code?, messageRe?) { const pass = actual instanceof LibsqlError && isValidCode(actual, code) && (messageRe === undefined || actual.message.match(messageRe) !== null); const message = (): string => { const parts = []; parts.push("expected "); parts.push(this.utils.printReceived(actual)); parts.push(pass ? " not to be " : " to be "); parts.push("an instance of LibsqlError"); if (code !== undefined) { parts.push(" with error code "); parts.push(this.utils.printExpected(code)); } if (messageRe !== undefined) { parts.push(" with error message matching "); parts.push(this.utils.printExpected(messageRe)); } return parts.join(""); }; return { pass, message }; }; const isValidCode = (error: LibsqlError, code?: string | CodeMatch) => { if (code === undefined) { return true; } if (typeof code === "string") { return error.code === code; } return error.code === code.code && error.rawCode === code.rawCode; }; expect.extend({ toBeLibsqlError }); declare module "expect" { interface AsymmetricMatchers { toBeLibsqlError(code?: string | CodeMatch, messageRe?: RegExp): void; } interface Matchers { toBeLibsqlError(code?: string | CodeMatch, messageRe?: RegExp): R; } } ================================================ FILE: packages/libsql-client/src/__tests__/mocks/handlers.ts ================================================ import { http, HttpResponse } from "msw"; export const handlers = [ http.get("http://fake-base-url.example.com/v1/jobs", () => { return HttpResponse.json({ schema_version: 4, migrations: [ { job_id: 4, status: "WaitingDryRun" }, { job_id: 3, status: "RunSuccess" }, { job_id: 2, status: "RunSuccess" }, { job_id: 1, status: "RunSuccess" }, ], }); }), http.get( "http://fake-base-url.example.com/v1/jobs/:job_id", ({ params }) => { const { job_id } = params; return HttpResponse.json({ job_id, status: "RunSuccess", progress: [ { namespace: "b2ab4a64-402c-4bdf-a1e8-27ef33518cbd", status: 
"RunSuccess", error: null, }, ], }); }, ), ]; ================================================ FILE: packages/libsql-client/src/__tests__/mocks/node.ts ================================================ import { setupServer } from "msw/node"; import { handlers } from "./handlers"; export const server = setupServer(...handlers); ================================================ FILE: packages/libsql-client/src/__tests__/uri.test.ts ================================================ import { expect } from "@jest/globals"; import "./helpers.js"; import { parseUri, encodeBaseUrl } from "@libsql/core/uri"; describe("parseUri()", () => { test(":memory: uri", () => { const cases = [ { text: "file::memory:", path: ":memory:", query: undefined }, { text: "file::memory:?cache=shared", path: ":memory:", query: { pairs: [{ key: "cache", value: "shared" }] }, }, ]; for (const { text, path, query } of cases) { expect(parseUri(text)).toEqual({ scheme: "file", path, query }); } }); test("authority and path", () => { const cases = [ { text: "file://localhost", path: "" }, { text: "file://localhost/", path: "/" }, { text: "file://localhost/absolute/path", path: "/absolute/path" }, { text: "file://localhost/k%C5%AF%C5%88", path: "/kůň" }, ]; for (const { text, path } of cases) { expect(parseUri(text)).toEqual({ scheme: "file", authority: { host: "localhost" }, path, }); } }); test("empty authority and path", () => { const cases = [ { text: "file:///absolute/path", path: "/absolute/path" }, { text: "file://", path: "" }, { text: "file:///k%C5%AF%C5%88", path: "/kůň" }, ]; for (const { text, path } of cases) { expect(parseUri(text)).toEqual({ scheme: "file", authority: { host: "" }, path, }); } }); test("no authority and path", () => { const cases = [ { text: "file:/absolute/path", path: "/absolute/path" }, { text: "file:relative/path", path: "relative/path" }, { text: "file:", path: "" }, { text: "file:C:/path/to/file", path: "C:/path/to/file" }, { text: "file:k%C5%AF%C5%88", path: "kůň" 
}, ]; for (const { text, path } of cases) { expect(parseUri(text)).toEqual({ scheme: "file", path, }); } }); test("authority", () => { const hosts = [ { text: "localhost", host: "localhost" }, { text: "domain.name", host: "domain.name" }, { text: "some$weird.%20!name", host: "some$weird. !name" }, { text: "1.20.255.99", host: "1.20.255.99" }, { text: "[2001:4860:4802:32::a]", host: "2001:4860:4802:32::a" }, { text: "%61", host: "a" }, { text: "100%2e100%2e100%2e100", host: "100.100.100.100" }, { text: "k%C5%AF%C5%88", host: "kůň" }, ]; const ports = [ { text: "", port: undefined }, { text: ":", port: undefined }, { text: ":0", port: 0 }, { text: ":99", port: 99 }, { text: ":65535", port: 65535 }, ]; const userinfos = [ { text: "", userinfo: undefined }, { text: "@", userinfo: { username: "" } }, { text: "alice@", userinfo: { username: "alice" } }, { text: "alice:secret@", userinfo: { username: "alice", password: "secret" }, }, { text: "alice:sec:et@", userinfo: { username: "alice", password: "sec:et" }, }, { text: "alice%3Asecret@", userinfo: { username: "alice:secret" } }, { text: "alice:s%65cret@", userinfo: { username: "alice", password: "secret" }, }, ]; for (const { text: hostText, host } of hosts) { for (const { text: portText, port } of ports) { for (const { text: userText, userinfo } of userinfos) { const text = `http://${userText}${hostText}${portText}`; expect(parseUri(text)).toEqual({ scheme: "http", authority: { host, port, userinfo }, path: "", }); } } } }); test("query", () => { const cases = [ { text: "?", pairs: [] }, { text: "?key=value", pairs: [{ key: "key", value: "value" }] }, { text: "?&key=value", pairs: [{ key: "key", value: "value" }] }, { text: "?key=value&&", pairs: [{ key: "key", value: "value" }] }, { text: "?a", pairs: [{ key: "a", value: "" }] }, { text: "?a=", pairs: [{ key: "a", value: "" }] }, { text: "?=a", pairs: [{ key: "", value: "a" }] }, { text: "?=", pairs: [{ key: "", value: "" }] }, { text: "?a=b=c", pairs: [{ key: "a", 
value: "b=c" }] }, { text: "?a=b&c=d", pairs: [ { key: "a", value: "b" }, { key: "c", value: "d" }, ], }, { text: "?a+b=c", pairs: [{ key: "a b", value: "c" }] }, { text: "?a=b+c", pairs: [{ key: "a", value: "b c" }] }, { text: "?a?b", pairs: [{ key: "a?b", value: "" }] }, { text: "?%61=%62", pairs: [{ key: "a", value: "b" }] }, { text: "?a%3db", pairs: [{ key: "a=b", value: "" }] }, { text: "?a=%2b", pairs: [{ key: "a", value: "+" }] }, { text: "?%2b=b", pairs: [{ key: "+", value: "b" }] }, { text: "?a=b%26c", pairs: [{ key: "a", value: "b&c" }] }, { text: "?a=k%C5%AF%C5%88", pairs: [{ key: "a", value: "kůň" }] }, ]; for (const { text: queryText, pairs } of cases) { const text = `file:${queryText}`; expect(parseUri(text)).toEqual({ scheme: "file", path: "", query: { pairs }, }); } }); test("fragment", () => { const cases = [ { text: "", fragment: undefined }, { text: "#a", fragment: "a" }, { text: "#a?b", fragment: "a?b" }, { text: "#%61", fragment: "a" }, { text: "#k%C5%AF%C5%88", fragment: "kůň" }, ]; for (const { text: fragmentText, fragment } of cases) { const text = `file:${fragmentText}`; expect(parseUri(text)).toEqual({ scheme: "file", path: "", fragment, }); } }); test("parse errors", () => { const cases = [ { text: "", message: /format/ }, { text: "foo", message: /format/ }, { text: "foo.bar.com", message: /format/ }, { text: "h$$p://localhost", message: /format/ }, { text: "h%74%74p://localhost", message: /format/ }, { text: "http://localhost:%38%38", message: /authority/ }, { text: "file:k%C5%C5%88", message: /percent encoding/ }, ]; for (const { text, message } of cases) { expect(() => parseUri(text)).toThrow( expect.toBeLibsqlError("URL_INVALID", message), ); } }); }); test("encodeBaseUrl()", () => { const cases = [ { scheme: "http", host: "localhost", path: "", url: "http://localhost", }, { scheme: "http", host: "localhost", path: "/", url: "http://localhost/", }, { scheme: "http", host: "localhost", port: 8080, path: "", url: 
"http://localhost:8080", }, { scheme: "http", host: "localhost", path: "/foo/bar", url: "http://localhost/foo/bar", }, { scheme: "http", host: "localhost", path: "foo/bar", url: "http://localhost/foo/bar", }, { scheme: "http", host: "some.long.domain.name", path: "", url: "http://some.long.domain.name", }, { scheme: "http", host: "1.2.3.4", path: "", url: "http://1.2.3.4", }, { scheme: "http", host: "2001:4860:4802:32::a", path: "", url: "http://[2001:4860:4802:32::a]", }, { scheme: "http", host: "localhost", userinfo: { username: "alice", password: undefined }, path: "", url: "http://alice@localhost", }, { scheme: "http", host: "localhost", userinfo: { username: "alice", password: "secr:t" }, path: "", url: "http://alice:secr%3At@localhost", }, { scheme: "https", host: "localhost", userinfo: { username: "alice", password: "secret" }, port: 8080, path: "/some/path", url: "https://alice:secret@localhost:8080/some/path", }, ]; for (const { scheme, host, port, userinfo, path, url } of cases) { expect( encodeBaseUrl(scheme, { host, port, userinfo }, path), ).toStrictEqual(new URL(url)); } }); ================================================ FILE: packages/libsql-client/src/hrana.ts ================================================ import * as hrana from "@libsql/hrana-client"; import type { InStatement, ResultSet, Transaction, TransactionMode, InArgs, } from "@libsql/core/api"; import { LibsqlError, LibsqlBatchError } from "@libsql/core/api"; import type { SqlCache } from "./sql_cache.js"; import { transactionModeToBegin, ResultSetImpl } from "@libsql/core/util"; export abstract class HranaTransaction implements Transaction { #mode: TransactionMode; #version: hrana.ProtocolVersion; // Promise that is resolved when the BEGIN statement completes, or `undefined` if we haven't executed the // BEGIN statement yet. 
#started: Promise | undefined; /** @private */ constructor(mode: TransactionMode, version: hrana.ProtocolVersion) { this.#mode = mode; this.#version = version; this.#started = undefined; } /** @private */ abstract _getStream(): hrana.Stream; /** @private */ abstract _getSqlCache(): SqlCache; abstract close(): void; abstract get closed(): boolean; execute(stmt: InStatement): Promise { return this.batch([stmt]).then((results) => results[0]); } async batch(stmts: Array): Promise> { const stream = this._getStream(); if (stream.closed) { throw new LibsqlError( "Cannot execute statements because the transaction is closed", "TRANSACTION_CLOSED", ); } try { const hranaStmts = stmts.map(stmtToHrana); let rowsPromises: Array>; if (this.#started === undefined) { // The transaction hasn't started yet, so we need to send the BEGIN statement in a batch with // `hranaStmts`. this._getSqlCache().apply(hranaStmts); const batch = stream.batch(this.#version >= 3); const beginStep = batch.step(); const beginPromise = beginStep.run( transactionModeToBegin(this.#mode), ); // Execute the `hranaStmts` only if the BEGIN succeeded, to make sure that we don't execute it // outside of a transaction. let lastStep = beginStep; rowsPromises = hranaStmts.map((hranaStmt) => { const stmtStep = batch .step() .condition(hrana.BatchCond.ok(lastStep)); if (this.#version >= 3) { // If the Hrana version supports it, make sure that we are still in a transaction stmtStep.condition( hrana.BatchCond.not( hrana.BatchCond.isAutocommit(batch), ), ); } const rowsPromise = stmtStep.query(hranaStmt); rowsPromise.catch(() => undefined); // silence Node warning lastStep = stmtStep; return rowsPromise; }); // `this.#started` is resolved successfully only if the batch and the BEGIN statement inside // of the batch are both successful. 
this.#started = batch .execute() .then(() => beginPromise) .then(() => undefined); try { await this.#started; } catch (e) { // If the BEGIN failed, the transaction is unusable and we must close it. However, if the // BEGIN suceeds and `hranaStmts` fail, the transaction is _not_ closed. this.close(); throw e; } } else { if (this.#version < 3) { // The transaction has started, so we must wait until the BEGIN statement completed to make // sure that we don't execute `hranaStmts` outside of a transaction. await this.#started; } else { // The transaction has started, but we will use `hrana.BatchCond.isAutocommit()` to make // sure that we don't execute `hranaStmts` outside of a transaction, so we don't have to // wait for `this.#started` } this._getSqlCache().apply(hranaStmts); const batch = stream.batch(this.#version >= 3); let lastStep: hrana.BatchStep | undefined = undefined; rowsPromises = hranaStmts.map((hranaStmt) => { const stmtStep = batch.step(); if (lastStep !== undefined) { stmtStep.condition(hrana.BatchCond.ok(lastStep)); } if (this.#version >= 3) { stmtStep.condition( hrana.BatchCond.not( hrana.BatchCond.isAutocommit(batch), ), ); } const rowsPromise = stmtStep.query(hranaStmt); rowsPromise.catch(() => undefined); // silence Node warning lastStep = stmtStep; return rowsPromise; }); await batch.execute(); } const resultSets = []; for (let i = 0; i < rowsPromises.length; i++) { try { const rows = await rowsPromises[i]; if (rows === undefined) { throw new LibsqlBatchError( "Statement in a transaction was not executed, " + "probably because the transaction has been rolled back", i, "TRANSACTION_CLOSED", ); } resultSets.push(resultSetFromHrana(rows)); } catch (e) { if (e instanceof LibsqlBatchError) { throw e; } // Map hrana errors to LibsqlError first, then wrap in LibsqlBatchError const mappedError = mapHranaError(e); if (mappedError instanceof LibsqlError) { throw new LibsqlBatchError( mappedError.message, i, mappedError.code, mappedError.extendedCode, 
mappedError.rawCode, mappedError.cause instanceof Error ? mappedError.cause : undefined, ); } throw mappedError; } } return resultSets; } catch (e) { throw mapHranaError(e); } } async executeMultiple(sql: string): Promise { const stream = this._getStream(); if (stream.closed) { throw new LibsqlError( "Cannot execute statements because the transaction is closed", "TRANSACTION_CLOSED", ); } try { if (this.#started === undefined) { // If the transaction hasn't started yet, start it now this.#started = stream .run(transactionModeToBegin(this.#mode)) .then(() => undefined); try { await this.#started; } catch (e) { this.close(); throw e; } } else { // Wait until the transaction has started await this.#started; } await stream.sequence(sql); } catch (e) { throw mapHranaError(e); } } async rollback(): Promise { try { const stream = this._getStream(); if (stream.closed) { return; } if (this.#started !== undefined) { // We don't have to wait for the BEGIN statement to complete. If the BEGIN fails, we will // execute a ROLLBACK outside of an active transaction, which should be harmless. } else { // We did nothing in the transaction, so there is nothing to rollback. return; } // Pipeline the ROLLBACK statement and the stream close. const promise = stream.run("ROLLBACK").catch((e) => { throw mapHranaError(e); }); stream.closeGracefully(); await promise; } catch (e) { throw mapHranaError(e); } finally { // `this.close()` may close the `hrana.Client`, which aborts all pending stream requests, so we // must call it _after_ we receive the ROLLBACK response. // Also note that the current stream should already be closed, but we need to call `this.close()` // anyway, because it may need to do more cleanup. 
this.close();
        }
    }

    async commit(): Promise {
        // (this method is analogous to `rollback()`)
        try {
            const stream = this._getStream();
            if (stream.closed) {
                // Unlike rollback(), committing an already-closed transaction
                // is an error: the caller expects its writes to persist.
                throw new LibsqlError(
                    "Cannot commit the transaction because it is already closed",
                    "TRANSACTION_CLOSED",
                );
            }
            if (this.#started !== undefined) {
                // Make sure to execute the COMMIT only if the BEGIN was successful.
                await this.#started;
            } else {
                // Nothing was executed in this transaction, so there is
                // nothing to commit.
                return;
            }
            // Pipeline the COMMIT statement with the graceful stream close.
            const promise = stream.run("COMMIT").catch((e) => {
                throw mapHranaError(e);
            });
            stream.closeGracefully();
            await promise;
        } catch (e) {
            throw mapHranaError(e);
        } finally {
            // Close the transaction object only after the COMMIT response is
            // received (see the analogous comment in `rollback()`).
            this.close();
        }
    }
}

/**
 * Executes `hranaStmts` as one Hrana batch wrapped in an explicit transaction.
 *
 * The batch is assembled as: optional `PRAGMA foreign_keys=off`, BEGIN, the
 * statements (each conditioned on the previous step succeeding and — on Hrana
 * v3+ — on the connection not being in autocommit, i.e. still inside the
 * transaction), COMMIT, a ROLLBACK step that runs only if the COMMIT step did
 * not succeed, and optional `PRAGMA foreign_keys=on`.
 *
 * Per-statement failures are surfaced as `LibsqlBatchError` carrying the
 * zero-based index of the failing statement.
 *
 * @param mode transaction mode used to pick the BEGIN variant
 * @param version negotiated Hrana protocol version
 * @param batch the Hrana batch to populate and execute
 * @param hranaStmts statements to run inside the transaction
 * @param disableForeignKeys when true, foreign keys are turned off for the
 *        duration of the batch and re-enabled afterwards
 * @returns one `ResultSet` per statement, in order
 */
export async function executeHranaBatch(
    mode: TransactionMode,
    version: hrana.ProtocolVersion,
    batch: hrana.Batch,
    hranaStmts: Array,
    disableForeignKeys: boolean = false,
): Promise> {
    if (disableForeignKeys) {
        batch.step().run("PRAGMA foreign_keys=off");
    }
    const beginStep = batch.step();
    const beginPromise = beginStep.run(transactionModeToBegin(mode));

    // Chain each statement on the success of the previous step, so that a
    // failure stops execution of the rest of the batch.
    let lastStep = beginStep;
    const stmtPromises = hranaStmts.map((hranaStmt) => {
        const stmtStep = batch.step().condition(hrana.BatchCond.ok(lastStep));
        if (version >= 3) {
            // On Hrana v3+, also make sure we are still inside a transaction
            // (e.g. a ROLLBACK statement in the batch ends it).
            stmtStep.condition(
                hrana.BatchCond.not(hrana.BatchCond.isAutocommit(batch)),
            );
        }
        const stmtPromise = stmtStep.query(hranaStmt);
        lastStep = stmtStep;
        return stmtPromise;
    });

    const commitStep = batch.step().condition(hrana.BatchCond.ok(lastStep));
    if (version >= 3) {
        commitStep.condition(
            hrana.BatchCond.not(hrana.BatchCond.isAutocommit(batch)),
        );
    }
    const commitPromise = commitStep.run("COMMIT");
    // If the COMMIT step did not succeed, roll the transaction back;
    // the ROLLBACK result itself is deliberately ignored.
    const rollbackStep = batch
        .step()
        .condition(hrana.BatchCond.not(hrana.BatchCond.ok(commitStep)));
    rollbackStep.run("ROLLBACK").catch((_) => undefined);

    if (disableForeignKeys) {
        batch.step().run("PRAGMA foreign_keys=on");
    }
    await batch.execute();

    const resultSets = [];
    // Propagate a BEGIN failure before inspecting individual statements.
    await beginPromise;
    for (let i = 0; i < stmtPromises.length; i++) {
        try {
            const hranaRows = await stmtPromises[i];
            if (hranaRows === undefined) {
                // The step was skipped by its condition (earlier failure or
                // rolled-back transaction).
                throw new LibsqlBatchError(
                    "Statement in a batch was not executed, probably because the transaction has been rolled back",
                    i,
                    "TRANSACTION_CLOSED",
                );
            }
            resultSets.push(resultSetFromHrana(hranaRows));
        } catch (e) {
            if (e instanceof LibsqlBatchError) {
                throw e;
            }
            // Map hrana errors to LibsqlError first, then wrap in LibsqlBatchError
            const mappedError = mapHranaError(e);
            if (mappedError instanceof LibsqlError) {
                throw new LibsqlBatchError(
                    mappedError.message,
                    i,
                    mappedError.code,
                    mappedError.extendedCode,
                    mappedError.rawCode,
                    mappedError.cause instanceof Error
                        ? mappedError.cause
                        : undefined,
                );
            }
            throw mappedError;
        }
    }
    await commitPromise;
    return resultSets;
}

/**
 * Converts an input statement (plain SQL string, `[sql, args]` tuple, or
 * `{sql, args}` object) into a bound `hrana.Stmt`. Array args are bound by
 * index, object args by name.
 */
export function stmtToHrana(stmt: InStatement | [string, InArgs?]): hrana.Stmt {
    let sql: string;
    let args: InArgs | undefined;
    if (Array.isArray(stmt)) {
        [sql, args] = stmt;
    } else if (typeof stmt === "string") {
        sql = stmt;
    } else {
        sql = stmt.sql;
        args = stmt.args;
    }
    const hranaStmt = new hrana.Stmt(sql);
    if (args) {
        if (Array.isArray(args)) {
            hranaStmt.bindIndexes(args);
        } else {
            for (const [key, value] of Object.entries(args)) {
                hranaStmt.bindName(key, value);
            }
        }
    }
    return hranaStmt;
}

/** Converts a Hrana rows result into the public `ResultSet` shape. */
export function resultSetFromHrana(hranaRows: hrana.RowsResult): ResultSet {
    // Hrana may report missing column names/decltypes as null; normalize to "".
    const columns = hranaRows.columnNames.map((c) => c ?? "");
    const columnTypes = hranaRows.columnDecltypes.map((c) => c ?? "");
    const rows = hranaRows.rows;
    const rowsAffected = hranaRows.affectedRowCount;
    const lastInsertRowid =
        hranaRows.lastInsertRowid !== undefined ?
hranaRows.lastInsertRowid : undefined; return new ResultSetImpl( columns, columnTypes, rows, rowsAffected, lastInsertRowid, ); } export function mapHranaError(e: unknown): unknown { if (e instanceof hrana.ClientError) { const code = mapHranaErrorCode(e); // TODO: Parse extendedCode once the SQL over HTTP protocol supports it return new LibsqlError(e.message, code, undefined, undefined, e); } return e; } function mapHranaErrorCode(e: hrana.ClientError): string { if (e instanceof hrana.ResponseError && e.code !== undefined) { return e.code; } else if (e instanceof hrana.ProtoError) { return "HRANA_PROTO_ERROR"; } else if (e instanceof hrana.ClosedError) { return e.cause instanceof hrana.ClientError ? mapHranaErrorCode(e.cause) : "HRANA_CLOSED_ERROR"; } else if (e instanceof hrana.WebSocketError) { return "HRANA_WEBSOCKET_ERROR"; } else if (e instanceof hrana.HttpServerError) { return "SERVER_ERROR"; } else if (e instanceof hrana.ProtocolVersionError) { return "PROTOCOL_VERSION_ERROR"; } else if (e instanceof hrana.InternalError) { return "INTERNAL_ERROR"; } else { return "UNKNOWN"; } } ================================================ FILE: packages/libsql-client/src/http.ts ================================================ import * as hrana from "@libsql/hrana-client"; import type { Config, Client } from "@libsql/core/api"; import type { InStatement, ResultSet, Transaction, IntMode, InArgs, Replicated, } from "@libsql/core/api"; import { TransactionMode, LibsqlError } from "@libsql/core/api"; import type { ExpandedConfig } from "@libsql/core/config"; import { expandConfig } from "@libsql/core/config"; import { HranaTransaction, executeHranaBatch, stmtToHrana, resultSetFromHrana, mapHranaError, } from "./hrana.js"; import { SqlCache } from "./sql_cache.js"; import { encodeBaseUrl } from "@libsql/core/uri"; import { supportedUrlLink } from "@libsql/core/util"; import promiseLimit from "promise-limit"; export * from "@libsql/core/api"; export function 
createClient(config: Config): Client {
    return _createClient(expandConfig(config, true));
}

/** @private */
export function _createClient(config: ExpandedConfig): Client {
    if (config.scheme !== "https" && config.scheme !== "http") {
        throw new LibsqlError(
            'The HTTP client supports only "libsql:", "https:" and "http:" URLs, ' +
                `got ${JSON.stringify(config.scheme + ":")}. For more information, please read ${supportedUrlLink}`,
            "URL_SCHEME_NOT_SUPPORTED",
        );
    }

    if (config.encryptionKey !== undefined) {
        throw new LibsqlError(
            "Encryption key is not supported by the remote client.",
            "ENCRYPTION_KEY_NOT_SUPPORTED",
        );
    }

    if (config.scheme === "http" && config.tls) {
        throw new LibsqlError(
            `A "http:" URL cannot opt into TLS by using ?tls=1`,
            "URL_INVALID",
        );
    } else if (config.scheme === "https" && !config.tls) {
        throw new LibsqlError(
            `A "https:" URL cannot opt out of TLS by using ?tls=0`,
            "URL_INVALID",
        );
    }

    const url = encodeBaseUrl(config.scheme, config.authority, config.path);
    return new HttpClient(
        url,
        config.authToken,
        config.intMode,
        config.fetch,
        config.concurrency,
        config.remoteEncryptionKey,
    );
}

const sqlCacheCapacity = 30;

// HTTP (SQL over HTTP / Hrana) client. Every operation opens a fresh stream and pipelines
// open/execute/close into a single HTTP request; concurrency is bounded by `promise-limit`.
export class HttpClient implements Client {
    #client: hrana.HttpClient;
    protocol: "http";
    #url: URL;
    #intMode: IntMode;
    #customFetch: Function | undefined;
    #concurrency: number;
    #authToken: string | undefined;
    #remoteEncryptionKey: string | undefined;
    #promiseLimitFunction: ReturnType<typeof promiseLimit<any>>;

    /** @private */
    constructor(
        url: URL,
        authToken: string | undefined,
        intMode: IntMode,
        customFetch: Function | undefined,
        concurrency: number,
        remoteEncryptionKey: string | undefined,
    ) {
        this.#url = url;
        this.#authToken = authToken;
        this.#intMode = intMode;
        this.#customFetch = customFetch;
        this.#concurrency = concurrency;
        this.#remoteEncryptionKey = remoteEncryptionKey;
        this.#client = hrana.openHttp(
            this.#url,
            this.#authToken,
            this.#customFetch,
            remoteEncryptionKey,
        );
        this.#client.intMode = this.#intMode;
        this.protocol = "http";
        this.#promiseLimitFunction
= promiseLimit(this.#concurrency); } private async limit(fn: () => Promise): Promise { return this.#promiseLimitFunction(fn); } async execute( stmtOrSql: InStatement | string, args?: InArgs, ): Promise { let stmt: InStatement; if (typeof stmtOrSql === "string") { stmt = { sql: stmtOrSql, args: args || [], }; } else { stmt = stmtOrSql; } return this.limit(async () => { try { const hranaStmt = stmtToHrana(stmt); // Pipeline all operations, so `hrana.HttpClient` can open the stream, execute the statement and // close the stream in a single HTTP request. let rowsPromise: Promise; const stream = this.#client.openStream(); try { rowsPromise = stream.query(hranaStmt); } finally { stream.closeGracefully(); } const rowsResult = await rowsPromise; return resultSetFromHrana(rowsResult); } catch (e) { throw mapHranaError(e); } }); } async batch( stmts: Array, mode: TransactionMode = "deferred", ): Promise> { return this.limit>(async () => { try { const normalizedStmts = stmts.map((stmt) => { if (Array.isArray(stmt)) { return { sql: stmt[0], args: stmt[1] || [], }; } return stmt; }); const hranaStmts = normalizedStmts.map(stmtToHrana); const version = await this.#client.getVersion(); // Pipeline all operations, so `hrana.HttpClient` can open the stream, execute the batch and // close the stream in a single HTTP request. let resultsPromise: Promise>; const stream = this.#client.openStream(); try { // It makes sense to use a SQL cache even for a single batch, because it may contain the same // statement repeated multiple times. const sqlCache = new SqlCache(stream, sqlCacheCapacity); sqlCache.apply(hranaStmts); // TODO: we do not use a cursor here, because it would cause three roundtrips: // 1. pipeline request to store SQL texts // 2. cursor request // 3. 
pipeline request to close the stream const batch = stream.batch(false); resultsPromise = executeHranaBatch( mode, version, batch, hranaStmts, ); } finally { stream.closeGracefully(); } const results = await resultsPromise; return results; } catch (e) { throw mapHranaError(e); } }); } async migrate(stmts: Array): Promise> { return this.limit>(async () => { try { const hranaStmts = stmts.map(stmtToHrana); const version = await this.#client.getVersion(); // Pipeline all operations, so `hrana.HttpClient` can open the stream, execute the batch and // close the stream in a single HTTP request. let resultsPromise: Promise>; const stream = this.#client.openStream(); try { const batch = stream.batch(false); resultsPromise = executeHranaBatch( "deferred", version, batch, hranaStmts, true, ); } finally { stream.closeGracefully(); } const results = await resultsPromise; return results; } catch (e) { throw mapHranaError(e); } }); } async transaction( mode: TransactionMode = "write", ): Promise { return this.limit(async () => { try { const version = await this.#client.getVersion(); return new HttpTransaction( this.#client.openStream(), mode, version, ); } catch (e) { throw mapHranaError(e); } }); } async executeMultiple(sql: string): Promise { return this.limit(async () => { try { // Pipeline all operations, so `hrana.HttpClient` can open the stream, execute the sequence and // close the stream in a single HTTP request. 
let promise: Promise; const stream = this.#client.openStream(); try { promise = stream.sequence(sql); } finally { stream.closeGracefully(); } await promise; } catch (e) { throw mapHranaError(e); } }); } sync(): Promise { throw new LibsqlError( "sync not supported in http mode", "SYNC_NOT_SUPPORTED", ); } close(): void { this.#client.close(); } async reconnect(): Promise { try { if (!this.closed) { // Abort in-flight ops and free resources this.#client.close(); } } finally { // Recreate the underlying hrana client this.#client = hrana.openHttp( this.#url, this.#authToken, this.#customFetch, this.#remoteEncryptionKey, ); this.#client.intMode = this.#intMode; } } get closed(): boolean { return this.#client.closed; } } export class HttpTransaction extends HranaTransaction implements Transaction { #stream: hrana.HttpStream; #sqlCache: SqlCache; /** @private */ constructor( stream: hrana.HttpStream, mode: TransactionMode, version: hrana.ProtocolVersion, ) { super(mode, version); this.#stream = stream; this.#sqlCache = new SqlCache(stream, sqlCacheCapacity); } /** @private */ override _getStream(): hrana.Stream { return this.#stream; } /** @private */ override _getSqlCache(): SqlCache { return this.#sqlCache; } override close(): void { this.#stream.close(); } override get closed(): boolean { return this.#stream.closed; } } ================================================ FILE: packages/libsql-client/src/node.ts ================================================ import type { Config, Client } from "@libsql/core/api"; import { LibsqlError } from "@libsql/core/api"; import type { ExpandedConfig } from "@libsql/core/config"; import { expandConfig } from "@libsql/core/config"; import { _createClient as _createSqlite3Client } from "./sqlite3.js"; import { _createClient as _createWsClient } from "./ws.js"; import { _createClient as _createHttpClient } from "./http.js"; export * from "@libsql/core/api"; /** Creates a {@link Client} object. 
* * You must pass at least an `url` in the {@link Config} object. */ export function createClient(config: Config): Client { return _createClient(expandConfig(config, true)); } function _createClient(config: ExpandedConfig) { if (config.scheme === "wss" || config.scheme === "ws") { return _createWsClient(config); } else if (config.scheme === "https" || config.scheme === "http") { return _createHttpClient(config); } else { return _createSqlite3Client(config); } } ================================================ FILE: packages/libsql-client/src/sql_cache.ts ================================================ import type * as hrana from "@libsql/hrana-client"; export class SqlCache { #owner: hrana.SqlOwner; #sqls: Lru; capacity: number; constructor(owner: hrana.SqlOwner, capacity: number) { this.#owner = owner; this.#sqls = new Lru(); this.capacity = capacity; } // Replaces SQL strings with cached `hrana.Sql` objects in the statements in `hranaStmts`. After this // function returns, we guarantee that all `hranaStmts` refer to valid (not closed) `hrana.Sql` objects, // but _we may invalidate any other `hrana.Sql` objects_ (by closing them, thus removing them from the // server). // // In practice, this means that after calling this function, you can use the statements only up to the // first `await`, because concurrent code may also use the cache and invalidate those statements. apply(hranaStmts: Array): void { if (this.capacity <= 0) { return; } const usedSqlObjs: Set = new Set(); for (const hranaStmt of hranaStmts) { if (typeof hranaStmt.sql !== "string") { continue; } const sqlText = hranaStmt.sql; // Stored SQL cannot exceed 5kb. 
// https://github.com/tursodatabase/libsql/blob/e9d637e051685f92b0da43849507b5ef4232fbeb/libsql-server/src/hrana/http/request.rs#L10 if (sqlText.length >= 5000) { continue; } let sqlObj = this.#sqls.get(sqlText); if (sqlObj === undefined) { while (this.#sqls.size + 1 > this.capacity) { const [evictSqlText, evictSqlObj] = this.#sqls.peekLru()!; if (usedSqlObjs.has(evictSqlObj)) { // The SQL object that we are trying to evict is already in use in this batch, so we // must not evict and close it. break; } evictSqlObj.close(); this.#sqls.delete(evictSqlText); } if (this.#sqls.size + 1 <= this.capacity) { sqlObj = this.#owner.storeSql(sqlText); this.#sqls.set(sqlText, sqlObj); } } if (sqlObj !== undefined) { hranaStmt.sql = sqlObj; usedSqlObjs.add(sqlObj); } } } } class Lru { // This maps keys to the cache values. The entries are ordered by their last use (entires that were used // most recently are at the end). #cache: Map; constructor() { this.#cache = new Map(); } get(key: K): V | undefined { const value = this.#cache.get(key); if (value !== undefined) { // move the entry to the back of the Map this.#cache.delete(key); this.#cache.set(key, value); } return value; } set(key: K, value: V): void { this.#cache.set(key, value); } peekLru(): [K, V] | undefined { for (const entry of this.#cache.entries()) { return entry; } return undefined; } delete(key: K): void { this.#cache.delete(key); } get size(): number { return this.#cache.size; } } ================================================ FILE: packages/libsql-client/src/sqlite3.ts ================================================ import Database from "libsql"; import { Buffer } from "node:buffer"; import type { Config, IntMode, Client, Transaction, TransactionMode, ResultSet, Row, Value, InValue, InStatement, InArgs, Replicated, } from "@libsql/core/api"; import { LibsqlError, LibsqlBatchError } from "@libsql/core/api"; import type { ExpandedConfig } from "@libsql/core/config"; import { expandConfig, isInMemoryConfig } from 
"@libsql/core/config";
import {
    supportedUrlLink,
    transactionModeToBegin,
    ResultSetImpl,
} from "@libsql/core/util";

export * from "@libsql/core/api";

export function createClient(config: Config): Client {
    return _createClient(expandConfig(config, true));
}

/** @private */
export function _createClient(config: ExpandedConfig): Client {
    if (config.scheme !== "file") {
        throw new LibsqlError(
            `URL scheme ${JSON.stringify(config.scheme + ":")} is not supported by the local sqlite3 client. ` +
                `For more information, please read ${supportedUrlLink}`,
            "URL_SCHEME_NOT_SUPPORTED",
        );
    }

    const authority = config.authority;
    if (authority !== undefined) {
        const host = authority.host.toLowerCase();
        if (host !== "" && host !== "localhost") {
            throw new LibsqlError(
                `Invalid host in file URL: ${JSON.stringify(authority.host)}. ` +
                    'A "file:" URL with an absolute path should start with one slash ("file:/absolute/path.db") ' +
                    'or with three slashes ("file:///absolute/path.db"). ' +
                    `For more information, please read ${supportedUrlLink}`,
                "URL_INVALID",
            );
        }

        if (authority.port !== undefined) {
            throw new LibsqlError("File URL cannot have a port", "URL_INVALID");
        }
        if (authority.userinfo !== undefined) {
            throw new LibsqlError(
                "File URL cannot have username and password",
                "URL_INVALID",
            );
        }
    }

    let isInMemory = isInMemoryConfig(config);
    if (isInMemory && config.syncUrl) {
        throw new LibsqlError(
            `Embedded replica must use file for local db but URI with in-memory mode were provided instead: ${config.path}`,
            "URL_INVALID",
        );
    }

    let path = config.path;
    if (isInMemory) {
        // note: we should prepend file scheme in order for SQLite3 to recognize :memory: connection query parameters
        path = `${config.scheme}:${config.path}`;
    }

    const options = {
        authToken: config.authToken,
        encryptionKey: config.encryptionKey,
        remoteEncryptionKey: config.remoteEncryptionKey,
        syncUrl: config.syncUrl,
        syncPeriod: config.syncInterval,
        readYourWrites: config.readYourWrites,
        offline: config.offline,
    };

    const db = new Database(path, options);
    // Fail fast if the database file cannot actually be opened.
    executeStmt(
        db,
        "SELECT 1 AS checkThatTheDatabaseCanBeOpened",
        config.intMode,
    );

    return new Sqlite3Client(path, options, db, config.intMode);
}

// Local sqlite3 client backed by the `libsql` native binding.
export class Sqlite3Client implements Client {
    #path: string;
    #options: Database.Options;
    #db: Database.Database | null;
    #intMode: IntMode;
    closed: boolean;
    protocol: "file";

    /** @private */
    constructor(
        path: string,
        options: Database.Options,
        db: Database.Database,
        intMode: IntMode,
    ) {
        this.#path = path;
        this.#options = options;
        this.#db = db;
        this.#intMode = intMode;
        this.closed = false;
        this.protocol = "file";
    }

    async execute(
        stmtOrSql: InStatement | string,
        args?: InArgs,
    ): Promise<ResultSet> {
        let stmt: InStatement;

        if (typeof stmtOrSql === "string") {
            stmt = {
                sql: stmtOrSql,
                args: args || [],
            };
        } else {
            stmt = stmtOrSql;
        }

        this.#checkNotClosed();
        return executeStmt(this.#getDb(), stmt, this.#intMode);
    }

    async batch(
        stmts: Array<InStatement | [string, InArgs?]>,
        mode: TransactionMode = "deferred",
    ): Promise<Array<ResultSet>> {
        this.#checkNotClosed();
        const db = this.#getDb();
        try {
            executeStmt(db, transactionModeToBegin(mode), this.#intMode);
            const resultSets = [];
            for (let i = 0; i < stmts.length; i++) {
                try {
                    if (!db.inTransaction) {
                        throw new LibsqlBatchError(
                            "The transaction has been rolled back",
                            i,
                            "TRANSACTION_CLOSED",
                        );
                    }
                    const stmt = stmts[i];
                    const normalizedStmt: InStatement = Array.isArray(stmt)
                        ? { sql: stmt[0], args: stmt[1] || [] }
                        : stmt;
                    resultSets.push(
                        executeStmt(db, normalizedStmt, this.#intMode),
                    );
                } catch (e) {
                    if (e instanceof LibsqlBatchError) {
                        throw e;
                    }
                    if (e instanceof LibsqlError) {
                        throw new LibsqlBatchError(
                            e.message,
                            i,
                            e.code,
                            e.extendedCode,
                            e.rawCode,
                            e.cause instanceof Error ?
e.cause : undefined, ); } throw e; } } executeStmt(db, "COMMIT", this.#intMode); return resultSets; } finally { if (db.inTransaction) { executeStmt(db, "ROLLBACK", this.#intMode); } } } async migrate(stmts: Array): Promise> { this.#checkNotClosed(); const db = this.#getDb(); try { executeStmt(db, "PRAGMA foreign_keys=off", this.#intMode); executeStmt(db, transactionModeToBegin("deferred"), this.#intMode); const resultSets = []; for (let i = 0; i < stmts.length; i++) { try { if (!db.inTransaction) { throw new LibsqlBatchError( "The transaction has been rolled back", i, "TRANSACTION_CLOSED", ); } resultSets.push(executeStmt(db, stmts[i], this.#intMode)); } catch (e) { if (e instanceof LibsqlBatchError) { throw e; } if (e instanceof LibsqlError) { throw new LibsqlBatchError( e.message, i, e.code, e.extendedCode, e.rawCode, e.cause instanceof Error ? e.cause : undefined, ); } throw e; } } executeStmt(db, "COMMIT", this.#intMode); return resultSets; } finally { if (db.inTransaction) { executeStmt(db, "ROLLBACK", this.#intMode); } executeStmt(db, "PRAGMA foreign_keys=on", this.#intMode); } } async transaction(mode: TransactionMode = "write"): Promise { const db = this.#getDb(); executeStmt(db, transactionModeToBegin(mode), this.#intMode); this.#db = null; // A new connection will be lazily created on next use return new Sqlite3Transaction(db, this.#intMode); } async executeMultiple(sql: string): Promise { this.#checkNotClosed(); const db = this.#getDb(); try { return executeMultiple(db, sql); } finally { if (db.inTransaction) { executeStmt(db, "ROLLBACK", this.#intMode); } } } async sync(): Promise { this.#checkNotClosed(); const rep = await this.#getDb().sync(); return { frames_synced: rep.frames_synced, frame_no: rep.frame_no, } as Replicated; } async reconnect(): Promise { try { if (!this.closed && this.#db !== null) { this.#db.close(); } } finally { this.#db = new Database(this.#path, this.#options); this.closed = false; } } close(): void { this.closed = true; if 
(this.#db !== null) { this.#db.close(); this.#db = null; } } #checkNotClosed(): void { if (this.closed) { throw new LibsqlError("The client is closed", "CLIENT_CLOSED"); } } // Lazily creates the database connection and returns it #getDb(): Database.Database { if (this.#db === null) { this.#db = new Database(this.#path, this.#options); } return this.#db; } } export class Sqlite3Transaction implements Transaction { #database: Database.Database; #intMode: IntMode; /** @private */ constructor(database: Database.Database, intMode: IntMode) { this.#database = database; this.#intMode = intMode; } async execute(stmt: InStatement): Promise; async execute(sql: string, args?: InArgs): Promise; async execute( stmtOrSql: InStatement | string, args?: InArgs, ): Promise { let stmt: InStatement; if (typeof stmtOrSql === "string") { stmt = { sql: stmtOrSql, args: args || [], }; } else { stmt = stmtOrSql; } this.#checkNotClosed(); return executeStmt(this.#database, stmt, this.#intMode); } async batch( stmts: Array, ): Promise> { const resultSets = []; for (let i = 0; i < stmts.length; i++) { try { this.#checkNotClosed(); const stmt = stmts[i]; const normalizedStmt: InStatement = Array.isArray(stmt) ? { sql: stmt[0], args: stmt[1] || [] } : stmt; resultSets.push( executeStmt(this.#database, normalizedStmt, this.#intMode), ); } catch (e) { if (e instanceof LibsqlBatchError) { throw e; } if (e instanceof LibsqlError) { throw new LibsqlBatchError( e.message, i, e.code, e.extendedCode, e.rawCode, e.cause instanceof Error ? 
e.cause : undefined, ); } throw e; } } return resultSets; } async executeMultiple(sql: string): Promise { this.#checkNotClosed(); return executeMultiple(this.#database, sql); } async rollback(): Promise { if (!this.#database.open) { return; } this.#checkNotClosed(); executeStmt(this.#database, "ROLLBACK", this.#intMode); } async commit(): Promise { this.#checkNotClosed(); executeStmt(this.#database, "COMMIT", this.#intMode); } close(): void { if (this.#database.inTransaction) { executeStmt(this.#database, "ROLLBACK", this.#intMode); } } get closed(): boolean { return !this.#database.inTransaction; } #checkNotClosed(): void { if (this.closed) { throw new LibsqlError( "The transaction is closed", "TRANSACTION_CLOSED", ); } } } function executeStmt( db: Database.Database, stmt: InStatement, intMode: IntMode, ): ResultSet { let sql: string; let args: Array | Record; if (typeof stmt === "string") { sql = stmt; args = []; } else { sql = stmt.sql; if (Array.isArray(stmt.args)) { args = stmt.args.map((value) => valueToSql(value, intMode)); } else { args = {}; for (const name in stmt.args) { const argName = name[0] === "@" || name[0] === "$" || name[0] === ":" ? name.substring(1) : name; args[argName] = valueToSql(stmt.args[name], intMode); } } } try { const sqlStmt = db.prepare(sql); sqlStmt.safeIntegers(true); let returnsData = true; try { sqlStmt.raw(true); } catch { // raw() throws an exception if the statement does not return data returnsData = false; } if (returnsData) { const columns = Array.from( sqlStmt.columns().map((col) => col.name), ); const columnTypes = Array.from( sqlStmt.columns().map((col) => col.type ?? ""), ); const rows = sqlStmt.all(args).map((sqlRow) => { return rowFromSql(sqlRow as Array, columns, intMode); }); // TODO: can we get this info from better-sqlite3? 
const rowsAffected = 0; const lastInsertRowid = undefined; return new ResultSetImpl( columns, columnTypes, rows, rowsAffected, lastInsertRowid, ); } else { const info = sqlStmt.run(args); const rowsAffected = info.changes; const lastInsertRowid = BigInt(info.lastInsertRowid); return new ResultSetImpl([], [], [], rowsAffected, lastInsertRowid); } } catch (e) { throw mapSqliteError(e); } } function rowFromSql( sqlRow: Array, columns: Array, intMode: IntMode, ): Row { const row = {}; // make sure that the "length" property is not enumerable Object.defineProperty(row, "length", { value: sqlRow.length }); for (let i = 0; i < sqlRow.length; ++i) { const value = valueFromSql(sqlRow[i], intMode); Object.defineProperty(row, i, { value }); const column = columns[i]; if (!Object.hasOwn(row, column)) { Object.defineProperty(row, column, { value, enumerable: true, configurable: true, writable: true, }); } } return row as Row; } function valueFromSql(sqlValue: unknown, intMode: IntMode): Value { if (typeof sqlValue === "bigint") { if (intMode === "number") { if (sqlValue < minSafeBigint || sqlValue > maxSafeBigint) { throw new RangeError( "Received integer which cannot be safely represented as a JavaScript number", ); } return Number(sqlValue); } else if (intMode === "bigint") { return sqlValue; } else if (intMode === "string") { return "" + sqlValue; } else { throw new Error("Invalid value for IntMode"); } } else if (sqlValue instanceof Buffer) { return sqlValue.buffer; } return sqlValue as Value; } const minSafeBigint = -9007199254740991n; const maxSafeBigint = 9007199254740991n; function valueToSql(value: InValue, intMode: IntMode): unknown { if (typeof value === "number") { if (!Number.isFinite(value)) { throw new RangeError( "Only finite numbers (not Infinity or NaN) can be passed as arguments", ); } return value; } else if (typeof value === "bigint") { if (value < minInteger || value > maxInteger) { throw new RangeError( "bigint is too large to be represented as a 64-bit 
integer and passed as argument", ); } return value; } else if (typeof value === "boolean") { switch (intMode) { case "bigint": return value ? 1n : 0n; case "string": return value ? "1" : "0"; default: return value ? 1 : 0; } } else if (value instanceof ArrayBuffer) { return Buffer.from(value); } else if (value instanceof Date) { return value.valueOf(); } else if (value === undefined) { throw new TypeError( "undefined cannot be passed as argument to the database", ); } else { return value; } } const minInteger = -9223372036854775808n; const maxInteger = 9223372036854775807n; function executeMultiple(db: Database.Database, sql: string): void { try { db.exec(sql); } catch (e) { throw mapSqliteError(e); } } function mapSqliteError(e: unknown): unknown { if (e instanceof Database.SqliteError) { const extendedCode = e.code; const code = mapToBaseCode(e.rawCode); return new LibsqlError(e.message, code, extendedCode, e.rawCode, e); } return e; } // Map SQLite raw error code to base error code string. // Extended error codes are (base | (extended << 8)), so base = rawCode & 0xFF function mapToBaseCode(rawCode: number | undefined): string { if (rawCode === undefined) { return "SQLITE_UNKNOWN"; } const baseCode = rawCode & 0xff; return ( sqliteErrorCodes[baseCode] ?? 
`SQLITE_UNKNOWN_${baseCode.toString()}` ); } const sqliteErrorCodes: Record = { 1: "SQLITE_ERROR", 2: "SQLITE_INTERNAL", 3: "SQLITE_PERM", 4: "SQLITE_ABORT", 5: "SQLITE_BUSY", 6: "SQLITE_LOCKED", 7: "SQLITE_NOMEM", 8: "SQLITE_READONLY", 9: "SQLITE_INTERRUPT", 10: "SQLITE_IOERR", 11: "SQLITE_CORRUPT", 12: "SQLITE_NOTFOUND", 13: "SQLITE_FULL", 14: "SQLITE_CANTOPEN", 15: "SQLITE_PROTOCOL", 16: "SQLITE_EMPTY", 17: "SQLITE_SCHEMA", 18: "SQLITE_TOOBIG", 19: "SQLITE_CONSTRAINT", 20: "SQLITE_MISMATCH", 21: "SQLITE_MISUSE", 22: "SQLITE_NOLFS", 23: "SQLITE_AUTH", 24: "SQLITE_FORMAT", 25: "SQLITE_RANGE", 26: "SQLITE_NOTADB", 27: "SQLITE_NOTICE", 28: "SQLITE_WARNING", }; ================================================ FILE: packages/libsql-client/src/web.ts ================================================ import type { Config, Client } from "@libsql/core/api"; import { LibsqlError } from "@libsql/core/api"; import type { ExpandedConfig } from "@libsql/core/config"; import { expandConfig } from "@libsql/core/config"; import { supportedUrlLink } from "@libsql/core/util"; import { _createClient as _createWsClient } from "./ws.js"; import { _createClient as _createHttpClient } from "./http.js"; export * from "@libsql/core/api"; export function createClient(config: Config): Client { return _createClient(expandConfig(config, true)); } /** @private */ export function _createClient(config: ExpandedConfig): Client { if (config.scheme === "ws" || config.scheme === "wss") { return _createWsClient(config); } else if (config.scheme === "http" || config.scheme === "https") { return _createHttpClient(config); } else { throw new LibsqlError( 'The client that uses Web standard APIs supports only "libsql:", "wss:", "ws:", "https:" and "http:" URLs, ' + `got ${JSON.stringify(config.scheme + ":")}. 
For more information, please read ${supportedUrlLink}`, "URL_SCHEME_NOT_SUPPORTED", ); } } ================================================ FILE: packages/libsql-client/src/ws.ts ================================================ import * as hrana from "@libsql/hrana-client"; import type { Config, IntMode, Client, Transaction, ResultSet, InStatement, InArgs, Replicated, } from "@libsql/core/api"; import { TransactionMode, LibsqlError } from "@libsql/core/api"; import type { ExpandedConfig } from "@libsql/core/config"; import { expandConfig } from "@libsql/core/config"; import { HranaTransaction, executeHranaBatch, stmtToHrana, resultSetFromHrana, mapHranaError, } from "./hrana.js"; import { SqlCache } from "./sql_cache.js"; import { encodeBaseUrl } from "@libsql/core/uri"; import { supportedUrlLink } from "@libsql/core/util"; import promiseLimit from "promise-limit"; export * from "@libsql/core/api"; export function createClient(config: Config): WsClient { return _createClient(expandConfig(config, false)); } /** @private */ export function _createClient(config: ExpandedConfig): WsClient { if (config.scheme !== "wss" && config.scheme !== "ws") { throw new LibsqlError( 'The WebSocket client supports only "libsql:", "wss:" and "ws:" URLs, ' + `got ${JSON.stringify(config.scheme + ":")}. 
For more information, please read ${supportedUrlLink}`, "URL_SCHEME_NOT_SUPPORTED", ); } if (config.encryptionKey !== undefined) { throw new LibsqlError( "Encryption key is not supported by the remote client.", "ENCRYPTION_KEY_NOT_SUPPORTED", ); } if (config.scheme === "ws" && config.tls) { throw new LibsqlError( `A "ws:" URL cannot opt into TLS by using ?tls=1`, "URL_INVALID", ); } else if (config.scheme === "wss" && !config.tls) { throw new LibsqlError( `A "wss:" URL cannot opt out of TLS by using ?tls=0`, "URL_INVALID", ); } const url = encodeBaseUrl(config.scheme, config.authority, config.path); let client: hrana.WsClient; try { client = hrana.openWs(url, config.authToken); } catch (e) { if (e instanceof hrana.WebSocketUnsupportedError) { const suggestedScheme = config.scheme === "wss" ? "https" : "http"; const suggestedUrl = encodeBaseUrl( suggestedScheme, config.authority, config.path, ); throw new LibsqlError( "This environment does not support WebSockets, please switch to the HTTP client by using " + `a "${suggestedScheme}:" URL (${JSON.stringify(suggestedUrl)}). ` + `For more information, please read ${supportedUrlLink}`, "WEBSOCKETS_NOT_SUPPORTED", ); } throw mapHranaError(e); } return new WsClient( client, url, config.authToken, config.intMode, config.concurrency, ); } // This object maintains state for a single WebSocket connection. interface ConnState { // The Hrana client (which corresponds to a single WebSocket). client: hrana.WsClient; // We can cache SQL texts on the server only if the server supports Hrana 2. But to get the server // version, we need to wait for the WebSocket handshake to complete, so this value is initially // `undefined`, until we find out the version. useSqlCache: boolean | undefined; // The cache of SQL texts stored on the server. Initially has capacity 0, but it is set to // `sqlCacheCapacity` when `useSqlCache` is set to `true`. sqlCache: SqlCache; // The time when the connection was opened. 
openTime: Date; // Set of all `StreamState`-s that were opened from this connection. We can safely close the connection // only when this is empty. streamStates: Set; } interface StreamState { conn: ConnState; stream: hrana.WsStream; } const maxConnAgeMillis = 60 * 1000; const sqlCacheCapacity = 100; export class WsClient implements Client { #url: URL; #authToken: string | undefined; #intMode: IntMode; // State of the current connection. The `hrana.WsClient` inside may be closed at any moment due to an // asynchronous error. #connState: ConnState; // If defined, this is a connection that will be used in the future, once it is ready. #futureConnState: ConnState | undefined; closed: boolean; protocol: "ws"; #isSchemaDatabase: Promise | undefined; #promiseLimitFunction: ReturnType>; /** @private */ constructor( client: hrana.WsClient, url: URL, authToken: string | undefined, intMode: IntMode, concurrency: number | undefined, ) { this.#url = url; this.#authToken = authToken; this.#intMode = intMode; this.#connState = this.#openConn(client); this.#futureConnState = undefined; this.closed = false; this.protocol = "ws"; this.#promiseLimitFunction = promiseLimit(concurrency); } private async limit(fn: () => Promise): Promise { return this.#promiseLimitFunction(fn); } async execute( stmtOrSql: InStatement | string, args?: InArgs, ): Promise { let stmt: InStatement; if (typeof stmtOrSql === "string") { stmt = { sql: stmtOrSql, args: args || [], }; } else { stmt = stmtOrSql; } return this.limit(async () => { const streamState = await this.#openStream(); try { const hranaStmt = stmtToHrana(stmt); // Schedule all operations synchronously, so they will be pipelined and executed in a single // network roundtrip. 
streamState.conn.sqlCache.apply([hranaStmt]); const hranaRowsPromise = streamState.stream.query(hranaStmt); streamState.stream.closeGracefully(); const hranaRowsResult = await hranaRowsPromise; return resultSetFromHrana(hranaRowsResult); } catch (e) { throw mapHranaError(e); } finally { this._closeStream(streamState); } }); } async batch( stmts: Array, mode: TransactionMode = "deferred", ): Promise> { return this.limit>(async () => { const streamState = await this.#openStream(); try { const normalizedStmts = stmts.map((stmt) => { if (Array.isArray(stmt)) { return { sql: stmt[0], args: stmt[1] || [], }; } return stmt; }); const hranaStmts = normalizedStmts.map(stmtToHrana); const version = await streamState.conn.client.getVersion(); // Schedule all operations synchronously, so they will be pipelined and executed in a single // network roundtrip. streamState.conn.sqlCache.apply(hranaStmts); const batch = streamState.stream.batch(version >= 3); const resultsPromise = executeHranaBatch( mode, version, batch, hranaStmts, ); const results = await resultsPromise; return results; } catch (e) { throw mapHranaError(e); } finally { this._closeStream(streamState); } }); } async migrate(stmts: Array): Promise> { return this.limit>(async () => { const streamState = await this.#openStream(); try { const hranaStmts = stmts.map(stmtToHrana); const version = await streamState.conn.client.getVersion(); // Schedule all operations synchronously, so they will be pipelined and executed in a single // network roundtrip. 
const batch = streamState.stream.batch(version >= 3); const resultsPromise = executeHranaBatch( "deferred", version, batch, hranaStmts, true, ); const results = await resultsPromise; return results; } catch (e) { throw mapHranaError(e); } finally { this._closeStream(streamState); } }); } async transaction(mode: TransactionMode = "write"): Promise { return this.limit(async () => { const streamState = await this.#openStream(); try { const version = await streamState.conn.client.getVersion(); // the BEGIN statement will be batched with the first statement on the transaction to save a // network roundtrip return new WsTransaction(this, streamState, mode, version); } catch (e) { this._closeStream(streamState); throw mapHranaError(e); } }); } async executeMultiple(sql: string): Promise { return this.limit(async () => { const streamState = await this.#openStream(); try { // Schedule all operations synchronously, so they will be pipelined and executed in a single // network roundtrip. const promise = streamState.stream.sequence(sql); streamState.stream.closeGracefully(); await promise; } catch (e) { throw mapHranaError(e); } finally { this._closeStream(streamState); } }); } sync(): Promise { throw new LibsqlError( "sync not supported in ws mode", "SYNC_NOT_SUPPORTED", ); } async #openStream(): Promise { if (this.closed) { throw new LibsqlError("The client is closed", "CLIENT_CLOSED"); } const now = new Date(); const ageMillis = now.valueOf() - this.#connState.openTime.valueOf(); if ( ageMillis > maxConnAgeMillis && this.#futureConnState === undefined ) { // The existing connection is too old, let's open a new one. const futureConnState = this.#openConn(); this.#futureConnState = futureConnState; // However, if we used `futureConnState` immediately, we would introduce additional latency, // because we would have to wait for the WebSocket handshake to complete, even though we may a // have perfectly good existing connection in `this.#connState`! 
// // So we wait until the `hrana.Client.getVersion()` operation completes (which happens when the // WebSocket hanshake completes), and only then we replace `this.#connState` with // `futureConnState`, which is stored in `this.#futureConnState` in the meantime. futureConnState.client.getVersion().then( (_version) => { if (this.#connState !== futureConnState) { // We need to close `this.#connState` before we replace it. However, it is possible // that `this.#connState` has already been replaced: see the code below. if (this.#connState.streamStates.size === 0) { this.#connState.client.close(); } else { // If there are existing streams on the connection, we must not close it, because // these streams would be broken. The last stream to be closed will also close the // connection in `_closeStream()`. } } this.#connState = futureConnState; this.#futureConnState = undefined; }, (_e) => { // If the new connection could not be established, let's just ignore the error and keep // using the existing connection. this.#futureConnState = undefined; }, ); } if (this.#connState.client.closed) { // An error happened on this connection and it has been closed. Let's try to seamlessly reconnect. try { if (this.#futureConnState !== undefined) { // We are already in the process of opening a new connection, so let's just use it // immediately. this.#connState = this.#futureConnState; } else { this.#connState = this.#openConn(); } } catch (e) { throw mapHranaError(e); } } const connState = this.#connState; try { // Now we wait for the WebSocket handshake to complete (if it hasn't completed yet). Note that // this does not increase latency, because any messages that we would send on the WebSocket before // the handshake would be queued until the handshake is completed anyway. 
if (connState.useSqlCache === undefined) { connState.useSqlCache = (await connState.client.getVersion()) >= 2; if (connState.useSqlCache) { connState.sqlCache.capacity = sqlCacheCapacity; } } const stream = connState.client.openStream(); stream.intMode = this.#intMode; const streamState = { conn: connState, stream }; connState.streamStates.add(streamState); return streamState; } catch (e) { throw mapHranaError(e); } } #openConn(client?: hrana.WsClient): ConnState { try { client ??= hrana.openWs(this.#url, this.#authToken); return { client, useSqlCache: undefined, sqlCache: new SqlCache(client, 0), openTime: new Date(), streamStates: new Set(), }; } catch (e) { throw mapHranaError(e); } } async reconnect(): Promise { try { for (const st of Array.from(this.#connState.streamStates)) { try { st.stream.close(); } catch {} } this.#connState.client.close(); } catch {} if (this.#futureConnState) { try { this.#futureConnState.client.close(); } catch {} this.#futureConnState = undefined; } const next = this.#openConn(); const version = await next.client.getVersion(); next.useSqlCache = version >= 2; if (next.useSqlCache) { next.sqlCache.capacity = sqlCacheCapacity; } this.#connState = next; this.closed = false; } _closeStream(streamState: StreamState): void { streamState.stream.close(); const connState = streamState.conn; connState.streamStates.delete(streamState); if ( connState.streamStates.size === 0 && connState !== this.#connState ) { // We are not using this connection anymore and this is the last stream that was using it, so we // must close it now. 
connState.client.close(); } } close(): void { this.#connState.client.close(); this.closed = true; if (this.#futureConnState) { try { this.#futureConnState.client.close(); } catch {} this.#futureConnState = undefined; } this.closed = true; } } export class WsTransaction extends HranaTransaction implements Transaction { #client: WsClient; #streamState: StreamState; /** @private */ constructor( client: WsClient, state: StreamState, mode: TransactionMode, version: hrana.ProtocolVersion, ) { super(mode, version); this.#client = client; this.#streamState = state; } /** @private */ override _getStream(): hrana.Stream { return this.#streamState.stream; } /** @private */ override _getSqlCache(): SqlCache { return this.#streamState.conn.sqlCache; } override close(): void { this.#client._closeStream(this.#streamState); } override get closed(): boolean { return this.#streamState.stream.closed; } } ================================================ FILE: packages/libsql-client/tsconfig.base.json ================================================ { "compilerOptions": { "moduleResolution": "node", "lib": ["esnext"], "target": "esnext", "esModuleInterop": true, "isolatedModules": true, "rootDir": "src/", "strict": true }, "include": ["src/"], "exclude": ["**/__tests__"] } ================================================ FILE: packages/libsql-client/tsconfig.build-cjs.json ================================================ { "extends": "./tsconfig.base.json", "compilerOptions": { "module": "commonjs", "declaration": false, "outDir": "./lib-cjs/" } } ================================================ FILE: packages/libsql-client/tsconfig.build-esm.json ================================================ { "extends": "./tsconfig.base.json", "compilerOptions": { "module": "esnext", "declaration": true, "outDir": "./lib-esm/" } } ================================================ FILE: packages/libsql-client/tsconfig.json ================================================ { "extends": 
"./tsconfig.base.json", "compilerOptions": { "noEmit": true, "incremental": true } } ================================================ FILE: packages/libsql-client/typedoc.json ================================================ { "entryPoints": ["src/node.ts"], "out": "docs", "excludePrivate": true, "excludeInternal": true, "visibilityFilters": { "inherited": true, "external": true }, "includeVersion": true } ================================================ FILE: packages/libsql-client-wasm/LICENSE ================================================ MIT License Copyright (c) 2023 libSQL Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
================================================ FILE: packages/libsql-client-wasm/examples/browser/README.md ================================================ # libSQL Wasm example for browsers ## Building Run the following in the `packages/libsql-client-wasm` directory: ``` npm run build ``` Run the following in this directory: ``` npm i ./node_modules/.bin/esbuild --target=safari16 index.js --bundle --outfile=dist/out.js --format=esm cp ../../../../node_modules/@libsql/libsql-wasm-experimental/sqlite-wasm/jswasm/sqlite3.wasm dist ``` and open the app in browser: ``` npx http-server -o ``` ================================================ FILE: packages/libsql-client-wasm/examples/browser/index.html ================================================ libSQL SDK Wasm Demo

Hello libSQL and Wasm!

================================================ FILE: packages/libsql-client-wasm/examples/browser/index.js ================================================ import { createClient } from "@libsql/client-wasm"; async function main() { const config = { url: "file:local.db", }; const db = await createClient(config); const rs = await db.execute("SELECT * FROM users"); console.log(rs); } main().catch((error) => { console.log(error); }); ================================================ FILE: packages/libsql-client-wasm/examples/browser/package.json ================================================ { "name": "browser", "version": "1.0.0", "description": "", "main": "index.js", "type": "module", "scripts": { "test": "echo \"Error: no test specified\" && exit 1" }, "author": "", "license": "MIT", "dependencies": { "@libsql/client-wasm": "../.." }, "devDependencies": { "esbuild": "0.19.11" } } ================================================ FILE: packages/libsql-client-wasm/examples/node/index.js ================================================ import { createClient } from "@libsql/client-wasm"; async function main() { const config = { url: "file:local.db", }; const db = await createClient(config); await db.execute("CREATE TABLE users (id INT PRIMARY KEY, username TEXT)"); await db.execute("INSERT INTO users VALUES (1, 'penberg')"); const rs = await db.execute("SELECT * FROM users"); console.log(rs); } main().catch((error) => { console.log(error); }); ================================================ FILE: packages/libsql-client-wasm/examples/node/package.json ================================================ { "name": "nodejs", "version": "1.0.0", "description": "", "main": "index.js", "type": "module", "scripts": { "test": "echo \"Error: no test specified\" && exit 1" }, "author": "", "license": "MIT", "dependencies": { "@libsql/client-wasm": "../../" } } ================================================ FILE: packages/libsql-client-wasm/jest.config.js 
================================================ export default { preset: "ts-jest/presets/default-esm", moduleNameMapper: { "^(\\.{1,2}/.*)\\.js$": "$1", }, testMatch: ["**/__tests__/*.test.[jt]s"], }; ================================================ FILE: packages/libsql-client-wasm/package-cjs.json ================================================ { "type": "commonjs" } ================================================ FILE: packages/libsql-client-wasm/package.json ================================================ { "name": "@libsql/client-wasm", "version": "0.17.2", "keywords": [ "libsql", "database", "sqlite", "serverless", "vercel", "netlify", "lambda" ], "description": "libSQL driver for TypeScript and JavaScript", "repository": { "type": "git", "url": "git+https://github.com/tursodatabase/libsql-client-ts", "directory": "packages/libsql-client-wasm" }, "authors": [ "Jan Špaček ", "Pekka Enberg ", "Jan Plhak " ], "license": "MIT", "type": "module", "bundledDependencies": [ "@libsql/libsql-wasm-experimental" ], "main": "lib-esm/wasm.js", "types": "lib-esm/wasm.d.ts", "exports": { ".": { "types": "./lib-esm/wasm.d.ts", "import": { "default": "./lib-esm/wasm.js" } } }, "typesVersions": { "*": { ".": [ "./lib-esm/wasm.d.ts" ] } }, "files": [ "lib-esm/**" ], "scripts": { "prepublishOnly": "npm run build", "prebuild": "rm -rf ./lib-esm && npm run bundle", "build": "npm run build:esm", "build:esm": "tsc -p tsconfig.build-esm.json", "bundle": "rm -rf node_modules && mkdir -p node_modules/@libsql/libsql-wasm-experimental && cp -R ../../node_modules/@libsql/libsql-wasm-experimental/* node_modules/@libsql/libsql-wasm-experimental", "format:check": "prettier --check .", "test": "jest --runInBand", "typecheck": "tsc --noEmit", "typedoc": "rm -rf ./docs && typedoc" }, "dependencies": { "@libsql/core": "^0.17.0", "@libsql/libsql-wasm-experimental": "^0.0.2", "js-base64": "^3.7.5" }, "devDependencies": { "@types/jest": "^29.2.5", "jest": "^29.3.1", "ts-jest": "^29.0.5", 
"typedoc": "^0.23.28", "typescript": "^4.9.4" } } ================================================ FILE: packages/libsql-client-wasm/src/wasm.ts ================================================ import sqlite3InitModule from "@libsql/libsql-wasm-experimental"; import type { Database, SqlValue, Sqlite3Static, } from "@libsql/libsql-wasm-experimental"; import type { Config, IntMode, Client, Transaction, TransactionMode, ResultSet, Row, Value, InValue, InStatement, InArgs, Replicated, } from "@libsql/core/api"; import { LibsqlError } from "@libsql/core/api"; import type { ExpandedConfig } from "@libsql/core/config"; import { expandConfig } from "@libsql/core/config"; import { supportedUrlLink, transactionModeToBegin, ResultSetImpl, } from "@libsql/core/util"; export * from "@libsql/core/api"; const sqlite3 = await sqlite3InitModule(); export function createClient(config: Config): Client { return _createClient(expandConfig(config, true)); } /** @private */ export function _createClient(config: ExpandedConfig): Client { if (config.scheme !== "file") { throw new LibsqlError( `URL scheme ${JSON.stringify(config.scheme + ":")} is not supported by the local sqlite3 client. ` + `For more information, please read ${supportedUrlLink}`, "URL_SCHEME_NOT_SUPPORTED", ); } if (config.encryptionKey !== undefined) { throw new LibsqlError( "Encryption key is not supported by the Wasm client.", "ENCRYPTION_KEY_NOT_SUPPORTED", ); } const authority = config.authority; if (authority !== undefined) { const host = authority.host.toLowerCase(); if (host !== "" && host !== "localhost") { throw new LibsqlError( `Invalid host in file URL: ${JSON.stringify(authority.host)}. ` + 'A "file:" URL with an absolute path should start with one slash ("file:/absolute/path.db") ' + 'or with three slashes ("file:///absolute/path.db"). 
' + `For more information, please read ${supportedUrlLink}`, "URL_INVALID", ); } if (authority.port !== undefined) { throw new LibsqlError("File URL cannot have a port", "URL_INVALID"); } if (authority.userinfo !== undefined) { throw new LibsqlError( "File URL cannot have username and password", "URL_INVALID", ); } } const path = config.path; const options = { authToken: config.authToken, syncUrl: config.syncUrl, }; const db: Database = new sqlite3.oo1.DB(path, "c"); executeStmt( db, "SELECT 1 AS checkThatTheDatabaseCanBeOpened", config.intMode, ); return new Sqlite3Client(sqlite3, path, /*options,*/ db, config.intMode); } function inTransaction(db: Database): boolean { return db.getAutocommit() == 0; } export class Sqlite3Client implements Client { #sqlite3: Sqlite3Static; #path: string; #db: Database | null; #intMode: IntMode; closed: boolean; protocol: "file"; /** @private */ constructor( sqlite3: Sqlite3Static, path: string, /*options: Database.Options,*/ db: Database, intMode: IntMode, ) { this.#sqlite3 = sqlite3; this.#path = path; //this.#options = options; this.#db = db; this.#intMode = intMode; this.closed = false; this.protocol = "file"; } async execute( stmtOrSql: InStatement | string, args?: InArgs, ): Promise { let stmt: InStatement; if (typeof stmtOrSql === "string") { stmt = { sql: stmtOrSql, args: args || [], }; } else { stmt = stmtOrSql; } this.#checkNotClosed(); return executeStmt(this.#getDb(), stmt, this.#intMode); } async batch( stmts: Array, mode: TransactionMode = "deferred", ): Promise> { this.#checkNotClosed(); const db = this.#getDb(); try { executeStmt(db, transactionModeToBegin(mode), this.#intMode); const resultSets = stmts.map((stmt) => { if (!inTransaction(db)) { throw new LibsqlError( "The transaction has been rolled back", "TRANSACTION_CLOSED", ); } return executeStmt(db, stmt, this.#intMode); }); executeStmt(db, "COMMIT", this.#intMode); return resultSets; } finally { if (inTransaction(db)) { executeStmt(db, "ROLLBACK", 
this.#intMode); } } } async migrate(stmts: Array): Promise> { this.#checkNotClosed(); const db = this.#getDb(); try { executeStmt(db, "PRAGMA foreign_keys=off", this.#intMode); executeStmt(db, transactionModeToBegin("deferred"), this.#intMode); const resultSets = stmts.map((stmt) => { if (!inTransaction(db)) { throw new LibsqlError( "The transaction has been rolled back", "TRANSACTION_CLOSED", ); } return executeStmt(db, stmt, this.#intMode); }); executeStmt(db, "COMMIT", this.#intMode); return resultSets; } finally { if (inTransaction(db)) { executeStmt(db, "ROLLBACK", this.#intMode); } executeStmt(db, "PRAGMA foreign_keys=on", this.#intMode); } } async transaction(mode: TransactionMode = "write"): Promise { const db = this.#getDb(); executeStmt(db, transactionModeToBegin(mode), this.#intMode); this.#db = null; // A new connection will be lazily created on next use return new Sqlite3Transaction(db, this.#intMode); } async executeMultiple(sql: string): Promise { this.#checkNotClosed(); const db = this.#getDb(); try { return executeMultiple(db, sql); } finally { if (inTransaction(db)) { executeStmt(db, "ROLLBACK", this.#intMode); } } } async sync(): Promise { throw new LibsqlError( "sync not supported in wasm mode", "SYNC_NOT_SUPPORTED", ); } async reconnect(): Promise { try { if (!this.closed && this.#db !== null) { this.#db.close(); } } finally { this.#db = new this.#sqlite3.oo1.DB(this.#path, "c"); this.closed = false; } } close(): void { this.closed = true; if (this.#db !== null) { this.#db.close(); this.#db = null; } } #checkNotClosed(): void { if (this.closed) { throw new LibsqlError("The client is closed", "CLIENT_CLOSED"); } } // Lazily creates the database connection and returns it #getDb(): Database { if (this.#db === null) { this.#db = new sqlite3.oo1.DB(this.#path, "c"); } return this.#db; } } export class Sqlite3Transaction implements Transaction { #database: Database; #intMode: IntMode; /** @private */ constructor(database: Database, intMode: IntMode) 
{ this.#database = database; this.#intMode = intMode; } async execute(stmt: InStatement): Promise { this.#checkNotClosed(); return executeStmt(this.#database, stmt, this.#intMode); } async batch(stmts: Array): Promise> { return stmts.map((stmt) => { this.#checkNotClosed(); return executeStmt(this.#database, stmt, this.#intMode); }); } async executeMultiple(sql: string): Promise { this.#checkNotClosed(); return executeMultiple(this.#database, sql); } async rollback(): Promise { if (!this.#database.isOpen()) { return; } this.#checkNotClosed(); executeStmt(this.#database, "ROLLBACK", this.#intMode); } async commit(): Promise { this.#checkNotClosed(); executeStmt(this.#database, "COMMIT", this.#intMode); } close(): void { if (inTransaction(this.#database)) { executeStmt(this.#database, "ROLLBACK", this.#intMode); } } get closed(): boolean { return !inTransaction(this.#database); } #checkNotClosed(): void { if (this.closed) { throw new LibsqlError( "The transaction is closed", "TRANSACTION_CLOSED", ); } } } function executeStmt( db: Database, stmt: InStatement, intMode: IntMode, ): ResultSet { let sql: string; let args: Array | Record; if (typeof stmt === "string") { sql = stmt; args = []; } else { sql = stmt.sql; if (Array.isArray(stmt.args)) { args = stmt.args.map((value) => valueToSql(value, intMode)); } else { args = {}; for (const name in stmt.args) { const argName = name[0] === "@" || name[0] === "$" || name[0] === ":" ? 
name.substring(1) : name; args[argName] = valueToSql(stmt.args[name], intMode); } } } try { const sqlStmt = db.prepare(sql); // TODO: sqlStmt.safeIntegers(true); let returnsData = sqlStmt.columnCount > 0; if (Array.isArray(args)) { for (let i = 0; i < args.length; ++i) { const value = args[i]; sqlStmt.bind(i + 1, value); } } else { for (const argName in args) { const idx = sqlStmt.getParamIndex(argName)!; const value = args[argName]; sqlStmt.bind(idx, value); } } if (returnsData) { let columns: string[] = sqlStmt.getColumnNames(); let columnTypes: string[] = []; let rows: Row[] = []; for (;;) { if (!sqlStmt.step()) { break; } const values: unknown[] = sqlStmt.get([]); rows.push(rowFromSql(values, columns, intMode)); } const rowsAffected = 0; const lastInsertRowid = undefined; return new ResultSetImpl( columns, columnTypes, rows, rowsAffected, lastInsertRowid, ); } else { sqlStmt.step(); // TODO: check return value const rowsAffected = db.changes(); const lastInsertRowid = BigInt(db.lastInsertRowid()); return new ResultSetImpl([], [], [], rowsAffected, lastInsertRowid); } } catch (e) { throw mapSqliteError(e); } } function rowFromSql( sqlRow: Array, columns: Array, intMode: IntMode, ): Row { const row = {}; // make sure that the "length" property is not enumerable Object.defineProperty(row, "length", { value: sqlRow.length }); for (let i = 0; i < sqlRow.length; ++i) { const value = valueFromSql(sqlRow[i], intMode); Object.defineProperty(row, i, { value }); const column = columns[i]; if (!Object.hasOwn(row, column)) { Object.defineProperty(row, column, { value, enumerable: true, configurable: true, writable: true, }); } } return row as Row; } function valueFromSql(sqlValue: unknown, intMode: IntMode): Value { if (typeof sqlValue === "bigint") { if (intMode === "number") { if (sqlValue < minSafeBigint || sqlValue > maxSafeBigint) { throw new RangeError( "Received integer which cannot be safely represented as a JavaScript number", ); } return Number(sqlValue); } else 
if (intMode === "bigint") {
            return sqlValue;
        } else if (intMode === "string") {
            return "" + sqlValue;
        } else {
            throw new Error("Invalid value for IntMode");
        }
    }
    return sqlValue as Value;
}

// Bounds of the integers that a JavaScript `number` can represent exactly
// (Number.MIN_SAFE_INTEGER / Number.MAX_SAFE_INTEGER as bigints).
const minSafeBigint = -9007199254740991n;
const maxSafeBigint = 9007199254740991n;

// Convert a user-supplied argument value into a value that sqlite3 can bind.
function valueToSql(value: InValue, intMode: IntMode): SqlValue {
    if (typeof value === "number") {
        if (!Number.isFinite(value)) {
            throw new RangeError(
                "Only finite numbers (not Infinity or NaN) can be passed as arguments",
            );
        }
        return value;
    } else if (typeof value === "bigint") {
        if (value < minInteger || value > maxInteger) {
            throw new RangeError(
                "bigint is too large to be represented as a 64-bit integer and passed as argument",
            );
        }
        return value;
    } else if (typeof value === "boolean") {
        // Booleans are stored as integers; match the representation to `intMode`.
        switch (intMode) {
            case "bigint":
                return value ? 1n : 0n;
            case "string":
                return value ? "1" : "0";
            default:
                return value ? 1 : 0;
        }
    } else if (value instanceof Date) {
        // Dates are bound as milliseconds since the Unix epoch.
        return value.valueOf();
    } else if (value === undefined) {
        throw new TypeError(
            "undefined cannot be passed as argument to the database",
        );
    } else {
        return value;
    }
}

// 64-bit signed integer range accepted by SQLite.
const minInteger = -9223372036854775808n;
const maxInteger = 9223372036854775807n;

// Run a semicolon-separated script of statements on `db`.
function executeMultiple(db: Database, sql: string): void {
    try {
        db.exec(sql);
    } catch (e) {
        throw mapSqliteError(e);
    }
}

function mapSqliteError(e: unknown): unknown {
    // TODO: Map to LibsqlError
    return e;
}

================================================ FILE: packages/libsql-client-wasm/tsconfig.base.json ================================================
{
    "compilerOptions": {
        "module": "es2022",
        "moduleResolution": "node",
        "lib": ["esnext", "dom"],
        "target": "es2022",
        "esModuleInterop": true,
        "isolatedModules": true,
        "rootDir": "src/",
        "strict": true
    },
    "include": ["src/"],
    "exclude": ["**/__tests__"]
}

================================================ FILE: packages/libsql-client-wasm/tsconfig.build-esm.json ================================================
{
    "extends": "./tsconfig.base.json",
"compilerOptions": {
        "module": "esnext",
        "declaration": true,
        "outDir": "./lib-esm/"
    }
}

================================================ FILE: packages/libsql-client-wasm/tsconfig.json ================================================
{
    "extends": "./tsconfig.base.json",
    "compilerOptions": {
        "noEmit": true,
        "incremental": true
    }
}

================================================ FILE: packages/libsql-client-wasm/typedoc.json ================================================
{
    "entryPoints": ["src/wasm.ts"],
    "out": "docs",
    "excludePrivate": true,
    "excludeInternal": true,
    "visibilityFilters": { "inherited": true, "external": true },
    "includeVersion": true
}

================================================ FILE: packages/libsql-core/jest.config.js ================================================
export default {
    preset: "ts-jest/presets/default-esm",
    moduleNameMapper: {
        "^(\\.{1,2}/.*)\\.js$": "$1",
    },
    testMatch: ["**/__tests__/*.test.[jt]s"],
};

================================================ FILE: packages/libsql-core/package-cjs.json ================================================
{ "type": "commonjs" }

================================================ FILE: packages/libsql-core/package.json ================================================
{
    "name": "@libsql/core",
    "version": "0.17.2",
    "keywords": [
        "libsql",
        "database",
        "sqlite",
        "serverless",
        "vercel",
        "netlify",
        "lambda"
    ],
    "description": "libSQL driver for TypeScript and JavaScript",
    "repository": {
        "type": "git",
        "url": "git+https://github.com/tursodatabase/libsql-client-ts",
        "directory": "packages/libsql-core"
    },
    "authors": [
        "Jan Špaček ",
        "Pekka Enberg ",
        "Jan Plhak "
    ],
    "license": "MIT",
    "type": "module",
    "exports": {
        "./api": {
            "types": "./lib-esm/api.d.ts",
            "import": "./lib-esm/api.js",
            "require": "./lib-cjs/api.js"
        },
        "./config": {
            "types": "./lib-esm/config.d.ts",
            "import": "./lib-esm/config.js",
            "require": "./lib-cjs/config.js"
        },
        "./uri": {
            "types": "./lib-esm/uri.d.ts",
            "import": "./lib-esm/uri.js",
"require": "./lib-cjs/uri.js" }, "./util": { "types": "./lib-esm/util.d.ts", "import": "./lib-esm/util.js", "require": "./lib-cjs/util.js" } }, "typesVersions": { "*": { "api": [ "./lib-esm/api.d.ts" ], "config": [ "./lib-esm/config.d.ts" ], "uri": [ "./lib-esm/uri.d.ts" ], "util": [ "./lib-esm/util.d.ts" ] } }, "files": [ "lib-cjs/**", "lib-esm/**" ], "scripts": { "prepublishOnly": "npm run build", "prebuild": "rm -rf ./lib-cjs ./lib-esm", "build": "npm run build:cjs && npm run build:esm", "build:cjs": "tsc -p tsconfig.build-cjs.json", "build:esm": "tsc -p tsconfig.build-esm.json", "format:check": "prettier --check .", "postbuild": "cp package-cjs.json ./lib-cjs/package.json", "test": "jest --runInBand", "typecheck": "tsc --noEmit", "typedoc": "rm -rf ./docs && typedoc" }, "dependencies": { "js-base64": "^3.7.5" }, "devDependencies": { "@types/jest": "^29.2.5", "@types/node": "^18.15.5", "jest": "^29.3.1", "ts-jest": "^29.0.5", "typedoc": "^0.23.28", "typescript": "^4.9.4" } } ================================================ FILE: packages/libsql-core/src/api.ts ================================================ /** Configuration object for {@link createClient}. */ export interface Config { /** The database URL. * * The client supports `libsql:`, `http:`/`https:`, `ws:`/`wss:` and `file:` URL. For more infomation, * please refer to the project README: * * https://github.com/libsql/libsql-client-ts#supported-urls */ url: string; /** Authentication token for the database. */ authToken?: string; /** Encryption key for the database. */ encryptionKey?: string; /** Encryption key for encryption in Turso Cloud. */ remoteEncryptionKey?: string; /** URL of a remote server to synchronize database with. */ syncUrl?: string; /** Sync interval in seconds. */ syncInterval?: number; /** Read your writes */ readYourWrites?: boolean; /** Enable offline writes */ offline?: boolean; /** Enables or disables TLS for `libsql:` URLs. * * By default, `libsql:` URLs use TLS. 
You can set this option to `false` to disable TLS. */ tls?: boolean; /** How to convert SQLite integers to JavaScript values: * * - `"number"` (default): returns SQLite integers as JavaScript `number`-s (double precision floats). * `number` cannot precisely represent integers larger than 2^53-1 in absolute value, so attempting to read * larger integers will throw a `RangeError`. * - `"bigint"`: returns SQLite integers as JavaScript `bigint`-s (arbitrary precision integers). Bigints can * precisely represent all SQLite integers. * - `"string"`: returns SQLite integers as strings. */ intMode?: IntMode; /** Custom `fetch` function to use for the HTTP client. * * By default, the HTTP client uses `fetch` from the `@libsql/isomorphic-fetch` package, but you can pass * your own function here. The argument to this function will be `Request` from * `@libsql/isomorphic-fetch`, and it must return a promise that resolves to an object that is compatible * with the Web `Response`. */ fetch?: Function; /** Concurrency limit. * * By default, the client performs up to 20 concurrent requests. You can set this option to a higher * number to increase the concurrency limit or set it to 0 to disable concurrency limits completely. */ concurrency?: number | undefined; } /** Representation of integers from database as JavaScript values. See {@link Config.intMode}. */ export type IntMode = "number" | "bigint" | "string"; /** Client object for a remote or local database. * * After you are done with the client, you **should** close it by calling {@link close}. */ export interface Client { /** Execute a single SQL statement. * * Every statement executed with this method is executed in its own logical database connection. If you * want to execute a group of statements in a transaction, use the {@link batch} or the {@link * transaction} methods. 
* * ```javascript * // execute a statement without arguments * const rs = await client.execute("SELECT * FROM books"); * * // execute a statement with positional arguments * const rs = await client.execute({ * sql: "SELECT * FROM books WHERE author = ?", * args: ["Jane Austen"], * }); * * // execute a statement with named arguments * const rs = await client.execute({ * sql: "SELECT * FROM books WHERE published_at > $year", * args: {year: 1719}, * }); * ``` */ execute(stmt: InStatement): Promise; execute(sql: string, args?: InArgs): Promise; /** Execute a batch of SQL statements in a transaction. * * The batch is executed in its own logical database connection and the statements are wrapped in a * transaction. This ensures that the batch is applied atomically: either all or no changes are applied. * * The `mode` parameter selects the transaction mode for the batch; please see {@link TransactionMode} for * details. The default transaction mode is `"deferred"`. * * If any of the statements in the batch fails with an error, the batch is aborted, the transaction is * rolled back and the returned promise is rejected. * * This method provides non-interactive transactions. If you need interactive transactions, please use the * {@link transaction} method. * * ```javascript * const rss = await client.batch([ * // batch statement without arguments * "DELETE FROM books WHERE name LIKE '%Crusoe'", * * // batch statement with positional arguments * { * sql: "INSERT INTO books (name, author, published_at) VALUES (?, ?, ?)", * args: ["First Impressions", "Jane Austen", 1813], * }, * * // batch statement with named arguments * { * sql: "UPDATE books SET name = $new WHERE name = $old", * args: {old: "First Impressions", new: "Pride and Prejudice"}, * }, * ], "write"); * ``` */ batch( stmts: Array, mode?: TransactionMode, ): Promise>; /** Execute a batch of SQL statements in a transaction with PRAGMA foreign_keys=off; before and PRAGMA foreign_keys=on; after. 
* * The batch is executed in its own logical database connection and the statements are wrapped in a * transaction. This ensures that the batch is applied atomically: either all or no changes are applied. * * The transaction mode is `"deferred"`. * * If any of the statements in the batch fails with an error, the batch is aborted, the transaction is * rolled back and the returned promise is rejected. * * ```javascript * const rss = await client.migrate([ * // statement without arguments * "CREATE TABLE test (a INT)", * * // statement with positional arguments * { * sql: "INSERT INTO books (name, author, published_at) VALUES (?, ?, ?)", * args: ["First Impressions", "Jane Austen", 1813], * }, * * // statement with named arguments * { * sql: "UPDATE books SET name = $new WHERE name = $old", * args: {old: "First Impressions", new: "Pride and Prejudice"}, * }, * ]); * ``` */ migrate(stmts: Array): Promise>; /** Start an interactive transaction. * * Interactive transactions allow you to interleave execution of SQL statements with your application * logic. They can be used if the {@link batch} method is too restrictive, but please note that * interactive transactions have higher latency. * * The `mode` parameter selects the transaction mode for the interactive transaction; please see {@link * TransactionMode} for details. The default transaction mode is `"deferred"`. * * You **must** make sure that the returned {@link Transaction} object is closed, by calling {@link * Transaction.close}, {@link Transaction.commit} or {@link Transaction.rollback}. The best practice is * to call {@link Transaction.close} in a `finally` block, as follows: * * ```javascript * const transaction = client.transaction("write"); * try { * // do some operations with the transaction here * await transaction.execute({ * sql: "INSERT INTO books (name, author) VALUES (?, ?)", * args: ["First Impressions", "Jane Austen"], * }); * await transaction.execute({ * sql: "UPDATE books SET name = ? 
WHERE name = ?", * args: ["Pride and Prejudice", "First Impressions"], * }); * * // if all went well, commit the transaction * await transaction.commit(); * } finally { * // make sure to close the transaction, even if an exception was thrown * transaction.close(); * } * ``` */ transaction(mode?: TransactionMode): Promise; /** Start an interactive transaction in `"write"` mode. * * Please see {@link transaction} for details. * * @deprecated Please specify the `mode` explicitly. The default `"write"` will be removed in the next * major release. */ transaction(): Promise; /** Execute a sequence of SQL statements separated by semicolons. * * The statements are executed sequentially on a new logical database connection. If a statement fails, * further statements are not executed and this method throws an error. All results from the statements * are ignored. * * We do not wrap the statements in a transaction, but the SQL can contain explicit transaction-control * statements such as `BEGIN` and `COMMIT`. * * This method is intended to be used with existing SQL scripts, such as migrations or small database * dumps. If you want to execute a sequence of statements programmatically, please use {@link batch} * instead. * * ```javascript * await client.executeMultiple(` * CREATE TABLE books (id INTEGER PRIMARY KEY, title TEXT NOT NULL, author_id INTEGER NOT NULL); * CREATE TABLE authors (id INTEGER PRIMARY KEY, name TEXT NOT NULL); * `); * ``` */ executeMultiple(sql: string): Promise; sync(): Promise; /** Close the client and release resources. * * This method closes the client (aborting any operations that are currently in progress) and releases any * resources associated with the client (such as a WebSocket connection). */ close(): void; /** Reconnect after the client has been closed. */ reconnect(): void; /** Is the client closed? * * This is set to `true` after a call to {@link close} or if the client encounters an unrecoverable * error. 
*/ closed: boolean; /** Which protocol does the client use? * * - `"http"` if the client connects over HTTP * - `"ws"` if the client connects over WebSockets * - `"file"` if the client works with a local file */ protocol: string; } /** Interactive transaction. * * A transaction groups multiple SQL statements together, so that they are applied atomically: either all * changes are applied, or none are. Other SQL statements on the database (including statements executed on * the same {@link Client} object outside of this transaction) will not see any changes from the transaction * until the transaction is committed by calling {@link commit}. You can also use {@link rollback} to abort * the transaction and roll back the changes. * * You **must** make sure that the {@link Transaction} object is closed, by calling {@link close}, {@link * commit} or {@link rollback}. The best practice is to call {@link close} in a `finally` block, as follows: * * ```javascript * const transaction = client.transaction("write"); * try { * // do some operations with the transaction here * await transaction.execute({ * sql: "INSERT INTO books (name, author) VALUES (?, ?)", * args: ["First Impressions", "Jane Austen"], * }); * await transaction.execute({ * sql: "UPDATE books SET name = ? WHERE name = ?", * args: ["Pride and Prejudice", "First Impressions"], * }); * * // if all went well, commit the transaction * await transaction.commit(); * } finally { * // make sure to close the transaction, even if an exception was thrown * transaction.close(); * } * ``` */ export interface Transaction { /** Execute an SQL statement in this transaction. * * If the statement makes any changes to the database, these changes won't be visible to statements * outside of this transaction until you call {@link rollback}. 
* * ```javascript * await transaction.execute({ * sql: "INSERT INTO books (name, author) VALUES (?, ?)", * args: ["First Impressions", "Jane Austen"], * }); * ``` */ execute(stmt: InStatement): Promise; /** Execute a batch of SQL statements in this transaction. * * If any of the statements in the batch fails with an error, further statements are not executed and the * returned promise is rejected with an error, but the transaction is not rolled back. */ batch(stmts: Array): Promise>; /** Execute a sequence of SQL statements separated by semicolons. * * The statements are executed sequentially in the transaction. If a statement fails, further statements * are not executed and this method throws an error, but the transaction won't be rolled back. All results * from the statements are ignored. * * This method is intended to be used with existing SQL scripts, such as migrations or small database * dumps. If you want to execute statements programmatically, please use {@link batch} instead. */ executeMultiple(sql: string): Promise; /** Roll back any changes from this transaction. * * This method closes the transaction and undoes any changes done by the previous SQL statements on this * transaction. You cannot call this method after calling {@link commit}, though. */ rollback(): Promise; /** Commit changes from this transaction to the database. * * This method closes the transaction and applies all changes done by the previous SQL statement on this * transaction. Once the returned promise is resolved successfully, the database guarantees that the * changes were applied. */ commit(): Promise; /** Close the transaction. * * This method closes the transaction and releases any resources associated with the transaction. If the * transaction is already closed (perhaps by a previous call to {@link commit} or {@link rollback}), then * this method does nothing. * * If the transaction wasn't already committed by calling {@link commit}, the transaction is rolled * back. 
*/ close(): void; /** Is the transaction closed? * * This is set to `true` after a call to {@link close}, {@link commit} or {@link rollback}, or if we * encounter an unrecoverable error. */ closed: boolean; } /** Transaction mode. * * The client supports multiple modes for transactions: * * - `"write"` is a read-write transaction, started with `BEGIN IMMEDIATE`. This transaction mode supports * both read statements (`SELECT`) and write statements (`INSERT`, `UPDATE`, `CREATE TABLE`, etc). The libSQL * server cannot process multiple write transactions concurrently, so if there is another write transaction * already started, our transaction will wait in a queue before it can begin. * * - `"read"` is a read-only transaction, started with `BEGIN TRANSACTION READONLY` (a libSQL extension). This * transaction mode supports only reads (`SELECT`) and will not accept write statements. The libSQL server can * handle multiple read transactions at the same time, so we don't need to wait for other transactions to * complete. A read-only transaction can also be executed on a local replica, so it provides lower latency. * * - `"deferred"` is a transaction started with `BEGIN DEFERRED`, which starts as a read transaction, but the * first write statement will try to upgrade it to a write transaction. However, this upgrade may fail if * there already is a write transaction executing on the server, so you should be ready to handle these * failures. * * If your transaction includes only read statements, `"read"` is always preferred over `"deferred"` or * `"write"`, because `"read"` transactions can be executed more efficiently and don't block other * transactions. * * If your transaction includes both read and write statements, you should be using the `"write"` mode most of * the time. Use the `"deferred"` mode only if you prefer to fail the write transaction instead of waiting for * the previous write transactions to complete. 
*/ export type TransactionMode = "write" | "read" | "deferred"; /** Result of executing an SQL statement. * * ```javascript * const rs = await client.execute("SELECT name, title FROM books"); * console.log(`Found ${rs.rows.length} books`); * for (const row in rs.rows) { * console.log(`Book ${row[0]} by ${row[1]}`); * } * * const rs = await client.execute("DELETE FROM books WHERE author = 'Jane Austen'"); * console.log(`Deleted ${rs.rowsAffected} books`); * ``` */ export interface ResultSet { /** Names of columns. * * Names of columns can be defined using the `AS` keyword in SQL: * * ```sql * SELECT author AS author, COUNT(*) AS count FROM books GROUP BY author * ``` */ columns: Array; /** Types of columns. * * The types are currently shown for types declared in a SQL table. For * column types of function calls, for example, an empty string is * returned. */ columnTypes: Array; /** Rows produced by the statement. */ rows: Array; /** Number of rows that were affected by an UPDATE, INSERT or DELETE operation. * * This value is not specified for other SQL statements. */ rowsAffected: number; /** ROWID of the last inserted row. * * This value is not specified if the SQL statement was not an INSERT or if the table was not a ROWID * table. */ lastInsertRowid: bigint | undefined; /** Converts the result set to JSON. * * This is used automatically by `JSON.stringify()`, but you can also call it explicitly. */ toJSON(): any; } /** Row returned from an SQL statement. * * The row object can be used as an `Array` or as an object: * * ```javascript * const rs = await client.execute("SELECT name, title FROM books"); * for (const row in rs.rows) { * // Get the value from column `name` * console.log(row.name); * // Get the value from second column (`title`) * console.log(row[1]); * } * ``` */ export interface Row { /** Number of columns in this row. * * All rows in one {@link ResultSet} have the same number and names of columns. 
*/ length: number; /** Columns can be accessed like an array by numeric indexes. */ [index: number]: Value; /** Columns can be accessed like an object by column names. */ [name: string]: Value; } export type Replicated = | { frame_no: number; frames_synced: number } | undefined; export type Value = null | string | number | bigint | ArrayBuffer; export type InValue = Value | boolean | Uint8Array | Date; export type InStatement = { sql: string; args?: InArgs } | string; export type InArgs = Array | Record; /** Error thrown by the client. */ export class LibsqlError extends Error { /** Machine-readable error code. */ code: string; /** Extended error code with more specific information (e.g., SQLITE_CONSTRAINT_PRIMARYKEY). */ extendedCode?: string; /** Raw numeric error code */ rawCode?: number; constructor( message: string, code: string, extendedCode?: string, rawCode?: number, cause?: Error, ) { if (code !== undefined) { message = `${code}: ${message}`; } super(message, { cause }); this.code = code; this.extendedCode = extendedCode; this.rawCode = rawCode; this.name = "LibsqlError"; } } /** Error thrown by the client during batch operations. */ export class LibsqlBatchError extends LibsqlError { /** The zero-based index of the statement that failed in the batch. 
*/ statementIndex: number; constructor( message: string, statementIndex: number, code: string, extendedCode?: string, rawCode?: number, cause?: Error, ) { super(message, code, extendedCode, rawCode, cause); this.statementIndex = statementIndex; this.name = "LibsqlBatchError"; } } ================================================ FILE: packages/libsql-core/src/config.ts ================================================ import type { Config, IntMode } from "./api.js"; import { LibsqlError } from "./api.js"; import type { Authority } from "./uri.js"; import { parseUri } from "./uri.js"; import { supportedUrlLink } from "./util.js"; export interface ExpandedConfig { scheme: ExpandedScheme; tls: boolean; authority: Authority | undefined; path: string; authToken: string | undefined; encryptionKey: string | undefined; remoteEncryptionKey: string | undefined; syncUrl: string | undefined; syncInterval: number | undefined; readYourWrites: boolean | undefined; offline: boolean | undefined; intMode: IntMode; fetch: Function | undefined; concurrency: number; } export type ExpandedScheme = "wss" | "ws" | "https" | "http" | "file"; type queryParamDef = { values?: string[]; update?: (key: string, value: string) => void; }; type queryParamsDef = { [key: string]: queryParamDef }; const inMemoryMode = ":memory:"; export function isInMemoryConfig(config: ExpandedConfig): boolean { return ( config.scheme === "file" && (config.path === ":memory:" || config.path.startsWith(":memory:?")) ); } export function expandConfig( config: Readonly, preferHttp: boolean, ): ExpandedConfig { if (typeof config !== "object") { // produce a reasonable error message in the common case where users type // `createClient("libsql://...")` instead of `createClient({url: "libsql://..."})` throw new TypeError( `Expected client configuration as object, got ${typeof config}`, ); } let { url, authToken, tls, intMode, concurrency } = config; // fill simple defaults right here concurrency = Math.max(0, concurrency || 
20); intMode ??= "number"; let connectionQueryParams: string[] = []; // recognized query parameters which we sanitize through white list of valid key-value pairs // convert plain :memory: url to URI format to make logic more uniform if (url === inMemoryMode) { url = "file::memory:"; } // parse url parameters first and override config with update values const uri = parseUri(url); const originalUriScheme = uri.scheme.toLowerCase(); const isInMemoryMode = originalUriScheme === "file" && uri.path === inMemoryMode && uri.authority === undefined; let queryParamsDef: queryParamsDef; if (isInMemoryMode) { queryParamsDef = { cache: { values: ["shared", "private"], update: (key, value) => connectionQueryParams.push(`${key}=${value}`), }, }; } else { queryParamsDef = { tls: { values: ["0", "1"], update: (_, value) => (tls = value === "1"), }, authToken: { update: (_, value) => (authToken = value), }, }; } for (const { key, value } of uri.query?.pairs ?? []) { if (!Object.hasOwn(queryParamsDef, key)) { throw new LibsqlError( `Unsupported URL query parameter ${JSON.stringify(key)}`, "URL_PARAM_NOT_SUPPORTED", ); } const queryParamDef = queryParamsDef[key]; if ( queryParamDef.values !== undefined && !queryParamDef.values.includes(value) ) { throw new LibsqlError( `Unknown value for the "${key}" query argument: ${JSON.stringify(value)}. Supported values are: [${queryParamDef.values.map((x) => '"' + x + '"').join(", ")}]`, "URL_INVALID", ); } if (queryParamDef.update !== undefined) { queryParamDef?.update(key, value); } } // fill complex defaults & validate config const connectionQueryParamsString = connectionQueryParams.length === 0 ? "" : `?${connectionQueryParams.join("&")}`; const path = uri.path + connectionQueryParamsString; let scheme: string; if (originalUriScheme === "libsql") { if (tls === false) { if (uri.authority?.port === undefined) { throw new LibsqlError( 'A "libsql:" URL with ?tls=0 must specify an explicit port', "URL_INVALID", ); } scheme = preferHttp ? 
"http" : "ws"; } else { scheme = preferHttp ? "https" : "wss"; } } else { scheme = originalUriScheme; } if (scheme === "http" || scheme === "ws") { tls ??= false; } else { tls ??= true; } if ( scheme !== "http" && scheme !== "ws" && scheme !== "https" && scheme !== "wss" && scheme !== "file" ) { throw new LibsqlError( 'The client supports only "libsql:", "wss:", "ws:", "https:", "http:" and "file:" URLs, ' + `got ${JSON.stringify(uri.scheme + ":")}. ` + `For more information, please read ${supportedUrlLink}`, "URL_SCHEME_NOT_SUPPORTED", ); } if (intMode !== "number" && intMode !== "bigint" && intMode !== "string") { throw new TypeError( `Invalid value for intMode, expected "number", "bigint" or "string", got ${JSON.stringify(intMode)}`, ); } if (uri.fragment !== undefined) { throw new LibsqlError( `URL fragments are not supported: ${JSON.stringify("#" + uri.fragment)}`, "URL_INVALID", ); } if (isInMemoryMode) { return { scheme: "file", tls: false, path, intMode, concurrency, syncUrl: config.syncUrl, syncInterval: config.syncInterval, readYourWrites: config.readYourWrites, offline: config.offline, fetch: config.fetch, authToken: undefined, encryptionKey: undefined, remoteEncryptionKey: undefined, authority: undefined, }; } return { scheme, tls, authority: uri.authority, path, authToken, intMode, concurrency, encryptionKey: config.encryptionKey, remoteEncryptionKey: config.remoteEncryptionKey, syncUrl: config.syncUrl, syncInterval: config.syncInterval, readYourWrites: config.readYourWrites, offline: config.offline, fetch: config.fetch, }; } ================================================ FILE: packages/libsql-core/src/uri.ts ================================================ // URI parser based on RFC 3986 // We can't use the standard `URL` object, because we want to support relative `file:` URLs like // `file:relative/path/database.db`, which are not correct according to RFC 8089, which standardizes the // `file` scheme. 
import { LibsqlError } from "./api.js"; export interface Uri { scheme: string; authority: Authority | undefined; path: string; query: Query | undefined; fragment: string | undefined; } export interface HierPart { authority: Authority | undefined; path: string; } export interface Authority { host: string; port: number | undefined; userinfo: Userinfo | undefined; } export interface Userinfo { username: string; password: string | undefined; } export interface Query { pairs: Array; } export interface KeyValue { key: string; value: string; } export function parseUri(text: string): Uri { const match = URI_RE.exec(text); if (match === null) { throw new LibsqlError( `The URL '${text}' is not in a valid format`, "URL_INVALID", ); } const groups = match.groups!; const scheme = groups["scheme"]!; const authority = groups["authority"] !== undefined ? parseAuthority(groups["authority"]) : undefined; const path = percentDecode(groups["path"]!); const query = groups["query"] !== undefined ? parseQuery(groups["query"]) : undefined; const fragment = groups["fragment"] !== undefined ? percentDecode(groups["fragment"]) : undefined; return { scheme, authority, path, query, fragment }; } const URI_RE = (() => { const SCHEME = "(?[A-Za-z][A-Za-z.+-]*)"; const AUTHORITY = "(?[^/?#]*)"; const PATH = "(?[^?#]*)"; const QUERY = "(?[^#]*)"; const FRAGMENT = "(?.*)"; return new RegExp( `^${SCHEME}:(//${AUTHORITY})?${PATH}(\\?${QUERY})?(#${FRAGMENT})?$`, "su", ); })(); function parseAuthority(text: string): Authority { const match = AUTHORITY_RE.exec(text); if (match === null) { throw new LibsqlError( "The authority part of the URL is not in a valid format", "URL_INVALID", ); } const groups = match.groups!; const host = percentDecode(groups["host_br"] ?? groups["host"]); const port = groups["port"] ? parseInt(groups["port"], 10) : undefined; const userinfo = groups["username"] !== undefined ? { username: percentDecode(groups["username"]), password: groups["password"] !== undefined ? 
percentDecode(groups["password"]) : undefined, } : undefined; return { host, port, userinfo }; } const AUTHORITY_RE = (() => { return new RegExp( `^((?[^:]*)(:(?.*))?@)?((?[^:\\[\\]]*)|(\\[(?[^\\[\\]]*)\\]))(:(?[0-9]*))?$`, "su", ); })(); // Query string is parsed as application/x-www-form-urlencoded according to the Web URL standard: // https://url.spec.whatwg.org/#urlencoded-parsing function parseQuery(text: string): Query { const sequences = text.split("&"); const pairs = []; for (const sequence of sequences) { if (sequence === "") { continue; } let key: string; let value: string; const splitIdx = sequence.indexOf("="); if (splitIdx < 0) { key = sequence; value = ""; } else { key = sequence.substring(0, splitIdx); value = sequence.substring(splitIdx + 1); } pairs.push({ key: percentDecode(key.replaceAll("+", " ")), value: percentDecode(value.replaceAll("+", " ")), }); } return { pairs }; } function percentDecode(text: string): string { try { return decodeURIComponent(text); } catch (e) { if (e instanceof URIError) { throw new LibsqlError( `URL component has invalid percent encoding: ${e}`, "URL_INVALID", undefined, undefined, e, ); } throw e; } } export function encodeBaseUrl( scheme: string, authority: Authority | undefined, path: string, ): URL { if (authority === undefined) { throw new LibsqlError( `URL with scheme ${JSON.stringify(scheme + ":")} requires authority (the "//" part)`, "URL_INVALID", ); } const schemeText = `${scheme}:`; const hostText = encodeHost(authority.host); const portText = encodePort(authority.port); const userinfoText = encodeUserinfo(authority.userinfo); const authorityText = `//${userinfoText}${hostText}${portText}`; let pathText = path.split("/").map(encodeURIComponent).join("/"); if (pathText !== "" && !pathText.startsWith("/")) { pathText = "/" + pathText; } return new URL(`${schemeText}${authorityText}${pathText}`); } function encodeHost(host: string): string { return host.includes(":") ? 
`[${encodeURI(host)}]` : encodeURI(host); } function encodePort(port: number | undefined): string { return port !== undefined ? `:${port}` : ""; } function encodeUserinfo(userinfo: Userinfo | undefined): string { if (userinfo === undefined) { return ""; } const usernameText = encodeURIComponent(userinfo.username); const passwordText = userinfo.password !== undefined ? `:${encodeURIComponent(userinfo.password)}` : ""; return `${usernameText}${passwordText}@`; } ================================================ FILE: packages/libsql-core/src/util.ts ================================================ import { Base64 } from "js-base64"; import { ResultSet, Row, Value, TransactionMode, InStatement, LibsqlError, } from "./api"; export const supportedUrlLink = "https://github.com/libsql/libsql-client-ts#supported-urls"; export function transactionModeToBegin(mode: TransactionMode): string { if (mode === "write") { return "BEGIN IMMEDIATE"; } else if (mode === "read") { return "BEGIN TRANSACTION READONLY"; } else if (mode === "deferred") { return "BEGIN DEFERRED"; } else { throw RangeError( 'Unknown transaction mode, supported values are "write", "read" and "deferred"', ); } } export class ResultSetImpl implements ResultSet { columns: Array; columnTypes: Array; rows: Array; rowsAffected: number; lastInsertRowid: bigint | undefined; constructor( columns: Array, columnTypes: Array, rows: Array, rowsAffected: number, lastInsertRowid: bigint | undefined, ) { this.columns = columns; this.columnTypes = columnTypes; this.rows = rows; this.rowsAffected = rowsAffected; this.lastInsertRowid = lastInsertRowid; } toJSON(): any { return { columns: this.columns, columnTypes: this.columnTypes, rows: this.rows.map(rowToJson), rowsAffected: this.rowsAffected, lastInsertRowid: this.lastInsertRowid !== undefined ? 
"" + this.lastInsertRowid : null, }; } } function rowToJson(row: Row): unknown { return Array.prototype.map.call(row, valueToJson); } function valueToJson(value: Value): unknown { if (typeof value === "bigint") { return "" + value; } else if (value instanceof ArrayBuffer) { return Base64.fromUint8Array(new Uint8Array(value)); } else { return value; } } ================================================ FILE: packages/libsql-core/tsconfig.base.json ================================================ { "compilerOptions": { "moduleResolution": "node", "lib": ["esnext"], "target": "esnext", "esModuleInterop": true, "isolatedModules": true, "rootDir": "src/", "strict": true }, "include": ["src/"], "exclude": ["**/__tests__"] } ================================================ FILE: packages/libsql-core/tsconfig.build-cjs.json ================================================ { "extends": "./tsconfig.base.json", "compilerOptions": { "module": "commonjs", "declaration": false, "outDir": "./lib-cjs/" } } ================================================ FILE: packages/libsql-core/tsconfig.build-esm.json ================================================ { "extends": "./tsconfig.base.json", "compilerOptions": { "module": "esnext", "declaration": true, "outDir": "./lib-esm/" } } ================================================ FILE: packages/libsql-core/tsconfig.json ================================================ { "extends": "./tsconfig.base.json", "compilerOptions": { "noEmit": true, "incremental": true } } ================================================ FILE: packages/libsql-core/typedoc.json ================================================ { "entryPoints": ["src/node.ts"], "out": "docs", "excludePrivate": true, "excludeInternal": true, "visibilityFilters": { "inherited": true, "external": true }, "includeVersion": true } ================================================ FILE: testing/hrana-test-server/.gitignore ================================================ Session.vim *.pyc 
================================================ FILE: testing/hrana-test-server/LICENSE ================================================ MIT License Copyright 2023 the sqld authors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: testing/hrana-test-server/README.md ================================================ # Test servers for Hrana This repository contains simple Hrana servers implemented in Python, one for each version of the Hrana protocol. These servers are useful for testing our various Hrana libraries. By default, the server creates a single temporary database for all HTTP requests and a new temporary database for every WebSocket connection, so multiple streams in the same WebSocket connection share the database, but are isolated from other WebSocket connections. However, if you pass environment variable `PERSISTENT_DB`, all HTTP requests and WebSocket connections will use that as the database file. 
If you pass any arguments to the server, they will be interpreted as a command. After the server starts up, it spawns the command, waits for it to terminate, and returns its exit code.


================================================
FILE: testing/hrana-test-server/c3.py
================================================
import logging
import platform
from ctypes import (
    CDLL,
    POINTER,
    CFUNCTYPE,
    pointer,
    byref,
    string_at,
    cast,
    c_void_p,
    c_char_p,
    c_int,
    c_int64,
    c_uint64,
    c_double,
    c_char,
)

from sqlite3_error_map import sqlite_error_code_to_name

logger = logging.getLogger("server")

# Opaque handle aliases: both `sqlite3 *` and `sqlite3_stmt *` are plain pointers
# from ctypes' point of view.
c_sqlite3_p = c_void_p
c_sqlite3_stmt_p = c_void_p

# Callback for sqlite3_exec: int (*)(void*, int, char**, char**)
c_exec_callback_fn = CFUNCTYPE(c_int, c_void_p, c_int, POINTER(c_char_p), POINTER(c_char_p))
# Destructor passed to sqlite3_bind_blob64/sqlite3_bind_text: void (*)(void*)
c_destructor_fn = CFUNCTYPE(None, c_void_p)

# Load the system SQLite shared library; only Linux and macOS are supported here.
libfile_platform = {
    "Linux": "libsqlite3.so",
    "Darwin": "libsqlite3.dylib",
}
platform_name = platform.system()
libfile = libfile_platform[platform_name]
lib = CDLL(libfile)

# Declare argtypes/restype for every SQLite C API entry point we call, so that
# ctypes marshals arguments and return values correctly (especially 64-bit ints
# and pointers on LP64 platforms).

# -- connection lifecycle --
lib.sqlite3_open_v2.argtypes = (c_char_p, POINTER(c_sqlite3_p), c_int, c_char_p,)
lib.sqlite3_open_v2.restype = c_int
lib.sqlite3_close_v2.argtypes = (c_sqlite3_p,)
lib.sqlite3_close_v2.restype = c_int

# -- error reporting --
lib.sqlite3_extended_result_codes.argtypes = (c_sqlite3_p, c_int,)
lib.sqlite3_extended_result_codes.restype = c_int
lib.sqlite3_errmsg.argtypes = (c_sqlite3_p,)
lib.sqlite3_errmsg.restype = c_char_p
lib.sqlite3_errstr.argtypes = (c_int,)
lib.sqlite3_errstr.restype = c_char_p

# -- execution and connection state --
lib.sqlite3_exec.argtypes = (c_sqlite3_p, c_char_p, c_exec_callback_fn, c_void_p, POINTER(c_char_p),)
lib.sqlite3_exec.restype = c_int
lib.sqlite3_txn_state.argtypes = (c_sqlite3_p, c_char_p,)
lib.sqlite3_txn_state.restype = c_int
lib.sqlite3_changes64.argtypes = (c_sqlite3_p,)
lib.sqlite3_changes64.restype = c_int64
lib.sqlite3_total_changes64.argtypes = (c_sqlite3_p,)
lib.sqlite3_total_changes64.restype = c_int64
lib.sqlite3_last_insert_rowid.argtypes = (c_sqlite3_p,)
lib.sqlite3_last_insert_rowid.restype = c_int64
lib.sqlite3_limit.argtypes = (c_sqlite3_p, c_int, c_int,)
lib.sqlite3_limit.restype = c_int
lib.sqlite3_busy_timeout.argtypes = (c_sqlite3_p, c_int,)
lib.sqlite3_busy_timeout.restype = c_int
lib.sqlite3_get_autocommit.argtypes = (c_sqlite3_p,)
lib.sqlite3_get_autocommit.restype = c_int

# -- prepared statements --
lib.sqlite3_prepare_v2.argtypes = (
    c_sqlite3_p, c_void_p, c_int, POINTER(c_sqlite3_stmt_p), POINTER(c_void_p),)
lib.sqlite3_prepare_v2.restype = c_int
lib.sqlite3_finalize.argtypes = (c_sqlite3_stmt_p,)
lib.sqlite3_finalize.restype = c_int
lib.sqlite3_step.argtypes = (c_sqlite3_stmt_p,)
lib.sqlite3_step.restype = c_int

# -- parameter binding --
lib.sqlite3_bind_parameter_count.argtypes = (c_sqlite3_stmt_p,)
lib.sqlite3_bind_parameter_count.restype = c_int
lib.sqlite3_bind_parameter_index.argtypes = (c_sqlite3_stmt_p, c_char_p,)
lib.sqlite3_bind_parameter_index.restype = c_int
lib.sqlite3_bind_parameter_name.argtypes = (c_sqlite3_stmt_p, c_int,)
lib.sqlite3_bind_parameter_name.restype = c_char_p
lib.sqlite3_bind_blob64.argtypes = (c_sqlite3_stmt_p, c_int, c_void_p, c_uint64, c_destructor_fn,)
lib.sqlite3_bind_blob64.restype = c_int
lib.sqlite3_bind_text.argtypes = (c_sqlite3_stmt_p, c_int, POINTER(c_char), c_int, c_destructor_fn,)
lib.sqlite3_bind_text.restype = c_int
lib.sqlite3_bind_double.argtypes = (c_sqlite3_stmt_p, c_int, c_double,)
lib.sqlite3_bind_double.restype = c_int
lib.sqlite3_bind_int64.argtypes = (c_sqlite3_stmt_p, c_int, c_int64,)
lib.sqlite3_bind_int64.restype = c_int
lib.sqlite3_bind_null.argtypes = (c_sqlite3_stmt_p, c_int,)
lib.sqlite3_bind_null.restype = c_int

# -- result columns --
lib.sqlite3_column_count.argtypes = (c_sqlite3_stmt_p,)
lib.sqlite3_column_count.restype = c_int
lib.sqlite3_column_name.argtypes = (c_sqlite3_stmt_p, c_int,)
lib.sqlite3_column_name.restype = c_char_p
lib.sqlite3_column_decltype.argtypes = (c_sqlite3_stmt_p, c_int,)
lib.sqlite3_column_decltype.restype = c_char_p
lib.sqlite3_column_type.argtypes = (c_sqlite3_stmt_p, c_int,)
lib.sqlite3_column_type.restype = c_int
lib.sqlite3_column_blob.argtypes = (c_sqlite3_stmt_p, c_int,)
lib.sqlite3_column_blob.restype = c_void_p
lib.sqlite3_column_text.argtypes = (c_sqlite3_stmt_p, c_int,)
lib.sqlite3_column_text.restype = c_void_p
lib.sqlite3_column_bytes.argtypes = (c_sqlite3_stmt_p, c_int,)
lib.sqlite3_column_bytes.restype = c_int
lib.sqlite3_column_double.argtypes = (c_sqlite3_stmt_p, c_int,)
lib.sqlite3_column_double.restype = c_double
lib.sqlite3_column_int64.argtypes = (c_sqlite3_stmt_p, c_int,)
lib.sqlite3_column_int64.restype = c_int64
lib.sqlite3_stmt_readonly.argtypes = (c_sqlite3_stmt_p,)
lib.sqlite3_stmt_readonly.restype = c_int
lib.sqlite3_stmt_isexplain.argtypes = (c_sqlite3_stmt_p,)
lib.sqlite3_stmt_isexplain.restype = c_int

# Flags for sqlite3_open_v2().
SQLITE_OPEN_READWRITE = 0x00000002
SQLITE_OPEN_CREATE = 0x00000004
# Special destructor value telling SQLite to make its own copy of bound data.
SQLITE_TRANSIENT = c_destructor_fn(-1)
# sqlite3_step() results.
SQLITE_ROW = 100
SQLITE_DONE = 101
# Fundamental datatypes returned by sqlite3_column_type().
SQLITE_INTEGER = 1
SQLITE_FLOAT = 2
SQLITE_BLOB = 4
SQLITE_NULL = 5
SQLITE_TEXT = 3
# Identifiers for sqlite3_limit() categories.
SQLITE_LIMIT_LENGTH = 0
SQLITE_LIMIT_SQL_LENGTH = 1
SQLITE_LIMIT_COLUMN = 2
SQLITE_LIMIT_EXPR_DEPTH = 3
SQLITE_LIMIT_COMPOUND_SELECT = 4
SQLITE_LIMIT_VDBE_OP = 5
SQLITE_LIMIT_FUNCTION_ARG = 6
SQLITE_LIMIT_ATTACHED = 7
SQLITE_LIMIT_LIKE_PATTERN_LENGTH = 8
SQLITE_LIMIT_VARIABLE_NUMBER = 9
SQLITE_LIMIT_TRIGGER_DEPTH = 10
SQLITE_LIMIT_WORKER_THREADS = 11


class Conn:
    """Thin ctypes wrapper around an open sqlite3* database handle."""

    def __init__(self, db_ptr):
        # db_ptr: a c_sqlite3_p handle; set to None once close() runs.
        self.db_ptr = db_ptr

    @classmethod
    def open(cls, filename):
        """Open database file `filename`, creating it if it does not exist."""
        filename_ptr = c_char_p(filename.encode())
        db_ptr = c_sqlite3_p()
        flags = SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE
        vfs_ptr = c_char_p()  # NULL => use the default VFS
        _try(lib.sqlite3_open_v2(filename_ptr, byref(db_ptr), flags, vfs_ptr))
        return cls(db_ptr)

    def close(self):
        # Idempotent: safe to call repeatedly and from __del__.
        if self.db_ptr is not None:
            lib.sqlite3_close_v2(self.db_ptr)
            self.db_ptr = None

    def __del__(self):
        self.close()

    def extended_result_codes(self, onoff):
        """Enable (truthy) or disable extended result codes for this connection."""
        assert self.db_ptr is not None
        lib.sqlite3_extended_result_codes(self.db_ptr, onoff)

    def errmsg(self):
        """Return the most recent error message for this connection."""
        assert self.db_ptr is not None
        return str(lib.sqlite3_errmsg(self.db_ptr).decode())

    @classmethod
    def errstr(cls, code):
        # English description of a result code; connection-independent.
        return
str(lib.sqlite3_errstr(code).decode())

    def exec(self, sql):
        """Run `sql` (possibly multiple statements) via sqlite3_exec, discarding rows."""
        assert self.db_ptr is not None
        sql_ptr = c_char_p(sql.encode())
        callback_ptr = c_exec_callback_fn()  # NULL callback: results are ignored
        arg_ptr = c_void_p()
        errmsg_ptr_ptr = pointer(c_char_p())
        _try(lib.sqlite3_exec(self.db_ptr, sql_ptr, callback_ptr, arg_ptr, errmsg_ptr_ptr), self)

    def txn_state(self):
        """Return the transaction state of the main schema (NULL schema pointer)."""
        assert self.db_ptr is not None
        schema_ptr = c_char_p()
        return lib.sqlite3_txn_state(self.db_ptr, schema_ptr)

    def prepare(self, sql):
        """Compile the first statement of `sql`.

        Returns (Stmt, tail) where `tail` is the uncompiled remainder of
        `sql` as a str, or (None, b"") when `sql` holds no statement.
        """
        assert self.db_ptr is not None
        sql = sql.encode()
        sql_data = c_char_p(sql)
        sql_ptr = cast(sql_data, c_void_p)
        # +1 so SQLite sees the trailing NUL and can take a fast path.
        sql_len = c_int(len(sql) + 1)
        stmt_ptr = c_sqlite3_stmt_p()
        tail_ptr = c_void_p()
        _try(lib.sqlite3_prepare_v2(self.db_ptr, sql_ptr, sql_len, byref(stmt_ptr), byref(tail_ptr)), self)
        if stmt_ptr.value is None:
            # NOTE(review): this branch returns bytes (b"") while the branch
            # below returns a decoded str — confirm callers only test truthiness.
            return None, b""
        # tail_ptr points into the sql buffer; recover the byte offset.
        tail = sql[tail_ptr.value - sql_ptr.value:]
        return Stmt(self, stmt_ptr), tail.decode()

    def changes(self):
        """Rows changed by the most recent statement."""
        assert self.db_ptr is not None
        return lib.sqlite3_changes64(self.db_ptr)

    def total_changes(self):
        """Total rows changed since the connection was opened."""
        assert self.db_ptr is not None
        return lib.sqlite3_total_changes64(self.db_ptr)

    def last_insert_rowid(self):
        assert self.db_ptr is not None
        return lib.sqlite3_last_insert_rowid(self.db_ptr)

    def limit(self, id, new_val):
        """Set run-time limit `id` to `new_val`; returns the previous value."""
        assert self.db_ptr is not None
        return lib.sqlite3_limit(self.db_ptr, id, new_val)

    def busy_timeout(self, ms):
        """Sleep-and-retry for up to `ms` milliseconds when a table is locked."""
        assert self.db_ptr is not None
        lib.sqlite3_busy_timeout(self.db_ptr, ms)

    def get_autocommit(self):
        """True when the connection is in autocommit (no open transaction)."""
        assert self.db_ptr is not None
        return lib.sqlite3_get_autocommit(self.db_ptr) != 0


class Stmt:
    """Wrapper around a prepared sqlite3_stmt*; owned by a Conn."""

    def __init__(self, conn, stmt_ptr):
        self.conn = conn
        self.stmt_ptr = stmt_ptr  # None after close()

    def close(self):
        # Idempotent finalize; also invoked from __del__.
        if self.stmt_ptr is not None:
            lib.sqlite3_finalize(self.stmt_ptr)
            self.stmt_ptr = None

    def __del__(self):
        self.close()

    def param_count(self):
        """Number of SQL parameters in the statement."""
        assert self.stmt_ptr is not None
        return lib.sqlite3_bind_parameter_count(self.stmt_ptr)

    def param_index(self, name):
        """1-based index of named parameter `name` (0 when not found)."""
        assert self.stmt_ptr is not None
        name_ptr = c_char_p(name.encode())
        return
lib.sqlite3_bind_parameter_index(self.stmt_ptr, name_ptr)

    def param_name(self, param_i):
        """Name of 1-based parameter `param_i`, or None for positional params."""
        assert self.stmt_ptr is not None
        name = lib.sqlite3_bind_parameter_name(self.stmt_ptr, param_i)
        return name.decode() if name is not None else None

    def bind(self, param_i, value):
        """Bind a Python value to 1-based parameter `param_i`.

        str -> TEXT, bytes -> BLOB, int -> INTEGER (bool included, since
        bool is a subclass of int), float -> REAL, None -> NULL.
        SQLITE_TRANSIENT makes SQLite copy the data immediately, so the
        Python buffers may be freed afterwards.
        """
        assert self.stmt_ptr is not None
        if isinstance(value, str):
            value = value.encode()
            value_ptr, value_len = c_char_p(value), c_int(len(value))
            _try(lib.sqlite3_bind_text(self.stmt_ptr, param_i, value_ptr, value_len, SQLITE_TRANSIENT), self.conn)
        elif isinstance(value, bytes):
            value_ptr, value_len = c_char_p(value), c_uint64(len(value))
            _try(lib.sqlite3_bind_blob64(self.stmt_ptr, param_i, value_ptr, value_len, SQLITE_TRANSIENT), self.conn)
        elif isinstance(value, int):
            _try(lib.sqlite3_bind_int64(self.stmt_ptr, param_i, c_int64(value)), self.conn)
        elif isinstance(value, float):
            _try(lib.sqlite3_bind_double(self.stmt_ptr, param_i, c_double(value)), self.conn)
        elif value is None:
            _try(lib.sqlite3_bind_null(self.stmt_ptr, param_i), self.conn)
        else:
            raise ValueError(f"Cannot bind {type(value)!r}")

    def step(self):
        """Advance the statement: True if a row is ready, False when finished."""
        assert self.stmt_ptr is not None
        res = lib.sqlite3_step(self.stmt_ptr)
        if res == SQLITE_DONE:
            return False
        elif res == SQLITE_ROW:
            return True
        # Any other result code is an error; _try raises SqliteError.
        _try(res, self.conn)

    def column_count(self):
        """Number of columns in the result set."""
        assert self.stmt_ptr is not None
        return lib.sqlite3_column_count(self.stmt_ptr)

    def column_name(self, column_i):
        """Name of 0-based result column `column_i`."""
        assert self.stmt_ptr is not None
        return lib.sqlite3_column_name(self.stmt_ptr, column_i).decode()

    def column_decltype(self, column_i):
        """Declared type of column `column_i`, or None (e.g. for expressions)."""
        assert self.stmt_ptr is not None
        name = lib.sqlite3_column_decltype(self.stmt_ptr, column_i)
        return name.decode() if name is not None else name

    def column(self, column_i):
        """Read column `column_i` of the current row as a Python value."""
        assert self.stmt_ptr is not None
        typ = lib.sqlite3_column_type(self.stmt_ptr, column_i)
        if typ == SQLITE_INTEGER:
            return lib.sqlite3_column_int64(self.stmt_ptr, column_i)
        elif typ == SQLITE_FLOAT:
            return lib.sqlite3_column_double(self.stmt_ptr, column_i)
        elif typ == SQLITE_BLOB:
            data_ptr =
lib.sqlite3_column_blob(self.stmt_ptr, column_i) data_len = lib.sqlite3_column_bytes(self.stmt_ptr, column_i) return bytes(string_at(data_ptr, data_len)) elif typ == SQLITE_TEXT: data_ptr = lib.sqlite3_column_text(self.stmt_ptr, column_i) data_len = lib.sqlite3_column_bytes(self.stmt_ptr, column_i) b = bytes(string_at(data_ptr, data_len)) try: return b.decode() except UnicodeDecodeError: logger.debug("Could not decode column %s, bytes %s", column_i, b, exc_info=True) raise elif typ == SQLITE_NULL: return None else: raise ValueError(f"Unknown SQLite type {typ}") def readonly(self): assert self.stmt_ptr is not None return lib.sqlite3_stmt_readonly(self.stmt_ptr) != 0 def isexplain(self): assert self.stmt_ptr is not None return lib.sqlite3_stmt_isexplain(self.stmt_ptr) class SqliteError(RuntimeError): def __init__(self, message, error_code=None) -> None: super().__init__(message) self.error_code = error_code self.error_name = sqlite_error_code_to_name.get(error_code) def _try(error_code, conn=None): if error_code == 0: return error_str = Conn.errstr(error_code) if conn is not None: details = f": {conn.errmsg()}" message = f"SQLite function returned error code {error_code} ({error_str}){details}" raise SqliteError(message, error_code) ================================================ FILE: testing/hrana-test-server/from_proto.py ================================================ import base64 def ws_client_msg(p): ty = p.WhichOneof("msg") if ty == "hello": return ws_hello_msg(p.hello) elif ty == "request": return ws_request_msg(p.request) else: raise RuntimeError("Unknown type of ClientMsg") def ws_hello_msg(p): return { "type": "hello", "jwt": p.jwt if p.HasField("jwt") else None, } def ws_request_msg(p): ty = p.WhichOneof("request") if ty == "open_stream": request = ws_open_stream_req(p.open_stream) elif ty == "close_stream": request = ws_close_stream_req(p.close_stream) elif ty == "execute": request = ws_execute_req(p.execute) elif ty == "batch": request = 
ws_batch_req(p.batch)
    elif ty == "open_cursor":
        request = ws_open_cursor_req(p.open_cursor)
    elif ty == "close_cursor":
        request = ws_close_cursor_req(p.close_cursor)
    elif ty == "fetch_cursor":
        request = ws_fetch_cursor_req(p.fetch_cursor)
    elif ty == "sequence":
        request = ws_sequence_req(p.sequence)
    elif ty == "describe":
        request = ws_describe_req(p.describe)
    elif ty == "store_sql":
        request = ws_store_sql_req(p.store_sql)
    elif ty == "close_sql":
        request = ws_close_sql_req(p.close_sql)
    elif ty == "get_autocommit":
        request = ws_get_autocommit_req(p.get_autocommit)
    else:
        raise RuntimeError("Unknown type of RequestMsg")
    return {"type": "request", "request_id": p.request_id, "request": request}


# One converter per WebSocket request variant; each maps protobuf fields
# onto the JSON-protocol dict shape field-for-field.
def ws_open_stream_req(p):
    return {"type": "open_stream", "stream_id": p.stream_id}


def ws_close_stream_req(p):
    return {"type": "close_stream", "stream_id": p.stream_id}


def ws_execute_req(p):
    return {
        "type": "execute",
        "stream_id": p.stream_id,
        "stmt": stmt(p.stmt),
    }


def ws_batch_req(p):
    return {
        "type": "batch",
        "stream_id": p.stream_id,
        "batch": batch(p.batch),
    }


def ws_open_cursor_req(p):
    return {
        "type": "open_cursor",
        "stream_id": p.stream_id,
        "cursor_id": p.cursor_id,
        "batch": batch(p.batch),
    }


def ws_close_cursor_req(p):
    return {
        "type": "close_cursor",
        "cursor_id": p.cursor_id,
    }


def ws_fetch_cursor_req(p):
    return {
        "type": "fetch_cursor",
        "cursor_id": p.cursor_id,
        "max_count": p.max_count,
    }


def ws_sequence_req(p):
    # Exactly one of sql / sql_id is expected to be present.
    return {
        "type": "sequence",
        "stream_id": p.stream_id,
        "sql": p.sql if p.HasField("sql") else None,
        "sql_id": p.sql_id if p.HasField("sql_id") else None,
    }


def ws_describe_req(p):
    return {
        "type": "describe",
        "stream_id": p.stream_id,
        "sql": p.sql if p.HasField("sql") else None,
        "sql_id": p.sql_id if p.HasField("sql_id") else None,
    }


def ws_store_sql_req(p):
    return {
        "type": "store_sql",
        "sql_id": p.sql_id,
        "sql": p.sql,
    }


def ws_close_sql_req(p):
    return {
        "type": "close_sql",
        "sql_id": p.sql_id,
    }


def ws_get_autocommit_req(p):
    return {"type":
"get_autocommit", "stream_id": p.stream_id, } def http_pipeline_req_body(p): return { "baton": p.baton if p.HasField("baton") else None, "requests": [http_stream_request(p) for p in p.requests], } def http_stream_request(p): ty = p.WhichOneof("request") if ty == "close": return {"type": "close"} if ty == "execute": return http_execute_stream_req(p.execute) elif ty == "batch": return http_batch_stream_req(p.batch) elif ty == "sequence": return http_sequence_stream_req(p.sequence) elif ty == "describe": return http_describe_stream_req(p.describe) elif ty == "store_sql": return http_store_sql_stream_req(p.store_sql) elif ty == "close_sql": return http_close_sql_stream_req(p.close_sql) elif ty == "get_autocommit": return {"type": "get_autocommit"} else: raise RuntimeError("Unknown type of StreamRequest") def http_execute_stream_req(p): return {"type": "execute", "stmt": stmt(p.stmt)} def http_batch_stream_req(p): return {"type": "batch", "batch": batch(p.batch)} def http_sequence_stream_req(p): return { "type": "sequence", "sql": p.sql if p.HasField("sql") else None, "sql_id": p.sql_id if p.HasField("sql_id") else None, } def http_describe_stream_req(p): return { "type": "describe", "sql": p.sql if p.HasField("sql") else None, "sql_id": p.sql_id if p.HasField("sql_id") else None, } def http_store_sql_stream_req(p): return {"type": "store_sql", "sql_id": p.sql_id, "sql": p.sql} def http_close_sql_stream_req(p): return {"type": "close_sql", "sql_id": p.sql_id} def http_cursor_req_body(p): return { "baton": p.baton if p.HasField("baton") else None, "batch": batch(p.batch), } def batch(p): return {"steps": [batch_step(p) for p in p.steps]} def batch_step(p): return { "condition": batch_cond(p.condition) if p.HasField("condition") else None, "stmt": stmt(p.stmt), } def batch_cond(p): ty = p.WhichOneof("cond") if ty == "step_ok": return {"type": "ok", "step": p.step_ok} elif ty == "step_error": return {"type": "error", "step": p.step_error} elif ty == "not": return {"type": 
"not", "cond": batch_cond(getattr(p, "not"))} elif ty == "and": return {"type": "and", "conds": [batch_cond(p) for p in getattr(p, "and").conds]} elif ty == "or": return {"type": "or", "conds": [batch_cond(p) for p in getattr(p, "or").conds]} elif ty == "is_autocommit": return {"type": "is_autocommit"} else: raise RuntimeError("Unknown type of BatchCond") def stmt(p): return { "sql": p.sql if p.HasField("sql") else None, "sql_id": p.sql_id if p.HasField("sql_id") else None, "args": [value(p) for p in p.args], "named_args": [named_arg(p) for p in p.named_args], "want_rows": p.want_rows if p.HasField("want_rows") else None, } def named_arg(p): return { "name": p.name, "value": value(p.value), } def value(p): ty = p.WhichOneof("value") if ty == "null": return {"type": "null"} elif ty == "integer": return {"type": "integer", "value": str(p.integer)} elif ty == "float": return {"type": "float", "value": p.float} elif ty == "text": return {"type": "text", "value": p.text} elif ty == "blob": return {"type": "blob", "base64": base64.b64encode(p.blob)} else: raise RuntimeError("Unknown type of Value") ================================================ FILE: testing/hrana-test-server/gen_sqlite3_error_map.py ================================================ import re # usage: # python gen_sqlite3_error_map.py > sqlite3_error_map.py # https://www.sqlite.org/c3ref/c_abort.html _sqlite_h_ = """" #define SQLITE_OK 0 /* Successful result */ /* beginning-of-error-codes */ #define SQLITE_ERROR 1 /* Generic error */ #define SQLITE_INTERNAL 2 /* Internal logic error in SQLite */ #define SQLITE_PERM 3 /* Access permission denied */ #define SQLITE_ABORT 4 /* Callback routine requested an abort */ #define SQLITE_BUSY 5 /* The database file is locked */ #define SQLITE_LOCKED 6 /* A table in the database is locked */ #define SQLITE_NOMEM 7 /* A malloc() failed */ #define SQLITE_READONLY 8 /* Attempt to write a readonly database */ #define SQLITE_INTERRUPT 9 /* Operation terminated by 
sqlite3_interrupt()*/ #define SQLITE_IOERR 10 /* Some kind of disk I/O error occurred */ #define SQLITE_CORRUPT 11 /* The database disk image is malformed */ #define SQLITE_NOTFOUND 12 /* Unknown opcode in sqlite3_file_control() */ #define SQLITE_FULL 13 /* Insertion failed because database is full */ #define SQLITE_CANTOPEN 14 /* Unable to open the database file */ #define SQLITE_PROTOCOL 15 /* Database lock protocol error */ #define SQLITE_EMPTY 16 /* Internal use only */ #define SQLITE_SCHEMA 17 /* The database schema changed */ #define SQLITE_TOOBIG 18 /* String or BLOB exceeds size limit */ #define SQLITE_CONSTRAINT 19 /* Abort due to constraint violation */ #define SQLITE_MISMATCH 20 /* Data type mismatch */ #define SQLITE_MISUSE 21 /* Library used incorrectly */ #define SQLITE_NOLFS 22 /* Uses OS features not supported on host */ #define SQLITE_AUTH 23 /* Authorization denied */ #define SQLITE_FORMAT 24 /* Not used */ #define SQLITE_RANGE 25 /* 2nd parameter to sqlite3_bind out of range */ #define SQLITE_NOTADB 26 /* File opened that is not a database file */ #define SQLITE_NOTICE 27 /* Notifications from sqlite3_log() */ #define SQLITE_WARNING 28 /* Warnings from sqlite3_log() */ #define SQLITE_ROW 100 /* sqlite3_step() has another row ready */ #define SQLITE_DONE 101 /* sqlite3_step() has finished executing */ /* end-of-error-codes */ """ _sqlite_h_parse_re = re.compile( r"^\s*#define\s+(?P\w+)\s+(?P\d+).*$", re.MULTILINE | re.ASCII, ) _sqlite_error_code_to_name = {} _sqlite_error_name_to_code = {} for m in _sqlite_h_parse_re.finditer(_sqlite_h_): code = int(m.group("code")) name = m.group("name") _sqlite_error_code_to_name[code] = name _sqlite_error_name_to_code[name] = code # https://www.sqlite.org/c3ref/c_abort_rollback.html _sqlite_h_extended = """ #define SQLITE_ERROR_MISSING_COLLSEQ (SQLITE_ERROR | (1<<8)) #define SQLITE_ERROR_RETRY (SQLITE_ERROR | (2<<8)) #define SQLITE_ERROR_SNAPSHOT (SQLITE_ERROR | (3<<8)) #define SQLITE_IOERR_READ (SQLITE_IOERR | 
(1<<8)) #define SQLITE_IOERR_SHORT_READ (SQLITE_IOERR | (2<<8)) #define SQLITE_IOERR_WRITE (SQLITE_IOERR | (3<<8)) #define SQLITE_IOERR_FSYNC (SQLITE_IOERR | (4<<8)) #define SQLITE_IOERR_DIR_FSYNC (SQLITE_IOERR | (5<<8)) #define SQLITE_IOERR_TRUNCATE (SQLITE_IOERR | (6<<8)) #define SQLITE_IOERR_FSTAT (SQLITE_IOERR | (7<<8)) #define SQLITE_IOERR_UNLOCK (SQLITE_IOERR | (8<<8)) #define SQLITE_IOERR_RDLOCK (SQLITE_IOERR | (9<<8)) #define SQLITE_IOERR_DELETE (SQLITE_IOERR | (10<<8)) #define SQLITE_IOERR_BLOCKED (SQLITE_IOERR | (11<<8)) #define SQLITE_IOERR_NOMEM (SQLITE_IOERR | (12<<8)) #define SQLITE_IOERR_ACCESS (SQLITE_IOERR | (13<<8)) #define SQLITE_IOERR_CHECKRESERVEDLOCK (SQLITE_IOERR | (14<<8)) #define SQLITE_IOERR_LOCK (SQLITE_IOERR | (15<<8)) #define SQLITE_IOERR_CLOSE (SQLITE_IOERR | (16<<8)) #define SQLITE_IOERR_DIR_CLOSE (SQLITE_IOERR | (17<<8)) #define SQLITE_IOERR_SHMOPEN (SQLITE_IOERR | (18<<8)) #define SQLITE_IOERR_SHMSIZE (SQLITE_IOERR | (19<<8)) #define SQLITE_IOERR_SHMLOCK (SQLITE_IOERR | (20<<8)) #define SQLITE_IOERR_SHMMAP (SQLITE_IOERR | (21<<8)) #define SQLITE_IOERR_SEEK (SQLITE_IOERR | (22<<8)) #define SQLITE_IOERR_DELETE_NOENT (SQLITE_IOERR | (23<<8)) #define SQLITE_IOERR_MMAP (SQLITE_IOERR | (24<<8)) #define SQLITE_IOERR_GETTEMPPATH (SQLITE_IOERR | (25<<8)) #define SQLITE_IOERR_CONVPATH (SQLITE_IOERR | (26<<8)) #define SQLITE_IOERR_VNODE (SQLITE_IOERR | (27<<8)) #define SQLITE_IOERR_AUTH (SQLITE_IOERR | (28<<8)) #define SQLITE_IOERR_BEGIN_ATOMIC (SQLITE_IOERR | (29<<8)) #define SQLITE_IOERR_COMMIT_ATOMIC (SQLITE_IOERR | (30<<8)) #define SQLITE_IOERR_ROLLBACK_ATOMIC (SQLITE_IOERR | (31<<8)) #define SQLITE_IOERR_DATA (SQLITE_IOERR | (32<<8)) #define SQLITE_IOERR_CORRUPTFS (SQLITE_IOERR | (33<<8)) #define SQLITE_LOCKED_SHAREDCACHE (SQLITE_LOCKED | (1<<8)) #define SQLITE_LOCKED_VTAB (SQLITE_LOCKED | (2<<8)) #define SQLITE_BUSY_RECOVERY (SQLITE_BUSY | (1<<8)) #define SQLITE_BUSY_SNAPSHOT (SQLITE_BUSY | (2<<8)) #define SQLITE_BUSY_TIMEOUT 
(SQLITE_BUSY | (3<<8)) #define SQLITE_CANTOPEN_NOTEMPDIR (SQLITE_CANTOPEN | (1<<8)) #define SQLITE_CANTOPEN_ISDIR (SQLITE_CANTOPEN | (2<<8)) #define SQLITE_CANTOPEN_FULLPATH (SQLITE_CANTOPEN | (3<<8)) #define SQLITE_CANTOPEN_CONVPATH (SQLITE_CANTOPEN | (4<<8)) #define SQLITE_CANTOPEN_DIRTYWAL (SQLITE_CANTOPEN | (5<<8)) /* Not Used */ #define SQLITE_CANTOPEN_SYMLINK (SQLITE_CANTOPEN | (6<<8)) #define SQLITE_CORRUPT_VTAB (SQLITE_CORRUPT | (1<<8)) #define SQLITE_CORRUPT_SEQUENCE (SQLITE_CORRUPT | (2<<8)) #define SQLITE_CORRUPT_INDEX (SQLITE_CORRUPT | (3<<8)) #define SQLITE_READONLY_RECOVERY (SQLITE_READONLY | (1<<8)) #define SQLITE_READONLY_CANTLOCK (SQLITE_READONLY | (2<<8)) #define SQLITE_READONLY_ROLLBACK (SQLITE_READONLY | (3<<8)) #define SQLITE_READONLY_DBMOVED (SQLITE_READONLY | (4<<8)) #define SQLITE_READONLY_CANTINIT (SQLITE_READONLY | (5<<8)) #define SQLITE_READONLY_DIRECTORY (SQLITE_READONLY | (6<<8)) #define SQLITE_ABORT_ROLLBACK (SQLITE_ABORT | (2<<8)) #define SQLITE_CONSTRAINT_CHECK (SQLITE_CONSTRAINT | (1<<8)) #define SQLITE_CONSTRAINT_COMMITHOOK (SQLITE_CONSTRAINT | (2<<8)) #define SQLITE_CONSTRAINT_FOREIGNKEY (SQLITE_CONSTRAINT | (3<<8)) #define SQLITE_CONSTRAINT_FUNCTION (SQLITE_CONSTRAINT | (4<<8)) #define SQLITE_CONSTRAINT_NOTNULL (SQLITE_CONSTRAINT | (5<<8)) #define SQLITE_CONSTRAINT_PRIMARYKEY (SQLITE_CONSTRAINT | (6<<8)) #define SQLITE_CONSTRAINT_TRIGGER (SQLITE_CONSTRAINT | (7<<8)) #define SQLITE_CONSTRAINT_UNIQUE (SQLITE_CONSTRAINT | (8<<8)) #define SQLITE_CONSTRAINT_VTAB (SQLITE_CONSTRAINT | (9<<8)) #define SQLITE_CONSTRAINT_ROWID (SQLITE_CONSTRAINT |(10<<8)) #define SQLITE_CONSTRAINT_PINNED (SQLITE_CONSTRAINT |(11<<8)) #define SQLITE_CONSTRAINT_DATATYPE (SQLITE_CONSTRAINT |(12<<8)) #define SQLITE_NOTICE_RECOVER_WAL (SQLITE_NOTICE | (1<<8)) #define SQLITE_NOTICE_RECOVER_ROLLBACK (SQLITE_NOTICE | (2<<8)) #define SQLITE_NOTICE_RBU (SQLITE_NOTICE | (3<<8)) #define SQLITE_WARNING_AUTOINDEX (SQLITE_WARNING | (1<<8)) #define SQLITE_AUTH_USER 
(SQLITE_AUTH | (1<<8)) #define SQLITE_OK_LOAD_PERMANENTLY (SQLITE_OK | (1<<8)) #define SQLITE_OK_SYMLINK (SQLITE_OK | (2<<8)) /* internal use only */ """ _sqlite_h_extended_parse_re = re.compile( r"^\s*#define\s+(?P\w+)\s+[(]\s*" + r"(?P\w+)\s*[|]\s*" + r"[(]\s*(?P\d+)\s*<<\s*(?P\d+)\s*[)]" + r"\s*[)].*$", re.MULTILINE | re.ASCII, ) for m in _sqlite_h_extended_parse_re.finditer(_sqlite_h_extended): dep = _sqlite_error_name_to_code[m.group("dep")] bit = int(m.group("bit")) shift = int(m.group("shift")) code = dep | (bit << shift) name = m.group("name") _sqlite_error_code_to_name[code] = name _sqlite_error_name_to_code[name] = code print("sqlite_error_code_to_name = {") for k, v in sorted(_sqlite_error_code_to_name.items()): print(f" {k}: \"{v}\",") print("}") ================================================ FILE: testing/hrana-test-server/proto/generate.sh ================================================ #!/bin/sh cd $(dirname "$0") protoc -I. *.proto --python_out=. --experimental_allow_proto3_optional sed -i 's/^import hrana_pb2 /from .. import hrana_pb2 /' hrana/*_pb2.py ================================================ FILE: testing/hrana-test-server/proto/hrana/http_pb2.py ================================================ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: hrana.http.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from .. 
import hrana_pb2 as hrana__pb2 DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x10hrana.http.proto\x12\nhrana.http\x1a\x0bhrana.proto\"\\\n\x0fPipelineReqBody\x12\x12\n\x05\x62\x61ton\x18\x01 \x01(\tH\x00\x88\x01\x01\x12+\n\x08requests\x18\x02 \x03(\x0b\x32\x19.hrana.http.StreamRequestB\x08\n\x06_baton\"\x7f\n\x10PipelineRespBody\x12\x12\n\x05\x62\x61ton\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x15\n\x08\x62\x61se_url\x18\x02 \x01(\tH\x01\x88\x01\x01\x12)\n\x07results\x18\x03 \x03(\x0b\x32\x18.hrana.http.StreamResultB\x08\n\x06_batonB\x0b\n\t_base_url\"a\n\x0cStreamResult\x12(\n\x02ok\x18\x01 \x01(\x0b\x32\x1a.hrana.http.StreamResponseH\x00\x12\x1d\n\x05\x65rror\x18\x02 \x01(\x0b\x32\x0c.hrana.ErrorH\x00\x42\x08\n\x06result\"J\n\rCursorReqBody\x12\x12\n\x05\x62\x61ton\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x1b\n\x05\x62\x61tch\x18\x02 \x01(\x0b\x32\x0c.hrana.BatchB\x08\n\x06_baton\"R\n\x0e\x43ursorRespBody\x12\x12\n\x05\x62\x61ton\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x15\n\x08\x62\x61se_url\x18\x02 \x01(\tH\x01\x88\x01\x01\x42\x08\n\x06_batonB\x0b\n\t_base_url\"\xb1\x03\n\rStreamRequest\x12+\n\x05\x63lose\x18\x01 \x01(\x0b\x32\x1a.hrana.http.CloseStreamReqH\x00\x12/\n\x07\x65xecute\x18\x02 \x01(\x0b\x32\x1c.hrana.http.ExecuteStreamReqH\x00\x12+\n\x05\x62\x61tch\x18\x03 \x01(\x0b\x32\x1a.hrana.http.BatchStreamReqH\x00\x12\x31\n\x08sequence\x18\x04 \x01(\x0b\x32\x1d.hrana.http.SequenceStreamReqH\x00\x12\x31\n\x08\x64\x65scribe\x18\x05 \x01(\x0b\x32\x1d.hrana.http.DescribeStreamReqH\x00\x12\x32\n\tstore_sql\x18\x06 \x01(\x0b\x32\x1d.hrana.http.StoreSqlStreamReqH\x00\x12\x32\n\tclose_sql\x18\x07 \x01(\x0b\x32\x1d.hrana.http.CloseSqlStreamReqH\x00\x12<\n\x0eget_autocommit\x18\x08 \x01(\x0b\x32\".hrana.http.GetAutocommitStreamReqH\x00\x42\t\n\x07request\"\xbb\x03\n\x0eStreamResponse\x12,\n\x05\x63lose\x18\x01 \x01(\x0b\x32\x1b.hrana.http.CloseStreamRespH\x00\x12\x30\n\x07\x65xecute\x18\x02 
\x01(\x0b\x32\x1d.hrana.http.ExecuteStreamRespH\x00\x12,\n\x05\x62\x61tch\x18\x03 \x01(\x0b\x32\x1b.hrana.http.BatchStreamRespH\x00\x12\x32\n\x08sequence\x18\x04 \x01(\x0b\x32\x1e.hrana.http.SequenceStreamRespH\x00\x12\x32\n\x08\x64\x65scribe\x18\x05 \x01(\x0b\x32\x1e.hrana.http.DescribeStreamRespH\x00\x12\x33\n\tstore_sql\x18\x06 \x01(\x0b\x32\x1e.hrana.http.StoreSqlStreamRespH\x00\x12\x33\n\tclose_sql\x18\x07 \x01(\x0b\x32\x1e.hrana.http.CloseSqlStreamRespH\x00\x12=\n\x0eget_autocommit\x18\x08 \x01(\x0b\x32#.hrana.http.GetAutocommitStreamRespH\x00\x42\n\n\x08response\"\x10\n\x0e\x43loseStreamReq\"\x11\n\x0f\x43loseStreamResp\"-\n\x10\x45xecuteStreamReq\x12\x19\n\x04stmt\x18\x01 \x01(\x0b\x32\x0b.hrana.Stmt\"6\n\x11\x45xecuteStreamResp\x12!\n\x06result\x18\x01 \x01(\x0b\x32\x11.hrana.StmtResult\"-\n\x0e\x42\x61tchStreamReq\x12\x1b\n\x05\x62\x61tch\x18\x01 \x01(\x0b\x32\x0c.hrana.Batch\"5\n\x0f\x42\x61tchStreamResp\x12\"\n\x06result\x18\x01 \x01(\x0b\x32\x12.hrana.BatchResult\"M\n\x11SequenceStreamReq\x12\x10\n\x03sql\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06sql_id\x18\x02 \x01(\x05H\x01\x88\x01\x01\x42\x06\n\x04_sqlB\t\n\x07_sql_id\"\x14\n\x12SequenceStreamResp\"M\n\x11\x44\x65scribeStreamReq\x12\x10\n\x03sql\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06sql_id\x18\x02 \x01(\x05H\x01\x88\x01\x01\x42\x06\n\x04_sqlB\t\n\x07_sql_id\";\n\x12\x44\x65scribeStreamResp\x12%\n\x06result\x18\x01 \x01(\x0b\x32\x15.hrana.DescribeResult\"0\n\x11StoreSqlStreamReq\x12\x0e\n\x06sql_id\x18\x01 \x01(\x05\x12\x0b\n\x03sql\x18\x02 \x01(\t\"\x14\n\x12StoreSqlStreamResp\"#\n\x11\x43loseSqlStreamReq\x12\x0e\n\x06sql_id\x18\x01 \x01(\x05\"\x14\n\x12\x43loseSqlStreamResp\"\x18\n\x16GetAutocommitStreamReq\"0\n\x17GetAutocommitStreamResp\x12\x15\n\ris_autocommit\x18\x01 \x01(\x08\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'hrana.http_pb2', _globals) if 
_descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None _globals['_PIPELINEREQBODY']._serialized_start=45 _globals['_PIPELINEREQBODY']._serialized_end=137 _globals['_PIPELINERESPBODY']._serialized_start=139 _globals['_PIPELINERESPBODY']._serialized_end=266 _globals['_STREAMRESULT']._serialized_start=268 _globals['_STREAMRESULT']._serialized_end=365 _globals['_CURSORREQBODY']._serialized_start=367 _globals['_CURSORREQBODY']._serialized_end=441 _globals['_CURSORRESPBODY']._serialized_start=443 _globals['_CURSORRESPBODY']._serialized_end=525 _globals['_STREAMREQUEST']._serialized_start=528 _globals['_STREAMREQUEST']._serialized_end=961 _globals['_STREAMRESPONSE']._serialized_start=964 _globals['_STREAMRESPONSE']._serialized_end=1407 _globals['_CLOSESTREAMREQ']._serialized_start=1409 _globals['_CLOSESTREAMREQ']._serialized_end=1425 _globals['_CLOSESTREAMRESP']._serialized_start=1427 _globals['_CLOSESTREAMRESP']._serialized_end=1444 _globals['_EXECUTESTREAMREQ']._serialized_start=1446 _globals['_EXECUTESTREAMREQ']._serialized_end=1491 _globals['_EXECUTESTREAMRESP']._serialized_start=1493 _globals['_EXECUTESTREAMRESP']._serialized_end=1547 _globals['_BATCHSTREAMREQ']._serialized_start=1549 _globals['_BATCHSTREAMREQ']._serialized_end=1594 _globals['_BATCHSTREAMRESP']._serialized_start=1596 _globals['_BATCHSTREAMRESP']._serialized_end=1649 _globals['_SEQUENCESTREAMREQ']._serialized_start=1651 _globals['_SEQUENCESTREAMREQ']._serialized_end=1728 _globals['_SEQUENCESTREAMRESP']._serialized_start=1730 _globals['_SEQUENCESTREAMRESP']._serialized_end=1750 _globals['_DESCRIBESTREAMREQ']._serialized_start=1752 _globals['_DESCRIBESTREAMREQ']._serialized_end=1829 _globals['_DESCRIBESTREAMRESP']._serialized_start=1831 _globals['_DESCRIBESTREAMRESP']._serialized_end=1890 _globals['_STORESQLSTREAMREQ']._serialized_start=1892 _globals['_STORESQLSTREAMREQ']._serialized_end=1940 _globals['_STORESQLSTREAMRESP']._serialized_start=1942 
_globals['_STORESQLSTREAMRESP']._serialized_end=1962 _globals['_CLOSESQLSTREAMREQ']._serialized_start=1964 _globals['_CLOSESQLSTREAMREQ']._serialized_end=1999 _globals['_CLOSESQLSTREAMRESP']._serialized_start=2001 _globals['_CLOSESQLSTREAMRESP']._serialized_end=2021 _globals['_GETAUTOCOMMITSTREAMREQ']._serialized_start=2023 _globals['_GETAUTOCOMMITSTREAMREQ']._serialized_end=2047 _globals['_GETAUTOCOMMITSTREAMRESP']._serialized_start=2049 _globals['_GETAUTOCOMMITSTREAMRESP']._serialized_end=2097 # @@protoc_insertion_point(module_scope) ================================================ FILE: testing/hrana-test-server/proto/hrana/ws_pb2.py ================================================ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: hrana.ws.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from .. 
import hrana_pb2 as hrana__pb2 DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0ehrana.ws.proto\x12\x08hrana.ws\x1a\x0bhrana.proto\"`\n\tClientMsg\x12#\n\x05hello\x18\x01 \x01(\x0b\x32\x12.hrana.ws.HelloMsgH\x00\x12\'\n\x07request\x18\x02 \x01(\x0b\x32\x14.hrana.ws.RequestMsgH\x00\x42\x05\n\x03msg\"\xd2\x01\n\tServerMsg\x12(\n\x08hello_ok\x18\x01 \x01(\x0b\x32\x14.hrana.ws.HelloOkMsgH\x00\x12.\n\x0bhello_error\x18\x02 \x01(\x0b\x32\x17.hrana.ws.HelloErrorMsgH\x00\x12.\n\x0bresponse_ok\x18\x03 \x01(\x0b\x32\x17.hrana.ws.ResponseOkMsgH\x00\x12\x34\n\x0eresponse_error\x18\x04 \x01(\x0b\x32\x1a.hrana.ws.ResponseErrorMsgH\x00\x42\x05\n\x03msg\"$\n\x08HelloMsg\x12\x10\n\x03jwt\x18\x01 \x01(\tH\x00\x88\x01\x01\x42\x06\n\x04_jwt\"\x0c\n\nHelloOkMsg\",\n\rHelloErrorMsg\x12\x1b\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x0c.hrana.Error\"\xd3\x04\n\nRequestMsg\x12\x12\n\nrequest_id\x18\x01 \x01(\x05\x12.\n\x0bopen_stream\x18\x02 \x01(\x0b\x32\x17.hrana.ws.OpenStreamReqH\x00\x12\x30\n\x0c\x63lose_stream\x18\x03 \x01(\x0b\x32\x18.hrana.ws.CloseStreamReqH\x00\x12\'\n\x07\x65xecute\x18\x04 \x01(\x0b\x32\x14.hrana.ws.ExecuteReqH\x00\x12#\n\x05\x62\x61tch\x18\x05 \x01(\x0b\x32\x12.hrana.ws.BatchReqH\x00\x12.\n\x0bopen_cursor\x18\x06 \x01(\x0b\x32\x17.hrana.ws.OpenCursorReqH\x00\x12\x30\n\x0c\x63lose_cursor\x18\x07 \x01(\x0b\x32\x18.hrana.ws.CloseCursorReqH\x00\x12\x30\n\x0c\x66\x65tch_cursor\x18\x08 \x01(\x0b\x32\x18.hrana.ws.FetchCursorReqH\x00\x12)\n\x08sequence\x18\t \x01(\x0b\x32\x15.hrana.ws.SequenceReqH\x00\x12)\n\x08\x64\x65scribe\x18\n \x01(\x0b\x32\x15.hrana.ws.DescribeReqH\x00\x12*\n\tstore_sql\x18\x0b \x01(\x0b\x32\x15.hrana.ws.StoreSqlReqH\x00\x12*\n\tclose_sql\x18\x0c \x01(\x0b\x32\x15.hrana.ws.CloseSqlReqH\x00\x12\x34\n\x0eget_autocommit\x18\r \x01(\x0b\x32\x1a.hrana.ws.GetAutocommitReqH\x00\x42\t\n\x07request\"\xe3\x04\n\rResponseOkMsg\x12\x12\n\nrequest_id\x18\x01 \x01(\x05\x12/\n\x0bopen_stream\x18\x02 
\x01(\x0b\x32\x18.hrana.ws.OpenStreamRespH\x00\x12\x31\n\x0c\x63lose_stream\x18\x03 \x01(\x0b\x32\x19.hrana.ws.CloseStreamRespH\x00\x12(\n\x07\x65xecute\x18\x04 \x01(\x0b\x32\x15.hrana.ws.ExecuteRespH\x00\x12$\n\x05\x62\x61tch\x18\x05 \x01(\x0b\x32\x13.hrana.ws.BatchRespH\x00\x12/\n\x0bopen_cursor\x18\x06 \x01(\x0b\x32\x18.hrana.ws.OpenCursorRespH\x00\x12\x31\n\x0c\x63lose_cursor\x18\x07 \x01(\x0b\x32\x19.hrana.ws.CloseCursorRespH\x00\x12\x31\n\x0c\x66\x65tch_cursor\x18\x08 \x01(\x0b\x32\x19.hrana.ws.FetchCursorRespH\x00\x12*\n\x08sequence\x18\t \x01(\x0b\x32\x16.hrana.ws.SequenceRespH\x00\x12*\n\x08\x64\x65scribe\x18\n \x01(\x0b\x32\x16.hrana.ws.DescribeRespH\x00\x12+\n\tstore_sql\x18\x0b \x01(\x0b\x32\x16.hrana.ws.StoreSqlRespH\x00\x12+\n\tclose_sql\x18\x0c \x01(\x0b\x32\x16.hrana.ws.CloseSqlRespH\x00\x12\x35\n\x0eget_autocommit\x18\r \x01(\x0b\x32\x1b.hrana.ws.GetAutocommitRespH\x00\x42\n\n\x08response\"C\n\x10ResponseErrorMsg\x12\x12\n\nrequest_id\x18\x01 \x01(\x05\x12\x1b\n\x05\x65rror\x18\x02 \x01(\x0b\x32\x0c.hrana.Error\"\"\n\rOpenStreamReq\x12\x11\n\tstream_id\x18\x01 \x01(\x05\"\x10\n\x0eOpenStreamResp\"#\n\x0e\x43loseStreamReq\x12\x11\n\tstream_id\x18\x01 \x01(\x05\"\x11\n\x0f\x43loseStreamResp\":\n\nExecuteReq\x12\x11\n\tstream_id\x18\x01 \x01(\x05\x12\x19\n\x04stmt\x18\x02 \x01(\x0b\x32\x0b.hrana.Stmt\"0\n\x0b\x45xecuteResp\x12!\n\x06result\x18\x01 \x01(\x0b\x32\x11.hrana.StmtResult\":\n\x08\x42\x61tchReq\x12\x11\n\tstream_id\x18\x01 \x01(\x05\x12\x1b\n\x05\x62\x61tch\x18\x02 \x01(\x0b\x32\x0c.hrana.Batch\"/\n\tBatchResp\x12\"\n\x06result\x18\x01 \x01(\x0b\x32\x12.hrana.BatchResult\"R\n\rOpenCursorReq\x12\x11\n\tstream_id\x18\x01 \x01(\x05\x12\x11\n\tcursor_id\x18\x02 \x01(\x05\x12\x1b\n\x05\x62\x61tch\x18\x03 \x01(\x0b\x32\x0c.hrana.Batch\"\x10\n\x0eOpenCursorResp\"#\n\x0e\x43loseCursorReq\x12\x11\n\tcursor_id\x18\x01 \x01(\x05\"\x11\n\x0f\x43loseCursorResp\"6\n\x0e\x46\x65tchCursorReq\x12\x11\n\tcursor_id\x18\x01 
\x01(\x05\x12\x11\n\tmax_count\x18\x02 \x01(\r\"D\n\x0f\x46\x65tchCursorResp\x12#\n\x07\x65ntries\x18\x01 \x03(\x0b\x32\x12.hrana.CursorEntry\x12\x0c\n\x04\x64one\x18\x02 \x01(\x08\"*\n\x0bStoreSqlReq\x12\x0e\n\x06sql_id\x18\x01 \x01(\x05\x12\x0b\n\x03sql\x18\x02 \x01(\t\"\x0e\n\x0cStoreSqlResp\"\x1d\n\x0b\x43loseSqlReq\x12\x0e\n\x06sql_id\x18\x01 \x01(\x05\"\x0e\n\x0c\x43loseSqlResp\"Z\n\x0bSequenceReq\x12\x11\n\tstream_id\x18\x01 \x01(\x05\x12\x10\n\x03sql\x18\x02 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06sql_id\x18\x03 \x01(\x05H\x01\x88\x01\x01\x42\x06\n\x04_sqlB\t\n\x07_sql_id\"\x0e\n\x0cSequenceResp\"Z\n\x0b\x44\x65scribeReq\x12\x11\n\tstream_id\x18\x01 \x01(\x05\x12\x10\n\x03sql\x18\x02 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06sql_id\x18\x03 \x01(\x05H\x01\x88\x01\x01\x42\x06\n\x04_sqlB\t\n\x07_sql_id\"5\n\x0c\x44\x65scribeResp\x12%\n\x06result\x18\x01 \x01(\x0b\x32\x15.hrana.DescribeResult\"%\n\x10GetAutocommitReq\x12\x11\n\tstream_id\x18\x01 \x01(\x05\"*\n\x11GetAutocommitResp\x12\x15\n\ris_autocommit\x18\x01 \x01(\x08\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'hrana.ws_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None _globals['_CLIENTMSG']._serialized_start=41 _globals['_CLIENTMSG']._serialized_end=137 _globals['_SERVERMSG']._serialized_start=140 _globals['_SERVERMSG']._serialized_end=350 _globals['_HELLOMSG']._serialized_start=352 _globals['_HELLOMSG']._serialized_end=388 _globals['_HELLOOKMSG']._serialized_start=390 _globals['_HELLOOKMSG']._serialized_end=402 _globals['_HELLOERRORMSG']._serialized_start=404 _globals['_HELLOERRORMSG']._serialized_end=448 _globals['_REQUESTMSG']._serialized_start=451 _globals['_REQUESTMSG']._serialized_end=1046 _globals['_RESPONSEOKMSG']._serialized_start=1049 _globals['_RESPONSEOKMSG']._serialized_end=1660 _globals['_RESPONSEERRORMSG']._serialized_start=1662 
_globals['_RESPONSEERRORMSG']._serialized_end=1729 _globals['_OPENSTREAMREQ']._serialized_start=1731 _globals['_OPENSTREAMREQ']._serialized_end=1765 _globals['_OPENSTREAMRESP']._serialized_start=1767 _globals['_OPENSTREAMRESP']._serialized_end=1783 _globals['_CLOSESTREAMREQ']._serialized_start=1785 _globals['_CLOSESTREAMREQ']._serialized_end=1820 _globals['_CLOSESTREAMRESP']._serialized_start=1822 _globals['_CLOSESTREAMRESP']._serialized_end=1839 _globals['_EXECUTEREQ']._serialized_start=1841 _globals['_EXECUTEREQ']._serialized_end=1899 _globals['_EXECUTERESP']._serialized_start=1901 _globals['_EXECUTERESP']._serialized_end=1949 _globals['_BATCHREQ']._serialized_start=1951 _globals['_BATCHREQ']._serialized_end=2009 _globals['_BATCHRESP']._serialized_start=2011 _globals['_BATCHRESP']._serialized_end=2058 _globals['_OPENCURSORREQ']._serialized_start=2060 _globals['_OPENCURSORREQ']._serialized_end=2142 _globals['_OPENCURSORRESP']._serialized_start=2144 _globals['_OPENCURSORRESP']._serialized_end=2160 _globals['_CLOSECURSORREQ']._serialized_start=2162 _globals['_CLOSECURSORREQ']._serialized_end=2197 _globals['_CLOSECURSORRESP']._serialized_start=2199 _globals['_CLOSECURSORRESP']._serialized_end=2216 _globals['_FETCHCURSORREQ']._serialized_start=2218 _globals['_FETCHCURSORREQ']._serialized_end=2272 _globals['_FETCHCURSORRESP']._serialized_start=2274 _globals['_FETCHCURSORRESP']._serialized_end=2342 _globals['_STORESQLREQ']._serialized_start=2344 _globals['_STORESQLREQ']._serialized_end=2386 _globals['_STORESQLRESP']._serialized_start=2388 _globals['_STORESQLRESP']._serialized_end=2402 _globals['_CLOSESQLREQ']._serialized_start=2404 _globals['_CLOSESQLREQ']._serialized_end=2433 _globals['_CLOSESQLRESP']._serialized_start=2435 _globals['_CLOSESQLRESP']._serialized_end=2449 _globals['_SEQUENCEREQ']._serialized_start=2451 _globals['_SEQUENCEREQ']._serialized_end=2541 _globals['_SEQUENCERESP']._serialized_start=2543 _globals['_SEQUENCERESP']._serialized_end=2557 
_globals['_DESCRIBEREQ']._serialized_start=2559 _globals['_DESCRIBEREQ']._serialized_end=2649 _globals['_DESCRIBERESP']._serialized_start=2651 _globals['_DESCRIBERESP']._serialized_end=2704 _globals['_GETAUTOCOMMITREQ']._serialized_start=2706 _globals['_GETAUTOCOMMITREQ']._serialized_end=2743 _globals['_GETAUTOCOMMITRESP']._serialized_start=2745 _globals['_GETAUTOCOMMITRESP']._serialized_end=2787 # @@protoc_insertion_point(module_scope) ================================================ FILE: testing/hrana-test-server/proto/hrana.http.proto ================================================ syntax = "proto3"; package hrana.http; import "hrana.proto"; message PipelineReqBody { optional string baton = 1; repeated StreamRequest requests = 2; } message PipelineRespBody { optional string baton = 1; optional string base_url = 2; repeated StreamResult results = 3; } message StreamResult { oneof result { StreamResponse ok = 1; Error error = 2; } } message CursorReqBody { optional string baton = 1; Batch batch = 2; } message CursorRespBody { optional string baton = 1; optional string base_url = 2; } message StreamRequest { oneof request { CloseStreamReq close = 1; ExecuteStreamReq execute = 2; BatchStreamReq batch = 3; SequenceStreamReq sequence = 4; DescribeStreamReq describe = 5; StoreSqlStreamReq store_sql = 6; CloseSqlStreamReq close_sql = 7; GetAutocommitStreamReq get_autocommit = 8; } } message StreamResponse { oneof response { CloseStreamResp close = 1; ExecuteStreamResp execute = 2; BatchStreamResp batch = 3; SequenceStreamResp sequence = 4; DescribeStreamResp describe = 5; StoreSqlStreamResp store_sql = 6; CloseSqlStreamResp close_sql = 7; GetAutocommitStreamResp get_autocommit = 8; } } message CloseStreamReq { } message CloseStreamResp { } message ExecuteStreamReq { Stmt stmt = 1; } message ExecuteStreamResp { StmtResult result = 1; } message BatchStreamReq { Batch batch = 1; } message BatchStreamResp { BatchResult result = 1; } message SequenceStreamReq { optional 
string sql = 1; optional int32 sql_id = 2; } message SequenceStreamResp { } message DescribeStreamReq { optional string sql = 1; optional int32 sql_id = 2; } message DescribeStreamResp { DescribeResult result = 1; } message StoreSqlStreamReq { int32 sql_id = 1; string sql = 2; } message StoreSqlStreamResp { } message CloseSqlStreamReq { int32 sql_id = 1; } message CloseSqlStreamResp { } message GetAutocommitStreamReq { } message GetAutocommitStreamResp { bool is_autocommit = 1; } ================================================ FILE: testing/hrana-test-server/proto/hrana.proto ================================================ syntax = "proto3"; package hrana; message Error { string message = 1; optional string code = 2; } message Stmt { optional string sql = 1; optional int32 sql_id = 2; repeated Value args = 3; repeated NamedArg named_args = 4; optional bool want_rows = 5; } message NamedArg { string name = 1; Value value = 2; } message StmtResult { repeated Col cols = 1; repeated Row rows = 2; uint64 affected_row_count = 3; optional sint64 last_insert_rowid = 4; } message Col { optional string name = 1; optional string decltype = 2; } message Row { repeated Value values = 1; } message Batch { repeated BatchStep steps = 1; } message BatchStep { optional BatchCond condition = 1; Stmt stmt = 2; } message BatchCond { oneof cond { uint32 step_ok = 1; uint32 step_error = 2; BatchCond not = 3; CondList and = 4; CondList or = 5; IsAutocommit is_autocommit = 6; } message CondList { repeated BatchCond conds = 1; } message IsAutocommit { } } message BatchResult { map<uint32, StmtResult> step_results = 1; map<uint32, Error> step_errors = 2; } message CursorEntry { oneof entry { StepBeginEntry step_begin = 1; StepEndEntry step_end = 2; StepErrorEntry step_error = 3; Row row = 4; Error error = 5; } } message StepBeginEntry { uint32 step = 1; repeated Col cols = 2; } message StepEndEntry { uint64 affected_row_count = 1; optional sint64 last_insert_rowid = 2; } message StepErrorEntry { uint32 step = 1; Error error
= 2; } message DescribeResult { repeated DescribeParam params = 1; repeated DescribeCol cols = 2; bool is_explain = 3; bool is_readonly = 4; } message DescribeParam { optional string name = 1; } message DescribeCol { string name = 1; optional string decltype = 2; } message Value { oneof value { Null null = 1; sint64 integer = 2; double float = 3; string text = 4; bytes blob = 5; } message Null {} } ================================================ FILE: testing/hrana-test-server/proto/hrana.ws.proto ================================================ syntax = "proto3"; package hrana.ws; import "hrana.proto"; message ClientMsg { oneof msg { HelloMsg hello = 1; RequestMsg request = 2; } } message ServerMsg { oneof msg { HelloOkMsg hello_ok = 1; HelloErrorMsg hello_error = 2; ResponseOkMsg response_ok = 3; ResponseErrorMsg response_error = 4; } } message HelloMsg { optional string jwt = 1; } message HelloOkMsg { } message HelloErrorMsg { Error error = 1; } message RequestMsg { int32 request_id = 1; oneof request { OpenStreamReq open_stream = 2; CloseStreamReq close_stream = 3; ExecuteReq execute = 4; BatchReq batch = 5; OpenCursorReq open_cursor = 6; CloseCursorReq close_cursor = 7; FetchCursorReq fetch_cursor = 8; SequenceReq sequence = 9; DescribeReq describe = 10; StoreSqlReq store_sql = 11; CloseSqlReq close_sql = 12; GetAutocommitReq get_autocommit = 13; } } message ResponseOkMsg { int32 request_id = 1; oneof response { OpenStreamResp open_stream = 2; CloseStreamResp close_stream = 3; ExecuteResp execute = 4; BatchResp batch = 5; OpenCursorResp open_cursor = 6; CloseCursorResp close_cursor = 7; FetchCursorResp fetch_cursor = 8; SequenceResp sequence = 9; DescribeResp describe = 10; StoreSqlResp store_sql = 11; CloseSqlResp close_sql = 12; GetAutocommitResp get_autocommit = 13; } } message ResponseErrorMsg { int32 request_id = 1; Error error = 2; } message OpenStreamReq { int32 stream_id = 1; } message OpenStreamResp { } message CloseStreamReq { int32 stream_id = 1; } 
message CloseStreamResp { } message ExecuteReq { int32 stream_id = 1; Stmt stmt = 2; } message ExecuteResp { StmtResult result = 1; } message BatchReq { int32 stream_id = 1; Batch batch = 2; } message BatchResp { BatchResult result = 1; } message OpenCursorReq { int32 stream_id = 1; int32 cursor_id = 2; Batch batch = 3; } message OpenCursorResp { } message CloseCursorReq { int32 cursor_id = 1; } message CloseCursorResp { } message FetchCursorReq { int32 cursor_id = 1; uint32 max_count = 2; } message FetchCursorResp { repeated CursorEntry entries = 1; bool done = 2; } message StoreSqlReq { int32 sql_id = 1; string sql = 2; } message StoreSqlResp { } message CloseSqlReq { int32 sql_id = 1; } message CloseSqlResp { } message SequenceReq { int32 stream_id = 1; optional string sql = 2; optional int32 sql_id = 3; } message SequenceResp { } message DescribeReq { int32 stream_id = 1; optional string sql = 2; optional int32 sql_id = 3; } message DescribeResp { DescribeResult result = 1; } message GetAutocommitReq { int32 stream_id = 1; } message GetAutocommitResp { bool is_autocommit = 1; } ================================================ FILE: testing/hrana-test-server/proto/hrana_pb2.py ================================================ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: hrana.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0bhrana.proto\x12\x05hrana\"4\n\x05\x45rror\x12\x0f\n\x07message\x18\x01 \x01(\t\x12\x11\n\x04\x63ode\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x07\n\x05_code\"\xa7\x01\n\x04Stmt\x12\x10\n\x03sql\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06sql_id\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12\x1a\n\x04\x61rgs\x18\x03 \x03(\x0b\x32\x0c.hrana.Value\x12#\n\nnamed_args\x18\x04 \x03(\x0b\x32\x0f.hrana.NamedArg\x12\x16\n\twant_rows\x18\x05 \x01(\x08H\x02\x88\x01\x01\x42\x06\n\x04_sqlB\t\n\x07_sql_idB\x0c\n\n_want_rows\"5\n\x08NamedArg\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1b\n\x05value\x18\x02 \x01(\x0b\x32\x0c.hrana.Value\"\x92\x01\n\nStmtResult\x12\x18\n\x04\x63ols\x18\x01 \x03(\x0b\x32\n.hrana.Col\x12\x18\n\x04rows\x18\x02 \x03(\x0b\x32\n.hrana.Row\x12\x1a\n\x12\x61\x66\x66\x65\x63ted_row_count\x18\x03 \x01(\x04\x12\x1e\n\x11last_insert_rowid\x18\x04 \x01(\x12H\x00\x88\x01\x01\x42\x14\n\x12_last_insert_rowid\"E\n\x03\x43ol\x12\x11\n\x04name\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x15\n\x08\x64\x65\x63ltype\x18\x02 \x01(\tH\x01\x88\x01\x01\x42\x07\n\x05_nameB\x0b\n\t_decltype\"#\n\x03Row\x12\x1c\n\x06values\x18\x01 \x03(\x0b\x32\x0c.hrana.Value\"(\n\x05\x42\x61tch\x12\x1f\n\x05steps\x18\x01 \x03(\x0b\x32\x10.hrana.BatchStep\"^\n\tBatchStep\x12(\n\tcondition\x18\x01 \x01(\x0b\x32\x10.hrana.BatchCondH\x00\x88\x01\x01\x12\x19\n\x04stmt\x18\x02 \x01(\x0b\x32\x0b.hrana.StmtB\x0c\n\n_condition\"\xa5\x02\n\tBatchCond\x12\x11\n\x07step_ok\x18\x01 \x01(\rH\x00\x12\x14\n\nstep_error\x18\x02 \x01(\rH\x00\x12\x1f\n\x03not\x18\x03 
\x01(\x0b\x32\x10.hrana.BatchCondH\x00\x12(\n\x03\x61nd\x18\x04 \x01(\x0b\x32\x19.hrana.BatchCond.CondListH\x00\x12\'\n\x02or\x18\x05 \x01(\x0b\x32\x19.hrana.BatchCond.CondListH\x00\x12\x36\n\ris_autocommit\x18\x06 \x01(\x0b\x32\x1d.hrana.BatchCond.IsAutocommitH\x00\x1a+\n\x08\x43ondList\x12\x1f\n\x05\x63onds\x18\x01 \x03(\x0b\x32\x10.hrana.BatchCond\x1a\x0e\n\x0cIsAutocommitB\x06\n\x04\x63ond\"\x89\x02\n\x0b\x42\x61tchResult\x12\x39\n\x0cstep_results\x18\x01 \x03(\x0b\x32#.hrana.BatchResult.StepResultsEntry\x12\x37\n\x0bstep_errors\x18\x02 \x03(\x0b\x32\".hrana.BatchResult.StepErrorsEntry\x1a\x45\n\x10StepResultsEntry\x12\x0b\n\x03key\x18\x01 \x01(\r\x12 \n\x05value\x18\x02 \x01(\x0b\x32\x11.hrana.StmtResult:\x02\x38\x01\x1a?\n\x0fStepErrorsEntry\x12\x0b\n\x03key\x18\x01 \x01(\r\x12\x1b\n\x05value\x18\x02 \x01(\x0b\x32\x0c.hrana.Error:\x02\x38\x01\"\xd3\x01\n\x0b\x43ursorEntry\x12+\n\nstep_begin\x18\x01 \x01(\x0b\x32\x15.hrana.StepBeginEntryH\x00\x12\'\n\x08step_end\x18\x02 \x01(\x0b\x32\x13.hrana.StepEndEntryH\x00\x12+\n\nstep_error\x18\x03 \x01(\x0b\x32\x15.hrana.StepErrorEntryH\x00\x12\x19\n\x03row\x18\x04 \x01(\x0b\x32\n.hrana.RowH\x00\x12\x1d\n\x05\x65rror\x18\x05 \x01(\x0b\x32\x0c.hrana.ErrorH\x00\x42\x07\n\x05\x65ntry\"8\n\x0eStepBeginEntry\x12\x0c\n\x04step\x18\x01 \x01(\r\x12\x18\n\x04\x63ols\x18\x02 \x03(\x0b\x32\n.hrana.Col\"`\n\x0cStepEndEntry\x12\x1a\n\x12\x61\x66\x66\x65\x63ted_row_count\x18\x01 \x01(\x04\x12\x1e\n\x11last_insert_rowid\x18\x02 \x01(\x12H\x00\x88\x01\x01\x42\x14\n\x12_last_insert_rowid\";\n\x0eStepErrorEntry\x12\x0c\n\x04step\x18\x01 \x01(\r\x12\x1b\n\x05\x65rror\x18\x02 \x01(\x0b\x32\x0c.hrana.Error\"\x81\x01\n\x0e\x44\x65scribeResult\x12$\n\x06params\x18\x01 \x03(\x0b\x32\x14.hrana.DescribeParam\x12 \n\x04\x63ols\x18\x02 \x03(\x0b\x32\x12.hrana.DescribeCol\x12\x12\n\nis_explain\x18\x03 \x01(\x08\x12\x13\n\x0bis_readonly\x18\x04 \x01(\x08\"+\n\rDescribeParam\x12\x11\n\x04name\x18\x01 
\x01(\tH\x00\x88\x01\x01\x42\x07\n\x05_name\"?\n\x0b\x44\x65scribeCol\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x15\n\x08\x64\x65\x63ltype\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x0b\n\t_decltype\"\x7f\n\x05Value\x12!\n\x04null\x18\x01 \x01(\x0b\x32\x11.hrana.Value.NullH\x00\x12\x11\n\x07integer\x18\x02 \x01(\x12H\x00\x12\x0f\n\x05\x66loat\x18\x03 \x01(\x01H\x00\x12\x0e\n\x04text\x18\x04 \x01(\tH\x00\x12\x0e\n\x04\x62lob\x18\x05 \x01(\x0cH\x00\x1a\x06\n\x04NullB\x07\n\x05valueb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'hrana_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None _BATCHRESULT_STEPRESULTSENTRY._options = None _BATCHRESULT_STEPRESULTSENTRY._serialized_options = b'8\001' _BATCHRESULT_STEPERRORSENTRY._options = None _BATCHRESULT_STEPERRORSENTRY._serialized_options = b'8\001' _globals['_ERROR']._serialized_start=22 _globals['_ERROR']._serialized_end=74 _globals['_STMT']._serialized_start=77 _globals['_STMT']._serialized_end=244 _globals['_NAMEDARG']._serialized_start=246 _globals['_NAMEDARG']._serialized_end=299 _globals['_STMTRESULT']._serialized_start=302 _globals['_STMTRESULT']._serialized_end=448 _globals['_COL']._serialized_start=450 _globals['_COL']._serialized_end=519 _globals['_ROW']._serialized_start=521 _globals['_ROW']._serialized_end=556 _globals['_BATCH']._serialized_start=558 _globals['_BATCH']._serialized_end=598 _globals['_BATCHSTEP']._serialized_start=600 _globals['_BATCHSTEP']._serialized_end=694 _globals['_BATCHCOND']._serialized_start=697 _globals['_BATCHCOND']._serialized_end=990 _globals['_BATCHCOND_CONDLIST']._serialized_start=923 _globals['_BATCHCOND_CONDLIST']._serialized_end=966 _globals['_BATCHCOND_ISAUTOCOMMIT']._serialized_start=968 _globals['_BATCHCOND_ISAUTOCOMMIT']._serialized_end=982 _globals['_BATCHRESULT']._serialized_start=993 _globals['_BATCHRESULT']._serialized_end=1258 
_globals['_BATCHRESULT_STEPRESULTSENTRY']._serialized_start=1124 _globals['_BATCHRESULT_STEPRESULTSENTRY']._serialized_end=1193 _globals['_BATCHRESULT_STEPERRORSENTRY']._serialized_start=1195 _globals['_BATCHRESULT_STEPERRORSENTRY']._serialized_end=1258 _globals['_CURSORENTRY']._serialized_start=1261 _globals['_CURSORENTRY']._serialized_end=1472 _globals['_STEPBEGINENTRY']._serialized_start=1474 _globals['_STEPBEGINENTRY']._serialized_end=1530 _globals['_STEPENDENTRY']._serialized_start=1532 _globals['_STEPENDENTRY']._serialized_end=1628 _globals['_STEPERRORENTRY']._serialized_start=1630 _globals['_STEPERRORENTRY']._serialized_end=1689 _globals['_DESCRIBERESULT']._serialized_start=1692 _globals['_DESCRIBERESULT']._serialized_end=1821 _globals['_DESCRIBEPARAM']._serialized_start=1823 _globals['_DESCRIBEPARAM']._serialized_end=1866 _globals['_DESCRIBECOL']._serialized_start=1868 _globals['_DESCRIBECOL']._serialized_end=1931 _globals['_VALUE']._serialized_start=1933 _globals['_VALUE']._serialized_end=2060 _globals['_VALUE_NULL']._serialized_start=2045 _globals['_VALUE_NULL']._serialized_end=2051 # @@protoc_insertion_point(module_scope) ================================================ FILE: testing/hrana-test-server/requirements.txt ================================================ aiohttp==3.8.4 protobuf==4.24.0 ================================================ FILE: testing/hrana-test-server/server_v1.py ================================================ import asyncio import base64 import collections import json import logging import os import sqlite3 import sys import tempfile import aiohttp.web logger = logging.getLogger("server") persistent_db_file = os.getenv("PERSISTENT_DB") async def main(command): logging.basicConfig(level=os.getenv("LOG_LEVEL", "INFO")) app = aiohttp.web.Application() app.add_routes([ aiohttp.web.get("/", handle_get_index), aiohttp.web.post("/v1/execute", handle_post_execute), aiohttp.web.post("/v1/batch", handle_post_batch), ]) if 
persistent_db_file is None: http_db_fd, http_db_file = tempfile.mkstemp(suffix=".db", prefix="hrana_test_") os.close(http_db_fd) else: http_db_file = persistent_db_file app["http_db_file"] = http_db_file app["db_lock"] = asyncio.Lock() async def on_shutdown(app): if http_db_file != persistent_db_file: os.unlink(http_db_file) app.on_shutdown.append(on_shutdown) runner = aiohttp.web.AppRunner(app) await runner.setup() site = aiohttp.web.TCPSite(runner, "localhost", 8080) await site.start() logger.info("Server is ready") if len(command) > 0: proc = await asyncio.create_subprocess_exec(*command) code = await proc.wait() else: while True: await asyncio.sleep(10) await runner.cleanup() return code async def handle_get_index(req): ws = aiohttp.web.WebSocketResponse(protocols=("hrana1",)) if ws.can_prepare(req): await ws.prepare(req) try: await handle_websocket(req.app, ws) finally: await ws.close() return ws return aiohttp.web.Response(text="This is a Hrana test server") async def handle_websocket(app, ws): async def recv_msg(): ws_msg = await ws.receive() if ws_msg.type == aiohttp.WSMsgType.TEXT: msg = json.loads(ws_msg.data) return msg elif ws_msg.type in (aiohttp.WSMsgType.CLOSE, aiohttp.WSMsgType.CLOSED): return None else: raise RuntimeError(f"Unknown websocket message: {msg!r}") async def send_msg(msg): msg_str = json.dumps(msg) await ws.send_str(msg_str) Stream = collections.namedtuple("Stream", ["conn"]) streams = {} if persistent_db_file is None: db_fd, db_file = tempfile.mkstemp(suffix=".db", prefix="hrana_test_") os.close(db_fd) else: db_file = persistent_db_file async def handle_request(req): if req["type"] == "open_stream": conn = await to_thread(lambda: connect(db_file)) streams[int(req["stream_id"])] = Stream(conn) return {"type": "open_stream"} elif req["type"] == "close_stream": stream = streams.pop(int(req["stream_id"]), None) if stream is not None: await to_thread(lambda: stream.conn.close()) return {"type": "close_stream"} elif req["type"] == "execute": 
stream = streams[int(req["stream_id"])] async with app["db_lock"]: result = await to_thread(lambda: execute_stmt(stream.conn, req["stmt"])) return {"type": "execute", "result": result} elif req["type"] == "batch": stream = streams[int(req["stream_id"])] async with app["db_lock"]: result = await to_thread(lambda: execute_batch(stream.conn, req["batch"])) return {"type": "batch", "result": result} else: raise RuntimeError(f"Unknown req: {req!r}") async def handle_msg(msg): if msg["type"] == "request": try: response = await handle_request(msg["request"]) await send_msg({ "type": "response_ok", "request_id": msg["request_id"], "response": response, }) except ResponseError as e: await send_msg({ "type": "response_error", "request_id": msg["request_id"], "error": {"message": str(e)}, }) else: raise RuntimeError(f"Unknown msg: {msg!r}") try: hello_msg = await recv_msg() if hello_msg is None: return assert hello_msg.get("type") == "hello" await send_msg({"type": "hello_ok"}) jwt = hello_msg.get("jwt") if jwt is not None: logger.info(f"Authenticated with JWT: {jwt[:20]}...") while True: msg = await recv_msg() if msg is None: break await handle_msg(msg) except CloseWebSocket: await ws.close() except CloseTcpSocket: ws._writer.transport.close() finally: for stream in streams.values(): stream.conn.close() if db_file != persistent_db_file: os.unlink(db_file) async def handle_post_execute(req): req_body = await req.json() conn = await to_thread(lambda: connect(req.app["http_db_file"])) try: async with req.app["db_lock"]: result = await to_thread(lambda: execute_stmt(conn, req_body["stmt"])) return aiohttp.web.json_response({"result": result}) except ResponseError as e: return aiohttp.web.json_response({"message": str(e)}, status=400) finally: conn.close() async def handle_post_batch(req): req_body = await req.json() conn = await to_thread(lambda: connect(req.app["http_db_file"])) try: async with req.app["db_lock"]: result = await to_thread(lambda: execute_batch(conn, 
req_body["batch"])) return aiohttp.web.json_response({"result": result}) except ResponseError as e: return aiohttp.web.json_response({"message": str(e)}, status=400) finally: conn.close() def connect(db_file): conn = sqlite3.connect(db_file, check_same_thread=False, isolation_level=None, timeout=1) conn.execute("PRAGMA journal_mode = WAL") return conn class CloseWebSocket(BaseException): pass class CloseTcpSocket(BaseException): pass def execute_stmt(conn, stmt): if stmt["sql"] == ".close_ws": raise CloseWebSocket() elif stmt["sql"] == ".close_tcp": raise CloseTcpSocket() args = stmt.get("args", []) named_args = stmt.get("named_args", []) if len(named_args) == 0: sql_args = [value_to_sqlite(arg) for arg in args] elif len(args) == 0: sql_args = {} for arg in named_args: value = value_to_sqlite(arg["value"]) if arg["name"][0] in (":", "@", "$"): key = arg["name"][1:] else: key = arg["name"] sql_args[key] = value else: raise RuntimeError(f"Using both positional and named arguments is not supported") try: cursor = conn.execute(stmt["sql"], sql_args) except sqlite3.Error as e: raise ResponseError(str(e)) except OverflowError as e: raise ResponseError(str(e)) except sqlite3.Warning as e: raise ResponseError(str(e)) cols = [{"name": name} for name, *_ in cursor.description or []] rows = [] for row in cursor: if stmt["want_rows"]: rows.append([value_from_sqlite(val) for val in row]) if cursor.rowcount >= 0: affected_row_count = cursor.rowcount else: affected_row_count = 0 if cursor.lastrowid is not None: last_insert_rowid = str(cursor.lastrowid) else: last_insert_rowid = None return { "cols": cols, "rows": rows, "affected_row_count": affected_row_count, "last_insert_rowid": last_insert_rowid, } def execute_batch(conn, batch): step_results = [] step_errors = [] for step in batch["steps"]: condition = step.get("condition") if condition is not None: enabled = eval_cond(step_results, step_errors, condition) else: enabled = True step_result = None step_error = None if enabled: 
try: step_result = execute_stmt(conn, step["stmt"]) except ResponseError as e: step_error = {"message": str(e)} step_results.append(step_result) step_errors.append(step_error) return { "step_results": step_results, "step_errors": step_errors, } def eval_cond(step_results, step_errors, cond): if cond["type"] == "ok": return step_results[cond["step"]] is not None elif cond["type"] == "error": return step_errors[cond["step"]] is not None elif cond["type"] == "not": return not eval_cond(step_results, step_errors, cond["cond"]) elif cond["type"] == "and": return all(eval_cond(step_results, step_errors, c) for c in cond["conds"]) elif cond["type"] == "or": return any(eval_cond(step_results, step_errors, c) for c in cond["conds"]) else: raise RuntimeError(f"Unknown cond: {cond!r}") def value_to_sqlite(value): if value["type"] == "null": return None elif value["type"] == "integer": return int(value["value"]) elif value["type"] == "float": return float(value["value"]) elif value["type"] == "text": return str(value["value"]) elif value["type"] == "blob": return base64.b64decode(value["base64"]) else: raise RuntimeError(f"Unknown value: {value!r}") def value_from_sqlite(value): if value is None: return {"type": "null"} elif isinstance(value, int): return {"type": "integer", "value": str(value)} elif isinstance(value, float): return {"type": "float", "value": value} elif isinstance(value, str): return {"type": "text", "value": value} elif isinstance(value, bytes): return {"type": "blob", "base64": base64.b64encode(value).decode()} else: raise RuntimeError(f"Unknown SQLite value: {value!r}") class ResponseError(RuntimeError): pass async def to_thread(func): return await asyncio.get_running_loop().run_in_executor(None, func) if __name__ == "__main__": try: sys.exit(asyncio.run(main(sys.argv[1:]))) except KeyboardInterrupt: print() ================================================ FILE: testing/hrana-test-server/server_v2.py ================================================ import 
import asyncio
import base64
import collections
import dataclasses
import json
import logging
import os
import random
import sys
import tempfile

import aiohttp.web

import c3
from sqlite3_error_map import sqlite_error_code_to_name

logger = logging.getLogger("server")

# When set, every connection shares this database file and it is never deleted;
# otherwise each server/websocket gets a throwaway temp file.
persistent_db_file = os.getenv("PERSISTENT_DB")


@dataclasses.dataclass
class HttpStream:
    # Open database connection backing this HTTP stream.
    conn: c3.Conn
    # SQL texts stored via `store_sql`, keyed by client-assigned sql_id.
    sqls: dict
    # Opaque resume token of the form "{stream_id}.{nonce}".
    baton: str


async def main(command):
    """Run the Hrana v1/v2 test server on localhost:8080.

    If `command` is non-empty it is spawned as a subprocess and the server
    exits with the child's return code when it terminates; otherwise the
    server loops forever (the cleanup/return below is then unreachable).
    """
    logging.basicConfig(level=os.getenv("LOG_LEVEL", "INFO"))

    app = aiohttp.web.Application()
    app.add_routes([
        aiohttp.web.get("/", handle_get_index),
        aiohttp.web.post("/v1/execute", handle_post_execute),
        aiohttp.web.post("/v1/batch", handle_post_batch),
        aiohttp.web.get("/v2", handle_get_index),
        aiohttp.web.post("/v2/pipeline", handle_post_pipeline),
    ])
    app["http_streams"] = {}

    if persistent_db_file is None:
        http_db_fd, http_db_file = tempfile.mkstemp(suffix=".db", prefix="hrana_test_")
        os.close(http_db_fd)
    else:
        http_db_file = persistent_db_file
    app["http_db_file"] = http_db_file
    # Serializes all database work; the c3 bindings are used from worker threads.
    app["db_lock"] = asyncio.Lock()

    async def on_shutdown(app):
        # Only remove the database if it was a throwaway temp file.
        if http_db_file != persistent_db_file:
            os.unlink(http_db_file)
    app.on_shutdown.append(on_shutdown)

    runner = aiohttp.web.AppRunner(app)
    await runner.setup()
    site = aiohttp.web.TCPSite(runner, "localhost", 8080)
    await site.start()
    logger.info("Server is ready")

    if len(command) > 0:
        proc = await asyncio.create_subprocess_exec(*command)
        code = await proc.wait()
    else:
        # No child to supervise: serve until interrupted.
        while True:
            await asyncio.sleep(10)

    await runner.cleanup()
    return code


async def handle_get_index(req):
    """Upgrade to a Hrana v2 websocket, or answer plain GETs with a banner."""
    ws = aiohttp.web.WebSocketResponse(protocols=("hrana2",))
    if ws.can_prepare(req):
        await ws.prepare(req)
        try:
            await handle_websocket(req.app, ws)
        finally:
            await ws.close()
        return ws
    return aiohttp.web.Response(text="This is a Hrana test server")


async def handle_websocket(app, ws):
    """Serve one Hrana-over-websocket session until the peer disconnects."""

    async def recv_msg():
        # Returns the decoded JSON message, or None on a close frame.
        ws_msg = await ws.receive()
        if ws_msg.type == aiohttp.WSMsgType.TEXT:
            msg = json.loads(ws_msg.data)
            return msg
        elif ws_msg.type in (aiohttp.WSMsgType.CLOSE, aiohttp.WSMsgType.CLOSED):
            return None
        else:
            # BUG FIX: this branch previously interpolated the unbound name
            # `msg` (only assigned in the TEXT branch), so an unexpected frame
            # raised UnboundLocalError instead of this diagnostic.
            raise RuntimeError(f"Unknown websocket message: {ws_msg!r}")

    async def send_msg(msg):
        msg_str = json.dumps(msg)
        await ws.send_str(msg_str)

    WsStream = collections.namedtuple("WsStream", ["conn"])
    streams = {}
    sqls = {}

    if persistent_db_file is None:
        db_fd, db_file = tempfile.mkstemp(suffix=".db", prefix="hrana_test_")
        os.close(db_fd)
    else:
        db_file = persistent_db_file

    async def handle_request(req):
        # Dispatch a single Hrana request; blocking DB work runs in a thread
        # under the shared db_lock.
        if req["type"] == "open_stream":
            conn = await to_thread(lambda: connect(db_file))
            stream_id = int(req["stream_id"])
            assert stream_id not in streams
            streams[stream_id] = WsStream(conn)
            return {"type": "open_stream"}
        elif req["type"] == "close_stream":
            stream = streams.pop(int(req["stream_id"]), None)
            if stream is not None:
                await to_thread(lambda: stream.conn.close())
            return {"type": "close_stream"}
        elif req["type"] == "execute":
            stream = streams[int(req["stream_id"])]
            async with app["db_lock"]:
                result = await to_thread(lambda: execute_stmt(stream.conn, sqls, req["stmt"]))
            return {"type": "execute", "result": result}
        elif req["type"] == "batch":
            stream = streams[int(req["stream_id"])]
            async with app["db_lock"]:
                result = await to_thread(lambda: execute_batch(stream.conn, sqls, req["batch"]))
            return {"type": "batch", "result": result}
        elif req["type"] == "sequence":
            stream = streams[int(req["stream_id"])]
            sql = get_sql(sqls, req)
            async with app["db_lock"]:
                await to_thread(lambda: execute_sequence(stream.conn, sql))
            return {"type": "sequence"}
        elif req["type"] == "describe":
            stream = streams[int(req["stream_id"])]
            sql = get_sql(sqls, req)
            async with app["db_lock"]:
                result = await to_thread(lambda: describe_stmt(stream.conn, sql))
            return {"type": "describe", "result": result}
        elif req["type"] == "store_sql":
            sql_id = int(req["sql_id"])
            assert sql_id not in sqls
            sqls[sql_id] = req["sql"]
            # Hrana caps stored SQL texts per connection.
            assert len(sqls) <= 250
            return {"type": "store_sql"}
        elif req["type"] == "close_sql":
            sqls.pop(int(req["sql_id"]))
            return {"type": "close_sql"}
        else:
            raise RuntimeError(f"Unknown req: {req!r}")

    hello_recvd = False

    async def handle_msg(msg):
        nonlocal hello_recvd
        if msg["type"] == "request":
            # Clients must complete the hello handshake first.
            assert hello_recvd
            try:
                response = await handle_request(msg["request"])
                await send_msg({
                    "type": "response_ok",
                    "request_id": msg["request_id"],
                    "response": response,
                })
            except ResponseError as e:
                await send_msg({
                    "type": "response_error",
                    "request_id": msg["request_id"],
                    "error": e.tojson(),
                })
        elif msg["type"] == "hello":
            jwt = msg.get("jwt")
            if jwt is not None:
                logger.info(f"Reauthenticated with JWT: {jwt[:20]}...")
            hello_recvd = True
            await send_msg({"type": "hello_ok"})
        else:
            raise RuntimeError(f"Unknown msg: {msg!r}")

    try:
        while True:
            msg = await recv_msg()
            if msg is None:
                break
            await handle_msg(msg)
    except CloseWebSocket:
        # Triggered by the ".close_ws" pseudo-statement.
        await ws.close()
    except CloseTcpSocket:
        # Triggered by ".close_tcp": drop the TCP transport without a close frame.
        ws._writer.transport.close()
    finally:
        for stream in streams.values():
            stream.conn.close()
        if db_file != persistent_db_file:
            os.unlink(db_file)


async def handle_post_execute(req):
    """Hrana v1 HTTP endpoint: execute a single statement on a fresh connection."""
    req_body = await req.json()
    conn = await to_thread(lambda: connect(req.app["http_db_file"]))
    try:
        async with req.app["db_lock"]:
            result = await to_thread(lambda: execute_stmt(conn, {}, req_body["stmt"]))
        return aiohttp.web.json_response({"result": result})
    except ResponseError as e:
        return aiohttp.web.json_response(e.tojson(), status=400)
    finally:
        conn.close()


async def handle_post_batch(req):
    """Hrana v1 HTTP endpoint: execute a batch on a fresh connection."""
    req_body = await req.json()
    conn = await to_thread(lambda: connect(req.app["http_db_file"]))
    try:
        async with req.app["db_lock"]:
            result = await to_thread(lambda: execute_batch(conn, {}, req_body["batch"]))
        return aiohttp.web.json_response({"result": result})
    except ResponseError as e:
        return aiohttp.web.json_response(e.tojson(), status=400)
    finally:
        conn.close()


async def handle_post_pipeline(req):
    """Hrana v2 HTTP pipeline: resume a stream by baton or open a new one,
    run the requests in order, and return per-request results plus a fresh baton."""
    req_body = await req.json()
    app = req.app

    if req_body.get("baton") is not None:
        baton = req_body["baton"]
        stream_id, _, _ = baton.partition(".")
        stream = req.app["http_streams"][stream_id]
        # The client must present the exact baton issued by the last response.
        assert stream.baton == baton
    else:
        conn = await to_thread(lambda: connect(req.app["http_db_file"]))
        stream_id = random.randbytes(16).hex()
        stream = HttpStream(conn, sqls={}, baton=None)
        req.app["http_streams"][stream_id] = stream
    # Rotate the baton on every request so stale batons are rejected.
    stream.baton = f"{stream_id}.{random.randbytes(8).hex()}"

    async def handle_request(req):
        if req["type"] == "execute":
            async with app["db_lock"]:
                result = await to_thread(lambda: execute_stmt(stream.conn, stream.sqls, req["stmt"]))
            return {"type": "execute", "result": result}
        elif req["type"] == "batch":
            async with app["db_lock"]:
                result = await to_thread(lambda: execute_batch(stream.conn, stream.sqls, req["batch"]))
            return {"type": "batch", "result": result}
        elif req["type"] == "sequence":
            sql = get_sql(stream.sqls, req)
            async with app["db_lock"]:
                await to_thread(lambda: execute_sequence(stream.conn, sql))
            return {"type": "sequence"}
        elif req["type"] == "describe":
            sql = get_sql(stream.sqls, req)
            async with app["db_lock"]:
                result = await to_thread(lambda: describe_stmt(stream.conn, sql))
            return {"type": "describe", "result": result}
        elif req["type"] == "store_sql":
            sql_id = int(req["sql_id"])
            assert sql_id not in stream.sqls
            stream.sqls[sql_id] = req["sql"]
            assert len(stream.sqls) <= 50
            return {"type": "store_sql"}
        elif req["type"] == "close_sql":
            stream.sqls.pop(int(req["sql_id"]))
            return {"type": "close_sql"}
        elif req["type"] == "close":
            stream.conn.close()
            stream.conn = None
            return {"type": "close"}
        else:
            raise RuntimeError(f"Unknown req: {req!r}")

    try:
        results = []
        for request in req_body["requests"]:
            try:
                response = await handle_request(request)
                result = {"type": "ok", "response": response}
            except ResponseError as e:
                result = {"type": "error", "error": e.tojson()}
            results.append(result)
    except Exception:
        # Non-protocol failure: tear the stream down before propagating.
        if stream.conn is not None:
            stream.conn.close()
            stream.conn = None
        raise
    finally:
        if stream.conn is None:
            stream.baton = None
            del app["http_streams"][stream_id]

    return aiohttp.web.json_response({
        "baton": stream.baton,
        "results": results,
    })
def connect(db_file):
    """Open a SQLite connection configured for the test server (WAL, no ATTACH)."""
    conn = c3.Conn.open(db_file)
    conn.extended_result_codes(True)
    conn.limit(c3.SQLITE_LIMIT_ATTACHED, 0)
    conn.busy_timeout(1000)
    conn.exec("PRAGMA journal_mode = WAL")
    return conn


def get_sql(sqls, obj):
    """Resolve the SQL text of `obj`: inline under 'sql' or stored under 'sql_id'.

    Exactly one of the two keys may be present; raises RuntimeError if neither is.
    """
    sql, sql_id = obj.get("sql"), obj.get("sql_id")
    assert sql is None or sql_id is None
    if sql is not None:
        return sql
    elif sql_id is not None:
        return sqls[sql_id]
    else:
        raise RuntimeError("Expected 'sql' or 'sql_id'")


class CloseWebSocket(BaseException):
    # Control-flow signal: the ".close_ws" pseudo-statement closes the websocket.
    pass


class CloseTcpSocket(BaseException):
    # Control-flow signal: the ".close_tcp" pseudo-statement drops the raw TCP socket.
    pass


def execute_stmt(conn, sqls, stmt):
    """Execute a single Hrana `stmt` on `conn` and return its Hrana result dict.

    Binds positional and named args, optionally collects rows (want_rows),
    and converts SQLite errors to ResponseError.
    """
    sql = get_sql(sqls, stmt)
    if sql == ".close_ws":
        raise CloseWebSocket()
    elif sql == ".close_tcp":
        raise CloseTcpSocket()

    try:
        changes_before = conn.total_changes()

        prepared, sql_rest = conn.prepare(sql)
        if not prepared:
            # Dropped the pointless f-prefix (no placeholders); matches the v3 server.
            raise ResponseError("SQL string does not contain a valid statement", "SQL_NO_STATEMENT")
        param_count = prepared.param_count()
        if len(sql_rest.strip()) != 0:
            raise ResponseError("SQL string contains more than one statement")

        args = stmt.get("args", [])
        named_args = stmt.get("named_args", [])
        provided_params_count = len(args) + len(named_args)
        if provided_params_count != param_count:
            raise ResponseError(f"Required {param_count} but {provided_params_count} were provided", "ARGS_INVALID")

        # Positional parameters are 1-based in SQLite.
        for param_i, arg_value in enumerate(args, 1):
            prepared.bind(param_i, value_to_sqlite(arg_value))
        for arg in named_args:
            arg_name = arg["name"]
            if arg_name[0] in (":", "@", "$"):
                param_i = prepared.param_index(arg_name)
            else:
                # Bare names match any of the supported prefixes.
                for prefix in (":", "@", "$"):
                    param_i = prepared.param_index(prefix + arg_name)
                    if param_i != 0:
                        break
            if param_i == 0:
                raise ResponseError(f"Parameter with name {arg_name!r} was not found", "ARGS_INVALID")
            prepared.bind(param_i, value_to_sqlite(arg["value"]))

        col_count = prepared.column_count()
        cols = [
            {
                "name": prepared.column_name(col_i),
                "decltype": prepared.column_decltype(col_i),
            }
            for col_i in range(col_count)
        ]

        want_rows = stmt.get("want_rows", True)
        rows = []
        while prepared.step():
            # Even when rows are not wanted, the statement must be fully stepped
            # so its side effects happen.
            if not want_rows:
                continue
            cells = []
            for col_i in range(col_count):
                try:
                    val = prepared.column(col_i)
                except ValueError as e:
                    name = cols[col_i].get("name") or col_i
                    if isinstance(e, UnicodeDecodeError):
                        # NOTE: formatting msg like this to match Python's dbapi
                        # error, but it could be anything. However this way
                        # allows the hrana test server to be used against
                        # Python's test suite
                        obj = e.object.decode(errors="replace")
                        msg = f"Could not decode to UTF-8 column {name!r} with text {obj!r}"
                        code = "UNICODE_ERROR"
                    else:
                        msg = f"Could not get column {name!r}: {e}"
                        code = "VALUE_ERROR"
                    raise ResponseError(msg, code) from e
                cells.append(value_from_sqlite(val))
            rows.append(cells)

        affected_row_count = conn.total_changes() - changes_before
        last_insert_rowid = conn.last_insert_rowid()
    except c3.SqliteError as e:
        raise ResponseError(e) from e

    return {
        "cols": cols,
        "rows": rows,
        # rowid is serialized as a string to survive JSON's number range.
        "affected_row_count": affected_row_count,
        "last_insert_rowid": str(last_insert_rowid),
    }


def describe_stmt(conn, sql):
    """Prepare `sql` without executing it and return its Hrana describe result."""
    try:
        prepared, _ = conn.prepare(sql)
        param_count = prepared.param_count()
        params = [
            {"name": prepared.param_name(param_i)}
            for param_i in range(1, param_count + 1)
        ]
        col_count = prepared.column_count()
        cols = [
            {
                "name": prepared.column_name(col_i),
                "decltype": prepared.column_decltype(col_i),
            }
            for col_i in range(col_count)
        ]
        is_explain = prepared.isexplain() > 0
        is_readonly = prepared.readonly()
    except c3.SqliteError as e:
        raise ResponseError(e) from e
    return {
        "params": params,
        "cols": cols,
        "is_explain": is_explain,
        "is_readonly": is_readonly,
    }


def execute_sequence(conn, sql):
    """Execute every statement in `sql` in order, discarding any rows."""
    try:
        while len(sql) > 0:
            prepared, sql = conn.prepare(sql)
            if prepared is None:
                break
            while prepared.step():
                pass
    except c3.SqliteError as e:
        raise ResponseError(e) from e


def execute_batch(conn, sqls, batch):
    """Run the batch steps in order, skipping steps whose condition is false.

    Returns parallel `step_results` / `step_errors` lists (exactly one of the
    two is non-None per executed step; both None for skipped steps).
    """
    step_results = []
    step_errors = []
    for step in batch["steps"]:
        condition = step.get("condition")
        if condition is not None:
            enabled = eval_cond(step_results, step_errors, condition)
        else:
            enabled = True

        step_result = None
        step_error = None
        if enabled:
            try:
                step_result = execute_stmt(conn, sqls, step["stmt"])
            except ResponseError as e:
                step_error = e.tojson()
        step_results.append(step_result)
        step_errors.append(step_error)
    return {
        "step_results": step_results,
        "step_errors": step_errors,
    }


def eval_cond(step_results, step_errors, cond):
    """Evaluate a batch step condition against the results of earlier steps."""
    if cond["type"] == "ok":
        return step_results[cond["step"]] is not None
    elif cond["type"] == "error":
        return step_errors[cond["step"]] is not None
    elif cond["type"] == "not":
        return not eval_cond(step_results, step_errors, cond["cond"])
    elif cond["type"] == "and":
        return all(eval_cond(step_results, step_errors, c) for c in cond["conds"])
    elif cond["type"] == "or":
        return any(eval_cond(step_results, step_errors, c) for c in cond["conds"])
    else:
        raise RuntimeError(f"Unknown cond: {cond!r}")


def value_to_sqlite(value):
    """Convert a Hrana JSON value into the corresponding Python/SQLite value."""
    if value["type"] == "null":
        return None
    elif value["type"] == "integer":
        # Hrana sends integers as strings to avoid JSON precision loss.
        return int(value["value"])
    elif value["type"] == "float":
        return float(value["value"])
    elif value["type"] == "text":
        return str(value["value"])
    elif value["type"] == "blob":
        return base64.b64decode(value["base64"])
    else:
        raise RuntimeError(f"Unknown value: {value!r}")


def value_from_sqlite(value):
    """Convert a Python/SQLite value into its Hrana JSON representation."""
    if value is None:
        return {"type": "null"}
    elif isinstance(value, int):
        return {"type": "integer", "value": str(value)}
    elif isinstance(value, float):
        return {"type": "float", "value": value}
    elif isinstance(value, str):
        return {"type": "text", "value": value}
    elif isinstance(value, bytes):
        return {"type": "blob", "base64": base64.b64encode(value).decode()}
    else:
        raise RuntimeError(f"Unknown SQLite value: {value!r}")


class ResponseError(RuntimeError):
    """Protocol-level error reported to the client as `{message, code}`."""

    def __init__(self, message, code=None):
        if isinstance(message, c3.SqliteError):
            if code is None:
                # Use base error code (error_code & 0xFF) instead of extended code
                base_code = message.error_code & 0xFF if message.error_code else None
                code = sqlite_error_code_to_name.get(base_code)
            message = str(message)
super().__init__(message) self.code = code def tojson(self): message = str(self) if self.code: return {"message": message, "code": self.code} return {"message": message} async def to_thread(func): return await asyncio.get_running_loop().run_in_executor(None, func) if __name__ == "__main__": try: sys.exit(asyncio.run(main(sys.argv[1:]))) except KeyboardInterrupt: print() ================================================ FILE: testing/hrana-test-server/server_v3.py ================================================ import asyncio import base64 import collections import dataclasses import json import logging import os import random import sys import tempfile import aiohttp.web import c3 import from_proto import to_proto from sqlite3_error_map import sqlite_error_code_to_name import proto.hrana.http_pb2 import proto.hrana.ws_pb2 logger = logging.getLogger("server") persistent_db_file = os.getenv("PERSISTENT_DB") encoding = os.getenv("ENCODING", "protobuf") assert encoding in ("json", "protobuf") @dataclasses.dataclass class HttpStream: conn: c3.Conn sqls: dict baton: str async def main(command): logging.basicConfig(level=os.getenv("LOG_LEVEL", "INFO")) http_dir = { "json": "v3", "protobuf": "v3-protobuf", }[encoding] app = aiohttp.web.Application() app.add_routes([ aiohttp.web.get("/", handle_get_index), aiohttp.web.get(f"/{http_dir}", handle_get_index), aiohttp.web.post(f"/{http_dir}/pipeline", handle_post_pipeline), aiohttp.web.post(f"/{http_dir}/cursor", handle_post_cursor), ]) app["http_streams"] = {} if persistent_db_file is None: http_db_fd, http_db_file = tempfile.mkstemp(suffix=".db", prefix="hrana_test_") os.close(http_db_fd) else: http_db_file = persistent_db_file app["http_db_file"] = http_db_file app["db_lock"] = asyncio.Lock() async def on_shutdown(app): if http_db_file != persistent_db_file: os.unlink(http_db_file) app.on_shutdown.append(on_shutdown) runner = aiohttp.web.AppRunner(app) await runner.setup() site = aiohttp.web.TCPSite(runner, "localhost", 
8080) await site.start() logger.info("Server is ready") if len(command) > 0: proc = await asyncio.create_subprocess_exec(*command) code = await proc.wait() else: while True: await asyncio.sleep(10) await runner.cleanup() return code async def handle_get_index(req): protocol = { "json": "hrana3", "protobuf": "hrana3-protobuf", }[encoding]; ws = aiohttp.web.WebSocketResponse(protocols=(protocol,)) if ws.can_prepare(req): await ws.prepare(req) try: await handle_websocket(req.app, ws) finally: await ws.close() return ws return aiohttp.web.Response(text="This is a Hrana test server") async def handle_websocket(app, ws): async def recv_msg(): ws_msg = await ws.receive() if ws_msg.type == aiohttp.WSMsgType.TEXT: assert encoding == "json" msg = json.loads(ws_msg.data) return msg elif ws_msg.type == aiohttp.WSMsgType.BINARY: assert encoding == "protobuf" msg_proto = proto.hrana.ws_pb2.ClientMsg() msg_proto.ParseFromString(ws_msg.data) msg = from_proto.ws_client_msg(msg_proto) return msg elif ws_msg.type in (aiohttp.WSMsgType.CLOSE, aiohttp.WSMsgType.CLOSED): return None else: raise RuntimeError(f"Unknown websocket message: {msg!r}") async def send_msg(msg): if encoding == "json": await ws.send_str(json.dumps(msg)) elif encoding == "protobuf": msg_proto = proto.hrana.ws_pb2.ServerMsg() to_proto.ws_server_msg(msg_proto, msg) await ws.send_bytes(msg_proto.SerializeToString()) else: assert False WsStream = collections.namedtuple("WsStream", ["conn"]) streams = {} cursors = {} sqls = {} if persistent_db_file is None: db_fd, db_file = tempfile.mkstemp(suffix=".db", prefix="hrana_test_") os.close(db_fd) else: db_file = persistent_db_file async def handle_request(req): if req["type"] == "open_stream": conn = await to_thread(lambda: connect(db_file)) stream_id = int(req["stream_id"]) assert stream_id not in streams streams[stream_id] = WsStream(conn) return {"type": "open_stream"} elif req["type"] == "close_stream": stream = streams.pop(int(req["stream_id"]), None) if stream is not 
None: await to_thread(lambda: stream.conn.close()) return {"type": "close_stream"} elif req["type"] == "execute": stream = streams[int(req["stream_id"])] async with app["db_lock"]: result = await to_thread(lambda: execute_stmt(stream.conn, sqls, req["stmt"])) return {"type": "execute", "result": result} elif req["type"] == "batch": stream = streams[int(req["stream_id"])] async with app["db_lock"]: result = await to_thread(lambda: execute_batch(stream.conn, sqls, req["batch"])) return {"type": "batch", "result": result} elif req["type"] == "open_cursor": stream = streams[int(req["stream_id"])] cursor_id = int(req["cursor_id"]) assert cursor_id not in cursors async with app["db_lock"]: entries = await to_thread(lambda: execute_cursor(stream.conn, sqls, req["batch"])) cursors[cursor_id] = collections.deque(entries) return {"type": "open_cursor"} elif req["type"] == "close_cursor": cursors.pop(int(req["cursor_id"]), None) return {"type": "close_cursor"} elif req["type"] == "fetch_cursor": cursor = cursors[int(req["cursor_id"])] entries = [] while len(cursor) > 0 and len(entries) < req["max_count"]: entries.append(cursor.popleft()) return {"type": "fetch_cursor", "entries": entries, "done": len(cursor) == 0} elif req["type"] == "sequence": stream = streams[int(req["stream_id"])] sql = get_sql(sqls, req) async with app["db_lock"]: await to_thread(lambda: execute_sequence(stream.conn, sql)) return {"type": "sequence"} elif req["type"] == "describe": stream = streams[int(req["stream_id"])] sql = get_sql(sqls, req) async with app["db_lock"]: result = await to_thread(lambda: describe_stmt(stream.conn, sql)) return {"type": "describe", "result": result} elif req["type"] == "store_sql": sql_id = int(req["sql_id"]) assert sql_id not in sqls sqls[sql_id] = req["sql"] assert len(sqls) <= 250 return {"type": "store_sql"} elif req["type"] == "close_sql": sqls.pop(int(req["sql_id"])) return {"type": "close_sql"} elif req["type"] == "get_autocommit": stream = 
streams[int(req["stream_id"])] is_autocommit = stream.conn.get_autocommit() return {"type": "get_autocommit", "is_autocommit": is_autocommit} else: raise RuntimeError(f"Unknown req: {req!r}") hello_recvd = False async def handle_msg(msg): nonlocal hello_recvd if msg["type"] == "request": assert hello_recvd try: response = await handle_request(msg["request"]) await send_msg({ "type": "response_ok", "request_id": msg["request_id"], "response": response, }) except ResponseError as e: await send_msg({ "type": "response_error", "request_id": msg["request_id"], "error": e.tojson(), }) elif msg["type"] == "hello": jwt = msg.get("jwt") if jwt is not None: logger.info(f"Reauthenticated with JWT: {jwt[:20]}...") hello_recvd = True await send_msg({"type": "hello_ok"}) else: raise RuntimeError(f"Unknown msg: {msg!r}") try: while True: msg = await recv_msg() if msg is None: break await handle_msg(msg) except CloseWebSocket: await ws.close() except CloseTcpSocket: ws._writer.transport.close() finally: for stream in streams.values(): stream.conn.close() if db_file != persistent_db_file: os.unlink(db_file) async def handle_post_pipeline(req): if encoding == "json": req_body = await req.json() elif encoding == "protobuf": msg_proto = proto.hrana.http_pb2.PipelineReqBody() msg_proto.ParseFromString(await req.read()) req_body = from_proto.http_pipeline_req_body(msg_proto) app = req.app stream_id, stream = await handle_baton(app, req_body.get("baton")) async def handle_request(req): if req["type"] == "execute": async with app["db_lock"]: result = await to_thread(lambda: execute_stmt(stream.conn, stream.sqls, req["stmt"])) return {"type": "execute", "result": result} elif req["type"] == "batch": async with app["db_lock"]: result = await to_thread(lambda: execute_batch(stream.conn, stream.sqls, req["batch"])) return {"type": "batch", "result": result} elif req["type"] == "sequence": sql = get_sql(stream.sqls, req) async with app["db_lock"]: await to_thread(lambda: 
execute_sequence(stream.conn, sql)) return {"type": "sequence"} elif req["type"] == "describe": sql = get_sql(stream.sqls, req) async with app["db_lock"]: result = await to_thread(lambda: describe_stmt(stream.conn, sql)) return {"type": "describe", "result": result} elif req["type"] == "store_sql": sql_id = int(req["sql_id"]) assert sql_id not in stream.sqls stream.sqls[sql_id] = req["sql"] assert len(stream.sqls) <= 50 return {"type": "store_sql"} elif req["type"] == "close_sql": stream.sqls.pop(int(req["sql_id"])) return {"type": "close_sql"} elif req["type"] == "close": stream.conn.close() stream.conn = None return {"type": "close"} elif req["type"] == "get_autocommit": is_autocommit = stream.conn.get_autocommit() return {"type": "get_autocommit", "is_autocommit": is_autocommit} else: raise RuntimeError(f"Unknown req: {req!r}") try: results = [] for request in req_body["requests"]: try: response = await handle_request(request) result = {"type": "ok", "response": response} except ResponseError as e: result = {"type": "error", "error": e.tojson()} results.append(result) except Exception: if stream.conn is not None: stream.conn.close() stream.conn = None raise finally: if stream.conn is None: stream.baton = None del app["http_streams"][stream_id] resp_body = { "baton": stream.baton, "results": results, } if encoding == "json": return aiohttp.web.json_response(resp_body) elif encoding == "protobuf": msg_proto = proto.hrana.http_pb2.PipelineRespBody() to_proto.http_pipeline_resp_body(msg_proto, resp_body) return aiohttp.web.Response( body=msg_proto.SerializeToString(), content_type="application/x-protobuf", ) async def handle_post_cursor(req): if encoding == "json": req_body = await req.json() elif encoding == "protobuf": msg_proto = proto.hrana.http_pb2.CursorReqBody() msg_proto.ParseFromString(await req.read()) req_body = from_proto.http_cursor_req_body(msg_proto) app = req.app stream_id, stream = await handle_baton(app, req_body.get("baton")) resp = 
aiohttp.web.StreamResponse() resp.headers["content-type"] = { "json": "text/plain", "protobuf": "application/octet-stream", }[encoding] await resp.prepare(req) async def send_item(item, proto_class, to_proto_fun): if encoding == "json": await resp.write(json.dumps(item).encode()) await resp.write(b"\n") elif encoding == "protobuf": msg_proto = proto_class() to_proto_fun(msg_proto, item) msg_bytes = msg_proto.SerializeToString() await resp.write(encode_varint(len(msg_bytes))) await resp.write(msg_bytes) resp_body = {"baton": stream.baton} await send_item(resp_body, proto.hrana.http_pb2.CursorRespBody, to_proto.http_cursor_resp_body) async with app["db_lock"]: entries = await to_thread(lambda: execute_cursor(stream.conn, stream.sqls, req_body["batch"])) for entry in entries: await send_item(entry, proto.hrana_pb2.CursorEntry, to_proto.cursor_entry) await resp.write_eof() return resp async def handle_baton(app, baton): if baton is not None: stream_id, _, _ = baton.partition(".") stream = app["http_streams"][stream_id] assert stream.baton == baton else: conn = await to_thread(lambda: connect(app["http_db_file"])) stream_id = random.randbytes(16).hex() stream = HttpStream(conn, sqls={}, baton=None) app["http_streams"][stream_id] = stream stream.baton = f"{stream_id}.{random.randbytes(8).hex()}" return stream_id, stream def encode_varint(num): bs = [] while True: b = num & 0x7f num = num >> 7 if num == 0: bs.append(b) break else: bs.append(0x80 | b) return bytes(bs) def connect(db_file): conn = c3.Conn.open(db_file) conn.extended_result_codes(True) conn.limit(c3.SQLITE_LIMIT_ATTACHED, 0) conn.busy_timeout(1000) conn.exec("PRAGMA journal_mode = WAL") return conn def get_sql(sqls, obj): sql, sql_id = obj.get("sql"), obj.get("sql_id") assert sql is None or sql_id is None if sql is not None: return sql elif sql_id is not None: return sqls[sql_id] else: raise RuntimeError("Expected 'sql' or 'sql_id'") class CloseWebSocket(BaseException): pass class 
CloseTcpSocket(BaseException): pass def execute_stmt(conn, sqls, stmt): sql = get_sql(sqls, stmt) if sql == ".close_ws": raise CloseWebSocket() elif sql == ".close_tcp": raise CloseTcpSocket() try: changes_before = conn.total_changes() prepared, sql_rest = conn.prepare(sql) if not prepared: raise ResponseError("SQL string does not contain a valid statement", "SQL_NO_STATEMENT") param_count = prepared.param_count() if len(sql_rest.strip()) != 0: raise ResponseError("SQL string contains more than one statement") args = stmt.get("args", []) named_args = stmt.get("named_args", []) provided_params_count = len(args) + len(named_args) if provided_params_count != param_count: raise ResponseError(f"Required {param_count} but {provided_params_count} were provided", "ARGS_INVALID") for param_i, arg_value in enumerate(args, 1): prepared.bind(param_i, value_to_sqlite(arg_value)) for arg in named_args: arg_name = arg["name"] if arg_name[0] in (":", "@", "$"): param_i = prepared.param_index(arg_name) else: for prefix in (":", "@", "$"): param_i = prepared.param_index(prefix + arg_name) if param_i != 0: break if param_i == 0: raise ResponseError(f"Parameter with name {arg_name!r} was not found", "ARGS_INVALID") prepared.bind(param_i, value_to_sqlite(arg["value"])) col_count = prepared.column_count() cols = [ { "name": prepared.column_name(col_i), "decltype": prepared.column_decltype(col_i), } for col_i in range(col_count) ] want_rows = stmt.get("want_rows", True) rows = [] while prepared.step(): if not want_rows: continue cells = [] for col_i in range(col_count): try: val = prepared.column(col_i) except ValueError as e: name = cols[col_i].get("name") or col_i if isinstance(e, UnicodeDecodeError): # NOTE: formatting msg like this to match Python's dbapi # error, but it could be anything. 
However this way # allows the hrana test server to be used against # Python's test suite obj = e.object.decode(errors="replace") msg = f"Could not decode to UTF-8 column {name!r} with text {obj!r}" code = "UNICODE_ERROR" else: msg = f"Could not get column {name!r}: {e}" code = "VALUE_ERROR" raise ResponseError(msg, code) from e cells.append(value_from_sqlite(val)) rows.append(cells) affected_row_count = conn.total_changes() - changes_before last_insert_rowid = conn.last_insert_rowid() except c3.SqliteError as e: raise ResponseError(e) from e return { "cols": cols, "rows": rows, "affected_row_count": affected_row_count, "last_insert_rowid": str(last_insert_rowid), } def describe_stmt(conn, sql): try: prepared, _ = conn.prepare(sql) param_count = prepared.param_count() params = [ {"name": prepared.param_name(param_i)} for param_i in range(1, param_count+1) ] col_count = prepared.column_count() cols = [ { "name": prepared.column_name(col_i), "decltype": prepared.column_decltype(col_i) } for col_i in range(col_count) ] is_explain = prepared.isexplain() > 0 is_readonly = prepared.readonly() except c3.SqliteError as e: raise ResponseError(e) from e return { "params": params, "cols": cols, "is_explain": is_explain, "is_readonly": is_readonly, } def execute_sequence(conn, sql): try: while len(sql) > 0: prepared, sql = conn.prepare(sql) if prepared is None: break while prepared.step(): pass except c3.SqliteError as e: raise ResponseError(e) from e def execute_cursor(conn, sqls, batch): batch_result = execute_batch(conn, sqls, batch) entries = [] for step_i in range(len(batch["steps"])): step_result = batch_result["step_results"][step_i] step_error = batch_result["step_errors"][step_i] if step_result is not None: entries.append({ "type": "step_begin", "step": step_i, "cols": step_result["cols"], }) for row in step_result["rows"]: entries.append({"type": "row", "row": row}) entries.append({ "type": "step_end", "affected_row_count": step_result["affected_row_count"], 
"last_insert_rowid": step_result["last_insert_rowid"], }) elif step_error is not None: entries.append({ "type": "step_error", "step": step_i, "error": step_error, }) return entries def execute_batch(conn, sqls, batch): step_results = [] step_errors = [] for step in batch["steps"]: condition = step.get("condition") if condition is not None: enabled = eval_cond(conn, step_results, step_errors, condition) else: enabled = True step_result = None step_error = None if enabled: try: step_result = execute_stmt(conn, sqls, step["stmt"]) except ResponseError as e: step_error = e.tojson() step_results.append(step_result) step_errors.append(step_error) return { "step_results": step_results, "step_errors": step_errors, } def eval_cond(conn, step_results, step_errors, cond): if cond["type"] == "ok": return step_results[cond["step"]] is not None elif cond["type"] == "error": return step_errors[cond["step"]] is not None elif cond["type"] == "not": return not eval_cond(conn, step_results, step_errors, cond["cond"]) elif cond["type"] == "and": return all(eval_cond(conn, step_results, step_errors, c) for c in cond["conds"]) elif cond["type"] == "or": return any(eval_cond(conn, step_results, step_errors, c) for c in cond["conds"]) elif cond["type"] == "is_autocommit": return conn.get_autocommit() else: raise RuntimeError(f"Unknown cond: {cond!r}") def value_to_sqlite(value): if value["type"] == "null": return None elif value["type"] == "integer": return int(value["value"]) elif value["type"] == "float": return float(value["value"]) elif value["type"] == "text": return str(value["value"]) elif value["type"] == "blob": return base64.b64decode(value["base64"]) else: raise RuntimeError(f"Unknown value: {value!r}") def value_from_sqlite(value): if value is None: return {"type": "null"} elif isinstance(value, int): return {"type": "integer", "value": str(value)} elif isinstance(value, float): return {"type": "float", "value": value} elif isinstance(value, str): return {"type": "text", 
"value": value} elif isinstance(value, bytes): return {"type": "blob", "base64": base64.b64encode(value).decode()} else: raise RuntimeError(f"Unknown SQLite value: {value!r}") class ResponseError(RuntimeError): def __init__(self, message, code=None): if isinstance(message, c3.SqliteError): if code is None: # Use base error code (error_code & 0xFF) instead of extended code base_code = message.error_code & 0xFF if message.error_code else None code = sqlite_error_code_to_name.get(base_code) message = str(message) super().__init__(message) self.code = code def tojson(self): message = str(self) if self.code: return {"message": message, "code": self.code} return {"message": message} async def to_thread(func): return await asyncio.get_running_loop().run_in_executor(None, func) if __name__ == "__main__": try: sys.exit(asyncio.run(main(sys.argv[1:]))) except KeyboardInterrupt: print() ================================================ FILE: testing/hrana-test-server/sqlite3_error_map.py ================================================ sqlite_error_code_to_name = { 0: "SQLITE_OK", 1: "SQLITE_ERROR", 2: "SQLITE_INTERNAL", 3: "SQLITE_PERM", 4: "SQLITE_ABORT", 5: "SQLITE_BUSY", 6: "SQLITE_LOCKED", 7: "SQLITE_NOMEM", 8: "SQLITE_READONLY", 9: "SQLITE_INTERRUPT", 10: "SQLITE_IOERR", 11: "SQLITE_CORRUPT", 12: "SQLITE_NOTFOUND", 13: "SQLITE_FULL", 14: "SQLITE_CANTOPEN", 15: "SQLITE_PROTOCOL", 16: "SQLITE_EMPTY", 17: "SQLITE_SCHEMA", 18: "SQLITE_TOOBIG", 19: "SQLITE_CONSTRAINT", 20: "SQLITE_MISMATCH", 21: "SQLITE_MISUSE", 22: "SQLITE_NOLFS", 23: "SQLITE_AUTH", 24: "SQLITE_FORMAT", 25: "SQLITE_RANGE", 26: "SQLITE_NOTADB", 27: "SQLITE_NOTICE", 28: "SQLITE_WARNING", 100: "SQLITE_ROW", 101: "SQLITE_DONE", 256: "SQLITE_OK_LOAD_PERMANENTLY", 257: "SQLITE_ERROR_MISSING_COLLSEQ", 261: "SQLITE_BUSY_RECOVERY", 262: "SQLITE_LOCKED_SHAREDCACHE", 264: "SQLITE_READONLY_RECOVERY", 266: "SQLITE_IOERR_READ", 267: "SQLITE_CORRUPT_VTAB", 270: "SQLITE_CANTOPEN_NOTEMPDIR", 275: "SQLITE_CONSTRAINT_CHECK", 
279: "SQLITE_AUTH_USER", 283: "SQLITE_NOTICE_RECOVER_WAL", 284: "SQLITE_WARNING_AUTOINDEX", 512: "SQLITE_OK_SYMLINK", 513: "SQLITE_ERROR_RETRY", 516: "SQLITE_ABORT_ROLLBACK", 517: "SQLITE_BUSY_SNAPSHOT", 518: "SQLITE_LOCKED_VTAB", 520: "SQLITE_READONLY_CANTLOCK", 522: "SQLITE_IOERR_SHORT_READ", 523: "SQLITE_CORRUPT_SEQUENCE", 526: "SQLITE_CANTOPEN_ISDIR", 531: "SQLITE_CONSTRAINT_COMMITHOOK", 539: "SQLITE_NOTICE_RECOVER_ROLLBACK", 769: "SQLITE_ERROR_SNAPSHOT", 773: "SQLITE_BUSY_TIMEOUT", 776: "SQLITE_READONLY_ROLLBACK", 778: "SQLITE_IOERR_WRITE", 779: "SQLITE_CORRUPT_INDEX", 782: "SQLITE_CANTOPEN_FULLPATH", 787: "SQLITE_CONSTRAINT_FOREIGNKEY", 795: "SQLITE_NOTICE_RBU", 1032: "SQLITE_READONLY_DBMOVED", 1034: "SQLITE_IOERR_FSYNC", 1038: "SQLITE_CANTOPEN_CONVPATH", 1043: "SQLITE_CONSTRAINT_FUNCTION", 1288: "SQLITE_READONLY_CANTINIT", 1290: "SQLITE_IOERR_DIR_FSYNC", 1294: "SQLITE_CANTOPEN_DIRTYWAL", 1299: "SQLITE_CONSTRAINT_NOTNULL", 1544: "SQLITE_READONLY_DIRECTORY", 1546: "SQLITE_IOERR_TRUNCATE", 1550: "SQLITE_CANTOPEN_SYMLINK", 1555: "SQLITE_CONSTRAINT_PRIMARYKEY", 1802: "SQLITE_IOERR_FSTAT", 1811: "SQLITE_CONSTRAINT_TRIGGER", 2058: "SQLITE_IOERR_UNLOCK", 2067: "SQLITE_CONSTRAINT_UNIQUE", 2314: "SQLITE_IOERR_RDLOCK", 2323: "SQLITE_CONSTRAINT_VTAB", 2570: "SQLITE_IOERR_DELETE", 2579: "SQLITE_CONSTRAINT_ROWID", 2826: "SQLITE_IOERR_BLOCKED", 2835: "SQLITE_CONSTRAINT_PINNED", 3082: "SQLITE_IOERR_NOMEM", 3091: "SQLITE_CONSTRAINT_DATATYPE", 3338: "SQLITE_IOERR_ACCESS", 3594: "SQLITE_IOERR_CHECKRESERVEDLOCK", 3850: "SQLITE_IOERR_LOCK", 4106: "SQLITE_IOERR_CLOSE", 4362: "SQLITE_IOERR_DIR_CLOSE", 4618: "SQLITE_IOERR_SHMOPEN", 4874: "SQLITE_IOERR_SHMSIZE", 5130: "SQLITE_IOERR_SHMLOCK", 5386: "SQLITE_IOERR_SHMMAP", 5642: "SQLITE_IOERR_SEEK", 5898: "SQLITE_IOERR_DELETE_NOENT", 6154: "SQLITE_IOERR_MMAP", 6410: "SQLITE_IOERR_GETTEMPPATH", 6666: "SQLITE_IOERR_CONVPATH", 6922: "SQLITE_IOERR_VNODE", 7178: "SQLITE_IOERR_AUTH", 7434: "SQLITE_IOERR_BEGIN_ATOMIC", 7690: 
"SQLITE_IOERR_COMMIT_ATOMIC",
    7946: "SQLITE_IOERR_ROLLBACK_ATOMIC",
    8202: "SQLITE_IOERR_DATA",
    8458: "SQLITE_IOERR_CORRUPTFS",
}

================================================ FILE: testing/hrana-test-server/to_proto.py ================================================

# Converters from JSON-shaped Hrana messages (plain dicts) to protobuf messages.
# Each function takes a protobuf message `p` to fill in and a dict `m` to read.
import base64

import proto.hrana.ws_pb2

def ws_server_msg(p, m):
    """Encode a Hrana WebSocket server message dict *m* into protobuf *p*."""
    if m["type"] == "hello_ok":
        p.hello_ok.SetInParent()
    elif m["type"] == "hello_error":
        error(p.hello_error.error, m["error"])
    elif m["type"] == "response_ok":
        p.response_ok.request_id = m["request_id"]
        # Dispatch on the inner response variant; empty variants only need
        # SetInParent() to mark the oneof field as present.
        if m["response"]["type"] == "open_stream":
            p.response_ok.open_stream.SetInParent()
        elif m["response"]["type"] == "close_stream":
            p.response_ok.close_stream.SetInParent()
        elif m["response"]["type"] == "execute":
            ws_execute_resp(p.response_ok.execute, m["response"])
        elif m["response"]["type"] == "batch":
            ws_batch_resp(p.response_ok.batch, m["response"])
        elif m["response"]["type"] == "open_cursor":
            p.response_ok.open_cursor.SetInParent()
        elif m["response"]["type"] == "close_cursor":
            p.response_ok.close_cursor.SetInParent()
        elif m["response"]["type"] == "fetch_cursor":
            ws_fetch_cursor_resp(p.response_ok.fetch_cursor, m["response"])
        elif m["response"]["type"] == "sequence":
            p.response_ok.sequence.SetInParent()
        elif m["response"]["type"] == "describe":
            ws_describe_resp(p.response_ok.describe, m["response"])
        elif m["response"]["type"] == "store_sql":
            p.response_ok.store_sql.SetInParent()
        elif m["response"]["type"] == "close_sql":
            p.response_ok.close_sql.SetInParent()
        elif m["response"]["type"] == "get_autocommit":
            ws_get_autocommit_resp(p.response_ok.get_autocommit, m["response"])
    elif m["type"] == "response_error":
        p.response_error.request_id = m["request_id"]
        error(p.response_error.error, m["error"])

def ws_execute_resp(p, m):
    # "execute" response: a single statement result.
    stmt_result(p.result, m["result"])

def ws_batch_resp(p, m):
    # "batch" response: results/errors of every step in the batch.
    batch_result(p.result, m["result"])

def ws_fetch_cursor_resp(p, m):
    # "fetch_cursor" response: a run of cursor entries plus the done flag.
    for mm in m["entries"]:
        cursor_entry(p.entries.add(), mm)
    p.done = m["done"]

def
ws_describe_resp(p, m): describe_result(p.result, m["result"]) def ws_get_autocommit_resp(p, m): p.is_autocommit = m["is_autocommit"] def http_pipeline_resp_body(p, m): if m["baton"] is not None: p.baton = m["baton"] if m.get("base_url") is not None: p.base_url = m["base_url"] for mm in m["results"]: http_stream_result(p.results.add(), mm) def http_stream_result(p, m): if m["type"] == "ok": http_stream_response(p.ok, m["response"]) elif m["type"] == "error": error(p.error, m["error"]) def http_stream_response(p, m): if m["type"] == "close": p.close.SetInParent() elif m["type"] == "execute": stmt_result(p.execute.result, m["result"]) elif m["type"] == "batch": batch_result(p.batch.result, m["result"]) elif m["type"] == "sequence": p.sequence.SetInParent() elif m["type"] == "describe": describe_result(p.describe.result, m["result"]) elif m["type"] == "store_sql": p.store_sql.SetInParent() elif m["type"] == "close_sql": p.close_sql.SetInParent() elif m["type"] == "get_autocommit": p.get_autocommit.is_autocommit = m["is_autocommit"] def http_cursor_resp_body(p, m): if m["baton"] is not None: p.baton = m["baton"] if m.get("base_url") is not None: p.base_url = m["base_url"] def error(p, m): p.message = m["message"] if m["code"] is not None: p.code = m["code"] def cursor_entry(p, m): if m["type"] == "step_begin": p.step_begin.step = m["step"] for mm in m["cols"]: col(p.step_begin.cols.add(), mm) elif m["type"] == "step_end": p.step_end.affected_row_count = m["affected_row_count"] if m["last_insert_rowid"] is not None: p.step_end.last_insert_rowid = int(m["last_insert_rowid"]) elif m["type"] == "step_error": p.step_error.step = m["step"] error(p.step_error.error, m["error"]) elif m["type"] == "row": row(p.row, m["row"]) elif m["type"] == "error": error(p.error, m["error"]) return p def stmt_result(p, m): for mm in m["cols"]: col(p.cols.add(), mm) for mm in m["rows"]: row(p.rows.add(), mm) p.affected_row_count = m["affected_row_count"] if m["last_insert_rowid"] is not None: 
p.last_insert_rowid = int(m["last_insert_rowid"]) def col(p, m): p.name = m["name"] if m["decltype"] is not None: p.decltype = m["decltype"] def row(p, m): for mm in m: value(p.values.add(), mm) def batch_result(p, m): p.SetInParent() for i, mm in enumerate(m["step_results"]): if mm is not None: stmt_result(p.step_results[i], mm) for i, mm in enumerate(m["step_errors"]): if mm is not None: error(p.step_errors[i], mm) def describe_result(p, m): for mm in m["params"]: describe_param(p.params.add(), mm) for mm in m["cols"]: describe_col(p.cols.add(), mm) p.is_explain = m["is_explain"] p.is_readonly = m["is_readonly"] def describe_param(p, m): if m["name"] is not None: p.name = m["name"] def describe_col(p, m): p.name = m["name"] if m["decltype"] is not None: p.decltype = m["decltype"] def value(p, m): if m["type"] == "null": p.null.SetInParent() elif m["type"] == "integer": p.integer = int(m["value"]) elif m["type"] == "float": p.float = m["value"] elif m["type"] == "text": p.text = m["value"] elif m["type"] == "blob": p.blob = base64.b64decode(m["base64"]) ================================================ FILE: testing/test.sh ================================================ #!/bin/sh python3 -m venv .venv source .venv/bin/activate pip3 install aiohttp protobuf npm run build && SERVER=test_v2 python3 testing/hrana-test-server/server_v2.py npm test --prefix packages/libsql-client