Repository: opral/lix
Branch: main
Commit: a9023fb8ad90
Files: 493
Total size: 5.8 MB
Directory structure:
gitextract_5kejlpdk/
├── .gitattributes
├── .gitignore
├── .infisical.json
├── .prettierignore
├── CONTRIBUTING.md
├── Cargo.toml
├── README.md
├── benchmarks/
│ ├── 10k-entities/
│ │ ├── Cargo.toml
│ │ ├── README.md
│ │ └── src/
│ │ ├── main.rs
│ │ ├── sqlite_backend.rs
│ │ └── wasmtime_runtime.rs
│ ├── engine2-json-pointer/
│ │ ├── Cargo.toml
│ │ ├── README.md
│ │ └── src/
│ │ ├── main.rs
│ │ └── sqlite_backend.rs
│ └── git-compare/
│ ├── Cargo.toml
│ ├── README.md
│ └── src/
│ └── main.rs
├── blog/
│ ├── 001-introducing-lix/
│ │ └── index.md
│ ├── 002-modeling-a-company-as-a-repository/
│ │ └── index.md
│ ├── 003-february-2026-update/
│ │ └── index.md
│ ├── 004-march-2026-update/
│ │ └── index.md
│ ├── 005-april-2026-update/
│ │ └── index.md
│ ├── authors.json
│ └── table_of_contents.json
├── cla-signatures.json
├── docs/
│ ├── api-reference.md
│ ├── backend.md
│ ├── comparison-to-git.md
│ ├── getting-started.md
│ ├── history.md
│ ├── lix-for-ai-agents.md
│ ├── persistence.md
│ ├── schemas.md
│ ├── sql-functions.md
│ ├── surfaces.md
│ ├── table_of_contents.json
│ ├── versions.md
│ └── what-is-lix.md
├── nx.json
├── optimization_log6_crud.md
├── optimization_log7.md
├── optimization_log8.md
├── optimization_log9_sql2.md
├── package.json
├── packages/
│ ├── cli/
│ │ ├── Cargo.toml
│ │ └── src/
│ │ ├── app/
│ │ │ ├── context.rs
│ │ │ ├── mod.rs
│ │ │ ├── run.rs
│ │ │ └── welcome.rs
│ │ ├── cli/
│ │ │ ├── exp.rs
│ │ │ ├── init.rs
│ │ │ ├── mod.rs
│ │ │ ├── redo.rs
│ │ │ ├── root.rs
│ │ │ ├── sql.rs
│ │ │ ├── undo.rs
│ │ │ └── version.rs
│ │ ├── commands/
│ │ │ ├── exp/
│ │ │ │ ├── git_replay.rs
│ │ │ │ └── mod.rs
│ │ │ ├── init.rs
│ │ │ ├── mod.rs
│ │ │ ├── redo.rs
│ │ │ ├── sql/
│ │ │ │ ├── execute.rs
│ │ │ │ └── mod.rs
│ │ │ ├── undo.rs
│ │ │ └── version/
│ │ │ ├── create.rs
│ │ │ ├── merge.rs
│ │ │ ├── mod.rs
│ │ │ └── switch.rs
│ │ ├── db/
│ │ │ └── mod.rs
│ │ ├── error.rs
│ │ ├── hints.rs
│ │ ├── lib.rs
│ │ ├── main.rs
│ │ └── output/
│ │ └── mod.rs
│ ├── engine/
│ │ ├── .gitignore
│ │ ├── AGENTS.md
│ │ ├── Cargo.toml
│ │ ├── benches/
│ │ │ ├── fixtures/
│ │ │ │ └── pnpm-lock.fixture.json
│ │ │ ├── json_pointer_crud/
│ │ │ │ └── main.rs
│ │ │ ├── json_pointer_physical/
│ │ │ │ └── main.rs
│ │ │ ├── optimization9_sql2/
│ │ │ │ ├── json_pointer.schema.json
│ │ │ │ ├── main.rs
│ │ │ │ └── pnpm-lock.fixture.json
│ │ │ ├── physical_layout/
│ │ │ │ ├── backend_kv.rs
│ │ │ │ ├── changelog.rs
│ │ │ │ ├── json_store.rs
│ │ │ │ ├── main.rs
│ │ │ │ ├── tracked_state.rs
│ │ │ │ └── workflow.rs
│ │ │ ├── storage/
│ │ │ │ ├── README.md
│ │ │ │ ├── backend.rs
│ │ │ │ ├── binary_cas.rs
│ │ │ │ ├── changelog.rs
│ │ │ │ ├── commit_graph.rs
│ │ │ │ ├── json_store.rs
│ │ │ │ ├── main.rs
│ │ │ │ ├── rocksdb_backend.rs
│ │ │ │ ├── sqlite_backend.rs
│ │ │ │ ├── storage_api.rs
│ │ │ │ ├── tracked_state.rs
│ │ │ │ └── untracked_state.rs
│ │ │ └── transaction/
│ │ │ └── main.rs
│ │ ├── src/
│ │ │ ├── backend/
│ │ │ │ ├── kv.rs
│ │ │ │ ├── mod.rs
│ │ │ │ ├── testing.rs
│ │ │ │ └── types.rs
│ │ │ ├── binary_cas/
│ │ │ │ ├── chunking.rs
│ │ │ │ ├── codec.rs
│ │ │ │ ├── context.rs
│ │ │ │ ├── kv.rs
│ │ │ │ ├── mod.rs
│ │ │ │ └── types.rs
│ │ │ ├── catalog/
│ │ │ │ ├── context.rs
│ │ │ │ ├── mod.rs
│ │ │ │ ├── schema.rs
│ │ │ │ └── snapshot.rs
│ │ │ ├── cel/
│ │ │ │ ├── context.rs
│ │ │ │ ├── error.rs
│ │ │ │ ├── mod.rs
│ │ │ │ ├── provider.rs
│ │ │ │ ├── runtime.rs
│ │ │ │ └── value.rs
│ │ │ ├── commit_graph/
│ │ │ │ ├── context.rs
│ │ │ │ ├── mod.rs
│ │ │ │ ├── types.rs
│ │ │ │ └── walker.rs
│ │ │ ├── commit_store/
│ │ │ │ ├── codec.rs
│ │ │ │ ├── context.rs
│ │ │ │ ├── materialization.rs
│ │ │ │ ├── mod.rs
│ │ │ │ ├── storage.rs
│ │ │ │ └── types.rs
│ │ │ ├── common/
│ │ │ │ ├── error.rs
│ │ │ │ ├── fingerprint.rs
│ │ │ │ ├── fs_path.rs
│ │ │ │ ├── identity.rs
│ │ │ │ ├── json_pointer.rs
│ │ │ │ ├── metadata.rs
│ │ │ │ ├── mod.rs
│ │ │ │ ├── types.rs
│ │ │ │ └── wire.rs
│ │ │ ├── domain.rs
│ │ │ ├── engine.rs
│ │ │ ├── entity_identity.rs
│ │ │ ├── functions/
│ │ │ │ ├── context.rs
│ │ │ │ ├── deterministic.rs
│ │ │ │ ├── mod.rs
│ │ │ │ ├── provider.rs
│ │ │ │ ├── state.rs
│ │ │ │ └── types.rs
│ │ │ ├── init.rs
│ │ │ ├── json_store/
│ │ │ │ ├── compression.rs
│ │ │ │ ├── context.rs
│ │ │ │ ├── encoded.rs
│ │ │ │ ├── mod.rs
│ │ │ │ ├── store.rs
│ │ │ │ └── types.rs
│ │ │ ├── lib.rs
│ │ │ ├── live_state/
│ │ │ │ ├── context.rs
│ │ │ │ ├── mod.rs
│ │ │ │ ├── overlay.rs
│ │ │ │ ├── reader.rs
│ │ │ │ ├── types.rs
│ │ │ │ └── visibility.rs
│ │ │ ├── plugin/
│ │ │ │ ├── archive.rs
│ │ │ │ ├── component.rs
│ │ │ │ ├── install.rs
│ │ │ │ ├── manifest.rs
│ │ │ │ ├── materializer.rs
│ │ │ │ ├── mod.rs
│ │ │ │ ├── plugin_manifest.json
│ │ │ │ └── storage.rs
│ │ │ ├── schema/
│ │ │ │ ├── annotations/
│ │ │ │ │ ├── defaults.rs
│ │ │ │ │ └── mod.rs
│ │ │ │ ├── builtin/
│ │ │ │ │ ├── lix_account.json
│ │ │ │ │ ├── lix_active_account.json
│ │ │ │ │ ├── lix_binary_blob_ref.json
│ │ │ │ │ ├── lix_change.json
│ │ │ │ │ ├── lix_change_author.json
│ │ │ │ │ ├── lix_commit.json
│ │ │ │ │ ├── lix_commit_edge.json
│ │ │ │ │ ├── lix_directory_descriptor.json
│ │ │ │ │ ├── lix_file_descriptor.json
│ │ │ │ │ ├── lix_key_value.json
│ │ │ │ │ ├── lix_label.json
│ │ │ │ │ ├── lix_label_assignment.json
│ │ │ │ │ ├── lix_registered_schema.json
│ │ │ │ │ ├── lix_version_descriptor.json
│ │ │ │ │ ├── lix_version_ref.json
│ │ │ │ │ └── mod.rs
│ │ │ │ ├── compatibility.rs
│ │ │ │ ├── definition.json
│ │ │ │ ├── definition.rs
│ │ │ │ ├── key.rs
│ │ │ │ ├── mod.rs
│ │ │ │ ├── seed.rs
│ │ │ │ └── tests.rs
│ │ │ ├── session/
│ │ │ │ ├── context.rs
│ │ │ │ ├── create_version.rs
│ │ │ │ ├── execute.rs
│ │ │ │ ├── merge/
│ │ │ │ │ ├── analysis.rs
│ │ │ │ │ ├── apply.rs
│ │ │ │ │ ├── conflicts.rs
│ │ │ │ │ ├── mod.rs
│ │ │ │ │ ├── stats.rs
│ │ │ │ │ └── version.rs
│ │ │ │ ├── mod.rs
│ │ │ │ ├── optimization9_sql2_bench.rs
│ │ │ │ └── switch_version.rs
│ │ │ ├── sql2/
│ │ │ │ ├── change_provider.rs
│ │ │ │ ├── classify.rs
│ │ │ │ ├── context.rs
│ │ │ │ ├── directory_history_provider.rs
│ │ │ │ ├── directory_provider.rs
│ │ │ │ ├── dml.rs
│ │ │ │ ├── entity_history_provider.rs
│ │ │ │ ├── entity_provider.rs
│ │ │ │ ├── error.rs
│ │ │ │ ├── execute.rs
│ │ │ │ ├── file_history_provider.rs
│ │ │ │ ├── file_provider.rs
│ │ │ │ ├── filesystem_planner.rs
│ │ │ │ ├── filesystem_predicates.rs
│ │ │ │ ├── filesystem_visibility.rs
│ │ │ │ ├── history_projection.rs
│ │ │ │ ├── history_provider.rs
│ │ │ │ ├── history_route.rs
│ │ │ │ ├── lix_state_provider.rs
│ │ │ │ ├── mod.rs
│ │ │ │ ├── predicate_typecheck.rs
│ │ │ │ ├── public_bind/
│ │ │ │ │ ├── assignment.rs
│ │ │ │ │ ├── capability.rs
│ │ │ │ │ ├── dml.rs
│ │ │ │ │ ├── mod.rs
│ │ │ │ │ └── table.rs
│ │ │ │ ├── read_only.rs
│ │ │ │ ├── record_batch.rs
│ │ │ │ ├── result_metadata.rs
│ │ │ │ ├── runtime.rs
│ │ │ │ ├── session.rs
│ │ │ │ ├── udfs/
│ │ │ │ │ ├── common.rs
│ │ │ │ │ ├── lix_active_version_commit_id.rs
│ │ │ │ │ ├── lix_empty_blob.rs
│ │ │ │ │ ├── lix_json.rs
│ │ │ │ │ ├── lix_json_get.rs
│ │ │ │ │ ├── lix_json_get_text.rs
│ │ │ │ │ ├── lix_text_decode.rs
│ │ │ │ │ ├── lix_text_encode.rs
│ │ │ │ │ ├── lix_timestamp.rs
│ │ │ │ │ ├── lix_uuid_v7.rs
│ │ │ │ │ ├── mod.rs
│ │ │ │ │ └── public_call.rs
│ │ │ │ ├── version_provider.rs
│ │ │ │ ├── version_scope.rs
│ │ │ │ └── write_normalization.rs
│ │ │ ├── storage/
│ │ │ │ ├── context.rs
│ │ │ │ ├── mod.rs
│ │ │ │ ├── read_scope.rs
│ │ │ │ └── types.rs
│ │ │ ├── storage_bench.rs
│ │ │ ├── test_support.rs
│ │ │ ├── tracked_state/
│ │ │ │ ├── by_file_index.rs
│ │ │ │ ├── codec.rs
│ │ │ │ ├── context.rs
│ │ │ │ ├── diff.rs
│ │ │ │ ├── materialization.rs
│ │ │ │ ├── materializer.rs
│ │ │ │ ├── merge.rs
│ │ │ │ ├── mod.rs
│ │ │ │ ├── storage.rs
│ │ │ │ ├── tree.rs
│ │ │ │ └── types.rs
│ │ │ ├── transaction/
│ │ │ │ ├── commit.rs
│ │ │ │ ├── context.rs
│ │ │ │ ├── live_state_overlay.rs
│ │ │ │ ├── mod.rs
│ │ │ │ ├── normalization.rs
│ │ │ │ ├── prep.rs
│ │ │ │ ├── schema_resolver.rs
│ │ │ │ ├── staging.rs
│ │ │ │ ├── types.rs
│ │ │ │ └── validation.rs
│ │ │ ├── untracked_state/
│ │ │ │ ├── codec.rs
│ │ │ │ ├── context.rs
│ │ │ │ ├── materialization.rs
│ │ │ │ ├── mod.rs
│ │ │ │ ├── storage.rs
│ │ │ │ └── types.rs
│ │ │ ├── version/
│ │ │ │ ├── context.rs
│ │ │ │ ├── lifecycle.rs
│ │ │ │ ├── mod.rs
│ │ │ │ ├── refs.rs
│ │ │ │ ├── stage_rows.rs
│ │ │ │ └── types.rs
│ │ │ └── wasm/
│ │ │ └── mod.rs
│ │ ├── tests/
│ │ │ ├── branching.rs
│ │ │ ├── code_structure.rs
│ │ │ ├── commit_graph.rs
│ │ │ ├── engine.rs
│ │ │ ├── json_pointer_crud_storage.rs
│ │ │ ├── sql/
│ │ │ │ ├── entity_history.rs
│ │ │ │ ├── errors.rs
│ │ │ │ ├── history_conformance.rs
│ │ │ │ ├── lix_change.rs
│ │ │ │ ├── lix_commit.rs
│ │ │ │ ├── lix_directory.rs
│ │ │ │ ├── lix_directory_history.rs
│ │ │ │ ├── lix_file.rs
│ │ │ │ ├── lix_file_history.rs
│ │ │ │ ├── lix_json.rs
│ │ │ │ ├── lix_key_value.rs
│ │ │ │ ├── lix_label_assignment.rs
│ │ │ │ ├── lix_registered_schema.rs
│ │ │ │ ├── lix_state.rs
│ │ │ │ ├── lix_state_history.rs
│ │ │ │ ├── lix_version.rs
│ │ │ │ ├── metadata.rs
│ │ │ │ ├── read_only.rs
│ │ │ │ └── udfs.rs
│ │ │ ├── sql.rs
│ │ │ ├── storage_accounting.rs
│ │ │ ├── support/
│ │ │ │ ├── mod.rs
│ │ │ │ └── simulation_test/
│ │ │ │ ├── engine/
│ │ │ │ │ ├── expect_same.rs
│ │ │ │ │ ├── kv_backend.rs
│ │ │ │ │ ├── macro_runtime.rs
│ │ │ │ │ ├── mod.rs
│ │ │ │ │ ├── mode.rs
│ │ │ │ │ ├── rebuild_tracked_state.rs
│ │ │ │ │ └── simulation.rs
│ │ │ │ └── mod.rs
│ │ │ ├── tmp_lix_key_value_amplification.rs
│ │ │ └── transaction.rs
│ │ └── wit/
│ │ └── lix-plugin.wit
│ ├── js-kysely/
│ │ ├── .gitignore
│ │ ├── package.json
│ │ ├── src/
│ │ │ ├── create-lix-kysely.ts
│ │ │ ├── eb-entity.ts
│ │ │ ├── index.ts
│ │ │ ├── qb.test-d.ts
│ │ │ ├── qb.ts
│ │ │ └── schema.ts
│ │ ├── tests/
│ │ │ ├── eb-entity.test.ts
│ │ │ └── transaction.test.ts
│ │ ├── tsconfig.json
│ │ ├── tsconfig.type-tests.json
│ │ └── vitest.config.ts
│ ├── js-sdk/
│ │ ├── .gitignore
│ │ ├── Cargo.toml
│ │ ├── README.md
│ │ ├── SKILL.md
│ │ ├── package.json
│ │ ├── scripts/
│ │ │ ├── build.js
│ │ │ ├── sync-builtin-schemas.js
│ │ │ └── sync-engine-src.js
│ │ ├── src/
│ │ │ ├── builtin-schemas.ts
│ │ │ ├── engine-wasm/
│ │ │ │ ├── index.ts
│ │ │ │ └── value.test.ts
│ │ │ ├── index.ts
│ │ │ ├── open-lix.test.ts
│ │ │ ├── open-lix.ts
│ │ │ ├── sqlite/
│ │ │ │ ├── better-sqlite3.d.ts
│ │ │ │ ├── index.test.ts
│ │ │ │ └── index.ts
│ │ │ └── types.ts
│ │ ├── tsconfig.json
│ │ ├── vitest.config.ts
│ │ └── wasm-bindgen.rs
│ ├── plugin-json-v2/
│ │ ├── .gitignore
│ │ ├── Cargo.toml
│ │ ├── README.md
│ │ ├── benches/
│ │ │ ├── apply_changes.rs
│ │ │ ├── common/
│ │ │ │ └── mod.rs
│ │ │ ├── detect_changes.rs
│ │ │ └── roundtrip.rs
│ │ ├── schema/
│ │ │ └── json_pointer.json
│ │ ├── src/
│ │ │ └── lib.rs
│ │ └── tests/
│ │ ├── apply_changes.rs
│ │ ├── common/
│ │ │ └── mod.rs
│ │ ├── detect_changes.rs
│ │ ├── roundtrip.rs
│ │ └── schema.rs
│ ├── plugin-md-v2/
│ │ ├── .gitignore
│ │ ├── Cargo.toml
│ │ ├── README.md
│ │ ├── benches/
│ │ │ ├── common/
│ │ │ │ └── mod.rs
│ │ │ └── detect_changes.rs
│ │ ├── manifest.json
│ │ ├── schema/
│ │ │ ├── markdown_block.json
│ │ │ └── markdown_document.json
│ │ ├── src/
│ │ │ ├── apply_changes.rs
│ │ │ ├── common.rs
│ │ │ ├── detect_changes.rs
│ │ │ ├── lib.rs
│ │ │ └── schemas.rs
│ │ └── tests/
│ │ ├── apply_changes.rs
│ │ ├── common/
│ │ │ └── mod.rs
│ │ ├── detect_changes.rs
│ │ ├── roundtrip.rs
│ │ └── schema.rs
│ ├── react-utils/
│ │ ├── .oxlintrc.json
│ │ ├── .prettierrc.json
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── package.json
│ │ ├── src/
│ │ │ ├── hooks/
│ │ │ │ ├── use-lix.test.tsx
│ │ │ │ ├── use-lix.ts
│ │ │ │ ├── use-query.test.tsx
│ │ │ │ └── use-query.ts
│ │ │ ├── index.ts
│ │ │ └── provider.tsx
│ │ ├── test-setup.ts
│ │ ├── tsconfig.json
│ │ └── vitest.config.ts
│ ├── rs-sdk/
│ │ ├── Cargo.toml
│ │ ├── src/
│ │ │ ├── in_memory_backend.rs
│ │ │ ├── lib.rs
│ │ │ └── lix.rs
│ │ └── tests/
│ │ └── e2e.rs
│ ├── text-plugin/
│ │ ├── Cargo.toml
│ │ ├── README.md
│ │ ├── benches/
│ │ │ ├── apply_changes.rs
│ │ │ ├── common/
│ │ │ │ └── mod.rs
│ │ │ └── detect_changes.rs
│ │ ├── manifest.json
│ │ ├── schema/
│ │ │ ├── text_document.json
│ │ │ └── text_line.json
│ │ ├── src/
│ │ │ └── lib.rs
│ │ └── tests/
│ │ ├── apply_changes.rs
│ │ ├── common/
│ │ │ └── mod.rs
│ │ ├── detect_changes.rs
│ │ ├── roundtrip.rs
│ │ └── schema.rs
│ └── website/
│ ├── .gitignore
│ ├── .vscode/
│ │ └── settings.json
│ ├── HTML_DIFF_LIX_DEV_SEO_FOLLOWUP.md
│ ├── README.md
│ ├── content/
│ │ └── plugins/
│ │ └── index.md
│ ├── package.json
│ ├── public/
│ │ ├── _redirects
│ │ ├── manifest.json
│ │ └── robots.txt
│ ├── scripts/
│ │ ├── plugin-readme-sync.test.ts
│ │ ├── plugin-readme-sync.ts
│ │ └── post-build-seo.js
│ ├── src/
│ │ ├── blog/
│ │ │ ├── blogMetadata.ts
│ │ │ └── og-image.ts
│ │ ├── components/
│ │ │ ├── code-snippet.tsx
│ │ │ ├── doc-code-snippet-element.tsx
│ │ │ ├── docs-layout.tsx
│ │ │ ├── docs-prev-next.tsx
│ │ │ ├── footer.tsx
│ │ │ ├── header.tsx
│ │ │ ├── landing-page.tsx
│ │ │ ├── markdown-page.interactive.js
│ │ │ ├── markdown-page.style.css
│ │ │ ├── markdown-page.tsx
│ │ │ └── prev-next-nav.tsx
│ │ ├── github-stars-cache.ts
│ │ ├── lib/
│ │ │ ├── build-doc-map.test.ts
│ │ │ ├── build-doc-map.ts
│ │ │ ├── plugin-sidebar.ts
│ │ │ ├── seo.test.ts
│ │ │ └── seo.ts
│ │ ├── router.tsx
│ │ ├── routes/
│ │ │ ├── -seo-smoke.test.ts
│ │ │ ├── __root.tsx
│ │ │ ├── blog/
│ │ │ │ ├── $slug.tsx
│ │ │ │ └── index.tsx
│ │ │ ├── docs/
│ │ │ │ ├── $slugId.tsx
│ │ │ │ ├── index.tsx
│ │ │ │ └── redirects.json
│ │ │ ├── guide/
│ │ │ │ ├── $slugId.tsx
│ │ │ │ └── index.tsx
│ │ │ ├── index.tsx
│ │ │ ├── plugins/
│ │ │ │ ├── $pluginKey.tsx
│ │ │ │ ├── index.tsx
│ │ │ │ └── plugin.registry.json
│ │ │ └── rfc/
│ │ │ ├── $slug.tsx
│ │ │ └── index.tsx
│ │ ├── ssg/
│ │ │ └── github-stars-plugin.ts
│ │ ├── styles.css
│ │ └── types/
│ │ └── lix-js-plugin-json.d.ts
│ ├── tsconfig.json
│ ├── vite.config.ts
│ └── wrangler.json
├── pnpm-workspace.yaml
├── rfcs/
│ ├── 001-preprocess-writes/
│ │ └── index.md
│ ├── 002-rewrite-in-rust/
│ │ └── index.md
│ └── 003-canonical-lix-value/
│ └── index.md
└── skills/
└── cli/
└── SKILL.md
================================================
FILE CONTENTS
================================================
================================================
FILE: .gitattributes
================================================
pnpm-lock.yaml merge=ours
# automatically normalize line endings in text files to be line feed
# https://github.com/opral/monorepo/pull/3340#issue-2782271138
* text=auto eol=lf
================================================
FILE: .gitignore
================================================
### inlang ###
# .devcontainer.json
.pnpm-store
# **/out
examples/svelte/package-lock.json
examples/sveltekit/package-lock.json
/build
/package
.env*
.dev.vars
.nx
# Benchmark reports and scratch databases
benchmarks/engine2-json-pointer/output*/
packages/engine/benches/storage/output*/
# Playwright
**/test-results/
**/playwright-report/
**/playwright/.cache/
packages/vscode-docs-replay/results/
# SEO – Generated sitemap
inlang/**/sitemap.xml
# Created by https://www.toptal.com/developers/gitignore/api/windows,macos,linux,node,visualstudiocode,intellij
# Edit at https://www.toptal.com/developers/gitignore?templates=windows,macos,linux,node,visualstudiocode,intellij
### Intellij ###
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
# User-specific stuff
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/**/usage.statistics.xml
.idea/**/dictionaries
.idea/**/shelf
# AWS User-specific
.idea/**/aws.xml
# Generated files
.idea/**/contentModel.xml
# Sensitive or high-churn files
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml
.idea/**/dbnavigator.xml
# Gradle
.idea/**/gradle.xml
.idea/**/libraries
# Gradle and Maven with auto-import
# When using Gradle or Maven with auto-import, you should exclude module files,
# since they will be recreated, and may cause churn. Uncomment if using
# auto-import.
# .idea/artifacts
# .idea/compiler.xml
# .idea/jarRepositories.xml
# .idea/modules.xml
# .idea/*.iml
# .idea/modules
# *.iml
# *.ipr
# CMake
cmake-build-*/
# Mongo Explorer plugin
.idea/**/mongoSettings.xml
# File-based project format
*.iws
# IntelliJ
out/
# mpeltonen/sbt-idea plugin
.idea_modules/
# JIRA plugin
atlassian-ide-plugin.xml
# Cursive Clojure plugin
.idea/replstate.xml
# SonarLint plugin
.idea/sonarlint/
# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties
# Editor-based Rest Client
.idea/httpRequests
# Android studio 3.1+ serialized cache file
.idea/caches/build_file_checksums.ser
### Intellij Patch ###
# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721
# *.iml
# modules.xml
# .idea/misc.xml
# *.ipr
# Sonarlint plugin
# https://plugins.jetbrains.com/plugin/7973-sonarlint
.idea/**/sonarlint/
# SonarQube Plugin
# https://plugins.jetbrains.com/plugin/7238-sonarqube-community-plugin
.idea/**/sonarIssues.xml
# Markdown Navigator plugin
# https://plugins.jetbrains.com/plugin/7896-markdown-navigator-enhanced
.idea/**/markdown-navigator.xml
.idea/**/markdown-navigator-enh.xml
.idea/**/markdown-navigator/
# Cache file creation bug
# See https://youtrack.jetbrains.com/issue/JBR-2257
.idea/$CACHE_FILE$
# CodeStream plugin
# https://plugins.jetbrains.com/plugin/12206-codestream
.idea/codestream.xml
# Azure Toolkit for IntelliJ plugin
# https://plugins.jetbrains.com/plugin/8053-azure-toolkit-for-intellij
.idea/**/azureSettings.xml
### Linux ###
*~
# temporary files which can be created if a process still has a handle open of a deleted file
.fuse_hidden*
# KDE directory preferences
.directory
# Linux trash folder which might appear on any partition or disk
.Trash-*
# .nfs files are created when an open file is removed but is still being accessed
.nfs*
### macOS ###
# General
.DS_Store
.AppleDouble
.LSOverride
# Icon must end with two \r
Icon
# Thumbnails
._*
# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk
### macOS Patch ###
# iCloud generated files
*.icloud
### Node ###
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*
.pnpm-debug.log*
# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
# Runtime data
pids
*.pid
*.seed
*.pid.lock
# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov
# Coverage directory used by tools like istanbul
coverage
*.lcov
# nyc test coverage
.nyc_output
# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
.grunt
# Bower dependency directory (https://bower.io/)
bower_components
# node-waf configuration
.lock-wscript
# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release
# Dependency directories
node_modules/
jspm_packages/
# Snowpack dependency directory (https://snowpack.dev/)
web_modules/
# TypeScript cache
*.tsbuildinfo
# Optional npm cache directory
.npm
# Optional eslint cache
.eslintcache
# Optional stylelint cache
.stylelintcache
# Microbundle cache
.rpt2_cache/
.rts2_cache_cjs/
.rts2_cache_es/
.rts2_cache_umd/
# Optional REPL history
.node_repl_history
# Output of 'npm pack'
*.tgz
# Yarn Integrity file
.yarn-integrity
# dotenv environment variable files
.env
.env.development.local
.env.test.local
.env.production.local
.env.local
# parcel-bundler cache (https://parceljs.org/)
.cache
.parcel-cache
# Next.js build output
.next
out
# Nuxt.js build / generate output
.nuxt
dist
# Gatsby files
.cache/
# Comment the public line in if your project uses Gatsby and not Next.js
# https://nextjs.org/blog/next-9-1#public-directory-support
# public
# vuepress build output
.vuepress/dist
# vuepress v2.x temp and cache directory
.temp
# Docusaurus cache and generated files
.docusaurus
# Serverless directories
.serverless/
# FuseBox cache
.fusebox/
# DynamoDB Local files
.dynamodb/
# TernJS port file
.tern-port
# Stores Visual Studio Code versions used for testing the Visual Studio Code extension (Sherlock)
.vscode-test
# yarn v2
.yarn/cache
.yarn/unplugged
.yarn/build-state.yml
.yarn/install-state.gz
.pnp.*
### Node Patch ###
# Serverless Webpack directories
.webpack/
# Optional stylelint cache
# SvelteKit build / generate output
.svelte-kit
### VisualStudioCode ###
.vscode/*
!.vscode/settings.json
!.vscode/tasks.json
!.vscode/launch.json
!.vscode/extensions.json
!.vscode/*.code-snippets
# Local History for Visual Studio Code
.history/
# Built Visual Studio Code Extensions
*.vsix
### VisualStudioCode Patch ###
# Ignore all local history of files
.history
.ionide
### Windows ###
# Windows thumbnail cache files
Thumbs.db
Thumbs.db:encryptable
ehthumbs.db
ehthumbs_vista.db
# Dump file
*.stackdump
# Folder config file
[Dd]esktop.ini
# Recycle Bin used on file shares
$RECYCLE.BIN/
# Windows Installer files
*.cab
*.msi
*.msix
*.msm
*.msp
# Windows shortcuts
*.lnk
# End of https://www.toptal.com/developers/gitignore/api/windows,macos,linux,node,visualstudiocode,intellij
inlang/packages/paraglide/paraglide-sveltekit/example/build
inlang/packages/paraglide/paraglide-solidstart/example/.solid
*.h.ts.mjs
**/vite.config.ts.timestamp-*
**/vite.config.js.timestamp-*
# Fink version.json
inlang/packages/editor/version.json
# Lix website build
packages/lix-website/build
# gitea test instance data
lix/packages/gitea
# VitePress cache
packages/lix-docs/docs/.vitepress/cache
packages/lix-docs/docs/.vitepress/dist
artifact/*
packages/engine/artifact/*
target
# Built plugin archive artifacts
packages/*/*.lixplugin
================================================
FILE: .infisical.json
================================================
{
"workspaceId": "6e0353e4-b0b0-4c6d-a338-38f09cfafa22",
"defaultEnvironment": "",
"gitBranchToEnvironmentMapping": null
}
================================================
FILE: .prettierignore
================================================
## Adding the copied sources from the markdown plugin to be able to see changes since the copy.
packages/md-app/src/components/editor/plugins/markdown-plate-fork/**
packages/md-app/src/components/editor/plugins/*.tsx
packages/md-app/src/components/editor/plugins/*.ts
# also exclude ui
packages/md-app/src/components/plate-ui/*.tsx
packages/md-app/src/components/plate-ui/*.ts
packages/md-app/src/components/editor/plugins/markdown/fixtures/*.md
================================================
FILE: CONTRIBUTING.md
================================================
# Contributing
## Prerequisites
- [Node.js](https://nodejs.org/en/) (v20 or higher)
- [pnpm](https://pnpm.io/) (v8 or higher)
> [!NOTE]
> If you are developing on Windows, you need to use [WSL](https://en.wikipedia.org/wiki/Windows_Subsystem_for_Linux).
## Development
1. Clone the repository
2. Run `pnpm i` in the root of the repo
3. Run `pnpm --filter <package-name>... build` to build the package you want to work on together with its dependencies
4. Run `pnpm --filter <package-name> dev|test|...` to run the commands of the package you work on
### Example
> [!NOTE]
> You need to build the dependencies of the package at least once by appending the three dots (`...`) to the filter. [Here](https://pnpm.io/filtering#--filter-package_name-1) is the pnpm documentation for filtering.
1. `pnpm i`
2. `pnpm --filter @inlang/paraglide-js... build`
3. `pnpm --filter @inlang/paraglide-js dev`
## Opening a PR
1. Run `pnpm run ci` to run all tests and checks
2. Run `npx changeset` to write a changelog and trigger a version bump. Watch this Loom video to see how to use changesets: https://www.loom.com/share/1c5467ae3a5243d79040fc3eb5aa12d6
================================================
FILE: Cargo.toml
================================================
[workspace]
resolver = "2"
members = [
"benchmarks/git-compare",
"benchmarks/10k-entities",
"benchmarks/engine2-json-pointer",
"packages/engine",
"packages/js-sdk",
"packages/text-plugin",
"packages/rs-sdk",
"packages/plugin-md-v2",
"packages/cli",
]
exclude = ["packages/plugin-json-v2"]
[profile.test]
debug = 1
[profile.bench]
debug = true
strip = false
================================================
FILE: README.md
================================================
<p align="center">
<img src="https://raw.githubusercontent.com/opral/lix/main/assets/logo.svg" alt="Lix" height="60">
</p>
<h3 align="center">Embeddable version control system</h3>
<p align="center">
<a href="https://www.npmjs.com/package/@lix-js/sdk"><img src="https://img.shields.io/npm/dw/%40lix-js%2Fsdk?logo=npm&logoColor=red&label=npm%20downloads" alt="weekly downloads on NPM"></a>
<a href="https://discord.gg/gdMPPWy57R"><img src="https://img.shields.io/discord/897438559458430986?style=flat&logo=discord&labelColor=white" alt="Discord"></a>
<a href="https://github.com/opral/lix"><img src="https://img.shields.io/github/stars/opral/lix?style=flat&logo=github&color=brightgreen" alt="GitHub Stars"></a>
<a href="https://x.com/lixCCS"><img src="https://img.shields.io/badge/Follow-@lixCCS-black?logo=x&logoColor=white" alt="X (Twitter)"></a>
</p>
> [!NOTE]
>
> **Lix is in alpha** · [Follow progress to v1.0 →](https://github.com/opral/lix/issues/374)
---
Lix is an **embeddable version control system for files of any format** (DOCX, XLSX, CAD, PDF, JSON) with semantic, per-entity diffs. Branching, merging, and an immutable change history, exposed as SQL, all in-process.
Use it inside a contract editor, a feature-flag service, an artifact registry, an AI-agent platform, a versioned filesystem, or a domain-specific CLI.
> Lix is to version control what DuckDB is to analytics: an embeddable engine with pluggable support for file formats.
- **It's just a library.** `npm install`, import, run. No daemon, no protocol, no remote.
- **Semantic per-entity diffs.** XLSX cells, DOCX clauses, CAD parts. Not line-by-line text.
- **History is SQL.** Diffs, blame, and audit are direct queries against `lix_change`.
The entity foundation ships today. A plugin API is on the [roadmap](#roadmap); once it lands, anyone can author a plugin that turns a file format (DOCX, XLSX, CAD, PDF, anything else) into entities.
[How does Lix compare to Git? →](https://lix.dev/docs/comparison-to-git)
## Getting started
<p>
<img src="https://cdn.simpleicons.org/javascript/F7DF1E" alt="JavaScript" width="18" height="18" /> JavaScript ·
<a href="https://github.com/opral/lix/issues/370"><img src="https://cdn.jsdelivr.net/gh/devicons/devicon/icons/python/python-original.svg" alt="Python" width="18" height="18" /> Python</a> ·
<a href="https://github.com/opral/lix/issues/371"><img src="https://cdn.simpleicons.org/rust/CE422B" alt="Rust" width="18" height="18" /> Rust</a> ·
<a href="https://github.com/opral/lix/issues/373"><img src="https://cdn.simpleicons.org/go/00ADD8" alt="Go" width="18" height="18" /> Go</a>
</p>
```bash
npm install @lix-js/sdk
```
```ts
import { openLix } from "@lix-js/sdk";
const lix = await openLix(); // in-memory by default; pass a backend for persistence
// Register a schema for a tracked entity
await lix.execute(
"INSERT INTO lix_registered_schema (value) VALUES (lix_json($1))",
[
JSON.stringify({
"x-lix-key": "task",
"x-lix-version": "1",
"x-lix-primary-key": ["/id"],
type: "object",
required: ["id", "title"],
properties: {
id: { type: "string" },
title: { type: "string" },
},
additionalProperties: false,
}),
],
);
// Write rows like any SQL table
await lix.execute(
"INSERT INTO task (id, title) VALUES ($1, $2)",
["t1", "Ship v1"],
);
// Every change is journaled; query it with SQL
const changes = await lix.execute(
"SELECT entity_id, schema_key, snapshot_content FROM lix_change",
);
```
## Semantic change (delta) tracking
Unlike Git's line-based diffs, Lix understands file structure through plugins. Lix sees `price: 10 → 12` or `cell B4: pending → shipped`, not "line 4 changed" or "binary files differ".
### JSON file example
**Before:**
```json
{"theme":"light","notifications":true,"language":"en"}
```
**After:**
```json
{"theme":"dark","notifications":true,"language":"en"}
```
**Git sees:**
```diff
-{"theme":"light","notifications":true,"language":"en"}
+{"theme":"dark","notifications":true,"language":"en"}
```
**Lix sees:**
```diff
property theme:
- light
+ dark
```
### Excel file example
The same approach works for binary formats. With an XLSX plugin, Lix shows cell-level changes:
**Before:**
```diff
| order_id | product | status |
| -------- | -------- | -------- |
| 1001 | Widget A | shipped |
| 1002 | Widget B | pending |
```
**After:**
```diff
| order_id | product | status |
| -------- | -------- | -------- |
| 1001 | Widget A | shipped |
| 1002 | Widget B | shipped |
```
**Git sees:**
```diff
-Binary files differ
```
**Lix sees:**
```diff
order_id 1002 status:
- pending
+ shipped
```
## How Lix Works
Lix uses a SQL database as its query engine and persistence layer. Virtual tables like `file` and `file_history` are exposed on top:
```sql
SELECT * FROM file_history
WHERE path = '/orders.xlsx'
ORDER BY created_at DESC;
```
When a file is written, a plugin parses it and detects entity-level changes. These changes (deltas) are stored in the database, enabling branching, merging, and audit trails.
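For example, the `theme` delta from the JSON example above can be read back directly. A sketch, assuming a JSON plugin that records `json_pointer` entities:
```sql
-- Entity-level deltas for the settings file edit shown earlier;
-- 'json_pointer' is the schema key assumed for the JSON plugin's entities.
SELECT entity_id, schema_key, snapshot_content
FROM lix_change
WHERE schema_key = 'json_pointer';
```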
```
┌─────────────────────────────────────────────────┐
│ Lix │
│ │
│ ┌────────────┐ ┌──────────┐ ┌─────────┐ ┌─────┐ │
│ │ Filesystem │ │ Branches │ │ History │ │ ... │ │
│ └────────────┘ └──────────┘ └─────────┘ └─────┘ │
└────────────────────────┬────────────────────────┘
│
▼
┌─────────────────────────────────────────────────┐
│ SQL database │
│ (SQLite, Postgres, etc.) │
└─────────────────────────────────────────────────┘
```
[Read more about Lix architecture →](https://lix.dev/docs/architecture)
## Roadmap
- [x] Core API (<v0.5)
- [x] ACID transactions (v0.6)
- [x] Branching, diffing, merging (v0.6)
- [x] SQL API (v0.6)
- [x] Stable physical storage layout (v0.6)
- [ ] Plugin API for file formats (community-authored plugins for DOCX, XLSX, CAD, PDF, …)
- [ ] Merge conflict semantics and resolution
- [ ] Working changes & checkpointing
- [ ] Real-time sync
## Learn More
- **[Getting Started Guide](https://lix.dev/docs/getting-started)** - Build your first app with Lix
- **[Documentation](https://lix.dev/docs)** - Full API reference and guides
- **[Discord](https://discord.gg/gdMPPWy57R)** - Get help and join the community
- **[GitHub](https://github.com/opral/lix)** - Report issues and contribute
## Blog posts
- [Introducing Lix: An embeddable version control system](https://lix.dev/blog/introducing-lix)
- [What if a Git SDK to build apps exists?](https://samuelstroschein.com/blog/what-if-a-git-sdk-exists)
- [Git is unsuited for applications](https://samuelstroschein.com/blog/git-limitations)
- [Does a git-based architecture make sense?](https://samuelstroschein.com/blog/git-based-architecture)
## License
[MIT](https://github.com/opral/lix/blob/main/packages/lix-sdk/LICENSE)
================================================
FILE: benchmarks/10k-entities/Cargo.toml
================================================
[package]
name = "ten_k_entities_benchmark"
version = "0.1.0"
edition = "2021"
publish = false
[dependencies]
async-trait = "0.1"
clap = { version = "4.5.31", features = ["derive"] }
lix_engine = { path = "../../packages/engine" }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
sqlx = { version = "0.8.6", default-features = false, features = ["sqlite", "runtime-tokio-rustls"] }
tokio = { version = "1", features = ["sync"] }
wasmtime = { version = "30", features = ["component-model"] }
wasmtime-wasi = "30"
zip = { version = "2", default-features = false, features = ["deflate"] }
================================================
FILE: benchmarks/10k-entities/README.md
================================================
# 10k Entities Benchmark
This benchmark compares two engine paths for the same logical JSON document:
1. File write: insert one `.json` blob with `10_000` props through `lix_file`
2. Direct entity writes: insert `10_000` `json_pointer` rows directly through `lix_state`
The goal is to separate:
- file/plugin detect overhead
- direct semantic row write overhead
Both cases use the real current engine on a fresh file-backed SQLite database.
## Case 1: File Write JSON With 10k Props
Timed section:
- begin a buffered write transaction
- run `INSERT INTO lix_file (id, path, data)` (sketched below)
- commit the transaction
This case includes:
- JSON plugin `detect-changes`
- semantic row commit
- live-state rebuild
- file cache/materialization refresh
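Concretely, the timed write is a single statement with the 10k-prop payload bound as a blob (a sketch with positional parameters, matching the benchmark source):
```sql
-- Case 1: one file insert; the JSON plugin derives json_pointer rows during commit
INSERT INTO lix_file (id, path, data) VALUES (?1, ?2, ?3);
```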
## Case 2: Direct Entity Writes 10k
Outside the timer:
- insert an empty `{}` JSON file through `lix_file`
Timed section:
- begin a buffered write transaction
- run one root-row update plus chunked `INSERT INTO lix_state (...) VALUES (...)` statements until all `10_000` property rows are written (sketched below)
- commit the transaction
This case excludes file-to-entity detection, but still includes:
- direct semantic row commit
- live-state rebuild
- file cache/materialization refresh
The benchmark treats committed `json_pointer` row count as the hard invariant for
this case and records the final `lix_file` payload match as an observation.
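The timed statements have the following shape. This is a sketch: the benchmark source escapes all values inline, and the root entity id (`<root>` below) is loaded from `lix_state_by_version` before the timer starts:

```sql
-- Case 2: update the root json_pointer row so the semantic root stays in
-- sync, then insert the property rows in chunks of 250.
UPDATE lix_state
SET snapshot_content = '{"path":"<root>","value":{...}}'
WHERE entity_id = lix_json('["<root>"]')
  AND file_id = 'json-direct-state-0'
  AND schema_key = 'json_pointer'
  AND plugin_key = 'json';

INSERT INTO lix_state (entity_id, file_id, schema_key, plugin_key, snapshot_content)
VALUES ('/prop_00000', 'json-direct-state-0', 'json_pointer', 'json', '{"path":"/prop_00000","value":"value_00000"}'),
       ('/prop_00001', 'json-direct-state-0', 'json_pointer', 'json', '{"path":"/prop_00001","value":"value_00001"}');
-- ... repeated until all 10_000 property rows are written
```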
## Usage
```bash
cargo run --release -p ten_k_entities_benchmark -- \
--props 10000 \
--warmups 2 \
--iterations 10 \
--output-dir artifact/benchmarks/10k-entities
```
The benchmark writes:
- `artifact/benchmarks/10k-entities/report.json`
- `artifact/benchmarks/10k-entities/report.md`
## Verification
Each case verifies:
- committed `json_pointer` row count in `lix_state_by_version` (query shown below)
- file-write case: final `lix_file` JSON must match the expected payload
- direct-write case: final `lix_file` JSON match is recorded in the report
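The row-count check is the query `finish_sample` runs in `src/main.rs`:
```sql
-- Committed json_pointer rows for the file in the active version
SELECT COUNT(*)
FROM lix_state_by_version
WHERE file_id = ?1
  AND version_id = ?2
  AND schema_key = ?3 -- 'json_pointer'
  AND snapshot_content IS NOT NULL;
```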
## Notes
- Warmups absorb first-use wasm/component initialization costs.
- The direct-write case times `10_000` property inserts plus one root-row update so the JSON semantic root stays in sync with the property rows.
- The report includes per-case `write`, `commit`, and `total` timing summaries plus a comparison table.
================================================
FILE: benchmarks/10k-entities/src/main.rs
================================================
use clap::Parser;
use lix_engine::wasm::WasmRuntime;
use lix_engine::{boot, BootArgs, ExecuteOptions, LixError, Session, Value};
use serde::Serialize;
use std::fs;
use std::io::{Cursor, Write};
use std::path::{Path, PathBuf};
use std::process::Command;
use std::sync::Arc;
use std::time::{Instant, SystemTime, UNIX_EPOCH};
use zip::write::SimpleFileOptions;
use zip::{CompressionMethod, ZipWriter};
mod sqlite_backend;
mod wasmtime_runtime;
const DEFAULT_OUTPUT_DIR: &str = "artifact/benchmarks/10k-entities";
const DEFAULT_PROPS: usize = 10_000;
const DEFAULT_WARMUPS: usize = 2;
const DEFAULT_ITERATIONS: usize = 10;
const DIRECT_ENTITY_WRITE_CHUNK_SIZE: usize = 250;
const PLUGIN_KEY: &str = "json";
const PLUGIN_SCHEMA_KEY: &str = "json_pointer";
const PLUGIN_ARCHIVE_MANIFEST_JSON: &str = r#"{
"key": "json",
"runtime": "wasm-component-v1",
"api_version": "0.1.0",
"match": {"path_glob": "*.json"},
"detect_changes": {},
"entry": "plugin.wasm",
"schemas": ["schema/json_pointer.json"]
}"#;
const JSON_POINTER_SCHEMA_JSON: &str =
include_str!("../../../packages/plugin-json-v2/schema/json_pointer.json");
type BenchResult<T> = Result<T, String>;
#[derive(Parser, Debug)]
#[command(
name = "10k-entities-benchmark",
about = "Benchmark file-write vs direct-entity-write paths for a 10k-prop JSON document"
)]
struct Args {
#[arg(long, default_value_t = DEFAULT_PROPS)]
props: usize,
#[arg(long, default_value_t = DEFAULT_WARMUPS)]
warmups: usize,
#[arg(long, default_value_t = DEFAULT_ITERATIONS)]
iterations: usize,
#[arg(long, default_value = DEFAULT_OUTPUT_DIR)]
output_dir: PathBuf,
}
#[derive(Debug, Clone, Copy)]
enum BenchmarkCaseKind {
FileWriteJson,
DirectEntityWrites,
}
impl BenchmarkCaseKind {
fn id(self) -> &'static str {
match self {
Self::FileWriteJson => "file_write_json_10k_props",
Self::DirectEntityWrites => "direct_entity_writes_10k",
}
}
fn title(self) -> &'static str {
match self {
Self::FileWriteJson => "File Write JSON With 10k Props",
Self::DirectEntityWrites => "Direct Entity Writes 10k",
}
}
fn timed_operation(self) -> &'static str {
match self {
Self::FileWriteJson => {
"INSERT INTO lix_file for one 10k-prop JSON payload inside a buffered write transaction, then commit"
}
Self::DirectEntityWrites => {
"UPDATE the root json_pointer row and INSERT 10k property json_pointer rows inside a buffered write transaction, then commit"
}
}
}
fn notes(self) -> Vec<&'static str> {
match self {
Self::FileWriteJson => vec![
"This is the real file-write path with plugin detect-changes enabled.",
"The timed write is one INSERT INTO lix_file statement.",
"The semantic layer derives json_pointer rows during commit.",
"This case includes plugin detect-changes cost plus direct semantic row commit cost.",
],
Self::DirectEntityWrites => vec![
"This isolates direct semantic writes through the engine without detect-changes.",
"Outside the timer, the benchmark inserts an empty {} JSON file to establish the file descriptor and root entity.",
"Inside the timer, it updates the root json_pointer row and inserts the 10k property rows through chunked lix_state statements.",
"This case still includes normal commit, live-state rebuild, and file-cache refresh work for direct entity writes.",
"The report records whether lix_file matched the expected payload after commit, but row-count verification is the hard invariant for this case.",
],
}
}
fn timed_sql(self) -> &'static str {
match self {
Self::FileWriteJson => "INSERT INTO lix_file (id, path, data) VALUES (?1, ?2, ?3)",
Self::DirectEntityWrites => {
"UPDATE lix_state root row; INSERT INTO lix_state (...) VALUES (... x chunk_size), repeated until props rows are written"
}
}
}
fn verification(self) -> &'static str {
match self {
Self::FileWriteJson => {
"Verify committed json_pointer row count for the file and verify lix_file JSON matches the input payload."
}
Self::DirectEntityWrites => {
"Verify committed json_pointer row count for the file and record whether lix_file JSON matched the expected 10k-prop payload."
}
}
}
fn setup_outside_timer(self) -> Vec<&'static str> {
match self {
Self::FileWriteJson => vec![
"Build plugin-json-v2 wasm.",
"Create a fresh SQLite database.",
"Boot the engine and install the JSON plugin.",
],
Self::DirectEntityWrites => vec![
"Build plugin-json-v2 wasm.",
"Create a fresh SQLite database.",
"Boot the engine and install the JSON plugin.",
"Insert an empty {} JSON file so direct state writes target an existing JSON file.",
"Load the committed root json_pointer entity id for that file.",
],
}
}
}
#[derive(Debug, Serialize)]
struct Report {
generated_at_unix_ms: u128,
benchmark: BenchmarkMetadata,
shared_setup: SharedSetupReport,
cases: Vec<CaseReport>,
comparison: ComparisonSummary,
}
#[derive(Debug, Serialize)]
struct BenchmarkMetadata {
name: &'static str,
notes: Vec<&'static str>,
}
#[derive(Debug, Serialize)]
struct SharedSetupReport {
props: usize,
input_bytes: usize,
direct_property_rows: usize,
expected_state_rows_after_commit: u64,
plugin_key: &'static str,
schema_key: &'static str,
plugin_wasm_path: String,
sqlite_mode: &'static str,
}
#[derive(Debug, Serialize)]
struct CaseReport {
case_id: &'static str,
title: &'static str,
timed_operation: &'static str,
notes: Vec<&'static str>,
setup: CaseSetupReport,
warmups: Vec<RunSample>,
samples: Vec<RunSample>,
timing_ms: TimingSummary,
}
#[derive(Debug, Serialize)]
struct CaseSetupReport {
timed_rows: usize,
timed_sql: &'static str,
setup_outside_timer: Vec<&'static str>,
verification: &'static str,
}
#[derive(Debug, Clone, Serialize)]
struct RunSample {
index: usize,
write_ms: f64,
commit_ms: f64,
total_ms: f64,
committed_state_rows: u64,
file_matches_expected: bool,
}
#[derive(Debug, Serialize)]
struct TimingSummary {
sample_count: usize,
write: PhaseSummary,
commit: PhaseSummary,
total: PhaseSummary,
}
#[derive(Debug, Serialize)]
struct PhaseSummary {
mean_ms: f64,
median_ms: f64,
min_ms: f64,
max_ms: f64,
}
#[derive(Debug, Serialize)]
struct ComparisonSummary {
file_write_total_mean_ms: f64,
direct_entity_total_mean_ms: f64,
file_write_minus_direct_entity_total_mean_ms: f64,
file_write_commit_mean_ms: f64,
direct_entity_commit_mean_ms: f64,
file_write_minus_direct_entity_commit_mean_ms: f64,
file_write_write_mean_ms: f64,
direct_entity_write_mean_ms: f64,
file_write_minus_direct_entity_write_mean_ms: f64,
file_write_to_direct_entity_total_ratio: f64,
}
struct TempSqlitePath {
path: PathBuf,
}
impl TempSqlitePath {
fn new(label: &str) -> Self {
Self {
path: temp_sqlite_path(label),
}
}
fn path(&self) -> &Path {
&self.path
}
}
impl Drop for TempSqlitePath {
fn drop(&mut self) {
for suffix in ["", "-wal", "-shm", "-journal"] {
let _ = std::fs::remove_file(format!("{}{}", self.path.display(), suffix));
}
}
}
fn main() {
let runtime = tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.expect("tokio runtime should initialize");
if let Err(error) = runtime.block_on(run(Args::parse())) {
eprintln!("error: {error}");
std::process::exit(1);
}
}
async fn run(args: Args) -> BenchResult<()> {
if args.props == 0 {
return Err("--props must be greater than 0".to_string());
}
if args.iterations == 0 {
return Err("--iterations must be greater than 0".to_string());
}
fs::create_dir_all(&args.output_dir).map_err(io_err)?;
let repo_root = repo_root()?;
let plugin_wasm_path = build_plugin_json_v2_wasm(&repo_root)?;
let plugin_wasm_bytes = fs::read(&plugin_wasm_path).map_err(io_err)?;
let plugin_archive = build_plugin_archive(&plugin_wasm_bytes)?;
let payload = build_flat_json_payload(args.props)?;
let expected_state_rows_after_commit = (args.props + 1) as u64;
let wasm_runtime: Arc<dyn WasmRuntime> =
Arc::new(wasmtime_runtime::TestWasmtimeRuntime::new().map_err(lix_err)?);
let file_write_case = run_case(
BenchmarkCaseKind::FileWriteJson,
&args,
Arc::clone(&wasm_runtime),
&plugin_archive,
&payload,
expected_state_rows_after_commit,
)
.await?;
let direct_entity_case = run_case(
BenchmarkCaseKind::DirectEntityWrites,
&args,
Arc::clone(&wasm_runtime),
&plugin_archive,
&payload,
expected_state_rows_after_commit,
)
.await?;
let comparison = build_comparison_summary(&file_write_case, &direct_entity_case)?;
let report = Report {
generated_at_unix_ms: now_unix_ms()?,
benchmark: BenchmarkMetadata {
name: "10k-entities-json-file-vs-direct-state",
notes: vec![
"Both cases use a fresh file-backed SQLite database per run.",
"Plugin wasm build, engine init, plugin install, and database setup are outside the timer.",
"Each case reports write_ms, commit_ms, and total_ms separately.",
"The goal is to separate file/plugin detect overhead from direct 10k entity write overhead.",
],
},
shared_setup: SharedSetupReport {
props: args.props,
input_bytes: payload.len(),
direct_property_rows: args.props,
expected_state_rows_after_commit,
plugin_key: PLUGIN_KEY,
schema_key: PLUGIN_SCHEMA_KEY,
plugin_wasm_path: plugin_wasm_path.display().to_string(),
sqlite_mode: "fresh file-backed SQLite database per run",
},
cases: vec![file_write_case, direct_entity_case],
comparison,
};
let report_json_path = args.output_dir.join("report.json");
let report_markdown_path = args.output_dir.join("report.md");
fs::write(
&report_json_path,
serde_json::to_vec_pretty(&report).map_err(serde_err)?,
)
.map_err(io_err)?;
fs::write(&report_markdown_path, render_markdown_report(&report)).map_err(io_err)?;
print_summary(&report, &report_json_path, &report_markdown_path);
Ok(())
}
async fn run_case(
kind: BenchmarkCaseKind,
args: &Args,
wasm_runtime: Arc<dyn WasmRuntime>,
plugin_archive: &[u8],
payload: &[u8],
expected_state_rows_after_commit: u64,
) -> BenchResult<CaseReport> {
let mut warmups = Vec::with_capacity(args.warmups);
for index in 0..args.warmups {
warmups.push(
run_sample(
kind,
index,
Arc::clone(&wasm_runtime),
plugin_archive,
payload,
expected_state_rows_after_commit,
)
.await?,
);
}
let mut samples = Vec::with_capacity(args.iterations);
for index in 0..args.iterations {
samples.push(
run_sample(
kind,
index,
Arc::clone(&wasm_runtime),
plugin_archive,
payload,
expected_state_rows_after_commit,
)
.await?,
);
}
Ok(CaseReport {
case_id: kind.id(),
title: kind.title(),
timed_operation: kind.timed_operation(),
notes: kind.notes(),
setup: CaseSetupReport {
timed_rows: match kind {
BenchmarkCaseKind::FileWriteJson => 1,
BenchmarkCaseKind::DirectEntityWrites => args.props + 1,
},
timed_sql: kind.timed_sql(),
setup_outside_timer: kind.setup_outside_timer(),
verification: kind.verification(),
},
warmups,
samples: samples.clone(),
timing_ms: summarize_timings(&samples)?,
})
}
async fn run_sample(
kind: BenchmarkCaseKind,
index: usize,
wasm_runtime: Arc<dyn WasmRuntime>,
plugin_archive: &[u8],
payload: &[u8],
expected_state_rows_after_commit: u64,
) -> BenchResult<RunSample> {
match kind {
BenchmarkCaseKind::FileWriteJson => {
run_file_write_sample(
index,
wasm_runtime,
plugin_archive,
payload,
expected_state_rows_after_commit,
)
.await
}
BenchmarkCaseKind::DirectEntityWrites => {
run_direct_entity_write_sample(
index,
wasm_runtime,
plugin_archive,
payload,
expected_state_rows_after_commit,
)
.await
}
}
}
async fn run_file_write_sample(
index: usize,
wasm_runtime: Arc<dyn WasmRuntime>,
plugin_archive: &[u8],
payload: &[u8],
expected_state_rows_after_commit: u64,
) -> BenchResult<RunSample> {
let sqlite_path = TempSqlitePath::new(&format!("10k-entities-file-write-{index}"));
let session = open_prepared_session(sqlite_path.path(), wasm_runtime, plugin_archive).await?;
let file_id = format!("json-file-write-{index}");
let file_path = format!("/{file_id}.json");
let active_version_id = session.active_version_id();
let mut transaction = Some(
session
.begin_transaction_with_options(ExecuteOptions::default())
.await
.map_err(lix_err)?,
);
let started_at = Instant::now();
let write_started_at = Instant::now();
let write_result = {
let transaction = transaction
.as_mut()
.expect("transaction should be available during write phase");
transaction
.execute(
"INSERT INTO lix_file (id, path, data) VALUES (?1, ?2, ?3)",
&[
Value::Text(file_id.clone()),
Value::Text(file_path),
Value::Blob(payload.to_vec()),
],
)
.await
.map_err(lix_err)
};
if let Err(error) = write_result {
if let Some(transaction) = transaction.take() {
let _ = transaction.rollback().await;
}
return Err(error);
}
let write_ms = write_started_at.elapsed().as_secs_f64() * 1000.0;
let commit_started_at = Instant::now();
transaction
.take()
.expect("transaction should be available for commit")
.commit()
.await
.map_err(lix_err)?;
let commit_ms = commit_started_at.elapsed().as_secs_f64() * 1000.0;
let total_ms = started_at.elapsed().as_secs_f64() * 1000.0;
finish_sample(
index,
&session,
&file_id,
&active_version_id,
payload,
expected_state_rows_after_commit,
true,
write_ms,
commit_ms,
total_ms,
)
.await
}
async fn run_direct_entity_write_sample(
index: usize,
wasm_runtime: Arc<dyn WasmRuntime>,
plugin_archive: &[u8],
payload: &[u8],
expected_state_rows_after_commit: u64,
) -> BenchResult<RunSample> {
let sqlite_path = TempSqlitePath::new(&format!("10k-entities-direct-state-{index}"));
let session = open_prepared_session(sqlite_path.path(), wasm_runtime, plugin_archive).await?;
let file_id = format!("json-direct-state-{index}");
let file_path = format!("/{file_id}.json");
let active_version_id = session.active_version_id();
bootstrap_empty_json_file(&session, &file_id, &file_path).await?;
let root_entity_id =
load_root_json_pointer_entity_id(&session, &file_id, &active_version_id).await?;
let direct_write_sql_batches = build_direct_entity_write_sql_batches(
&file_id,
&root_entity_id,
payload,
DIRECT_ENTITY_WRITE_CHUNK_SIZE,
)?;
let mut transaction = Some(
session
.begin_transaction_with_options(ExecuteOptions::default())
.await
.map_err(lix_err)?,
);
let started_at = Instant::now();
let write_started_at = Instant::now();
let write_result = {
let transaction = transaction
.as_mut()
.expect("transaction should be available during write phase");
let mut result = Ok(());
for sql in &direct_write_sql_batches {
if let Err(error) = transaction.execute(sql, &[]).await.map_err(lix_err) {
result = Err(error);
break;
}
}
result
};
if let Err(error) = write_result {
if let Some(transaction) = transaction.take() {
let _ = transaction.rollback().await;
}
return Err(error);
}
let write_ms = write_started_at.elapsed().as_secs_f64() * 1000.0;
let commit_started_at = Instant::now();
transaction
.take()
.expect("transaction should be available for commit")
.commit()
.await
.map_err(lix_err)?;
let commit_ms = commit_started_at.elapsed().as_secs_f64() * 1000.0;
let total_ms = started_at.elapsed().as_secs_f64() * 1000.0;
finish_sample(
index,
&session,
&file_id,
&active_version_id,
payload,
expected_state_rows_after_commit,
false,
write_ms,
commit_ms,
total_ms,
)
.await
}
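/// Verifies the committed `json_pointer` row count (the hard invariant for both
/// cases) and the final `lix_file` payload match (enforced only when
/// `enforce_file_match` is set), then assembles the run sample.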
async fn finish_sample(
index: usize,
session: &Session,
file_id: &str,
active_version_id: &str,
expected_payload: &[u8],
expected_state_rows_after_commit: u64,
enforce_file_match: bool,
write_ms: f64,
commit_ms: f64,
total_ms: f64,
) -> BenchResult<RunSample> {
let committed_state_rows = scalar_count(
session,
"SELECT COUNT(*) \
FROM lix_state_by_version \
WHERE file_id = ?1 \
AND version_id = ?2 \
AND schema_key = ?3 \
AND snapshot_content IS NOT NULL",
&[
Value::Text(file_id.to_string()),
Value::Text(active_version_id.to_string()),
Value::Text(PLUGIN_SCHEMA_KEY.to_string()),
],
)
.await?;
if committed_state_rows != expected_state_rows_after_commit {
return Err(format!(
"expected {expected_state_rows_after_commit} committed json_pointer rows for '{file_id}', got {committed_state_rows}"
));
}
let file_matches_expected =
match verify_file_json_matches(session, file_id, expected_payload).await {
Ok(()) => true,
Err(error) if !enforce_file_match => {
let _ = error;
false
}
Err(error) => return Err(error),
};
Ok(RunSample {
index,
write_ms,
commit_ms,
total_ms,
committed_state_rows,
file_matches_expected,
})
}
async fn open_prepared_session(
sqlite_path: &Path,
wasm_runtime: Arc<dyn WasmRuntime>,
plugin_archive: &[u8],
) -> BenchResult<Session> {
let backend = sqlite_backend::BenchSqliteBackend::file_backed(sqlite_path).map_err(lix_err)?;
let mut boot_args = BootArgs::new(Box::new(backend), wasm_runtime);
boot_args.access_to_internal = true;
let engine = Arc::new(boot(boot_args));
engine.initialize().await.map_err(lix_err)?;
let session = engine.open_session().await.map_err(lix_err)?;
session
.install_plugin(plugin_archive)
.await
.map_err(lix_err)?;
Ok(session)
}
async fn bootstrap_empty_json_file(
session: &Session,
file_id: &str,
file_path: &str,
) -> BenchResult<()> {
session
.execute(
"INSERT INTO lix_file (id, path, data) VALUES (?1, ?2, ?3)",
&[
Value::Text(file_id.to_string()),
Value::Text(file_path.to_string()),
Value::Blob(b"{}".to_vec()),
],
)
.await
.map_err(lix_err)?;
Ok(())
}
async fn load_root_json_pointer_entity_id(
session: &Session,
file_id: &str,
active_version_id: &str,
) -> BenchResult<String> {
let result = session
.execute(
"SELECT entity_id \
FROM lix_state_by_version \
WHERE file_id = ?1 \
AND version_id = ?2 \
AND schema_key = ?3 \
AND snapshot_content IS NOT NULL \
ORDER BY entity_id ASC \
LIMIT 1",
&[
Value::Text(file_id.to_string()),
Value::Text(active_version_id.to_string()),
Value::Text(PLUGIN_SCHEMA_KEY.to_string()),
],
)
.await
.map_err(lix_err)?;
let value = result
.statements
.first()
.and_then(|statement| statement.rows.first())
.and_then(|row| row.first())
.ok_or_else(|| format!("query returned no root json_pointer row for '{file_id}'"))?;
match value {
Value::Text(text) => Ok(text.clone()),
other => Err(format!(
"expected text entity_id for root json_pointer row of '{file_id}', got {other:?}"
)),
}
}
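/// Builds the timed SQL for the direct-write case: one UPDATE of the root
/// `json_pointer` row followed by chunked multi-row INSERTs into `lix_state`,
/// with all values escaped inline.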
fn build_direct_entity_write_sql_batches(
file_id: &str,
root_entity_id: &str,
payload: &[u8],
chunk_size: usize,
) -> BenchResult<Vec<String>> {
if chunk_size == 0 {
return Err("direct entity write chunk size must be greater than 0".to_string());
}
let expected_json: serde_json::Value = serde_json::from_slice(payload).map_err(serde_err)?;
let object = expected_json
.as_object()
.ok_or_else(|| "expected generated payload to be a JSON object".to_string())?;
let root_snapshot_content = serde_json::json!({
"path": root_entity_id,
"value": expected_json,
});
let root_snapshot_content = serde_json::to_string(&root_snapshot_content).map_err(serde_err)?;
let root_entity_id_json =
serde_json::to_string(&serde_json::json!([root_entity_id])).map_err(serde_err)?;
let mut statements = vec![format!(
"UPDATE lix_state \
SET snapshot_content = '{}' \
WHERE entity_id = lix_json('{}') \
AND file_id = '{}' \
AND schema_key = '{}' \
AND plugin_key = '{}'",
escape_sql_string(&root_snapshot_content),
escape_sql_string(&root_entity_id_json),
escape_sql_string(file_id),
PLUGIN_SCHEMA_KEY,
PLUGIN_KEY,
)];
let entries = object
.iter()
.map(|(key, value)| -> BenchResult<String> {
let entity_id = format!("/{}", escape_json_pointer_segment(key));
let snapshot_content = serde_json::json!({
"path": entity_id,
"value": value,
});
let snapshot_content = serde_json::to_string(&snapshot_content).map_err(serde_err)?;
Ok(format!(
"('{}', '{}', '{}', '{}', '{}')",
escape_sql_string(&entity_id),
escape_sql_string(file_id),
PLUGIN_SCHEMA_KEY,
PLUGIN_KEY,
escape_sql_string(&snapshot_content),
))
})
.collect::<BenchResult<Vec<_>>>()?;
for chunk in entries.chunks(chunk_size) {
statements.push(format!(
"INSERT INTO lix_state (entity_id, file_id, schema_key, plugin_key, snapshot_content) VALUES {}",
chunk.join(", ")
));
}
Ok(statements)
}
async fn verify_file_json_matches(
session: &Session,
file_id: &str,
expected_payload: &[u8],
) -> BenchResult<()> {
let result = session
.execute(
"SELECT data FROM lix_file WHERE id = ?1 LIMIT 1",
&[Value::Text(file_id.to_string())],
)
.await
.map_err(lix_err)?;
let value = result
.statements
.first()
.and_then(|statement| statement.rows.first())
.and_then(|row| row.first())
.ok_or_else(|| format!("query returned no file data row for '{file_id}'"))?;
let actual_bytes = match value {
Value::Blob(bytes) => bytes.clone(),
other => {
return Err(format!(
"expected blob data from lix_file for '{file_id}', got {other:?}"
));
}
};
let actual_json: serde_json::Value =
serde_json::from_slice(&actual_bytes).map_err(serde_err)?;
let expected_json: serde_json::Value =
serde_json::from_slice(expected_payload).map_err(serde_err)?;
if actual_json != expected_json {
return Err(format!(
"lix_file JSON for '{file_id}' did not match expected payload"
));
}
Ok(())
}
fn build_plugin_archive(plugin_wasm_bytes: &[u8]) -> BenchResult<Vec<u8>> {
let options = SimpleFileOptions::default().compression_method(CompressionMethod::Stored);
let mut writer = ZipWriter::new(Cursor::new(Vec::new()));
writer
.start_file("manifest.json", options)
.map_err(io_err)?;
writer
.write_all(PLUGIN_ARCHIVE_MANIFEST_JSON.as_bytes())
.map_err(io_err)?;
writer.start_file("plugin.wasm", options).map_err(io_err)?;
writer.write_all(plugin_wasm_bytes).map_err(io_err)?;
writer
.start_file("schema/json_pointer.json", options)
.map_err(io_err)?;
writer
.write_all(JSON_POINTER_SCHEMA_JSON.as_bytes())
.map_err(io_err)?;
writer
.finish()
.map(|cursor| cursor.into_inner())
.map_err(io_err)
}
async fn scalar_count(session: &Session, sql: &str, params: &[Value]) -> BenchResult<u64> {
let result = session.execute(sql, params).await.map_err(lix_err)?;
let value = result
.statements
.first()
.and_then(|statement| statement.rows.first())
.and_then(|row| row.first())
.ok_or_else(|| format!("query returned no scalar value: {sql}"))?;
match value {
Value::Integer(number) => {
if *number < 0 {
Err(format!("query returned negative count {number}: {sql}"))
} else {
Ok(*number as u64)
}
}
other => Err(format!(
"query returned non-integer scalar {other:?}: {sql}"
)),
}
}
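/// Aggregates run samples into per-phase (write, commit, total)
/// mean/median/min/max summaries.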
fn summarize_timings(samples: &[RunSample]) -> BenchResult<TimingSummary> {
if samples.is_empty() {
return Err("cannot summarize empty samples".to_string());
}
Ok(TimingSummary {
sample_count: samples.len(),
write: summarize_phase(samples.iter().map(|sample| sample.write_ms).collect())?,
commit: summarize_phase(samples.iter().map(|sample| sample.commit_ms).collect())?,
total: summarize_phase(samples.iter().map(|sample| sample.total_ms).collect())?,
})
}
fn summarize_phase(mut values: Vec<f64>) -> BenchResult<PhaseSummary> {
if values.is_empty() {
return Err("cannot summarize empty timing phase".to_string());
}
values.sort_by(|left, right| left.total_cmp(right));
let sum = values.iter().sum::<f64>();
let median_ms = if values.len() % 2 == 0 {
let upper = values.len() / 2;
(values[upper - 1] + values[upper]) / 2.0
} else {
values[values.len() / 2]
};
Ok(PhaseSummary {
mean_ms: sum / values.len() as f64,
median_ms,
min_ms: values[0],
max_ms: values[values.len() - 1],
})
}
fn build_comparison_summary(
file_write_case: &CaseReport,
direct_entity_case: &CaseReport,
) -> BenchResult<ComparisonSummary> {
let file_write_total_mean_ms = file_write_case.timing_ms.total.mean_ms;
let direct_entity_total_mean_ms = direct_entity_case.timing_ms.total.mean_ms;
if direct_entity_total_mean_ms == 0.0 {
return Err("cannot compare cases: direct-entity total mean is zero".to_string());
}
let ratio = file_write_total_mean_ms / direct_entity_total_mean_ms;
Ok(ComparisonSummary {
file_write_total_mean_ms,
direct_entity_total_mean_ms,
file_write_minus_direct_entity_total_mean_ms: file_write_total_mean_ms
- direct_entity_total_mean_ms,
file_write_commit_mean_ms: file_write_case.timing_ms.commit.mean_ms,
direct_entity_commit_mean_ms: direct_entity_case.timing_ms.commit.mean_ms,
file_write_minus_direct_entity_commit_mean_ms: file_write_case.timing_ms.commit.mean_ms
- direct_entity_case.timing_ms.commit.mean_ms,
file_write_write_mean_ms: file_write_case.timing_ms.write.mean_ms,
direct_entity_write_mean_ms: direct_entity_case.timing_ms.write.mean_ms,
file_write_minus_direct_entity_write_mean_ms: file_write_case.timing_ms.write.mean_ms
- direct_entity_case.timing_ms.write.mean_ms,
file_write_to_direct_entity_total_ratio: ratio,
})
}
fn build_flat_json_payload(props: usize) -> BenchResult<Vec<u8>> {
let mut root = serde_json::Map::new();
for index in 0..props {
root.insert(
format!("prop_{index:05}"),
serde_json::Value::String(format!("value_{index:05}")),
);
}
serde_json::to_vec(&serde_json::Value::Object(root)).map_err(serde_err)
}
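/// Builds the json-v2 plugin for the `wasm32-wasip2` target, installing the
/// target via `rustup target add` and retrying the build once when the first
/// attempt fails because the target is missing.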
fn build_plugin_json_v2_wasm(repo_root: &Path) -> BenchResult<PathBuf> {
let manifest_path = repo_root.join("packages/plugin-json-v2/Cargo.toml");
let wasm_path =
repo_root.join("packages/plugin-json-v2/target/wasm32-wasip2/release/plugin_json_v2.wasm");
let build = || {
Command::new("cargo")
.arg("build")
.arg("--manifest-path")
.arg(&manifest_path)
.arg("--target")
.arg("wasm32-wasip2")
.arg("--release")
.output()
.map_err(io_err)
};
let output = build()?;
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
if stderr.contains("wasm32-wasip2")
&& (stderr.contains("target may not be installed")
|| stderr.contains("can't find crate for `core`"))
{
let rustup = Command::new("rustup")
.arg("target")
.arg("add")
.arg("wasm32-wasip2")
.output()
.map_err(io_err)?;
if !rustup.status.success() {
return Err(format!(
"rustup target add wasm32-wasip2 failed:\n{}",
String::from_utf8_lossy(&rustup.stderr)
));
}
let retry = build()?;
if !retry.status.success() {
return Err(format!(
"cargo build for plugin_json_v2 failed after installing wasm32-wasip2:\n{}",
String::from_utf8_lossy(&retry.stderr)
));
}
} else {
return Err(format!(
"cargo build for plugin_json_v2 failed:\n{}",
String::from_utf8_lossy(&output.stderr)
));
}
}
if !wasm_path.exists() {
return Err(format!(
"plugin wasm build succeeded but output was missing at {}",
wasm_path.display()
));
}
Ok(wasm_path)
}
fn render_markdown_report(report: &Report) -> String {
let case_sections = report
.cases
.iter()
.map(render_case_markdown)
.collect::<Vec<_>>()
.join("\n\n");
format!(
"# 10k Entities Benchmark Comparison\n\n\
- Props: {}\n\
- Input bytes: {}\n\
- Direct property rows inside timed direct-write case: {}\n\
- Expected committed json_pointer rows after each case: {}\n\
- Plugin key: `{}`\n\
- Schema key: `{}`\n\
- SQLite mode: `{}`\n\
- Plugin wasm: `{}`\n\n\
## Comparison\n\n\
| metric | file write | direct entities | delta |\n\
| --- | ---: | ---: | ---: |\n\
| write mean ms | {:.3} | {:.3} | {:.3} |\n\
| commit mean ms | {:.3} | {:.3} | {:.3} |\n\
| total mean ms | {:.3} | {:.3} | {:.3} |\n\
| total ratio | {:.3}x | 1.000x | {:.3}x |\n\n\
{}\n",
report.shared_setup.props,
report.shared_setup.input_bytes,
report.shared_setup.direct_property_rows,
report.shared_setup.expected_state_rows_after_commit,
report.shared_setup.plugin_key,
report.shared_setup.schema_key,
report.shared_setup.sqlite_mode,
report.shared_setup.plugin_wasm_path,
report.comparison.file_write_write_mean_ms,
report.comparison.direct_entity_write_mean_ms,
report
.comparison
.file_write_minus_direct_entity_write_mean_ms,
report.comparison.file_write_commit_mean_ms,
report.comparison.direct_entity_commit_mean_ms,
report
.comparison
.file_write_minus_direct_entity_commit_mean_ms,
report.comparison.file_write_total_mean_ms,
report.comparison.direct_entity_total_mean_ms,
report
.comparison
.file_write_minus_direct_entity_total_mean_ms,
report.comparison.file_write_to_direct_entity_total_ratio,
report.comparison.file_write_to_direct_entity_total_ratio,
case_sections,
)
}
fn render_case_markdown(case: &CaseReport) -> String {
let sample_rows = case
.samples
.iter()
.map(|sample| {
format!(
"| {} | {:.3} | {:.3} | {:.3} | {} | {} |",
sample.index,
sample.write_ms,
sample.commit_ms,
sample.total_ms,
sample.committed_state_rows,
sample.file_matches_expected
)
})
.collect::<Vec<_>>()
.join("\n");
let notes = case
.notes
.iter()
.map(|note| format!("- {note}"))
.collect::<Vec<_>>()
.join("\n");
let setup_notes = case
.setup
.setup_outside_timer
.iter()
.map(|note| format!("- {note}"))
.collect::<Vec<_>>()
.join("\n");
format!(
"## {}\n\n\
Timed operation: {}\n\n\
{}\n\n\
Setup outside timer:\n\
{}\n\n\
- Timed rows: {}\n\
- Timed SQL: `{}`\n\
- Verification: {}\n\n\
### Timing\n\n\
| phase | mean ms | median ms | min ms | max ms |\n\
| --- | ---: | ---: | ---: | ---: |\n\
| write | {:.3} | {:.3} | {:.3} | {:.3} |\n\
| commit | {:.3} | {:.3} | {:.3} | {:.3} |\n\
| total | {:.3} | {:.3} | {:.3} | {:.3} |\n\n\
### Samples\n\n\
| run | write ms | commit ms | total ms | committed state rows | file matches expected |\n\
| --- | ---: | ---: | ---: | ---: | --- |\n\
{}\n",
case.title,
case.timed_operation,
notes,
setup_notes,
case.setup.timed_rows,
case.setup.timed_sql,
case.setup.verification,
case.timing_ms.write.mean_ms,
case.timing_ms.write.median_ms,
case.timing_ms.write.min_ms,
case.timing_ms.write.max_ms,
case.timing_ms.commit.mean_ms,
case.timing_ms.commit.median_ms,
case.timing_ms.commit.min_ms,
case.timing_ms.commit.max_ms,
case.timing_ms.total.mean_ms,
case.timing_ms.total.median_ms,
case.timing_ms.total.min_ms,
case.timing_ms.total.max_ms,
sample_rows,
)
}
fn print_summary(report: &Report, report_json_path: &Path, report_markdown_path: &Path) {
println!("10k entities benchmark comparison");
println!(
"props={} input_bytes={} expected_state_rows_after_commit={}",
report.shared_setup.props,
report.shared_setup.input_bytes,
report.shared_setup.expected_state_rows_after_commit
);
for case in &report.cases {
println!("case={} title={}", case.case_id, case.title);
println!(
"write_ms mean={:.3} median={:.3} min={:.3} max={:.3}",
case.timing_ms.write.mean_ms,
case.timing_ms.write.median_ms,
case.timing_ms.write.min_ms,
case.timing_ms.write.max_ms,
);
println!(
"commit_ms mean={:.3} median={:.3} min={:.3} max={:.3}",
case.timing_ms.commit.mean_ms,
case.timing_ms.commit.median_ms,
case.timing_ms.commit.min_ms,
case.timing_ms.commit.max_ms,
);
println!(
"total_ms mean={:.3} median={:.3} min={:.3} max={:.3} samples={}",
case.timing_ms.total.mean_ms,
case.timing_ms.total.median_ms,
case.timing_ms.total.min_ms,
case.timing_ms.total.max_ms,
case.timing_ms.sample_count,
);
}
println!(
"comparison total_mean_delta_ms={:.3} total_ratio={:.3}x",
report
.comparison
.file_write_minus_direct_entity_total_mean_ms,
report.comparison.file_write_to_direct_entity_total_ratio,
);
println!("report_json={}", report_json_path.display());
println!("report_markdown={}", report_markdown_path.display());
}
fn repo_root() -> BenchResult<PathBuf> {
Path::new(env!("CARGO_MANIFEST_DIR"))
.join("../..")
.canonicalize()
.map_err(io_err)
}
fn temp_sqlite_path(label: &str) -> PathBuf {
let nanos = SystemTime::now()
.duration_since(UNIX_EPOCH)
.expect("system time should be after unix epoch")
.as_nanos();
std::env::temp_dir().join(format!("lix-{label}-{nanos}.sqlite"))
}
fn now_unix_ms() -> BenchResult<u128> {
Ok(SystemTime::now()
.duration_since(UNIX_EPOCH)
.map_err(io_err)?
.as_millis())
}
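/// Escapes a value for interpolation into a single-quoted SQL string literal
/// by doubling single quotes, which is the only escaping SQLite requires
/// inside '...' literals.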
fn escape_sql_string(value: &str) -> String {
value.replace('\'', "''")
}
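/// Escapes a JSON object key for use as a JSON Pointer segment per RFC 6901.
/// `~` must be replaced first (`~` -> `~0`, then `/` -> `~1`) so that a key
/// like `a/b` becomes the segment `a~1b` rather than `a~01b`.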
fn escape_json_pointer_segment(segment: &str) -> String {
segment.replace('~', "~0").replace('/', "~1")
}
fn io_err(error: impl std::fmt::Display) -> String {
error.to_string()
}
fn serde_err(error: impl std::fmt::Display) -> String {
error.to_string()
}
fn lix_err(error: LixError) -> String {
format!("{}: {}", error.code, error.description)
}
================================================
FILE: benchmarks/10k-entities/src/sqlite_backend.rs
================================================
use std::path::Path;
use std::str::FromStr;
use std::sync::Arc;
use lix_engine::{
collapse_prepared_batch_for_dialect, LixBackend, LixBackendTransaction, LixError,
PreparedBatch, QueryResult, SqlDialect, TransactionMode, Value,
};
use sqlx::sqlite::{SqliteConnectOptions, SqlitePoolOptions};
use sqlx::{Column, Executor, Row, TypeInfo, ValueRef};
use tokio::sync::OnceCell;
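/// SQLite backend for the 10k-entities benchmark: a sqlx pool capped at one
/// connection and initialized lazily on first use, so every statement
/// serializes through a single connection.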
#[derive(Clone)]
pub struct BenchSqliteBackend {
inner: Arc<BenchSqliteBackendInner>,
}
struct BenchSqliteBackendInner {
filename: String,
pool: OnceCell<sqlx::SqlitePool>,
}
struct BenchSqliteTransaction {
conn: sqlx::pool::PoolConnection<sqlx::Sqlite>,
mode: TransactionMode,
}
impl BenchSqliteBackend {
pub fn file_backed(path: &Path) -> Result<Self, LixError> {
if let Some(parent) = path.parent() {
std::fs::create_dir_all(parent).map_err(|error| LixError {
code: "LIX_ERROR_UNKNOWN".to_string(),
description: format!(
"failed to create sqlite benchmark directory {}: {error}",
parent.display()
),
hint: None,
})?;
}
Ok(Self {
inner: Arc::new(BenchSqliteBackendInner {
filename: path.display().to_string(),
pool: OnceCell::const_new(),
}),
})
}
async fn pool(&self) -> Result<&sqlx::SqlitePool, LixError> {
self.inner
.pool
.get_or_try_init(|| async {
let conn = if self.inner.filename == ":memory:" {
"sqlite::memory:".to_string()
} else if self.inner.filename.starts_with("sqlite:")
|| self.inner.filename.starts_with("file:")
{
self.inner.filename.clone()
} else {
format!("sqlite://{}", self.inner.filename)
};
let options = SqliteConnectOptions::from_str(&conn)
.map_err(|error| LixError {
code: "LIX_ERROR_UNKNOWN".to_string(),
description: error.to_string(),
hint: None,
})?
.create_if_missing(true)
.foreign_keys(true)
.busy_timeout(std::time::Duration::from_secs(30));
SqlitePoolOptions::new()
.max_connections(1)
.connect_with(options)
.await
.map_err(|error| LixError {
code: "LIX_ERROR_UNKNOWN".to_string(),
description: error.to_string(),
hint: None,
})
})
.await
}
}
#[async_trait::async_trait(?Send)]
impl LixBackend for BenchSqliteBackend {
fn dialect(&self) -> SqlDialect {
SqlDialect::Sqlite
}
async fn execute(&self, sql: &str, params: &[Value]) -> Result<QueryResult, LixError> {
let mut transaction = self.begin_transaction(TransactionMode::Deferred).await?;
let result = transaction.execute(sql, params).await;
match result {
Ok(result) => {
transaction.commit().await?;
Ok(result)
}
Err(error) => {
let _ = transaction.rollback().await;
Err(error)
}
}
}
async fn begin_transaction(
&self,
mode: TransactionMode,
) -> Result<Box<dyn LixBackendTransaction + '_>, LixError> {
let pool = self.pool().await?;
let mut conn = pool.acquire().await.map_err(|error| LixError {
code: "LIX_ERROR_UNKNOWN".to_string(),
description: error.to_string(),
hint: None,
})?;
sqlx::query(match mode {
TransactionMode::Read | TransactionMode::Deferred => "BEGIN",
TransactionMode::Write => "BEGIN IMMEDIATE",
})
.execute(&mut *conn)
.await
.map_err(|error| LixError {
code: "LIX_ERROR_UNKNOWN".to_string(),
description: error.to_string(),
hint: None,
})?;
Ok(Box::new(BenchSqliteTransaction { conn, mode }))
}
async fn begin_savepoint(
&self,
_name: &str,
) -> Result<Box<dyn LixBackendTransaction + '_>, LixError> {
self.begin_transaction(TransactionMode::Write).await
}
}
#[async_trait::async_trait(?Send)]
impl LixBackendTransaction for BenchSqliteTransaction {
fn dialect(&self) -> SqlDialect {
SqlDialect::Sqlite
}
fn mode(&self) -> TransactionMode {
self.mode
}
async fn execute(&mut self, sql: &str, params: &[Value]) -> Result<QueryResult, LixError> {
execute_query_with_connection(&mut self.conn, sql, params).await
}
async fn execute_batch(&mut self, batch: &PreparedBatch) -> Result<QueryResult, LixError> {
let collapsed = collapse_prepared_batch_for_dialect(batch, self.dialect())?;
if collapsed.sql.trim().is_empty() {
return Ok(QueryResult {
rows: Vec::new(),
columns: Vec::new(),
});
}
self.conn
.execute(collapsed.sql.as_str())
.await
.map_err(|error| LixError {
code: "LIX_ERROR_UNKNOWN".to_string(),
description: error.to_string(),
hint: None,
})?;
Ok(QueryResult {
rows: Vec::new(),
columns: Vec::new(),
})
}
async fn commit(mut self: Box<Self>) -> Result<(), LixError> {
sqlx::query("COMMIT")
.execute(&mut *self.conn)
.await
.map_err(|error| LixError {
code: "LIX_ERROR_UNKNOWN".to_string(),
description: error.to_string(),
hint: None,
})?;
Ok(())
}
async fn rollback(mut self: Box<Self>) -> Result<(), LixError> {
sqlx::query("ROLLBACK")
.execute(&mut *self.conn)
.await
.map_err(|error| LixError {
code: "LIX_ERROR_UNKNOWN".to_string(),
description: error.to_string(),
hint: None,
})?;
Ok(())
}
}
async fn execute_query_with_connection(
conn: &mut sqlx::pool::PoolConnection<sqlx::Sqlite>,
sql: &str,
params: &[Value],
) -> Result<QueryResult, LixError> {
let mut query = sqlx::query(sql);
for param in params {
query = bind_param_sqlite(query, param);
}
let rows = query
.fetch_all(&mut **conn)
.await
.map_err(|error| LixError {
code: "LIX_ERROR_UNKNOWN".to_string(),
description: error.to_string(),
hint: None,
})?;
let columns = rows
.first()
.map(|row| {
row.columns()
.iter()
.map(|column| column.name().to_string())
.collect::<Vec<_>>()
})
.unwrap_or_default();
let mut result_rows = Vec::with_capacity(rows.len());
for row in rows {
let mut out = Vec::with_capacity(row.columns().len());
for index in 0..row.columns().len() {
out.push(map_sqlite_value(&row, index)?);
}
result_rows.push(out);
}
Ok(QueryResult {
rows: result_rows,
columns,
})
}
fn bind_param_sqlite<'q>(
query: sqlx::query::Query<'q, sqlx::Sqlite, sqlx::sqlite::SqliteArguments<'q>>,
param: &Value,
) -> sqlx::query::Query<'q, sqlx::Sqlite, sqlx::sqlite::SqliteArguments<'q>> {
match param {
Value::Null => query.bind::<Option<i64>>(None),
Value::Boolean(value) => query.bind(*value),
Value::Integer(value) => query.bind(*value),
Value::Real(value) => query.bind(*value),
Value::Text(value) => query.bind(value.clone()),
Value::Blob(value) => query.bind(value.clone()),
Value::Json(value) => query.bind(value.to_string()),
}
}
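/// Converts a SQLite column into a `Value` based on its declared type name,
/// falling back to trying text, integer, real, and blob decodes in turn when
/// the type name is not one of the four standard SQLite names.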
fn map_sqlite_value(row: &sqlx::sqlite::SqliteRow, index: usize) -> Result<Value, LixError> {
let raw = row.try_get_raw(index).map_err(|error| LixError {
code: "LIX_ERROR_UNKNOWN".to_string(),
description: error.to_string(),
hint: None,
})?;
if raw.is_null() {
return Ok(Value::Null);
}
match raw.type_info().name() {
"INTEGER" => row.try_get::<i64, _>(index).map(Value::Integer),
"REAL" => row.try_get::<f64, _>(index).map(Value::Real),
"TEXT" => row.try_get::<String, _>(index).map(Value::Text),
"BLOB" => row.try_get::<Vec<u8>, _>(index).map(Value::Blob),
_ => row
.try_get::<String, _>(index)
.map(Value::Text)
.or_else(|_| row.try_get::<i64, _>(index).map(Value::Integer))
.or_else(|_| row.try_get::<f64, _>(index).map(Value::Real))
.or_else(|_| row.try_get::<Vec<u8>, _>(index).map(Value::Blob)),
}
.map_err(|error| LixError {
code: "LIX_ERROR_UNKNOWN".to_string(),
description: error.to_string(),
hint: None,
})
}
================================================
FILE: benchmarks/10k-entities/src/wasmtime_runtime.rs
================================================
use std::collections::HashMap;
use std::hash::{DefaultHasher, Hash, Hasher};
use std::sync::{Arc, Mutex};
use async_trait::async_trait;
use lix_engine::wasm::{WasmComponentInstance, WasmLimits, WasmRuntime};
use lix_engine::{CanonicalJson, LixError};
use wasmtime::component::{Component, Linker, ResourceTable};
use wasmtime::{Config, Engine, Store};
use wasmtime_wasi::{IoView, WasiCtx, WasiCtxBuilder, WasiView};
mod plugin_bindings {
wasmtime::component::bindgen!({
path: "../../packages/engine/wit",
world: "plugin",
});
}
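// The wire structs below mirror the JSON payloads the engine exchanges with
// plugin components; they are converted to and from the typed bindings that
// `bindgen!` generates from the engine's WIT `plugin` world.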
#[derive(Debug, serde::Deserialize)]
struct WirePluginFile {
id: String,
path: String,
data: Vec<u8>,
}
#[derive(Debug, serde::Deserialize)]
struct WireDetectChangesRequest {
before: Option<WirePluginFile>,
after: WirePluginFile,
state_context: Option<WireDetectStateContext>,
}
#[derive(Debug, serde::Deserialize)]
struct WireDetectStateContext {
active_state: Option<Vec<WireActiveStateRow>>,
}
#[derive(Debug, serde::Deserialize)]
struct WireActiveStateRow {
entity_id: String,
schema_key: Option<String>,
snapshot_content: Option<CanonicalJson>,
file_id: Option<String>,
plugin_key: Option<String>,
version_id: Option<String>,
change_id: Option<String>,
metadata: Option<CanonicalJson>,
created_at: Option<String>,
updated_at: Option<String>,
}
#[derive(Debug, serde::Deserialize)]
struct WirePluginEntityChange {
entity_id: String,
schema_key: String,
snapshot_content: Option<CanonicalJson>,
}
#[derive(Debug, serde::Deserialize)]
struct WireApplyChangesRequest {
file: WirePluginFile,
changes: Vec<WirePluginEntityChange>,
}
#[derive(Debug, serde::Serialize)]
struct WirePluginEntityChangeOutput {
entity_id: String,
schema_key: String,
snapshot_content: Option<CanonicalJson>,
}
pub struct TestWasmtimeRuntime {
engine: Engine,
component_cache: Mutex<HashMap<ComponentCacheKey, Arc<Component>>>,
}
impl TestWasmtimeRuntime {
pub fn new() -> Result<Self, LixError> {
let mut config = Config::new();
config.wasm_component_model(true);
config.async_support(false);
config.consume_fuel(true);
let engine = Engine::new(&config).map_err(|error| LixError {
code: "LIX_ERROR_UNKNOWN".to_string(),
description: format!("Failed to initialize wasmtime engine: {error}"),
hint: None,
})?;
Ok(Self {
engine,
component_cache: Mutex::new(HashMap::new()),
})
}
}
#[derive(Clone, PartialEq, Eq, Hash)]
struct ComponentCacheKey {
wasm_fingerprint: u64,
wasm_len: usize,
}
impl ComponentCacheKey {
fn from_bytes(bytes: &[u8]) -> Self {
Self {
wasm_fingerprint: wasm_fingerprint(bytes),
wasm_len: bytes.len(),
}
}
}
struct TestWasmtimeInstance {
engine: Engine,
component: Arc<Component>,
}
struct WasiState {
table: ResourceTable,
ctx: WasiCtx,
}
impl IoView for WasiState {
fn table(&mut self) -> &mut ResourceTable {
&mut self.table
}
}
impl WasiView for WasiState {
fn ctx(&mut self) -> &mut WasiCtx {
&mut self.ctx
}
}
#[async_trait(?Send)]
impl WasmRuntime for TestWasmtimeRuntime {
async fn init_component(
&self,
bytes: Vec<u8>,
_limits: WasmLimits,
) -> Result<Arc<dyn WasmComponentInstance>, LixError> {
let cache_key = ComponentCacheKey::from_bytes(&bytes);
if let Some(component) = self
.component_cache
.lock()
.expect("component cache mutex poisoned")
.get(&cache_key)
.cloned()
{
return Ok(Arc::new(TestWasmtimeInstance {
engine: self.engine.clone(),
component,
}));
}
let compiled =
Arc::new(
Component::new(&self.engine, &bytes).map_err(|error| LixError {
code: "LIX_ERROR_UNKNOWN".to_string(),
description: format!("Failed to compile wasm component: {error}"),
hint: None,
})?,
);
let component = {
let mut cache = self
.component_cache
.lock()
.expect("component cache mutex poisoned");
cache
.entry(cache_key)
.or_insert_with(|| compiled.clone())
.clone()
};
Ok(Arc::new(TestWasmtimeInstance {
engine: self.engine.clone(),
component,
}))
}
}
#[async_trait(?Send)]
impl WasmComponentInstance for TestWasmtimeInstance {
async fn call(&self, export: &str, input: &[u8]) -> Result<Vec<u8>, LixError> {
let mut store = Store::new(
&self.engine,
WasiState {
table: ResourceTable::new(),
ctx: WasiCtxBuilder::new().build(),
},
);
store.set_fuel(u64::MAX).map_err(|error| LixError {
code: "LIX_ERROR_UNKNOWN".to_string(),
description: format!("Failed to configure wasm fuel: {error}"),
hint: None,
})?;
let mut linker = Linker::new(&self.engine);
wasmtime_wasi::add_to_linker_sync(&mut linker).map_err(|error| LixError {
code: "LIX_ERROR_UNKNOWN".to_string(),
description: format!("Failed to add wasi imports to linker: {error}"),
hint: None,
})?;
let bindings =
plugin_bindings::Plugin::instantiate(&mut store, self.component.as_ref(), &linker)
.map_err(|error| LixError {
code: "LIX_ERROR_UNKNOWN".to_string(),
description: format!("Failed to instantiate wasm component: {error}"),
hint: None,
})?;
match export {
"detect-changes" | "api#detect-changes" => {
let request: WireDetectChangesRequest =
serde_json::from_slice(input).map_err(|error| LixError {
code: "LIX_ERROR_UNKNOWN".to_string(),
description: format!(
"Failed to decode detect-changes request payload: {error}"
),
hint: None,
})?;
let before = request.before.map(wire_file_to_binding);
let after = wire_file_to_binding(request.after);
let state_context = request.state_context.map(wire_state_context_to_binding);
let result = bindings
.lix_plugin_api()
.call_detect_changes(
&mut store,
before.as_ref(),
&after,
state_context.as_ref(),
)
.map_err(|error| LixError {
code: "LIX_ERROR_UNKNOWN".to_string(),
description: format!("Wasm call failed for export '{export}': {error}"),
hint: None,
})?;
match result {
Ok(changes) => {
let wire = changes
.into_iter()
.map(binding_change_to_wire)
.collect::<Result<Vec<_>, _>>()?;
serde_json::to_vec(&wire).map_err(|error| LixError {
code: "LIX_ERROR_UNKNOWN".to_string(),
description: format!(
"Failed to encode detect-changes response payload: {error}"
),
hint: None,
})
}
Err(error) => Err(map_plugin_error(error)),
}
}
"apply-changes" | "api#apply-changes" => {
let request: WireApplyChangesRequest =
serde_json::from_slice(input).map_err(|error| LixError {
code: "LIX_ERROR_UNKNOWN".to_string(),
description: format!(
"Failed to decode apply-changes request payload: {error}"
),
hint: None,
})?;
let file = wire_file_to_binding(request.file);
let changes = request
.changes
.into_iter()
.map(wire_change_to_binding)
.collect::<Vec<_>>();
let result = bindings
.lix_plugin_api()
.call_apply_changes(&mut store, &file, &changes)
.map_err(|error| LixError {
code: "LIX_ERROR_UNKNOWN".to_string(),
description: format!("Wasm call failed for export '{export}': {error}"),
hint: None,
})?;
match result {
Ok(output) => Ok(output),
Err(error) => Err(map_plugin_error(error)),
}
}
other => Err(LixError {
code: "LIX_ERROR_UNKNOWN".to_string(),
description: format!("Unsupported export '{other}' for TestWasmtimeRuntime"),
hint: None,
}),
}
}
}
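// Cheap, non-cryptographic fingerprint used only as a component-cache key;
// `ComponentCacheKey` also stores the byte length to make accidental
// collisions less likely.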
fn wasm_fingerprint(bytes: &[u8]) -> u64 {
let mut hasher = DefaultHasher::new();
bytes.hash(&mut hasher);
hasher.finish()
}
fn wire_file_to_binding(file: WirePluginFile) -> plugin_bindings::exports::lix::plugin::api::File {
plugin_bindings::exports::lix::plugin::api::File {
id: file.id,
path: file.path,
data: file.data,
}
}
fn wire_change_to_binding(
change: WirePluginEntityChange,
) -> plugin_bindings::exports::lix::plugin::api::EntityChange {
plugin_bindings::exports::lix::plugin::api::EntityChange {
entity_id: change.entity_id,
schema_key: change.schema_key,
snapshot_content: change.snapshot_content.map(Into::into),
}
}
fn wire_state_context_to_binding(
context: WireDetectStateContext,
) -> plugin_bindings::exports::lix::plugin::api::DetectStateContext {
plugin_bindings::exports::lix::plugin::api::DetectStateContext {
active_state: context.active_state.map(|rows| {
rows.into_iter()
.map(wire_active_state_row_to_binding)
.collect::<Vec<_>>()
}),
}
}
fn wire_active_state_row_to_binding(
row: WireActiveStateRow,
) -> plugin_bindings::exports::lix::plugin::api::ActiveStateRow {
plugin_bindings::exports::lix::plugin::api::ActiveStateRow {
entity_id: row.entity_id,
schema_key: row.schema_key,
snapshot_content: row.snapshot_content.map(Into::into),
file_id: row.file_id,
plugin_key: row.plugin_key,
version_id: row.version_id,
change_id: row.change_id,
metadata: row.metadata.map(Into::into),
created_at: row.created_at,
updated_at: row.updated_at,
}
}
fn binding_change_to_wire(
change: plugin_bindings::exports::lix::plugin::api::EntityChange,
) -> Result<WirePluginEntityChangeOutput, LixError> {
Ok(WirePluginEntityChangeOutput {
entity_id: change.entity_id,
schema_key: change.schema_key,
snapshot_content: change
.snapshot_content
.map(CanonicalJson::from_text)
.transpose()?,
})
}
fn map_plugin_error(error: plugin_bindings::exports::lix::plugin::api::PluginError) -> LixError {
match error {
plugin_bindings::exports::lix::plugin::api::PluginError::InvalidInput(message) => {
LixError {
code: "LIX_ERROR_UNKNOWN".to_string(),
description: format!("Plugin invalid-input error: {message}"),
hint: None,
}
}
plugin_bindings::exports::lix::plugin::api::PluginError::Internal(message) => LixError {
code: "LIX_ERROR_UNKNOWN".to_string(),
description: format!("Plugin internal error: {message}"),
hint: None,
},
}
}
================================================
FILE: benchmarks/engine2-json-pointer/Cargo.toml
================================================
[package]
name = "engine2_json_pointer_benchmark"
version = "0.1.0"
edition = "2021"
publish = false
[dependencies]
async-trait = "0.1"
clap = { version = "4.5.31", features = ["derive"] }
lix_rs_sdk = { path = "../../packages/rs-sdk" }
rusqlite = { version = "0.32", features = ["bundled"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
tokio = { version = "1", features = ["rt"] }
================================================
FILE: benchmarks/engine2-json-pointer/README.md
================================================
# Engine2 JSON Pointer Benchmark
This benchmark exercises engine2 end to end on a fresh on-disk SQLite-backed KV
store.
The timed case measures direct insertion of `json_pointer` semantic rows through
`lix_state`:
- initialize engine2 storage
- open the generated main version
- register `packages/plugin-json-v2/schema/json_pointer.json`
- insert `N` JSON pointer rows in chunked SQL statements (statement shape shown below)
- verify the committed row count through the normal SQL surface
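Each timed batch is a single multi-row `INSERT` against `lix_state`. Roughly
(values illustrative; see `build_insert_batches` in `src/main.rs` for the
exact construction):

```sql
INSERT INTO lix_state (entity_id, schema_key, file_id, snapshot_content, global, untracked)
VALUES
  ('/prop_0', 'json_pointer', 'bench.json', lix_json('{"path":"/prop_0","value":{"index":0,"label":"value-0"}}'), false, false),
  ('/prop_1', 'json_pointer', 'bench.json', lix_json('{"path":"/prop_1","value":{"index":1,"label":"value-1"}}'), false, false);
```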
## Usage
```bash
cargo run --release -p engine2_json_pointer_benchmark -- \
--rows 10000 \
--warmups 1 \
--iterations 5 \
--output-dir artifact/benchmarks/engine2-json-pointer
```
Fast CI smoke run:
```bash
cargo run --release -p engine2_json_pointer_benchmark -- \
--rows 10000 \
--warmups 0 \
--iterations 1 \
--output-dir artifact/benchmarks/engine2-json-pointer
```
================================================
FILE: benchmarks/engine2-json-pointer/src/main.rs
================================================
use clap::Parser;
use lix_rs_sdk::{open_lix, ExecuteResult, Lix, LixError, OpenLixOptions, Value};
use serde::Serialize;
use std::fs;
use std::path::PathBuf;
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
use tokio::runtime::Builder;
mod sqlite_backend;
use sqlite_backend::Engine2SqliteBackend;
const DEFAULT_OUTPUT_DIR: &str = "artifact/benchmarks/engine2-json-pointer";
const DEFAULT_ROWS: usize = 10_000;
const DEFAULT_WARMUPS: usize = 1;
const DEFAULT_ITERATIONS: usize = 5;
const DEFAULT_CHUNK_SIZE: usize = 500;
const JSON_POINTER_SCHEMA_JSON: &str =
include_str!("../../../packages/plugin-json-v2/schema/json_pointer.json");
type BenchResult<T> = Result<T, String>;
#[derive(Parser, Debug)]
#[command(
name = "engine2-json-pointer-benchmark",
about = "Benchmark engine2 json_pointer writes on an on-disk SQLite KV backend"
)]
struct Args {
#[arg(long, default_value_t = DEFAULT_ROWS)]
rows: usize,
#[arg(long, default_value_t = DEFAULT_WARMUPS)]
warmups: usize,
#[arg(long, default_value_t = DEFAULT_ITERATIONS)]
iterations: usize,
#[arg(long, default_value_t = DEFAULT_CHUNK_SIZE)]
chunk_size: usize,
#[arg(long, default_value = DEFAULT_OUTPUT_DIR)]
output_dir: PathBuf,
#[arg(long)]
keep_databases: bool,
}
#[derive(Debug, Serialize)]
struct Report {
generated_at_unix_ms: u128,
benchmark: &'static str,
rows: usize,
chunk_size: usize,
warmups: Vec<RunSample>,
samples: Vec<RunSample>,
timing_ms: TimingSummary,
}
#[derive(Debug, Clone, Serialize)]
struct RunSample {
index: usize,
sqlite_path: String,
insert_ms: f64,
verify_ms: f64,
total_ms: f64,
committed_rows: usize,
}
#[derive(Debug, Serialize)]
struct TimingSummary {
sample_count: usize,
insert: PhaseSummary,
verify: PhaseSummary,
total: PhaseSummary,
}
#[derive(Debug, Serialize)]
struct PhaseSummary {
mean_ms: f64,
median_ms: f64,
min_ms: f64,
max_ms: f64,
}
fn main() {
if let Err(error) = run() {
eprintln!("{error}");
std::process::exit(1);
}
}
fn run() -> BenchResult<()> {
let args = Args::parse();
fs::create_dir_all(&args.output_dir).map_err(|error| {
format!(
"failed to create output directory {}: {error}",
args.output_dir.display()
)
})?;
let runtime = Builder::new_current_thread()
.enable_all()
.build()
.map_err(|error| format!("failed to create tokio runtime: {error}"))?;
let mut warmups = Vec::new();
for index in 0..args.warmups {
warmups.push(runtime.block_on(run_insert_case(&args, "warmup", index))?);
}
let mut samples = Vec::new();
for index in 0..args.iterations {
samples.push(runtime.block_on(run_insert_case(&args, "sample", index))?);
}
let report = Report {
generated_at_unix_ms: unix_ms(),
benchmark: "engine2_json_pointer_insert",
rows: args.rows,
chunk_size: args.chunk_size,
timing_ms: summarize_samples(&samples),
warmups,
samples,
};
let json_path = args.output_dir.join("report.json");
let md_path = args.output_dir.join("report.md");
fs::write(
&json_path,
serde_json::to_string_pretty(&report)
.map_err(|error| format!("failed to serialize report: {error}"))?,
)
.map_err(|error| format!("failed to write {}: {error}", json_path.display()))?;
fs::write(&md_path, render_markdown_report(&report))
.map_err(|error| format!("failed to write {}: {error}", md_path.display()))?;
println!("wrote {}", json_path.display());
println!("wrote {}", md_path.display());
println!(
"insert_{}: mean {:.2}ms, median {:.2}ms",
args.rows, report.timing_ms.insert.mean_ms, report.timing_ms.insert.median_ms
);
Ok(())
}
async fn run_insert_case(args: &Args, label: &str, index: usize) -> BenchResult<RunSample> {
let db_path = args
.output_dir
.join(format!("{label}-{index}-{}.sqlite", std::process::id()));
let cleanup = CleanupDatabase {
path: db_path.clone(),
keep: args.keep_databases,
};
cleanup.remove_existing()?;
let backend = Engine2SqliteBackend::file_backed(&db_path).map_err(display_lix_error)?;
let lix = open_lix(OpenLixOptions {
backend: Some(Box::new(backend)),
})
.await
.map_err(display_lix_error)?;
ensure_benchmark_file_descriptor(&lix).await?;
register_json_pointer_schema(&lix).await?;
let started = Instant::now();
let insert_started = Instant::now();
for sql in build_insert_batches(args.rows, args.chunk_size)? {
let result = lix.execute(&sql, &[]).await.map_err(display_lix_error)?;
let ExecuteResult::AffectedRows(affected_rows) = result else {
return Err("json pointer insert should return affected rows".to_string());
};
if affected_rows == 0 {
return Err("json pointer insert unexpectedly affected zero rows".to_string());
}
}
let insert_elapsed = insert_started.elapsed();
let verify_started = Instant::now();
let committed_rows = count_json_pointer_rows(&lix).await?;
let verify_elapsed = verify_started.elapsed();
if committed_rows != args.rows {
return Err(format!(
"committed json_pointer row count mismatch: expected {}, got {committed_rows}",
args.rows
));
}
let total_elapsed = started.elapsed();
let sample = RunSample {
index,
sqlite_path: db_path.display().to_string(),
insert_ms: millis(insert_elapsed),
verify_ms: millis(verify_elapsed),
total_ms: millis(total_elapsed),
committed_rows,
};
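// Dropping the guard here removes the database files (unless
// --keep-databases was passed) before the sample is returned.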
drop(cleanup);
Ok(sample)
}
async fn register_json_pointer_schema(lix: &Lix) -> BenchResult<()> {
let schema = sql_string(JSON_POINTER_SCHEMA_JSON);
let sql = format!(
"INSERT INTO lix_registered_schema (value, lixcol_global, lixcol_untracked) \
VALUES (lix_json('{schema}'), true, true)"
);
match lix.execute(&sql, &[]).await.map_err(display_lix_error)? {
ExecuteResult::AffectedRows(1) => Ok(()),
other => Err(format!(
"schema registration returned unexpected result: {other:?}"
)),
}
}
async fn ensure_benchmark_file_descriptor(lix: &Lix) -> BenchResult<()> {
let snapshot = serde_json::json!({
"id": "bench.json",
"directory_id": null,
"name": "bench",
"extension": "json",
"hidden": false
});
let sql = format!(
"INSERT INTO lix_state (\
entity_id, schema_key, file_id, snapshot_content, global, untracked\
) VALUES (\
'bench.json', 'lix_file_descriptor', NULL, lix_json('{}'), false, false\
)",
sql_string(&snapshot.to_string())
);
match lix.execute(&sql, &[]).await.map_err(display_lix_error)? {
ExecuteResult::AffectedRows(1) => Ok(()),
other => Err(format!(
"file descriptor insert returned unexpected result: {other:?}"
)),
}
}
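/// Builds multi-row `INSERT INTO lix_state ... VALUES ...` statements with at
/// most `chunk_size` rows each, so the timed loop executes
/// `ceil(row_count / chunk_size)` statements instead of one per row.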
fn build_insert_batches(row_count: usize, chunk_size: usize) -> BenchResult<Vec<String>> {
if chunk_size == 0 {
return Err("chunk_size must be greater than zero".to_string());
}
let mut batches = Vec::new();
let mut next = 0;
while next < row_count {
let end = (next + chunk_size).min(row_count);
let mut sql = String::from(
"INSERT INTO lix_state (\
entity_id, schema_key, file_id, snapshot_content, global, untracked\
) VALUES ",
);
for index in next..end {
if index > next {
sql.push(',');
}
let pointer = format!("/prop_{index}");
let snapshot = serde_json::json!({
"path": pointer,
"value": {
"index": index,
"label": format!("value-{index}")
}
});
sql.push_str(&format!(
"('{}','json_pointer','bench.json',lix_json('{}'),false,false)",
sql_string(&pointer),
sql_string(&snapshot.to_string())
));
}
batches.push(sql);
next = end;
}
Ok(batches)
}
async fn count_json_pointer_rows(lix: &Lix) -> BenchResult<usize> {
let result = lix
.execute(
"SELECT COUNT(*) \
FROM lix_state \
WHERE schema_key = 'json_pointer' \
AND file_id = 'bench.json' \
AND snapshot_content IS NOT NULL",
&[],
)
.await
.map_err(display_lix_error)?;
let ExecuteResult::Rows(rows) = result else {
return Err("COUNT query should return rows".to_string());
};
let Some(row) = rows.rows().first() else {
return Err("COUNT query returned no rows".to_string());
};
match row.values().first() {
Some(Value::Integer(value)) => {
usize::try_from(*value).map_err(|_| format!("COUNT returned negative value: {value}"))
}
other => Err(format!("COUNT returned unexpected value: {other:?}")),
}
}
fn summarize_samples(samples: &[RunSample]) -> TimingSummary {
TimingSummary {
sample_count: samples.len(),
insert: summarize_phase(samples.iter().map(|sample| sample.insert_ms).collect()),
verify: summarize_phase(samples.iter().map(|sample| sample.verify_ms).collect()),
total: summarize_phase(samples.iter().map(|sample| sample.total_ms).collect()),
}
}
fn summarize_phase(mut values: Vec<f64>) -> PhaseSummary {
if values.is_empty() {
return PhaseSummary {
mean_ms: 0.0,
median_ms: 0.0,
min_ms: 0.0,
max_ms: 0.0,
};
}
values.sort_by(|left, right| left.total_cmp(right));
let sum = values.iter().sum::<f64>();
let midpoint = values.len() / 2;
let median = if values.len() % 2 == 0 {
(values[midpoint - 1] + values[midpoint]) / 2.0
} else {
values[midpoint]
};
PhaseSummary {
mean_ms: sum / values.len() as f64,
median_ms: median,
min_ms: values[0],
max_ms: values[values.len() - 1],
}
}
fn render_markdown_report(report: &Report) -> String {
format!(
"# Engine2 JSON Pointer Benchmark\n\n\
- Rows: `{}`\n\
- Chunk size: `{}`\n\
- Samples: `{}`\n\n\
| Phase | Mean ms | Median ms | Min ms | Max ms |\n\
| --- | ---: | ---: | ---: | ---: |\n\
| Insert | {:.2} | {:.2} | {:.2} | {:.2} |\n\
| Verify | {:.2} | {:.2} | {:.2} | {:.2} |\n\
| Total | {:.2} | {:.2} | {:.2} | {:.2} |\n",
report.rows,
report.chunk_size,
report.timing_ms.sample_count,
report.timing_ms.insert.mean_ms,
report.timing_ms.insert.median_ms,
report.timing_ms.insert.min_ms,
report.timing_ms.insert.max_ms,
report.timing_ms.verify.mean_ms,
report.timing_ms.verify.median_ms,
report.timing_ms.verify.min_ms,
report.timing_ms.verify.max_ms,
report.timing_ms.total.mean_ms,
report.timing_ms.total.median_ms,
report.timing_ms.total.min_ms,
report.timing_ms.total.max_ms,
)
}
fn sql_string(value: &str) -> String {
value.replace('\'', "''")
}
fn display_lix_error(error: LixError) -> String {
format!("{}: {}", error.code, error.description)
}
fn millis(duration: Duration) -> f64 {
duration.as_secs_f64() * 1000.0
}
fn unix_ms() -> u128 {
SystemTime::now()
.duration_since(UNIX_EPOCH)
.map(|duration| duration.as_millis())
.unwrap_or_default()
}
struct CleanupDatabase {
path: PathBuf,
keep: bool,
}
impl CleanupDatabase {
fn remove_existing(&self) -> BenchResult<()> {
for path in self.paths() {
if path.exists() {
fs::remove_file(&path)
.map_err(|error| format!("failed to remove {}: {error}", path.display()))?;
}
}
Ok(())
}
fn paths(&self) -> Vec<PathBuf> {
["", "-wal", "-shm", "-journal"]
.into_iter()
.map(|suffix| PathBuf::from(format!("{}{}", self.path.display(), suffix)))
.collect()
}
}
impl Drop for CleanupDatabase {
fn drop(&mut self) {
if self.keep {
return;
}
for path in self.paths() {
let _ = fs::remove_file(path);
}
}
}
================================================
FILE: benchmarks/engine2-json-pointer/src/sqlite_backend.rs
================================================
use async_trait::async_trait;
use lix_rs_sdk::{
KvPair, KvScanRange, LixBackend, LixBackendTransaction, LixError, TransactionBeginMode,
};
use rusqlite::{params, Connection, OptionalExtension};
use std::path::Path;
use std::sync::{Arc, Mutex, MutexGuard};
const KV_TABLE: &str = "lix_engine2_kv";
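/// Minimal engine2 KV backend on a single rusqlite connection behind a mutex.
/// Transactions share the same connection, so at most one transaction is
/// meaningfully active at a time; that is fine for this single-threaded
/// benchmark.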
#[derive(Clone)]
pub struct Engine2SqliteBackend {
conn: Arc<Mutex<Connection>>,
}
pub struct Engine2SqliteTransaction {
conn: Arc<Mutex<Connection>>,
finalized: bool,
mode: TransactionBeginMode,
}
impl Engine2SqliteBackend {
pub fn file_backed(path: &Path) -> Result<Self, LixError> {
if let Some(parent) = path.parent() {
std::fs::create_dir_all(parent).map_err(|error| {
LixError::new(
"LIX_ERROR_UNKNOWN",
format!(
"failed to create sqlite benchmark directory {}: {error}",
parent.display()
),
)
})?;
}
let conn = Connection::open(path).map_err(sqlite_error)?;
configure_connection(&conn)?;
ensure_kv_table(&conn)?;
Ok(Self {
conn: Arc::new(Mutex::new(conn)),
})
}
fn lock_conn(&self) -> Result<MutexGuard<'_, Connection>, LixError> {
self.conn
.lock()
.map_err(|_| LixError::new("LIX_ERROR_UNKNOWN", "sqlite benchmark mutex poisoned"))
}
}
#[async_trait]
impl LixBackend for Engine2SqliteBackend {
async fn begin_transaction(
&self,
mode: TransactionBeginMode,
) -> Result<Box<dyn LixBackendTransaction + Send + Sync + 'static>, LixError> {
{
let conn = self.lock_conn()?;
conn.execute_batch(match mode {
TransactionBeginMode::Read | TransactionBeginMode::Deferred => "BEGIN TRANSACTION",
TransactionBeginMode::Write => "BEGIN IMMEDIATE",
})
.map_err(sqlite_error)?;
}
Ok(Box::new(Engine2SqliteTransaction {
conn: Arc::clone(&self.conn),
finalized: false,
mode,
}))
}
async fn kv_get(&self, namespace: &str, key: &[u8]) -> Result<Option<Vec<u8>>, LixError> {
let conn = self.lock_conn()?;
kv_get_with_connection(&conn, namespace, key)
}
async fn kv_scan(
&self,
namespace: &str,
range: KvScanRange,
limit: Option<usize>,
) -> Result<Vec<KvPair>, LixError> {
let conn = self.lock_conn()?;
kv_scan_with_connection(&conn, namespace, &range, limit)
}
}
#[async_trait]
impl LixBackendTransaction for Engine2SqliteTransaction {
fn mode(&self) -> TransactionBeginMode {
self.mode
}
async fn kv_get(&mut self, namespace: &str, key: &[u8]) -> Result<Option<Vec<u8>>, LixError> {
let conn = self.lock_conn()?;
kv_get_with_connection(&conn, namespace, key)
}
async fn kv_scan(
&mut self,
namespace: &str,
range: KvScanRange,
limit: Option<usize>,
) -> Result<Vec<KvPair>, LixError> {
let conn = self.lock_conn()?;
kv_scan_with_connection(&conn, namespace, &range, limit)
}
async fn kv_put(&mut self, namespace: &str, key: &[u8], value: &[u8]) -> Result<(), LixError> {
let conn = self.lock_conn()?;
conn.execute(
&format!(
"INSERT INTO {KV_TABLE} (namespace, key, value) VALUES (?1, ?2, ?3) \
ON CONFLICT(namespace, key) DO UPDATE SET value = excluded.value"
),
params![namespace, key, value],
)
.map_err(sqlite_error)?;
Ok(())
}
async fn kv_delete(&mut self, namespace: &str, key: &[u8]) -> Result<(), LixError> {
let conn = self.lock_conn()?;
conn.execute(
&format!("DELETE FROM {KV_TABLE} WHERE namespace = ?1 AND key = ?2"),
params![namespace, key],
)
.map_err(sqlite_error)?;
Ok(())
}
async fn commit(mut self: Box<Self>) -> Result<(), LixError> {
self.lock_conn()?
.execute_batch("COMMIT")
.map_err(sqlite_error)?;
self.finalized = true;
Ok(())
}
async fn rollback(mut self: Box<Self>) -> Result<(), LixError> {
self.lock_conn()?
.execute_batch("ROLLBACK")
.map_err(sqlite_error)?;
self.finalized = true;
Ok(())
}
}
impl Engine2SqliteTransaction {
fn lock_conn(&self) -> Result<MutexGuard<'_, Connection>, LixError> {
self.conn
.lock()
.map_err(|_| LixError::new("LIX_ERROR_UNKNOWN", "sqlite benchmark mutex poisoned"))
}
}
impl Drop for Engine2SqliteTransaction {
fn drop(&mut self) {
if self.finalized || std::thread::panicking() {
return;
}
if let Ok(conn) = self.conn.lock() {
let _ = conn.execute_batch("ROLLBACK");
}
}
}
fn configure_connection(conn: &Connection) -> Result<(), LixError> {
conn.execute_batch(
"PRAGMA journal_mode = WAL;\
PRAGMA synchronous = NORMAL;\
PRAGMA temp_store = MEMORY;",
)
.map_err(sqlite_error)?;
Ok(())
}
fn ensure_kv_table(conn: &Connection) -> Result<(), LixError> {
conn.execute_batch(&format!(
"CREATE TABLE IF NOT EXISTS {KV_TABLE} (\
namespace TEXT NOT NULL,\
key BLOB NOT NULL,\
value BLOB NOT NULL,\
PRIMARY KEY(namespace, key)\
) WITHOUT ROWID"
))
.map_err(sqlite_error)?;
Ok(())
}
fn kv_get_with_connection(
conn: &Connection,
namespace: &str,
key: &[u8],
) -> Result<Option<Vec<u8>>, LixError> {
conn.query_row(
&format!("SELECT value FROM {KV_TABLE} WHERE namespace = ?1 AND key = ?2"),
params![namespace, key],
|row| row.get::<_, Vec<u8>>(0),
)
.optional()
.map_err(sqlite_error)
}
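/// Scans the KV table for a prefix or key range. Prefix scans read the whole
/// namespace ordered by key and filter in Rust, which is acceptable at
/// benchmark scale; a production backend would more likely translate the
/// prefix into a SQL key range.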
fn kv_scan_with_connection(
conn: &Connection,
namespace: &str,
range: &KvScanRange,
limit: Option<usize>,
) -> Result<Vec<KvPair>, LixError> {
let mut pairs = match range {
KvScanRange::Prefix(prefix) => {
let mut stmt = conn
.prepare(&format!(
"SELECT key, value FROM {KV_TABLE} WHERE namespace = ?1 ORDER BY key"
))
.map_err(sqlite_error)?;
let rows = stmt
.query_map(params![namespace], |row| {
Ok((row.get::<_, Vec<u8>>(0)?, row.get::<_, Vec<u8>>(1)?))
})
.map_err(sqlite_error)?;
collect_matching_rows(rows, |key| key.starts_with(prefix))?
}
KvScanRange::Range { start, end } => {
let mut stmt = conn
.prepare(&format!(
"SELECT key, value FROM {KV_TABLE} \
WHERE namespace = ?1 AND key >= ?2 AND key < ?3 \
ORDER BY key"
))
.map_err(sqlite_error)?;
let rows = stmt
.query_map(params![namespace, start, end], |row| {
Ok((row.get::<_, Vec<u8>>(0)?, row.get::<_, Vec<u8>>(1)?))
})
.map_err(sqlite_error)?;
collect_matching_rows(rows, |_| true)?
}
};
if let Some(limit) = limit {
pairs.truncate(limit);
}
Ok(pairs)
}
fn collect_matching_rows<F>(
rows: rusqlite::MappedRows<
'_,
impl FnMut(&rusqlite::Row<'_>) -> rusqlite::Result<(Vec<u8>, Vec<u8>)>,
>,
mut matches: F,
) -> Result<Vec<KvPair>, LixError>
where
F: FnMut(&[u8]) -> bool,
{
let mut pairs = Vec::new();
for row in rows {
let (key, value) = row.map_err(sqlite_error)?;
if matches(&key) {
pairs.push(KvPair::new(key, value));
}
}
Ok(pairs)
}
fn sqlite_error(error: rusqlite::Error) -> LixError {
LixError::new(
"LIX_ERROR_UNKNOWN",
format!("sqlite benchmark error: {error}"),
)
}
================================================
FILE: benchmarks/git-compare/Cargo.toml
================================================
[package]
name = "git_compare_benchmark"
version = "0.1.0"
edition = "2021"
publish = false
[dependencies]
clap = { version = "4.5.31", features = ["derive"] }
lix_engine = { path = "../../packages/engine" }
lix_rs_sdk = { path = "../../packages/rs-sdk" }
pollster = "0.4"
serde = { version = "1", features = ["derive"] }
serde_json = "1"
================================================
FILE: benchmarks/git-compare/README.md
================================================
# Git Compare Benchmark
This benchmark answers a narrower question than `exp git-replay`:
- a repo already exists
- a user changes files
- the user finalizes one commit
- how long do `write` and `commit` take for Git vs Lix?
It cuts replay noise by:
- selecting real first-parent commits from a production repo as workloads
- building Git and Lix parent-state templates outside the timed section
- timing only `apply workload` and `finalize commit`
- interleaving Git and Lix runs
- verifying the final Git tree and final Lix `lix_file` state after each trial
## What It Measures
For each selected workload commit:
- `write_ms`
- Git: apply the commit's file mutations into a clean checkout
- Lix: apply equivalent `lix_file` mutations inside an open transaction
- `commit_ms`
- Git: `git add -A` + `git commit`
- Lix: `COMMIT`
- `total_ms`
- end-to-end write + commit
## Usage
```bash
cargo run --release -p git_compare_benchmark -- \
--repo-path /Users/samuel/git-repos/paraglide-js \
--output-dir artifact/benchmarks/git-compare/paraglide-js \
--max-workloads 5 \
--runs 5 \
--warmups 1 \
--force
```
With the benchmark-tuned SQLite settings:
```bash
cargo run --release -p git_compare_benchmark -- \
--repo-path /Users/samuel/git-repos/paraglide-js \
--output-dir artifact/benchmarks/git-compare/paraglide-js-tuned \
--sqlite-benchmark-tuned \
--max-workloads 5 \
--runs 5 \
--warmups 1 \
--force
```
Reports are written to the chosen output directory:
- `report.json`
- `report.md`
## Notes
- The current seed mode is hybrid on purpose:
- Git uses a local parent checkout so the baseline tree is exact.
- Lix seeds a fresh DB from the parent tree snapshot outside the timer.
- Lix path seeding percent-encodes Git path characters that `lix_file` does not currently accept raw, so the benchmark still exercises the same file set even when the repo contains paths like `+layout.svelte` or `[locale]`.
- Workloads are filtered to regular-file content changes. Mode-only or symlink-heavy commits are skipped because the benchmark exercises `lix_file` as `path + data`, not full Git file mode semantics.
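- Git and Lix trials are interleaved with alternating order per iteration (`git, lix`, then `lix, git`), so neither system consistently benefits from running second on a warm machine.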
================================================
FILE: benchmarks/git-compare/src/main.rs
================================================
use clap::Parser;
use lix_engine::{
boot as boot_engine, BootArgs as EngineConfig, ExecuteOptions, Session, SessionTransaction,
Value,
};
use lix_rs_sdk::{SqliteBackend, WasmRuntime, WasmtimeRuntime};
use serde::Serialize;
use std::collections::{BTreeMap, BTreeSet, HashMap};
use std::fs;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use std::sync::Arc;
use std::time::Instant;
#[cfg(unix)]
use std::os::unix::fs::PermissionsExt;
const NULL_OID: &str = "0000000000000000000000000000000000000000";
type DynError = Box<dyn std::error::Error + Send + Sync>;
type DynResult<T> = Result<T, DynError>;
#[derive(Parser, Debug, Clone)]
#[command(about = "Benchmark write+commit latency for Git vs Lix on real repo workloads")]
struct Args {
#[arg(long)]
repo_path: PathBuf,
#[arg(long, default_value = "HEAD")]
head_ref: String,
#[arg(long = "commit-sha")]
commit_shas: Vec<String>,
#[arg(long, default_value = "artifact/benchmarks/git-compare")]
output_dir: PathBuf,
#[arg(long, default_value_t = 5)]
max_workloads: usize,
#[arg(long, default_value_t = 200)]
scan_commits: usize,
#[arg(long, default_value_t = 5)]
runs: usize,
#[arg(long, default_value_t = 1)]
warmups: usize,
#[arg(long, default_value_t = 1)]
min_changed_paths: usize,
#[arg(long, default_value_t = 25)]
max_changed_paths: usize,
#[arg(long)]
skip_verify: bool,
#[arg(long)]
keep_temp: bool,
#[arg(long)]
force: bool,
}
#[derive(Clone)]
struct CommitInfo {
sha: String,
parents: Vec<String>,
subject: String,
}
#[derive(Clone)]
struct PatchSet {
changes: Vec<RawChange>,
blobs: HashMap<String, Vec<u8>>,
}
#[derive(Clone)]
struct RawChange {
status: char,
old_mode: String,
new_mode: String,
old_oid: String,
new_oid: String,
old_path: Option<String>,
new_path: Option<String>,
}
#[derive(Clone)]
enum OperationKind {
Add,
Modify,
Delete,
Rename,
Copy,
}
#[derive(Clone)]
struct FileOperation {
kind: OperationKind,
old_path: Option<String>,
new_path: Option<String>,
new_bytes: Option<Vec<u8>>,
new_executable: bool,
}
#[derive(Clone)]
struct Workload {
commit_sha: String,
parent_sha: String,
subject: String,
changed_paths: usize,
child_tree_sha: String,
operations: Vec<FileOperation>,
expected_files: BTreeMap<String, Vec<u8>>,
}
#[derive(Clone)]
struct LixTemplate {
seed_rows: Vec<LixSeedRow>,
path_to_id: BTreeMap<String, String>,
}
#[derive(Clone)]
struct LixSeedRow {
id: String,
path: String,
data: Vec<u8>,
}
#[derive(Clone)]
struct PreparedWorkload {
workload: Workload,
git_template_dir: PathBuf,
lix_template: LixTemplate,
}
#[derive(Serialize)]
struct Report {
repo_path: String,
head_ref: String,
head_commit: String,
config: ConfigReport,
workload_selection: WorkloadSelectionReport,
template_seed: TemplateSeedReport,
workloads: Vec<WorkloadReport>,
overall: OverallReport,
}
#[derive(Serialize)]
struct ConfigReport {
runs: usize,
warmups: usize,
verify_state: bool,
min_changed_paths: usize,
max_changed_paths: usize,
max_workloads: usize,
scan_commits: usize,
}
#[derive(Serialize)]
struct WorkloadSelectionReport {
selected_count: usize,
skipped: Vec<SkippedCandidate>,
}
#[derive(Serialize)]
struct SkippedCandidate {
commit_sha: String,
subject: String,
reason: String,
}
#[derive(Serialize)]
struct TemplateSeedReport {
mode: &'static str,
}
#[derive(Serialize)]
struct WorkloadReport {
commit_sha: String,
parent_sha: String,
subject: String,
changed_paths: usize,
child_tree_sha: String,
git: MetricReport,
lix: MetricReport,
total_ratio_lix_over_git: f64,
total_pct_less_time_for_lix: f64,
trials: Vec<TrialResult>,
}
#[derive(Serialize)]
struct OverallReport {
git: MetricReport,
lix: MetricReport,
total_ratio_lix_over_git: f64,
total_pct_less_time_for_lix: f64,
}
#[derive(Serialize, Clone)]
struct MetricReport {
write_ms: SummaryStats,
commit_ms: SummaryStats,
total_ms: SummaryStats,
}
#[derive(Serialize, Clone, Default)]
struct SummaryStats {
samples: usize,
min_ms: f64,
p50_ms: f64,
p95_ms: f64,
mean_ms: f64,
max_ms: f64,
}
#[derive(Serialize, Clone)]
struct TrialResult {
workload_commit_sha: String,
system: &'static str,
iteration: usize,
warmup: bool,
write_ms: f64,
commit_ms: f64,
total_ms: f64,
verified: bool,
}
fn main() {
if let Err(error) = run_with_large_stack(real_main) {
eprintln!("{error}");
std::process::exit(1);
}
}
fn run_with_large_stack<F>(f: F) -> DynResult<()>
where
F: FnOnce() -> DynResult<()> + Send + 'static,
{
let handle = std::thread::Builder::new()
.name("git-compare-benchmark".to_string())
.stack_size(32 * 1024 * 1024)
.spawn(f)?;
match handle.join() {
Ok(result) => result,
Err(_) => Err("benchmark thread panicked".into()),
}
}
fn real_main() -> DynResult<()> {
let args = Args::parse();
validate_args(&args)?;
let repo_path = fs::canonicalize(&args.repo_path)?;
ensure_git_repo(&repo_path)?;
prepare_output_dir(&args.output_dir, args.force)?;
let tmp_root = args.output_dir.join("tmp");
fs::create_dir_all(&tmp_root)?;
let head_commit = rev_parse_commit(&repo_path, &args.head_ref)?;
let (workloads, skipped) = select_workloads(&repo_path, &args, &head_commit)?;
let prepared = prepare_workloads(&repo_path, &args, &tmp_root, &workloads)?;
let mut workload_reports = Vec::with_capacity(prepared.workloads.len());
let mut all_trials = Vec::new();
println!(
"[git-compare] selected {} workloads from {}",
prepared.workloads.len(),
repo_path.display()
);
for prepared_workload in &prepared.workloads {
println!(
"[git-compare] workload {} {} ({} changed paths)",
&prepared_workload.workload.commit_sha[..12],
prepared_workload.workload.subject,
prepared_workload.workload.changed_paths
);
let trials = run_workload_trials(
&repo_path,
&args,
&tmp_root,
prepared_workload,
Arc::clone(&prepared.wasm_runtime),
)?;
let git_trials = filtered_trials(&trials, "git");
let lix_trials = filtered_trials(&trials, "lix");
let git_report = build_metric_report(&git_trials);
let lix_report = build_metric_report(&lix_trials);
let ratio = safe_ratio(lix_report.total_ms.p50_ms, git_report.total_ms.p50_ms);
let pct_less = pct_less_time(lix_report.total_ms.p50_ms, git_report.total_ms.p50_ms);
workload_reports.push(WorkloadReport {
commit_sha: prepared_workload.workload.commit_sha.clone(),
parent_sha: prepared_workload.workload.parent_sha.clone(),
subject: prepared_workload.workload.subject.clone(),
changed_paths: prepared_workload.workload.changed_paths,
child_tree_sha: prepared_workload.workload.child_tree_sha.clone(),
git: git_report,
lix: lix_report,
total_ratio_lix_over_git: ratio,
total_pct_less_time_for_lix: pct_less,
trials: trials.clone(),
});
all_trials.extend(trials);
}
let overall_git = build_metric_report(&filtered_trials(&all_trials, "git"));
let overall_lix = build_metric_report(&filtered_trials(&all_trials, "lix"));
let report = Report {
repo_path: repo_path.display().to_string(),
head_ref: args.head_ref.clone(),
head_commit,
config: ConfigReport {
runs: args.runs,
warmups: args.warmups,
verify_state: !args.skip_verify,
min_changed_paths: args.min_changed_paths,
max_changed_paths: args.max_changed_paths,
max_workloads: args.max_workloads,
scan_commits: args.scan_commits,
},
workload_selection: WorkloadSelectionReport {
selected_count: workload_reports.len(),
skipped,
},
template_seed: TemplateSeedReport {
mode: "git-parent-checkout + lix-parent-snapshot",
},
workloads: workload_reports,
overall: OverallReport {
git: overall_git.clone(),
lix: overall_lix.clone(),
total_ratio_lix_over_git: safe_ratio(
overall_lix.total_ms.p50_ms,
overall_git.total_ms.p50_ms,
),
total_pct_less_time_for_lix: pct_less_time(
overall_lix.total_ms.p50_ms,
overall_git.total_ms.p50_ms,
),
},
};
let json_path = args.output_dir.join("report.json");
let markdown_path = args.output_dir.join("report.md");
fs::write(
&json_path,
format!("{}\n", serde_json::to_string_pretty(&report)?),
)?;
fs::write(&markdown_path, render_markdown_report(&report))?;
println!(
"[git-compare] overall median total: git {:.2}ms, lix {:.2}ms, lix {:.2}% less time",
report.overall.git.total_ms.p50_ms,
report.overall.lix.total_ms.p50_ms,
report.overall.total_pct_less_time_for_lix
);
println!("[git-compare] json: {}", json_path.display());
println!("[git-compare] markdown: {}", markdown_path.display());
if !args.keep_temp {
let _ = fs::remove_dir_all(&tmp_root);
}
Ok(())
}
struct PreparedBenchmark {
workloads: Vec<PreparedWorkload>,
wasm_runtime: Arc<dyn WasmRuntime>,
}
fn validate_args(args: &Args) -> DynResult<()> {
if args.max_workloads == 0 {
return Err("--max-workloads must be >= 1".into());
}
if args.runs == 0 {
return Err("--runs must be >= 1".into());
}
if args.min_changed_paths == 0 {
return Err("--min-changed-paths must be >= 1".into());
}
if args.min_changed_paths > args.max_changed_paths {
return Err("--min-changed-paths must be <= --max-changed-paths".into());
}
Ok(())
}
fn ensure_git_repo(repo_path: &Path) -> DynResult<()> {
run_git_text(repo_path, ["rev-parse", "--git-dir"])?;
Ok(())
}
fn prepare_output_dir(path: &Path, force: bool) -> DynResult<()> {
if path.exists() {
if !force {
return Err(format!(
"output dir already exists: {} (pass --force to overwrite)",
path.display()
)
.into());
}
fs::remove_dir_all(path)?;
}
fs::create_dir_all(path)?;
Ok(())
}
fn select_workloads(
repo_path: &Path,
args: &Args,
head_commit: &str,
) -> DynResult<(Vec<Workload>, Vec<SkippedCandidate>)> {
let commit_infos = if args.commit_shas.is_empty() {
list_first_parent_commit_info(repo_path, &args.head_ref, Some(args.scan_commits))?
} else {
let mut commits = Vec::with_capacity(args.commit_shas.len());
for commit_sha in &args.commit_shas {
commits.push(read_commit_info(repo_path, commit_sha)?);
}
commits
};
let mut selected = Vec::new();
let mut skipped = Vec::new();
for commit in commit_infos {
if selected.len() >= args.max_workloads {
break;
}
if commit.sha == head_commit && commit.parents.is_empty() {
skipped.push(SkippedCandidate {
commit_sha: commit.sha,
subject: commit.subject,
reason: "root commit is not a useful user write+commit workload".to_string(),
});
continue;
}
if commit.parents.len() != 1 {
skipped.push(SkippedCandidate {
commit_sha: commit.sha,
subject: commit.subject,
reason: "merge commit skipped as a timed workload".to_string(),
});
continue;
}
let patch_set = read_commit_patch_set(repo_path, &commit.sha)?;
if patch_set.changes.len() < args.min_changed_paths {
skipped.push(SkippedCandidate {
commit_sha: commit.sha,
subject: commit.subject,
reason: format!(
"changed path count {} below minimum {}",
patch_set.changes.len(),
args.min_changed_paths
),
});
continue;
}
if patch_set.changes.len() > args.max_changed_paths {
skipped.push(SkippedCandidate {
commit_sha: commit.sha,
subject: commit.subject,
reason: format!(
"changed path count {} above maximum {}",
patch_set.changes.len(),
args.max_changed_paths
),
});
continue;
}
if let Some(reason) = first_unsupported_change_reason(&patch_set.changes) {
skipped.push(SkippedCandidate {
commit_sha: commit.sha,
subject: commit.subject,
reason,
});
continue;
}
let operations = compile_operations(&patch_set)?;
let expected_files =
normalize_snapshot_for_lix(&read_tree_snapshot(repo_path, &commit.sha)?);
let child_tree_sha = rev_parse_tree(repo_path, &commit.sha)?;
selected.push(Workload {
commit_sha: commit.sha,
parent_sha: commit.parents[0].clone(),
subject: commit.subject,
changed_paths: operations.len(),
child_tree_sha,
operations,
expected_files,
});
}
if selected.is_empty() {
return Err("no benchmark workloads selected; widen scan or changed-path filters".into());
}
Ok((selected, skipped))
}
fn prepare_workloads(
repo_path: &Path,
_args: &Args,
tmp_root: &Path,
workloads: &[Workload],
) -> DynResult<PreparedBenchmark> {
let wasm_runtime: Arc<dyn WasmRuntime> = Arc::new(WasmtimeRuntime::new()?);
let git_templates_dir = tmp_root.join("git-templates");
fs::create_dir_all(&git_templates_dir)?;
let mut prepared_workloads = Vec::with_capacity(workloads.len());
for workload in workloads {
let parent_files = read_tree_snapshot(repo_path, &workload.parent_sha)?;
let git_template_dir = git_templates_dir.join(&workload.commit_sha);
create_git_checkout_template(repo_path, &git_template_dir, &workload.parent_sha)?;
let lix_template = create_lix_snapshot_template(&parent_files)?;
prepared_workloads.push(PreparedWorkload {
workload: workload.clone(),
git_template_dir,
lix_template,
});
}
Ok(PreparedBenchmark {
workloads: prepared_workloads,
wasm_runtime,
})
}
fn run_workload_trials(
repo_path: &Path,
args: &Args,
tmp_root: &Path,
workload: &PreparedWorkload,
wasm_runtime: Arc<dyn WasmRuntime>,
) -> DynResult<Vec<TrialResult>> {
let git_trial_root = tmp_root
.join("git-runs")
.join(&workload.workload.commit_sha);
let lix_trial_root = tmp_root
.join("lix-runs")
.join(&workload.workload.commit_sha);
fs::create_dir_all(&git_trial_root)?;
fs::create_dir_all(&lix_trial_root)?;
let total_iterations = args.warmups + args.runs;
let mut trials = Vec::with_capacity(total_iterations * 2);
for iteration in 0..total_iterations {
let warmup = iteration < args.warmups;
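// Alternate which system runs first each iteration to cancel out ordering effects.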
let order = if iteration % 2 == 0 {
["git", "lix"]
} else {
["lix", "git"]
};
for system in order {
let trial = match system {
"git" => run_git_trial(
&git_trial_root,
iteration,
warmup,
workload,
!args.skip_verify,
)?,
"lix" => run_lix_trial(
repo_path,
&lix_trial_root,
iteration,
warmup,
workload,
Arc::clone(&wasm_runtime),
!args.skip_verify,
)?,
_ => unreachable!(),
};
trials.push(trial);
}
}
Ok(trials)
}
fn run_git_trial(
trial_root: &Path,
iteration: usize,
warmup: bool,
workload: &PreparedWorkload,
verify_state: bool,
) -> DynResult<TrialResult> {
let repo_dir = trial_root.join(format!("trial-{iteration}"));
if repo_dir.exists() {
fs::remove_dir_all(&repo_dir)?;
}
copy_directory(&workload.git_template_dir, &repo_dir)?;
let write_started = Instant::now();
apply_operations_to_git(&repo_dir, &workload.workload.operations)?;
let write_ms = elapsed_ms(write_started);
let commit_started = Instant::now();
let commit_message = format!("bench {}", &workload.workload.commit_sha[..12]);
run_git_text(&repo_dir, ["add", "-A"])?;
run_git_text(
&repo_dir,
[
"-c",
"core.hooksPath=/dev/null",
"-c",
"commit.gpgSign=false",
"commit",
"-q",
"--allow-empty",
"-m",
&commit_message,
],
)?;
let commit_ms = elapsed_ms(commit_started);
let verified = if verify_state {
let actual_tree = run_git_text(&repo_dir, ["rev-parse", "HEAD^{tree}"])?;
let actual_tree = actual_tree.trim();
if actual_tree != workload.workload.child_tree_sha {
return Err(format!(
"git trial tree mismatch for {}: expected {}, got {}",
workload.workload.commit_sha, workload.workload.child_tree_sha, actual_tree
)
.into());
}
true
} else {
false
};
fs::remove_dir_all(&repo_dir)?;
Ok(TrialResult {
workload_commit_sha: workload.workload.commit_sha.clone(),
system: "git",
iteration,
warmup,
write_ms,
commit_ms,
total_ms: write_ms + commit_ms,
verified,
})
}
fn run_lix_trial(
_repo_path: &Path,
trial_root: &Path,
iteration: usize,
warmup: bool,
workload: &PreparedWorkload,
wasm_runtime: Arc<dyn WasmRuntime>,
verify_state: bool,
) -> DynResult<TrialResult> {
let db_path = trial_root.join(format!("trial-{iteration}.lix"));
if db_path.exists() {
fs::remove_file(&db_path)?;
}
let session = create_initialized_session(&db_path, wasm_runtime)?;
if !workload.lix_template.seed_rows.is_empty() {
let seed_rows = workload.lix_template.seed_rows.clone();
pollster::block_on(session.transaction(ExecuteOptions::default(), |tx| {
Box::pin(async move {
for row in seed_rows {
tx.execute(
"INSERT INTO lix_file (id, path, data) VALUES (?1, ?2, ?3)",
&[
Value::Text(row.id),
Value::Text(row.path),
Value::Blob(row.data),
],
)
.await?;
}
Ok(())
})
}))?;
}
let mut path_to_id = workload.lix_template.path_to_id.clone();
let mut next_file_id = next_file_id_from_map(&path_to_id);
let mut transaction =
pollster::block_on(session.begin_transaction_with_options(ExecuteOptions::default()))?;
let write_started = Instant::now();
for operation in &workload.workload.operations {
execute_engine_operation(
&mut transaction,
operation,
&mut path_to_id,
&mut next_file_id,
)?;
}
let write_ms = elapsed_ms(write_started);
let commit_started = Instant::now();
pollster::block_on(transaction.commit())?;
let commit_ms = elapsed_ms(commit_started);
let verified = if verify_state {
verify_session_state(&session, &workload.workload.expected_files)?;
true
} else {
false
};
drop(session);
let _ = fs::remove_file(&db_path);
let _ = fs::remove_file(format!("{}-journal", db_path.display()));
let _ = fs::remove_file(format!("{}-wal", db_path.display()));
let _ = fs::remove_file(format!("{}-shm", db_path.display()));
Ok(TrialResult {
workload_commit_sha: workload.workload.commit_sha.clone(),
system: "lix",
iteration,
warmup,
write_ms,
commit_ms,
total_ms: write_ms + commit_ms,
verified,
})
}
fn create_git_checkout_template(
repo_path: &Path,
template_dir: &Path,
parent_sha: &str,
) -> DynResult<()> {
if template_dir.exists() {
fs::remove_dir_all(template_dir)?;
}
run_command(
"git",
[
"clone",
"--local",
"--quiet",
repo_path.to_str().ok_or("invalid repo path")?,
template_dir.to_str().ok_or("invalid template path")?,
],
None,
None,
)?;
run_git_text(template_dir, ["checkout", "--quiet", parent_sha])?;
run_git_text(template_dir, ["config", "user.email", "bench@example.com"])?;
run_git_text(template_dir, ["config", "user.name", "git-compare-bench"])?;
run_git_text(template_dir, ["config", "core.hooksPath", "/dev/null"])?;
run_git_text(template_dir, ["config", "commit.gpgSign", "false"])?;
run_git_text(template_dir, ["config", "gc.auto", "0"])?;
run_git_text(template_dir, ["config", "maintenance.auto", "false"])?;
run_git_text(template_dir, ["config", "gc.autoDetach", "false"])?;
Ok(())
}
fn create_lix_snapshot_template(
parent_files: &BTreeMap<String, Vec<u8>>,
) -> DynResult<LixTemplate> {
let mut path_to_id = BTreeMap::new();
let mut next_file_id = 1_u64;
let mut seed_rows = Vec::with_capacity(parent_files.len());
for (path, bytes) in parent_files {
let file_id = allocate_file_id(&mut next_file_id);
let lix_path = to_lix_path(path);
path_to_id.insert(lix_path.clone(), file_id.clone());
seed_rows.push(LixSeedRow {
id: file_id,
path: lix_path,
data: bytes.clone(),
});
}
Ok(LixTemplate {
seed_rows,
path_to_id,
})
}
fn apply_operations_to_git(repo_dir: &Path, operations: &[FileOperation]) -> DynResult<()> {
for operation in operations {
match operation.kind {
OperationKind::Add | OperationKind::Copy | OperationKind::Modify => {
let path = repo_dir.join(
operation
.new_path
.as_ref()
.ok_or("missing new path for git write")?,
);
if let Some(parent) = path.parent() {
fs::create_dir_all(parent)?;
}
fs::write(
&path,
operation
.new_bytes
.as_ref()
.ok_or("missing bytes for git write")?,
)?;
set_executable_if_needed(&path, operation.new_executable)?;
}
OperationKind::Rename => {
if let Some(old_path) = &operation.old_path {
let old_full = repo_dir.join(old_path);
if old_full.exists() {
fs::remove_file(&old_full)?;
}
}
let new_full = repo_dir.join(
operation
.new_path
.as_ref()
.ok_or("missing new path for rename")?,
);
if let Some(parent) = new_full.parent() {
fs::create_dir_all(parent)?;
}
fs::write(
&new_full,
operation
.new_bytes
.as_ref()
.ok_or("missing bytes for rename")?,
)?;
set_executable_if_needed(&new_full, operation.new_executable)?;
}
OperationKind::Delete => {
let path = repo_dir.join(
operation
.old_path
.as_ref()
.ok_or("missing old path for delete")?,
);
if path.exists() {
fs::remove_file(path)?;
}
}
}
}
Ok(())
}
fn set_executable_if_needed(path: &Path, executable: bool) -> DynResult<()> {
#[cfg(unix)]
{
let mode = if executable { 0o755 } else { 0o644 };
let mut permissions = fs::metadata(path)?.permissions();
permissions.set_mode(mode);
fs::set_permissions(path, permissions)?;
}
#[cfg(not(unix))]
let _ = (path, executable);
Ok(())
}
fn execute_engine_operation(
transaction: &mut SessionTransaction<'_>,
operation: &FileOperation,
path_to_id: &mut BTreeMap<String, String>,
next_file_id: &mut u64,
) -> DynResult<()> {
match operation.kind {
OperationKind::Add | OperationKind::Copy => {
let path = to_lix_path(
operation
.new_path
.as_ref()
.ok_or("missing new path for Lix insert")?,
);
let file_id = allocate_file_id(next_file_id);
pollster::block_on(
transaction.execute(
"INSERT INTO lix_file (id, path, data) VALUES (?1, ?2, ?3)",
&[
Value::Text(file_id.clone()),
Value::Text(path.clone()),
Value::Blob(
operation
.new_bytes
.as_ref()
.ok_or("missing bytes for Lix insert")?
.clone(),
),
],
),
)?;
path_to_id.insert(path.clone(), file_id);
}
OperationKind::Modify => {
let path = to_lix_path(
operation
.new_path
.as_ref()
.ok_or("missing path for Lix update")?,
);
let file_id = path_to_id
.get(&path)
.cloned()
.ok_or_else(|| format!("missing file id for modified path {path}"))?;
pollster::block_on(
transaction.execute(
"UPDATE lix_file SET data = ?1 WHERE id = ?2",
&[
Value::Blob(
operation
.new_bytes
.as_ref()
.ok_or("missing bytes for Lix update")?
.clone(),
),
Value::Text(file_id),
],
),
)?;
}
OperationKind::Rename => {
let old_path = to_lix_path(
operation
.old_path
.as_ref()
.ok_or("missing old path for Lix rename")?,
);
let new_path = to_lix_path(
operation
.new_path
.as_ref()
.ok_or("missing new path for Lix rename")?,
);
let file_id = path_to_id
.remove(&old_path)
.ok_or_else(|| format!("missing file id for renamed path {old_path}"))?;
pollster::block_on(
transaction.execute(
"UPDATE lix_file SET path = ?1, data = ?2 WHERE id = ?3",
&[
Value::Text(new_path.clone()),
Value::Blob(
operation
.new_bytes
.as_ref()
.ok_or("missing bytes for Lix rename")?
.clone(),
),
Value::Text(file_id.clone()),
],
),
)?;
path_to_id.insert(new_path.clone(), file_id);
}
OperationKind::Delete => {
let old_path = to_lix_path(
operation
.old_path
.as_ref()
.ok_or("missing old path for Lix delete")?,
);
let file_id = path_to_id
.remove(&old_path)
.ok_or_else(|| format!("missing file id for deleted path {old_path}"))?;
pollster::block_on(transaction.execute(
"DELETE FROM lix_file WHERE id = ?1",
&[Value::Text(file_id)],
))?;
}
}
Ok(())
}
fn verify_session_state(
session: &Session,
expected_files: &BTreeMap<String, Vec<u8>>,
) -> DynResult<()> {
let result =
pollster::block_on(session.execute("SELECT path, data FROM lix_file ORDER BY path", &[]))?;
let mut actual = BTreeMap::new();
for row in &result.statements[0].rows {
let path = expect_text(&row[0])?;
let bytes = value_as_bytes(&row[1])?;
actual.insert(path, bytes);
}
if &actual != expected_files {
return Err(format!(
"Lix state verification failed: expected {} files, got {} files",
expected_files.len(),
actual.len()
)
.into());
}
Ok(())
}
fn create_initialized_session(
path: &Path,
wasm_runtime: Arc<dyn WasmRuntime>,
) -> DynResult<Session> {
if path.exists() {
fs::remove_file(path)?;
}
let init_backend = SqliteBackend::from_path(path)?;
let engine = Arc::new(boot_engine(EngineConfig::new(
Box::new(init_backend),
Arc::clone(&wasm_runtime),
)));
let _ = pollster::block_on(engine.initialize_if_needed())?;
pollster::block_on(engine.open_existing())?;
Ok(pollster::block_on(engine.open_session())?)
}
fn expect_text(value: &Value) -> DynResult<String> {
match value {
Value::Text(text) => Ok(text.clone()),
other => Err(format!("expected text value, got {other:?}").into()),
}
}
fn value_as_bytes(value: &Value) -> DynResult<Vec<u8>> {
match value {
Value::Blob(bytes) => Ok(bytes.clone()),
Value::Text(text) => Ok(text.as_bytes().to_vec()),
other => Err(format!("expected blob/text value, got {other:?}").into()),
}
}
fn next_file_id_from_map(path_to_id: &BTreeMap<String, String>) -> u64 {
path_to_id
.values()
.filter_map(|id| id.strip_prefix("bench-file-"))
.filter_map(|tail| tail.parse::<u64>().ok())
.max()
.unwrap_or(0)
+ 1
}
fn allocate_file_id(next_file_id: &mut u64) -> String {
let file_id = format!("bench-file-{next_file_id}");
*next_file_id += 1;
file_id
}
fn filtered_trials(trials: &[TrialResult], system: &str) -> Vec<TrialResult> {
trials
.iter()
.filter(|trial| trial.system == system && !trial.warmup)
.cloned()
.collect()
}
fn build_metric_report(trials: &[TrialResult]) -> MetricReport {
MetricReport {
write_ms: summarize(trials.iter().map(|trial| trial.write_ms).collect()),
commit_ms: summarize(trials.iter().map(|trial| trial.commit_ms).collect()),
total_ms: summarize(trials.iter().map(|trial| trial.total_ms).collect()),
}
}
fn summarize(mut values: Vec<f64>) -> SummaryStats {
if values.is_empty() {
return SummaryStats::default();
}
values.sort_by(|left, right| left.partial_cmp(right).unwrap());
let samples = values.len();
let sum: f64 = values.iter().sum();
SummaryStats {
samples,
min_ms: values[0],
p50_ms: percentile(&values, 0.50),
p95_ms: percentile(&values, 0.95),
mean_ms: sum / samples as f64,
max_ms: values[samples - 1],
}
}
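/// Percentile via linear interpolation between the closest ranks (rank = p * (n - 1)).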
fn percentile(sorted_values: &[f64], percentile: f64) -> f64 {
if sorted_values.is_empty() {
return 0.0;
}
let rank = percentile * (sorted_values.len().saturating_sub(1)) as f64;
let lower = rank.floor() as usize;
let upper = rank.ceil() as usize;
if lower == upper {
return sorted_values[lower];
}
let weight = rank - lower as f64;
sorted_values[lower] * (1.0 - weight) + sorted_values[upper] * weight
}
fn safe_ratio(numerator: f64, denominator: f64) -> f64 {
if denominator == 0.0 {
0.0
} else {
numerator / denominator
}
}
fn pct_less_time(lix_ms: f64, git_ms: f64) -> f64 {
if git_ms == 0.0 {
0.0
} else {
(1.0 - (lix_ms / git_ms)) * 100.0
}
}
fn render_markdown_report(report: &Report) -> String {
let mut output = String::new();
output.push_str("# Git Compare Benchmark\n\n");
output.push_str(&format!(
"Repo: `{}` \nHead: `{}` (`{}`)\n\n",
report.repo_path, report.head_ref, report.head_commit
));
output.push_str("## Setup\n\n");
output.push_str(&format!(
"- workloads: `{}`\n- runs per system: `{}`\n- warmups: `{}`\n- verification: `{}`\n\n",
report.workload_selection.selected_count,
report.config.runs,
report.config.warmups,
report.config.verify_state,
));
output.push_str("## Overall Median\n\n");
output.push_str("| system | write ms | commit ms | total ms | p95 total ms |\n");
output.push_str("| --- | ---: | ---: | ---: | ---: |\n");
output.push_str(&format!(
"| git | {:.2} | {:.2} | {:.2} | {:.2} |\n",
report.overall.git.write_ms.p50_ms,
report.overall.git.commit_ms.p50_ms,
report.overall.git.total_ms.p50_ms,
report.overall.git.total_ms.p95_ms
));
output.push_str(&format!(
"| lix | {:.2} | {:.2} | {:.2} | {:.2} |\n\n",
report.overall.lix.write_ms.p50_ms,
report.overall.lix.commit_ms.p50_ms,
report.overall.lix.total_ms.p50_ms,
report.overall.lix.total_ms.p95_ms
));
output.push_str(&format!(
"Lix median total time was `{:.2}%` less than Git on this benchmark (`{:.2}x` Lix/Git).\n\n",
report.overall.total_pct_less_time_for_lix,
report.overall.total_ratio_lix_over_git
));
output.push_str("## Workloads\n\n");
output.push_str("| commit | changed paths | git total ms | lix total ms | lix less time |\n");
output.push_str("| --- | ---: | ---: | ---: | ---: |\n");
for workload in &report.workloads {
output.push_str(&format!(
"| `{}` | {} | {:.2} | {:.2} | {:.2}% |\n",
&workload.commit_sha[..12],
workload.changed_paths,
workload.git.total_ms.p50_ms,
workload.lix.total_ms.p50_ms,
workload.total_pct_less_time_for_lix
));
}
output.push_str("\n## Notes\n\n");
output.push_str(&format!(
"- template seed mode: `{}`\n- skipped candidate commits during workload selection: `{}`\n",
report.template_seed.mode,
report.workload_selection.skipped.len()
));
output
}
fn list_first_parent_commit_info(
repo_path: &Path,
reference: &str,
limit: Option<usize>,
) -> DynResult<Vec<CommitInfo>> {
let mut args = vec![
"log".to_string(),
"--first-parent".to_string(),
"--format=%H%x1f%P%x1f%s%x1e".to_string(),
];
if let Some(limit) = limit {
args.push("-n".to_string());
args.push(limit.to_string());
}
args.push(reference.to_string());
let output = run_git_text(repo_path, args.iter().map(String::as_str))?;
let mut commits = Vec::new();
for record in output.split('\x1e') {
let trimmed = record.trim();
if trimmed.is_empty() {
continue;
}
let mut parts = trimmed.split('\x1f');
let sha = parts.next().unwrap_or_default().trim().to_string();
let parent_part = parts.next().unwrap_or_default().trim();
let subject = parts.next().unwrap_or_default().trim().to_string();
commits.push(CommitInfo {
sha,
parents: if parent_part.is_empty() {
Vec::new()
} else {
parent_part
.split_whitespace()
.map(ToString::to_string)
.collect()
},
subject,
});
}
Ok(commits)
}
fn read_commit_info(repo_path: &Path, reference: &str) -> DynResult<CommitInfo> {
let sha = rev_parse_commit(repo_path, reference)?;
let output = run_git_text(repo_path, ["log", "-1", "--format=%P%x1f%s", &sha])?;
let trimmed = output.trim();
let mut parts = trimmed.split('\x1f');
let parent_part = parts.next().unwrap_or_default().trim();
let subject = parts.next().unwrap_or_default().trim().to_string();
Ok(CommitInfo {
sha,
parents: if parent_part.is_empty() {
Vec::new()
} else {
parent_part
.split_whitespace()
.map(ToString::to_string)
.collect()
},
subject,
})
}
fn rev_parse_commit(repo_path: &Path, reference: &str) -> DynResult<String> {
Ok(run_git_text(
repo_path,
["rev-parse", "--verify", &format!("{reference}^{{commit}}")],
)?
.trim()
.to_string())
}
fn rev_parse_tree(repo_path: &Path, commit_sha: &str) -> DynResult<String> {
Ok(
run_git_text(repo_path, ["rev-parse", &format!("{commit_sha}^{{tree}}")])?
.trim()
.to_string(),
)
}
fn read_commit_patch_set(repo_path: &Path, commit_sha: &str) -> DynResult<PatchSet> {
let raw = run_git_bytes(
repo_path,
[
"diff-tree",
"--root",
"--raw",
"-r",
"-z",
"-m",
"--first-parent",
"--find-renames",
"--no-commit-id",
commit_sha,
],
None,
)?;
let changes = parse_raw_diff_tree(&raw)?;
let wanted_blob_ids = collect_wanted_blob_ids(&changes);
let blobs = read_blobs(repo_path, &wanted_blob_ids)?;
Ok(PatchSet { changes, blobs })
}
fn parse_raw_diff_tree(raw: &[u8]) -> DynResult<Vec<RawChange>> {
if raw.is_empty() {
return Ok(Vec::new());
}
let tokens = raw
.split(|byte| *byte == 0)
.filter(|token| !token.is_empty())
.collect::<Vec<_>>();
let mut changes = Vec::new();
let mut index = 0;
while index < tokens.len() {
let header = std::str::from_utf8(tokens[index])?;
index += 1;
if !header.starts_with(':') {
continue;
}
let fields = header[1..].split(' ').collect::<Vec<_>>();
if fields.len() < 5 {
continue;
}
let status_token = fields[4];
let status = status_token.chars().next().unwrap_or('M');
let first_path =
std::str::from_utf8(tokens.get(index).ok_or("missing diff-tree path")?)?.to_string();
index += 1;
if status == 'R' || status == 'C' {
let second_path =
std::str::from_utf8(tokens.get(index).ok_or("missing rename target path")?)?
.to_string();
index += 1;
changes.push(RawChange {
status,
old_mode: fields[0].to_string(),
new_mode: fields[1].to_string(),
old_oid: fields[2].to_string(),
new_oid: fields[3].to_string(),
old_path: Some(first_path),
new_path: Some(second_path),
});
continue;
}
changes.push(RawChange {
status,
old_mode: fields[0].to_string(),
new_mode: fields[1].to_string(),
old_oid: fields[2].to_string(),
new_oid: fields[3].to_string(),
old_path: if status == 'A' {
None
} else {
Some(first_path.clone())
},
new_path: if status == 'D' {
None
} else {
Some(first_path)
},
});
}
Ok(changes)
}
fn collect_wanted_blob_ids(changes: &[RawChange]) -> Vec<String> {
let mut ids = BTreeSet::new();
for change in changes {
if change.new_path.is_some()
&& is_regular_blob_mode(&change.new_mode)
&& change.new_oid != NULL_OID
{
ids.insert(change.new_oid.clone());
}
}
ids.into_iter().collect()
}
fn read_tree_snapshot(repo_path: &Path, commit_sha: &str) -> DynResult<BTreeMap<String, Vec<u8>>> {
let raw = run_git_bytes(
repo_path,
["ls-tree", "-r", "-z", "--full-tree", commit_sha],
None,
)?;
let mut oid_by_path = BTreeMap::new();
for token in raw
.split(|byte| *byte == 0)
.filter(|token| !token.is_empty())
{
let entry = std::str::from_utf8(token)?;
let (header, path) = entry.split_once('\t').ok_or("invalid ls-tree entry")?;
let fields = header.split_whitespace().collect::<Vec<_>>();
if fields.len() != 3 {
continue;
}
let mode = fields[0];
let object_type = fields[1];
let oid = fields[2];
if object_type != "blob" || !is_regular_blob_mode(mode) {
continue;
}
oid_by_path.insert(path.to_string(), oid.to_string());
}
let blob_ids = oid_by_path.values().cloned().collect::<Vec<_>>();
let blobs = read_blobs(repo_path, &blob_ids)?;
let mut files = BTreeMap::new();
for (path, oid) in oid_by_path {
let bytes = blobs
.get(&oid)
.cloned()
.ok_or_else(|| format!("missing blob {oid} for path {path}"))?;
files.insert(path, bytes);
}
Ok(files)
}
fn compile_operations(patch_set: &PatchSet) -> DynResult<Vec<FileOperation>> {
let mut operations = Vec::with_capacity(patch_set.changes.len());
for change in &patch_set.changes {
let new_bytes = if change.new_path.is_some() && is_regular_blob_mode(&change.new_mode) {
Some(
patch_set
.blobs
.get(&change.new_oid)
.cloned()
.ok_or_else(|| format!("missing blob bytes for {}", change.new_oid))?,
)
} else {
None
};
let kind = match change.status {
'A' => OperationKind::Add,
'M' => OperationKind::Modify,
'D' => OperationKind::Delete,
'R' => OperationKind::Rename,
'C' => OperationKind::Copy,
other => {
return Err(format!("unsupported diff status '{other}'").into());
}
};
operations.push(FileOperation {
kind,
old_path: change.old_path.clone(),
new_path: change.new_path.clone(),
new_bytes,
new_executable: change.new_mode == "100755",
});
}
Ok(operations)
}
fn normalize_snapshot_for_lix(files: &BTreeMap<String, Vec<u8>>) -> BTreeMap<String, Vec<u8>> {
files
.iter()
.map(|(path, bytes)| (to_lix_path(path), bytes.clone()))
.collect()
}
fn to_lix_path(path: &str) -> String {
let trimmed = path.trim_start_matches('/');
let segments = trimmed
.split('/')
.filter(|segment| !segment.is_empty())
.map(encode_lix_path_segment)
.collect::<Vec<_>>();
format!("/{}", segments.join("/"))
}
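// Percent-encodes every byte outside the RFC 3986 unreserved set (ALPHA / DIGIT / "-" / "." / "_" / "~").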
fn encode_lix_path_segment(segment: &str) -> String {
let mut encoded = String::new();
for byte in segment.as_bytes() {
let ch = *byte as char;
let allowed = ch.is_ascii_alphanumeric() || matches!(ch, '.' | '_' | '~' | '-');
if allowed {
encoded.push(ch);
} else {
encoded.push_str(&format!("%{:02X}", byte));
}
}
encoded
}
fn first_unsupported_change_reason(changes: &[RawChange]) -> Option<String> {
changes.iter().find_map(unsupported_change_reason)
}
fn unsupported_change_reason(change: &RawChange) -> Option<String> {
match change.status {
'A' => {
if !is_regular_blob_mode(&change.new_mode) {
Some(format!(
"added path {:?} uses unsupported mode {}",
change.new_path, change.new_mode
))
} else {
None
}
}
'M' => {
if !is_regular_blob_mode(&change.old_mode) || !is_regular_blob_mode(&change.new_mode) {
return Some(format!(
"modified path {:?} uses unsupported mode {} -> {}",
change.new_path, change.old_mode, change.new_mode
));
}
if change.old_path == change.new_path
&& change.old_oid == change.new_oid
&& change.old_mode != change.new_mode
{
return Some(format!(
"mode-only change on {:?} is not represented by lix_file",
change.new_path
));
}
None
}
'D' => {
if !is_regular_blob_mode(&change.old_mode) {
Some(format!(
"deleted path {:?} uses unsupported mode {}",
change.old_path, change.old_mode
))
} else {
None
}
}
'R' | 'C' => {
if !is_regular_blob_mode(&change.old_mode) || !is_regular_blob_mode(&change.new_mode) {
Some(format!(
"rename/copy {:?} -> {:?} uses unsupported mode {} -> {}",
change.old_path, change.new_path, change.old_mode, change.new_mode
))
} else {
None
}
}
other => Some(format!("unsupported diff status '{other}'")),
}
}
fn is_regular_blob_mode(mode: &str) -> bool {
mode == "100644" || mode == "100755"
}
fn read_blobs(repo_path: &Path, blob_ids: &[String]) -> DynResult<HashMap<String, Vec<u8>>> {
if blob_ids.is_empty() {
return Ok(HashMap::new());
}
let input = format!("{}\n", blob_ids.join("\n")).into_bytes();
let output = run_git_bytes(repo_path, ["cat-file", "--batch"], Some(input))?;
let mut blobs = HashMap::with_capacity(blob_ids.len());
let mut offset = 0usize;
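// `git cat-file --batch` emits a `<oid> <type> <size>` header line, then <size> body bytes, then a newline.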
while offset < output.len() {
let line_end = output[offset..]
.iter()
.position(|byte| *byte == b'\n')
.map(|index| offset + index)
.ok_or("invalid cat-file batch output")?;
let header = std::str::from_utf8(&output[offset..line_end])?;
offset = line_end + 1;
let header_fields = header.split_whitespace().collect::<Vec<_>>();
if header_fields.len() != 3 {
return Err(format!("invalid cat-file header: {header}").into());
}
let oid = header_fields[0].to_string();
let object_type = header_fields[1];
let size: usize = header_fields[2].parse()?;
if object_type != "blob" {
return Err(format!("expected blob for {oid}, got {object_type}").into());
}
let body_end = offset + size;
if body_end > output.len() {
return Err(format!("truncated blob body for {oid}").into());
}
blobs.insert(oid, output[offset..body_end].to_vec());
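// The +1 skips the newline that terminates each object body.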
offset = body_end + 1;
}
Ok(blobs)
}
fn run_git_text<I, S>(repo_path: &Path, args: I) -> DynResult<String>
where
I: IntoIterator<Item = S>,
S: AsRef<str>,
{
let args_vec = args
.into_iter()
.map(|arg| arg.as_ref().to_string())
.collect::<Vec<_>>();
let output = run_command(
"git",
args_vec.iter().map(String::as_str),
Some(repo_path),
None,
)?;
Ok(String::from_utf8(output)?)
}
fn run_git_bytes<I, S>(repo_path: &Path, args: I, stdin: Option<Vec<u8>>) -> DynResult<Vec<u8>>
where
I: IntoIterator<Item = S>,
S: AsRef<str>,
{
let args_vec = args
.into_iter()
.map(|arg| arg.as_ref().to_string())
.collect::<Vec<_>>();
run_command(
"git",
args_vec.iter().map(String::as_str),
Some(repo_path),
stdin,
)
}
fn run_command<I, S>(
program: &str,
args: I,
cwd: Option<&Path>,
stdin: Option<Vec<u8>>,
) -> DynResult<Vec<u8>>
where
I: IntoIterator<Item = S>,
S: AsRef<str>,
{
let args_vec = args
.into_iter()
.map(|arg| arg.as_ref().to_string())
.collect::<Vec<_>>();
let mut command = Command::new(program);
command.args(&args_vec);
if let Some(cwd) = cwd {
command.current_dir(cwd);
}
if stdin.is_some() {
command.stdin(Stdio::piped());
}
command.stdout(Stdio::piped());
command.stderr(Stdio::piped());
let mut child = command.spawn()?;
if let Some(stdin_bytes) = stdin {
use std::io::Write;
let mut child_stdin = child.stdin.take().ok_or("missing child stdin")?;
child_stdin.write_all(&stdin_bytes)?;
}
let output = child.wait_with_output()?;
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
return Err(format!(
"command failed: {} {}\n{}",
program,
args_vec.join(" "),
stderr.trim()
)
.into());
}
Ok(output.stdout)
}
fn copy_directory(source: &Path, destination: &Path) -> DynResult<()> {
if destination.exists() {
fs::remove_dir_all(destination)?;
}
run_command(
"cp",
[
"-R",
source.to_str().ok_or("invalid source path")?,
destination.to_str().ok_or("invalid destination path")?,
],
None,
None,
)?;
Ok(())
}
fn elapsed_ms(started: Instant) -> f64 {
started.elapsed().as_secs_f64() * 1000.0
}
================================================
FILE: blog/001-introducing-lix/index.md
================================================
---
date: "2026-01-20"
og:description: "Lix is a version control system you import as a library. It records semantic changes to enable diffs, reviews, rollback, and querying of edits."
---
# Introducing Lix: An embeddable version control system
Lix is an **embeddable version control system** that can be imported as a library. Use Lix, for example, to enable human-in-the-loop workflows for AI agents, such as diffs and reviews.
- **It's just a library** — Lix is a library you import. Get branching, diff, rollback in your existing stack
- **Tracks semantic changes** — diffs, blame, and history are queryable via SQL
- **Approval workflows for agents** — agents propose changes in isolated versions, humans review and merge

> [!TIP]
> Lix does not replace Git. [Read how Lix compares to Git →](https://lix.dev/docs/comparison-to-git)
## Semantic change tracking
Lix doesn't track line-by-line text changes. It tracks **semantic changes** at the entity level via plugins.
A plugin parses a format (or a piece of app state) into structured entities. Then Lix stores **what changed** — not just which bytes differ.
**Before:**
```json
{"theme":"light","notifications":true,"language":"en"}
```
**After:**
```json
{"theme":"dark","notifications":true,"language":"en"}
```
**Git tracks:**
```diff
-{"theme":"light","notifications":true,"language":"en"}
+{"theme":"dark","notifications":true,"language":"en"}
```
**Lix tracks:**
```diff
property theme:
- light
+ dark
```
### Excel file example
With an XLSX plugin (not shipped yet), Lix can show a cell-level diff. This is exactly the kind of semantic surface a plugin defines: cells vs. formulas vs. styling.
**Before:**
| order_id | product | status |
| -------- | -------- | ------- |
| 1001 | Widget A | shipped |
| 1002 | Widget B | pending |
**After:**
| order_id | product | status |
| -------- | -------- | ------- |
| 1001 | Widget A | shipped |
| 1002 | Widget B | shipped |
**Git tracks:**
```diff
-Binary files differ
```
**Lix tracks:**
```diff
order_id 1002 status:
- pending
+ shipped
```
The same approach extends to any other format your product cares about — **as long as there’s a plugin** that can interpret it.
## How does Lix work?
Lix is **change-first**: it stores semantic changes as queryable data, not snapshots.
That means audit trails, rollbacks, and “blame” become simple queries:
```sql
SELECT *
FROM state_history
WHERE entity_id = 'settings.theme'
ORDER BY depth ASC;
```
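The same lookup works through the SDK's query builder. A minimal sketch, assuming the Kysely-style `lix.db` from the Getting started section below exposes `state_history`; the exact builder API may differ:
```ts
// `lix` as created by openLix() in the Getting started example below.
const themeHistory = await lix.db
  .selectFrom("state_history")
  .selectAll()
  .where("entity_id", "=", "settings.theme")
  .orderBy("depth", "asc")
  .execute();
```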
Lix uses existing SQL databases as both **query engine** and **persistence layer**.
Plugins parse files (including binary formats) into "meaningful changes" such as cells, properties, and whitespace. Lix stores those changes as rows in virtual tables like `file`, `file_history`, and `state_history`.
Why this matters:
- **Doesn't reinvent databases** — durability, ACID, and recovery come from proven SQL engines.
- **SQL API for changes** — query diffs, history, and audit trails directly.
- **Portable** — runs on SQLite, Postgres, or other SQL databases.
```
┌─────────────────────────────────────────────────┐
│ Lix │
│ │
│ ┌────────────┐ ┌──────────┐ ┌─────────┐ ┌─────┐ │
│ │ Filesystem │ │ Branches │ │ History │ │ ... │ │
│ └────────────┘ └──────────┘ └─────────┘ └─────┘ │
└────────────────────────┬────────────────────────┘
│
▼
┌─────────────────────────────────────────────────┐
│ SQL database │
│ (SQLite, Postgres, etc.) │
└─────────────────────────────────────────────────┘
```
This means: no separate infrastructure to manage, and no “special” datastore just for version control.
## Plugins (format support)
Lix’s format support depends on plugins. Here’s the current status:
| Format | Plugin | Status |
| ------ | ------ | ------ |
| JSON | `@lix-js/plugin-json` | Stable |
| CSV | `@lix-js/plugin-csv` | Stable |
| Markdown | `@lix-js/plugin-md` | Beta |
| ProseMirror | `@lix-js/plugin-prosemirror` | Stable |
**Building your own plugin:** take an off-the-shelf parser for your format, map it to Lix’s entity/change schema, and you get semantic diffs + history for that format. [Plugin documentation →](https://lix.dev/docs/plugins)
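As a rough sketch of that mapping for JSON (illustrative only; the `detectChanges` name and the change shape are assumptions, not the exact plugin contract; see the plugin documentation for the real interface):
```ts
type Change = {
  entity_id: string; // e.g. "settings.theme"
  snapshot: unknown | null; // null means the entity was deleted
};

// Diff two JSON files at the top-level-property level with the built-in parser.
function detectChanges(before: Uint8Array, after: Uint8Array): Change[] {
  const decode = (bytes: Uint8Array) =>
    JSON.parse(new TextDecoder().decode(bytes)) as Record<string, unknown>;
  const previous = decode(before);
  const next = decode(after);
  const changes: Change[] = [];
  for (const key of new Set([...Object.keys(previous), ...Object.keys(next)])) {
    if (JSON.stringify(previous[key]) !== JSON.stringify(next[key])) {
      changes.push({ entity_id: key, snapshot: key in next ? next[key] : null });
    }
  }
  return changes;
}
```
A real plugin would walk nested structures and assign stable entity IDs, but the idea is the same: parser output in, entity-level changes out.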
## Why did we build Lix?
Lix was developed alongside [inlang](https://inlang.com), open-source localization infrastructure.
We needed version control **as a library**, not as an external tool. Git's architecture didn't fit: we needed database semantics (transactions, ACID), queryable history, and semantic diffing. [Read more →](https://samuelstroschein.com/blog/git-limitations)
The result is Lix, now at over [90k weekly downloads on NPM](https://www.npmjs.com/package/@lix-js/sdk).

## Getting started
<p>
<img src="https://cdn.simpleicons.org/javascript/F7DF1E" alt="JavaScript" width="18" height="18" /> JavaScript ·
<a href="https://github.com/opral/lix/issues/370"><img src="https://cdn.jsdelivr.net/gh/devicons/devicon/icons/python/python-original.svg" alt="Python" width="18" height="18" /> Python</a> ·
<a href="https://github.com/opral/lix/issues/371"><img src="https://cdn.simpleicons.org/rust/CE422B" alt="Rust" width="18" height="18" /> Rust</a> ·
<a href="https://github.com/opral/lix/issues/373"><img src="https://cdn.simpleicons.org/go/00ADD8" alt="Go" width="18" height="18" /> Go</a>
</p>
```bash
npm install @lix-js/sdk
```
```ts
import { openLix, selectWorkingDiff } from "@lix-js/sdk";
const lix = await openLix({
environment: new InMemorySQLite()
});
await lix.db.insertInto("file").values({ path: "/hello.txt", data: new TextEncoder().encode("hello") }).execute();
const diff = await selectWorkingDiff({ lix }).selectAll().execute();
```
## What's next
The next version of Lix refactors the engine to be purely "preprocessor" based. This makes Lix easier to embed anywhere and enables:
- **Fast writes** ([RFC 001](/rfc/001-preprocess-writes))
- **Any SQL database** (SQLite, Postgres, Turso, MySQL)
- **SDKs for Python, Rust, Go** ([RFC 002](/rfc/002-rewrite-in-rust))
```
┌────────────────┐
SELECT * FROM ... │ Lix Engine │ SELECT * FROM ...
───────────────────▶ │ (Rust) │ ───────────────────▶ Database
└────────────────┘
```
### Join the community
- ⭐ [Star the lix repo on GitHub](https://github.com/opral/lix)
- 💬 [Chat on Discord](https://discord.gg/gdMPPWy57R)
================================================
FILE: blog/002-modeling-a-company-as-a-repository/index.md
================================================
---
date: "2026-02-23"
og:description: "Modeling a company as a filesystem is promising for AI agents, but binary files break the model. Lix turns binary formats into structured data agents can read and write."
og:image: "./cover.jpg"
og:image:alt: "Abstract illustration for Your Company should be a Repository for AI agents"
---
# Your Company should be a Repository for AI agents
The idea of modeling a company as a filesystem for maximum agent efficiency is gaining traction on X (Twitter).
For example, [Eli Mernit](https://x.com/mernit/status/2021324284875153544) wrote that agents get better context if a company is modeled as files ("Your company is a filesystem").
The problem is that modeling a company as a filesystem doesn't work today, because most files are binary formats that agents can't work with effectively. [Anvisha Pai](https://x.com/anvishapai/status/2022062725354967551) pointed this out in her response post "Your company is not a filesystem".
But what if a system existed that turns binary files into structured data agents can read and write?

## The case for the filesystem
The "company as a filesystem because of agents" argument is compelling for two reasons:
1. Agents get full context. When company data lives in files, agents can inspect and reason across systems without brittle app integrations.
2. No third party API restrictions. Tools like Codex and Claude Code feel powerful because they can use direct filesystem primitives (`grep`, shell commands, scripts) instead of being constrained by third-party APIs.

## But the filesystem is not enough
A plain filesystem alone doesn't let agents work effectively:
1. Most file formats are not agent-friendly. Documents, spreadsheets, presentations, etc. are binary formats. Agents can parse some formats, but there is no universal semantic layer that enables round-trip editing.
2. Many files cannot be converted into text. A common workaround is to convert binary files to text. But visual and structural media (for example CAD, PCB, or layered design files) lose critical information when reduced to text. That makes review and verification harder, which is the real bottleneck now that AI agents are on the rise.

## A system that understands binary files
A system that turns binary files into structured data agents can read and write would enable modeling a company as a filesystem.
The implementation can be simple. Parse binary files into their schemas. After all, most binary files are structured data under the hood.
For example, a docx file is a collection of paragraphs, tables, images, etc. All of those can be expressed as JSON that an agent can understand.
```text
┌─────────────────┐ ┌───────────────────────┐
│ contract.docx │────┬──► │ { type: "paragraph" } │
└─────────────────┘ ├──► │ { type: "table" } │
└──► │ { type: "image" } │
┌─────────────────┐ ├───────────────────────┤
│ design.psd │────┬──► │ { type: "layer" } │
└─────────────────┘ └──► │ { type: "mask" } │
├───────────────────────┤
┌─────────────────┐ │ │
│ budget.xlsx │────┬──► │ { type: "row" } │
└─────────────────┘ └──► │ { type: "formula" } │
└───────────────────────┘
▲
│
▼
┌──────────────┐
│ Agent │
│ read/write │
└──────────────┘
```
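To make that concrete, here is a hypothetical shape for the parsed entities (the type and field names are illustrative, not a Lix schema):
```ts
type Entity =
  | { type: "paragraph"; id: string; text: string }
  | { type: "table"; id: string; rows: string[][] }
  | { type: "layer"; id: string; name: string };

// An agent edits structured data instead of raw .docx bytes:
const edit: Entity = { type: "paragraph", id: "p-12", text: "Payment is due within 30 days." };
```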
## Lix is that system
A system that turns binary files into structured JSON agents can understand already exists; it's called **Lix**.
Lix is a "universal" version control system. "Universal" because it can track changes in binary files by parsing files into JSON schemas. Otherwise, tracking changes in those binary files would not be possible. Lix also solves the problem of opaque binary files agents are now running into.
Lix is in alpha, but you can already check out the repository on GitHub.
[Lix on GitHub](https://github.com/opral/lix)

================================================
FILE: blog/003-february-2026-update/index.md
================================================
---
date: "2026-03-04"
og:description: "The Rust rewrite is complete. 33x faster file writes, lix was trending on HackerNews, and what's next in March."
og:image: "./cover.png"
og:image:alt: "February 2026 update cover showing the Lix Rust rewrite milestone"
---
# February 2026 Update: Rust Rewrite Complete
**TL;DR**
- 33x faster file writes
- GitHub stars grew from 70 to over 500
- Real workload and AX (user) testing in March
## The Rust rewrite is complete
[RFC 001](https://lix.dev/rfc/001-preprocess-writes) and [RFC 002](https://lix.dev/rfc/002-rewrite-in-rust) were implemented in February, with two strong outcomes:
### 33x faster file writes
The rewrite significantly improves heavy write paths, with the largest gain on realistic plugin-based JSON file inserts (**33x median, ~40x p95**).
| Benchmark | `v0.5` | `next` | Speedup |
| --------------------------------- | --------- | --------- | ---------- |
| State single-row insert | 17.43 ms | 14.85 ms | 1.17x |
| State 10-row insert | 57.33 ms | 46.53 ms | 1.23x |
| State 100-row insert | 460.27 ms | 193.30 ms | **2.38x** |
| JSON file insert (120 properties) | 889.81 ms | 26.90 ms | **33.08x** |
### Controlling the query planner
The new architecture unlocks previously impossible optimizations. The SQL database is merely used as a storage and query execution layer.
v0.5 and below could not optimize beyond what the vtable API of the database provides. Every write triggered per-row callbacks that crossed the JS-WASM boundary with ~10-25 internal SQL queries each. In SQLite's case, even batched mutations could not be optimized.
Lix now intercepts and rewrites queries before they hit SQLite, batching what used to be per-row vtable callbacks into single bulk operations. For more information read [RFC 001](https://lix.dev/rfc/001-preprocess-writes).
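A minimal sketch of the batching idea (illustrative TypeScript, not the engine's actual rewrite code): collect the row mutations a statement produces, then emit one multi-row insert instead of one vtable callback per row.
```ts
type FileRow = { id: string; path: string };

// Fold N pending row inserts into a single bulk INSERT statement.
function buildBulkInsert(table: string, rows: FileRow[]): { sql: string; params: string[] } {
  const placeholders = rows
    .map((_, i) => `(?${2 * i + 1}, ?${2 * i + 2})`)
    .join(", ");
  return {
    sql: `INSERT INTO ${table} (id, path) VALUES ${placeholders}`,
    params: rows.flatMap((row) => [row.id, row.path]),
  };
}
```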
```plain
v0.5 next
────── ────
┌───────┐ ┌───────┐
│ Query │ │ Query │
└───┬───┘ └───┬───┘
│ │
▼ ▼
┌──────────────┐ ┌─────────────┐
│ SQL Database │ │ Lix │
└──────┬───────┘ └──────┬──────┘
│ │
▼ ▼
┌───────┐ ┌──────────────┐
│ Lix │ │ SQL Database │
└───────┘ └──────────────┘
```
## GitHub stars and HackerNews
Lix was trending on HackerNews in late January. The outcome was an instant jump in GitHub stars and inbound requests to try out lix. Most inbound interest is around AI agents operating on non-code files and formats Git can't handle well (Excel, XML, SSIS packages).
[https://news.ycombinator.com/item?id=46713387](https://news.ycombinator.com/item?id=46713387)


## What's next in March
People want to test lix. The major use case is AI agents that operate on non-code files (.docx, .pdf, etc.). We have two remaining things to do:
### 1. Real workload testing and bug fixing
Real production workloads will surface performance issues and bugs that should be simple to solve with the completed refactor. After all, we control the query planner now.
### 2. AX (agent experience) testing and API iteration
AX testing? Yes. That's a fundamental shift in 2026. The old way of discussing APIs and conducting user interviews is no longer necessary. Ask an agent to do a task, follow up with "What friction points did you run into?", and fix what it reports.

================================================
FILE: blog/004-march-2026-update/index.md
================================================
---
date: "2026-04-03"
og:description: "500 real commits replayed with no corruption bugs. Without the semantic layer, Lix is ~8x faster than Git, but semantic writes still bottleneck on write amplification."
og:image: "./cover.svg"
og:image:alt: "Lix March 2026 Update: 500 commits with zero corruption, blob commit in 5ms, semantic writes need fixing"
---
# March 2026 Update: No Corruption Bugs, 8x Faster Than Git, Semantic Writes Still Too Slow

**TL;DR**
- Workload testing worked: 500 real commits replayed with no state corruption bugs
- Semantic writes still hit a write-amplification bottleneck on large files (500ms+)
- Without the semantic layer, the file-write-plus-commit workflow is ~8x faster than Git
- April goal: sub 100ms for 10k entity inserts
## Workload testing
[Last month](/blog/february-2026-update) we set out to do real workload testing in March to reveal performance bottlenecks and bugs that prevent production usage of lix.
The test replays 500 real commits from the [paraglide-js](https://github.com/opral/paraglide-js) repo. For each commit, it sets up the "before" state outside the timer, applies the same file changes, and measures how long Lix takes to commit. The simulated scenario: "I edited some files, now I'm committing."
Three findings came out of this.
### Finding 1: It works
The best result from the workload replay is that it worked. Replaying 500 real commits did not reveal state corruption bugs. That matters more than the benchmark number because correctness is the prerequisite for everything else.
### Finding 2: Semantic writes still bottleneck on write amplification
> [!NOTE]
> **Refresher: What is the semantic layer?**
>
> Lix parses files into structured entities like paragraphs, tables, images so it can diff, merge, and sync at that level instead of treating files as opaque blobs.
>
> ```
> contract.docx
> ↓
> paragraphs / tables / images
> ↓
> diff / merge / history on those units
> ```
The bottleneck is write amplification. A single file write fans out into many entity rows. Inserting a file with 10k entities means the engine has to process 10k entity rows. On the current path, semantic writes are multi-second operations. Any interaction above 100ms stops feeling instantaneous, so this needs to come down.
```
contract.docx Lix engine SQL database
┌──────────────────┐ ┌─────────────────────┐ ┌──────────────┐
│ Paragraph 1 │ │ process 10,000 │ │ │
│ Paragraph 2 │ │ entity rows │ │ INSERT row 1 │
│ Paragraph 3 │────►│ │──────►│ INSERT row 2 │
│ Table 1 │ │ validate, transform,│ │ ... │
│ Row 1 │ │ detect changes │ │ INSERT row │
│ Row 2 │ │ │ │ 10,000 │
│ Image 1 │ │ 💥 too slow │ │ │
│ ... │ └─────────────────────┘ └──────────────┘
│ Paragraph 4,291 │
└──────────────────┘
1 file write N entities to process N SQL row inserts
```
The engine is not fast enough to handle these large batches. The goal for April is to get 10k entity inserts under 100ms.
### Finding 3: Without the semantic layer, the file-write-plus-commit workflow is ~8x faster than Git
Unexpected good news. Without the semantic layer (treating files as blobs), Lix completes the same file-write-plus-commit workload in ~5 ms where Git takes ~39 ms.[^1]
[^1]: Measured on a MacBook Pro M5 Pro (18-core), SQLite in WAL mode.
| Phase | Git | Lix |
| ----------- | ---------- | --------- |
| File writes | ~0.2 ms | ~3.6 ms |
| Commit | ~39 ms | ~1 ms |
| **Total** | **~39 ms** | **~5 ms** |
The difference comes down to architecture. Lix applies mutations inside an open SQLite transaction. Committing is closing that transaction (~1 ms). The comparison runs `git add -A` followed by `git commit`, which scans the working tree, updates the index, and writes tree and commit objects.
This is encouraging, but it's the blob layer only. The semantic layer is what makes Lix useful for non-code files, and that's where the work is.
### Why not skip the semantic layer entirely?
If Lix is already fast without the semantic layer, why not just store blobs and diff on the fly?
This is really a source-of-truth decision, not a storage decision. Lix can keep both a blob and semantic state, but only one can be authoritative:
```
Option A: Blob is source of truth, diffs computed on the fly
┌──────────────┐ ┌──────────────┐
│ contract.docx│──────►│ re-parse │──────► diffs (computed every time)
│ (blob) │ │ on every op │
└──────────────┘ └──────────────┘
Option B: Diffs are source of truth, blob derived on demand
┌──────────────┐ ┌──────────────┐
│ diffs │──────►│ serialize │──────► contract.docx (derived)
│ (stored) │ │ on demand │
└──────────────┘ └──────────────┘
```
If both are independently writable, they can drift.
Git gets away with blob-first storage because its default diff and merge model is line-oriented and works well for ordinary text. For smaller structured text files like JSON, re-parsing on demand can still be acceptable. But as files grow, the cost per operation grows with them:
| File type | Size | Rebuild cost per operation |
| ------------------- | --------- | -------------------------- |
| `.js` source file | ~0.005 MB | trivial |
| Large JSON config | ~0.5 MB | acceptable |
| `.docx` with images | ~5 MB | slow |
| `.xlsx` spreadsheet | 5-20 MB | 💥 too slow |
OOXML files like `.docx` and `.xlsx` are ZIP packages made of many XML parts, so rebuilding semantic state from the blob on every merge, history read, or sync means repeatedly paying unzip, parse, and tree-diff costs. A cache avoids repeated rebuilds, but now there are two representations to keep consistent — every write path must update both, and bugs in that synchronization are silent data corruption.
So Lix makes semantic state canonical and materializes the blob on demand when someone actually needs the file bytes. The tradeoff is that blob writes pay an upfront parsing cost — which is the write-amplification bottleneck we're now fixing.
Long term, most app and agent writes should bypass blob parsing entirely. They will write entities directly, so the hot path avoids both blob parsing and blob serialization.
That means the semantic layer must be fast.
## Prolly trees for cheap versioning
Solving write speed alone isn't enough — storage also needs to scale across versions. Without content deduplication, creating a new version means duplicating all entity data. A 10k-entity Word document across 5 versions = 50k rows stored.
```
Without deduplication:
version: main version: draft
┌──────────────────┐ ┌──────────────────┐
│ 10,000 entities │ │ 10,000 entities │ ← full copy
└──────────────────┘ └──────────────────┘
💥 10,000 rows 💥 10,000 rows (copied)
```
[Prolly trees](https://docs.dolthub.com/architecture/storage-engine/prolly-tree) are the most promising fit for this. Entities are grouped into chunks with boundaries determined by content hashes. If one paragraph changes, only the chunk containing that paragraph is new. The rest is shared across versions.
```
With Prolly trees:
version: main version: draft
(original) (paragraph 3 edited)
┌──────────────────┐ ┌──────────────────┐
│ Paragraph 1 │ │ Paragraph 1 │
│ Paragraph 2 │ │ Paragraph 2 │
│ Paragraph 3 │ │ Paragraph 3 ✎ │
│ Table 1 │ │ Table 1 │
│ ... │ │ ... │
│ Paragraph 4,291 │ │ Paragraph 4,291 │
└──────────────────┘ └──────────────────┘
│ │
▼ ▼
┌──────────────┐ ┌──────────────┐
│ chunk A ──┼────────────────────┼── chunk A │ ← shared
│ chunk B │ │ chunk B' │ ← different (contains edited paragraph 3)
│ chunk C ──┼────────────────────┼── chunk C │ ← shared
│ chunk D ──┼────────────────────┼── chunk D │ ← shared
└──────────────┘ └──────────────┘
✅ Creating a version = pointing to the same chunks
✅ Only changed chunks are stored separately
```
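The core trick is that chunk boundaries are content-defined. A minimal sketch (illustrative, assuming 64-bit entity hashes; not Dolt's or Lix's actual algorithm): declare a boundary whenever an entity's hash falls below a threshold, so a local edit can only affect the chunk it lives in.
```ts
// Split a sequence of entity hashes into chunks averaging `avgChunkSize` entries.
function chunkByContent(entityHashes: bigint[], avgChunkSize = 4): bigint[][] {
  const threshold = (1n << 64n) / BigInt(avgChunkSize); // hit ~1/avgChunkSize of the time
  const chunks: bigint[][] = [];
  let current: bigint[] = [];
  for (const hash of entityHashes) {
    current.push(hash);
    // The boundary depends only on this entity's own hash, so editing one
    // paragraph cannot shift the boundaries of unrelated chunks; unchanged
    // chunks stay identical and can be shared between versions by reference.
    if (hash < threshold) {
      chunks.push(current);
      current = [];
    }
  }
  if (current.length > 0) chunks.push(current);
  return chunks;
}
```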
## What's next in April
**Goal: Make Lix ready for people to try out.**
March proved the blob path works. April is about closing the gap so the semantic layer is fast enough and correct enough for real use.
1. **10k entity inserts under 100 ms.** SQLite can insert 10k rows in under 10 ms. That gives us ~90 ms of headroom to work with.
2. **Prolly trees for cheap branching.** Without content deduplication, every branch copies all entity data. Prolly trees share unchanged chunks across versions, so branching a 10k-entity document is nearly free.
3. **Workload testing with the semantic layer on.** March proved the blob path doesn't corrupt state across 500 real commits. April repeats that test with semantic writes enabled.
================================================
FILE: blog/005-april-2026-update/index.md
================================================
---
date: "2026-05-11"
og:description: "The new DataFusion path runs the core Lix MVP flow. April did not hit the 10k inserts target, but it clarified why Lix needs control from incoming query down to storage."
og:image: "./cover.svg"
og:image:alt: "Lix April 2026 Update cover showing DataFusion planning queries, Lix owning the storage abstraction, and SQLite, RocksDB, S3/R2, and OPFS as backends"
---
# April 2026 Update: Adopting DataFusion

**TL;DR**
- Benchmarking exposed that SQLite gives too little control over Lix's versioned storage model to keep improving incrementally.
- Decision: move query execution to DataFusion while keeping SQLite as a possible physical storage backend.
- May goal: Release `v0.6` MVP with focus on CRUD with branching and merging on the optimized semantic write path that the file API will use next.
## What works now
The important April result is that the core API works on the new path.
The shape is the MVP API:
```ts
import { openLix } from "@lix-js/sdk";
import { createBetterSqlite3Backend } from "@lix-js/sdk/sqlite";
const lix = await openLix({
backend: createBetterSqlite3Backend({ path: "app.lix" }),
// Later: swap this for a RocksDB/S3/OPFS backend
// without changing the Lix API below.
});
await lix.createVersion({ name: "draft" });
await lix.execute("INSERT INTO markdown_paragraph (id, text) VALUES ($1, $2)", [
"paragraph_1",
"Ship CRUD MVP",
]);
await lix.switchVersion({ name: "main" });
await lix.mergeVersion({ source: "draft" });
```
The exact API names might still change. The important part is that the flow works:
- open a Lix
- create a version
- write entities with CRUD operations
- switch versions
- merge a version
That is the product surface for the MVP.
Files are not in the `v0.6` MVP on purpose.
A file write fans out into entity writes. A Word document, JSON file, or spreadsheet save can become thousands of inserts. That means the file API can only be as fast as the entity layer underneath it. The 10k inserts benchmark measures that layer.
Most apps and agents should write entities directly anyway. They should update a paragraph, cell, or property, not re-serialize a whole document. The file API comes after CRUD because it is built on the same semantic write path.
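To see why, here is a sketch of the fan-out (illustrative; the hypothetical `parseToEntities` stands in for a format plugin, and `lix.execute` is the MVP API shown above):
```ts
type Entity = { id: string; text: string };

// A real plugin would parse .docx/.json/.xlsx structure; this just splits paragraphs.
function parseToEntities(bytes: Uint8Array): Entity[] {
  return new TextDecoder()
    .decode(bytes)
    .split("\n\n")
    .map((text, index) => ({ id: `paragraph_${index}`, text }));
}

async function saveFile(
  lix: { execute(sql: string, params: unknown[]): Promise<unknown> },
  bytes: Uint8Array,
) {
  // One file save becomes N entity writes, so file latency scales with N.
  for (const entity of parseToEntities(bytes)) {
    await lix.execute("INSERT INTO markdown_paragraph (id, text) VALUES ($1, $2)", [
      entity.id,
      entity.text,
    ]);
  }
}
```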
The first preview is published on npm:
```bash
npm install @lix-js/sdk@0.6.0-preview.2
```
[`@lix-js/sdk@0.6.0-preview.2`](https://www.npmjs.com/package/@lix-js/sdk/v/0.6.0-preview.2) is not the final `v0.6` MVP yet. It is the preview that proves the new path can be installed and tested.
## April goal
[Last month](/blog/march-2026-update) we found the next bottleneck: semantic writes.
The blob path was already fast. The semantic path was not. Writing one file can fan out into thousands of entities, and the April goal was to get **10k entity inserts under 100ms**.
The number is not random. A semantic file is not one row:
- a Word document becomes paragraphs, tables, comments, images, and relationships
- a JSON file becomes hundreds or thousands of properties
- a spreadsheet becomes cells, formulas, sheets, and metadata
10k inserts is the first useful proxy for "real file, real structure." 100ms is the interaction budget. Below that, the write still feels instant. Above that, Lix becomes something users and agents wait on.
We did not hit the benchmark in April.
We are not publishing a final April number because the benchmark target moved to the new DataFusion path. Optimizing the old SQLite-centered path further would measure the architecture we are replacing.
The problem was not one slow query. The SQLite-centered path kept pushing Lix concepts like version roots, inherited rows, tombstones, and file projections into SQLite tables and views. Each optimization fixed one path, but the next feature needed another translation layer.
## Finding: too little control
The recurring problem has been architecture confidence. Lix should ship an MVP and improve from there. But that only works if the architecture can be improved incrementally.
In February, we wrote that the Rust rewrite gave Lix control over the query planner. That wording was too broad. Lix controlled the query before SQLite saw it. Lix could parse and rewrite SQL, batch operations, and avoid many vtable callbacks.
April showed that this is not enough.
SQLite still owns the final query planner and storage model. Lix can rewrite queries before SQLite sees them, but the result still has to fit into SQLite tables, indexes, views, and vtables.
The 10k inserts work made the missing control clear. Lix needs control from the incoming query all the way down to raw storage. Every write touches current state, history, branch visibility, file projections, and later merge inputs. Those choices depend on the physical shape of the data.
```plain
February / March architecture
┌───────────┐
│ SQL query │
└─────┬─────┘
│
▼
┌────────────────────────┐
│ Lix SQL parser/rewrite │ ← Lix controls this
└─────┬──────────────────┘
│
▼
┌──────────────────────┐
│ SQLite query planner │ ← SQLite still controls this
└─────┬────────────────┘
│
▼
┌───────────────────────────────┐
│ SQLite tables/views/vtables │ ← Lix concepts squeezed here
└─────┬─────────────────────────┘
│
▼
┌────────────────┐
│ SQLite storage │
└────────────────┘
```
## Decision: adopt DataFusion
DataFusion is an Apache Arrow SQL query engine. It gives Lix SQL parsing, planning, and execution while letting Lix provide the logic underneath.
The decision is not "SQLite bad, custom database good." Reusing a query engine is still the right idea. The mistake would be building one from scratch when DataFusion exists.
That is the control Lix needs: from incoming query, through `lix_state`, versions, history, branch visibility, merge inputs, and file projections, down to the raw storage backend.
SQLite does not go away. It can still be the physical storage backend. The change is that SQLite no longer defines the query and storage shape of Lix state.
```plain
DataFusion-centered architecture
┌───────────┐
│ SQL query │
└─────┬─────┘
│
▼
┌─────────────────────────┐
│ DataFusion query engine │ ← Lix controls query execution
└─────┬───────────────────┘
│
▼
┌──────────────────────────────────┐
│ Lix logic + storage abstraction │ ← Lix controls this
└─────┬────────────────────────────┘
│
▼
┌──────────────────────────────────┐
│ SQLite · RocksDB · S3/R2 · OPFS │ ← physical storage
└──────────────────────────────────┘
```
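A minimal sketch of what that inversion looks like in code: DataFusion owns parse/plan/execute, and the table the query reads from is supplied by our code. Here a `MemTable` stands in for the Lix state provider; the real engine would implement a custom `TableProvider` over its own storage, and the two-column schema is illustrative:

```rust
// Cargo deps (assumed): datafusion, tokio = { features = ["rt-multi-thread", "macros"] }
use std::sync::Arc;

use datafusion::arrow::array::{ArrayRef, StringArray};
use datafusion::arrow::datatypes::{DataType, Field, Schema};
use datafusion::arrow::record_batch::RecordBatch;
use datafusion::datasource::MemTable;
use datafusion::prelude::SessionContext;

#[tokio::main]
async fn main() -> datafusion::error::Result<()> {
    // Two-column stand-in for Lix state; the real schema is richer.
    let schema = Arc::new(Schema::new(vec![
        Field::new("entity_id", DataType::Utf8, false),
        Field::new("snapshot", DataType::Utf8, false),
    ]));
    let batch = RecordBatch::try_new(
        schema.clone(),
        vec![
            Arc::new(StringArray::from(vec!["para_1", "para_2"])) as ArrayRef,
            Arc::new(StringArray::from(vec![r#"{"text":"a"}"#, r#"{"text":"b"}"#])) as ArrayRef,
        ],
    )?;

    let ctx = SessionContext::new();
    // The registered name is what user SQL refers to. The engine would
    // register a custom TableProvider here instead of a MemTable.
    let table = MemTable::try_new(schema, vec![vec![batch]])?;
    ctx.register_table("lix_state", Arc::new(table))?;

    let df = ctx.sql("SELECT entity_id FROM lix_state ORDER BY entity_id").await?;
    df.show().await?;
    Ok(())
}
```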
Lix does not need to invent physical storage. Existing systems should still handle durability, transactions, files, pages, object storage, and the other hard parts of persistence. The prolly-tree direction from March is now part of this storage abstraction work: make branching cheap by sharing unchanged state, while keeping CRUD operations fast enough for the MVP.
This also changes the portability story. Earlier posts framed portability as "any SQL database." With DataFusion, portability moves one layer down: any backend that can satisfy Lix's storage abstraction. Postgres can still be a backend later, but not because Lix delegates SQL execution to Postgres.
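As a rough sketch of that boundary, assuming a key-value shape similar to the `kv_get`/`kv_put` style the engine's backends use (the exact trait here is illustrative, not the published API):

```rust
use std::collections::BTreeMap;

/// Illustrative storage boundary: versioning logic above, bytes below.
/// The engine's real backend traits are async and transactional; this
/// sketch keeps only the shape.
trait StorageBackend {
    fn kv_get(&self, namespace: &str, key: &[u8]) -> Option<Vec<u8>>;
    fn kv_put(&mut self, namespace: &str, key: &[u8], value: &[u8]);
}

/// In-memory backend; SQLite, RocksDB, OPFS, or S3/R2 would implement
/// the same trait over their own durability story.
struct MemoryBackend {
    map: BTreeMap<(String, Vec<u8>), Vec<u8>>,
}

impl StorageBackend for MemoryBackend {
    fn kv_get(&self, namespace: &str, key: &[u8]) -> Option<Vec<u8>> {
        self.map.get(&(namespace.to_string(), key.to_vec())).cloned()
    }
    fn kv_put(&mut self, namespace: &str, key: &[u8], value: &[u8]) {
        self.map.insert((namespace.to_string(), key.to_vec()), value.to_vec());
    }
}

fn main() {
    let mut backend = MemoryBackend { map: BTreeMap::new() };
    backend.kv_put("tracked_state", b"entity/para_1", br#"{"text":"a"}"#);
    assert!(backend.kv_get("tracked_state", b"entity/para_1").is_some());
}
```

Anything that can satisfy this shape, from a browser's OPFS to an object store, can sit under DataFusion without the query layer knowing the difference.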
## What happened to March's goals
March had three April goals:
1. 10k entity inserts under 100ms
2. prolly trees for cheap branching
3. workload testing with the semantic layer on
The first goal moved to May on the DataFusion path. Prolly trees moved into the broader physical storage abstraction work. The semantic workload replay comes after the `v0.6` path is fast enough to be the one we intend to ship.
## What's next in May
May goal: turn the preview into the Lix `v0.6` MVP.
The acceptance criteria:
1. CRUD operations work through the new DataFusion path.
2. Branching and merging work on that path.
3. 10k semantic inserts are under 100ms.
4. The Lix physical storage abstraction is no more than 1.5x slower than a direct SQLite storage + query baseline for the same workload.
The 1.5x number is the guardrail for the storage abstraction. It is not the final product latency target. It checks that the abstraction itself is not the bottleneck. If storage is close to SQLite's baseline, Lix can ship the MVP and keep optimizing query/runtime logic above it incrementally.
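The check itself can stay simple: time the same workload through the abstraction and against the baseline, then compare the ratio. A sketch of the guardrail's shape, with a stand-in workload where the real 10k-insert runs go (the actual harness lives in the repo's `benchmarks/` crates):

```rust
use std::hint::black_box;
use std::time::Instant;

/// Time one run of a workload in milliseconds.
fn time_ms(mut workload: impl FnMut()) -> f64 {
    let started = Instant::now();
    workload();
    started.elapsed().as_secs_f64() * 1_000.0
}

/// Stand-in workload; the real check runs the same insert workload once
/// through the storage abstraction and once against SQLite directly.
fn workload() {
    let mut acc = 0u64;
    for i in 0..5_000_000u64 {
        acc = acc.wrapping_add(black_box(i));
    }
    black_box(acc);
}

fn main() {
    let baseline_ms = time_ms(workload);
    let abstraction_ms = time_ms(workload);
    let ratio = abstraction_ms / baseline_ms;
    println!("baseline {baseline_ms:.2} ms, abstraction {abstraction_ms:.2} ms, {ratio:.2}x");
    if ratio > 1.5 {
        println!("storage abstraction exceeded the 1.5x guardrail");
    }
}
```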
Files follow after CRUD because file writes fan out into the same entity writes.
Everything else is secondary.
================================================
FILE: blog/authors.json
================================================
{
"samuelstroschein": {
"name": "Samuel Stroschein",
"avatar": "https://avatars.githubusercontent.com/u/35429197?v=4",
"twitter": "https://x.com/samuelstroschei",
"github": "https://github.com/samuelstroschein"
}
}
================================================
FILE: blog/table_of_contents.json
================================================
[
{
"path": "./005-april-2026-update/index.md",
"slug": "april-2026-update",
"authors": ["samuelstroschein"]
},
{
"path": "./004-march-2026-update/index.md",
"slug": "march-2026-update",
"authors": ["samuelstroschein"]
},
{
"path": "./003-february-2026-update/index.md",
"slug": "february-2026-update",
"authors": ["samuelstroschein"]
},
{
"path": "./002-modeling-a-company-as-a-repository/index.md",
"slug": "modeling-a-company-as-a-repository",
"authors": ["samuelstroschein"]
},
{
"path": "./001-introducing-lix/index.md",
"slug": "introducing-lix",
"authors": ["samuelstroschein"]
}
]
================================================
FILE: cla-signatures.json
================================================
{
"signedContributors": [
{
"name": "janfjohannes",
"id": 110794494,
"comment_id": 1711859828,
"created_at": "2023-09-08T15:36:26Z",
"repoId": 394757291,
"pullRequestNo": 1319
},
{
"name": "MaxKless",
"id": 34165455,
"comment_id": 1714026516,
"created_at": "2023-09-11T14:39:53Z",
"repoId": 394757291,
"pullRequestNo": 1325
},
{
"name": "felixhaeberle",
"id": 34959078,
"comment_id": 1717809210,
"created_at": "2023-09-13T14:59:55Z",
"repoId": 394757291,
"pullRequestNo": 1339
},
{
"name": "samuelstroschein",
"id": 35429197,
"comment_id": 1719038132,
"created_at": "2023-09-14T08:55:18Z",
"repoId": 394757291,
"pullRequestNo": 1339
},
{
"name": "NiklasBuchfink",
"id": 59048346,
"comment_id": 1719232555,
"created_at": "2023-09-14T10:59:32Z",
"repoId": 394757291,
"pullRequestNo": 1347
},
{
"name": "floriandwt",
"id": 92092993,
"comment_id": 1719439744,
"created_at": "2023-09-14T13:20:15Z",
"repoId": 394757291,
"pullRequestNo": 1339
},
{
"name": "NilsJacobsen",
"id": 58360188,
"comment_id": 1727541380,
"created_at": "2023-09-20T11:30:29Z",
"repoId": 394757291,
"pullRequestNo": 1385
},
{
"name": "misa1515",
"id": 61636045,
"comment_id": 1728275039,
"created_at": "2023-09-20T18:59:25Z",
"repoId": 394757291,
"pullRequestNo": 1388
},
{
"name": "BRGustavoRibeiro",
"id": 34517016,
"comment_id": 1728633275,
"created_at": "2023-09-21T01:33:29Z",
"repoId": 394757291,
"pullRequestNo": 1390
},
{
"name": "jannesblobel",
"id": 72493222,
"comment_id": 1729390653,
"created_at": "2023-09-21T11:36:20Z",
"repoId": 394757291,
"pullRequestNo": 1393
},
{
"name": "hecker",
"id": 23746655,
"comment_id": 1736918216,
"created_at": "2023-09-27T08:14:41Z",
"repoId": 394757291,
"pullRequestNo": 1408
},
{
"name": "openscript",
"id": 1105080,
"comment_id": 1738661818,
"created_at": "2023-09-28T07:57:14Z",
"repoId": 394757291,
"pullRequestNo": 1412
},
{
"name": "martin-lysk",
"id": 113943358,
"comment_id": 1772895783,
"created_at": "2023-10-20T14:54:50Z",
"repoId": 394757291,
"pullRequestNo": 1504
},
{
"name": "sunxyw",
"id": 31698606,
"comment_id": 1784985693,
"created_at": "2023-10-30T11:25:07Z",
"repoId": 394757291,
"pullRequestNo": 1533
},
{
"name": "ZerdoX-x",
"id": 49815452,
"comment_id": 1787270801,
"created_at": "2023-10-31T13:55:59Z",
"repoId": 394757291,
"pullRequestNo": 1549
},
{
"name": "WarningImHack3r",
"id": 43064022,
"comment_id": 1802507427,
"created_at": "2023-11-08T19:21:15Z",
"repoId": 394757291,
"pullRequestNo": 1615
},
{
"name": "albbus-stack",
"id": 57916483,
"comment_id": 1804883805,
"created_at": "2023-11-10T00:24:59Z",
"repoId": 394757291,
"pullRequestNo": 1620
},
{
"name": "JLAcostaEC",
"id": 61467132,
"comment_id": 1806107356,
"created_at": "2023-11-10T17:13:33Z",
"repoId": 394757291,
"pullRequestNo": 1623
},
{
"name": "rishi-raj-jain",
"id": 46300090,
"comment_id": 1810487483,
"created_at": "2023-11-14T15:44:12Z",
"repoId": 394757291,
"pullRequestNo": 1638
},
{
"name": "DanikVitek",
"id": 25585136,
"comment_id": 1811255169,
"created_at": "2023-11-14T20:49:37Z",
"repoId": 394757291,
"pullRequestNo": 1640
},
{
"name": "Min2who",
"id": 127925465,
"comment_id": 1813826899,
"created_at": "2023-11-16T05:47:48Z",
"repoId": 394757291,
"pullRequestNo": 1643
},
{
"name": "LorisSigrist",
"id": 43482866,
"comment_id": 1819259247,
"created_at": "2023-11-20T15:15:25Z",
"repoId": 394757291,
"pullRequestNo": 1659
},
{
"name": "KraXen72",
"id": 21956756,
"comment_id": 1825537784,
"created_at": "2023-11-24T11:28:53Z",
"repoId": 394757291,
"pullRequestNo": 1732
},
{
"name": "AdamTmHun",
"id": 61880960,
"comment_id": 1826420619,
"created_at": "2023-11-25T21:10:20Z",
"repoId": 394757291,
"pullRequestNo": 1745
},
{
"name": "KTibow",
"id": 10727862,
"comment_id": 1826423449,
"created_at": "2023-11-25T21:27:57Z",
"repoId": 394757291,
"pullRequestNo": 1746
},
{
"name": "thetarnav",
"id": 24491503,
"comment_id": 1833456333,
"created_at": "2023-11-30T10:08:16Z",
"repoId": 394757291,
"pullRequestNo": 1785
},
{
"name": "TajAlasfiyaa",
"id": 87016999,
"comment_id": 1856385866,
"created_at": "2023-12-14T18:35:58Z",
"repoId": 394757291,
"pullRequestNo": 1893
},
{
"name": "tomas-correia",
"id": 20492365,
"comment_id": 1862914722,
"created_at": "2023-12-19T14:52:47Z",
"repoId": 394757291,
"pullRequestNo": 1919
},
{
"name": "Gernii",
"id": 54741529,
"comment_id": 1863028528,
"created_at": "2023-12-19T15:55:04Z",
"repoId": 394757291,
"pullRequestNo": 1921
},
{
"name": "mr-islam",
"id": 17675428,
"comment_id": 1871469307,
"created_at": "2023-12-28T20:26:39Z",
"repoId": 394757291,
"pullRequestNo": 1955
},
{
"name": "jldec",
"id": 849592,
"comment_id": 1894298346,
"created_at": "2024-01-16T18:28:34Z",
"repoId": 394757291,
"pullRequestNo": 2040
},
{
"name": "oscard0m",
"id": 2574275,
"comment_id": 1895458003,
"created_at": "2024-01-17T09:52:08Z",
"repoId": 394757291,
"pullRequestNo": 2047
},
{
"name": "leonardoRocchini",
"id": 62795461,
"comment_id": 1924359871,
"created_at": "2024-02-02T17:29:39Z",
"repoId": 394757291,
"pullReq
SYMBOL INDEX (7045 symbols across 315 files)
FILE: benchmarks/10k-entities/src/main.rs
constant DEFAULT_OUTPUT_DIR (line 17) | const DEFAULT_OUTPUT_DIR: &str = "artifact/benchmarks/10k-entities";
constant DEFAULT_PROPS (line 18) | const DEFAULT_PROPS: usize = 10_000;
constant DEFAULT_WARMUPS (line 19) | const DEFAULT_WARMUPS: usize = 2;
constant DEFAULT_ITERATIONS (line 20) | const DEFAULT_ITERATIONS: usize = 10;
constant DIRECT_ENTITY_WRITE_CHUNK_SIZE (line 21) | const DIRECT_ENTITY_WRITE_CHUNK_SIZE: usize = 250;
constant PLUGIN_KEY (line 23) | const PLUGIN_KEY: &str = "json";
constant PLUGIN_SCHEMA_KEY (line 24) | const PLUGIN_SCHEMA_KEY: &str = "json_pointer";
constant PLUGIN_ARCHIVE_MANIFEST_JSON (line 25) | const PLUGIN_ARCHIVE_MANIFEST_JSON: &str = r#"{
constant JSON_POINTER_SCHEMA_JSON (line 35) | const JSON_POINTER_SCHEMA_JSON: &str =
type BenchResult (line 38) | type BenchResult<T> = Result<T, String>;
type Args (line 45) | struct Args {
type BenchmarkCaseKind (line 60) | enum BenchmarkCaseKind {
method id (line 66) | fn id(self) -> &'static str {
method title (line 73) | fn title(self) -> &'static str {
method timed_operation (line 80) | fn timed_operation(self) -> &'static str {
method notes (line 91) | fn notes(self) -> Vec<&'static str> {
method timed_sql (line 109) | fn timed_sql(self) -> &'static str {
method verification (line 118) | fn verification(self) -> &'static str {
method setup_outside_timer (line 129) | fn setup_outside_timer(self) -> Vec<&'static str> {
type Report (line 148) | struct Report {
type BenchmarkMetadata (line 157) | struct BenchmarkMetadata {
type SharedSetupReport (line 163) | struct SharedSetupReport {
type CaseReport (line 175) | struct CaseReport {
type CaseSetupReport (line 187) | struct CaseSetupReport {
type RunSample (line 195) | struct RunSample {
type TimingSummary (line 205) | struct TimingSummary {
type PhaseSummary (line 213) | struct PhaseSummary {
type ComparisonSummary (line 221) | struct ComparisonSummary {
type TempSqlitePath (line 234) | struct TempSqlitePath {
method new (line 239) | fn new(label: &str) -> Self {
method path (line 245) | fn path(&self) -> &Path {
method drop (line 251) | fn drop(&mut self) {
function main (line 258) | fn main() {
function run (line 270) | async fn run(args: Args) -> BenchResult<()> {
function run_case (line 348) | async fn run_case(
function run_sample (line 406) | async fn run_sample(
function run_file_write_sample (line 438) | async fn run_file_write_sample(
function run_direct_entity_write_sample (line 512) | async fn run_direct_entity_write_sample(
function finish_sample (line 593) | async fn finish_sample(
function open_prepared_session (line 647) | async fn open_prepared_session(
function bootstrap_empty_json_file (line 667) | async fn bootstrap_empty_json_file(
function load_root_json_pointer_entity_id (line 686) | async fn load_root_json_pointer_entity_id(
function build_direct_entity_write_sql_batches (line 724) | fn build_direct_entity_write_sql_batches(
function verify_file_json_matches (line 791) | async fn verify_file_json_matches(
function build_plugin_archive (line 832) | fn build_plugin_archive(plugin_wasm_bytes: &[u8]) -> BenchResult<Vec<u8>> {
function scalar_count (line 859) | async fn scalar_count(session: &Session, sql: &str, params: &[Value]) ->...
function summarize_timings (line 882) | fn summarize_timings(samples: &[RunSample]) -> BenchResult<TimingSummary> {
function summarize_phase (line 895) | fn summarize_phase(mut values: Vec<f64>) -> BenchResult<PhaseSummary> {
function build_comparison_summary (line 918) | fn build_comparison_summary(
function build_flat_json_payload (line 947) | fn build_flat_json_payload(props: usize) -> BenchResult<Vec<u8>> {
function build_plugin_json_v2_wasm (line 958) | fn build_plugin_json_v2_wasm(repo_root: &Path) -> BenchResult<PathBuf> {
function render_markdown_report (line 1019) | fn render_markdown_report(report: &Report) -> String {
function render_case_markdown (line 1074) | fn render_case_markdown(case: &CaseReport) -> String {
function print_summary (line 1148) | fn print_summary(report: &Report, report_json_path: &Path, report_markdo...
function repo_root (line 1194) | fn repo_root() -> BenchResult<PathBuf> {
function temp_sqlite_path (line 1201) | fn temp_sqlite_path(label: &str) -> PathBuf {
function now_unix_ms (line 1209) | fn now_unix_ms() -> BenchResult<u128> {
function escape_sql_string (line 1216) | fn escape_sql_string(value: &str) -> String {
function escape_json_pointer_segment (line 1220) | fn escape_json_pointer_segment(segment: &str) -> String {
function io_err (line 1224) | fn io_err(error: impl std::fmt::Display) -> String {
function serde_err (line 1228) | fn serde_err(error: impl std::fmt::Display) -> String {
function lix_err (line 1232) | fn lix_err(error: LixError) -> String {
FILE: benchmarks/10k-entities/src/sqlite_backend.rs
type BenchSqliteBackend (line 14) | pub struct BenchSqliteBackend {
method file_backed (line 29) | pub fn file_backed(path: &Path) -> Result<Self, LixError> {
method pool (line 49) | async fn pool(&self) -> Result<&sqlx::SqlitePool, LixError> {
type BenchSqliteBackendInner (line 18) | struct BenchSqliteBackendInner {
type BenchSqliteTransaction (line 23) | struct BenchSqliteTransaction {
method dialect (line 89) | fn dialect(&self) -> SqlDialect {
method execute (line 93) | async fn execute(&self, sql: &str, params: &[Value]) -> Result<QueryResu...
method begin_transaction (line 108) | async fn begin_transaction(
method begin_savepoint (line 134) | async fn begin_savepoint(
method dialect (line 144) | fn dialect(&self) -> SqlDialect {
method mode (line 148) | fn mode(&self) -> TransactionMode {
method execute (line 152) | async fn execute(&mut self, sql: &str, params: &[Value]) -> Result<Query...
method execute_batch (line 156) | async fn execute_batch(&mut self, batch: &PreparedBatch) -> Result<Query...
method commit (line 180) | async fn commit(mut self: Box<Self>) -> Result<(), LixError> {
method rollback (line 192) | async fn rollback(mut self: Box<Self>) -> Result<(), LixError> {
function execute_query_with_connection (line 205) | async fn execute_query_with_connection(
function bind_param_sqlite (line 249) | fn bind_param_sqlite<'q>(
function map_sqlite_value (line 264) | fn map_sqlite_value(row: &sqlx::sqlite::SqliteRow, index: usize) -> Resu...
FILE: benchmarks/10k-entities/src/wasmtime_runtime.rs
type WirePluginFile (line 20) | struct WirePluginFile {
type WireDetectChangesRequest (line 27) | struct WireDetectChangesRequest {
type WireDetectStateContext (line 34) | struct WireDetectStateContext {
type WireActiveStateRow (line 39) | struct WireActiveStateRow {
type WirePluginEntityChange (line 53) | struct WirePluginEntityChange {
type WireApplyChangesRequest (line 60) | struct WireApplyChangesRequest {
type WirePluginEntityChangeOutput (line 66) | struct WirePluginEntityChangeOutput {
type TestWasmtimeRuntime (line 72) | pub struct TestWasmtimeRuntime {
method new (line 78) | pub fn new() -> Result<Self, LixError> {
type ComponentCacheKey (line 98) | struct ComponentCacheKey {
method from_bytes (line 104) | fn from_bytes(bytes: &[u8]) -> Self {
type TestWasmtimeInstance (line 112) | struct TestWasmtimeInstance {
type WasiState (line 117) | struct WasiState {
method table (line 123) | fn table(&mut self) -> &mut ResourceTable {
method ctx (line 129) | fn ctx(&mut self) -> &mut WasiCtx {
method init_component (line 136) | async fn init_component(
method call (line 185) | async fn call(&self, export: &str, input: &[u8]) -> Result<Vec<u8>, LixE...
function wasm_fingerprint (line 300) | fn wasm_fingerprint(bytes: &[u8]) -> u64 {
function wire_file_to_binding (line 306) | fn wire_file_to_binding(file: WirePluginFile) -> plugin_bindings::export...
function wire_change_to_binding (line 314) | fn wire_change_to_binding(
function wire_state_context_to_binding (line 324) | fn wire_state_context_to_binding(
function wire_active_state_row_to_binding (line 336) | fn wire_active_state_row_to_binding(
function binding_change_to_wire (line 353) | fn binding_change_to_wire(
function map_plugin_error (line 366) | fn map_plugin_error(error: plugin_bindings::exports::lix::plugin::api::P...
FILE: benchmarks/engine2-json-pointer/src/main.rs
constant DEFAULT_OUTPUT_DIR (line 13) | const DEFAULT_OUTPUT_DIR: &str = "artifact/benchmarks/engine2-json-point...
constant DEFAULT_ROWS (line 14) | const DEFAULT_ROWS: usize = 10_000;
constant DEFAULT_WARMUPS (line 15) | const DEFAULT_WARMUPS: usize = 1;
constant DEFAULT_ITERATIONS (line 16) | const DEFAULT_ITERATIONS: usize = 5;
constant DEFAULT_CHUNK_SIZE (line 17) | const DEFAULT_CHUNK_SIZE: usize = 500;
constant JSON_POINTER_SCHEMA_JSON (line 18) | const JSON_POINTER_SCHEMA_JSON: &str =
type BenchResult (line 21) | type BenchResult<T> = Result<T, String>;
type Args (line 28) | struct Args {
type Report (line 49) | struct Report {
type RunSample (line 60) | struct RunSample {
type TimingSummary (line 70) | struct TimingSummary {
type PhaseSummary (line 78) | struct PhaseSummary {
function main (line 85) | fn main() {
function run (line 92) | fn run() -> BenchResult<()> {
function run_insert_case (line 147) | async fn run_insert_case(args: &Args, label: &str, index: usize) -> Benc...
function register_json_pointer_schema (line 204) | async fn register_json_pointer_schema(lix: &Lix) -> BenchResult<()> {
function ensure_benchmark_file_descriptor (line 218) | async fn ensure_benchmark_file_descriptor(lix: &Lix) -> BenchResult<()> {
function build_insert_batches (line 242) | fn build_insert_batches(row_count: usize, chunk_size: usize) -> BenchRes...
function count_json_pointer_rows (line 280) | async fn count_json_pointer_rows(lix: &Lix) -> BenchResult<usize> {
function summarize_samples (line 306) | fn summarize_samples(samples: &[RunSample]) -> TimingSummary {
function summarize_phase (line 315) | fn summarize_phase(mut values: Vec<f64>) -> PhaseSummary {
function render_markdown_report (line 340) | fn render_markdown_report(report: &Report) -> String {
function sql_string (line 369) | fn sql_string(value: &str) -> String {
function display_lix_error (line 373) | fn display_lix_error(error: LixError) -> String {
function millis (line 377) | fn millis(duration: Duration) -> f64 {
function unix_ms (line 381) | fn unix_ms() -> u128 {
type CleanupDatabase (line 388) | struct CleanupDatabase {
method remove_existing (line 394) | fn remove_existing(&self) -> BenchResult<()> {
method paths (line 404) | fn paths(&self) -> Vec<PathBuf> {
method drop (line 413) | fn drop(&mut self) {
FILE: benchmarks/engine2-json-pointer/src/sqlite_backend.rs
constant KV_TABLE (line 9) | const KV_TABLE: &str = "lix_engine2_kv";
type Engine2SqliteBackend (line 12) | pub struct Engine2SqliteBackend {
method file_backed (line 23) | pub fn file_backed(path: &Path) -> Result<Self, LixError> {
method lock_conn (line 44) | fn lock_conn(&self) -> Result<MutexGuard<'_, Connection>, LixError> {
type Engine2SqliteTransaction (line 16) | pub struct Engine2SqliteTransaction {
method lock_conn (line 151) | fn lock_conn(&self) -> Result<MutexGuard<'_, Connection>, LixError> {
method begin_transaction (line 53) | async fn begin_transaction(
method kv_get (line 73) | async fn kv_get(&self, namespace: &str, key: &[u8]) -> Result<Option<Vec...
method kv_scan (line 78) | async fn kv_scan(
method mode (line 91) | fn mode(&self) -> TransactionBeginMode {
method kv_get (line 95) | async fn kv_get(&mut self, namespace: &str, key: &[u8]) -> Result<Option...
method kv_scan (line 100) | async fn kv_scan(
method kv_put (line 110) | async fn kv_put(&mut self, namespace: &str, key: &[u8], value: &[u8]) ->...
method kv_delete (line 123) | async fn kv_delete(&mut self, namespace: &str, key: &[u8]) -> Result<(),...
method commit (line 133) | async fn commit(mut self: Box<Self>) -> Result<(), LixError> {
method rollback (line 141) | async fn rollback(mut self: Box<Self>) -> Result<(), LixError> {
method drop (line 159) | fn drop(&mut self) {
function configure_connection (line 169) | fn configure_connection(conn: &Connection) -> Result<(), LixError> {
function ensure_kv_table (line 179) | fn ensure_kv_table(conn: &Connection) -> Result<(), LixError> {
function kv_get_with_connection (line 192) | fn kv_get_with_connection(
function kv_scan_with_connection (line 206) | fn kv_scan_with_connection(
function collect_matching_rows (line 249) | fn collect_matching_rows<F>(
function sqlite_error (line 269) | fn sqlite_error(error: rusqlite::Error) -> LixError {
FILE: benchmarks/git-compare/src/main.rs
constant NULL_OID (line 18) | const NULL_OID: &str = "0000000000000000000000000000000000000000";
type DynError (line 20) | type DynError = Box<dyn std::error::Error + Send + Sync>;
type DynResult (line 21) | type DynResult<T> = Result<T, DynError>;
type Args (line 25) | struct Args {
type CommitInfo (line 55) | struct CommitInfo {
type PatchSet (line 62) | struct PatchSet {
type RawChange (line 68) | struct RawChange {
type OperationKind (line 79) | enum OperationKind {
type FileOperation (line 88) | struct FileOperation {
type Workload (line 97) | struct Workload {
type LixTemplate (line 108) | struct LixTemplate {
type LixSeedRow (line 114) | struct LixSeedRow {
type PreparedWorkload (line 121) | struct PreparedWorkload {
type Report (line 128) | struct Report {
type ConfigReport (line 140) | struct ConfigReport {
type WorkloadSelectionReport (line 151) | struct WorkloadSelectionReport {
type SkippedCandidate (line 157) | struct SkippedCandidate {
type TemplateSeedReport (line 164) | struct TemplateSeedReport {
type WorkloadReport (line 169) | struct WorkloadReport {
type OverallReport (line 183) | struct OverallReport {
type MetricReport (line 191) | struct MetricReport {
type SummaryStats (line 198) | struct SummaryStats {
type TrialResult (line 208) | struct TrialResult {
function main (line 219) | fn main() {
function run_with_large_stack (line 226) | fn run_with_large_stack<F>(f: F) -> DynResult<()>
function real_main (line 240) | fn real_main() -> DynResult<()> {
type PreparedBenchmark (line 360) | struct PreparedBenchmark {
function validate_args (line 365) | fn validate_args(args: &Args) -> DynResult<()> {
function ensure_git_repo (line 381) | fn ensure_git_repo(repo_path: &Path) -> DynResult<()> {
function prepare_output_dir (line 386) | fn prepare_output_dir(path: &Path, force: bool) -> DynResult<()> {
function select_workloads (line 401) | fn select_workloads(
function prepare_workloads (line 496) | fn prepare_workloads(
function run_workload_trials (line 525) | fn run_workload_trials(
function run_git_trial (line 579) | fn run_git_trial(
function run_lix_trial (line 643) | fn run_lix_trial(
function create_git_checkout_template (line 721) | fn create_git_checkout_template(
function create_lix_snapshot_template (line 752) | fn create_lix_snapshot_template(
function apply_operations_to_git (line 774) | fn apply_operations_to_git(repo_dir: &Path, operations: &[FileOperation]...
function set_executable_if_needed (line 837) | fn set_executable_if_needed(path: &Path, executable: bool) -> DynResult<...
function execute_engine_operation (line 850) | fn execute_engine_operation(
function verify_session_state (line 963) | fn verify_session_state(
function create_initialized_session (line 987) | fn create_initialized_session(
function expect_text (line 1004) | fn expect_text(value: &Value) -> DynResult<String> {
function value_as_bytes (line 1011) | fn value_as_bytes(value: &Value) -> DynResult<Vec<u8>> {
function next_file_id_from_map (line 1019) | fn next_file_id_from_map(path_to_id: &BTreeMap<String, String>) -> u64 {
function allocate_file_id (line 1029) | fn allocate_file_id(next_file_id: &mut u64) -> String {
function filtered_trials (line 1035) | fn filtered_trials(trials: &[TrialResult], system: &str) -> Vec<TrialRes...
function build_metric_report (line 1043) | fn build_metric_report(trials: &[TrialResult]) -> MetricReport {
function summarize (line 1051) | fn summarize(mut values: Vec<f64>) -> SummaryStats {
function percentile (line 1068) | fn percentile(sorted_values: &[f64], percentile: f64) -> f64 {
function safe_ratio (line 1082) | fn safe_ratio(numerator: f64, denominator: f64) -> f64 {
function pct_less_time (line 1090) | fn pct_less_time(lix_ms: f64, git_ms: f64) -> f64 {
function render_markdown_report (line 1098) | fn render_markdown_report(report: &Report) -> String {
function list_first_parent_commit_info (line 1157) | fn list_first_parent_commit_info(
function read_commit_info (line 1199) | fn read_commit_info(repo_path: &Path, reference: &str) -> DynResult<Comm...
function rev_parse_commit (line 1220) | fn rev_parse_commit(repo_path: &Path, reference: &str) -> DynResult<Stri...
function rev_parse_tree (line 1229) | fn rev_parse_tree(repo_path: &Path, commit_sha: &str) -> DynResult<Strin...
function read_commit_patch_set (line 1237) | fn read_commit_patch_set(repo_path: &Path, commit_sha: &str) -> DynResul...
function parse_raw_diff_tree (line 1260) | fn parse_raw_diff_tree(raw: &[u8]) -> DynResult<Vec<RawChange>> {
function collect_wanted_blob_ids (line 1322) | fn collect_wanted_blob_ids(changes: &[RawChange]) -> Vec<String> {
function read_tree_snapshot (line 1335) | fn read_tree_snapshot(repo_path: &Path, commit_sha: &str) -> DynResult<B...
function compile_operations (line 1373) | fn compile_operations(patch_set: &PatchSet) -> DynResult<Vec<FileOperati...
function normalize_snapshot_for_lix (line 1409) | fn normalize_snapshot_for_lix(files: &BTreeMap<String, Vec<u8>>) -> BTre...
function to_lix_path (line 1416) | fn to_lix_path(path: &str) -> String {
function encode_lix_path_segment (line 1426) | fn encode_lix_path_segment(segment: &str) -> String {
function first_unsupported_change_reason (line 1440) | fn first_unsupported_change_reason(changes: &[RawChange]) -> Option<Stri...
function unsupported_change_reason (line 1444) | fn unsupported_change_reason(change: &RawChange) -> Option<String> {
function is_regular_blob_mode (line 1498) | fn is_regular_blob_mode(mode: &str) -> bool {
function read_blobs (line 1502) | fn read_blobs(repo_path: &Path, blob_ids: &[String]) -> DynResult<HashMa...
function run_git_text (line 1539) | fn run_git_text<I, S>(repo_path: &Path, args: I) -> DynResult<String>
function run_git_bytes (line 1557) | fn run_git_bytes<I, S>(repo_path: &Path, args: I, stdin: Option<Vec<u8>>...
function run_command (line 1574) | fn run_command<I, S>(
function copy_directory (line 1618) | fn copy_directory(source: &Path, destination: &Path) -> DynResult<()> {
function elapsed_ms (line 1635) | fn elapsed_ms(started: Instant) -> f64 {
FILE: packages/cli/src/app/context.rs
type AppContext (line 4) | pub struct AppContext {
FILE: packages/cli/src/app/run.rs
function run (line 10) | pub fn run() -> Result<(), CliError> {
function render_error_output (line 54) | pub(crate) fn render_error_output<W: Write>(err: &CliError, no_hints: bo...
function rendered (line 68) | fn rendered(err: &CliError, no_hints: bool) -> String {
function renders_hint_line_when_error_carries_hint (line 75) | fn renders_hint_line_when_error_carries_hint() {
function suppresses_hint_when_no_hints_is_set (line 93) | fn suppresses_hint_when_no_hints_is_set() {
function omits_hint_line_when_error_has_no_hint (line 103) | fn omits_hint_line_when_error_has_no_hint() {
function omits_hint_line_for_non_lix_error_variants (line 110) | fn omits_hint_line_for_non_lix_error_variants() {
FILE: packages/cli/src/app/welcome.rs
constant CYAN (line 4) | const CYAN: &str = "\x1b[38;2;8;181;214m";
constant RESET (line 5) | const RESET: &str = "\x1b[0m";
constant LOGO (line 7) | const LOGO: [&str; 6] = [
constant TAGLINE (line 16) | const TAGLINE: &str = "change control system for everything";
function print_banner (line 18) | pub fn print_banner(explicit_lix_path: Option<&Path>) {
function use_color (line 43) | fn use_color() -> bool {
function current_dir_display (line 47) | fn current_dir_display() -> String {
function describe_lix_state (line 66) | fn describe_lix_state(explicit: Option<&Path>) -> String {
FILE: packages/cli/src/cli/exp.rs
type ExpCommand (line 5) | pub struct ExpCommand {
type ExpSubcommand (line 11) | pub enum ExpSubcommand {
type ExpGitReplayArgs (line 17) | pub struct ExpGitReplayArgs {
FILE: packages/cli/src/cli/init.rs
type InitCommand (line 5) | pub struct InitCommand {
FILE: packages/cli/src/cli/redo.rs
type RedoCommand (line 4) | pub struct RedoCommand {
FILE: packages/cli/src/cli/root.rs
type Cli (line 13) | pub struct Cli {
type Command (line 29) | pub enum Command {
function parses_init_command_path_argument (line 53) | fn parses_init_command_path_argument() {
function parses_sql_execute_params_json_flag (line 62) | fn parses_sql_execute_params_json_flag() {
function parses_undo_command_version_flag (line 85) | fn parses_undo_command_version_flag() {
function parses_redo_command_without_version (line 97) | fn parses_redo_command_without_version() {
function parses_version_merge_command (line 106) | fn parses_version_merge_command() {
function parses_version_create_command (line 130) | fn parses_version_create_command() {
function parses_version_switch_command (line 159) | fn parses_version_switch_command() {
function rejects_version_switch_without_reference_flag (line 174) | fn rejects_version_switch_without_reference_flag() {
FILE: packages/cli/src/cli/sql.rs
type SqlCommand (line 4) | pub struct SqlCommand {
type SqlSubcommand (line 10) | pub enum SqlSubcommand {
type SqlOutputFormat (line 21) | pub enum SqlOutputFormat {
type SqlExecuteArgs (line 27) | pub struct SqlExecuteArgs {
FILE: packages/cli/src/cli/undo.rs
type UndoCommand (line 4) | pub struct UndoCommand {
FILE: packages/cli/src/cli/version.rs
type VersionCommand (line 4) | pub struct VersionCommand {
type VersionSubcommand (line 10) | pub enum VersionSubcommand {
type CreateVersionCommand (line 20) | pub struct CreateVersionCommand {
type MergeVersionCommand (line 43) | pub struct MergeVersionCommand {
type SwitchVersionCommand (line 78) | pub struct SwitchVersionCommand {
FILE: packages/cli/src/commands/exp/git_replay.rs
constant NULL_OID (line 14) | const NULL_OID: &str = "0000000000000000000000000000000000000000";
constant PROGRESS_EVERY (line 15) | const PROGRESS_EVERY: usize = 10;
constant DEFAULT_INSERT_BATCH_ROWS (line 16) | const DEFAULT_INSERT_BATCH_ROWS: usize = 100;
type Change (line 19) | struct Change {
method new_is_blob (line 29) | fn new_is_blob(&self) -> bool {
type PatchSet (line 35) | struct PatchSet {
type ReplayState (line 41) | struct ReplayState {
type WriteRow (line 47) | struct WriteRow {
type PreparedBatch (line 54) | struct PreparedBatch {
type SqlStatement (line 61) | struct SqlStatement {
type ExpectedFile (line 67) | struct ExpectedFile {
type ReplayProfilePhaseTotals (line 73) | struct ReplayProfilePhaseTotals {
type ReplayCommitProfile (line 83) | struct ReplayCommitProfile {
type ReplayProfileReport (line 102) | struct ReplayProfileReport {
type SqlTraceCommitTarget (line 118) | struct SqlTraceCommitTarget {
type ReplaySqlTraceReport (line 123) | struct ReplaySqlTraceReport {
type ReplaySqlTraceCommit (line 134) | struct ReplaySqlTraceCommit {
type ReplaySqlTraceOperation (line 146) | struct ReplaySqlTraceOperation {
function run (line 160) | pub fn run(args: ExpGitReplayArgs) -> Result<(), CliError> {
function init_and_open_lix_at_path (line 378) | fn init_and_open_lix_at_path(path: &Path) -> Result<Lix, CliError> {
function execute_statements_as_transaction (line 389) | fn execute_statements_as_transaction(
function build_transaction_script (line 410) | fn build_transaction_script(statements: &[SqlStatement]) -> String {
function number_sql_parameters (line 427) | fn number_sql_parameters(sql: &str, next_param_index: &mut usize) -> Str...
function prepared_blob_bytes (line 441) | fn prepared_blob_bytes(prepared: &PreparedBatch) -> usize {
function total_statement_sql_chars (line 450) | fn total_statement_sql_chars(statements: &[SqlStatement]) -> usize {
function duration_to_ms (line 454) | fn duration_to_ms(duration: Duration) -> f64 {
function write_profile_report (line 458) | fn write_profile_report(path: &Path, report: ReplayProfileReport) -> Res...
function write_sql_trace_report (line 468) | fn write_sql_trace_report(path: &Path, report: ReplaySqlTraceReport) -> ...
function list_linear_commits (line 478) | fn list_linear_commits(
function resolve_trace_commit_target (line 505) | fn resolve_trace_commit_target(
function should_trace_commit (line 537) | fn should_trace_commit(commit_sha: &str, target: Option<&SqlTraceCommitT...
function select_replay_commits (line 544) | fn select_replay_commits(
function resolve_commit_oid (line 569) | fn resolve_commit_oid(repo_path: &Path, raw: &str) -> Result<String, Cli...
function read_commit_patch_set (line 596) | fn read_commit_patch_set(repo_path: &Path, commit_sha: &str) -> Result<P...
function parse_raw_diff_tree (line 620) | fn parse_raw_diff_tree(raw: &[u8]) -> Result<Vec<Change>, CliError> {
function collect_wanted_blob_ids (line 699) | fn collect_wanted_blob_ids(changes: &[Change]) -> Vec<String> {
function read_blobs (line 712) | fn read_blobs(repo_path: &Path, blob_ids: &[String]) -> Result<HashMap<S...
function prepare_commit_changes (line 799) | fn prepare_commit_changes(
function should_delete_old_entry (line 868) | fn should_delete_old_entry(change: &Change, status: char) -> bool {
type WriteTarget (line 880) | struct WriteTarget {
function resolve_delete_path (line 885) | fn resolve_delete_path(state: &mut ReplayState, change: &Change) -> Opti...
function resolve_write_target (line 892) | fn resolve_write_target(
function build_replay_commit_statements (line 935) | fn build_replay_commit_statements(
function apply_prepared_to_expected_state (line 1002) | fn apply_prepared_to_expected_state(
function verify_commit_state_hashes (line 1021) | fn verify_commit_state_hashes(
function value_to_string (line 1098) | fn value_to_string(value: &Value, context: &str) -> Result<String, CliEr...
function value_to_blob (line 1110) | fn value_to_blob<'a>(value: &'a Value, context: &str) -> Result<&'a [u8]...
function sha256_hex (line 1117) | fn sha256_hex(bytes: &[u8]) -> String {
function hex_digit_lower (line 1127) | fn hex_digit_lower(value: u8) -> char {
function hex_digit_upper (line 1135) | fn hex_digit_upper(value: u8) -> char {
function normalize_status (line 1143) | fn normalize_status(value: char) -> char {
function stable_file_id (line 1147) | fn stable_file_id(path: &str) -> String {
function to_lix_path (line 1151) | fn to_lix_path(path: &str) -> String {
function encode_path_segment (line 1162) | fn encode_path_segment(segment: &str) -> String {
function mode_is_blob (line 1178) | fn mode_is_blob(mode: &str) -> bool {
function token_to_string (line 1182) | fn token_to_string(token: &[u8]) -> String {
function run_git_text (line 1186) | fn run_git_text(
function run_git_bytes (line 1195) | fn run_git_bytes(
function prepare_regular_output_path (line 1251) | fn prepare_regular_output_path(path: &Path, force: bool) -> Result<(), C...
function validate_repo_dir (line 1278) | fn validate_repo_dir(path: &Path) -> Result<(), CliError> {
function validate_git_repo (line 1289) | fn validate_git_repo(path: &Path) -> Result<(), CliError> {
function normalize_replay_ref (line 1301) | fn normalize_replay_ref(raw: &str) -> Result<String, CliError> {
function absolutize_from_cwd (line 1314) | fn absolutize_from_cwd(path: &Path) -> Result<PathBuf, CliError> {
function collect_wanted_blob_ids_skips_gitlink_oids (line 1330) | fn collect_wanted_blob_ids_skips_gitlink_oids() {
function select_replay_commits_starts_from_specific_commit_inclusive (line 1358) | fn select_replay_commits_starts_from_specific_commit_inclusive() {
function select_replay_commits_applies_limit_after_from_commit (line 1371) | fn select_replay_commits_applies_limit_after_from_commit() {
function select_replay_commits_errors_when_from_commit_missing (line 1384) | fn select_replay_commits_errors_when_from_commit_missing() {
function prepare_commit_changes_typechange_blob_to_gitlink_deletes_file (line 1399) | fn prepare_commit_changes_typechange_blob_to_gitlink_deletes_file() {
function prepare_output_path_rejects_existing_file (line 1433) | fn prepare_output_path_rejects_existing_file() {
function prepare_output_path_allows_nonexistent_file_and_creates_parent (line 1452) | fn prepare_output_path_allows_nonexistent_file_and_creates_parent() {
function prepare_output_lix_path_force_removes_existing_file_and_sidecars (line 1468) | fn prepare_output_lix_path_force_removes_existing_file_and_sidecars() {
function build_replay_commit_statements_omits_path_for_stable_updates (line 1503) | fn build_replay_commit_statements_omits_path_for_stable_updates() {
function build_replay_commit_statements_preserves_path_for_renames (line 1531) | fn build_replay_commit_statements_preserves_path_for_renames() {
function unique_temp_dir (line 1559) | fn unique_temp_dir() -> PathBuf {
FILE: packages/cli/src/commands/exp/mod.rs
function run (line 8) | pub fn run(_context: &AppContext, command: ExpCommand) -> Result<Command...
FILE: packages/cli/src/commands/init.rs
function run (line 6) | pub fn run(command: InitCommand) -> Result<CommandOutput, CliError> {
FILE: packages/cli/src/commands/redo.rs
function run (line 6) | pub fn run(_context: &AppContext, _command: RedoCommand) -> Result<Comma...
FILE: packages/cli/src/commands/sql/execute.rs
function run (line 12) | pub fn run(context: &AppContext, args: SqlExecuteArgs) -> Result<Command...
function resolve_sql_and_params (line 33) | fn resolve_sql_and_params(args: &SqlExecuteArgs) -> Result<(String, Vec<...
function read_stdin (line 66) | fn read_stdin(context: &'static str) -> Result<String, CliError> {
function resolve_params (line 74) | fn resolve_params(
function parse_params_json (line 96) | fn parse_params_json(raw: &str) -> Result<Vec<Value>, CliError> {
function parse_param_value (line 114) | fn parse_param_value(value: &JsonValue, index: usize) -> Result<Value, C...
function parse_object_param (line 137) | fn parse_object_param(
function resolve_params_defaults_to_empty_when_unset (line 172) | fn resolve_params_defaults_to_empty_when_unset() {
function resolve_params_maps_json_array_values_to_typed_sql_values (line 178) | fn resolve_params_maps_json_array_values_to_typed_sql_values() {
function resolve_params_rejects_non_array_json (line 198) | fn resolve_params_rejects_non_array_json() {
function resolve_params_rejects_invalid_object_shape (line 207) | fn resolve_params_rejects_invalid_object_shape() {
function resolve_sql_and_params_rejects_double_stdin_usage (line 217) | fn resolve_sql_and_params_rejects_double_stdin_usage() {
function execute_accepts_numbered_placeholders_with_json_params (line 232) | fn execute_accepts_numbered_placeholders_with_json_params() {
function test_lix_path (line 261) | fn test_lix_path(label: &str) -> PathBuf {
FILE: packages/cli/src/commands/sql/mod.rs
function run (line 8) | pub fn run(context: &AppContext, command: SqlCommand) -> Result<CommandO...
FILE: packages/cli/src/commands/undo.rs
function run (line 6) | pub fn run(_context: &AppContext, _command: UndoCommand) -> Result<Comma...
FILE: packages/cli/src/commands/version/create.rs
function run (line 11) | pub fn run(context: &AppContext, command: CreateVersionCommand) -> Resul...
function create_confirmation_lines (line 56) | fn create_confirmation_lines(
function create_confirmation_uses_active_version_not_parent_version (line 80) | fn create_confirmation_uses_active_version_not_parent_version() {
FILE: packages/cli/src/commands/version/merge.rs
function run (line 9) | pub fn run(context: &AppContext, command: MergeVersionCommand) -> Result...
FILE: packages/cli/src/commands/version/mod.rs
type VersionLookup (line 12) | pub(super) enum VersionLookup<'a> {
type ResolvedVersionRef (line 18) | pub(super) struct ResolvedVersionRef {
function run (line 23) | pub fn run(context: &AppContext, command: VersionCommand) -> Result<Comm...
function resolve_version_ref (line 31) | pub(super) fn resolve_version_ref(
function resolve_active_version_ref (line 41) | pub(super) fn resolve_active_version_ref(lix: &Lix) -> Result<ResolvedVe...
function resolve_version_by_id (line 47) | fn resolve_version_by_id(lix: &Lix, id: &str) -> Result<ResolvedVersionR...
function resolve_version_by_name (line 64) | fn resolve_version_by_name(lix: &Lix, name: &str) -> Result<ResolvedVers...
function statement_rows (line 92) | fn statement_rows(result: &ExecuteResult) -> Result<&[LixRow], CliError> {
function text_at (line 96) | fn text_at(row: &LixRow, index: usize, field: &str) -> Result<String, Cl...
function temp_lix_path (line 118) | fn temp_lix_path(label: &str) -> PathBuf {
function cleanup_lix_path (line 129) | fn cleanup_lix_path(path: &Path) {
function text_at (line 136) | fn text_at(result: &ExecuteResult, row: usize, col: usize) -> String {
function fast_forward_merge_keeps_database_openable_across_fresh_opens (line 149) | fn fast_forward_merge_keeps_database_openable_across_fresh_opens() {
function fast_forward_merge_keeps_database_openable_across_fresh_opens_inner (line 161) | fn fast_forward_merge_keeps_database_openable_across_fresh_opens_inner() {
function resolve_version_ref_by_name_rejects_ambiguous_matches (line 250) | fn resolve_version_ref_by_name_rejects_ambiguous_matches() {
function resolve_version_ref_by_name_rejects_ambiguous_matches_inner (line 260) | fn resolve_version_ref_by_name_rejects_ambiguous_matches_inner() {
function resolve_version_ref_by_name_rejects_missing_match (line 290) | fn resolve_version_ref_by_name_rejects_missing_match() {
function resolve_version_ref_by_name_rejects_missing_match_inner (line 300) | fn resolve_version_ref_by_name_rejects_missing_match_inner() {
FILE: packages/cli/src/commands/version/switch.rs
function run (line 9) | pub fn run(context: &AppContext, command: SwitchVersionCommand) -> Resul...
FILE: packages/cli/src/db/mod.rs
function resolve_db_path (line 15) | pub fn resolve_db_path(context: &AppContext) -> Result<PathBuf, CliError> {
function open_lix_at (line 51) | pub fn open_lix_at(path: &Path) -> Result<Lix, CliError> {
function init_lix_at (line 60) | pub fn init_lix_at(path: &Path) -> Result<bool, CliError> {
function destroy_lix_at (line 76) | pub fn destroy_lix_at(path: &Path) -> Result<(), CliError> {
function prepare_lix_output_path (line 91) | pub fn prepare_lix_output_path(path: &Path, force: bool) -> Result<(), C...
function find_lix_files (line 123) | fn find_lix_files(cwd: &Path) -> Result<Vec<PathBuf>, CliError> {
function validate_lix_file_path (line 142) | fn validate_lix_file_path(path: &Path) -> Result<(), CliError> {
function block_on (line 153) | pub fn block_on<F: std::future::Future>(future: F) -> F::Output {
function remove_sidecar (line 161) | fn remove_sidecar(path: &Path, suffix: &str) -> Result<(), CliError> {
type KvMap (line 170) | type KvMap = BTreeMap<(String, Vec<u8>), Vec<u8>>;
type FileBackend (line 173) | struct FileBackend {
method from_path (line 179) | fn from_path(path: &Path) -> Result<Self, CliError> {
method begin_transaction (line 190) | async fn begin_transaction(
method kv_get (line 207) | async fn kv_get(&self, namespace: &str, key: &[u8]) -> Result<Option<Vec...
method kv_scan (line 216) | async fn kv_scan(
type FileBackendTransaction (line 230) | struct FileBackendTransaction {
method mode (line 239) | fn mode(&self) -> TransactionBeginMode {
method kv_get (line 243) | async fn kv_get(&mut self, namespace: &str, key: &[u8]) -> Result<Option...
method kv_scan (line 247) | async fn kv_scan(
method kv_put (line 256) | async fn kv_put(&mut self, namespace: &str, key: &[u8], value: &[u8]) ->...
method kv_delete (line 262) | async fn kv_delete(&mut self, namespace: &str, key: &[u8]) -> Result<(),...
method commit (line 267) | async fn commit(self: Box<Self>) -> Result<(), LixError> {
method rollback (line 276) | async fn rollback(self: Box<Self>) -> Result<(), LixError> {
type FileSnapshot (line 282) | struct FileSnapshot {
type FileEntry (line 287) | struct FileEntry {
function read_kv_file (line 293) | fn read_kv_file(path: &Path) -> Result<KvMap, CliError> {
function write_kv_file (line 313) | fn write_kv_file(path: &Path, kv: &KvMap) -> Result<(), LixError> {
function scan_map (line 338) | fn scan_map(kv: &KvMap, namespace: &str, range: &KvScanRange, limit: Opt...
function key_matches_range (line 353) | fn key_matches_range(key: &[u8], range: &KvScanRange) -> bool {
function encode_bytes (line 360) | fn encode_bytes(bytes: &[u8]) -> String {
function decode_bytes (line 364) | fn decode_bytes(value: &str) -> Result<Vec<u8>, CliError> {
function lock_error (line 370) | fn lock_error(name: &str) -> LixError {
function resolve_db_path_rejects_explicit_non_lix_path (line 383) | fn resolve_db_path_rejects_explicit_non_lix_path() {
function init_lix_at_rejects_non_lix_path (line 404) | fn init_lix_at_rejects_non_lix_path() {
function prepare_output_path_rejects_non_lix_path (line 420) | fn prepare_output_path_rejects_non_lix_path() {
function unique_temp_dir (line 436) | fn unique_temp_dir() -> PathBuf {
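Note: the CLI's file backend above keeps all state in a `KvMap` (`BTreeMap<(String, Vec<u8>), Vec<u8>>`) and answers scans by filtering that map (`scan_map`, `key_matches_range`). A minimal sketch of how such a scan could work; the `ScanRange` enum here is a hypothetical stand-in for the engine's real scan-range type, and the half-open range semantics are an assumption.

```rust
use std::collections::BTreeMap;

// Mirrors the CLI's `type KvMap = BTreeMap<(String, Vec<u8>), Vec<u8>>;`.
type KvMap = BTreeMap<(String, Vec<u8>), Vec<u8>>;

// Hypothetical stand-in for the engine's scan range type (shape assumed).
enum ScanRange {
    Prefix(Vec<u8>),
    Range { start: Vec<u8>, end: Vec<u8> }, // assumed half-open: [start, end)
}

fn key_matches_range(key: &[u8], range: &ScanRange) -> bool {
    match range {
        ScanRange::Prefix(prefix) => key.starts_with(prefix),
        ScanRange::Range { start, end } => key >= start.as_slice() && key < end.as_slice(),
    }
}

// Collects matching (key, value) pairs for one namespace, honoring an
// optional limit; BTreeMap iteration already yields lexicographic order.
fn scan_map<'a>(
    kv: &'a KvMap,
    namespace: &str,
    range: &ScanRange,
    limit: Option<usize>,
) -> Vec<(&'a [u8], &'a [u8])> {
    kv.iter()
        .filter(|((ns, key), _)| ns.as_str() == namespace && key_matches_range(key, range))
        .map(|((_, key), value)| (key.as_slice(), value.as_slice()))
        .take(limit.unwrap_or(usize::MAX))
        .collect()
}

fn main() {
    let mut kv = KvMap::new();
    kv.insert(("state".into(), b"a/1".to_vec()), b"v1".to_vec());
    kv.insert(("state".into(), b"a/2".to_vec()), b"v2".to_vec());
    kv.insert(("state".into(), b"b/1".to_vec()), b"v3".to_vec());
    let hits = scan_map(&kv, "state", &ScanRange::Prefix(b"a/".to_vec()), None);
    assert_eq!(hits.len(), 2);
}
```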
FILE: packages/cli/src/error.rs
type CliError (line 5) | pub enum CliError {
method io (line 19) | pub fn io(context: &'static str, source: std::io::Error) -> Self {
method msg (line 23) | pub fn msg(message: impl Into<String>) -> Self {
method from_lix (line 27) | pub fn from_lix(context: &'static str, source: LixError) -> Self {
method hint (line 31) | pub fn hint(&self) -> Option<&str> {
method fmt (line 40) | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
function hint_returns_none_for_non_lix_variants (line 59) | fn hint_returns_none_for_non_lix_variants() {
function hint_returns_lix_hint_when_attached (line 70) | fn hint_returns_lix_hint_when_attached() {
function hint_returns_none_when_lix_error_has_no_hint (line 77) | fn hint_returns_none_when_lix_error_has_no_hint() {
function display_format_omits_hint_line (line 84) | fn display_format_omits_hint_line() {
FILE: packages/cli/src/hints.rs
type CommandOutput (line 5) | pub struct CommandOutput {
method empty (line 10) | pub fn empty() -> Self {
method with_hints (line 14) | pub fn with_hints(hints: Vec<String>) -> Self {
function hint_after_init (line 21) | pub fn hint_after_init() -> Vec<String> {
function hint_blob_in_result (line 28) | pub fn hint_blob_in_result(result: &ExecuteResult) -> Vec<String> {
function hint_from_error (line 45) | pub fn hint_from_error(err: &CliError) -> Vec<String> {
function are_hints_enabled (line 52) | pub fn are_hints_enabled(lix: &Lix) -> bool {
function render_hints (line 71) | pub fn render_hints(hints: &[String]) {
function hint_from_error_returns_empty_for_non_lix_variants (line 83) | fn hint_from_error_returns_empty_for_non_lix_variants() {
function hint_from_error_returns_empty_when_lix_error_has_no_hint (line 89) | fn hint_from_error_returns_empty_when_lix_error_has_no_hint() {
function hint_from_error_returns_lix_hint (line 95) | fn hint_from_error_returns_lix_hint() {
FILE: packages/cli/src/lib.rs
function run (line 9) | pub fn run() -> Result<(), error::CliError> {
FILE: packages/cli/src/main.rs
function main (line 1) | fn main() {
FILE: packages/cli/src/output/mod.rs
function print_execute_result_table (line 6) | pub fn print_execute_result_table(result: &ExecuteResult) {
function print_execute_result_json (line 39) | pub fn print_execute_result_json(result: &ExecuteResult) {
function execute_result_to_json (line 47) | fn execute_result_to_json(result: &ExecuteResult) -> JsonValue {
function row_to_json (line 56) | fn row_to_json(columns: &[String], row: &lix_rs_sdk::Row) -> JsonValue {
function value_to_text (line 68) | fn value_to_text(value: &Value) -> String {
function value_to_json (line 80) | fn value_to_json(value: &Value) -> JsonValue {
function bytes_to_hex (line 96) | fn bytes_to_hex(bytes: &[u8]) -> String {
function hex_digit (line 106) | fn hex_digit(value: u8) -> char {
function value_to_json_uses_blob_tagged_shape (line 119) | fn value_to_json_uses_blob_tagged_shape() {
function value_to_json_uses_native_scalars (line 131) | fn value_to_json_uses_native_scalars() {
function execute_result_to_json_preserves_envelope_and_order (line 147) | fn execute_result_to_json_preserves_envelope_and_order() {
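Note: `bytes_to_hex` and `hex_digit` above suggest a dependency-free hex encoder used when printing blob values. A plausible sketch matching the listed signatures; lowercase digit casing is an assumption.

```rust
// Maps a nibble (0..=15) to its lowercase hex character.
fn hex_digit(value: u8) -> char {
    match value {
        0..=9 => (b'0' + value) as char,
        10..=15 => (b'a' + value - 10) as char,
        _ => unreachable!("nibble out of range"),
    }
}

// Encodes bytes as a hex string, two digits per byte.
fn bytes_to_hex(bytes: &[u8]) -> String {
    let mut out = String::with_capacity(bytes.len() * 2);
    for &byte in bytes {
        out.push(hex_digit(byte >> 4));
        out.push(hex_digit(byte & 0x0f));
    }
    out
}

fn main() {
    assert_eq!(bytes_to_hex(&[0x00, 0xff, 0x1a]), "00ff1a");
}
```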
FILE: packages/engine/benches/json_pointer_crud/main.rs
constant JSON_POINTER_SCHEMA_JSON (line 22) | const JSON_POINTER_SCHEMA_JSON: &str =
constant PNPM_LOCK_JSON (line 24) | const PNPM_LOCK_JSON: &str = include_str!("../fixtures/pnpm-lock.fixture...
constant BASELINE_ROWS (line 25) | const BASELINE_ROWS: usize = 100;
constant SMOKE_ROWS (line 26) | const SMOKE_ROWS: usize = 1_000;
constant SCALE_ROWS (line 27) | const SCALE_ROWS: usize = 10_000;
constant CHUNK_SIZE (line 28) | const CHUNK_SIZE: usize = 500;
constant CHANGE_ROW_DENOMINATOR (line 29) | const CHANGE_ROW_DENOMINATOR: usize = 10;
type PointerRow (line 32) | struct PointerRow {
type LixBackendProfile (line 39) | enum LixBackendProfile {
method name (line 45) | fn name(self) -> &'static str {
method backend_label (line 52) | fn backend_label(self) -> &'static str {
type RawSqliteFixture (line 60) | struct RawSqliteFixture {
type LixFixture (line 65) | struct LixFixture {
function json_pointer_crud_benches (line 69) | fn json_pointer_crud_benches(c: &mut Criterion) {
function bench_raw_sqlite (line 87) | fn bench_raw_sqlite(c: &mut Criterion, all_rows: &[PointerRow], row_coun...
function bench_raw_storage (line 156) | fn bench_raw_storage(
function bench_lix (line 512) | fn bench_lix(
function prepare_raw_sqlite_empty (line 644) | fn prepare_raw_sqlite_empty() -> RawSqliteFixture {
function prepare_raw_sqlite_seeded (line 664) | fn prepare_raw_sqlite_seeded(rows: &[PointerRow]) -> RawSqliteFixture {
function raw_sqlite_seed (line 670) | fn raw_sqlite_seed(conn: &Connection, rows: &[PointerRow]) {
function raw_sqlite_insert_all (line 690) | fn raw_sqlite_insert_all(fixture: RawSqliteFixture, rows: &[PointerRow])...
function raw_sqlite_select_all (line 695) | fn raw_sqlite_select_all(fixture: RawSqliteFixture, expected_rows: usize...
function raw_sqlite_select_one_by_pk (line 708) | fn raw_sqlite_select_one_by_pk(fixture: RawSqliteFixture, row: &PointerR...
function raw_sqlite_update_all (line 722) | fn raw_sqlite_update_all(fixture: RawSqliteFixture, expected_rows: usize...
function raw_sqlite_update_one_by_pk (line 734) | fn raw_sqlite_update_one_by_pk(fixture: RawSqliteFixture, row: &PointerR...
function raw_sqlite_delete_all (line 746) | fn raw_sqlite_delete_all(fixture: RawSqliteFixture, expected_rows: usize...
function raw_sqlite_delete_one_by_pk (line 755) | fn raw_sqlite_delete_one_by_pk(fixture: RawSqliteFixture, row: &PointerR...
function prepare_lix_empty (line 767) | async fn prepare_lix_empty(profile: LixBackendProfile) -> LixFixture {
function prepare_lix_seeded (line 802) | async fn prepare_lix_seeded(profile: LixBackendProfile, rows: &[PointerR...
function register_json_pointer_schema (line 808) | async fn register_json_pointer_schema(session: &SessionContext) {
function lix_insert_all (line 822) | async fn lix_insert_all(fixture: LixFixture, rows: &[PointerRow]) -> usi...
function insert_lix_rows (line 827) | async fn insert_lix_rows(session: &SessionContext, rows: &[PointerRow]) {
function lix_select_all (line 849) | async fn lix_select_all(fixture: LixFixture, expected_rows: usize) -> us...
function lix_select_one_by_pk (line 859) | async fn lix_select_one_by_pk(fixture: LixFixture, row: &PointerRow) -> ...
function lix_update_all (line 873) | async fn lix_update_all(fixture: LixFixture, expected_rows: usize) -> us...
function lix_update_one_by_pk (line 887) | async fn lix_update_one_by_pk(fixture: LixFixture, row: &PointerRow) -> ...
function lix_delete_all (line 903) | async fn lix_delete_all(fixture: LixFixture, expected_rows: usize) -> us...
function lix_delete_one_by_pk (line 914) | async fn lix_delete_one_by_pk(fixture: LixFixture, row: &PointerRow) -> ...
function lix_create_version (line 929) | async fn lix_create_version(fixture: LixFixture) -> String {
function create_lix_version (line 933) | async fn create_lix_version(session: &SessionContext) -> String {
function lix_merge_version_fast_forward (line 945) | async fn lix_merge_version_fast_forward(
function lix_merge_version_divergent (line 981) | async fn lix_merge_version_divergent(
function update_lix_rows_by_pk (line 1018) | async fn update_lix_rows_by_pk(session: &SessionContext, rows: &[Pointer...
function fixture_rows (line 1040) | fn fixture_rows() -> Vec<PointerRow> {
function storage_rows (line 1052) | fn storage_rows(rows: &[PointerRow]) -> Vec<storage_bench::JsonPointerSt...
function pick_pk_row (line 1062) | fn pick_pk_row(rows: &[PointerRow]) -> &PointerRow {
function raw_storage_backend (line 1066) | fn raw_storage_backend(profile: LixBackendProfile) -> Arc<dyn Backend + ...
function prepare_raw_storage_read (line 1077) | fn prepare_raw_storage_read(
function flatten_json (line 1094) | fn flatten_json(path: &str, value: &JsonValue, rows: &mut Vec<PointerRow...
function updated_value_for (line 1118) | fn updated_value_for(path: &str) -> String {
function escape_pointer_token (line 1126) | fn escape_pointer_token(token: &str) -> String {
function sql_string (line 1130) | fn sql_string(value: &str) -> String {
function row_label (line 1134) | fn row_label(rows: usize) -> String {
function changed_row_count (line 1142) | fn changed_row_count(rows: usize) -> usize {
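Note: this bench flattens a pnpm lockfile into (pointer, value) rows, and `escape_pointer_token` points at RFC 6901 escaping (`~` becomes `~0`, `/` becomes `~1`). A hedged sketch of that flattening using serde_json; the `PointerRow` fields here are a simplified stand-in for the bench's actual struct.

```rust
use serde_json::Value as JsonValue;

// Simplified stand-in for the bench's `PointerRow` (actual fields assumed).
struct PointerRow {
    path: String,
    value: String,
}

// RFC 6901: `~` must become `~0` and `/` must become `~1` inside a token.
// The replacement order matters: escaping `~` first avoids double-escaping
// the `~` introduced by the `/` rule.
fn escape_pointer_token(token: &str) -> String {
    token.replace('~', "~0").replace('/', "~1")
}

// Walks a JSON document depth-first, emitting one row per leaf value,
// keyed by its JSON Pointer path.
fn flatten_json(path: &str, value: &JsonValue, rows: &mut Vec<PointerRow>) {
    match value {
        JsonValue::Object(map) => {
            for (key, child) in map {
                let child_path = format!("{path}/{}", escape_pointer_token(key));
                flatten_json(&child_path, child, rows);
            }
        }
        JsonValue::Array(items) => {
            for (index, child) in items.iter().enumerate() {
                flatten_json(&format!("{path}/{index}"), child, rows);
            }
        }
        leaf => rows.push(PointerRow {
            path: path.to_string(),
            value: leaf.to_string(),
        }),
    }
}

fn main() {
    let doc: JsonValue = serde_json::json!({"a/b": {"c": [1, 2]}});
    let mut rows = Vec::new();
    flatten_json("", &doc, &mut rows);
    assert_eq!(rows[0].path, "/a~1b/c/0");
}
```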
FILE: packages/engine/benches/json_pointer_physical/main.rs
constant PNPM_LOCK_JSON (line 19) | const PNPM_LOCK_JSON: &str = include_str!("../fixtures/pnpm-lock.fixture...
constant BASELINE_ROWS (line 20) | const BASELINE_ROWS: usize = 100;
constant SMOKE_ROWS (line 21) | const SMOKE_ROWS: usize = 1_000;
constant SCALE_ROWS (line 22) | const SCALE_ROWS: usize = 10_000;
constant CHANGE_ROW_DENOMINATOR (line 23) | const CHANGE_ROW_DENOMINATOR: usize = 10;
type PointerRow (line 26) | struct PointerRow {
type RawSqliteFixture (line 32) | struct RawSqliteFixture {
type BackendProfile (line 38) | enum BackendProfile {
method label (line 44) | fn label(self) -> &'static str {
function json_pointer_physical_benches (line 52) | fn json_pointer_physical_benches(c: &mut Criterion) {
function bench_raw_sqlite (line 67) | fn bench_raw_sqlite(c: &mut Criterion, all_rows: &[PointerRow], row_coun...
function bench_physical (line 190) | fn bench_physical(
function fixture_rows (line 539) | fn fixture_rows() -> Vec<PointerRow> {
function prepare_raw_sqlite_empty (line 551) | fn prepare_raw_sqlite_empty() -> RawSqliteFixture {
function prepare_raw_sqlite_seeded (line 571) | fn prepare_raw_sqlite_seeded(rows: &[PointerRow]) -> RawSqliteFixture {
function raw_sqlite_seed (line 577) | fn raw_sqlite_seed(conn: &Connection, rows: &[PointerRow]) {
function raw_sqlite_insert_all (line 597) | fn raw_sqlite_insert_all(fixture: RawSqliteFixture, rows: &[PointerRow])...
function raw_sqlite_get_many_exact (line 602) | fn raw_sqlite_get_many_exact(fixture: RawSqliteFixture, rows: &[PointerR...
function raw_sqlite_get_many_missing (line 622) | fn raw_sqlite_get_many_missing(fixture: RawSqliteFixture, row_count: usi...
function raw_sqlite_exists_many (line 643) | fn raw_sqlite_exists_many(fixture: RawSqliteFixture, rows: &[PointerRow]...
function raw_sqlite_scan_keys_only (line 663) | fn raw_sqlite_scan_keys_only(fixture: RawSqliteFixture, expected_rows: u...
function raw_sqlite_scan_full_rows (line 676) | fn raw_sqlite_scan_full_rows(fixture: RawSqliteFixture, expected_rows: u...
function raw_sqlite_update_first_rows (line 689) | fn raw_sqlite_update_first_rows(
function raw_sqlite_delete_first_rows (line 718) | fn raw_sqlite_delete_first_rows(
function storage_rows (line 747) | fn storage_rows(rows: &[PointerRow]) -> Vec<storage_bench::JsonPointerSt...
function physical_backend (line 757) | fn physical_backend(profile: BackendProfile) -> Arc<dyn Backend + Send +...
function prepare_physical_read (line 768) | fn prepare_physical_read(
function flatten_json (line 785) | fn flatten_json(path: &str, value: &JsonValue, rows: &mut Vec<PointerRow...
function updated_value_for (line 809) | fn updated_value_for(path: &str) -> String {
function escape_pointer_token (line 817) | fn escape_pointer_token(token: &str) -> String {
function row_label (line 821) | fn row_label(rows: usize) -> String {
function changed_row_count (line 829) | fn changed_row_count(rows: usize) -> usize {
FILE: packages/engine/benches/optimization9_sql2/main.rs
constant JSON_POINTER_SCHEMA_JSON (line 16) | const JSON_POINTER_SCHEMA_JSON: &str = include_str!("json_pointer.schema...
constant PNPM_LOCK_JSON (line 17) | const PNPM_LOCK_JSON: &str = include_str!("pnpm-lock.fixture.json");
constant ROW_COUNT (line 18) | const ROW_COUNT: usize = 1_000;
constant INSERT_ROWS (line 19) | const INSERT_ROWS: usize = 500;
constant CHUNK_SIZE (line 20) | const CHUNK_SIZE: usize = 500;
type PointerRow (line 23) | struct PointerRow {
type LixBackendProfile (line 30) | enum LixBackendProfile {
method name (line 36) | fn name(self) -> &'static str {
type LixFixture (line 44) | struct LixFixture {
function optimization9_sql2_benches (line 48) | fn optimization9_sql2_benches(c: &mut Criterion) {
function bench_smoke_crud (line 64) | fn bench_smoke_crud(
function bench_planning_only (line 186) | fn bench_planning_only(
function bench_execute_preplanned (line 274) | fn bench_execute_preplanned(
function bench_e2e_literal (line 336) | fn bench_e2e_literal(
function bench_e2e_parameterized (line 396) | fn bench_e2e_parameterized(
function configure_group (line 471) | fn configure_group(group: &mut criterion::BenchmarkGroup<'_, criterion::...
function prepare_lix_empty (line 477) | async fn prepare_lix_empty(profile: LixBackendProfile) -> LixFixture {
function prepare_lix_seeded (line 511) | async fn prepare_lix_seeded(profile: LixBackendProfile, rows: &[PointerR...
function register_json_pointer_schema (line 517) | async fn register_json_pointer_schema(session: &SessionContext) {
function insert_lix_rows (line 531) | async fn insert_lix_rows(session: &SessionContext, rows: &[PointerRow]) {
function insert_lix_rows_blocking (line 543) | fn insert_lix_rows_blocking(runtime: &Runtime, session: &SessionContext,...
function fixture_rows (line 547) | fn fixture_rows() -> Vec<PointerRow> {
function flatten_json (line 559) | fn flatten_json(path: &str, value: &JsonValue, rows: &mut Vec<PointerRow...
function insert_literal_sql (line 583) | fn insert_literal_sql(rows: &[PointerRow]) -> String {
function select_one_literal_sql (line 598) | fn select_one_literal_sql(row: &PointerRow) -> String {
function select_one_parameterized_sql (line 605) | fn select_one_parameterized_sql() -> &'static str {
function update_one_literal_sql (line 609) | fn update_one_literal_sql(row: &PointerRow) -> String {
function delete_one_literal_sql (line 617) | fn delete_one_literal_sql(row: &PointerRow) -> String {
function pick_pk_row (line 624) | fn pick_pk_row(rows: &[PointerRow]) -> &PointerRow {
function updated_value_for (line 628) | fn updated_value_for(path: &str) -> String {
function escape_pointer_token (line 636) | fn escape_pointer_token(token: &str) -> String {
function sql_string (line 640) | fn sql_string(value: &str) -> String {
FILE: packages/engine/benches/physical_layout/backend_kv.rs
type BackendFactory (line 10) | type BackendFactory = fn() -> Arc<dyn Backend + Send + Sync>;
type BackendProfile (line 13) | struct BackendProfile {
function bench (line 18) | pub(crate) fn bench(c: &mut Criterion, runtime: &Runtime, args: Args) {
function bench_fast (line 25) | fn bench_fast(c: &mut Criterion, runtime: &Runtime, args: Args, profile:...
function bench_full (line 95) | fn bench_full(c: &mut Criterion, runtime: &Runtime, args: Args, profile:...
function prepare_read (line 214) | fn prepare_read(
function prepare_selective_scan (line 227) | fn prepare_selective_scan(
function physical_backends (line 242) | fn physical_backends() -> [BackendProfile; 2] {
function sqlite_tempfile_backend (line 255) | fn sqlite_tempfile_backend() -> Arc<dyn Backend + Send + Sync> {
function rocksdb_backend (line 259) | fn rocksdb_backend() -> Arc<dyn Backend + Send + Sync> {
function label (line 263) | fn label(rows: usize) -> &'static str {
FILE: packages/engine/benches/physical_layout/changelog.rs
type BackendFactory (line 11) | type BackendFactory = fn() -> Arc<dyn Backend + Send + Sync>;
type BackendProfile (line 14) | struct BackendProfile {
function bench (line 19) | pub(crate) fn bench(c: &mut Criterion, runtime: &Runtime, args: Args) {
function bench_smoke (line 27) | fn bench_smoke(c: &mut Criterion, runtime: &Runtime, args: Args, profile...
function bench_fast (line 85) | fn bench_fast(c: &mut Criterion, runtime: &Runtime, args: Args, profile:...
function bench_full (line 139) | fn bench_full(c: &mut Criterion, runtime: &Runtime, args: Args, profile:...
function prepare_append (line 224) | fn prepare_append(
function prepare_read (line 239) | fn prepare_read(
function physical_backends (line 254) | fn physical_backends() -> [BackendProfile; 2] {
function sqlite_tempfile_backend (line 267) | fn sqlite_tempfile_backend() -> Arc<dyn Backend + Send + Sync> {
function rocksdb_backend (line 271) | fn rocksdb_backend() -> Arc<dyn Backend + Send + Sync> {
function label (line 275) | fn label(rows: usize) -> &'static str {
FILE: packages/engine/benches/physical_layout/json_store.rs
type BackendFactory (line 12) | type BackendFactory = fn() -> Arc<dyn Backend + Send + Sync>;
type BackendProfile (line 15) | struct BackendProfile {
function bench (line 20) | pub(crate) fn bench(c: &mut Criterion, runtime: &Runtime, args: Args) {
function bench_fast (line 27) | fn bench_fast(c: &mut Criterion, runtime: &Runtime, _args: Args, profile...
function bench_full (line 87) | fn bench_full(c: &mut Criterion, runtime: &Runtime, _args: Args, profile...
function prepare_write (line 228) | fn prepare_write(
function prepare_write_dedupe (line 238) | fn prepare_write_dedupe(
function prepare_read (line 248) | fn prepare_read(
function physical_backends (line 264) | fn physical_backends() -> [BackendProfile; 2] {
function sqlite_tempfile_backend (line 277) | fn sqlite_tempfile_backend() -> Arc<dyn Backend + Send + Sync> {
function rocksdb_backend (line 281) | fn rocksdb_backend() -> Arc<dyn Backend + Send + Sync> {
FILE: packages/engine/benches/physical_layout/main.rs
constant BENCH_ROWS (line 20) | const BENCH_ROWS: usize = 10_000;
constant BENCH_BLOB_BYTES (line 21) | const BENCH_BLOB_BYTES: usize = 1024;
constant BENCH_STATE_PAYLOAD_BYTES (line 22) | const BENCH_STATE_PAYLOAD_BYTES: usize = 256;
type Args (line 25) | pub(crate) struct Args {
method config (line 42) | pub(crate) fn config(self) -> StorageBenchConfig {
method default (line 32) | fn default() -> Self {
function physical_layout_benches (line 54) | fn physical_layout_benches(c: &mut Criterion) {
FILE: packages/engine/benches/physical_layout/tracked_state.rs
type BackendFactory (line 11) | type BackendFactory = fn() -> Arc<dyn Backend + Send + Sync>;
type BackendProfile (line 14) | struct BackendProfile {
function bench (line 19) | pub(crate) fn bench(c: &mut Criterion, runtime: &Runtime, args: Args) {
function bench_smoke (line 27) | fn bench_smoke(c: &mut Criterion, runtime: &Runtime, args: Args, profile...
function bench_fast (line 131) | fn bench_fast(c: &mut Criterion, runtime: &Runtime, args: Args, profile:...
function bench_full (line 316) | fn bench_full(c: &mut Criterion, runtime: &Runtime, args: Args, profile:...
function prepare_write_root (line 449) | fn prepare_write_root(
function prepare_read (line 464) | fn prepare_read(
function prepare_read_file_selective (line 479) | fn prepare_read_file_selective(
function prepare_update_rows (line 496) | fn prepare_update_rows(
function prepare_tombstone_rows (line 514) | fn prepare_tombstone_rows(
function prepare_diff_equal (line 532) | fn prepare_diff_equal(
function prepare_diff_update_rows (line 549) | fn prepare_diff_update_rows(
function prepare_diff_tombstone_rows (line 567) | fn prepare_diff_tombstone_rows(
function physical_backends (line 585) | fn physical_backends() -> [BackendProfile; 2] {
function sqlite_tempfile_backend (line 598) | fn sqlite_tempfile_backend() -> Arc<dyn Backend + Send + Sync> {
function rocksdb_backend (line 602) | fn rocksdb_backend() -> Arc<dyn Backend + Send + Sync> {
function label (line 606) | fn label(rows: usize) -> &'static str {
FILE: packages/engine/benches/physical_layout/workflow.rs
type BackendFactory (line 11) | type BackendFactory = fn() -> Arc<dyn Backend + Send + Sync>;
type BackendProfile (line 14) | struct BackendProfile {
function bench (line 19) | pub(crate) fn bench(c: &mut Criterion, runtime: &Runtime, args: Args) {
function bench_smoke (line 27) | fn bench_smoke(c: &mut Criterion, runtime: &Runtime, args: Args, profile...
function bench_fast (line 243) | fn bench_fast(c: &mut Criterion, runtime: &Runtime, args: Args, profile:...
function bench_full (line 395) | fn bench_full(c: &mut Criterion, runtime: &Runtime, args: Args, profile:...
type InsertTrackedCommitFixture (line 449) | struct InsertTrackedCommitFixture {
type UpdateTrackedCommitFixture (line 455) | struct UpdateTrackedCommitFixture {
function run_insert_tracked_commit (line 461) | async fn run_insert_tracked_commit(
function run_update_tracked_commit (line 479) | async fn run_update_tracked_commit(
function prepare_insert_tracked_commit (line 499) | fn prepare_insert_tracked_commit(
function prepare_update_tracked_commit (line 518) | fn prepare_update_tracked_commit(
function prepare_delete_tracked_commit (line 544) | fn prepare_delete_tracked_commit(
function prepare_diff_update (line 570) | fn prepare_diff_update(
function prepare_select_tracked_commit (line 590) | fn prepare_select_tracked_commit(
function prepare_select_tracked_commit_file_selective (line 605) | fn prepare_select_tracked_commit_file_selective(
function prepare_select_after_update (line 622) | fn prepare_select_after_update(
function prepare_select_delta_chain (line 642) | fn prepare_select_delta_chain(
function prepare_select_materialized_delta_chain (line 664) | fn prepare_select_materialized_delta_chain(
function prepare_diff_delta_chain (line 688) | fn prepare_diff_delta_chain(
function prepare_materialize_delta_chain (line 710) | fn prepare_materialize_delta_chain(
function physical_backends (line 734) | fn physical_backends() -> [BackendProfile; 2] {
function sqlite_tempfile_backend (line 747) | fn sqlite_tempfile_backend() -> Arc<dyn Backend + Send + Sync> {
function rocksdb_backend (line 751) | fn rocksdb_backend() -> Arc<dyn Backend + Send + Sync> {
FILE: packages/engine/benches/storage/backend.rs
type Store (line 11) | type Store = BTreeMap<(String, Vec<u8>), Vec<u8>>;
type BenchBackend (line 14) | pub(crate) struct BenchBackend {
method new (line 24) | pub(crate) fn new() -> Arc<dyn Backend + Send + Sync> {
type BenchTransaction (line 18) | pub(crate) struct BenchTransaction {
method lock_store (line 171) | fn lock_store(&self) -> Result<std::sync::MutexGuard<'_, Store>, LixEr...
method lock_store (line 171) | fn lock_store(&self) -> Result<std::sync::MutexGuard<'_, Store>, LixEr...
method begin_read_transaction (line 31) | async fn begin_read_transaction(
method begin_write_transaction (line 40) | async fn begin_write_transaction(
method get_values (line 52) | async fn get_values(
method exists_many (line 80) | async fn exists_many(
method scan_keys (line 98) | async fn scan_keys(
method scan_values (line 106) | async fn scan_values(
method scan_entries (line 114) | async fn scan_entries(
method rollback (line 122) | async fn rollback(mut self: Box<Self>) -> Result<(), LixError> {
method write_kv_batch (line 130) | async fn write_kv_batch(
method commit (line 164) | async fn commit(mut self: Box<Self>) -> Result<(), LixError> {
function scan_store_keys (line 178) | fn scan_store_keys(store: &Store, request: BackendKvScanRequest) -> Back...
function scan_store_values (line 214) | fn scan_store_values(store: &Store, request: BackendKvScanRequest) -> Ba...
function scan_store_entries (line 250) | fn scan_store_entries(store: &Store, request: BackendKvScanRequest) -> B...
function key_matches_range (line 289) | fn key_matches_range(key: &[u8], range: &BackendKvScanRange) -> bool {
function scan_start_key (line 296) | fn scan_start_key(request: &BackendKvScanRequest) -> Vec<u8> {
FILE: packages/engine/benches/storage/binary_cas.rs
function bench (line 7) | pub(crate) fn bench(c: &mut Criterion, runtime: &Runtime, args: Args) {
function prepare_read (line 168) | fn prepare_read(
function config (line 185) | fn config(args: &Args) -> StorageBenchConfig {
FILE: packages/engine/benches/storage/changelog.rs
function bench (line 9) | pub(crate) fn bench(c: &mut Criterion, runtime: &Runtime, args: Args) {
function prepare_read (line 479) | fn prepare_read(
function prepare_read_with (line 496) | fn prepare_read_with(
function config (line 510) | fn config(args: &Args) -> StorageBenchConfig {
FILE: packages/engine/benches/storage/commit_graph.rs
function bench (line 7) | pub(crate) fn bench(c: &mut Criterion, runtime: &Runtime, args: Args) {
function prepare_read (line 29) | fn prepare_read(
function config (line 46) | fn config(args: &Args) -> StorageBenchConfig {
FILE: packages/engine/benches/storage/json_store.rs
function bench (line 9) | pub(crate) fn bench(c: &mut Criterion, runtime: &Runtime, _args: Args) {
function prepare_read (line 268) | fn prepare_read(
FILE: packages/engine/benches/storage/main.rs
constant BENCH_ROWS (line 21) | const BENCH_ROWS: usize = 10_000;
constant BENCH_BLOB_BYTES (line 22) | const BENCH_BLOB_BYTES: usize = 1024;
constant BENCH_STATE_PAYLOAD_BYTES (line 23) | const BENCH_STATE_PAYLOAD_BYTES: usize = 256;
type Args (line 26) | pub(crate) struct Args {
method config (line 43) | pub(crate) fn config(self) -> StorageBenchConfig {
method default (line 33) | fn default() -> Self {
function storage_benches (line 55) | fn storage_benches(c: &mut Criterion) {
FILE: packages/engine/benches/storage/rocksdb_backend.rs
type RocksDbBenchBackend (line 16) | pub(crate) struct RocksDbBenchBackend {
method new (line 36) | pub(crate) fn new() -> Result<Self, LixError> {
method path (line 45) | pub(crate) fn path(&self) -> &Path {
type RocksDbBenchInner (line 20) | struct RocksDbBenchInner {
type RocksDbBenchTransaction (line 25) | pub(crate) struct RocksDbBenchTransaction {
type PendingWrite (line 30) | enum PendingWrite {
method begin_read_transaction (line 52) | async fn begin_read_transaction(
method begin_write_transaction (line 61) | async fn begin_write_transaction(
method get_values (line 73) | async fn get_values(
method exists_many (line 123) | async fn exists_many(
method scan_keys (line 130) | async fn scan_keys(
method scan_values (line 137) | async fn scan_values(
method scan_entries (line 144) | async fn scan_entries(
method rollback (line 151) | async fn rollback(self: Box<Self>) -> Result<(), LixError> {
method write_kv_batch (line 158) | async fn write_kv_batch(
method commit (line 195) | async fn commit(self: Box<Self>) -> Result<(), LixError> {
function open_rocksdb (line 208) | fn open_rocksdb(path: &Path) -> Result<DB, LixError> {
function rocksdb_get_exists_many (line 216) | fn rocksdb_get_exists_many(
function fill_committed_exists (line 245) | fn fill_committed_exists(
function rocksdb_scan_keys (line 286) | fn rocksdb_scan_keys(
function rocksdb_scan_values (line 339) | fn rocksdb_scan_values(
function rocksdb_scan_entries (line 369) | fn rocksdb_scan_entries(
type ScanBounds (line 398) | struct ScanBounds {
method new (line 405) | fn new(request: &BackendKvScanRequest) -> Self {
method contains_encoded (line 421) | fn contains_encoded(&self, encoded_key: &[u8]) -> bool {
function rocksdb_scan_committed_keys (line 427) | fn rocksdb_scan_committed_keys(
function rocksdb_scan_committed_values (line 469) | fn rocksdb_scan_committed_values(
function rocksdb_scan_committed_entries (line 508) | fn rocksdb_scan_committed_entries(
function overlay_pending_values (line 550) | fn overlay_pending_values(
function key_page_from_iter (line 578) | fn key_page_from_iter(
function value_page_from_iter (line 602) | fn value_page_from_iter(
function entry_page_from_iter (line 626) | fn entry_page_from_iter(
function scan_start_key (line 653) | fn scan_start_key(request: &BackendKvScanRequest) -> Vec<u8> {
function scan_end_key (line 664) | fn scan_end_key(range: &BackendKvScanRange) -> Option<Vec<u8>> {
function key_in_range (line 671) | fn key_in_range(key: &[u8], range: &BackendKvScanRange) -> bool {
function key_after_cursor (line 678) | fn key_after_cursor(request: &BackendKvScanRequest, key: &[u8]) -> bool {
function encode_key (line 682) | fn encode_key(namespace: &str, key: &[u8]) -> Vec<u8> {
function namespace_prefix (line 692) | fn namespace_prefix(namespace: &str) -> Vec<u8> {
function namespace_end_key (line 696) | fn namespace_end_key(namespace: &str) -> Vec<u8> {
function decode_key (line 702) | fn decode_key(namespace: &str, encoded: &[u8]) -> Result<Vec<u8>, LixErr...
function prefix_end (line 710) | fn prefix_end(prefix: &[u8]) -> Option<Vec<u8>> {
function rocksdb_error (line 722) | fn rocksdb_error(error: rocksdb::Error) -> LixError {
function io_error (line 729) | fn io_error(error: std::io::Error) -> LixError {
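Note: `prefix_end` (listed in both the RocksDB and SQLite bench backends) computes the exclusive upper bound for a prefix scan. The standard construction, sketched here: increment the last non-0xFF byte and truncate; a prefix of all 0xFF bytes has no finite upper bound, hence the `Option`.

```rust
// Returns the smallest key strictly greater than every key with `prefix`,
// or None when the prefix is all 0xFF bytes (no finite upper bound exists).
fn prefix_end(prefix: &[u8]) -> Option<Vec<u8>> {
    let mut end = prefix.to_vec();
    while let Some(&last) = end.last() {
        if last == 0xff {
            end.pop(); // carry: 0xff has no successor byte
        } else {
            *end.last_mut().unwrap() = last + 1;
            return Some(end);
        }
    }
    None
}

fn main() {
    assert_eq!(prefix_end(b"ab"), Some(b"ac".to_vec()));
    assert_eq!(prefix_end(&[0x61, 0xff]), Some(vec![0x62]));
    assert_eq!(prefix_end(&[0xff, 0xff]), None);
}
```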
FILE: packages/engine/benches/storage/sqlite_backend.rs
type SqliteBenchBackend (line 15) | pub(crate) struct SqliteBenchBackend {
method tempfile (line 28) | pub(crate) fn tempfile() -> Result<Self, LixError> {
method path (line 46) | pub(crate) fn path(&self) -> Option<&Path> {
method lock_connection (line 50) | fn lock_connection(&self) -> Result<std::sync::MutexGuard<'_, Connecti...
type SqliteBenchTransaction (line 22) | pub(crate) struct SqliteBenchTransaction {
method lock_connection (line 268) | fn lock_connection(&self) -> Result<std::sync::MutexGuard<'_, Connecti...
function configure_connection (line 57) | fn configure_connection(connection: &Connection) -> Result<(), LixError> {
method begin_read_transaction (line 79) | async fn begin_read_transaction(
method begin_write_transaction (line 93) | async fn begin_write_transaction(
method get_values (line 110) | async fn get_values(
method exists_many (line 147) | async fn exists_many(
method scan_keys (line 173) | async fn scan_keys(
method scan_values (line 181) | async fn scan_values(
method scan_entries (line 189) | async fn scan_entries(
method rollback (line 197) | async fn rollback(mut self: Box<Self>) -> Result<(), LixError> {
method write_kv_batch (line 208) | async fn write_kv_batch(
method commit (line 258) | async fn commit(mut self: Box<Self>) -> Result<(), LixError> {
method drop (line 276) | fn drop(&mut self) {
function sqlite_scan_keys (line 285) | fn sqlite_scan_keys(
function sqlite_scan_values (line 334) | fn sqlite_scan_values(
function sqlite_scan_entries (line 383) | fn sqlite_scan_entries(
function sqlite_fetch_limit (line 436) | fn sqlite_fetch_limit(limit: usize) -> Result<i64, LixError> {
function scan_start_key (line 454) | fn scan_start_key(request: &BackendKvScanRequest) -> Vec<u8> {
function scan_end_key (line 465) | fn scan_end_key(range: &BackendKvScanRange) -> Option<Vec<u8>> {
function prefix_end (line 472) | fn prefix_end(prefix: &[u8]) -> Option<Vec<u8>> {
function sqlite_error (line 484) | fn sqlite_error(error: rusqlite::Error) -> LixError {
FILE: packages/engine/benches/storage/storage_api.rs
type BackendFactory (line 10) | type BackendFactory = fn() -> Arc<dyn Backend + Send + Sync>;
type BackendProfile (line 13) | struct BackendProfile {
function bench (line 18) | pub(crate) fn bench(c: &mut Criterion, runtime: &Runtime, args: Args) {
function bench_backend (line 37) | fn bench_backend(c: &mut Criterion, runtime: &Runtime, args: Args, profi...
function prepare_read (line 408) | fn prepare_read(
function prepare_selective_scan (line 419) | fn prepare_selective_scan(
function in_memory_backend (line 435) | fn in_memory_backend() -> Arc<dyn Backend + Send + Sync> {
function sqlite_tempfile_backend (line 439) | fn sqlite_tempfile_backend() -> Arc<dyn Backend + Send + Sync> {
function rocksdb_backend (line 443) | fn rocksdb_backend() -> Arc<dyn Backend + Send + Sync> {
function label (line 447) | fn label(rows: usize) -> String {
FILE: packages/engine/benches/storage/tracked_state.rs
function bench (line 10) | pub(crate) fn bench(c: &mut Criterion, runtime: &Runtime, args: Args) {
function bench_fast (line 732) | pub(crate) fn bench_fast(c: &mut Criterion, runtime: &Runtime, args: Arg...
function prepare_read (line 925) | fn prepare_read(
function prepare_read_with (line 942) | fn prepare_read_with(
function prepare_read_file_selective_with (line 958) | fn prepare_read_file_selective_with(
function config (line 976) | fn config(args: &Args) -> StorageBenchConfig {
FILE: packages/engine/benches/storage/untracked_state.rs
function bench (line 10) | pub(crate) fn bench(c: &mut Criterion, runtime: &Runtime, args: Args) {
function prepare_read (line 423) | fn prepare_read(
function prepare_read_with (line 440) | fn prepare_read_with(
function config (line 456) | fn config(args: &Args) -> StorageBenchConfig {
FILE: packages/engine/benches/transaction/main.rs
constant ENTITY_ROWS (line 20) | const ENTITY_ROWS: usize = 10_000;
constant LARGE_ENTITY_ROWS (line 21) | const LARGE_ENTITY_ROWS: usize = 1_000;
constant UPDATE_ROWS_SMALL (line 22) | const UPDATE_ROWS_SMALL: usize = 1;
constant UPDATE_ROWS_BATCH (line 23) | const UPDATE_ROWS_BATCH: usize = 100;
constant SCALING_ROWS (line 24) | const SCALING_ROWS: &[usize] = &[1_000, 2_000, 5_000, 10_000, 20_000];
function transaction_benches (line 26) | fn transaction_benches(c: &mut Criterion) {
function row_count_label (line 817) | fn row_count_label(rows: usize) -> String {
function commit (line 825) | fn commit(
function stage_only (line 837) | fn stage_only(
function commit_only (line 849) | fn commit_only(
function latency_backend (line 861) | fn latency_backend() -> Arc<dyn Backend + Send + Sync> {
type AccountingFixture (line 870) | struct AccountingFixture {
function prepare_accounting (line 875) | fn prepare_accounting<F, Fut>(runtime: &Runtime, prepare: F) -> Accounti...
function accounting (line 889) | fn accounting(
function print_accounting_once (line 910) | fn print_accounting_once(label: &str, report: &TransactionAccountingRepo...
type StorageAccounting (line 925) | struct StorageAccounting {
method reset (line 937) | fn reset(&self) {
method record_write_batch (line 944) | fn record_write_batch(&self, batch: &BackendKvWriteBatch) {
method snapshot (line 980) | fn snapshot(&self) -> StorageAccountingSnapshot {
type StorageAccountingSnapshot (line 930) | struct StorageAccountingSnapshot {
type CountingBackend (line 993) | struct CountingBackend {
method new (line 999) | fn new(
type LatencyBackend (line 1013) | struct LatencyBackend {
method delay (line 1021) | fn delay(duration: Duration) {
method begin_read_transaction (line 1030) | async fn begin_read_transaction(
method begin_write_transaction (line 1040) | async fn begin_write_transaction(
type LatencyReadTransaction (line 1053) | struct LatencyReadTransaction {
method get_values (line 1060) | async fn get_values(
method exists_many (line 1068) | async fn exists_many(
method scan_keys (line 1076) | async fn scan_keys(
method scan_values (line 1084) | async fn scan_values(
method scan_entries (line 1092) | async fn scan_entries(
method rollback (line 1100) | async fn rollback(self: Box<Self>) -> Result<(), LixError> {
type LatencyWriteTransaction (line 1105) | struct LatencyWriteTransaction {
method get_values (line 1114) | async fn get_values(
method exists_many (line 1122) | async fn exists_many(
method scan_keys (line 1130) | async fn scan_keys(
method scan_values (line 1138) | async fn scan_values(
method scan_entries (line 1146) | async fn scan_entries(
method rollback (line 1154) | async fn rollback(self: Box<Self>) -> Result<(), LixError> {
method write_kv_batch (line 1161) | async fn write_kv_batch(
method commit (line 1169) | async fn commit(self: Box<Self>) -> Result<(), LixError> {
method begin_read_transaction (line 1177) | async fn begin_read_transaction(
method begin_write_transaction (line 1184) | async fn begin_write_transaction(
type CountingReadTransaction (line 1195) | struct CountingReadTransaction {
method get_values (line 1201) | async fn get_values(
method exists_many (line 1208) | async fn exists_many(
method scan_keys (line 1215) | async fn scan_keys(
method scan_values (line 1222) | async fn scan_values(
method scan_entries (line 1229) | async fn scan_entries(
method rollback (line 1236) | async fn rollback(self: Box<Self>) -> Result<(), LixError> {
type CountingWriteTransaction (line 1241) | struct CountingWriteTransaction {
method get_values (line 1248) | async fn get_values(
method exists_many (line 1255) | async fn exists_many(
method scan_keys (line 1262) | async fn scan_keys(
method scan_values (line 1269) | async fn scan_values(
method scan_entries (line 1276) | async fn scan_entries(
method rollback (line 1283) | async fn rollback(self: Box<Self>) -> Result<(), LixError> {
method write_kv_batch (line 1290) | async fn write_kv_batch(
method commit (line 1298) | async fn commit(self: Box<Self>) -> Result<(), LixError> {
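Note: the transaction bench wraps a real backend in `LatencyBackend`/`CountingBackend` decorators, so every storage call pays a fixed delay or bumps a counter before delegating. A minimal sketch of that decorator idea, using a synchronous stand-in trait (the real traits are async and carry scan/get/write methods):

```rust
use std::time::{Duration, Instant};

// Synchronous stand-in for the storage trait; the real Backend is async.
trait Kv {
    fn get(&self, key: &[u8]) -> Option<Vec<u8>>;
}

struct InMemory(std::collections::BTreeMap<Vec<u8>, Vec<u8>>);

impl Kv for InMemory {
    fn get(&self, key: &[u8]) -> Option<Vec<u8>> {
        self.0.get(key).cloned()
    }
}

// Decorator: injects a fixed latency before delegating, the way the
// bench's LatencyBackend models slow storage.
struct Latency<B> {
    inner: B,
    delay: Duration,
}

impl<B: Kv> Kv for Latency<B> {
    fn get(&self, key: &[u8]) -> Option<Vec<u8>> {
        std::thread::sleep(self.delay);
        self.inner.get(key)
    }
}

fn main() {
    let store = Latency {
        inner: InMemory([(b"k".to_vec(), b"v".to_vec())].into_iter().collect()),
        delay: Duration::from_millis(1),
    };
    let start = Instant::now();
    assert_eq!(store.get(b"k"), Some(b"v".to_vec()));
    assert!(start.elapsed() >= Duration::from_millis(1));
}
```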
FILE: packages/engine/src/backend/kv.rs
type BytePage (line 2) | pub struct BytePage {
method new (line 8) | pub fn new() -> Self {
method len (line 15) | pub fn len(&self) -> usize {
method is_empty (line 19) | pub fn is_empty(&self) -> bool {
method get (line 23) | pub fn get(&self, index: usize) -> Option<&[u8]> {
method iter (line 29) | pub fn iter(&self) -> BytePageIter<'_> {
type BytePageIter (line 37) | pub struct BytePageIter<'a> {
type Item (line 43) | type Item = &'a [u8];
method next (line 45) | fn next(&mut self) -> Option<Self::Item> {
type BytePageBuilder (line 53) | pub struct BytePageBuilder {
method new (line 59) | pub fn new() -> Self {
method with_capacity (line 66) | pub fn with_capacity(items: usize, bytes: usize) -> Self {
method from_page (line 75) | pub fn from_page(page: BytePage) -> Self {
method len (line 82) | pub fn len(&self) -> usize {
method is_empty (line 86) | pub fn is_empty(&self) -> bool {
method get (line 90) | pub fn get(&self, index: usize) -> Option<&[u8]> {
method push (line 96) | pub fn push(&mut self, value: impl AsRef<[u8]>) {
method finish (line 103) | pub fn finish(self) -> BytePage {
type BackendKvScanRange (line 117) | pub enum BackendKvScanRange {
method prefix (line 123) | pub fn prefix(prefix: impl Into<Vec<u8>>) -> Self {
method range (line 127) | pub fn range(start: impl Into<Vec<u8>>, end: impl Into<Vec<u8>>) -> Se...
type BackendKvGetRequest (line 136) | pub struct BackendKvGetRequest {
type BackendKvGetGroup (line 141) | pub struct BackendKvGetGroup {
method namespace (line 147) | pub fn namespace(&self) -> &str {
type BackendKvValueBatch (line 153) | pub struct BackendKvValueBatch {
type BackendKvValueGroup (line 158) | pub struct BackendKvValueGroup {
method new (line 165) | pub fn new(namespace: impl Into<String>, values: BytePage, present: Ve...
method namespace (line 178) | pub fn namespace(&self) -> &str {
method len (line 182) | pub fn len(&self) -> usize {
method is_empty (line 186) | pub fn is_empty(&self) -> bool {
method value (line 190) | pub fn value(&self, index: usize) -> Option<Option<&[u8]>> {
method values_iter (line 203) | pub fn values_iter(&self) -> impl Iterator<Item = Option<&[u8]>> {
method into_parts (line 207) | pub fn into_parts(self) -> (String, BytePage, Vec<bool>) {
type BackendKvExistsBatch (line 213) | pub struct BackendKvExistsBatch {
type BackendKvExistsGroup (line 218) | pub struct BackendKvExistsGroup {
type BackendKvScanRequest (line 224) | pub struct BackendKvScanRequest {
type BackendKvKeyPage (line 232) | pub struct BackendKvKeyPage {
type BackendKvValuePage (line 238) | pub struct BackendKvValuePage {
type BackendKvEntryPage (line 244) | pub struct BackendKvEntryPage {
method len (line 251) | pub fn len(&self) -> usize {
method is_empty (line 255) | pub fn is_empty(&self) -> bool {
method key (line 259) | pub fn key(&self, index: usize) -> Option<&[u8]> {
method value (line 263) | pub fn value(&self, index: usize) -> Option<&[u8]> {
type BackendKvWriteBatch (line 269) | pub struct BackendKvWriteBatch {
type BackendKvWriteGroup (line 274) | pub struct BackendKvWriteGroup {
method new (line 282) | pub fn new(namespace: impl Into<String>) -> Self {
method from_pages (line 291) | pub fn from_pages(
method put (line 310) | pub fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
method delete (line 315) | pub fn delete(&mut self, key: impl AsRef<[u8]>) {
method namespace (line 319) | pub fn namespace(&self) -> &str {
method put_count (line 323) | pub fn put_count(&self) -> usize {
method delete_count (line 327) | pub fn delete_count(&self) -> usize {
method put_key (line 331) | pub fn put_key(&self, index: usize) -> Option<&[u8]> {
method put_value (line 335) | pub fn put_value(&self, index: usize) -> Option<&[u8]> {
method delete_key (line 339) | pub fn delete_key(&self, index: usize) -> Option<&[u8]> {
method into_parts (line 343) | pub fn into_parts(self) -> (String, BytePage, BytePage, BytePage) {
type BackendKvWriteStats (line 354) | pub struct BackendKvWriteStats {
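Note: `BytePage`/`BytePageBuilder` read like an arena for many small byte strings: one contiguous buffer plus offsets, avoiding a `Vec<Vec<u8>>` allocation per item. The listed methods (`push`, `finish`, `get`, `iter`, `len`) fit that layout; the internal representation below is an assumption.

```rust
// Assumed layout: one flat buffer plus end-offsets, so n items cost two
// allocations instead of n.
#[derive(Default)]
struct BytePage {
    bytes: Vec<u8>,
    ends: Vec<usize>, // ends[i] = one past the last byte of item i
}

impl BytePage {
    fn len(&self) -> usize {
        self.ends.len()
    }

    fn get(&self, index: usize) -> Option<&[u8]> {
        let end = *self.ends.get(index)?;
        let start = if index == 0 { 0 } else { self.ends[index - 1] };
        Some(&self.bytes[start..end])
    }

    fn iter(&self) -> impl Iterator<Item = &[u8]> + '_ {
        (0..self.len()).map(move |i| self.get(i).unwrap())
    }
}

#[derive(Default)]
struct BytePageBuilder {
    page: BytePage,
}

impl BytePageBuilder {
    fn push(&mut self, value: impl AsRef<[u8]>) {
        self.page.bytes.extend_from_slice(value.as_ref());
        self.page.ends.push(self.page.bytes.len());
    }

    fn finish(self) -> BytePage {
        self.page
    }
}

fn main() {
    let mut builder = BytePageBuilder::default();
    builder.push(b"alpha");
    builder.push(b"b");
    let page = builder.finish();
    assert_eq!(page.len(), 2);
    assert_eq!(page.get(1), Some(&b"b"[..]));
    assert_eq!(page.iter().count(), 2);
}
```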
FILE: packages/engine/src/backend/testing.rs
type KvMap (line 14) | type KvMap = BTreeMap<(String, Vec<u8>), Vec<u8>>;
type UnitTestBackend (line 21) | pub(crate) struct UnitTestBackend {
method new (line 26) | pub(crate) fn new() -> Self {
method begin_read_transaction (line 33) | async fn begin_read_transaction(
method begin_write_transaction (line 47) | async fn begin_write_transaction(
type UnitTestTransaction (line 62) | struct UnitTestTransaction {
method get_values (line 69) | async fn get_values(
method exists_many (line 96) | async fn exists_many(
method scan_keys (line 113) | async fn scan_keys(
method scan_values (line 120) | async fn scan_values(
method scan_entries (line 127) | async fn scan_entries(
method rollback (line 134) | async fn rollback(self: Box<Self>) -> Result<(), LixError> {
method write_kv_batch (line 141) | async fn write_kv_batch(
method commit (line 175) | async fn commit(self: Box<Self>) -> Result<(), LixError> {
method begin_read_transaction (line 186) | async fn begin_read_transaction(
method begin_write_transaction (line 192) | async fn begin_write_transaction(
function scan_pairs (line 199) | fn scan_pairs<'a>(
function scan_map_keys (line 222) | pub(crate) fn scan_map_keys(kv: &KvMap, request: BackendKvScanRequest) -...
function scan_map_values (line 241) | pub(crate) fn scan_map_values(kv: &KvMap, request: BackendKvScanRequest)...
function scan_map_entries (line 260) | pub(crate) fn scan_map_entries(kv: &KvMap, request: BackendKvScanRequest...
function scan_filtered_pairs (line 282) | fn scan_filtered_pairs<'a>(
function key_matches_range (line 301) | fn key_matches_range(key: &[u8], range: &BackendKvScanRange) -> bool {
function lock_error (line 308) | fn lock_error(name: &str) -> LixError {
function put (line 320) | async fn put(
function delete (line 338) | async fn delete(
function get (line 355) | async fn get(backend: &UnitTestBackend, namespace: &str, key: &[u8]) -> ...
function scan (line 380) | async fn scan(
function assert_entries (line 406) | fn assert_entries(page: &BackendKvEntryPage, expected: &[(&[u8], &[u8])]) {
function scan_entries_request (line 414) | async fn scan_entries_request(
function scan_keys_request (line 439) | async fn scan_keys_request(
function committed_put_is_visible_to_backend_reads (line 465) | async fn committed_put_is_visible_to_backend_reads() {
function rollback_discards_puts (line 481) | async fn rollback_discards_puts() {
function close_is_idempotent_and_does_not_destroy_data (line 497) | async fn close_is_idempotent_and_does_not_destroy_data() {
function delete_removes_key_on_commit (line 516) | async fn delete_removes_key_on_commit() {
function prefix_scan_returns_lexicographic_order_with_limit (line 536) | async fn prefix_scan_returns_lexicographic_order_with_limit() {
function scan_sets_resume_after_only_when_more_rows_exist (line 553) | async fn scan_sets_resume_after_only_when_more_rows_exist() {
function scan_exact_page_size_has_no_resume_after (line 575) | async fn scan_exact_page_size_has_no_resume_after() {
function key_only_scan_omits_values (line 591) | async fn key_only_scan_omits_values() {
function existence_get_omits_values (line 607) | async fn existence_get_omits_values() {
function range_scan_is_half_open (line 638) | async fn range_scan_is_half_open() {
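Note: the tests above pin down the pagination contract: a scan returns a `resume_after` cursor only when more rows exist beyond the page, and a page that exactly exhausts the data has no cursor. A common way to implement that is to probe `limit + 1` rows and keep the extra row only as evidence; the `paginate` helper below is a hypothetical sketch under that assumption.

```rust
// Returns one page of at most `limit` items plus a resume cursor that is
// Some(last_key) only when at least one more item exists past the page.
fn paginate<'a>(
    sorted: &'a [(Vec<u8>, Vec<u8>)],
    limit: usize,
) -> (Vec<&'a (Vec<u8>, Vec<u8>)>, Option<&'a [u8]>) {
    // Probe limit + 1 rows: the extra row proves there is a next page.
    let probed: Vec<_> = sorted.iter().take(limit + 1).collect();
    if probed.len() > limit {
        let page: Vec<_> = probed[..limit].to_vec();
        let cursor = page.last().copied().map(|(key, _)| key.as_slice());
        (page, cursor)
    } else {
        (probed, None) // exact or short page: no resume_after
    }
}

fn main() {
    let rows: Vec<(Vec<u8>, Vec<u8>)> = (0u8..3).map(|i| (vec![i], vec![i])).collect();

    let (page, cursor) = paginate(&rows, 2);
    assert_eq!(page.len(), 2);
    assert_eq!(cursor, Some(&[1u8][..])); // more rows exist: cursor set

    let (page, cursor) = paginate(&rows, 3);
    assert_eq!(page.len(), 3);
    assert!(cursor.is_none()); // exact page size: no cursor
}
```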
FILE: packages/engine/src/backend/types.rs
type Backend (line 11) | pub trait Backend: Send + Sync {
method begin_read_transaction (line 12) | async fn begin_read_transaction(
method begin_write_transaction (line 16) | async fn begin_write_transaction(
method close (line 27) | async fn close(&self) -> Result<(), LixError> {
method destroy (line 48) | async fn destroy(&self) -> Result<(), LixError> {
type BackendReadTransaction (line 59) | pub trait BackendReadTransaction: Send + Sync {
method get_values (line 60) | async fn get_values(
method exists_many (line 65) | async fn exists_many(
method scan_keys (line 70) | async fn scan_keys(
method scan_values (line 75) | async fn scan_values(
method scan_entries (line 80) | async fn scan_entries(
method rollback (line 85) | async fn rollback(self: Box<Self>) -> Result<(), LixError>;
type BackendWriteTransaction (line 89) | pub trait BackendWriteTransaction: BackendReadTransaction {
method write_kv_batch (line 90) | async fn write_kv_batch(
method commit (line 95) | async fn commit(self: Box<Self>) -> Result<(), LixError>;
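Note: both transaction traits above consume themselves on `commit`/`rollback` via a `self: Box<Self>` receiver, which lets a boxed trait object end its own lifecycle exactly once. A reduced sketch of that pattern; signatures are simplified and synchronous here, while the real traits are async and richer.

```rust
// Reduced, synchronous version of the transaction traits. `self: Box<Self>`
// makes commit and rollback consume the trait object, so a finished
// transaction cannot be used again (the compiler rejects later calls).
trait ReadTx {
    fn get(&mut self, key: &[u8]) -> Option<Vec<u8>>;
    fn rollback(self: Box<Self>);
}

trait WriteTx: ReadTx {
    fn put(&mut self, key: &[u8], value: &[u8]);
    fn commit(self: Box<Self>);
}

struct MemTx {
    staged: Vec<(Vec<u8>, Vec<u8>)>,
}

impl ReadTx for MemTx {
    fn get(&mut self, key: &[u8]) -> Option<Vec<u8>> {
        self.staged
            .iter()
            .rev()
            .find(|(k, _)| k.as_slice() == key)
            .map(|(_, v)| v.clone())
    }
    fn rollback(self: Box<Self>) {
        drop(self); // staged writes vanish with the transaction
    }
}

impl WriteTx for MemTx {
    fn put(&mut self, key: &[u8], value: &[u8]) {
        self.staged.push((key.to_vec(), value.to_vec()));
    }
    fn commit(self: Box<Self>) {
        println!("committing {} staged writes", self.staged.len());
    }
}

fn main() {
    let mut tx: Box<dyn WriteTx> = Box::new(MemTx { staged: Vec::new() });
    tx.put(b"k", b"v");
    assert_eq!(tx.get(b"k"), Some(b"v".to_vec()));
    tx.commit(); // consumes the box; `tx` can no longer be used
}
```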
FILE: packages/engine/src/binary_cas/chunking.rs
constant FASTCDC_MIN_CHUNK_BYTES (line 1) | const FASTCDC_MIN_CHUNK_BYTES: usize = 16 * 1024;
constant FASTCDC_AVG_CHUNK_BYTES (line 2) | const FASTCDC_AVG_CHUNK_BYTES: usize = 64 * 1024;
constant FASTCDC_MAX_CHUNK_BYTES (line 3) | const FASTCDC_MAX_CHUNK_BYTES: usize = 256 * 1024;
constant SINGLE_CHUNK_FAST_PATH_MAX_BYTES (line 4) | const SINGLE_CHUNK_FAST_PATH_MAX_BYTES: usize = 64 * 1024;
function should_materialize_chunk_cas (line 7) | pub(crate) fn should_materialize_chunk_cas(data: &[u8]) -> bool {
function fastcdc_chunk_ranges (line 11) | pub(crate) fn fastcdc_chunk_ranges(data: &[u8]) -> Vec<(usize, usize)> {
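Note: the constants above set a 16 KiB min / 64 KiB avg / 256 KiB max FastCDC chunk size, and `SINGLE_CHUNK_FAST_PATH_MAX_BYTES` suggests blobs at or below 64 KiB skip content-defined chunking entirely. A hedged reading of `should_materialize_chunk_cas`; the exact predicate is an assumption.

```rust
const FASTCDC_MIN_CHUNK_BYTES: usize = 16 * 1024;
const FASTCDC_AVG_CHUNK_BYTES: usize = 64 * 1024;
const FASTCDC_MAX_CHUNK_BYTES: usize = 256 * 1024;
const SINGLE_CHUNK_FAST_PATH_MAX_BYTES: usize = 64 * 1024;

// Assumed predicate: only blobs too large for the single-chunk fast path
// are worth running through FastCDC and storing as chunked CAS objects.
fn should_materialize_chunk_cas(data: &[u8]) -> bool {
    data.len() > SINGLE_CHUNK_FAST_PATH_MAX_BYTES
}

fn main() {
    assert!(!should_materialize_chunk_cas(&vec![0u8; 64 * 1024]));
    assert!(should_materialize_chunk_cas(&vec![0u8; 64 * 1024 + 1]));
    // FastCDC then cuts chunks between min and max, targeting avg.
    assert!(FASTCDC_MIN_CHUNK_BYTES < FASTCDC_AVG_CHUNK_BYTES);
    assert!(FASTCDC_AVG_CHUNK_BYTES < FASTCDC_MAX_CHUNK_BYTES);
}
```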
FILE: packages/engine/src/binary_cas/codec.rs
constant MANIFEST_MAGIC (line 10) | const MANIFEST_MAGIC: &[u8; 4] = b"BCM2";
constant MANIFEST_CHUNK_MAGIC (line 11) | const MANIFEST_CHUNK_MAGIC: &[u8; 4] = b"BCC1";
constant CHUNK_MAGIC (line 12) | const CHUNK_MAGIC: &[u8; 4] = b"BCK1";
constant MANIFEST_KIND_EMPTY (line 13) | const MANIFEST_KIND_EMPTY: u8 = 0;
constant MANIFEST_KIND_SINGLE_CHUNK (line 14) | const MANIFEST_KIND_SINGLE_CHUNK: u8 = 1;
constant MANIFEST_KIND_CHUNKED (line 15) | const MANIFEST_KIND_CHUNKED: u8 = 2;
constant CHUNK_CODEC_RAW_TAG (line 16) | const CHUNK_CODEC_RAW_TAG: u8 = 0;
constant HASH_BYTES (line 17) | const HASH_BYTES: usize = 32;
constant MANIFEST_HEADER_BYTES (line 18) | const MANIFEST_HEADER_BYTES: usize = 4 + 1 + 8;
constant EMPTY_MANIFEST_BYTES (line 19) | const EMPTY_MANIFEST_BYTES: usize = MANIFEST_HEADER_BYTES;
constant SINGLE_CHUNK_MANIFEST_BYTES (line 20) | const SINGLE_CHUNK_MANIFEST_BYTES: usize = MANIFEST_HEADER_BYTES + HASH_...
constant CHUNKED_MANIFEST_BYTES (line 21) | const CHUNKED_MANIFEST_BYTES: usize = MANIFEST_HEADER_BYTES + 4;
constant MANIFEST_CHUNK_BYTES (line 22) | const MANIFEST_CHUNK_BYTES: usize = 4 + HASH_BYTES + 8;
constant CHUNK_HEADER_BYTES (line 23) | const CHUNK_HEADER_BYTES: usize = 4 + 1 + 8;
type BinaryChunkCodec (line 26) | pub(crate) enum BinaryChunkCodec {
method tag (line 31) | fn tag(self) -> u8 {
method from_tag (line 37) | fn from_tag(tag: u8) -> Result<Self, LixError> {
type EncodedBinaryChunkPayload (line 48) | pub(crate) struct EncodedBinaryChunkPayload {
type BinaryCasManifest (line 54) | pub(crate) enum BinaryCasManifest {
method size_bytes (line 69) | pub(crate) fn size_bytes(&self) -> u64 {
function binary_blob_hash_hex (line 79) | pub(crate) fn binary_blob_hash_hex(data: &[u8]) -> String {
function binary_blob_hash_bytes (line 83) | pub(crate) fn binary_blob_hash_bytes(data: &[u8]) -> [u8; HASH_BYTES] {
function hash_hex_to_bytes (line 87) | pub(crate) fn hash_hex_to_bytes(hash_hex: &str, label: &str) -> Result<[...
function hash_bytes_to_hex (line 105) | pub(crate) fn hash_bytes_to_hex(bytes: &[u8; HASH_BYTES]) -> String {
function encode_binary_cas_manifest (line 109) | pub(crate) fn encode_binary_cas_manifest(manifest: &BinaryCasManifest) -...
function decode_binary_cas_manifest (line 142) | pub(crate) fn decode_binary_cas_manifest(bytes: &[u8]) -> Result<BinaryC...
function encode_binary_cas_manifest_chunk (line 182) | pub(crate) fn encode_binary_cas_manifest_chunk(
function decode_binary_cas_manifest_chunk (line 193) | pub(crate) fn decode_binary_cas_manifest_chunk(
function encode_binary_cas_chunk (line 208) | pub(crate) fn encode_binary_cas_chunk(
function decode_binary_cas_chunk (line 221) | pub(crate) fn decode_binary_cas_chunk(
function require_magic (line 236) | fn require_magic(bytes: &[u8], expected: &[u8; 4], label: &str) -> Resul...
function require_len (line 245) | fn require_len(bytes: &[u8], expected: usize, label: &str) -> Result<(),...
function hex_value (line 255) | fn hex_value(byte: u8, label: &str) -> Result<u8, LixError> {
function codec_error (line 264) | fn codec_error(message: String) -> LixError {
function encode_binary_chunk_payload (line 268) | pub(crate) fn encode_binary_chunk_payload(chunk_data: &[u8]) -> EncodedB...
function manifests_roundtrip_fixed_binary_rows (line 280) | fn manifests_roundtrip_fixed_binary_rows() {
function manifest_chunk_roundtrips_fixed_binary_row (line 310) | fn manifest_chunk_roundtrips_fixed_binary_row() {
function chunk_roundtrips_payload_as_remaining_bytes (line 321) | fn chunk_roundtrips_payload_as_remaining_bytes() {
function wrong_magic_is_rejected (line 332) | fn wrong_magic_is_rejected() {
function hex_hashes_roundtrip_to_32_byte_keys (line 340) | fn hex_hashes_roundtrip_to_32_byte_keys() {
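Note: the constants above fix a 13-byte manifest header: 4 magic bytes (`BCM2`), 1 kind byte, and an 8-byte size (`MANIFEST_HEADER_BYTES = 4 + 1 + 8`). A sketch of encoding and validating that header; the u64 byte order is an assumption (little-endian here), only the field layout follows the listed constants.

```rust
const MANIFEST_MAGIC: &[u8; 4] = b"BCM2";
const MANIFEST_KIND_EMPTY: u8 = 0;
const MANIFEST_HEADER_BYTES: usize = 4 + 1 + 8; // magic + kind + u64 size

// Encodes the fixed 13-byte manifest header (byte order assumed).
fn encode_manifest_header(kind: u8, size_bytes: u64) -> Vec<u8> {
    let mut out = Vec::with_capacity(MANIFEST_HEADER_BYTES);
    out.extend_from_slice(MANIFEST_MAGIC);
    out.push(kind);
    out.extend_from_slice(&size_bytes.to_le_bytes());
    out
}

// Checks length and magic before reading kind and size, mirroring the
// codec's `require_len`/`require_magic` split.
fn decode_manifest_header(bytes: &[u8]) -> Result<(u8, u64), String> {
    if bytes.len() < MANIFEST_HEADER_BYTES {
        return Err("manifest header truncated".into());
    }
    if &bytes[..4] != MANIFEST_MAGIC {
        return Err("bad manifest magic".into());
    }
    let size = u64::from_le_bytes(bytes[5..13].try_into().unwrap());
    Ok((bytes[4], size))
}

fn main() {
    let encoded = encode_manifest_header(MANIFEST_KIND_EMPTY, 0);
    assert_eq!(encoded.len(), MANIFEST_HEADER_BYTES);
    assert_eq!(decode_manifest_header(&encoded), Ok((MANIFEST_KIND_EMPTY, 0)));
}
```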
FILE: packages/engine/src/binary_cas/context.rs
type BlobDataReader (line 11) | pub(crate) trait BlobDataReader: Send + Sync {
method load_bytes_many (line 12) | async fn load_bytes_many(&self, hashes: &[BlobHash]) -> Result<BlobByt...
method load_bytes_many (line 48) | async fn load_bytes_many(&self, hashes: &[BlobHash]) -> Result<BlobByt...
type BinaryCasContext (line 20) | pub(crate) struct BinaryCasContext;
method new (line 23) | pub(crate) fn new() -> Self {
method reader (line 31) | pub(crate) fn reader<S>(&self, store: S) -> BinaryCasStoreReader<S>
method writer (line 38) | pub(crate) fn writer<'a>(&self, writes: &'a mut StorageWriteSet) -> Bi...
type BinaryCasStoreReader (line 57) | pub(crate) struct BinaryCasStoreReader<S> {
function exists_many (line 66) | pub(crate) async fn exists_many(
function load_metadata_many (line 74) | pub(crate) async fn load_metadata_many(
function load_bytes_many (line 81) | pub(crate) async fn load_bytes_many(
function count_blob_manifests (line 89) | pub(crate) async fn count_blob_manifests(&mut self) -> Result<usize, Lix...
type BinaryCasWriter (line 98) | pub(crate) struct BinaryCasWriter<'a> {
function new (line 105) | fn new(writes: &'a mut StorageWriteSet) -> Self {
function stage_bytes (line 113) | pub(crate) fn stage_bytes(&mut self, bytes: &[u8]) -> Result<BlobWriteRe...
function stage_many (line 123) | pub(crate) fn stage_many(
FILE: packages/engine/src/binary_cas/kv.rs
constant BINARY_CAS_MANIFEST_NAMESPACE (line 19) | pub(crate) const BINARY_CAS_MANIFEST_NAMESPACE: &str = "binary_cas.manif...
constant BINARY_CAS_MANIFEST_CHUNK_NAMESPACE (line 20) | pub(crate) const BINARY_CAS_MANIFEST_CHUNK_NAMESPACE: &str = "binary_cas...
constant BINARY_CAS_CHUNK_NAMESPACE (line 21) | pub(crate) const BINARY_CAS_CHUNK_NAMESPACE: &str = "binary_cas.chunk";
type KvBlobManifestChunk (line 24) | pub(crate) struct KvBlobManifestChunk {
type KvChunk (line 30) | pub(crate) struct KvChunk {
function load_manifest (line 36) | pub(crate) async fn load_manifest(
function count_manifests (line 53) | pub(crate) async fn count_manifests(store: &mut impl StorageReader) -> R...
function stage_manifest (line 63) | pub(crate) fn stage_manifest(
function scan_manifest_chunks (line 75) | pub(crate) async fn scan_manifest_chunks(
function stage_manifest_chunk (line 96) | pub(crate) fn stage_manifest_chunk(
function load_chunk (line 109) | pub(crate) async fn load_chunk(
function stage_chunk (line 125) | pub(crate) fn stage_chunk(writes: &mut StorageWriteSet, chunk_hash: Blob...
function get_one (line 133) | async fn get_one(
function scan_all_values (line 152) | async fn scan_all_values(
function load_metadata_many (line 169) | pub(crate) async fn load_metadata_many(
function exists_many (line 218) | pub(crate) async fn exists_many(
function load_bytes_many (line 232) | pub(crate) async fn load_bytes_many(
function load_chunk_rows (line 303) | async fn load_chunk_rows(
function assemble_blob_bytes (line 330) | fn assemble_blob_bytes(
function decode_chunk_from_map (line 427) | fn decode_chunk_from_map(
function decode_and_verify_chunk (line 446) | fn decode_and_verify_chunk(
function stage_blob_write (line 491) | pub(crate) fn stage_blob_write(
function metadata_from_manifest (line 595) | fn metadata_from_manifest(
function manifest_key (line 625) | fn manifest_key(blob_hash: BlobHash) -> Vec<u8> {
function manifest_chunk_prefix (line 629) | fn manifest_chunk_prefix(blob_hash: BlobHash) -> Vec<u8> {
function manifest_chunk_key (line 633) | fn manifest_chunk_key(blob_hash: BlobHash, chunk_index: u64) -> Vec<u8> {
function chunk_key (line 640) | fn chunk_key(chunk_hash: BlobHash) -> Vec<u8> {
function persisted_size_to_usize (line 644) | fn persisted_size_to_usize(size: u64, label: &str) -> Result<usize, LixE...
function stage_blob_to_writes (line 660) | fn stage_blob_to_writes(writes: &mut StorageWriteSet, data: &[u8]) {
function stores_manifest_chunks_in_scan_order (line 666) | async fn stores_manifest_chunks_in_scan_order() {
function stores_encoded_chunks_by_chunk_hash (line 746) | async fn stores_encoded_chunks_by_chunk_hash() {
function binary_hash_keys_are_compact_and_manifest_chunks_sort_by_index (line 782) | fn binary_hash_keys_are_compact_and_manifest_chunks_sort_by_index() {
function public_kv_api_roundtrips_blob_bytes (line 798) | async fn public_kv_api_roundtrips_blob_bytes() {
function read_rejects_chunk_bytes_that_do_not_match_manifest_hash (line 865) | async fn read_rejects_chunk_bytes_that_do_not_match_manifest_hash() {
function read_rejects_manifest_that_assembles_wrong_blob_hash (line 916) | async fn read_rejects_manifest_that_assembles_wrong_blob_hash() {
function public_kv_api_roundtrips_empty_blob (line 976) | async fn public_kv_api_roundtrips_empty_blob() {
function public_kv_api_roundtrips_multi_chunk_blob (line 1019) | async fn public_kv_api_roundtrips_multi_chunk_blob() {
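The kv.rs entries above sketch a content-addressed layout: manifests keyed by blob hash, chunks keyed by chunk hash, and reads that reject bytes whose hash no longer matches the manifest (see `read_rejects_chunk_bytes_that_do_not_match_manifest_hash`). A minimal self-contained sketch of that verify-on-read technique, using a `HashMap` in place of the crate's `StorageReader`/`StorageWriteSet` and requiring `blake3` in Cargo.toml; the `ChunkStore` type and its methods are illustrative, not the crate's API:

```rust
// Sketch of a content-addressed chunk store: chunks are keyed by the
// blake3 hash of their bytes, and reads re-hash to verify integrity.
use std::collections::HashMap;

struct ChunkStore {
    // key = 32-byte blake3 hash of the chunk, value = chunk bytes
    chunks: HashMap<[u8; 32], Vec<u8>>,
}

impl ChunkStore {
    fn new() -> Self {
        Self { chunks: HashMap::new() }
    }

    // Writing is idempotent: the same bytes always map to the same key.
    fn stage_chunk(&mut self, bytes: &[u8]) -> [u8; 32] {
        let hash = *blake3::hash(bytes).as_bytes();
        self.chunks.insert(hash, bytes.to_vec());
        hash
    }

    // Reads verify that stored bytes still match the requested hash,
    // mirroring the manifest-hash checks the kv.rs tests exercise.
    fn load_chunk(&self, hash: &[u8; 32]) -> Result<&[u8], String> {
        let bytes = self.chunks.get(hash).ok_or("chunk not found")?;
        if blake3::hash(bytes).as_bytes() != hash {
            return Err("chunk bytes do not match content hash".into());
        }
        Ok(bytes)
    }
}

fn main() {
    let mut store = ChunkStore::new();
    let hash = store.stage_chunk(b"hello blob");
    assert_eq!(store.load_chunk(&hash).unwrap(), b"hello blob");
}
```

Content addressing also gives deduplication for free: staging the same bytes twice produces one stored chunk under one key.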
FILE: packages/engine/src/binary_cas/types.rs
type BlobHash (line 5) | pub(crate) struct BlobHash([u8; 32]);
method from_bytes (line 8) | pub(crate) fn from_bytes(bytes: [u8; 32]) -> Self {
method from_content (line 12) | pub(crate) fn from_content(content: &[u8]) -> Self {
method from_hex (line 16) | pub(crate) fn from_hex(hash_hex: &str) -> Result<Self, LixError> {
method to_hex (line 20) | pub(crate) fn to_hex(self) -> String {
method as_bytes (line 24) | pub(crate) fn as_bytes(&self) -> &[u8; 32] {
method into_bytes (line 28) | pub(crate) fn into_bytes(self) -> [u8; 32] {
type BlobLayout (line 34) | pub(crate) enum BlobLayout {
type BlobMetadata (line 41) | pub(crate) struct BlobMetadata {
type BlobExistsBatch (line 48) | pub(crate) struct BlobExistsBatch {
method new (line 53) | pub(crate) fn new(entries: Vec<bool>) -> Self {
method get (line 58) | pub(crate) fn get(&self, index: usize) -> bool {
method into_vec (line 63) | pub(crate) fn into_vec(self) -> Vec<bool> {
type BlobMetadataBatch (line 69) | pub(crate) struct BlobMetadataBatch {
method new (line 74) | pub(crate) fn new(entries: Vec<Option<BlobMetadata>>) -> Self {
method get (line 79) | pub(crate) fn get(&self, index: usize) -> Option<&BlobMetadata> {
method into_vec (line 83) | pub(crate) fn into_vec(self) -> Vec<Option<BlobMetadata>> {
type BlobBytesBatch (line 89) | pub(crate) struct BlobBytesBatch {
method new (line 94) | pub(crate) fn new(entries: Vec<Option<Vec<u8>>>) -> Self {
method get (line 99) | pub(crate) fn get(&self, index: usize) -> Option<&[u8]> {
method into_vec (line 106) | pub(crate) fn into_vec(self) -> Vec<Option<Vec<u8>>> {
type BlobWrite (line 112) | pub(crate) struct BlobWrite<'a> {
type BlobWriteReceipt (line 117) | pub(crate) struct BlobWriteReceipt {
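`BlobHash` above is a newtype over `[u8; 32]` with content hashing and hex round-tripping. A dependency-light sketch of the same shape (blake3 for `from_content`, simplified `String` errors instead of `LixError`):

```rust
// Sketch of a hash newtype with hex round-tripping, in the spirit of
// the BlobHash signatures above. Requires `blake3` in Cargo.toml.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct BlobHash([u8; 32]);

impl BlobHash {
    fn from_content(content: &[u8]) -> Self {
        BlobHash(*blake3::hash(content).as_bytes())
    }

    fn to_hex(self) -> String {
        self.0.iter().map(|b| format!("{b:02x}")).collect()
    }

    fn from_hex(hex: &str) -> Result<Self, String> {
        if hex.len() != 64 {
            return Err("expected 64 hex characters".into());
        }
        let mut bytes = [0u8; 32];
        for (i, pair) in hex.as_bytes().chunks(2).enumerate() {
            let s = std::str::from_utf8(pair).map_err(|e| e.to_string())?;
            bytes[i] = u8::from_str_radix(s, 16).map_err(|e| e.to_string())?;
        }
        Ok(BlobHash(bytes))
    }
}

fn main() {
    let hash = BlobHash::from_content(b"content");
    assert_eq!(BlobHash::from_hex(&hash.to_hex()), Ok(hash));
}
```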
FILE: packages/engine/src/catalog/context.rs
constant REGISTERED_SCHEMA_KEY (line 12) | const REGISTERED_SCHEMA_KEY: &str = "lix_registered_schema";
type CatalogContext (line 19) | pub(crate) struct CatalogContext;
method new (line 22) | pub(crate) fn new() -> Self {
method schema_jsons_for_sql_read_planning (line 31) | pub(crate) async fn schema_jsons_for_sql_read_planning<R>(
method schema_facts_for_domain (line 63) | pub(crate) async fn schema_facts_for_domain<R>(
function row_belongs_to_schema_catalog_domain (line 100) | fn row_belongs_to_schema_catalog_domain(row: &MaterializedLiveStateRow, ...
function decode_registered_schema_row (line 109) | fn decode_registered_schema_row(
function visible_schemas_are_loaded_from_registered_schema_rows (line 152) | async fn visible_schemas_are_loaded_from_registered_schema_rows() {
function visible_schemas_include_registered_schema_rows (line 175) | async fn visible_schemas_include_registered_schema_rows() {
function sql_read_planning_rejects_multiple_visible_schemas_for_same_surface (line 192) | async fn sql_read_planning_rejects_multiple_visible_schemas_for_same_sur...
function tracked_domain_sees_tracked_seed_schemas_but_not_user_untracked_schemas (line 210) | async fn tracked_domain_sees_tracked_seed_schemas_but_not_user_untracked...
function tracked_domain_does_not_see_untracked_seed_schemas (line 239) | async fn tracked_domain_does_not_see_untracked_seed_schemas() {
function visible_schemas_ignore_projected_global_schema_rows_for_version_scope (line 260) | async fn visible_schemas_ignore_projected_global_schema_rows_for_version...
function schema_facts_post_filter_non_catalog_rows_even_if_reader_returns_them (line 278) | async fn schema_facts_post_filter_non_catalog_rows_even_if_reader_return...
function visible_schemas_are_empty_when_no_schema_rows_are_visible (line 306) | async fn visible_schemas_are_empty_when_no_schema_rows_are_visible() {
type RowsLiveStateReader (line 317) | struct RowsLiveStateReader {
method new (line 322) | fn new(rows: Vec<MaterializedLiveStateRow>) -> Self {
method scan_rows (line 329) | async fn scan_rows(
method load_row (line 354) | async fn load_row(
function registered_schema_row (line 370) | fn registered_schema_row(schema_key: &str) -> MaterializedLiveStateRow {
function registered_schema_entity_id (line 401) | fn registered_schema_entity_id(schema_key: &str) -> crate::entity_identi...
FILE: packages/engine/src/catalog/snapshot.rs
type CatalogSnapshot (line 14) | pub(crate) struct CatalogSnapshot {
method fmt (line 35) | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
method from_visible_schemas (line 45) | pub(crate) fn from_visible_schemas(visible_schemas: &[JsonValue]) -> R...
method from_schema_facts (line 60) | pub(crate) fn from_schema_facts(facts: &[SchemaCatalogFact]) -> Result...
method fingerprint (line 73) | pub(crate) fn fingerprint(&self) -> &CatalogFingerprint {
method schema (line 77) | pub(crate) fn schema(&self, schema_key: &str) -> Option<&JsonValue> {
method insert_schema_for_domain (line 82) | pub(crate) fn insert_schema_for_domain(
method from_entries (line 99) | fn from_entries(entries: Vec<CatalogEntry>) -> Result<Self, LixError> {
method remember_schema_identity (line 108) | fn remember_schema_identity(
method rebuild_plans (line 152) | fn rebuild_plans(&mut self) -> Result<(), LixError> {
method rebuild_delete_plans (line 176) | fn rebuild_delete_plans(&mut self) {
method compute_fingerprint (line 201) | fn compute_fingerprint(&self) -> Result<CatalogFingerprint, LixError> {
method contains (line 220) | pub(crate) fn contains(&self, schema_key: &str) -> bool {
method len (line 225) | pub(crate) fn len(&self) -> usize {
method plans (line 229) | pub(crate) fn plans(&self) -> impl Iterator<Item = &SchemaPlan> {
method plan (line 233) | pub(crate) fn plan(&self, plan_id: SchemaPlanId) -> Option<&SchemaPlan> {
method plan_for_key (line 237) | pub(crate) fn plan_for_key(&self, schema_key: &str) -> Option<(SchemaP...
method delete_plan_for_key (line 246) | pub(crate) fn delete_plan_for_key(&self, schema_key: &str) -> DeleteVa...
type CatalogEntry (line 25) | struct CatalogEntry {
type CatalogFingerprint (line 32) | pub(crate) struct CatalogFingerprint(String);
function hash_fingerprint_part (line 261) | fn hash_fingerprint_part(hasher: &mut blake3::Hasher, value: &str) {
type SchemaPlanId (line 267) | pub(crate) struct SchemaPlanId(u32);
method index (line 270) | fn index(self) -> usize {
method for_test (line 275) | pub(crate) fn for_test(index: u32) -> Self {
type PointerGroup (line 280) | pub(crate) type PointerGroup = Vec<Vec<String>>;
type SchemaPlan (line 282) | pub(crate) struct SchemaPlan {
method compile (line 294) | fn compile(
type DefaultPlan (line 326) | pub(crate) struct DefaultPlan {
method from_schema (line 343) | fn from_schema(schema: &JsonValue) -> Self {
method apply (line 373) | pub(crate) fn apply(
type DefaultPropertyPlan (line 331) | struct DefaultPropertyPlan {
type DefaultValuePlan (line 337) | enum DefaultValuePlan {
type ForeignKeyPlan (line 410) | pub(crate) struct ForeignKeyPlan {
type DeleteReferencePlan (line 418) | pub(crate) struct DeleteReferencePlan {
type StateDeleteReferencePlan (line 424) | pub(crate) struct StateDeleteReferencePlan {
type DeleteValidationPlan (line 430) | pub(crate) struct DeleteValidationPlan<'a> {
function has_committed_checks (line 436) | pub(crate) fn has_committed_checks(self) -> bool {
type UnboundForeignKeyPlan (line 442) | struct UnboundForeignKeyPlan {
type StateForeignKeyPlan (line 449) | pub(crate) struct StateForeignKeyPlan {
method local_properties (line 459) | pub(crate) fn local_properties(&self) -> PointerGroup {
type SchemaCatalogKey (line 469) | pub(crate) struct SchemaCatalogKey {
method from_schema_key (line 474) | pub(crate) fn from_schema_key(key: SchemaKey) -> Self {
type SchemaCatalogFact (line 482) | pub(crate) struct SchemaCatalogFact {
method new (line 489) | pub(crate) fn new(domain: Domain, key: SchemaKey, schema: JsonValue) -...
method schema (line 499) | pub(crate) fn schema(&self) -> &JsonValue {
method catalog_key (line 503) | pub(crate) fn catalog_key(&self) -> &SchemaCatalogKey {
function primary_key_paths (line 508) | fn primary_key_paths(schema: &JsonValue) -> Result<Option<Vec<Vec<String...
function pointer_groups (line 534) | fn pointer_groups(schema: &JsonValue, field: &str) -> Result<Vec<Pointer...
function foreign_key_plans (line 568) | fn foreign_key_plans(schema: &JsonValue) -> Result<Vec<UnboundForeignKey...
function bind_foreign_key_plans (line 637) | fn bind_foreign_key_plans(
function schema_field_at_pointer (line 745) | fn schema_field_at_pointer<'a>(
function validate_foreign_key_field_types (line 770) | fn validate_foreign_key_field_types(
function compatible_json_schema_type (line 816) | fn compatible_json_schema_type(field_schema: &JsonValue) -> Option<JsonV...
function schema_properties_are_keyed (line 834) | fn schema_properties_are_keyed(
function state_foreign_key_plans (line 848) | fn state_foreign_key_plans(schema: &JsonValue) -> Result<Vec<StateForeig...
function pointer_array (line 884) | fn pointer_array(value: Option<&JsonValue>, context: &str) -> Result<Poi...
function format_pointer_group (line 912) | fn format_pointer_group(paths: &[Vec<String>]) -> String {
function catalog_rejects_same_schema_key_from_multiple_domains (line 927) | fn catalog_rejects_same_schema_key_from_multiple_domains() {
function insert_schema_for_domain_is_atomic_when_binding_fails (line 947) | fn insert_schema_for_domain_is_atomic_when_binding_fails() {
function catalog_fingerprint_is_independent_of_fact_order (line 972) | fn catalog_fingerprint_is_independent_of_fact_order() {
function delete_plan_has_no_committed_checks_for_unreferenced_schema (line 993) | fn delete_plan_has_no_committed_checks_for_unreferenced_schema() {
function delete_plan_indexes_foreign_keys_by_referenced_schema (line 1009) | fn delete_plan_indexes_foreign_keys_by_referenced_schema() {
function delete_plan_conservatively_applies_state_foreign_keys_to_every_schema (line 1038) | fn delete_plan_conservatively_applies_state_foreign_keys_to_every_schema...
function schema_json (line 1064) | fn schema_json(schema_key: &str) -> JsonValue {
function child_schema_json (line 1077) | fn child_schema_json(schema_key: &str, parent_schema_key: &str) -> JsonV...
function state_fk_schema_json (line 1098) | fn state_fk_schema_json(schema_key: &str) -> JsonValue {
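`compute_fingerprint` together with the test `catalog_fingerprint_is_independent_of_fact_order` suggests the catalog fingerprint hashes schema facts in a canonical order. A minimal sketch of that idea, assuming `(schema_key, schema_json)` string pairs and length-prefixed framing; both details are our assumptions, not the crate's encoding:

```rust
// Sketch of an order-independent catalog fingerprint: entries are
// sorted by key before hashing, so registration order cannot change
// the result. Requires `blake3` in Cargo.toml.
fn fingerprint(entries: &[(String, String)]) -> String {
    let mut sorted: Vec<&(String, String)> = entries.iter().collect();
    sorted.sort_by(|a, b| a.0.cmp(&b.0));

    let mut hasher = blake3::Hasher::new();
    for (key, schema_json) in sorted {
        for part in [key.as_str(), schema_json.as_str()] {
            // Length-prefix each part so "ab"+"c" never hashes like "a"+"bc".
            hasher.update(&(part.len() as u64).to_le_bytes());
            hasher.update(part.as_bytes());
        }
    }
    hasher.finalize().to_hex().to_string()
}

fn main() {
    let a = vec![
        ("alpha".to_string(), "{}".to_string()),
        ("beta".to_string(), "{}".to_string()),
    ];
    let mut b = a.clone();
    b.reverse();
    // Same facts, different order, same fingerprint.
    assert_eq!(fingerprint(&a), fingerprint(&b));
}
```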
FILE: packages/engine/src/cel/context.rs
function build_context_with_functions (line 9) | pub(crate) fn build_context_with_functions<P>(
function registers_lix_uuid_v7_function (line 41) | fn registers_lix_uuid_v7_function() {
function errors_on_unknown_variables (line 51) | fn errors_on_unknown_variables() {
type FixedFunctions (line 62) | struct FixedFunctions;
method call_uuid_v7 (line 65) | fn call_uuid_v7(&self) -> String {
method call_timestamp (line 69) | fn call_timestamp(&self) -> String {
function fixed_functions (line 74) | fn fixed_functions() -> FixedFunctions {
function uses_supplied_function_provider (line 79) | fn uses_supplied_function_provider() {
FILE: packages/engine/src/cel/error.rs
function cel_parse_error (line 3) | pub(crate) fn cel_parse_error(expression: &str, error: impl std::fmt::Di...
function cel_runtime_error (line 12) | pub(crate) fn cel_runtime_error(expression: &str, error: impl std::fmt::...
FILE: packages/engine/src/cel/provider.rs
type CelFunctionProvider (line 6) | pub(crate) trait CelFunctionProvider: Clone + Send + Sync + 'static {
method call_uuid_v7 (line 7) | fn call_uuid_v7(&self) -> String;
method call_timestamp (line 8) | fn call_timestamp(&self) -> String;
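The `CelFunctionProvider` trait isolates nondeterministic builtins (UUIDs, timestamps) behind an interface so tests can substitute fixed values, as the `FixedFunctions` helpers elsewhere in the cel module suggest. A self-contained sketch of that pattern; the provider bodies here are placeholders:

```rust
// Sketch of the function-provider pattern the cel signatures suggest:
// evaluation code depends only on the trait, never on a concrete clock
// or RNG, so tests stay deterministic.
trait CelFunctionProvider {
    fn call_uuid_v7(&self) -> String;
    fn call_timestamp(&self) -> String;
}

// A deterministic provider, analogous to the FixedFunctions test helper.
struct FixedFunctions;

impl CelFunctionProvider for FixedFunctions {
    fn call_uuid_v7(&self) -> String {
        "00000000-0000-7000-8000-000000000000".to_string()
    }
    fn call_timestamp(&self) -> String {
        "2026-01-01T00:00:00Z".to_string()
    }
}

// Hypothetical caller: any code that needs a generated id takes the
// provider as a generic parameter.
fn default_row_id<P: CelFunctionProvider>(provider: &P) -> String {
    provider.call_uuid_v7()
}

fn main() {
    assert_eq!(
        default_row_id(&FixedFunctions),
        "00000000-0000-7000-8000-000000000000"
    );
}
```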
FILE: packages/engine/src/cel/runtime.rs
type CompiledProgram (line 15) | struct CompiledProgram {
type CelEvaluator (line 20) | pub struct CelEvaluator {
method new (line 25) | pub fn new() -> Self {
method evaluate_with_functions (line 29) | pub fn evaluate_with_functions<P>(
method compile (line 47) | fn compile(&self, expression: &str) -> Result<Arc<CompiledProgram>, Li...
function shared_runtime (line 65) | pub(crate) fn shared_runtime() -> &'static CelEvaluator {
type FixedFunctions (line 77) | struct FixedFunctions;
method call_uuid_v7 (line 80) | fn call_uuid_v7(&self) -> String {
method call_timestamp (line 84) | fn call_timestamp(&self) -> String {
function fixed_functions (line 89) | fn fixed_functions() -> FixedFunctions {
function evaluates_basic_expressions (line 94) | fn evaluates_basic_expressions() {
function evaluates_with_variables (line 103) | fn evaluates_with_variables() {
function reports_parse_errors (line 114) | fn reports_parse_errors() {
function reports_runtime_errors (line 123) | fn reports_runtime_errors() {
function supports_function_calls (line 134) | fn supports_function_calls() {
function caches_compiled_programs (line 143) | fn caches_compiled_programs() {
function errors_on_unknown_variable (line 160) | fn errors_on_unknown_variable() {
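`compile` returning `Arc<CompiledProgram>` together with the `caches_compiled_programs` test points at a compile-once cache keyed by expression text. A sketch of that memoization, assuming a `Mutex<HashMap>` cache; the real evaluator's internals may differ:

```rust
// Sketch of a compile-once cache: expressions map to Arc-shared
// compiled programs, so repeat evaluations skip parsing. The
// "compilation" here just stores the source; a real one would parse.
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

struct Compiled {
    source: String,
}

struct Evaluator {
    cache: Mutex<HashMap<String, Arc<Compiled>>>,
}

impl Evaluator {
    fn new() -> Self {
        Self { cache: Mutex::new(HashMap::new()) }
    }

    fn compile(&self, expression: &str) -> Arc<Compiled> {
        let mut cache = self.cache.lock().unwrap();
        if let Some(program) = cache.get(expression) {
            return Arc::clone(program); // cache hit: no re-parse
        }
        let program = Arc::new(Compiled { source: expression.to_string() });
        cache.insert(expression.to_string(), Arc::clone(&program));
        program
    }
}

fn main() {
    let evaluator = Evaluator::new();
    let first = evaluator.compile("1 + 1");
    let second = evaluator.compile("1 + 1");
    assert_eq!(first.source, "1 + 1");
    // Same allocation: the second call was served from the cache.
    assert!(Arc::ptr_eq(&first, &second));
}
```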
FILE: packages/engine/src/cel/value.rs
function json_to_cel (line 6) | pub fn json_to_cel(value: &JsonValue) -> Result<CelValue, LixError> {
function cel_to_json (line 15) | pub fn cel_to_json(value: &CelValue) -> Result<JsonValue, LixError> {
function converts_json_scalars (line 30) | fn converts_json_scalars() {
function converts_json_objects_and_arrays (line 38) | fn converts_json_objects_and_arrays() {
FILE: packages/engine/src/commit_graph/context.rs
constant COMMIT_SCHEMA_KEY (line 14) | const COMMIT_SCHEMA_KEY: &str = "lix_commit";
type CommitGraphContext (line 22) | pub(crate) struct CommitGraphContext {
method new (line 27) | pub(crate) fn new() -> Self {
method reader (line 34) | pub(crate) fn reader<S>(&self, store: S) -> CommitGraphStoreReader<S>
type CommitGraphStoreReader (line 46) | pub(crate) struct CommitGraphStoreReader<S>
function load_commit (line 58) | pub(crate) async fn load_commit(
function all_commits (line 72) | pub(crate) async fn all_commits(&mut self) -> Result<Vec<CommitGraphComm...
function reachable_commits (line 83) | pub(crate) async fn reachable_commits(
function best_common_ancestors (line 95) | pub(crate) async fn best_common_ancestors(
function merge_base (line 108) | pub(crate) async fn merge_base(
function commit_edges (line 136) | pub(crate) fn commit_edges(&self, commits: &[CommitGraphCommit]) -> Vec<...
function change_history_from_commit (line 156) | pub(crate) async fn change_history_from_commit(
function load_member_canonical_change (line 192) | async fn load_member_canonical_change(
function graph_commit_from_store_commit (line 213) | async fn graph_commit_from_store_commit(
function load_commit_change_ids (line 221) | async fn load_commit_change_ids(&self, commit: &Commit) -> Result<Vec<St...
function load_canonical_changes (line 246) | async fn load_canonical_changes(
method load_commit (line 273) | async fn load_commit(
method all_commits (line 280) | async fn all_commits(&mut self) -> Result<Vec<CommitGraphCommit>, LixErr...
method reachable_commits (line 284) | async fn reachable_commits(
method best_common_ancestors (line 291) | async fn best_common_ancestors(
method merge_base (line 299) | async fn merge_base(
method commit_edges (line 307) | fn commit_edges(&self, commits: &[CommitGraphCommit]) -> Vec<CommitGraph...
method change_history_from_commit (line 311) | async fn change_history_from_commit(
function depth_matches (line 320) | fn depth_matches(depth: u32, request: &CommitGraphChangeHistoryRequest) ...
function change_matches_history_request (line 325) | fn change_matches_history_request(
function commit_graph_commit_from_store_commit (line 339) | fn commit_graph_commit_from_store_commit(
function commit_header_canonical_change (line 354) | fn commit_header_canonical_change(commit: Commit) -> Change {
function canonical_change_from_store_change (line 366) | fn canonical_change_from_store_change(change: Change) -> Change {
function missing_pack_error (line 378) | fn missing_pack_error(label: &str, commit_id: &str, pack_id: u32) -> Lix...
function load_commit_parses_commit_snapshot (line 398) | async fn load_commit_parses_commit_snapshot() {
function load_commit_returns_none_for_missing_commit (line 427) | async fn load_commit_returns_none_for_missing_commit() {
function all_commits_returns_parsed_commits_sorted_by_id (line 442) | async fn all_commits_returns_parsed_commits_sorted_by_id() {
function commit_edges_are_derived_from_parent_commit_ids (line 472) | async fn commit_edges_are_derived_from_parent_commit_ids() {
function change_history_from_commit_reports_matching_canonical_changes_with_depth (line 500) | async fn change_history_from_commit_reports_matching_canonical_changes_w...
function change_history_from_commit_filters_depth_entity_file_and_tombstones (line 551) | async fn change_history_from_commit_filters_depth_entity_file_and_tombst...
function change_history_from_commit_includes_tombstones_when_requested (line 606) | async fn change_history_from_commit_includes_tombstones_when_requested() {
type TestChange (line 646) | struct TestChange {
method commit (line 654) | fn commit(
method entity (line 676) | fn entity(
method is_commit (line 702) | fn is_commit(&self) -> bool {
function append_changes (line 707) | async fn append_changes(storage: StorageContext, changes: &[TestChange]) {
function change_ref_from_canonical (line 794) | fn change_ref_from_canonical<'a>(change: crate::commit_store::ChangeRef<...
function commit_change (line 806) | fn commit_change(
function parsed_commit (line 815) | fn parsed_commit(
function entity_change (line 842) | fn entity_change(
function entity_change_at (line 857) | fn entity_change_at(
function entity_change_with_file (line 874) | fn entity_change_with_file(
function entity_tombstone (line 891) | fn entity_tombstone(change_id: &str, entity_id: &str, schema_key: &str) ...
FILE: packages/engine/src/commit_graph/types.rs
type CommitGraphCommit (line 13) | pub(crate) struct CommitGraphCommit {
type ReachableCommitGraphCommit (line 24) | pub(crate) struct ReachableCommitGraphCommit {
type CommitGraphEdge (line 31) | pub(crate) struct CommitGraphEdge {
type CommitGraphChangeHistoryRequest (line 39) | pub(crate) struct CommitGraphChangeHistoryRequest {
type CommitGraphChangeHistoryEntry (line 53) | pub(crate) struct CommitGraphChangeHistoryEntry {
type CommitGraphReader (line 66) | pub(crate) trait CommitGraphReader: Send + Sync {
method load_commit (line 68) | async fn load_commit(&mut self, commit_id: &str)
method all_commits (line 71) | async fn all_commits(&mut self) -> Result<Vec<CommitGraphCommit>, LixE...
method reachable_commits (line 73) | async fn reachable_commits(
method best_common_ancestors (line 84) | async fn best_common_ancestors(
method merge_base (line 96) | async fn merge_base(
method commit_edges (line 102) | fn commit_edges(&self, commits: &[CommitGraphCommit]) -> Vec<CommitGra...
method change_history_from_commit (line 104) | async fn change_history_from_commit(
FILE: packages/engine/src/commit_graph/walker.rs
function walk_reachable_commits (line 13) | pub(crate) async fn walk_reachable_commits<S>(
function best_common_ancestors (line 64) | pub(crate) async fn best_common_ancestors<S>(
function has_descendant_in_set (line 101) | async fn has_descendant_in_set<S>(
type CommitTraversalLoader (line 124) | struct CommitTraversalLoader<'a, S>
function new (line 136) | fn new(reader: &'a mut CommitGraphStoreReader<S>) -> Self {
function walk_commit (line 143) | async fn walk_commit(
function load_commit (line 198) | async fn load_commit(&mut self, commit_id: &str) -> Result<CommitGraphCo...
type TraversalFrame (line 213) | struct TraversalFrame {
function reachable_commits_returns_commits_nearest_first (line 232) | async fn reachable_commits_returns_commits_nearest_first() {
function reachable_commits_errors_on_missing_parent_commit (line 267) | async fn reachable_commits_errors_on_missing_parent_commit() {
function reachable_commits_errors_on_cycle (line 292) | async fn reachable_commits_errors_on_cycle() {
function reachable_commits_dedupes_shared_ancestors_in_diamond (line 315) | async fn reachable_commits_dedupes_shared_ancestors_in_diamond() {
function reachable_commits_keeps_nearest_depth_for_multiple_paths (line 356) | async fn reachable_commits_keeps_nearest_depth_for_multiple_paths() {
function reachable_commits_orders_same_depth_commits_by_id (line 396) | async fn reachable_commits_orders_same_depth_commits_by_id() {
function reachable_commits_errors_on_missing_head_commit (line 431) | async fn reachable_commits_errors_on_missing_head_commit() {
function best_common_ancestors_returns_nearest_common_commit_in_simple_graph (line 446) | async fn best_common_ancestors_returns_nearest_common_commit_in_simple_g...
function best_common_ancestors_returns_shared_fork_in_diamond_graph (line 477) | async fn best_common_ancestors_returns_shared_fork_in_diamond_graph() {
function best_common_ancestors_returns_parent_when_one_side_is_ancestor (line 519) | async fn best_common_ancestors_returns_parent_when_one_side_is_ancestor() {
function best_common_ancestors_returns_multiple_bases_for_criss_cross_graph (line 549) | async fn best_common_ancestors_returns_multiple_bases_for_criss_cross_gr...
function merge_base_returns_single_best_common_ancestor (line 591) | async fn merge_base_returns_single_best_common_ancestor() {
function merge_base_errors_when_histories_have_no_common_commit (line 616) | async fn merge_base_errors_when_histories_have_no_common_commit() {
function merge_base_errors_when_best_common_ancestor_is_ambiguous (line 639) | async fn merge_base_errors_when_best_common_ancestor_is_ambiguous() {
type TestCommitChange (line 696) | struct TestCommitChange {
function append_changes (line 701) | async fn append_changes(storage: StorageContext, changes: &[TestCommitCh...
function commit_change (line 736) | fn commit_change(
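The walker tests pin down the traversal contract: breadth-first from a head, shared ancestors deduped at their nearest depth, same-depth ties ordered by commit id. A self-contained sketch over a plain adjacency map; note that the real walker also errors on cycles and missing parents, while this sketch simply terminates:

```rust
// Sketch of the reachable-commit walk the tests above describe. BFS
// visits commits in nondecreasing depth, so the first visit records
// the nearest depth and later visits are skipped.
use std::collections::{HashMap, VecDeque};

fn reachable_commits(parents: &HashMap<&str, Vec<&str>>, head: &str) -> Vec<(String, u32)> {
    let mut depth_of: HashMap<String, u32> = HashMap::new();
    let mut queue = VecDeque::from([(head.to_string(), 0u32)]);

    while let Some((commit, depth)) = queue.pop_front() {
        if depth_of.contains_key(&commit) {
            continue; // already reached at a depth no greater than this one
        }
        depth_of.insert(commit.clone(), depth);
        for parent in parents.get(commit.as_str()).into_iter().flatten() {
            queue.push_back((parent.to_string(), depth + 1));
        }
    }

    // Nearest first; ties broken by commit id, as the tests pin down.
    let mut result: Vec<(String, u32)> = depth_of.into_iter().collect();
    result.sort_by(|a, b| a.1.cmp(&b.1).then(a.0.cmp(&b.0)));
    result
}

fn main() {
    // Diamond: d has parents b and c, which both have parent a.
    let mut parents: HashMap<&str, Vec<&str>> = HashMap::new();
    parents.insert("d", vec!["b", "c"]);
    parents.insert("b", vec!["a"]);
    parents.insert("c", vec!["a"]);

    let walked = reachable_commits(&parents, "d");
    // The shared ancestor "a" appears once, at its nearest depth.
    assert_eq!(
        walked,
        vec![
            ("d".to_string(), 0),
            ("b".to_string(), 1),
            ("c".to_string(), 1),
            ("a".to_string(), 2),
        ]
    );
}
```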
FILE: packages/engine/src/commit_store/codec.rs
constant COMMIT_MAGIC (line 8) | const COMMIT_MAGIC: &[u8; 5] = b"LXCM1";
constant CHANGE_MAGIC (line 9) | const CHANGE_MAGIC: &[u8; 5] = b"LXCH2";
constant CHANGE_PACK_MAGIC (line 10) | const CHANGE_PACK_MAGIC: &[u8; 5] = b"LXCP3";
constant MEMBERSHIP_PACK_MAGIC (line 11) | const MEMBERSHIP_PACK_MAGIC: &[u8; 5] = b"LXMP1";
constant CHANGE_ID_FULL (line 12) | const CHANGE_ID_FULL: u8 = 0;
constant CHANGE_ID_COMMIT_SUFFIX (line 13) | const CHANGE_ID_COMMIT_SUFFIX: u8 = 1;
function encode_commit_ref (line 15) | pub(crate) fn encode_commit_ref(commit: StoredCommitRef<'_>) -> Result<V...
function decode_commit (line 31) | pub(crate) fn decode_commit(bytes: &[u8]) -> Result<Commit, LixError> {
function encode_change_ref (line 53) | pub(crate) fn encode_change_ref(change: ChangeRef<'_>) -> Result<Vec<u8>...
function write_change_ref (line 59) | fn write_change_ref(bytes: &mut Vec<u8>, change: ChangeRef<'_>) -> Resul...
function decode_change (line 76) | pub(crate) fn decode_change(bytes: &[u8]) -> Result<Change, LixError> {
function encode_change_pack (line 103) | pub(crate) fn encode_change_pack(
function decode_change_pack (line 130) | pub(crate) fn decode_change_pack(bytes: &[u8]) -> Result<(String, u32, V...
type ChangeShapeRef (line 173) | struct ChangeShapeRef<'a> {
type ChangeShape (line 179) | struct ChangeShape {
function change_shapes (line 184) | fn change_shapes<'a>(changes: &'a [ChangeRef<'a>]) -> (Vec<ChangeShapeRe...
function encode_membership_pack (line 205) | pub(crate) fn encode_membership_pack<'a>(
function decode_membership_pack (line 222) | pub(crate) fn decode_membership_pack(
function encode_locator (line 238) | fn encode_locator(bytes: &mut Vec<u8>, locator: ChangeLocatorRef<'_>) ->...
function decode_locator (line 245) | fn decode_locator(cursor: &mut ByteCursor<'_>) -> Result<ChangeLocator, ...
function write_str (line 254) | fn write_str(bytes: &mut Vec<u8>, value: &str) -> Result<(), LixError> {
function write_optional_str (line 266) | fn write_optional_str(bytes: &mut Vec<u8>, value: Option<&str>) -> Resul...
function write_optional_json_ref (line 277) | fn write_optional_json_ref(bytes: &mut Vec<u8>, value: Option<&JsonRef>) {
function write_len (line 287) | fn write_len(bytes: &mut Vec<u8>, len: usize, field: &str) -> Result<(),...
function write_var_len (line 298) | fn write_var_len(bytes: &mut Vec<u8>, len: usize, field: &str) -> Result...
function write_var_str (line 313) | fn write_var_str(bytes: &mut Vec<u8>, value: &str, field: &str) -> Resul...
function write_optional_var_str (line 319) | fn write_optional_var_str(
function write_change_id (line 334) | fn write_change_id(bytes: &mut Vec<u8>, commit_id: &str, change_id: &str...
function write_var_change_id (line 344) | fn write_var_change_id(
function write_entity_identity (line 358) | fn write_entity_identity(bytes: &mut Vec<u8>, identity: &EntityIdentity)...
function write_var_entity_identity (line 370) | fn write_var_entity_identity(
function write_strs (line 385) | fn write_strs<'a>(
type ByteCursor (line 403) | struct ByteCursor<'a> {
function new (line 409) | fn new(bytes: &'a [u8]) -> Self {
function expect_magic (line 413) | fn expect_magic(&mut self, magic: &[u8], label: &str) -> Result<(), LixE...
function read_string (line 424) | fn read_string(&mut self, field: &str) -> Result<String, LixError> {
function read_strings (line 447) | fn read_strings(&mut self, field: &str) -> Result<Vec<String>, LixError> {
function read_optional_string (line 456) | fn read_optional_string(&mut self, field: &str) -> Result<Option<String>...
function read_optional_json_ref (line 467) | fn read_optional_json_ref(&mut self, field: &str) -> Result<Option<JsonR...
function read_u8 (line 494) | fn read_u8(&mut self, field: &str) -> Result<u8, LixError> {
function read_u32 (line 505) | fn read_u32(&mut self, field: &str) -> Result<u32, LixError> {
function read_var_usize (line 526) | fn read_var_usize(&mut self, field: &str) -> Result<usize, LixError> {
function read_var_string (line 555) | fn read_var_string(&mut self, field: &str) -> Result<String, LixError> {
function read_optional_var_string (line 578) | fn read_optional_var_string(&mut self, field: &str) -> Result<Option<Str...
function read_change_id (line 589) | fn read_change_id(&mut self, commit_id: &str) -> Result<String, LixError> {
function read_var_change_id (line 602) | fn read_var_change_id(&mut self, commit_id: &str) -> Result<String, LixE...
function read_entity_identity (line 615) | fn read_entity_identity(&mut self) -> Result<EntityIdentity, LixError> {
function read_var_entity_identity (line 630) | fn read_var_entity_identity(&mut self) -> Result<EntityIdentity, LixErro...
function expect_end (line 645) | fn expect_end(&self, label: &str) -> Result<(), LixError> {
function commit_codec_roundtrips (line 661) | fn commit_codec_roundtrips() {
function change_codec_roundtrips (line 679) | fn change_codec_roundtrips() {
function change_codec_roundtrips_empty_optionals (line 697) | fn change_codec_roundtrips_empty_optionals() {
function change_pack_compacts_shared_shape_and_commit_id_prefix (line 715) | fn change_pack_compacts_shared_shape_and_commit_id_prefix() {
function change_pack_rejects_overlong_varint (line 774) | fn change_pack_rejects_overlong_varint() {
function change_pack_rejects_varint_above_u32 (line 787) | fn change_pack_rejects_varint_above_u32() {
function change_pack_rejects_non_canonical_varint (line 800) | fn change_pack_rejects_non_canonical_varint() {
function change_codec_rejects_invalid_optional_tag (line 813) | fn change_codec_rejects_invalid_optional_tag() {
function change_codec_rejects_truncated_json_ref (line 840) | fn change_codec_rejects_truncated_json_ref() {
function change_codec_rejects_trailing_bytes (line 868) | fn change_codec_rejects_trailing_bytes() {
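The codec tests (`change_pack_rejects_overlong_varint`, `change_pack_rejects_varint_above_u32`, `change_pack_rejects_non_canonical_varint`) pin down a strict varint: values must fit in `u32` and only the shortest encoding is accepted. A self-contained sketch of such a canonical LEB128-style varint:

```rust
// Sketch of a canonical varint: 7 value bits per byte, high bit set on
// continuation bytes. Decoding rejects anything longer than 5 bytes,
// values above u32, and overlong (non-shortest) encodings.
fn write_varint(out: &mut Vec<u8>, mut value: u32) {
    loop {
        let byte = (value & 0x7f) as u8;
        value >>= 7;
        if value == 0 {
            out.push(byte);
            return;
        }
        out.push(byte | 0x80);
    }
}

fn read_varint(bytes: &[u8]) -> Result<(u32, usize), String> {
    let mut value: u64 = 0;
    for (i, &byte) in bytes.iter().enumerate() {
        if i == 5 {
            return Err("varint too long".into()); // >5 bytes cannot be u32
        }
        value |= u64::from(byte & 0x7f) << (7 * i);
        if byte & 0x80 == 0 {
            if value > u64::from(u32::MAX) {
                return Err("varint above u32".into());
            }
            if i > 0 && byte == 0 {
                return Err("non-canonical varint: overlong encoding".into());
            }
            return Ok((value as u32, i + 1));
        }
    }
    Err("truncated varint".into())
}

fn main() {
    let mut buf = Vec::new();
    write_varint(&mut buf, 300);
    assert_eq!(read_varint(&buf), Ok((300, 2)));
    // 0x80 0x00 decodes to 0 but is overlong, so it must be rejected.
    assert!(read_varint(&[0x80, 0x00]).is_err());
}
```

Rejecting non-canonical encodings keeps the format injective: every value has exactly one byte representation, which matters when encoded bytes feed content hashes.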
FILE: packages/engine/src/commit_store/context.rs
type CommitStoreContext (line 12) | pub(crate) struct CommitStoreContext;
method new (line 15) | pub(crate) fn new() -> Self {
method writer (line 20) | pub(crate) fn writer<'a, S>(
method reader (line 32) | pub(crate) fn reader<S>(&self, store: S) -> CommitStoreReader<S>
method load_commit_from (line 41) | pub(crate) async fn load_commit_from(
method load_change_pack_from (line 49) | pub(crate) async fn load_change_pack_from(
method load_membership_pack_from (line 58) | pub(crate) async fn load_membership_pack_from(
type CommitStoreReader (line 69) | pub(crate) struct CommitStoreReader<S> {
function load_change_index_entries (line 77) | pub(crate) async fn load_change_index_entries(
function load_commit (line 88) | pub(crate) async fn load_commit(
function scan_commits (line 95) | pub(crate) async fn scan_commits(&self) -> Result<Vec<crate::commit_stor...
function load_change_pack (line 99) | pub(crate) async fn load_change_pack(
function load_membership_pack (line 112) | pub(crate) async fn load_membership_pack(
function load_changes (line 125) | pub(crate) async fn load_changes(
function load_located_changes (line 170) | pub(crate) async fn load_located_changes(
function load_commit_changes (line 217) | pub(crate) async fn load_commit_changes(
function scan_changes (line 256) | pub(crate) async fn scan_changes(
type CommitStoreWriter (line 265) | pub(crate) struct CommitStoreWriter<'a, S: ?Sized> {
type PendingCommitDraft (line 270) | struct PendingCommitDraft<'a> {
function stage_commit_draft (line 285) | pub(crate) async fn stage_commit_draft<'a>(
function stage_tracked_commit_draft (line 304) | pub(crate) async fn stage_tracked_commit_draft<'a>(
function stage_commit_drafts (line 322) | pub(crate) async fn stage_commit_drafts<'a>(
function stage_tracked_commit_drafts (line 332) | pub(crate) async fn stage_tracked_commit_drafts<'a>(
function stage_commit_drafts_with_authored_pack (line 340) | async fn stage_commit_drafts_with_authored_pack<'a>(
function validate_stage_commits (line 391) | async fn validate_stage_commits<'a>(
function scan_changes_from_commit_store (line 399) | async fn scan_changes_from_commit_store(
function load_change_by_locator (line 436) | async fn load_change_by_locator(
function load_change_by_locator_cached (line 482) | async fn load_change_by_locator_cached(
function commit_header_change (line 539) | fn commit_header_change(commit: Commit) -> Change {
function located_commit_header_change (line 551) | fn located_commit_header_change(commit: Commit, source_pack_id: u32) -> ...
function missing_pack_error (line 560) | fn missing_pack_error(label: &str, commit_id: &str, pack_id: u32) -> Lix...
function validate_new_changes_absent (line 567) | async fn validate_new_changes_absent<'a>(
function validate_adopted_changes_present (line 599) | async fn validate_adopted_changes_present<'a>(
function load_packed_change (line 672) | async fn load_packed_change<S>(
function change_matches_ref (line 721) | fn change_matches_ref(change: &Change, expected: ChangeRef<'_>) -> bool {
function duplicate_change_id_error (line 731) | fn duplicate_change_id_error(change_id: &str) -> LixError {
function load_changes_materializes_commit_header_and_packed_change (line 756) | async fn load_changes_materializes_commit_header_and_packed_change() {
function load_commit_changes_returns_equivalent_authored_and_adopted_changes (line 849) | async fn load_commit_changes_returns_equivalent_authored_and_adopted_cha...
function stage_test_commit (line 896) | async fn stage_test_commit(
function test_change (line 933) | fn test_change(id: &str) -> Change {
FILE: packages/engine/src/commit_store/materialization.rs
function materialize_change (line 6) | pub(crate) async fn materialize_change<S>(
function load_optional_json_text (line 51) | async fn load_optional_json_text<S>(
FILE: packages/engine/src/commit_store/storage.rs
constant COMMIT_NAMESPACE (line 11) | pub(crate) const COMMIT_NAMESPACE: &str = "commit_store.commit";
constant CHANGE_PACK_NAMESPACE (line 12) | pub(crate) const CHANGE_PACK_NAMESPACE: &str = "commit_store.change_pack";
constant MEMBERSHIP_PACK_NAMESPACE (line 13) | pub(crate) const MEMBERSHIP_PACK_NAMESPACE: &str = "commit_store.members...
constant SINGLE_PACK_ID (line 15) | const SINGLE_PACK_ID: u32 = 0;
function stage_commit (line 17) | pub(crate) fn stage_commit(
function stage_commit_with_external_authored_pack (line 26) | pub(crate) fn stage_commit_with_external_authored_pack(
function stage_commit_with_authored_pack (line 35) | fn stage_commit_with_authored_pack(
function load_commit (line 104) | pub(crate) async fn load_commit(
function scan_commits (line 114) | pub(crate) async fn scan_commits(
function load_change_pack (line 131) | pub(crate) async fn load_change_pack(
function load_tracked_authored_change_pack (line 152) | pub(crate) async fn load_tracked_authored_change_pack(
function load_membership_pack (line 203) | pub(crate) async fn load_membership_pack(
function load_change_index_entries (line 229) | pub(crate) async fn load_change_index_entries(
function get_one (line 303) | async fn get_one(
function ensure_pack_identity (line 322) | fn ensure_pack_identity(
function commit_key (line 340) | fn commit_key(commit_id: &str) -> Vec<u8> {
function pack_key (line 344) | fn pack_key(commit_id: &str, pack_id: u32) -> Result<Vec<u8>, LixError> {
function stage_commit_writes_all_commit_store_namespaces (line 372) | async fn stage_commit_writes_all_commit_store_namespaces() {
function tracked_commit_change_pack_loads_from_delta_pack (line 464) | async fn tracked_commit_change_pack_loads_from_delta_pack() {
function tracked_commit_change_pack_rejects_sparse_delta_ordinals (line 535) | async fn tracked_commit_change_pack_rejects_sparse_delta_ordinals() {
function test_commit (line 577) | fn test_commit() -> Commit {
function test_change (line 589) | fn test_change(id: &str) -> Change {
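`commit_key` and `pack_key` build namespaced KV keys from a commit id and, for packs, a `u32` pack id. A sketch of one plausible layout; the separator byte and big-endian suffix here are assumptions chosen so keys sort usefully, not the crate's actual encoding:

```rust
// Sketch of namespaced KV key construction: a namespace prefix, the
// commit id, and for packs a fixed-width big-endian pack id so all
// packs of one commit sort numerically.
fn commit_key(commit_id: &str) -> Vec<u8> {
    let mut key = b"commit_store.commit/".to_vec();
    key.extend_from_slice(commit_id.as_bytes());
    key
}

fn pack_key(commit_id: &str, pack_id: u32) -> Vec<u8> {
    let mut key = b"commit_store.change_pack/".to_vec();
    key.extend_from_slice(commit_id.as_bytes());
    key.push(0); // terminator so "c1" cannot prefix-collide with "c10"
    key.extend_from_slice(&pack_id.to_be_bytes()); // big-endian sorts numerically
    key
}

fn main() {
    assert!(commit_key("a") < commit_key("b"));
    assert!(pack_key("c1", 1) < pack_key("c1", 2));
    assert!(pack_key("c1", 9) < pack_key("c1", 10)); // lexicographic == numeric
}
```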
FILE: packages/engine/src/commit_store/types.rs
type Commit (line 7) | pub(crate) struct Commit {
method as_ref (line 18) | pub(crate) fn as_ref(&self) -> StoredCommitRef<'_> {
type StoredCommitRef (line 33) | pub(crate) struct StoredCommitRef<'a> {
type CommitDraftRef (line 45) | pub(crate) struct CommitDraftRef<'a> {
type Change (line 55) | pub(crate) struct Change {
method as_ref (line 86) | pub(crate) fn as_ref(&self) -> ChangeRef<'_> {
type MaterializedChange (line 67) | pub(crate) struct MaterializedChange {
type LocatedChange (line 79) | pub(crate) struct LocatedChange {
type ChangeRef (line 101) | pub(crate) struct ChangeRef<'a> {
type ChangeScanRequest (line 113) | pub(crate) struct ChangeScanRequest {
type ChangePack (line 119) | pub(crate) struct ChangePack {
method as_view (line 126) | pub(crate) fn as_view(&self) -> ChangePackView<'_> {
type ChangePackView (line 137) | pub(crate) struct ChangePackView<'a> {
type ChangeLocator (line 145) | pub(crate) struct ChangeLocator {
method as_ref (line 153) | pub(crate) fn as_ref(&self) -> ChangeLocatorRef<'_> {
type ChangeLocatorRef (line 165) | pub(crate) struct ChangeLocatorRef<'a> {
type ChangeIndexEntry (line 174) | pub(crate) enum ChangeIndexEntry {
type MembershipPack (line 186) | pub(crate) struct MembershipPack {
method as_view (line 193) | pub(crate) fn as_view(&self) -> MembershipPackView<'_> {
type MembershipPackView (line 204) | pub(crate) struct MembershipPackView<'a> {
type StagedCommitStoreCommit (line 212) | pub(crate) struct StagedCommitStoreCommit {
FILE: packages/engine/src/common/error.rs
type LixError (line 23) | pub struct LixError {
constant CODE_UNKNOWN (line 35) | pub const CODE_UNKNOWN: &'static str = "LIX_ERROR_UNKNOWN";
constant CODE_PARSE_ERROR (line 38) | pub const CODE_PARSE_ERROR: &'static str = "LIX_PARSE_ERROR";
constant CODE_UDF_NOT_FOUND (line 41) | pub const CODE_UDF_NOT_FOUND: &'static str = "LIX_UDF_NOT_FOUND";
constant CODE_TYPE_MISMATCH (line 44) | pub const CODE_TYPE_MISMATCH: &'static str = "LIX_TYPE_MISMATCH";
constant CODE_INVALID_JSON_PATH (line 48) | pub const CODE_INVALID_JSON_PATH: &'static str = "LIX_INVALID_JSON_PATH";
constant CODE_DIALECT_UNSUPPORTED (line 52) | pub const CODE_DIALECT_UNSUPPORTED: &'static str = "LIX_DIALECT_UNSUPP...
constant CODE_BINDING_ERROR (line 55) | pub const CODE_BINDING_ERROR: &'static str = "LIX_BINDING_ERROR";
constant CODE_INVALID_PARAM (line 58) | pub const CODE_INVALID_PARAM: &'static str = "LIX_INVALID_PARAM";
constant CODE_TABLE_NOT_FOUND (line 61) | pub const CODE_TABLE_NOT_FOUND: &'static str = "LIX_TABLE_NOT_FOUND";
constant CODE_COLUMN_NOT_FOUND (line 64) | pub const CODE_COLUMN_NOT_FOUND: &'static str = "LIX_COLUMN_NOT_FOUND";
constant CODE_CONSTRAINT_VIOLATION (line 68) | pub const CODE_CONSTRAINT_VIOLATION: &'static str = "LIX_CONSTRAINT_VI...
constant CODE_READ_ONLY (line 71) | pub const CODE_READ_ONLY: &'static str = "LIX_ERROR_READ_ONLY";
constant CODE_HISTORY_FILTER_REQUIRED (line 74) | pub const CODE_HISTORY_FILTER_REQUIRED: &'static str = "LIX_HISTORY_FI...
constant CODE_UNSUPPORTED_SQL (line 78) | pub const CODE_UNSUPPORTED_SQL: &'static str = "LIX_UNSUPPORTED_SQL";
constant CODE_UNSUPPORTED_SQL_RUNTIME_PLAN (line 82) | pub const CODE_UNSUPPORTED_SQL_RUNTIME_PLAN: &'static str = "LIX_UNSUP...
constant CODE_STORAGE_ERROR (line 85) | pub const CODE_STORAGE_ERROR: &'static str = "LIX_STORAGE_ERROR";
constant CODE_INTERNAL_ERROR (line 88) | pub const CODE_INTERNAL_ERROR: &'static str = "LIX_INTERNAL_ERROR";
constant CODE_SCHEMA_VALIDATION (line 94) | pub const CODE_SCHEMA_VALIDATION: &'static str = "LIX_ERROR_SCHEMA_VAL...
constant CODE_FOREIGN_KEY (line 99) | pub const CODE_FOREIGN_KEY: &'static str = "LIX_ERROR_FOREIGN_KEY";
constant CODE_FILE_NOT_FOUND (line 103) | pub const CODE_FILE_NOT_FOUND: &'static str = "LIX_ERROR_FILE_NOT_FOUND";
constant CODE_UNIQUE (line 107) | pub const CODE_UNIQUE: &'static str = "LIX_ERROR_UNIQUE";
constant CODE_UNSUPPORTED_WRITE_EXPRESSION (line 112) | pub const CODE_UNSUPPORTED_WRITE_EXPRESSION: &'static str =
constant CODE_SCHEMA_DEFINITION (line 119) | pub const CODE_SCHEMA_DEFINITION: &'static str = "LIX_ERROR_SCHEMA_DEF...
constant CODE_CLOSED (line 124) | pub const CODE_CLOSED: &'static str = "LIX_ERROR_CLOSED";
constant CODE_MERGE_CONFLICT (line 127) | pub const CODE_MERGE_CONFLICT: &'static str = "LIX_MERGE_CONFLICT";
constant CODE_VERSION_NOT_FOUND (line 130) | pub const CODE_VERSION_NOT_FOUND: &'static str = "LIX_VERSION_NOT_FOUND";
constant CODE_INVALID_STORAGE_SCOPE (line 134) | pub const CODE_INVALID_STORAGE_SCOPE: &'static str = "LIX_ERROR_INVALI...
constant CODE_AMBIGUOUS_MERGE_BASE (line 137) | pub const CODE_AMBIGUOUS_MERGE_BASE: &'static str = "LIX_AMBIGUOUS_MER...
constant CODE_INVALID_MERGE (line 141) | pub const CODE_INVALID_MERGE: &'static str = "LIX_INVALID_MERGE";
method new (line 143) | pub fn new(code: impl Into<String>, message: impl Into<String>) -> Self {
method unknown (line 152) | pub fn unknown(message: impl Into<String>) -> Self {
method version_not_found (line 156) | pub fn version_not_found(
method ambiguous_merge_base (line 175) | pub fn ambiguous_merge_base(
method invalid_self_merge (line 193) | pub fn invalid_self_merge(version_id: impl Into<String>) -> Self {
method with_hint (line 215) | pub fn with_hint(mut self, hint: impl Into<String>) -> Self {
method with_details (line 221) | pub fn with_details(mut self, details: JsonValue) -> Self {
method hint (line 242) | pub fn hint(&self) -> Option<&str> {
method message_with_hint (line 246) | pub fn message_with_hint(&self) -> String {
method format (line 253) | pub fn format(&self) -> String {
method fmt (line 263) | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
function format_without_hint_omits_hint_line (line 275) | fn format_without_hint_omits_hint_line() {
function format_with_hint_appends_hint_line (line 285) | fn format_with_hint_appends_hint_line() {
function with_hint_is_chainable_and_replaces_prior_hint (line 294) | fn with_hint_is_chainable_and_replaces_prior_hint() {
function new_defaults_hint_to_none (line 302) | fn new_defaults_hint_to_none() {
function unknown_defaults_hint_to_none (line 308) | fn unknown_defaults_hint_to_none() {
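`LixError` carries a stable code, a message, and an optional hint; per the tests above, `with_hint` is chainable and replaces any prior hint, and `format` appends a hint line only when one is present. A minimal sketch of that surface with simplified formatting:

```rust
// Sketch of an error type with a stable machine-readable code, a human
// message, and an optional, builder-settable hint.
#[derive(Debug)]
struct LixError {
    code: String,
    message: String,
    hint: Option<String>,
}

impl LixError {
    fn new(code: impl Into<String>, message: impl Into<String>) -> Self {
        Self { code: code.into(), message: message.into(), hint: None }
    }

    // Chainable; calling it twice keeps only the latest hint.
    fn with_hint(mut self, hint: impl Into<String>) -> Self {
        self.hint = Some(hint.into());
        self
    }

    // The hint line is only emitted when a hint is present.
    fn format(&self) -> String {
        match &self.hint {
            Some(hint) => format!("{}: {}\nhint: {}", self.code, self.message, hint),
            None => format!("{}: {}", self.code, self.message),
        }
    }
}

fn main() {
    let err = LixError::new("LIX_TABLE_NOT_FOUND", "no table `foo`")
        .with_hint("check the schema key")
        .with_hint("register the schema first"); // replaces the prior hint
    assert!(err.format().ends_with("hint: register the schema first"));
}
```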
FILE: packages/engine/src/common/fingerprint.rs
function stable_content_fingerprint_hex (line 1) | pub(crate) fn stable_content_fingerprint_hex(data: &[u8]) -> String {
FILE: packages/engine/src/common/fs_path.rs
constant MAX_CANONICAL_PATH_BYTES (line 80) | const MAX_CANONICAL_PATH_BYTES: usize = 4096;
constant MAX_CANONICAL_PATH_SEGMENT_BYTES (line 81) | const MAX_CANONICAL_PATH_SEGMENT_BYTES: usize = 255;
constant MAX_RAW_PATH_INPUT_BYTES (line 82) | const MAX_RAW_PATH_INPUT_BYTES: usize = 16 * 1024;
type NormalizedDirectoryPath (line 85) | pub(crate) struct NormalizedDirectoryPath(String);
method try_from_path (line 89) | pub(crate) fn try_from_path(path: &str) -> Result<Self, LixError> {
method from_normalized (line 92) | pub(crate) fn from_normalized(path: String) -> Self {
method as_str (line 96) | pub(crate) fn as_str(&self) -> &str {
method fmt (line 110) | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
type Target (line 102) | type Target = str;
method deref (line 104) | fn deref(&self) -> &Self::Target {
type NormalizedFilePath (line 116) | pub(crate) struct NormalizedFilePath(String);
method from_normalized (line 119) | pub(crate) fn from_normalized(path: String) -> Self {
method as_str (line 123) | pub(crate) fn as_str(&self) -> &str {
method fmt (line 137) | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
type Target (line 129) | type Target = str;
method deref (line 131) | fn deref(&self) -> &Self::Target {
type ParsedFilePath (line 143) | pub(crate) struct ParsedFilePath {
method try_from_path (line 150) | pub(crate) fn try_from_path(path: &str) -> Result<Self, LixError> {
type PathResult (line 155) | type PathResult<T> = Result<T, PathError>;
type PathError (line 158) | enum PathError {
method into_lix_error (line 178) | fn into_lix_error(self) -> LixError {
function normalize_path_segment (line 266) | pub(crate) fn normalize_path_segment(raw: &str) -> Result<String, LixErr...
function normalize_path_segment_impl (line 270) | fn normalize_path_segment_impl(raw: &str) -> PathResult<String> {
function validate_path_segment_chars (line 280) | fn validate_path_segment_chars(normalized: &str) -> PathResult<String> {
function normalize_validated_path_segment (line 301) | fn normalize_validated_path_segment(normalized: &str) -> PathResult<Stri...
function decode_percent_encoded_segment (line 309) | fn decode_percent_encoded_segment(segment: &str) -> PathResult<String> {
function hex_value (line 333) | fn hex_value(byte: u8) -> u8 {
function segment_has_valid_percent_encoding (line 342) | fn segment_has_valid_percent_encoding(segment: &str) -> bool {
function validate_decoded_path_segment_structure (line 363) | fn validate_decoded_path_segment_structure(segment: &str) -> PathResult<...
function enforce_precis_segment (line 382) | fn enforce_precis_segment(segment: &str) -> PathResult<String> {
function normalize_file_path_impl (line 389) | fn normalize_file_path_impl(path: &str) -> PathResult<String> {
function normalize_directory_path (line 423) | pub(crate) fn normalize_directory_path(path: &str) -> Result<String, Lix...
function normalize_directory_path_impl (line 427) | fn normalize_directory_path_impl(path: &str) -> PathResult<String> {
function canonicalize_path_segments (line 458) | fn canonicalize_path_segments(segments: &[&str]) -> PathResult<Vec<Strin...
function ensure_canonical_path_len (line 472) | fn ensure_canonical_path_len(path: &str) -> PathResult<()> {
function ensure_raw_path_input_len (line 480) | fn ensure_raw_path_input_len(path: &str) -> PathResult<()> {
function ensure_canonical_segment_len (line 488) | fn ensure_canonical_segment_len(segment: &str) -> PathResult<()> {
function parse_file_path (line 496) | pub(crate) fn parse_file_path(path: &str) -> Result<ParsedFilePath, LixE...
function parse_file_path_impl (line 500) | fn parse_file_path_impl(path: &str) -> PathResult<ParsedFilePath> {
function directory_ancestor_paths (line 526) | pub(crate) fn directory_ancestor_paths(path: &str) -> Vec<String> {
function ancestor_directory_paths (line 530) | fn ancestor_directory_paths(path: &str) -> Vec<String> {
function parent_directory_path (line 549) | pub(crate) fn parent_directory_path(path: &str) -> Option<String> {
function directory_name_from_path (line 561) | pub(crate) fn directory_name_from_path(path: &str) -> Option<String> {
function compose_directory_path (line 570) | pub(crate) fn compose_directory_path(parent_path: &str, name: &str) -> R...
type NormalizationKind (line 587) | enum NormalizationKind {
type LixFixtureKind (line 594) | enum LixFixtureKind {
type RfcFixture (line 600) | struct RfcFixture {
type LixProfileFixture (line 606) | struct LixProfileFixture {
type NormalizationFixture (line 615) | struct NormalizationFixture {
function assert_path_error (line 622) | fn assert_path_error<T: fmt::Debug>(result: PathResult<T>, expected: Pat...
function iri_oracle_accepts (line 626) | fn iri_oracle_accepts(path: &str) -> bool {
function normalize_with_kind (line 630) | fn normalize_with_kind(kind: NormalizationKind, input: &str) -> Result<S...
function normalize_file_path (line 640) | fn normalize_file_path(path: &str) -> Result<String, LixError> {
function assert_lix_profile_fixture (line 644) | fn assert_lix_profile_fixture(fixture: LixProfileFixture) {
constant RFC_POSITIVE_FIXTURES (line 681) | const RFC_POSITIVE_FIXTURES: &[RfcFixture] = &[
constant RFC_NEGATIVE_FIXTURES (line 692) | const RFC_NEGATIVE_FIXTURES: &[RfcFixture] = &[
constant LIX_PROFILE_POSITIVE_FIXTURES (line 715) | const LIX_PROFILE_POSITIVE_FIXTURES: &[LixProfileFixture] = &[
constant LIX_PROFILE_NEGATIVE_FIXTURES (line 739) | const LIX_PROFILE_NEGATIVE_FIXTURES: &[LixProfileFixture] = &[
constant NORMALIZATION_FIXTURES (line 798) | const NORMALIZATION_FIXTURES: &[NormalizationFixture] = &[
function rfc_positive_path_fixtures_agree_with_iref (line 832) | fn rfc_positive_path_fixtures_agree_with_iref() {
function rfc_negative_path_fixtures_agree_with_iref (line 850) | fn rfc_negative_path_fixtures_agree_with_iref() {
function lix_profile_positive_fixtures_are_pinned (line 868) | fn lix_profile_positive_fixtures_are_pinned() {
function lix_profile_negative_fixtures_document_divergence_from_the_oracle (line 875) | fn lix_profile_negative_fixtures_document_divergence_from_the_oracle() {
function normalization_fixture_table_covers_canonicalization_rules (line 882) | fn normalization_fixture_table_covers_canonicalization_rules() {
function accepts_normalized_file_paths_with_unicode_and_percent_encoding (line 895) | fn accepts_normalized_file_paths_with_unicode_and_percent_encoding() {
function rejects_structural_file_path_anomalies (line 911) | fn rejects_structural_file_path_anomalies() {
function rejects_file_paths_with_dot_segments (line 928) | fn rejects_file_paths_with_dot_segments() {
function rejects_file_paths_with_invalid_characters (line 940) | fn rejects_file_paths_with_invalid_characters() {
function rejects_file_paths_and_segments_over_length_limits (line 950) | fn rejects_file_paths_and_segments_over_length_limits() {
function rejects_file_paths_with_private_use_and_noncharacter_code_points (line 981) | fn rejects_file_paths_with_private_use_and_noncharacter_code_points() {
function rejects_file_paths_with_bidi_formatting_characters (line 991) | fn rejects_file_paths_with_bidi_formatting_characters() {
function rejects_default_ignorable_and_invisible_segment_characters (line 1001) | fn rejects_default_ignorable_and_invisible_segment_characters() {
function rejects_unicode_separators_and_leading_combining_marks (line 1020) | fn rejects_unicode_separators_and_leading_combining_marks() {
function validates_percent_encoding_in_file_paths (line 1035) | fn validates_percent_encoding_in_file_paths() {
function applies_segment_length_limit_to_canonical_text_not_percent_encoded_boundary_spelling (line 1055) | fn applies_segment_length_limit_to_canonical_text_not_percent_encoded_bo...
function rejects_raw_path_input_over_length_budget_before_unicode_processing (line 1079) | fn rejects_raw_path_input_over_length_budget_before_unicode_processing() {
function rejects_percent_encoded_forbidden_code_points_in_file_paths (line 1094) | fn rejects_percent_encoded_forbidden_code_points_in_file_paths() {
function rejects_percent_encoded_forbidden_code_points_in_directory_paths (line 1145) | fn rejects_percent_encoded_forbidden_code_points_in_directory_paths() {
function canonicalizes_percent_encoding_in_file_paths (line 1181) | fn canonicalizes_percent_encoding_in_file_paths() {
function normalization_is_stable_on_renormalization (line 1193) | fn normalization_is_stable_on_renormalization() {
function accepts_and_rejects_directory_paths_like_legacy_rules (line 1200) | fn accepts_and_rejects_directory_paths_like_legacy_rules() {
function canonicalizes_directory_paths (line 1234) | fn canonicalizes_directory_paths() {
function rejects_directory_paths_and_segments_over_length_limits (line 1242) | fn rejects_directory_paths_and_segments_over_length_limits() {
function rejects_directory_paths_with_dot_segments (line 1269) | fn rejects_directory_paths_with_dot_segments() {
function represents_root_as_a_normalized_directory_path (line 1276) | fn represents_root_as_a_normalized_directory_path() {
function root_parent_and_top_level_parent_are_absent (line 1286) | fn root_parent_and_top_level_parent_are_absent() {
function compose_directory_path_under_root (line 1292) | fn compose_directory_path_under_root() {
function exposes_stable_lix_errors_with_hints (line 1297) | fn exposes_stable_lix_errors_with_hints() {
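`decode_percent_encoded_segment` and `hex_value` imply strict percent-decoding: every `%` must be followed by two hex digits, and the decoded bytes must be valid UTF-8, with the module's further Unicode profile checks layered on top. A sketch of just that decoding step (the PRECIS and forbidden-code-point rules above are out of scope here):

```rust
// Sketch of strict percent-decoding for one path segment: truncated or
// non-hex escapes are rejected, and the decoded bytes must be UTF-8.
fn hex_value(byte: u8) -> Option<u8> {
    match byte {
        b'0'..=b'9' => Some(byte - b'0'),
        b'a'..=b'f' => Some(byte - b'a' + 10),
        b'A'..=b'F' => Some(byte - b'A' + 10),
        _ => None,
    }
}

fn decode_segment(segment: &str) -> Result<String, String> {
    let bytes = segment.as_bytes();
    let mut decoded = Vec::with_capacity(bytes.len());
    let mut i = 0;
    while i < bytes.len() {
        if bytes[i] == b'%' {
            let (hi, lo) = match (bytes.get(i + 1), bytes.get(i + 2)) {
                (Some(&hi), Some(&lo)) => (hi, lo),
                _ => return Err("truncated percent escape".into()),
            };
            let (hi, lo) = match (hex_value(hi), hex_value(lo)) {
                (Some(hi), Some(lo)) => (hi, lo),
                _ => return Err("invalid hex in percent escape".into()),
            };
            decoded.push(hi << 4 | lo);
            i += 3;
        } else {
            decoded.push(bytes[i]);
            i += 1;
        }
    }
    String::from_utf8(decoded).map_err(|_| "escape decoded to invalid UTF-8".into())
}

fn main() {
    assert_eq!(decode_segment("caf%C3%A9"), Ok("café".to_string()));
    assert!(decode_segment("bad%2").is_err());
    assert!(decode_segment("bad%zz").is_err());
}
```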
FILE: packages/engine/src/common/identity.rs
function validate_non_empty_identity_value (line 115) | pub(crate) fn validate_non_empty_identity_value(
function json_pointer_get (line 129) | pub(crate) fn json_pointer_get<'a>(
FILE: packages/engine/src/common/json_pointer.rs
function parse_json_pointer (line 3) | pub(crate) fn parse_json_pointer(pointer: &str) -> Result<Vec<String>, L...
function format_json_pointer (line 19) | pub(crate) fn format_json_pointer(segments: &[String]) -> String {
function top_level_property_name (line 33) | pub(crate) fn top_level_property_name(pointer: &str) -> Result<Option<St...
function decode_json_pointer_segment (line 47) | fn decode_json_pointer_segment(segment: &str) -> Result<String, LixError> {
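These helpers follow RFC 6901: the empty pointer is the whole document, every other pointer starts with `/`, and `~0`/`~1` escape `~` and `/` inside segments. A self-contained sketch of a parse/format pair that round-trips:

```rust
// Sketch of RFC 6901 JSON pointer parsing and formatting with the
// ~0/~1 escapes the decode_json_pointer_segment entry implies.
fn parse_json_pointer(pointer: &str) -> Result<Vec<String>, String> {
    if pointer.is_empty() {
        return Ok(Vec::new()); // root pointer addresses the whole document
    }
    let rest = pointer
        .strip_prefix('/')
        .ok_or("pointer must start with '/'")?;
    rest.split('/').map(decode_segment).collect()
}

fn decode_segment(segment: &str) -> Result<String, String> {
    let mut out = String::new();
    let mut chars = segment.chars();
    while let Some(c) = chars.next() {
        if c != '~' {
            out.push(c);
            continue;
        }
        match chars.next() {
            Some('0') => out.push('~'),
            Some('1') => out.push('/'),
            _ => return Err("invalid escape after '~'".into()),
        }
    }
    Ok(out)
}

fn format_json_pointer(segments: &[String]) -> String {
    segments
        .iter()
        // Escape '~' before '/' so "~1" in the input stays unambiguous.
        .map(|s| format!("/{}", s.replace('~', "~0").replace('/', "~1")))
        .collect()
}

fn main() {
    let segments = parse_json_pointer("/a~1b/m~0n").unwrap();
    assert_eq!(segments, vec!["a/b".to_string(), "m~n".to_string()]);
    assert_eq!(format_json_pointer(&segments), "/a~1b/m~0n");
}
```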
FILE: packages/engine/src/common/metadata.rs
function parse_row_metadata (line 3) | pub(crate) fn parse_row_metadata(
function parse_row_metadata_value (line 11) | pub(crate) fn parse_row_metadata_value(
function validate_row_metadata (line 25) | pub(crate) fn validate_row_metadata(
function serialize_row_metadata (line 38) | pub(crate) fn serialize_row_metadata(metadata: &String) -> String {
FILE: packages/engine/src/common/types.rs
type Value (line 4) | pub enum Value {
type NullableKeyFilter (line 15) | pub enum NullableKeyFilter<T> {
method default (line 22) | fn default() -> Self {
function is_any (line 28) | pub fn is_any(&self) -> bool {
function as_value (line 32) | pub fn as_value(&self) -> Option<&T> {
function as_ref (line 39) | pub fn as_ref(&self) -> NullableKeyFilter<&T> {
function from_nullable (line 47) | pub fn from_nullable(value: Option<T>) -> Self {
function as_deref (line 59) | pub fn as_deref(&self) -> NullableKeyFilter<&T::Target> {
function matches (line 69) | pub fn matches(&self, candidate: Option<&T>) -> bool {
type SqlQueryResult (line 79) | pub struct SqlQueryResult {
type LixNotice (line 88) | pub struct LixNotice {
type WriteReceipt (line 96) | pub struct WriteReceipt {
method is_empty (line 102) | pub fn is_empty(&self) -> bool {
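`NullableKeyFilter` reads as a tri-state filter: match anything, match only an absent key, or match one exact key. The variant names in this sketch are assumptions; the behavior is inferred from the `from_nullable` and `matches` signatures above:

```rust
// Sketch of a tri-state key filter. Useful where a key column (such as
// a file id) may legitimately be null and queries need to distinguish
// "any value", "must be null", and "must equal this value".
enum NullableKeyFilter<T> {
    Any,      // matches every candidate
    Null,     // matches only an absent key
    Value(T), // matches only this exact key
}

impl<T: PartialEq> NullableKeyFilter<T> {
    fn from_nullable(value: Option<T>) -> Self {
        match value {
            Some(v) => Self::Value(v),
            None => Self::Null,
        }
    }

    fn matches(&self, candidate: Option<&T>) -> bool {
        match self {
            Self::Any => true,
            Self::Null => candidate.is_none(),
            Self::Value(v) => candidate == Some(v),
        }
    }
}

fn main() {
    let any: NullableKeyFilter<String> = NullableKeyFilter::Any;
    assert!(any.matches(Some(&"x".to_string())) && any.matches(None));

    let exact = NullableKeyFilter::from_nullable(Some("file-1".to_string()));
    assert!(exact.matches(Some(&"file-1".to_string())));
    assert!(!exact.matches(None));
}
```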
FILE: packages/engine/src/common/wire.rs
type WireValue (line 7) | pub enum WireValue {
method try_from_engine (line 27) | pub fn try_from_engine(value: &Value) -> Result<Self, LixError> {
method try_into_engine (line 55) | pub fn try_into_engine(self) -> Result<Value, LixError> {
type WireQueryResult (line 18) | pub struct WireQueryResult {
method try_from_engine (line 90) | pub fn try_from_engine(result: &SqlQueryResult) -> Result<Self, LixErr...
method try_into_engine (line 106) | pub fn try_into_engine(self) -> Result<SqlQueryResult, LixError> {
function value_roundtrip_preserves_all_variants (line 130) | fn value_roundtrip_preserves_all_variants() {
function query_result_roundtrip_preserves_rows_and_columns (line 151) | fn query_result_roundtrip_preserves_rows_and_columns() {
function canonical_json_uses_lowercase_kinds_only (line 177) | fn canonical_json_uses_lowercase_kinds_only() {
function null_shape_is_explicitly_canonical (line 217) | fn null_shape_is_explicitly_canonical() {
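The wire tests (`canonical_json_uses_lowercase_kinds_only` and the roundtrip tests) suggest values cross the boundary as JSON with lowercase kind tags. A sketch using serde's adjacently tagged representation; it requires serde (with the derive feature) and serde_json, and the variant set is a guess at a typical SQL value enum, not the crate's exact definition:

```rust
// Sketch of a wire value with lowercase kind tags that round-trips
// through canonical JSON.
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, PartialEq, Debug)]
#[serde(tag = "kind", content = "value", rename_all = "lowercase")]
enum WireValue {
    Null,
    Integer(i64),
    Real(f64),
    Text(String),
}

fn main() {
    let value = WireValue::Text("hello".to_string());
    let json = serde_json::to_string(&value).unwrap();
    // The kind tag is lowercase, never "Text".
    assert_eq!(json, r#"{"kind":"text","value":"hello"}"#);

    let back: WireValue = serde_json::from_str(&json).unwrap();
    assert_eq!(back, value);
}
```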
FILE: packages/engine/src/domain.rs
type Domain (line 11) | pub(crate) struct Domain {
method exact_file (line 18) | pub(crate) fn exact_file(
method any_file (line 30) | pub(crate) fn any_file(version_id: impl Into<String>, untracked: bool)...
method schema_catalog (line 38) | pub(crate) fn schema_catalog(version_id: impl Into<String>, untracked:...
method for_live_row (line 42) | pub(crate) fn for_live_row(row: &MaterializedLiveStateRow) -> Self {
method schema_catalog_domain (line 46) | pub(crate) fn schema_catalog_domain(&self) -> Self {
method version_id (line 53) | pub(crate) fn version_id(&self) -> &str {
method untracked (line 57) | pub(crate) fn untracked(&self) -> bool {
method fingerprint_component (line 61) | pub(crate) fn fingerprint_component(&self) -> String {
method file_scope (line 71) | pub(crate) fn file_scope(&self) -> &DomainFileScope {
method is_exact_file (line 75) | pub(crate) fn is_exact_file(&self, file_id: &Option<String>) -> bool {
method with_untracked (line 79) | pub(crate) fn with_untracked(&self, untracked: bool) -> Self {
method with_file_scope (line 87) | pub(crate) fn with_file_scope(&self, file_scope: DomainFileScope) -> S...
method with_exact_file_scope (line 95) | pub(crate) fn with_exact_file_scope(&self, file_id: Option<String>) ->...
method file_filters (line 99) | pub(crate) fn file_filters(&self) -> Vec<NullableKeyFilter<String>> {
method contains (line 106) | pub(crate) fn contains(&self, row: &MaterializedLiveStateRow) -> bool {
method reachable_target_domains (line 116) | fn reachable_target_domains(&self) -> Vec<Self> {
method source_domains_that_can_reach (line 124) | fn source_domains_that_can_reach(&self) -> Vec<Self> {
method can_reach (line 132) | fn can_reach(&self, target: &Self) -> bool {
method schema_catalog_domains (line 138) | pub(crate) fn schema_catalog_domains(&self) -> Vec<Self> {
method fk_target_domains (line 142) | pub(crate) fn fk_target_domains(&self) -> Vec<Self> {
method fk_source_domains_for_target (line 146) | pub(crate) fn fk_source_domains_for_target(&self) -> Vec<Self> {
method file_owner_domains (line 150) | pub(crate) fn file_owner_domains(&self) -> Vec<Self> {
method directory_parent_domains (line 154) | pub(crate) fn directory_parent_domains(&self) -> Vec<Self> {
method version_descriptor_domains_for_ref_delete (line 158) | pub(crate) fn version_descriptor_domains_for_ref_delete(&self) -> Vec<...
method file_scoped_row_domains_for_file_descriptor_delete (line 162) | pub(crate) fn file_scoped_row_domains_for_file_descriptor_delete(&self...
method validation_scope_contains_constraint_domain (line 166) | pub(crate) fn validation_scope_contains_constraint_domain(&self, targe...
method tombstone_domain_affects_validation_scope (line 170) | pub(crate) fn tombstone_domain_affects_validation_scope(
type DomainFileScope (line 179) | pub(crate) enum DomainFileScope {
type DomainRowIdentity (line 185) | pub(crate) struct DomainRowIdentity {
method new (line 192) | pub(crate) fn new(
method from_live_row (line 204) | pub(crate) fn from_live_row(row: &MaterializedLiveStateRow) -> Self {
method in_domain (line 212) | pub(crate) fn in_domain(
method exact (line 221) | pub(crate) fn exact(
method with_domain (line 235) | pub(crate) fn with_domain(&self, domain: Domain) -> Self {
method domain (line 243) | pub(crate) fn domain(&self) -> &Domain {
method schema_key (line 247) | pub(crate) fn schema_key(&self) -> &str {
method schema_key_owned (line 251) | pub(crate) fn schema_key_owned(&self) -> String {
method entity_id (line 255) | pub(crate) fn entity_id(&self) -> &EntityIdentity {
method entity_id_owned (line 259) | pub(crate) fn entity_id_owned(&self) -> EntityIdentity {
method matches_parts (line 263) | pub(crate) fn matches_parts(
method reachable_target_identities (line 272) | pub(crate) fn reachable_target_identities(&self) -> Vec<Self> {
method source_identities_that_can_reach (line 280) | pub(crate) fn source_identities_that_can_reach(&self) -> Vec<Self> {
type DomainSchemaIdentity (line 290) | pub(crate) struct DomainSchemaIdentity {
method new (line 296) | pub(crate) fn new(domain: Domain, schema_key: impl Into<String>) -> Se...
method fingerprint_component (line 303) | pub(crate) fn fingerprint_component(&self) -> String {
function committed_row_is_exact_version_scoped (line 312) | pub(crate) fn committed_row_is_exact_version_scoped(
function nullable_filter_from_option (line 319) | fn nullable_filter_from_option(value: &Option<String>) -> NullableKeyFil...
FILE: packages/engine/src/engine.rs
type Engine (line 20) | pub struct Engine {
method initialize (line 36) | pub async fn initialize(
method new (line 56) | pub async fn new(backend: Box<dyn Backend + Send + Sync>) -> Result<Se...
method storage (line 87) | pub(crate) fn storage(&self) -> StorageContext {
method load_version_head_commit_id (line 96) | pub async fn load_version_head_commit_id(
method open_session (line 118) | pub async fn open_session(
method open_workspace_session (line 135) | pub async fn open_workspace_session(&self) -> Result<SessionContext, L...
method rebuild_tracked_state_for_version (line 154) | pub async fn rebuild_tracked_state_for_version(
function assert_initialized (line 192) | async fn assert_initialized(
FILE: packages/engine/src/entity_identity.rs
type EntityIdentity (line 13) | pub(crate) struct EntityIdentity {
method single (line 63) | pub(crate) fn single(value: impl Into<String>) -> Self {
method tuple (line 70) | pub(crate) fn tuple(parts: Vec<String>) -> Result<Self, EntityIdentity...
method from_primary_key_paths (line 80) | pub(crate) fn from_primary_key_paths(
method as_json_array_value (line 102) | pub(crate) fn as_json_array_value(&self) -> Result<JsonValue, LixError> {
method as_json_array_text (line 117) | pub(crate) fn as_json_array_text(&self) -> Result<String, LixError> {
method as_single_string (line 123) | pub(crate) fn as_single_string(&self) -> Result<&str, LixError> {
method as_single_string_owned (line 139) | pub(crate) fn as_single_string_owned(&self) -> Result<String, LixError> {
method from_json_array_text (line 143) | pub(crate) fn from_json_array_text(entity_id: &str) -> Result<Self, En...
method from_json_array_value (line 149) | pub(crate) fn from_json_array_value(
type EntityIdentityError (line 18) | pub(crate) enum EntityIdentityError {
method fmt (line 28) | fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Re...
function string_part_from_json_value (line 167) | fn string_part_from_json_value(
function canonical_json_text (line 180) | pub(crate) fn canonical_json_text(value: &JsonValue) -> serde_json::Resu...
function canonical_json_value (line 184) | fn canonical_json_value(value: &JsonValue) -> JsonValue {
function single_string_identity_projects_to_single_string (line 210) | fn single_string_identity_projects_to_single_string() {
function single_identity_projects_to_json_array_entity_id (line 220) | fn single_identity_projects_to_json_array_entity_id() {
function composite_identity_projects_to_json_array_entity_id (line 232) | fn composite_identity_projects_to_json_array_entity_id() {
function entity_id_json_array_roundtrips (line 245) | fn entity_id_json_array_roundtrips() {
function entity_id_json_array_rejects_empty_string_part (line 259) | fn entity_id_json_array_rejects_empty_string_part() {
function tuple_rejects_empty_string_part (line 267) | fn tuple_rejects_empty_string_part() {
function entity_id_json_array_does_not_collide_on_delimiter_like_values (line 275) | fn entity_id_json_array_does_not_collide_on_delimiter_like_values() {
function composite_identity_rejects_single_string_projection (line 288) | fn composite_identity_rejects_single_string_projection() {
function composite_identity_does_not_collide_on_delimiter_like_values (line 296) | fn composite_identity_does_not_collide_on_delimiter_like_values() {
function from_primary_key_paths_derives_ordered_parts (line 309) | fn from_primary_key_paths_derives_ordered_parts() {
function entity_id_json_array_rejects_non_string_parts (line 330) | fn entity_id_json_array_rejects_non_string_parts() {
function from_primary_key_paths_rejects_non_string_parts (line 346) | fn from_primary_key_paths_rejects_non_string_parts() {
function from_primary_key_paths_rejects_empty_string_parts (line 362) | fn from_primary_key_paths_rejects_empty_string_parts() {
function from_primary_key_paths_rejects_nested_json_parts (line 378) | fn from_primary_key_paths_rejects_nested_json_parts() {
function from_primary_key_paths_rejects_missing_parts (line 397) | fn from_primary_key_paths_rejects_missing_parts() {
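
The entity_identity.rs entries above describe composite identities that project to a JSON-array `entity_id` and reject empty or non-string parts, with tests confirming delimiter-like values cannot collide. A minimal self-contained sketch of that encoding idea, assuming `serde_json = "1"` and using illustrative names rather than the crate's actual types:

```rust
// Sketch: encode identity parts as a canonical JSON array of strings, so
// values containing delimiter-like characters cannot produce collisions.
use serde_json::{json, Value};

#[derive(Debug, PartialEq)]
struct EntityIdentity(Vec<String>);

impl EntityIdentity {
    fn tuple(parts: Vec<String>) -> Result<Self, String> {
        if parts.iter().any(|p| p.is_empty()) {
            return Err("empty string part".into());
        }
        Ok(Self(parts))
    }

    /// Project to the stored `entity_id` text, e.g. `["a","b|c"]`.
    fn as_json_array_text(&self) -> String {
        Value::Array(self.0.iter().map(|p| json!(p)).collect()).to_string()
    }

    /// Parse back from the stored text, rejecting non-string parts.
    fn from_json_array_text(entity_id: &str) -> Result<Self, String> {
        let value: Value = serde_json::from_str(entity_id).map_err(|e| e.to_string())?;
        let parts = value.as_array().ok_or("not a JSON array")?;
        let strings = parts
            .iter()
            .map(|p| p.as_str().map(str::to_owned).ok_or("non-string part".to_string()))
            .collect::<Result<Vec<_>, _>>()?;
        Self::tuple(strings)
    }
}

fn main() {
    // Delimiter-like values stay distinct because JSON escaping is unambiguous.
    let a = EntityIdentity::tuple(vec!["x|y".into(), "z".into()]).unwrap();
    let b = EntityIdentity::tuple(vec!["x".into(), "y|z".into()]).unwrap();
    assert_ne!(a.as_json_array_text(), b.as_json_array_text());
    let roundtrip = EntityIdentity::from_json_array_text(&a.as_json_array_text()).unwrap();
    assert_eq!(roundtrip, a);
}
```
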
FILE: packages/engine/src/functions/context.rs
type FunctionContext (line 15) | pub(crate) struct FunctionContext {
method prepare (line 25) | pub(crate) async fn prepare(live_state: &dyn LiveStateReader) -> Resul...
method provider (line 50) | pub(crate) fn provider(&self) -> FunctionProviderHandle {
method stage_persist_if_needed (line 58) | pub(crate) async fn stage_persist_if_needed(
function live_state_context (line 88) | fn live_state_context() -> LiveStateContext {
function prepare_uses_system_functions_when_mode_missing (line 97) | async fn prepare_uses_system_functions_when_mode_missing() {
function prepare_starts_deterministic_functions_at_sequence_zero (line 116) | async fn prepare_starts_deterministic_functions_at_sequence_zero() {
function prepare_continues_from_persisted_sequence (line 150) | async fn prepare_continues_from_persisted_sequence() {
function persist_if_needed_writes_sequence_when_deterministic_functions_advanced (line 189) | async fn persist_if_needed_writes_sequence_when_deterministic_functions_...
function persist_if_needed_is_noop_for_system_functions (line 232) | async fn persist_if_needed_is_noop_for_system_functions() {
function write_key_value (line 260) | async fn write_key_value(storage: StorageContext, key: &str, value: serd...
FILE: packages/engine/src/functions/deterministic.rs
constant DETERMINISTIC_UUID_COUNTER_MASK (line 3) | const DETERMINISTIC_UUID_COUNTER_MASK: u64 = 0x0000_FFFF_FFFF_FFFF;
type DeterministicFunctionProvider (line 11) | pub(crate) struct DeterministicFunctionProvider {
method new (line 18) | pub(crate) fn new(next_sequence: i64, timestamp_shuffle: bool) -> Self {
method highest_seen (line 26) | pub(crate) fn highest_seen(&self) -> Option<i64> {
method take_sequence (line 30) | fn take_sequence(&mut self) -> i64 {
method uuid_v7 (line 39) | fn uuid_v7(&mut self) -> String {
method timestamp (line 45) | fn timestamp(&mut self) -> String {
method deterministic_sequence_persist_highest_seen (line 57) | fn deterministic_sequence_persist_highest_seen(&self) -> Option<i64> {
function shuffled_timestamp_millis (line 62) | fn shuffled_timestamp_millis(counter: i64) -> i64 {
function deterministic_uuid_uses_sequence_counter (line 79) | fn deterministic_uuid_uses_sequence_counter() {
function deterministic_timestamp_uses_sequence_counter (line 88) | fn deterministic_timestamp_uses_sequence_counter() {
function deterministic_timestamp_shuffle_can_be_non_monotonic (line 96) | fn deterministic_timestamp_shuffle_can_be_non_monotonic() {
function deterministic_sequence_can_start_after_persisted_highest_seen (line 106) | fn deterministic_sequence_can_start_after_persisted_highest_seen() {
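
deterministic.rs suggests UUIDs and timestamps that are pure functions of a persisted sequence counter, masked to 48 bits for the UUID. A toy sketch of that idea; the UUID field layout and timestamp formatting below are invented for illustration (the formatter only covers the first minute of epoch time):

```rust
// Sketch: each call consumes one sequence number, and both the UUID and
// the timestamp are pure functions of that counter.
const COUNTER_MASK: u64 = 0x0000_FFFF_FFFF_FFFF; // low 48 bits

struct DeterministicProvider {
    next_sequence: i64,
    highest_seen: Option<i64>,
}

impl DeterministicProvider {
    fn new(next_sequence: i64) -> Self {
        Self { next_sequence, highest_seen: None }
    }

    fn take_sequence(&mut self) -> i64 {
        let seq = self.next_sequence;
        self.next_sequence += 1;
        self.highest_seen = Some(seq);
        seq
    }

    /// UUIDv7-shaped string with the counter in the 48-bit timestamp field.
    fn uuid_v7(&mut self) -> String {
        let millis = self.take_sequence() as u64 & COUNTER_MASK;
        format!(
            "{:08x}-{:04x}-7000-8000-000000000000",
            (millis >> 16) as u32,
            (millis & 0xFFFF) as u16
        )
    }

    /// Toy timestamp derived from the counter as epoch milliseconds.
    fn timestamp(&mut self) -> String {
        let millis = self.take_sequence();
        format!("1970-01-01T00:00:{:02}.{:03}Z", millis / 1000, millis % 1000)
    }
}

fn main() {
    let mut p = DeterministicProvider::new(0);
    assert_eq!(p.uuid_v7(), "00000000-0000-7000-8000-000000000000");
    assert_eq!(p.timestamp(), "1970-01-01T00:00:00.001Z");
    // `highest_seen` is what would be persisted to resume the sequence.
    assert_eq!(p.highest_seen, Some(1));
}
```
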
FILE: packages/engine/src/functions/provider.rs
type FunctionProvider (line 6) | pub(crate) trait FunctionProvider: Send {
method uuid_v7 (line 7) | fn uuid_v7(&mut self) -> String;
method timestamp (line 8) | fn timestamp(&mut self) -> String;
method deterministic_sequence_persist_highest_seen (line 10) | fn deterministic_sequence_persist_highest_seen(&self) -> Option<i64> {
method uuid_v7 (line 88) | fn uuid_v7(&mut self) -> String {
method timestamp (line 92) | fn timestamp(&mut self) -> String {
method deterministic_sequence_persist_highest_seen (line 96) | fn deterministic_sequence_persist_highest_seen(&self) -> Option<i64> {
method uuid_v7 (line 105) | fn uuid_v7(&mut self) -> String {
method timestamp (line 109) | fn timestamp(&mut self) -> String {
method deterministic_sequence_persist_highest_seen (line 113) | fn deterministic_sequence_persist_highest_seen(&self) -> Option<i64> {
method uuid_v7 (line 123) | fn uuid_v7(&mut self) -> String {
method timestamp (line 127) | fn timestamp(&mut self) -> String {
type FunctionProviderHandle (line 15) | pub(crate) type FunctionProviderHandle = SharedFunctionProvider<Box<dyn ...
type SharedFunctionProvider (line 18) | pub(crate) struct SharedFunctionProvider<P> {
method clone (line 23) | fn clone(&self) -> Self {
function new (line 31) | pub(crate) fn new(provider: P) -> Self {
function with_lock (line 37) | fn with_lock<R>(&self, f: impl FnOnce(&P) -> R) -> R {
function with_lock_mut (line 45) | fn with_lock_mut<R>(&self, f: impl FnOnce(&mut P) -> R) -> R {
function call_uuid_v7 (line 58) | pub(crate) fn call_uuid_v7(&self) -> String {
function call_timestamp (line 62) | pub(crate) fn call_timestamp(&self) -> String {
function deterministic_sequence_persist_highest_seen (line 66) | pub(crate) fn deterministic_sequence_persist_highest_seen(&self) -> Opti...
method call_uuid_v7 (line 75) | fn call_uuid_v7(&self) -> String {
method call_timestamp (line 79) | fn call_timestamp(&self) -> String {
type SystemFunctionProvider (line 120) | pub(crate) struct SystemFunctionProvider;
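
provider.rs pairs a `FunctionProvider` trait taking `&mut self` with a cloneable `SharedFunctionProvider` wrapper exposing `with_lock`/`with_lock_mut`, which implies an `Arc<Mutex<…>>` behind the handle. A minimal sketch of that pattern, with internals assumed:

```rust
// Sketch: a trait with &mut self methods, shared across clones through
// Arc<Mutex<...>> so every handle advances the same underlying state.
use std::sync::{Arc, Mutex};

trait FunctionProvider: Send {
    fn uuid_v7(&mut self) -> String;
    fn timestamp(&mut self) -> String;
    /// Default: nothing to persist (system/non-deterministic providers).
    fn deterministic_sequence_persist_highest_seen(&self) -> Option<i64> {
        None
    }
}

struct SharedFunctionProvider<P> {
    inner: Arc<Mutex<P>>,
}

// Manual Clone so P itself does not need to be Clone.
impl<P> Clone for SharedFunctionProvider<P> {
    fn clone(&self) -> Self {
        Self { inner: Arc::clone(&self.inner) }
    }
}

impl<P: FunctionProvider> SharedFunctionProvider<P> {
    fn new(provider: P) -> Self {
        Self { inner: Arc::new(Mutex::new(provider)) }
    }

    fn call_uuid_v7(&self) -> String {
        self.inner.lock().expect("provider lock poisoned").uuid_v7()
    }
}

struct CountingProvider(u64);

impl FunctionProvider for CountingProvider {
    fn uuid_v7(&mut self) -> String {
        self.0 += 1;
        format!("uuid-{}", self.0)
    }
    fn timestamp(&mut self) -> String {
        format!("ts-{}", self.0)
    }
}

fn main() {
    let shared = SharedFunctionProvider::new(CountingProvider(0));
    let clone = shared.clone();
    // Both handles advance the same underlying counter.
    assert_eq!(shared.call_uuid_v7(), "uuid-1");
    assert_eq!(clone.call_uuid_v7(), "uuid-2");
}
```
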
FILE: packages/engine/src/functions/state.rs
constant DETERMINISTIC_MODE_KEY (line 14) | pub(crate) const DETERMINISTIC_MODE_KEY: &str = "lix_deterministic_mode";
constant DETERMINISTIC_SEQUENCE_KEY (line 15) | pub(crate) const DETERMINISTIC_SEQUENCE_KEY: &str = "lix_deterministic_s...
constant KEY_VALUE_SCHEMA_KEY (line 17) | const KEY_VALUE_SCHEMA_KEY: &str = "lix_key_value";
function load_mode (line 23) | pub(crate) async fn load_mode(
function load_sequence (line 37) | pub(crate) async fn load_sequence(
function stage_sequence (line 51) | pub(crate) async fn stage_sequence(
function load_key_value_row (line 74) | async fn load_key_value_row(
function key_value_payload (line 88) | fn key_value_payload(row: &MaterializedLiveStateRow, key: &str) -> Resul...
function parse_mode_value (line 116) | fn parse_mode_value(value: JsonValue) -> Result<DeterministicMode, LixEr...
function parse_sequence_value (line 141) | fn parse_sequence_value(value: JsonValue) -> Result<DeterministicSequenc...
function deterministic_key_value_row (line 151) | fn deterministic_key_value_row(
function live_state_context (line 179) | fn live_state_context() -> LiveStateContext {
function missing_mode_is_disabled (line 188) | async fn missing_mode_is_disabled() {
function valid_mode_decodes_flags (line 202) | async fn valid_mode_decodes_flags() {
function missing_sequence_is_uninitialized (line 230) | async fn missing_sequence_is_uninitialized() {
function valid_sequence_decodes_highest_seen (line 244) | async fn valid_sequence_decodes_highest_seen() {
function write_sequence_persists_untracked_global_key_value (line 266) | async fn write_sequence_persists_untracked_global_key_value() {
function write_test_key_value (line 313) | async fn write_test_key_value(storage: StorageContext, key: &str, value:...
FILE: packages/engine/src/functions/types.rs
type DeterministicMode (line 6) | pub(crate) struct DeterministicMode {
method disabled (line 12) | pub(crate) fn disabled() -> Self {
type DeterministicSequence (line 25) | pub(crate) struct DeterministicSequence {
method uninitialized (line 30) | pub(crate) fn uninitialized() -> Self {
method next_sequence (line 34) | pub(crate) fn next_sequence(self) -> i64 {
FILE: packages/engine/src/init.rs
constant KEY_VALUE_SCHEMA_KEY (line 20) | const KEY_VALUE_SCHEMA_KEY: &str = "lix_key_value";
constant LIX_ID_KEY (line 21) | const LIX_ID_KEY: &str = "lix_id";
constant WORKSPACE_VERSION_KEY (line 22) | const WORKSPACE_VERSION_KEY: &str = "lix_workspace_version_id";
constant REGISTERED_SCHEMA_KEY (line 23) | const REGISTERED_SCHEMA_KEY: &str = "lix_registered_schema";
type InitSeedPlan (line 30) | pub(crate) struct InitSeedPlan {
type InitSeedCommit (line 38) | struct InitSeedCommit {
type InitSeedChange (line 47) | struct InitSeedChange {
type InitSeedLiveRow (line 56) | struct InitSeedLiveRow {
type InitReceipt (line 68) | pub struct InitReceipt {
function plan_init_seed (line 79) | pub(crate) fn plan_init_seed(functions: FunctionProviderHandle) -> Resul...
function initialize (line 175) | pub(crate) async fn initialize(
function seed_change_to_commit_store_change (line 255) | fn seed_change_to_commit_store_change(change: &InitSeedChange) -> Result...
function untracked_state_row_from_seed (line 267) | fn untracked_state_row_from_seed(row: &InitSeedLiveRow) -> Result<Untrac...
function untracked_row (line 281) | fn untracked_row(
function canonical_change (line 298) | fn canonical_change(
function version_descriptor_snapshot (line 314) | fn version_descriptor_snapshot(id: &str, name: &str, hidden: bool) -> Re...
function key_value_snapshot (line 322) | fn key_value_snapshot(key: &str, value: &str) -> Result<String, LixError> {
function registered_schema_snapshot (line 329) | fn registered_schema_snapshot(schema: &serde_json::Value) -> Result<Stri...
function version_ref_snapshot (line 335) | fn version_ref_snapshot(id: &str, commit_id: &str) -> Result<String, Lix...
function encode_snapshot (line 342) | fn encode_snapshot(value: serde_json::Value) -> Result<String, LixError> {
function plan_init_seed_returns_tracked_changes_and_untracked_workspace_state (line 363) | fn plan_init_seed_returns_tracked_changes_and_untracked_workspace_state() {
function plan_init_seed_commit_header_tracks_schema_registrations_descriptor_and_lix_id_changes (line 375) | fn plan_init_seed_commit_header_tracks_schema_registrations_descriptor_a...
function plan_init_seed_registers_seed_schemas_as_initial_commit_rows (line 405) | fn plan_init_seed_registers_seed_schemas_as_initial_commit_rows() {
function plan_init_seed_version_refs_point_to_initial_commit (line 432) | fn plan_init_seed_version_refs_point_to_initial_commit() {
function plan_init_seed_workspace_version_points_to_main_version (line 457) | fn plan_init_seed_workspace_version_points_to_main_version() {
function initialize_writes_initial_commit_through_commit_store (line 483) | async fn initialize_writes_initial_commit_through_commit_store() {
function snapshot (line 527) | fn snapshot(change: &InitSeedChange) -> JsonValue {
function untracked_snapshot (line 531) | fn untracked_snapshot(row: &InitSeedLiveRow) -> JsonValue {
function test_functions (line 535) | fn test_functions() -> FunctionProviderHandle {
type TestFunctionProvider (line 542) | struct TestFunctionProvider {
method uuid_v7 (line 548) | fn uuid_v7(&mut self) -> String {
method timestamp (line 553) | fn timestamp(&mut self) -> String {
FILE: packages/engine/src/json_store/compression.rs
function compress_json_payload (line 4) | pub(crate) fn compress_json_payload(json_data: &[u8]) -> Result<Vec<u8>,...
function compress_json_payload (line 14) | pub(crate) fn compress_json_payload(json_data: &[u8]) -> Result<Vec<u8>,...
function decode_json_zstd_payload (line 22) | pub(crate) fn decode_json_zstd_payload(
function decode_json_zstd_payload (line 36) | pub(crate) fn decode_json_zstd_payload(
function zstd_payload_roundtrips (line 66) | fn zstd_payload_roundtrips() {
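
compression.rs exposes a compress/decode pair for zstd-framed JSON (defined twice, presumably behind cfg gates for different targets). A roundtrip sketch using the `zstd` crate as a stand-in; the compression level and error type are assumptions:

```rust
// Sketch: zstd roundtrip for a JSON payload. Assumes zstd = "0.13".
use std::io::Cursor;

fn compress_json_payload(json_data: &[u8]) -> std::io::Result<Vec<u8>> {
    // Level 3 is a reasonable default trade-off; the engine's actual level
    // is not visible in the listing.
    zstd::encode_all(Cursor::new(json_data), 3)
}

fn decode_json_zstd_payload(bytes: &[u8]) -> std::io::Result<Vec<u8>> {
    zstd::decode_all(Cursor::new(bytes))
}

fn main() -> std::io::Result<()> {
    let json = br#"{"key":"value","items":[1,2,3]}"#;
    let compressed = compress_json_payload(json)?;
    let decoded = decode_json_zstd_payload(&compressed)?;
    assert_eq!(decoded, json);
    Ok(())
}
```
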
FILE: packages/engine/src/json_store/context.rs
constant PACK_LOCAL_MAX_JSON_BYTES (line 11) | const PACK_LOCAL_MAX_JSON_BYTES: usize = 64 * 1024;
type JsonStoreContext (line 14) | pub(crate) struct JsonStoreContext;
method new (line 17) | pub(crate) fn new() -> Self {
method reader (line 21) | pub(crate) fn reader<S>(&self, store: S) -> JsonStoreReader<S>
method writer (line 28) | pub(crate) fn writer(&self) -> JsonStoreWriter {
method load_bytes_many (line 32) | pub(crate) async fn load_bytes_many(
method commit_pack_get_group (line 42) | pub(crate) fn commit_pack_get_group(&self, commit_id: &str, pack_id: u...
method decode_pack_refs (line 49) | pub(crate) fn decode_pack_refs(&self, bytes: &[u8]) -> Result<Vec<Json...
type JsonStoreReader (line 54) | pub(crate) struct JsonStoreReader<S> {
method clone (line 62) | fn clone(&self) -> Self {
function load_bytes_many (line 73) | pub(crate) async fn load_bytes_many(
function load_values_many (line 82) | pub(crate) async fn load_values_many(
function load_projections_many (line 109) | pub(crate) async fn load_projections_many(
type JsonStoreWriter (line 137) | pub(crate) struct JsonStoreWriter;
method new (line 146) | fn new() -> Self {
method stage_batch (line 150) | pub(crate) fn stage_batch<'a>(
method stage_batch_report (line 160) | pub(crate) fn stage_batch_report<'a>(
type JsonStageBatchReport (line 140) | pub(crate) struct JsonStageBatchReport {
function commit_local_batch_writes_pack_without_direct_rows (line 235) | async fn commit_local_batch_writes_pack_without_direct_rows() {
function commit_local_batch_dedupes_pack_payloads_but_returns_request_order (line 305) | async fn commit_local_batch_dedupes_pack_payloads_but_returns_request_or...
function commit_local_batch_accepts_trusted_prehashed_payload (line 377) | async fn commit_local_batch_accepts_trusted_prehashed_payload() {
FILE: packages/engine/src/json_store/encoded.rs
type JsonCodec (line 5) | pub(crate) enum JsonCodec {
type EncodedJson (line 10) | pub(crate) struct EncodedJson<'a> {
FILE: packages/engine/src/json_store/store.rs
constant JSON_NAMESPACE (line 9) | pub(crate) const JSON_NAMESPACE: &str = "json_store.json";
constant JSON_PACK_NAMESPACE (line 10) | pub(crate) const JSON_PACK_NAMESPACE: &str = "json_store.pack";
constant STORED_JSON_MAGIC (line 11) | const STORED_JSON_MAGIC: &[u8] = b"lix-json:v1";
constant STORED_JSON_HEADER_LEN (line 12) | const STORED_JSON_HEADER_LEN: usize = STORED_JSON_MAGIC.len() + 1 + 8;
constant STORED_JSON_PACK_MAGIC (line 13) | const STORED_JSON_PACK_MAGIC: &[u8] = b"lix-json-pack:v2";
constant STORED_JSON_PACK_ENTRY_HEADER_LEN (line 14) | const STORED_JSON_PACK_ENTRY_HEADER_LEN: usize = 32 + 1 + 4 + 4 + 4;
constant ZSTD_MIN_JSON_BYTES (line 15) | const ZSTD_MIN_JSON_BYTES: usize = 16 * 1024;
constant MIN_ZSTD_SAVINGS_BYTES (line 16) | const MIN_ZSTD_SAVINGS_BYTES: usize = 128;
type StoredJsonPayload (line 18) | struct StoredJsonPayload<'a> {
type JsonPackLayout (line 24) | struct JsonPackLayout {
type JsonPackEntry (line 30) | struct JsonPackEntry<'a> {
type JsonHashCheck (line 36) | enum JsonHashCheck {
type OrderedSinglePackProbe (line 44) | enum OrderedSinglePackProbe {
function raw_json_ref_for_content (line 50) | fn raw_json_ref_for_content(json: &str) -> JsonRef {
function json_ref_for_content (line 54) | pub(crate) fn json_ref_for_content(bytes: &[u8]) -> JsonRef {
function encode_json (line 59) | fn encode_json(json: &str) -> Result<EncodedJson<'_>, LixError> {
function encode_json_for_storage (line 63) | fn encode_json_for_storage(json: &str) -> Result<EncodedJson<'_>, LixErr...
function encode_json_for_storage_with_ref (line 68) | fn encode_json_for_storage_with_ref(
function encode_json_str (line 94) | pub(crate) fn encode_json_str(json: &str) -> Result<EncodedJson<'_>, Lix...
function encode_json_str_with_ref (line 98) | pub(crate) fn encode_json_str_with_ref(
function encode_direct_json_payload (line 106) | pub(crate) fn encode_direct_json_payload(encoded_json: &EncodedJson<'_>)...
function pack_key (line 110) | pub(crate) fn pack_key(commit_id: &str, pack_id: u32) -> Vec<u8> {
function decode_json_pack_refs (line 119) | pub(crate) fn decode_json_pack_refs(bytes: &[u8]) -> Result<Vec<JsonRef>...
function encode_json_pack (line 130) | pub(crate) fn encode_json_pack(entries: &[&EncodedJson<'_>]) -> Result<V...
function json_pack_u32 (line 170) | fn json_pack_u32(value: usize, field: &str) -> Result<[u8; 4], LixError> {
function encode_json_bytes_for_storage (line 180) | pub(crate) fn encode_json_bytes_for_storage(bytes: &[u8]) -> Result<(Jso...
function encode_json_str_for_storage_with_ref (line 191) | pub(crate) fn encode_json_str_for_storage_with_ref(
function load_json_bytes_direct (line 200) | async fn load_json_bytes_direct(
function load_json_bytes_many_in_scope (line 224) | pub(crate) async fn load_json_bytes_many_in_scope(
function verify_json_bytes_many_in_scope (line 238) | pub(crate) async fn verify_json_bytes_many_in_scope(
function load_json_bytes_many_in_scope_with_hash_check (line 247) | async fn load_json_bytes_many_in_scope_with_hash_check(
function json_values_in_request_order (line 378) | fn json_values_in_request_order(
function load_ordered_single_pack (line 398) | async fn load_ordered_single_pack(
function load_from_single_pack_bytes (line 440) | fn load_from_single_pack_bytes(
function load_from_packs (line 458) | async fn load_from_packs(
function encode_stored_json_payload (line 511) | fn encode_stored_json_payload(encoded_json: &EncodedJson<'_>) -> Vec<u8> {
function decode_stored_json_payload (line 520) | fn decode_stored_json_payload(bytes: &[u8]) -> Result<StoredJsonPayload<...
function json_codec_byte (line 548) | fn json_codec_byte(codec: JsonCodec) -> u8 {
function read_json_codec (line 555) | fn read_json_codec(byte: u8) -> Result<JsonCodec, LixError> {
function decode_json_payload (line 566) | fn decode_json_payload(
function load_json_pack_values_in_request_order (line 602) | fn load_json_pack_values_in_request_order(
function load_json_pack_values (line 632) | fn load_json_pack_values(
function json_pack_layout (line 650) | fn json_pack_layout(bytes: &[u8]) -> Result<JsonPackLayout, LixError> {
function json_pack_entry (line 698) | fn json_pack_entry<'a>(
function json_roundtrips_raw_payload (line 770) | async fn json_roundtrips_raw_payload() {
function json_batch_load_roundtrips_in_request_order (line 805) | async fn json_batch_load_roundtrips_in_request_order() {
function verified_batch_load_rejects_hash_mismatch (line 854) | async fn verified_batch_load_rejects_hash_mismatch() {
function verified_pack_load_checks_only_requested_entries (line 903) | async fn verified_pack_load_checks_only_requested_entries() {
function json_pack_directory_uses_compact_u32_fields (line 961) | fn json_pack_directory_uses_compact_u32_fields() {
function json_pack_u32_rejects_oversized_directory_fields (line 975) | fn json_pack_u32_rejects_oversized_directory_fields() {
function ordered_pack_load_fast_path_requires_exact_pack_order (line 985) | fn ordered_pack_load_fast_path_requires_exact_pack_order() {
function pack_batch_load_falls_back_for_unordered_refs (line 1020) | async fn pack_batch_load_falls_back_for_unordered_refs() {
function ordered_pack_probe_falls_back_to_direct_rows (line 1066) | async fn ordered_pack_probe_falls_back_to_direct_rows() {
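
store.rs frames direct JSON payloads with a magic string, a codec byte, and eight more header bytes (`STORED_JSON_HEADER_LEN = STORED_JSON_MAGIC.len() + 1 + 8`). A sketch of such framing; treating the 8-byte field as a little-endian uncompressed length is an assumption:

```rust
// Sketch: magic || codec byte || u64 length || body, matching the header
// arithmetic in the listing. Field semantics are assumed.
const MAGIC: &[u8] = b"lix-json:v1";
const HEADER_LEN: usize = MAGIC.len() + 1 + 8;

#[derive(Debug, Clone, Copy, PartialEq)]
enum JsonCodec {
    Raw = 0,
    Zstd = 1,
}

fn encode_stored(codec: JsonCodec, uncompressed_len: u64, body: &[u8]) -> Vec<u8> {
    let mut out = Vec::with_capacity(HEADER_LEN + body.len());
    out.extend_from_slice(MAGIC);
    out.push(codec as u8);
    out.extend_from_slice(&uncompressed_len.to_le_bytes());
    out.extend_from_slice(body);
    out
}

fn decode_stored(bytes: &[u8]) -> Result<(JsonCodec, u64, &[u8]), String> {
    if bytes.len() < HEADER_LEN || &bytes[..MAGIC.len()] != MAGIC {
        return Err("bad magic or truncated header".into());
    }
    let codec = match bytes[MAGIC.len()] {
        0 => JsonCodec::Raw,
        1 => JsonCodec::Zstd,
        other => return Err(format!("unknown codec byte {other}")),
    };
    let mut len = [0u8; 8];
    len.copy_from_slice(&bytes[MAGIC.len() + 1..HEADER_LEN]);
    Ok((codec, u64::from_le_bytes(len), &bytes[HEADER_LEN..]))
}

fn main() {
    let body = br#"{"a":1}"#;
    let stored = encode_stored(JsonCodec::Raw, body.len() as u64, body);
    let (codec, len, payload) = decode_stored(&stored).unwrap();
    assert_eq!(codec, JsonCodec::Raw);
    assert_eq!(len, body.len() as u64);
    assert_eq!(payload, body.as_slice());
}
```
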
FILE: packages/engine/src/json_store/types.rs
type NormalizedJson (line 6) | pub(crate) struct NormalizedJson(Arc<str>);
method from_arc_unchecked (line 9) | pub(crate) fn from_arc_unchecked(normalized: Arc<str>) -> Self {
method from_value (line 13) | pub(crate) fn from_value(value: &serde_json::Value, context: &str) -> ...
method as_str (line 25) | pub(crate) fn as_str(&self) -> &str {
method as_bytes (line 29) | pub(crate) fn as_bytes(&self) -> &[u8] {
type JsonRef (line 35) | pub(crate) struct JsonRef {
method from_hash (line 40) | pub(crate) fn from_hash(hash: blake3::Hash) -> Self {
method from_hash_bytes (line 46) | pub(crate) fn from_hash_bytes(hash: [u8; 32]) -> Self {
method for_content (line 50) | pub(crate) fn for_content(bytes: &[u8]) -> Self {
method as_hash_bytes (line 54) | pub(crate) fn as_hash_bytes(&self) -> &[u8] {
method as_hash_array (line 58) | pub(crate) fn as_hash_array(&self) -> &[u8; 32] {
method to_hex (line 62) | pub(crate) fn to_hex(&self) -> String {
type NormalizedJsonRef (line 68) | pub(crate) struct NormalizedJsonRef<'a> {
function new (line 74) | pub(crate) fn new(normalized: &'a str) -> Self {
function trusted_prehashed (line 84) | pub(crate) fn trusted_prehashed(normalized: &'a str, json_ref: JsonRef) ...
function normalized (line 91) | pub(crate) fn normalized(&self) -> &'a str {
function trusted_json_ref (line 95) | pub(crate) fn trusted_json_ref(&self) -> Option<JsonRef> {
function from (line 101) | fn from(value: &'a NormalizedJson) -> Self {
type JsonWritePlacementRef (line 107) | pub(crate) enum JsonWritePlacementRef<'a> {
type JsonReadScopeRef (line 113) | pub(crate) enum JsonReadScopeRef<'a> {
type JsonLoadRequestRef (line 122) | pub(crate) struct JsonLoadRequestRef<'a> {
type JsonProjectionLoadRequestRef (line 128) | pub(crate) struct JsonProjectionLoadRequestRef<'a> {
type JsonLoadBatch (line 135) | pub(crate) struct JsonLoadBatch {
method new (line 140) | pub(crate) fn new(values: Vec<Option<Vec<u8>>>) -> Self {
method values (line 144) | pub(crate) fn values(&self) -> &[Option<Vec<u8>>] {
method into_values (line 148) | pub(crate) fn into_values(self) -> Vec<Option<Vec<u8>>> {
type JsonValueBatch (line 154) | pub(crate) struct JsonValueBatch {
method new (line 159) | pub(crate) fn new(values: Vec<Option<serde_json::Value>>) -> Self {
method values (line 163) | pub(crate) fn values(&self) -> &[Option<serde_json::Value>] {
method into_values (line 167) | pub(crate) fn into_values(self) -> Vec<Option<serde_json::Value>> {
type JsonProjectionPath (line 173) | pub(crate) struct JsonProjectionPath(String);
method new (line 176) | pub(crate) fn new(pointer: impl Into<String>) -> Self {
method as_str (line 180) | pub(crate) fn as_str(&self) -> &str {
type JsonProjection (line 186) | pub(crate) struct JsonProjection {
method new (line 191) | pub(crate) fn new(values: Vec<Option<serde_json::Value>>) -> Self {
method values (line 195) | pub(crate) fn values(&self) -> &[Option<serde_json::Value>] {
type JsonProjectionBatch (line 201) | pub(crate) struct JsonProjectionBatch {
method new (line 206) | pub(crate) fn new(values: Vec<Option<JsonProjection>>) -> Self {
method values (line 210) | pub(crate) fn values(&self) -> &[Option<JsonProjection>] {
method into_values (line 214) | pub(crate) fn into_values(self) -> Vec<Option<JsonProjection>> {
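
types.rs builds `JsonRef` from a `blake3::Hash` over content bytes, which makes the ref a pure function of the normalized JSON. A minimal content-addressing sketch with the blake3 crate (`blake3 = "1"`); the struct shape mirrors the listing but is simplified:

```rust
// Sketch: a 32-byte blake3 content address for normalized JSON bytes.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct JsonRef {
    hash: [u8; 32],
}

impl JsonRef {
    fn for_content(bytes: &[u8]) -> Self {
        Self { hash: *blake3::hash(bytes).as_bytes() }
    }

    fn to_hex(&self) -> String {
        self.hash.iter().map(|b| format!("{b:02x}")).collect()
    }
}

fn main() {
    // Identical normalized JSON hashes to the same ref; any byte change
    // (even whitespace) produces a different one, which is why payloads
    // are normalized before hashing.
    let a = JsonRef::for_content(br#"{"a":1}"#);
    let b = JsonRef::for_content(br#"{"a":1}"#);
    let c = JsonRef::for_content(br#"{ "a": 1 }"#);
    assert_eq!(a, b);
    assert_ne!(a, c);
    assert_eq!(a.to_hex().len(), 64);
}
```
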
FILE: packages/engine/src/lib.rs
constant GLOBAL_VERSION_ID (line 62) | pub(crate) const GLOBAL_VERSION_ID: &str = "global";
FILE: packages/engine/src/live_state/context.rs
constant COMMIT_SCHEMA_KEY (line 23) | const COMMIT_SCHEMA_KEY: &str = "lix_commit";
constant COMMIT_EDGE_SCHEMA_KEY (line 24) | const COMMIT_EDGE_SCHEMA_KEY: &str = "lix_commit_edge";
type LiveStateContext (line 31) | pub(crate) struct LiveStateContext {
method new (line 38) | pub(crate) fn new(
method reader (line 51) | pub(crate) fn reader<S>(&self, store: S) -> LiveStateStoreReader<S>
type LiveStateStoreReader (line 65) | pub(crate) struct LiveStateStoreReader<S> {
function scan_rows (line 76) | pub(crate) async fn scan_rows(
function load_row (line 146) | pub(crate) async fn load_row(
method scan_rows (line 240) | async fn scan_rows(
method load_row (line 247) | async fn load_row(
function scan_commit_derived_rows (line 255) | async fn scan_commit_derived_rows(
function request_may_include_commit_derived (line 301) | fn request_may_include_commit_derived(request: &LiveStateScanRequest) ->...
function is_commit_derived_only_request (line 310) | fn is_commit_derived_only_request(request: &LiveStateScanRequest) -> bool {
function is_commit_derived_schema (line 319) | fn is_commit_derived_schema(schema_key: &str) -> bool {
function schema_filter_allows (line 323) | fn schema_filter_allows(schema_keys: &[String], schema_key: &str) -> bool {
function file_filter_allows_null (line 327) | fn file_filter_allows_null(file_ids: &[NullableKeyFilter<String>]) -> bo...
function commit_row (line 334) | fn commit_row(
function commit_edge_row (line 364) | fn commit_edge_row(
function tracked_scan_request_from_live (line 398) | fn tracked_scan_request_from_live(request: &LiveStateScanRequest) -> Tra...
function untracked_scan_request_from_live (line 415) | fn untracked_scan_request_from_live(
type LiveStateScanScope (line 431) | struct LiveStateScanScope {
function scan_scope (line 436) | async fn scan_scope(
function all_version_ref_ids (line 462) | async fn all_version_ref_ids(
function load_version_ref_commit_id (line 482) | async fn load_version_ref_commit_id(
function version_ref_exists (line 515) | async fn version_ref_exists(
type TrackedRowSource (line 528) | enum TrackedRowSource {
function tracked_source_from_version_id (line 533) | fn tracked_source_from_version_id(version_id: &str) -> TrackedRowSource {
function project_tracked_row (line 541) | fn project_tracked_row(
type LiveStateLookupSource (line 564) | enum LiveStateLookupSource {
type LiveStateLookupCandidate (line 570) | struct LiveStateLookupCandidate {
function load_row_candidates (line 575) | fn load_row_candidates(request: &LiveStateRowRequest) -> Vec<LiveStateLo...
function untracked_row_request_from_live (line 603) | fn untracked_row_request_from_live(
function tracked_row_request_from_live (line 615) | fn tracked_row_request_from_live(request: &LiveStateRowRequest) -> Track...
constant COMMIT_SCHEMA_KEY (line 641) | const COMMIT_SCHEMA_KEY: &str = "lix_commit";
function live_state_context (line 643) | fn live_state_context() -> LiveStateContext {
function write_untracked_rows_to_store (line 651) | async fn write_untracked_rows_to_store(
function write_empty_commits_to_store (line 671) | async fn write_empty_commits_to_store(
function stage_materialized_live_rows (line 700) | async fn stage_materialized_live_rows(
function stage_tracked_materialized_json (line 792) | fn stage_tracked_materialized_json(
function parent_commit_id_from_test_commit_row (line 819) | fn parent_commit_id_from_test_commit_row(
function live_state_overlays_untracked_rows (line 840) | async fn live_state_overlays_untracked_rows() {
function tracked_row_is_visible_without_untracked_overlay (line 911) | async fn tracked_row_is_visible_without_untracked_overlay() {
function deleting_untracked_row_reveals_tracked_row (line 962) | async fn deleting_untracked_row_reveals_tracked_row() {
function load_row_falls_back_to_global_tracked_row_for_requested_version (line 1032) | async fn load_row_falls_back_to_global_tracked_row_for_requested_version...
function main_sees_global_row_by_reading_global_root_separately (line 1090) | async fn main_sees_global_row_by_reading_global_root_separately() {
function load_row_prefers_requested_version_over_global (line 1159) | async fn load_row_prefers_requested_version_over_global() {
function main_override_hides_global_row (line 1219) | async fn main_override_hides_global_row() {
function load_row_prefers_requested_untracked_over_requested_tracked_and_global_rows (line 1279) | async fn load_row_prefers_requested_untracked_over_requested_tracked_and...
function scan_rows_overlays_requested_version_over_global (line 1341) | async fn scan_rows_overlays_requested_version_over_global() {
function scan_rows_projects_global_row_into_requested_version (line 1400) | async fn scan_rows_projects_global_row_into_requested_version() {
function scan_rows_does_not_project_global_rows_into_missing_version (line 1457) | async fn scan_rows_does_not_project_global_rows_into_missing_version() {
function winning_tombstone_hides_row_unless_tombstones_are_included (line 1508) | async fn winning_tombstone_hides_row_unless_tombstones_are_included() {
function main_tombstone_hides_global_row (line 1567) | async fn main_tombstone_hides_global_row() {
function writer_allows_commit_fact_to_share_the_touched_version_commit_id (line 1627) | async fn writer_allows_commit_fact_to_share_the_touched_version_commit_i...
function writer_uses_first_parent_as_merge_root_base (line 1681) | async fn writer_uses_first_parent_as_merge_root_base() {
function non_global_root_does_not_store_global_rows (line 1758) | async fn non_global_root_does_not_store_global_rows() {
function load_selected_tab (line 1813) | async fn load_selected_tab(
function load_selected_tab_at (line 1828) | async fn load_selected_tab_at(
function scan_selected_tab_at (line 1844) | async fn scan_selected_tab_at(
function scan_tracked_root (line 1868) | async fn scan_tracked_root(
function tracked_row_with_commit (line 1889) | fn tracked_row_with_commit(
function tracked_row_at_with_commit (line 1897) | fn tracked_row_at_with_commit(
function tombstone_tracked_row_at_with_commit (line 1920) | fn tombstone_tracked_row_at_with_commit(
function untracked_row (line 1932) | fn untracked_row(value: &str) -> MaterializedUntrackedStateRow {
function untracked_row_at (line 1936) | fn untracked_row_at(version_id: &str, value: &str) -> MaterializedUntrac...
function version_ref_row (line 1951) | fn version_ref_row(version_id: &str, commit_id: &str) -> MaterializedUnt...
function commit_live_state_row (line 1972) | fn commit_live_state_row(commit_id: &str) -> MaterializedLiveStateRow {
function commit_live_state_row_with_parents (line 1976) | fn commit_live_state_row_with_parents(
function commit_live_state_row_with_snapshot (line 1993) | fn commit_live_state_row_with_snapshot(
function identity (line 2016) | fn identity(entity_id: &str) -> EntityIdentity {
FILE: packages/engine/src/live_state/overlay.rs
function overlay_untracked_rows (line 11) | pub(crate) fn overlay_untracked_rows(
function untracked_row_wins_for_same_identity (line 32) | fn untracked_row_wins_for_same_identity() {
function different_identities_are_preserved (line 48) | fn different_identities_are_preserved() {
function live_row (line 58) | fn live_row(value: &str, untracked: bool, change_id: Option<&str>) -> Ma...
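
overlay.rs and its tests state the merge rule directly: an untracked row wins over a tracked row with the same identity, and rows with distinct identities are all preserved. A sketch with the identity key simplified to `(schema_key, entity_id)`:

```rust
// Sketch: overlay untracked rows onto tracked rows by identity; the
// untracked side shadows tracked rows with the same key.
use std::collections::HashMap;

#[derive(Debug, Clone, PartialEq)]
struct Row {
    schema_key: String,
    entity_id: String,
    value: String,
    untracked: bool,
}

fn overlay_untracked_rows(tracked: Vec<Row>, untracked: Vec<Row>) -> Vec<Row> {
    let mut by_identity: HashMap<(String, String), Row> = HashMap::new();
    // Later inserts win, so untracked rows (chained second) shadow
    // tracked rows that share the same identity.
    for row in tracked.into_iter().chain(untracked) {
        by_identity.insert((row.schema_key.clone(), row.entity_id.clone()), row);
    }
    by_identity.into_values().collect()
}

fn row(entity_id: &str, value: &str, untracked: bool) -> Row {
    Row {
        schema_key: "lix_key_value".into(),
        entity_id: entity_id.into(),
        value: value.into(),
        untracked,
    }
}

fn main() {
    let merged = overlay_untracked_rows(
        vec![row("a", "tracked", false), row("b", "tracked", false)],
        vec![row("a", "untracked", true)],
    );
    assert_eq!(merged.len(), 2);
    let a = merged.iter().find(|r| r.entity_id == "a").unwrap();
    assert!(a.untracked && a.value == "untracked");
}
```
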
FILE: packages/engine/src/live_state/reader.rs
type LiveStateReader (line 13) | pub(crate) trait LiveStateReader: Send + Sync {
method scan_rows (line 14) | async fn scan_rows(
method load_row (line 19) | async fn load_row(
FILE: packages/engine/src/live_state/types.rs
type MaterializedLiveStateRow (line 13) | pub(crate) struct MaterializedLiveStateRow {
method from (line 30) | fn from(row: MaterializedUntrackedStateRow) -> Self {
type Error (line 50) | type Error = crate::LixError;
method try_from (line 52) | fn try_from(row: &MaterializedLiveStateRow) -> Result<Self, Self::Error> {
method from (line 88) | fn from(row: &MaterializedLiveStateRow) -> Self {
type ScanField (line 106) | pub(crate) enum ScanField {
type Bound (line 113) | pub(crate) struct Bound {
type ScanConstraint (line 120) | pub(crate) struct ScanConstraint {
type ScanOperator (line 127) | pub(crate) enum ScanOperator {
type LiveStateFilter (line 138) | pub(crate) struct LiveStateFilter {
method from (line 156) | fn from(filter: LiveStateFilter) -> Self {
type LiveStateProjection (line 168) | pub(crate) struct LiveStateProjection {
type LiveStateScanRequest (line 175) | pub(crate) struct LiveStateScanRequest {
type LiveStateRowRequest (line 186) | pub(crate) struct LiveStateRowRequest {
method from (line 194) | fn from(request: &LiveStateRowRequest) -> Self {
type LiveStateRowIdentity (line 206) | pub(crate) struct LiveStateRowIdentity {
method from_row (line 214) | pub(crate) fn from_row(row: &MaterializedLiveStateRow) -> Self {
FILE: packages/engine/src/live_state/visibility.rs
function expanded_version_ids (line 8) | pub(crate) fn expanded_version_ids(version_ids: &[String]) -> Vec<String> {
function resolve_scan_rows (line 34) | pub(crate) fn resolve_scan_rows(
function project_loaded_row (line 48) | pub(crate) fn project_loaded_row(
function project_global_rows_into_requested_versions (line 61) | fn project_global_rows_into_requested_versions(
function expands_requested_version_with_global_candidates (line 94) | fn expands_requested_version_with_global_candidates() {
function scan_projects_global_row_into_requested_version (line 106) | fn scan_projects_global_row_into_requested_version() {
function scan_prefers_requested_version_row_over_projected_global_row (line 128) | fn scan_prefers_requested_version_row_over_projected_global_row() {
function version_tombstone_hides_global_row_after_visibility_resolution (line 148) | fn version_tombstone_hides_global_row_after_visibility_resolution() {
function tombstone_can_be_returned_when_requested (line 162) | fn tombstone_can_be_returned_when_requested() {
function loaded_global_row_is_projected_into_requested_version (line 178) | fn loaded_global_row_is_projected_into_requested_version() {
function row_at (line 189) | fn row_at(
function tombstone_at (line 212) | fn tombstone_at(
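
visibility.rs expands requested version IDs with `global` and projects global rows into a requested version unless that version already defines (or tombstones) the same identity. A simplified sketch of those rules:

```rust
// Sketch: scan version V plus "global"; project each global row into V
// unless V has its own row or tombstone for that identity.
use std::collections::HashSet;

#[derive(Debug, Clone)]
struct Row {
    version_id: String,
    entity_id: String,
    tombstone: bool,
}

fn expanded_version_ids(version_ids: &[String]) -> Vec<String> {
    let mut expanded: Vec<String> = version_ids.to_vec();
    if !expanded.iter().any(|v| v == "global") {
        expanded.push("global".into());
    }
    expanded
}

fn resolve_scan_rows(requested_version: &str, rows: Vec<Row>) -> Vec<Row> {
    // Identities the requested version defines itself, including
    // tombstones: both block projection of the global row.
    let own: HashSet<String> = rows
        .iter()
        .filter(|r| r.version_id == requested_version)
        .map(|r| r.entity_id.clone())
        .collect();
    rows.into_iter()
        .filter_map(|mut r| {
            if r.version_id == requested_version {
                // Tombstones hide the row from normal scans.
                return (!r.tombstone).then_some(r);
            }
            if r.version_id == "global" && !own.contains(&r.entity_id) {
                r.version_id = requested_version.to_string();
                return (!r.tombstone).then_some(r);
            }
            None
        })
        .collect()
}

fn main() {
    let rows = vec![
        Row { version_id: "global".into(), entity_id: "a".into(), tombstone: false },
        Row { version_id: "global".into(), entity_id: "b".into(), tombstone: false },
        Row { version_id: "main".into(), entity_id: "b".into(), tombstone: true },
    ];
    assert_eq!(expanded_version_ids(&["main".into()]), vec!["main", "global"]);
    let visible = resolve_scan_rows("main", rows);
    // "a" is projected from global; "b" is hidden by main's tombstone.
    assert_eq!(visible.len(), 1);
    assert_eq!(visible[0].entity_id, "a");
    assert_eq!(visible[0].version_id, "main");
}
```
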
FILE: packages/engine/src/plugin/archive.rs
type ParsedPluginArchive (line 14) | pub(crate) struct ParsedPluginArchive {
function parse_plugin_archive_for_install (line 19) | pub(crate) fn parse_plugin_archive_for_install(
function load_installed_plugin_from_archive_bytes (line 94) | pub(crate) fn load_installed_plugin_from_archive_bytes(
function read_archive_files_for_install (line 159) | fn read_archive_files_for_install(
function read_plugin_archive_files (line 221) | fn read_plugin_archive_files(
function normalize_archive_path_for_install (line 281) | fn normalize_archive_path_for_install(path: &str) -> Result<String, LixE...
function normalize_plugin_archive_path_for_materialization (line 354) | fn normalize_plugin_archive_path_for_materialization(path: &str) -> Resu...
function ensure_valid_plugin_wasm_for_install (line 399) | fn ensure_valid_plugin_wasm_for_install(wasm_bytes: &[u8]) -> Result<(),...
function ensure_valid_plugin_wasm_for_materialization (line 419) | fn ensure_valid_plugin_wasm_for_materialization(bytes: &[u8]) -> Result<...
function is_symlink_mode (line 434) | fn is_symlink_mode(mode: Option<u32>) -> bool {
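
archive.rs normalizes and validates archive paths before install and rejects symlink entries. The exact rules are not visible in the listing; below is a sketch of the defensive checks such a normalizer typically needs (reject absolute paths, `..` traversal, and backslash separators):

```rust
// Sketch: defensive normalization of an archive-relative path. The real
// rules in archive.rs may differ.
fn normalize_archive_path(path: &str) -> Result<String, String> {
    if path.is_empty() {
        return Err("empty path".into());
    }
    if path.contains('\\') {
        return Err("backslash separators are not allowed".into());
    }
    if path.starts_with('/') {
        return Err("absolute paths are not allowed".into());
    }
    let mut parts = Vec::new();
    for part in path.split('/') {
        match part {
            "" => return Err("empty path component".into()),
            "." => continue, // harmless, drop it
            ".." => return Err("parent traversal is not allowed".into()),
            other => parts.push(other),
        }
    }
    if parts.is_empty() {
        return Err("path has no components".into());
    }
    Ok(parts.join("/"))
}

fn main() {
    assert_eq!(normalize_archive_path("./src/plugin.wasm").unwrap(), "src/plugin.wasm");
    assert!(normalize_archive_path("../escape").is_err());
    assert!(normalize_archive_path("/etc/passwd").is_err());
    assert!(normalize_archive_path("dir\\file").is_err());
}
```
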
FILE: packages/engine/src/plugin/component.rs
type CachedPluginComponent (line 9) | pub(crate) struct CachedPluginComponent {
constant APPLY_CHANGES_EXPORTS (line 14) | const APPLY_CHANGES_EXPORTS: &[&str] = &["apply-changes", "api#apply-cha...
type PluginComponentHost (line 16) | pub(crate) trait PluginComponentHost {
method plugin_component_cache (line 17) | fn plugin_component_cache(
method wasm_runtime (line 21) | fn wasm_runtime(&self) -> &Arc<dyn WasmRuntime>;
method plugin_component_cache (line 114) | fn plugin_component_cache(
method wasm_runtime (line 120) | fn wasm_runtime(&self) -> &Arc<dyn WasmRuntime> {
function load_or_init_plugin_component (line 24) | pub(crate) async fn load_or_init_plugin_component(
function apply_changes_with_plugin (line 67) | pub(crate) async fn apply_changes_with_plugin(
function invoke_apply_changes_export (line 76) | async fn invoke_apply_changes_export(
type TestHost (line 107) | struct TestHost {
type CountingRuntime (line 126) | struct CountingRuntime {
type NoopComponent (line 130) | struct NoopComponent;
method init_component (line 134) | async fn init_component(
method call (line 146) | async fn call(&self, _export: &str, _input: &[u8]) -> Result<Vec<u8>, Li...
function component_cache_reinitializes_when_same_key_wasm_changes (line 152) | async fn component_cache_reinitializes_when_same_key_wasm_changes() {
FILE: packages/engine/src/plugin/install.rs
constant REGISTERED_SCHEMA_STORAGE_SCHEMA_KEY (line 39) | const REGISTERED_SCHEMA_STORAGE_SCHEMA_KEY: &str = "lix_registered_schema";
constant FILESYSTEM_DESCRIPTOR_SCHEMA_KEY (line 40) | const FILESYSTEM_DESCRIPTOR_SCHEMA_KEY: &str = "lix_file_descriptor";
constant FILESYSTEM_BINARY_BLOB_REF_SCHEMA_KEY (line 41) | const FILESYSTEM_BINARY_BLOB_REF_SCHEMA_KEY: &str = "lix_binary_blob_ref";
type PluginInstallWriteContext (line 44) | pub(crate) struct PluginInstallWriteContext {
method new (line 53) | pub(crate) fn new(
method target_version_id (line 69) | fn target_version_id(&self) -> &str {
type PluginInstallWriteExecutor (line 75) | pub(crate) trait PluginInstallWriteExecutor {
method plugin_install_write_context (line 76) | fn plugin_install_write_context(&self) -> PluginInstallWriteContext;
method stage_prepared_write_statement (line 78) | fn stage_prepared_write_statement(&mut self, statement: WriteCommand) ...
method resolve_directory_id (line 80) | async fn resolve_directory_id(
function install_plugin_archive_with_writer (line 86) | pub(crate) async fn install_plugin_archive_with_writer(
function prepare_registered_schema_write_statement (line 94) | pub(crate) fn prepare_registered_schema_write_statement(
function install_plugin_with_writer (line 101) | async fn install_plugin_with_writer(
type RegisteredSchemaRowSpec (line 142) | struct RegisteredSchemaRowSpec {
function prepare_registered_schema_write_statement_from_schemas (line 149) | fn prepare_registered_schema_write_statement_from_schemas(
function prepare_plugin_archive_write_statement (line 208) | fn prepare_plugin_archive_write_statement(
function registered_schema_row_spec_from_json (line 263) | fn registered_schema_row_spec_from_json(
function registered_schema_planned_row (line 276) | fn registered_schema_planned_row(
function plugin_archive_file_descriptor_row (line 306) | fn plugin_archive_file_descriptor_row(
function plugin_archive_binary_blob_ref_row (line 344) | fn plugin_archive_binary_blob_ref_row(
function prepare_public_tracked_write_statement (line 390) | fn prepare_public_tracked_write_statement(
function semantic_plan_effects_from_changes (line 462) | fn semantic_plan_effects_from_changes(
function semantic_effect_markers_from_changes (line 476) | fn semantic_effect_markers_from_changes(changes: &[PublicChange]) -> Vec...
function planned_row_to_public_change (line 489) | fn planned_row_to_public_change(row: &PlannedStateRow) -> Result<PublicC...
function planned_row_text_value (line 515) | fn planned_row_text_value(row: &PlannedStateRow, key: &str) -> Option<St...
function planned_row_json_text_value (line 525) | fn planned_row_json_text_value(row: &PlannedStateRow, key: &str) -> Opti...
function semantic_idempotency_key (line 532) | fn semantic_idempotency_key(
function summarize_change (line 551) | fn summarize_change(change: &PublicChange) -> JsonValue {
function summarize_planned_row (line 565) | fn summarize_planned_row(row: &PlannedStateRow) -> JsonValue {
function require_resolved_surface (line 607) | fn require_resolved_surface(
FILE: packages/engine/src/plugin/manifest.rs
type PluginRuntime (line 15) | pub enum PluginRuntime {
method as_str (line 21) | pub fn as_str(self) -> &'static str {
method from_str (line 27) | pub fn from_str(value: &str) -> Option<Self> {
type PluginManifest (line 36) | pub struct PluginManifest {
type PluginMatch (line 49) | pub struct PluginMatch {
type PluginContentType (line 57) | pub enum PluginContentType {
type ValidatedPluginManifest (line 63) | pub struct ValidatedPluginManifest {
type DetectChangesConfig (line 69) | pub struct DetectChangesConfig {
type DetectStateContextConfig (line 75) | pub struct DetectStateContextConfig {
method includes_active_state (line 84) | pub fn includes_active_state(&self) -> bool {
method resolved_columns_or_default (line 88) | pub fn resolved_columns_or_default(&self) -> Option<Vec<StateContextCo...
type StateContextColumn (line 102) | pub enum StateContextColumn {
method default_active_state_columns (line 118) | pub const fn default_active_state_columns() -> &'static [StateContextC...
function parse_plugin_manifest_json (line 128) | pub fn parse_plugin_manifest_json(raw: &str) -> Result<ValidatedPluginMa...
function select_best_glob_match (line 160) | pub fn select_best_glob_match<'a, T, C: Copy + PartialEq>(
function glob_matches_path (line 200) | pub fn glob_matches_path(glob: &str, path: &str) -> bool {
function validate_path_glob (line 218) | fn validate_path_glob(glob: &str) -> Result<(), LixError> {
function validate_plugin_manifest_json (line 228) | fn validate_plugin_manifest_json(manifest: &JsonValue) -> Result<(), Lix...
function glob_specificity_rank (line 242) | fn glob_specificity_rank(glob: &str) -> (u8, i32) {
function glob_specificity_score (line 250) | fn glob_specificity_score(glob: &str) -> i32 {
function is_catch_all_glob (line 262) | fn is_catch_all_glob(glob: &str) -> bool {
function plugin_manifest_validator (line 266) | fn plugin_manifest_validator() -> Result<&'static JSONSchema, LixError> {
function plugin_manifest_schema (line 299) | fn plugin_manifest_schema() -> &'static JsonValue {
function format_validation_errors (line 306) | fn format_validation_errors<'a>(
function resolved_columns_returns_none_when_active_state_is_not_enabled (line 333) | fn resolved_columns_returns_none_when_active_state_is_not_enabled() {
function resolved_columns_uses_defaults_when_columns_are_omitted (line 343) | fn resolved_columns_uses_defaults_when_columns_are_omitted() {
function resolved_columns_uses_explicit_column_selection (line 356) | fn resolved_columns_uses_explicit_column_selection() {
function parses_valid_manifest (line 375) | fn parses_valid_manifest() {
function rejects_invalid_manifest (line 394) | fn rejects_invalid_manifest() {
function rejects_invalid_path_glob (line 411) | fn rejects_invalid_path_glob() {
function parses_manifest_with_content_type_match_filter (line 428) | fn parses_manifest_with_content_type_match_filter() {
function parses_manifest_with_active_state_columns (line 448) | fn parses_manifest_with_active_state_columns() {
function parses_manifest_with_active_state_and_default_columns (line 486) | fn parses_manifest_with_active_state_and_default_columns() {
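
manifest.rs matches plugin path globs and ranks them by specificity, with catch-all globs last. A sketch assuming `*` matches within one path segment and `**` spans segments; the real specificity ranking may weigh things differently:

```rust
// Sketch: segment-wise glob matching plus a simple specificity score
// (more literal characters = more specific; catch-alls sort last).
fn glob_matches_path(glob: &str, path: &str) -> bool {
    let g: Vec<&str> = glob.split('/').collect();
    let p: Vec<&str> = path.split('/').collect();
    match_segments(&g, &p)
}

fn match_segments(g: &[&str], p: &[&str]) -> bool {
    match g.first() {
        None => p.is_empty(),
        // `**` consumes zero or more whole segments.
        Some(&"**") => (0..=p.len()).any(|skip| match_segments(&g[1..], &p[skip..])),
        Some(seg) => {
            !p.is_empty() && match_segment(seg, p[0]) && match_segments(&g[1..], &p[1..])
        }
    }
}

fn match_segment(pattern: &str, text: &str) -> bool {
    let p: Vec<char> = pattern.chars().collect();
    let t: Vec<char> = text.chars().collect();
    match_chars(&p, &t)
}

fn match_chars(p: &[char], t: &[char]) -> bool {
    match p.first() {
        None => t.is_empty(),
        // `*` consumes any run of characters within the segment.
        Some('*') => (0..=t.len()).any(|skip| match_chars(&p[1..], &t[skip..])),
        Some(c) => t.first() == Some(c) && match_chars(&p[1..], &t[1..]),
    }
}

/// Catch-alls lose every tie-break; otherwise more literal chars win.
fn specificity(glob: &str) -> (bool, usize) {
    let is_catch_all = matches!(glob, "**" | "**/*" | "*");
    let literal_chars = glob.chars().filter(|c| *c != '*').count();
    (!is_catch_all, literal_chars)
}

fn main() {
    assert!(glob_matches_path("**/*.md", "docs/guide/intro.md"));
    assert!(!glob_matches_path("*.md", "docs/intro.md"));
    // The more specific glob wins the tie-break.
    assert!(specificity("docs/*.md") > specificity("**/*.md"));
}
```
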
FILE: packages/engine/src/plugin/materializer.rs
type InstalledPlugin (line 17) | pub struct InstalledPlugin {
type FilesystemPluginMaterializer (line 29) | pub trait FilesystemPluginMaterializer {
method load_installed_plugins (line 30) | async fn load_installed_plugins(&self) -> Result<Vec<InstalledPlugin>,...
method apply_plugin_changes (line 32) | async fn apply_plugin_changes(
method load_installed_plugins (line 191) | async fn load_installed_plugins(&self) -> Result<Vec<InstalledPlugin>,...
method apply_plugin_changes (line 195) | async fn apply_plugin_changes(
type PluginMaterializationHost (line 39) | pub(crate) trait PluginMaterializationHost: PluginComponentHost {
method plugin_backend (line 40) | fn plugin_backend(&self) -> &Arc<dyn Backend + Send + Sync>;
method installed_plugins_cache (line 42) | fn installed_plugins_cache(&self) -> &RwLock<Option<Vec<InstalledPlugi...
function load_installed_plugins_with_runtime_cache (line 45) | pub(crate) async fn load_installed_plugins_with_runtime_cache(
function load_installed_plugins_from_backend (line 76) | pub(crate) async fn load_installed_plugins_from_backend(
function load_installed_plugins_from_backend_state (line 82) | pub(crate) async fn load_installed_plugins_from_backend_state(
function load_installed_plugin_from_archive_ref_with_backend (line 95) | pub(crate) async fn load_installed_plugin_from_archive_ref_with_backend(
function list_installed_plugin_manifest_keys (line 143) | pub(crate) async fn list_installed_plugin_manifest_keys(
function installed_plugin_manifest_key_exists (line 154) | pub(crate) async fn installed_plugin_manifest_key_exists(
function invalidate_installed_plugins_cache (line 163) | pub(crate) fn invalidate_installed_plugins_cache(
type InstalledPluginLookupBackend (line 226) | struct InstalledPluginLookupBackend {
Condensed preview — 493 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (6,400K chars).
[
{
"path": ".gitattributes",
"chars": 177,
"preview": "pnpm-lock.yaml merge=ours\n# automatically normalize line endings in text files to be line feed \n# https://github.com/opr"
},
{
"path": ".gitignore",
"chars": 7518,
"preview": "### inlang ###\n\n# .devcontainer.json\n.pnpm-store\n\n# **/out\nexamples/svelte/package-lock.json\nexamples/sveltekit/package-"
},
{
"path": ".infisical.json",
"chars": 129,
"preview": "{\n \"workspaceId\": \"6e0353e4-b0b0-4c6d-a338-38f09cfafa22\",\n \"defaultEnvironment\": \"\",\n \"gitBranchToEnvironmentMapping\""
},
{
"path": ".prettierignore",
"chars": 445,
"preview": "## adding the copied sources from the markdown plugin to be able to see changes since copy..\npackages/md-app/src/compone"
},
{
"path": "CONTRIBUTING.md",
"chars": 1137,
"preview": "# Contributing\n\n## Prerequisites\n\n- [Node.js](https://nodejs.org/en/) (v20 or higher)\n- [pnpm](https://pnpm.io/) (v8 or "
},
{
"path": "Cargo.toml",
"chars": 377,
"preview": "[workspace]\nresolver = \"2\"\nmembers = [\n \"benchmarks/git-compare\",\n \"benchmarks/10k-entities\",\n \"benchmarks/engine2-js"
},
{
"path": "README.md",
"chars": 7065,
"preview": "<p align=\"center\">\n <img src=\"https://raw.githubusercontent.com/opral/lix/main/assets/logo.svg\" alt=\"Lix\" height=\"60\">\n"
},
{
"path": "benchmarks/10k-entities/Cargo.toml",
"chars": 604,
"preview": "[package]\nname = \"ten_k_entities_benchmark\"\nversion = \"0.1.0\"\nedition = \"2021\"\npublish = false\n\n[dependencies]\nasync-tra"
},
{
"path": "benchmarks/10k-entities/README.md",
"chars": 2259,
"preview": "# 10k Entities Benchmark\n\nThis benchmark compares two engine paths for the same logical JSON document:\n\n1. File write: i"
},
{
"path": "benchmarks/10k-entities/src/main.rs",
"chars": 38968,
"preview": "use clap::Parser;\nuse lix_engine::wasm::WasmRuntime;\nuse lix_engine::{boot, BootArgs, ExecuteOptions, LixError, Session,"
},
{
"path": "benchmarks/10k-entities/src/sqlite_backend.rs",
"chars": 9279,
"preview": "use std::path::Path;\nuse std::str::FromStr;\nuse std::sync::Arc;\n\nuse lix_engine::{\n collapse_prepared_batch_for_diale"
},
{
"path": "benchmarks/10k-entities/src/wasmtime_runtime.rs",
"chars": 12356,
"preview": "use std::collections::HashMap;\nuse std::hash::{DefaultHasher, Hash, Hasher};\nuse std::sync::{Arc, Mutex};\n\nuse async_tra"
},
{
"path": "benchmarks/engine2-json-pointer/Cargo.toml",
"chars": 405,
"preview": "[package]\nname = \"engine2_json_pointer_benchmark\"\nversion = \"0.1.0\"\nedition = \"2021\"\npublish = false\n\n[dependencies]\nasy"
},
{
"path": "benchmarks/engine2-json-pointer/README.md",
"chars": 849,
"preview": "# Engine2 JSON Pointer Benchmark\n\nThis benchmark exercises engine2 end to end on a fresh on-disk SQLite-backed KV\nstore."
},
{
"path": "benchmarks/engine2-json-pointer/src/main.rs",
"chars": 12670,
"preview": "use clap::Parser;\nuse lix_rs_sdk::{open_lix, ExecuteResult, Lix, LixError, OpenLixOptions, Value};\nuse serde::Serialize;"
},
{
"path": "benchmarks/engine2-json-pointer/src/sqlite_backend.rs",
"chars": 8094,
"preview": "use async_trait::async_trait;\nuse lix_rs_sdk::{\n KvPair, KvScanRange, LixBackend, LixBackendTransaction, LixError, Tr"
},
{
"path": "benchmarks/git-compare/Cargo.toml",
"chars": 340,
"preview": "[package]\nname = \"git_compare_benchmark\"\nversion = \"0.1.0\"\nedition = \"2021\"\npublish = false\n\n[dependencies]\nclap = { ver"
},
{
"path": "benchmarks/git-compare/README.md",
"chars": 2162,
"preview": "# Git Compare Benchmark\n\nThis benchmark answers a narrower question than `exp git-replay`:\n\n- a repo already exists\n- a "
},
{
"path": "benchmarks/git-compare/src/main.rs",
"chars": 50940,
"preview": "use clap::Parser;\nuse lix_engine::{\n boot as boot_engine, BootArgs as EngineConfig, ExecuteOptions, Session, SessionT"
},
{
"path": "blog/001-introducing-lix/index.md",
"chars": 6640,
"preview": "---\ndate: \"2026-01-20\"\nog:description: \"Lix is a version control system you import as a library. It records semantic cha"
},
{
"path": "blog/002-modeling-a-company-as-a-repository/index.md",
"chars": 4532,
"preview": "---\ndate: \"2026-02-23\"\nog:description: \"Modeling a company as a filesystem is promising for AI agents, but binary files "
},
{
"path": "blog/003-february-2026-update/index.md",
"chars": 3864,
"preview": "---\ndate: \"2026-03-04\"\nog:description: \"The Rust rewrite is complete. 33x faster file writes, lix was trending on Hacker"
},
{
"path": "blog/004-march-2026-update/index.md",
"chars": 9586,
"preview": "---\ndate: \"2026-04-03\"\nog:description: \"500 real commits replayed with no corruption bugs. Without the semantic layer, L"
},
{
"path": "blog/005-april-2026-update/index.md",
"chars": 8577,
"preview": "---\ndate: \"2026-05-11\"\nog:description: \"The new DataFusion path runs the core Lix MVP flow. April did not hit the 10k in"
},
{
"path": "blog/authors.json",
"chars": 235,
"preview": "{\n \"samuelstroschein\": {\n \"name\": \"Samuel Stroschein\",\n \"avatar\": \"https://avatars.githubusercontent.com/u/354291"
},
{
"path": "blog/table_of_contents.json",
"chars": 669,
"preview": "[\n {\n \"path\": \"./005-april-2026-update/index.md\",\n \"slug\": \"april-2026-update\",\n \"authors\": [\"samuelstroschein"
},
{
"path": "cla-signatures.json",
"chars": 21734,
"preview": "{\n \"signedContributors\": [\n {\n \"name\": \"janfjohannes\",\n \"id\": 110794494,\n \"comment_id\": 1711859828,\n "
},
{
"path": "docs/api-reference.md",
"chars": 6663,
"preview": "---\ndescription: Reference for the @lix-js/sdk public API: openLix, execute, version and merge methods, result shapes, a"
},
{
"path": "docs/backend.md",
"chars": 13070,
"preview": "---\ndescription: Lix's storage is pluggable. Implement the LixBackend interface (a synchronous, transactional, namespace"
},
{
"path": "docs/comparison-to-git.md",
"chars": 2931,
"preview": "---\ndescription: Git versions text files line-by-line. Lix versions any file format (DOCX, XLSX, CAD, etc.) semantically"
},
{
"path": "docs/getting-started.md",
"chars": 3441,
"preview": "---\ndescription: Install Lix, open an in-memory repository, register a schema, write rows, and inspect a change in under"
},
{
"path": "docs/history.md",
"chars": 6061,
"preview": "---\ndescription: Lix journals every change. Query lix_change for global per-entity history, lix_state_history for what's"
},
{
"path": "docs/lix-for-ai-agents.md",
"chars": 2798,
"preview": "---\ndescription: Route agent writes through Lix to get isolated workspaces, previewable changes, and approve-or-discard "
},
{
"path": "docs/persistence.md",
"chars": 1865,
"preview": "---\ndescription: Open Lix in memory for tests, or persist to a .lix SQLite file via the better-sqlite3 backend. For othe"
},
{
"path": "docs/schemas.md",
"chars": 10927,
"preview": "---\ndescription: Define the entity types Lix tracks for you. The x-lix-* JSON Schema extensions control the SQL table na"
},
{
"path": "docs/sql-functions.md",
"chars": 5291,
"preview": "---\ndescription: Built-in scalar SQL functions provided by the Lix engine. Covers JSON parsing and projection, ID and ti"
},
{
"path": "docs/surfaces.md",
"chars": 9681,
"preview": "---\ndescription: The SQL surfaces in Lix at a glance. State surfaces are JSON-shaped and schema-agnostic; per-entity, fi"
},
{
"path": "docs/table_of_contents.json",
"chars": 829,
"preview": "{\n \"Overview\": [\n { \"path\": \"./what-is-lix.md\", \"label\": \"What is Lix?\" },\n { \"path\": \"./getting-started.md\", \"la"
},
{
"path": "docs/versions.md",
"chars": 5279,
"preview": "---\ndescription: Versions are isolated lines of state. Create them, switch into them, read across them with _by_version "
},
{
"path": "docs/what-is-lix.md",
"chars": 4716,
"preview": "---\ndescription: Lix is an embeddable version control system for files of any format. Diffs are semantic and per entity "
},
{
"path": "nx.json",
"chars": 1088,
"preview": "{\n\t\"$schema\": \"./node_modules/nx/schemas/nx-schema.json\",\n\t\"tui\": {\n\t\t\"autoExit\": true\n\t},\n\t\"namedInputs\": {\n\t\t\"default\""
},
{
"path": "optimization_log6_crud.md",
"chars": 4276,
"preview": "# Optimization Log 6: JSON Pointer CRUD\n\nGoal: make typed-table JSON pointer CRUD fast enough that Lix behaves like a\nno"
},
{
"path": "optimization_log7.md",
"chars": 27813,
"preview": "# Optimization Log 7: Physical Layout for CRUD + Branch/Merge\n\nGoal: find the optimal physical storage layout for Lix's "
},
{
"path": "optimization_log8.md",
"chars": 262055,
"preview": "# Optimization Log 8: JSON Pointer Physical Layout Decision Log\n\nGoal: nail the physical layout Lix uses for tracked log"
},
{
"path": "optimization_log9_sql2.md",
"chars": 24695,
"preview": "# Optimization Log 9: SQL2 Logical CRUD\n\nGoal: make the logical work inside `sql2` fast for an isolated JSON-pointer\nCRU"
},
{
"path": "package.json",
"chars": 1187,
"preview": "{\n \"private\": true,\n \"name\": \"monorepo\",\n \"type\": \"module\",\n \"scripts\": {\n \"build\": \"pnpm exec nx run-many --nx-b"
},
{
"path": "packages/cli/Cargo.toml",
"chars": 410,
"preview": "[package]\nname = \"lix_cli\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[[bin]]\nname = \"lix\"\npath = \"src/main.rs\"\n\n[dependencies]"
},
{
"path": "packages/cli/src/app/context.rs",
"chars": 134,
"preview": "use std::path::PathBuf;\n\n#[derive(Debug, Clone)]\npub struct AppContext {\n pub lix_path: Option<PathBuf>,\n pub no_h"
},
{
"path": "packages/cli/src/app/mod.rs",
"chars": 83,
"preview": "mod context;\nmod run;\nmod welcome;\n\npub use context::AppContext;\npub use run::run;\n"
},
{
"path": "packages/cli/src/app/run.rs",
"chars": 3645,
"preview": "use super::context::AppContext;\nuse super::welcome;\nuse crate::cli::root::{Cli, Command};\nuse crate::commands;\nuse crate"
},
{
"path": "packages/cli/src/app/welcome.rs",
"chars": 2739,
"preview": "use std::io::IsTerminal;\nuse std::path::{Path, PathBuf};\n\nconst CYAN: &str = \"\\x1b[38;2;8;181;214m\";\nconst RESET: &str ="
},
{
"path": "packages/cli/src/cli/exp.rs",
"chars": 1749,
"preview": "use clap::{value_parser, Args, Subcommand, ValueHint};\nuse std::path::PathBuf;\n\n#[derive(Debug, Args)]\npub struct ExpCom"
},
{
"path": "packages/cli/src/cli/init.rs",
"chars": 217,
"preview": "use clap::{Args, ValueHint};\nuse std::path::PathBuf;\n\n#[derive(Debug, Args)]\npub struct InitCommand {\n /// Path to th"
},
{
"path": "packages/cli/src/cli/mod.rs",
"chars": 99,
"preview": "pub mod exp;\npub mod init;\npub mod redo;\npub mod root;\npub mod sql;\npub mod undo;\npub mod version;\n"
},
{
"path": "packages/cli/src/cli/redo.rs",
"chars": 245,
"preview": "use clap::Args;\n\n#[derive(Debug, Args)]\npub struct RedoCommand {\n /// Override the target version by `lix_version.id`"
},
{
"path": "packages/cli/src/cli/root.rs",
"chars": 5891,
"preview": "use super::exp::ExpCommand;\nuse super::init::InitCommand;\nuse super::redo::RedoCommand;\nuse super::sql::SqlCommand;\nuse "
},
{
"path": "packages/cli/src/cli/sql.rs",
"chars": 1296,
"preview": "use clap::{Args, Subcommand, ValueEnum};\n\n#[derive(Debug, Args)]\npub struct SqlCommand {\n #[command(subcommand)]\n "
},
{
"path": "packages/cli/src/cli/undo.rs",
"chars": 245,
"preview": "use clap::Args;\n\n#[derive(Debug, Args)]\npub struct UndoCommand {\n /// Override the target version by `lix_version.id`"
},
{
"path": "packages/cli/src/cli/version.rs",
"chars": 2330,
"preview": "use clap::{Args, Subcommand};\n\n#[derive(Debug, Args)]\npub struct VersionCommand {\n #[command(subcommand)]\n pub com"
},
{
"path": "packages/cli/src/commands/exp/git_replay.rs",
"chars": 48457,
"preview": "use crate::cli::exp::ExpGitReplayArgs;\nuse crate::db;\nuse crate::error::CliError;\nuse lix_rs_sdk::{Lix, Value};\nuse serd"
},
{
"path": "packages/cli/src/commands/exp/mod.rs",
"chars": 414,
"preview": "mod git_replay;\n\nuse crate::app::AppContext;\nuse crate::cli::exp::{ExpCommand, ExpSubcommand};\nuse crate::error::CliErro"
},
{
"path": "packages/cli/src/commands/init.rs",
"chars": 475,
"preview": "use crate::cli::init::InitCommand;\nuse crate::db;\nuse crate::error::CliError;\nuse crate::hints::{self, CommandOutput};\n\n"
},
{
"path": "packages/cli/src/commands/mod.rs",
"chars": 85,
"preview": "pub mod exp;\npub mod init;\npub mod redo;\npub mod sql;\npub mod undo;\npub mod version;\n"
},
{
"path": "packages/cli/src/commands/redo.rs",
"chars": 314,
"preview": "use crate::app::AppContext;\nuse crate::cli::redo::RedoCommand;\nuse crate::error::CliError;\nuse crate::hints::CommandOutp"
},
{
"path": "packages/cli/src/commands/sql/execute.rs",
"chars": 8916,
"preview": "use crate::app::AppContext;\nuse crate::cli::sql::{SqlExecuteArgs, SqlOutputFormat};\nuse crate::db;\nuse crate::error::Cli"
},
{
"path": "packages/cli/src/commands/sql/mod.rs",
"chars": 350,
"preview": "mod execute;\n\nuse crate::app::AppContext;\nuse crate::cli::sql::{SqlCommand, SqlSubcommand};\nuse crate::error::CliError;\n"
},
{
"path": "packages/cli/src/commands/undo.rs",
"chars": 314,
"preview": "use crate::app::AppContext;\nuse crate::cli::undo::UndoCommand;\nuse crate::error::CliError;\nuse crate::hints::CommandOutp"
},
{
"path": "packages/cli/src/commands/version/create.rs",
"chars": 3527,
"preview": "use crate::app::AppContext;\nuse crate::cli::version::CreateVersionCommand;\nuse crate::commands::version::{\n resolve_a"
},
{
"path": "packages/cli/src/commands/version/merge.rs",
"chars": 2722,
"preview": "use crate::app::AppContext;\nuse crate::cli::version::MergeVersionCommand;\nuse crate::commands::version::{resolve_version"
},
{
"path": "packages/cli/src/commands/version/mod.rs",
"chars": 11567,
"preview": "mod create;\nmod merge;\nmod switch;\n\nuse crate::app::AppContext;\nuse crate::cli::version::{VersionCommand, VersionSubcomm"
},
{
"path": "packages/cli/src/commands/version/switch.rs",
"chars": 1202,
"preview": "use crate::app::AppContext;\nuse crate::cli::version::SwitchVersionCommand;\nuse crate::commands::version::{resolve_versio"
},
{
"path": "packages/cli/src/db/mod.rs",
"chars": 13487,
"preview": "use crate::app::AppContext;\nuse crate::error::CliError;\nuse async_trait::async_trait;\nuse base64::Engine as _;\nuse lix_r"
},
{
"path": "packages/cli/src/error.rs",
"chars": 2669,
"preview": "use lix_rs_sdk::LixError;\nuse std::fmt::{Display, Formatter};\n\n#[derive(Debug)]\npub enum CliError {\n InvalidArgs(&'st"
},
{
"path": "packages/cli/src/hints.rs",
"chars": 3176,
"preview": "use crate::error::CliError;\nuse lix_rs_sdk::{ExecuteResult, Lix, Value};\n\n#[derive(Debug)]\npub struct CommandOutput {\n "
},
{
"path": "packages/cli/src/lib.rs",
"chars": 166,
"preview": "pub mod app;\npub mod cli;\npub mod commands;\npub mod db;\npub mod error;\npub mod hints;\npub mod output;\n\npub fn run() -> R"
},
{
"path": "packages/cli/src/main.rs",
"chars": 84,
"preview": "fn main() {\n if lix_cli::run().is_err() {\n std::process::exit(1);\n }\n}\n"
},
{
"path": "packages/cli/src/output/mod.rs",
"chars": 5207,
"preview": "use base64::Engine as _;\nuse comfy_table::{presets::UTF8_BORDERS_ONLY, Cell, ContentArrangement, Row, Table};\nuse lix_rs"
},
{
"path": "packages/engine/.gitignore",
"chars": 222,
"preview": "benches/results/\n\n# local rust build output when invoked from this package\ntarget/\n\n# criterion benchmark output\ncriteri"
},
{
"path": "packages/engine/AGENTS.md",
"chars": 119,
"preview": "## Lix Engine\n\n- testing with sqlite simulation is enough for development. before committing, test the all simulations\n"
},
{
"path": "packages/engine/Cargo.toml",
"chars": 2338,
"preview": "[package]\nname = \"lix_engine\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[features]\nstorage-benches = []\n\n[[bench]]\nname = \"sto"
},
{
"path": "packages/engine/benches/fixtures/pnpm-lock.fixture.json",
"chars": 392313,
"preview": "{\"lockfileVersion\":\"9.0\",\"settings\":{\"autoInstallPeers\":true,\"excludeLinksFromLockfile\":false},\"importers\":{\".\":{\"devDep"
},
{
"path": "packages/engine/benches/json_pointer_crud/main.rs",
"chars": 40742,
"preview": "use std::sync::Arc;\nuse std::time::Duration;\n\nuse criterion::{black_box, criterion_group, criterion_main, BatchSize, Cri"
},
{
"path": "packages/engine/benches/json_pointer_physical/main.rs",
"chars": 29120,
"preview": "use std::sync::Arc;\nuse std::time::Duration;\n\nuse criterion::{black_box, criterion_group, criterion_main, BatchSize, Cri"
},
{
"path": "packages/engine/benches/optimization9_sql2/json_pointer.schema.json",
"chars": 648,
"preview": "{\n \"x-lix-key\": \"json_pointer\",\n \"x-lix-primary-key\": [\n \"/path\"\n ],\n \"type\": \"object\",\n \"properties\": {\n \"pa"
},
{
"path": "packages/engine/benches/optimization9_sql2/main.rs",
"chars": 21326,
"preview": "use std::time::Duration;\n\nuse criterion::{black_box, criterion_group, criterion_main, BatchSize, Criterion};\nuse lix_eng"
},
{
"path": "packages/engine/benches/optimization9_sql2/pnpm-lock.fixture.json",
"chars": 392313,
"preview": "{\"lockfileVersion\":\"9.0\",\"settings\":{\"autoInstallPeers\":true,\"excludeLinksFromLockfile\":false},\"importers\":{\".\":{\"devDep"
},
{
"path": "packages/engine/benches/physical_layout/backend_kv.rs",
"chars": 8815,
"preview": "use std::sync::Arc;\n\nuse criterion::{black_box, BatchSize, Criterion};\nuse lix_engine::storage_bench::{self, StorageBenc"
},
{
"path": "packages/engine/benches/physical_layout/changelog.rs",
"chars": 9779,
"preview": "use std::sync::Arc;\nuse std::time::Duration;\n\nuse criterion::{black_box, BatchSize, Criterion};\nuse lix_engine::storage_"
},
{
"path": "packages/engine/benches/physical_layout/json_store.rs",
"chars": 9014,
"preview": "use std::sync::Arc;\n\nuse criterion::{black_box, BatchSize, Criterion};\nuse lix_engine::storage_bench::{\n self, JsonSt"
},
{
"path": "packages/engine/benches/physical_layout/main.rs",
"chars": 1970,
"preview": "use criterion::{criterion_group, criterion_main, Criterion};\nuse lix_engine::storage_bench::{\n StorageBenchConfig, St"
},
{
"path": "packages/engine/benches/physical_layout/tracked_state.rs",
"chars": 20651,
"preview": "use std::sync::Arc;\nuse std::time::Duration;\n\nuse criterion::{black_box, BatchSize, Criterion};\nuse lix_engine::storage_"
},
{
"path": "packages/engine/benches/physical_layout/workflow.rs",
"chars": 25278,
"preview": "use std::sync::Arc;\nuse std::time::Duration;\n\nuse criterion::{black_box, BatchSize, Criterion};\nuse lix_engine::storage_"
},
{
"path": "packages/engine/benches/storage/README.md",
"chars": 6831,
"preview": "# Engine Storage Benchmarks\n\nThese Criterion benchmarks measure engine-owned storage layers directly,\nwithout going thro"
},
{
"path": "packages/engine/benches/storage/backend.rs",
"chars": 9880,
"preview": "use async_trait::async_trait;\nuse lix_engine::{\n Backend, BackendKvEntryPage, BackendKvExistsBatch, BackendKvExistsGr"
},
{
"path": "packages/engine/benches/storage/binary_cas.rs",
"chars": 6768,
"preview": "use lix_engine::storage_bench::{self, StorageBenchConfig};\n\nuse crate::{Args, BenchBackend};\nuse criterion::{black_box, "
},
{
"path": "packages/engine/benches/storage/changelog.rs",
"chars": 19364,
"preview": "use lix_engine::storage_bench::{\n self, StorageBenchConfig, StorageBenchKeyPattern, StorageBenchSelectivity,\n};\n\nuse "
},
{
"path": "packages/engine/benches/storage/commit_graph.rs",
"chars": 1459,
"preview": "use lix_engine::storage_bench::{self, StorageBenchConfig};\n\nuse crate::{Args, BenchBackend};\nuse criterion::{black_box, "
},
{
"path": "packages/engine/benches/storage/json_store.rs",
"chars": 9249,
"preview": "use lix_engine::storage_bench::{\n self, JsonStorePayloadShape, JsonStoreProjectionShape, JsonStoreReadFixture,\n};\n\nus"
},
{
"path": "packages/engine/benches/storage/main.rs",
"chars": 2088,
"preview": "use criterion::{criterion_group, criterion_main, Criterion};\nuse lix_engine::storage_bench::{\n StorageBenchConfig, St"
},
{
"path": "packages/engine/benches/storage/rocksdb_backend.rs",
"chars": 22821,
"preview": "use std::collections::{BTreeMap, BTreeSet};\nuse std::path::Path;\nuse std::sync::Arc;\n\nuse async_trait::async_trait;\nuse "
},
{
"path": "packages/engine/benches/storage/sqlite_backend.rs",
"chars": 16050,
"preview": "use std::sync::{Arc, Mutex};\n\nuse async_trait::async_trait;\nuse lix_engine::{\n Backend, BackendKvEntryPage, BackendKv"
},
{
"path": "packages/engine/benches/storage/storage_api.rs",
"chars": 15585,
"preview": "use std::sync::Arc;\n\nuse criterion::{black_box, BatchSize, Criterion};\nuse lix_engine::storage_bench::{self, StorageApiF"
},
{
"path": "packages/engine/benches/storage/tracked_state.rs",
"chars": 37318,
"preview": "use lix_engine::storage_bench::{\n self, StorageBenchConfig, StorageBenchKeyPattern, StorageBenchSelectivity,\n Stor"
},
{
"path": "packages/engine/benches/storage/untracked_state.rs",
"chars": 17200,
"preview": "use lix_engine::storage_bench::{\n self, StorageBenchConfig, StorageBenchKeyPattern, StorageBenchSelectivity,\n Stor"
},
{
"path": "packages/engine/benches/transaction/main.rs",
"chars": 43612,
"preview": "use async_trait::async_trait;\nuse criterion::{black_box, criterion_group, criterion_main, BatchSize, Criterion};\nuse lix"
},
{
"path": "packages/engine/src/backend/kv.rs",
"chars": 8646,
"preview": "#[derive(Debug, Clone, PartialEq, Eq, Default)]\npub struct BytePage {\n bytes: Vec<u8>,\n offsets: Vec<u32>,\n}\n\nimpl"
},
{
"path": "packages/engine/src/backend/mod.rs",
"chars": 481,
"preview": "mod kv;\n#[cfg(test)]\npub(crate) mod testing;\nmod types;\n\npub use kv::{\n BackendKvEntryPage, BackendKvExistsBatch, Bac"
},
{
"path": "packages/engine/src/backend/testing.rs",
"chars": 21829,
"preview": "use std::collections::BTreeMap;\nuse std::sync::{Arc, Mutex};\n\nuse async_trait::async_trait;\n\nuse crate::backend::{\n B"
},
{
"path": "packages/engine/src/backend/types.rs",
"chars": 3235,
"preview": "use async_trait::async_trait;\n\nuse crate::backend::{\n BackendKvEntryPage, BackendKvExistsBatch, BackendKvGetRequest, "
},
{
"path": "packages/engine/src/binary_cas/chunking.rs",
"chars": 914,
"preview": "const FASTCDC_MIN_CHUNK_BYTES: usize = 16 * 1024;\nconst FASTCDC_AVG_CHUNK_BYTES: usize = 64 * 1024;\nconst FASTCDC_MAX_CH"
},
{
"path": "packages/engine/src/binary_cas/codec.rs",
"chars": 11422,
"preview": "use crate::LixError;\n\n// Binary CAS physical rows:\n// - manifest: BCM2 | kind:u8 | blob_size:u64 | kind payload\n//"
},
{
"path": "packages/engine/src/binary_cas/context.rs",
"chars": 4089,
"preview": "use async_trait::async_trait;\n\nuse crate::binary_cas::{\n BlobBytesBatch, BlobExistsBatch, BlobHash, BlobMetadataBatch"
},
{
"path": "packages/engine/src/binary_cas/kv.rs",
"chars": 35368,
"preview": "#![allow(dead_code)]\n\nuse crate::binary_cas::chunking::fastcdc_chunk_ranges;\nuse crate::binary_cas::codec::{\n decode_"
},
{
"path": "packages/engine/src/binary_cas/mod.rs",
"chars": 281,
"preview": "mod chunking;\nmod codec;\nmod context;\npub(crate) mod kv;\nmod types;\n\npub(crate) use context::{BinaryCasContext, BlobData"
},
{
"path": "packages/engine/src/binary_cas/types.rs",
"chars": 2888,
"preview": "use crate::binary_cas::codec::{binary_blob_hash_bytes, hash_bytes_to_hex, hash_hex_to_bytes};\nuse crate::LixError;\n\n#[de"
},
{
"path": "packages/engine/src/catalog/context.rs",
"chars": 14661,
"preview": "use std::collections::BTreeMap;\n\nuse serde_json::Value as JsonValue;\n\nuse crate::catalog::SchemaCatalogFact;\nuse crate::"
},
{
"path": "packages/engine/src/catalog/mod.rs",
"chars": 286,
"preview": "mod context;\nmod schema;\nmod snapshot;\n\npub(crate) use context::CatalogContext;\npub(crate) use schema::{\n ForeignKeyP"
},
{
"path": "packages/engine/src/catalog/schema.rs",
"chars": 145,
"preview": "pub(crate) use super::snapshot::{\n ForeignKeyPlan, SchemaCatalogFact, SchemaCatalogKey, SchemaPlan, SchemaPlanId,\n "
},
{
"path": "packages/engine/src/catalog/snapshot.rs",
"chars": 40439,
"preview": "use std::{collections::BTreeMap, sync::Arc};\n\nuse jsonschema::JSONSchema;\nuse serde_json::{Map as JsonMap, Value as Json"
},
{
"path": "packages/engine/src/cel/context.rs",
"chars": 2692,
"preview": "use cel::Context;\nuse serde_json::{Map as JsonMap, Value as JsonValue};\n\nuse crate::LixError;\n\nuse super::provider::CelF"
},
{
"path": "packages/engine/src/cel/error.rs",
"chars": 608,
"preview": "use crate::LixError;\n\npub(crate) fn cel_parse_error(expression: &str, error: impl std::fmt::Display) -> LixError {\n L"
},
{
"path": "packages/engine/src/cel/mod.rs",
"chars": 149,
"preview": "mod context;\nmod error;\nmod provider;\nmod runtime;\nmod value;\n\npub(crate) use provider::CelFunctionProvider;\npub(crate) "
},
{
"path": "packages/engine/src/cel/provider.rs",
"chars": 419,
"preview": "/// Function source available to CEL expressions.\n///\n/// CEL is shared infrastructure for schema expressions. It should"
},
{
"path": "packages/engine/src/cel/runtime.rs",
"chars": 5171,
"preview": "use std::collections::HashMap;\nuse std::sync::{Arc, OnceLock, RwLock};\n\nuse cel::Program;\nuse serde_json::{Map as JsonMa"
},
{
"path": "packages/engine/src/cel/value.rs",
"chars": 1430,
"preview": "use cel::Value as CelValue;\nuse serde_json::Value as JsonValue;\n\nuse crate::LixError;\n\npub fn json_to_cel(value: &JsonVa"
},
{
"path": "packages/engine/src/commit_graph/context.rs",
"chars": 31671,
"preview": "use std::collections::BTreeSet;\n\nuse crate::commit_graph::walker::{best_common_ancestors, walk_reachable_commits};\nuse c"
},
{
"path": "packages/engine/src/commit_graph/mod.rs",
"chars": 339,
"preview": "mod context;\nmod types;\nmod walker;\n\n#[allow(unused_imports)]\npub(crate) use context::{CommitGraphContext, CommitGraphSt"
},
{
"path": "packages/engine/src/commit_graph/types.rs",
"chars": 4048,
"preview": "use crate::commit_store::{Change, LocatedChange};\nuse crate::entity_identity::EntityIdentity;\nuse crate::LixError;\n\n/// "
},
{
"path": "packages/engine/src/commit_graph/walker.rs",
"chars": 25348,
"preview": "use std::collections::{BTreeMap, BTreeSet};\n\nuse crate::commit_graph::{CommitGraphCommit, CommitGraphStoreReader, Reacha"
},
{
"path": "packages/engine/src/commit_store/codec.rs",
"chars": 32057,
"preview": "use crate::commit_store::{\n Change, ChangeLocator, ChangeLocatorRef, ChangeRef, Commit, StoredCommitRef,\n};\nuse crate"
},
{
"path": "packages/engine/src/commit_store/context.rs",
"chars": 33580,
"preview": "use crate::commit_store::{\n Change, ChangeIndexEntry, ChangeLocator, ChangeRef, ChangeScanRequest, Commit, CommitDraf"
},
{
"path": "packages/engine/src/commit_store/materialization.rs",
"chars": 2376,
"preview": "use crate::commit_store::{LocatedChange, MaterializedChange};\nuse crate::json_store::{JsonLoadRequestRef, JsonReadScopeR"
},
{
"path": "packages/engine/src/commit_store/mod.rs",
"chars": 596,
"preview": "pub(crate) mod codec;\nmod context;\nmod materialization;\npub(crate) mod storage;\nmod types;\n\n#[allow(unused_imports)]\npub"
},
{
"path": "packages/engine/src/commit_store/storage.rs",
"chars": 20433,
"preview": "use crate::commit_store::{\n Change, ChangeIndexEntry, ChangeLocator, ChangeRef, Commit, CommitDraftRef,\n StagedCom"
},
{
"path": "packages/engine/src/commit_store/types.rs",
"chars": 6769,
"preview": "use crate::entity_identity::EntityIdentity;\nuse crate::json_store::JsonRef;\n\n/// Physical append/locality unit for commi"
},
{
"path": "packages/engine/src/common/error.rs",
"chars": 11745,
"preview": "use serde_json::{json, Value as JsonValue};\n\n/// Structured error type surfaced by Lix to every SDK binding.\n///\n/// Car"
},
{
"path": "packages/engine/src/common/fingerprint.rs",
"chars": 116,
"preview": "pub(crate) fn stable_content_fingerprint_hex(data: &[u8]) -> String {\n blake3::hash(data).to_hex().to_string()\n}\n"
},
{
"path": "packages/engine/src/common/fs_path.rs",
"chars": 45307,
"preview": "//! Canonical Lix filesystem paths live in this module.\n//!\n//! Contract:\n//!\n//! - Canonical internal form is an absolu"
},
{
"path": "packages/engine/src/common/identity.rs",
"chars": 3911,
"preview": "use std::borrow::Borrow;\nuse std::fmt;\nuse std::ops::Deref;\n\nuse crate::LixError;\nuse serde::{Deserialize, Deserializer,"
},
{
"path": "packages/engine/src/common/json_pointer.rs",
"chars": 1867,
"preview": "use crate::LixError;\n\npub(crate) fn parse_json_pointer(pointer: &str) -> Result<Vec<String>, LixError> {\n if pointer."
},
{
"path": "packages/engine/src/common/metadata.rs",
"chars": 1158,
"preview": "use crate::LixError;\n\npub(crate) fn parse_row_metadata(\n value: &str,\n context: impl AsRef<str>,\n) -> Result<Strin"
},
{
"path": "packages/engine/src/common/mod.rs",
"chars": 981,
"preview": "pub(crate) mod error;\npub(crate) mod fingerprint;\npub(crate) mod fs_path;\npub(crate) mod identity;\npub(crate) mod json_p"
},
{
"path": "packages/engine/src/common/types.rs",
"chars": 2624,
"preview": "use std::ops::Deref;\n\n#[derive(Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize)]\npub enum Value {\n Null"
},
{
"path": "packages/engine/src/common/wire.rs",
"chars": 8054,
"preview": "use crate::{LixError, LixNotice, SqlQueryResult, Value};\nuse base64::Engine as _;\nuse serde::{Deserialize, Serialize};\n\n"
},
{
"path": "packages/engine/src/domain.rs",
"chars": 9425,
"preview": "use crate::entity_identity::EntityIdentity;\nuse crate::live_state::MaterializedLiveStateRow;\nuse crate::{NullableKeyFilt"
},
{
"path": "packages/engine/src/engine.rs",
"chars": 7967,
"preview": "use std::sync::Arc;\n\nuse crate::binary_cas::BinaryCasContext;\nuse crate::catalog::CatalogContext;\nuse crate::commit_grap"
},
{
"path": "packages/engine/src/entity_identity.rs",
"chars": 13116,
"preview": "use serde_json::Value as JsonValue;\n\nuse crate::common::json_pointer_get;\nuse crate::LixError;\n\n/// Logical entity ident"
},
{
"path": "packages/engine/src/functions/context.rs",
"chars": 10364,
"preview": "use crate::functions::{\n state, DeterministicFunctionProvider, DeterministicSequence, FunctionProvider,\n FunctionP"
},
{
"path": "packages/engine/src/functions/deterministic.rs",
"chars": 3680,
"preview": "use crate::functions::FunctionProvider;\n\nconst DETERMINISTIC_UUID_COUNTER_MASK: u64 = 0x0000_FFFF_FFFF_FFFF;\n\n/// Determ"
},
{
"path": "packages/engine/src/functions/mod.rs",
"chars": 604,
"preview": "//! Engine runtime function boundary.\n//!\n//! Sessions prepare one function context per execution. SQL, providers, and\n/"
},
{
"path": "packages/engine/src/functions/provider.rs",
"chars": 3308,
"preview": "use std::sync::{Arc, Mutex};\n\nuse crate::cel::CelFunctionProvider;\n\n/// Engine-owned runtime function provider trait.\npu"
},
{
"path": "packages/engine/src/functions/state.rs",
"chars": 11819,
"preview": "use serde_json::Value as JsonValue;\nuse std::sync::Arc;\n\nuse crate::entity_identity::EntityIdentity;\nuse crate::function"
},
{
"path": "packages/engine/src/functions/types.rs",
"chars": 1030,
"preview": "/// Decoded deterministic-mode setting.\n///\n/// Storage can decide where this setting lives. The type only describes the"
},
{
"path": "packages/engine/src/init.rs",
"chars": 19042,
"preview": "use crate::commit_store::{Change, CommitDraftRef, CommitStoreContext};\nuse crate::entity_identity::EntityIdentity;\nuse c"
},
{
"path": "packages/engine/src/json_store/compression.rs",
"chars": 2407,
"preview": "use crate::LixError;\n\n#[cfg(not(target_arch = \"wasm32\"))]\npub(crate) fn compress_json_payload(json_data: &[u8]) -> Resul"
},
{
"path": "packages/engine/src/json_store/context.rs",
"chars": 13701,
"preview": "use crate::json_store::store;\nuse crate::json_store::types::{\n JsonLoadBatch, JsonLoadRequestRef, JsonProjection, Jso"
},
{
"path": "packages/engine/src/json_store/encoded.rs",
"chars": 338,
"preview": "use crate::json_store::types::JsonRef;\nuse std::borrow::Cow;\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq)]\npub(crate) en"
},
{
"path": "packages/engine/src/json_store/mod.rs",
"chars": 353,
"preview": "pub(crate) mod compression;\npub(crate) mod context;\nmod encoded;\npub(crate) mod store;\npub(crate) mod types;\n\n#[allow(un"
},
{
"path": "packages/engine/src/json_store/store.rs",
"chars": 36558,
"preview": "use crate::json_store::compression::{compress_json_payload, decode_json_zstd_payload};\nuse crate::json_store::encoded::{"
},
{
"path": "packages/engine/src/json_store/types.rs",
"chars": 5276,
"preview": "use std::sync::Arc;\n\nuse crate::LixError;\n\n#[derive(Debug, Clone, PartialEq, Eq)]\npub(crate) struct NormalizedJson(Arc<s"
},
{
"path": "packages/engine/src/lib.rs",
"chars": 2281,
"preview": "mod backend;\nmod binary_cas;\npub(crate) mod catalog;\npub(crate) mod cel;\npub(crate) mod commit_graph;\n#[allow(dead_code,"
},
{
"path": "packages/engine/src/live_state/context.rs",
"chars": 71904,
"preview": "use async_trait::async_trait;\nuse tokio::sync::Mutex;\n\nuse crate::commit_graph::CommitGraphContext;\nuse crate::entity_id"
},
{
"path": "packages/engine/src/live_state/mod.rs",
"chars": 459,
"preview": "mod context;\nmod overlay;\nmod reader;\nmod types;\nmod visibility;\n\n#[allow(unused_imports)]\npub(crate) use context::{Live"
},
{
"path": "packages/engine/src/live_state/overlay.rs",
"chars": 2643,
"preview": "use std::collections::BTreeMap;\n\nuse crate::live_state::{LiveStateRowIdentity, MaterializedLiveStateRow};\n\n/// Applies t"
},
{
"path": "packages/engine/src/live_state/reader.rs",
"chars": 789,
"preview": "use async_trait::async_trait;\n\nuse crate::live_state::MaterializedLiveStateRow;\nuse crate::live_state::{LiveStateRowRequ"
},
{
"path": "packages/engine/src/live_state/types.rs",
"chars": 7448,
"preview": "use crate::entity_identity::EntityIdentity;\nuse crate::tracked_state::MaterializedTrackedStateRow;\nuse crate::untracked_"
},
{
"path": "packages/engine/src/live_state/visibility.rs",
"chars": 7192,
"preview": "use std::collections::BTreeMap;\n\nuse crate::live_state::{LiveStateRowIdentity, MaterializedLiveStateRow};\nuse crate::GLO"
},
{
"path": "packages/engine/src/plugin/archive.rs",
"chars": 15370,
"preview": "use std::collections::{BTreeMap, BTreeSet};\nuse std::io::{Cursor, Read};\nuse std::path::{Component, Path};\n\nuse serde_js"
},
{
"path": "packages/engine/src/plugin/component.rs",
"chars": 5684,
"preview": "use std::sync::Arc;\n\nuse crate::common::LixError;\nuse crate::wasm::{WasmComponentInstance, WasmLimits, WasmRuntime};\n\nus"
},
{
"path": "packages/engine/src/plugin/install.rs",
"chars": 22633,
"preview": "//! Plugin install write helpers.\n//!\n//! This module owns plugin archive parsing, registered-schema staging, and the\n//"
},
{
"path": "packages/engine/src/plugin/manifest.rs",
"chars": 15395,
"preview": "use std::sync::OnceLock;\n\nuse globset::{Glob, GlobBuilder};\nuse jsonschema::{Draft, JSONSchema};\nuse serde::{Deserialize"
},
{
"path": "packages/engine/src/plugin/materializer.rs",
"chars": 16233,
"preview": "use std::collections::BTreeSet;\nuse std::sync::{Arc, RwLock};\n\nuse async_trait::async_trait;\n\nuse crate::common::LixErro"
},
{
"path": "packages/engine/src/plugin/mod.rs",
"chars": 1279,
"preview": "//! Plugin subsystem root.\n//!\n//! Phase 1 establishes `crate::plugin::*` as the owner path for plugin-domain\n//! code u"
},
{
"path": "packages/engine/src/plugin/plugin_manifest.json",
"chars": 2648,
"preview": "{\n \"$schema\": \"https://json-schema.org/draft/2020-12/schema\",\n \"type\": \"object\",\n \"additionalProperties\": false,\n \"r"
},
{
"path": "packages/engine/src/plugin/storage.rs",
"chars": 2207,
"preview": "use crate::LixError;\n\npub const PLUGIN_STORAGE_ROOT_DIRECTORY_PATH: &str = \"/.lix/plugins/\";\npub const PLUGIN_ARCHIVE_FI"
},
{
"path": "packages/engine/src/schema/annotations/defaults.rs",
"chars": 7720,
"preview": "use serde_json::{Map as JsonMap, Value as JsonValue};\n\nuse crate::cel::{CelEvaluator, CelFunctionProvider};\nuse crate::L"
},
{
"path": "packages/engine/src/schema/annotations/mod.rs",
"chars": 25,
"preview": "pub(crate) mod defaults;\n"
},
{
"path": "packages/engine/src/schema/builtin/lix_account.json",
"chars": 316,
"preview": "{\n \"x-lix-key\": \"lix_account\",\n \"x-lix-primary-key\": [\n \"/id\"\n ],\n \"type\": \"object\",\n \"properties\": {\n \"id\": "
},
{
"path": "packages/engine/src/schema/builtin/lix_active_account.json",
"chars": 463,
"preview": "{\n \"x-lix-key\": \"lix_active_account\",\n \"x-lix-primary-key\": [\n \"/account_id\"\n ],\n \"x-lix-foreign-keys\": [\n {\n "
},
{
"path": "packages/engine/src/schema/builtin/lix_binary_blob_ref.json",
"chars": 784,
"preview": "{\n \"x-lix-key\": \"lix_binary_blob_ref\",\n \"description\": \"Metadata pointer from a file version to its binary payload in "
},
{
"path": "packages/engine/src/schema/builtin/lix_change.json",
"chars": 1881,
"preview": "{\n \"x-lix-key\": \"lix_change\",\n \"description\": \"A change records one edit to a Lix entity, including what changed, when"
},
{
"path": "packages/engine/src/schema/builtin/lix_change_author.json",
"chars": 726,
"preview": "{\n \"x-lix-key\": \"lix_change_author\",\n \"x-lix-primary-key\": [\n \"/change_id\",\n \"/account_id\"\n ],\n \"x-lix-foreign"
},
{
"path": "packages/engine/src/schema/builtin/lix_commit.json",
"chars": 523,
"preview": "{\n \"x-lix-key\": \"lix_commit\",\n \"description\": \"A commit is a stable point in project history. Versions point to commit"
},
{
"path": "packages/engine/src/schema/builtin/lix_commit_edge.json",
"chars": 1566,
"preview": "{\n \"x-lix-key\": \"lix_commit_edge\",\n \"description\": \"Direct parent relationship between two commits. Merge commits have"
},
{
"path": "packages/engine/src/schema/builtin/lix_directory_descriptor.json",
"chars": 848,
"preview": "{\n \"x-lix-key\": \"lix_directory_descriptor\",\n \"x-lix-primary-key\": [\n \"/id\"\n ],\n \"x-lix-unique\": [\n [\n \"/p"
},
{
"path": "packages/engine/src/schema/builtin/lix_file_descriptor.json",
"chars": 855,
"preview": "{\n \"x-lix-key\": \"lix_file_descriptor\",\n \"x-lix-primary-key\": [\n \"/id\"\n ],\n \"x-lix-unique\": [\n [\n \"/direct"
},
{
"path": "packages/engine/src/schema/builtin/lix_key_value.json",
"chars": 704,
"preview": "{\n \"x-lix-key\": \"lix_key_value\",\n \"x-lix-primary-key\": [\n \"/key\"\n ],\n \"type\": \"object\",\n \"properties\": {\n \"ke"
},
{
"path": "packages/engine/src/schema/builtin/lix_label.json",
"chars": 643,
"preview": "{\n \"x-lix-key\": \"lix_label\",\n \"description\": \"Catalog of labels that can be assigned to arbitrary live Lix rows throug"
},
{
"path": "packages/engine/src/schema/builtin/lix_label_assignment.json",
"chars": 1922,
"preview": "{\n \"x-lix-key\": \"lix_label_assignment\",\n \"description\": \"Mapping table that assigns a label to any live Lix row addres"
},
{
"path": "packages/engine/src/schema/builtin/lix_registered_schema.json",
"chars": 425,
"preview": "{\n \"x-lix-key\": \"lix_registered_schema\",\n \"x-lix-primary-key\": [\n \"/value/x-lix-key\"\n ],\n \"type\": \"object\",\n \"pr"
},
{
"path": "packages/engine/src/schema/builtin/lix_version_descriptor.json",
"chars": 1123,
"preview": "{\n \"x-lix-key\": \"lix_version_descriptor\",\n \"description\": \"User-facing version metadata (name and visibility) for a br"
},
{
"path": "packages/engine/src/schema/builtin/lix_version_ref.json",
"chars": 1293,
"preview": "{\n \"x-lix-key\": \"lix_version_ref\",\n \"description\": \"Version head pointer. Records which commit a version should curren"
},
{
"path": "packages/engine/src/schema/builtin/mod.rs",
"chars": 10404,
"preview": "use serde_json::Value as JsonValue;\nuse std::sync::OnceLock;\n\nuse crate::schema::lix_schema_definition;\n\nconst LIX_REGIS"
},
{
"path": "packages/engine/src/schema/compatibility.rs",
"chars": 26074,
"preview": "use std::collections::{BTreeMap, BTreeSet};\n\nuse serde_json::Value as JsonValue;\n\nuse crate::common::top_level_property_"
},
{
"path": "packages/engine/src/schema/definition.json",
"chars": 7839,
"preview": "{\n \"$schema\": \"https://json-schema.org/draft/2020-12/schema\",\n \"title\": \"Lix Schema Definition\",\n \"description\": \"A L"
},
{
"path": "packages/engine/src/schema/definition.rs",
"chars": 24160,
"preview": "use cel::Program;\nuse jsonschema::{Draft, JSONSchema};\nuse serde_json::Value as JsonValue;\nuse std::collections::BTreeSe"
},
{
"path": "packages/engine/src/schema/key.rs",
"chars": 4148,
"preview": "use serde_json::Value as JsonValue;\n\nuse crate::entity_identity::EntityIdentity;\nuse crate::LixError;\n\n#[derive(Debug, C"
},
{
"path": "packages/engine/src/schema/mod.rs",
"chars": 676,
"preview": "mod builtin;\n#[allow(dead_code)]\npub(crate) mod compatibility;\nmod definition;\nmod key;\npub(crate) mod seed;\n#[cfg(test)"
},
{
"path": "packages/engine/src/schema/seed.rs",
"chars": 427,
"preview": "use serde_json::Value as JsonValue;\n\npub(crate) fn is_seed_schema_key(schema_key: &str) -> bool {\n super::builtin::is"
},
{
"path": "packages/engine/src/schema/tests.rs",
"chars": 22546,
"preview": "use crate::{validate_lix_schema, validate_lix_schema_definition};\nuse serde_json::json;\n\n#[test]\nfn validate_lix_schema_"
}
]
// ... and 293 more files