Showing preview only (728K chars total). Download the full file or copy to clipboard to get everything.
Repository: personaelabs/spartan-ecdsa
Branch: main
Commit: 4bf236a1a556
Files: 165
Total size: 679.7 KB
Directory structure:
gitextract_fd7z0c7m/
├── .cargo/
│ └── config
├── .eslintignore
├── .eslintrc.js
├── .github/
│ └── workflows/
│ └── publish.yaml
├── .gitignore
├── .prettierignore
├── .prettierrc.json
├── .vscode/
│ └── settings.json
├── Cargo.toml
├── README.md
├── lerna.json
├── package.json
├── packages/
│ ├── Spartan-secq/
│ │ ├── CODE_OF_CONDUCT.md
│ │ ├── CONTRIBUTING.md
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── SECURITY.md
│ │ ├── benches/
│ │ │ ├── nizk.rs
│ │ │ └── snark.rs
│ │ ├── examples/
│ │ │ └── cubic.rs
│ │ ├── profiler/
│ │ │ ├── nizk.rs
│ │ │ └── snark.rs
│ │ ├── rustfmt.toml
│ │ └── src/
│ │ ├── bin/
│ │ │ └── mont_params.rs
│ │ ├── commitments.rs
│ │ ├── dense_mlpoly.rs
│ │ ├── errors.rs
│ │ ├── group.rs
│ │ ├── lib.rs
│ │ ├── math.rs
│ │ ├── nizk/
│ │ │ ├── bullet.rs
│ │ │ └── mod.rs
│ │ ├── product_tree.rs
│ │ ├── r1csinstance.rs
│ │ ├── r1csproof.rs
│ │ ├── random.rs
│ │ ├── scalar/
│ │ │ ├── mod.rs
│ │ │ └── scalar.rs
│ │ ├── sparse_mlpoly.rs
│ │ ├── sumcheck.rs
│ │ ├── timer.rs
│ │ ├── transcript.rs
│ │ └── unipoly.rs
│ ├── benchmark/
│ │ ├── node/
│ │ │ ├── LICENSE
│ │ │ ├── README.md
│ │ │ ├── package.json
│ │ │ ├── src/
│ │ │ │ ├── node.bench.ts
│ │ │ │ ├── node.bench_addr_membership.ts
│ │ │ │ └── node.bench_pubkey_membership.ts
│ │ │ └── tsconfig.json
│ │ └── web/
│ │ ├── .vscode/
│ │ │ └── settings.json
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── next.config.js
│ │ ├── package.json
│ │ ├── pages/
│ │ │ ├── _app.tsx
│ │ │ └── index.tsx
│ │ └── tsconfig.json
│ ├── circuit_reader/
│ │ ├── Cargo.toml
│ │ └── src/
│ │ ├── bin/
│ │ │ └── gen_spartan_inst.rs
│ │ ├── circom_reader.rs
│ │ └── lib.rs
│ ├── circuits/
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── eff_ecdsa_membership/
│ │ │ ├── addr_membership.circom
│ │ │ ├── eff_ecdsa.circom
│ │ │ ├── eff_ecdsa_to_addr.circom
│ │ │ ├── pubkey_membership.circom
│ │ │ ├── secp256k1/
│ │ │ │ ├── add.circom
│ │ │ │ ├── double.circom
│ │ │ │ └── mul.circom
│ │ │ ├── to_address/
│ │ │ │ ├── vocdoni-keccak/
│ │ │ │ │ ├── keccak.circom
│ │ │ │ │ ├── permutations.circom
│ │ │ │ │ └── utils.circom
│ │ │ │ └── zk-identity/
│ │ │ │ └── eth.circom
│ │ │ └── tree.circom
│ │ ├── instances/
│ │ │ ├── addr_membership.circom
│ │ │ └── pubkey_membership.circom
│ │ ├── jest.config.js
│ │ ├── package.json
│ │ ├── poseidon/
│ │ │ ├── poseidon.circom
│ │ │ └── poseidon_constants.circom
│ │ └── tests/
│ │ ├── addr_membership.test.ts
│ │ ├── circuits/
│ │ │ ├── add_complete_test.circom
│ │ │ ├── add_incomplete_test.circom
│ │ │ ├── addr_membership_test.circom
│ │ │ ├── double_test.circom
│ │ │ ├── eff_ecdsa_test.circom
│ │ │ ├── eff_ecdsa_to_addr_test.circom
│ │ │ ├── k_test.circom
│ │ │ ├── mul_test.circom
│ │ │ ├── poseidon_test.circom
│ │ │ └── pubkey_membership_test.circom
│ │ ├── eff_ecdsa.test.ts
│ │ ├── eff_ecdsa_to_addr.test.ts
│ │ ├── poseidon.test.ts
│ │ ├── pubkey_membership.test.ts
│ │ ├── secp256k1.test.ts
│ │ └── test_utils.ts
│ ├── lib/
│ │ ├── .npmignore
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── embedWasmBytes.ts
│ │ ├── jest.config.js
│ │ ├── package.json
│ │ ├── src/
│ │ │ ├── config/
│ │ │ │ └── index.ts
│ │ │ ├── core/
│ │ │ │ ├── prover.ts
│ │ │ │ └── verifier.ts
│ │ │ ├── helpers/
│ │ │ │ ├── poseidon.ts
│ │ │ │ ├── profiler.ts
│ │ │ │ ├── publicInputs.ts
│ │ │ │ ├── tree.ts
│ │ │ │ └── utils.ts
│ │ │ ├── index.ts
│ │ │ ├── types/
│ │ │ │ └── index.ts
│ │ │ └── wasm/
│ │ │ ├── index.ts
│ │ │ ├── wasm.d.ts
│ │ │ └── wasm.js
│ │ ├── tests/
│ │ │ ├── efficientEcdsa.test.ts
│ │ │ ├── membershipNizk.test.ts
│ │ │ └── tree.test.ts
│ │ ├── tsconfig.build.json
│ │ └── tsconfig.json
│ ├── poseidon/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── k256_params.sh
│ │ ├── sage/
│ │ │ ├── generate_params_poseidon.sage
│ │ │ └── security_inequalities.sage
│ │ └── src/
│ │ ├── k256_consts.rs
│ │ ├── lib.rs
│ │ └── poseidon_k256.rs
│ ├── secq256k1/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── sage/
│ │ │ ├── hashtocurve_params.sage
│ │ │ ├── sqrt_ratio_params.sage
│ │ │ └── sswu_generic.sage
│ │ └── src/
│ │ ├── affine.rs
│ │ ├── field/
│ │ │ ├── field_secp.rs
│ │ │ ├── field_secq.rs
│ │ │ └── mod.rs
│ │ ├── hashtocurve.rs
│ │ ├── lib.rs
│ │ └── scalar.rs
│ └── spartan_wasm/
│ ├── Cargo.toml
│ ├── LICENSE
│ ├── README.md
│ ├── src/
│ │ ├── lib.rs
│ │ └── wasm.rs
│ └── test_circuit/
│ ├── test_circuit.circom
│ ├── test_circuit.circuit
│ ├── test_circuit.r1cs
│ ├── test_circuit_js/
│ │ ├── generate_witness.js
│ │ ├── test_circuit.wasm
│ │ └── witness_calculator.js
│ └── witness.wtns
├── rust-toolchain
└── scripts/
├── addr_membership_circuit.sh
├── build.sh
├── build_wasm.sh
├── compile_circuit.sh
├── pubkey_membership_circuit.sh
└── test.sh
================================================
FILE CONTENTS
================================================
================================================
FILE: .cargo/config
================================================
[target.wasm32-unknown-unknown]
rustflags = ["-C", "link-arg=--max-memory=4294967296"]
[unstable]
build-std = ["panic_abort", "std"]
[build]
target = "x86_64-apple-darwin"
================================================
FILE: .eslintignore
================================================
wasm_bytes.ts
================================================
FILE: .eslintrc.js
================================================
/* eslint-disable no-undef */
// Shared ESLint configuration applied to every package in the monorepo
// (TypeScript sources, React components, and security linting).
module.exports = {
  // Stop ESLint from looking for further config files in parent directories.
  root: true,
  extends: [
    "eslint:recommended",
    "plugin:react/recommended",
    "plugin:react-hooks/recommended",
    "plugin:@typescript-eslint/recommended",
    "plugin:security/recommended"
  ],
  // Parse sources with the TypeScript-aware parser.
  parser: "@typescript-eslint/parser",
  parserOptions: {
    ecmaFeatures: {
      jsx: true
    },
    // ES2022 syntax support.
    ecmaVersion: 13,
    sourceType: "module"
  },
  plugins: ["react", "@typescript-eslint", "security"],
  rules: {
    // Rules relaxed project-wide below.
    "@typescript-eslint/no-var-requires": "off",
    "@typescript-eslint/ban-ts-comment": "off",
    "@typescript-eslint/no-explicit-any": "off",
    // Not required with the automatic JSX runtime.
    "react/react-in-jsx-scope": "off",
    "react/prop-types": "off",
  }
};
================================================
FILE: .github/workflows/publish.yaml
================================================
name: Publish Package to npmjs
on:
release:
types: [published]
workflow_dispatch:
jobs:
publish:
runs-on: macos-latest
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.ref_name }}
# Setup Node.js
- uses: actions/setup-node@v3
with:
node-version: 18
registry-url: "https://registry.npmjs.org"
# Setup Rust
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2022-10-31
- run: rustup component add rust-src
- run: rustup target add x86_64-apple-darwin
# Install circom-secq
- uses: GuillaumeFalourd/clone-github-repo-action@v2
with:
owner: "DanTehrani"
repository: "circom-secq"
- run: cd circom-secq && cargo build --release && cargo install --path circom
# Install wasm-pack
- uses: jetli/wasm-pack-action@v0.4.0
with:
version: "v0.10.3"
- run: cargo test --release
- run: yarn
- run: yarn build
- run: yarn test
- run: npm publish
working-directory: ./packages/lib
env:
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
================================================
FILE: .gitignore
================================================
# Generated by Cargo
# will have compiled files and executables
/target/
# These are backup files generated by rustfmt
**/*.rs.bk
*.txt
node_modules/
.next/
next-env.d.ts
yarn-error.log
.DS_Store
pkg/
circom_input.json
circom_witness.wtns
*.ptau
build/
dist/
*.r1cs
*.sym
!test_circuit.r1cs
packages/prover/test_circuit/test_circuit_js/
#input files
packages/prover/test_circuit/*.json
wasmBytes.ts
**/sage/*.sage.py
packages/lib/src/circuits/
packages/lib/example/
================================================
FILE: .prettierignore
================================================
wasm_bytes.ts
================================================
FILE: .prettierrc.json
================================================
{
"trailingComma": "none",
"tabWidth": 2,
"semi": true,
"singleQuote": false,
  "arrowParens": "avoid"
}
================================================
FILE: .vscode/settings.json
================================================
{
"editor.formatOnSave": true,
"cSpell.words": [
"merkle",
"NIZK"
]
}
================================================
FILE: Cargo.toml
================================================
[workspace]
members = [
"packages/spartan_wasm",
"packages/secq256k1",
"packages/poseidon",
"packages/Spartan-secq",
"packages/circuit_reader",
]
================================================
FILE: README.md
================================================
# Spartan-ecdsa
Spartan-ecdsa is (to our knowledge) the fastest open-source method to verify ECDSA (secp256k1) signatures in zero-knowledge. It can prove ECDSA group membership 10 times faster than [efficient-zk-ecdsa](https://github.com/personaelabs/efficient-zk-ecdsa), our previous implementation of fast ECDSA signature proving. Please refer to [this blog post](https://personaelabs.org/posts/spartan-ecdsa/) for further information.
## Constraint breakdown
spartan-ecdsa achieves the phenomenal result of **hashing becoming the bottleneck instead of ECC operations** for the `pubkey_membership.circom` circuit. In particular, there are **3,039** constraints for efficient ECDSA signature verification, and **5,037** constraints for a depth 20 merkle tree membership check + 1 Poseidon hash of the ECDSA public key. The drop from the original 1.5 million constraints of [circom-ecdsa](https://github.com/0xPARC/circom-ecdsa) comes primarily from doing right-field arithmetic with secq and avoiding SNARK-unfriendly range checks and big integer math.
We also use [efficient ECDSA signatures](https://personaelabs.org/posts/efficient-ecdsa-1/) instead of standard ECDSA signatures to save an additional **14,505** constraints. To review, the standard ECDSA signature consists of $(r, s)$ for a public key $Q_a$ and message $m$, where $r$ is the x-coordinate of a random elliptic curve point $R$. Standard ECDSA signature verification checks if
```math
R == m s ^{-1} * G + r s ^{-1} * Q_a
```
where $G$ is the generator point of the curve. The efficient ECDSA signature consists of $s$ as well as $T = r^{-1} * R$ and $U = -r^{-1} * m * G$, which can both be computed outside of the SNARK without breaking correctness. Efficient ECDSA signature verification checks if
```math
s * T + U == Q_a
```
Thus, verifying a standard ECDSA signature instead of the efficient ECDSA signature requires (1) computing $s^{-1}$, $r \* s^{-1}$, $m \* s^{-1}$, and (2) an extra ECC scalar multiply to compute $m s ^{-1} * G$. The former computations happen in the scalar field of secp, which is unequal to the scalar field of secq, and so we incur 11,494 additional constraints for the wrong-field math. The latter can use the `Secp256k1Mul` subroutine and incurs 3,011 additional constraints.
## Benchmarks
Proving membership to a group of ECDSA public keys
| Benchmark | # |
| :--------------------------: | :---: |
| Constraints | 8,076 |
| Proving time in browser | 4s |
| Proving time in Node.js | 2s |
| Verification time in browser | 1s |
| Verification time in Node.js | 300ms |
| Proof size | 16kb |
- Measured on a M1 MacBook Pro with 80Mbps internet speed.
- Both proving and verification time in browser includes the time to download the circuit.
## Disclaimers
- Spartan-ecdsa is unaudited. Please use it at your own risk.
- Usage on mobile browsers isn’t currently supported.
## Install
```jsx
yarn add @personaelabs/spartan-ecdsa
```
## Development
### Node.js
v18 or later
### Build
1. Install Circom with secq256k1 support
```
git clone https://github.com/DanTehrani/circom-secq
cd circom-secq && cargo build --release && cargo install --path circom
```
2. Install [wasm-pack](https://rustwasm.github.io/wasm-pack/installer/)
3. Install dependencies & Build all packages
```jsx
yarn && yarn build
```
================================================
FILE: lerna.json
================================================
{
"$schema": "node_modules/lerna/schemas/lerna-schema.json",
"useWorkspaces": true,
"version": "0.0.0"
}
================================================
FILE: package.json
================================================
{
"private": true,
"name": "spartan-ecdsa-monorepo",
"version": "1.0.0",
"main": "index.js",
"repository": "https://github.com/DanTehrani/spartan-wasm.git",
"author": "Daniel Tehrani <contact@dantehrani.com>",
"scripts": {
"build": "sh ./scripts/build.sh && lerna run build",
"test": "sh ./scripts/test.sh"
},
"devDependencies": {
"@types/jest": "^29.2.4",
"@typescript-eslint/eslint-plugin": "5.49.0",
"eslint": "8.32.0",
"eslint-plugin-react": "7.32.1",
"eslint-plugin-react-hooks": "4.6.0",
"eslint-plugin-security": "1.7.0",
"lerna": "^6.4.0"
},
"workspaces": [
"packages/lib",
"packages/benchmark/web",
"packages/benchmark/node",
"packages/circuits"
]
}
================================================
FILE: packages/Spartan-secq/CODE_OF_CONDUCT.md
================================================
# Microsoft Open Source Code of Conduct
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
Resources:
- [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/)
- [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
- Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns
================================================
FILE: packages/Spartan-secq/CONTRIBUTING.md
================================================
This project welcomes contributions and suggestions. Most contributions require you to
agree to a Contributor License Agreement (CLA) declaring that you have the right to,
and actually do, grant us the rights to use your contribution. For details, visit
https://cla.microsoft.com.
When you submit a pull request, a CLA-bot will automatically determine whether you need
to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the
instructions provided by the bot. You will only need to do this once across all repositories using our CLA.
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
================================================
FILE: packages/Spartan-secq/Cargo.toml
================================================
[package]
name = "spartan"
version = "0.7.1"
authors = ["Srinath Setty <srinath@microsoft.com>"]
edition = "2021"
description = "High-speed zkSNARKs without trusted setup"
documentation = "https://docs.rs/spartan/"
readme = "README.md"
repository = "https://github.com/microsoft/Spartan"
license-file = "LICENSE"
keywords = ["zkSNARKs", "cryptography", "proofs"]
[dependencies]
num-bigint-dig = "^0.7"
secq256k1 = { path = "../secq256k1" }
merlin = "3.0.0"
rand = "0.7.3"
digest = "0.8.1"
sha3 = "0.8.2"
byteorder = "1.3.4"
rayon = { version = "1.3.0", optional = true }
serde = { version = "1.0.106", features = ["derive"] }
bincode = "1.2.1"
subtle = { version = "2.4", default-features = false }
rand_core = { version = "0.6", default-features = false }
zeroize = { version = "1", default-features = false }
itertools = "0.10.0"
colored = "2.0.0"
flate2 = "1.0.14"
thiserror = "1.0"
num-traits = "0.2.15"
hex-literal = { version = "0.3" }
multiexp = "0.2.2"
[dev-dependencies]
criterion = "0.3.1"
[lib]
name = "libspartan"
path = "src/lib.rs"
crate-type = ["cdylib", "rlib"]
[[bin]]
name = "snark"
path = "profiler/snark.rs"
[[bin]]
name = "nizk"
path = "profiler/nizk.rs"
[[bench]]
name = "snark"
harness = false
[[bench]]
name = "nizk"
harness = false
================================================
FILE: packages/Spartan-secq/LICENSE
================================================
MIT License
Copyright (c) Microsoft Corporation.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE
================================================
FILE: packages/Spartan-secq/README.md
================================================
## Fork of [Spartan](https://github.com/microsoft/Spartan)
_This fork is still under development._
Modify Spartan to operate over the **base field** of secp256k1.
### Changes from the original Spartan
- Use the secq256k1 crate instead of curve25519-dalek
- Modify values in scalar.rs (originally ristretto255.rs)
Please refer to [spartan-ecdsa](https://github.com/personaelabs/spartan-ecdsa) for development status.
================================================
FILE: packages/Spartan-secq/SECURITY.md
================================================
<!-- BEGIN MICROSOFT SECURITY.MD V0.0.3 BLOCK -->
## Security
Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://docs.microsoft.com/en-us/previous-versions/tn-archive/cc751383(v=technet.10)), please report it to us as described below.
## Reporting Security Issues
**Please do not report security vulnerabilities through public GitHub issues.**
Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://msrc.microsoft.com/create-report).
If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://www.microsoft.com/en-us/msrc/pgp-key-msrc).
You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc).
Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
* Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
* Full paths of source file(s) related to the manifestation of the issue
* The location of the affected source code (tag/branch/commit or direct URL)
* Any special configuration required to reproduce the issue
* Step-by-step instructions to reproduce the issue
* Proof-of-concept or exploit code (if possible)
* Impact of the issue, including how an attacker might exploit the issue
This information will help us triage your report more quickly.
If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://microsoft.com/msrc/bounty) page for more details about our active programs.
## Preferred Languages
We prefer all communications to be in English.
## Policy
Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://www.microsoft.com/en-us/msrc/cvd).
<!-- END MICROSOFT SECURITY.MD BLOCK -->
================================================
FILE: packages/Spartan-secq/benches/nizk.rs
================================================
#![allow(clippy::assertions_on_result_states)]
extern crate byteorder;
extern crate core;
extern crate criterion;
extern crate digest;
extern crate libspartan;
extern crate merlin;
extern crate rand;
extern crate sha3;
use libspartan::{Instance, NIZKGens, NIZK};
use merlin::Transcript;
use criterion::*;
/// Benchmarks NIZK proof generation on synthetic R1CS instances of
/// 2^10, 2^12, and 2^16 variables (one constraint per variable).
fn nizk_prove_benchmark(c: &mut Criterion) {
  for exponent in [10usize, 12, 16] {
    let num_vars = 1usize << exponent;
    // The synthetic instance uses as many constraints as variables.
    let num_cons = num_vars;
    let num_inputs = 10;

    let mut group = c.benchmark_group("NIZK_prove_benchmark");
    // Instance sizes grow geometrically, so plot on a log scale.
    group.plot_config(PlotConfiguration::default().summary_scale(AxisScale::Logarithmic));

    let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
    let gens = NIZKGens::new(num_cons, num_vars, num_inputs);

    let bench_name = format!("NIZK_prove_{}", num_vars);
    group.bench_function(&bench_name, move |b| {
      b.iter(|| {
        // Every proof attempt needs a fresh transcript.
        let mut prover_transcript = Transcript::new(b"example");
        NIZK::prove(
          black_box(&inst),
          black_box(vars.clone()),
          black_box(&inputs),
          black_box(&gens),
          black_box(&mut prover_transcript),
        );
      });
    });
    group.finish();
  }
}
/// Benchmarks NIZK proof verification on synthetic R1CS instances of
/// 2^10, 2^12, and 2^16 constraints; the proof itself is generated
/// once per size, outside the timed section.
fn nizk_verify_benchmark(c: &mut Criterion) {
  for exponent in [10usize, 12, 16] {
    let num_vars = 1usize << exponent;
    let num_cons = num_vars;
    let num_inputs = 10;

    let mut group = c.benchmark_group("NIZK_verify_benchmark");
    group.plot_config(PlotConfiguration::default().summary_scale(AxisScale::Logarithmic));

    let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
    let gens = NIZKGens::new(num_cons, num_vars, num_inputs);

    // Produce a proof of satisfiability once; only verification is timed.
    let mut prover_transcript = Transcript::new(b"example");
    let proof = NIZK::prove(&inst, vars, &inputs, &gens, &mut prover_transcript);

    let bench_name = format!("NIZK_verify_{}", num_cons);
    group.bench_function(&bench_name, move |b| {
      b.iter(|| {
        let mut verifier_transcript = Transcript::new(b"example");
        assert!(proof
          .verify(
            black_box(&inst),
            black_box(&inputs),
            black_box(&mut verifier_transcript),
            black_box(&gens)
          )
          .is_ok());
      });
    });
    group.finish();
  }
}
/// Criterion configuration for the NIZK benchmarks. A small sample
/// count keeps total run time manageable at the larger instance sizes.
fn set_duration() -> Criterion {
  Criterion::default().sample_size(10)
}

// Register both NIZK benchmarks under one group that uses the
// 10-sample configuration above.
criterion_group! {
  name = benches_nizk;
  config = set_duration();
  targets = nizk_prove_benchmark, nizk_verify_benchmark
}
criterion_main!(benches_nizk);
================================================
FILE: packages/Spartan-secq/benches/snark.rs
================================================
#![allow(clippy::assertions_on_result_states)]
extern crate libspartan;
extern crate merlin;
use libspartan::{Instance, SNARKGens, SNARK};
use merlin::Transcript;
use criterion::*;
/// Benchmarks `SNARK::encode` (committing to the R1CS instance) on
/// synthetic instances of 2^10, 2^12, and 2^16 constraints.
fn snark_encode_benchmark(c: &mut Criterion) {
  for exponent in [10usize, 12, 16] {
    let num_vars = 1usize << exponent;
    let num_cons = num_vars;
    let num_inputs = 10;

    let mut group = c.benchmark_group("SNARK_encode_benchmark");
    group.plot_config(PlotConfiguration::default().summary_scale(AxisScale::Logarithmic));

    // The witness and inputs are not needed to encode the instance.
    let (inst, _vars, _inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
    // Public parameters sized for the instance.
    let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons);

    let bench_name = format!("SNARK_encode_{}", num_cons);
    group.bench_function(&bench_name, move |b| {
      b.iter(|| {
        SNARK::encode(black_box(&inst), black_box(&gens));
      });
    });
    group.finish();
  }
}
/// Benchmarks SNARK proof generation on synthetic R1CS instances of
/// 2^10, 2^12, and 2^16 constraints. Parameter generation and the
/// instance commitment happen outside the timed section.
fn snark_prove_benchmark(c: &mut Criterion) {
  for exponent in [10usize, 12, 16] {
    let num_vars = 1usize << exponent;
    let num_cons = num_vars;
    let num_inputs = 10;

    let mut group = c.benchmark_group("SNARK_prove_benchmark");
    group.plot_config(PlotConfiguration::default().summary_scale(AxisScale::Logarithmic));

    let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
    // Public parameters and the commitment to the R1CS instance.
    let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons);
    let (comm, decomm) = SNARK::encode(&inst, &gens);

    let bench_name = format!("SNARK_prove_{}", num_cons);
    group.bench_function(&bench_name, move |b| {
      b.iter(|| {
        // Every proof attempt needs a fresh transcript.
        let mut prover_transcript = Transcript::new(b"example");
        SNARK::prove(
          black_box(&inst),
          black_box(&comm),
          black_box(&decomm),
          black_box(vars.clone()),
          black_box(&inputs),
          black_box(&gens),
          black_box(&mut prover_transcript),
        );
      });
    });
    group.finish();
  }
}
/// Benchmarks SNARK proof verification on synthetic R1CS instances of
/// 2^10, 2^12, and 2^16 constraints; encoding and proving are done
/// once per size, outside the timed section.
fn snark_verify_benchmark(c: &mut Criterion) {
  for exponent in [10usize, 12, 16] {
    let num_vars = 1usize << exponent;
    let num_cons = num_vars;
    let num_inputs = 10;

    let mut group = c.benchmark_group("SNARK_verify_benchmark");
    group.plot_config(PlotConfiguration::default().summary_scale(AxisScale::Logarithmic));

    let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
    // Public parameters and the commitment to the R1CS instance.
    let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons);
    let (comm, decomm) = SNARK::encode(&inst, &gens);

    // Produce a proof of satisfiability once; only verification is timed.
    let mut prover_transcript = Transcript::new(b"example");
    let proof = SNARK::prove(
      &inst,
      &comm,
      &decomm,
      vars,
      &inputs,
      &gens,
      &mut prover_transcript,
    );

    let bench_name = format!("SNARK_verify_{}", num_cons);
    group.bench_function(&bench_name, move |b| {
      b.iter(|| {
        let mut verifier_transcript = Transcript::new(b"example");
        assert!(proof
          .verify(
            black_box(&comm),
            black_box(&inputs),
            black_box(&mut verifier_transcript),
            black_box(&gens)
          )
          .is_ok());
      });
    });
    group.finish();
  }
}
/// Criterion configuration for the SNARK benchmarks. A small sample
/// count keeps total run time manageable at the larger instance sizes.
fn set_duration() -> Criterion {
  Criterion::default().sample_size(10)
}

// Register the encode/prove/verify benchmarks under one group that
// uses the 10-sample configuration above.
criterion_group! {
  name = benches_snark;
  config = set_duration();
  targets = snark_encode_benchmark, snark_prove_benchmark, snark_verify_benchmark
}
criterion_main!(benches_snark);
================================================
FILE: packages/Spartan-secq/examples/cubic.rs
================================================
//! Demonstrates how to produce a proof for the canonical cubic equation: `x^3 + x + 5 = y`.
//! The example is described in detail [here].
//!
//! The R1CS for this problem consists of the following 4 constraints:
//! `Z0 * Z0 - Z1 = 0`
//! `Z1 * Z0 - Z2 = 0`
//! `(Z2 + Z0) * 1 - Z3 = 0`
//! `(Z3 + 5) * 1 - I0 = 0`
//!
//! [here]: https://medium.com/@VitalikButerin/quadratic-arithmetic-programs-from-zero-to-hero-f6d558cea649
#![allow(clippy::assertions_on_result_states)]
use libspartan::{InputsAssignment, Instance, SNARKGens, VarsAssignment, SNARK};
use merlin::Transcript;
use rand_core::OsRng;
use secq256k1::elliptic_curve::Field;
use secq256k1::Scalar;
#[allow(non_snake_case)]
/// Constructs the R1CS instance for the cubic equation `x^3 + x + 5 = y`
/// together with a randomly sampled satisfying assignment.
///
/// Returns `(num_cons, num_vars, num_inputs, num_non_zero_entries,
/// instance, variable assignment, input assignment)`.
fn produce_r1cs() -> (
  usize,
  usize,
  usize,
  usize,
  Instance,
  VarsAssignment,
  InputsAssignment,
) {
  // parameters of the R1CS instance
  let num_cons = 4;
  let num_vars = 4;
  let num_inputs = 1;
  // total non-zero entries pushed into (A, B, C) below; callers use this
  // when sizing the public generators
  let num_non_zero_entries = 8;

  // We will encode the above constraints into three matrices, where
  // the coefficients in the matrix are in the little-endian byte order
  let mut A: Vec<(usize, usize, [u8; 32])> = Vec::new();
  let mut B: Vec<(usize, usize, [u8; 32])> = Vec::new();
  let mut C: Vec<(usize, usize, [u8; 32])> = Vec::new();

  let one: [u8; 32] = Scalar::ONE.to_bytes().into();

  // R1CS is a set of three sparse matrices A B C, where there is a row for
  // every constraint and a column for every entry in z = (vars, 1, inputs)
  // An R1CS instance is satisfiable iff:
  // Az \circ Bz = Cz, where z = (vars, 1, inputs)

  // constraint 0 entries in (A,B,C)
  // constraint 0 is Z0 * Z0 - Z1 = 0.
  A.push((0, 0, one));
  B.push((0, 0, one));
  C.push((0, 1, one));

  // constraint 1 entries in (A,B,C)
  // constraint 1 is Z1 * Z0 - Z2 = 0.
  A.push((1, 1, one));
  B.push((1, 0, one));
  C.push((1, 2, one));

  // constraint 2 entries in (A,B,C)
  // constraint 2 is (Z2 + Z0) * 1 - Z3 = 0.
  // Column `num_vars` is the constant-1 entry of z.
  A.push((2, 2, one));
  A.push((2, 0, one));
  B.push((2, num_vars, one));
  C.push((2, 3, one));

  // constraint 3 entries in (A,B,C)
  // constraint 3 is (Z3 + 5) * 1 - I0 = 0.
  // Column `num_vars + 1` is the first public input I0.
  A.push((3, 3, one));
  A.push((3, num_vars, Scalar::from(5u32).to_bytes().into()));
  B.push((3, num_vars, one));
  C.push((3, num_vars + 1, one));

  let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap();

  // compute a satisfying assignment, starting from a random Z0
  let mut csprng: OsRng = OsRng;
  let z0 = Scalar::random(&mut csprng);
  let z1 = z0 * z0; // constraint 0
  let z2 = z1 * z0; // constraint 1
  let z3 = z2 + z0; // constraint 2
  let i0 = z3 + Scalar::from(5u32); // constraint 3

  // create a VarsAssignment
  let mut vars: Vec<[u8; 32]> = vec![Scalar::ZERO.to_bytes().into(); num_vars];
  vars[0] = z0.to_bytes().into();
  vars[1] = z1.to_bytes().into();
  vars[2] = z2.to_bytes().into();
  vars[3] = z3.to_bytes().into();
  let assignment_vars = VarsAssignment::new(&vars).unwrap();

  // create an InputsAssignment
  let mut inputs: Vec<[u8; 32]> = vec![Scalar::ZERO.to_bytes().into(); num_inputs];
  inputs[0] = i0.to_bytes().into();
  let assignment_inputs = InputsAssignment::new(&inputs).unwrap();

  // check if the instance we created is satisfiable
  let res = inst.is_sat(&assignment_vars, &assignment_inputs);
  assert!(res.unwrap(), "should be satisfied");

  (
    num_cons,
    num_vars,
    num_inputs,
    num_non_zero_entries,
    inst,
    assignment_vars,
    assignment_inputs,
  )
}
/// End-to-end SNARK demo: build the cubic-equation R1CS instance,
/// commit to it, prove satisfiability, and verify the proof.
fn main() {
  // R1CS instance for `x^3 + x + 5 = y` plus a satisfying assignment.
  let (
    num_cons,
    num_vars,
    num_inputs,
    num_non_zero_entries,
    inst,
    assignment_vars,
    assignment_inputs,
  ) = produce_r1cs();

  // Public parameters sized to the instance.
  let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_non_zero_entries);

  // Commit to the R1CS instance; the decommitment stays with the prover.
  let (instance_comm, instance_decomm) = SNARK::encode(&inst, &gens);

  // Produce the proof of satisfiability.
  let mut prover_transcript = Transcript::new(b"snark_example");
  let proof = SNARK::prove(
    &inst,
    &instance_comm,
    &instance_decomm,
    assignment_vars,
    &assignment_inputs,
    &gens,
    &mut prover_transcript,
  );

  // Verify with a transcript initialized using the same label.
  let mut verifier_transcript = Transcript::new(b"snark_example");
  assert!(proof
    .verify(
      &instance_comm,
      &assignment_inputs,
      &mut verifier_transcript,
      &gens
    )
    .is_ok());
  println!("proof verification successful!");
}
================================================
FILE: packages/Spartan-secq/profiler/nizk.rs
================================================
#![allow(non_snake_case)]
#![allow(clippy::assertions_on_result_states)]
extern crate flate2;
extern crate libspartan;
extern crate merlin;
extern crate rand;
use flate2::{write::ZlibEncoder, Compression};
use libspartan::{Instance, NIZKGens, NIZK};
use merlin::Transcript;
/// Prints `msg` indented by two spaces and prefixed with "* ".
fn print(msg: &str) {
  // Equivalent to padding an empty string to width 2, then "* " + msg.
  println!("  * {}", msg);
}
/// Profiles NIZK proving and verification over synthetic R1CS instances
/// of 2^10 through 2^20 constraints, reporting the compressed proof
/// size for each.
pub fn main() {
  println!("Profiler:: NIZK");
  // One iteration per instance size; sizes are powers of two.
  for exponent in 10usize..=20 {
    let num_vars = 1usize << exponent;
    let num_cons = num_vars;
    let num_inputs = 10;

    // produce a synthetic R1CSInstance
    let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);

    // produce public generators
    let gens = NIZKGens::new(num_cons, num_vars, num_inputs);

    // produce a proof of satisfiability
    let mut prover_transcript = Transcript::new(b"nizk_example");
    let proof = NIZK::prove(&inst, vars, &inputs, &gens, &mut prover_transcript);

    // Report the zlib-compressed size of the serialized proof.
    let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default());
    bincode::serialize_into(&mut encoder, &proof).unwrap();
    let proof_encoded = encoder.finish().unwrap();
    print(&format!("NIZK::proof_compressed_len {:?}", proof_encoded.len()));

    // verify the proof of satisfiability
    let mut verifier_transcript = Transcript::new(b"nizk_example");
    assert!(proof
      .verify(&inst, &inputs, &mut verifier_transcript, &gens)
      .is_ok());
    println!();
  }
}
================================================
FILE: packages/Spartan-secq/profiler/snark.rs
================================================
#![allow(non_snake_case)]
#![allow(clippy::assertions_on_result_states)]
extern crate flate2;
extern crate libspartan;
extern crate merlin;
use flate2::{write::ZlibEncoder, Compression};
use libspartan::{Instance, SNARKGens, SNARK};
use merlin::Transcript;
/// Prints `msg` indented by two spaces and prefixed with a star.
fn print(msg: &str) {
  let prefix = "* ";
  println!("{:indent$}{}{}", "", prefix, msg, indent = 2);
}
/// Profiles SNARK encode/prove/verify over synthetic R1CS instances of
/// increasing size (2^10 through 2^20 constraints/variables) and reports
/// the compressed proof length for each.
pub fn main() {
  println!("Profiler:: SNARK");
  // Sweep instance sizes by exponent.
  for e in 10..=20u32 {
    let num_vars = 2_usize.pow(e);
    let num_cons = num_vars; // square instances
    let num_inputs = 10;

    // Synthesize a satisfiable R1CS instance of the requested size.
    let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);

    // Public generators; the non-zero-entry bound is num_cons here.
    let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons);

    // Commit to the R1CS instance.
    let (comm, decomm) = SNARK::encode(&inst, &gens);

    // Produce the proof.
    let mut prover_transcript = Transcript::new(b"snark_example");
    let proof = SNARK::prove(
      &inst,
      &comm,
      &decomm,
      vars,
      &inputs,
      &gens,
      &mut prover_transcript,
    );

    // Report the zlib-compressed, bincode-serialized proof size.
    let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default());
    bincode::serialize_into(&mut encoder, &proof).unwrap();
    let proof_encoded = encoder.finish().unwrap();
    print(&format!("SNARK::proof_compressed_len {:?}", proof_encoded.len()));

    // Check that the proof verifies.
    let mut verifier_transcript = Transcript::new(b"snark_example");
    assert!(proof
      .verify(&comm, &inputs, &mut verifier_transcript, &gens)
      .is_ok());
    println!();
  }
}
================================================
FILE: packages/Spartan-secq/rustfmt.toml
================================================
edition = "2018"
tab_spaces = 2
newline_style = "Unix"
use_try_shorthand = true
================================================
FILE: packages/Spartan-secq/src/bin/mont_params.rs
================================================
use hex_literal::hex;
use num_bigint_dig::{BigInt, BigUint, ModInverse, ToBigInt};
use num_traits::{FromPrimitive, ToPrimitive};
use std::ops::Neg;
/// Splits a (≤256-bit) big integer into four little-endian 64-bit limbs.
fn get_words(n: &BigUint) -> [u64; 4] {
  let mask = BigUint::from(0xffffffffffffffffu64);
  let mut limbs = [0u64; 4];
  for (i, limb) in limbs.iter_mut().enumerate() {
    // Shift the i-th limb down and mask off the low 64 bits.
    *limb = ((n.clone() >> (64 * i)) & mask.clone()).to_u64().unwrap();
  }
  limbs
}
/// Prints a labeled limb array as 64-bit hex literals, one per line,
/// ready to paste into Rust source.
fn render_hex(label: String, words: &[u64; 4]) {
  println!("// {}", label);
  words.iter().for_each(|w| println!("0x{:016x},", w));
}
// Computes and prints the Montgomery-arithmetic constants (R, R2, R3, INV)
// for the secp256k1 base-field modulus, as 64-bit limb literals.
fn main() {
  // The modulus p (big-endian hex): 2^256 - 2^32 - 977.
  let modulus = BigUint::from_bytes_be(&hex!(
    "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f"
  ));
  // R = 2^256 mod p (the Montgomery radix for 4 x 64-bit limbs).
  let r = BigUint::from_u8(2)
    .unwrap()
    .modpow(&BigUint::from_u64(256).unwrap(), &modulus);
  // R2 = 2^512 mod p (used to convert into Montgomery form).
  let r2 = BigUint::from_u8(2)
    .unwrap()
    .modpow(&BigUint::from_u64(512).unwrap(), &modulus);
  // R3 = 2^768 mod p.
  let r3 = BigUint::from_u8(2)
    .unwrap()
    .modpow(&BigUint::from_u64(768).unwrap(), &modulus);
  let two_pow_64 = BigUint::from_u128(18446744073709551616u128).unwrap();
  let one = BigInt::from_u8(1).unwrap();
  // INV = -p^{-1} mod 2^64; the final modpow with exponent 1 reduces the
  // negated inverse back into the range [0, 2^64).
  let inv = modulus
    .clone()
    .mod_inverse(&two_pow_64)
    .unwrap()
    .neg()
    .modpow(&one, &two_pow_64.to_bigint().unwrap());
  render_hex("Modulus".to_string(), &get_words(&modulus));
  render_hex("R".to_string(), &get_words(&r));
  render_hex("R2".to_string(), &get_words(&r2));
  render_hex("R3".to_string(), &get_words(&r3));
  render_hex("INV".to_string(), &get_words(&inv.to_biguint().unwrap()));
}
================================================
FILE: packages/Spartan-secq/src/commitments.rs
================================================
use super::group::{GroupElement, VartimeMultiscalarMul};
use super::scalar::Scalar;
use digest::{ExtendableOutput, Input};
use secq256k1::AffinePoint;
use sha3::Shake256;
use std::io::Read;
/// Generators for Pedersen-style multi-commitments: `n` value generators
/// plus one blinding generator.
#[derive(Debug)]
pub struct MultiCommitGens {
  pub n: usize,             // number of value generators in `G`
  pub G: Vec<GroupElement>, // generators for the committed values
  pub h: GroupElement,      // blinding generator
}
impl MultiCommitGens {
  /// Derives `n + 1` generators deterministically from `label` (and the
  /// curve's base point) via SHAKE-256; the last one becomes `h`.
  pub fn new(n: usize, label: &[u8]) -> Self {
    let mut shake = Shake256::default();
    shake.input(label);
    shake.input(AffinePoint::generator().compress().as_bytes());
    let mut reader = shake.xof_result();
    let mut gens: Vec<GroupElement> = Vec::new();
    // 128 uniform bytes are hashed-to-curve per generator.
    let mut uniform_bytes = [0u8; 128];
    for _ in 0..n + 1 {
      reader.read_exact(&mut uniform_bytes).unwrap();
      gens.push(AffinePoint::from_uniform_bytes(&uniform_bytes));
    }
    MultiCommitGens {
      n,
      G: gens[..n].to_vec(),
      h: gens[n],
    }
  }
  /// Explicit deep copy (an inherent method; `Clone` is not derived).
  pub fn clone(&self) -> MultiCommitGens {
    MultiCommitGens {
      n: self.n,
      h: self.h,
      G: self.G.clone(),
    }
  }
  /// Returns generators with every `G[i]` scaled by `s`; `h` is unchanged.
  pub fn scale(&self, s: &Scalar) -> MultiCommitGens {
    MultiCommitGens {
      n: self.n,
      h: self.h,
      G: (0..self.n).map(|i| s * self.G[i]).collect(),
    }
  }
  /// Splits the value generators at `mid`; both halves share the same `h`.
  pub fn split_at(&self, mid: usize) -> (MultiCommitGens, MultiCommitGens) {
    let (G1, G2) = self.G.split_at(mid);
    (
      MultiCommitGens {
        n: G1.len(),
        G: G1.to_vec(),
        h: self.h,
      },
      MultiCommitGens {
        n: G2.len(),
        G: G2.to_vec(),
        h: self.h,
      },
    )
  }
}
/// Pedersen-style commitment: commits `self` (a scalar or scalar vector)
/// under the generators `gens_n` with blinding factor `blind`.
pub trait Commitments {
  fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement;
}
impl Commitments for Scalar {
  /// Commits a single scalar: `self * G[0] + blind * h`.
  fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement {
    assert_eq!(gens_n.n, 1);
    let scalars = vec![*self, *blind];
    let bases = vec![gens_n.G[0], gens_n.h];
    GroupElement::vartime_multiscalar_mul(scalars, bases)
  }
}
impl Commitments for Vec<Scalar> {
  /// Commits a vector: `<self, G> + blind * h`.
  fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement {
    assert_eq!(self.len(), gens_n.n);
    let msm = GroupElement::vartime_multiscalar_mul((*self).clone(), gens_n.G.clone());
    msm + blind * gens_n.h
  }
}
impl Commitments for [Scalar] {
  /// Commits a slice: `<self, G> + blind * h`.
  fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement {
    assert_eq!(self.len(), gens_n.n);
    let msm = GroupElement::vartime_multiscalar_mul(self.to_vec(), gens_n.G.clone());
    msm + blind * gens_n.h
  }
}
================================================
FILE: packages/Spartan-secq/src/dense_mlpoly.rs
================================================
#![allow(clippy::too_many_arguments)]
use super::commitments::{Commitments, MultiCommitGens};
use super::errors::ProofVerifyError;
use super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul};
use super::math::Math;
use super::nizk::{DotProductProofGens, DotProductProofLog};
use super::random::RandomTape;
use super::scalar::Scalar;
use super::transcript::{AppendToTranscript, ProofTranscript};
use crate::group::DecompressEncodedPoint;
use core::ops::Index;
use merlin::Transcript;
use serde::{Deserialize, Serialize};
#[cfg(feature = "multicore")]
use rayon::prelude::*;
/// A dense multilinear polynomial, stored as its evaluations over all
/// points of the Boolean hypercube {0,1}^num_vars.
#[derive(Debug)]
pub struct DensePolynomial {
  num_vars: usize, // the number of variables in the multilinear polynomial
  len: usize,      // number of currently-live evaluations (2^num_vars)
  Z: Vec<Scalar>,  // evaluations of the polynomial in all the 2^num_vars Boolean inputs
}
/// Generators used to commit to a dense multilinear polynomial.
pub struct PolyCommitmentGens {
  pub gens: DotProductProofGens,
}
impl PolyCommitmentGens {
  // the number of variables in the multilinear polynomial
  pub fn new(num_vars: usize, label: &'static [u8]) -> PolyCommitmentGens {
    // The 2^num_vars evaluations are viewed as a matrix with 2^right
    // columns; the generators are sized for one matrix row.
    let (_left, right) = EqPolynomial::compute_factored_lens(num_vars);
    let gens = DotProductProofGens::new(right.pow2(), label);
    PolyCommitmentGens { gens }
  }
}
/// Blinding factors, one per committed row of a `DensePolynomial`.
pub struct PolyCommitmentBlinds {
  blinds: Vec<Scalar>,
}
/// Commitment to a dense multilinear polynomial: one compressed group
/// element per row of the evaluation matrix.
#[derive(Debug, Serialize, Deserialize)]
pub struct PolyCommitment {
  C: Vec<CompressedGroup>,
}
/// Commitment consisting of a single compressed group element.
#[derive(Debug, Serialize, Deserialize)]
pub struct ConstPolyCommitment {
  C: CompressedGroup,
}
/// The multilinear equality polynomial eq(x, r), represented by the fixed
/// point `r`.
pub struct EqPolynomial {
  r: Vec<Scalar>,
}
impl EqPolynomial {
  /// Creates eq(x, r) for the fixed point `r`.
  pub fn new(r: Vec<Scalar>) -> Self {
    EqPolynomial { r }
  }
  /// Evaluates eq(rx, r) = prod_i (r_i * rx_i + (1 - r_i) * (1 - rx_i)).
  pub fn evaluate(&self, rx: &[Scalar]) -> Scalar {
    assert_eq!(self.r.len(), rx.len());
    (0..rx.len())
      .map(|i| self.r[i] * rx[i] + (Scalar::one() - self.r[i]) * (Scalar::one() - rx[i]))
      .product()
  }
  /// Returns eq(x, r) for every Boolean point x in {0,1}^ell, in O(2^ell)
  /// time: each pass over `j` splits every entry of the previous pass into
  /// its x_j = 0 and x_j = 1 refinements.
  pub fn evals(&self) -> Vec<Scalar> {
    let ell = self.r.len();
    let mut evals: Vec<Scalar> = vec![Scalar::one(); ell.pow2()];
    let mut size = 1;
    for j in 0..ell {
      // in each iteration, we double the size of chis
      size *= 2;
      for i in (0..size).rev().step_by(2) {
        // copy each element from the prior iteration twice
        let scalar = evals[i / 2];
        evals[i] = scalar * self.r[j]; // refinement where bit j is 1
        evals[i - 1] = scalar - evals[i]; // bit j is 0: scalar * (1 - r_j)
      }
    }
    evals
  }
  /// Splits `ell` variables into (left, right) halves; the right half gets
  /// the extra variable when `ell` is odd.
  pub fn compute_factored_lens(ell: usize) -> (usize, usize) {
    (ell / 2, ell - ell / 2)
  }
  /// Computes the eq tables for the left and right halves of `r` separately;
  /// the full table is their outer product (see `check_factored_chis`).
  pub fn compute_factored_evals(&self) -> (Vec<Scalar>, Vec<Scalar>) {
    let ell = self.r.len();
    let (left_num_vars, _right_num_vars) = EqPolynomial::compute_factored_lens(ell);
    let L = EqPolynomial::new(self.r[..left_num_vars].to_vec()).evals();
    let R = EqPolynomial::new(self.r[left_num_vars..ell].to_vec()).evals();
    (L, R)
  }
}
/// The multilinear polynomial whose value at a Boolean point equals the
/// integer whose (big-endian) bits are that point.
pub struct IdentityPolynomial {
  size_point: usize, // expected number of coordinates in an evaluation point
}
impl IdentityPolynomial {
  pub fn new(size_point: usize) -> Self {
    IdentityPolynomial { size_point }
  }
  /// Evaluates at `r`: sum_i 2^{len - i - 1} * r[i].
  pub fn evaluate(&self, r: &[Scalar]) -> Scalar {
    let len = r.len();
    assert_eq!(len, self.size_point);
    let mut acc = Scalar::zero();
    for i in 0..len {
      // Coordinate i carries weight 2^{len - i - 1}.
      acc = acc + Scalar::from((len - i - 1).pow2() as u64) * r[i];
    }
    acc
  }
}
impl DensePolynomial {
  /// Wraps a vector of evaluations; `num_vars` is derived as log2 of its
  /// length.
  pub fn new(Z: Vec<Scalar>) -> Self {
    DensePolynomial {
      num_vars: Z.len().log_2(),
      len: Z.len(),
      Z,
    }
  }
  /// Number of variables of the multilinear polynomial.
  pub fn get_num_vars(&self) -> usize {
    self.num_vars
  }
  /// Number of currently-live evaluations.
  pub fn len(&self) -> usize {
    self.len
  }
  /// Deep copy of the live portion of the evaluation table.
  pub fn clone(&self) -> DensePolynomial {
    DensePolynomial::new(self.Z[0..self.len].to_vec())
  }
  /// Splits into the first `idx` evaluations and the following `idx`
  /// evaluations (each half is a polynomial in its own right).
  pub fn split(&self, idx: usize) -> (DensePolynomial, DensePolynomial) {
    assert!(idx < self.len());
    (
      DensePolynomial::new(self.Z[..idx].to_vec()),
      DensePolynomial::new(self.Z[idx..2 * idx].to_vec()),
    )
  }
  // Commits row-by-row: `Z` is viewed as an L_size x R_size matrix and each
  // row gets its own blind. Parallel (rayon) variant.
  #[cfg(feature = "multicore")]
  fn commit_inner(&self, blinds: &[Scalar], gens: &MultiCommitGens) -> PolyCommitment {
    let L_size = blinds.len();
    let R_size = self.Z.len() / L_size;
    assert_eq!(L_size * R_size, self.Z.len());
    let C = (0..L_size)
      .into_par_iter()
      .map(|i| {
        self.Z[R_size * i..R_size * (i + 1)]
          .commit(&blinds[i], gens)
          .compress()
      })
      .collect();
    PolyCommitment { C }
  }
  // Sequential fallback of the row-by-row commitment above.
  #[cfg(not(feature = "multicore"))]
  fn commit_inner(&self, blinds: &[Scalar], gens: &MultiCommitGens) -> PolyCommitment {
    let L_size = blinds.len();
    let R_size = self.Z.len() / L_size;
    assert_eq!(L_size * R_size, self.Z.len());
    let C = (0..L_size)
      .map(|i| {
        self.Z[R_size * i..R_size * (i + 1)]
          .commit(&blinds[i], gens)
          .compress()
      })
      .collect();
    PolyCommitment { C }
  }
  /// Commits to the polynomial. If `random_tape` is provided the per-row
  /// blinds are drawn from it; otherwise all-zero blinds are used.
  pub fn commit(
    &self,
    gens: &PolyCommitmentGens,
    random_tape: Option<&mut RandomTape>,
  ) -> (PolyCommitment, PolyCommitmentBlinds) {
    let n = self.Z.len();
    let ell = self.get_num_vars();
    assert_eq!(n, ell.pow2());
    let (left_num_vars, right_num_vars) = EqPolynomial::compute_factored_lens(ell);
    let L_size = left_num_vars.pow2();
    let R_size = right_num_vars.pow2();
    assert_eq!(L_size * R_size, n);
    let blinds = if let Some(t) = random_tape {
      PolyCommitmentBlinds {
        blinds: t.random_vector(b"poly_blinds", L_size),
      }
    } else {
      PolyCommitmentBlinds {
        blinds: vec![Scalar::zero(); L_size],
      }
    };
    (self.commit_inner(&blinds.blinds, &gens.gens.gens_n), blinds)
  }
  /// Computes L^T * Z, with Z viewed as an L_size x R_size matrix;
  /// returns a vector of length R_size.
  pub fn bound(&self, L: &[Scalar]) -> Vec<Scalar> {
    let (left_num_vars, right_num_vars) = EqPolynomial::compute_factored_lens(self.get_num_vars());
    let L_size = left_num_vars.pow2();
    let R_size = right_num_vars.pow2();
    (0..R_size)
      .map(|i| (0..L_size).map(|j| L[j] * self.Z[j * R_size + i]).sum())
      .collect()
  }
  /// Binds the top (highest-order) variable to `r`, halving the table.
  pub fn bound_poly_var_top(&mut self, r: &Scalar) {
    let n = self.len() / 2;
    for i in 0..n {
      // Linear interpolation between the x_top = 0 and x_top = 1 halves.
      self.Z[i] = self.Z[i] + r * (self.Z[i + n] - self.Z[i]);
    }
    self.num_vars -= 1;
    self.len = n;
  }
  /// Binds the bottom (lowest-order) variable to `r`, halving the table.
  pub fn bound_poly_var_bot(&mut self, r: &Scalar) {
    let n = self.len() / 2;
    for i in 0..n {
      // Interpolates between adjacent even/odd entries.
      self.Z[i] = self.Z[2 * i] + r * (self.Z[2 * i + 1] - self.Z[2 * i]);
    }
    self.num_vars -= 1;
    self.len = n;
  }
  // returns Z(r) in O(n) time
  pub fn evaluate(&self, r: &[Scalar]) -> Scalar {
    // r must have a value for each variable
    assert_eq!(r.len(), self.get_num_vars());
    // Z(r) = <Z, eq-table(r)>.
    let chis = EqPolynomial::new(r.to_vec()).evals();
    assert_eq!(chis.len(), self.Z.len());
    DotProductProofLog::compute_dotproduct(&self.Z, &chis)
  }
  // Read-only access to the raw evaluation vector.
  fn vec(&self) -> &Vec<Scalar> {
    &self.Z
  }
  /// Appends `other`'s evaluations to `self`, adding one variable; both
  /// polynomials must currently be the same (unbound) size.
  pub fn extend(&mut self, other: &DensePolynomial) {
    // TODO: allow extension even when some vars are bound
    assert_eq!(self.Z.len(), self.len);
    let other_vec = other.vec();
    assert_eq!(other_vec.len(), self.len);
    self.Z.extend(other_vec);
    self.num_vars += 1;
    self.len *= 2;
    assert_eq!(self.Z.len(), self.len);
  }
  /// Concatenates the given polynomials' evaluations and zero-pads the
  /// result up to the next power of two.
  pub fn merge<'a, I>(polys: I) -> DensePolynomial
  where
    I: IntoIterator<Item = &'a DensePolynomial>,
  {
    let mut Z: Vec<Scalar> = Vec::new();
    for poly in polys.into_iter() {
      Z.extend(poly.vec());
    }
    // pad the polynomial with zero polynomial at the end
    Z.resize(Z.len().next_power_of_two(), Scalar::zero());
    DensePolynomial::new(Z)
  }
  /// Builds a polynomial from `usize` evaluations, converting each to a
  /// field element.
  pub fn from_usize(Z: &[usize]) -> Self {
    DensePolynomial::new(
      (0..Z.len())
        .map(|i| Scalar::from(Z[i] as u64))
        .collect::<Vec<Scalar>>(),
    )
  }
}
impl Index<usize> for DensePolynomial {
  type Output = Scalar;
  /// Direct access to the i-th stored evaluation.
  #[inline(always)]
  fn index(&self, i: usize) -> &Scalar {
    &self.Z[i]
  }
}
impl AppendToTranscript for PolyCommitment {
  /// Appends every commitment share to `transcript`, framed by begin/end
  /// markers under `label`.
  fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) {
    transcript.append_message(label, b"poly_commitment_begin");
    for share in &self.C {
      transcript.append_point(b"poly_commitment_share", share);
    }
    transcript.append_message(label, b"poly_commitment_end");
  }
}
/// Proof that a committed multilinear polynomial evaluates to a claimed
/// value at a given point, reduced to a log-sized dot-product proof.
#[derive(Debug, Serialize, Deserialize)]
pub struct PolyEvalProof {
  proof: DotProductProofLog,
}
impl PolyEvalProof {
  // Domain-separation label for the Fiat-Shamir transcript.
  fn protocol_name() -> &'static [u8] {
    b"polynomial evaluation proof"
  }
  /// Proves that `poly` evaluates to `Zr` at point `r`; returns the proof
  /// together with a commitment to `Zr` (under `blind_Zr_opt`, or a zero
  /// blind if absent).
  pub fn prove(
    poly: &DensePolynomial,
    blinds_opt: Option<&PolyCommitmentBlinds>,
    r: &[Scalar], // point at which the polynomial is evaluated
    Zr: &Scalar, // evaluation of \widetilde{Z}(r)
    blind_Zr_opt: Option<&Scalar>, // specifies a blind for Zr
    gens: &PolyCommitmentGens,
    transcript: &mut Transcript,
    random_tape: &mut RandomTape,
  ) -> (PolyEvalProof, CompressedGroup) {
    transcript.append_protocol_name(PolyEvalProof::protocol_name());
    // assert vectors are of the right size
    assert_eq!(poly.get_num_vars(), r.len());
    let (left_num_vars, right_num_vars) = EqPolynomial::compute_factored_lens(r.len());
    let L_size = left_num_vars.pow2();
    let R_size = right_num_vars.pow2();
    // Fall back to zero blinds when the caller supplied none.
    let default_blinds = PolyCommitmentBlinds {
      blinds: vec![Scalar::zero(); L_size],
    };
    let blinds = blinds_opt.map_or(&default_blinds, |p| p);
    assert_eq!(blinds.blinds.len(), L_size);
    let zero = Scalar::zero();
    let blind_Zr = blind_Zr_opt.map_or(&zero, |p| p);
    // compute the L and R vectors
    let eq = EqPolynomial::new(r.to_vec());
    let (L, R) = eq.compute_factored_evals();
    assert_eq!(L.len(), L_size);
    assert_eq!(R.len(), R_size);
    // compute the vector underneath L*Z and the L*blinds
    // compute vector-matrix product between L and Z viewed as a matrix
    let LZ = poly.bound(&L);
    let LZ_blind: Scalar = (0..L.len()).map(|i| blinds.blinds[i] * L[i]).sum();
    // a dot product proof of size R_size
    let (proof, _C_LR, C_Zr_prime) = DotProductProofLog::prove(
      &gens.gens,
      transcript,
      random_tape,
      &LZ,
      &LZ_blind,
      &R,
      Zr,
      blind_Zr,
    );
    (PolyEvalProof { proof }, C_Zr_prime)
  }
  /// Verifies the proof against the polynomial commitment `comm` and a
  /// commitment `C_Zr` to the claimed evaluation.
  pub fn verify(
    &self,
    gens: &PolyCommitmentGens,
    transcript: &mut Transcript,
    r: &[Scalar], // point at which the polynomial is evaluated
    C_Zr: &CompressedGroup, // commitment to \widetilde{Z}(r)
    comm: &PolyCommitment,
  ) -> Result<(), ProofVerifyError> {
    transcript.append_protocol_name(PolyEvalProof::protocol_name());
    // compute L and R
    let eq = EqPolynomial::new(r.to_vec());
    let (L, R) = eq.compute_factored_evals();
    // compute a weighted sum of commitments and L
    let C_decompressed = comm.C.iter().map(|pt| pt.decompress().unwrap());
    let C_LZ = GroupElement::vartime_multiscalar_mul(L, C_decompressed.collect()).compress();
    self
      .proof
      .verify(R.len(), &gens.gens, transcript, &R, &C_LZ, C_Zr)
  }
  /// Verifies against a plain (unblinded) claimed evaluation `Zr` by first
  /// committing to it with a zero blind.
  pub fn verify_plain(
    &self,
    gens: &PolyCommitmentGens,
    transcript: &mut Transcript,
    r: &[Scalar], // point at which the polynomial is evaluated
    Zr: &Scalar, // evaluation \widetilde{Z}(r)
    comm: &PolyCommitment,
  ) -> Result<(), ProofVerifyError> {
    // compute a commitment to Zr with a blind of zero
    let C_Zr = Zr.commit(&Scalar::zero(), &gens.gens.gens_1).compress();
    self.verify(gens, transcript, r, &C_Zr, comm)
  }
}
#[cfg(test)]
mod tests {
  use super::super::scalar::ScalarFromPrimitives;
  use super::*;
  use rand_core::OsRng;
  // Evaluates Z at r via the factored route: LZ = L^T * Z, then <LZ, R>.
  fn evaluate_with_LR(Z: &[Scalar], r: &[Scalar]) -> Scalar {
    let eq = EqPolynomial::new(r.to_vec());
    let (L, R) = eq.compute_factored_evals();
    let ell = r.len();
    // ensure ell is even
    assert!(ell % 2 == 0);
    // compute n = 2^\ell
    let n = ell.pow2();
    // compute m = sqrt(n) = 2^{\ell/2}
    let m = n.square_root();
    // compute vector-matrix product between L and Z viewed as a matrix
    let LZ = (0..m)
      .map(|i| (0..m).map(|j| L[j] * Z[j * m + i]).sum())
      .collect::<Vec<Scalar>>();
    // compute dot product between LZ and R
    DotProductProofLog::compute_dotproduct(&LZ, &R)
  }
  // The direct evaluation and the factored (L, R) evaluation must agree.
  #[test]
  fn check_polynomial_evaluation() {
    // Z = [1, 2, 1, 4]
    let Z = vec![
      Scalar::one(),
      (2_usize).to_scalar(),
      (1_usize).to_scalar(),
      (4_usize).to_scalar(),
    ];
    // r = [4,3]
    let r = vec![(4_usize).to_scalar(), (3_usize).to_scalar()];
    let eval_with_LR = evaluate_with_LR(&Z, &r);
    let poly = DensePolynomial::new(Z);
    let eval = poly.evaluate(&r);
    assert_eq!(eval, (28_usize).to_scalar());
    assert_eq!(eval_with_LR, eval);
  }
  // Naive O(n * ell) computation of the factored chi vectors: L over the
  // high-order half of the bits, R over the low-order half.
  pub fn compute_factored_chis_at_r(r: &[Scalar]) -> (Vec<Scalar>, Vec<Scalar>) {
    let mut L: Vec<Scalar> = Vec::new();
    let mut R: Vec<Scalar> = Vec::new();
    let ell = r.len();
    assert!(ell % 2 == 0); // ensure ell is even
    let n = ell.pow2();
    let m = n.square_root();
    // compute row vector L
    for i in 0..m {
      let mut chi_i = Scalar::one();
      for j in 0..ell / 2 {
        let bit_j = ((m * i) & (1 << (r.len() - j - 1))) > 0;
        if bit_j {
          chi_i *= r[j];
        } else {
          chi_i *= Scalar::one() - r[j];
        }
      }
      L.push(chi_i);
    }
    // compute column vector R
    for i in 0..m {
      let mut chi_i = Scalar::one();
      for j in ell / 2..ell {
        let bit_j = (i & (1 << (r.len() - j - 1))) > 0;
        if bit_j {
          chi_i *= r[j];
        } else {
          chi_i *= Scalar::one() - r[j];
        }
      }
      R.push(chi_i);
    }
    (L, R)
  }
  // Naive O(n * ell) computation of all chi_i(r) values over the hypercube.
  pub fn compute_chis_at_r(r: &[Scalar]) -> Vec<Scalar> {
    let ell = r.len();
    let n = ell.pow2();
    let mut chis: Vec<Scalar> = Vec::new();
    for i in 0..n {
      let mut chi_i = Scalar::one();
      for j in 0..r.len() {
        let bit_j = (i & (1 << (r.len() - j - 1))) > 0;
        if bit_j {
          chi_i *= r[j];
        } else {
          chi_i *= Scalar::one() - r[j];
        }
      }
      chis.push(chi_i);
    }
    chis
  }
  // Outer product of L and R, flattened row-major.
  pub fn compute_outerproduct(L: Vec<Scalar>, R: Vec<Scalar>) -> Vec<Scalar> {
    assert_eq!(L.len(), R.len());
    (0..L.len())
      .map(|i| (0..R.len()).map(|j| L[i] * R[j]).collect::<Vec<Scalar>>())
      .collect::<Vec<Vec<Scalar>>>()
      .into_iter()
      .flatten()
      .collect::<Vec<Scalar>>()
  }
  // The memoized eq-table must match the naive chi computation.
  #[test]
  fn check_memoized_chis() {
    let mut csprng: OsRng = OsRng;
    let s = 10;
    let mut r: Vec<Scalar> = Vec::new();
    for _i in 0..s {
      r.push(Scalar::random(&mut csprng));
    }
    let chis = tests::compute_chis_at_r(&r);
    let chis_m = EqPolynomial::new(r).evals();
    assert_eq!(chis, chis_m);
  }
  // The full eq-table must equal the outer product of its factored halves.
  #[test]
  fn check_factored_chis() {
    let mut csprng: OsRng = OsRng;
    let s = 10;
    let mut r: Vec<Scalar> = Vec::new();
    for _i in 0..s {
      r.push(Scalar::random(&mut csprng));
    }
    let chis = EqPolynomial::new(r.clone()).evals();
    let (L, R) = EqPolynomial::new(r).compute_factored_evals();
    let O = compute_outerproduct(L, R);
    assert_eq!(chis, O);
  }
  // The memoized factored tables must match the naive factored computation.
  #[test]
  fn check_memoized_factored_chis() {
    let mut csprng: OsRng = OsRng;
    let s = 10;
    let mut r: Vec<Scalar> = Vec::new();
    for _i in 0..s {
      r.push(Scalar::random(&mut csprng));
    }
    let (L, R) = tests::compute_factored_chis_at_r(&r);
    let eq = EqPolynomial::new(r);
    let (L2, R2) = eq.compute_factored_evals();
    assert_eq!(L, L2);
    assert_eq!(R, R2);
  }
  // Full commit -> prove -> verify round trip for a polynomial evaluation.
  #[test]
  fn check_polynomial_commit() {
    let Z = vec![
      (1_usize).to_scalar(),
      (2_usize).to_scalar(),
      (1_usize).to_scalar(),
      (4_usize).to_scalar(),
    ];
    let poly = DensePolynomial::new(Z);
    // r = [4,3]
    let r = vec![(4_usize).to_scalar(), (3_usize).to_scalar()];
    let eval = poly.evaluate(&r);
    assert_eq!(eval, (28_usize).to_scalar());
    let gens = PolyCommitmentGens::new(poly.get_num_vars(), b"test-two");
    let (poly_commitment, blinds) = poly.commit(&gens, None);
    let mut random_tape = RandomTape::new(b"proof");
    let mut prover_transcript = Transcript::new(b"example");
    let (proof, C_Zr) = PolyEvalProof::prove(
      &poly,
      Some(&blinds),
      &r,
      &eval,
      None,
      &gens,
      &mut prover_transcript,
      &mut random_tape,
    );
    let mut verifier_transcript = Transcript::new(b"example");
    assert!(proof
      .verify(&gens, &mut verifier_transcript, &r, &C_Zr, &poly_commitment)
      .is_ok());
  }
}
================================================
FILE: packages/Spartan-secq/src/errors.rs
================================================
use core::fmt::Debug;
use thiserror::Error;
/// Errors returned when a proof fails to verify.
#[derive(Error, Debug)]
pub enum ProofVerifyError {
  /// Generic verification failure.
  #[error("Proof verification failed")]
  InternalError,
  /// A compressed group element could not be decompressed; carries the
  /// offending 32-byte encoding.
  #[error("Compressed group element failed to decompress: {0:?}")]
  DecompressionError([u8; 32]),
}
impl Default for ProofVerifyError {
  /// The default error is the generic `InternalError`.
  fn default() -> Self {
    ProofVerifyError::InternalError
  }
}
/// Errors returned when constructing or checking an R1CS instance or
/// assignment.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum R1CSError {
  /// returned if the number of constraints is not a power of 2
  NonPowerOfTwoCons,
  /// returned if the number of variables is not a power of 2
  NonPowerOfTwoVars,
  /// returned if a wrong number of inputs in an assignment are supplied
  InvalidNumberOfInputs,
  /// returned if a wrong number of variables in an assignment are supplied
  InvalidNumberOfVars,
  /// returned if a [u8;32] does not parse into a valid Scalar in the field of secq256k1
  InvalidScalar,
  /// returned if the supplied row or col in (row,col,val) tuple is out of range
  InvalidIndex,
}
================================================
FILE: packages/Spartan-secq/src/group.rs
================================================
use secq256k1::{AffinePoint, ProjectivePoint};
use super::errors::ProofVerifyError;
use super::scalar::{Scalar, ScalarBytes, ScalarBytesFromScalar};
use core::ops::{Mul, MulAssign};
use multiexp::multiexp;
/// Elliptic-curve group element: an affine point on secq256k1.
pub type GroupElement = secq256k1::AffinePoint;
/// Compressed/encoded wire representation of a group element.
pub type CompressedGroup = secq256k1::EncodedPoint;
/// Fallible decompression of an encoded group element.
pub trait CompressedGroupExt {
  type Group;
  fn unpack(&self) -> Result<Self::Group, ProofVerifyError>;
}
impl CompressedGroupExt for CompressedGroup {
  type Group = secq256k1::AffinePoint;
  /// Decompresses the encoded point, reporting the offending bytes on
  /// failure.
  fn unpack(&self) -> Result<Self::Group, ProofVerifyError> {
    let maybe_point = AffinePoint::decompress(*self);
    if bool::from(maybe_point.is_some()) {
      Ok(maybe_point.unwrap())
    } else {
      Err(ProofVerifyError::DecompressionError(
        (*self.to_bytes()).try_into().unwrap(),
      ))
    }
  }
}
/// Conversion from a compressed point back to a `GroupElement`.
pub trait DecompressEncodedPoint {
  fn decompress(&self) -> Option<GroupElement>;
}
impl DecompressEncodedPoint for CompressedGroup {
  /// Returns the decompressed point, or `None` if the encoding is invalid.
  ///
  /// Previously this called `self.unpack().unwrap()` and therefore panicked
  /// on malformed input; mapping the failure to `None` honors the `Option`
  /// contract and lets callers decide how to handle bad encodings.
  fn decompress(&self) -> Option<GroupElement> {
    self.unpack().ok()
  }
}
// In-place scalar multiplication; the scalar is first decompressed to its
// raw representation.
impl<'b> MulAssign<&'b Scalar> for GroupElement {
  fn mul_assign(&mut self, scalar: &'b Scalar) {
    let result = (self as &GroupElement) * Scalar::decompress_scalar(scalar);
    *self = result;
  }
}
// point * scalar (by reference).
impl<'a, 'b> Mul<&'b Scalar> for &'a GroupElement {
  type Output = GroupElement;
  fn mul(self, scalar: &'b Scalar) -> GroupElement {
    *self * Scalar::decompress_scalar(scalar)
  }
}
// scalar * point (by reference).
impl<'a, 'b> Mul<&'b GroupElement> for &'a Scalar {
  type Output = GroupElement;
  fn mul(self, point: &'b GroupElement) -> GroupElement {
    (*point * Scalar::decompress_scalar(self)).into()
  }
}
// Given `impl Mul<&RHS> for &LHS`, derives the three remaining
// owned/borrowed operand combinations by delegating to the by-reference
// implementation.
macro_rules! define_mul_variants {
  (LHS = $lhs:ty, RHS = $rhs:ty, Output = $out:ty) => {
    impl<'b> Mul<&'b $rhs> for $lhs {
      type Output = $out;
      fn mul(self, rhs: &'b $rhs) -> $out {
        &self * rhs
      }
    }
    impl<'a> Mul<$rhs> for &'a $lhs {
      type Output = $out;
      fn mul(self, rhs: $rhs) -> $out {
        self * &rhs
      }
    }
    impl Mul<$rhs> for $lhs {
      type Output = $out;
      fn mul(self, rhs: $rhs) -> $out {
        &self * &rhs
      }
    }
  };
}
// Derives `MulAssign<RHS>` (by value) from `MulAssign<&RHS>`.
macro_rules! define_mul_assign_variants {
  (LHS = $lhs:ty, RHS = $rhs:ty) => {
    impl MulAssign<$rhs> for $lhs {
      fn mul_assign(&mut self, rhs: $rhs) {
        *self *= &rhs;
      }
    }
  };
}
// Instantiate the operator impls for scalar-point multiplication.
define_mul_assign_variants!(LHS = GroupElement, RHS = Scalar);
define_mul_variants!(LHS = GroupElement, RHS = Scalar, Output = GroupElement);
define_mul_variants!(LHS = Scalar, RHS = GroupElement, Output = GroupElement);
/// Variable-time multi-scalar multiplication: sum_i scalars[i] * points[i].
pub trait VartimeMultiscalarMul {
  type Scalar;
  fn vartime_multiscalar_mul(scalars: Vec<Scalar>, points: Vec<GroupElement>) -> Self;
}
impl VartimeMultiscalarMul for GroupElement {
  type Scalar = super::scalar::Scalar;
  // TODO Borrow the arguments so we don't have to clone them, as it was in the original implementation
  /// Performs the MSM in projective coordinates and converts the
  /// accumulated result back to affine.
  fn vartime_multiscalar_mul(scalars: Vec<Scalar>, points: Vec<GroupElement>) -> Self {
    let mut pairs: Vec<(ScalarBytes, ProjectivePoint)> = Vec::with_capacity(scalars.len());
    for (i, s) in scalars.into_iter().enumerate() {
      // Indexing panics if fewer points than scalars were supplied.
      pairs.push((
        Scalar::decompress_scalar(&s),
        ProjectivePoint::from(points[i].0),
      ));
    }
    let acc = multiexp::<ProjectivePoint>(pairs.as_slice());
    AffinePoint(acc.to_affine())
  }
}
#[cfg(test)]
mod tests {
  use super::*;
  // MSM of [1, 2, 3] against three copies of the generator must equal 6 * G.
  #[test]
  fn msm() {
    let scalars = vec![Scalar::from(1), Scalar::from(2), Scalar::from(3)];
    let points = vec![
      GroupElement::generator(),
      GroupElement::generator(),
      GroupElement::generator(),
    ];
    let result = GroupElement::vartime_multiscalar_mul(scalars, points);
    assert_eq!(result, GroupElement::generator() * Scalar::from(6));
  }
}
================================================
FILE: packages/Spartan-secq/src/lib.rs
================================================
#![allow(non_snake_case)]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]
#![allow(clippy::assertions_on_result_states)]
extern crate byteorder;
extern crate core;
extern crate digest;
extern crate merlin;
extern crate rand;
extern crate sha3;
#[cfg(feature = "multicore")]
extern crate rayon;
mod commitments;
mod dense_mlpoly;
mod errors;
mod group;
mod math;
mod nizk;
mod product_tree;
mod r1csinstance;
mod r1csproof;
mod random;
mod scalar;
mod sparse_mlpoly;
mod sumcheck;
mod timer;
mod transcript;
mod unipoly;
use core::cmp::max;
use errors::{ProofVerifyError, R1CSError};
use merlin::Transcript;
use r1csinstance::{
R1CSCommitment, R1CSCommitmentGens, R1CSDecommitment, R1CSEvalProof, R1CSInstance,
};
use r1csproof::{R1CSGens, R1CSProof};
use random::RandomTape;
use scalar::Scalar;
use serde::{Deserialize, Serialize};
use timer::Timer;
use transcript::{AppendToTranscript, ProofTranscript};
/// `ComputationCommitment` holds a public preprocessed NP statement (e.g., R1CS)
pub struct ComputationCommitment {
  // Commitment to the R1CS matrices.
  comm: R1CSCommitment,
}
/// `ComputationDecommitment` holds information to decommit `ComputationCommitment`
pub struct ComputationDecommitment {
  // Decommitment data for the matrix commitment above.
  decomm: R1CSDecommitment,
}
/// `Assignment` holds an assignment of values to either the inputs or variables in an `Instance`
#[derive(Serialize, Deserialize, Clone)]
pub struct Assignment {
  // One field element per variable (or input).
  assignment: Vec<Scalar>,
}
impl Assignment {
  /// Constructs a new `Assignment` from a slice of 32-byte scalar encodings.
  ///
  /// Returns `R1CSError::InvalidScalar` if any entry does not parse into a
  /// valid field element.
  pub fn new(assignment: &[[u8; 32]]) -> Result<Assignment, R1CSError> {
    // Parse directly with an early return instead of the previous
    // closure + is_err()/unwrap() dance, which remapped the (only possible)
    // error to the same InvalidScalar value anyway.
    let mut assignment_scalar: Vec<Scalar> = Vec::with_capacity(assignment.len());
    for v in assignment {
      let val = Scalar::from_bytes(v);
      if val.is_some().unwrap_u8() == 1 {
        assignment_scalar.push(val.unwrap());
      } else {
        return Err(R1CSError::InvalidScalar);
      }
    }
    Ok(Assignment {
      assignment: assignment_scalar,
    })
  }
  /// Pads the assignment with zeros up to `len` entries.
  ///
  /// Panics if `len` is not strictly larger than the current length.
  fn pad(&self, len: usize) -> VarsAssignment {
    // check that the new length is higher than current length
    assert!(len > self.assignment.len());
    // resize() appends zeros in place, avoiding the temporary zero vector
    // the previous extend(vec![...]) allocated.
    let mut padded_assignment = self.assignment.clone();
    padded_assignment.resize(len, Scalar::zero());
    VarsAssignment {
      assignment: padded_assignment,
    }
  }
}
/// `VarsAssignment` holds an assignment of values to variables in an `Instance`
pub type VarsAssignment = Assignment;
/// `InputsAssignment` holds an assignment of values to variables in an `Instance`
pub type InputsAssignment = Assignment;
/// `Instance` holds the description of R1CS matrices and a hash of the matrices
#[derive(Serialize, Deserialize)]
pub struct Instance {
  /// R1CS instance
  pub inst: R1CSInstance,
  // Digest of `inst`, computed once at construction via `get_digest`.
  digest: Vec<u8>,
}
impl Instance {
/// Constructs a new `Instance` and an associated satisfying assignment
pub fn new(
num_cons: usize,
num_vars: usize,
num_inputs: usize,
A: &[(usize, usize, [u8; 32])],
B: &[(usize, usize, [u8; 32])],
C: &[(usize, usize, [u8; 32])],
) -> Result<Instance, R1CSError> {
let (num_vars_padded, num_cons_padded) = {
let num_vars_padded = {
let mut num_vars_padded = num_vars;
// ensure that num_inputs + 1 <= num_vars
num_vars_padded = max(num_vars_padded, num_inputs + 1);
// ensure that num_vars_padded a power of two
if num_vars_padded.next_power_of_two() != num_vars_padded {
num_vars_padded = num_vars_padded.next_power_of_two();
}
num_vars_padded
};
let num_cons_padded = {
let mut num_cons_padded = num_cons;
// ensure that num_cons_padded is at least 2
if num_cons_padded == 0 || num_cons_padded == 1 {
num_cons_padded = 2;
}
// ensure that num_cons_padded is power of 2
if num_cons.next_power_of_two() != num_cons {
num_cons_padded = num_cons.next_power_of_two();
}
num_cons_padded
};
(num_vars_padded, num_cons_padded)
};
let bytes_to_scalar =
|tups: &[(usize, usize, [u8; 32])]| -> Result<Vec<(usize, usize, Scalar)>, R1CSError> {
let mut mat: Vec<(usize, usize, Scalar)> = Vec::new();
for &(row, col, val_bytes) in tups {
// row must be smaller than num_cons
if row >= num_cons {
return Err(R1CSError::InvalidIndex);
}
// col must be smaller than num_vars + 1 + num_inputs
if col >= num_vars + 1 + num_inputs {
return Err(R1CSError::InvalidIndex);
}
let val = Scalar::from_bytes(&val_bytes);
if val.is_some().unwrap_u8() == 1 {
// if col >= num_vars, it means that it is referencing a 1 or input in the satisfying
// assignment
if col >= num_vars {
mat.push((row, col + num_vars_padded - num_vars, val.unwrap()));
} else {
mat.push((row, col, val.unwrap()));
}
} else {
return Err(R1CSError::InvalidScalar);
}
}
// pad with additional constraints up until num_cons_padded if the original constraints were 0 or 1
// we do not need to pad otherwise because the dummy constraints are implicit in the sum-check protocol
if num_cons == 0 || num_cons == 1 {
for i in tups.len()..num_cons_padded {
mat.push((i, num_vars, Scalar::zero()));
}
}
Ok(mat)
};
let A_scalar = bytes_to_scalar(A);
if A_scalar.is_err() {
return Err(A_scalar.err().unwrap());
}
let B_scalar = bytes_to_scalar(B);
if B_scalar.is_err() {
return Err(B_scalar.err().unwrap());
}
let C_scalar = bytes_to_scalar(C);
if C_scalar.is_err() {
return Err(C_scalar.err().unwrap());
}
let inst = R1CSInstance::new(
num_cons_padded,
num_vars_padded,
num_inputs,
&A_scalar.unwrap(),
&B_scalar.unwrap(),
&C_scalar.unwrap(),
);
let digest = inst.get_digest();
Ok(Instance { inst, digest })
}
/// Checks if a given R1CSInstance is satisfiable with a given variables and inputs assignments
pub fn is_sat(
&self,
vars: &VarsAssignment,
inputs: &InputsAssignment,
) -> Result<bool, R1CSError> {
if vars.assignment.len() > self.inst.get_num_vars() {
return Err(R1CSError::InvalidNumberOfInputs);
}
if inputs.assignment.len() != self.inst.get_num_inputs() {
return Err(R1CSError::InvalidNumberOfInputs);
}
// we might need to pad variables
let padded_vars = {
let num_padded_vars = self.inst.get_num_vars();
let num_vars = vars.assignment.len();
if num_padded_vars > num_vars {
vars.pad(num_padded_vars)
} else {
vars.clone()
}
};
Ok(
self
.inst
.is_sat(&padded_vars.assignment, &inputs.assignment),
)
}
/// Constructs a new synthetic R1CS `Instance` and an associated satisfying assignment
pub fn produce_synthetic_r1cs(
  num_cons: usize,
  num_vars: usize,
  num_inputs: usize,
) -> (Instance, VarsAssignment, InputsAssignment) {
  // delegate instance generation to the lower-level R1CSInstance helper
  let (inst, assignment_vars, assignment_inputs) =
    R1CSInstance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
  // cache the digest so the instance can be bound to transcripts cheaply
  let digest = inst.get_digest();
  let instance = Instance { inst, digest };
  (
    instance,
    VarsAssignment {
      assignment: assignment_vars,
    },
    InputsAssignment {
      assignment: assignment_inputs,
    },
  )
}
}
/// `SNARKGens` holds public parameters for producing and verifying proofs with the Spartan SNARK
pub struct SNARKGens {
  /// Generators for the R1CS satisfiability proof (sum-check phase).
  gens_r1cs_sat: R1CSGens,
  /// Generators for committing to and proving evaluations of the R1CS matrices.
  gens_r1cs_eval: R1CSCommitmentGens,
}

impl SNARKGens {
  /// Constructs a new `SNARKGens` given the size of the R1CS statement
  ///
  /// `num_nz_entries` specifies the maximum number of non-zero entries in any of the three R1CS matrices
  pub fn new(num_cons: usize, num_vars: usize, num_inputs: usize, num_nz_entries: usize) -> Self {
    // The variable count is padded up to a power of two that is also large
    // enough to hold the inputs plus the constant term. `next_power_of_two`
    // returns its argument unchanged when it is already a power of two, so
    // the previous explicit `if` guard around it was redundant.
    let num_vars_padded = max(num_vars, num_inputs + 1).next_power_of_two();
    let gens_r1cs_sat = R1CSGens::new(b"gens_r1cs_sat", num_cons, num_vars_padded);
    let gens_r1cs_eval = R1CSCommitmentGens::new(
      b"gens_r1cs_eval",
      num_cons,
      num_vars_padded,
      num_inputs,
      num_nz_entries,
    );
    SNARKGens {
      gens_r1cs_sat,
      gens_r1cs_eval,
    }
  }
}
/// `SNARK` holds a proof produced by Spartan SNARK
#[derive(Serialize, Deserialize, Debug)]
pub struct SNARK {
// proof that the R1CS instance is satisfiable (sum-check phase)
r1cs_sat_proof: R1CSProof,
// claimed evaluations of the matrices A, B, C at the point r = (rx, ry)
inst_evals: (Scalar, Scalar, Scalar),
// proof that the claimed evaluations are consistent with the committed matrices
r1cs_eval_proof: R1CSEvalProof,
}
impl SNARK {
// domain-separation label appended to the Fiat-Shamir transcript
fn protocol_name() -> &'static [u8] {
b"Spartan SNARK proof"
}
/// A public computation to create a commitment to an R1CS instance
pub fn encode(
inst: &Instance,
gens: &SNARKGens,
) -> (ComputationCommitment, ComputationDecommitment) {
let timer_encode = Timer::new("SNARK::encode");
let (comm, decomm) = inst.inst.commit(&gens.gens_r1cs_eval);
timer_encode.stop();
(
ComputationCommitment { comm },
ComputationDecommitment { decomm },
)
}
/// A method to produce a SNARK proof of the satisfiability of an R1CS instance
///
/// `vars` is taken by value because it may be padded up to the instance's
/// (power-of-two) variable count before proving.
pub fn prove(
inst: &Instance,
comm: &ComputationCommitment,
decomm: &ComputationDecommitment,
vars: VarsAssignment,
inputs: &InputsAssignment,
gens: &SNARKGens,
transcript: &mut Transcript,
) -> Self {
let timer_prove = Timer::new("SNARK::prove");
// we create a Transcript object seeded with a random Scalar
// to aid the prover produce its randomness
let mut random_tape = RandomTape::new(b"proof");
transcript.append_protocol_name(SNARK::protocol_name());
// bind the committed computation to the transcript before any challenge
comm.comm.append_to_transcript(b"comm", transcript);
let (r1cs_sat_proof, rx, ry) = {
let (proof, rx, ry) = {
// we might need to pad variables
let padded_vars = {
let num_padded_vars = inst.inst.get_num_vars();
let num_vars = vars.assignment.len();
if num_padded_vars > num_vars {
vars.pad(num_padded_vars)
} else {
vars
}
};
R1CSProof::prove(
&inst.inst,
padded_vars.assignment,
&inputs.assignment,
&gens.gens_r1cs_sat,
transcript,
&mut random_tape,
)
};
// serialization here is only used to log the proof size
let proof_encoded: Vec<u8> = bincode::serialize(&proof).unwrap();
Timer::print(&format!("len_r1cs_sat_proof {:?}", proof_encoded.len()));
(proof, rx, ry)
};
// We send evaluations of A, B, C at r = (rx, ry) as claims
// to enable the verifier complete the first sum-check
let timer_eval = Timer::new("eval_sparse_polys");
let inst_evals = {
let (Ar, Br, Cr) = inst.inst.evaluate(&rx, &ry);
// absorb the claims so subsequent challenges bind them
Ar.append_to_transcript(b"Ar_claim", transcript);
Br.append_to_transcript(b"Br_claim", transcript);
Cr.append_to_transcript(b"Cr_claim", transcript);
(Ar, Br, Cr)
};
timer_eval.stop();
let r1cs_eval_proof = {
let proof = R1CSEvalProof::prove(
&decomm.decomm,
&rx,
&ry,
&inst_evals,
&gens.gens_r1cs_eval,
transcript,
&mut random_tape,
);
// serialization here is only used to log the proof size
let proof_encoded: Vec<u8> = bincode::serialize(&proof).unwrap();
Timer::print(&format!("len_r1cs_eval_proof {:?}", proof_encoded.len()));
proof
};
timer_prove.stop();
SNARK {
r1cs_sat_proof,
inst_evals,
r1cs_eval_proof,
}
}
/// A method to verify the SNARK proof of the satisfiability of an R1CS instance
///
/// Transcript operations here must mirror `prove` exactly, otherwise the
/// Fiat-Shamir challenges diverge and verification fails.
pub fn verify(
&self,
comm: &ComputationCommitment,
input: &InputsAssignment,
transcript: &mut Transcript,
gens: &SNARKGens,
) -> Result<(), ProofVerifyError> {
let timer_verify = Timer::new("SNARK::verify");
transcript.append_protocol_name(SNARK::protocol_name());
// append a commitment to the computation to the transcript
comm.comm.append_to_transcript(b"comm", transcript);
let timer_sat_proof = Timer::new("verify_sat_proof");
// NOTE(review): a wrong-length input assignment panics here rather than
// returning Err — confirm callers expect this contract
assert_eq!(input.assignment.len(), comm.comm.get_num_inputs());
let (rx, ry) = self.r1cs_sat_proof.verify(
comm.comm.get_num_vars(),
comm.comm.get_num_cons(),
&input.assignment,
&self.inst_evals,
transcript,
&gens.gens_r1cs_sat,
)?;
timer_sat_proof.stop();
let timer_eval_proof = Timer::new("verify_eval_proof");
let (Ar, Br, Cr) = &self.inst_evals;
// absorb the claimed evaluations exactly as the prover did
Ar.append_to_transcript(b"Ar_claim", transcript);
Br.append_to_transcript(b"Br_claim", transcript);
Cr.append_to_transcript(b"Cr_claim", transcript);
self.r1cs_eval_proof.verify(
&comm.comm,
&rx,
&ry,
&self.inst_evals,
&gens.gens_r1cs_eval,
transcript,
)?;
timer_eval_proof.stop();
timer_verify.stop();
Ok(())
}
}
/// `NIZKGens` holds public parameters for producing and verifying proofs with the Spartan NIZK
pub struct NIZKGens {
  /// Generators for the R1CS satisfiability proof (sum-check phase).
  gens_r1cs_sat: R1CSGens,
}

impl NIZKGens {
  /// Constructs a new `NIZKGens` given the size of the R1CS statement
  pub fn new(num_cons: usize, num_vars: usize, num_inputs: usize) -> Self {
    // Pad the variable count to a power of two that can also hold the inputs
    // plus the constant term. `next_power_of_two` already returns its argument
    // unchanged for powers of two, so the previous explicit guard was redundant.
    let num_vars_padded = max(num_vars, num_inputs + 1).next_power_of_two();
    let gens_r1cs_sat = R1CSGens::new(b"gens_r1cs_sat", num_cons, num_vars_padded);
    NIZKGens { gens_r1cs_sat }
  }
}
/// `NIZK` holds a proof produced by Spartan NIZK
#[derive(Serialize, Deserialize, Debug)]
pub struct NIZK {
// proof that the R1CS instance is satisfiable
r1cs_sat_proof: R1CSProof,
// the evaluation point r = (rx, ry) claimed by the prover
r: (Vec<Scalar>, Vec<Scalar>),
}
impl NIZK {
// domain-separation label appended to the Fiat-Shamir transcript
fn protocol_name() -> &'static [u8] {
b"Spartan NIZK proof"
}
/// A method to produce a NIZK proof of the satisfiability of an R1CS instance
///
/// `vars` is taken by value because it may be padded up to the instance's
/// (power-of-two) variable count before proving.
pub fn prove(
inst: &Instance,
vars: VarsAssignment,
input: &InputsAssignment,
gens: &NIZKGens,
transcript: &mut Transcript,
) -> Self {
let timer_prove = Timer::new("NIZK::prove");
// we create a Transcript object seeded with a random Scalar
// to aid the prover produce its randomness
let mut random_tape = RandomTape::new(b"proof");
transcript.append_protocol_name(NIZK::protocol_name());
// bind the instance to the transcript via its precomputed digest
transcript.append_message(b"R1CSInstanceDigest", &inst.digest);
let (r1cs_sat_proof, rx, ry) = {
// we might need to pad variables
let padded_vars = {
let num_padded_vars = inst.inst.get_num_vars();
let num_vars = vars.assignment.len();
if num_padded_vars > num_vars {
vars.pad(num_padded_vars)
} else {
vars
}
};
let (proof, rx, ry) = R1CSProof::prove(
&inst.inst,
padded_vars.assignment,
&input.assignment,
&gens.gens_r1cs_sat,
transcript,
&mut random_tape,
);
// serialization here is only used to log the proof size
let proof_encoded: Vec<u8> = bincode::serialize(&proof).unwrap();
Timer::print(&format!("len_r1cs_sat_proof {:?}", proof_encoded.len()));
(proof, rx, ry)
};
timer_prove.stop();
NIZK {
r1cs_sat_proof,
r: (rx, ry),
}
}
/// A method to verify a NIZK proof of the satisfiability of an R1CS instance
///
/// Transcript operations must mirror `prove` exactly so the Fiat-Shamir
/// challenges match.
pub fn verify(
&self,
inst: &Instance,
input: &InputsAssignment,
transcript: &mut Transcript,
gens: &NIZKGens,
) -> Result<(), ProofVerifyError> {
let timer_verify = Timer::new("NIZK::verify");
transcript.append_protocol_name(NIZK::protocol_name());
transcript.append_message(b"R1CSInstanceDigest", &inst.digest);
// We send evaluations of A, B, C at r = (rx, ry) as claims
// to enable the verifier complete the first sum-check
let timer_eval = Timer::new("eval_sparse_polys");
let (claimed_rx, claimed_ry) = &self.r;
let inst_evals = inst.inst.evaluate(claimed_rx, claimed_ry);
timer_eval.stop();
let timer_sat_proof = Timer::new("verify_sat_proof");
// NOTE(review): a wrong-length input assignment panics here rather than
// returning Err — confirm callers expect this contract
assert_eq!(input.assignment.len(), inst.inst.get_num_inputs());
let (rx, ry) = self.r1cs_sat_proof.verify(
inst.inst.get_num_vars(),
inst.inst.get_num_cons(),
&input.assignment,
&inst_evals,
transcript,
&gens.gens_r1cs_sat,
)?;
// verify if claimed rx and ry are correct
// NOTE(review): a mismatch panics via assert instead of returning a
// ProofVerifyError — an invalid proof can abort the process; confirm intended
assert_eq!(rx, *claimed_rx);
assert_eq!(ry, *claimed_ry);
timer_sat_proof.stop();
timer_verify.stop();
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
// End-to-end SNARK round trip on a synthetic instance: encode, prove, verify.
#[test]
pub fn check_snark() {
let num_vars = 256;
let num_cons = num_vars;
let num_inputs = 10;
// produce public generators
let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons);
// produce a synthetic R1CSInstance
let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
// create a commitment to R1CSInstance
let (comm, decomm) = SNARK::encode(&inst, &gens);
// produce a proof
let mut prover_transcript = Transcript::new(b"example");
let proof = SNARK::prove(
&inst,
&comm,
&decomm,
vars,
&inputs,
&gens,
&mut prover_transcript,
);
// verify the proof
// note: the verifier's transcript must start from the same label as the prover's
let mut verifier_transcript = Transcript::new(b"example");
assert!(proof
.verify(&comm, &inputs, &mut verifier_transcript, &gens)
.is_ok());
}
// Instance::new must reject a matrix entry whose row index (100) exceeds num_cons.
#[test]
pub fn check_r1cs_invalid_index() {
let num_cons = 4;
let num_vars = 8;
let num_inputs = 1;
let zero: [u8; 32] = [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0,
];
let A = vec![(0, 0, zero)];
let B = vec![(100, 1, zero)];
let C = vec![(1, 1, zero)];
let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C);
assert!(inst.is_err());
assert_eq!(inst.err(), Some(R1CSError::InvalidIndex));
}
// Instance::new must reject a matrix entry whose bytes do not encode a valid field element.
#[test]
pub fn check_r1cs_invalid_scalar() {
let num_cons = 4;
let num_vars = 8;
let num_inputs = 1;
let zero: [u8; 32] = [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0,
];
// all-0xFF bytes exceed the field modulus, so Scalar::from_bytes rejects them
let larger_than_mod = [255; 32];
let A = vec![(0, 0, zero)];
let B = vec![(1, 1, larger_than_mod)];
let C = vec![(1, 1, zero)];
let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C);
assert!(inst.is_err());
assert_eq!(inst.err(), Some(R1CSError::InvalidScalar));
}
// Exercises the constraint/variable padding paths (num_cons = 1, num_vars = 0)
// through both the SNARK and NIZK pipelines.
#[test]
fn test_padded_constraints() {
// parameters of the R1CS instance
let num_cons = 1;
let num_vars = 0;
let num_inputs = 3;
let num_non_zero_entries = 3;
// We will encode the above constraints into three matrices, where
// the coefficients in the matrix are in the little-endian byte order
let mut A: Vec<(usize, usize, [u8; 32])> = Vec::new();
let mut B: Vec<(usize, usize, [u8; 32])> = Vec::new();
let mut C: Vec<(usize, usize, [u8; 32])> = Vec::new();
// Create a^2 + b + 13
A.push((0, num_vars + 2, Scalar::one().to_bytes())); // 1*a
B.push((0, num_vars + 2, Scalar::one().to_bytes())); // 1*a
C.push((0, num_vars + 1, Scalar::one().to_bytes())); // 1*z
C.push((0, num_vars, (-Scalar::from(13u64)).to_bytes())); // -13*1
C.push((0, num_vars + 3, (-Scalar::one()).to_bytes())); // -1*b
// Var Assignments (Z_0 = 16 is the only output)
let vars = vec![Scalar::zero().to_bytes(); num_vars];
// create an InputsAssignment (a = 1, b = 2)
let mut inputs = vec![Scalar::zero().to_bytes(); num_inputs];
inputs[0] = Scalar::from(16u64).to_bytes();
inputs[1] = Scalar::from(1u64).to_bytes();
inputs[2] = Scalar::from(2u64).to_bytes();
let assignment_inputs = InputsAssignment::new(&inputs).unwrap();
let assignment_vars = VarsAssignment::new(&vars).unwrap();
// Check if instance is satisfiable
let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap();
let res = inst.is_sat(&assignment_vars, &assignment_inputs);
assert!(res.unwrap(), "should be satisfied");
// SNARK public params
let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_non_zero_entries);
// create a commitment to the R1CS instance
let (comm, decomm) = SNARK::encode(&inst, &gens);
// produce a SNARK
let mut prover_transcript = Transcript::new(b"snark_example");
let proof = SNARK::prove(
&inst,
&comm,
&decomm,
assignment_vars.clone(),
&assignment_inputs,
&gens,
&mut prover_transcript,
);
// verify the SNARK
let mut verifier_transcript = Transcript::new(b"snark_example");
assert!(proof
.verify(&comm, &assignment_inputs, &mut verifier_transcript, &gens)
.is_ok());
// NIZK public params
let gens = NIZKGens::new(num_cons, num_vars, num_inputs);
// produce a NIZK
let mut prover_transcript = Transcript::new(b"nizk_example");
let proof = NIZK::prove(
&inst,
assignment_vars,
&assignment_inputs,
&gens,
&mut prover_transcript,
);
// verify the NIZK
let mut verifier_transcript = Transcript::new(b"nizk_example");
assert!(proof
.verify(&inst, &assignment_inputs, &mut verifier_transcript, &gens)
.is_ok());
}
}
================================================
FILE: packages/Spartan-secq/src/math.rs
================================================
/// Small integer-math helpers used throughout the crate.
pub trait Math {
  fn square_root(self) -> usize;
  fn pow2(self) -> usize;
  fn get_bits(self, num_bits: usize) -> Vec<bool>;
  fn log_2(self) -> usize;
}

impl Math for usize {
  // Integer square root via f64; NOTE(review): exact only while the value
  // fits within f64's 53-bit mantissa — confirm inputs stay in that range.
  #[inline]
  fn square_root(self) -> usize {
    (self as f64).sqrt() as usize
  }

  /// Returns 2 raised to the power `self`.
  #[inline]
  fn pow2(self) -> usize {
    2usize.pow(self as u32)
  }

  /// Returns the low `num_bits` bits of `self`, most-significant bit first.
  fn get_bits(self, num_bits: usize) -> Vec<bool> {
    (0..num_bits)
      .map(|i| (self >> (num_bits - i - 1)) & 1 == 1)
      .collect()
  }

  /// Base-2 logarithm: exact for powers of two, rounded up otherwise.
  fn log_2(self) -> usize {
    assert_ne!(self, 0);
    if self.is_power_of_two() {
      // 2^k has exactly k trailing zeros
      self.trailing_zeros() as usize
    } else {
      // bit length of self, i.e. ceil(log2(self)) for non-powers of two
      (0usize.leading_zeros() - self.leading_zeros()) as usize
    }
  }
}
================================================
FILE: packages/Spartan-secq/src/nizk/bullet.rs
================================================
//! This module is an adaptation of code from the bulletproofs crate.
//! See NOTICE.md for more details
#![allow(non_snake_case)]
#![allow(clippy::type_complexity)]
#![allow(clippy::too_many_arguments)]
use super::super::errors::ProofVerifyError;
use super::super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul};
use super::super::math::Math;
use super::super::scalar::Scalar;
use super::super::transcript::ProofTranscript;
use crate::group::DecompressEncodedPoint;
use core::iter;
use merlin::Transcript;
use serde::{Deserialize, Serialize};
#[derive(Debug, Serialize, Deserialize)]
pub struct BulletReductionProof {
// left cross-term commitments, one per reduction round
L_vec: Vec<CompressedGroup>,
// right cross-term commitments, one per reduction round
R_vec: Vec<CompressedGroup>,
}
impl BulletReductionProof {
/// Create an inner-product proof.
///
/// The proof is created with respect to the bases \\(G\\).
///
/// The `transcript` is passed in as a parameter so that the
/// challenges depend on the *entire* transcript (including parent
/// protocols).
///
/// The lengths of the vectors must all be the same, and must all be
/// either 0 or a power of 2.
pub fn prove(
transcript: &mut Transcript,
Q: &GroupElement,
G_vec: &[GroupElement],
H: &GroupElement,
a_vec: &[Scalar],
b_vec: &[Scalar],
blind: &Scalar,
blinds_vec: &[(Scalar, Scalar)],
) -> (
BulletReductionProof,
GroupElement,
Scalar,
Scalar,
GroupElement,
Scalar,
) {
// Create slices G, H, a, b backed by their respective
// vectors. This lets us reslice as we compress the lengths
// of the vectors in the main loop below.
let mut G = &mut G_vec.to_owned()[..];
let mut a = &mut a_vec.to_owned()[..];
let mut b = &mut b_vec.to_owned()[..];
// All of the input vectors must have a length that is a power of two.
let mut n = G.len();
assert!(n.is_power_of_two());
let lg_n = n.log_2();
// All of the input vectors must have the same length.
assert_eq!(G.len(), n);
assert_eq!(a.len(), n);
assert_eq!(b.len(), n);
// two blinds are consumed per reduction round
assert_eq!(blinds_vec.len(), 2 * lg_n);
let mut L_vec = Vec::with_capacity(lg_n);
let mut R_vec = Vec::with_capacity(lg_n);
let mut blinds_iter = blinds_vec.iter();
let mut blind_fin = *blind;
// each iteration halves the vectors, producing one (L, R) pair
while n != 1 {
n /= 2;
let (a_L, a_R) = a.split_at_mut(n);
let (b_L, b_R) = b.split_at_mut(n);
let (G_L, G_R) = G.split_at_mut(n);
// cross inner products between the two halves
let c_L = inner_product(a_L, b_R);
let c_R = inner_product(a_R, b_L);
let (blind_L, blind_R) = blinds_iter.next().unwrap();
// L = <a_L, G_R> + c_L*Q + blind_L*H
let L = GroupElement::vartime_multiscalar_mul(
a_L
.iter()
.chain(iter::once(&c_L))
.chain(iter::once(blind_L))
.map(|s| *s)
.collect(),
G_R
.iter()
.chain(iter::once(Q))
.chain(iter::once(H))
.map(|s| *s)
.collect(),
);
// R = <a_R, G_L> + c_R*Q + blind_R*H
let R = GroupElement::vartime_multiscalar_mul(
a_R
.iter()
.chain(iter::once(&c_R))
.chain(iter::once(blind_R))
.map(|s| *s)
.collect(),
G_L
.iter()
.chain(iter::once(Q))
.chain(iter::once(H))
.map(|s| *s)
.collect(),
);
transcript.append_point(b"L", &L.compress());
transcript.append_point(b"R", &R.compress());
// Fiat-Shamir challenge for this round
let u = transcript.challenge_scalar(b"u");
let u_inv = u.invert().unwrap();
// fold both halves into the left half using u and u^{-1}
for i in 0..n {
a_L[i] = a_L[i] * u + u_inv * a_R[i];
b_L[i] = b_L[i] * u_inv + u * b_R[i];
G_L[i] =
GroupElement::vartime_multiscalar_mul([u_inv, u].to_vec(), [G_L[i], G_R[i]].to_vec());
}
// accumulate the blinds of the cross terms into the final blind
blind_fin = blind_fin + blind_L * u * u + blind_R * u_inv * u_inv;
L_vec.push(L.compress());
R_vec.push(R.compress());
a = a_L;
b = b_L;
G = G_L;
}
// commitment to the fully-reduced single-element statement
let Gamma_hat = GroupElement::vartime_multiscalar_mul(
[a[0], a[0] * b[0], blind_fin].to_vec(),
[G[0], *Q, *H].to_vec(),
);
(
BulletReductionProof { L_vec, R_vec },
Gamma_hat,
a[0],
b[0],
G[0],
blind_fin,
)
}
/// Computes three vectors of verification scalars \\([u\_{i}^{2}]\\), \\([u\_{i}^{-2}]\\) and \\([s\_{i}]\\) for combined multiscalar multiplication
/// in a parent protocol. See [inner product protocol notes](index.html#verification-equation) for details.
/// The verifier must provide the input length \\(n\\) explicitly to avoid unbounded allocation within the inner product proof.
fn verification_scalars(
&self,
n: usize,
transcript: &mut Transcript,
) -> Result<(Vec<Scalar>, Vec<Scalar>, Vec<Scalar>), ProofVerifyError> {
let lg_n = self.L_vec.len();
if lg_n >= 32 {
// 4 billion multiplications should be enough for anyone
// and this check prevents overflow in 1<<lg_n below.
return Err(ProofVerifyError::InternalError);
}
if n != (1 << lg_n) {
return Err(ProofVerifyError::InternalError);
}
// 1. Recompute x_k,...,x_1 based on the proof transcript
let mut challenges = Vec::with_capacity(lg_n);
for (L, R) in self.L_vec.iter().zip(self.R_vec.iter()) {
transcript.append_point(b"L", L);
transcript.append_point(b"R", R);
challenges.push(transcript.challenge_scalar(b"u"));
}
// 2. Compute 1/(u_k...u_1) and 1/u_k, ..., 1/u_1
let mut challenges_inv = challenges.clone();
let allinv = Scalar::batch_invert(&mut challenges_inv);
// 3. Compute u_i^2 and (1/u_i)^2
for i in 0..lg_n {
challenges[i] = challenges[i].square();
challenges_inv[i] = challenges_inv[i].square();
}
let challenges_sq = challenges;
let challenges_inv_sq = challenges_inv;
// 4. Compute s values inductively.
let mut s = Vec::with_capacity(n);
s.push(allinv);
for i in 1..n {
// lg_i = floor(log2(i))
let lg_i = (32 - 1 - (i as u32).leading_zeros()) as usize;
let k = 1 << lg_i;
// The challenges are stored in "creation order" as [u_k,...,u_1],
// so u_{lg(i)+1} = is indexed by (lg_n-1) - lg_i
let u_lg_i_sq = challenges_sq[(lg_n - 1) - lg_i];
s.push(s[i - k] * u_lg_i_sq);
}
Ok((challenges_sq, challenges_inv_sq, s))
}
/// This method is for testing that proof generation work,
/// but for efficiency the actual protocols would use `verification_scalars`
/// method to combine inner product verification with other checks
/// in a single multiscalar multiplication.
pub fn verify(
&self,
n: usize,
a: &[Scalar],
transcript: &mut Transcript,
Gamma: &GroupElement,
G: &[GroupElement],
) -> Result<(GroupElement, GroupElement, Scalar), ProofVerifyError> {
let (u_sq, u_inv_sq, s) = self.verification_scalars(n, transcript)?;
// decompress all round commitments, rejecting malformed points
let Ls = self
.L_vec
.iter()
.map(|p| p.decompress().ok_or(ProofVerifyError::InternalError))
.collect::<Result<Vec<_>, _>>()?;
let Rs = self
.R_vec
.iter()
.map(|p| p.decompress().ok_or(ProofVerifyError::InternalError))
.collect::<Result<Vec<_>, _>>()?;
// fold the generators and the public vector with the s values
let G_hat = GroupElement::vartime_multiscalar_mul(s.clone(), G.to_vec());
let a_hat = inner_product(a, &s);
// Gamma_hat = sum u_i^2 L_i + sum u_i^{-2} R_i + Gamma
let Gamma_hat = GroupElement::vartime_multiscalar_mul(
u_sq
.iter()
.chain(u_inv_sq.iter())
.chain(iter::once(&Scalar::one()))
.map(|s| *s)
.collect(),
Ls.iter()
.chain(Rs.iter())
.chain(iter::once(Gamma))
.map(|p| *p)
.collect(),
);
Ok((G_hat, Gamma_hat, a_hat))
}
}
/// Computes an inner product of two vectors
/// \\[
/// {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} = \sum\_{i=0}^{n-1} a\_i \cdot b\_i.
/// \\]
/// Panics if the lengths of \\(\mathbf{a}\\) and \\(\mathbf{b}\\) are not equal.
pub fn inner_product(a: &[Scalar], b: &[Scalar]) -> Scalar {
  assert!(
    a.len() == b.len(),
    "inner_product(a,b): lengths of vectors do not match"
  );
  // pairwise multiply and accumulate
  a.iter()
    .zip(b.iter())
    .fold(Scalar::zero(), |acc, (x, y)| acc + *x * *y)
}
================================================
FILE: packages/Spartan-secq/src/nizk/mod.rs
================================================
#![allow(clippy::too_many_arguments)]
use super::commitments::{Commitments, MultiCommitGens};
use super::errors::ProofVerifyError;
use super::group::{CompressedGroup, CompressedGroupExt};
use super::math::Math;
use super::random::RandomTape;
use super::scalar::Scalar;
use super::transcript::{AppendToTranscript, ProofTranscript};
use crate::group::DecompressEncodedPoint;
use merlin::Transcript;
use serde::{Deserialize, Serialize};
mod bullet;
use bullet::BulletReductionProof;
#[derive(Serialize, Deserialize, Debug)]
pub struct KnowledgeProof {
// first message: commitment to the random masks t1, t2
alpha: CompressedGroup,
// responses: z1 = x*c + t1 and z2 = r*c + t2
z1: Scalar,
z2: Scalar,
}
impl KnowledgeProof {
// domain-separation label for the Fiat-Shamir transcript
fn protocol_name() -> &'static [u8] {
b"knowledge proof"
}
// Sigma protocol proving knowledge of an opening (x, r) of the commitment
// C = x.commit(r, gens_n); returns the proof together with C.
pub fn prove(
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
random_tape: &mut RandomTape,
x: &Scalar,
r: &Scalar,
) -> (KnowledgeProof, CompressedGroup) {
transcript.append_protocol_name(KnowledgeProof::protocol_name());
// produce two random Scalars
let t1 = random_tape.random_scalar(b"t1");
let t2 = random_tape.random_scalar(b"t2");
let C = x.commit(r, gens_n).compress();
C.append_to_transcript(b"C", transcript);
let alpha = t1.commit(&t2, gens_n).compress();
alpha.append_to_transcript(b"alpha", transcript);
let c = transcript.challenge_scalar(b"c");
let z1 = x * c + t1;
let z2 = r * c + t2;
(KnowledgeProof { alpha, z1, z2 }, C)
}
// Verifies by checking commit(z1, z2) == c*C + alpha; the transcript
// operations must mirror `prove` so the challenge c matches.
pub fn verify(
&self,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
C: &CompressedGroup,
) -> Result<(), ProofVerifyError> {
transcript.append_protocol_name(KnowledgeProof::protocol_name());
C.append_to_transcript(b"C", transcript);
self.alpha.append_to_transcript(b"alpha", transcript);
let c = transcript.challenge_scalar(b"c");
let lhs = self.z1.commit(&self.z2, gens_n).compress();
let rhs = (c * C.unpack()? + self.alpha.unpack()?).compress();
if lhs == rhs {
Ok(())
} else {
Err(ProofVerifyError::InternalError)
}
}
}
#[derive(Serialize, Deserialize, Debug)]
pub struct EqualityProof {
// first message: alpha = r*h for a random mask r
alpha: CompressedGroup,
// response: z = c*(s1 - s2) + r
z: Scalar,
}
impl EqualityProof {
// domain-separation label for the Fiat-Shamir transcript
fn protocol_name() -> &'static [u8] {
b"equality proof"
}
// Sigma protocol proving that commitments C1 = commit(v1, s1) and
// C2 = commit(v2, s2) hide the same value; returns the proof with C1, C2.
pub fn prove(
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
random_tape: &mut RandomTape,
v1: &Scalar,
s1: &Scalar,
v2: &Scalar,
s2: &Scalar,
) -> (EqualityProof, CompressedGroup, CompressedGroup) {
transcript.append_protocol_name(EqualityProof::protocol_name());
// produce a random Scalar
let r = random_tape.random_scalar(b"r");
let C1 = v1.commit(s1, gens_n).compress();
C1.append_to_transcript(b"C1", transcript);
let C2 = v2.commit(s2, gens_n).compress();
C2.append_to_transcript(b"C2", transcript);
let alpha = (r * gens_n.h).compress();
alpha.append_to_transcript(b"alpha", transcript);
let c = transcript.challenge_scalar(b"c");
// if v1 == v2, C1 - C2 depends only on the blinds s1 - s2
let z = c * (s1 - s2) + r;
(EqualityProof { alpha, z }, C1, C2)
}
// Verifies by checking z*h == c*(C1 - C2) + alpha; transcript operations
// must mirror `prove` so the challenge c matches.
pub fn verify(
&self,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
C1: &CompressedGroup,
C2: &CompressedGroup,
) -> Result<(), ProofVerifyError> {
transcript.append_protocol_name(EqualityProof::protocol_name());
C1.append_to_transcript(b"C1", transcript);
C2.append_to_transcript(b"C2", transcript);
self.alpha.append_to_transcript(b"alpha", transcript);
let c = transcript.challenge_scalar(b"c");
let rhs = {
let C = C1.unpack()? - C2.unpack()?;
(c * C + self.alpha.unpack()?).compress()
};
let lhs = (self.z * gens_n.h).compress();
if lhs == rhs {
Ok(())
} else {
Err(ProofVerifyError::InternalError)
}
}
}
#[derive(Serialize, Deserialize, Debug)]
pub struct ProductProof {
// first messages of the sigma protocol
alpha: CompressedGroup,
beta: CompressedGroup,
delta: CompressedGroup,
// the five responses z1..z5 computed from the challenge
z: [Scalar; 5],
}
impl ProductProof {
// domain-separation label for the Fiat-Shamir transcript
fn protocol_name() -> &'static [u8] {
b"product proof"
}
// Sigma protocol proving that the value hidden in Z is the product of the
// values hidden in X and Y; returns the proof together with X, Y, Z.
pub fn prove(
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
random_tape: &mut RandomTape,
x: &Scalar,
rX: &Scalar,
y: &Scalar,
rY: &Scalar,
z: &Scalar,
rZ: &Scalar,
) -> (
ProductProof,
CompressedGroup,
CompressedGroup,
CompressedGroup,
) {
transcript.append_protocol_name(ProductProof::protocol_name());
// produce five random Scalar
let b1 = random_tape.random_scalar(b"b1");
let b2 = random_tape.random_scalar(b"b2");
let b3 = random_tape.random_scalar(b"b3");
let b4 = random_tape.random_scalar(b"b4");
let b5 = random_tape.random_scalar(b"b5");
let X = x.commit(rX, gens_n).compress();
X.append_to_transcript(b"X", transcript);
let Y = y.commit(rY, gens_n).compress();
Y.append_to_transcript(b"Y", transcript);
let Z = z.commit(rZ, gens_n).compress();
Z.append_to_transcript(b"Z", transcript);
let alpha = b1.commit(&b2, gens_n).compress();
alpha.append_to_transcript(b"alpha", transcript);
let beta = b3.commit(&b4, gens_n).compress();
beta.append_to_transcript(b"beta", transcript);
// delta commits to b3 under a generator set whose base is X itself
let delta = {
let gens_X = &MultiCommitGens {
n: 1,
G: vec![X.decompress().unwrap()],
h: gens_n.h,
};
b3.commit(&b5, gens_X).compress()
};
delta.append_to_transcript(b"delta", transcript);
let c = transcript.challenge_scalar(b"c");
let z1 = b1 + c * x;
let z2 = b2 + c * rX;
let z3 = b3 + c * y;
let z4 = b4 + c * rY;
let z5 = b5 + c * (rZ - rX * y);
let z = [z1, z2, z3, z4, z5];
(
ProductProof {
alpha,
beta,
delta,
z,
},
X,
Y,
Z,
)
}
// Checks commit(z1, z2) == P + c*X for one leg of the sigma protocol.
// NOTE(review): `decompress().unwrap()` panics on a malformed point instead
// of failing verification — confirm inputs are validated upstream.
fn check_equality(
P: &CompressedGroup,
X: &CompressedGroup,
c: &Scalar,
gens_n: &MultiCommitGens,
z1: &Scalar,
z2: &Scalar,
) -> bool {
let lhs = (P.decompress().unwrap() + c * X.decompress().unwrap()).compress();
let rhs = z1.commit(z2, gens_n).compress();
lhs == rhs
}
// Verifies the three equality checks; transcript operations must mirror
// `prove` so the challenge c matches.
pub fn verify(
&self,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
X: &CompressedGroup,
Y: &CompressedGroup,
Z: &CompressedGroup,
) -> Result<(), ProofVerifyError> {
transcript.append_protocol_name(ProductProof::protocol_name());
X.append_to_transcript(b"X", transcript);
Y.append_to_transcript(b"Y", transcript);
Z.append_to_transcript(b"Z", transcript);
self.alpha.append_to_transcript(b"alpha", transcript);
self.beta.append_to_transcript(b"beta", transcript);
self.delta.append_to_transcript(b"delta", transcript);
let z1 = self.z[0];
let z2 = self.z[1];
let z3 = self.z[2];
let z4 = self.z[3];
let z5 = self.z[4];
let c = transcript.challenge_scalar(b"c");
if ProductProof::check_equality(&self.alpha, X, &c, gens_n, &z1, &z2)
&& ProductProof::check_equality(&self.beta, Y, &c, gens_n, &z3, &z4)
&& ProductProof::check_equality(
&self.delta,
Z,
&c,
&MultiCommitGens {
n: 1,
G: vec![X.unpack()?],
h: gens_n.h,
},
&z3,
&z5,
)
{
Ok(())
} else {
Err(ProofVerifyError::InternalError)
}
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct DotProductProof {
// first messages: commitments to the random vector d and to <a, d>
delta: CompressedGroup,
beta: CompressedGroup,
// responses: z = c*x + d (element-wise) and the two blind responses
z: Vec<Scalar>,
z_delta: Scalar,
z_beta: Scalar,
}
impl DotProductProof {
// domain-separation label for the Fiat-Shamir transcript
fn protocol_name() -> &'static [u8] {
b"dot product proof"
}
// Computes the dot product of two equal-length scalar vectors.
pub fn compute_dotproduct(a: &[Scalar], b: &[Scalar]) -> Scalar {
assert_eq!(a.len(), b.len());
(0..a.len()).map(|i| a[i] * b[i]).sum()
}
// Proves that the value committed in Cy equals the dot product of the
// vector committed in Cx with the public vector a_vec (linear-size proof).
pub fn prove(
gens_1: &MultiCommitGens,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
random_tape: &mut RandomTape,
x_vec: &[Scalar],
blind_x: &Scalar,
a_vec: &[Scalar],
y: &Scalar,
blind_y: &Scalar,
) -> (DotProductProof, CompressedGroup, CompressedGroup) {
transcript.append_protocol_name(DotProductProof::protocol_name());
let n = x_vec.len();
assert_eq!(x_vec.len(), a_vec.len());
assert_eq!(gens_n.n, a_vec.len());
assert_eq!(gens_1.n, 1);
// produce randomness for the proofs
let d_vec = random_tape.random_vector(b"d_vec", n);
let r_delta = random_tape.random_scalar(b"r_delta");
let r_beta = random_tape.random_scalar(b"r_beta");
let Cx = x_vec.commit(blind_x, gens_n).compress();
Cx.append_to_transcript(b"Cx", transcript);
let Cy = y.commit(blind_y, gens_1).compress();
Cy.append_to_transcript(b"Cy", transcript);
// the public vector a is also bound to the transcript
a_vec.append_to_transcript(b"a", transcript);
let delta = d_vec.commit(&r_delta, gens_n).compress();
delta.append_to_transcript(b"delta", transcript);
let dotproduct_a_d = DotProductProof::compute_dotproduct(a_vec, &d_vec);
let beta = dotproduct_a_d.commit(&r_beta, gens_1).compress();
beta.append_to_transcript(b"beta", transcript);
let c = transcript.challenge_scalar(b"c");
let z = (0..d_vec.len())
.map(|i| c * x_vec[i] + d_vec[i])
.collect::<Vec<Scalar>>();
let z_delta = c * blind_x + r_delta;
let z_beta = c * blind_y + r_beta;
(
DotProductProof {
delta,
beta,
z,
z_delta,
z_beta,
},
Cx,
Cy,
)
}
// Verifies the two commitment equations; transcript operations must mirror
// `prove` so the challenge c matches.
pub fn verify(
&self,
gens_1: &MultiCommitGens,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
a: &[Scalar],
Cx: &CompressedGroup,
Cy: &CompressedGroup,
) -> Result<(), ProofVerifyError> {
assert_eq!(gens_n.n, a.len());
assert_eq!(gens_1.n, 1);
transcript.append_protocol_name(DotProductProof::protocol_name());
Cx.append_to_transcript(b"Cx", transcript);
Cy.append_to_transcript(b"Cy", transcript);
a.append_to_transcript(b"a", transcript);
self.delta.append_to_transcript(b"delta", transcript);
self.beta.append_to_transcript(b"beta", transcript);
let c = transcript.challenge_scalar(b"c");
// check commit(z, z_delta) == c*Cx + delta
let mut result =
c * Cx.unpack()? + self.delta.unpack()? == self.z.commit(&self.z_delta, gens_n);
// check commit(<z, a>, z_beta) == c*Cy + beta
let dotproduct_z_a = DotProductProof::compute_dotproduct(&self.z, a);
result &= c * Cy.unpack()? + self.beta.unpack()? == dotproduct_z_a.commit(&self.z_beta, gens_1);
if result {
Ok(())
} else {
Err(ProofVerifyError::InternalError)
}
}
}
pub struct DotProductProofGens {
// the vector length these generators support
n: usize,
pub gens_n: MultiCommitGens,
pub gens_1: MultiCommitGens,
}
impl DotProductProofGens {
// Creates n + 1 generators under `label` and splits them so that `gens_n`
// holds the first n and `gens_1` the remaining one.
pub fn new(n: usize, label: &[u8]) -> Self {
let (gens_n, gens_1) = MultiCommitGens::new(n + 1, label).split_at(n);
DotProductProofGens { n, gens_n, gens_1 }
}
}
/// A dot-product argument with proof size logarithmic in the vector length,
/// built on top of `BulletReductionProof`.
#[derive(Debug, Serialize, Deserialize)]
pub struct DotProductProofLog {
  /// Log-round reduction of the committed vectors down to a single element.
  bullet_reduction_proof: BulletReductionProof,
  /// First messages of the final sigma protocol.
  delta: CompressedGroup,
  beta: CompressedGroup,
  /// Responses of the final sigma protocol.
  z1: Scalar,
  z2: Scalar,
}

impl DotProductProofLog {
  /// Domain-separation label for the Fiat-Shamir transcript.
  fn protocol_name() -> &'static [u8] {
    b"dot product proof (log)"
  }

  /// Computes the dot product of two equal-length scalar vectors.
  pub fn compute_dotproduct(a: &[Scalar], b: &[Scalar]) -> Scalar {
    assert_eq!(a.len(), b.len());
    (0..a.len()).map(|i| a[i] * b[i]).sum()
  }

  /// Proves that the value committed in `Cy` equals the dot product of the
  /// vector committed in `Cx` with the public vector `a_vec`; returns the
  /// proof together with the commitments `Cx` and `Cy`.
  pub fn prove(
    gens: &DotProductProofGens,
    transcript: &mut Transcript,
    random_tape: &mut RandomTape,
    x_vec: &[Scalar],
    blind_x: &Scalar,
    a_vec: &[Scalar],
    y: &Scalar,
    blind_y: &Scalar,
  ) -> (DotProductProofLog, CompressedGroup, CompressedGroup) {
    transcript.append_protocol_name(DotProductProofLog::protocol_name());
    let n = x_vec.len();
    assert_eq!(x_vec.len(), a_vec.len());
    assert_eq!(gens.n, n);
    // produce randomness for generating a proof
    let d = random_tape.random_scalar(b"d");
    let r_delta = random_tape.random_scalar(b"r_delta");
    // fix: this blind was previously drawn under the copy-pasted label
    // b"r_delta"; the tape is stateful so the value was still fresh, but the
    // label now matches its use
    let r_beta = random_tape.random_scalar(b"r_beta");
    let blinds_vec = {
      let v1 = random_tape.random_vector(b"blinds_vec_1", 2 * n.log_2());
      let v2 = random_tape.random_vector(b"blinds_vec_2", 2 * n.log_2());
      (0..v1.len())
        .map(|i| (v1[i], v2[i]))
        .collect::<Vec<(Scalar, Scalar)>>()
    };
    let Cx = x_vec.commit(blind_x, &gens.gens_n).compress();
    Cx.append_to_transcript(b"Cx", transcript);
    let Cy = y.commit(blind_y, &gens.gens_1).compress();
    Cy.append_to_transcript(b"Cy", transcript);
    a_vec.append_to_transcript(b"a", transcript);
    // sample a random base and scale the generator used for
    // the output of the inner product
    let r = transcript.challenge_scalar(b"r");
    let gens_1_scaled = gens.gens_1.scale(&r);
    let blind_Gamma = blind_x + r * blind_y;
    let (bullet_reduction_proof, _Gamma_hat, x_hat, a_hat, g_hat, rhat_Gamma) =
      BulletReductionProof::prove(
        transcript,
        &gens_1_scaled.G[0],
        &gens.gens_n.G,
        &gens.gens_n.h,
        x_vec,
        a_vec,
        &blind_Gamma,
        &blinds_vec,
      );
    let y_hat = x_hat * a_hat;
    // sigma protocol over the fully-reduced single-element statement
    let delta = {
      let gens_hat = MultiCommitGens {
        n: 1,
        G: vec![g_hat],
        h: gens.gens_1.h,
      };
      d.commit(&r_delta, &gens_hat).compress()
    };
    delta.append_to_transcript(b"delta", transcript);
    let beta = d.commit(&r_beta, &gens_1_scaled).compress();
    beta.append_to_transcript(b"beta", transcript);
    let c = transcript.challenge_scalar(b"c");
    let z1 = d + c * y_hat;
    let z2 = a_hat * (c * rhat_Gamma + r_beta) + r_delta;
    (
      DotProductProofLog {
        bullet_reduction_proof,
        delta,
        beta,
        z1,
        z2,
      },
      Cx,
      Cy,
    )
  }

  /// Verifies the proof against commitments `Cx`, `Cy` and the public
  /// vector `a`; transcript operations must mirror `prove` exactly.
  pub fn verify(
    &self,
    n: usize,
    gens: &DotProductProofGens,
    transcript: &mut Transcript,
    a: &[Scalar],
    Cx: &CompressedGroup,
    Cy: &CompressedGroup,
  ) -> Result<(), ProofVerifyError> {
    assert_eq!(gens.n, n);
    assert_eq!(a.len(), n);
    transcript.append_protocol_name(DotProductProofLog::protocol_name());
    Cx.append_to_transcript(b"Cx", transcript);
    Cy.append_to_transcript(b"Cy", transcript);
    a.append_to_transcript(b"a", transcript);
    // sample a random base and scale the generator used for
    // the output of the inner product
    let r = transcript.challenge_scalar(b"r");
    let gens_1_scaled = gens.gens_1.scale(&r);
    let Gamma = Cx.unpack()? + r * Cy.unpack()?;
    let (g_hat, Gamma_hat, a_hat) =
      self
        .bullet_reduction_proof
        .verify(n, a, transcript, &Gamma, &gens.gens_n.G)?;
    self.delta.append_to_transcript(b"delta", transcript);
    self.beta.append_to_transcript(b"beta", transcript);
    let c = transcript.challenge_scalar(b"c");
    let c_s = &c;
    let beta_s = self.beta.unpack()?;
    let a_hat_s = &a_hat;
    let delta_s = self.delta.unpack()?;
    let z1_s = &self.z1;
    let z2_s = &self.z2;
    let lhs = ((Gamma_hat * c_s + beta_s) * a_hat_s + delta_s).compress();
    let rhs = ((g_hat + gens_1_scaled.G[0] * a_hat_s) * z1_s + gens_1_scaled.h * z2_s).compress();
    // fix: an `assert_eq!(lhs, rhs)` used to precede this check, which made
    // the error branch unreachable and turned every invalid proof into a
    // panic; verification failures now surface as Err as the signature promises
    if lhs == rhs {
      Ok(())
    } else {
      Err(ProofVerifyError::InternalError)
    }
  }
}
#[cfg(test)]
mod tests {
  use super::*;
  use rand_core::OsRng;

  #[test]
  fn check_knowledgeproof() {
    let mut rng: OsRng = OsRng;

    let gens_1 = MultiCommitGens::new(1, b"test-knowledgeproof");
    let x = Scalar::random(&mut rng);
    let r = Scalar::random(&mut rng);

    let mut random_tape = RandomTape::new(b"proof");
    let mut prover_transcript = Transcript::new(b"example");
    let (proof, committed_value) =
      KnowledgeProof::prove(&gens_1, &mut prover_transcript, &mut random_tape, &x, &r);

    let mut verifier_transcript = Transcript::new(b"example");
    let res = proof.verify(&gens_1, &mut verifier_transcript, &committed_value);
    assert!(res.is_ok());
  }

  #[test]
  fn check_equalityproof() {
    let mut rng: OsRng = OsRng;

    let gens_1 = MultiCommitGens::new(1, b"test-equalityproof");
    // Two commitments to the same value under independent blinds.
    let v1 = Scalar::random(&mut rng);
    let v2 = v1;
    let s1 = Scalar::random(&mut rng);
    let s2 = Scalar::random(&mut rng);

    let mut random_tape = RandomTape::new(b"proof");
    let mut prover_transcript = Transcript::new(b"example");
    let (proof, C1, C2) = EqualityProof::prove(
      &gens_1,
      &mut prover_transcript,
      &mut random_tape,
      &v1,
      &s1,
      &v2,
      &s2,
    );

    let mut verifier_transcript = Transcript::new(b"example");
    let res = proof.verify(&gens_1, &mut verifier_transcript, &C1, &C2);
    assert!(res.is_ok());
  }

  #[test]
  fn check_productproof() {
    let mut rng: OsRng = OsRng;

    let gens_1 = MultiCommitGens::new(1, b"test-productproof");
    // Witnesses x, y and their product z, each with its own blind.
    let x = Scalar::random(&mut rng);
    let r_x = Scalar::random(&mut rng);
    let y = Scalar::random(&mut rng);
    let r_y = Scalar::random(&mut rng);
    let z = x * y;
    let r_z = Scalar::random(&mut rng);

    let mut random_tape = RandomTape::new(b"proof");
    let mut prover_transcript = Transcript::new(b"example");
    let (proof, comm_x, comm_y, comm_z) = ProductProof::prove(
      &gens_1,
      &mut prover_transcript,
      &mut random_tape,
      &x,
      &r_x,
      &y,
      &r_y,
      &z,
      &r_z,
    );

    let mut verifier_transcript = Transcript::new(b"example");
    let res = proof.verify(&gens_1, &mut verifier_transcript, &comm_x, &comm_y, &comm_z);
    assert!(res.is_ok());
  }

  #[test]
  fn check_dotproductproof() {
    let mut rng: OsRng = OsRng;

    let n = 1024;
    let gens_1 = MultiCommitGens::new(1, b"test-two");
    let gens_1024 = MultiCommitGens::new(n, b"test-1024");

    let x: Vec<Scalar> = (0..n).map(|_| Scalar::random(&mut rng)).collect();
    let a: Vec<Scalar> = (0..n).map(|_| Scalar::random(&mut rng)).collect();
    let y = DotProductProofLog::compute_dotproduct(&x, &a);
    let r_x = Scalar::random(&mut rng);
    let r_y = Scalar::random(&mut rng);

    let mut random_tape = RandomTape::new(b"proof");
    let mut prover_transcript = Transcript::new(b"example");
    let (proof, Cx, Cy) = DotProductProof::prove(
      &gens_1,
      &gens_1024,
      &mut prover_transcript,
      &mut random_tape,
      &x,
      &r_x,
      &a,
      &y,
      &r_y,
    );

    let mut verifier_transcript = Transcript::new(b"example");
    let res = proof.verify(&gens_1, &gens_1024, &mut verifier_transcript, &a, &Cx, &Cy);
    assert!(res.is_ok());
  }

  #[test]
  fn check_dotproductproof_log() {
    let mut rng: OsRng = OsRng;

    let n = 1024;
    let gens = DotProductProofGens::new(n, b"test-1024");

    let mut x: Vec<Scalar> = Vec::new();
    let mut a: Vec<Scalar> = Vec::new();
    for _ in 0..n {
      x.push(Scalar::random(&mut rng));
      a.push(Scalar::random(&mut rng));
    }
    let y = DotProductProof::compute_dotproduct(&x, &a);
    let r_x = Scalar::random(&mut rng);
    let r_y = Scalar::random(&mut rng);

    let mut random_tape = RandomTape::new(b"proof");
    let mut prover_transcript = Transcript::new(b"example");
    let (proof, Cx, Cy) = DotProductProofLog::prove(
      &gens,
      &mut prover_transcript,
      &mut random_tape,
      &x,
      &r_x,
      &a,
      &y,
      &r_y,
    );

    let mut verifier_transcript = Transcript::new(b"example");
    let res = proof.verify(n, &gens, &mut verifier_transcript, &a, &Cx, &Cy);
    assert!(res.is_ok());
  }
}
================================================
FILE: packages/Spartan-secq/src/product_tree.rs
================================================
#![allow(dead_code)]
use super::dense_mlpoly::DensePolynomial;
use super::dense_mlpoly::EqPolynomial;
use super::math::Math;
use super::scalar::Scalar;
use super::sumcheck::SumcheckInstanceProof;
use super::transcript::ProofTranscript;
use merlin::Transcript;
use serde::{Deserialize, Serialize};
/// A layered binary-tree circuit that computes the product of all entries of
/// a multilinear polynomial. Each layer stores its evaluations split into a
/// left and a right half; layer 0 is the input split, the last layer holds a
/// single left/right pair whose product is the circuit output.
#[derive(Debug)]
pub struct ProductCircuit {
  left_vec: Vec<DensePolynomial>,
  right_vec: Vec<DensePolynomial>,
}
impl ProductCircuit {
  /// Computes the next layer of the product tree.
  ///
  /// `len` is the total size of the current layer; each input half holds
  /// `len / 2` entries. The `len / 2` pairwise products
  /// `inp_left[i] * inp_right[i]` form the next layer, split into a left
  /// half (indices `0..len/4`) and a right half (indices `len/4..len/2`).
  fn compute_layer(
    inp_left: &DensePolynomial,
    inp_right: &DensePolynomial,
  ) -> (DensePolynomial, DensePolynomial) {
    let len = inp_left.len() + inp_right.len();
    let outp_left = (0..len / 4)
      .map(|i| inp_left[i] * inp_right[i])
      .collect::<Vec<Scalar>>();
    let outp_right = (len / 4..len / 2)
      .map(|i| inp_left[i] * inp_right[i])
      .collect::<Vec<Scalar>>();
    (
      DensePolynomial::new(outp_left),
      DensePolynomial::new(outp_right),
    )
  }

  /// Builds the full product tree over the evaluations of `poly`.
  ///
  /// Layer 0 is `poly` split in half; each subsequent layer halves the size
  /// via `compute_layer`, for `log2(poly.len())` layers in total.
  /// NOTE(review): `num_layers - 1` underflows for a length-1 polynomial;
  /// callers appear to always pass len >= 2 — confirm.
  pub fn new(poly: &DensePolynomial) -> Self {
    let mut left_vec: Vec<DensePolynomial> = Vec::new();
    let mut right_vec: Vec<DensePolynomial> = Vec::new();
    let num_layers = poly.len().log_2();
    let (outp_left, outp_right) = poly.split(poly.len() / 2);
    left_vec.push(outp_left);
    right_vec.push(outp_right);
    for i in 0..num_layers - 1 {
      let (outp_left, outp_right) = ProductCircuit::compute_layer(&left_vec[i], &right_vec[i]);
      left_vec.push(outp_left);
      right_vec.push(outp_right);
    }
    ProductCircuit {
      left_vec,
      right_vec,
    }
  }

  /// Returns the circuit output: the product of the two single-entry halves
  /// of the final layer (and hence of all input entries).
  pub fn evaluate(&self) -> Scalar {
    let len = self.left_vec.len();
    // The last layer must have been reduced to constants (0 variables).
    assert_eq!(self.left_vec[len - 1].get_num_vars(), 0);
    assert_eq!(self.right_vec[len - 1].get_num_vars(), 0);
    self.left_vec[len - 1][0] * self.right_vec[len - 1][0]
  }
}
/// A circuit computing a weighted dot product
/// `sum_i left[i] * right[i] * weight[i]` over three equal-length
/// multilinear polynomials.
pub struct DotProductCircuit {
  left: DensePolynomial,
  right: DensePolynomial,
  weight: DensePolynomial,
}
impl DotProductCircuit {
  /// Builds a weighted dot-product circuit; panics unless all three
  /// polynomials have the same length.
  pub fn new(left: DensePolynomial, right: DensePolynomial, weight: DensePolynomial) -> Self {
    assert_eq!(left.len(), right.len());
    assert_eq!(left.len(), weight.len());
    DotProductCircuit {
      left,
      right,
      weight,
    }
  }

  /// Returns `sum_i left[i] * right[i] * weight[i]`.
  pub fn evaluate(&self) -> Scalar {
    (0..self.left.len())
      .map(|idx| self.left[idx] * self.right[idx] * self.weight[idx])
      .sum()
  }

  /// Splits the circuit in half, yielding one circuit over the low halves of
  /// the three polynomials and one over the high halves. Panics if the
  /// length is odd.
  pub fn split(&mut self) -> (DotProductCircuit, DotProductCircuit) {
    let mid = self.left.len() / 2;
    assert_eq!(mid * 2, self.left.len());
    let (left_lo, left_hi) = self.left.split(mid);
    let (right_lo, right_hi) = self.right.split(mid);
    let (weight_lo, weight_hi) = self.weight.split(mid);
    (
      DotProductCircuit {
        left: left_lo,
        right: right_lo,
        weight: weight_lo,
      },
      DotProductCircuit {
        left: left_hi,
        right: right_hi,
        weight: weight_hi,
      },
    )
  }
}
/// Sumcheck proof for one layer of a `ProductCircuit`, together with the
/// claimed evaluations produced at the end of that layer's sumcheck.
#[allow(dead_code)]
#[derive(Debug, Serialize, Deserialize)]
pub struct LayerProof {
  pub proof: SumcheckInstanceProof,
  pub claims: Vec<Scalar>,
}
#[allow(dead_code)]
impl LayerProof {
  /// Verifies this layer's sumcheck proof against `claim`, returning the
  /// final claimed evaluation and the sumcheck challenge point.
  ///
  /// NOTE(review): failures panic via `unwrap()` rather than propagating an
  /// error; callers rely on this behavior.
  pub fn verify(
    &self,
    claim: Scalar,
    num_rounds: usize,
    degree_bound: usize,
    transcript: &mut Transcript,
  ) -> (Scalar, Vec<Scalar>) {
    self
      .proof
      .verify(claim, num_rounds, degree_bound, transcript)
      .unwrap()
  }
}
/// Sumcheck proof for one layer of several `ProductCircuit`s proved jointly,
/// with one left/right claim pair per batched circuit instance.
#[allow(dead_code)]
#[derive(Debug, Serialize, Deserialize)]
pub struct LayerProofBatched {
  pub proof: SumcheckInstanceProof,
  pub claims_prod_left: Vec<Scalar>,
  pub claims_prod_right: Vec<Scalar>,
}
#[allow(dead_code)]
impl LayerProofBatched {
  /// Verifies the batched layer's sumcheck proof against `claim`, returning
  /// the final claimed evaluation and the sumcheck challenge point.
  ///
  /// NOTE(review): failures panic via `unwrap()` rather than propagating an
  /// error; callers rely on this behavior.
  pub fn verify(
    &self,
    claim: Scalar,
    num_rounds: usize,
    degree_bound: usize,
    transcript: &mut Transcript,
  ) -> (Scalar, Vec<Scalar>) {
    self
      .proof
      .verify(claim, num_rounds, degree_bound, transcript)
      .unwrap()
  }
}
/// Layered proof that a `ProductCircuit` evaluates to a claimed value:
/// one `LayerProof` per tree layer, proved from the output layer down.
#[derive(Debug, Serialize, Deserialize)]
pub struct ProductCircuitEvalProof {
  proof: Vec<LayerProof>,
}
/// Batched layered proof for several `ProductCircuit`s (and, at the input
/// layer, several `DotProductCircuit`s). `claims_dotp` holds the
/// (left, right, weight) claims of the dot-product instances.
#[derive(Debug, Serialize, Deserialize)]
pub struct ProductCircuitEvalProofBatched {
  proof: Vec<LayerProofBatched>,
  claims_dotp: (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>),
}
impl ProductCircuitEvalProof {
  #![allow(dead_code)]
  /// Proves the evaluation of `circuit` layer by layer, from the output
  /// layer down to the inputs, running one cubic sumcheck per layer.
  ///
  /// Returns the proof, the final (input-layer) claim, and the challenge
  /// point `rand` at which that claim must hold.
  pub fn prove(
    circuit: &mut ProductCircuit,
    transcript: &mut Transcript,
  ) -> (Self, Scalar, Vec<Scalar>) {
    let mut proof: Vec<LayerProof> = Vec::new();
    let num_layers = circuit.left_vec.len();
    let mut claim = circuit.evaluate();
    let mut rand = Vec::new();
    // Walk the tree from the root layer (last index) to the input layer.
    for layer_id in (0..num_layers).rev() {
      let len = circuit.left_vec[layer_id].len() + circuit.right_vec[layer_id].len();
      // eq(rand, x) table that ties this layer's claim to the point `rand`.
      let mut poly_C = DensePolynomial::new(EqPolynomial::new(rand.clone()).evals());
      assert_eq!(poly_C.len(), len / 2);
      let num_rounds_prod = poly_C.len().log_2();
      // Cubic combiner: left(x) * right(x) * eq(rand, x).
      let comb_func_prod = |poly_A_comp: &Scalar,
                            poly_B_comp: &Scalar,
                            poly_C_comp: &Scalar|
       -> Scalar { poly_A_comp * poly_B_comp * poly_C_comp };
      let (proof_prod, rand_prod, claims_prod) = SumcheckInstanceProof::prove_cubic(
        &claim,
        num_rounds_prod,
        &mut circuit.left_vec[layer_id],
        &mut circuit.right_vec[layer_id],
        &mut poly_C,
        comb_func_prod,
        transcript,
      );
      transcript.append_scalar(b"claim_prod_left", &claims_prod[0]);
      transcript.append_scalar(b"claim_prod_right", &claims_prod[1]);
      // produce a random challenge
      let r_layer = transcript.challenge_scalar(b"challenge_r_layer");
      // Condense the left/right claims into one claim for the next layer
      // via linear interpolation at r_layer.
      claim = claims_prod[0] + r_layer * (claims_prod[1] - claims_prod[0]);
      // The next layer's point is (r_layer, rand_prod).
      let mut ext = vec![r_layer];
      ext.extend(rand_prod);
      rand = ext;
      proof.push(LayerProof {
        proof: proof_prod,
        // Drop the last claim (the eq-polynomial evaluation, recomputable
        // by the verifier).
        claims: claims_prod[0..claims_prod.len() - 1].to_vec(),
      });
    }
    (ProductCircuitEvalProof { proof }, claim, rand)
  }

  /// Verifies a claimed circuit output `eval` for an input layer of size
  /// `len`, returning the final claim and the point at which it must hold.
  ///
  /// NOTE(review): mismatches panic via `assert_eq!` rather than returning
  /// an error; callers rely on this behavior.
  pub fn verify(
    &self,
    eval: Scalar,
    len: usize,
    transcript: &mut Transcript,
  ) -> (Scalar, Vec<Scalar>) {
    let num_layers = len.log_2();
    let mut claim = eval;
    let mut rand: Vec<Scalar> = Vec::new();
    //let mut num_rounds = 0;
    assert_eq!(self.proof.len(), num_layers);
    // Each layer i runs a sumcheck with i rounds (enumerate makes
    // num_rounds == i): layers grow by one variable per level.
    for (num_rounds, i) in (0..num_layers).enumerate() {
      let (claim_last, rand_prod) = self.proof[i].verify(claim, num_rounds, 3, transcript);
      let claims_prod = &self.proof[i].claims;
      transcript.append_scalar(b"claim_prod_left", &claims_prod[0]);
      transcript.append_scalar(b"claim_prod_right", &claims_prod[1]);
      assert_eq!(rand.len(), rand_prod.len());
      // Recompute eq(rand, rand_prod) locally.
      let eq: Scalar = (0..rand.len())
        .map(|i| {
          rand[i] * rand_prod[i] + (Scalar::one() - rand[i]) * (Scalar::one() - rand_prod[i])
        })
        .product();
      assert_eq!(claims_prod[0] * claims_prod[1] * eq, claim_last);
      // produce a random challenge
      let r_layer = transcript.challenge_scalar(b"challenge_r_layer");
      // Interpolate the two claims at r_layer (same formula as the prover,
      // written as (1 - r) * left + r * right).
      claim = (Scalar::one() - r_layer) * claims_prod[0] + r_layer * claims_prod[1];
      let mut ext = vec![r_layer];
      ext.extend(rand_prod);
      rand = ext;
    }
    (claim, rand)
  }
}
impl ProductCircuitEvalProofBatched {
  /// Jointly proves the evaluations of several product circuits, plus (at
  /// the input layer only) several dot-product circuits, using one batched
  /// cubic sumcheck per layer. All product circuits must have identical
  /// layer sizes. Returns the proof and the final challenge point.
  pub fn prove(
    prod_circuit_vec: &mut Vec<&mut ProductCircuit>,
    dotp_circuit_vec: &mut Vec<&mut DotProductCircuit>,
    transcript: &mut Transcript,
  ) -> (Self, Vec<Scalar>) {
    assert!(!prod_circuit_vec.is_empty());
    let mut claims_dotp_final = (Vec::new(), Vec::new(), Vec::new());
    let mut proof_layers: Vec<LayerProofBatched> = Vec::new();
    let num_layers = prod_circuit_vec[0].left_vec.len();
    // One claimed output per product-circuit instance.
    let mut claims_to_verify = (0..prod_circuit_vec.len())
      .map(|i| prod_circuit_vec[i].evaluate())
      .collect::<Vec<Scalar>>();
    let mut rand = Vec::new();
    // Walk from the output layer down to the input layer.
    for layer_id in (0..num_layers).rev() {
      // prepare parallel instances that share poly_C first
      let len = prod_circuit_vec[0].left_vec[layer_id].len()
        + prod_circuit_vec[0].right_vec[layer_id].len();
      // eq(rand, x) table shared by all product-circuit instances.
      let mut poly_C_par = DensePolynomial::new(EqPolynomial::new(rand.clone()).evals());
      assert_eq!(poly_C_par.len(), len / 2);
      let num_rounds_prod = poly_C_par.len().log_2();
      // Cubic combiner: A(x) * B(x) * C(x).
      let comb_func_prod = |poly_A_comp: &Scalar,
                            poly_B_comp: &Scalar,
                            poly_C_comp: &Scalar|
       -> Scalar { poly_A_comp * poly_B_comp * poly_C_comp };
      let mut poly_A_batched_par: Vec<&mut DensePolynomial> = Vec::new();
      let mut poly_B_batched_par: Vec<&mut DensePolynomial> = Vec::new();
      for prod_circuit in prod_circuit_vec.iter_mut() {
        poly_A_batched_par.push(&mut prod_circuit.left_vec[layer_id]);
        poly_B_batched_par.push(&mut prod_circuit.right_vec[layer_id])
      }
      let poly_vec_par = (
        &mut poly_A_batched_par,
        &mut poly_B_batched_par,
        &mut poly_C_par,
      );
      // prepare sequential instances that don't share poly_C
      let mut poly_A_batched_seq: Vec<&mut DensePolynomial> = Vec::new();
      let mut poly_B_batched_seq: Vec<&mut DensePolynomial> = Vec::new();
      let mut poly_C_batched_seq: Vec<&mut DensePolynomial> = Vec::new();
      // Dot-product instances join only at the input layer (layer_id == 0).
      if layer_id == 0 && !dotp_circuit_vec.is_empty() {
        // add additional claims
        for item in dotp_circuit_vec.iter() {
          claims_to_verify.push(item.evaluate());
          assert_eq!(len / 2, item.left.len());
          assert_eq!(len / 2, item.right.len());
          assert_eq!(len / 2, item.weight.len());
        }
        for dotp_circuit in dotp_circuit_vec.iter_mut() {
          poly_A_batched_seq.push(&mut dotp_circuit.left);
          poly_B_batched_seq.push(&mut dotp_circuit.right);
          poly_C_batched_seq.push(&mut dotp_circuit.weight);
        }
      }
      let poly_vec_seq = (
        &mut poly_A_batched_seq,
        &mut poly_B_batched_seq,
        &mut poly_C_batched_seq,
      );
      // produce a fresh set of coeffs and a joint claim
      let coeff_vec =
        transcript.challenge_vector(b"rand_coeffs_next_layer", claims_to_verify.len());
      // Random linear combination of all per-instance claims.
      let claim = (0..claims_to_verify.len())
        .map(|i| claims_to_verify[i] * coeff_vec[i])
        .sum();
      let (proof, rand_prod, claims_prod, claims_dotp) = SumcheckInstanceProof::prove_cubic_batched(
        &claim,
        num_rounds_prod,
        poly_vec_par,
        poly_vec_seq,
        &coeff_vec,
        comb_func_prod,
        transcript,
      );
      let (claims_prod_left, claims_prod_right, _claims_eq) = claims_prod;
      for i in 0..prod_circuit_vec.len() {
        transcript.append_scalar(b"claim_prod_left", &claims_prod_left[i]);
        transcript.append_scalar(b"claim_prod_right", &claims_prod_right[i]);
      }
      if layer_id == 0 && !dotp_circuit_vec.is_empty() {
        let (claims_dotp_left, claims_dotp_right, claims_dotp_weight) = claims_dotp;
        for i in 0..dotp_circuit_vec.len() {
          transcript.append_scalar(b"claim_dotp_left", &claims_dotp_left[i]);
          transcript.append_scalar(b"claim_dotp_right", &claims_dotp_right[i]);
          transcript.append_scalar(b"claim_dotp_weight", &claims_dotp_weight[i]);
        }
        claims_dotp_final = (claims_dotp_left, claims_dotp_right, claims_dotp_weight);
      }
      // produce a random challenge to condense two claims into a single claim
      let r_layer = transcript.challenge_scalar(b"challenge_r_layer");
      // Interpolate each instance's left/right claims at r_layer.
      claims_to_verify = (0..prod_circuit_vec.len())
        .map(|i| claims_prod_left[i] + r_layer * (claims_prod_right[i] - claims_prod_left[i]))
        .collect::<Vec<Scalar>>();
      // The next layer's point is (r_layer, rand_prod).
      let mut ext = vec![r_layer];
      ext.extend(rand_prod);
      rand = ext;
      proof_layers.push(LayerProofBatched {
        proof,
        claims_prod_left,
        claims_prod_right,
      });
    }
    (
      ProductCircuitEvalProofBatched {
        proof: proof_layers,
        claims_dotp: claims_dotp_final,
      },
      rand,
    )
  }

  /// Verifies the batched proof against the claimed product-circuit outputs
  /// (`claims_prod_vec`) and dot-product outputs (`claims_dotp_vec`) for an
  /// input layer of size `len`. Returns the final product claims, the
  /// condensed dot-product claims, and the challenge point.
  ///
  /// NOTE(review): mismatches panic via `assert_eq!` rather than returning
  /// an error; callers rely on this behavior.
  pub fn verify(
    &self,
    claims_prod_vec: &[Scalar],
    claims_dotp_vec: &[Scalar],
    len: usize,
    transcript: &mut Transcript,
  ) -> (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>) {
    let num_layers = len.log_2();
    let mut rand: Vec<Scalar> = Vec::new();
    //let mut num_rounds = 0;
    assert_eq!(self.proof.len(), num_layers);
    let mut claims_to_verify = claims_prod_vec.to_owned();
    let mut claims_to_verify_dotp: Vec<Scalar> = Vec::new();
    // Layer i runs a sumcheck with i rounds (enumerate makes num_rounds == i).
    for (num_rounds, i) in (0..num_layers).enumerate() {
      // The last verified layer corresponds to the prover's input layer
      // (layer_id == 0), where the dot-product claims join the batch.
      if i == num_layers - 1 {
        claims_to_verify.extend(claims_dotp_vec);
      }
      // produce random coefficients, one for each instance
      let coeff_vec =
        transcript.challenge_vector(b"rand_coeffs_next_layer", claims_to_verify.len());
      // produce a joint claim
      let claim = (0..claims_to_verify.len())
        .map(|i| claims_to_verify[i] * coeff_vec[i])
        .sum();
      let (claim_last, rand_prod) = self.proof[i].verify(claim, num_rounds, 3, transcript);
      let claims_prod_left = &self.proof[i].claims_prod_left;
      let claims_prod_right = &self.proof[i].claims_prod_right;
      assert_eq!(claims_prod_left.len(), claims_prod_vec.len());
      assert_eq!(claims_prod_right.len(), claims_prod_vec.len());
      // NB: the loop variables below shadow the outer `i` (layer index).
      for i in 0..claims_prod_vec.len() {
        transcript.append_scalar(b"claim_prod_left", &claims_prod_left[i]);
        transcript.append_scalar(b"claim_prod_right", &claims_prod_right[i]);
      }
      assert_eq!(rand.len(), rand_prod.len());
      // Recompute eq(rand, rand_prod) locally.
      let eq: Scalar = (0..rand.len())
        .map(|i| {
          rand[i] * rand_prod[i] + (Scalar::one() - rand[i]) * (Scalar::one() - rand_prod[i])
        })
        .product();
      let mut claim_expected: Scalar = (0..claims_prod_vec.len())
        .map(|i| coeff_vec[i] * (claims_prod_left[i] * claims_prod_right[i] * eq))
        .sum();
      // add claims from the dotp instances
      if i == num_layers - 1 {
        let num_prod_instances = claims_prod_vec.len();
        let (claims_dotp_left, claims_dotp_right, claims_dotp_weight) = &self.claims_dotp;
        for i in 0..claims_dotp_left.len() {
          transcript.append_scalar(b"claim_dotp_left", &claims_dotp_left[i]);
          transcript.append_scalar(b"claim_dotp_right", &claims_dotp_right[i]);
          transcript.append_scalar(b"claim_dotp_weight", &claims_dotp_weight[i]);
          claim_expected += coeff_vec[i + num_prod_instances]
            * claims_dotp_left[i]
            * claims_dotp_right[i]
            * claims_dotp_weight[i];
        }
      }
      assert_eq!(claim_expected, claim_last);
      // produce a random challenge
      let r_layer = transcript.challenge_scalar(b"challenge_r_layer");
      claims_to_verify = (0..claims_prod_left.len())
        .map(|i| claims_prod_left[i] + r_layer * (claims_prod_right[i] - claims_prod_left[i]))
        .collect::<Vec<Scalar>>();
      // add claims to verify for dotp circuit
      if i == num_layers - 1 {
        let (claims_dotp_left, claims_dotp_right, claims_dotp_weight) = &self.claims_dotp;
        // Claims come in pairs (the prover split each dotp circuit in two);
        // condense each pair at r_layer.
        for i in 0..claims_dotp_vec.len() / 2 {
          // combine left claims
          let claim_left = claims_dotp_left[2 * i]
            + r_layer * (claims_dotp_left[2 * i + 1] - claims_dotp_left[2 * i]);
          let claim_right = claims_dotp_right[2 * i]
            + r_layer * (claims_dotp_right[2 * i + 1] - claims_dotp_right[2 * i]);
          let claim_weight = claims_dotp_weight[2 * i]
            + r_layer * (claims_dotp_weight[2 * i + 1] - claims_dotp_weight[2 * i]);
          claims_to_verify_dotp.push(claim_left);
          claims_to_verify_dotp.push(claim_right);
          claims_to_verify_dotp.push(claim_weight);
        }
      }
      let mut ext = vec![r_layer];
      ext.extend(rand_prod);
      rand = ext;
    }
    (claims_to_verify, claims_to_verify_dotp, rand)
  }
}
================================================
FILE: packages/Spartan-secq/src/r1csinstance.rs
================================================
use crate::transcript::AppendToTranscript;
use super::dense_mlpoly::DensePolynomial;
use super::errors::ProofVerifyError;
use super::math::Math;
use super::random::RandomTape;
use super::scalar::Scalar;
use super::sparse_mlpoly::{
MultiSparseMatPolynomialAsDense, SparseMatEntry, SparseMatPolyCommitment,
SparseMatPolyCommitmentGens, SparseMatPolyEvalProof, SparseMatPolynomial,
};
use super::timer::Timer;
use flate2::{write::ZlibEncoder, Compression};
use merlin::Transcript;
use rand_core::OsRng;
use serde::{Deserialize, Serialize};
/// An R1CS instance: `num_cons` constraints of the form
/// `<A_i, z> * <B_i, z> = <C_i, z>` over `z = (vars, 1, inputs)`,
/// with the three matrices stored as sparse multilinear polynomials.
#[derive(Debug, Serialize, Deserialize)]
pub struct R1CSInstance {
  num_cons: usize,
  num_vars: usize,
  num_inputs: usize,
  A: SparseMatPolynomial,
  B: SparseMatPolynomial,
  C: SparseMatPolynomial,
}
/// Public generators for committing to the three R1CS matrices.
pub struct R1CSCommitmentGens {
  gens: SparseMatPolyCommitmentGens,
}
impl R1CSCommitmentGens {
  /// Creates generators sized for committing to three sparse matrices of
  /// dimensions `num_cons x 2*num_vars` with at most `num_nz_entries`
  /// non-zero entries each. Requires `num_inputs < num_vars`.
  pub fn new(
    label: &'static [u8],
    num_cons: usize,
    num_vars: usize,
    num_inputs: usize,
    num_nz_entries: usize,
  ) -> R1CSCommitmentGens {
    assert!(num_inputs < num_vars);
    // The matrices are multilinear in log2(num_cons) row variables and
    // log2(2 * num_vars) column variables.
    let num_poly_vars_x = num_cons.log_2();
    let num_poly_vars_y = (2 * num_vars).log_2();
    R1CSCommitmentGens {
      gens: SparseMatPolyCommitmentGens::new(
        label,
        num_poly_vars_x,
        num_poly_vars_y,
        num_nz_entries,
        3,
      ),
    }
  }
}
/// A commitment to an R1CS instance: its public dimensions plus a joint
/// commitment to the A, B, C matrices.
#[derive(Debug, Serialize, Deserialize)]
pub struct R1CSCommitment {
  num_cons: usize,
  num_vars: usize,
  num_inputs: usize,
  comm: SparseMatPolyCommitment,
}
impl AppendToTranscript for R1CSCommitment {
  /// Binds the commitment's dimensions and matrix commitment into the
  /// transcript. The passed `_label` is unused; fixed labels are appended
  /// instead.
  fn append_to_transcript(&self, _label: &'static [u8], transcript: &mut Transcript) {
    transcript.append_u64(b"num_cons", self.num_cons as u64);
    transcript.append_u64(b"num_vars", self.num_vars as u64);
    transcript.append_u64(b"num_inputs", self.num_inputs as u64);
    self.comm.append_to_transcript(b"comm", transcript);
  }
}
/// Prover-side opening information for an `R1CSCommitment`: the dense
/// representation of the committed sparse matrices.
pub struct R1CSDecommitment {
  dense: MultiSparseMatPolynomialAsDense,
}
impl R1CSCommitment {
  /// Returns the number of constraints in the committed instance.
  pub fn get_num_cons(&self) -> usize {
    self.num_cons
  }

  /// Returns the number of variables in the committed instance.
  pub fn get_num_vars(&self) -> usize {
    self.num_vars
  }

  /// Returns the number of public inputs in the committed instance.
  pub fn get_num_inputs(&self) -> usize {
    self.num_inputs
  }
}
impl R1CSInstance {
  /// Builds an R1CS instance from sparse (row, col, value) triples for the
  /// A, B, C matrices. Requires `num_cons` and `num_vars` to be powers of
  /// two and `num_inputs + 1 <= num_vars`.
  pub fn new(
    num_cons: usize,
    num_vars: usize,
    num_inputs: usize,
    A: &[(usize, usize, Scalar)],
    B: &[(usize, usize, Scalar)],
    C: &[(usize, usize, Scalar)],
  ) -> R1CSInstance {
    Timer::print(&format!("number_of_constraints {}", num_cons));
    Timer::print(&format!("number_of_variables {}", num_vars));
    Timer::print(&format!("number_of_inputs {}", num_inputs));
    Timer::print(&format!("number_non-zero_entries_A {}", A.len()));
    Timer::print(&format!("number_non-zero_entries_B {}", B.len()));
    Timer::print(&format!("number_non-zero_entries_C {}", C.len()));
    // check that num_cons is a power of 2
    assert_eq!(num_cons.next_power_of_two(), num_cons);
    // check that num_vars is a power of 2
    assert_eq!(num_vars.next_power_of_two(), num_vars);
    // check that number_inputs + 1 <= num_vars
    assert!(num_inputs < num_vars);
    // no errors, so create polynomials
    let num_poly_vars_x = num_cons.log_2();
    let num_poly_vars_y = (2 * num_vars).log_2();
    let mat_A = (0..A.len())
      .map(|i| SparseMatEntry::new(A[i].0, A[i].1, A[i].2))
      .collect::<Vec<SparseMatEntry>>();
    let mat_B = (0..B.len())
      .map(|i| SparseMatEntry::new(B[i].0, B[i].1, B[i].2))
      .collect::<Vec<SparseMatEntry>>();
    let mat_C = (0..C.len())
      .map(|i| SparseMatEntry::new(C[i].0, C[i].1, C[i].2))
      .collect::<Vec<SparseMatEntry>>();
    let poly_A = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, mat_A);
    let poly_B = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, mat_B);
    let poly_C = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, mat_C);
    R1CSInstance {
      num_cons,
      num_vars,
      num_inputs,
      A: poly_A,
      B: poly_B,
      C: poly_C,
    }
  }

  /// Returns the number of variables.
  pub fn get_num_vars(&self) -> usize {
    self.num_vars
  }

  /// Returns the number of constraints.
  pub fn get_num_cons(&self) -> usize {
    self.num_cons
  }

  /// Returns the number of public inputs.
  pub fn get_num_inputs(&self) -> usize {
    self.num_inputs
  }

  /// Returns a compressed serialization of the instance, used as a digest
  /// to bind the instance into transcripts.
  pub fn get_digest(&self) -> Vec<u8> {
    let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default());
    bincode::serialize_into(&mut encoder, &self).unwrap();
    encoder.finish().unwrap()
  }

  /// Produces a random satisfiable R1CS instance of the given dimensions,
  /// returning the instance together with a satisfying assignment split
  /// into variables and inputs. Intended for tests and benchmarks.
  pub fn produce_synthetic_r1cs(
    num_cons: usize,
    num_vars: usize,
    num_inputs: usize,
  ) -> (R1CSInstance, Vec<Scalar>, Vec<Scalar>) {
    Timer::print(&format!("number_of_constraints {}", num_cons));
    Timer::print(&format!("number_of_variables {}", num_vars));
    Timer::print(&format!("number_of_inputs {}", num_inputs));
    let mut csprng: OsRng = OsRng;
    // assert num_cons and num_vars are power of 2
    assert_eq!((num_cons.log_2()).pow2(), num_cons);
    assert_eq!((num_vars.log_2()).pow2(), num_vars);
    // num_inputs + 1 <= num_vars
    assert!(num_inputs < num_vars);
    // z is organized as [vars,1,io]
    let size_z = num_vars + num_inputs + 1;
    // produce a random satisfying assignment
    let Z = {
      let mut Z: Vec<Scalar> = (0..size_z)
        .map(|_i| Scalar::random(&mut csprng))
        .collect::<Vec<Scalar>>();
      Z[num_vars] = Scalar::one(); // set the constant term to 1
      Z
    };
    // three sparse matrices
    let mut A: Vec<SparseMatEntry> = Vec::new();
    let mut B: Vec<SparseMatEntry> = Vec::new();
    let mut C: Vec<SparseMatEntry> = Vec::new();
    let one = Scalar::one();
    for i in 0..num_cons {
      // Pick z-entries for the A and B sides of constraint i, then build a
      // C row so that <A_i,z> * <B_i,z> = <C_i,z> holds by construction.
      let A_idx = i % size_z;
      let B_idx = (i + 2) % size_z;
      A.push(SparseMatEntry::new(i, A_idx, one));
      B.push(SparseMatEntry::new(i, B_idx, one));
      let AB_val = Z[A_idx] * Z[B_idx];
      let C_idx = (i + 3) % size_z;
      let C_val = Z[C_idx];
      if C_val == Scalar::zero() {
        // Fall back to the constant-1 column so the row still evaluates to
        // AB_val (cannot scale a zero entry).
        C.push(SparseMatEntry::new(i, num_vars, AB_val));
      } else {
        C.push(SparseMatEntry::new(
          i,
          C_idx,
          AB_val * C_val.invert().unwrap(),
        ));
      }
    }
    Timer::print(&format!("number_non-zero_entries_A {}", A.len()));
    Timer::print(&format!("number_non-zero_entries_B {}", B.len()));
    Timer::print(&format!("number_non-zero_entries_C {}", C.len()));
    let num_poly_vars_x = num_cons.log_2();
    let num_poly_vars_y = (2 * num_vars).log_2();
    let poly_A = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, A);
    let poly_B = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, B);
    let poly_C = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, C);
    let inst = R1CSInstance {
      num_cons,
      num_vars,
      num_inputs,
      A: poly_A,
      B: poly_B,
      C: poly_C,
    };
    assert!(inst.is_sat(&Z[..num_vars], &Z[num_vars + 1..]));
    (inst, Z[..num_vars].to_vec(), Z[num_vars + 1..].to_vec())
  }

  /// Checks whether `(vars, input)` satisfies every constraint, i.e.
  /// `Az ∘ Bz = Cz` for `z = (vars, 1, input)`.
  pub fn is_sat(&self, vars: &[Scalar], input: &[Scalar]) -> bool {
    assert_eq!(vars.len(), self.num_vars);
    assert_eq!(input.len(), self.num_inputs);
    let z = {
      let mut z = vars.to_vec();
      z.extend(&vec![Scalar::one()]);
      z.extend(input);
      z
    };
    // verify if Az * Bz - Cz = [0...]
    let Az = self
      .A
      .multiply_vec(self.num_cons, self.num_vars + self.num_inputs + 1, &z);
    let Bz = self
      .B
      .multiply_vec(self.num_cons, self.num_vars + self.num_inputs + 1, &z);
    let Cz = self
      .C
      .multiply_vec(self.num_cons, self.num_vars + self.num_inputs + 1, &z);
    assert_eq!(Az.len(), self.num_cons);
    assert_eq!(Bz.len(), self.num_cons);
    assert_eq!(Cz.len(), self.num_cons);
    // Count violated constraints; satisfiable iff none are violated.
    let res: usize = (0..self.num_cons)
      .map(|i| usize::from(Az[i] * Bz[i] != Cz[i]))
      .sum();
    res == 0
  }

  /// Computes Az, Bz, Cz as dense multilinear polynomials over the
  /// constraint index.
  pub fn multiply_vec(
    &self,
    num_rows: usize,
    num_cols: usize,
    z: &[Scalar],
  ) -> (DensePolynomial, DensePolynomial, DensePolynomial) {
    assert_eq!(num_rows, self.num_cons);
    assert_eq!(z.len(), num_cols);
    assert!(num_cols > self.num_vars);
    (
      DensePolynomial::new(self.A.multiply_vec(num_rows, num_cols, z)),
      DensePolynomial::new(self.B.multiply_vec(num_rows, num_cols, z)),
      DensePolynomial::new(self.C.multiply_vec(num_rows, num_cols, z)),
    )
  }

  /// Computes, for each matrix, the column-indexed table obtained by
  /// combining rows with the weights in `evals`.
  pub fn compute_eval_table_sparse(
    &self,
    num_rows: usize,
    num_cols: usize,
    evals: &[Scalar],
  ) -> (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>) {
    assert_eq!(num_rows, self.num_cons);
    assert!(num_cols > self.num_vars);
    let evals_A = self.A.compute_eval_table_sparse(evals, num_rows, num_cols);
    let evals_B = self.B.compute_eval_table_sparse(evals, num_rows, num_cols);
    let evals_C = self.C.compute_eval_table_sparse(evals, num_rows, num_cols);
    (evals_A, evals_B, evals_C)
  }

  /// Evaluates the multilinear extensions of A, B, C at the point (rx, ry).
  pub fn evaluate(&self, rx: &[Scalar], ry: &[Scalar]) -> (Scalar, Scalar, Scalar) {
    let evals = SparseMatPolynomial::multi_evaluate(&[&self.A, &self.B, &self.C], rx, ry);
    (evals[0], evals[1], evals[2])
  }

  /// Commits jointly to A, B, C; returns the public commitment and the
  /// prover-side decommitment.
  pub fn commit(&self, gens: &R1CSCommitmentGens) -> (R1CSCommitment, R1CSDecommitment) {
    let (comm, dense) = SparseMatPolynomial::multi_commit(&[&self.A, &self.B, &self.C], &gens.gens);
    let r1cs_comm = R1CSCommitment {
      num_cons: self.num_cons,
      num_vars: self.num_vars,
      num_inputs: self.num_inputs,
      comm,
    };
    let r1cs_decomm = R1CSDecommitment { dense };
    (r1cs_comm, r1cs_decomm)
  }
}
/// Proof that the committed R1CS matrices evaluate to claimed values at a
/// point (rx, ry).
#[derive(Debug, Serialize, Deserialize)]
pub struct R1CSEvalProof {
  proof: SparseMatPolyEvalProof,
}
impl R1CSEvalProof {
  /// Proves that the matrices behind `decomm` evaluate to `evals`
  /// (A, B, C order) at the point (rx, ry).
  pub fn prove(
    decomm: &R1CSDecommitment,
    rx: &[Scalar], // point at which the polynomial is evaluated
    ry: &[Scalar],
    evals: &(Scalar, Scalar, Scalar),
    gens: &R1CSCommitmentGens,
    transcript: &mut Transcript,
    random_tape: &mut RandomTape,
  ) -> R1CSEvalProof {
    let timer = Timer::new("R1CSEvalProof::prove");
    let proof = SparseMatPolyEvalProof::prove(
      &decomm.dense,
      rx,
      ry,
      &[evals.0, evals.1, evals.2],
      &gens.gens,
      transcript,
      random_tape,
    );
    timer.stop();
    R1CSEvalProof { proof }
  }

  /// Verifies the claimed evaluations `evals` (A, B, C order) at (rx, ry)
  /// against the matrix commitment `comm`.
  pub fn verify(
    &self,
    comm: &R1CSCommitment,
    rx: &[Scalar], // point at which the R1CS matrix polynomials are evaluated
    ry: &[Scalar],
    evals: &(Scalar, Scalar, Scalar),
    gens: &R1CSCommitmentGens,
    transcript: &mut Transcript,
  ) -> Result<(), ProofVerifyError> {
    self.proof.verify(
      &comm.comm,
      rx,
      ry,
      &[evals.0, evals.1, evals.2],
      &gens.gens,
      transcript,
    )
  }
}
================================================
FILE: packages/Spartan-secq/src/r1csproof.rs
================================================
#![allow(clippy::too_many_arguments)]
use super::commitments::{Commitments, MultiCommitGens};
use super::dense_mlpoly::{
DensePolynomial, EqPolynomial, PolyCommitment, PolyCommitmentGens, PolyEvalProof,
};
use super::errors::ProofVerifyError;
use super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul};
use super::math::Math;
use super::nizk::{EqualityProof, KnowledgeProof, ProductProof};
use super::r1csinstance::R1CSInstance;
use super::random::RandomTape;
use super::scalar::Scalar;
use super::sparse_mlpoly::{SparsePolyEntry, SparsePolynomial};
use super::sumcheck::ZKSumcheckInstanceProof;
use super::timer::Timer;
use super::transcript::{AppendToTranscript, ProofTranscript};
use crate::group::DecompressEncodedPoint;
use core::iter;
use merlin::Transcript;
use serde::{Deserialize, Serialize};
/// Zero-knowledge proof of R1CS satisfiability: a commitment to the witness,
/// two sumcheck proofs (phase 1 over constraints, phase 2 over the combined
/// matrix evaluations), and the associated claim/evaluation proofs.
#[derive(Serialize, Deserialize, Debug)]
pub struct R1CSProof {
  // Commitment to the multilinear extension of the witness.
  comm_vars: PolyCommitment,
  sc_proof_phase1: ZKSumcheckInstanceProof,
  // Commitments to the claimed values of Az, Bz, Cz and their product.
  claims_phase2: (
    CompressedGroup,
    CompressedGroup,
    CompressedGroup,
    CompressedGroup,
  ),
  pok_claims_phase2: (KnowledgeProof, ProductProof),
  proof_eq_sc_phase1: EqualityProof,
  sc_proof_phase2: ZKSumcheckInstanceProof,
  comm_vars_at_ry: CompressedGroup,
  proof_eval_vars_at_ry: PolyEvalProof,
  proof_eq_sc_phase2: EqualityProof,
}
/// Commitment generators used by the ZK sumchecks in `R1CSProof`:
/// sizes 1, 3 and 4 (for scalars and for degree-2/degree-3 round
/// polynomials, which have 3 and 4 coefficients).
pub struct R1CSSumcheckGens {
  gens_1: MultiCommitGens,
  gens_3: MultiCommitGens,
  gens_4: MultiCommitGens,
}
// TODO: fix passing gens_1_ref
impl R1CSSumcheckGens {
  /// Creates sumcheck generators, reusing the caller's size-1 generators and
  /// deriving fresh size-3 and size-4 generators from `label`.
  pub fn new(label: &'static [u8], gens_1_ref: &MultiCommitGens) -> Self {
    R1CSSumcheckGens {
      gens_1: gens_1_ref.clone(),
      gens_3: MultiCommitGens::new(3, label),
      gens_4: MultiCommitGens::new(4, label),
    }
  }
}
/// All public generators needed by `R1CSProof`: sumcheck generators plus
/// polynomial-commitment generators for the witness.
pub struct R1CSGens {
  gens_sc: R1CSSumcheckGens,
  gens_pc: PolyCommitmentGens,
}
impl R1CSGens {
  /// Creates generators for proving instances with `num_vars` variables.
  /// `_num_cons` is unused but kept for interface stability.
  pub fn new(label: &'static [u8], _num_cons: usize, num_vars: usize) -> Self {
    // The witness polynomial has log2(num_vars) variables.
    let gens_pc = PolyCommitmentGens::new(num_vars.log_2(), label);
    let gens_sc = R1CSSumcheckGens::new(label, &gens_pc.gens.gens_1);
    R1CSGens { gens_sc, gens_pc }
  }
}
impl R1CSProof {
  /// Runs the phase-1 ZK sumcheck, proving that
  /// `sum_x eq(tau, x) * (Az(x) * Bz(x) - Cz(x)) = 0` over all constraints.
  ///
  /// Returns the sumcheck proof, the challenge point, the final claimed
  /// evaluations of the four polynomials, and the blind for the post-sumcheck
  /// claim.
  fn prove_phase_one(
    num_rounds: usize,
    evals_tau: &mut DensePolynomial,
    evals_Az: &mut DensePolynomial,
    evals_Bz: &mut DensePolynomial,
    evals_Cz: &mut DensePolynomial,
    gens: &R1CSSumcheckGens,
    transcript: &mut Transcript,
    random_tape: &mut RandomTape,
  ) -> (ZKSumcheckInstanceProof, Vec<Scalar>, Vec<Scalar>, Scalar) {
    // Combiner: tau(x) * (Az(x) * Bz(x) - Cz(x)), zero for a satisfying z.
    let comb_func = |poly_A_comp: &Scalar,
                     poly_B_comp: &Scalar,
                     poly_C_comp: &Scalar,
                     poly_D_comp: &Scalar|
     -> Scalar { poly_A_comp * (poly_B_comp * poly_C_comp - poly_D_comp) };
    let (sc_proof_phase_one, r, claims, blind_claim_postsc) =
      ZKSumcheckInstanceProof::prove_cubic_with_additive_term(
        &Scalar::zero(), // claim is zero
        &Scalar::zero(), // blind for claim is also zero
        num_rounds,
        evals_tau,
        evals_Az,
        evals_Bz,
        evals_Cz,
        comb_func,
        &gens.gens_1,
        &gens.gens_4,
        transcript,
        random_tape,
      );
    (sc_proof_phase_one, r, claims, blind_claim_postsc)
  }
  /// Runs the phase-2 ZK sumcheck, proving a quadratic claim of the form
  /// `sum_y z(y) * ABC(y) = claim` (where ABC combines the matrix
  /// evaluations from phase 1).
  ///
  /// Returns the sumcheck proof, the challenge point, the final claimed
  /// evaluations, and the blind for the post-sumcheck claim.
  fn prove_phase_two(
    num_rounds: usize,
    claim: &Scalar,
    blind_claim: &Scalar,
    evals_z: &mut DensePolynomial,
    evals_ABC: &mut DensePolynomial,
    gens: &R1CSSumcheckGens,
    transcript: &mut Transcript,
    random_tape: &mut RandomTape,
  ) -> (ZKSumcheckInstanceProof, Vec<Scalar>, Vec<Scalar>, Scalar) {
    // Combiner: z(y) * ABC(y).
    let comb_func =
      |poly_A_comp: &Scalar, poly_B_comp: &Scalar| -> Scalar { poly_A_comp * poly_B_comp };
    let (sc_proof_phase_two, r, claims, blind_claim_postsc) = ZKSumcheckInstanceProof::prove_quad(
      claim,
      blind_claim,
      num_rounds,
      evals_z,
      evals_ABC,
      comb_func,
      &gens.gens_1,
      &gens.gens_3,
      transcript,
      random_tape,
    );
    (sc_proof_phase_two, r, claims, blind_claim_postsc)
  }
fn protocol_name() -> &'static [u8] {
b"R1CS proof"
}
pub fn prove(
inst: &R1CSInstance,
vars: Vec<Scalar>,
input: &[Scalar],
gens: &R1CSGens,
transcript: &mut Transcript,
random_tape: &mut RandomTape,
) -> (R1CSProof, Vec<Scalar>, Vec<Scalar>) {
let timer_prove = Timer::new("R1CSProof::prove");
transcript.append_protocol_name(R1CSProof::protocol_name());
// we currently require the number of |inputs| + 1 to be at most number of vars
assert!(input.len() < vars.len());
input.append_to_transcript(b"input", transcript);
let timer_commit = Timer::new("polycommit");
let (poly_vars, comm_vars, blinds_vars) = {
// create a multilinear polynomial using the supplied assignment for variables
let poly_vars = DensePolynomial::new(vars.clone());
// produce a commitment to the satisfying assignment
let (comm_vars, blinds_vars) = poly_vars.commit(&gens.gens_pc, Some(random_tape));
// add the commitment to the prover's transcript
comm_vars.append_to_transcript(b"poly_commitment", transcript);
(poly_vars, comm_vars, blinds_vars)
};
timer_commit.stop();
let timer_sc_proof_phase1 = Timer::new("prove_sc_phase_one");
// append input to variables to create a single vector z
let z = {
let num_inputs = input.len();
let num_vars = vars.len();
let mut z = vars;
z.extend(&vec![Scalar::one()]); // add constant term in z
z.extend(input);
z.extend(&vec![Scalar::zero(); num_vars - num_inputs - 1]); // we will pad with zeros
z
};
// derive the verifier's challenge tau
let (num_rounds_x, num_rounds_y) = (inst.get_num_cons().log_2(), z.len().log_2());
let tau = transcript.challenge_vector(b"challenge_tau", num_rounds_x);
// compute the initial evaluation table for R(\tau, x)
let mut poly_tau = DensePolynomial::new(EqPolynomial::new(tau).evals());
let (mut poly_Az, mut poly_Bz, mut poly_Cz) =
inst.multiply_vec(inst.get_num_cons(), z.len(), &z);
let (sc_proof_phase1, rx, _claims_phase1, blind_claim_postsc1) = R1CSProof::prove_phase_one(
num_rounds_x,
&mut poly_tau,
&mut poly_Az,
&mut poly_Bz,
&mut poly_Cz,
&gens.gens_sc,
transcript,
random_tape,
);
assert_eq!(poly_tau.len(), 1);
assert_eq!(poly_Az.len(), 1);
assert_eq!(poly_Bz.len(), 1);
assert_eq!(poly_Cz.len(), 1);
timer_sc_proof_phase1.stop();
let (tau_claim, Az_claim, Bz_claim, Cz_claim) =
(&poly_tau[0], &poly_Az[0], &poly_Bz[0], &poly_Cz[0]);
let (Az_blind, Bz_blind, Cz_blind, prod_Az_Bz_blind) = (
random_tape.random_scalar(b"Az_blind"),
random_tape.random_scalar(b"Bz_blind"),
random_tape.random_scalar(b"Cz_blind"),
random_tape.random_scalar(b"prod_Az_Bz_blind"),
);
let (pok_Cz_claim, comm_Cz_claim) = {
KnowledgeProof::prove(
&gens.gens_sc.gens_1,
transcript,
random_tape,
Cz_claim,
&Cz_blind,
)
};
let (proof_prod, comm_Az_claim, comm_Bz_claim, comm_prod_Az_Bz_claims) = {
let prod = Az_claim * Bz_claim;
ProductProof::prove(
&gens.gens_sc.gens_1,
transcript,
random_tape,
Az_claim,
&Az_blind,
Bz_claim,
&Bz_blind,
&prod,
&prod_Az_Bz_blind,
)
};
comm_Az_claim.append_to_transcript(b"comm_Az_claim", transcript);
comm_Bz_claim.append_to_transcript(b"comm_Bz_claim", transcript);
comm_Cz_claim.append_to_transcript(b"comm_Cz_claim", transcript);
comm_prod_Az_Bz_claims.append_to_transcript(b"comm_prod_Az_Bz_claims", transcript);
// prove the final step of sum-check #1
let taus_bound_rx = tau_claim;
let blind_expected_claim_postsc1 = taus_bound_rx * (prod_Az_Bz_blind - Cz_blind);
let claim_post_phase1 = (Az_claim * Bz_claim - Cz_claim) * taus_bound_rx;
let (proof_eq_sc_phase1, _C1, _C2) = EqualityProof::prove(
&gens.gens_sc.gens_1,
transcript,
random_tape,
&claim_post_phase1,
&blind_expected_claim_postsc1,
&claim_post_phase1,
&blind_claim_postsc1,
);
let timer_sc_proof_phase2 = Timer::new("prove_sc_phase_two");
// combine the three claims into a single claim
let r_A = transcript.challenge_scalar(b"challenege_Az");
let r_B = transcript.challenge_scalar(b"challenege_Bz");
let r_C = transcript.challenge_scalar(b"challenege_Cz");
let claim_phase2 = r_A * Az_claim + r_B * Bz_claim + r_C * Cz_claim;
let blind_claim_phase2 = r_A * Az_blind + r_B * Bz_blind + r_C * Cz_blind;
let evals_ABC = {
// compute the initial evaluation table for R(\tau, x)
let evals_rx = EqPolynomial::new(rx.clone()).evals();
let (evals_A, evals_B, evals_C) =
inst.compute_eval_table_sparse(inst.get_num_cons(), z.len(), &evals_rx);
assert_eq!(evals_A.len(), evals_B.len());
assert_eq!(evals_A.len(), evals_C.len());
(0..evals_A.len())
.map(|i| r_A * evals_A[i] + r_B * evals_B[i] + r_C * evals_C[i])
.collect::<Vec<Scalar>>()
};
// another instance of the sum-check protocol
let (sc_proof_phase2, ry, claims_phase2, blind_claim_postsc2) = R1CSProof::prove_phase_two(
num_rounds_y,
&claim_phase2,
&blind_claim_phase2,
&mut DensePolynomial::new(z),
&mut DensePolynomial::new(evals_ABC),
&gens.gens_sc,
transcript,
random_tape,
);
timer_sc_proof_phase2.stop();
let timer_polyeval = Timer::new("polyeval");
let eval_vars_at_ry = poly_vars.evaluate(&ry[1..]);
let blind_eval = random_tape.random_scalar(b"blind_eval");
let (proof_eval_vars_at_ry, comm_vars_at_ry) = PolyEvalProof::prove(
&poly_vars,
Some(&blinds_vars),
&ry[1..],
&eval_vars_at_ry,
Some(&blind_eval),
&gens.gens_pc,
transcript,
random_tape,
);
timer_polyeval.stop();
// prove the final step of sum-check #2
let blind_eval_Z_at_ry = (Scalar::one() - ry[0]) * blind_eval;
let blind_expected_claim_postsc2 = claims_phase2[1] * blind_eval_Z_at_ry;
let claim_post_phase2 = claims_phase2[0] * claims_phase2[1];
let (proof_eq_sc_phase2, _C1, _C2) = EqualityProof::prove(
&gens.gens_pc.gens.gens_1,
transcript,
random_tape,
&claim_post_phase2,
&blind_expected_claim_postsc2,
&claim_post_phase2,
&blind_claim_postsc2,
);
timer_prove.stop();
(
R1CSProof {
comm_vars,
sc_proof_phase1,
claims_phase2: (
comm_Az_claim,
comm_Bz_claim,
comm_Cz_claim,
comm_prod_Az_Bz_claims,
),
pok_claims_phase2: (pok_Cz_claim, proof_prod),
proof_eq_sc_phase1,
sc_proof_phase2,
comm_vars_at_ry,
proof_eval_vars_at_ry,
proof_eq_sc_phase2,
},
rx,
ry,
)
}
pub fn verify(
&self,
num_vars: usize,
num_cons: usize,
input: &[Scalar],
evals: &(Scalar, Scalar, Scalar),
transcript: &mut Transcript,
gens: &R1CSGens,
) -> Result<(Vec<Scalar>, Vec<Scalar>), ProofVerifyError> {
transcript.append_protocol_name(R1CSProof::protocol_name());
input.append_to_transcript(b"input", transcript);
let n = num_vars;
// add the commitment to the verifier's transcript
self
.comm_vars
.append_to_transcript(b"poly_commitment", transcript);
let (num_rounds_x, num_rounds_y) = (num_cons.log_2(), (2 * num_vars).log_2());
// derive the verifier's challenge tau
let tau = transcript.challenge_vector(b"challenge_tau", num_rounds_x);
// verify the first sum-check instance
let claim_phase1 = Scalar::zero()
.commit(&Scalar::zero(), &gens.gens_sc.gens_1)
.compress();
let (comm_claim_post_phase1, rx) = self.sc_proof_phase1.verify(
&claim_phase1,
num_rounds_x,
3,
&gens.gens_sc.gens_1,
&gens.gens_sc.gens_4,
transcript,
)?;
// perform the intermediate sum-check test with claimed Az, Bz, and Cz
let (comm_Az_claim, comm_Bz_claim, comm_Cz_claim, comm_prod_Az_Bz_claims) = &self.claims_phase2;
let (pok_Cz_claim, proof_prod) = &self.pok_claims_phase2;
pok_Cz_claim.verify(&gens.gens_sc.gens_1, transcript, comm_Cz_claim)?;
proof_prod.verify(
&gens.gens_sc.gens_1,
transcript,
comm_Az_claim,
comm_Bz_claim,
comm_prod_Az_Bz_claims,
)?;
comm_Az_claim.append_to_transcript(b"comm_Az_claim", transcript);
comm_Bz_claim.append_to_transcript(b"comm_Bz_claim", transcript);
comm_Cz_claim.append_to_transcript(b"comm_Cz_claim", transcript);
comm_prod_Az_Bz_claims.append_to_transcript(b"comm_prod_Az_Bz_claims", transcript);
let taus_bound_rx: Scalar = (0..rx.len())
.map(|i| rx[i] * tau[i] + (Scalar::one() - rx[i]) * (Scalar::one() - tau[i]))
.product();
let expected_claim_post_phase1 = (taus_bound_rx
* (comm_prod_Az_Bz_claims.decompress().unwrap() - comm_Cz_claim.decompress().unwrap()))
.compress();
// verify proof that expected_claim_post_phase1 == claim_post_phase1
self.proof_eq_sc_phase1.verify(
&gens.gens_sc.gens_1,
transcript,
&expected_claim_post_phase1,
&comm_claim_post_phase1,
)?;
// derive three public challenges and then derive a joint claim
let r_A = transcript.challenge_scalar(b"challenege_Az");
let r_B = transcript.challenge_scalar(b"challenege_Bz");
let r_C = transcript.challenge_scalar(b"challenege_Cz");
// r_A * comm_Az_claim + r_B * comm_Bz_claim + r_C * comm_Cz_claim;
let comm_claim_phase2 = GroupElement::vartime_multiscalar_mul(
iter::once(r_A)
.chain(iter::once(r_B))
.chain(iter::once(r_C))
.collect(),
iter::once(&comm_Az_claim)
.chain(iter::once(&comm_Bz_claim))
.chain(iter::once(&comm_Cz_claim))
.map(|pt| pt.decompress().unwrap())
.collect(),
)
.compress();
// verify the joint claim with a sum-check protocol
let (comm_claim_post_phase2, ry) = self.sc_proof_phase2.verify(
&comm_claim_phase2,
num_rounds_y,
2,
&gens.gens_sc.gens_1,
&gens.gens_sc.gens_3,
transcript,
)?;
// verify Z(ry) proof against the initial commitment
self.proof_eval_vars_at_ry.verify(
&gens.gens_pc,
transcript,
&ry[1..],
&self.comm_vars_at_ry,
&self.comm_vars,
)?;
let poly_input_eval = {
// constant term
let mut input_as_sparse_poly_entries = vec![SparsePolyEntry::new(0, Scalar::one())];
//remaining inputs
input_as_sparse_poly_entries.extend(
(0..input.len())
.map(|i| SparsePolyEntry::new(i + 1, input[i]))
.collect::<Vec<SparsePolyEntry>>(),
);
SparsePolynomial::new(n.log_2(), input_as_sparse_poly_entries).evaluate(&ry[1..])
};
// compute commitment to eval_Z_at_ry = (Scalar::one() - ry[0]) * self.eval_vars_at_ry + ry[0] * poly_input_eval
let comm_eval_Z_at_ry = GroupElement::vartime_multiscalar_mul(
iter::once(Scalar::one() - ry[0])
.chain(iter::once(ry[0]))
.map(|s| s)
.collect(),
iter::once(self.comm_vars_at_ry.decompress().unwrap())
.chain(iter::once(
poly_input_eval.commit(&Scalar::zero(), &gens.gens_pc.gens.gens_1),
))
.collect(),
);
// perform the final check in the second sum-check protocol
let (eval_A_r, eval_B_r, eval_C_r) = evals;
let expected_claim_post_phase2 =
((r_A * eval_A_r + r_B * eval_B_r + r_C * eval_C_r) * comm_eval_Z_at_ry).compress();
// verify proof that expected_claim_post_phase1 == claim_post_phase1
self.proof_eq_sc_phase2.verify(
&gens.gens_sc.gens_1,
transcript,
&expected_claim_post_phase2,
&comm_claim_post_phase2,
)?;
Ok((rx, ry))
}
}
#[cfg(test)]
mod tests {
  use super::*;
  use rand_core::OsRng;

  /// Builds a hand-written R1CS with three constraints over five "real"
  /// variables (dimensions rounded up to powers of two), together with a
  /// satisfying assignment and inputs.
  fn produce_tiny_r1cs() -> (R1CSInstance, Vec<Scalar>, Vec<Scalar>) {
    // three constraints over five variables Z1..Z5,
    // rounded to the nearest power of two
    let num_cons = 128;
    let num_vars = 256;
    let num_inputs = 2;

    let one = Scalar::one();
    let mut A: Vec<(usize, usize, Scalar)> = Vec::new();
    let mut B: Vec<(usize, usize, Scalar)> = Vec::new();
    let mut C: Vec<(usize, usize, Scalar)> = Vec::new();

    // constraint 0: (Z1 + Z2) * I0 - Z3 = 0
    A.push((0, 0, one));
    A.push((0, 1, one));
    B.push((0, num_vars + 1, one));
    C.push((0, 2, one));

    // constraint 1: (Z1 + I1) * Z3 - Z4 = 0
    A.push((1, 0, one));
    A.push((1, num_vars + 2, one));
    B.push((1, 2, one));
    C.push((1, 3, one));

    // constraint 2: Z5 * 1 - 0 = 0
    A.push((2, 4, one));
    B.push((2, num_vars, one));

    let inst = R1CSInstance::new(num_cons, num_vars, num_inputs, &A, &B, &C);

    // sample a satisfying assignment
    let mut rng: OsRng = OsRng;
    let i0 = Scalar::random(&mut rng);
    let i1 = Scalar::random(&mut rng);
    let z1 = Scalar::random(&mut rng);
    let z2 = Scalar::random(&mut rng);
    let z3 = (z1 + z2) * i0; // satisfies constraint 0
    let z4 = (z1 + i1) * z3; // satisfies constraint 1
    let z5 = Scalar::zero(); // satisfies constraint 2

    let mut vars = vec![Scalar::zero(); num_vars];
    vars[0] = z1;
    vars[1] = z2;
    vars[2] = z3;
    vars[3] = z4;
    vars[4] = z5;

    let mut input = vec![Scalar::zero(); num_inputs];
    input[0] = i0;
    input[1] = i1;

    (inst, vars, input)
  }

  #[test]
  fn test_tiny_r1cs() {
    let (inst, vars, input) = produce_tiny_r1cs();
    assert!(inst.is_sat(&vars, &input));
  }

  #[test]
  fn test_synthetic_r1cs() {
    let (inst, vars, input) = R1CSInstance::produce_synthetic_r1cs(1024, 1024, 10);
    assert!(inst.is_sat(&vars, &input));
  }

  #[test]
  pub fn check_r1cs_proof() {
    let num_vars = 1024;
    let num_cons = num_vars;
    let num_inputs = 10;
    let (inst, vars, input) = R1CSInstance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);

    let gens = R1CSGens::new(b"test-m", num_cons, num_vars);
    let mut random_tape = RandomTape::new(b"proof");
    let mut prover_transcript = Transcript::new(b"example");
    let (proof, rx, ry) =
      R1CSProof::prove(&inst, vars, &input, &gens, &mut prover_transcript, &mut random_tape);

    let inst_evals = inst.evaluate(&rx, &ry);

    let mut verifier_transcript = Transcript::new(b"example");
    let res = proof.verify(
      inst.get_num_vars(),
      inst.get_num_cons(),
      &input,
      &inst_evals,
      &mut verifier_transcript,
      &gens,
    );
    assert!(res.is_ok());
  }
}
================================================
FILE: packages/Spartan-secq/src/random.rs
================================================
use super::scalar::Scalar;
use super::transcript::ProofTranscript;
use merlin::Transcript;
use rand_core::OsRng;
/// A deterministic stream of prover blinding factors, implemented as a
/// Merlin transcript seeded once with OS randomness at construction.
pub struct RandomTape {
  tape: Transcript,
}
impl RandomTape {
  /// Creates a tape whose output stream is keyed by `name` and a fresh
  /// random scalar drawn from the OS RNG.
  pub fn new(name: &'static [u8]) -> Self {
    let mut csprng = OsRng::default();
    let mut tape = Transcript::new(name);
    tape.append_scalar(b"init_randomness", &Scalar::random(&mut csprng));
    Self { tape }
  }

  /// Draws the next scalar from the tape under `label`.
  pub fn random_scalar(&mut self, label: &'static [u8]) -> Scalar {
    self.tape.challenge_scalar(label)
  }

  /// Draws `len` scalars from the tape under `label`.
  pub fn random_vector(&mut self, label: &'static [u8], len: usize) -> Vec<Scalar> {
    self.tape.challenge_vector(label, len)
  }
}
================================================
FILE: packages/Spartan-secq/src/scalar/mod.rs
================================================
use secq256k1::elliptic_curve::ops::Reduce;
use secq256k1::U256;
mod scalar;
// Field element type used throughout this crate (secq256k1 scalar field).
pub type Scalar = scalar::Scalar;
// Companion scalar type from the `secq256k1` crate.
pub type ScalarBytes = secq256k1::Scalar;
/// Conversion from primitive types into field elements.
pub trait ScalarFromPrimitives {
  fn to_scalar(self) -> Scalar;
}
impl ScalarFromPrimitives for usize {
  /// Converts a `usize` to its field representative.
  ///
  /// Uses the field's `From<u64>` conversion directly; the previous
  /// implementation summed `self` copies of one, i.e. O(self) field
  /// additions, which is quadratic-feeling in callers that convert sizes.
  /// The result is identical for every representable `usize`.
  #[inline]
  fn to_scalar(self) -> Scalar {
    Scalar::from(self as u64)
  }
}
impl ScalarFromPrimitives for bool {
  /// Maps `true` to one and `false` to zero.
  #[inline]
  fn to_scalar(self) -> Scalar {
    match self {
      true => Scalar::one(),
      false => Scalar::zero(),
    }
  }
}
/// Conversion from this crate's `Scalar` into the `secq256k1` crate's
/// scalar type (`ScalarBytes`).
pub trait ScalarBytesFromScalar {
  fn decompress_scalar(s: &Scalar) -> ScalarBytes;
  fn decompress_vector(s: &[Scalar]) -> Vec<ScalarBytes>;
}
impl ScalarBytesFromScalar for Scalar {
  /// Re-interprets the canonical little-endian bytes of `s` as a
  /// `secq256k1` scalar, reducing modulo that field's order.
  fn decompress_scalar(s: &Scalar) -> ScalarBytes {
    ScalarBytes::from_uint_reduced(U256::from_le_slice(&s.to_bytes()))
  }

  /// Element-wise `decompress_scalar` over a slice.
  fn decompress_vector(s: &[Scalar]) -> Vec<ScalarBytes> {
    s.iter().map(Scalar::decompress_scalar).collect()
  }
}
================================================
FILE: packages/Spartan-secq/src/scalar/scalar.rs
================================================
//! This module provides an implementation of the secq256k1's scalar field $\mathbb{F}_q$
//! where `q = 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f`
//! This module is an adaptation of code from the bls12-381 crate.
//! We modify various constants (MODULUS, R, R2, etc.) to appropriate values for secq256k1 and update tests
#![allow(clippy::all)]
use core::borrow::Borrow;
use core::convert::TryFrom;
use core::fmt;
use core::iter::{Product, Sum};
use core::ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign};
use hex_literal::hex;
use num_bigint_dig::{BigUint, ModInverse};
use rand_core::{CryptoRng, RngCore};
use serde::de::Visitor;
use serde::{Deserialize, Serialize};
use subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption};
use zeroize::Zeroize;
// use crate::util::{adc, mac, sbb};
/// Compute a + b + carry, returning the result and the new carry over.
#[inline(always)]
pub const fn adc(a: u64, b: u64, carry: u64) -> (u64, u64) {
  // The 128-bit sum cannot overflow: 3 * (2^64 - 1) < 2^128.
  let wide = (a as u128) + (b as u128) + (carry as u128);
  (wide as u64, (wide >> 64) as u64)
}
/// Compute a - (b + borrow), returning the result and the new borrow.
/// Only the top bit of `borrow` is consulted: callers pass either zero or
/// the all-ones borrow produced by a previous `sbb`.
#[inline(always)]
pub const fn sbb(a: u64, b: u64, borrow: u64) -> (u64, u64) {
  let diff = (a as u128).wrapping_sub((b as u128) + ((borrow >> 63) as u128));
  (diff as u64, (diff >> 64) as u64)
}
/// Compute a + (b * c) + carry, returning the result and the new carry over.
#[inline(always)]
pub const fn mac(a: u64, b: u64, c: u64, carry: u64) -> (u64, u64) {
  // max value is (2^64-1) + (2^64-1)^2 + (2^64-1) < 2^128, so no overflow.
  let wide = (a as u128) + ((b as u128) * (c as u128)) + (carry as u128);
  (wide as u64, (wide >> 64) as u64)
}
// Given an existing `Add<&Rhs> for &Lhs` impl, derives the three remaining
// owned/borrowed `Add` operator variants by delegating to it.
macro_rules! impl_add_binop_specify_output {
  ($lhs:ident, $rhs:ident, $output:ident) => {
    impl<'b> Add<&'b $rhs> for $lhs {
      type Output = $output;
      #[inline]
      fn add(self, rhs: &'b $rhs) -> $output {
        &self + rhs
      }
    }
    impl<'a> Add<$rhs> for &'a $lhs {
      type Output = $output;
      #[inline]
      fn add(self, rhs: $rhs) -> $output {
        self + &rhs
      }
    }
    impl Add<$rhs> for $lhs {
      type Output = $output;
      #[inline]
      fn add(self, rhs: $rhs) -> $output {
        &self + &rhs
      }
    }
  };
}
// Given an existing `Sub<&Rhs> for &Lhs` impl, derives the three remaining
// owned/borrowed `Sub` operator variants by delegating to it.
macro_rules! impl_sub_binop_specify_output {
  ($lhs:ident, $rhs:ident, $output:ident) => {
    impl<'b> Sub<&'b $rhs> for $lhs {
      type Output = $output;
      #[inline]
      fn sub(self, rhs: &'b $rhs) -> $output {
        &self - rhs
      }
    }
    impl<'a> Sub<$rhs> for &'a $lhs {
      type Output = $output;
      #[inline]
      fn sub(self, rhs: $rhs) -> $output {
        self - &rhs
      }
    }
    impl Sub<$rhs> for $lhs {
      type Output = $output;
      #[inline]
      fn sub(self, rhs: $rhs) -> $output {
        &self - &rhs
      }
    }
  };
}
// Convenience wrapper: derives both the `Add` and `Sub` operator variants.
macro_rules! impl_binops_additive_specify_output {
  ($lhs:ident, $rhs:ident, $output:ident) => {
    impl_add_binop_specify_output!($lhs, $rhs, $output);
    impl_sub_binop_specify_output!($lhs, $rhs, $output);
  };
}
// Given an existing `Mul<&Rhs> for &Lhs` impl, derives the three remaining
// owned/borrowed `Mul` operator variants by delegating to it.
macro_rules! impl_binops_multiplicative_mixed {
  ($lhs:ident, $rhs:ident, $output:ident) => {
    impl<'b> Mul<&'b $rhs> for $lhs {
      type Output = $output;
      #[inline]
      fn mul(self, rhs: &'b $rhs) -> $output {
        &self * rhs
      }
    }
    impl<'a> Mul<$rhs> for &'a $lhs {
      type Output = $output;
      #[inline]
      fn mul(self, rhs: $rhs) -> $output {
        self * &rhs
      }
    }
    impl Mul<$rhs> for $lhs {
      type Output = $output;
      #[inline]
      fn mul(self, rhs: $rhs) -> $output {
        &self * &rhs
      }
    }
  };
}
// Derives all `Add`/`Sub` operator variants plus the corresponding
// `AddAssign`/`SubAssign` impls, all delegating to the `&Lhs op &Rhs` impl.
macro_rules! impl_binops_additive {
  ($lhs:ident, $rhs:ident) => {
    impl_binops_additive_specify_output!($lhs, $rhs, $lhs);
    impl SubAssign<$rhs> for $lhs {
      #[inline]
      fn sub_assign(&mut self, rhs: $rhs) {
        *self = &*self - &rhs;
      }
    }
    impl AddAssign<$rhs> for $lhs {
      #[inline]
      fn add_assign(&mut self, rhs: $rhs) {
        *self = &*self + &rhs;
      }
    }
    impl<'b> SubAssign<&'b $rhs> for $lhs {
      #[inline]
      fn sub_assign(&mut self, rhs: &'b $rhs) {
        *self = &*self - rhs;
      }
    }
    impl<'b> AddAssign<&'b $rhs> for $lhs {
      #[inline]
      fn add_assign(&mut self, rhs: &'b $rhs) {
        *self = &*self + rhs;
      }
    }
  };
}
// Derives all `Mul` operator variants plus the corresponding `MulAssign`
// impls, all delegating to the `&Lhs * &Rhs` impl.
macro_rules! impl_binops_multiplicative {
  ($lhs:ident, $rhs:ident) => {
    impl_binops_multiplicative_mixed!($lhs, $rhs, $lhs);
    impl MulAssign<$rhs> for $lhs {
      #[inline]
      fn mul_assign(&mut self, rhs: $rhs) {
        *self = &*self * &rhs;
      }
    }
    impl<'b> MulAssign<&'b $rhs> for $lhs {
      #[inline]
      fn mul_assign(&mut self, rhs: &'b $rhs) {
        *self = &*self * rhs;
      }
    }
  };
}
/// Represents an element of the scalar field $\mathbb{F}_q$ of the secq256k1 elliptic
/// curve construction.
// The internal representation of this type is four 64-bit unsigned
// integers in little-endian order. `Scalar` values are always in
// Montgomery form; i.e., Scalar(a) = aR mod q, with R = 2^256.
// Five limbs are carried (not four); the fifth appears to absorb carries in
// `montgomery_reduce`/`mul` — NOTE(review): confirm the invariant on when
// limb 4 is zero, since `ct_eq` and `to_bytes` treat values as 4 limbs.
#[derive(Clone, Copy, Eq)]
pub struct Scalar(pub(crate) [u64; 5]);
use serde::ser::SerializeSeq;
use serde::{Deserializer, Serializer};
impl Serialize for Scalar {
  /// Serializes the scalar as a sequence of its 32 canonical
  /// little-endian bytes.
  fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
    let bytes = self.to_bytes();
    let mut seq = serializer.serialize_seq(Some(bytes.len()))?;
    for byte in bytes.iter() {
      seq.serialize_element(byte)?;
    }
    seq.end()
  }
}
struct U64ArrayVisitor;
impl<'de> Visitor<'de> for U64ArrayVisitor {
type Value = Scalar;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
formatter.write_str("a sequence of 4 u64 values")
}
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: serde::de::SeqAccess<'de>,
{
let mut result = [0u64; 4];
for i in 0..4 {
let mut val: u64 = 0;
for j in 0..8 {
val += (seq.next_element::<u8>().unwrap().unwrap() as u64) * 256u64.pow(j)
}
result[i] = val;
}
Ok(Scalar::from_raw(result))
}
}
impl<'de> Deserialize<'de> for Scalar {
  /// Deserializes a scalar from the byte-sequence format emitted by
  /// the `Serialize` impl above.
  fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
  where
    D: Deserializer<'de>,
  {
    deserializer.deserialize_seq(U64ArrayVisitor)
  }
}
impl fmt::Debug for Scalar {
  /// Formats the scalar as "0x"-prefixed big-endian hex of its canonical
  /// byte representation.
  fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    write!(f, "0x")?;
    self
      .to_bytes()
      .iter()
      .rev()
      .try_for_each(|byte| write!(f, "{:02x}", byte))
  }
}
impl From<u64> for Scalar {
  /// Embeds `val` into the field, converting to Montgomery form by
  /// multiplying with R^2.
  fn from(val: u64) -> Scalar {
    Scalar([val, 0, 0, 0, 0]) * R2
  }
}
impl ConstantTimeEq for Scalar {
  /// Constant-time limb-wise equality.
  // NOTE(review): only the first four limbs are compared; this assumes the
  // fifth (carry) limb is zero for reduced values — confirm all code paths
  // maintain that invariant before relying on equality of unreduced values.
  fn ct_eq(&self, other: &Self) -> Choice {
    self.0[0].ct_eq(&other.0[0])
      & self.0[1].ct_eq(&other.0[1])
      & self.0[2].ct_eq(&other.0[2])
      & self.0[3].ct_eq(&other.0[3])
  }
}
impl PartialEq for Scalar {
  /// Equality via the constant-time comparison above.
  #[inline]
  fn eq(&self, other: &Self) -> bool {
    self.ct_eq(other).unwrap_u8() == 1
  }
}
impl ConditionallySelectable for Scalar {
  /// Selects `a` (choice = 0) or `b` (choice = 1) limb-wise without branching.
  fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
    Scalar([
      u64::conditional_select(&a.0[0], &b.0[0], choice),
      u64::conditional_select(&a.0[1], &b.0[1], choice),
      u64::conditional_select(&a.0[2], &b.0[2], choice),
      u64::conditional_select(&a.0[3], &b.0[3], choice),
      u64::conditional_select(&a.0[4], &b.0[4], choice),
    ])
  }
}
/// Constant representing the modulus
/// 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f
// Limbs are little-endian; the fifth limb is zero and exists only to match
// the 5-limb `Scalar` representation.
const MODULUS: Scalar = Scalar([
  0xfffffffefffffc2f,
  0xffffffffffffffff,
  0xffffffffffffffff,
  0xffffffffffffffff,
  0,
]);
impl<'a> Neg for &'a Scalar {
  type Output = Scalar;
  #[inline]
  fn neg(self) -> Scalar {
    // Resolves to the inherent `Scalar::neg` (inherent methods take
    // precedence over trait methods during resolution), so this is not
    // self-recursive — the inherent impl lives further down this file.
    self.neg()
  }
}
impl Neg for Scalar {
  type Output = Scalar;
  #[inline]
  fn neg(self) -> Scalar {
    // delegate to the by-reference impl above
    -&self
  }
}
// The reference-reference operator impls below delegate to the inherent
// const methods on `Scalar` (`sub`, `add`, `mul`), which take precedence
// over the trait methods during method resolution (so no recursion).
impl<'a, 'b> Sub<&'b Scalar> for &'a Scalar {
  type Output = Scalar;
  #[inline]
  fn sub(self, rhs: &'b Scalar) -> Scalar {
    self.sub(rhs)
  }
}
impl<'a, 'b> Add<&'b Scalar> for &'a Scalar {
  type Output = Scalar;
  #[inline]
  fn add(self, rhs: &'b Scalar) -> Scalar {
    self.add(rhs)
  }
}
impl<'a, 'b> Mul<&'b Scalar> for &'a Scalar {
  type Output = Scalar;
  #[inline]
  fn mul(self, rhs: &'b Scalar) -> Scalar {
    self.mul(rhs)
  }
}
// Derive the owned/borrowed `+`, `-`, `*` operator variants (and the
// corresponding `*Assign` impls) from the reference-reference impls above.
impl_binops_additive!(Scalar, Scalar);
impl_binops_multiplicative!(Scalar, Scalar);
/// INV = -(q^{-1} mod 2^64) mod 2^64
// Montgomery multiplication constant consumed by `montgomery_reduce`.
const INV: u64 = 0xd838091dd2253531;
/// R = 2^256 mod q
// Montgomery form of 1 (see `Scalar::one`).
const R: Scalar = Scalar([
  0x00000001000003d1,
  0x0000000000000000,
  0x0000000000000000,
  0x0000000000000000,
  0x0,
]);
/// R^2 = 2^512 mod q
// Multiplying a canonical value by R2 converts it into Montgomery form.
const R2: Scalar = Scalar([
  0x000007a2000e90a1,
  0x0000000000000001,
  0x0000000000000000,
  0x0000000000000000,
  0,
]);
/// R^3 = 2^768 mod q
// Used by `from_u512` to put the upper 256-bit digit into Montgomery form.
const R3: Scalar = Scalar([
  0x002bb1e33795f671,
  0x0000000100000b73,
  0x0000000000000000,
  0x0000000000000000,
  0x0,
]);
impl Default for Scalar {
  /// The default scalar is zero.
  #[inline]
  fn default() -> Self {
    Self::zero()
  }
}
impl<T> Product<T> for Scalar
where
  T: Borrow<Scalar>,
{
  /// Multiplies all items together; the empty product is one.
  fn product<I>(iter: I) -> Self
  where
    I: Iterator<Item = T>,
  {
    let mut acc = Scalar::one();
    for item in iter {
      acc = acc * item.borrow();
    }
    acc
  }
}
impl<T> Sum<T> for Scalar
where
  T: Borrow<Scalar>,
{
  /// Adds all items together; the empty sum is zero.
  fn sum<I>(iter: I) -> Self
  where
    I: Iterator<Item = T>,
  {
    let mut acc = Scalar::zero();
    for item in iter {
      acc = acc + item.borrow();
    }
    acc
  }
}
impl Zeroize for Scalar {
  /// Clears all five limbs.
  // NOTE(review): a plain assignment can be optimized away; whether this
  // reliably wipes secrets depends on how `zeroize` invokes it — confirm
  // if this type ever holds material that must be erased.
  fn zeroize(&mut self) {
    self.0 = [0u64; 5];
  }
}
impl Scalar {
  /// Returns zero, the additive identity.
  // Zero is the same in Montgomery and canonical form.
  #[inline]
  pub const fn zero() -> Scalar {
    Scalar([0, 0, 0, 0, 0])
  }
  /// Returns one, the multiplicative identity.
  // One in Montgomery form is R = 2^256 mod q.
  #[inline]
  pub const fn one() -> Scalar {
    R
  }
pub fn random<Rng: RngCore + CryptoRng>(rng: &mut Rng) -> Self {
let mut limbs = [0u64; 8];
for i in 0..8 {
limbs[i] = rng.next_u64();
}
Scalar::from_u512(limbs)
}
  /// Doubles this field element (delegates to the inherent const `add`).
  #[inline]
  pub const fn double(&self) -> Scalar {
    // TODO: This can be achieved more efficiently with a bitshift.
    self.add(self)
  }
  /// Attempts to convert a little-endian byte representation of
  /// a scalar into a `Scalar`, failing if the input is not canonical.
  // Returns a CtOption whose is_some flag is computed without branching on
  // the secret value.
  pub fn from_bytes(bytes: &[u8; 32]) -> CtOption<Scalar> {
    let mut tmp = Scalar([0, 0, 0, 0, 0]);
    tmp.0[0] = u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[..8]).unwrap());
    tmp.0[1] = u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[8..16]).unwrap());
    tmp.0[2] = u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[16..24]).unwrap());
    tmp.0[3] = u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[24..32]).unwrap());
    // Try to subtract the modulus
    let (_, borrow) = sbb(tmp.0[0], MODULUS.0[0], 0);
    let (_, borrow) = sbb(tmp.0[1], MODULUS.0[1], borrow);
    let (_, borrow) = sbb(tmp.0[2], MODULUS.0[2], borrow);
    let (_, borrow) = sbb(tmp.0[3], MODULUS.0[3], borrow);
    // If the element is smaller than MODULUS then the
    // subtraction will underflow, producing a borrow value
    // of 0xffff...ffff. Otherwise, it'll be zero.
    let is_some = (borrow as u8) & 1;
    // Convert to Montgomery form by computing
    // (a.R^0 * R^2) / R = a.R
    tmp *= &R2;
    CtOption::new(tmp, Choice::from(is_some))
  }
  /// Converts an element of `Scalar` into a byte representation in
  /// little-endian byte order.
  pub fn to_bytes(&self) -> [u8; 32] {
    // Turn into canonical form by computing
    // (a.R) / R = a
    // (all five limbs, including the carry limb, are fed into the reduction)
    let tmp = Scalar::montgomery_reduce(
      self.0[0], self.0[1], self.0[2], self.0[3], self.0[4], 0, 0, 0, 0,
    );
    let mut res = [0; 32];
    res[..8].copy_from_slice(&tmp.0[0].to_le_bytes());
    res[8..16].copy_from_slice(&tmp.0[1].to_le_bytes());
    res[16..24].copy_from_slice(&tmp.0[2].to_le_bytes());
    res[24..32].copy_from_slice(&tmp.0[3].to_le_bytes());
    res
  }
/// Converts a 512-bit little endian integer into
/// a `Scalar` by reducing by the modulus.
pub fn from_bytes_wide(bytes: &[u8; 64]) -> Scalar {
Scalar::from_u512([
u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[..8]).unwrap()),
u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[8..16]).unwrap()),
u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[16..24]).unwrap()),
u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[24..32]).unwrap()),
u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[32..40]).unwrap()),
u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[40..48]).unwrap()),
u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[48..56]).unwrap()),
u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[56..64]).unwrap()),
])
}
  /// Reduces a 512-bit little-endian integer (eight u64 limbs) into the
  /// field, producing a Montgomery-form `Scalar`.
  fn from_u512(limbs: [u64; 8]) -> Scalar {
    // We reduce an arbitrary 512-bit number by decomposing it into two 256-bit digits
    // with the higher bits multiplied by 2^256. Thus, we perform two reductions
    //
    // 1. the lower bits are multiplied by R^2, as normal
    // 2. the upper bits are multiplied by R^2 * 2^256 = R^3
    //
    // and computing their sum in the field. It remains to see that arbitrary 256-bit
    // numbers can be placed into Montgomery form safely using the reduction. The
    // reduction works so long as the product is less than R=2^256 multipled by
    // the modulus. This holds because for any `c` smaller than the modulus, we have
    // that (2^256 - 1)*c is an acceptable product for the reduction. Therefore, the
    // reduction always works so long as `c` is in the field; in this case it is either the
    // constant `R2` or `R3`.
    let d0 = Scalar([limbs[0], limbs[1], limbs[2], limbs[3], 0]);
    let d1 = Scalar([limbs[4], limbs[5], limbs[6], limbs[7], 0]);
    // Convert to Montgomery form
    d0 * R2 + d1 * R3
  }
  /// Converts from an integer represented in little endian
  /// into its (congruent) `Scalar` representation.
  // Multiplying by R2 performs the conversion into Montgomery form.
  pub const fn from_raw(val: [u64; 4]) -> Self {
    (&Scalar([val[0], val[1], val[2], val[3], 0])).mul(&R2)
  }
  /// Squares this element.
  #[inline]
  pub const fn square(&self) -> Scalar {
    // Off-diagonal products a_i * a_j (i < j); each appears twice in the
    // square, so the partial result is doubled below.
    let (r1, carry) = mac(0, self.0[0], self.0[1], 0);
    let (r2, carry) = mac(0, self.0[0], self.0[2], carry);
    let (r3, r4) = mac(0, self.0[0], self.0[3], carry);
    let (r3, carry) = mac(r3, self.0[1], self.0[2], 0);
    let (r4, r5) = mac(r4, self.0[1], self.0[3], carry);
    let (r5, r6) = mac(r5, self.0[2], self.0[3], 0);
    // Double the off-diagonal sum by shifting the 512-bit value left one bit.
    let r7 = r6 >> 63;
    let r6 = (r6 << 1) | (r5 >> 63);
    let r5 = (r5 << 1) | (r4 >> 63);
    let r4 = (r4 << 1) | (r3 >> 63);
    let r3 = (r3 << 1) | (r2 >> 63);
    let r2 = (r2 << 1) | (r1 >> 63);
    let r1 = r1 << 1;
    // Add the diagonal terms a_i^2 with carry propagation.
    let (r0, carry) = mac(0, self.0[0], self.0[0], 0);
    let (r1, carry) = adc(0, r1, carry);
    let (r2, carry) = mac(r2, self.0[1], self.0[1], carry);
    let (r3, carry) = adc(0, r3, carry);
    let (r4, carry) = mac(r4, self.0[2], self.0[2], carry);
    let (r5, carry) = adc(0, r5, carry);
    let (r6, carry) = mac(r6, self.0[3], self.0[3], carry);
    let (r7, _) = adc(0, r7, carry);
    // NOTE(review): only the four low limbs participate; assumes the fifth
    // limb is zero for the value being squared — confirm invariant.
    Scalar::montgomery_reduce(r0, r1, r2, r3, r4, r5, r6, r7, 0)
  }
  /// Exponentiates `self` by `by`, where `by` is a
  /// little-endian order integer exponent.
  // Constant-time square-and-multiply: the multiply is always computed and
  // conditionally assigned, so timing does not depend on the exponent bits.
  pub fn pow(&self, by: &[u64; 4]) -> Self {
    let mut res = Self::one();
    for e in by.iter().rev() {
      for i in (0..64).rev() {
        res = res.square();
        let mut tmp = res;
        tmp *= self;
        res.conditional_assign(&tmp, (((*e >> i) & 0x1) as u8).into());
      }
    }
    res
  }
/// Exponentiates `self` by `by`, where `by` is a
/// little-endian order integer exponent.
///
/// **This operation is variable time with respect
/// to the exponent.** If the exponent is fixed,
/// this operation is effectively constant time.
pub fn pow_vartime(&self, by: &[u64; 4]) -> Self {
let mut res = Self::one();
for e in by.iter().rev() {
for i in (0..64).rev() {
res = res.square();
if ((*e >> i) & 1) == 1 {
res.mul_assign(self);
}
}
}
res
}
  /// Computes the multiplicative inverse, returning a "none" `CtOption`
  /// when the element is zero.
  // NOTE(review): this delegates to num-bigint-dig's variable-time modular
  // inverse, so despite the CtOption return type it is NOT constant time.
  pub fn invert(&self) -> CtOption<Self> {
    let val = BigUint::from_bytes_le(&self.to_bytes());
    // The field modulus q, as a big-endian byte string.
    let result = val.mod_inverse(&BigUint::from_bytes_be(&hex!(
      "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f"
    )));
    if result.is_some() {
      // mod_inverse yields a signed BigInt; `.1` extracts the magnitude bytes.
      let mut result = result.unwrap().to_bytes_le().1.to_vec();
      // Zero-pad to 64 bytes so the value can be re-imported via from_bytes_wide.
      result.resize(64, 0);
      let result_bytes: [u8; 64] = result.try_into().unwrap();
      let result = Scalar::from_bytes_wide(&result_bytes);
      CtOption::new(result, Choice::from(1))
    } else {
      CtOption::new(Scalar::zero(), Choice::from(0))
    }
  }
  /// Inverts every element of `inputs` in place using Montgomery's batch
  /// inversion trick (one field inversion plus O(n) multiplications) and
  /// returns the product of all the inverses.
  ///
  /// Debug-asserts that no input is zero.
  pub fn batch_invert(inputs: &mut [Scalar]) -> Scalar {
    // This code is essentially identical to the FieldElement
    // implementation, and is documented there. Unfortunately,
    // it's not easy to write it generically, since here we want
    // to use `UnpackedScalar`s internally, and `Scalar`s
    // externally, but there's no corresponding distinction for
    // field elements.
    use zeroize::Zeroizing;
    let n = inputs.len();
    let one = Scalar::one();
    // Place scratch storage in a Zeroizing wrapper to wipe it when
    // we pass out of scope.
    let scratch_vec = vec![one; n];
    let mut scratch = Zeroizing::new(scratch_vec);
    // Keep an accumulator of all of the previous products
    let mut acc = Scalar::one();
    // Pass through the input vector, recording the previous
    // products in the scratch space
    for (input, scratch) in inputs.iter().zip(scratch.iter_mut()) {
      *scratch = acc;
      acc = acc * input;
    }
    // acc is nonzero iff all inputs are nonzero
    debug_assert!(acc != Scalar::zero());
    // Compute the inverse of all products
    acc = acc.invert().unwrap();
    // We need to return the product of all inverses later
    let ret = acc;
    // Pass through the vector backwards to compute the inverses
    // in place
    for (input, scratch) in inputs.iter_mut().rev().zip(scratch.iter().rev()) {
      let tmp = &acc * input.clone();
      *input = &acc * scratch;
      acc = tmp;
    }
    ret
  }
  /// Montgomery reduction: given a (up to) 576-bit value in limbs r0..r8,
  /// computes (value / R) mod q, eliminating one low limb per round via the
  /// precomputed constant INV.
  #[inline(always)]
  const fn montgomery_reduce(
    r0: u64,
    r1: u64,
    r2: u64,
    r3: u64,
    r4: u64,
    r5: u64,
    r6: u64,
    r7: u64,
    r8: u64,
  ) -> Self {
    // The Montgomery reduction here is based on Algorithm 14.32 in
    // Handbook of Applied Cryptography
    // <http://cacr.uwaterloo.ca/hac/about/chap14.pdf>.
    // Each round picks k so that (r_i + k * q) is divisible by 2^64.
    let k = r0.wrapping_mul(INV);
    let (_, carry) = mac(r0, k, MODULUS.0[0], 0);
    let (r1, carry) = mac(r1, k, MODULUS.0[1], carry);
    let (r2, carry) = mac(r2, k, MODULUS.0[2], carry);
    let (r3, carry) = mac(r3, k, MODULUS.0[3], carry);
    let (r4, carry) = mac(r4, k, MODULUS.0[4], carry);
    let (r5, carry2) = adc(r5, 0, carry);
    let k = r1.wrapping_mul(INV);
    let (_, carry) = mac(r1, k, MODULUS.0[0], 0);
    let (r2, carry) = mac(r2, k, MODULUS.0[1], carry);
    let (r3, carry) = mac(r3, k, MODULUS.0[2], carry);
    let (r4, carry) = mac(r4, k, MODULUS.0[3], carry);
    let (r5, carry) = mac(r5, k, MODULUS.0[4], carry);
    let (r6, carry2) = adc(r6, carry2, carry);
    let k = r2.wrapping_mul(INV);
    let (_, carry) = mac(r2, k, MODULUS.0[0], 0);
    let (r3, carry) = mac(r3, k, MODULUS.0[1], carry);
    let (r4, carry) = mac(r4, k, MODULUS.0[2], carry);
    let (r5, carry) = mac(r5, k, MODULUS.0[3], carry);
    let (r6, carry) = mac(r6, k, MODULUS.0[4], carry);
    let (r7, carry2) = adc(r7, carry2, carry);
    let k = r3.wrapping_mul(INV);
    let (_, carry) = mac(r3, k, MODULUS.0[0], 0);
    let (r4, carry) = mac(r4, k, MODULUS.0[1], carry);
    let (r5, carry) = mac(r5, k, MODULUS.0[2], carry);
    let (r6, carry) = mac(r6, k, MODULUS.0[3], carry);
    let (r7, carry) = mac(r7, k, MODULUS.0[4], carry);
    let (r8, _) = adc(r8, carry2, carry);
    // Result may be within MODULUS of the correct value
    // (the inherent `sub` performs the final conditional correction).
    (&Scalar([r4, r5, r6, r7, r8])).sub(&MODULUS)
  }
/// Multiplies `rhs` by `self`, returning the result.
///
/// Computes the full 9-limb schoolbook product of the two 5-limb operands
/// (r0..r8), then folds it back to 5 limbs with `montgomery_reduce`.
#[inline]
pub const fn mul(&self, rhs: &Self) -> Self {
// Schoolbook multiplication
// Row 0: self.0[0] * rhs, starting at limb 0.
let (r0, carry) = mac(0, self.0[0], rhs.0[0], 0);
let (r1, carry) = mac(0, self.0[0], rhs.0[1], carry);
let (r2, carry) = mac(0, self.0[0], rhs.0[2], carry);
let (r3, carry) = mac(0, self.0[0], rhs.0[3], carry);
let (r4, r5) = mac(0, self.0[0], rhs.0[4], carry);
// Row 1: self.0[1] * rhs, accumulated one limb up.
let (r1, carry) = mac(r1, self.0[1], rhs.0[0], 0);
let (r2, carry) = mac(r2, self.0[1], rhs.0[1], carry);
let (r3, carry) = mac(r3, self.0[1], rhs.0[2], carry);
let (r4, carry) = mac(r4, self.0[1], rhs.0[3], carry);
let (r5, r6) = mac(r5, self.0[1], rhs.0[4], carry);
// Row 2: self.0[2] * rhs.
let (r2, carry) = mac(r2, self.0[2], rhs.0[0], 0);
let (r3, carry) = mac(r3, self.0[2], rhs.0[1], carry);
let (r4, carry) = mac(r4, self.0[2], rhs.0[2], carry);
let (r5, carry) = mac(r5, self.0[2], rhs.0[3], carry);
let (r6, r7) = mac(r6, self.0[2], rhs.0[4], carry);
// Row 3: self.0[3] * rhs.
let (r3, carry) = mac(r3, self.0[3], rhs.0[0], 0);
let (r4, carry) = mac(r4, self.0[3], rhs.0[1], carry);
let (r5, carry) = mac(r5, self.0[3], rhs.0[2], carry);
let (r6, carry) = mac(r6, self.0[3], rhs.0[3], carry);
let (r7, r8) = mac(r7, self.0[3], rhs.0[4], carry);
// Row 4: self.0[4] * rhs.
let (r4, carry) = mac(r4, self.0[4], rhs.0[0], 0);
let (r5, carry) = mac(r5, self.0[4], rhs.0[1], carry);
let (r6, carry) = mac(r6, self.0[4], rhs.0[2], carry);
let (r7, carry) = mac(r7, self.0[4], rhs.0[3], carry);
let (r8, _) = mac(r8, self.0[4], rhs.0[4], carry);
// Fold the 9-limb product back into a reduced 5-limb scalar.
Scalar::montgomery_reduce(r0, r1, r2, r3, r4, r5, r6, r7, r8)
}
/// Subtracts `rhs` from `self`, returning the result.
///
/// Constant-time: limb-wise subtraction with borrow propagation, followed
/// by a mask-based conditional add of the modulus instead of a branch.
#[inline]
pub const fn sub(&self, rhs: &Self) -> Self {
let (d0, borrow) = sbb(self.0[0], rhs.0[0], 0);
let (d1, borrow) = sbb(self.0[1], rhs.0[1], borrow);
let (d2, borrow) = sbb(self.0[2], rhs.0[2], borrow);
let (d3, borrow) = sbb(self.0[3], rhs.0[3], borrow);
let (d4, borrow) = sbb(self.0[4], rhs.0[4], borrow);
// If underflow occurred on the final limb, borrow = 0xfff...fff, otherwise
// borrow = 0x000...000. Thus, we use it as a mask to conditionally add the modulus.
let (d0, carry) = adc(d0, MODULUS.0[0] & borrow, 0);
let (d1, carry) = adc(d1, MODULUS.0[1] & borrow, carry);
let (d2, carry) = adc(d2, MODULUS.0[2] & borrow, carry);
let (d3, carry) = adc(d3, MODULUS.0[3] & borrow, carry);
let (d4, _) = adc(d4, MODULUS.0[4] & borrow, carry);
Scalar([d0, d1, d2, d3, d4])
}
/// Adds `rhs` to `self`, returning the result.
///
/// Limb-wise addition with carry propagation; the final carry is dropped.
/// NOTE(review): assumes both operands are already reduced so the sum fits
/// in 5 limbs — confirm the top limb of MODULUS leaves enough headroom.
#[inline]
pub const fn add(&self, rhs: &Self) -> Self {
let (d0, carry) = adc(self.0[0], rhs.0[0], 0);
let (d1, carry) = adc(self.0[1], rhs.0[1], carry);
let (d2, carry) = adc(self.0[2], rhs.0[2], carry);
let (d3, carry) = adc(self.0[3], rhs.0[3], carry);
let (d4, _) = adc(self.0[4], rhs.0[4], carry);
// Attempt to subtract the modulus, to ensure the value
// is smaller than the modulus.
(&Scalar([d0, d1, d2, d3, d4])).sub(&MODULUS)
}
/// Negates `self`.
///
/// Computes MODULUS - self, then masks the result to zero when self == 0
/// so the output stays canonical (branch-free).
#[inline]
pub const fn neg(&self) -> Self {
// Subtract `self` from `MODULUS` to negate. Ignore the final
// borrow because it cannot underflow; self is guaranteed to
// be in the field.
let (d0, borrow) = sbb(MODULUS.0[0], self.0[0], 0);
let (d1, borrow) = sbb(MODULUS.0[1], self.0[1], borrow);
let (d2, borrow) = sbb(MODULUS.0[2], self.0[2], borrow);
let (d3, borrow) = sbb(MODULUS.0[3], self.0[3], borrow);
let (d4, _) = sbb(MODULUS.0[4], self.0[4], borrow);
// `tmp` could be `MODULUS` if `self` was zero. Create a mask that is
// zero if `self` was zero, and `u64::max_value()` if self was nonzero.
let mask =
(((self.0[0] | self.0[1] | self.0[2] | self.0[3] | self.0[4]) == 0) as u64).wrapping_sub(1);
// Applying the mask maps MODULUS back to the canonical zero.
Scalar([d0 & mask, d1 & mask, d2 & mask, d3 & mask, d4 & mask])
}
}
impl<'a> From<&'a Scalar> for [u8; 32] {
fn from(value: &'a Scalar) -> [u8; 32] {
value.to_bytes()
}
}
#[cfg(test)]
mod tests {
use super::*;
// Recomputes -(q^{-1}) mod 2^64 from the low limb of MODULUS (by
// exponentiation) and checks it matches the precomputed INV constant
// used by Montgomery reduction.
#[test]
fn test_inv() {
// Compute -(q^{-1} mod 2^64) mod 2^64 by exponentiating
// by totient(2**64) - 1
let mut inv = 1u64;
for _ in 0..63 {
inv = inv.wrapping_mul(inv);
inv = inv.wrapping_mul(MODULUS.0[0]);
}
inv = inv.wrapping_neg();
assert_eq!(inv, INV);
}
// Debug formatting of canonical constants (needs `format!`, hence std-only).
#[cfg(feature = "std")]
#[test]
fn test_debug() {
assert_eq!(
format!("{:?}", Scalar::zero()),
"0x0000000000000000000000000000000000000000000000000000000000000000"
);
assert_eq!(
format!("{:?}", Scalar::one()),
"0x0000000000000000000000000000000000000000000000000000000000000001"
);
assert_eq!(
format!("{:?}", R2),
"0x1824b159acc5056f998c4fefecbc4ff55884b7fa0003480200000001fffffffe"
);
}
// PartialEq sanity checks on identical and distinct constants.
#[test]
fn test_equality() {
assert_eq!(Scalar::zero(), Scalar::zero());
assert_eq!(Scalar::one(), Scalar::one());
assert_eq!(R2, R2);
assert!(Scalar::zero() != Scalar::one());
assert!(Scalar::one() != R2);
}
// to_bytes produces the little-endian canonical encoding.
#[test]
fn test_to_bytes() {
assert_eq!(
Scalar::zero().to_bytes(),
[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0
]
);
assert_eq!(
Scalar::one().to_bytes(),
[
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0
]
);
// NOTE(review): the assertions below are intentionally disabled — the
// expected byte strings presumably predate the port to this modulus;
// confirm and either update or delete them.
/*
assert_eq!(
R2.to_bytes(),
[
29, 149, 152, 141, 116, 49, 236, 214, 112, 207, 125, 115, 244, 91, 239, 198, 254, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 15
]
);
assert_eq!(
(-&Scalar::one()).to_bytes(),
[
236, 211, 245, 92, 26, 99, 18, 88, 214, 156, 247, 162, 222, 249, 222, 20, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 16
]
);
*/
}
// from_bytes round-trips the canonical little-endian encodings.
#[test]
fn test_from_bytes() {
assert_eq!(
Scalar::from_bytes(&[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0
])
.unwrap(),
Scalar::zero()
);
assert_eq!(
Scalar::from_bytes(&[
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0
])
.unwrap(),
Scalar::one()
);
assert_eq!(
Scalar::from_bytes(&[
209, 3, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0
])
.unwrap(),
R2
);
// NOTE(review): the non-canonical-input rejection cases below are
// disabled — presumably stale byte strings from another modulus;
// confirm and restore with correct values.
/*
// -1 should work
assert!(
Scalar::from_bytes(&[
236, 211, 245, 92, 26, 99, 18, 88, 214, 156, 247, 162, 222, 249, 222, 20, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 16
])
.is_some()
.unwrap_u8()
== 1
);
// modulus is invalid
assert!(
Scalar::from_bytes(&[
1, 0, 0, 0, 255, 255, 255, 255, 254, 91, 254, 255, 2, 164, 189, 83, 5, 216, 161, 9, 8, 216,
57, 51, 72, 125, 157, 41, 83, 167, 237, 115
])
.is_none()
.unwrap_u8()
== 1
);
// Anything larger than the modulus is invalid
assert!(
Scalar::from_bytes(&[
2, 0, 0, 0, 255, 255, 255, 255, 254, 91, 254, 255, 2, 164, 189, 83, 5, 216, 161, 9, 8, 216,
57, 51, 72, 125, 157, 41, 83, 167, 237, 115
])
.is_none()
.unwrap_u8()
== 1
);
assert!(
Scalar::from_bytes(&[
1, 0, 0, 0, 255, 255, 255, 255, 254, 91, 254, 255, 2, 164, 189, 83, 5, 216, 161, 9, 8, 216,
58, 51, 72, 125, 157, 41, 83, 167, 237, 115
])
.is_none()
.unwrap_u8()
== 1
);
assert!(
Scalar::from_bytes(&[
1, 0, 0, 0, 255, 255, 255, 255, 254, 91, 254, 255, 2, 164, 189, 83, 5, 216, 161, 9, 8, 216,
57, 51, 72, 125, 157, 41, 83, 167, 237, 116
])
.is_none()
.unwrap_u8()
== 1
);
*/
}
// from_u512 reduces mod q: feeding the modulus itself yields zero.
#[test]
fn test_from_u512_zero() {
assert_eq!(
Scalar::zero(),
Scalar::from_u512([
MODULUS.0[0],
MODULUS.0[1],
MODULUS.0[2],
MODULUS.0[3],
0,
0,
0,
0
])
);
}
// from_u512(1) lands in Montgomery form, i.e. R.
#[test]
fn test_from_u512_r() {
assert_eq!(R, Scalar::from_u512([1, 0, 0, 0, 0, 0, 0, 0]));
}
// from_u512(2^256) lands on R2.
#[test]
fn test_from_u512_r2() {
assert_eq!(R2, Scalar::from_u512([0, 0, 0, 0, 1, 0, 0, 0]));
}
// The all-ones 512-bit value (2^512 - 1) reduces to R3 - R.
#[test]
fn test_from_u512_max() {
let max_u64 = 0xffffffffffffffff;
assert_eq!(
R3 - R,
Scalar::from_u512([max_u64, max_u64, max_u64, max_u64, max_u64, max_u64, max_u64, max_u64])
);
}
// from_bytes_wide agrees with the from_bytes R2 case above.
#[test]
fn test_from_bytes_wide_r2() {
assert_eq!(
R2,
Scalar::from_bytes_wide(&[
209, 3, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0,
])
);
}
// The little-endian encoding of q - 1 decodes to -1.
#[test]
fn test_from_bytes_wide_negative_one() {
assert_eq!(
-&Scalar::one(),
Scalar::from_bytes_wide(&[
46, 252, 255, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
])
);
}
// 2^512 - 1 fed as raw bytes reduces to this precomputed small value.
#[test]
fn test_from_bytes_wide_maximum() {
assert_eq!(
Scalar::from_raw([0x000007a2000e90a0, 0x1, 0, 0]),
Scalar::from_bytes_wide(&[0xff; 64])
);
}
// Zero is a fixed point of negation and an identity for +, -, *.
#[test]
fn test_zero() {
assert_eq!(Scalar::zero(), -&Scalar::zero());
assert_eq!(Scalar::zero(), Scalar::zero() + Scalar::zero());
assert_eq!(Scalar::zero(), Scalar::zero() - Scalar::zero());
assert_eq!(Scalar::zero(), Scalar::zero() * Scalar::zero());
}
// q - 1: the largest canonical element (fifth limb is 0; the modulus
// occupies four limbs).
const LARGEST: Scalar = Scalar([
0xfffffffefffffc2e,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0,
]);
#[test]
fn test_addition() {
// (q - 1) + (q - 1) wraps to q - 2
let mut tmp = LARGEST;
tmp += &LARGEST;
let target = Scalar([
0xfffffffefffffc2d,
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0,
]);
assert_eq!(tmp, target);
// (q - 1) + 1 wraps to zero
let mut tmp = LARGEST;
tmp += &Scalar([1, 0, 0, 0, 0]);
assert_eq!(tmp, Scalar::zero());
}
// Negation pairs: -(q-1) == 1, -0 == 0, -1 == q-1.
#[test]
fn test_negation() {
let tmp = -&LARGEST;
assert_eq!(tmp, Scalar([1, 0, 0, 0, 0]));
let tmp = -&Scalar::zero();
assert_eq!(tmp, Scalar::zero());
let tmp = -&Scalar([1, 0, 0, 0, 0]);
assert_eq!(tmp, LARGEST);
}
#[test]
fn test_subtraction() {
let mut tmp = LARGEST;
tmp -= &LARGEST;
assert_eq!(tmp, Scalar::zero());
// 0 - (q-1) must agree with q - (q-1) (underflow path vs. direct path)
let mut tmp = Scalar::zero();
tmp -= &LARGEST;
let mut tmp2 = MODULUS;
tmp2 -= &LARGEST;
assert_eq!(tmp, tmp2);
}
// Cross-checks `mul` against a double-and-add recomputation over the bits
// of one operand, for 100 pseudo-consecutive values.
#[test]
fn test_multiplication() {
let mut cur = LARGEST;
for _ in 0..100 {
let mut tmp = cur;
tmp *= &cur;
// recompute cur * cur by scanning cur's bits from MSB to LSB
let mut tmp2 = Scalar::zero();
for b in cur
.to_bytes()
.iter()
.rev()
.flat_map(|byte| (0..8).rev().map(move |i| ((byte >> i) & 1u8) == 1u8))
{
let tmp3 = tmp2;
tmp2.add_assign(&tmp3);
if b {
tmp2.add_assign(&cur);
}
}
assert_eq!(tmp, tmp2);
cur.add_assign(&LARGEST);
}
}
// Same double-and-add cross-check as test_multiplication, but for `square`.
#[test]
fn test_squaring() {
let mut cur = LARGEST;
for _ in 0..100 {
let mut tmp = cur;
tmp = tmp.square();
let mut tmp2 = Scalar::zero();
for b in cur
.to_bytes()
.iter()
.rev()
.flat_map(|byte| (0..8).rev().map(move |i| ((byte >> i) & 1u8) == 1u8))
{
let tmp3 = tmp2;
tmp2.add_assign(&tmp3);
if b {
tmp2.add_assign(&cur);
}
}
assert_eq!(tmp, tmp2);
cur.add_assign(&LARGEST);
}
}
#[test]
fn test_inversion() {
// zero has no inverse
assert_eq!(Scalar::zero().invert().is_none().unwrap_u8(), 1);
// 1 and -1 are their own inverses
assert_eq!(Scalar::one().invert().unwrap(), Scalar::one());
assert_eq!((-&Scalar::one()).invert().unwrap(), -&Scalar::one());
let a = Scalar::from(123);
let result = a.invert().unwrap();
println!("result {:?}", result);
// x * x^-1 == 1 for 100 multiples of R2
let mut tmp = R2;
for _ in 0..100 {
let mut tmp2 = tmp.invert().unwrap();
println!("tmp2 {:?}", tmp2);
tmp2.mul_assign(&tmp);
assert_eq!(tmp2, Scalar::one());
tmp.add_assign(&R2);
}
}
// invert, pow_vartime, and pow must all agree with x^(q-2)
// (Fermat's little theorem inversion).
#[test]
fn test_invert_is_pow() {
let q_minus_2 = [
0xffff_fffe_ffff_fc2d,
0xffff_ffff_ffff_ffff,
0xffff_ffff_ffff_ffff,
0xffff_ffff_ffff_ffff,
];
let mut r1 = R;
let mut r2 = R;
let mut r3 = R;
for _ in 0..100 {
r1 = r1.invert().unwrap();
r2 = r2.pow_vartime(&q_minus_2);
r3 = r3.pow(&q_minus_2);
assert_eq!(r1, r2);
assert_eq!(r2, r3);
// Add R so we check something different next time around
r1.add_assign(&R);
r2 = r1;
r3 = r1;
}
}
#[test]
fn test_from_raw() {
// 2^256 - 1 reduces to the small remainder below
assert_eq!(
Scalar::from_raw([
0x00000001000003d0,
0x0000000000000000,
0x0000000000000000,
0x0000000000000000,
]),
Scalar::from_raw([0xffffffffffffffff; 4])
);
// the modulus itself reduces to zero
assert_eq!(
Scalar::from_raw(MODULUS.0[..4].try_into().unwrap()),
Scalar::zero()
);
// from_raw(1) converts into Montgomery form, i.e. R
assert_eq!(Scalar::from_raw([1, 0, 0, 0]), R);
}
// double() must agree with self-addition.
#[test]
fn test_double() {
let a = Scalar::from_raw([
0x1fff3231233ffffd,
0x4884b7fa00034802,
0x998c4fefecbc4ff3,
0x1824b159acc50562,
]);
assert_eq!(a.double(), a + a);
}
}
================================================
FILE: packages/Spartan-secq/src/sparse_mlpoly.rs
================================================
#![allow(clippy::type_complexity)]
#![allow(clippy::too_many_arguments)]
#![allow(clippy::needless_range_loop)]
use super::dense_mlpoly::DensePolynomial;
use super::dense_mlpoly::{
EqPolynomial, IdentityPolynomial, PolyCommitment, PolyCommitmentGens, PolyEvalProof,
};
use super::errors::ProofVerifyError;
use super::math::Math;
use super::product_tree::{DotProductCircuit, ProductCircuit, ProductCircuitEvalProofBatched};
use super::random::RandomTape;
use super::scalar::Scalar;
use super::timer::Timer;
use super::transcript::{AppendToTranscript, ProofTranscript};
use core::cmp::Ordering;
use merlin::Transcript;
use serde::{Deserialize, Serialize};
// A single non-zero entry of a sparse matrix.
#[derive(Debug, Serialize, Deserialize)]
pub struct SparseMatEntry {
// row index of the entry
row: usize,
// column index of the entry
col: usize,
// the scalar stored at (row, col)
val: Scalar,
}
impl SparseMatEntry {
pub fn new(row: usize, col: usize, val: Scalar) -> Self {
SparseMatEntry { row, col, val }
}
}
// A sparse multilinear polynomial over num_vars_x + num_vars_y variables,
// stored as its list of non-zero entries.
#[derive(Debug, Serialize, Deserialize)]
pub struct SparseMatPolynomial {
// number of variables addressing rows
num_vars_x: usize,
// number of variables addressing columns
num_vars_y: usize,
// the non-zero entries (row, col, val)
M: Vec<SparseMatEntry>,
}
// Values obtained by dereferencing row/column addresses into memory,
// plus a single merged polynomial used for committing to all of them.
pub struct Derefs {
// values read at the row addresses, one polynomial per instance
row_ops_val: Vec<DensePolynomial>,
// values read at the column addresses, one polynomial per instance
col_ops_val: Vec<DensePolynomial>,
// merge of row_ops_val followed by col_ops_val (committed as one polynomial)
comb: DensePolynomial,
}
// Commitment to the combined (merged) derefs polynomial.
#[derive(Debug, Serialize, Deserialize)]
pub struct DerefsCommitment {
comm_ops_val: PolyCommitment,
}
impl Derefs {
  /// Bundles the row/column dereferenced-value polynomials and precomputes
  /// their merged form so that one commitment can cover all of them.
  pub fn new(row_ops_val: Vec<DensePolynomial>, col_ops_val: Vec<DensePolynomial>) -> Self {
    assert_eq!(row_ops_val.len(), col_ops_val.len());
    // combine all polynomials into a single polynomial (used below to
    // produce a single commitment)
    let comb = DensePolynomial::merge(row_ops_val.iter().chain(col_ops_val.iter()));
    Derefs {
      row_ops_val,
      col_ops_val,
      comb,
    }
  }

  /// Commits to the combined polynomial (no caller-supplied blinds).
  pub fn commit(&self, gens: &PolyCommitmentGens) -> DerefsCommitment {
    let (comm_ops_val, _blinds) = self.comb.commit(gens, None);
    DerefsCommitment { comm_ops_val }
  }
}
// Proof that the derefs polynomials evaluate to claimed values at a point;
// internally a single joint opening of the merged polynomial.
#[derive(Debug, Serialize, Deserialize)]
pub struct DerefsEvalProof {
proof_derefs: PolyEvalProof,
}
impl DerefsEvalProof {
// Domain-separation label for this sub-protocol's transcript.
fn protocol_name() -> &'static [u8] {
b"Derefs evaluation proof"
}
// Reduces the n claimed evaluations `evals` of segments of `joint_poly`
// to a single claim at an extended point, then proves that one opening.
fn prove_single(
joint_poly: &DensePolynomial,
r: &[Scalar],
evals: Vec<Scalar>,
gens: &PolyCommitmentGens,
transcript: &mut Transcript,
random_tape: &mut RandomTape,
) -> PolyEvalProof {
// the combined polynomial has log2(#evals) extra variables over |r|
assert_eq!(joint_poly.get_num_vars(), r.len() + evals.len().log_2());
// append the claimed evaluations to transcript
evals.append_to_transcript(b"evals_ops_val", transcript);
// n-to-1 reduction
let (r_joint, eval_joint) = {
// random challenges bind the n claims into one joint claim
let challenges =
transcript.challenge_vector(b"challenge_combine_n_to_one", evals.len().log_2());
// fold the claimed evaluations down to a single value
let mut poly_evals = DensePolynomial::new(evals);
for i in (0..challenges.len()).rev() {
poly_evals.bound_poly_var_bot(&challenges[i]);
}
assert_eq!(poly_evals.len(), 1);
let joint_claim_eval = poly_evals[0];
// the joint evaluation point is (challenges || r)
let mut r_joint = challenges;
r_joint.extend(r);
// debug-only sanity check: folded claim matches a direct evaluation
debug_assert_eq!(joint_poly.evaluate(&r_joint), joint_claim_eval);
(r_joint, joint_claim_eval)
};
// decommit the joint polynomial at r_joint
eval_joint.append_to_transcript(b"joint_claim_eval", transcript);
let (proof_derefs, _comm_derefs_eval) = PolyEvalProof::prove(
joint_poly,
None,
&r_joint,
&eval_joint,
None,
gens,
transcript,
random_tape,
);
proof_derefs
}
// evaluates both polynomials at r and produces a joint proof of opening
pub fn prove(
derefs: &Derefs,
eval_row_ops_val_vec: &[Scalar],
eval_col_ops_val_vec: &[Scalar],
r: &[Scalar],
gens: &PolyCommitmentGens,
transcript: &mut Transcript,
random_tape: &mut RandomTape,
) -> Self {
transcript.append_protocol_name(DerefsEvalProof::protocol_name());
// concatenate row and column claims, zero-padded to a power of two
let evals = {
let mut evals = eval_row_ops_val_vec.to_owned();
evals.extend(eval_col_ops_val_vec);
evals.resize(evals.len().next_power_of_two(), Scalar::zero());
evals
};
let proof_derefs =
DerefsEvalProof::prove_single(&derefs.comb, r, evals, gens, transcript, random_tape);
DerefsEvalProof { proof_derefs }
}
// Verifier-side mirror of `prove_single`: re-derives the joint claim from
// the transcript challenges and checks the single opening proof.
fn verify_single(
proof: &PolyEvalProof,
comm: &PolyCommitment,
r: &[Scalar],
evals: Vec<Scalar>,
gens: &PolyCommitmentGens,
transcript: &mut Transcript,
) -> Result<(), ProofVerifyError> {
// append the claimed evaluations to transcript
evals.append_to_transcript(b"evals_ops_val", transcript);
// n-to-1 reduction
let challenges =
transcript.challenge_vector(b"challenge_combine_n_to_one", evals.len().log_2());
let mut poly_evals = DensePolynomial::new(evals);
for i in (0..challenges.len()).rev() {
poly_evals.bound_poly_var_bot(&challenges[i]);
}
assert_eq!(poly_evals.len(), 1);
let joint_claim_eval = poly_evals[0];
let mut r_joint = challenges;
r_joint.extend(r);
// decommit the joint polynomial at r_joint
joint_claim_eval.append_to_transcript(b"joint_claim_eval", transcript);
proof.verify_plain(gens, transcript, &r_joint, &joint_claim_eval, comm)
}
// verify evaluations of both polynomials at r
pub fn verify(
&self,
r: &[Scalar],
eval_row_ops_val_vec: &[Scalar],
eval_col_ops_val_vec: &[Scalar],
gens: &PolyCommitmentGens,
comm: &DerefsCommitment,
transcript: &mut Transcript,
) -> Result<(), ProofVerifyError> {
transcript.append_protocol_name(DerefsEvalProof::protocol_name());
// rebuild the padded claim vector exactly as the prover did
let mut evals = eval_row_ops_val_vec.to_owned();
evals.extend(eval_col_ops_val_vec);
evals.resize(evals.len().next_power_of_two(), Scalar::zero());
DerefsEvalProof::verify_single(
&self.proof_derefs,
&comm.comm_ops_val,
r,
evals,
gens,
transcript,
)
}
}
impl AppendToTranscript for DerefsCommitment {
// Appends the commitment to the transcript, framed by begin/end markers.
fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) {
transcript.append_message(b"derefs_commitment", b"begin_derefs_commitment");
self.comm_ops_val.append_to_transcript(label, transcript);
transcript.append_message(b"derefs_commitment", b"end_derefs_commitment");
}
}
// Address sequences into a fixed-size memory, together with the read and
// audit timestamps used for offline memory checking.
struct AddrTimestamps {
// raw address sequences, one Vec<usize> per instance
ops_addr_usize: Vec<Vec<usize>>,
// the same addresses encoded as dense polynomials
ops_addr: Vec<DensePolynomial>,
// per-instance read timestamps: the counter value observed at each access
read_ts: Vec<DensePolynomial>,
// final timestamp of every memory cell after all accesses
audit_ts: DensePolynomial,
}
impl AddrTimestamps {
  /// Builds the timestamp polynomials for a batch of address sequences.
  /// Each instance must issue exactly `num_ops` accesses into a memory of
  /// `num_cells` cells; accesses are timestamped by a shared counter per cell.
  pub fn new(num_cells: usize, num_ops: usize, ops_addr: Vec<Vec<usize>>) -> Self {
    for addrs in ops_addr.iter() {
      assert_eq!(addrs.len(), num_ops);
    }
    let mut audit_ts = vec![0usize; num_cells];
    let mut ops_addr_vec: Vec<DensePolynomial> = Vec::new();
    let mut read_ts_vec: Vec<DensePolynomial> = Vec::new();
    for addrs in ops_addr.iter() {
      // since read timestamps are trustworthy, we can simply increment the
      // r-ts to obtain a w-ts; this is sufficient to ensure that the
      // write-set, consisting of (addr, val, ts) tuples, is a set
      let mut read_ts = vec![0usize; num_ops];
      for (i, &addr) in addrs.iter().enumerate() {
        assert!(addr < num_cells);
        let ts = audit_ts[addr];
        read_ts[i] = ts;
        audit_ts[addr] = ts + 1;
      }
      ops_addr_vec.push(DensePolynomial::from_usize(addrs));
      read_ts_vec.push(DensePolynomial::from_usize(&read_ts));
    }
    AddrTimestamps {
      ops_addr: ops_addr_vec,
      ops_addr_usize: ops_addr,
      read_ts: read_ts_vec,
      audit_ts: DensePolynomial::from_usize(&audit_ts),
    }
  }

  /// Gathers `mem_val[a]` for each address `a`, as a dense polynomial.
  fn deref_mem(addr: &[usize], mem_val: &[Scalar]) -> DensePolynomial {
    DensePolynomial::new(addr.iter().map(|&a| mem_val[a]).collect::<Vec<Scalar>>())
  }

  /// Dereferences every instance's address sequence against `mem_val`.
  pub fn deref(&self, mem_val: &[Scalar]) -> Vec<DensePolynomial> {
    self
      .ops_addr_usize
      .iter()
      .map(|addrs| AddrTimestamps::deref_mem(addrs, mem_val))
      .collect::<Vec<DensePolynomial>>()
  }
}
// Dense (polynomial) representation of a batch of sparse matrices.
// NOTE(review): construction is not in this chunk — field roles inferred
// from names and sibling types; confirm against the builder.
pub struct MultiSparseMatPolynomialAsDense {
// number of sparse matrices represented together
batch_size: usize,
// value polynomial of each matrix
val: Vec<DensePolynomial>,
// row-address access pattern and timestamps
row: AddrTimestamps,
// column-address access pattern and timestamps
col: AddrTimestamps,
// combined ops-side polynomial (single commitment target)
comb_ops: DensePolynomial,
// combined memory-side polynomial (single commitment target)
comb_mem: DensePolynomial,
}
// Generators for the three commitments used by the sparse-matrix
// polynomial commitment; sizing logic lives in `new`.
pub struct SparseMatPolyCommitmentGens {
// generators for the combined ops polynomials
gens_ops: PolyCommitmentGens,
// generators for the combined memory polynomials
gens_mem: PolyCommitmentGens,
// generators for the dereferenced-value (Derefs) polynomials
gens_derefs: PolyCommitmentGens,
}
impl SparseMatPolyCommitmentGens {
  /// Derives generator sets sized for committing to `batch_size` sparse
  /// matrices with up to `num_nz_entries` non-zero entries over a
  /// 2^num_vars_x by 2^num_vars_y grid.
  pub fn new(
    label: &'static [u8],
    num_vars_x: usize,
    num_vars_y: usize,
    num_nz_entries: usize,
    batch_size: usize,
  ) -> SparseMatPolyCommitmentGens {
    // ops: sized for batch_size * 5 polynomials of num_nz_entries entries,
    // with both factors rounded up to powers of two
    let num_vars_ops =
      num_nz_entries.next_power_of_two().log_2() + (batch_size * 5).next_power_of_two().log_2();
    // mem: one extra variable over the larger of the two address spaces
    let num_vars_mem = num_vars_x.max(num_vars_y) + 1;
    // derefs: sized for batch_size * 2 polynomials (row and col values)
    let num_vars_derefs =
      num_nz_entries.next_power_of_two().log_2() + (batch_size * 2).next_power_of_two().log_2();
    SparseMatPolyCommitmentGens {
      gens_ops: PolyCommitmentGens::new(num_vars_ops, label),
      gens_mem: PolyCommitmentGens::new(num_vars_mem, label),
      gens_derefs: PolyCommitmentGens::new(num_vars_derefs, label),
    }
  }
}
// Commitment to a batch of sparse matrix polynomials, together with the
// sizing metadata a verifier needs (all three are bound to the transcript
// in `append_to_transcript`).
#[derive(Debug, Serialize, Deserialize)]
pub struct SparseMatPolyCommitment {
// number of matrices committed together
batch_size: usize,
// number of operations (non-zero entry accesses) per matrix
num_ops: usize,
// number of addressable memory cells
num_mem_cells: usize,
// commitment to the combined ops polynomial
comm_comb_ops: PolyCommitment,
// commitment to the combined memory polynomial
comm_comb_mem: PolyCommitment,
}
impl AppendToTranscript for SparseMatPolyCommitment {
fn append_to_transcript(&self, _label: &'static [u8], transcript: &mut Transcript) {
transcript.append_u64(b"batch_size", self.batch_size as u64);
transcript.append_u64(b"num_ops", self.num_ops as u64);
transcript.append_u64(b"num_mem_cells", self.num_mem_cells as u64);
self
.comm_comb_ops
.append_to_transcript(b"comm_comb_ops", transcript);
self
.comm_comb_mem
.append_t
gitextract_fd7z0c7m/
├── .cargo/
│ └── config
├── .eslintignore
├── .eslintrc.js
├── .github/
│ └── workflows/
│ └── publish.yaml
├── .gitignore
├── .prettierignore
├── .prettierrc.json
├── .vscode/
│ └── settings.json
├── Cargo.toml
├── README.md
├── lerna.json
├── package.json
├── packages/
│ ├── Spartan-secq/
│ │ ├── CODE_OF_CONDUCT.md
│ │ ├── CONTRIBUTING.md
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── SECURITY.md
│ │ ├── benches/
│ │ │ ├── nizk.rs
│ │ │ └── snark.rs
│ │ ├── examples/
│ │ │ └── cubic.rs
│ │ ├── profiler/
│ │ │ ├── nizk.rs
│ │ │ └── snark.rs
│ │ ├── rustfmt.toml
│ │ └── src/
│ │ ├── bin/
│ │ │ └── mont_params.rs
│ │ ├── commitments.rs
│ │ ├── dense_mlpoly.rs
│ │ ├── errors.rs
│ │ ├── group.rs
│ │ ├── lib.rs
│ │ ├── math.rs
│ │ ├── nizk/
│ │ │ ├── bullet.rs
│ │ │ └── mod.rs
│ │ ├── product_tree.rs
│ │ ├── r1csinstance.rs
│ │ ├── r1csproof.rs
│ │ ├── random.rs
│ │ ├── scalar/
│ │ │ ├── mod.rs
│ │ │ └── scalar.rs
│ │ ├── sparse_mlpoly.rs
│ │ ├── sumcheck.rs
│ │ ├── timer.rs
│ │ ├── transcript.rs
│ │ └── unipoly.rs
│ ├── benchmark/
│ │ ├── node/
│ │ │ ├── LICENSE
│ │ │ ├── README.md
│ │ │ ├── package.json
│ │ │ ├── src/
│ │ │ │ ├── node.bench.ts
│ │ │ │ ├── node.bench_addr_membership.ts
│ │ │ │ └── node.bench_pubkey_membership.ts
│ │ │ └── tsconfig.json
│ │ └── web/
│ │ ├── .vscode/
│ │ │ └── settings.json
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── next.config.js
│ │ ├── package.json
│ │ ├── pages/
│ │ │ ├── _app.tsx
│ │ │ └── index.tsx
│ │ └── tsconfig.json
│ ├── circuit_reader/
│ │ ├── Cargo.toml
│ │ └── src/
│ │ ├── bin/
│ │ │ └── gen_spartan_inst.rs
│ │ ├── circom_reader.rs
│ │ └── lib.rs
│ ├── circuits/
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── eff_ecdsa_membership/
│ │ │ ├── addr_membership.circom
│ │ │ ├── eff_ecdsa.circom
│ │ │ ├── eff_ecdsa_to_addr.circom
│ │ │ ├── pubkey_membership.circom
│ │ │ ├── secp256k1/
│ │ │ │ ├── add.circom
│ │ │ │ ├── double.circom
│ │ │ │ └── mul.circom
│ │ │ ├── to_address/
│ │ │ │ ├── vocdoni-keccak/
│ │ │ │ │ ├── keccak.circom
│ │ │ │ │ ├── permutations.circom
│ │ │ │ │ └── utils.circom
│ │ │ │ └── zk-identity/
│ │ │ │ └── eth.circom
│ │ │ └── tree.circom
│ │ ├── instances/
│ │ │ ├── addr_membership.circom
│ │ │ └── pubkey_membership.circom
│ │ ├── jest.config.js
│ │ ├── package.json
│ │ ├── poseidon/
│ │ │ ├── poseidon.circom
│ │ │ └── poseidon_constants.circom
│ │ └── tests/
│ │ ├── addr_membership.test.ts
│ │ ├── circuits/
│ │ │ ├── add_complete_test.circom
│ │ │ ├── add_incomplete_test.circom
│ │ │ ├── addr_membership_test.circom
│ │ │ ├── double_test.circom
│ │ │ ├── eff_ecdsa_test.circom
│ │ │ ├── eff_ecdsa_to_addr_test.circom
│ │ │ ├── k_test.circom
│ │ │ ├── mul_test.circom
│ │ │ ├── poseidon_test.circom
│ │ │ └── pubkey_membership_test.circom
│ │ ├── eff_ecdsa.test.ts
│ │ ├── eff_ecdsa_to_addr.test.ts
│ │ ├── poseidon.test.ts
│ │ ├── pubkey_membership.test.ts
│ │ ├── secp256k1.test.ts
│ │ └── test_utils.ts
│ ├── lib/
│ │ ├── .npmignore
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── embedWasmBytes.ts
│ │ ├── jest.config.js
│ │ ├── package.json
│ │ ├── src/
│ │ │ ├── config/
│ │ │ │ └── index.ts
│ │ │ ├── core/
│ │ │ │ ├── prover.ts
│ │ │ │ └── verifier.ts
│ │ │ ├── helpers/
│ │ │ │ ├── poseidon.ts
│ │ │ │ ├── profiler.ts
│ │ │ │ ├── publicInputs.ts
│ │ │ │ ├── tree.ts
│ │ │ │ └── utils.ts
│ │ │ ├── index.ts
│ │ │ ├── types/
│ │ │ │ └── index.ts
│ │ │ └── wasm/
│ │ │ ├── index.ts
│ │ │ ├── wasm.d.ts
│ │ │ └── wasm.js
│ │ ├── tests/
│ │ │ ├── efficientEcdsa.test.ts
│ │ │ ├── membershipNizk.test.ts
│ │ │ └── tree.test.ts
│ │ ├── tsconfig.build.json
│ │ └── tsconfig.json
│ ├── poseidon/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── k256_params.sh
│ │ ├── sage/
│ │ │ ├── generate_params_poseidon.sage
│ │ │ └── security_inequalities.sage
│ │ └── src/
│ │ ├── k256_consts.rs
│ │ ├── lib.rs
│ │ └── poseidon_k256.rs
│ ├── secq256k1/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── sage/
│ │ │ ├── hashtocurve_params.sage
│ │ │ ├── sqrt_ratio_params.sage
│ │ │ └── sswu_generic.sage
│ │ └── src/
│ │ ├── affine.rs
│ │ ├── field/
│ │ │ ├── field_secp.rs
│ │ │ ├── field_secq.rs
│ │ │ └── mod.rs
│ │ ├── hashtocurve.rs
│ │ ├── lib.rs
│ │ └── scalar.rs
│ └── spartan_wasm/
│ ├── Cargo.toml
│ ├── LICENSE
│ ├── README.md
│ ├── src/
│ │ ├── lib.rs
│ │ └── wasm.rs
│ └── test_circuit/
│ ├── test_circuit.circom
│ ├── test_circuit.circuit
│ ├── test_circuit.r1cs
│ ├── test_circuit_js/
│ │ ├── generate_witness.js
│ │ ├── test_circuit.wasm
│ │ └── witness_calculator.js
│ └── witness.wtns
├── rust-toolchain
└── scripts/
├── addr_membership_circuit.sh
├── build.sh
├── build_wasm.sh
├── compile_circuit.sh
├── pubkey_membership_circuit.sh
└── test.sh
SYMBOL INDEX (914 symbols across 52 files)
FILE: packages/Spartan-secq/benches/nizk.rs
function nizk_prove_benchmark (line 16) | fn nizk_prove_benchmark(c: &mut Criterion) {
function nizk_verify_benchmark (line 47) | fn nizk_verify_benchmark(c: &mut Criterion) {
function set_duration (line 82) | fn set_duration() -> Criterion {
FILE: packages/Spartan-secq/benches/snark.rs
function snark_encode_benchmark (line 10) | fn snark_encode_benchmark(c: &mut Criterion) {
function snark_prove_benchmark (line 35) | fn snark_prove_benchmark(c: &mut Criterion) {
function snark_verify_benchmark (line 73) | fn snark_verify_benchmark(c: &mut Criterion) {
function set_duration (line 121) | fn set_duration() -> Criterion {
FILE: packages/Spartan-secq/examples/cubic.rs
function produce_r1cs (line 19) | fn produce_r1cs() -> (
function main (line 111) | fn main() {
FILE: packages/Spartan-secq/profiler/nizk.rs
function print (line 13) | fn print(msg: &str) {
function main (line 18) | pub fn main() {
FILE: packages/Spartan-secq/profiler/snark.rs
function print (line 12) | fn print(msg: &str) {
function main (line 17) | pub fn main() {
FILE: packages/Spartan-secq/src/bin/mont_params.rs
function get_words (line 6) | fn get_words(n: &BigUint) -> [u64; 4] {
function render_hex (line 15) | fn render_hex(label: String, words: &[u64; 4]) {
function main (line 22) | fn main() {
FILE: packages/Spartan-secq/src/commitments.rs
type MultiCommitGens (line 9) | pub struct MultiCommitGens {
method new (line 16) | pub fn new(n: usize, label: &[u8]) -> Self {
method clone (line 36) | pub fn clone(&self) -> MultiCommitGens {
method scale (line 44) | pub fn scale(&self, s: &Scalar) -> MultiCommitGens {
method split_at (line 52) | pub fn split_at(&self, mid: usize) -> (MultiCommitGens, MultiCommitGen...
type Commitments (line 70) | pub trait Commitments {
method commit (line 71) | fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupEle...
method commit (line 75) | fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupEle...
method commit (line 85) | fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupEle...
method commit (line 92) | fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupEle...
FILE: packages/Spartan-secq/src/dense_mlpoly.rs
type DensePolynomial (line 19) | pub struct DensePolynomial {
method new (line 120) | pub fn new(Z: Vec<Scalar>) -> Self {
method get_num_vars (line 128) | pub fn get_num_vars(&self) -> usize {
method len (line 132) | pub fn len(&self) -> usize {
method clone (line 136) | pub fn clone(&self) -> DensePolynomial {
method split (line 140) | pub fn split(&self, idx: usize) -> (DensePolynomial, DensePolynomial) {
method commit_inner (line 149) | fn commit_inner(&self, blinds: &[Scalar], gens: &MultiCommitGens) -> P...
method commit_inner (line 165) | fn commit_inner(&self, blinds: &[Scalar], gens: &MultiCommitGens) -> P...
method commit (line 179) | pub fn commit(
method bound (line 206) | pub fn bound(&self, L: &[Scalar]) -> Vec<Scalar> {
method bound_poly_var_top (line 215) | pub fn bound_poly_var_top(&mut self, r: &Scalar) {
method bound_poly_var_bot (line 224) | pub fn bound_poly_var_bot(&mut self, r: &Scalar) {
method evaluate (line 234) | pub fn evaluate(&self, r: &[Scalar]) -> Scalar {
method vec (line 242) | fn vec(&self) -> &Vec<Scalar> {
method extend (line 246) | pub fn extend(&mut self, other: &DensePolynomial) {
method merge (line 257) | pub fn merge<'a, I>(polys: I) -> DensePolynomial
method from_usize (line 272) | pub fn from_usize(Z: &[usize]) -> Self {
type Output (line 282) | type Output = Scalar;
method index (line 285) | fn index(&self, _index: usize) -> &Scalar {
type PolyCommitmentGens (line 25) | pub struct PolyCommitmentGens {
method new (line 31) | pub fn new(num_vars: usize, label: &'static [u8]) -> PolyCommitmentGens {
type PolyCommitmentBlinds (line 38) | pub struct PolyCommitmentBlinds {
type PolyCommitment (line 43) | pub struct PolyCommitment {
type ConstPolyCommitment (line 48) | pub struct ConstPolyCommitment {
type EqPolynomial (line 52) | pub struct EqPolynomial {
method new (line 57) | pub fn new(r: Vec<Scalar>) -> Self {
method evaluate (line 61) | pub fn evaluate(&self, rx: &[Scalar]) -> Scalar {
method evals (line 68) | pub fn evals(&self) -> Vec<Scalar> {
method compute_factored_lens (line 86) | pub fn compute_factored_lens(ell: usize) -> (usize, usize) {
method compute_factored_evals (line 90) | pub fn compute_factored_evals(&self) -> (Vec<Scalar>, Vec<Scalar>) {
type IdentityPolynomial (line 101) | pub struct IdentityPolynomial {
method new (line 106) | pub fn new(size_point: usize) -> Self {
method evaluate (line 110) | pub fn evaluate(&self, r: &[Scalar]) -> Scalar {
method append_to_transcript (line 291) | fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Tr...
type PolyEvalProof (line 301) | pub struct PolyEvalProof {
method protocol_name (line 306) | fn protocol_name() -> &'static [u8] {
method prove (line 310) | pub fn prove(
method verify (line 365) | pub fn verify(
method verify_plain (line 389) | pub fn verify_plain(
function evaluate_with_LR (line 410) | fn evaluate_with_LR(Z: &[Scalar], r: &[Scalar]) -> Scalar {
function check_polynomial_evaluation (line 432) | fn check_polynomial_evaluation() {
function compute_factored_chis_at_r (line 452) | pub fn compute_factored_chis_at_r(r: &[Scalar]) -> (Vec<Scalar>, Vec<Sca...
function compute_chis_at_r (line 491) | pub fn compute_chis_at_r(r: &[Scalar]) -> Vec<Scalar> {
function compute_outerproduct (line 510) | pub fn compute_outerproduct(L: Vec<Scalar>, R: Vec<Scalar>) -> Vec<Scala...
function check_memoized_chis (line 521) | fn check_memoized_chis() {
function check_factored_chis (line 535) | fn check_factored_chis() {
function check_memoized_factored_chis (line 550) | fn check_memoized_factored_chis() {
function check_polynomial_commit (line 566) | fn check_polynomial_commit() {
FILE: packages/Spartan-secq/src/errors.rs
type ProofVerifyError (line 5) | pub enum ProofVerifyError {
method default (line 13) | fn default() -> Self {
type R1CSError (line 19) | pub enum R1CSError {
FILE: packages/Spartan-secq/src/group.rs
type GroupElement (line 8) | pub type GroupElement = secq256k1::AffinePoint;
method mul_assign (line 40) | fn mul_assign(&mut self, scalar: &'b Scalar) {
type CompressedGroup (line 9) | pub type CompressedGroup = secq256k1::EncodedPoint;
type CompressedGroupExt (line 10) | pub trait CompressedGroupExt {
method unpack (line 12) | fn unpack(&self) -> Result<Self::Group, ProofVerifyError>;
type Group (line 16) | type Group = secq256k1::AffinePoint;
method unpack (line 17) | fn unpack(&self) -> Result<Self::Group, ProofVerifyError> {
type DecompressEncodedPoint (line 29) | pub trait DecompressEncodedPoint {
method decompress (line 30) | fn decompress(&self) -> Option<GroupElement>;
method decompress (line 34) | fn decompress(&self) -> Option<GroupElement> {
type Output (line 47) | type Output = GroupElement;
function mul (line 48) | fn mul(self, scalar: &'b Scalar) -> GroupElement {
type Output (line 54) | type Output = GroupElement;
function mul (line 56) | fn mul(self, point: &'b GroupElement) -> GroupElement {
type VartimeMultiscalarMul (line 100) | pub trait VartimeMultiscalarMul {
method vartime_multiscalar_mul (line 102) | fn vartime_multiscalar_mul(scalars: Vec<Scalar>, points: Vec<GroupElem...
type Scalar (line 106) | type Scalar = super::scalar::Scalar;
method vartime_multiscalar_mul (line 108) | fn vartime_multiscalar_mul(scalars: Vec<Scalar>, points: Vec<GroupElem...
function msm (line 127) | fn msm() {
FILE: packages/Spartan-secq/src/lib.rs
type ComputationCommitment (line 47) | pub struct ComputationCommitment {
type ComputationDecommitment (line 52) | pub struct ComputationDecommitment {
type Assignment (line 58) | pub struct Assignment {
method new (line 64) | pub fn new(assignment: &[[u8; 32]]) -> Result<Assignment, R1CSError> {
method pad (line 91) | fn pad(&self, len: usize) -> VarsAssignment {
type VarsAssignment (line 108) | pub type VarsAssignment = Assignment;
type InputsAssignment (line 111) | pub type InputsAssignment = Assignment;
type Instance (line 115) | pub struct Instance {
method new (line 123) | pub fn new(
method is_sat (line 232) | pub fn is_sat(
method produce_synthetic_r1cs (line 264) | pub fn produce_synthetic_r1cs(
type SNARKGens (line 280) | pub struct SNARKGens {
method new (line 288) | pub fn new(num_cons: usize, num_vars: usize, num_inputs: usize, num_nz...
type SNARK (line 314) | pub struct SNARK {
method protocol_name (line 321) | fn protocol_name() -> &'static [u8] {
method encode (line 326) | pub fn encode(
method prove (line 340) | pub fn prove(
method verify (line 424) | pub fn verify(
type NIZKGens (line 469) | pub struct NIZKGens {
method new (line 475) | pub fn new(num_cons: usize, num_vars: usize, num_inputs: usize) -> Self {
type NIZK (line 491) | pub struct NIZK {
method protocol_name (line 497) | fn protocol_name() -> &'static [u8] {
method prove (line 502) | pub fn prove(
method verify (line 550) | pub fn verify(
function check_snark (line 595) | pub fn check_snark() {
function check_r1cs_invalid_index (line 629) | pub fn check_r1cs_invalid_index() {
function check_r1cs_invalid_scalar (line 649) | pub fn check_r1cs_invalid_scalar() {
function test_padded_constraints (line 671) | fn test_padded_constraints() {
FILE: packages/Spartan-secq/src/math.rs
type Math (line 1) | pub trait Math {
method square_root (line 2) | fn square_root(self) -> usize;
method pow2 (line 3) | fn pow2(self) -> usize;
method get_bits (line 4) | fn get_bits(self, num_bits: usize) -> Vec<bool>;
method log_2 (line 5) | fn log_2(self) -> usize;
method square_root (line 10) | fn square_root(self) -> usize {
method pow2 (line 15) | fn pow2(self) -> usize {
method get_bits (line 21) | fn get_bits(self, num_bits: usize) -> Vec<bool> {
method log_2 (line 27) | fn log_2(self) -> usize {
FILE: packages/Spartan-secq/src/nizk/bullet.rs
type BulletReductionProof (line 17) | pub struct BulletReductionProof {
method prove (line 33) | pub fn prove(
method verification_scalars (line 155) | fn verification_scalars(
method verify (line 209) | pub fn verify(
function inner_product (line 257) | pub fn inner_product(a: &[Scalar], b: &[Scalar]) -> Scalar {
FILE: packages/Spartan-secq/src/nizk/mod.rs
type KnowledgeProof (line 17) | pub struct KnowledgeProof {
method protocol_name (line 24) | fn protocol_name() -> &'static [u8] {
method prove (line 28) | pub fn prove(
method verify (line 55) | pub fn verify(
type EqualityProof (line 79) | pub struct EqualityProof {
method protocol_name (line 85) | fn protocol_name() -> &'static [u8] {
method prove (line 89) | pub fn prove(
method verify (line 119) | pub fn verify(
type ProductProof (line 148) | pub struct ProductProof {
method protocol_name (line 156) | fn protocol_name() -> &'static [u8] {
method prove (line 160) | pub fn prove(
method check_equality (line 232) | fn check_equality(
method verify (line 246) | pub fn verify(
type DotProductProof (line 294) | pub struct DotProductProof {
method protocol_name (line 303) | fn protocol_name() -> &'static [u8] {
method compute_dotproduct (line 307) | pub fn compute_dotproduct(a: &[Scalar], b: &[Scalar]) -> Scalar {
method prove (line 312) | pub fn prove(
method verify (line 373) | pub fn verify(
type DotProductProofGens (line 408) | pub struct DotProductProofGens {
method new (line 415) | pub fn new(n: usize, label: &[u8]) -> Self {
type DotProductProofLog (line 422) | pub struct DotProductProofLog {
method protocol_name (line 431) | fn protocol_name() -> &'static [u8] {
method compute_dotproduct (line 435) | pub fn compute_dotproduct(a: &[Scalar], b: &[Scalar]) -> Scalar {
method prove (line 440) | pub fn prove(
method verify (line 526) | pub fn verify(
function check_knowledgeproof (line 584) | fn check_knowledgeproof() {
function check_equalityproof (line 604) | fn check_equalityproof() {
function check_productproof (line 632) | fn check_productproof() {
function check_dotproductproof (line 664) | fn check_dotproductproof() {
function check_dotproductproof_log (line 703) | fn check_dotproductproof_log() {
FILE: packages/Spartan-secq/src/product_tree.rs
type ProductCircuit (line 12) | pub struct ProductCircuit {
method compute_layer (line 18) | fn compute_layer(
method new (line 36) | pub fn new(poly: &DensePolynomial) -> Self {
method evaluate (line 58) | pub fn evaluate(&self) -> Scalar {
type DotProductCircuit (line 66) | pub struct DotProductCircuit {
method new (line 73) | pub fn new(left: DensePolynomial, right: DensePolynomial, weight: Dens...
method evaluate (line 83) | pub fn evaluate(&self) -> Scalar {
method split (line 89) | pub fn split(&mut self) -> (DotProductCircuit, DotProductCircuit) {
type LayerProof (line 112) | pub struct LayerProof {
method verify (line 119) | pub fn verify(
type LayerProofBatched (line 135) | pub struct LayerProofBatched {
method verify (line 143) | pub fn verify(
type ProductCircuitEvalProof (line 158) | pub struct ProductCircuitEvalProof {
method prove (line 170) | pub fn prove(
method verify (line 220) | pub fn verify(
type ProductCircuitEvalProofBatched (line 163) | pub struct ProductCircuitEvalProofBatched {
method prove (line 259) | pub fn prove(
method verify (line 385) | pub fn verify(
FILE: packages/Spartan-secq/src/r1csinstance.rs
type R1CSInstance (line 19) | pub struct R1CSInstance {
method new (line 85) | pub fn new(
method get_num_vars (line 137) | pub fn get_num_vars(&self) -> usize {
method get_num_cons (line 141) | pub fn get_num_cons(&self) -> usize {
method get_num_inputs (line 145) | pub fn get_num_inputs(&self) -> usize {
method get_digest (line 149) | pub fn get_digest(&self) -> Vec<u8> {
method produce_synthetic_r1cs (line 155) | pub fn produce_synthetic_r1cs(
method is_sat (line 235) | pub fn is_sat(&self, vars: &[Scalar], input: &[Scalar]) -> bool {
method multiply_vec (line 267) | pub fn multiply_vec(
method compute_eval_table_sparse (line 283) | pub fn compute_eval_table_sparse(
method evaluate (line 299) | pub fn evaluate(&self, rx: &[Scalar], ry: &[Scalar]) -> (Scalar, Scala...
method commit (line 304) | pub fn commit(&self, gens: &R1CSCommitmentGens) -> (R1CSCommitment, R1...
type R1CSCommitmentGens (line 28) | pub struct R1CSCommitmentGens {
method new (line 33) | pub fn new(
type R1CSCommitment (line 50) | pub struct R1CSCommitment {
method get_num_cons (line 71) | pub fn get_num_cons(&self) -> usize {
method get_num_vars (line 75) | pub fn get_num_vars(&self) -> usize {
method get_num_inputs (line 79) | pub fn get_num_inputs(&self) -> usize {
method append_to_transcript (line 58) | fn append_to_transcript(&self, _label: &'static [u8], transcript: &mut T...
type R1CSDecommitment (line 66) | pub struct R1CSDecommitment {
type R1CSEvalProof (line 320) | pub struct R1CSEvalProof {
method prove (line 325) | pub fn prove(
method verify (line 349) | pub fn verify(
FILE: packages/Spartan-secq/src/r1csproof.rs
type R1CSProof (line 23) | pub struct R1CSProof {
method prove_phase_one (line 76) | fn prove_phase_one(
method prove_phase_two (line 111) | fn prove_phase_two(
method protocol_name (line 139) | fn protocol_name() -> &'static [u8] {
method prove (line 143) | pub fn prove(
method verify (line 350) | pub fn verify(
type R1CSSumcheckGens (line 40) | pub struct R1CSSumcheckGens {
method new (line 48) | pub fn new(label: &'static [u8], gens_1_ref: &MultiCommitGens) -> Self {
type R1CSGens (line 61) | pub struct R1CSGens {
method new (line 67) | pub fn new(label: &'static [u8], _num_cons: usize, num_vars: usize) ->...
function produce_tiny_r1cs (line 503) | fn produce_tiny_r1cs() -> (R1CSInstance, Vec<Scalar>, Vec<Scalar>) {
function test_tiny_r1cs (line 561) | fn test_tiny_r1cs() {
function test_synthetic_r1cs (line 568) | fn test_synthetic_r1cs() {
function check_r1cs_proof (line 575) | pub fn check_r1cs_proof() {
FILE: packages/Spartan-secq/src/random.rs
type RandomTape (line 5) | pub struct RandomTape {
method new (line 10) | pub fn new(name: &'static [u8]) -> Self {
method random_scalar (line 20) | pub fn random_scalar(&mut self, label: &'static [u8]) -> Scalar {
method random_vector (line 24) | pub fn random_vector(&mut self, label: &'static [u8], len: usize) -> V...
FILE: packages/Spartan-secq/src/scalar/mod.rs
type Scalar (line 6) | pub type Scalar = scalar::Scalar;
type ScalarBytes (line 7) | pub type ScalarBytes = secq256k1::Scalar;
type ScalarFromPrimitives (line 9) | pub trait ScalarFromPrimitives {
method to_scalar (line 10) | fn to_scalar(self) -> Scalar;
method to_scalar (line 15) | fn to_scalar(self) -> Scalar {
method to_scalar (line 22) | fn to_scalar(self) -> Scalar {
type ScalarBytesFromScalar (line 31) | pub trait ScalarBytesFromScalar {
method decompress_scalar (line 32) | fn decompress_scalar(s: &Scalar) -> ScalarBytes;
method decompress_vector (line 33) | fn decompress_vector(s: &[Scalar]) -> Vec<ScalarBytes>;
method decompress_scalar (line 37) | fn decompress_scalar(s: &Scalar) -> ScalarBytes {
method decompress_vector (line 41) | fn decompress_vector(s: &[Scalar]) -> Vec<ScalarBytes> {
FILE: packages/Spartan-secq/src/scalar/scalar.rs
function adc (line 22) | pub const fn adc(a: u64, b: u64, carry: u64) -> (u64, u64) {
function sbb (line 29) | pub const fn sbb(a: u64, b: u64, borrow: u64) -> (u64, u64) {
function mac (line 36) | pub const fn mac(a: u64, b: u64, c: u64, carry: u64) -> (u64, u64) {
type Scalar (line 201) | pub struct Scalar(pub(crate) [u64; 5]);
method deserialize (line 247) | fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
method fmt (line 256) | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
method from (line 267) | fn from(val: u64) -> Scalar {
method product (line 399) | fn product<I>(iter: I) -> Self
method sum (line 411) | fn sum<I>(iter: I) -> Self
method zero (line 428) | pub const fn zero() -> Scalar {
method one (line 434) | pub const fn one() -> Scalar {
method random (line 438) | pub fn random<Rng: RngCore + CryptoRng>(rng: &mut Rng) -> Self {
method double (line 448) | pub const fn double(&self) -> Scalar {
method from_bytes (line 455) | pub fn from_bytes(bytes: &[u8; 32]) -> CtOption<Scalar> {
method to_bytes (line 483) | pub fn to_bytes(&self) -> [u8; 32] {
method from_bytes_wide (line 501) | pub fn from_bytes_wide(bytes: &[u8; 64]) -> Scalar {
method from_u512 (line 514) | fn from_u512(limbs: [u64; 8]) -> Scalar {
method from_raw (line 536) | pub const fn from_raw(val: [u64; 4]) -> Self {
method square (line 542) | pub const fn square(&self) -> Scalar {
method pow (line 574) | pub fn pow(&self, by: &[u64; 4]) -> Self {
method pow_vartime (line 593) | pub fn pow_vartime(&self, by: &[u64; 4]) -> Self {
method invert (line 607) | pub fn invert(&self) -> CtOption<Self> {
method batch_invert (line 628) | pub fn batch_invert(inputs: &mut [Scalar]) -> Scalar {
method montgomery_reduce (line 678) | const fn montgomery_reduce(
method mul (line 731) | pub const fn mul(&self, rhs: &Self) -> Self {
method sub (line 769) | pub const fn sub(&self, rhs: &Self) -> Self {
method add (line 789) | pub const fn add(&self, rhs: &Self) -> Self {
method neg (line 803) | pub const fn neg(&self) -> Self {
method serialize (line 207) | fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Er...
type U64ArrayVisitor (line 219) | struct U64ArrayVisitor;
type Value (line 222) | type Value = Scalar;
method expecting (line 224) | fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::...
method visit_seq (line 228) | fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
method ct_eq (line 273) | fn ct_eq(&self, other: &Self) -> Choice {
method eq (line 283) | fn eq(&self, other: &Self) -> bool {
method conditional_select (line 289) | fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
constant MODULUS (line 302) | const MODULUS: Scalar = Scalar([
type Output (line 311) | type Output = Scalar;
method neg (line 314) | fn neg(self) -> Scalar {
type Output (line 320) | type Output = Scalar;
method neg (line 323) | fn neg(self) -> Scalar {
type Output (line 329) | type Output = Scalar;
function sub (line 332) | fn sub(self, rhs: &'b Scalar) -> Scalar {
type Output (line 338) | type Output = Scalar;
function add (line 341) | fn add(self, rhs: &'b Scalar) -> Scalar {
type Output (line 347) | type Output = Scalar;
function mul (line 350) | fn mul(self, rhs: &'b Scalar) -> Scalar {
constant INV (line 359) | const INV: u64 = 0xd838091dd2253531;
constant R (line 362) | const R: Scalar = Scalar([
constant R2 (line 371) | const R2: Scalar = Scalar([
constant R3 (line 380) | const R3: Scalar = Scalar([
method default (line 390) | fn default() -> Self {
method zeroize (line 420) | fn zeroize(&mut self) {
function from (line 823) | fn from(value: &'a Scalar) -> [u8; 32] {
function test_inv (line 833) | fn test_inv() {
function test_debug (line 849) | fn test_debug() {
function test_equality (line 865) | fn test_equality() {
function test_to_bytes (line 875) | fn test_to_bytes() {
function test_from_bytes (line 912) | fn test_from_bytes() {
function test_from_u512_zero (line 995) | fn test_from_u512_zero() {
function test_from_u512_r (line 1012) | fn test_from_u512_r() {
function test_from_u512_r2 (line 1017) | fn test_from_u512_r2() {
function test_from_u512_max (line 1022) | fn test_from_u512_max() {
function test_from_bytes_wide_r2 (line 1031) | fn test_from_bytes_wide_r2() {
function test_from_bytes_wide_negative_one (line 1043) | fn test_from_bytes_wide_negative_one() {
function test_from_bytes_wide_maximum (line 1055) | fn test_from_bytes_wide_maximum() {
function test_zero (line 1063) | fn test_zero() {
constant LARGEST (line 1070) | const LARGEST: Scalar = Scalar([
function test_addition (line 1079) | fn test_addition() {
function test_negation (line 1100) | fn test_negation() {
function test_subtraction (line 1112) | fn test_subtraction() {
function test_multiplication (line 1128) | fn test_multiplication() {
function test_squaring (line 1157) | fn test_squaring() {
function test_inversion (line 1186) | fn test_inversion() {
function test_invert_is_pow (line 1209) | fn test_invert_is_pow() {
function test_from_raw (line 1236) | fn test_from_raw() {
function test_double (line 1256) | fn test_double() {
FILE: packages/Spartan-secq/src/sparse_mlpoly.rs
type SparseMatEntry (line 20) | pub struct SparseMatEntry {
method new (line 27) | pub fn new(row: usize, col: usize, val: Scalar) -> Self {
type SparseMatPolynomial (line 33) | pub struct SparseMatPolynomial {
method new (line 345) | pub fn new(num_vars_x: usize, num_vars_y: usize, M: Vec<SparseMatEntry...
method get_num_nz_entries (line 353) | pub fn get_num_nz_entries(&self) -> usize {
method sparse_to_dense_vecs (line 357) | fn sparse_to_dense_vecs(&self, N: usize) -> (Vec<usize>, Vec<usize>, V...
method multi_sparse_to_dense_rep (line 371) | fn multi_sparse_to_dense_rep(
method evaluate_with_tables (line 430) | fn evaluate_with_tables(&self, eval_table_rx: &[Scalar], eval_table_ry...
method multi_evaluate (line 444) | pub fn multi_evaluate(
method multiply_vec (line 457) | pub fn multiply_vec(&self, num_rows: usize, num_cols: usize, z: &[Scal...
method compute_eval_table_sparse (line 473) | pub fn compute_eval_table_sparse(
method multi_commit (line 490) | pub fn multi_commit(
type Derefs (line 39) | pub struct Derefs {
method new (line 51) | pub fn new(row_ops_val: Vec<DensePolynomial>, col_ops_val: Vec<DensePo...
method commit (line 68) | pub fn commit(&self, gens: &PolyCommitmentGens) -> DerefsCommitment {
type DerefsCommitment (line 46) | pub struct DerefsCommitment {
type DerefsEvalProof (line 75) | pub struct DerefsEvalProof {
method protocol_name (line 80) | fn protocol_name() -> &'static [u8] {
method prove_single (line 84) | fn prove_single(
method prove (line 130) | pub fn prove(
method verify_single (line 153) | fn verify_single(
method verify (line 183) | pub fn verify(
method append_to_transcript (line 209) | fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Tr...
type AddrTimestamps (line 216) | struct AddrTimestamps {
method new (line 224) | pub fn new(num_cells: usize, num_ops: usize, ops_addr: Vec<Vec<usize>>...
method deref_mem (line 259) | fn deref_mem(addr: &[usize], mem_val: &[Scalar]) -> DensePolynomial {
method deref (line 270) | pub fn deref(&self, mem_val: &[Scalar]) -> Vec<DensePolynomial> {
type MultiSparseMatPolynomialAsDense (line 277) | pub struct MultiSparseMatPolynomialAsDense {
method deref (line 514) | pub fn deref(&self, row_mem_val: &[Scalar], col_mem_val: &[Scalar]) ->...
type SparseMatPolyCommitmentGens (line 286) | pub struct SparseMatPolyCommitmentGens {
method new (line 293) | pub fn new(
type SparseMatPolyCommitment (line 322) | pub struct SparseMatPolyCommitment {
method append_to_transcript (line 331) | fn append_to_transcript(&self, _label: &'static [u8], transcript: &mut T...
type ProductLayer (line 523) | struct ProductLayer {
type Layers (line 531) | struct Layers {
method build_hash_layer (line 536) | fn build_hash_layer(
method new (line 613) | pub fn new(
type PolyEvalNetwork (line 664) | struct PolyEvalNetwork {
method new (line 670) | pub fn new(
type HashLayerProof (line 688) | struct HashLayerProof {
method protocol_name (line 699) | fn protocol_name() -> &'static [u8] {
method prove_helper (line 703) | fn prove_helper(
method prove (line 729) | fn prove(
method verify_helper (line 840) | fn verify_helper(
method verify (line 891) | fn verify(
type ProductLayerProof (line 1025) | struct ProductLayerProof {
method protocol_name (line 1034) | fn protocol_name() -> &'static [u8] {
method prove (line 1038) | pub fn prove(
method verify (line 1218) | pub fn verify(
type PolyEvalNetworkProof (line 1315) | struct PolyEvalNetworkProof {
method protocol_name (line 1321) | fn protocol_name() -> &'static [u8] {
method prove (line 1325) | pub fn prove(
method verify (line 1361) | pub fn verify(
type SparseMatPolyEvalProof (line 1426) | pub struct SparseMatPolyEvalProof {
method protocol_name (line 1432) | fn protocol_name() -> &'static [u8] {
method equalize (line 1436) | fn equalize(rx: &[Scalar], ry: &[Scalar]) -> (Vec<Scalar>, Vec<Scalar>) {
method prove (line 1454) | pub fn prove(
method verify (line 1523) | pub fn verify(
type SparsePolyEntry (line 1562) | pub struct SparsePolyEntry {
method new (line 1568) | pub fn new(idx: usize, val: Scalar) -> Self {
type SparsePolynomial (line 1573) | pub struct SparsePolynomial {
method new (line 1579) | pub fn new(num_vars: usize, Z: Vec<SparsePolyEntry>) -> Self {
method compute_chi (line 1583) | fn compute_chi(a: &[bool], r: &[Scalar]) -> Scalar {
method evaluate (line 1597) | pub fn evaluate(&self, r: &[Scalar]) -> Scalar {
function check_sparse_polyeval_proof (line 1614) | fn check_sparse_polyeval_proof() {
FILE: packages/Spartan-secq/src/sumcheck.rs
type SumcheckInstanceProof (line 19) | pub struct SumcheckInstanceProof {
method new (line 24) | pub fn new(compressed_polys: Vec<CompressedUniPoly>) -> SumcheckInstan...
method verify (line 28) | pub fn verify(
method prove_cubic (line 184) | pub fn prove_cubic<F>(
method prove_cubic_batched (line 255) | pub fn prove_cubic_batched<F>(
type ZKSumcheckInstanceProof (line 66) | pub struct ZKSumcheckInstanceProof {
method new (line 73) | pub fn new(
method verify (line 85) | pub fn verify(
method prove_quad (line 429) | pub fn prove_quad<F>(
method prove_cubic_with_additive_term (line 589) | pub fn prove_cubic_with_additive_term<F>(
FILE: packages/Spartan-secq/src/timer.rs
type Timer (line 14) | pub struct Timer {
method new (line 22) | pub fn new(label: &str) -> Self {
method stop (line 40) | pub fn stop(&self) {
method print (line 55) | pub fn print(msg: &str) {
method new (line 77) | pub fn new(label: &str) -> Self {
method stop (line 84) | pub fn stop(&self) {}
method print (line 87) | pub fn print(_msg: &str) {}
type Timer (line 70) | pub struct Timer {
method new (line 22) | pub fn new(label: &str) -> Self {
method stop (line 40) | pub fn stop(&self) {
method print (line 55) | pub fn print(msg: &str) {
method new (line 77) | pub fn new(label: &str) -> Self {
method stop (line 84) | pub fn stop(&self) {}
method print (line 87) | pub fn print(_msg: &str) {}
FILE: packages/Spartan-secq/src/transcript.rs
type ProofTranscript (line 5) | pub trait ProofTranscript {
method append_protocol_name (line 6) | fn append_protocol_name(&mut self, protocol_name: &'static [u8]);
method append_scalar (line 7) | fn append_scalar(&mut self, label: &'static [u8], scalar: &Scalar);
method append_point (line 8) | fn append_point(&mut self, label: &'static [u8], point: &CompressedGro...
method challenge_scalar (line 9) | fn challenge_scalar(&mut self, label: &'static [u8]) -> Scalar;
method challenge_vector (line 10) | fn challenge_vector(&mut self, label: &'static [u8], len: usize) -> Ve...
method append_protocol_name (line 14) | fn append_protocol_name(&mut self, protocol_name: &'static [u8]) {
method append_scalar (line 18) | fn append_scalar(&mut self, label: &'static [u8], scalar: &Scalar) {
method append_point (line 22) | fn append_point(&mut self, label: &'static [u8], point: &CompressedGro...
method challenge_scalar (line 26) | fn challenge_scalar(&mut self, label: &'static [u8]) -> Scalar {
method challenge_vector (line 32) | fn challenge_vector(&mut self, label: &'static [u8], len: usize) -> Ve...
type AppendToTranscript (line 39) | pub trait AppendToTranscript {
method append_to_transcript (line 40) | fn append_to_transcript(&self, label: &'static [u8], transcript: &mut ...
method append_to_transcript (line 44) | fn append_to_transcript(&self, label: &'static [u8], transcript: &mut ...
method append_to_transcript (line 50) | fn append_to_transcript(&self, label: &'static [u8], transcript: &mut ...
method append_to_transcript (line 60) | fn append_to_transcript(&self, label: &'static [u8], transcript: &mut ...
FILE: packages/Spartan-secq/src/unipoly.rs
type UniPoly (line 11) | pub struct UniPoly {
method from_evals (line 23) | pub fn from_evals(evals: &[Scalar]) -> Self {
method degree (line 56) | pub fn degree(&self) -> usize {
method as_vec (line 60) | pub fn as_vec(&self) -> Vec<Scalar> {
method eval_at_zero (line 64) | pub fn eval_at_zero(&self) -> Scalar {
method eval_at_one (line 68) | pub fn eval_at_one(&self) -> Scalar {
method evaluate (line 72) | pub fn evaluate(&self, r: &Scalar) -> Scalar {
method compress (line 82) | pub fn compress(&self) -> CompressedUniPoly {
method commit (line 90) | pub fn commit(&self, gens: &MultiCommitGens, blind: &Scalar) -> GroupE...
type CompressedUniPoly (line 18) | pub struct CompressedUniPoly {
method decompress (line 98) | pub fn decompress(&self, hint: &Scalar) -> UniPoly {
method append_to_transcript (line 113) | fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Tr...
function test_from_evals_quad (line 128) | fn test_from_evals_quad() {
function test_from_evals_cubic (line 155) | fn test_from_evals_cubic() {
FILE: packages/benchmark/web/next.config.js
method headers (line 11) | async headers() {
FILE: packages/benchmark/web/pages/_app.tsx
function App (line 3) | function App({ Component, pageProps }: AppProps) {
FILE: packages/benchmark/web/pages/index.tsx
function Home (line 21) | function Home() {
FILE: packages/circuit_reader/src/bin/gen_spartan_inst.rs
function main (line 8) | fn main() {
FILE: packages/circuit_reader/src/circom_reader.rs
type Constraint (line 11) | pub type Constraint<Fr> = (Vec<(usize, Fr)>, Vec<(usize, Fr)>, Vec<(usiz...
type R1CS (line 14) | pub struct R1CS<Fr: PrimeField> {
type Header (line 23) | pub struct Header {
type R1CSFile (line 36) | pub struct R1CSFile<Fr: PrimeField> {
function load_r1cs_from_bin_file (line 45) | pub fn load_r1cs_from_bin_file<G1: Group>(filename: &Path) -> (R1CS<G1::...
function load_r1cs_from_bin (line 53) | pub fn load_r1cs_from_bin<G1: Group, R: Read + Seek>(reader: R) -> (R1CS...
function read_field (line 69) | pub(crate) fn read_field<R: Read, Fr: PrimeField>(mut reader: R) -> Resu...
function read_header (line 79) | fn read_header<R: Read>(mut reader: R, size: u64) -> Result<Header> {
function read_constraint_vec (line 102) | fn read_constraint_vec<R: Read, Fr: PrimeField>(mut reader: R) -> Result...
function read_constraints (line 114) | fn read_constraints<R: Read, Fr: PrimeField>(
function read_map (line 130) | fn read_map<R: Read>(mut reader: R, size: u64, header: &Header) -> Resul...
function from_reader (line 150) | pub fn from_reader<G1: Group, R: Read + Seek>(mut reader: R) -> Result<R...
FILE: packages/circuit_reader/src/lib.rs
function load_as_spartan_inst (line 10) | pub fn load_as_spartan_inst(circuit_file: PathBuf, num_pub_inputs: usize...
function convert_to_spartan_r1cs (line 16) | fn convert_to_spartan_r1cs<F: PrimeField<Repr = FieldBytes>>(
FILE: packages/lib/src/core/prover.ts
class MembershipProver (line 18) | class MembershipProver extends Profiler implements IProver {
method constructor (line 23) | constructor({
method initWasm (line 51) | async initWasm() {
method prove (line 55) | async prove({ sig, msgHash, merkleProof }: ProveArgs): Promise<NIZK> {
FILE: packages/lib/src/core/verifier.ts
class MembershipVerifier (line 14) | class MembershipVerifier extends Profiler implements IVerifier {
method constructor (line 18) | constructor({
method initWasm (line 42) | async initWasm() {
method verify (line 46) | async verify({ proof, publicInputSer }: VerifyArgs): Promise<boolean> {
FILE: packages/lib/src/helpers/poseidon.ts
class Poseidon (line 4) | class Poseidon {
method hash (line 5) | hash(inputs: bigint[]): bigint {
method initWasm (line 15) | async initWasm() {
method hashPubKey (line 19) | hashPubKey(pubKey: Buffer): bigint {
FILE: packages/lib/src/helpers/profiler.ts
class Profiler (line 2) | class Profiler {
method constructor (line 5) | constructor(options: { enabled?: boolean }) {
method time (line 9) | time(label: string) {
method timeEnd (line 13) | timeEnd(label: string) {
FILE: packages/lib/src/helpers/publicInputs.ts
constant SECP256K1_N (line 9) | const SECP256K1_N = new BN(
class CircuitPubInput (line 19) | class CircuitPubInput {
method constructor (line 26) | constructor(
method serialize (line 40) | serialize(): Uint8Array {
method deserialize (line 52) | static deserialize(serialized: Uint8Array): CircuitPubInput {
class PublicInput (line 66) | class PublicInput {
method constructor (line 72) | constructor(
method serialize (line 84) | serialize(): Uint8Array {
method deserialize (line 98) | static deserialize(serialized: Uint8Array): PublicInput {
FILE: packages/lib/src/helpers/tree.ts
class Tree (line 5) | class Tree {
method constructor (line 10) | constructor(depth: number, poseidon: Poseidon) {
method insert (line 18) | insert(leaf: bigint) {
method delete (line 22) | delete(index: number) {
method leaves (line 26) | leaves(): bigint[] {
method root (line 30) | root(): bigint {
method indexOf (line 34) | indexOf(leaf: bigint): number {
method createProof (line 38) | createProof(index: number): MerkleProof {
method verifyProof (line 47) | verifyProof(proof: MerkleProof, leaf: bigint): boolean {
FILE: packages/lib/src/types/index.ts
type MerkleProof (line 6) | interface MerkleProof {
type EffECDSAPubInput (line 11) | interface EffECDSAPubInput {
type NIZK (line 18) | interface NIZK {
type ProverConfig (line 23) | interface ProverConfig {
type ProveArgs (line 30) | interface ProveArgs {
type VerifyArgs (line 36) | interface VerifyArgs {
type VerifyConfig (line 41) | interface VerifyConfig {
type IProver (line 47) | interface IProver {
type IVerifier (line 54) | interface IVerifier {
FILE: packages/lib/src/wasm/wasm.d.ts
type InitInput (line 26) | type InitInput = RequestInfo | URL | Response | BufferSource | WebAssemb...
type InitOutput (line 28) | interface InitOutput {
type SyncInitInput (line 41) | type SyncInitInput = BufferSource | WebAssembly.Module;
FILE: packages/lib/src/wasm/wasm.js
function getObject (line 7) | function getObject(idx) { return heap[idx]; }
function dropObject (line 11) | function dropObject(idx) {
function takeObject (line 17) | function takeObject(idx) {
function getUint8Memory0 (line 29) | function getUint8Memory0() {
function getStringFromWasm0 (line 36) | function getStringFromWasm0(ptr, len) {
function addHeapObject (line 41) | function addHeapObject(obj) {
function init_panic_hook (line 51) | function init_panic_hook() {
constant WASM_VECTOR_LEN (line 55) | let WASM_VECTOR_LEN = 0;
function passArray8ToWasm0 (line 57) | function passArray8ToWasm0(arg, malloc) {
function getInt32Memory0 (line 66) | function getInt32Memory0() {
function getArrayU8FromWasm0 (line 73) | function getArrayU8FromWasm0(ptr, len) {
function prove (line 83) | function prove(circuit, vars, public_inputs) {
function verify (line 114) | function verify(circuit, proof, public_input) {
function poseidon (line 140) | function poseidon(input_bytes) {
function handleError (line 161) | function handleError(f, args) {
function passStringToWasm0 (line 184) | function passStringToWasm0(arg, malloc, realloc) {
function __wbg_load (line 222) | async function __wbg_load(module, imports) {
function __wbg_get_imports (line 253) | function __wbg_get_imports() {
function __wbg_init_memory (line 398) | function __wbg_init_memory(imports, maybe_memory) {
function __wbg_finalize_init (line 402) | function __wbg_finalize_init(instance, module) {
function initSync (line 412) | async function initSync(module, maybe_memory) {
function __wbg_init (line 432) | async function __wbg_init(input, maybe_memory) {
FILE: packages/poseidon/src/k256_consts.rs
constant NUM_FULL_ROUNDS (line 5) | pub(crate) const NUM_FULL_ROUNDS: usize = 8;
constant NUM_PARTIAL_ROUNDS (line 6) | pub(crate) const NUM_PARTIAL_ROUNDS: usize = 56;
FILE: packages/poseidon/src/lib.rs
type PoseidonConstants (line 6) | pub struct PoseidonConstants<F: PrimeField> {
function new (line 14) | pub fn new(
type Poseidon (line 29) | pub struct Poseidon<F: PrimeField> {
function new (line 36) | pub fn new(constants: PoseidonConstants<F>) -> Self {
function hash (line 45) | pub fn hash(&mut self, input: &[F; 2]) -> F {
function add_constants (line 72) | fn add_constants(&mut self) {
function matrix_mul (line 80) | fn matrix_mul(&mut self) {
function full_round (line 94) | fn full_round(&mut self) {
function partial_round (line 109) | fn partial_round(&mut self) {
function test_k256 (line 129) | fn test_k256() {
FILE: packages/poseidon/src/poseidon_k256.rs
function hash (line 6) | pub fn hash(input: &[FieldElement; 2]) -> FieldElement {
FILE: packages/secq256k1/src/affine.rs
type AffinePointCore (line 15) | pub type AffinePointCore = primeorder::AffinePoint<Secq256K1>;
type AffinePoint (line 18) | pub struct AffinePoint(pub AffinePointCore);
type Output (line 21) | type Output = AffinePoint;
method mul (line 23) | fn mul(self, rhs: Scalar) -> Self::Output {
type Output (line 37) | type Output = AffinePoint;
method mul (line 39) | fn mul(self, rhs: &Scalar) -> Self::Output {
method mul_assign (line 45) | fn mul_assign(&mut self, rhs: &Scalar) {
method mul_assign (line 51) | fn mul_assign(&mut self, rhs: Scalar) {
type Output (line 57) | type Output = AffinePoint;
method add (line 59) | fn add(self, rhs: AffinePoint) -> Self::Output {
method add_assign (line 65) | fn add_assign(&mut self, rhs: AffinePoint) {
type Output (line 71) | type Output = AffinePoint;
method sub (line 73) | fn sub(self, rhs: AffinePoint) -> Self::Output {
method sub_assign (line 79) | fn sub_assign(&mut self, rhs: AffinePoint) {
method identity (line 87) | pub const fn identity() -> Self {
method generator (line 91) | pub const fn generator() -> Self {
method iso_a (line 97) | pub const fn iso_a() -> FieldElement {
method iso_b (line 108) | pub const fn iso_b() -> FieldElement {
method iso_z (line 113) | pub const fn iso_z() -> FieldElement {
method iso_constants (line 124) | pub const fn iso_constants() -> [FieldElement; 13] {
method compress (line 207) | pub fn compress(&self) -> EncodedPoint {
method decompress (line 211) | pub fn decompress(bytes: EncodedPoint) -> CtOption<Self> {
method from_uniform_bytes (line 215) | pub fn from_uniform_bytes(bytes: &[u8; 128]) -> Self {
method from (line 246) | fn from(p: ProjectivePoint) -> Self {
type Output (line 260) | type Output = AffinePoint;
method add (line 262) | fn add(self, rhs: &AffinePoint) -> Self::Output {
method add_assign (line 268) | fn add_assign(&mut self, rhs: &AffinePoint) {
type Output (line 274) | type Output = AffinePoint;
method sub (line 276) | fn sub(self, rhs: &AffinePoint) -> Self::Output {
method sub_assign (line 282) | fn sub_assign(&mut self, rhs: &AffinePoint) {
method sum (line 294) | fn sum<I: Iterator<Item = &'a AffinePoint>>(iter: I) -> Self {
type Output (line 29) | type Output = AffinePoint;
function mul (line 31) | fn mul(self, rhs: Scalar) -> Self::Output {
type Output (line 252) | type Output = AffinePoint;
method neg (line 254) | fn neg(self) -> Self::Output {
method sum (line 288) | fn sum<I: Iterator<Item = Self>>(iter: I) -> Self {
type Scalar (line 300) | type Scalar = Scalar;
method random (line 302) | fn random(rng: impl rand_core::RngCore) -> Self {
method generator (line 306) | fn generator() -> Self {
method identity (line 310) | fn identity() -> Self {
method is_identity (line 314) | fn is_identity(&self) -> Choice {
method double (line 318) | fn double(&self) -> Self {
function test_from_uniform_bytes (line 328) | fn test_from_uniform_bytes() {
FILE: packages/secq256k1/src/field/field_secp.rs
function adc (line 26) | pub const fn adc(a: u64, b: u64, carry: u64) -> (u64, u64) {
function sbb (line 33) | pub const fn sbb(a: u64, b: u64, borrow: u64) -> (u64, u64) {
function mac (line 40) | pub const fn mac(a: u64, b: u64, c: u64, carry: u64) -> (u64, u64) {
type FieldElement (line 205) | pub struct FieldElement(pub(crate) [u64; 5]);
method deserialize (line 324) | fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
method fmt (line 333) | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
method from (line 344) | fn from(val: u64) -> FieldElement {
method product (line 581) | fn product<I>(iter: I) -> Self
method sum (line 593) | fn sum<I>(iter: I) -> Self
constant ZERO (line 608) | pub const ZERO: Self = Self([0, 0, 0, 0, 0]);
constant ONE (line 609) | pub const ONE: Self = R;
method pow2k (line 611) | fn pow2k(&self, k: usize) -> Self {
method zero (line 621) | pub const fn zero() -> FieldElement {
method one (line 627) | pub const fn one() -> FieldElement {
method random (line 631) | pub fn random<Rng: RngCore + CryptoRng>(rng: &mut Rng) -> Self {
method double (line 641) | pub const fn double(&self) -> FieldElement {
method from_bytes_wide (line 648) | pub fn from_bytes_wide(bytes: &[u8; 64]) -> FieldElement {
method from_u512 (line 661) | fn from_u512(limbs: [u64; 8]) -> FieldElement {
method from_raw (line 683) | pub const fn from_raw(val: [u64; 4]) -> Self {
method square (line 689) | pub const fn square(&self) -> FieldElement {
method pow (line 721) | pub fn pow(&self, by: &[u64; 4]) -> Self {
method pow_by_self (line 734) | pub fn pow_by_self(&self, exp: &Self) -> Self {
method pow_vartime (line 752) | pub fn pow_vartime(&self, by: &[u64; 4]) -> Self {
method invert (line 766) | pub fn invert(&self) -> CtOption<Self> {
method batch_invert (line 787) | pub fn batch_invert(inputs: &mut [FieldElement]) -> FieldElement {
method montgomery_reduce (line 837) | const fn montgomery_reduce(
method mul (line 890) | pub const fn mul(&self, rhs: &Self) -> Self {
method sub (line 928) | pub const fn sub(&self, rhs: &Self) -> Self {
method add (line 948) | pub const fn add(&self, rhs: &Self) -> Self {
method neg (line 962) | pub const fn neg(&self) -> Self {
method from_sec1 (line 1056) | pub fn from_sec1(bytes: FieldBytes) -> CtOption<Self> {
method to_sec1 (line 1064) | pub fn to_sec1(self) -> FieldBytes {
constant C1 (line 216) | const C1: u64 = 1;
constant C3 (line 219) | const C3: Self = FieldElement([
constant C4 (line 227) | const C4: Self = Self::ONE;
constant C5 (line 228) | const C5: Self = Self::ONE;
constant C6 (line 231) | const C6: Self = FieldElement([
constant C7 (line 240) | const C7: Self = FieldElement([
method sqrt_ratio (line 248) | fn sqrt_ratio(u: &Self, v: &Self) -> (Choice, Self) {
method serialize (line 285) | fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Er...
type U64ArrayVisitor (line 296) | struct U64ArrayVisitor;
type Value (line 299) | type Value = FieldElement;
method expecting (line 301) | fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::...
method visit_seq (line 305) | fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
method random (line 350) | fn random(mut rng: impl RngCore) -> Self {
method zero (line 361) | fn zero() -> Self {
method one (line 365) | fn one() -> Self {
method is_zero (line 369) | fn is_zero(&self) -> Choice {
method square (line 373) | fn square(&self) -> Self {
method double (line 377) | fn double(&self) -> Self {
method sqrt (line 381) | fn sqrt(&self) -> CtOption<Self> {
method is_zero_vartime (line 401) | fn is_zero_vartime(&self) -> bool {
method cube (line 405) | fn cube(&self) -> Self {
method invert (line 409) | fn invert(&self) -> CtOption<Self> {
type Repr (line 415) | type Repr = FieldBytes;
constant NUM_BITS (line 417) | const NUM_BITS: u32 = 256;
constant CAPACITY (line 418) | const CAPACITY: u32 = 255;
constant S (line 419) | const S: u32 = 1;
method from_repr (line 421) | fn from_repr(bytes: FieldBytes) -> CtOption<Self> {
method to_repr (line 425) | fn to_repr(&self) -> FieldBytes {
method is_odd (line 429) | fn is_odd(&self) -> Choice {
method multiplicative_generator (line 437) | fn multiplicative_generator() -> Self {
method root_of_unity (line 441) | fn root_of_unity() -> Self {
method ct_eq (line 452) | fn ct_eq(&self, other: &Self) -> Choice {
method eq (line 462) | fn eq(&self, other: &Self) -> bool {
method conditional_select (line 468) | fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
constant MODULUS (line 481) | const MODULUS: FieldElement = FieldElement([
type Output (line 490) | type Output = FieldElement;
method neg (line 493) | fn neg(self) -> FieldElement {
type Output (line 499) | type Output = FieldElement;
method neg (line 502) | fn neg(self) -> FieldElement {
type Output (line 508) | type Output = FieldElement;
function sub (line 511) | fn sub(self, rhs: &'b FieldElement) -> FieldElement {
type Output (line 517) | type Output = FieldElement;
function add (line 520) | fn add(self, rhs: &'b FieldElement) -> FieldElement {
type Output (line 526) | type Output = FieldElement;
function mul (line 529) | fn mul(self, rhs: &'b FieldElement) -> FieldElement {
constant INV (line 538) | const INV: u64 = 0xd838091dd2253531;
constant R (line 542) | const R: FieldElement = FieldElement([
constant R2 (line 552) | const R2: FieldElement = FieldElement([
constant R3 (line 562) | const R3: FieldElement = FieldElement([
method default (line 572) | fn default() -> Self {
method zeroize (line 602) | fn zeroize(&mut self) {
method to_bytes (line 984) | fn to_bytes(&self) -> [u8; 32] {
method to_be_bytes (line 1002) | fn to_be_bytes(&self) -> [u8; 32] {
method from_bytes (line 1018) | fn from_bytes(bytes: &[u8; 32]) -> CtOption<Self> {
function from (line 1046) | fn from(value: &'a FieldElement) -> [u8; 32] {
function test_inv (line 1074) | fn test_inv() {
function test_debug (line 1090) | fn test_debug() {
function test_equality (line 1106) | fn test_equality() {
function test_to_bytes (line 1116) | fn test_to_bytes() {
function test_from_bytes (line 1135) | fn test_from_bytes() {
function test_from_u512_zero (line 1156) | fn test_from_u512_zero() {
function test_from_u512_r (line 1173) | fn test_from_u512_r() {
function test_from_u512_r2 (line 1178) | fn test_from_u512_r2() {
function test_from_u512_max (line 1183) | fn test_from_u512_max() {
function test_from_bytes_wide_r2 (line 1194) | fn test_from_bytes_wide_r2() {
function test_from_bytes_wide_negative_one (line 1206) | fn test_from_bytes_wide_negative_one() {
function test_zero (line 1220) | fn test_zero() {
constant LARGEST (line 1236) | const LARGEST: FieldElement = FieldElement([
function test_addition (line 1245) | fn test_addition() {
function test_negation (line 1266) | fn test_negation() {
function test_subtraction (line 1278) | fn test_subtraction() {
function test_multiplication (line 1294) | fn test_multiplication() {
function test_squaring (line 1323) | fn test_squaring() {
function test_inversion (line 1363) | fn test_inversion() {
function test_invert_is_pow (line 1388) | fn test_invert_is_pow() {
function test_from_raw (line 1415) | fn test_from_raw() {
function test_double (line 1435) | fn test_double() {
FILE: packages/secq256k1/src/field/field_secq.rs
function adc (line 25) | pub const fn adc(a: u64, b: u64, carry: u64) -> (u64, u64) {
function sbb (line 32) | pub const fn sbb(a: u64, b: u64, borrow: u64) -> (u64, u64) {
function mac (line 39) | pub const fn mac(a: u64, b: u64, c: u64, carry: u64) -> (u64, u64) {
type FieldElement (line 204) | pub struct FieldElement(pub(crate) [u64; 5]);
method deserialize (line 388) | fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
method fmt (line 397) | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
method from (line 408) | fn from(val: u64) -> FieldElement {
method product (line 632) | fn product<I>(iter: I) -> Self
method sum (line 644) | fn sum<I>(iter: I) -> Self
constant ZERO (line 659) | pub const ZERO: Self = Self([0, 0, 0, 0, 0]);
constant ONE (line 660) | pub const ONE: Self = R;
method pow2k (line 662) | fn pow2k(&self, k: usize) -> Self {
method zero (line 672) | pub const fn zero() -> FieldElement {
method one (line 678) | pub const fn one() -> FieldElement {
method random (line 682) | pub fn random<Rng: RngCore + CryptoRng>(rng: &mut Rng) -> Self {
method double (line 692) | pub const fn double(&self) -> FieldElement {
method from_bytes_wide (line 699) | pub fn from_bytes_wide(bytes: &[u8; 64]) -> FieldElement {
method from_u512 (line 712) | fn from_u512(limbs: [u64; 8]) -> FieldElement {
method from_raw (line 734) | pub const fn from_raw(val: [u64; 4]) -> Self {
method square (line 740) | pub const fn square(&self) -> FieldElement {
method pow_by_self (line 770) | pub fn pow_by_self(&self, exp: &Self) -> Self {
method pow (line 784) | pub fn pow(&self, by: &[u64; 4]) -> Self {
method invert (line 797) | pub fn invert(&self) -> CtOption<Self> {
method batch_invert (line 847) | pub fn batch_invert(inputs: &mut [FieldElement]) -> FieldElement {
method montgomery_reduce (line 897) | const fn montgomery_reduce(
method mul (line 950) | pub const fn mul(&self, rhs: &Self) -> Self {
method sub (line 988) | pub const fn sub(&self, rhs: &Self) -> Self {
method add (line 1008) | pub const fn add(&self, rhs: &Self) -> Self {
method neg (line 1022) | pub const fn neg(&self) -> Self {
method from_sec1 (line 1052) | pub fn from_sec1(bytes: FieldBytes) -> CtOption<Self> {
method to_sec1 (line 1060) | pub fn to_sec1(self) -> FieldBytes {
constant C1 (line 213) | const C1: u64 = 6;
constant C3 (line 216) | const C3: Self = FieldElement([
constant C4 (line 224) | const C4: Self = FieldElement([14644223128245760257, 1078510108991852875...
constant C5 (line 225) | const C5: Self = FieldElement([411004481505318880, 12260033118033672328,...
constant C6 (line 228) | const C6: Self = FieldElement([
constant C7 (line 237) | const C7: Self = FieldElement([
method sqrt_ratio (line 246) | fn sqrt_ratio(u: &Self, v: &Self) -> (Choice, Self) {
method from_bytes (line 285) | fn from_bytes(bytes: &[u8; 32]) -> CtOption<FieldElement> {
method to_bytes (line 313) | fn to_bytes(&self) -> [u8; 32] {
method to_be_bytes (line 331) | fn to_be_bytes(&self) -> [u8; 32] {
method serialize (line 349) | fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Er...
type U64ArrayVisitor (line 360) | struct U64ArrayVisitor;
type Value (line 363) | type Value = FieldElement;
method expecting (line 365) | fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::...
method visit_seq (line 369) | fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
method random (line 414) | fn random(mut rng: impl RngCore) -> Self {
method zero (line 425) | fn zero() -> Self {
method one (line 429) | fn one() -> Self {
method is_zero (line 433) | fn is_zero(&self) -> Choice {
method square (line 437) | fn square(&self) -> Self {
method double (line 441) | fn double(&self) -> Self {
method sqrt (line 445) | fn sqrt(&self) -> CtOption<Self> {
method is_zero_vartime (line 452) | fn is_zero_vartime(&self) -> bool {
method cube (line 456) | fn cube(&self) -> Self {
method invert (line 460) | fn invert(&self) -> CtOption<Self> {
type Repr (line 466) | type Repr = FieldBytes;
constant NUM_BITS (line 468) | const NUM_BITS: u32 = 256;
constant CAPACITY (line 469) | const CAPACITY: u32 = 255;
constant S (line 470) | const S: u32 = 1;
method from_repr (line 472) | fn from_repr(bytes: FieldBytes) -> CtOption<Self> {
method to_repr (line 476) | fn to_repr(&self) -> FieldBytes {
method is_odd (line 480) | fn is_odd(&self) -> Choice {
method multiplicative_generator (line 488) | fn multiplicative_generator() -> Self {
method root_of_unity (line 492) | fn root_of_unity() -> Self {
method ct_eq (line 503) | fn ct_eq(&self, other: &Self) -> Choice {
method eq (line 513) | fn eq(&self, other: &Self) -> bool {
method conditional_select (line 519) | fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
constant MODULUS (line 532) | const MODULUS: FieldElement = FieldElement([
type Output (line 541) | type Output = FieldElement;
method neg (line 544) | fn neg(self) -> FieldElement {
type Output (line 550) | type Output = FieldElement;
method neg (line 553) | fn neg(self) -> FieldElement {
type Output (line 559) | type Output = FieldElement;
function sub (line 562) | fn sub(self, rhs: &'b FieldElement) -> FieldElement {
type Output (line 568) | type Output = FieldElement;
function add (line 571) | fn add(self, rhs: &'b FieldElement) -> FieldElement {
type Output (line 577) | type Output = FieldElement;
function mul (line 580) | fn mul(self, rhs: &'b FieldElement) -> FieldElement {
constant INV (line 589) | const INV: u64 = 0x4b0dff665588b13f;
constant R (line 593) | const R: FieldElement = FieldElement([
constant R2 (line 603) | const R2: FieldElement = FieldElement([
constant R3 (line 613) | const R3: FieldElement = FieldElement([
method default (line 623) | fn default() -> Self {
method zeroize (line 653) | fn zeroize(&mut self) {
function from (line 1042) | fn from(value: &'a FieldElement) -> [u8; 32] {
function test_inv (line 1073) | fn test_inv() {
function test_debug (line 1089) | fn test_debug() {
function test_equality (line 1105) | fn test_equality() {
function test_to_bytes (line 1115) | fn test_to_bytes() {
function test_from_bytes (line 1134) | fn test_from_bytes() {
function test_from_u512_zero (line 1155) | fn test_from_u512_zero() {
function test_from_u512_r (line 1172) | fn test_from_u512_r() {
function test_from_u512_r2 (line 1177) | fn test_from_u512_r2() {
function test_from_u512_max (line 1182) | fn test_from_u512_max() {
function test_from_bytes_wide_r2 (line 1193) | fn test_from_bytes_wide_r2() {
function test_from_bytes_wide_negative_one (line 1205) | fn test_from_bytes_wide_negative_one() {
function test_zero (line 1217) | fn test_zero() {
constant LARGEST (line 1233) | const LARGEST: FieldElement = FieldElement([
function test_addition (line 1242) | fn test_addition() {
function test_negation (line 1263) | fn test_negation() {
function test_subtraction (line 1275) | fn test_subtraction() {
function test_multiplication (line 1291) | fn test_multiplication() {
function test_squaring (line 1320) | fn test_squaring() {
function test_inversion (line 1349) | fn test_inversion() {
function test_invert_is_pow (line 1375) | fn test_invert_is_pow() {
function test_from_raw (line 1402) | fn test_from_raw() {
function test_double (line 1417) | fn test_double() {
FILE: packages/secq256k1/src/field/mod.rs
type BaseField (line 6) | pub trait BaseField: PrimeField {
method to_bytes (line 7) | fn to_bytes(&self) -> [u8; 32];
method to_be_bytes (line 10) | fn to_be_bytes(&self) -> [u8; 32];
method from_bytes (line 11) | fn from_bytes(bytes: &[u8; 32]) -> CtOption<Self>;
type SqrtRatio (line 14) | pub trait SqrtRatio: BaseField {
constant C1 (line 15) | const C1: u64;
constant C3 (line 16) | const C3: Self;
constant C4 (line 17) | const C4: Self;
constant C5 (line 18) | const C5: Self;
constant C6 (line 19) | const C6: Self;
constant C7 (line 20) | const C7: Self;
method sqrt_ratio (line 22) | fn sqrt_ratio(u: &Self, v: &Self) -> (Choice, Self);
FILE: packages/secq256k1/src/hashtocurve.rs
function hash_to_curve (line 5) | pub fn hash_to_curve<F: BaseField + SqrtRatio>(
function iso_map (line 24) | fn iso_map<F: BaseField + SqrtRatio>(x: F, y: F, k: [F; 13]) -> (F, F) {
function map_to_curve_simple_swu (line 42) | fn map_to_curve_simple_swu<F: BaseField + SqrtRatio>(u: F, curve_a: F, c...
type F (line 85) | type F = FieldElement;
constant ISO_A (line 90) | const ISO_A: F = FieldElement([
constant ISO_B (line 99) | const ISO_B: F = FieldElement([7606388811483, 0, 0, 0, 0]);
constant ISO_Z (line 102) | const ISO_Z: F = FieldElement([
constant ISO_CONSTANTS (line 110) | const ISO_CONSTANTS: [F; 13] = [
type TestSuite (line 191) | struct TestSuite {
method new (line 199) | fn new(u1: [u8; 32], u2: [u8; 32], px: [u8; 32], py: [u8; 32]) -> Self {
function assert_hash_to_curve (line 204) | fn assert_hash_to_curve(u1: FieldElement, u2: FieldElement, expected: Af...
function test_secp_hash_to_curve (line 224) | fn test_secp_hash_to_curve() {
FILE: packages/secq256k1/src/lib.rs
type EncodedPoint (line 16) | pub type EncodedPoint = primeorder::elliptic_curve::sec1::EncodedPoint<S...
type FieldBytes (line 17) | pub type FieldBytes = primeorder::elliptic_curve::FieldBytes<Secq256K1>;
type ProjectivePoint (line 18) | pub type ProjectivePoint = primeorder::ProjectivePoint<Secq256K1>;
constant ORDER (line 20) | pub const ORDER: U256 =
type Secq256K1 (line 24) | pub struct Secq256K1;
type UInt (line 27) | type UInt = U256;
constant ORDER (line 29) | const ORDER: U256 =
type FieldElement (line 34) | type FieldElement = FieldElement;
constant ZERO (line 36) | const ZERO: FieldElement = FieldElement::ZERO;
constant ONE (line 37) | const ONE: FieldElement = FieldElement::ONE;
constant EQUATION_A (line 39) | const EQUATION_A: FieldElement = FieldElement::ZERO;
constant EQUATION_B (line 41) | const EQUATION_B: FieldElement =
constant GENERATOR (line 44) | const GENERATOR: (FieldElement, FieldElement) = (
type AffinePoint (line 67) | type AffinePoint = AffinePointCore;
type ProjectivePoint (line 71) | type ProjectivePoint = ProjectivePoint;
type Scalar (line 75) | type Scalar = Scalar;
FILE: packages/secq256k1/src/scalar.rs
type ScalarCore (line 23) | type ScalarCore = primeorder::elliptic_curve::ScalarCore<Secq256K1>;
type Scalar (line 26) | pub struct Scalar(pub ScalarCore);
type Output (line 113) | type Output = Scalar;
method add (line 115) | fn add(self, other: Scalar) -> Scalar {
type Output (line 121) | type Output = Scalar;
method add (line 123) | fn add(self, other: &Scalar) -> Scalar {
method add_assign (line 129) | fn add_assign(&mut self, other: Scalar) {
method add_assign (line 135) | fn add_assign(&mut self, other: &Scalar) {
type Output (line 141) | type Output = Scalar;
method sub (line 143) | fn sub(self, other: Scalar) -> Scalar {
type Output (line 149) | type Output = Scalar;
method sub (line 151) | fn sub(self, other: &Scalar) -> Scalar {
method sub_assign (line 157) | fn sub_assign(&mut self, other: Scalar) {
method sub_assign (line 163) | fn sub_assign(&mut self, other: &Scalar) {
type Output (line 169) | type Output = Scalar;
method mul (line 171) | fn mul(self, other: Scalar) -> Scalar {
type Output (line 180) | type Output = Scalar;
method mul (line 182) | fn mul(self, other: &Scalar) -> Scalar {
method mul_assign (line 188) | fn mul_assign(&mut self, rhs: Scalar) {
method mul_assign (line 194) | fn mul_assign(&mut self, rhs: &Scalar) {
method sum (line 214) | fn sum<I: Iterator<Item = &'a Scalar>>(_iter: I) -> Self {
method product (line 226) | fn product<I: Iterator<Item = &'a Scalar>>(_iter: I) -> Self {
method from_uint_reduced (line 232) | fn from_uint_reduced(w: U256) -> Self {
type Error (line 241) | type Error = Error;
method try_from (line 243) | fn try_from(w: U256) -> Result<Self> {
method from (line 255) | fn from(n: u64) -> Scalar {
method from (line 261) | fn from(scalar: ScalarCore) -> Scalar {
constant ZERO (line 291) | pub const ZERO: Scalar = Scalar(ScalarCore::ZERO);
constant ONE (line 292) | pub const ONE: Scalar = Scalar(ScalarCore::ONE);
method to_bytes (line 294) | pub fn to_bytes(&self) -> [u8; 32] {
method from (line 300) | fn from(n: u32) -> Scalar {
method one (line 29) | fn one() -> Self {
method zero (line 33) | fn zero() -> Self {
method random (line 37) | fn random(mut rng: impl RngCore) -> Self {
method is_zero (line 48) | fn is_zero(&self) -> Choice {
method square (line 53) | fn square(&self) -> Self {
method double (line 58) | fn double(&self) -> Self {
method invert (line 62) | fn invert(&self) -> CtOption<Self> {
method sqrt (line 66) | fn sqrt(&self) -> CtOption<Self> {
type Repr (line 72) | type Repr = FieldBytes;
constant NUM_BITS (line 74) | const NUM_BITS: u32 = 256;
constant CAPACITY (line 75) | const CAPACITY: u32 = 255;
constant S (line 76) | const S: u32 = 4;
method from_repr (line 78) | fn from_repr(bytes: FieldBytes) -> CtOption<Self> {
method to_repr (line 82) | fn to_repr(&self) -> FieldBytes {
method is_odd (line 86) | fn is_odd(&self) -> Choice {
method multiplicative_generator (line 90) | fn multiplicative_generator() -> Self {
method root_of_unity (line 94) | fn root_of_unity() -> Self {
method ct_eq (line 107) | fn ct_eq(&self, other: &Self) -> Choice {
type Output (line 200) | type Output = Scalar;
method neg (line 202) | fn neg(self) -> Scalar {
method sum (line 208) | fn sum<I: Iterator<Item = Self>>(_iter: I) -> Self {
method product (line 220) | fn product<I: Iterator<Item = Self>>(_iter: I) -> Self {
method from (line 249) | fn from(scalar: Scalar) -> U256 {
method from (line 267) | fn from(scalar: Scalar) -> Self {
method from (line 273) | fn from(scalar: &Scalar) -> Self {
method is_high (line 279) | fn is_high(&self) -> Choice {
method conditional_select (line 285) | fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
type ReprBits (line 306) | type ReprBits = [u8; 32];
method to_le_bits (line 308) | fn to_le_bits(&self) -> ff::FieldBits<Self::ReprBits> {
method char_le_bits (line 312) | fn char_le_bits() -> ff::FieldBits<Self::ReprBits> {
function add (line 322) | fn add() {
function test_all (line 330) | fn test_all() {
FILE: packages/spartan_wasm/src/wasm.rs
type G1 (line 11) | pub type G1 = secq256k1::AffinePoint;
type F1 (line 12) | pub type F1 = <G1 as Group>::Scalar;
function init_panic_hook (line 15) | pub fn init_panic_hook() {
function prove (line 20) | pub fn prove(circuit: &[u8], vars: &[u8], public_inputs: &[u8]) -> Resul...
function verify (line 58) | pub fn verify(circuit: &[u8], proof: &[u8], public_input: &[u8]) -> Resu...
function poseidon (line 86) | pub fn poseidon(input_bytes: &[u8]) -> Result<Vec<u8>, JsValue> {
function read_field (line 100) | pub fn read_field<R: Read, Fr: PrimeField>(mut reader: R) -> Result<Fr, ...
function load_witness_from_bin_reader (line 110) | pub fn load_witness_from_bin_reader<Fr: PrimeField, R: Read>(
function check_nizk (line 169) | fn check_nizk() {
function test_poseidon (line 197) | fn test_poseidon() {
FILE: packages/spartan_wasm/test_circuit/test_circuit_js/witness_calculator.js
function getMessage (line 81) | function getMessage() {
function printSharedRWMemory (line 91) | function printSharedRWMemory () {
class WitnessCalculator (line 108) | class WitnessCalculator {
method constructor (line 109) | constructor(instance, sanityCheck) {
method circom_version (line 127) | circom_version() {
method _doCalculateWitness (line 131) | async _doCalculateWitness(input, sanityCheck) {
method calculateWitness (line 171) | async calculateWitness(input, sanityCheck) {
method calculateBinWitness (line 190) | async calculateBinWitness(input, sanityCheck) {
method calculateWTNSBin (line 208) | async calculateWTNSBin(input, sanityCheck) {
function toArray32 (line 277) | function toArray32(rem,size) {
function fromArray32 (line 294) | function fromArray32(arr) { //returns a BigInt
function flatArray (line 303) | function flatArray(a) {
function normalize (line 319) | function normalize(n, prime) {
function fnvHash (line 325) | function fnvHash(str) {
Condensed preview — 165 files, each entry showing the file path, character count, and a content snippet. Download the .json file or copy it to your clipboard to get the full structured content (733K chars).
[
{
"path": ".cargo/config",
"chars": 173,
"preview": "[target.wasm32-unknown-unknown]\nrustflags = [\"-C\", \"link-arg=--max-memory=4294967296\"]\n\n[unstable]\nbuild-std = [\"panic_a"
},
{
"path": ".eslintignore",
"chars": 13,
"preview": "wasm_bytes.ts"
},
{
"path": ".eslintrc.js",
"chars": 698,
"preview": "/* eslint-disable no-undef */\nmodule.exports = {\n root: true,\n extends: [\n \"eslint:recommended\",\n \"plugin:react/"
},
{
"path": ".github/workflows/publish.yaml",
"chars": 1183,
"preview": "name: Publish Package to npmjs\non:\n release:\n types: [published]\n workflow_dispatch:\n\njobs:\n publish:\n runs-on:"
},
{
"path": ".gitignore",
"chars": 481,
"preview": "# Generated by Cargo\n# will have compiled files and executables\n/target/\n\n\n# These are backup files generated by rustfmt"
},
{
"path": ".prettierignore",
"chars": 13,
"preview": "wasm_bytes.ts"
},
{
"path": ".prettierrc.json",
"chars": 147,
"preview": "{\n \"trailingComma\": \"none\",\n \"tabWidth\": 2,\n \"semi\": true,\n \"singleQuote\": false,\n \"arrowParens\": \"avoid\""
},
{
"path": ".vscode/settings.json",
"chars": 97,
"preview": "{\n \"editor.formatOnSave\": true,\n \"cSpell.words\": [\n \"merkle\",\n \"NIZK\"\n ]\n}"
},
{
"path": "Cargo.toml",
"chars": 165,
"preview": "[workspace]\nmembers = [\n \"packages/spartan_wasm\",\n \"packages/secq256k1\",\n \"packages/poseidon\",\n \"packages/Sp"
},
{
"path": "README.md",
"chars": 3412,
"preview": "# Spartan-ecdsa\n\nSpartan-ecdsa (which to our knowledge) is the fastest open-source method to verify ECDSA (secp256k1) si"
},
{
"path": "lerna.json",
"chars": 111,
"preview": "{\n \"$schema\": \"node_modules/lerna/schemas/lerna-schema.json\",\n \"useWorkspaces\": true,\n \"version\": \"0.0.0\"\n}\n"
},
{
"path": "package.json",
"chars": 735,
"preview": "{\n \"private\": true,\n \"name\": \"spartan-ecdsa-monorepo\",\n \"version\": \"1.0.0\",\n \"main\": \"index.js\",\n \"repository\": \"ht"
},
{
"path": "packages/Spartan-secq/CODE_OF_CONDUCT.md",
"chars": 444,
"preview": "# Microsoft Open Source Code of Conduct\n\nThis project has adopted the [Microsoft Open Source Code of Conduct](https://op"
},
{
"path": "packages/Spartan-secq/CONTRIBUTING.md",
"chars": 904,
"preview": "This project welcomes contributions and suggestions. Most contributions require you to\nagree to a Contributor License Ag"
},
{
"path": "packages/Spartan-secq/Cargo.toml",
"chars": 1264,
"preview": "[package]\nname = \"spartan\"\nversion = \"0.7.1\"\nauthors = [\"Srinath Setty <srinath@microsoft.com>\"]\nedition = \"2021\"\ndescri"
},
{
"path": "packages/Spartan-secq/LICENSE",
"chars": 1141,
"preview": " MIT License\n\n Copyright (c) Microsoft Corporation.\n\n Permission is hereby granted, free of charge, to any pers"
},
{
"path": "packages/Spartan-secq/README.md",
"chars": 420,
"preview": "## Fork of [Spartan](https://github.com/microsoft/Spartan)\n_This fork is still under development._\n\nModify Spartan to op"
},
{
"path": "packages/Spartan-secq/SECURITY.md",
"chars": 2824,
"preview": "<!-- BEGIN MICROSOFT SECURITY.MD V0.0.3 BLOCK -->\n\n## Security\n\nMicrosoft takes the security of our software products an"
},
{
"path": "packages/Spartan-secq/benches/nizk.rs",
"chars": 2639,
"preview": "#![allow(clippy::assertions_on_result_states)]\nextern crate byteorder;\nextern crate core;\nextern crate criterion;\nextern"
},
{
"path": "packages/Spartan-secq/benches/snark.rs",
"chars": 3850,
"preview": "#![allow(clippy::assertions_on_result_states)]\nextern crate libspartan;\nextern crate merlin;\n\nuse libspartan::{Instance,"
},
{
"path": "packages/Spartan-secq/examples/cubic.rs",
"chars": 4351,
"preview": "//! Demonstrates how to produces a proof for canonical cubic equation: `x^3 + x + 5 = y`.\n//! The example is described i"
},
{
"path": "packages/Spartan-secq/profiler/nizk.rs",
"chars": 1660,
"preview": "#![allow(non_snake_case)]\n#![allow(clippy::assertions_on_result_states)]\n\nextern crate flate2;\nextern crate libspartan;\n"
},
{
"path": "packages/Spartan-secq/profiler/snark.rs",
"chars": 1822,
"preview": "#![allow(non_snake_case)]\n#![allow(clippy::assertions_on_result_states)]\n\nextern crate flate2;\nextern crate libspartan;\n"
},
{
"path": "packages/Spartan-secq/rustfmt.toml",
"chars": 80,
"preview": "edition = \"2018\"\ntab_spaces = 2\nnewline_style = \"Unix\"\nuse_try_shorthand = true\n"
},
{
"path": "packages/Spartan-secq/src/bin/mont_params.rs",
"chars": 1488,
"preview": "use hex_literal::hex;\nuse num_bigint_dig::{BigInt, BigUint, ModInverse, ToBigInt};\nuse num_traits::{FromPrimitive, ToPri"
},
{
"path": "packages/Spartan-secq/src/commitments.rs",
"chars": 2398,
"preview": "use super::group::{GroupElement, VartimeMultiscalarMul};\nuse super::scalar::Scalar;\nuse digest::{ExtendableOutput, Input"
},
{
"path": "packages/Spartan-secq/src/dense_mlpoly.rs",
"chars": 16359,
"preview": "#![allow(clippy::too_many_arguments)]\nuse super::commitments::{Commitments, MultiCommitGens};\nuse super::errors::ProofVe"
},
{
"path": "packages/Spartan-secq/src/errors.rs",
"chars": 990,
"preview": "use core::fmt::Debug;\nuse thiserror::Error;\n\n#[derive(Error, Debug)]\npub enum ProofVerifyError {\n #[error(\"Proof verifi"
},
{
"path": "packages/Spartan-secq/src/group.rs",
"chars": 3793,
"preview": "use secq256k1::{AffinePoint, ProjectivePoint};\n\nuse super::errors::ProofVerifyError;\nuse super::scalar::{Scalar, ScalarB"
},
{
"path": "packages/Spartan-secq/src/lib.rs",
"chars": 21846,
"preview": "#![allow(non_snake_case)]\n#![doc = include_str!(\"../README.md\")]\n#![deny(missing_docs)]\n#![allow(clippy::assertions_on_r"
},
{
"path": "packages/Spartan-secq/src/math.rs",
"chars": 835,
"preview": "pub trait Math {\n fn square_root(self) -> usize;\n fn pow2(self) -> usize;\n fn get_bits(self, num_bits: usize) -> Vec<"
},
{
"path": "packages/Spartan-secq/src/nizk/bullet.rs",
"chars": 8067,
"preview": "//! This module is an adaptation of code from the bulletproofs crate.\n//! See NOTICE.md for more details\n#![allow(non_sn"
},
{
"path": "packages/Spartan-secq/src/nizk/mod.rs",
"chars": 19342,
"preview": "#![allow(clippy::too_many_arguments)]\nuse super::commitments::{Commitments, MultiCommitGens};\nuse super::errors::ProofVe"
},
{
"path": "packages/Spartan-secq/src/product_tree.rs",
"chars": 15743,
"preview": "#![allow(dead_code)]\nuse super::dense_mlpoly::DensePolynomial;\nuse super::dense_mlpoly::EqPolynomial;\nuse super::math::M"
},
{
"path": "packages/Spartan-secq/src/r1csinstance.rs",
"chars": 10565,
"preview": "use crate::transcript::AppendToTranscript;\n\nuse super::dense_mlpoly::DensePolynomial;\nuse super::errors::ProofVerifyErro"
},
{
"path": "packages/Spartan-secq/src/r1csproof.rs",
"chars": 19087,
"preview": "#![allow(clippy::too_many_arguments)]\nuse super::commitments::{Commitments, MultiCommitGens};\nuse super::dense_mlpoly::{"
},
{
"path": "packages/Spartan-secq/src/random.rs",
"chars": 680,
"preview": "use super::scalar::Scalar;\nuse super::transcript::ProofTranscript;\nuse merlin::Transcript;\nuse rand_core::OsRng;\npub str"
},
{
"path": "packages/Spartan-secq/src/scalar/mod.rs",
"chars": 1003,
"preview": "use secq256k1::elliptic_curve::ops::Reduce;\nuse secq256k1::U256;\n\nmod scalar;\n\npub type Scalar = scalar::Scalar;\npub typ"
},
{
"path": "packages/Spartan-secq/src/scalar/scalar.rs",
"chars": 33750,
"preview": "//! This module provides an implementation of the secq256k1's scalar field $\\mathbb{F}_q$\n//! where `q = 0xfffffffffffff"
},
{
"path": "packages/Spartan-secq/src/sparse_mlpoly.rs",
"chars": 53321,
"preview": "#![allow(clippy::type_complexity)]\n#![allow(clippy::too_many_arguments)]\n#![allow(clippy::needless_range_loop)]\nuse supe"
},
{
"path": "packages/Spartan-secq/src/sumcheck.rs",
"chars": 25873,
"preview": "#![allow(clippy::too_many_arguments)]\n#![allow(clippy::type_complexity)]\nuse super::commitments::{Commitments, MultiComm"
},
{
"path": "packages/Spartan-secq/src/timer.rs",
"chars": 1832,
"preview": "#[cfg(feature = \"profile\")]\nuse colored::Colorize;\n#[cfg(feature = \"profile\")]\nuse core::sync::atomic::AtomicUsize;\n#[cf"
},
{
"path": "packages/Spartan-secq/src/transcript.rs",
"chars": 2073,
"preview": "use super::group::CompressedGroup;\nuse super::scalar::Scalar;\nuse merlin::Transcript;\n\npub trait ProofTranscript {\n fn "
},
{
"path": "packages/Spartan-secq/src/unipoly.rs",
"chars": 5542,
"preview": "use super::commitments::{Commitments, MultiCommitGens};\nuse super::group::GroupElement;\nuse super::scalar::{Scalar, Scal"
},
{
"path": "packages/benchmark/node/LICENSE",
"chars": 1075,
"preview": "MIT License\n\nCopyright (c) 2022 Ethereum Foundation\n\nPermission is hereby granted, free of charge, to any person obtaini"
},
{
"path": "packages/benchmark/node/README.md",
"chars": 115,
"preview": "## Node.js\n\nRecommended: v18 or later\n\n## Install dependencies\n\n```\nyarn\n```\n\n## Run benchmark\n\n```\nyarn bench\n```\n"
},
{
"path": "packages/benchmark/node/package.json",
"chars": 347,
"preview": "{\n \"name\": \"node\",\n \"version\": \"1.0.0\",\n \"main\": \"node.bench.ts\",\n \"license\": \"MIT\",\n \"scripts\": {\n \"bench\": \"ts"
},
{
"path": "packages/benchmark/node/src/node.bench.ts",
"chars": 244,
"preview": "import benchPubKeyMembership from \"./node.bench_pubkey_membership\";\nimport benchAddressMembership from \"./node.bench_add"
},
{
"path": "packages/benchmark/node/src/node.bench_addr_membership.ts",
"chars": 2155,
"preview": "import {\n hashPersonalMessage,\n privateToAddress,\n ecsign\n} from \"@ethereumjs/util\";\nimport {\n Tree,\n Poseidon,\n M"
},
{
"path": "packages/benchmark/node/src/node.bench_pubkey_membership.ts",
"chars": 2158,
"preview": "import {\n MembershipProver,\n Poseidon,\n Tree,\n MembershipVerifier\n} from \"@personaelabs/spartan-ecdsa\";\nimport {\n h"
},
{
"path": "packages/benchmark/node/tsconfig.json",
"chars": 632,
"preview": "{\n \"include\": [\n \"./src/**/*\",\n ],\n \"exclude\": [\n \"./node_modules\",\n \"./build\"\n ],\n \"compilerOptions\": {"
},
{
"path": "packages/benchmark/web/.vscode/settings.json",
"chars": 143,
"preview": "{\n \"editor.formatOnSave\": true,\n \"editor.defaultFormatter\": \"esbenp.prettier-vscode\",\n \"cSpell.words\": [\"layout"
},
{
"path": "packages/benchmark/web/LICENSE",
"chars": 1075,
"preview": "MIT License\n\nCopyright (c) 2022 Ethereum Foundation\n\nPermission is hereby granted, free of charge, to any person obtaini"
},
{
"path": "packages/benchmark/web/README.md",
"chars": 114,
"preview": "## Node.js\n\nRecommended: v18 or later\n\n### Install dependencies\n\n```\nyarn\n```\n\n### Start server\n\n```\nyarn dev\n```\n"
},
{
"path": "packages/benchmark/web/next.config.js",
"chars": 611,
"preview": "/** @type {import('next').NextConfig} */\nconst nextConfig = {\n reactStrictMode: true,\n swcMinify: true,\n webpack: con"
},
{
"path": "packages/benchmark/web/package.json",
"chars": 721,
"preview": "{\n \"name\": \"spartan-bench\",\n \"version\": \"0.1.0\",\n \"private\": true,\n \"scripts\": {\n \"dev\": \"next dev\",\n \"build\":"
},
{
"path": "packages/benchmark/web/pages/_app.tsx",
"chars": 150,
"preview": "import type { AppProps } from \"next/app\";\n\nexport default function App({ Component, pageProps }: AppProps) {\n return <C"
},
{
"path": "packages/benchmark/web/pages/index.tsx",
"chars": 4653,
"preview": "import { useState } from \"react\";\nimport {\n MembershipProver,\n MembershipVerifier,\n Tree,\n Poseidon,\n defaultAddres"
},
{
"path": "packages/benchmark/web/tsconfig.json",
"chars": 558,
"preview": "{\n \"compilerOptions\": {\n \"target\": \"es5\",\n \"lib\": [\n \"dom\",\n \"dom.iterable\",\n \"esnext\"\n ],\n "
},
{
"path": "packages/circuit_reader/Cargo.toml",
"chars": 424,
"preview": "[package]\nname = \"circuit_reader\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n# See more keys and their definitions at https://d"
},
{
"path": "packages/circuit_reader/src/bin/gen_spartan_inst.rs",
"chars": 786,
"preview": "#![allow(non_snake_case)]\nuse bincode;\nuse circuit_reader::load_as_spartan_inst;\nuse std::env::{args, current_dir};\nuse "
},
{
"path": "packages/circuit_reader/src/circom_reader.rs",
"chars": 6782,
"preview": "// Code borrowed from Nova-Scotia https://github.com/nalinbhardwaj/Nova-Scotia\nuse byteorder::{LittleEndian, ReadBytesEx"
},
{
"path": "packages/circuit_reader/src/lib.rs",
"chars": 1490,
"preview": "mod circom_reader;\n\nuse circom_reader::{load_r1cs_from_bin_file, R1CS};\nuse ff::PrimeField;\nuse libspartan::Instance;\nus"
},
{
"path": "packages/circuits/LICENSE",
"chars": 35148,
"preview": " GNU GENERAL PUBLIC LICENSE\n Version 3, 29 June 2007\n\n Copyright (C) 2007 Free "
},
{
"path": "packages/circuits/README.md",
"chars": 445,
"preview": "## Node.js\n\nRecommended: v18 or later\n\n## Install dependencies\n\n```\nyarn\n```\n\n## Run tests\n\nInstall [this](https://githu"
},
{
"path": "packages/circuits/eff_ecdsa_membership/addr_membership.circom",
"chars": 1687,
"preview": "pragma circom 2.1.2;\n\ninclude \"./eff_ecdsa.circom\";\ninclude \"./tree.circom\";\ninclude \"./to_address/zk-identity/eth.circo"
},
{
"path": "packages/circuits/eff_ecdsa_membership/eff_ecdsa.circom",
"chars": 914,
"preview": "pragma circom 2.1.2;\n\ninclude \"./secp256k1/mul.circom\";\ninclude \"../../../node_modules/circomlib/circuits/bitify.circom\""
},
{
"path": "packages/circuits/eff_ecdsa_membership/eff_ecdsa_to_addr.circom",
"chars": 1026,
"preview": "pragma circom 2.1.2;\n\ninclude \"./eff_ecdsa.circom\";\ninclude \"./to_address/zk-identity/eth.circom\";\n\n/**\n * EfficientECD"
},
{
"path": "packages/circuits/eff_ecdsa_membership/pubkey_membership.circom",
"chars": 1338,
"preview": "pragma circom 2.1.2;\n\ninclude \"./eff_ecdsa.circom\";\ninclude \"./tree.circom\";\ninclude \"../poseidon/poseidon.circom\";\n\n/**"
},
{
"path": "packages/circuits/eff_ecdsa_membership/secp256k1/add.circom",
"chars": 3864,
"preview": "pragma circom 2.1.2;\n\ninclude \"../../../../node_modules/circomlib/circuits/comparators.circom\";\ninclude \"../../../../nod"
},
{
"path": "packages/circuits/eff_ecdsa_membership/secp256k1/double.circom",
"chars": 557,
"preview": "pragma circom 2.1.2;\n\n/**\n * Secp256k1Double\n * ===============\n *\n * Double a specific point (xP, yP) on the secp256"
},
{
"path": "packages/circuits/eff_ecdsa_membership/secp256k1/mul.circom",
"chars": 6025,
"preview": "pragma circom 2.1.2;\n\ninclude \"./add.circom\";\ninclude \"./double.circom\";\ninclude \"../../../../node_modules/circomlib/cir"
},
{
"path": "packages/circuits/eff_ecdsa_membership/to_address/vocdoni-keccak/keccak.circom",
"chars": 3937,
"preview": "pragma circom 2.0.2;\n\ninclude \"./utils.circom\";\ninclude \"./permutations.circom\";\n\ntemplate Pad(nBits) {\n signal input"
},
{
"path": "packages/circuits/eff_ecdsa_membership/to_address/vocdoni-keccak/permutations.circom",
"chars": 20264,
"preview": "pragma circom 2.0.2;\n\ninclude \"./utils.circom\";\n\n\n// Theta\n\ntemplate D(n, shl, shr) {\n // d = b ^ (a<<shl | a>>shr)\n "
},
{
"path": "packages/circuits/eff_ecdsa_membership/to_address/vocdoni-keccak/utils.circom",
"chars": 2335,
"preview": "pragma circom 2.0.2;\n\ninclude \"../../../../../node_modules/circomlib/circuits/gates.circom\";\ninclude \"../../../../../nod"
},
{
"path": "packages/circuits/eff_ecdsa_membership/to_address/zk-identity/eth.circom",
"chars": 2868,
"preview": "pragma circom 2.0.2;\n\ninclude \"../vocdoni-keccak/keccak.circom\";\n\ninclude \"../../../../../node_modules/circomlib/circuit"
},
{
"path": "packages/circuits/eff_ecdsa_membership/tree.circom",
"chars": 1308,
"preview": "pragma circom 2.1.2;\ninclude \"../poseidon/poseidon.circom\";\ninclude \"../../../node_modules/circomlib/circuits/mux1.circo"
},
{
"path": "packages/circuits/instances/addr_membership.circom",
"chars": 151,
"preview": "pragma circom 2.1.2;\n\ninclude \"../eff_ecdsa_membership/addr_membership.circom\";\n\ncomponent main { public[ root, Tx, Ty, "
},
{
"path": "packages/circuits/instances/pubkey_membership.circom",
"chars": 155,
"preview": "pragma circom 2.1.2;\n\ninclude \"../eff_ecdsa_membership/pubkey_membership.circom\";\n\ncomponent main { public[ root, Tx, Ty"
},
{
"path": "packages/circuits/jest.config.js",
"chars": 146,
"preview": "/** @type {import('ts-jest').JestConfigWithTsJest} */\nmodule.exports = {\n preset: 'ts-jest',\n testEnvironment: 'node',"
},
{
"path": "packages/circuits/package.json",
"chars": 505,
"preview": "{\n \"name\": \"@personaelabs/spartan-ecdsa-circuits\",\n \"version\": \"0.1.0\",\n \"main\": \"index.js\",\n \"license\": \"MIT\",\n \"d"
},
{
"path": "packages/circuits/poseidon/poseidon.circom",
"chars": 3801,
"preview": "pragma circom 2.1.2;\n\ninclude \"./poseidon_constants.circom\";\n\ntemplate SBox() {\n signal input in;\n signal output o"
},
{
"path": "packages/circuits/poseidon/poseidon_constants.circom",
"chars": 16933,
"preview": "pragma circom 2.1.2;\n\nfunction ROUND_KEYS() {\n return [\n 151805686049018032439891559299344379972459527750713953859"
},
{
"path": "packages/circuits/tests/addr_membership.test.ts",
"chars": 2136,
"preview": "const wasm_tester = require(\"circom_tester\").wasm;\nvar EC = require(\"elliptic\").ec;\nimport * as path from \"path\";\nconst "
},
{
"path": "packages/circuits/tests/circuits/add_complete_test.circom",
"chars": 122,
"preview": "pragma circom 2.1.2;\n\ninclude \"../../eff_ecdsa_membership/secp256k1/add.circom\";\n\ncomponent main = Secp256k1AddComplete("
},
{
"path": "packages/circuits/tests/circuits/add_incomplete_test.circom",
"chars": 124,
"preview": "pragma circom 2.1.2;\n\ninclude \"../../eff_ecdsa_membership/secp256k1/add.circom\";\n\ncomponent main = Secp256k1AddIncomplet"
},
{
"path": "packages/circuits/tests/circuits/addr_membership_test.circom",
"chars": 120,
"preview": "pragma circom 2.1.2;\n\ninclude \"../../eff_ecdsa_membership/addr_membership.circom\";\n\ncomponent main = AddrMembership(10);"
},
{
"path": "packages/circuits/tests/circuits/double_test.circom",
"chars": 120,
"preview": "pragma circom 2.1.2;\n\ninclude \"../../eff_ecdsa_membership/secp256k1/double.circom\";\n\ncomponent main = Secp256k1Double();"
},
{
"path": "packages/circuits/tests/circuits/eff_ecdsa_test.circom",
"chars": 140,
"preview": "pragma circom 2.1.2;\n\ninclude \"../../eff_ecdsa_membership/eff_ecdsa.circom\";\n\ncomponent main { public[ Tx, Ty, Ux, Uy ]}"
},
{
"path": "packages/circuits/tests/circuits/eff_ecdsa_to_addr_test.circom",
"chars": 154,
"preview": "pragma circom 2.1.2;\n\ninclude \"../../eff_ecdsa_membership/eff_ecdsa_to_addr.circom\";\n\ncomponent main { public[ Tx, Ty, U"
},
{
"path": "packages/circuits/tests/circuits/k_test.circom",
"chars": 103,
"preview": "pragma circom 2.1.2;\n\ninclude \"../../eff_ecdsa_membership/secp256k1/mul.circom\";\n\ncomponent main = K();"
},
{
"path": "packages/circuits/tests/circuits/mul_test.circom",
"chars": 114,
"preview": "pragma circom 2.1.2;\n\ninclude \"../../eff_ecdsa_membership/secp256k1/mul.circom\";\n\ncomponent main = Secp256k1Mul();"
},
{
"path": "packages/circuits/tests/circuits/poseidon_test.circom",
"chars": 93,
"preview": "pragma circom 2.1.2;\n\ninclude \"../../poseidon/poseidon.circom\";\n\ncomponent main = Poseidon();"
},
{
"path": "packages/circuits/tests/circuits/pubkey_membership_test.circom",
"chars": 124,
"preview": "pragma circom 2.1.2;\n\ninclude \"../../eff_ecdsa_membership/pubkey_membership.circom\";\n\ncomponent main = PubKeyMembership("
},
{
"path": "packages/circuits/tests/eff_ecdsa.test.ts",
"chars": 1001,
"preview": "const wasm_tester = require(\"circom_tester\").wasm;\nvar EC = require(\"elliptic\").ec;\nimport * as path from \"path\";\nimport"
},
{
"path": "packages/circuits/tests/eff_ecdsa_to_addr.test.ts",
"chars": 1117,
"preview": "const wasm_tester = require(\"circom_tester\").wasm;\nvar EC = require(\"elliptic\").ec;\nimport * as path from \"path\";\nimport"
},
{
"path": "packages/circuits/tests/poseidon.test.ts",
"chars": 842,
"preview": "const wasm_tester = require(\"circom_tester\").wasm;\nimport * as path from \"path\";\n\ndescribe(\"poseidon\", () => {\n it(\"sho"
},
{
"path": "packages/circuits/tests/pubkey_membership.test.ts",
"chars": 2158,
"preview": "const wasm_tester = require(\"circom_tester\").wasm;\nvar EC = require(\"elliptic\").ec;\nimport * as path from \"path\";\nimport"
},
{
"path": "packages/circuits/tests/secp256k1.test.ts",
"chars": 8464,
"preview": "const wasm_tester = require(\"circom_tester\").wasm;\nvar EC = require(\"elliptic\").ec;\nimport * as path from \"path\";\nconst "
},
{
"path": "packages/circuits/tests/test_utils.ts",
"chars": 730,
"preview": "import { hashPersonalMessage, ecsign } from \"@ethereumjs/util\";\nimport { computeEffEcdsaPubInput } from \"@personaelabs/s"
},
{
"path": "packages/lib/.npmignore",
"chars": 85,
"preview": "/node_modules\n/src\n/tests\ntsconfig.json\njest.config.js\ncopy_artifacts.sh\nload_wasm.ts"
},
{
"path": "packages/lib/LICENSE",
"chars": 35148,
"preview": " GNU GENERAL PUBLIC LICENSE\n Version 3, 29 June 2007\n\n Copyright (C) 2007 Free "
},
{
"path": "packages/lib/README.md",
"chars": 4149,
"preview": "# Spartan-ecdsa\n\nSpartan-ecdsa (which to our knowledge) is the fastest open-source method to verify ECDSA (secp256k1) si"
},
{
"path": "packages/lib/embedWasmBytes.ts",
"chars": 439,
"preview": "import * as fs from \"fs\";\n\n/**\n * Load the wasm file and output a typescript file with the wasm bytes embedded\n */\nconst"
},
{
"path": "packages/lib/jest.config.js",
"chars": 264,
"preview": "/** @type {import('ts-jest').JestConfigWithTsJest} */\nmodule.exports = {\n preset: 'ts-jest',\n testEnvironment: 'node',"
},
{
"path": "packages/lib/package.json",
"chars": 1245,
"preview": "{\n \"name\": \"@personaelabs/spartan-ecdsa\",\n \"version\": \"2.3.1\",\n \"description\": \"Spartan-ecdsa (which to our knowledge"
},
{
"path": "packages/lib/src/config/index.ts",
"chars": 979,
"preview": "import { ProverConfig, VerifyConfig } from \"@src/types\";\n\n// Default configs for pubkey membership proving/verifying\nexp"
},
{
"path": "packages/lib/src/core/prover.ts",
"chars": 2897,
"preview": "import { Profiler } from \"@src/helpers/profiler\";\nimport { IProver, MerkleProof, NIZK, ProveArgs, ProverConfig } from \"@"
},
{
"path": "packages/lib/src/core/verifier.ts",
"chars": 2148,
"preview": "import {\n defaultAddressVerifierConfig,\n defaultPubkeyVerifierConfig\n} from \"@src/config\";\nimport { Profiler } from \"@"
},
{
"path": "packages/lib/src/helpers/poseidon.ts",
"chars": 737,
"preview": "import { init, wasm } from \"@src/wasm\";\nimport { bigIntToLeBytes, bytesLeToBigInt } from \"./utils\";\n\nexport class Poseid"
},
{
"path": "packages/lib/src/helpers/profiler.ts",
"chars": 363,
"preview": "// A helper class to optionally run console.time/console.timeEnd\nexport class Profiler {\n private enabled: boolean;\n\n "
},
{
"path": "packages/lib/src/helpers/publicInputs.ts",
"chars": 4275,
"preview": "var EC = require(\"elliptic\").ec;\nconst BN = require(\"bn.js\");\n\nimport { EffECDSAPubInput } from \"@src/types\";\nimport { b"
},
{
"path": "packages/lib/src/helpers/tree.ts",
"chars": 1158,
"preview": "import { IncrementalMerkleTree } from \"@zk-kit/incremental-merkle-tree\";\nimport { Poseidon } from \"./poseidon\";\nimport {"
},
{
"path": "packages/lib/src/helpers/utils.ts",
"chars": 1829,
"preview": "// @ts-ignore\nconst snarkJs = require(\"snarkjs\");\nimport { fromRpcSig } from \"@ethereumjs/util\";\nimport * as fs from \"fs"
},
{
"path": "packages/lib/src/index.ts",
"chars": 772,
"preview": "export { MembershipProver } from \"@src/core/prover\";\nexport { MembershipVerifier } from \"@src/core/verifier\";\nexport { C"
},
{
"path": "packages/lib/src/types/index.ts",
"chars": 1500,
"preview": "import { PublicInput } from \"@src/helpers/publicInputs\";\n\n// The same structure as MerkleProof in @zk-kit/incremental-me"
},
{
"path": "packages/lib/src/wasm/index.ts",
"chars": 197,
"preview": "import * as wasm from \"./wasm\";\n\nimport { wasmBytes } from \"./wasmBytes\";\n\nexport const init = async () => {\n await was"
},
{
"path": "packages/lib/src/wasm/wasm.d.ts",
"chars": 2165,
"preview": "/* tslint:disable */\n/* eslint-disable */\n/**\n*/\nexport function init_panic_hook(): void;\n/**\n* @param {Uint8Array} circ"
},
{
"path": "packages/lib/src/wasm/wasm.js",
"chars": 14795,
"preview": "let wasm;\n\nconst heap = new Array(128).fill(undefined);\n\nheap.push(undefined, null, true, false);\n\nfunction getObject(id"
},
{
"path": "packages/lib/tests/efficientEcdsa.test.ts",
"chars": 2256,
"preview": "import { hashPersonalMessage } from \"@ethereumjs/util\";\n\nimport {\n CircuitPubInput,\n PublicInput,\n verifyEffEcdsaPubI"
},
{
"path": "packages/lib/tests/membershipNizk.test.ts",
"chars": 5652,
"preview": "import {\n hashPersonalMessage,\n ecsign,\n privateToAddress,\n privateToPublic\n} from \"@ethereumjs/util\";\n\nimport * as "
},
{
"path": "packages/lib/tests/tree.test.ts",
"chars": 923,
"preview": "import { Tree, Poseidon } from \"../src\";\n\ndescribe(\"Merkle tree prove and verify\", () => {\n let poseidon: Poseidon;\n l"
},
{
"path": "packages/lib/tsconfig.build.json",
"chars": 84,
"preview": "{\n \"extends\": \"./tsconfig.json\",\n \"exclude\": [\n \"./tests/**/*\"\n ],\n}"
},
{
"path": "packages/lib/tsconfig.json",
"chars": 564,
"preview": "{\n \"compilerOptions\": {\n \"baseUrl\": \".\",\n \"rootDir\": \".\",\n \"outDir\": \"./dist\",\n \"declaration\": true,\n \"t"
},
{
"path": "packages/poseidon/Cargo.toml",
"chars": 394,
"preview": "[package]\nname = \"poseidon\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n\n[dependencies]\nff = \"0.12.0\"\nhex = \"0.4.3\"\nhex-literal "
},
{
"path": "packages/poseidon/LICENSE",
"chars": 1075,
"preview": "MIT License\n\nCopyright (c) 2022 Ethereum Foundation\n\nPermission is hereby granted, free of charge, to any person obtaini"
},
{
"path": "packages/poseidon/README.md",
"chars": 592,
"preview": "Generate Poseidon params for the secp256k1 base field\n\n```\nsh ./k256_params.sh\n```\n\n## Parameters\n\nWe use the following "
},
{
"path": "packages/poseidon/k256_params.sh",
"chars": 124,
"preview": "sage ./sage/generate_params_poseidon.sage 1 0 256 3 5 128 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffeffff"
},
{
"path": "packages/poseidon/sage/generate_params_poseidon.sage",
"chars": 20727,
"preview": "# https://extgit.iaik.tugraz.at/krypto/hadeshash/-/blob/master/code/generate_params_poseidon.sage\n\nfrom math import *\nim"
},
{
"path": "packages/poseidon/sage/security_inequalities.sage",
"chars": 788,
"preview": "# Check security inequalities as specified in the Neptune specification\n\nM=128\nt=3\np=0xfffffffffffffffffffffffffffffffff"
},
{
"path": "packages/poseidon/src/k256_consts.rs",
"chars": 18596,
"preview": "use ff::PrimeField;\nuse lazy_static::lazy_static;\npub use secq256k1::field::field_secp::FieldElement;\n\npub(crate) const "
},
{
"path": "packages/poseidon/src/lib.rs",
"chars": 5064,
"preview": "mod k256_consts;\npub mod poseidon_k256;\n\nuse ff::PrimeField;\n\npub struct PoseidonConstants<F: PrimeField> {\n pub roun"
},
{
"path": "packages/poseidon/src/poseidon_k256.rs",
"chars": 589,
"preview": "use crate::k256_consts::*;\nuse crate::{Poseidon, PoseidonConstants};\npub use secq256k1::field::field_secp::FieldElement;"
},
{
"path": "packages/secq256k1/Cargo.toml",
"chars": 530,
"preview": "[package]\nname = \"secq256k1\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n# See more keys and their definitions at https://doc.ru"
},
{
"path": "packages/secq256k1/LICENSE",
"chars": 1075,
"preview": "MIT License\n\nCopyright (c) 2022 Ethereum Foundation\n\nPermission is hereby granted, free of charge, to any person obtaini"
},
{
"path": "packages/secq256k1/README.md",
"chars": 16,
"preview": "# Secq256k1\n\nwip"
},
{
"path": "packages/secq256k1/sage/hashtocurve_params.sage",
"chars": 3423,
"preview": "import sage.schemes.elliptic_curves.isogeny_small_degree as isd\nload(\"sqrt_ratio_params.sage\")\n\n# https://neuromancer.sk"
},
{
"path": "packages/secq256k1/sage/sqrt_ratio_params.sage",
"chars": 494,
"preview": "# https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-sqrt_ratio-for-any-field\ndef sqrt_ratio_par"
},
{
"path": "packages/secq256k1/sage/sswu_generic.sage",
"chars": 3082,
"preview": "#!/usr/bin/sage\n# vim: syntax=python\n\nimport sys\ntry:\n from sagelib.common import CMOV\n from sagelib.generic_map i"
},
{
"path": "packages/secq256k1/src/affine.rs",
"chars": 9758,
"preview": "use std::iter::Sum;\nuse std::ops::{Add, Mul, MulAssign, Neg, Sub};\nuse std::ops::{AddAssign, SubAssign};\n\nuse super::{Pr"
},
{
"path": "packages/secq256k1/src/field/field_secp.rs",
"chars": 43552,
"preview": "//! This module provides an implementation of the secq256k1's scalar field $\\mathbb{F}_q$\n//! where `q = 0xfffffffffffff"
},
{
"path": "packages/secq256k1/src/field/field_secq.rs",
"chars": 43112,
"preview": "//! This module provides an implementation of the secq256k1's scalar field $\\mathbb{F}_q$\n//! where `q = 0xfffffffffffff"
},
{
"path": "packages/secq256k1/src/field/mod.rs",
"chars": 613,
"preview": "use primeorder::{\n elliptic_curve::subtle::{Choice, CtOption},\n PrimeField,\n};\n\npub trait BaseField: PrimeField {\n"
},
{
"path": "packages/secq256k1/src/hashtocurve.rs",
"chars": 9201,
"preview": "use crate::field::{BaseField, SqrtRatio};\nuse k256::elliptic_curve::subtle::{Choice, ConstantTimeEq};\n\n// https://www.ie"
},
{
"path": "packages/secq256k1/src/lib.rs",
"chars": 2252,
"preview": "pub mod affine;\npub mod field;\nmod hashtocurve;\npub mod scalar;\n\npub use affine::AffinePoint;\nuse affine::AffinePointCor"
},
{
"path": "packages/secq256k1/src/scalar.rs",
"chars": 7518,
"preview": "use crate::field::field_secp::FieldElement;\n\nuse super::{FieldBytes, Secq256K1};\nuse crate::field::BaseField;\n\nuse ff::{"
},
{
"path": "packages/spartan_wasm/Cargo.toml",
"chars": 943,
"preview": "[package]\nname = \"spartan_wasm\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[lib]\nname = \"spartan_wasm\"\npath = \"src/lib.rs\"\ncrat"
},
{
"path": "packages/spartan_wasm/LICENSE",
"chars": 1075,
"preview": "MIT License\n\nCopyright (c) 2022 Ethereum Foundation\n\nPermission is hereby granted, free of charge, to any person obtaini"
},
{
"path": "packages/spartan_wasm/README.md",
"chars": 178,
"preview": "### Compile\n\nInstall wasm-pack\n\n```\ncurl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh\n```\n\nRun compi"
},
{
"path": "packages/spartan_wasm/src/lib.rs",
"chars": 14,
"preview": "pub mod wasm;\n"
},
{
"path": "packages/spartan_wasm/src/wasm.rs",
"chars": 6948,
"preview": "use byteorder::{LittleEndian, ReadBytesExt};\nuse console_error_panic_hook;\nuse ff::PrimeField;\nuse libspartan::{Assignme"
},
{
"path": "packages/spartan_wasm/test_circuit/test_circuit.circom",
"chars": 236,
"preview": "pragma circom 2.1.2;\n\ntemplate TestCircuit() {\n signal input a;\n signal input b[2];\n signal output c;\n\n sign"
},
{
"path": "packages/spartan_wasm/test_circuit/test_circuit_js/generate_witness.js",
"chars": 698,
"preview": "const wc = require(\"./witness_calculator.js\");\nconst { readFileSync, writeFile } = require(\"fs\");\n\nif (process.argv.len"
},
{
"path": "packages/spartan_wasm/test_circuit/test_circuit_js/witness_calculator.js",
"chars": 9181,
"preview": "module.exports = async function builder(code, options) {\n\n options = options || {};\n\n let wasmModule;\n try {\n\tw"
},
{
"path": "rust-toolchain",
"chars": 18,
"preview": "nightly-2022-10-31"
},
{
"path": "scripts/addr_membership_circuit.sh",
"chars": 62,
"preview": "#!/bin/bash \nsh ./scripts/compile_circuit.sh addr_membership 5"
},
{
"path": "scripts/build.sh",
"chars": 128,
"preview": "#!/bin/bash \nsh ./scripts/build_wasm.sh &&\nsh ./scripts/addr_membership_circuit.sh &&\nsh ./scripts/pubkey_membership_cir"
},
{
"path": "scripts/build_wasm.sh",
"chars": 131,
"preview": "rm -rf ./packages/spartan_wasm/build &&\ncd ./packages/spartan_wasm &&\nwasm-pack build --target web --out-dir ../spartan_"
},
{
"path": "scripts/compile_circuit.sh",
"chars": 576,
"preview": "CIRCUIT_NAME=$1\nNUM_PUB_INPUTS=$2\n\nBUILD_DIR=./packages/circuits/build/$CIRCUIT_NAME\nmkdir -p $BUILD_DIR &&\ncircom ./pac"
},
{
"path": "scripts/pubkey_membership_circuit.sh",
"chars": 64,
"preview": "#!/bin/bash \nsh ./scripts/compile_circuit.sh pubkey_membership 5"
},
{
"path": "scripts/test.sh",
"chars": 43,
"preview": "cargo test --release &&\nyarn lerna run test"
}
]
// ... and 4 more files (download for full content)
About this extraction
This page contains the full source code of the personaelabs/spartan-ecdsa GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 165 files (679.7 KB), approximately 209.5k tokens, and a symbol index with 914 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.