Repository: supranational/blst
Branch: master
Commit: f62244ef50ad
Files: 229
Total size: 3.7 MB
Directory structure:
gitextract_z39wz_mc/
├── .gitattributes
├── .github/
│ └── workflows/
│ ├── ci.yml
│ ├── codeql-analysis.yml
│ └── golang-lint.yml
├── .gitignore
├── .golangci.yml
├── .lgtm.yml
├── .travis.yml
├── LICENSE
├── README.md
├── SECURITY.md
├── bindings/
│ ├── blst.h
│ ├── blst.hpp
│ ├── blst.swg
│ ├── blst_aux.h
│ ├── c#/
│ │ ├── poc.cs
│ │ ├── poc.csproj
│ │ ├── run.me
│ │ └── supranational.blst.cs
│ ├── go/
│ │ ├── README.md
│ │ ├── blst.go
│ │ ├── blst.tgo
│ │ ├── blst_htoc_test.go
│ │ ├── blst_miller_loop_test.go
│ │ ├── blst_minpk.tgo
│ │ ├── blst_minpk_test.go
│ │ ├── blst_minsig_test.go
│ │ ├── blst_misc.tgo
│ │ ├── blst_px.tgo
│ │ ├── blst_wasm.go
│ │ ├── cgo_assembly.S
│ │ ├── cgo_server.c
│ │ ├── generate.py
│ │ └── rb_tree.go
│ ├── rust/
│ │ ├── Cargo.toml
│ │ ├── README.md
│ │ ├── benches/
│ │ │ └── blst_benches.rs
│ │ ├── build.rs
│ │ ├── publish.sh
│ │ ├── rustfmt.toml
│ │ └── src/
│ │ ├── bindings.rs
│ │ ├── lib.rs
│ │ ├── pippenger-no_std.rs
│ │ ├── pippenger-test_mod.rs
│ │ └── pippenger.rs
│ ├── vectors/
│ │ └── hash_to_curve/
│ │ ├── BLS12381G1_XMD_SHA-256_SSWU_NU_.json
│ │ ├── BLS12381G1_XMD_SHA-256_SSWU_RO_.json
│ │ ├── BLS12381G2_XMD_SHA-256_SSWU_NU_.json
│ │ ├── BLS12381G2_XMD_SHA-256_SSWU_RO_.json
│ │ ├── README
│ │ ├── expand_message_xmd_SHA256_256.json
│ │ └── expand_message_xmd_SHA256_38.json
│ └── zig/
│ ├── README.md
│ ├── blst.zig
│ ├── c.zig
│ ├── generate.py
│ └── tests.zig
├── build/
│ ├── assembly.S
│ ├── bindings_trim.pl
│ ├── cheri/
│ │ ├── add_mod_256-armv8.S
│ │ ├── add_mod_384-armv8.S
│ │ ├── ct_inverse_mod_256-armv8.S
│ │ ├── ct_inverse_mod_384-armv8.S
│ │ ├── ct_is_square_mod_384-armv8.S
│ │ ├── div3w-armv8.S
│ │ ├── mul_mont_256-armv8.S
│ │ ├── mul_mont_384-armv8.S
│ │ └── sha256-armv8.S
│ ├── coff/
│ │ ├── add_mod_256-armv8.S
│ │ ├── add_mod_256-x86_64.s
│ │ ├── add_mod_384-armv8.S
│ │ ├── add_mod_384-x86_64.s
│ │ ├── add_mod_384x384-x86_64.s
│ │ ├── ct_inverse_mod_256-armv8.S
│ │ ├── ct_inverse_mod_256-x86_64.s
│ │ ├── ct_inverse_mod_384-armv8.S
│ │ ├── ct_is_square_mod_384-armv8.S
│ │ ├── ct_is_square_mod_384-x86_64.s
│ │ ├── ctq_inverse_mod_384-x86_64.s
│ │ ├── ctx_inverse_mod_384-x86_64.s
│ │ ├── div3w-armv8.S
│ │ ├── div3w-x86_64.s
│ │ ├── mul_mont_256-armv8.S
│ │ ├── mul_mont_384-armv8.S
│ │ ├── mulq_mont_256-x86_64.s
│ │ ├── mulq_mont_384-x86_64.s
│ │ ├── mulx_mont_256-x86_64.s
│ │ ├── mulx_mont_384-x86_64.s
│ │ ├── sha256-armv8.S
│ │ ├── sha256-portable-x86_64.s
│ │ └── sha256-x86_64.s
│ ├── elf/
│ │ ├── add_mod_256-armv8.S
│ │ ├── add_mod_256-x86_64.s
│ │ ├── add_mod_384-armv8.S
│ │ ├── add_mod_384-x86_64.s
│ │ ├── add_mod_384x384-x86_64.s
│ │ ├── ct_inverse_mod_256-armv8.S
│ │ ├── ct_inverse_mod_256-x86_64.s
│ │ ├── ct_inverse_mod_384-armv8.S
│ │ ├── ct_is_square_mod_384-armv8.S
│ │ ├── ct_is_square_mod_384-x86_64.s
│ │ ├── ctq_inverse_mod_384-x86_64.s
│ │ ├── ctx_inverse_mod_384-x86_64.s
│ │ ├── div3w-armv8.S
│ │ ├── div3w-x86_64.s
│ │ ├── mul_mont_256-armv8.S
│ │ ├── mul_mont_384-armv8.S
│ │ ├── mulq_mont_256-x86_64.s
│ │ ├── mulq_mont_384-x86_64.s
│ │ ├── mulx_mont_256-x86_64.s
│ │ ├── mulx_mont_384-x86_64.s
│ │ ├── sha256-armv8.S
│ │ ├── sha256-portable-x86_64.s
│ │ └── sha256-x86_64.s
│ ├── mach-o/
│ │ ├── add_mod_256-armv8.S
│ │ ├── add_mod_256-x86_64.s
│ │ ├── add_mod_384-armv8.S
│ │ ├── add_mod_384-x86_64.s
│ │ ├── add_mod_384x384-x86_64.s
│ │ ├── ct_inverse_mod_256-armv8.S
│ │ ├── ct_inverse_mod_256-x86_64.s
│ │ ├── ct_inverse_mod_384-armv8.S
│ │ ├── ct_is_square_mod_384-armv8.S
│ │ ├── ct_is_square_mod_384-x86_64.s
│ │ ├── ctq_inverse_mod_384-x86_64.s
│ │ ├── ctx_inverse_mod_384-x86_64.s
│ │ ├── div3w-armv8.S
│ │ ├── div3w-x86_64.s
│ │ ├── mul_mont_256-armv8.S
│ │ ├── mul_mont_384-armv8.S
│ │ ├── mulq_mont_256-x86_64.s
│ │ ├── mulq_mont_384-x86_64.s
│ │ ├── mulx_mont_256-x86_64.s
│ │ ├── mulx_mont_384-x86_64.s
│ │ ├── sha256-armv8.S
│ │ ├── sha256-portable-x86_64.s
│ │ └── sha256-x86_64.s
│ ├── refresh.sh
│ ├── srcroot.go
│ └── win64/
│ ├── add_mod_256-armv8.asm
│ ├── add_mod_256-x86_64.asm
│ ├── add_mod_384-armv8.asm
│ ├── add_mod_384-x86_64.asm
│ ├── add_mod_384x384-x86_64.asm
│ ├── blst.def
│ ├── ct_inverse_mod_256-armv8.asm
│ ├── ct_inverse_mod_256-x86_64.asm
│ ├── ct_inverse_mod_384-armv8.asm
│ ├── ct_is_square_mod_384-armv8.asm
│ ├── ct_is_square_mod_384-x86_64.asm
│ ├── ctq_inverse_mod_384-x86_64.asm
│ ├── ctx_inverse_mod_384-x86_64.asm
│ ├── div3w-armv8.asm
│ ├── div3w-x86_64.asm
│ ├── dll.c
│ ├── mul_mont_256-armv8.asm
│ ├── mul_mont_384-armv8.asm
│ ├── mulq_mont_256-x86_64.asm
│ ├── mulq_mont_384-x86_64.asm
│ ├── mulx_mont_256-x86_64.asm
│ ├── mulx_mont_384-x86_64.asm
│ ├── sha256-armv8.asm
│ └── sha256-x86_64.asm
├── build.bat
├── build.sh
├── build.zig
├── build.zig.zon
└── src/
├── aggregate.c
├── asm/
│ ├── add_mod_256-armv8.pl
│ ├── add_mod_256-x86_64.pl
│ ├── add_mod_384-armv8.pl
│ ├── add_mod_384-x86_64.pl
│ ├── add_mod_384x384-x86_64.pl
│ ├── arm-xlate.pl
│ ├── ct_inverse_mod_256-armv8.pl
│ ├── ct_inverse_mod_256-x86_64.pl
│ ├── ct_inverse_mod_384-armv8.pl
│ ├── ct_is_square_mod_384-armv8.pl
│ ├── ct_is_square_mod_384-x86_64.pl
│ ├── ctq_inverse_mod_384-x86_64.pl
│ ├── ctx_inverse_mod_384-x86_64.pl
│ ├── div3w-armv8.pl
│ ├── div3w-x86_64.pl
│ ├── mul_mont_256-armv8.pl
│ ├── mul_mont_384-armv8.pl
│ ├── mulq_mont_256-x86_64.pl
│ ├── mulq_mont_384-x86_64.pl
│ ├── mulx_mont_256-x86_64.pl
│ ├── mulx_mont_384-x86_64.pl
│ ├── sha256-armv8.pl
│ ├── sha256-portable-x86_64.pl
│ ├── sha256-x86_64.pl
│ └── x86_64-xlate.pl
├── blst_t.hpp
├── bulk_addition.c
├── bytes.h
├── client_min_pk.c
├── client_min_sig.c
├── consts.c
├── consts.h
├── cpuid.c
├── e1.c
├── e2.c
├── ec_mult.h
├── ec_ops.h
├── errors.h
├── exp.c
├── exports.c
├── fields.h
├── fp12_tower.c
├── hash_to_field.c
├── keygen.c
├── map_to_g1.c
├── map_to_g2.c
├── multi_scalar.c
├── no_asm.h
├── pairing.c
├── pentaroot-addchain.h
├── pentaroot.c
├── point.h
├── rb_tree.c
├── recip-addchain.h
├── recip.c
├── server.c
├── sha256.h
├── sqrt-addchain.h
├── sqrt.c
├── vect.c
└── vect.h
================================================
FILE CONTENTS
================================================
================================================
FILE: .gitattributes
================================================
*.pl linguist-language=assembly
*.h linguist-language=c
*.tgo linguist-language=go
================================================
FILE: .github/workflows/ci.yml
================================================
name: build
on:
push:
branches:
- '**'
workflow_dispatch:
branches:
- '**'
pull_request:
branches:
- master
jobs:
rust-n-go:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-latest, ubuntu-24.04-arm, windows-latest, windows-11-arm, macos-latest ]
steps:
- uses: actions/checkout@v6
- name: Get date
id: get-date
run: echo "date=$(date -u +%Y-%m)" >> $GITHUB_OUTPUT
shell: bash
- uses: actions/cache@v5
with:
path: |
~/.cargo/registry
**/Cargo.lock
**/bindings/rust/target
~/.wasmtime
key: ${{ runner.os }}-${{ runner.arch }}-cargo-${{ steps.get-date.outputs.date }}
- name: Environment
shell: bash
run: |
lscpu 2>/dev/null && echo --- || true
sysctl hw 2>/dev/null && echo --- || true
env | sort
- name: Install Wasmtime
if: ${{ runner.os == 'Linux' }}
shell: bash
run: if [ ! -d ~/.wasmtime/bin ]; then curl https://wasmtime.dev/install.sh -sSf | bash; fi
- name: Rust
shell: bash
run: |
rustc --version --verbose
export CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse
cd bindings/rust
sed "s/^crit/#crit/" Cargo.toml > Cargo.$$.toml && \
mv Cargo.$$.toml Cargo.toml
if [ "$GITHUB_EVENT_NAME" != "pull_request" ]; then
cargo update
fi
cargo test --release
echo '--- test portable'
echo
cargo test --release --features=portable
echo '--- test no-threads'
echo
cargo test --release --features=no-threads
echo '--- test serde-secret'
echo
cargo test --release --features=serde-secret
echo '--- test no_std'
echo
echo 'set -e' > ulimit-s
echo 'export RUST_MIN_STACK=$(($1 * 1024)); shift' >> ulimit-s
echo 'exec "$@"' >> ulimit-s
triplet=`rustc -vV | awk '/host:/ {print $2}' | tr 'a-z-' 'A-Z_'`
stack_size=`[ $RUNNER_OS = "Windows" ] && echo 65 || echo 56`
env BLST_TEST_NO_STD= \
CARGO_TARGET_${triplet}_RUNNER="bash ulimit-s $stack_size" \
cargo test --release
if [ -x ~/.wasmtime/bin/wasmtime ]; then
echo '--- test wasm32-wasip1'
echo
rustup target add wasm32-wasip1
env CARGO_TARGET_WASM32_WASIP1_RUNNER=~/.wasmtime/bin/wasmtime \
cargo test --release --target=wasm32-wasip1
cargo clean -p blst --release --target=wasm32-wasip1
echo
fi
if [ $RUNNER_OS = "Linux" ]; then
if [ `uname -p` = "x86_64" ]; then
echo '--- test -mlvi-hardening'
echo
env CC=clang CFLAGS="-mlvi-hardening -D__SGX_LVI_HARDENING__" \
cargo test --release
echo '--- build x86_64-fortanix-unknown-sgx'
echo
rustup target add x86_64-fortanix-unknown-sgx
cargo test --no-run --release --target=x86_64-fortanix-unknown-sgx
cargo clean -p blst --release --target=x86_64-fortanix-unknown-sgx
echo
fi
echo '--- dry-run publish'
echo
./publish.sh --dry-run
elif [ $RUNNER_OS = "macOS" ]; then
if [ $RUNNER_ARCH = "ARM64" ]; then
echo '--- test x86_64-apple-darwin'
echo
rustup target add x86_64-apple-darwin
cargo test --release --target=x86_64-apple-darwin
cargo clean -p blst --release --target=x86_64-apple-darwin
echo
else
echo '--- build aarch64-apple-darwin'
echo
rustup target add aarch64-apple-darwin
cargo test --no-run --release --target=aarch64-apple-darwin
cargo clean -p blst --release --target=aarch64-apple-darwin
echo
fi
echo '--- build aarch64-apple-ios'
echo
rustup target add aarch64-apple-ios
env IPHONEOS_DEPLOYMENT_TARGET=10.0 \
cargo test --no-run --release --target=aarch64-apple-ios
cargo clean -p blst --release --target=aarch64-apple-ios
echo
elif [ $RUNNER_OS = "Windows" -a $RUNNER_ARCH = "X64" ]; then
if which clang-cl > /dev/null 2>&1; then
              echo '--- test i686-pc-windows-msvc'
echo
rustup target add i686-pc-windows-msvc
cargo test --release --target=i686-pc-windows-msvc
cargo clean -p blst --release --target=i686-pc-windows-msvc
echo
fi
            echo '--- test x86_64-pc-windows-gnu'
echo
rustup target add x86_64-pc-windows-gnu
cargo test --release --target=x86_64-pc-windows-gnu
cargo clean -p blst --release --target=x86_64-pc-windows-gnu
echo
fi
echo
echo '--- cargo clippy'
echo
echo 'msrv = "1.56"' > .clippy.toml
cargo clippy --release
cargo clean -p blst
cargo clean -p blst --release
rm -rf target/.rustc_info.json
rm -rf target/package
rm -rf target/{debug,release}/incremental
rm -rf target/*/{debug,release}/incremental
rm -rf ~/.cargo/registry/src
rm -rf ~/.cargo/registry/index/*/.cache
mkdir -p ~/.wasmtime
- name: Go
if: ${{ runner.os != 'Windows' || runner.arch != 'ARM64' }}
shell: bash
run: |
go version 2>/dev/null || exit 0
if ! (grep -q -e '^flags.*\badx\b' /proc/cpuinfo) 2>/dev/null; then
export CGO_CFLAGS="-O -D__BLST_PORTABLE__"
fi
cd bindings/go
go test -test.v
misc-ubuntu-latest:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: actions/cache@v5
with:
path: ~/swig
key: ${{ runner.os }}-swig-github
- uses: actions/setup-java@v5
with:
distribution: temurin
java-version: 11
- uses: actions/setup-node@v6
with:
node-version: '20.x'
- name: Environment
run: |
lscpu
echo ---
env | sort
- name: Python
run: if [ -x bindings/python/run.me ]; then bindings/python/run.me; fi
- name: Java
run: if [ -x bindings/java/run.me ]; then bindings/java/run.me; fi
- name: Node.js
run: |
node_js=bindings/node.js
if [ -x $node_js/run.me ]; then
if [ ! -x ~/swig/bin/swig ]; then
( git clone --branch v4.3.0 https://github.com/swig/swig;
cd swig;
./autogen.sh;
./configure --prefix=$HOME/swig;
make;
make install;
(cd ~/swig/share/swig && ln -s `ls` current)
)
fi
env PATH=~/swig/bin:$PATH SWIG_LIB=~/swig/share/swig/current \
$node_js/run.me
fi
- name: node-gyp
run: |
node_js=bindings/node.js
if [ -f $node_js/binding.gyp -a -f $node_js/blst_wrap.cpp ]; then
npm install --global node-gyp || true
if which node-gyp > /dev/null 2>&1; then
( export PATH=~/swig/bin:$PATH SWIG_LIB=~/swig/share/swig/current;
cd $node_js;
node-gyp configure;
node-gyp build;
env NODE_PATH=build/Release: node runnable.js;
)
fi
fi
- name: TypeScript
run: |
node_js=bindings/node.js
if [ -f $node_js/blst.hpp.ts -a -f $node_js/blst.node ]; then
npm install --global typescript || true
if which tsc > /dev/null 2>&1; then
( cd $node_js;
npm install @types/node;
tsc runnable.ts --ignoreConfig --types node --module commonjs;
env NODE_PATH=.: node runnable.js;
)
fi
fi
- name: Emscripten
uses: docker://emscripten/emsdk
with:
args: >
bindings/emscripten/run.me -O2
- name: C#
run: |
if [ -x bindings/c#/run.me ]; then
bindings/c#/run.me;
if which dotnet > /dev/null 2>&1; then
cd bindings/c#
[ -f libblst.dll.so ] || ../../build.sh -dll
dotnet run -c Release
fi
fi
- uses: actions/cache@v5
with:
path: |
~/.cache/zig
~/zig-x86_64-linux-*
~/.wasmtime
key: ${{ runner.os }}-zig-github
- name: Zig
run: |
ver=0.15.2
base_dir=zig-x86_64-linux-$ver
if [ ! -d ~/$base_dir ]; then
curl -sSf https://ziglang.org/download/$ver/$base_dir.tar.xz | unxz -c | tar xf - --directory ~
fi
if [ -x ~/$base_dir/zig ]; then
PATH=~/$base_dir:$PATH
zig build test --summary new
echo '--- test wasm32-wasi'
if [ ! -d ~/.wasmtime ]; then
curl https://wasmtime.dev/install.sh -sSf | bash
fi
PATH=~/.wasmtime/bin:$PATH
zig build test -Dtarget=wasm32-wasi -fwasmtime --summary new
fi
================================================
FILE: .github/workflows/codeql-analysis.yml
================================================
name: "CodeQL"
on:
push:
branches:
- '**'
paths:
- 'src/*'
- 'bindings/c#/*'
- '.github/workflows/codeql-analysis.yml'
pull_request:
branches:
- master
paths:
- 'src/*'
- 'bindings/c#/*'
#schedule:
 #  - cron: '0 23 * * 4'
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
security-events: write
strategy:
fail-fast: false
matrix:
language: [ 'cpp', 'csharp' ]
steps:
- name: Checkout repository
uses: actions/checkout@v6
with:
# We must fetch at least the immediate parents so that if this is
# a pull request then we can checkout the head.
fetch-depth: 2
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v4
with:
languages: ${{ matrix.language }}
queries: security-extended
- if: matrix.language == 'cpp'
name: Custom build
run: ./build.sh -m32 -ffreestanding
- if: matrix.language != 'cpp'
name: Autobuild
uses: github/codeql-action/autobuild@v4
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v4
================================================
FILE: .github/workflows/golang-lint.yml
================================================
name: golang-lint
on:
push:
branches:
- '**'
paths:
- 'bindings/go/*.go'
- '.github/workflows/golang-lint.yml'
- '.golangci.yml'
pull_request:
branches:
- master
paths:
- 'bindings/go/*.go'
jobs:
golang-lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: actions/setup-go@v6
with:
go-version: '>=1.21'
cache: false
- name: "go version"
run: go version
- uses: golangci/golangci-lint-action@v9
with:
# Require: The version of golangci-lint to use.
# When `install-mode` is `binary` (default) the value can be v1.2 or v1.2.3 or `latest` to use the latest version.
# When `install-mode` is `goinstall` the value can be v1.2.3, `latest`, or the hash of a commit.
version: v2.9
# Optional: working directory, useful for monorepos
# working-directory: somedir
# Optional: golangci-lint command line arguments.
#
# Note: By default, the `.golangci.yml` file should be at the root of the repository.
# The location of the configuration file can be changed by using `--config=`
# args: --timeout=30m --config=/my/path/.golangci.yml --issues-exit-code=0
# Optional: show only new issues if it's a pull request. The default value is `false`.
# only-new-issues: true
# Optional: if set to true, then all caching functionality will be completely disabled,
# takes precedence over all other caching options.
skip-cache: true
# Optional: if set to true, then the action won't cache or restore ~/go/pkg.
# skip-pkg-cache: true
# Optional: if set to true, then the action won't cache or restore ~/.cache/go-build.
# skip-build-cache: true
# Optional: The mode to install golangci-lint. It can be 'binary' or 'goinstall'.
# install-mode: "goinstall"
================================================
FILE: .gitignore
================================================
# Prerequisites
*.d
# Object files
*.o
*.ko
*.obj
*.elf
# Linker output
*.ilk
*.map
*.exp
# Precompiled Headers
*.gch
*.pch
# Libraries
*.lib
*.a
*.la
*.lo
# Shared objects (inc. Windows DLLs)
*.dll
*.so
*.so.*
*.dylib
# Executables
*.exe
*.out
*.app
*.i*86
*.x86_64
*.hex
# Debug files
*.dSYM/
*.su
*.idb
*.pdb
# Kernel Module Compile Results
*.mod*
*.cmd
.tmp_versions/
modules.order
Module.symvers
Mkfile.old
dkms.conf
# Open swap files
*.swp
# Emacs backup files
*~
# Rust build
Cargo.lock
bindings/rust/target
bindings/rust/blst
# These are customarily filled with swig artefacts
bindings/python
bindings/java
bindings/node.js
bindings/emscripten
bin/
obj/
zig-out
.zig-cache
================================================
FILE: .golangci.yml
================================================
version: "2"
linters:
default: all
disable:
# just whining
- copyloopvar # go>=1.22
- cyclop
- dupword
- forbidigo
- funlen
- gochecknoglobals
- gochecknoinits
- gocognit
- gocritic
- gocyclo
- godot
- intrange # go>=1.22
- lll
- mnd
- nestif
- nlreturn
- varnamelen
- whitespace
- wsl
- wsl_v5
# auto-generation artefact
- dupl
# maybe some day...
- godoclint
- godox
- maintidx
# maybe some day in tests...
- forcetypeassert
- nonamedreturns
- perfsprint
- testpackage
# 83 active linters remaining including gosec, gosimple, govet, etc.
settings:
revive:
enable-all-rules: true
rules:
- name: add-constant
disabled: true
- name: argument-limit
disabled: true
- name: cognitive-complexity # similar to 'gocognit' above
disabled: true
- name: cyclomatic # similar to 'cyclop' & 'gocyclo' above
disabled: true
- name: empty-block
disabled: true
- name: empty-lines
disabled: true
- name: flag-parameter
disabled: true
- name: function-length # similar to 'funlen' above
disabled: true
- name: function-result-limit
disabled: true
- name: increment-decrement
disabled: true
- name: line-length-limit # similar to 'lll' above
disabled: true
- name: max-public-structs
disabled: true
- name: package-directory-mismatch
disabled: true
- name: receiver-naming
disabled: true
- name: var-naming
disabled: true
- name: unchecked-type-assertion # similar to 'forcetypeassert' above
disabled: true
- name: unexported-naming
disabled: true
- name: unhandled-error
arguments:
- fmt.Println
- fmt.Printf
- name: use-any # applicable to go>=1.18 only
disabled: true
exclusions:
generated: lax
presets:
- comments
- common-false-positives
- legacy
- std-error-handling
paths:
- third_party$
- builtin$
- examples$
formatters:
exclusions:
generated: lax
paths:
- third_party$
- builtin$
- examples$
================================================
FILE: .lgtm.yml
================================================
queries:
- include: "*"
- exclude: cpp/unused-static-function
- exclude: cpp/include-non-header
- exclude: cs/call-to-unmanaged-code
- exclude: cs/unmanaged-code
extraction:
cpp:
index:
build_command:
- ./build.sh -m32
go:
index:
build_command:
- (cd bindings/go; go test -c)
csharp:
index:
nuget_restore: false
================================================
FILE: .travis.yml
================================================
branches:
only:
- /.*/
language: rust
git:
quiet: true
os:
- linux
arch:
- arm64
- s390x
before_script:
- lscpu 2>/dev/null && echo --- || true
- env | sort
script:
- if [ "$TRAVIS_LANGUAGE" = "rust" ]; then
if [ "$TRAVIS_OS_NAME" = "windows" ]; then
rustup set default-host x86_64-pc-windows-msvc;
export ML=-nologo;
fi;
( cd bindings/rust;
if [ -f target/Cargo.lock ]; then
mv -f target/Cargo.lock .;
fi;
NOW=`date +%s`;
REF=.cargo/registry/index/*/.last-updated;
THEN=`(stat -c %Y "$TRAVIS_HOME"/$REF || stat -f %m "$TRAVIS_HOME"/$REF) 2>/dev/null`;
if [ $(($NOW - ${THEN:-0})) -gt 604800 ]; then
env CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse cargo update;
fi;
cargo test --release )
fi
- if which go > /dev/null 2>&1; then
go version;
if ! (grep -q -e '^flags.*\badx\b' /proc/cpuinfo) 2>/dev/null; then
export CGO_CFLAGS="-O -D__BLST_PORTABLE__";
fi;
(cd bindings/go; go test -test.v)
fi
matrix:
include:
- os: linux
arch: arm64
language: go
notifications:
email: false
before_cache:
- if [ "$TRAVIS_LANGUAGE" = "rust" ]; then
( cd bindings/rust;
cargo clean -p blst; cargo clean -p blst --release;
rm -rf target/.rustc_info.json;
rm -rf target/{debug,release}/incremental;
mv -f Cargo.lock target )
fi
- (cd "$TRAVIS_HOME"; rm -rf .cargo/registry/src)
- (cd "$TRAVIS_HOME"; rm -rf .cargo/registry/index/*/.cache)
cache:
cargo: true
directories:
- bindings/rust/target
================================================
FILE: LICENSE
================================================
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
FILE: README.md
================================================
[![build](https://github.com/supranational/blst/actions/workflows/ci.yml/badge.svg)](https://github.com/supranational/blst/actions) [![CodeQL](https://github.com/supranational/blst/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/supranational/blst/actions/workflows/codeql-analysis.yml)
# blst
blst (pronounced 'blast') is a BLS12-381 signature library focused on performance and security. It is written in C and assembly.
## Table of Contents
* [Status](#status)
* [General notes on implementation](#general-notes-on-implementation)
* [Platform and Language Compatibility](#platform-and-language-compatibility)
* [API](#api)
* [Introductory Tutorial](#introductory-tutorial)
+ [Public Keys and Signatures](#public-keys-and-signatures)
+ [Signature Verification](#signature-verification)
+ [Signature Aggregation](#signature-aggregation)
+ [Serialization Format](#serialization-format)
* [Build](#build)
+ [C static library](#c-static-library)
* [Language-specific notes](#language-specific-notes)
+ [Go](#go)
+ [Rust](#rust)
* [Repository Structure](#repository-structure)
* [Performance](#performance)
* [License](#license)
## Status
**This library is under active development**
An initial audit of this library was conducted by NCC Group in January 2021 and can be found [here](https://research.nccgroup.com/wp-content/uploads/2021/01/NCC_Group_EthereumFoundation_ETHF002_Report_2021-01-20_v1.0.pdf).
Formal verification of this library by Galois is on-going and can be found [here](https://github.com/GaloisInc/BLST-Verification).
This library is compliant with the following IETF draft specifications:
- [IETF BLS Signature V6](https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature)
- [IETF RFC 9380 Hashing to Elliptic Curves](https://www.rfc-editor.org/rfc/rfc9380.html)
The serialization formatting is implemented according to [the ZCash definition](#serialization-format).
## General notes on implementation
The goal of the blst library is to provide a foundational component for applications and other libraries that require high performance and formally verified BLS12-381 operations. With that in mind some decisions are made to maximize the public good beyond BLS12-381. For example, the field operations are optimized for general 384-bit usage, as opposed to tuned specifically for the 381-bit BLS12-381 curve parameters. With the formal verification of these foundational components, we believe they can provide a reliable building block for other curves that would like high performance and an extra element of security.
The library deliberately abstains from dealing with memory management and multi-threading, with the rationale that these ultimately belong in language-specific bindings. Another responsibility that is left to the application is random number generation. All this in the name of run-time neutrality, which makes integration into more stringent environments like Intel SGX or ARM TrustZone trivial.
## Platform and Language Compatibility
This library primarily supports x86_64 and ARM64 hardware platforms, and Linux, Mac, and Windows operating systems. But it does have a portable replacement for the assembly modules, which can be compiled for a plethora of other platforms. Problem reports for these will be considered and are likely to be addressed.
This repository includes explicit bindings for:
- [Go](bindings/go)
- [Rust](bindings/rust)
Unless deemed appropriate to implement, bindings for other languages will be provided using [SWIG](http://swig.org). Proof-of-concept scripts are available for:
- [Python](bindings/python)
- [Java](bindings/java)
- [Node.js](bindings/node.js)
- [Emscripten](bindings/emscripten)
- [C#](bindings/c%23)
- [Zig](bindings/zig)
## API
The blst API is defined in the C header [bindings/blst.h](bindings/blst.h). The API can be categorized as follows, with some example operations:
- Field Operations (add, sub, mul, neg, inv, to/from Montgomery)
- Curve Operations (add, double, mul, to/from affine, group check)
- Intermediate (hash to curve, pairing, serdes)
- BLS12-381 signature (sign, verify, aggregate)
Note: there is also an auxiliary header file, [bindings/blst_aux.h](bindings/blst_aux.h), that is used as a staging area for experimental interfaces that may or may not get promoted to blst.h.
## Introductory Tutorial
Programming is understanding, and understanding implies mastering the lingo. So we have a pair of additive groups being mapped to multiplicative one... What does it mean? Well, this tutorial is not about explaining that, but rather about making the connection between what you're supposed to know about [pairing-based cryptography](https://en.wikipedia.org/wiki/Pairing-based_cryptography) and the interface provided by the library.
### Public Keys and Signatures
We have two elliptic curves, E1 and E2, points on which are contained in `blst_p1` and `blst_p2`, or `blst_p1_affine` and `blst_p2_affine` structures. Elements in the multiplicative group are held in a `blst_fp12` structure. One of the curves, or more specifically, a subset of points that form a cyclic group, is chosen for public keys, and another, for signatures. The choice is denoted by the subroutines' suffixes, `_pk_in_g1` or `_pk_in_g2`. The most common choice appears to be the former, that is, `blst_p1` for public keys, and `blst_p2` for signatures. But it all starts with a secret key...
The secret key is held in a 256-bit `blst_scalar` structure which can be instantiated with either [`blst_keygen`](https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature#section-2.3), or deserialized with `blst_scalar_from_bendian` or `blst_scalar_from_lendian` from a previously serialized byte sequence. It shouldn't come as a surprise that there are two uses for a secret key:
- generating the associated public key, either with `blst_sk_to_pk_in_g1` or `blst_sk_to_pk_in_g2`;
- performing a sign operation, either with `blst_sign_pk_in_g1` or `blst_sign_pk_in_g2`;
As for signing, unlike what your intuition might suggest, `blst_sign_*` doesn't sign a message, but rather a point on the corresponding elliptic curve. You can obtain this point from a message by calling `blst_hash_to_g2` or `blst_encode_to_g2` (see the [IETF hash-to-curve](https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve#section-3) draft for distinction). Another counter-intuitive aspect is the apparent g1 vs. g2 naming mismatch, in the sense that `blst_sign_pk_in_g1` accepts output from `blst_hash_to_g2`, and `blst_sign_pk_in_g2` accepts output from `blst_hash_to_g1`. This is because, as you should recall, public keys and signatures come from complementary groups.
Now that you have a public key and signature, as points on corresponding elliptic curves, you can serialize them with `blst_p1_serialize`/`blst_p1_compress` and `blst_p2_serialize`/`blst_p2_compress` and send the resulting byte sequences over the network for deserialization/uncompression and verification.
### Signature Verification
Even though there are "single-shot" `blst_core_verify_pk_in_g1` and `blst_core_verify_pk_in_g2`, you should really familiarize yourself with the more generalized pairing interface. `blst_pairing` is an opaque structure, and the only thing you know about it is `blst_pairing_sizeof`, which is how much memory you're supposed to allocate for it. In order to verify an aggregated signature for a set of public keys and messages, or just one[!], you would:
```
blst_pairing_init(ctx, hash_or_encode, domain_separation_tag);
blst_pairing_aggregate_pk_in_g1(ctx, PK[0], aggregated_signature, message[0]);
blst_pairing_aggregate_pk_in_g1(ctx, PK[1], NULL, message[1]);
...
blst_pairing_commit(ctx);
result = blst_pairing_finalverify(ctx, NULL);
```
**The essential point to note** is that it's the caller's responsibility to ensure that public keys are group-checked with `blst_p1_affine_in_g1`. This is because it's a relatively expensive operation and it's naturally assumed that the application would cache the check's outcome. Signatures are group-checked internally. Not shown in the pseudo-code snippet above, but `aggregate` and `commit` calls return `BLST_ERROR` denoting success or failure in performing the operation. Call to `finalverify`, on the other hand, returns boolean.
Another, potentially more useful usage pattern is:
```
blst_p2_affine_in_g2(signature);
blst_aggregated_in_g2(gtsig, signature);
blst_pairing_init(ctx, hash_or_encode, domain_separation_tag);
blst_pairing_aggregate_pk_in_g1(ctx, PK[0], NULL, message[0]);
blst_pairing_aggregate_pk_in_g1(ctx, PK[1], NULL, message[1]);
...
blst_pairing_commit(ctx);
result = blst_pairing_finalverify(ctx, gtsig);
```
What is useful about it is that `aggregated_signature` can be handled in a separate thread. And while we are at it, aggregate calls can also be executed in different threads. This naturally implies that each thread will operate on its own `blst_pairing` context, which will have to be combined with `blst_pairing_merge` as threads join.
### Signature Aggregation
Aggregation is a trivial operation of performing point additions, with `blst_p2_add_or_double_affine` or `blst_p1_add_or_double_affine`. Note that the accumulator is a non-affine point.
---
That's about what you need to know to get started with nitty-gritty of actual function declarations.
### Serialization Format
From the ZCash BLS12-381 specification
* Fq elements are encoded in big-endian form. They occupy 48 bytes in this form.
* Fq2 elements are encoded in big-endian form, meaning that the Fq2 element c0 + c1 * u is represented by the Fq element c1 followed by the Fq element c0. This means Fq2 elements occupy 96 bytes in this form.
* The group G1 uses Fq elements for coordinates. The group G2 uses Fq2 elements for coordinates.
* G1 and G2 elements can be encoded in uncompressed form (the x-coordinate followed by the y-coordinate) or in compressed form (just the x-coordinate). G1 elements occupy 96 bytes in uncompressed form, and 48 bytes in compressed form. G2 elements occupy 192 bytes in uncompressed form, and 96 bytes in compressed form.
The most-significant three bits of a G1 or G2 encoding should be masked away before the coordinate(s) are interpreted. These bits are used to unambiguously represent the underlying element:
* The most significant bit, when set, indicates that the point is in compressed form. Otherwise, the point is in uncompressed form.
* The second-most significant bit indicates that the point is at infinity. If this bit is set, the remaining bits of the group element's encoding should be set to zero.
* The third-most significant bit is set if (and only if) this point is in compressed form _and_ it is not the point at infinity _and_ its y-coordinate is the lexicographically largest of the two associated with the encoded x-coordinate.
## Build
The build process is very simple and only requires a C compiler. It's integrated into the Go and Rust ecosystems, so that respective users would go about as they would with any other external module. Otherwise, a binary library would have to be compiled.
### C static library
A static library called libblst.a can be built in the current working directory of the user's choice:
Linux, Mac, and Windows (in MinGW or Cygwin environments)
```
/some/where/build.sh
```
Windows (Visual C)
```
\some\where\build.bat
```
If final application crashes with an "illegal instruction" exception [after copying to another system], pass `-D__BLST_PORTABLE__` on `build.sh` command line. If you don't use build.sh, complement the `CFLAGS` environment variable with the said command line option. If you compile a Go application, you will need to modify the `CGO_CFLAGS` variable instead. And if you compile a Rust application, you can pass `--features portable` to `cargo build`. Alternatively, if you compile on an older Intel system, but will execute application on a newer one, consider instead passing `--features force-adx` for better performance.
## Language-specific notes
### [Go](bindings/go)
There are two primary modes of operation that can be chosen based on type definitions in the application.
For minimal-pubkey-size operations:
```
type PublicKey = blst.P1Affine
type Signature = blst.P2Affine
type AggregateSignature = blst.P2Aggregate
type AggregatePublicKey = blst.P1Aggregate
```
For minimal-signature-size operations:
```
type PublicKey = blst.P2Affine
type Signature = blst.P1Affine
type AggregateSignature = blst.P1Aggregate
type AggregatePublicKey = blst.P2Aggregate
```
For more details see the Go binding [readme](bindings/go/README.md).
### [Rust](bindings/rust)
[`blst`](https://crates.io/crates/blst) is the Rust binding crate.
To use min-pk version:
```
use blst::min_pk::*;
```
To use min-sig version:
```
use blst::min_sig::*;
```
For more details see the Rust binding [readme](bindings/rust/README.md).
## Repository Structure
**Root** - Contains various configuration files, documentation, licensing, and a build script
* **Bindings** - Contains the files that define the blst interface
* blst.h - provides C API to blst library
* blst_aux.h - contains experimental functions not yet committed for long-term maintenance
* blst.hpp - provides foundational class-oriented C++ interface to blst library
* blst.swg - provides SWIG definitions for creating blst bindings for other languages, such as Java and Python
* **C#** - folder containing C# bindings and an example of how to use them
* **Emscripten** - folder containing an example of how to use Emscripten WebAssembly bindings from Javascript
* **Go** - folder containing Go bindings for blst, including tests and benchmarks
* **Java** - folder containing an example of how to use SWIG Java bindings for blst
* **Node.js** - folder containing an example of how to use SWIG Javascript bindings for blst
* **Python** - folder containing an example of how to use SWIG Python bindings for blst
* **Rust** - folder containing Rust bindings for blst, including tests and benchmarks
* **Vectors**
* **Hash_to_curve**: folder containing test vectors for hash_to_curve from the IETF specification
* **Src** - folder containing C code for lower level blst functions such as field operations, extension field operations, hash-to-field, and more
* **Asm** - folder containing Perl scripts that are used to generate assembly code for different hardware platforms including x86 with ADX instructions, x86 without ADX instructions, and ARMv8, and [ABI](https://en.wikipedia.org/wiki/Application_binary_interface)[1]
* **Build** - this folder contains a set of pre-generated assembly files for a variety of operating systems, along with maintenance scripts.
* **Cheri** - assembly code for use on [CHERI](https://www.cl.cam.ac.uk/research/security/ctsrd/cheri/) platforms
* **Coff** - assembly code for use on Windows systems with GNU or LLVM toolchain
* **Elf** - assembly code for use on Unix systems
* **Mach-o** - assembly code for use on Apple operating systems
* **Win64** - assembly code for use on Windows systems with Microsoft toolchain
[1]: See [refresh.sh](build/refresh.sh) for usage. This method allows for simple reuse of optimized assembly across various platforms with minimal effort.
## Performance
Currently both the [Go](bindings/go) and [Rust](bindings/rust) bindings provide benchmarks for a variety of signature related operations.
## License
The blst library is licensed under the [Apache License Version 2.0](LICENSE) software license.
================================================
FILE: SECURITY.md
================================================
# Security Policy
## Reporting a Vulnerability
To report security issues please send an e-mail to hello@supranational.net.
For sensitive information or critical issues, please contact the above e-mail address with 'CRITICAL' in the subject line and we will respond with a mechanism to securely communicate.
Please try to provide a clear description of any issue reported, along with how to reproduce the issue if possible.
================================================
FILE: bindings/blst.h
================================================
/*
* Copyright Supranational LLC
* Licensed under the Apache License, Version 2.0, see LICENSE for details.
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef __BLST_H__
#define __BLST_H__

/*
 * Prefer the compiler-provided type macros so this header also works in
 * freestanding builds (no libc headers); fall back to the standard
 * headers otherwise.
 */
#ifdef __SIZE_TYPE__
typedef __SIZE_TYPE__ size_t;
#else
#include <stddef.h>
#endif

#if defined(__UINT8_TYPE__) && defined(__UINT32_TYPE__) \
                            && defined(__UINT64_TYPE__)
typedef __UINT8_TYPE__ uint8_t;
typedef __UINT32_TYPE__ uint32_t;
typedef __UINT64_TYPE__ uint64_t;
#else
#include <stdint.h>
#endif

#ifdef __cplusplus
extern "C" {
#elif !defined(__STDC_VERSION__) || __STDC_VERSION__<202311
/* Pre-C23 C has no built-in 'bool'; pick a definition suitable for the
 * binding generator processing this header. */
# if defined(__BLST_CGO__)
typedef _Bool bool; /* it's assumed that cgo calls modern enough compiler */
# elif defined(__BLST_RUST_BINDGEN__) || defined(__BLST_ZIG__)
#  define bool _Bool
# elif defined(__STDC_VERSION__) && __STDC_VERSION__>=199901
#  include <stdbool.h>
# elif !defined(bool)
#  define bool int
#  define __blst_h_bool__ /* signals the trailer to #undef bool again */
# endif
#endif

/*
 * DEFNULL annotates optional trailing pointer arguments: it expands to a
 * default-value specifier for SWIG and C++, and to nothing for plain C.
 */
#ifdef SWIG
# define DEFNULL =NULL
#elif defined __cplusplus
# define DEFNULL =0
#else
# define DEFNULL
#endif
/*
 * Status codes returned by the fallible blst entry points below.
 * The names are descriptive; zero means success.
 */
typedef enum {
BLST_SUCCESS = 0,
BLST_BAD_ENCODING,       /* input byte string could not be decoded */
BLST_POINT_NOT_ON_CURVE, /* decoded point is not on the curve */
BLST_POINT_NOT_IN_GROUP, /* point is on curve but fails the group check */
BLST_AGGR_TYPE_MISMATCH, /* incompatible inputs mixed in an aggregation */
BLST_VERIFY_FAIL,        /* signature verification failed */
BLST_PK_IS_INFINITY,     /* public key is the point at infinity */
BLST_BAD_SCALAR,         /* scalar input is out of range */
} BLST_ERROR;
/*
 * Core value types. blst_scalar is a 256-bit byte container; blst_fr is a
 * 256-bit and blst_fp a 384-bit limb vector. The wider extension fields
 * are built as towers: Fp2 over Fp, Fp6 over Fp2, Fp12 over Fp6.
 */
typedef uint8_t byte;
typedef uint64_t limb_t;
typedef struct { byte b[256/8]; } blst_scalar;
typedef struct { limb_t l[256/8/sizeof(limb_t)]; } blst_fr;
typedef struct { limb_t l[384/8/sizeof(limb_t)]; } blst_fp;
/* 0 is "real" part, 1 is "imaginary" */
typedef struct { blst_fp fp[2]; } blst_fp2;
typedef struct { blst_fp2 fp2[3]; } blst_fp6;
typedef struct { blst_fp6 fp6[2]; } blst_fp12;
/*
 * Scalar conversions to/from caller-side representations: uint32/uint64
 * limb arrays and fixed 32-byte big-/little-endian byte strings.
 */
void blst_scalar_from_uint32(blst_scalar *out, const uint32_t a[8]);
void blst_uint32_from_scalar(uint32_t out[8], const blst_scalar *a);
void blst_scalar_from_uint64(blst_scalar *out, const uint64_t a[4]);
void blst_uint64_from_scalar(uint64_t out[4], const blst_scalar *a);
void blst_scalar_from_bendian(blst_scalar *out, const byte a[32]);
void blst_bendian_from_scalar(byte out[32], const blst_scalar *a);
void blst_scalar_from_lendian(blst_scalar *out, const byte a[32]);
void blst_lendian_from_scalar(byte out[32], const blst_scalar *a);
/* Validity predicates. NOTE(review): presumably |fr_check| tests membership
 * in the BLS12-381 scalar field and |sk_check| suitability as a secret
 * key -- confirm against the implementation. */
bool blst_scalar_fr_check(const blst_scalar *a);
bool blst_sk_check(const blst_scalar *a);
/* Secret-key arithmetic with a combined check: the result is written to
 * |out| and the returned bool reports the check's outcome. */
bool blst_sk_add_n_check(blst_scalar *out, const blst_scalar *a,
const blst_scalar *b);
bool blst_sk_sub_n_check(blst_scalar *out, const blst_scalar *a,
const blst_scalar *b);
bool blst_sk_mul_n_check(blst_scalar *out, const blst_scalar *a,
const blst_scalar *b);
void blst_sk_inverse(blst_scalar *out, const blst_scalar *a);
/* Variable-length byte-string inputs; the bool return reports whether the
 * input was accepted. */
bool blst_scalar_from_le_bytes(blst_scalar *out, const byte *in, size_t len);
bool blst_scalar_from_be_bytes(blst_scalar *out, const byte *in, size_t len);
#ifndef SWIG
/*
 * BLS12-381-specific Fr operations.
 * All take a destination pointer first; |ret| may generally alias an input
 * in the usual out-parameter style -- TODO confirm aliasing guarantees
 * against the implementation.
 */
void blst_fr_add(blst_fr *ret, const blst_fr *a, const blst_fr *b);
void blst_fr_sub(blst_fr *ret, const blst_fr *a, const blst_fr *b);
void blst_fr_mul_by_3(blst_fr *ret, const blst_fr *a);
void blst_fr_lshift(blst_fr *ret, const blst_fr *a, size_t count);
void blst_fr_rshift(blst_fr *ret, const blst_fr *a, size_t count);
void blst_fr_mul(blst_fr *ret, const blst_fr *a, const blst_fr *b);
void blst_fr_sqr(blst_fr *ret, const blst_fr *a);
/* Conditional negation: negate |a| when |flag| is set. */
void blst_fr_cneg(blst_fr *ret, const blst_fr *a, bool flag);
void blst_fr_eucl_inverse(blst_fr *ret, const blst_fr *a);
void blst_fr_inverse(blst_fr *ret, const blst_fr *a);
/* Conversions between Fr and uint64 limbs / the blst_scalar container. */
void blst_fr_from_uint64(blst_fr *ret, const uint64_t a[4]);
void blst_uint64_from_fr(uint64_t ret[4], const blst_fr *a);
void blst_fr_from_scalar(blst_fr *ret, const blst_scalar *a);
void blst_scalar_from_fr(blst_scalar *ret, const blst_fr *a);
/*
 * BLS12-381-specific Fp operations.
 */
void blst_fp_add(blst_fp *ret, const blst_fp *a, const blst_fp *b);
void blst_fp_sub(blst_fp *ret, const blst_fp *a, const blst_fp *b);
void blst_fp_mul_by_3(blst_fp *ret, const blst_fp *a);
void blst_fp_mul_by_8(blst_fp *ret, const blst_fp *a);
void blst_fp_lshift(blst_fp *ret, const blst_fp *a, size_t count);
void blst_fp_mul(blst_fp *ret, const blst_fp *a, const blst_fp *b);
void blst_fp_sqr(blst_fp *ret, const blst_fp *a);
void blst_fp_cneg(blst_fp *ret, const blst_fp *a, bool flag);
void blst_fp_eucl_inverse(blst_fp *ret, const blst_fp *a);
void blst_fp_inverse(blst_fp *ret, const blst_fp *a);
/* Square root; the bool return reports whether one exists. */
bool blst_fp_sqrt(blst_fp *ret, const blst_fp *a);
/* Conversions between Fp and uint32/uint64 limbs or 48-byte big-/little-
 * endian byte strings (48 bytes per Fq element, per the serialization
 * format described in the project README). */
void blst_fp_from_uint32(blst_fp *ret, const uint32_t a[12]);
void blst_uint32_from_fp(uint32_t ret[12], const blst_fp *a);
void blst_fp_from_uint64(blst_fp *ret, const uint64_t a[6]);
void blst_uint64_from_fp(uint64_t ret[6], const blst_fp *a);
void blst_fp_from_bendian(blst_fp *ret, const byte a[48]);
void blst_bendian_from_fp(byte ret[48], const blst_fp *a);
void blst_fp_from_lendian(blst_fp *ret, const byte a[48]);
void blst_lendian_from_fp(byte ret[48], const blst_fp *a);
/*
 * BLS12-381-specific Fp2 operations, mirroring the Fp set above.
 */
void blst_fp2_add(blst_fp2 *ret, const blst_fp2 *a, const blst_fp2 *b);
void blst_fp2_sub(blst_fp2 *ret, const blst_fp2 *a, const blst_fp2 *b);
void blst_fp2_mul_by_3(blst_fp2 *ret, const blst_fp2 *a);
void blst_fp2_mul_by_8(blst_fp2 *ret, const blst_fp2 *a);
void blst_fp2_lshift(blst_fp2 *ret, const blst_fp2 *a, size_t count);
void blst_fp2_mul(blst_fp2 *ret, const blst_fp2 *a, const blst_fp2 *b);
void blst_fp2_sqr(blst_fp2 *ret, const blst_fp2 *a);
void blst_fp2_cneg(blst_fp2 *ret, const blst_fp2 *a, bool flag);
void blst_fp2_eucl_inverse(blst_fp2 *ret, const blst_fp2 *a);
void blst_fp2_inverse(blst_fp2 *ret, const blst_fp2 *a);
bool blst_fp2_sqrt(blst_fp2 *ret, const blst_fp2 *a);
/*
 * BLS12-381-specific Fp12 operations. Fp12 is where pairing outputs
 * (elements of the multiplicative target group) live.
 */
void blst_fp12_sqr(blst_fp12 *ret, const blst_fp12 *a);
void blst_fp12_cyclotomic_sqr(blst_fp12 *ret, const blst_fp12 *a);
void blst_fp12_mul(blst_fp12 *ret, const blst_fp12 *a, const blst_fp12 *b);
/* Multiplication by a sparse Fp12 element with only the x, y and z
 * coefficients populated, as produced by line evaluation. */
void blst_fp12_mul_by_xy00z0(blst_fp12 *ret, const blst_fp12 *a,
const blst_fp6 *xy00z0);
void blst_fp12_conjugate(blst_fp12 *a); /* in place */
void blst_fp12_inverse(blst_fp12 *ret, const blst_fp12 *a);
/* caveat lector! |n| has to be non-zero and not more than 3! */
void blst_fp12_frobenius_map(blst_fp12 *ret, const blst_fp12 *a, size_t n);
bool blst_fp12_is_equal(const blst_fp12 *a, const blst_fp12 *b);
bool blst_fp12_is_one(const blst_fp12 *a);
bool blst_fp12_in_group(const blst_fp12 *a);
const blst_fp12 *blst_fp12_one(void);
#endif  // SWIG
/*
 * BLS12-381-specific point operations on E1. blst_p1 carries an explicit
 * |z| coordinate (projective representation); blst_p1_affine is the
 * two-coordinate affine form.
 */
typedef struct { blst_fp x, y, z; } blst_p1;
typedef struct { blst_fp x, y; } blst_p1_affine;
/* plain _add requires distinct inputs; _add_or_double also handles a==b */
void blst_p1_add(blst_p1 *out, const blst_p1 *a, const blst_p1 *b);
void blst_p1_add_or_double(blst_p1 *out, const blst_p1 *a, const blst_p1 *b);
void blst_p1_add_affine(blst_p1 *out, const blst_p1 *a,
const blst_p1_affine *b);
void blst_p1_add_or_double_affine(blst_p1 *out, const blst_p1 *a,
const blst_p1_affine *b);
void blst_p1_double(blst_p1 *out, const blst_p1 *a);
/* Scalar multiplication; |scalar| is a little-endian byte blob of which
 * the low |nbits| bits are used. */
void blst_p1_mult(blst_p1 *out, const blst_p1 *p, const byte *scalar,
size_t nbits);
/* Conditional in-place negation, applied when |cbit| is set. */
void blst_p1_cneg(blst_p1 *p, bool cbit);
void blst_p1_to_affine(blst_p1_affine *out, const blst_p1 *in);
void blst_p1_from_affine(blst_p1 *out, const blst_p1_affine *in);
/* |on_curve| checks the curve equation only; |in_g1| additionally performs
 * the (more expensive) subgroup check -- callers are expected to cache its
 * outcome for public keys, per the README. */
bool blst_p1_on_curve(const blst_p1 *p);
bool blst_p1_in_g1(const blst_p1 *p);
bool blst_p1_is_equal(const blst_p1 *a, const blst_p1 *b);
bool blst_p1_is_inf(const blst_p1 *a);
const blst_p1 *blst_p1_generator(void);
/* Affine counterparts of the predicates above. */
bool blst_p1_affine_on_curve(const blst_p1_affine *p);
bool blst_p1_affine_in_g1(const blst_p1_affine *p);
bool blst_p1_affine_is_equal(const blst_p1_affine *a, const blst_p1_affine *b);
bool blst_p1_affine_is_inf(const blst_p1_affine *a);
const blst_p1_affine *blst_p1_affine_generator(void);
/* E2 point operations, mirroring the E1 set above with Fp2 coordinates. */
typedef struct { blst_fp2 x, y, z; } blst_p2;
typedef struct { blst_fp2 x, y; } blst_p2_affine;
void blst_p2_add(blst_p2 *out, const blst_p2 *a, const blst_p2 *b);
void blst_p2_add_or_double(blst_p2 *out, const blst_p2 *a, const blst_p2 *b);
void blst_p2_add_affine(blst_p2 *out, const blst_p2 *a,
const blst_p2_affine *b);
void blst_p2_add_or_double_affine(blst_p2 *out, const blst_p2 *a,
const blst_p2_affine *b);
void blst_p2_double(blst_p2 *out, const blst_p2 *a);
/* |scalar| is a little-endian byte blob of which the low |nbits| bits
 * are used. */
void blst_p2_mult(blst_p2 *out, const blst_p2 *p, const byte *scalar,
size_t nbits);
void blst_p2_cneg(blst_p2 *p, bool cbit);
void blst_p2_to_affine(blst_p2_affine *out, const blst_p2 *in);
void blst_p2_from_affine(blst_p2 *out, const blst_p2_affine *in);
/* |on_curve| checks the curve equation only; |in_g2| also performs the
 * subgroup check. */
bool blst_p2_on_curve(const blst_p2 *p);
bool blst_p2_in_g2(const blst_p2 *p);
bool blst_p2_is_equal(const blst_p2 *a, const blst_p2 *b);
bool blst_p2_is_inf(const blst_p2 *a);
const blst_p2 *blst_p2_generator(void);
bool blst_p2_affine_on_curve(const blst_p2_affine *p);
bool blst_p2_affine_in_g2(const blst_p2_affine *p);
bool blst_p2_affine_is_equal(const blst_p2_affine *a, const blst_p2_affine *b);
bool blst_p2_affine_is_inf(const blst_p2_affine *a);
const blst_p2_affine *blst_p2_affine_generator(void);
/*
 * Multi-scalar multiplications and other multi-point operations.
 * Inputs are passed as arrays of pointers to points/scalars. The
 * *_sizeof helpers return the number of bytes the caller must allocate
 * for the corresponding |table| or |scratch| argument -- memory
 * management is deliberately left to the caller (see README).
 */
void blst_p1s_to_affine(blst_p1_affine dst[], const blst_p1 *const points[],
size_t npoints);
void blst_p1s_add(blst_p1 *ret, const blst_p1_affine *const points[],
size_t npoints);
/* Window-based ("wbits") multi-scalar multiplication with a caller-held
 * precomputed table. */
size_t blst_p1s_mult_wbits_precompute_sizeof(size_t wbits, size_t npoints);
void blst_p1s_mult_wbits_precompute(blst_p1_affine table[], size_t wbits,
const blst_p1_affine *const points[],
size_t npoints);
size_t blst_p1s_mult_wbits_scratch_sizeof(size_t npoints);
void blst_p1s_mult_wbits(blst_p1 *ret, const blst_p1_affine table[],
size_t wbits, size_t npoints,
const byte *const scalars[], size_t nbits,
limb_t *scratch);
/* Pippenger multi-scalar multiplication; |tile| processes one window
 * [bit0, bit0+window) and can be used to split the work, e.g. across
 * threads. */
size_t blst_p1s_mult_pippenger_scratch_sizeof(size_t npoints);
void blst_p1s_mult_pippenger(blst_p1 *ret, const blst_p1_affine *const points[],
size_t npoints, const byte *const scalars[],
size_t nbits, limb_t *scratch);
void blst_p1s_tile_pippenger(blst_p1 *ret, const blst_p1_affine *const points[],
size_t npoints, const byte *const scalars[],
size_t nbits, limb_t *scratch,
size_t bit0, size_t window);
/* E2 counterparts of the above. */
void blst_p2s_to_affine(blst_p2_affine dst[], const blst_p2 *const points[],
size_t npoints);
void blst_p2s_add(blst_p2 *ret, const blst_p2_affine *const points[],
size_t npoints);
size_t blst_p2s_mult_wbits_precompute_sizeof(size_t wbits, size_t npoints);
void blst_p2s_mult_wbits_precompute(blst_p2_affine table[], size_t wbits,
const blst_p2_affine *const points[],
size_t npoints);
size_t blst_p2s_mult_wbits_scratch_sizeof(size_t npoints);
void blst_p2s_mult_wbits(blst_p2 *ret, const blst_p2_affine table[],
size_t wbits, size_t npoints,
const byte *const scalars[], size_t nbits,
limb_t *scratch);
size_t blst_p2s_mult_pippenger_scratch_sizeof(size_t npoints);
void blst_p2s_mult_pippenger(blst_p2 *ret, const blst_p2_affine *const points[],
size_t npoints, const byte *const scalars[],
size_t nbits, limb_t *scratch);
void blst_p2s_tile_pippenger(blst_p2 *ret, const blst_p2_affine *const points[],
size_t npoints, const byte *const scalars[],
size_t nbits, limb_t *scratch,
size_t bit0, size_t window);
/*
 * Hash-to-curve operations, per the IETF hash-to-curve specification
 * (see README). |hash| and |encode| differ as defined there; |DST| is
 * the domain separation tag, and |aug| an optional message augmentation
 * prefix. DEFNULL-marked arguments may be omitted/NULL.
 */
#ifndef SWIG
void blst_map_to_g1(blst_p1 *out, const blst_fp *u, const blst_fp *v DEFNULL);
void blst_map_to_g2(blst_p2 *out, const blst_fp2 *u, const blst_fp2 *v DEFNULL);
#endif
void blst_encode_to_g1(blst_p1 *out,
const byte *msg, size_t msg_len,
const byte *DST DEFNULL, size_t DST_len DEFNULL,
const byte *aug DEFNULL, size_t aug_len DEFNULL);
void blst_hash_to_g1(blst_p1 *out,
const byte *msg, size_t msg_len,
const byte *DST DEFNULL, size_t DST_len DEFNULL,
const byte *aug DEFNULL, size_t aug_len DEFNULL);
void blst_encode_to_g2(blst_p2 *out,
const byte *msg, size_t msg_len,
const byte *DST DEFNULL, size_t DST_len DEFNULL,
const byte *aug DEFNULL, size_t aug_len DEFNULL);
void blst_hash_to_g2(blst_p2 *out,
const byte *msg, size_t msg_len,
const byte *DST DEFNULL, size_t DST_len DEFNULL,
const byte *aug DEFNULL, size_t aug_len DEFNULL);
/*
 * Zcash-compatible serialization/deserialization. Sizes follow the format
 * described in the README: G1 is 96 bytes uncompressed / 48 compressed,
 * G2 is 192 / 96. Deserialization returns BLST_ERROR, e.g.
 * BLST_BAD_ENCODING or BLST_POINT_NOT_ON_CURVE.
 */
void blst_p1_serialize(byte out[96], const blst_p1 *in);
void blst_p1_compress(byte out[48], const blst_p1 *in);
void blst_p1_affine_serialize(byte out[96], const blst_p1_affine *in);
void blst_p1_affine_compress(byte out[48], const blst_p1_affine *in);
BLST_ERROR blst_p1_uncompress(blst_p1_affine *out, const byte in[48]);
BLST_ERROR blst_p1_deserialize(blst_p1_affine *out, const byte in[96]);
void blst_p2_serialize(byte out[192], const blst_p2 *in);
void blst_p2_compress(byte out[96], const blst_p2 *in);
void blst_p2_affine_serialize(byte out[192], const blst_p2_affine *in);
void blst_p2_affine_compress(byte out[96], const blst_p2_affine *in);
BLST_ERROR blst_p2_uncompress(blst_p2_affine *out, const byte in[96]);
BLST_ERROR blst_p2_deserialize(blst_p2_affine *out, const byte in[192]);
/*
 * Specification defines two variants, 'minimal-signature-size' and
 * 'minimal-pubkey-size'. To unify appearance we choose to distinguish
 * them by suffix referring to the public key type, more specifically
 * _pk_in_g1 corresponds to 'minimal-pubkey-size' and _pk_in_g2 - to
 * 'minimal-signature-size'. It might appear a bit counterintuitive
 * in sign call, but no matter how you twist it, something is bound to
 * turn a little odd.
 */
/*
 * Secret-key operations. blst_keygen follows the IETF BLS signature
 * draft's KeyGen (see README); |IKM| is the input keying material and
 * |info| an optional context string. Note that blst_sign_* operates on
 * a point already produced by blst_hash_to_* / blst_encode_to_*, not on
 * a raw message.
 */
void blst_keygen(blst_scalar *out_SK, const byte *IKM, size_t IKM_len,
const byte *info DEFNULL, size_t info_len DEFNULL);
void blst_sk_to_pk_in_g1(blst_p1 *out_pk, const blst_scalar *SK);
void blst_sign_pk_in_g1(blst_p2 *out_sig, const blst_p2 *hash,
const blst_scalar *SK);
void blst_sk_to_pk_in_g2(blst_p2 *out_pk, const blst_scalar *SK);
void blst_sign_pk_in_g2(blst_p1 *out_sig, const blst_p1 *hash,
const blst_scalar *SK);
/*
 * Pairing interface. The low-level entry points below expose the Miller
 * loop and final exponentiation directly; most applications use the
 * blst_pairing context API further down instead.
 */
#ifndef SWIG
void blst_miller_loop(blst_fp12 *ret, const blst_p2_affine *Q,
const blst_p1_affine *P);
/* n-way batched Miller loop over parallel arrays of Q/P pointers. */
void blst_miller_loop_n(blst_fp12 *ret, const blst_p2_affine *const Qs[],
const blst_p1_affine *const Ps[],
size_t n);
void blst_final_exp(blst_fp12 *ret, const blst_fp12 *f);
/* Precompute the 68 line coefficients for a fixed Q, then reuse them
 * across multiple Miller loops with different P. */
void blst_precompute_lines(blst_fp6 Qlines[68], const blst_p2_affine *Q);
void blst_miller_loop_lines(blst_fp12 *ret, const blst_fp6 Qlines[68],
const blst_p1_affine *P);
bool blst_fp12_finalverify(const blst_fp12 *gt1, const blst_fp12 *gt2);
#endif
/*
 * blst_pairing is an opaque accumulation context; callers learn its size
 * via blst_pairing_sizeof and allocate it themselves (see README). The
 * cgo/bindgen shims below only exist to keep those generators happy.
 */
#ifdef __BLST_CGO__
typedef limb_t blst_pairing;
#elif defined(__BLST_RUST_BINDGEN__)
typedef struct {} blst_pairing;
#else
typedef struct blst_opaque blst_pairing;
#endif
/*
 * Pairing context API. Usage pattern per the README: init, one or more
 * aggregate calls (signature may be NULL after the first), commit, then
 * finalverify. aggregate/commit return BLST_ERROR; finalverify returns
 * bool. Public keys are NOT group-checked by the plain aggregate calls --
 * that is the caller's responsibility (use the chk_n_* variants to
 * request checks explicitly).
 */
size_t blst_pairing_sizeof(void);
void blst_pairing_init(blst_pairing *new_ctx, bool hash_or_encode,
const byte *DST DEFNULL, size_t DST_len DEFNULL);
const byte *blst_pairing_get_dst(const blst_pairing *ctx);
void blst_pairing_commit(blst_pairing *ctx);
BLST_ERROR blst_pairing_aggregate_pk_in_g2(blst_pairing *ctx,
const blst_p2_affine *PK,
const blst_p1_affine *signature,
const byte *msg, size_t msg_len,
const byte *aug DEFNULL,
size_t aug_len DEFNULL);
/* As above, but with explicit bool flags requesting group checks on the
 * public key and/or signature. */
BLST_ERROR blst_pairing_chk_n_aggr_pk_in_g2(blst_pairing *ctx,
const blst_p2_affine *PK,
bool pk_grpchk,
const blst_p1_affine *signature,
bool sig_grpchk,
const byte *msg, size_t msg_len,
const byte *aug DEFNULL,
size_t aug_len DEFNULL);
/* mul_n variants additionally weight the contribution by |scalar| of
 * |nbits| bits. */
BLST_ERROR blst_pairing_mul_n_aggregate_pk_in_g2(blst_pairing *ctx,
const blst_p2_affine *PK,
const blst_p1_affine *sig,
const byte *scalar,
size_t nbits,
const byte *msg,
size_t msg_len,
const byte *aug DEFNULL,
size_t aug_len DEFNULL);
BLST_ERROR blst_pairing_chk_n_mul_n_aggr_pk_in_g2(blst_pairing *ctx,
const blst_p2_affine *PK,
bool pk_grpchk,
const blst_p1_affine *sig,
bool sig_grpchk,
const byte *scalar,
size_t nbits,
const byte *msg,
size_t msg_len,
const byte *aug DEFNULL,
size_t aug_len DEFNULL);
/* _pk_in_g1 counterparts of the four calls above. */
BLST_ERROR blst_pairing_aggregate_pk_in_g1(blst_pairing *ctx,
const blst_p1_affine *PK,
const blst_p2_affine *signature,
const byte *msg, size_t msg_len,
const byte *aug DEFNULL,
size_t aug_len DEFNULL);
BLST_ERROR blst_pairing_chk_n_aggr_pk_in_g1(blst_pairing *ctx,
const blst_p1_affine *PK,
bool pk_grpchk,
const blst_p2_affine *signature,
bool sig_grpchk,
const byte *msg, size_t msg_len,
const byte *aug DEFNULL,
size_t aug_len DEFNULL);
BLST_ERROR blst_pairing_mul_n_aggregate_pk_in_g1(blst_pairing *ctx,
const blst_p1_affine *PK,
const blst_p2_affine *sig,
const byte *scalar,
size_t nbits,
const byte *msg,
size_t msg_len,
const byte *aug DEFNULL,
size_t aug_len DEFNULL);
BLST_ERROR blst_pairing_chk_n_mul_n_aggr_pk_in_g1(blst_pairing *ctx,
const blst_p1_affine *PK,
bool pk_grpchk,
const blst_p2_affine *sig,
bool sig_grpchk,
const byte *scalar,
size_t nbits,
const byte *msg,
size_t msg_len,
const byte *aug DEFNULL,
size_t aug_len DEFNULL);
/* Combine a context produced by another thread into |ctx|, enabling
 * parallel accumulation as threads join (see README). */
BLST_ERROR blst_pairing_merge(blst_pairing *ctx, const blst_pairing *ctx1);
/* |gtsig| is an optional pre-aggregated signature in Fp12, e.g. from
 * blst_aggregated_in_g2; pass NULL when signatures were fed to the
 * aggregate calls directly. */
bool blst_pairing_finalverify(const blst_pairing *ctx,
const blst_fp12 *gtsig DEFNULL);
/*
* Customarily applications aggregate signatures separately.
* In which case application would have to pass NULLs for |signature|
* to blst_pairing_aggregate calls and pass aggregated signature
* collected with these calls to blst_pairing_finalverify. Inputs are
* Zcash-compatible "straight-from-wire" byte vectors, compressed or
* not.
*/
BLST_ERROR blst_aggregate_in_g1(blst_p1 *out, const blst_p1 *in,
const byte *zwire);
BLST_ERROR blst_aggregate_in_g2(blst_p2 *out, const blst_p2 *in,
const byte *zwire);
void blst_aggregated_in_g1(blst_fp12 *out, const blst_p1_affine *signature);
void blst_aggregated_in_g2(blst_fp12 *out, const blst_p2_affine *signature);
/*
* "One-shot" CoreVerify entry points.
*/
BLST_ERROR blst_core_verify_pk_in_g1(const blst_p1_affine *pk,
const blst_p2_affine *signature,
bool hash_or_encode,
const byte *msg, size_t msg_len,
const byte *DST DEFNULL,
size_t DST_len DEFNULL,
const byte *aug DEFNULL,
size_t aug_len DEFNULL);
BLST_ERROR blst_core_verify_pk_in_g2(const blst_p2_affine *pk,
const blst_p1_affine *signature,
bool hash_or_encode,
const byte *msg, size_t msg_len,
const byte *DST DEFNULL,
size_t DST_len DEFNULL,
const byte *aug DEFNULL,
size_t aug_len DEFNULL);
extern const blst_p1_affine BLS12_381_G1;
extern const blst_p1_affine BLS12_381_NEG_G1;
extern const blst_p2_affine BLS12_381_G2;
extern const blst_p2_affine BLS12_381_NEG_G2;
#include "blst_aux.h"
#ifdef __cplusplus
}
#elif defined(__blst_h_bool__)
# undef __blst_h_bool__
# undef bool
#endif
#endif
================================================
FILE: bindings/blst.hpp
================================================
/*
* Copyright Supranational LLC
* Licensed under the Apache License, Version 2.0, see LICENSE for details.
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef __BLST_HPP__
#define __BLST_HPP__
#if !defined(SWIG) && __cplusplus < 201103L \
&& (!defined(_MSVC_LANG) || _MSVC_LANG < 201103L)
# error "C++11 or later is required to compile /bindings/blst.hpp"
#endif
#include <cstring>
#include <memory>
#include <string>
#include <vector>
namespace blst {
#ifdef __clang__
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wextern-c-compat"
#endif
#include "blst.h"
#ifdef __clang__
# pragma GCC diagnostic pop
#endif
struct bytes_t {
const byte* ptr;
size_t len;
bytes_t() = default;
bytes_t(const byte* p, size_t l) : ptr{p}, len{l} {}
template class C, typename T>
bytes_t(const C& c)
{
static_assert(sizeof(T) == 1, "unsupported type");
ptr = reinterpret_cast(c.data());
len = c.size();
}
template class C, typename T, size_t N>
bytes_t(const C& c)
{
static_assert(sizeof(T) == 1, "unsupported type");
ptr = reinterpret_cast(c.data());
len = c.size();
}
};
class P1_Affine;
class P1;
class P2_Affine;
class P2;
class Pairing;
/* View an opaque pointer (e.g. std::string::data()) as bytes for the
 * C API. Restores the static_cast target stripped by extraction. */
inline const byte *C_bytes(const void *ptr)
{   return static_cast<const byte*>(ptr);   }
/*
* As for SecretKey being struct and not class, and lack of constructors
* with one accepting for example |IKM|. We can't make assumptions about
* application's policy toward handling secret key material. Hence it's
* argued that application is entitled for transparent structure, not
* opaque or semi-opaque class. And in the context it's appropriate not
* to "entice" developers with idiomatic constructors:-) Though this
* doesn't really apply to SWIG-assisted interfaces...
*/
struct SecretKey {
#ifdef SWIG
private:
#endif
    blst_scalar key;   // raw scalar; deliberately transparent, see above

#ifdef SWIG
public:
#endif
#ifndef SWIG
    /* Derive the key from input keying material |IKM| with optional
     * |info|. The v3/v4_5/v5 variants call the correspondingly named
     * C entry points, which track successive revisions of the KeyGen
     * procedure — see blst.h for the exact semantics. */
    void keygen(const byte* IKM, size_t IKM_len,
                const std::string& info = "")
    {   blst_keygen(&key, IKM, IKM_len, C_bytes(info.data()), info.size());   }
    void keygen_v3(const byte* IKM, size_t IKM_len,
                   const std::string& info = "")
    {   blst_keygen_v3(&key, IKM, IKM_len, C_bytes(info.data()), info.size());   }
    void keygen_v4_5(const byte* IKM, size_t IKM_len,
                     const byte* salt, size_t salt_len,
                     const std::string& info = "")
    {   blst_keygen_v4_5(&key, IKM, IKM_len, salt, salt_len,
                         C_bytes(info.data()), info.size());
    }
    void keygen_v5(const byte* IKM, size_t IKM_len,
                   const byte* salt, size_t salt_len,
                   const std::string& info = "")
    {   blst_keygen_v5(&key, IKM, IKM_len, salt, salt_len,
                       C_bytes(info.data()), info.size());
    }
#endif
    /* bytes_t conveniences forwarding to the pointer overloads. */
    void keygen(bytes_t IKM, const std::string& info = "")
    {   keygen(IKM.ptr, IKM.len, info);   }
    void keygen_v3(bytes_t IKM, const std::string& info = "")
    {   keygen_v3(IKM.ptr, IKM.len, info);   }
    void keygen_v4_5(bytes_t IKM, bytes_t salt, const std::string& info = "")
    {   keygen_v4_5(IKM.ptr, IKM.len, salt.ptr, salt.len, info);   }
    void keygen_v5(bytes_t IKM, bytes_t salt, const std::string& info = "")
    {   keygen_v5(IKM.ptr, IKM.len, salt.ptr, salt.len, info);   }

    /* EIP-2333 hierarchical key derivation. */
    void derive_master_eip2333(const byte* IKM, size_t IKM_len)
    {   blst_derive_master_eip2333(&key, IKM, IKM_len);   }
    void derive_child_eip2333(const SecretKey& SK, unsigned int child_index)
    {   blst_derive_child_eip2333(&key, &SK.key, child_index);   }

    /* 32-byte big/little-endian import and export. */
    void from_bendian(const byte in[32]) { blst_scalar_from_bendian(&key, in); }
    void from_lendian(const byte in[32]) { blst_scalar_from_lendian(&key, in); }
    void to_bendian(byte out[32]) const
    {   blst_bendian_from_scalar(out, &key);   }
    void to_lendian(byte out[32]) const
    {   blst_lendian_from_scalar(out, &key);   }
};
/*
 * General-purpose scalar with checked modular arithmetic helpers.
 */
class Scalar {
private:
    blst_scalar val;

public:
    /* Zero-initialized scalar. */
    Scalar() { memset(&val, 0, sizeof(val)); }
    /* Import a little-endian byte string of |nbits| bits. */
    Scalar(const byte* scalar, size_t nbits)
    {   blst_scalar_from_le_bytes(&val, scalar, (nbits+7)/8);   }
#ifndef SWIG
    Scalar(const byte *msg, size_t msg_len, const std::string& DST)
    {   (void)hash_to(msg, msg_len, DST);   }
    /* Hash |msg| to a scalar: expand_message_xmd to 48 bytes, then
     * feed the result big-endian to blst_scalar_from_be_bytes. */
    Scalar* hash_to(const byte *msg, size_t msg_len, const std::string& DST = "")
    {   byte elem[48];
        blst_expand_message_xmd(elem, sizeof(elem), msg, msg_len,
                                C_bytes(DST.data()), DST.size());
        blst_scalar_from_be_bytes(&val, elem, sizeof(elem));
        return this;
    }
#endif
    Scalar(bytes_t msg, const std::string& DST)
    {   (void)hash_to(msg.ptr, msg.len, DST);   }
    Scalar* hash_to(bytes_t msg, const std::string& DST = "")
    {   return hash_to(msg.ptr, msg.len, DST);   }

    Scalar dup() const { return *this; }
    Scalar* from_bendian(const byte *msg, size_t msg_len)
    {   blst_scalar_from_be_bytes(&val, msg, msg_len); return this;   }
    Scalar* from_lendian(const byte *msg, size_t msg_len)
    {   blst_scalar_from_le_bytes(&val, msg, msg_len); return this;   }
    void to_bendian(byte out[32]) const
    {   blst_bendian_from_scalar(out, &val);   }
    void to_lendian(byte out[32]) const
    {   blst_lendian_from_scalar(out, &val);   }

    /* Checked arithmetic: each throws BLST_BAD_SCALAR when the
     * corresponding blst_sk_*_n_check call reports failure. */
    Scalar* add(const Scalar& a)
    {   if (!blst_sk_add_n_check(&val, &val, a))
            throw BLST_BAD_SCALAR;
        return this;
    }
    Scalar* add(const SecretKey& a)
    {   if (!blst_sk_add_n_check(&val, &val, &a.key))
            throw BLST_BAD_SCALAR;
        return this;
    }
    Scalar* sub(const Scalar& a)
    {   if (!blst_sk_sub_n_check(&val, &val, a))
            throw BLST_BAD_SCALAR;
        return this;
    }
    Scalar* mul(const Scalar& a)
    {   if (!blst_sk_mul_n_check(&val, &val, a))
            throw BLST_BAD_SCALAR;
        return this;
    }
    Scalar* inverse()
    {   blst_sk_inverse(&val, &val); return this;   }

private:
    friend class P1;
    friend class P2;
    /* Implicit bridges to the C API. */
    operator const blst_scalar*() const { return &val; }
    operator const byte*() const { return val.b; }
};
/*
 * Affine-coordinate point in the G1 group.
 */
class P1_Affine {
private:
    blst_p1_affine point;

    P1_Affine(const blst_p1_affine *cptr) { point = *cptr; }

public:
    /* Zero-initialized point. */
    P1_Affine() { memset(&point, 0, sizeof(point)); }
#ifndef SWIG
    /* Deserialize; throws BLST_ERROR on malformed input. */
    P1_Affine(const byte *in)
    {   BLST_ERROR err = blst_p1_deserialize(&point, in);
        if (err != BLST_SUCCESS)
            throw err;
    }
#endif
    /* Deserialize with length check: the top bit of the first byte
     * flags the 48-byte compressed encoding (vs. 96-byte serialized). */
    P1_Affine(const byte *in, size_t len)
    {   if (len == 0 || len != (in[0]&0x80 ? 48 : 96))
            throw BLST_BAD_ENCODING;
        BLST_ERROR err = blst_p1_deserialize(&point, in);
        if (err != BLST_SUCCESS)
            throw err;
    }
    P1_Affine(const P1& jacobian);   // defined out-of-line below

    P1_Affine dup() const { return *this; }
    P1 to_jacobian() const;
    void serialize(byte out[96]) const
    {   blst_p1_affine_serialize(out, &point);   }
    void compress(byte out[48]) const
    {   blst_p1_affine_compress(out, &point);   }
    bool on_curve() const { return blst_p1_affine_on_curve(&point); }
    bool in_group() const { return blst_p1_affine_in_g1(&point); }
    bool is_inf() const { return blst_p1_affine_is_inf(&point); }
    bool is_equal(const P1_Affine& p) const
    {   return blst_p1_affine_is_equal(&point, &p.point);   }
#ifndef SWIG
    /* CoreVerify with |this| as the G1 signature and |pk| in G2. */
    BLST_ERROR core_verify(const P2_Affine& pk, bool hash_or_encode,
                           const byte* msg, size_t msg_len,
                           const std::string& DST = "",
                           const byte* aug = nullptr, size_t aug_len = 0) const;
#endif
    BLST_ERROR core_verify(const P2_Affine& pk, bool hash_or_encode,
                           bytes_t msg, const std::string& DST = "",
                           bytes_t aug = {nullptr, 0}) const
    {   return core_verify(pk, hash_or_encode, msg.ptr, msg.len, DST,
                           aug.ptr, aug.len);
    }
    static P1_Affine generator()
    {   return P1_Affine(blst_p1_affine_generator());   }

private:
    friend class Pairing;
    friend class P2_Affine;
    friend class PT;
    friend class P1;
    friend class P1_Affines;
    /* Implicit bridges to the C API. */
    operator const blst_p1_affine*() const { return &point; }
    operator blst_p1_affine*() { return &point; }
};
/*
 * Jacobian-coordinate point in the G1 group.
 */
class P1 {
private:
    blst_p1 point;

    P1(const blst_p1 *cptr) { point = *cptr; }

public:
    /* Zero-initialized point. */
    P1() { memset(&point, 0, sizeof(point)); }
    /* Public key in G1 corresponding to |sk|. */
    P1(const SecretKey& sk) { blst_sk_to_pk_in_g1(&point, &sk.key); }
#ifndef SWIG
    /* Deserialize; throws BLST_ERROR on malformed input. */
    P1(const byte *in)
    {   blst_p1_affine a;
        BLST_ERROR err = blst_p1_deserialize(&a, in);
        if (err != BLST_SUCCESS)
            throw err;
        blst_p1_from_affine(&point, &a);
    }
#endif
    /* Deserialize with length check: top bit of the first byte flags
     * the 48-byte compressed encoding (vs. 96-byte serialized). */
    P1(const byte *in, size_t len)
    {   if (len == 0 || len != (in[0]&0x80 ? 48 : 96))
            throw BLST_BAD_ENCODING;
        blst_p1_affine a;
        BLST_ERROR err = blst_p1_deserialize(&a, in);
        if (err != BLST_SUCCESS)
            throw err;
        blst_p1_from_affine(&point, &a);
    }
    P1(const P1_Affine& affine) { blst_p1_from_affine(&point, affine); }

    P1 dup() const { return *this; }
    P1_Affine to_affine() const { return P1_Affine(*this); }
    void serialize(byte out[96]) const { blst_p1_serialize(out, &point); }
    void compress(byte out[48]) const { blst_p1_compress(out, &point); }
    bool on_curve() const { return blst_p1_on_curve(&point); }
    bool in_group() const { return blst_p1_in_g1(&point); }
    bool is_inf() const { return blst_p1_is_inf(&point); }
    bool is_equal(const P1& p) const
    {   return blst_p1_is_equal(&point, &p.point);   }
    /* Accumulate |in| after a mandatory G1 group check; throws
     * BLST_POINT_NOT_IN_GROUP on failure. */
    void aggregate(const P1_Affine& in)
    {   if (blst_p1_affine_in_g1(in))
            blst_p1_add_or_double_affine(&point, &point, in);
        else
            throw BLST_POINT_NOT_IN_GROUP;
    }
    /* Scalar-multiply in place by the secret key; the pk_in_g2 entry
     * point is used because a G1 signature pairs with G2 public keys. */
    P1* sign_with(const SecretKey& sk)
    {   blst_sign_pk_in_g2(&point, &point, &sk.key); return this;   }
    P1* sign_with(const Scalar& scalar)
    {   blst_sign_pk_in_g2(&point, &point, scalar); return this;   }
    /* hash_to/encode_to map |msg| (with optional |aug| prefix) onto
     * G1 per the ciphersuite's DST. */
    P1* hash_to(bytes_t msg, const std::string& DST = "",
                bytes_t aug = {nullptr, 0})
    {   blst_hash_to_g1(&point, msg.ptr, msg.len, C_bytes(DST.data()), DST.size(),
                        aug.ptr, aug.len);
        return this;
    }
    P1* encode_to(bytes_t msg, const std::string& DST = "",
                  bytes_t aug = {nullptr, 0})
    {   blst_encode_to_g1(&point, msg.ptr, msg.len, C_bytes(DST.data()), DST.size(),
                          aug.ptr, aug.len);
        return this;
    }
#ifndef SWIG
    P1* hash_to(const byte* msg, size_t msg_len,
                const std::string& DST = "",
                const byte* aug = nullptr, size_t aug_len = 0)
    {   blst_hash_to_g1(&point, msg, msg_len, C_bytes(DST.data()), DST.size(),
                        aug, aug_len);
        return this;
    }
    P1* encode_to(const byte* msg, size_t msg_len,
                  const std::string& DST = "",
                  const byte* aug = nullptr, size_t aug_len = 0)
    {   blst_encode_to_g1(&point, msg, msg_len, C_bytes(DST.data()), DST.size(),
                          aug, aug_len);
        return this;
    }
#endif
    P1* mult(const byte* scalar, size_t nbits)
    {   blst_p1_mult(&point, &point, scalar, nbits); return this;   }
    /* 255 bits suffice for any scalar below the BLS12-381 group order. */
    P1* mult(const Scalar& scalar)
    {   blst_p1_mult(&point, &point, scalar, 255); return this;   }
    P1* cneg(bool flag)
    {   blst_p1_cneg(&point, flag); return this;   }
    P1* neg()
    {   blst_p1_cneg(&point, true); return this;   }
    P1* add(const P1& a)
    {   blst_p1_add_or_double(&point, &point, a); return this;   }
    P1* add(const P1_Affine &a)
    {   blst_p1_add_or_double_affine(&point, &point, a); return this;   }
    P1* dbl()
    {   blst_p1_double(&point, &point); return this;   }
#ifndef SWIG
    static P1 add(const P1& a, const P1& b)
    {   P1 ret; blst_p1_add_or_double(ret, a, b); return ret;   }
    static P1 add(const P1& a, const P1_Affine& b)
    {   P1 ret; blst_p1_add_or_double_affine(ret, a, b); return ret;   }
    static P1 dbl(const P1& a)
    {   P1 ret; blst_p1_double(ret, a); return ret;   }
#endif
    static P1 generator()
    {   return P1(blst_p1_generator());   }

private:
    friend class P1_Affine;
    friend class P1_Affines;
    /* Implicit bridges to the C API. */
    operator const blst_p1*() const { return &point; }
    operator blst_p1*() { return &point; }
};
class P1_Affines {
private:
struct p1_affine_no_init {
blst_p1_affine point;
p1_affine_no_init() { }
operator blst_p1_affine*() { return &point; }
operator const blst_p1_affine*() const { return &point; }
};
std::vector table;
size_t wbits, npoints;
public:
#ifndef SWIG
P1_Affines() {}
P1_Affines(size_t wbits, const P1_Affine* const points[], size_t npoints)
{ this->wbits = wbits;
this->npoints = npoints;
table.resize(npoints << (wbits-1));
blst_p1s_mult_wbits_precompute(table.at(0), wbits,
reinterpret_cast(points),
npoints);
}
P1_Affines(size_t wbits, const P1_Affine points[], size_t npoints)
{ const P1_Affine* const ptrs[2] = { points, nullptr };
P1_Affines(wbits, ptrs, npoints);
}
P1_Affines(size_t wbits, const std::vector& points)
{ P1_Affines(wbits, &points.at(0), points.size()); }
P1_Affines(size_t wbits, const P1* const points[], size_t npoints)
{ size_t cap = npoints << (wbits-1);
this->wbits = wbits;
this->npoints = npoints;
table.resize(cap);
blst_p1s_to_affine(table.at(cap-npoints),
reinterpret_cast(points),
npoints);
const blst_p1_affine* const ptrs[2] = { table[cap-npoints], nullptr };
blst_p1s_mult_wbits_precompute(table[0], wbits, ptrs, npoints);
}
P1_Affines(size_t wbits, const P1 points[], size_t npoints)
{ const P1* const ptrs[2] = { points, nullptr };
P1_Affines(wbits, ptrs, npoints);
}
P1_Affines(size_t wbits, const std::vector& points)
{ P1_Affines(wbits, &points.at(0), points.size()); }
P1_Affines(const P1* const points[], size_t npoints)
{ this->wbits = 0;
this->npoints = npoints;
table.resize(npoints);
blst_p1s_to_affine(table.at(0),
reinterpret_cast(points),
npoints);
}
P1_Affines(const P1 points[], size_t npoints)
{ const P1* const ptrs[2] = { points, nullptr };
P1_Affines(ptrs, npoints);
}
P1_Affines(const std::vector& points)
{ P1_Affines(&points.at(0), points.size()); }
P1 mult(const byte* const scalars[], size_t nbits) const
{ P1 ret;
if (wbits != 0) {
size_t sz = blst_p1s_mult_wbits_scratch_sizeof(npoints);
std::unique_ptr scratch{new limb_t[sz/sizeof(limb_t)]};
blst_p1s_mult_wbits(ret, table.at(0), wbits, npoints,
scalars, nbits, scratch.get());
} else {
size_t sz = blst_p1s_mult_pippenger_scratch_sizeof(npoints);
std::unique_ptr scratch{new limb_t[sz/sizeof(limb_t)]};
const blst_p1_affine* const ptrs[2] = { table.at(0), nullptr };
blst_p1s_mult_pippenger(ret, ptrs, npoints,
scalars, nbits, scratch.get());
}
return ret;
}
static std::vector from(const P1* const points[], size_t npoints)
{ std::vector ret;
ret.resize(npoints);
blst_p1s_to_affine(reinterpret_cast(&ret.at(0)),
reinterpret_cast(points),
npoints);
return ret;
}
static std::vector from(const P1 points[], size_t npoints)
{ const P1* const ptrs[2] = { points, nullptr };
return from(ptrs, npoints);
}
static std::vector from(const std::vector& points)
{ return from(&points.at(0), points.size()); }
#endif
static P1 mult_pippenger(const P1_Affine* const points[], size_t npoints,
const byte* const scalars[], size_t nbits)
{ P1 ret;
size_t sz = blst_p1s_mult_pippenger_scratch_sizeof(npoints);
std::unique_ptr scratch{new limb_t[sz/sizeof(limb_t)]};
blst_p1s_mult_pippenger(ret,
reinterpret_cast(points),
npoints, scalars, nbits, scratch.get());
return ret;
}
#ifndef SWIG
static P1 mult_pippenger(const P1_Affine points[], size_t npoints,
const byte* const scalars[], size_t nbits)
{ const P1_Affine* const ptrs[2] = { points, nullptr };
return mult_pippenger(ptrs, npoints, scalars, nbits);
}
static P1 mult_pippenger(const std::vector& points,
const byte* const scalars[], size_t nbits)
{ return mult_pippenger(&points.at(0), points.size(), scalars, nbits); }
#endif
static P1 add(const P1_Affine* const points[], size_t npoints)
{ P1 ret;
blst_p1s_add(ret,
reinterpret_cast(points),
npoints);
return ret;
}
#ifndef SWIG
static P1 add(const P1_Affine points[], size_t npoints)
{ const P1_Affine* const ptrs[2] = { points, nullptr };
return add(ptrs, npoints);
}
static P1 add(const std::vector& points)
{ return add(&points.at(0), points.size()); }
#endif
};
/*
 * Affine-coordinate point in the G2 group.
 */
class P2_Affine {
private:
    blst_p2_affine point;

    P2_Affine(const blst_p2_affine *cptr) { point = *cptr; }

public:
    /* Zero-initialized point. */
    P2_Affine() { memset(&point, 0, sizeof(point)); }
#ifndef SWIG
    /* Deserialize; throws BLST_ERROR on malformed input. */
    P2_Affine(const byte *in)
    {   BLST_ERROR err = blst_p2_deserialize(&point, in);
        if (err != BLST_SUCCESS)
            throw err;
    }
#endif
    /* Deserialize with length check: top bit of the first byte flags
     * the 96-byte compressed encoding (vs. 192-byte serialized). */
    P2_Affine(const byte *in, size_t len)
    {   if (len == 0 || len != (in[0]&0x80 ? 96 : 192))
            throw BLST_BAD_ENCODING;
        BLST_ERROR err = blst_p2_deserialize(&point, in);
        if (err != BLST_SUCCESS)
            throw err;
    }
    P2_Affine(const P2& jacobian);   // defined out-of-line below

    P2_Affine dup() const { return *this; }
    P2 to_jacobian() const;
    void serialize(byte out[192]) const
    {   blst_p2_affine_serialize(out, &point);   }
    void compress(byte out[96]) const
    {   blst_p2_affine_compress(out, &point);   }
    bool on_curve() const { return blst_p2_affine_on_curve(&point); }
    bool in_group() const { return blst_p2_affine_in_g2(&point); }
    bool is_inf() const { return blst_p2_affine_is_inf(&point); }
    bool is_equal(const P2_Affine& p) const
    {   return blst_p2_affine_is_equal(&point, &p.point);   }
#ifndef SWIG
    /* CoreVerify with |this| as the G2 signature and |pk| in G1. */
    BLST_ERROR core_verify(const P1_Affine& pk, bool hash_or_encode,
                           const byte* msg, size_t msg_len,
                           const std::string& DST = "",
                           const byte* aug = nullptr, size_t aug_len = 0) const;
#endif
    BLST_ERROR core_verify(const P1_Affine& pk, bool hash_or_encode,
                           bytes_t msg, const std::string& DST = "",
                           bytes_t aug = {nullptr, 0}) const
    {   return core_verify(pk, hash_or_encode, msg.ptr, msg.len, DST,
                           aug.ptr, aug.len);
    }
    static P2_Affine generator()
    {   return P2_Affine(blst_p2_affine_generator());   }

private:
    friend class Pairing;
    friend class P1_Affine;
    friend class PT;
    friend class P2;
    friend class P2_Affines;
    /* Implicit bridges to the C API. */
    operator const blst_p2_affine*() const { return &point; }
    operator blst_p2_affine*() { return &point; }
};
/*
 * Jacobian-coordinate point in the G2 group; mirrors class P1.
 */
class P2 {
private:
    blst_p2 point;

    P2(const blst_p2 *cptr) { point = *cptr; }

public:
    /* Zero-initialized point. */
    P2() { memset(&point, 0, sizeof(point)); }
    /* Public key in G2 corresponding to |sk|. */
    P2(const SecretKey& sk) { blst_sk_to_pk_in_g2(&point, &sk.key); }
#ifndef SWIG
    /* Deserialize; throws BLST_ERROR on malformed input. */
    P2(const byte *in)
    {   blst_p2_affine a;
        BLST_ERROR err = blst_p2_deserialize(&a, in);
        if (err != BLST_SUCCESS)
            throw err;
        blst_p2_from_affine(&point, &a);
    }
#endif
    /* Deserialize with length check: top bit of the first byte flags
     * the 96-byte compressed encoding (vs. 192-byte serialized). */
    P2(const byte *in, size_t len)
    {   if (len == 0 || len != (in[0]&0x80 ? 96 : 192))
            throw BLST_BAD_ENCODING;
        blst_p2_affine a;
        BLST_ERROR err = blst_p2_deserialize(&a, in);
        if (err != BLST_SUCCESS)
            throw err;
        blst_p2_from_affine(&point, &a);
    }
    P2(const P2_Affine& affine) { blst_p2_from_affine(&point, affine); }

    P2 dup() const { return *this; }
    P2_Affine to_affine() const { return P2_Affine(*this); }
    void serialize(byte out[192]) const { blst_p2_serialize(out, &point); }
    void compress(byte out[96]) const { blst_p2_compress(out, &point); }
    bool on_curve() const { return blst_p2_on_curve(&point); }
    bool in_group() const { return blst_p2_in_g2(&point); }
    bool is_inf() const { return blst_p2_is_inf(&point); }
    bool is_equal(const P2& p) const
    {   return blst_p2_is_equal(&point, &p.point);   }
    /* Accumulate |in| after a mandatory G2 group check; throws
     * BLST_POINT_NOT_IN_GROUP on failure. */
    void aggregate(const P2_Affine& in)
    {   if (blst_p2_affine_in_g2(in))
            blst_p2_add_or_double_affine(&point, &point, in);
        else
            throw BLST_POINT_NOT_IN_GROUP;
    }
    /* Scalar-multiply in place by the secret key; the pk_in_g1 entry
     * point is used because a G2 signature pairs with G1 public keys. */
    P2* sign_with(const SecretKey& sk)
    {   blst_sign_pk_in_g1(&point, &point, &sk.key); return this;   }
    P2* sign_with(const Scalar& scalar)
    {   blst_sign_pk_in_g1(&point, &point, scalar); return this;   }
    /* hash_to/encode_to map |msg| (with optional |aug| prefix) onto
     * G2 per the ciphersuite's DST. */
    P2* hash_to(bytes_t msg, const std::string& DST = "",
                bytes_t aug = {nullptr, 0})
    {   blst_hash_to_g2(&point, msg.ptr, msg.len, C_bytes(DST.data()), DST.size(),
                        aug.ptr, aug.len);
        return this;
    }
    P2* encode_to(bytes_t msg, const std::string& DST = "",
                  bytes_t aug = {nullptr, 0})
    {   blst_encode_to_g2(&point, msg.ptr, msg.len, C_bytes(DST.data()), DST.size(),
                          aug.ptr, aug.len);
        return this;
    }
#ifndef SWIG
    P2* hash_to(const byte* msg, size_t msg_len,
                const std::string& DST = "",
                const byte* aug = nullptr, size_t aug_len = 0)
    {   blst_hash_to_g2(&point, msg, msg_len, C_bytes(DST.data()), DST.size(),
                        aug, aug_len);
        return this;
    }
    P2* encode_to(const byte* msg, size_t msg_len,
                  const std::string& DST = "",
                  const byte* aug = nullptr, size_t aug_len = 0)
    {   blst_encode_to_g2(&point, msg, msg_len, C_bytes(DST.data()), DST.size(),
                          aug, aug_len);
        return this;
    }
#endif
    P2* mult(const byte* scalar, size_t nbits)
    {   blst_p2_mult(&point, &point, scalar, nbits); return this;   }
    /* 255 bits suffice for any scalar below the BLS12-381 group order. */
    P2* mult(const Scalar& scalar)
    {   blst_p2_mult(&point, &point, scalar, 255); return this;   }
    P2* cneg(bool flag)
    {   blst_p2_cneg(&point, flag); return this;   }
    P2* neg()
    {   blst_p2_cneg(&point, true); return this;   }
    P2* add(const P2& a)
    {   blst_p2_add_or_double(&point, &point, a); return this;   }
    P2* add(const P2_Affine &a)
    {   blst_p2_add_or_double_affine(&point, &point, a); return this;   }
    P2* dbl()
    {   blst_p2_double(&point, &point); return this;   }
#ifndef SWIG
    static P2 add(const P2& a, const P2& b)
    {   P2 ret; blst_p2_add_or_double(ret, a, b); return ret;   }
    static P2 add(const P2& a, const P2_Affine& b)
    {   P2 ret; blst_p2_add_or_double_affine(ret, a, b); return ret;   }
    static P2 dbl(const P2& a)
    {   P2 ret; blst_p2_double(ret, a); return ret;   }
#endif
    static P2 generator()
    {   return P2(blst_p2_generator());   }

private:
    friend class P2_Affine;
    friend class P2_Affines;
    /* Implicit bridges to the C API. */
    operator const blst_p2*() const { return &point; }
    operator blst_p2*() { return &point; }
};
class P2_Affines {
private:
struct p2_affine_no_init {
blst_p2_affine point;
p2_affine_no_init() { }
operator blst_p2_affine*() { return &point; }
operator const blst_p2_affine*() const { return &point; }
};
std::vector table;
size_t wbits, npoints;
public:
#ifndef SWIG
P2_Affines() {}
P2_Affines(size_t wbits, const P2_Affine* const points[], size_t npoints)
{ this->wbits = wbits;
this->npoints = npoints;
table.resize(npoints << (wbits-1));
blst_p2s_mult_wbits_precompute(table.at(0), wbits,
reinterpret_cast(points),
npoints);
}
P2_Affines(size_t wbits, const P2_Affine points[], size_t npoints)
{ const P2_Affine* const ptrs[2] = { points, nullptr };
P2_Affines(wbits, ptrs, npoints);
}
P2_Affines(size_t wbits, const std::vector& points)
{ P2_Affines(wbits, &points.at(0), points.size()); }
P2_Affines(size_t wbits, const P2* const points[], size_t npoints)
{ size_t cap = npoints << (wbits-1);
this->wbits = wbits;
this->npoints = npoints;
table.resize(cap);
blst_p2s_to_affine(table.at(cap-npoints),
reinterpret_cast(points),
npoints);
const blst_p2_affine* const ptrs[2] = { table[cap-npoints], nullptr };
blst_p2s_mult_wbits_precompute(table[0], wbits, ptrs, npoints);
}
P2_Affines(size_t wbits, const P2 points[], size_t npoints)
{ const P2* const ptrs[2] = { points, nullptr };
P2_Affines(wbits, ptrs, npoints);
}
P2_Affines(size_t wbits, const std::vector& points)
{ P2_Affines(wbits, &points.at(0), points.size()); }
P2_Affines(const P2* const points[], size_t npoints)
{ this->wbits = 0;
this->npoints = npoints;
table.resize(npoints);
blst_p2s_to_affine(table.at(0),
reinterpret_cast(points),
npoints);
}
P2_Affines(const P2 points[], size_t npoints)
{ const P2* const ptrs[2] = { points, nullptr };
P2_Affines(ptrs, npoints);
}
P2_Affines(const std::vector& points)
{ P2_Affines(&points.at(0), points.size()); }
P2 mult(const byte* const scalars[], size_t nbits) const
{ P2 ret;
if (wbits != 0) {
size_t sz = blst_p2s_mult_wbits_scratch_sizeof(npoints);
std::unique_ptr scratch{new limb_t[sz/sizeof(limb_t)]};
blst_p2s_mult_wbits(ret, table.at(0), wbits, npoints,
scalars, nbits, scratch.get());
} else {
size_t sz = blst_p2s_mult_pippenger_scratch_sizeof(npoints);
std::unique_ptr scratch{new limb_t[sz/sizeof(limb_t)]};
const blst_p2_affine* const ptrs[2] = { table.at(0), nullptr };
blst_p2s_mult_pippenger(ret, ptrs, npoints,
scalars, nbits, scratch.get());
}
return ret;
}
static std::vector from(const P2* const points[], size_t npoints)
{ std::vector ret;
ret.resize(npoints);
blst_p2s_to_affine(reinterpret_cast(&ret.at(0)),
reinterpret_cast(points),
npoints);
return ret;
}
static std::vector from(const P2 points[], size_t npoints)
{ const P2* const ptrs[2] = { points, nullptr };
return from(ptrs, npoints);
}
static std::vector from(const std::vector& points)
{ return from(&points.at(0), points.size()); }
#endif
static P2 mult_pippenger(const P2_Affine* const points[], size_t npoints,
const byte* const scalars[], size_t nbits)
{ P2 ret;
size_t sz = blst_p2s_mult_pippenger_scratch_sizeof(npoints);
std::unique_ptr scratch{new limb_t[sz/sizeof(limb_t)]};
blst_p2s_mult_pippenger(ret,
reinterpret_cast(points),
npoints, scalars, nbits, scratch.get());
return ret;
}
#ifndef SWIG
static P2 mult_pippenger(const P2_Affine points[], size_t npoints,
const byte* const scalars[], size_t nbits)
{ const P2_Affine* const ptrs[2] = { points, nullptr };
return mult_pippenger(ptrs, npoints, scalars, nbits);
}
static P2 mult_pippenger(const std::vector& points,
const byte* const scalars[], size_t nbits)
{ return mult_pippenger(&points.at(0), points.size(), scalars, nbits); }
#endif
static P2 add(const P2_Affine* const points[], size_t npoints)
{ P2 ret;
blst_p2s_add(ret,
reinterpret_cast(points),
npoints);
return ret;
}
#ifndef SWIG
static P2 add(const P2_Affine points[], size_t npoints)
{ const P2_Affine* const ptrs[2] = { points, nullptr };
return add(ptrs, npoints);
}
static P2 add(const std::vector& points)
{ return add(&points.at(0), points.size()); }
#endif
};
/* Out-of-line definitions deferred until both point types are complete. */
inline P1_Affine::P1_Affine(const P1& jacobian)
{   blst_p1_to_affine(&point, jacobian);   }
inline P2_Affine::P2_Affine(const P2& jacobian)
{   blst_p2_to_affine(&point, jacobian);   }

inline P1 P1_Affine::to_jacobian() const { P1 ret(*this); return ret; }
inline P2 P2_Affine::to_jacobian() const { P2 ret(*this); return ret; }

/* Convenience group-generator accessors. */
inline P1 G1() { return P1::generator(); }
inline P2 G2() { return P2::generator(); }
#ifndef SWIG
/* |this| is the G1 signature; |pk| is the public key in G2. */
inline BLST_ERROR P1_Affine::core_verify(const P2_Affine& pk,
                                         bool hash_or_encode,
                                         const byte* msg, size_t msg_len,
                                         const std::string& DST,
                                         const byte* aug, size_t aug_len) const
{   return blst_core_verify_pk_in_g2(pk, &point, hash_or_encode,
                                     msg, msg_len,
                                     C_bytes(DST.data()), DST.size(),
                                     aug, aug_len);
}
/* |this| is the G2 signature; |pk| is the public key in G1. */
inline BLST_ERROR P2_Affine::core_verify(const P1_Affine& pk,
                                         bool hash_or_encode,
                                         const byte* msg, size_t msg_len,
                                         const std::string& DST,
                                         const byte* aug, size_t aug_len) const
{   return blst_core_verify_pk_in_g1(pk, &point, hash_or_encode,
                                     msg, msg_len,
                                     C_bytes(DST.data()), DST.size(),
                                     aug, aug_len);
}
#endif
/*
 * Element of the pairing target group, represented as an fp12 value.
 */
class PT {
private:
    blst_fp12 value;

    PT(const blst_fp12 *v) { value = *v; }

public:
    /* Map a single G1/G2 point via the blst_aggregated_* helpers. */
    PT(const P1_Affine& p) { blst_aggregated_in_g1(&value, p); }
    PT(const P2_Affine& q) { blst_aggregated_in_g2(&value, q); }
    /* Miller loop over a (G2, G1) pair; final exponentiation is left
     * to final_exp() or finalverify(). */
    PT(const P2_Affine& q, const P1_Affine& p)
    {   blst_miller_loop(&value, q, p);   }
    PT(const P1_Affine& p, const P2_Affine& q) : PT(q, p) {}
    PT(const P2& q, const P1& p)
    {   blst_miller_loop(&value, P2_Affine(q), P1_Affine(p));   }
    PT(const P1& p, const P2& q) : PT(q, p) {}

    PT dup() const { return *this; }
    bool is_one() const { return blst_fp12_is_one(&value); }
    bool is_equal(const PT& p) const
    {   return blst_fp12_is_equal(&value, p);   }
    PT* sqr() { blst_fp12_sqr(&value, &value); return this; }
    PT* mul(const PT& p) { blst_fp12_mul(&value, &value, p); return this; }
    PT* final_exp() { blst_final_exp(&value, &value); return this; }
    bool in_group() const { return blst_fp12_in_group(&value); }
    void to_bendian(byte out[48*12]) const
    {   blst_bendian_from_fp12(out, &value);   }
    static bool finalverify(const PT& gt1, const PT& gt2)
    {   return blst_fp12_finalverify(gt1, gt2);   }
    static PT one() { return PT(blst_fp12_one()); }

private:
    friend class Pairing;
    /* Implicit bridge to the C API. */
    operator const blst_fp12*() const { return &value; }
};
/*
 * Aggregated-verification context. The class holds no members of its
 * own: operator new allocates the amount of memory the C layer reports
 * via blst_pairing_sizeof(), and |this| is reinterpreted as the opaque
 * blst_pairing context.
 *
 * NOTE(review): the extraction had stripped the reinterpret_cast and
 * static_cast target types; they are restored here.
 */
class Pairing {
private:
    operator blst_pairing*()
    {   return reinterpret_cast<blst_pairing*>(this);   }
    operator const blst_pairing*() const
    {   return reinterpret_cast<const blst_pairing*>(this);   }
    void init(bool hash_or_encode, const byte* DST, size_t DST_len)
    {   // Copy DST to heap, std::string can be volatile, especially in SWIG:-(
        byte *dst = new byte[DST_len];
        memcpy(dst, DST, DST_len);
        blst_pairing_init(*this, hash_or_encode, dst, DST_len);
    }

public:
#ifndef SWIG
    /* Size the allocation per the C layer's requirement. */
    void* operator new(size_t)
    {   return new uint64_t[blst_pairing_sizeof()/sizeof(uint64_t)];   }
    void operator delete(void *ptr)
    {   delete[] static_cast<uint64_t*>(ptr);   }

    Pairing(bool hash_or_encode, const std::string& DST)
    {   init(hash_or_encode, C_bytes(DST.data()), DST.size());   }
#endif
#ifndef SWIGJAVA
    Pairing(bool hash_or_encode, const byte* DST, size_t DST_len)
    {   init(hash_or_encode, DST, DST_len);   }
    /* Release the heap copy of DST made in init(). */
    ~Pairing() { delete[] blst_pairing_get_dst(*this); }
#endif
    /* Accumulate a (pk, signature, msg[, aug]) tuple; the overload is
     * selected by which group the public key lives in. */
    BLST_ERROR aggregate(const P1_Affine* pk, const P2_Affine* sig,
                         bytes_t msg, bytes_t aug = {nullptr, 0})
    {   return blst_pairing_aggregate_pk_in_g1(*this, *pk, *sig,
                                               msg.ptr, msg.len, aug.ptr, aug.len);
    }
    BLST_ERROR aggregate(const P2_Affine* pk, const P1_Affine* sig,
                         bytes_t msg, bytes_t aug = {nullptr, 0})
    {   return blst_pairing_aggregate_pk_in_g2(*this, *pk, *sig,
                                               msg.ptr, msg.len, aug.ptr, aug.len);
    }
    /* Same, with the signature multiplied by |scalar| of |nbits| bits. */
    BLST_ERROR mul_n_aggregate(const P1_Affine* pk, const P2_Affine* sig,
                               const byte* scalar, size_t nbits,
                               bytes_t msg, bytes_t aug = {nullptr, 0})
    {   return blst_pairing_mul_n_aggregate_pk_in_g1(*this, *pk, *sig,
                                scalar, nbits, msg.ptr, msg.len, aug.ptr, aug.len);
    }
    BLST_ERROR mul_n_aggregate(const P2_Affine* pk, const P1_Affine* sig,
                               const byte* scalar, size_t nbits,
                               bytes_t msg, bytes_t aug = {nullptr, 0})
    {   return blst_pairing_mul_n_aggregate_pk_in_g2(*this, *pk, *sig,
                                scalar, nbits, msg.ptr, msg.len, aug.ptr, aug.len);
    }
#ifndef SWIG
    BLST_ERROR aggregate(const P1_Affine* pk, const P2_Affine* sig,
                         const byte* msg, size_t msg_len,
                         const byte* aug = nullptr, size_t aug_len = 0)
    {   return blst_pairing_aggregate_pk_in_g1(*this, *pk, *sig,
                                               msg, msg_len, aug, aug_len);
    }
    BLST_ERROR aggregate(const P2_Affine* pk, const P1_Affine* sig,
                         const byte* msg, size_t msg_len,
                         const byte* aug = nullptr, size_t aug_len = 0)
    {   return blst_pairing_aggregate_pk_in_g2(*this, *pk, *sig,
                                               msg, msg_len, aug, aug_len);
    }
    BLST_ERROR mul_n_aggregate(const P1_Affine* pk, const P2_Affine* sig,
                               const byte* scalar, size_t nbits,
                               const byte* msg, size_t msg_len,
                               const byte* aug = nullptr, size_t aug_len = 0)
    {   return blst_pairing_mul_n_aggregate_pk_in_g1(*this, *pk, *sig,
                                    scalar, nbits, msg, msg_len, aug, aug_len);
    }
    BLST_ERROR mul_n_aggregate(const P2_Affine* pk, const P1_Affine* sig,
                               const byte* scalar, size_t nbits,
                               const byte* msg, size_t msg_len,
                               const byte* aug = nullptr, size_t aug_len = 0)
    {   return blst_pairing_mul_n_aggregate_pk_in_g2(*this, *pk, *sig,
                                    scalar, nbits, msg, msg_len, aug, aug_len);
    }
#endif
    void commit()
    {   blst_pairing_commit(*this);   }
    BLST_ERROR merge(const Pairing* ctx)
    {   return blst_pairing_merge(*this, *ctx);   }
    /* Final verification, optionally against a GT-mapped signature. */
    bool finalverify(const PT* sig = nullptr) const
    {   return sig == nullptr ? blst_pairing_finalverify(*this, nullptr)
                              : blst_pairing_finalverify(*this, *sig);
    }
    void raw_aggregate(const P2_Affine* q, const P1_Affine* p)
    {   blst_pairing_raw_aggregate(*this, *q, *p);   }
    PT as_fp12()
    {   return PT(blst_pairing_as_fp12(*this));   }
};
} // namespace blst
#endif
================================================
FILE: bindings/blst.swg
================================================
// Copyright Supranational LLC
// Licensed under the Apache License, Version 2.0, see LICENSE for details.
// SPDX-License-Identifier: Apache-2.0
%module blst
%rename("%(strip:[blst_])s") ""; // prefix is redundant in named module
%include "exception.i"
#ifdef __cplusplus
%include "std_string.i"
%typemap(out) SELF* OUTPUT = SWIGTYPE*; // to be overridden as required
#else
#warning consider using C++ interface
#endif
%include "stdint.i"
%apply const char* { const byte*, const byte[ANY] }
%apply (const char *STRING, size_t LENGTH) { (const byte *STRING,
size_t LENGTH) }
#if defined(SWIGPYTHON)
%header %{
#if PY_VERSION_HEX<0x030d0000
/* Tailored polyfill, for example no need to handle |n_bytes| == 0 here */
/* Emulates CPython 3.13+ PyLong_AsNativeBytes() via the private
 * _PyLong_AsByteArray(). Flag bit 1 selects little-endian, bit 4 an
 * unsigned buffer (hence the inverted |is_signed| argument); values
 * match the Py_ASNATIVEBYTES_* constants used in the #else branch. */
static Py_ssize_t PyLong_AsNativeBytes(PyObject* v, void* buffer,
                                       Py_ssize_t n_bytes, int flags)
{
    return _PyLong_AsByteArray((PyLongObject*)v,
                               (unsigned char*)buffer, n_bytes,
                               flags&1, (flags&4) == 0) < 0 ? -1 : n_bytes;
}
# define My_PYLONG_FLAGS (1 | 4 | 8)
#else
# define My_PYLONG_FLAGS (Py_ASNATIVEBYTES_LITTLE_ENDIAN | \
                          Py_ASNATIVEBYTES_UNSIGNED_BUFFER | \
                          Py_ASNATIVEBYTES_REJECT_NEGATIVE)
#endif
#if PY_VERSION_HEX<0x030e0000
/* Polyfill for PyLong_GetSign() (added in CPython 3.14) in terms of
 * the private _PyLong_Sign(); returns -1 if |obj| is not an int. */
static int PyLong_GetSign(PyObject *obj, int *sign)
{
    if (!PyLong_Check(obj))
        return -1;
    *sign = _PyLong_Sign(obj);
    return 0;
}
#endif
%}
// some sorcery to allow assignments as output, e.g.
// hash = blst.encode_to_g1(b"foo")
// The 'out' argument is suppressed on the Python side; a stack
// temporary is handed to the C call instead...
%typemap(in, numinputs=0) OBJECT *OUTPUT($1_basetype temp) %{ $1 = &temp; %}
// ...and afterwards the temporary is heap-copied into a new proxy
// object appended to the return value(s).
// NOTE(review): malloc's result is not checked before memcpy — on OOM
// this would crash rather than raise; confirm acceptable upstream.
%typemap(argout) OBJECT *OUTPUT {
PyObject *obj = SWIG_NewPointerObj(memcpy(malloc(sizeof($1_basetype)),
$1,sizeof($1_basetype)),
$descriptor, SWIG_POINTER_NEW);
$result = SWIG_AppendOutput($result, obj);
}
%apply OBJECT *OUTPUT {
blst_p1 *out, blst_p1 *out_pk, blst_p1 *out_sig,
blst_p1_affine *out, blst_p1_affine *out_pk, blst_p1_affine *out_sig,
blst_p2 *out, blst_p2 *out_pk, blst_p2 *out_sig,
blst_p2_affine *out, blst_p2_affine *out_pk, blst_p2_affine *out_sig,
blst_scalar *out, blst_scalar *out_SK,
blst_fp12 *out
}
// accept 'bytes' and 'bytearray' as inputs...
// None maps to NULL; the buffer is borrowed for the duration of the
// call (hence the empty 'freearg' typemaps below).
%typemap(in) const byte* %{
if ($input == Py_None) {
$1 = NULL;
} else if (PyBytes_Check($input)) {
char *buf;
Py_ssize_t nbytes;
if (PyBytes_AsStringAndSize($input, &buf, &nbytes) < 0)
SWIG_exception_fail(SWIG_TypeError, "in method '$symname'");
$1 = ($1_ltype)buf;
} else if (PyByteArray_Check($input)) {
$1 = ($1_ltype)PyByteArray_AsString($input);
} else {
SWIG_exception_fail(SWIG_TypeError, "in method '$symname', "
"expecting 'bytes' or 'bytearray'");
}
%}
%typemap(freearg) const byte* ""
// Fixed-size byte arrays additionally enforce the declared length.
%typemap(in) const byte[ANY] %{
if (PyBytes_Check($input)) {
char *buf;
Py_ssize_t nbytes;
if (PyBytes_AsStringAndSize($input, &buf, &nbytes) < 0)
SWIG_exception_fail(SWIG_TypeError, "in method '$symname'");
if (nbytes != $1_dim0)
SWIG_exception_fail(SWIG_ValueError, "in method '$symname', "
"expecting $1_dim0 bytes");
$1 = ($1_ltype)buf;
} else if (PyByteArray_Check($input)) {
if (PyByteArray_Size($input) != $1_dim0)
SWIG_exception_fail(SWIG_ValueError, "in method '$symname', "
"expecting $1_dim0 bytes");
$1 = ($1_ltype)PyByteArray_AsString($input);
} else {
SWIG_exception_fail(SWIG_TypeError, "in method '$symname', "
"expecting 'bytes' or 'bytearray'");
}
%}
%typemap(freearg) const byte[ANY] ""
// (ptr,len) pairs also accept str, UTF-8 encoded into an alloca'd
// scratch buffer that lives until the wrapped call returns.
%typemap(in) (const byte *STRING, size_t LENGTH) %{
if ($input == Py_None) {
$1 = NULL;
$2 = 0;
} else if (PyBytes_Check($input)) {
char *buf;
Py_ssize_t nbytes;
if (PyBytes_AsStringAndSize($input, &buf, &nbytes) < 0)
SWIG_exception_fail(SWIG_ValueError, "in method '$symname'");
$1 = ($1_ltype)buf;
$2 = nbytes;
} else if (PyByteArray_Check($input)) {
$1 = ($1_ltype)PyByteArray_AsString($input);
$2 = PyByteArray_Size($input);
#ifdef Py_USING_UNICODE
} else if (PyUnicode_Check($input)) {
char *buf;
Py_ssize_t nbytes;
PyObject *obj = PyUnicode_AsUTF8String($input);
if (obj == NULL || PyBytes_AsStringAndSize(obj, &buf, &nbytes) < 0)
SWIG_exception_fail(SWIG_ValueError, "in method '$symname'");
$1 = ($1_ltype)alloca($2 = nbytes);
memcpy($1, buf, $2);
Py_DECREF(obj);
#endif
} else {
SWIG_exception_fail(SWIG_TypeError, "in method '$symname', "
"expecting 'bytes' or 'bytearray'");
}
%}
%typemap(freearg) (const byte *STRING, size_t LENGTH) ""
// blst::bytes_t is a C++-side (ptr,len) value type; accept None,
// 'bytes', 'bytearray' and (UTF-8 encoded, alloca-copied) str.
%typemap(in) blst::bytes_t %{
if ($input == Py_None) {
$1.ptr = NULL;
$1.len = 0;
} else if (PyBytes_Check($input)) {
char *buf;
Py_ssize_t nbytes;
if (PyBytes_AsStringAndSize($input, &buf, &nbytes) < 0)
SWIG_exception_fail(SWIG_ValueError, "in method '$symname'");
$1.ptr = (byte*)buf;
$1.len = nbytes;
} else if (PyByteArray_Check($input)) {
$1.ptr = (byte*)PyByteArray_AsString($input);
$1.len = PyByteArray_Size($input);
#ifdef Py_USING_UNICODE
} else if (PyUnicode_Check($input)) {
char *buf;
Py_ssize_t nbytes;
PyObject *obj = PyUnicode_AsUTF8String($input);
if (obj == NULL || PyBytes_AsStringAndSize(obj, &buf, &nbytes) < 0)
SWIG_exception_fail(SWIG_ValueError, "in method '$symname'");
auto ptr = alloca(nbytes);
memcpy(ptr, buf, nbytes);
$1.ptr = (byte*)ptr;
$1.len = nbytes;
Py_DECREF(obj);
#endif
} else {
SWIG_exception_fail(SWIG_TypeError, "in method '$symname', "
"expecting 'bytes' or 'bytearray'");
}
%}
%typemap(freearg) blst::bytes_t ""
%typemap(typecheck) blst::bytes_t ""
// let users use Python 'int', 'bytes' and 'bytearray' as scalars
// Ints are serialized little-endian into an alloca'd buffer; byte
// inputs are taken verbatim with nbits = 8 * length.
%typemap(in) (const byte* scalar, size_t nbits) %{
if (PyBytes_Check($input)) {
char *scalar;
Py_ssize_t nbytes;
if (PyBytes_AsStringAndSize($input, &scalar, &nbytes) < 0)
SWIG_exception_fail(SWIG_TypeError, "in method '$symname'");
$1 = ($1_ltype)scalar;
$2 = 8 * nbytes;
} else if (PyByteArray_Check($input)) {
$1 = ($1_ltype)PyByteArray_AsString($input);
$2 = 8 * PyByteArray_Size($input);
} else if (PyLong_Check($input)) {
size_t nbytes;
// NOTE(review): _PyLong_NumBits is a private CPython API.
$2 = _PyLong_NumBits($input);
$1 = ($1_ltype)alloca(nbytes = ($2 + 7)/8);
if (PyLong_AsNativeBytes($input, $1, nbytes, My_PYLONG_FLAGS) < 0)
SWIG_exception_fail(SWIG_OverflowError, "in method '$symname'");
} else {
SWIG_exception_fail(SWIG_TypeError, "in method '$symname', "
"expecting 'int', 'bytes' "
"or 'bytearray'");
}
%}
#ifdef __cplusplus
// Convert a Python 'list' of point proxy objects, a contiguous 'bytes'
// blob, or a 'memoryview' (as produced by as_memory()/to_affine()) into
// the pointer-vector form the multi-point C interfaces accept.
// |_global_npoints| is deliberately named so SWIG shares it with the
// companion scalars[]/points[] typemaps of the same wrapper call.
%typemap(in) (const POINT* points[], size_t npoints)
(std::unique_ptr<$*1_ltype[]> points, size_t _global_npoints) %{
if (PyList_Check($input)) {
_global_npoints = PyList_Size($input);
// Guard the empty list: PyList_GET_ITEM performs no bounds checking,
// so indexing element 0 of an empty list is undefined behavior.
if (_global_npoints == 0)
SWIG_exception_fail(SWIG_IndexError, "in method '$symname', "
"empty 'list' for points[]");
points = std::unique_ptr<$*1_ltype[]>(new $*1_ltype[_global_npoints]);
PyObject* obj = PyList_GET_ITEM($input, 0);
// check the type of the 1st element
if (SWIG_ConvertPtr(obj, (void**)&points[0], $*1_descriptor, 0) != SWIG_OK)
SWIG_exception_fail(SWIG_TypeError, "in method '$symname', "
"expecting 'list' of '$*1_ltype'");
// Remaining elements are extracted without per-element type checks;
// the 1st element's check is taken as representative.
for (size_t i = 1; i < _global_npoints; i++) {
obj = PyList_GET_ITEM($input, i);
points[i] = ($*1_ltype)SWIG_Python_GetSwigThis(obj)->ptr;
}
$1 = points.get();
$2 = _global_npoints;
} else if (PyBytes_Check($input)) {
// Contiguous blob: pass a NULL-terminated single-pointer vector
// (presumably the layout the blst_p1s_*/blst_p2s_* helpers accept
// for "one contiguous array" — see blst.h).
char *bytes;
Py_ssize_t nbytes;
if (PyBytes_AsStringAndSize($input, &bytes, &nbytes) < 0)
SWIG_exception_fail(SWIG_TypeError, "in method '$symname'");
points = std::unique_ptr<$*1_ltype[]>(new $*1_ltype[2]);
points[0] = ($*1_ltype)bytes;
points[1] = nullptr;
$1 = points.get();
$2 = _global_npoints = nbytes / sizeof(points[0][0]);
} else if (PyMemoryView_Check($input)) { // output from to_affine()
Py_buffer *buf = PyMemoryView_GET_BUFFER($input);
if (!PyBytes_Check(buf->obj))
SWIG_exception_fail(SWIG_TypeError, "in method '$symname', "
"expecting 'bytes' in "
"'memoryview' for points[]");
points = std::unique_ptr<$*1_ltype[]>(new $*1_ltype[2]);
points[0] = ($*1_ltype)buf->buf;
points[1] = nullptr;
$1 = points.get();
$2 = _global_npoints = buf->len / sizeof(points[0][0]);
} else {
SWIG_exception_fail(SWIG_TypeError, "in method '$symname', expecting "
"'list', 'bytes' or 'memoryview' "
"for points[]");
}
%}
%apply (const POINT* points[], size_t npoints) {
(const blst::P1_Affine* const points[], size_t npoints),
(const blst::P2_Affine* const points[], size_t npoints),
(const blst::P1* const points[], size_t npoints),
(const blst::P2* const points[], size_t npoints)
}
// Output point arrays: allocate a 'bytes' object in the 'check' stage
// (sized from _global_npoints set by the input typemap above) and hand
// it back wrapped in a memoryview whose itemsize/shape describe points.
%typemap(in, numinputs=0) POINT points[] (PyObject *obj) ""
%typemap(check) POINT points[] {
char *bytes;
Py_ssize_t size = sizeof($1[0]) * _global_npoints;
obj$argnum = PyBytes_FromStringAndSize(NULL, size);
if (obj$argnum == NULL) SWIG_fail;
PyBytes_AsStringAndSize(obj$argnum, &bytes, &size);
$1 = ($1_ltype)bytes;
}
%typemap(argout) POINT points[] %{
$result = PyMemoryView_FromObject(obj$argnum);
if ($result != NULL) {
// .itemsize to return size of point, and len() - amount of points
PyMemoryView_GET_BUFFER($result)->itemsize = sizeof($1[0]);
PyMemoryView_GET_BUFFER($result)->shape[0] /= sizeof($1[0]);
} else {
Py_DECREF(obj$argnum);
}
%}
%apply POINT points[] { blst_p1_affine dst[], blst_p2_affine dst[] }
// Batch Jacobian->affine conversion entry points exposed to Python;
// the returned memoryview comes from the 'argout' typemap above.
%extend blst::P1_Affines {
static PyObject* as_memory(blst_p1_affine dst[],
const blst::P1* const points[], size_t npoints)
{ blst_p1s_to_affine(dst, (const blst_p1 *const*)points, npoints);
return Py_None; // ignored by 'argout' typemap above
}
}
%extend blst::P2_Affines {
static PyObject* as_memory(blst_p2_affine dst[],
const blst::P2* const points[], size_t npoints)
{ blst_p2s_to_affine(dst, (const blst_p2 *const*)points, npoints);
return Py_None; // ignored by 'argout' typemap above
}
}
%nodefault blst::P1_Affines;
%nodefault blst::P2_Affines;
// Convert scalars for multi-scalar multiplication: a 'list' of ints
// (each serialized little-endian at a common width), or a packed
// 'bytes'/'bytearray'/'memoryview' blob split evenly across
// _global_npoints (shared with the points[] typemap above).
// NOTE(review): 'std::unique_ptr' here appears with its template
// argument stripped by the extraction (likely 'std::unique_ptr<byte[]>'
// upstream); as shown this would not compile — confirm against the
// original blst.swg.
%typemap(in) (const byte* const scalars[], size_t nbits)
(std::unique_ptr bytes, byte *scalars[2]) %{
if (PyList_Check($input)) {
if ((size_t)PyList_Size($input) != _global_npoints)
SWIG_exception_fail(SWIG_IndexError, "in method '$symname', 'list' "
"length mismatch for scalars[]");
PyObject *obj = PyList_GET_ITEM($input, 0);
if (PyLong_Check(obj)) {
// First pass: reject negatives and find the widest bit length.
$2 = _PyLong_NumBits(obj);
for (size_t i = 1; i < _global_npoints; i++) {
size_t nbits;
int sign;
obj = PyList_GET_ITEM($input, i);
if (PyLong_GetSign(obj, &sign) < 0 || sign < 0)
SWIG_exception_fail(SWIG_TypeError, "in method '$symname', "
"expecting all 'long's");
nbits = _PyLong_NumBits(obj);
if (nbits > $2) $2 = nbits;
}
// Second pass: serialize every int at the common width.
size_t nbytes = ($2 + 7)/8;
bytes = std::unique_ptr(new byte[_global_npoints*nbytes]);
byte* scalar = bytes.get();
for (size_t i = 0; i < _global_npoints; i++, scalar += nbytes)
PyLong_AsNativeBytes(PyList_GET_ITEM($input, i),
scalar, nbytes, My_PYLONG_FLAGS);
scalars[0] = bytes.get();
scalars[1] = nullptr;
$1 = scalars;
} else {
SWIG_exception_fail(SWIG_TypeError, "in method '$symname', "
"expecting 'list' of 'long's "
"for scalars[]");
}
} else if (PyBytes_Check($input)) {
char *bytes;
Py_ssize_t nbytes;
if (PyBytes_AsStringAndSize($input, &bytes, &nbytes) < 0)
SWIG_exception_fail(SWIG_TypeError, "in method '$symname'");
scalars[0] = ($*1_ltype)bytes;
scalars[1] = nullptr;
$1 = scalars;
$2 = 8 * (nbytes / _global_npoints);
} else if (PyByteArray_Check($input)) {
scalars[0] = ($*1_ltype)PyByteArray_AsString($input);
scalars[1] = nullptr;
$1 = scalars;
$2 = 8 * (PyByteArray_Size($input) / _global_npoints);
} else if (PyMemoryView_Check($input)) {
Py_buffer *buf = PyMemoryView_GET_BUFFER($input);
if (!PyBytes_Check(buf->obj) && !PyByteArray_Check(buf->obj))
SWIG_exception_fail(SWIG_TypeError, "in method '$symname', "
"expecting 'bytes' in "
"'memoryview' for points[]");
scalars[0] = ($*1_ltype)buf->buf;
scalars[1] = nullptr;
$1 = scalars;
$2 = 8 * (buf->len / _global_npoints);
} else {
SWIG_exception_fail(SWIG_TypeError, "in method '$symname', expecting "
"'list', 'bytes', 'bytearray' "
"or 'memoryview' for scalars[]");
}
%}
// Map non-success BLST_ERROR returns to Python ValueError, using the
// message table defined in the %begin section at the bottom of the file.
%typemap(out) BLST_ERROR %{
if ($1 != BLST_SUCCESS) {
SWIG_exception(SWIG_ValueError, BLST_ERROR_str[$1]);
SWIG_fail;
}
$result = SWIG_From_int($1);
%}
// return |this|
%typemap(out) SELF* OUTPUT %{ (void)$1; Py_INCREF($result = swig_obj[0]); %}
#endif
#elif defined(SWIGJAVA)
// --------------------------------------------------------------- Java
%header %{
// JCALL abstracts the C vs C++ JNI calling conventions.
#ifdef __cplusplus
# define JCALL(func, ...) jenv->func(__VA_ARGS__)
#else
# define JCALL(func, ...) (*jenv)->func(jenv, __VA_ARGS__)
#endif
%}
%include "enums.swg"
%include "arrays_java.i"
%javaconst(1);
#if SWIG_VERSION < 0x040000
%apply (char *STRING, size_t LENGTH) { (const byte *STRING, size_t LENGTH) }
#endif
%pragma(java) jniclassimports=%{
import java.io.*;
import java.nio.file.*;
%}
// Static initializer: load the native library, preferring an embedded
// resource (unpacked to a temp dir) over the system library path.
%pragma(java) jniclasscode=%{
final static String libName = System.mapLibraryName("$module");
final static String resName = System.getProperty("os.name").replaceFirst(" .*","")
+ "/" + System.getProperty("os.arch")
+ "/" + libName;
static {
// NOTE(review): the generic argument of 'Class' appears stripped by
// the extraction (likely 'Class<?>') — confirm against upstream.
Class> imClazz = $imclassname.class;
InputStream res = imClazz.getResourceAsStream(
System.getProperty(imClazz.getPackageName() + ".jniResource", resName));
if (res == null) {
try {
System.loadLibrary("$module");
} catch (UnsatisfiedLinkError e) {
String[] cmd = System.getProperty("sun.java.command").split("/");
if (!"$imclassname".equals(cmd[cmd.length-1]))
// suppress exception if 'main' below is executed
throw new RuntimeException(e.getMessage());
}
} else {
// unpack shared library into a temporary directory and load it
try {
Path tmpdir = Files.createTempDirectory("$module@");
tmpdir.toFile().deleteOnExit();
Path tmpdll = Paths.get(tmpdir.toString(), libName);
tmpdll.toFile().deleteOnExit();
Files.copy(res, tmpdll, StandardCopyOption.REPLACE_EXISTING);
res.close();
System.load(tmpdll.toString());
} catch (IOException e) {
throw new RuntimeException(e.getMessage());
}
}
}
// Prints the expected resource path; used when run directly.
public static void main(String argv[]) {
System.out.println(resName);
}
%}
#ifdef __cplusplus
// Extensive sorcery to shift memory management to JVM GC. General idea is
// to use Java long[] as opaque storage for blst data. Methods that return
// new objects allocate suitably sized long[] arrays from JVM heap,
// references to which are then assigned to |swigCPtr| on the Java side.
// And when passed back to JNI, |swigCPtr|s are dereferenced with
// GetLongArrayElements... And no destructors!
%nodefaultdtor;
%typemap(javafinalize) SWIGTYPE ""
%typemap(javadestruct) SWIGTYPE ""
%typemap(javabody) SWIGTYPE %{
private transient long[] swigCPtr;
protected $javaclassname(long[] cPtr) { swigCPtr = cPtr; }
protected static long[] getCPtr($javaclassname obj) {
return obj != null ? obj.swigCPtr : null;
}
public $javaclassname dup() { return new $javaclassname(swigCPtr.clone()); }
%}
%ignore dup;
%typemap(javaconstruct) SWIGTYPE { this($imcall); }
%typemap(jni) SWIGTYPE, SWIGTYPE&, SWIGTYPE* "jlongArray"
%typemap(jtype) SWIGTYPE, SWIGTYPE&, SWIGTYPE* "long[]"
%typemap(javaout) SWIGTYPE, SWIGTYPE&, SWIGTYPE* {
return new $javaclassname($jnicall);
}
%typemap(in) SWIGTYPE&, SWIGTYPE* %{
$1 = ($1_ltype)JCALL(GetLongArrayElements, $input, 0);
%}
%typemap(in) const SWIGTYPE&, const SWIGTYPE* %{
$1 = $input ? ($1_ltype)JCALL(GetLongArrayElements, $input, 0) : NULL;
%}
// Outputs are copied into freshly allocated jlong arrays, rounding the
// byte size up to a whole number of jlongs.
%typemap(out) SWIGTYPE&, SWIGTYPE* %{
if ($1 != $null) {
size_t sz = (sizeof($1_basetype) + sizeof(jlong) - 1)/sizeof(jlong);
$result = JCALL(NewLongArray, sz);
if ($result != $null)
JCALL(SetLongArrayRegion, $result, 0, sz, (const jlong *)$1);
}
%}
%typemap(out) SWIGTYPE {
size_t sz = (sizeof($1_basetype) + sizeof(jlong) - 1)/sizeof(jlong);
$result = JCALL(NewLongArray, sz);
if ($result != $null)
JCALL(SetLongArrayRegion, $result, 0, sz, (const jlong *)&$1);
}
%typemap(newfree) SWIGTYPE* "delete $1;"
// Mutable borrows write back (mode 0); const borrows are discarded
// with JNI_ABORT.
%typemap(freearg) SWIGTYPE&, SWIGTYPE* %{
JCALL(ReleaseLongArrayElements, $input, (jlong *)$1, 0);
%}
%typemap(freearg) const SWIGTYPE&, const SWIGTYPE* %{
if ($input) JCALL(ReleaseLongArrayElements, $input, (jlong *)$1, JNI_ABORT);
%}
%typemap(freearg) const std::string& ""
// I wish |jenv| was available in the constructor, so that NewLongArray
// could be called at once, without having to resort to matching
// %typemap(out)...
// The Pairing context and its DST are packed into ONE allocation
// (DST copied right after the context), so the matching out-typemap
// below can move the whole thing into a single jlong[].
%extend blst::Pairing {
Pairing(bool hash_or_encode, const std::string& DST)
{ size_t sz = blst_pairing_sizeof();
size_t SZ = (sz + DST.size() + sizeof(jlong) - 1)/sizeof(jlong);
blst_pairing *ret = (blst_pairing *)malloc(SZ*sizeof(jlong));
if (DST.size() != 0) {
byte *dst = (byte *)ret + sz;
memcpy(dst, DST.data(), DST.size());
blst_pairing_init(ret, hash_or_encode, dst, DST.size());
} else {
blst_pairing_init(ret, hash_or_encode, NULL, 0);
}
return (Pairing *)ret;
}
}
// |arg2| is the DST string argument of the constructor above; the
// array size must match the packed allocation exactly.
%typemap(out) blst::Pairing* {
size_t sz = blst_pairing_sizeof();
size_t SZ = (sz + arg2->size() + sizeof(jlong) - 1)/sizeof(jlong);
$result = JCALL(NewLongArray, SZ);
if ($result != $null)
JCALL(SetLongArrayRegion, $result, 0, SZ, (const jlong *)$1);
}
%typemap(newfree) blst::Pairing* "free($1);"
// Methods returning |this| are exposed as void on the JNI side and
// return 'this' in the Java wrapper.
%typemap(javaout) SELF* OUTPUT { $jnicall; return this; }
%typemap(out) SELF* OUTPUT "(void)$1;"
%typemap(jni) SELF* OUTPUT "void"
%typemap(jtype) SELF* OUTPUT "void"
#endif
%typemap(throws) BLST_ERROR %{
SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException,
BLST_ERROR_str[$1]);
%}
// handle input const byte[] more efficiently...
%apply signed char[] { const byte* }
%typemap(in) const byte* %{
$1 = $input ? ($1_ltype)JCALL(GetByteArrayElements, $input, 0) : NULL;
%}
%typemap(argout) const byte* ""
%typemap(freearg) const byte* %{
if ($input) JCALL(ReleaseByteArrayElements, $input, (jbyte *)$1, JNI_ABORT);
%}
%apply const byte* { const byte[ANY] }
%typemap(in) const byte[ANY] {
size_t sz = JCALL(GetArrayLength, $input);
if (sz != $1_dim0) {
SWIG_JavaThrowException(jenv, SWIG_JavaIndexOutOfBoundsException,
"BLST_ERROR: input size mismatch");
return $null;
}
$1 = ($1_ltype)JCALL(GetByteArrayElements, $input, 0);
}
// let users use 'java.math.BigInteger' as scalars
// BigInteger.toByteArray() is big-endian two's complement: reject a
// set sign bit, reverse to little-endian, and drop a zero top byte.
%typemap(in) (const byte* scalar, size_t nbits) %{
$2 = JCALL(GetArrayLength, $input);
$1 = ($1_ltype)alloca($2);
JCALL(GetByteArrayRegion, $input, 0, $2, (jbyte*)$1);
if (*(jbyte*)$1 < 0) {
SWIG_JavaThrowException(jenv, SWIG_JavaIllegalArgumentException,
"expecting unsigned value");
return $null;
}
{ // BigInteger.toByteArray() emits big-endian, flip the order...
size_t i, j;
for(i=0, j=$2-1; i<$2/2; i++, j--) {
$*1_ltype t=$1[i]; $1[i]=$1[j]; $1[j]=t;
}
}
if ($1[$2-1] == 0)
$2--;
$2 *= 8;
%}
%typemap(jni) (const byte* scalar, size_t nbits) "jbyteArray"
%typemap(jtype) (const byte* scalar, size_t nbits) "byte[]"
%typemap(jstype) (const byte* scalar, size_t nbits) "java.math.BigInteger"
%typemap(javain) (const byte* scalar, size_t nbits) "$javainput.toByteArray()"
%typemap(jni) (const byte *STRING, size_t LENGTH) "jbyteArray"
%typemap(jtype) (const byte *STRING, size_t LENGTH) "byte[]"
%typemap(jstype) (const byte *STRING, size_t LENGTH) "byte[]"
%typemap(javain) (const byte *STRING, size_t LENGTH) "$javainput"
%typemap(freearg)(const byte *STRING, size_t LENGTH) ""
%typemap(jni) blst::bytes_t "jbyteArray"
%typemap(jtype) blst::bytes_t "byte[]"
%typemap(jstype) blst::bytes_t "byte[]"
%typemap(javain) blst::bytes_t "$javainput"
%typemap(freearg)blst::bytes_t ""
%typemap(in) blst::bytes_t %{
$1.ptr = (const byte*)JCALL(GetByteArrayElements, $input, 0);
$1.len = JCALL(GetArrayLength, $input);
%}
%typemap(argout) blst::bytes_t %{
JCALL(ReleaseByteArrayElements, $input, (jbyte *)$1.ptr, JNI_ABORT);
%}
#elif defined(SWIGJAVASCRIPT) && defined(SWIG_JAVASCRIPT_V8)
// ---------------------------------------------------- JavaScript (V8)
// NOTE(review): throughout this section angle-bracketed template
// arguments appear stripped by the extraction — e.g. 'v8::Local::Cast'
// was presumably 'v8::Local<v8::ArrayBufferView>::Cast' (or BigInt/
// String variants), and several "expecting " messages lost their
// '<TypedArray>'-style text. Confirm against the upstream blst.swg.
%header %{
// ArrayBuffer data accessor changed name/shape in V8 8.x.
#if V8_MAJOR_VERSION >= 8
# define GetData() GetBackingStore()->Data()
#else
# define GetData() GetContents().Data()
#endif
%}
%typemap(throws) BLST_ERROR %{ SWIG_V8_Raise(BLST_ERROR_str[$1]); SWIG_fail; %}
// Typed-array views map to raw pointers (offset into the backing
// buffer); null maps to nullptr.
%typemap(in) const byte* %{
if ($input->IsArrayBufferView()) {
auto av = v8::Local::Cast($input);
auto buf = av->Buffer();
$1 = ($1_ltype)buf->GetData() + av->ByteOffset();
} else if ($input->IsNull()) {
$1 = nullptr;
} else {
SWIG_exception_fail(SWIG_TypeError, "in method '$symname', "
"expecting ");
}
%}
%typemap(argout) const byte* ""
%typemap(freearg) const byte* ""
%apply const byte* { const byte[ANY] }
%typemap(in) const byte[ANY] %{
if ($input->IsArrayBufferView()) {
auto av = v8::Local::Cast($input);
if (av->ByteLength() != $1_dim0)
SWIG_exception_fail(SWIG_IndexError, "in method '$symname', "
"expecting $1_dim0 bytes");
auto buf = av->Buffer();
$1 = ($1_ltype)buf->GetData() + av->ByteOffset();
} else {
SWIG_exception_fail(SWIG_TypeError, "in method '$symname', "
"expecting ");
}
%}
// let users use JavaScript and as scalars
// BigInt path: copy the 64-bit words to an alloca'd buffer and, on
// big-endian hosts, re-serialize the words little-endian in place.
%typemap(in) (const byte* scalar, size_t nbits) %{
if ($input->IsArrayBufferView()) {
auto av = v8::Local::Cast($input);
auto buf = av->Buffer();
$1 = ($1_ltype)buf->GetData() + av->ByteOffset();
$2 = 8*av->ByteLength();
#if V8_MAJOR_VERSION >=6 && V8_MINOR_VERSION >= 8
} else if ($input->IsBigInt()) {
auto bi = v8::Local::Cast($input);
int sign, word_count = bi->WordCount();
uint64_t* words = (uint64_t*)alloca($2 = word_count*sizeof(uint64_t));
bi->ToWordsArray(&sign, &word_count, words);
if (sign)
SWIG_exception_fail(SWIG_TypeError, "in method '$symname', "
"expecting unsigned value");
$1 = ($1_ltype)words;
$2 *= 8;
const union {
long one;
char little;
} is_endian = { 1 };
if (!is_endian.little) {
byte* p = $1;
for (int i = 0; i < word_count; i++) {
uint64_t val = words[i];
for (size_t j = 0; j < sizeof(val); j++, val >>= 8)
*p++ = (byte)val;
}
}
#endif
} else {
SWIG_exception_fail(SWIG_TypeError, "in method '$symname', "
"expecting or ");
}
%}
// (ptr,len) pairs also accept JS strings, UTF-8 encoded into an
// alloca'd scratch buffer.
%typemap(in) (const byte *STRING, size_t LENGTH) %{
if ($input->IsArrayBufferView()) {
auto av = v8::Local::Cast($input);
auto buf = av->Buffer();
$1 = ($1_ltype)buf->GetData() + av->ByteOffset();
$2 = av->ByteLength();
} else if ($input->IsString()) {
auto str = v8::Local::Cast($input);
$2 = SWIGV8_UTF8_LENGTH(str);
$1 = ($1_ltype)alloca($2);
SWIGV8_WRITE_UTF8(str, (char *)$1, $2);
} else if ($input->IsNull()) {
$1 = nullptr;
$2 = 0;
} else {
SWIG_exception_fail(SWIG_TypeError, "in method '$symname', "
"expecting or ");
}
%}
%typemap(freearg) (const byte *STRING, size_t LENGTH) ""
%typemap(in) blst::bytes_t %{
if ($input->IsArrayBufferView()) {
auto av = v8::Local::Cast($input);
auto buf = av->Buffer();
$1.ptr = (byte*)buf->GetData() + av->ByteOffset();
$1.len = av->ByteLength();
} else if ($input->IsString()) {
auto str = v8::Local::Cast($input);
$1.len = SWIGV8_UTF8_LENGTH(str);
$1.ptr = (byte*)alloca($1.len);
SWIGV8_WRITE_UTF8(str, (char *)$1.ptr, $1.len);
} else if ($input->IsNull()) {
$1.ptr = nullptr;
$1.len = 0;
} else {
SWIG_exception_fail(SWIG_TypeError, "in method '$symname', "
"expecting or ");
}
%}
%typemap(freearg) blst::bytes_t ""
// return |this|
%typemap(out) SELF* OUTPUT %{ (void)$1; $result = args.Holder(); %}
#elif defined(SWIGPERL)
// --------------------------------------------------------------- Perl
// let users use byte[] as scalars
%apply (const char *STRING, size_t LENGTH) { (const byte* scalar, size_t nbits) }
%typemap(check) (const byte* scalar, size_t nbits) %{ $2 *= 8; %}
#ifdef __cplusplus
// return |this|
%typemap(out) SELF* OUTPUT %{ (void)$1; argvi++; %}
#endif
#endif // SWIG
// ------------------------------------------- shared across languages
// everybody has a way to bundle pointer and buffer size, but C:-(
%apply (const byte *STRING, size_t LENGTH) {
(const byte *msg, size_t msg_len),
(const byte *DST, size_t DST_len),
(const byte *aug, size_t aug_len),
(const byte *IKM, size_t IKM_len),
(const byte *info, size_t info_len),
(const byte *salt, size_t salt_len),
(const byte *in, size_t len)
}
// some sorcery to return byte[] from serialization methods
// A stack temporary receives the serialized bytes; the 'argout' stage
// copies it into the target language's native byte container.
%typemap(in, numinputs=0) byte out[ANY] (byte temp[$1_dim0]) %{ $1 = temp; %}
%typemap(argout) byte out[ANY] {
#if defined(SWIGPYTHON)
PyObject *obj = SWIG_FromCharPtrAndSize((char *)$1, $1_dim0);
$result = SWIG_AppendOutput($result, obj);
#elif defined(SWIGJAVA)
$result = JCALL(NewByteArray, $1_dim0);
if ($result != $null) {
JCALL(SetByteArrayRegion, $result, 0, $1_dim0, (const jbyte *)$1);
}
#elif defined(SWIGJAVASCRIPT) && defined(SWIG_JAVASCRIPT_V8)
auto ab = v8::ArrayBuffer::New(v8::Isolate::GetCurrent(), $1_dim0);
memcpy(ab->GetData(), $1, $1_dim0);
$result = v8::Uint8Array::New(ab, 0, $1_dim0);
#elif defined(SWIGPERL)
$result = SWIG_FromCharPtrAndSize((char *)$1, $1_dim0); argvi++;
#else // TODO: figure out more language-specific ways to return multi-values...
if ($result == NULL)
$result = SWIG_FromCharPtrAndSize((char *)$1, $1_dim0);
#endif
}
%typemap(freearg) byte out[ANY] ""
#ifdef SWIGJAVA
%typemap(jni) byte out[ANY] "jbyteArray"
%typemap(jtype) byte out[ANY] "byte[]"
%typemap(jstype) byte out[ANY] "byte[]"
%typemap(javaout) byte out[ANY] { return $jnicall; }
#endif
%apply byte out[ANY] {
void to_bendian, void blst_bendian_from_scalar,
void to_lendian, void blst_lendian_from_scalar,
void serialize, void blst_p1_serialize, void blst_p1_affine_serialize,
void blst_p2_serialize, void blst_p2_affine_serialize,
void compress, void blst_p1_compress, void blst_p1_affine_compress,
void blst_p2_compress, void blst_p2_affine_compress,
void blst_sk_to_pk2_in_g1, void blst_sign_pk2_in_g1,
void blst_sk_to_pk2_in_g2, void blst_sign_pk2_in_g2
}
#ifdef __cplusplus
// ---------------------------------------- C++ interface configuration
%apply const std::string& { const std::string* }
#pragma SWIG nowarn=509,516
#if !defined(SWIGPYTHON)
%ignore P1_Affines;
%ignore P2_Affines;
#endif
%ignore nullptr;
%ignore None;
%ignore C_bytes;
%ignore bytes_t;
%feature("novaluewrapper") bytes_t;
// Constructors/methods that may throw BLST_ERROR (mapped per-language
// by the %typemap(throws) directives above).
%catches(BLST_ERROR) P1(const byte* in, size_t len);
%catches(BLST_ERROR) P1_Affine(const byte* in, size_t len);
%catches(BLST_ERROR) aggregate(const P1_Affine& in);
%catches(BLST_ERROR) P2(const byte* in, size_t len);
%catches(BLST_ERROR) P2_Affine(const byte* in, size_t len);
%catches(BLST_ERROR) aggregate(const P2_Affine& in);
%catches(BLST_ERROR) blst::Scalar::add;
%catches(BLST_ERROR) blst::Scalar::sub;
%catches(BLST_ERROR) blst::Scalar::mul;
// methods returning |this|
%apply SELF* OUTPUT {
blst::P1* sign_with, blst::P2* sign_with,
blst::P1* hash_to, blst::P2* hash_to,
blst::P1* encode_to, blst::P2* encode_to,
blst::P1* mult, blst::P2* mult,
blst::P1* cneg, blst::P2* cneg,
blst::P1* neg, blst::P2* neg,
blst::P1* add, blst::P2* add,
blst::P1* dbl, blst::P2* dbl,
blst::PT* mul, blst::PT* sqr,
blst::PT* final_exp,
blst::Scalar* from_bendian,
blst::Scalar* from_lendian,
blst::Scalar* add,
blst::Scalar* sub,
blst::Scalar* mul,
blst::Scalar* inverse
}
// Mirror of the BLST_ERROR enum in blst.h; must stay in sync with it
// and with the BLST_ERROR_str table in the %begin section below.
typedef enum {
BLST_SUCCESS = 0,
BLST_BAD_ENCODING,
BLST_POINT_NOT_ON_CURVE,
BLST_POINT_NOT_IN_GROUP,
BLST_AGGR_TYPE_MISMATCH,
BLST_VERIFY_FAIL,
BLST_PK_IS_INFINITY,
} BLST_ERROR;
%include "blst.hpp"
extern const blst::P1_Affine BLS12_381_G1;
extern const blst::P1_Affine BLS12_381_NEG_G1;
extern const blst::P2_Affine BLS12_381_G2;
extern const blst::P2_Affine BLS12_381_NEG_G2;
#else
// ------------------------------------------ C interface configuration
%ignore blst_fr;
%ignore blst_fp;
%ignore blst_fp2;
%ignore blst_fp6;
%ignore blst_scalar_from_uint32;
%ignore blst_scalar_from_uint64;
%ignore blst_uint32_from_scalar;
%ignore blst_uint64_from_scalar;
%ignore blst_pairing_init;
%ignore blst_pairing_get_dst;
%include "blst.h"
%include "blst_aux.h"
// Constructor/destructor pair for the opaque blst_pairing context; the
// DST is heap-copied separately and reclaimed via blst_pairing_get_dst.
// NOTE(review): malloc results are not checked before use.
%extend blst_pairing {
blst_pairing(bool hash_or_encode, const byte *DST DEFNULL,
size_t DST_len DEFNULL)
{ void *ret = malloc(blst_pairing_sizeof());
if (DST_len != 0) {
void *dst = malloc(DST_len);
memcpy(dst, DST, DST_len);
blst_pairing_init(ret, hash_or_encode, dst, DST_len);
} else {
blst_pairing_init(ret, hash_or_encode, NULL, 0);
}
return ret;
}
~blst_pairing()
{ void *dst = (void *)blst_pairing_get_dst($self);
if (dst != NULL) free(dst);
free($self);
}
}
#endif
// Emitted at the very top of the generated wrapper source.
%begin %{
#ifdef __cplusplus
// NOTE(review): the header name after '# include' appears stripped by
// the extraction (likely <string> or similar) — confirm upstream.
# include
# include "blst.hpp"
using namespace blst;
#else
# include "blst.h"
#endif
// Human-readable messages indexed by BLST_ERROR; order must match the
// enum declared in blst.h (and mirrored above).
static const char *const BLST_ERROR_str [] = {
"BLST_ERROR: success",
"BLST_ERROR: bad point encoding",
"BLST_ERROR: point is not on curve",
"BLST_ERROR: point is not in group",
"BLST_ERROR: context type mismatch",
"BLST_ERROR: verify failed",
"BLST_ERROR: public key is infinite",
};
#define SWIG_PYTHON_STRICT_BYTE_CHAR
// Portable alloca: GCC builtin, or platform headers on Solaris/Windows.
// NOTE(review): the '# include' lines below also appear to have lost
// their header names to the extraction (likely <alloca.h>/<malloc.h>).
#if defined(__GNUC__)
# ifndef alloca
# define alloca(s) __builtin_alloca(s)
# endif
#elif defined(__sun)
# include
#elif defined(_WIN32)
# include
# ifndef alloca
# define alloca(s) _alloca(s)
# endif
#endif
%}
#if defined(SWIGPYTHON) || defined(SWIGPERL)
%include "cdata.i"
#endif
// Workarounds for pre-4.1 SWIG releases.
#if SWIG_VERSION < 0x040100 && defined(SWIGJAVASCRIPT)
%wrapper %{
#ifdef NODE_MODULE
# undef NODE_MODULE
# define NODE_MODULE NODE_MODULE_CONTEXT_AWARE
// actually error-prone and not exactly suitable for production, but
// sufficient for development purposes till SWIG 4.1.0 is released...
#endif
%}
#endif
#if SWIG_VERSION < 0x040100 && defined(SWIGJAVA)
/* SWIG versions prior 4.1 were crossing the MinGW's ways on the path
* to JNI 'jlong' type */
%begin %{
#if defined(__MINGW32__) && defined(__int64)
# undef __int64
#endif
%}
#endif
================================================
FILE: bindings/blst_aux.h
================================================
/*
* Copyright Supranational LLC
* Licensed under the Apache License, Version 2.0, see LICENSE for details.
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef __BLST_AUX_H__
#define __BLST_AUX_H__
/*
* This file lists interfaces that might be promoted to blst.h or removed,
* depending on their proven/unproven worthiness.
*/
/* In-place butterfly operations on scalar-field pairs with a twiddle
 * factor (presumably Cooley-Tukey / Gentleman-Sande NTT steps, per the
 * ct_/gs_ prefixes — confirm against implementation). */
void blst_fr_ct_bfly(blst_fr *x0, blst_fr *x1, const blst_fr *twiddle);
void blst_fr_gs_bfly(blst_fr *x0, blst_fr *x1, const blst_fr *twiddle);
/* Representation conversions (presumably to/from Montgomery form —
 * confirm against implementation). */
void blst_fr_to(blst_fr *ret, const blst_fr *a);
void blst_fr_from(blst_fr *ret, const blst_fr *a);
#ifdef BLST_FR_PENTAROOT
void blst_fr_pentaroot(blst_fr *ret, const blst_fr *a);
void blst_fr_pentapow(blst_fr *ret, const blst_fr *a);
#endif
void blst_fp_to(blst_fp *ret, const blst_fp *a);
void blst_fp_from(blst_fp *ret, const blst_fp *a);
/* Quadratic-residue tests in the base field and its extension. */
bool blst_fp_is_square(const blst_fp *a);
bool blst_fp2_is_square(const blst_fp2 *a);
/* Normalize Jacobian projective coordinates in place-compatible form. */
void blst_p1_from_jacobian(blst_p1 *out, const blst_p1 *in);
void blst_p2_from_jacobian(blst_p2 *out, const blst_p2 *in);
/*
 * Below functions produce both point and deserialized outcome of
 * SkToPk and Sign. However, deserialized outputs are pre-decorated
 * with sign and infinity bits. This means that you have to bring the
 * output into compliance prior returning to application. If you want
 * compressed point value, then do [equivalent of]
 *
 * byte temp[96];
 * blst_sk_to_pk2_in_g1(temp, out_pk, SK);
 * temp[0] |= 0x80;
 * memcpy(out, temp, 48);
 *
 * Otherwise do
 *
 * blst_sk_to_pk2_in_g1(out, out_pk, SK);
 * out[0] &= ~0x20;
 *
 * Either |out| or |out_| can be NULL.
 */
void blst_sk_to_pk2_in_g1(byte out[96], blst_p1_affine *out_pk,
const blst_scalar *SK);
void blst_sign_pk2_in_g1(byte out[192], blst_p2_affine *out_sig,
const blst_p2 *hash, const blst_scalar *SK);
void blst_sk_to_pk2_in_g2(byte out[192], blst_p2_affine *out_pk,
const blst_scalar *SK);
void blst_sign_pk2_in_g2(byte out[96], blst_p1_affine *out_sig,
const blst_p1 *hash, const blst_scalar *SK);
/* Opaque message-uniqueness tester: size the buffer for |n_nodes|
 * messages with blst_uniq_sizeof, initialize, then feed messages to
 * blst_uniq_test (return semantics documented at the implementation). */
#ifdef __BLST_RUST_BINDGEN__
typedef struct {} blst_uniq;
#else
typedef struct blst_opaque blst_uniq;
#endif
size_t blst_uniq_sizeof(size_t n_nodes);
void blst_uniq_init(blst_uniq *tree);
bool blst_uniq_test(blst_uniq *tree, const byte *msg, size_t len);
/* expand_message_xmd per the hash-to-curve specification; the macro
 * branch allows an externally supplied implementation. */
#ifdef expand_message_xmd
void expand_message_xmd(unsigned char *bytes, size_t len_in_bytes,
const unsigned char *aug, size_t aug_len,
const unsigned char *msg, size_t msg_len,
const unsigned char *DST, size_t DST_len);
#else
void blst_expand_message_xmd(byte *out, size_t out_len,
const byte *msg, size_t msg_len,
const byte *DST, size_t DST_len);
#endif
/* Scalar multiplication variants that skip input validation ("unchecked"). */
void blst_p1_unchecked_mult(blst_p1 *out, const blst_p1 *p, const byte *scalar,
size_t nbits);
void blst_p2_unchecked_mult(blst_p2 *out, const blst_p2 *p, const byte *scalar,
size_t nbits);
void blst_pairing_raw_aggregate(blst_pairing *ctx, const blst_p2_affine *q,
const blst_p1_affine *p);
blst_fp12 *blst_pairing_as_fp12(blst_pairing *ctx);
void blst_bendian_from_fp12(byte out[48*12], const blst_fp12 *a);
/* Historical KeyGen variants (v3/v4.5/v5 of the BLS key-generation
 * draft) and EIP-2333 key derivation. */
void blst_keygen_v3(blst_scalar *out_SK, const byte *IKM, size_t IKM_len,
const byte *info DEFNULL, size_t info_len DEFNULL);
void blst_keygen_v4_5(blst_scalar *out_SK, const byte *IKM, size_t IKM_len,
const byte *salt, size_t salt_len,
const byte *info DEFNULL, size_t info_len DEFNULL);
void blst_keygen_v5(blst_scalar *out_SK, const byte *IKM, size_t IKM_len,
const byte *salt, size_t salt_len,
const byte *info DEFNULL, size_t info_len DEFNULL);
void blst_derive_master_eip2333(blst_scalar *out_SK,
const byte *IKM, size_t IKM_len);
void blst_derive_child_eip2333(blst_scalar *out_SK, const blst_scalar *SK,
uint32_t child_index);
/* Parse NUL-terminated hex-ASCII strings into field/scalar elements. */
void blst_scalar_from_hexascii(blst_scalar *out, const byte *hex);
void blst_fr_from_hexascii(blst_fr *ret, const byte *hex);
void blst_fp_from_hexascii(blst_fp *ret, const byte *hex);
/* Runtime sizes of the otherwise opaque structures, for callers that
 * allocate them dynamically. */
size_t blst_p1_sizeof(void);
size_t blst_p1_affine_sizeof(void);
size_t blst_p2_sizeof(void);
size_t blst_p2_affine_sizeof(void);
size_t blst_fp12_sizeof(void);
/* Construct a base-field element from variable-length little-/big-endian
 * byte strings. */
void blst_fp_from_le_bytes(blst_fp *ret, const byte *in, size_t len);
void blst_fp_from_be_bytes(blst_fp *ret, const byte *in, size_t len);
/*
 * Single-shot SHA-256 hash function.
 */
void blst_sha256(byte out[32], const byte *msg, size_t msg_len);
#endif
================================================
FILE: bindings/c#/poc.cs
================================================
using System;
using System.Text;
using supranational;
class PoC {
// End-to-end smoke test of the C# bindings: key generation, sign,
// verify (both via Pairing context and via raw fp12 values), scalar
// multiplication, and a subgroup/low-order sanity check.
private static void Main(string[] args)
{
var msg = Encoding.UTF8.GetBytes("assertion");
var DST = "MY-DST";
var SK = new blst.SecretKey();
// Fixed all-'*' IKM: deterministic demo key, NOT for production use.
SK.keygen(Encoding.UTF8.GetBytes(new string('*', 32)));
// generate public key and serialize it...
var pk_for_wire = new blst.P1(SK).serialize();
// sign |msg| and serialize the signature...
// (the serialized public key doubles as the augmentation input)
var sig_for_wire = new blst.P2().hash_to(msg, DST, pk_for_wire)
.sign_with(SK)
.serialize();
// now on "receiving" side, start with deserialization...
var _sig = new blst.P2_Affine(sig_for_wire);
var _pk = new blst.P1_Affine(pk_for_wire);
if (!_pk.in_group())
throw new blst.Exception(blst.ERROR.POINT_NOT_IN_GROUP);
var ctx = new blst.Pairing(true, DST);
var err = ctx.aggregate(_pk, _sig, msg, pk_for_wire);
if (err != blst.ERROR.SUCCESS)
throw new blst.Exception(err);
ctx.commit();
if (!ctx.finalverify())
throw new blst.Exception(blst.ERROR.VERIFY_FAIL);
Console.WriteLine("OK");
// exercise .as_fp12 by performing equivalent of ctx.finalverify above
var C1 = new blst.PT(_sig);
var C2 = ctx.as_fp12();
if (!blst.PT.finalverify(C1, C2))
throw new blst.Exception(blst.ERROR.VERIFY_FAIL);
// test integers as scalar multiplicands
// 5*P must equal ((2P)*2)+P; negative scalars negate the point.
var p = blst.G1();
var q = p.dup().dbl().dbl().add(p);
if (!p.mult(5).is_equal(q))
throw new ApplicationException("disaster");
if (!blst.G1().mult(-5).is_equal(q.neg()))
throw new ApplicationException("disaster");
// low-order sanity check
// This encoding is a valid curve point of order 11, i.e. on the curve
// but outside the G1 subgroup: in_group() must fail, 11*P must be inf.
var p11 = new blst.P1(fromHexString("80803f0d09fec09a95f2ee7495323c15c162270c7cceaffa8566e941c66bcf206e72955d58b3b32e564de3209d672ca5"));
if (p11.in_group())
throw new ApplicationException("disaster");
if (!p11.mult(11).is_inf())
throw new ApplicationException("disaster");
}
// Convert a single hexadecimal digit ('0'-'9', 'a'-'f', 'A'-'F') to its
// 4-bit value; any other character is rejected.
private static int fromHexChar(char c)
{
if (c>='0' && c<='9') return c - '0';
else if (c>='a' && c<='f') return c - 'a' + 10;
else if (c>='A' && c<='F') return c - 'A' + 10;
// The single-string ArgumentOutOfRangeException ctor interprets its
// argument as the parameter *name*; use the (paramName, message)
// overload so the text actually surfaces as the error message.
throw new ArgumentOutOfRangeException(nameof(c), "non-hex character");
}
private static byte[] fromHexString(string str)
{
if (str.Length%2 != 0)
throw new ArgumentException("odd number of characters in hex string");
char[] hex = str.ToCharArray();
byte[] ret = new byte[hex.Length/2];
for (int i=0; i
Exe
net8.0
CS8981
================================================
FILE: bindings/c#/run.me
================================================
#!/usr/bin/env python3
# Copyright Supranational LLC
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import os
import re
import sys
import glob
import subprocess
# C# source emitted verbatim at the head of the generated
# supranational.blst.cs: usings, native-library resolution, ERROR enum,
# Exception, SecretKey and Scalar. Kept byte-identical -- this literal IS
# the generated file's content, so any whitespace change alters output.
# NOTE(review): leading indentation appears stripped in this copy of the
# file -- diff against upstream before regenerating.
top = """
using System;
using System.Text;
using System.Numerics;
using System.Runtime.InteropServices;
using size_t = System.UIntPtr;
#if NET5_0_OR_GREATER
using System.Runtime.Loader;
using System.Reflection;
using System.IO;
#endif
namespace supranational { public static class blst {
#if NET5_0_OR_GREATER
private static readonly string dll;
static blst()
{
if (String.IsNullOrEmpty(dll)) {
var name = RuntimeInformation.IsOSPlatform(OSPlatform.Windows) ? "blst.dll"
: RuntimeInformation.IsOSPlatform(OSPlatform.OSX) ? "libblst.dll.dylib"
: "libblst.dll.so";
var dir = Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location);
var arch = RuntimeInformation.ProcessArchitecture switch {
Architecture.X64 => "x64",
Architecture.Arm64 => "arm64",
_ => "unsupported"
};
#if NET8_0_OR_GREATER
// RuntimeInformation.RuntimeIdentifier changed between .NET 7 and 8
// and only aligns to the nuget layout in 8+
var rid = RuntimeInformation.RuntimeIdentifier;
#else
// Mimic pre-8 RuntimeInformation.RuntimeIdentifier as
// "win-x64", "linux-x64", "linux-arm64", "osx-x64", etc.
var os = RuntimeInformation.IsOSPlatform(OSPlatform.Windows) ? "win"
: RuntimeInformation.IsOSPlatform(OSPlatform.OSX) ? "osx"
: RuntimeInformation.IsOSPlatform(OSPlatform.FreeBSD) ? "freebsd"
: "linux";
var rid = $"{os}-{arch}";
#endif
// first look for the file in the standard locations for a nuget installed native lib
dll = Path.Combine(dir, "runtimes", rid, "native", name);
if (!File.Exists(dll))
dll = Path.Combine(dir, arch, name); // try the original non-standard location
if (!File.Exists(dll))
dll = Path.Combine(Environment.CurrentDirectory, name);
if (File.Exists(dll)) {
AssemblyLoadContext.Default.ResolvingUnmanagedDll += (asm, needs) =>
(needs == "blst.dll" ? NativeLibrary.Load(dll) : IntPtr.Zero);
}
}
}
#endif
public enum ERROR {
SUCCESS = 0,
BAD_ENCODING,
POINT_NOT_ON_CURVE,
POINT_NOT_IN_GROUP,
AGGR_TYPE_MISMATCH,
VERIFY_FAIL,
PK_IS_INFINITY,
BAD_SCALAR,
}
public class Exception : ApplicationException {
private readonly ERROR code;
public Exception(ERROR err) { code = err; }
public override string Message
{ get
{ switch(code) {
case ERROR.BAD_ENCODING: return "bad encoding";
case ERROR.POINT_NOT_ON_CURVE: return "point not on curve";
case ERROR.POINT_NOT_IN_GROUP: return "point not in group";
case ERROR.AGGR_TYPE_MISMATCH: return "aggregate type mismatch";
case ERROR.VERIFY_FAIL: return "verify failure";
case ERROR.PK_IS_INFINITY: return "public key is infinity";
case ERROR.BAD_SCALAR: return "bad scalar";
default: return null;
}
}
}
}
public enum ByteOrder {
BigEndian,
LittleEndian,
}
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern
void blst_keygen([Out] byte[] key, [In] byte[] IKM, size_t IKM_len,
[In] byte[] info, size_t info_len);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern
void blst_keygen_v3([Out] byte[] key, [In] byte[] IKM, size_t IKM_len,
[In] byte[] info, size_t info_len);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern
void blst_keygen_v4_5([Out] byte[] key, [In] byte[] IKM, size_t IKM_len,
[In] byte[] salt, size_t salt_len,
[In] byte[] info, size_t info_len);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern
void blst_keygen_v5([Out] byte[] key, [In] byte[] IKM, size_t IKM_len,
[In] byte[] salt, size_t salt_len,
[In] byte[] info, size_t info_len);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_derive_master_eip2333([Out] byte[] key,
[In] byte[] IKM, size_t IKM_len);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_derive_child_eip2333([Out] byte[] key,
[In] byte[] master, uint child_index);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_scalar_from_bendian([Out] byte[] ret, [In] byte[] key);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_bendian_from_scalar([Out] byte[] ret, [In] byte[] key);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_sk_check([In] byte[] key);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_scalar_from_lendian([Out] byte[] key, [In] byte[] inp);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_lendian_from_scalar([Out] byte[] key, [In] byte[] inp);
public struct SecretKey {
internal byte[] key;
//public SecretKey() { key = new byte[32]; }
public SecretKey(byte[] IKM, string info)
{ key = new byte[32]; keygen(IKM, info); }
public SecretKey(byte[] inp, ByteOrder order=ByteOrder.BigEndian)
{ key = new byte[32];
switch(order) {
case ByteOrder.BigEndian: from_bendian(inp); break;
case ByteOrder.LittleEndian: from_lendian(inp); break;
}
}
public void keygen(byte[] IKM, string info="")
{ if (key == null) key = new byte[32];
byte[] info_bytes = Encoding.UTF8.GetBytes(info);
blst_keygen(key, IKM, (size_t)IKM.Length,
info_bytes, (size_t)info_bytes.Length);
}
public void keygen_v3(byte[] IKM, string info="")
{ if (key == null) key = new byte[32];
byte[] info_bytes = Encoding.UTF8.GetBytes(info);
blst_keygen_v3(key, IKM, (size_t)IKM.Length,
info_bytes, (size_t)info_bytes.Length);
}
public void keygen_v4_5(byte[] IKM, string salt, string info="")
{ if (key == null) key = new byte[32];
byte[] salt_bytes = Encoding.UTF8.GetBytes(salt);
byte[] info_bytes = Encoding.UTF8.GetBytes(info);
blst_keygen_v4_5(key, IKM, (size_t)IKM.Length,
salt_bytes, (size_t)salt_bytes.Length,
info_bytes, (size_t)info_bytes.Length);
}
public void keygen_v5(byte[] IKM, byte[] salt, string info="")
{ if (key == null) key = new byte[32];
byte[] info_bytes = Encoding.UTF8.GetBytes(info);
blst_keygen_v5(key, IKM, (size_t)IKM.Length,
salt, (size_t)salt.Length,
info_bytes, (size_t)info_bytes.Length);
}
public void keygen_v5(byte[] IKM, string salt, string info="")
{ keygen_v5(IKM, Encoding.UTF8.GetBytes(salt), info); }
public void derive_master_eip2333(byte[] IKM)
{ if (key == null) key = new byte[32];
blst_derive_master_eip2333(key, IKM, (size_t)IKM.Length);
}
public SecretKey(SecretKey master, uint child_index)
{ key = new byte[32];
blst_derive_child_eip2333(key, master.key, child_index);
}
public void from_bendian(byte[] inp)
{ if (inp.Length != 32)
throw new Exception(ERROR.BAD_ENCODING);
if (key == null) key = new byte[32];
blst_scalar_from_bendian(key, inp);
if (!blst_sk_check(key))
throw new Exception(ERROR.BAD_ENCODING);
}
public void from_lendian(byte[] inp)
{ if (inp.Length != 32)
throw new Exception(ERROR.BAD_ENCODING);
if (key == null) key = new byte[32];
blst_scalar_from_lendian(key, inp);
if (!blst_sk_check(key))
throw new Exception(ERROR.BAD_ENCODING);
}
public byte[] to_bendian()
{ byte[] ret = new byte[32];
blst_bendian_from_scalar(ret, key);
return ret;
}
public byte[] to_lendian()
{ byte[] ret = new byte[32];
blst_lendian_from_scalar(ret, key);
return ret;
}
}
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_scalar_from_be_bytes([Out] byte[] ret, [In] byte[] inp,
size_t inp_len);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_scalar_from_le_bytes([Out] byte[] ret, [In] byte[] inp,
size_t inp_len);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_sk_add_n_check([Out] byte[] ret, [In] byte[] a,
[In] byte[] b);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_sk_sub_n_check([Out] byte[] ret, [In] byte[] a,
[In] byte[] b);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_sk_mul_n_check([Out] byte[] ret, [In] byte[] a,
[In] byte[] b);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_sk_inverse([Out] byte[] ret, [In] byte[] a);
public struct Scalar {
internal byte[] val;
//public Scalar() { val = new byte[32]; }
public Scalar(byte[] inp, ByteOrder order=ByteOrder.BigEndian)
{ val = new byte[32];
switch(order) {
case ByteOrder.BigEndian: from_bendian(inp); break;
case ByteOrder.LittleEndian: from_lendian(inp); break;
}
}
private Scalar(bool _) { val = new byte[32]; }
private Scalar(Scalar orig) { val = (byte[])orig.val.Clone(); }
public Scalar dup() { return new Scalar(this); }
public void from_bendian(byte[] inp)
{ if (val == null) val = new byte[32];
blst_scalar_from_be_bytes(val, inp, (size_t)inp.Length);
}
public void from_lendian(byte[] inp)
{ if (val == null) val = new byte[32];
blst_scalar_from_le_bytes(val, inp, (size_t)inp.Length);
}
public byte[] to_bendian()
{ byte[] ret = new byte[32];
blst_bendian_from_scalar(ret, val);
return ret;
}
public byte[] to_lendian()
{ byte[] ret = new byte[32];
blst_lendian_from_scalar(ret, val);
return ret;
}
public Scalar add(SecretKey a)
{ if (!blst_sk_add_n_check(val, val, a.key))
throw new Exception(ERROR.BAD_SCALAR);
return this;
}
public Scalar add(Scalar a)
{ if (!blst_sk_add_n_check(val, val, a.val))
throw new Exception(ERROR.BAD_SCALAR);
return this;
}
public Scalar sub(Scalar a)
{ if (!blst_sk_sub_n_check(val, val, a.val))
throw new Exception(ERROR.BAD_SCALAR);
return this;
}
public Scalar mul(Scalar a)
{ if (!blst_sk_mul_n_check(val, val, a.val))
throw new Exception(ERROR.BAD_SCALAR);
return this;
}
public Scalar inverse()
{ blst_sk_inverse(val, val); return this; }
public static Scalar operator+(Scalar a, Scalar b)
{ return a.dup().add(b); }
public static Scalar operator-(Scalar a, Scalar b)
{ return a.dup().sub(b); }
public static Scalar operator*(Scalar a, Scalar b)
{ return a.dup().mul(b); }
public static Scalar operator/(Scalar a, Scalar b)
{ return b.dup().inverse().mul(a); }
}
private const int P1_COMPRESSED_SZ = 384/8;
private const int P2_COMPRESSED_SZ = 2*P1_COMPRESSED_SZ;
"""
# C# template for the G1/G2-symmetric part of the binding (P1_Affine, P1,
# pairing-aggregate externs). The driver later emits this text twice,
# swapping the 1<->2 group indices via re.sub(..., xchg_1vs2, ...) to
# produce the P2 flavor. Byte-identical: this literal is generated output.
middle = """
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern size_t blst_p1_affine_sizeof();
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern ERROR blst_p1_deserialize([Out] long[] ret, [In] byte[] inp);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_p1_affine_serialize([Out] byte[] ret, [In] long[] inp);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_p1_affine_compress([Out] byte[] ret, [In] long[] inp);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_p1_to_affine([Out] long[] ret, [In] long[] inp);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_p1_affine_on_curve([In] long[] point);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_p1_affine_in_g1([In] long[] point);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_p1_affine_is_inf([In] long[] point);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_p1_affine_is_equal([In] long[] a, [In] long[] b);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern IntPtr blst_p1_generator();
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern ERROR blst_core_verify_pk_in_g2([In] long[] pk, [In] long[] sig,
bool hash_or_encode,
[In] byte[] msg, size_t msg_len,
[In] byte[] dst, size_t dst_len,
[In] byte[] aug, size_t aug_len);
public struct P1_Affine {
internal readonly long[] point;
private static readonly int sz = (int)blst_p1_affine_sizeof()/sizeof(long);
//public P1_Affine() { point = new long[sz]; }
private P1_Affine(bool _) { point = new long[sz]; }
private P1_Affine(P1_Affine p) { point = (long[])p.point.Clone(); }
public P1_Affine(byte[] inp) : this(true)
{ int len = inp.Length;
if (len == 0 || len != ((inp[0]&0x80) == 0x80 ? P1_COMPRESSED_SZ
: 2*P1_COMPRESSED_SZ))
throw new Exception(ERROR.BAD_ENCODING);
ERROR err = blst_p1_deserialize(point, inp);
if (err != ERROR.SUCCESS)
throw new Exception(err);
}
public P1_Affine(P1 jacobian) : this(true)
{ blst_p1_to_affine(point, jacobian.point); }
public P1_Affine dup() { return new P1_Affine(this); }
public P1 to_jacobian() { return new P1(this); }
public byte[] serialize()
{ byte[] ret = new byte[2*P1_COMPRESSED_SZ];
blst_p1_affine_serialize(ret, point);
return ret;
}
public byte[] compress()
{ byte[] ret = new byte[P1_COMPRESSED_SZ];
blst_p1_affine_compress(ret, point);
return ret;
}
public bool on_curve() { return blst_p1_affine_on_curve(point); }
public bool in_group() { return blst_p1_affine_in_g1(point); }
public bool is_inf() { return blst_p1_affine_is_inf(point); }
public bool is_equal(P1_Affine p)
{ return blst_p1_affine_is_equal(point, p.point); }
ERROR core_verify(P2_Affine pk, bool hash_or_encode,
byte[] msg, string DST = "", byte[] aug = null)
{ byte[] dst = Encoding.UTF8.GetBytes(DST);
return blst_core_verify_pk_in_g2(pk.point, point,
hash_or_encode,
msg, (size_t)msg.Length,
dst, (size_t)dst.Length,
aug, (size_t)(aug!=null ? aug.Length : 0));
}
public static P1_Affine generator()
{ var ret = new P1_Affine(true);
Marshal.Copy(blst_p1_generator(), ret.point, 0, ret.point.Length);
return ret;
}
}
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern size_t blst_p1_sizeof();
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_p1_serialize([Out] byte[] ret, [In] long[] inp);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_p1_compress([Out] byte[] ret, [In] long[] inp);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_p1_from_affine([Out] long[] ret, [In] long[] inp);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_p1_on_curve([In] long[] point);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_p1_in_g1([In] long[] point);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_p1_is_inf([In] long[] point);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_p1_is_equal([In] long[] a, [In] long[] b);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_sk_to_pk_in_g1([Out] long[] ret, [In] byte[] SK);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern
void blst_encode_to_g1([Out] long[] ret, [In] byte[] msg, size_t msg_len,
[In] byte[] dst, size_t dst_len,
[In] byte[] aug, size_t aug_len);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern
void blst_hash_to_g1([Out] long[] ret, [In] byte[] msg, size_t msg_len,
[In] byte[] dst, size_t dst_len,
[In] byte[] aug, size_t aug_len);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern
void blst_sign_pk_in_g2([Out] long[] ret, [In] long[] hash, [In] byte[] SK);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern
void blst_p1_mult([Out] long[] ret, [In] long[] a,
[In] byte[] scalar, size_t nbits);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_p1_cneg([Out] long[] ret, bool cbit);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern
void blst_p1_add_or_double([Out] long[] ret, [In] long[] a, [In] long[] b);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern
void blst_p1_add_or_double_affine([Out] long[] ret, [In] long[] a,
[In] long[] b);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_p1_double([Out] long[] ret, [In] long[] a);
public struct P1 {
internal long[] point;
private static readonly int sz = (int)blst_p1_sizeof()/sizeof(long);
//public P1() { point = new long[sz]; }
private P1(bool _) { point = new long[sz]; }
private P1(P1 p) { point = (long[])p.point.Clone(); }
private long[] self()
{ if (point==null) { point = new long[sz]; } return point; }
public P1(SecretKey sk) : this(true)
{ blst_sk_to_pk_in_g1(point, sk.key); }
public P1(byte[] inp) : this(true)
{ int len = inp.Length;
if (len == 0 || len != ((inp[0]&0x80) == 0x80 ? P1_COMPRESSED_SZ
: 2*P1_COMPRESSED_SZ))
throw new Exception(ERROR.BAD_ENCODING);
ERROR err = blst_p1_deserialize(point, inp);
if (err != ERROR.SUCCESS)
throw new Exception(err);
blst_p1_from_affine(point, point);
}
public P1(P1_Affine affine) : this(true)
{ blst_p1_from_affine(point, affine.point); }
public P1 dup() { return new P1(this); }
public P1_Affine to_affine() { return new P1_Affine(this); }
public byte[] serialize()
{ byte[] ret = new byte[2*P1_COMPRESSED_SZ];
blst_p1_serialize(ret, point);
return ret;
}
public byte[] compress()
{ byte[] ret = new byte[P1_COMPRESSED_SZ];
blst_p1_compress(ret, point);
return ret;
}
public bool on_curve() { return blst_p1_on_curve(point); }
public bool in_group() { return blst_p1_in_g1(point); }
public bool is_inf() { return blst_p1_is_inf(point); }
public bool is_equal(P1 p) { return blst_p1_is_equal(point, p.point); }
public P1 hash_to(byte[] msg, string DST="", byte[] aug=null)
{ byte[] dst = Encoding.UTF8.GetBytes(DST);
blst_hash_to_g1(self(), msg, (size_t)msg.Length,
dst, (size_t)dst.Length,
aug, (size_t)(aug!=null ? aug.Length : 0));
return this;
}
public P1 encode_to(byte[] msg, string DST="", byte[] aug=null)
{ byte[] dst = Encoding.UTF8.GetBytes(DST);
blst_encode_to_g1(self(), msg, (size_t)msg.Length,
dst, (size_t)dst.Length,
aug, (size_t)(aug!=null ? aug.Length : 0));
return this;
}
public P1 sign_with(SecretKey sk)
{ blst_sign_pk_in_g2(point, point, sk.key); return this; }
public P1 sign_with(Scalar scalar)
{ blst_sign_pk_in_g2(point, point, scalar.val); return this; }
public void aggregate(P1_Affine inp)
{ if (blst_p1_affine_in_g1(inp.point))
blst_p1_add_or_double_affine(point, point, inp.point);
else
throw new Exception(ERROR.POINT_NOT_IN_GROUP);
}
public P1 mult(byte[] scalar)
{ blst_p1_mult(point, point, scalar, (size_t)(scalar.Length*8));
return this;
}
public P1 mult(Scalar scalar)
{ blst_p1_mult(point, point, scalar.val, (size_t)255);
return this;
}
public P1 mult(BigInteger scalar)
{ byte[] val;
if (scalar.Sign < 0) {
val = BigInteger.Negate(scalar).ToByteArray();
blst_p1_cneg(point, true);
} else {
val = scalar.ToByteArray();
}
int len = val.Length;
if (val[len-1]==0) len--;
blst_p1_mult(point, point, val, (size_t)(len*8));
return this;
}
public P1 cneg(bool flag) { blst_p1_cneg(point, flag); return this; }
public P1 neg() { blst_p1_cneg(point, true); return this; }
public P1 add(P1 a)
{ blst_p1_add_or_double(point, point, a.point); return this; }
public P1 add(P1_Affine a)
{ blst_p1_add_or_double_affine(point, point, a.point); return this; }
public P1 dbl()
{ blst_p1_double(point, point); return this; }
public static P1 generator()
{ var ret = new P1(true);
Marshal.Copy(blst_p1_generator(), ret.point, 0, ret.point.Length);
return ret;
}
}
public static P1 G1() { return P1.generator(); }
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_aggregated_in_g1([Out] long[] fp12, [In] long[] p);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern ERROR blst_pairing_aggregate_pk_in_g1([In, Out] long[] fp12,
[In] long[] pk, [In] long[] sig,
[In] byte[] msg, size_t msg_len,
[In] byte[] aug, size_t aug_len);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern ERROR blst_pairing_mul_n_aggregate_pk_in_g1([In, Out] long[] fp12,
[In] long[] pk, [In] long[] sig,
[In] byte[] scalar, size_t nbits,
[In] byte[] msg, size_t msg_len,
[In] byte[] aug, size_t aug_len);
"""
# C# template for the group-agnostic tail of the binding: PT (fp12) and
# the Pairing context. Byte-identical: this literal is generated output.
# NOTE(review): this copy is corrupted by extraction -- the Pairing
# constructor's copy loop and the start of the first aggregate() overload
# were lost (see the fused "for (int i=0; i sig," line below). Restore
# this region from upstream bindings/c#/run.me before regenerating.
bottom = """
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern size_t blst_fp12_sizeof();
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_miller_loop([Out] long[] fp12, [In] long[] q,
[In] long[] p);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_fp12_is_one([In] long[] fp12);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_fp12_is_equal([In] long[] a, [In] long[] b);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_fp12_sqr([Out] long[] ret, [In] long[] a);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_fp12_mul([Out] long[] ret, [In] long[] a,
[In] long[] b);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_final_exp([Out] long[] ret, [In] long[] a);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_fp12_finalverify([In] long[] a, [In] long[] b);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern IntPtr blst_fp12_one();
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_fp12_in_group([In] long[] a);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_bendian_from_fp12([Out] byte[] ret, [In] long[] a);
public struct PT {
internal readonly long[] fp12;
private static readonly int sz = (int)blst_fp12_sizeof()/sizeof(long);
internal PT(bool _) { fp12 = new long[sz]; }
private PT(PT orig) { fp12 = (long[])orig.fp12.Clone(); }
public PT(P1_Affine p) : this(true)
{ blst_aggregated_in_g1(fp12, p.point); }
public PT(P1 p) : this(true)
{ blst_aggregated_in_g1(fp12, (new P1_Affine(p)).point); }
public PT(P2_Affine q) : this(true)
{ blst_aggregated_in_g2(fp12, q.point); }
public PT(P2 q) : this(true)
{ blst_aggregated_in_g2(fp12, (new P2_Affine(q)).point); }
public PT(P2_Affine q, P1_Affine p) : this(true)
{ blst_miller_loop(fp12, q.point, p.point); }
public PT(P1_Affine p, P2_Affine q) : this(q, p) {}
public PT(P2 q, P1 p) : this(true)
{ blst_miller_loop(fp12, (new P2_Affine(q)).point,
(new P1_Affine(p)).point);
}
public PT(P1 p, P2 q) : this(q, p) {}
public PT dup() { return new PT(this); }
public bool is_one() { return blst_fp12_is_one(fp12); }
public bool is_equal(PT p)
{ return blst_fp12_is_equal(fp12, p.fp12); }
public PT sqr() { blst_fp12_sqr(fp12, fp12); return this; }
public PT mul(PT p) { blst_fp12_mul(fp12, fp12, p.fp12); return this; }
public PT final_exp() { blst_final_exp(fp12, fp12); return this; }
public bool in_group() { return blst_fp12_in_group(fp12); }
public byte[] to_bendian()
{ byte[] ret = new byte[12*P1_COMPRESSED_SZ];
blst_bendian_from_fp12(ret, fp12);
return ret;
}
public static bool finalverify(PT gt1, PT gt2)
{ return blst_fp12_finalverify(gt1.fp12, gt2.fp12); }
public static PT one()
{ var ret = new PT(true);
Marshal.Copy(blst_fp12_one(), ret.fp12, 0, ret.fp12.Length);
return ret;
}
}
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern size_t blst_pairing_sizeof();
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern
void blst_pairing_init([In, Out] long[] ctx, bool hash_or_encode,
[In] ref long dst, size_t dst_len);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_pairing_commit([In, Out] long[] ctx);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern ERROR blst_pairing_merge([In, Out] long[] ctx, [In] long[] ctx1);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_pairing_finalverify([In] long[] ctx, [In] long[] sig);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern
void blst_pairing_raw_aggregate([In, Out] long[] ctx, [In] long[] q,
[In] long[] p);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern IntPtr blst_pairing_as_fp12([In] long[] ctx);
public struct Pairing {
private readonly long[] ctx;
private static readonly int sz = (int)blst_pairing_sizeof()/sizeof(long);
public Pairing(bool hash_or_encode=false, string DST="")
{
byte[] dst = Encoding.UTF8.GetBytes(DST);
int dst_len = dst.Length;
int add_len = dst_len!=0 ? (dst_len+sizeof(long)-1)/sizeof(long) : 1;
Array.Resize(ref dst, add_len*sizeof(long));
ctx = new long[sz+add_len];
for (int i=0; i sig,
byte[] msg, byte[] aug=null)
{ return blst_pairing_aggregate_pk_in_g1(ctx, pk.point,
sig.HasValue ? sig.Value.point : null,
msg, (size_t)msg.Length,
aug, (size_t)(aug!=null ? aug.Length : 0));
}
public ERROR aggregate(P2_Affine pk, Nullable sig,
byte[] msg, byte[] aug=null)
{ return blst_pairing_aggregate_pk_in_g2(ctx, pk.point,
sig.HasValue ? sig.Value.point : null,
msg, (size_t)msg.Length,
aug, (size_t)(aug!=null ? aug.Length : 0));
}
public ERROR mul_n_aggregate(P2_Affine pk, P1_Affine sig,
byte[] scalar, int nbits,
byte[] msg, byte[] aug=null)
{ return blst_pairing_mul_n_aggregate_pk_in_g2(ctx, pk.point, sig.point,
scalar, (size_t)nbits,
msg, (size_t)msg.Length,
aug, (size_t)(aug!=null ? aug.Length : 0));
}
public ERROR mul_n_aggregate(P1_Affine pk, P2_Affine sig,
byte[] scalar, int nbits,
byte[] msg, byte[] aug=null)
{ return blst_pairing_mul_n_aggregate_pk_in_g1(ctx, pk.point, sig.point,
scalar, (size_t)nbits,
msg, (size_t)msg.Length,
aug, (size_t)(aug!=null ? aug.Length : 0));
}
public void commit() { blst_pairing_commit(ctx); }
public void merge(Pairing a)
{ var err = blst_pairing_merge(ctx, a.ctx);
if (err != ERROR.SUCCESS)
throw new Exception(err);
}
public bool finalverify(PT sig=new PT())
{ return blst_pairing_finalverify(ctx, sig.fp12); }
public void raw_aggregate(P2_Affine q, P1_Affine p)
{ blst_pairing_raw_aggregate(ctx, q.point, p.point); }
public void raw_aggregate(P1_Affine p, P2_Affine q)
{ raw_aggregate(q, p); }
public void raw_aggregate(P2 q, P1 p)
{ blst_pairing_raw_aggregate(ctx, (new P2_Affine(q)).point,
(new P1_Affine(p)).point);
}
public void raw_aggregate(P1 p, P2 q)
{ raw_aggregate(q, p); }
public PT as_fp12()
{ var ret = new PT(true);
GCHandle h = GCHandle.Alloc(ctx, GCHandleType.Pinned);
Marshal.Copy(blst_pairing_as_fp12(ctx), ret.fp12, 0, ret.fp12.Length);
h.Free();
return ret;
}
}
}}"""
# Split argv[0] at the LAST path separator (handles both '/' and '\' so
# the script also works when invoked with Windows-style paths); if the
# script was run via a path, chdir into its directory so the generated
# file lands next to it.
here = re.split(r'[/\\](?=[^/\\]*$)', sys.argv[0])
if len(here) > 1:
    os.chdir(here[0])
def xchg_1vs2(matchobj):
    """re.sub callback: keep group(1) as-is and flip the group-index
    digit captured in group(2), '1' <-> '2'."""
    digit = matchobj.group(2)
    swapped = '2' if digit == '1' else '1'
    return matchobj.group(1) + swapped
def newer(files):
    """Return True when the last element of *files* (the output) is
    missing or out of date relative to any of the preceding inputs.

    A single-element list always reports True (nothing to compare with).
    """
    if len(files) == 1:
        return True
    rh = files[-1]
    if not os.path.exists(rh):
        return True
    # Compare modification times. st_ctime is inode-change time on POSIX
    # and *creation* time on Windows, where editing an existing input
    # would never trigger regeneration; st_mtime tracks content changes
    # on both platforms. Stat the output once, outside the loop.
    rh_mtime = os.stat(rh).st_mtime
    for lh in files[:-1]:
        if os.stat(lh).st_mtime > rh_mtime:
            return True
    return False
fname = "supranational.blst.cs"
if newer([here[-1], fname]):
fd = open(fname, "w")
print("//!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!", file=fd)
print("// DO NOT EDIT THIS FILE!!!", file=fd)
print("// The file is auto-generated by " + here[-1], file=fd)
print("//!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!", file=fd)
print("\n\n", file=fd)
print(top, file=fd)
print(middle, file=fd)
print(re.sub(r'((? "x64",
Architecture.Arm64 => "arm64",
_ => "unsupported"
};
#if NET8_0_OR_GREATER
// RuntimeInformation.RuntimeIdentifier changed between .NET 7 and 8
// and only aligns to the nuget layout in 8+
var rid = RuntimeInformation.RuntimeIdentifier;
#else
// Mimic pre-8 RuntimeInformation.RuntimeIdentifier as
// "win-x64", "linux-x64", "linux-arm64", "osx-x64", etc.
var os = RuntimeInformation.IsOSPlatform(OSPlatform.Windows) ? "win"
: RuntimeInformation.IsOSPlatform(OSPlatform.OSX) ? "osx"
: RuntimeInformation.IsOSPlatform(OSPlatform.FreeBSD) ? "freebsd"
: "linux";
var rid = $"{os}-{arch}";
#endif
// first look for the file in the standard locations for a nuget installed native lib
dll = Path.Combine(dir, "runtimes", rid, "native", name);
if (!File.Exists(dll))
dll = Path.Combine(dir, arch, name); // try the original non-standard location
if (!File.Exists(dll))
dll = Path.Combine(Environment.CurrentDirectory, name);
if (File.Exists(dll)) {
AssemblyLoadContext.Default.ResolvingUnmanagedDll += (asm, needs) =>
(needs == "blst.dll" ? NativeLibrary.Load(dll) : IntPtr.Zero);
}
}
}
#endif
public enum ERROR {
SUCCESS = 0,
BAD_ENCODING,
POINT_NOT_ON_CURVE,
POINT_NOT_IN_GROUP,
AGGR_TYPE_MISMATCH,
VERIFY_FAIL,
PK_IS_INFINITY,
BAD_SCALAR,
}
public class Exception : ApplicationException {
private readonly ERROR code;
public Exception(ERROR err) { code = err; }
public override string Message
{ get
{ switch(code) {
case ERROR.BAD_ENCODING: return "bad encoding";
case ERROR.POINT_NOT_ON_CURVE: return "point not on curve";
case ERROR.POINT_NOT_IN_GROUP: return "point not in group";
case ERROR.AGGR_TYPE_MISMATCH: return "aggregate type mismatch";
case ERROR.VERIFY_FAIL: return "verify failure";
case ERROR.PK_IS_INFINITY: return "public key is infinity";
case ERROR.BAD_SCALAR: return "bad scalar";
default: return null;
}
}
}
}
public enum ByteOrder {
BigEndian,
LittleEndian,
}
// P/Invoke declarations for the native key-generation and scalar
// conversion entry points. "blst.dll" is resolved via the
// ResolvingUnmanagedDll hook installed earlier in this file.
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern
void blst_keygen([Out] byte[] key, [In] byte[] IKM, size_t IKM_len,
[In] byte[] info, size_t info_len);
// _v3, _v4_5 and _v5 are versioned keygen variants; the latter two take
// an additional caller-supplied salt.
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern
void blst_keygen_v3([Out] byte[] key, [In] byte[] IKM, size_t IKM_len,
[In] byte[] info, size_t info_len);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern
void blst_keygen_v4_5([Out] byte[] key, [In] byte[] IKM, size_t IKM_len,
[In] byte[] salt, size_t salt_len,
[In] byte[] info, size_t info_len);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern
void blst_keygen_v5([Out] byte[] key, [In] byte[] IKM, size_t IKM_len,
[In] byte[] salt, size_t salt_len,
[In] byte[] info, size_t info_len);
// EIP-2333 hierarchical key derivation (master from seed, child from parent).
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_derive_master_eip2333([Out] byte[] key,
[In] byte[] IKM, size_t IKM_len);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_derive_child_eip2333([Out] byte[] key,
[In] byte[] master, uint child_index);
// Scalar <-> fixed-width byte-string conversions plus the secret-key
// validity check used by SecretKey.from_bendian/from_lendian.
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_scalar_from_bendian([Out] byte[] ret, [In] byte[] key);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_bendian_from_scalar([Out] byte[] ret, [In] byte[] key);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_sk_check([In] byte[] key);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_scalar_from_lendian([Out] byte[] key, [In] byte[] inp);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_lendian_from_scalar([Out] byte[] key, [In] byte[] inp);
// 32-byte BLS secret key (scalar), a thin wrapper over the native
// key-generation, EIP-2333 derivation and (de)serialization routines.
public struct SecretKey {
    internal byte[] key;
    //public SecretKey() { key = new byte[32]; }

    // Allocate the backing storage on first use (default-constructed
    // structs start with a null array).
    private void allocate()
    {   if (key == null)
            key = new byte[32];
    }

    public SecretKey(byte[] IKM, string info)
    {   key = new byte[32];
        keygen(IKM, info);
    }

    public SecretKey(byte[] inp, ByteOrder order=ByteOrder.BigEndian)
    {   key = new byte[32];
        if (order == ByteOrder.BigEndian)
            from_bendian(inp);
        else if (order == ByteOrder.LittleEndian)
            from_lendian(inp);
    }

    // Derive the key from input keying material and an optional info string.
    public void keygen(byte[] IKM, string info="")
    {   allocate();
        byte[] aux = Encoding.UTF8.GetBytes(info);
        blst_keygen(key, IKM, (size_t)IKM.Length, aux, (size_t)aux.Length);
    }

    public void keygen_v3(byte[] IKM, string info="")
    {   allocate();
        byte[] aux = Encoding.UTF8.GetBytes(info);
        blst_keygen_v3(key, IKM, (size_t)IKM.Length, aux, (size_t)aux.Length);
    }

    public void keygen_v4_5(byte[] IKM, string salt, string info="")
    {   allocate();
        byte[] saltBytes = Encoding.UTF8.GetBytes(salt);
        byte[] aux = Encoding.UTF8.GetBytes(info);
        blst_keygen_v4_5(key, IKM, (size_t)IKM.Length,
                         saltBytes, (size_t)saltBytes.Length,
                         aux, (size_t)aux.Length);
    }

    public void keygen_v5(byte[] IKM, byte[] salt, string info="")
    {   allocate();
        byte[] aux = Encoding.UTF8.GetBytes(info);
        blst_keygen_v5(key, IKM, (size_t)IKM.Length,
                       salt, (size_t)salt.Length,
                       aux, (size_t)aux.Length);
    }

    public void keygen_v5(byte[] IKM, string salt, string info="")
    {   keygen_v5(IKM, Encoding.UTF8.GetBytes(salt), info);
    }

    // EIP-2333: derive the master key from seed material.
    public void derive_master_eip2333(byte[] IKM)
    {   allocate();
        blst_derive_master_eip2333(key, IKM, (size_t)IKM.Length);
    }

    // EIP-2333: derive a child key from a parent ("master") key.
    public SecretKey(SecretKey master, uint child_index)
    {   key = new byte[32];
        blst_derive_child_eip2333(key, master.key, child_index);
    }

    // Load a 32-byte big-endian scalar; rejects wrong length and any
    // value that fails the native secret-key check.
    public void from_bendian(byte[] inp)
    {   if (inp.Length != 32)
            throw new Exception(ERROR.BAD_ENCODING);
        allocate();
        blst_scalar_from_bendian(key, inp);
        if (!blst_sk_check(key))
            throw new Exception(ERROR.BAD_ENCODING);
    }

    // Little-endian counterpart of from_bendian.
    public void from_lendian(byte[] inp)
    {   if (inp.Length != 32)
            throw new Exception(ERROR.BAD_ENCODING);
        allocate();
        blst_scalar_from_lendian(key, inp);
        if (!blst_sk_check(key))
            throw new Exception(ERROR.BAD_ENCODING);
    }

    public byte[] to_bendian()
    {   byte[] outp = new byte[32];
        blst_bendian_from_scalar(outp, key);
        return outp;
    }

    public byte[] to_lendian()
    {   byte[] outp = new byte[32];
        blst_lendian_from_scalar(outp, key);
        return outp;
    }
}
// Native scalar loaders (arbitrary-length byte strings) and the checked
// modular arithmetic used by struct Scalar below; the *_n_check calls
// return false on failure.
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_scalar_from_be_bytes([Out] byte[] ret, [In] byte[] inp,
size_t inp_len);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_scalar_from_le_bytes([Out] byte[] ret, [In] byte[] inp,
size_t inp_len);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_sk_add_n_check([Out] byte[] ret, [In] byte[] a,
[In] byte[] b);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_sk_sub_n_check([Out] byte[] ret, [In] byte[] a,
[In] byte[] b);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_sk_mul_n_check([Out] byte[] ret, [In] byte[] a,
[In] byte[] b);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_sk_inverse([Out] byte[] ret, [In] byte[] a);
// 256-bit scalar with in-place modular arithmetic. Unlike SecretKey,
// the byte-string loaders take input of any length (the length is
// handed to the native loader).
public struct Scalar {
    internal byte[] val;
    //public Scalar() { val = new byte[32]; }

    private void allocate()
    {   if (val == null)
            val = new byte[32];
    }

    public Scalar(byte[] inp, ByteOrder order=ByteOrder.BigEndian)
    {   val = new byte[32];
        if (order == ByteOrder.BigEndian)
            from_bendian(inp);
        else if (order == ByteOrder.LittleEndian)
            from_lendian(inp);
    }

    private Scalar(bool _) { val = new byte[32]; }
    private Scalar(Scalar orig) { val = (byte[])orig.val.Clone(); }

    // Deep copy; the arithmetic methods below mutate in place.
    public Scalar dup() { return new Scalar(this); }

    public void from_bendian(byte[] inp)
    {   allocate();
        blst_scalar_from_be_bytes(val, inp, (size_t)inp.Length);
    }

    public void from_lendian(byte[] inp)
    {   allocate();
        blst_scalar_from_le_bytes(val, inp, (size_t)inp.Length);
    }

    public byte[] to_bendian()
    {   byte[] outp = new byte[32];
        blst_bendian_from_scalar(outp, val);
        return outp;
    }

    public byte[] to_lendian()
    {   byte[] outp = new byte[32];
        blst_lendian_from_scalar(outp, val);
        return outp;
    }

    // In-place arithmetic; a false result from the native *_n_check
    // call is surfaced as BAD_SCALAR.
    public Scalar add(SecretKey a)
    {   bool ok = blst_sk_add_n_check(val, val, a.key);
        if (!ok) throw new Exception(ERROR.BAD_SCALAR);
        return this;
    }
    public Scalar add(Scalar a)
    {   bool ok = blst_sk_add_n_check(val, val, a.val);
        if (!ok) throw new Exception(ERROR.BAD_SCALAR);
        return this;
    }
    public Scalar sub(Scalar a)
    {   bool ok = blst_sk_sub_n_check(val, val, a.val);
        if (!ok) throw new Exception(ERROR.BAD_SCALAR);
        return this;
    }
    public Scalar mul(Scalar a)
    {   bool ok = blst_sk_mul_n_check(val, val, a.val);
        if (!ok) throw new Exception(ERROR.BAD_SCALAR);
        return this;
    }
    public Scalar inverse()
    {   blst_sk_inverse(val, val);
        return this;
    }

    // Non-mutating operators built on dup().
    public static Scalar operator+(Scalar a, Scalar b)
    {   Scalar r = a.dup(); return r.add(b); }
    public static Scalar operator-(Scalar a, Scalar b)
    {   Scalar r = a.dup(); return r.sub(b); }
    public static Scalar operator*(Scalar a, Scalar b)
    {   Scalar r = a.dup(); return r.mul(b); }
    public static Scalar operator/(Scalar a, Scalar b)
    {   Scalar r = b.dup(); return r.inverse().mul(a); }
}
// Wire-encoding sizes: a compressed G1 point is 384/8 = 48 bytes;
// G2 coordinates are twice as wide. Uncompressed forms are 2x these.
private const int P1_COMPRESSED_SZ = 384/8;
private const int P2_COMPRESSED_SZ = 2*P1_COMPRESSED_SZ;
// Native routines for affine G1 points; the long[] blobs are sized at
// runtime from blst_p1_affine_sizeof().
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern size_t blst_p1_affine_sizeof();
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern ERROR blst_p1_deserialize([Out] long[] ret, [In] byte[] inp);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_p1_affine_serialize([Out] byte[] ret, [In] long[] inp);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_p1_affine_compress([Out] byte[] ret, [In] long[] inp);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_p1_to_affine([Out] long[] ret, [In] long[] inp);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_p1_affine_on_curve([In] long[] point);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_p1_affine_in_g1([In] long[] point);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_p1_affine_is_inf([In] long[] point);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_p1_affine_is_equal([In] long[] a, [In] long[] b);
// Returns a pointer into native static data; copied out with Marshal.Copy.
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern IntPtr blst_p1_generator();
// Single-shot core verification for the min-sig scheme (pk in G2).
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern ERROR blst_core_verify_pk_in_g2([In] long[] pk, [In] long[] sig,
bool hash_or_encode,
[In] byte[] msg, size_t msg_len,
[In] byte[] dst, size_t dst_len,
[In] byte[] aug, size_t aug_len);
// Affine representation of a G1 point, stored as the native
// blst_p1_affine blob inside a long[].
public struct P1_Affine {
    internal readonly long[] point;
    private static readonly int sz = (int)blst_p1_affine_sizeof()/sizeof(long);

    //public P1_Affine() { point = new long[sz]; }
    private P1_Affine(bool _) { point = new long[sz]; }
    private P1_Affine(P1_Affine p) { point = (long[])p.point.Clone(); }

    // Deserialize from either the 48-byte compressed or the 96-byte
    // uncompressed encoding; the top bit of the first byte selects.
    public P1_Affine(byte[] inp) : this(true)
    {   int expected = inp.Length != 0 && (inp[0]&0x80) != 0
                       ? P1_COMPRESSED_SZ : 2*P1_COMPRESSED_SZ;
        if (inp.Length != expected)
            throw new Exception(ERROR.BAD_ENCODING);
        ERROR err = blst_p1_deserialize(point, inp);
        if (err != ERROR.SUCCESS)
            throw new Exception(err);
    }

    public P1_Affine(P1 jacobian) : this(true)
    {   blst_p1_to_affine(point, jacobian.point); }

    public P1_Affine dup()  { return new P1_Affine(this); }
    public P1 to_jacobian() { return new P1(this); }

    public byte[] serialize()
    {   byte[] outp = new byte[2*P1_COMPRESSED_SZ];
        blst_p1_affine_serialize(outp, point);
        return outp;
    }
    public byte[] compress()
    {   byte[] outp = new byte[P1_COMPRESSED_SZ];
        blst_p1_affine_compress(outp, point);
        return outp;
    }

    public bool on_curve() { return blst_p1_affine_on_curve(point); }
    public bool in_group() { return blst_p1_affine_in_g1(point); }
    public bool is_inf()   { return blst_p1_affine_is_inf(point); }
    public bool is_equal(P1_Affine p)
    {   return blst_p1_affine_is_equal(point, p.point); }

    // Core verification with this point in the signature slot and the
    // public key in G2; DST is the domain-separation tag.
    ERROR core_verify(P2_Affine pk, bool hash_or_encode,
                      byte[] msg, string DST = "", byte[] aug = null)
    {   byte[] dst = Encoding.UTF8.GetBytes(DST);
        size_t aug_len = (size_t)(aug!=null ? aug.Length : 0);
        return blst_core_verify_pk_in_g2(pk.point, point, hash_or_encode,
                                         msg, (size_t)msg.Length,
                                         dst, (size_t)dst.Length,
                                         aug, aug_len);
    }

    public static P1_Affine generator()
    {   P1_Affine ret = new P1_Affine(true);
        Marshal.Copy(blst_p1_generator(), ret.point, 0, ret.point.Length);
        return ret;
    }
}
// Native routines for G1 points in the full (non-affine) representation,
// plus hash/encode-to-G1 and min-pk signing.
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern size_t blst_p1_sizeof();
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_p1_serialize([Out] byte[] ret, [In] long[] inp);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_p1_compress([Out] byte[] ret, [In] long[] inp);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_p1_from_affine([Out] long[] ret, [In] long[] inp);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_p1_on_curve([In] long[] point);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_p1_in_g1([In] long[] point);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_p1_is_inf([In] long[] point);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_p1_is_equal([In] long[] a, [In] long[] b);
// Public key in G1 from a secret key.
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_sk_to_pk_in_g1([Out] long[] ret, [In] byte[] SK);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern
void blst_encode_to_g1([Out] long[] ret, [In] byte[] msg, size_t msg_len,
[In] byte[] dst, size_t dst_len,
[In] byte[] aug, size_t aug_len);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern
void blst_hash_to_g1([Out] long[] ret, [In] byte[] msg, size_t msg_len,
[In] byte[] dst, size_t dst_len,
[In] byte[] aug, size_t aug_len);
// Multiply a G1 hash point by SK (signature when the pk lives in G2).
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern
void blst_sign_pk_in_g2([Out] long[] ret, [In] long[] hash, [In] byte[] SK);
// Scalar multiplication, conditional negation, addition and doubling.
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern
void blst_p1_mult([Out] long[] ret, [In] long[] a,
[In] byte[] scalar, size_t nbits);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_p1_cneg([Out] long[] ret, bool cbit);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern
void blst_p1_add_or_double([Out] long[] ret, [In] long[] a, [In] long[] b);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern
void blst_p1_add_or_double_affine([Out] long[] ret, [In] long[] a,
[In] long[] b);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_p1_double([Out] long[] ret, [In] long[] a);
// G1 point in the native full representation (blst_p1 blob in a long[]),
// with in-place arithmetic, hash-to-curve and signing helpers.
public struct P1 {
    internal long[] point;
    private static readonly int sz = (int)blst_p1_sizeof()/sizeof(long);

    //public P1() { point = new long[sz]; }
    private P1(bool _) { point = new long[sz]; }
    private P1(P1 p) { point = (long[])p.point.Clone(); }

    // Default-constructed structs carry a null array; allocate on demand.
    private long[] self()
    {   if (point == null)
            point = new long[sz];
        return point;
    }

    // Public key corresponding to a secret key.
    public P1(SecretKey sk) : this(true)
    {   blst_sk_to_pk_in_g1(point, sk.key); }

    // Deserialize (compressed 48-byte or uncompressed 96-byte form,
    // selected by the top bit of the first byte) and convert from affine.
    public P1(byte[] inp) : this(true)
    {   int expected = inp.Length != 0 && (inp[0]&0x80) != 0
                       ? P1_COMPRESSED_SZ : 2*P1_COMPRESSED_SZ;
        if (inp.Length != expected)
            throw new Exception(ERROR.BAD_ENCODING);
        ERROR err = blst_p1_deserialize(point, inp);
        if (err != ERROR.SUCCESS)
            throw new Exception(err);
        blst_p1_from_affine(point, point);
    }

    public P1(P1_Affine affine) : this(true)
    {   blst_p1_from_affine(point, affine.point); }

    public P1 dup()              { return new P1(this); }
    public P1_Affine to_affine() { return new P1_Affine(this); }

    public byte[] serialize()
    {   byte[] outp = new byte[2*P1_COMPRESSED_SZ];
        blst_p1_serialize(outp, point);
        return outp;
    }
    public byte[] compress()
    {   byte[] outp = new byte[P1_COMPRESSED_SZ];
        blst_p1_compress(outp, point);
        return outp;
    }

    public bool on_curve()     { return blst_p1_on_curve(point); }
    public bool in_group()     { return blst_p1_in_g1(point); }
    public bool is_inf()       { return blst_p1_is_inf(point); }
    public bool is_equal(P1 p) { return blst_p1_is_equal(point, p.point); }

    // Hash msg (with optional augmentation) to a G1 point, in place.
    public P1 hash_to(byte[] msg, string DST="", byte[] aug=null)
    {   byte[] dst = Encoding.UTF8.GetBytes(DST);
        blst_hash_to_g1(self(), msg, (size_t)msg.Length,
                        dst, (size_t)dst.Length,
                        aug, (size_t)(aug!=null ? aug.Length : 0));
        return this;
    }
    // encode-to-curve counterpart of hash_to.
    public P1 encode_to(byte[] msg, string DST="", byte[] aug=null)
    {   byte[] dst = Encoding.UTF8.GetBytes(DST);
        blst_encode_to_g1(self(), msg, (size_t)msg.Length,
                          dst, (size_t)dst.Length,
                          aug, (size_t)(aug!=null ? aug.Length : 0));
        return this;
    }

    public P1 sign_with(SecretKey sk)
    {   blst_sign_pk_in_g2(point, point, sk.key); return this; }
    public P1 sign_with(Scalar scalar)
    {   blst_sign_pk_in_g2(point, point, scalar.val); return this; }

    // Accumulate a subgroup-checked affine point; rejects points
    // outside G1.
    public void aggregate(P1_Affine inp)
    {   if (!blst_p1_affine_in_g1(inp.point))
            throw new Exception(ERROR.POINT_NOT_IN_GROUP);
        blst_p1_add_or_double_affine(point, point, inp.point);
    }

    public P1 mult(byte[] scalar)
    {   blst_p1_mult(point, point, scalar, (size_t)(scalar.Length*8));
        return this;
    }
    public P1 mult(Scalar scalar)
    {   blst_p1_mult(point, point, scalar.val, (size_t)255);
        return this;
    }
    // Signed multiplication: a negative scalar negates the point first.
    public P1 mult(BigInteger scalar)
    {   bool negative = scalar.Sign < 0;
        byte[] le = (negative ? BigInteger.Negate(scalar) : scalar)
                    .ToByteArray();
        if (negative)
            blst_p1_cneg(point, true);
        int nbytes = le.Length;
        if (le[nbytes-1] == 0)
            nbytes--;       // drop ToByteArray's sign padding byte
        blst_p1_mult(point, point, le, (size_t)(nbytes*8));
        return this;
    }

    public P1 cneg(bool flag) { blst_p1_cneg(point, flag); return this; }
    public P1 neg()           { blst_p1_cneg(point, true); return this; }
    public P1 add(P1 a)
    {   blst_p1_add_or_double(point, point, a.point); return this; }
    public P1 add(P1_Affine a)
    {   blst_p1_add_or_double_affine(point, point, a.point); return this; }
    public P1 dbl()
    {   blst_p1_double(point, point); return this; }

    public static P1 generator()
    {   P1 ret = new P1(true);
        Marshal.Copy(blst_p1_generator(), ret.point, 0, ret.point.Length);
        return ret;
    }
}
public static P1 G1() { return P1.generator(); }
// Pairing-context accumulation with the signature side in G1, plus the
// native routines for affine G2 points (mirror of the G1 set above).
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_aggregated_in_g1([Out] long[] fp12, [In] long[] p);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern ERROR blst_pairing_aggregate_pk_in_g1([In, Out] long[] fp12,
[In] long[] pk, [In] long[] sig,
[In] byte[] msg, size_t msg_len,
[In] byte[] aug, size_t aug_len);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern ERROR blst_pairing_mul_n_aggregate_pk_in_g1([In, Out] long[] fp12,
[In] long[] pk, [In] long[] sig,
[In] byte[] scalar, size_t nbits,
[In] byte[] msg, size_t msg_len,
[In] byte[] aug, size_t aug_len);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern size_t blst_p2_affine_sizeof();
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern ERROR blst_p2_deserialize([Out] long[] ret, [In] byte[] inp);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_p2_affine_serialize([Out] byte[] ret, [In] long[] inp);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_p2_affine_compress([Out] byte[] ret, [In] long[] inp);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_p2_to_affine([Out] long[] ret, [In] long[] inp);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_p2_affine_on_curve([In] long[] point);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_p2_affine_in_g2([In] long[] point);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_p2_affine_is_inf([In] long[] point);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_p2_affine_is_equal([In] long[] a, [In] long[] b);
// Returns a pointer into native static data; copied out with Marshal.Copy.
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern IntPtr blst_p2_generator();
// Single-shot core verification for the min-pk scheme (pk in G1).
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern ERROR blst_core_verify_pk_in_g1([In] long[] pk, [In] long[] sig,
bool hash_or_encode,
[In] byte[] msg, size_t msg_len,
[In] byte[] dst, size_t dst_len,
[In] byte[] aug, size_t aug_len);
// Affine representation of a G2 point, stored as the native
// blst_p2_affine blob inside a long[].
public struct P2_Affine {
    internal readonly long[] point;
    private static readonly int sz = (int)blst_p2_affine_sizeof()/sizeof(long);

    //public P2_Affine() { point = new long[sz]; }
    private P2_Affine(bool _) { point = new long[sz]; }
    private P2_Affine(P2_Affine p) { point = (long[])p.point.Clone(); }

    // Deserialize from either the 96-byte compressed or the 192-byte
    // uncompressed encoding; the top bit of the first byte selects.
    public P2_Affine(byte[] inp) : this(true)
    {   int expected = inp.Length != 0 && (inp[0]&0x80) != 0
                       ? P2_COMPRESSED_SZ : 2*P2_COMPRESSED_SZ;
        if (inp.Length != expected)
            throw new Exception(ERROR.BAD_ENCODING);
        ERROR err = blst_p2_deserialize(point, inp);
        if (err != ERROR.SUCCESS)
            throw new Exception(err);
    }

    public P2_Affine(P2 jacobian) : this(true)
    {   blst_p2_to_affine(point, jacobian.point); }

    public P2_Affine dup()  { return new P2_Affine(this); }
    public P2 to_jacobian() { return new P2(this); }

    public byte[] serialize()
    {   byte[] outp = new byte[2*P2_COMPRESSED_SZ];
        blst_p2_affine_serialize(outp, point);
        return outp;
    }
    public byte[] compress()
    {   byte[] outp = new byte[P2_COMPRESSED_SZ];
        blst_p2_affine_compress(outp, point);
        return outp;
    }

    public bool on_curve() { return blst_p2_affine_on_curve(point); }
    public bool in_group() { return blst_p2_affine_in_g2(point); }
    public bool is_inf()   { return blst_p2_affine_is_inf(point); }
    public bool is_equal(P2_Affine p)
    {   return blst_p2_affine_is_equal(point, p.point); }

    // Core verification with this point in the signature slot and the
    // public key in G1; DST is the domain-separation tag.
    ERROR core_verify(P1_Affine pk, bool hash_or_encode,
                      byte[] msg, string DST = "", byte[] aug = null)
    {   byte[] dst = Encoding.UTF8.GetBytes(DST);
        size_t aug_len = (size_t)(aug!=null ? aug.Length : 0);
        return blst_core_verify_pk_in_g1(pk.point, point, hash_or_encode,
                                         msg, (size_t)msg.Length,
                                         dst, (size_t)dst.Length,
                                         aug, aug_len);
    }

    public static P2_Affine generator()
    {   P2_Affine ret = new P2_Affine(true);
        Marshal.Copy(blst_p2_generator(), ret.point, 0, ret.point.Length);
        return ret;
    }
}
// Native routines for G2 points in the full (non-affine) representation,
// plus hash/encode-to-G2 and min-sig signing (mirror of the G1 set).
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern size_t blst_p2_sizeof();
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_p2_serialize([Out] byte[] ret, [In] long[] inp);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_p2_compress([Out] byte[] ret, [In] long[] inp);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_p2_from_affine([Out] long[] ret, [In] long[] inp);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_p2_on_curve([In] long[] point);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_p2_in_g2([In] long[] point);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_p2_is_inf([In] long[] point);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_p2_is_equal([In] long[] a, [In] long[] b);
// Public key in G2 from a secret key.
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_sk_to_pk_in_g2([Out] long[] ret, [In] byte[] SK);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern
void blst_encode_to_g2([Out] long[] ret, [In] byte[] msg, size_t msg_len,
[In] byte[] dst, size_t dst_len,
[In] byte[] aug, size_t aug_len);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern
void blst_hash_to_g2([Out] long[] ret, [In] byte[] msg, size_t msg_len,
[In] byte[] dst, size_t dst_len,
[In] byte[] aug, size_t aug_len);
// Multiply a G2 hash point by SK (signature when the pk lives in G1).
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern
void blst_sign_pk_in_g1([Out] long[] ret, [In] long[] hash, [In] byte[] SK);
// Scalar multiplication, conditional negation, addition and doubling.
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern
void blst_p2_mult([Out] long[] ret, [In] long[] a,
[In] byte[] scalar, size_t nbits);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_p2_cneg([Out] long[] ret, bool cbit);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern
void blst_p2_add_or_double([Out] long[] ret, [In] long[] a, [In] long[] b);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern
void blst_p2_add_or_double_affine([Out] long[] ret, [In] long[] a,
[In] long[] b);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_p2_double([Out] long[] ret, [In] long[] a);
// G2 point in the native full representation (blst_p2 blob in a long[]),
// with in-place arithmetic, hash-to-curve and signing helpers.
public struct P2 {
    internal long[] point;
    private static readonly int sz = (int)blst_p2_sizeof()/sizeof(long);

    //public P2() { point = new long[sz]; }
    private P2(bool _) { point = new long[sz]; }
    private P2(P2 p) { point = (long[])p.point.Clone(); }

    // Default-constructed structs carry a null array; allocate on demand.
    private long[] self()
    {   if (point == null)
            point = new long[sz];
        return point;
    }

    // Public key corresponding to a secret key.
    public P2(SecretKey sk) : this(true)
    {   blst_sk_to_pk_in_g2(point, sk.key); }

    // Deserialize (compressed 96-byte or uncompressed 192-byte form,
    // selected by the top bit of the first byte) and convert from affine.
    public P2(byte[] inp) : this(true)
    {   int expected = inp.Length != 0 && (inp[0]&0x80) != 0
                       ? P2_COMPRESSED_SZ : 2*P2_COMPRESSED_SZ;
        if (inp.Length != expected)
            throw new Exception(ERROR.BAD_ENCODING);
        ERROR err = blst_p2_deserialize(point, inp);
        if (err != ERROR.SUCCESS)
            throw new Exception(err);
        blst_p2_from_affine(point, point);
    }

    public P2(P2_Affine affine) : this(true)
    {   blst_p2_from_affine(point, affine.point); }

    public P2 dup()              { return new P2(this); }
    public P2_Affine to_affine() { return new P2_Affine(this); }

    public byte[] serialize()
    {   byte[] outp = new byte[2*P2_COMPRESSED_SZ];
        blst_p2_serialize(outp, point);
        return outp;
    }
    public byte[] compress()
    {   byte[] outp = new byte[P2_COMPRESSED_SZ];
        blst_p2_compress(outp, point);
        return outp;
    }

    public bool on_curve()     { return blst_p2_on_curve(point); }
    public bool in_group()     { return blst_p2_in_g2(point); }
    public bool is_inf()       { return blst_p2_is_inf(point); }
    public bool is_equal(P2 p) { return blst_p2_is_equal(point, p.point); }

    // Hash msg (with optional augmentation) to a G2 point, in place.
    public P2 hash_to(byte[] msg, string DST="", byte[] aug=null)
    {   byte[] dst = Encoding.UTF8.GetBytes(DST);
        blst_hash_to_g2(self(), msg, (size_t)msg.Length,
                        dst, (size_t)dst.Length,
                        aug, (size_t)(aug!=null ? aug.Length : 0));
        return this;
    }
    // encode-to-curve counterpart of hash_to.
    public P2 encode_to(byte[] msg, string DST="", byte[] aug=null)
    {   byte[] dst = Encoding.UTF8.GetBytes(DST);
        blst_encode_to_g2(self(), msg, (size_t)msg.Length,
                          dst, (size_t)dst.Length,
                          aug, (size_t)(aug!=null ? aug.Length : 0));
        return this;
    }

    public P2 sign_with(SecretKey sk)
    {   blst_sign_pk_in_g1(point, point, sk.key); return this; }
    public P2 sign_with(Scalar scalar)
    {   blst_sign_pk_in_g1(point, point, scalar.val); return this; }

    // Accumulate a subgroup-checked affine point; rejects points
    // outside G2.
    public void aggregate(P2_Affine inp)
    {   if (!blst_p2_affine_in_g2(inp.point))
            throw new Exception(ERROR.POINT_NOT_IN_GROUP);
        blst_p2_add_or_double_affine(point, point, inp.point);
    }

    public P2 mult(byte[] scalar)
    {   blst_p2_mult(point, point, scalar, (size_t)(scalar.Length*8));
        return this;
    }
    public P2 mult(Scalar scalar)
    {   blst_p2_mult(point, point, scalar.val, (size_t)255);
        return this;
    }
    // Signed multiplication: a negative scalar negates the point first.
    public P2 mult(BigInteger scalar)
    {   bool negative = scalar.Sign < 0;
        byte[] le = (negative ? BigInteger.Negate(scalar) : scalar)
                    .ToByteArray();
        if (negative)
            blst_p2_cneg(point, true);
        int nbytes = le.Length;
        if (le[nbytes-1] == 0)
            nbytes--;       // drop ToByteArray's sign padding byte
        blst_p2_mult(point, point, le, (size_t)(nbytes*8));
        return this;
    }

    public P2 cneg(bool flag) { blst_p2_cneg(point, flag); return this; }
    public P2 neg()           { blst_p2_cneg(point, true); return this; }
    public P2 add(P2 a)
    {   blst_p2_add_or_double(point, point, a.point); return this; }
    public P2 add(P2_Affine a)
    {   blst_p2_add_or_double_affine(point, point, a.point); return this; }
    public P2 dbl()
    {   blst_p2_double(point, point); return this; }

    public static P2 generator()
    {   P2 ret = new P2(true);
        Marshal.Copy(blst_p2_generator(), ret.point, 0, ret.point.Length);
        return ret;
    }
}
public static P2 G2() { return P2.generator(); }
// Pairing-context accumulation with the signature side in G2, plus the
// fp12 (pairing target group) native routines used by struct PT.
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_aggregated_in_g2([Out] long[] fp12, [In] long[] p);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern ERROR blst_pairing_aggregate_pk_in_g2([In, Out] long[] fp12,
[In] long[] pk, [In] long[] sig,
[In] byte[] msg, size_t msg_len,
[In] byte[] aug, size_t aug_len);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern ERROR blst_pairing_mul_n_aggregate_pk_in_g2([In, Out] long[] fp12,
[In] long[] pk, [In] long[] sig,
[In] byte[] scalar, size_t nbits,
[In] byte[] msg, size_t msg_len,
[In] byte[] aug, size_t aug_len);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern size_t blst_fp12_sizeof();
// Miller loop over a (G2, G1) affine pair.
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_miller_loop([Out] long[] fp12, [In] long[] q,
[In] long[] p);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_fp12_is_one([In] long[] fp12);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_fp12_is_equal([In] long[] a, [In] long[] b);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_fp12_sqr([Out] long[] ret, [In] long[] a);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_fp12_mul([Out] long[] ret, [In] long[] a,
[In] long[] b);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_final_exp([Out] long[] ret, [In] long[] a);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_fp12_finalverify([In] long[] a, [In] long[] b);
// Returns a pointer into native static data; copied out with Marshal.Copy.
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern IntPtr blst_fp12_one();
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_fp12_in_group([In] long[] a);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_bendian_from_fp12([Out] byte[] ret, [In] long[] a);
// Element of the pairing target group (an fp12 value), produced either
// by "aggregating" a single point or by a Miller loop over a (G2, G1)
// pair of points.
public struct PT {
    internal readonly long[] fp12;
    private static readonly int sz = (int)blst_fp12_sizeof()/sizeof(long);

    internal PT(bool _) { fp12 = new long[sz]; }
    private PT(PT orig) { fp12 = (long[])orig.fp12.Clone(); }

    public PT(P1_Affine p) : this(true)
    {   blst_aggregated_in_g1(fp12, p.point); }
    public PT(P1 p) : this(true)
    {   P1_Affine affine = new P1_Affine(p);
        blst_aggregated_in_g1(fp12, affine.point);
    }
    public PT(P2_Affine q) : this(true)
    {   blst_aggregated_in_g2(fp12, q.point); }
    public PT(P2 q) : this(true)
    {   P2_Affine affine = new P2_Affine(q);
        blst_aggregated_in_g2(fp12, affine.point);
    }

    // Miller loop; arguments are accepted in either order and in either
    // coordinate form.
    public PT(P2_Affine q, P1_Affine p) : this(true)
    {   blst_miller_loop(fp12, q.point, p.point); }
    public PT(P1_Affine p, P2_Affine q) : this(q, p) {}
    public PT(P2 q, P1 p) : this(true)
    {   P2_Affine qa = new P2_Affine(q);
        P1_Affine pa = new P1_Affine(p);
        blst_miller_loop(fp12, qa.point, pa.point);
    }
    public PT(P1 p, P2 q) : this(q, p) {}

    public PT dup() { return new PT(this); }

    public bool is_one()       { return blst_fp12_is_one(fp12); }
    public bool is_equal(PT p) { return blst_fp12_is_equal(fp12, p.fp12); }

    // In-place fp12 arithmetic; each returns this for chaining.
    public PT sqr()       { blst_fp12_sqr(fp12, fp12); return this; }
    public PT mul(PT p)   { blst_fp12_mul(fp12, fp12, p.fp12); return this; }
    public PT final_exp() { blst_final_exp(fp12, fp12); return this; }
    public bool in_group(){ return blst_fp12_in_group(fp12); }

    public byte[] to_bendian()
    {   byte[] outp = new byte[12*P1_COMPRESSED_SZ];
        blst_bendian_from_fp12(outp, fp12);
        return outp;
    }

    // Native final verification over two fp12 values.
    public static bool finalverify(PT gt1, PT gt2)
    {   return blst_fp12_finalverify(gt1.fp12, gt2.fp12); }

    public static PT one()
    {   PT ret = new PT(true);
        Marshal.Copy(blst_fp12_one(), ret.fp12, 0, ret.fp12.Length);
        return ret;
    }
}
// Native pairing-context lifecycle: init/commit/merge/finalverify plus
// raw accumulation and fp12 extraction. Note blst_pairing_init takes the
// DST by reference into the tail of the same ctx array (see Pairing).
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern size_t blst_pairing_sizeof();
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern
void blst_pairing_init([In, Out] long[] ctx, bool hash_or_encode,
[In] ref long dst, size_t dst_len);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern void blst_pairing_commit([In, Out] long[] ctx);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern ERROR blst_pairing_merge([In, Out] long[] ctx, [In] long[] ctx1);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern bool blst_pairing_finalverify([In] long[] ctx, [In] long[] sig);
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern
void blst_pairing_raw_aggregate([In, Out] long[] ctx, [In] long[] q,
[In] long[] p);
// Returns a pointer into the ctx; caller copies out with Marshal.Copy.
[DllImport("blst.dll", CallingConvention = CallingConvention.Cdecl)]
static extern IntPtr blst_pairing_as_fp12([In] long[] ctx);
public struct Pairing {
private readonly long[] ctx;
private static readonly int sz = (int)blst_pairing_sizeof()/sizeof(long);
public Pairing(bool hash_or_encode=false, string DST="")
{
byte[] dst = Encoding.UTF8.GetBytes(DST);
int dst_len = dst.Length;
int add_len = dst_len!=0 ? (dst_len+sizeof(long)-1)/sizeof(long) : 1;
Array.Resize(ref dst, add_len*sizeof(long));
ctx = new long[sz+add_len];
for (int i=0; i sig,
byte[] msg, byte[] aug=null)
{ return blst_pairing_aggregate_pk_in_g1(ctx, pk.point,
sig.HasValue ? sig.Value.point : null,
msg, (size_t)msg.Length,
aug, (size_t)(aug!=null ? aug.Length : 0));
}
public ERROR aggregate(P2_Affine pk, Nullable sig,
byte[] msg, byte[] aug=null)
{ return blst_pairing_aggregate_pk_in_g2(ctx, pk.point,
sig.HasValue ? sig.Value.point : null,
msg, (size_t)msg.Length,
aug, (size_t)(aug!=null ? aug.Length : 0));
}
public ERROR mul_n_aggregate(P2_Affine pk, P1_Affine sig,
byte[] scalar, int nbits,
byte[] msg, byte[] aug=null)
{ return blst_pairing_mul_n_aggregate_pk_in_g2(ctx, pk.point, sig.point,
scalar, (size_t)nbits,
msg, (size_t)msg.Length,
aug, (size_t)(aug!=null ? aug.Length : 0));
}
public ERROR mul_n_aggregate(P1_Affine pk, P2_Affine sig,
byte[] scalar, int nbits,
byte[] msg, byte[] aug=null)
{ return blst_pairing_mul_n_aggregate_pk_in_g1(ctx, pk.point, sig.point,
scalar, (size_t)nbits,
msg, (size_t)msg.Length,
aug, (size_t)(aug!=null ? aug.Length : 0));
}
public void commit() { blst_pairing_commit(ctx); }
public void merge(Pairing a)
{ var err = blst_pairing_merge(ctx, a.ctx);
if (err != ERROR.SUCCESS)
throw new Exception(err);
}
// Complete the verification. A default-constructed PT carries a null
// fp12 array, which reaches the native call as NULL.
public bool finalverify(PT sig = new PT())
{ return blst_pairing_finalverify(ctx, sig.fp12); }
// Accumulate a raw (q, p) pair without hashing-to-curve. Overloads
// accept either argument order; projective inputs are converted to
// affine before aggregation.
public void raw_aggregate(P2_Affine q, P1_Affine p)
{ blst_pairing_raw_aggregate(ctx, q.point, p.point); }
public void raw_aggregate(P1_Affine p, P2_Affine q)
{ raw_aggregate(q, p); }
public void raw_aggregate(P2 q, P1 p)
{
    blst_pairing_raw_aggregate(ctx, (new P2_Affine(q)).point,
                               (new P1_Affine(p)).point);
}
public void raw_aggregate(P1 p, P2 q)
{ raw_aggregate(q, p); }
// Return a copy of the context's accumulated Fp12 value. The ctx array
// is explicitly pinned because the native call returns a pointer into
// it that must stay valid across Marshal.Copy. Free() is guarded by
// try/finally so the pin cannot leak if the copy throws.
public PT as_fp12()
{
    var ret = new PT(true);
    GCHandle h = GCHandle.Alloc(ctx, GCHandleType.Pinned);
    try {
        Marshal.Copy(blst_pairing_as_fp12(ctx), ret.fp12, 0, ret.fp12.Length);
    } finally {
        h.Free();
    }
    return ret;
}
}
}}
================================================
FILE: bindings/go/README.md
================================================
# blst [![lint](https://github.com/supranational/blst/actions/workflows/golang-lint.yml/badge.svg)](https://github.com/supranational/blst/actions/workflows/golang-lint.yml)
The `blst` package provides a Go interface to the blst BLS12-381 signature library.
## Build
The build process consists of two steps, code generation followed by compilation.
```
./generate.py # Optional - only required if making code changes
go build
go test
```
The generate.py script is used to generate both min-pk and min-sig variants of the binding from a common code base. It consumes the `*.tgo` files along with `blst_minpk_test.go` and produces `blst.go` and `blst_minsig_test.go`. The .tgo files can be treated as if they were .go files, including the use of gofmt and goimports. The generate script will filter out extra imports while processing and automatically run goimports on the final blst.go file.
After running generate.py, `go build` and `go test` can be run as usual. Cgo will compile `cgo_server.c`, which includes the required C implementation files, and `cgo_assembly.S`, which includes appropriate pre-generated assembly code for the platform.
#### Caveats
If the test or target application crashes with an "illegal instruction" exception [after copying to an older system], rebuild with `CGO_CFLAGS` environment variable set to `-O2 -D__BLST_PORTABLE__`. Don't forget `-O2`!
On Windows the C compiler invoked by cgo, one denoted in `go env CC` output, has to target [MinGW](https://www.mingw-w64.org/). Verify with `cc -dM -E -x c nul: | findstr "MINGW64"`, substituting the compiler from `go env CC` for `cc`.
If you're cross-compiling, you have to set `CC` environment variable to the target C cross-compiler and `CGO_ENABLED` to 1. For example, to compile the test program for ARM:
```
env GOARCH=arm CC=arm-linux-gnueabi-gcc CGO_ENABLED=1 go test -c
```
## Usage
There are two primary modes of operation that can be chosen based on type definitions in the application.
For minimal-pubkey-size operations the application would define core types as:
```
type PublicKey = blst.P1Affine
type Signature = blst.P2Affine
type AggregateSignature = blst.P2Aggregate
type AggregatePublicKey = blst.P1Aggregate
```
For minimal-signature-size operations:
```
type PublicKey = blst.P2Affine
type Signature = blst.P1Affine
type AggregateSignature = blst.P1Aggregate
type AggregatePublicKey = blst.P2Aggregate
```
A complete example for generating a key, signing a message, and verifying the message:
```
package main
import (
"crypto/rand"
"fmt"
blst "github.com/supranational/blst/bindings/go"
)
type PublicKey = blst.P1Affine
type Signature = blst.P2Affine
type AggregateSignature = blst.P2Aggregate
type AggregatePublicKey = blst.P1Aggregate
func main() {
var ikm [32]byte
_, _ = rand.Read(ikm[:])
sk := blst.KeyGen(ikm[:])
pk := new(PublicKey).From(sk)
var dst = []byte("BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_NUL_")
msg := []byte("hello foo")
sig := new(Signature).Sign(sk, msg, dst)
if !sig.Verify(true, pk, true, msg, dst) {
fmt.Println("ERROR: Invalid!")
} else {
fmt.Println("Valid!")
}
}
```
See the tests for further examples of usage.
## Core Methods
### SecretKey Methods
- `KeyGen(ikm []byte, optional ...[]byte) *SecretKey` - Derive the secret key scalar from secret input key material, optionally application-specific
- `Serialize() []byte` - Serialize the secret key to bytes
- `Deserialize(data []byte) *SecretKey` - Deserialize secret key from bytes
- `Zeroize()` - Securely zero out the secret key
### PublicKey (P1Affine in minimal-pubkey-size) Methods
- `From(sk *SecretKey) *PublicKey` - Derive public key from secret key
- `Compress() []byte` - Serialize public key to compressed format
- `Uncompress(data []byte) *PublicKey` - Decompress public key from bytes
- `Serialize() []byte` - Serialize public key to uncompressed format
- `Deserialize(data []byte) *PublicKey` - Deserialize public key from bytes
### Signature (P2Affine in minimal-pubkey-size) Methods
- `Sign(sk *SecretKey, msg []byte, dst []byte, ...interface{}) *Signature` - Sign a message
- `Compress() []byte` - Serialize signature to compressed format
- `Uncompress(data []byte) *Signature` - Decompress signature from bytes
- `BatchUncompress(compressedSigs [][]byte) []*Signature` - Efficiently uncompress multiple signatures
- `Serialize() []byte` - Serialize public key to uncompressed format
- `Deserialize(data []byte) *Signature` - Deserialize public key from bytes
- `Verify(sigCheck bool, pk *PublicKey, pkCheck bool, msg []byte, dst []byte, ...interface{}) bool` - Verify a signature
- `VerifyCompressed(sig []byte, sigCheck bool, pk []byte, msgCheck bool, msg []byte, dst []byte, ...interface{}) bool` - Verify a serialized signature in compressed format
- `AggregateVerify(sigCheck bool, pks []*PublicKey, msgCheck bool, msgs [][]byte, dst []byte) bool` - Verify an aggregated signature for multiple messages
- `AggregateVerifyCompressed(sig []byte, sigCheck bool, pks [][]byte, msgCheck bool, msgs [][]byte, dst []byte) bool` - Verify an aggregated serialized signature in compressed format
- `FastAggregateVerify(sigCheck bool, pks []*PublicKey, msg []byte, dst []byte) bool` - Fast verify for same message
- `MultipleAggregateVerify(sigs []*Signature, sigCheck bool, pks []*PublicKey, msgCheck bool, msgs [][]byte, dst []byte, randFn func(*Scalar), randBits int) bool` - Verify multiple signatures
### Aggregate Methods
- `AggregatePublicKey.Aggregate(pks []*PublicKey, check bool)` - Aggregate multiple public keys
- `AggregateSignature.Aggregate(sigs []*Signature, check bool)` - Aggregate multiple signatures
- `AggregateSignature.AggregateCompressed(compressedSigs [][]byte, check bool)` - Aggregate multiple serialized signatures in compressed format
- `AggregatePublicKey.ToAffine() *PublicKey` - Convert aggregate to affine form
- `AggregateSignature.ToAffine() *Signature` - Convert aggregate to affine form
## Utility Functions
- `HashToG1(msg []byte, dst []byte, optional... []byte) *P1` - Hash message [with optional augmentation] to G1 point
- `HashToG2(msg []byte, dst []byte, optional... []byte) *P2` - Hash message [with optional augmentation] to G2 point
- `P1Generator() *P1` - Get G1 generator point
- `P2Generator() *P2` - Get G2 generator point
- `Uniq(msgs [][]byte)` - Check messages for uniqueness
- `SetMaxProcs(procs int)` - Set maximum number of threads for parallel operations
================================================
FILE: bindings/go/blst.go
================================================
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
// DO NOT MODIFY THIS FILE!!
// The file is generated from *.tgo by generate.py
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
/*
* Copyright Supranational LLC
* Licensed under the Apache License, Version 2.0, see LICENSE for details.
* SPDX-License-Identifier: Apache-2.0
*/
package blst
// #cgo CFLAGS: -I${SRCDIR}/.. -I${SRCDIR}/../../build -I${SRCDIR}/../../src -D__BLST_CGO__ -fno-builtin-memcpy -fno-builtin-memset
// #cgo amd64 CFLAGS: -D__ADX__ -mno-avx
// // no-asm 64-bit platforms from https://go.dev/doc/install/source
// #cgo loong64 mips64 mips64le ppc64 ppc64le riscv64 s390x CFLAGS: -D__BLST_NO_ASM__
//
// #include "blst.h"
//
// #if defined(__x86_64__) && (defined(__unix__) || defined(__APPLE__))
// # include <signal.h>
// # include <unistd.h>
// static void handler(int signum)
// { ssize_t n = write(2, "Caught SIGILL in blst_cgo_init, "
// "consult /bindings/go/README.md.\n", 70);
// _exit(128+SIGILL);
// (void)n;
// }
// __attribute__((constructor)) static void blst_cgo_init()
// { blst_fp temp = { 0 };
// struct sigaction act = { handler }, oact;
// sigaction(SIGILL, &act, &oact);
// blst_fp_sqr(&temp, &temp);
// sigaction(SIGILL, &oact, NULL);
// }
// #endif
//
// static void go_pairing_init(blst_pairing *new_ctx, bool hash_or_encode,
// const byte *DST, size_t DST_len)
// { if (DST != NULL) {
// byte *dst = (byte*)new_ctx + blst_pairing_sizeof();
// for(size_t i = 0; i < DST_len; i++) dst[i] = DST[i];
// DST = dst;
// }
// blst_pairing_init(new_ctx, hash_or_encode, DST, DST_len);
// }
// static void go_pairing_as_fp12(blst_fp12 *pt, blst_pairing *ctx)
// { *pt = *blst_pairing_as_fp12(ctx); }
//
// static void go_p1slice_to_affine(blst_p1_affine dst[],
// const blst_p1 points[], size_t npoints)
// { const blst_p1 *ppoints[2] = { points, NULL };
// blst_p1s_to_affine(dst, ppoints, npoints);
// }
// static void go_p1slice_add(blst_p1 *dst, const blst_p1_affine points[],
// size_t npoints)
// { const blst_p1_affine *ppoints[2] = { points, NULL };
// blst_p1s_add(dst, ppoints, npoints);
// }
// static void go_p2slice_to_affine(blst_p2_affine dst[],
// const blst_p2 points[], size_t npoints)
// { const blst_p2 *ppoints[2] = { points, NULL };
// blst_p2s_to_affine(dst, ppoints, npoints);
// }
// static void go_p2slice_add(blst_p2 *dst, const blst_p2_affine points[],
// size_t npoints)
// { const blst_p2_affine *ppoints[2] = { points, NULL };
// blst_p2s_add(dst, ppoints, npoints);
// }
//
// static void go_p1_mult_n_acc(blst_p1 *acc, const blst_fp *x, bool affine,
// const byte *scalar, size_t nbits)
// { blst_p1 m[1];
// const void *p = x;
// if (p == NULL)
// p = blst_p1_generator();
// else if (affine)
// blst_p1_from_affine(m, p), p = m;
// blst_p1_mult(m, p, scalar, nbits);
// blst_p1_add_or_double(acc, acc, m);
// }
// static void go_p2_mult_n_acc(blst_p2 *acc, const blst_fp2 *x, bool affine,
// const byte *scalar, size_t nbits)
// { blst_p2 m[1];
// const void *p = x;
// if (p == NULL)
// p = blst_p2_generator();
// else if (affine)
// blst_p2_from_affine(m, p), p = m;
// blst_p2_mult(m, p, scalar, nbits);
// blst_p2_add_or_double(acc, acc, m);
// }
//
// static void go_p1_sub_assign(blst_p1 *a, const blst_fp *x, bool affine)
// { blst_p1 minus_b;
// if (affine)
// blst_p1_from_affine(&minus_b, (const blst_p1_affine*)x);
// else
// minus_b = *(const blst_p1*)x;
// blst_p1_cneg(&minus_b, 1);
// blst_p1_add_or_double(a, a, &minus_b);
// }
//
// static void go_p2_sub_assign(blst_p2 *a, const blst_fp2 *x, bool affine)
// { blst_p2 minus_b;
// if (affine)
// blst_p2_from_affine(&minus_b, (const blst_p2_affine*)x);
// else
// minus_b = *(const blst_p2*)x;
// blst_p2_cneg(&minus_b, 1);
// blst_p2_add_or_double(a, a, &minus_b);
// }
//
// static bool go_scalar_from_bendian(blst_scalar *ret, const byte *in)
// { blst_scalar_from_bendian(ret, in);
// return blst_sk_check(ret);
// }
// static bool go_hash_to_scalar(blst_scalar *ret,
// const byte *msg, size_t msg_len,
// const byte *DST, size_t DST_len)
// { byte elem[48];
// blst_expand_message_xmd(elem, sizeof(elem), msg, msg_len, DST, DST_len);
// return blst_scalar_from_be_bytes(ret, elem, sizeof(elem));
// }
// static void go_miller_loop_n(blst_fp12 *dst, const blst_p2_affine Q[],
// const blst_p1_affine P[],
// size_t npoints, bool acc)
// { const blst_p2_affine *Qs[2] = { Q, NULL };
// const blst_p1_affine *Ps[2] = { P, NULL };
// if (acc) {
// blst_fp12 tmp;
// blst_miller_loop_n(&tmp, Qs, Ps, npoints);
// blst_fp12_mul(dst, dst, &tmp);
// } else {
// blst_miller_loop_n(dst, Qs, Ps, npoints);
// }
// }
// static void go_fp12slice_mul(blst_fp12 *dst, const blst_fp12 in[], size_t n)
// { size_t i;
// blst_fp12_mul(dst, &in[0], &in[1]);
// for (i = 2; i < n; i++)
// blst_fp12_mul(dst, dst, &in[i]);
// }
// static bool go_p1_affine_validate(const blst_p1_affine *p, bool infcheck)
// { if (infcheck && blst_p1_affine_is_inf(p))
// return 0;
// return blst_p1_affine_in_g1(p);
// }
// static bool go_p2_affine_validate(const blst_p2_affine *p, bool infcheck)
// { if (infcheck && blst_p2_affine_is_inf(p))
// return 0;
// return blst_p2_affine_in_g2(p);
// }
import "C"
import (
"fmt"
"math/bits"
"runtime"
"sync"
"sync/atomic"
"unsafe"
)
// Serialized sizes, in bytes, of scalars and field elements, and the
// point formats derived from them.
const BLST_SCALAR_BYTES = 256 / 8
const BLST_FP_BYTES = 384 / 8
const BLST_P1_COMPRESS_BYTES = BLST_FP_BYTES
const BLST_P1_SERIALIZE_BYTES = BLST_FP_BYTES * 2
const BLST_P2_COMPRESS_BYTES = BLST_FP_BYTES * 2
const BLST_P2_SERIALIZE_BYTES = BLST_FP_BYTES * 4

// Thin Go wrappers around the corresponding C structures.
type Scalar struct{ cgo C.blst_scalar }
type Fp struct{ cgo C.blst_fp }
type Fp2 struct{ cgo C.blst_fp2 }
type Fp6 = C.blst_fp6
type Fp12 struct{ cgo C.blst_fp12 }
type P1 struct{ cgo C.blst_p1 }
type P2 struct{ cgo C.blst_p2 }
type P1Affine struct{ cgo C.blst_p1_affine }
type P2Affine struct{ cgo C.blst_p2_affine }
type Message = []byte

// Pairing is an opaque aggregation context, allocated as a slice of
// C.blst_pairing units sized to hold the context plus the DST
// (see PairingCtx and go_pairing_init in the cgo preamble).
type Pairing = []C.blst_pairing
type SecretKey = Scalar
type P1s []P1
type P2s []P2
type P1Affines []P1Affine
type P2Affines []P2Affine
//
// Configuration
//

// maxProcs caps the number of worker goroutines this package spawns;
// adjustable via SetMaxProcs and consulted by numThreads.
var maxProcs = initMaxProcs()
// initMaxProcs computes the default worker-thread budget for the
// package's parallel helpers. The result is always at least 1.
func initMaxProcs() int {
	n := runtime.GOMAXPROCS(0)
	var goVersion float32
	if _, err := fmt.Sscanf(runtime.Version(), "go%f", &goVersion); err != nil || goVersion < 1.14 {
		// be cooperative and leave one processor for the application
		n--
	}
	if n <= 0 {
		n = 1
	}
	return n
}
// SetMaxProcs overrides the number of threads the package may use;
// values below 1 are clamped to 1.
func SetMaxProcs(procs int) {
	if procs < 1 {
		procs = 1
	}
	maxProcs = procs
}
// numThreads returns how many worker goroutines to launch: the
// package limit, further bounded by the current GOMAXPROCS setting
// and, when positive, by the caller-supplied maxThreads cap.
func numThreads(maxThreads int) int {
	n := maxProcs
	// take into consideration the possibility that application reduced
	// GOMAXPROCS after |maxProcs| was initialized
	if procs := runtime.GOMAXPROCS(0); procs < n {
		n = procs
	}
	if maxThreads > 0 && maxThreads < n {
		return maxThreads
	}
	return n
}
// Values fetched once from the C library at package initialization.
var cgo_pairingSizeOf = C.blst_pairing_sizeof()
var cgo_p1Generator = P1{*C.blst_p1_generator()}
var cgo_p2Generator = P2{*C.blst_p2_generator()}
var cgo_fp12One = Fp12{*C.blst_fp12_one()}

// Secret key
// Zeroize overwrites the secret key material with zeros.
func (sk *SecretKey) Zeroize() {
	*sk = SecretKey{}
}
// KeyGen derives a secret key from at least 32 bytes of input key
// material |ikm|, optionally mixing in application-specific info
// (first element of |optional|). Returns nil if |ikm| is too short.
func KeyGen(ikm []byte, optional ...[]byte) *SecretKey {
	// Validate |ikm| before anything else, consistent with
	// KeyGenV3/KeyGenV45/KeyGenV5 below.
	if len(ikm) < 32 {
		return nil
	}
	var sk SecretKey
	var info []byte
	if len(optional) > 0 {
		info = optional[0]
	}
	C.blst_keygen(&sk.cgo, (*C.byte)(&ikm[0]), C.size_t(len(ikm)),
		ptrOrNil(info), C.size_t(len(info)))
	// Postponing secret key zeroing till garbage collection can be too
	// late to be effective, but every little bit helps...
	runtime.SetFinalizer(&sk, func(sk *SecretKey) { sk.Zeroize() })
	return &sk
}
// KeyGenV3 derives a secret key per v3 of the BLS KeyGen draft from
// at least 32 bytes of input key material |ikm|, optionally mixing in
// application-specific info. Returns nil if |ikm| is too short.
func KeyGenV3(ikm []byte, optional ...[]byte) *SecretKey {
	if len(ikm) < 32 {
		return nil
	}
	var info []byte
	if len(optional) > 0 {
		info = optional[0]
	}
	sk := new(SecretKey)
	C.blst_keygen_v3(&sk.cgo, (*C.byte)(&ikm[0]), C.size_t(len(ikm)),
		ptrOrNil(info), C.size_t(len(info)))
	// Postponing secret key zeroing till garbage collection can be too
	// late to be effective, but every little bit helps...
	runtime.SetFinalizer(sk, func(sk *SecretKey) { sk.Zeroize() })
	return sk
}
// KeyGenV45 derives a secret key per v4.5 of the BLS KeyGen draft
// from at least 32 bytes of |ikm| and a |salt|, optionally mixing in
// application-specific info. Returns nil if |ikm| is too short.
func KeyGenV45(ikm []byte, salt []byte, optional ...[]byte) *SecretKey {
	if len(ikm) < 32 {
		return nil
	}
	var sk SecretKey
	var info []byte
	if len(optional) > 0 {
		info = optional[0]
	}
	saltLen := len(salt)
	if saltLen == 0 {
		// &salt[0] would panic on an empty slice; point at a dummy
		// byte while still passing the true length (0) to C, the same
		// guard KeyGenV5 applies.
		salt = []byte{0}
	}
	C.blst_keygen_v4_5(&sk.cgo, (*C.byte)(&ikm[0]), C.size_t(len(ikm)),
		(*C.byte)(&salt[0]), C.size_t(saltLen),
		ptrOrNil(info), C.size_t(len(info)))
	// Postponing secret key zeroing till garbage collection can be too
	// late to be effective, but every little bit helps...
	runtime.SetFinalizer(&sk, func(sk *SecretKey) { sk.Zeroize() })
	return &sk
}
// KeyGenV5 derives a secret key per v5 of the BLS KeyGen draft from
// at least 32 bytes of |ikm| and a |salt|, optionally mixing in
// application-specific info. Returns nil if |ikm| is too short.
func KeyGenV5(ikm []byte, salt []byte, optional ...[]byte) *SecretKey {
	if len(ikm) < 32 {
		return nil
	}
	var sk SecretKey
	var info []byte
	if len(optional) > 0 {
		info = optional[0]
	}
	saltLen := len(salt)
	if saltLen == 0 {
		// &salt[0] would panic on an empty slice; point at a dummy
		// byte while still passing the true length (0) to C.
		salt = []byte{0}
	}
	C.blst_keygen_v5(&sk.cgo, (*C.byte)(&ikm[0]), C.size_t(len(ikm)),
		(*C.byte)(&salt[0]), C.size_t(saltLen),
		ptrOrNil(info), C.size_t(len(info)))
	// Postponing secret key zeroing till garbage collection can be too
	// late to be effective, but every little bit helps...
	runtime.SetFinalizer(&sk, func(sk *SecretKey) { sk.Zeroize() })
	return &sk
}
// DeriveMasterEip2333 derives an EIP-2333 master secret key from at
// least 32 bytes of input key material; returns nil otherwise.
func DeriveMasterEip2333(ikm []byte) *SecretKey {
	if len(ikm) < 32 {
		return nil
	}
	sk := new(SecretKey)
	C.blst_derive_master_eip2333(&sk.cgo, (*C.byte)(&ikm[0]), C.size_t(len(ikm)))
	// Postponing secret key zeroing till garbage collection can be too
	// late to be effective, but every little bit helps...
	runtime.SetFinalizer(sk, func(sk *SecretKey) { sk.Zeroize() })
	return sk
}
// DeriveChildEip2333 derives the child secret key at |child_index|
// from |master| per EIP-2333.
func (master *SecretKey) DeriveChildEip2333(child_index uint32) *SecretKey {
	var sk SecretKey
	C.blst_derive_child_eip2333(&sk.cgo, &master.cgo, C.uint(child_index))
	// Postponing secret key zeroing till garbage collection can be too
	// late to be effective, but every little bit helps...
	runtime.SetFinalizer(&sk, func(sk *SecretKey) { sk.Zeroize() })
	return &sk
}
// Pairing

// pairingSizeOf returns the allocation length for a pairing context
// with a |DST_len|-byte DST appended, rounded up to 8-byte units
// (assumes C.blst_pairing occupies 8 bytes under __BLST_CGO__ —
// TODO confirm against the binding's blst.h).
func pairingSizeOf(DST_len C.size_t) int {
	return int((cgo_pairingSizeOf + DST_len + 7) / 8)
}

// PairingCtx allocates and initializes an aggregation context. The
// DST is copied into the tail of the allocation by go_pairing_init so
// the C side never retains a pointer into the Go-managed |DST| slice.
func PairingCtx(hash_or_encode bool, DST []byte) Pairing {
	DST_len := C.size_t(len(DST))
	ctx := make([]C.blst_pairing, pairingSizeOf(DST_len))
	C.go_pairing_init(&ctx[0], C.bool(hash_or_encode), ptrOrNil(DST), DST_len)
	return ctx
}

// PairingCommit finalizes the accumulated Miller loops.
func PairingCommit(ctx Pairing) {
	C.blst_pairing_commit(&ctx[0])
}

// PairingMerge folds |ctx1| into |ctx|; returns a BLST_* error code.
func PairingMerge(ctx Pairing, ctx1 Pairing) int {
	r := C.blst_pairing_merge(&ctx[0], &ctx1[0])
	return int(r)
}

// PairingFinalVerify completes verification; optional[0], if present,
// is the aggregated signature in Fp12 form. A nil *Fp12 is safe —
// asPtr maps it to a NULL C pointer.
func PairingFinalVerify(ctx Pairing, optional ...*Fp12) bool {
	var gtsig *Fp12
	if len(optional) > 0 {
		gtsig = optional[0]
	}
	return bool(C.blst_pairing_finalverify(&ctx[0], gtsig.asPtr()))
}

// PairingRawAggregate accumulates a raw (q, p) pair without
// hashing-to-curve.
func PairingRawAggregate(ctx Pairing, q *P2Affine, p *P1Affine) {
	C.blst_pairing_raw_aggregate(&ctx[0], &q.cgo, &p.cgo)
}

// PairingAsFp12 copies the context's accumulated Fp12 value out.
func PairingAsFp12(ctx Pairing) *Fp12 {
	var pt Fp12
	C.go_pairing_as_fp12(&pt.cgo, &ctx[0])
	return &pt
}

// Fp12One returns (a copy of) the Fp12 multiplicative identity.
func Fp12One() Fp12 {
	return cgo_fp12One
}

// Fp12FinalVerify final-exponentiates and compares two Fp12 values.
func Fp12FinalVerify(pt1 *Fp12, pt2 *Fp12) bool {
	return bool(C.blst_fp12_finalverify(&pt1.cgo, &pt2.cgo))
}

// Fp12MillerLoop computes a single Miller loop over (q, p).
func Fp12MillerLoop(q *P2Affine, p *P1Affine) *Fp12 {
	var pt Fp12
	C.blst_miller_loop(&pt.cgo, &q.cgo, &p.cgo)
	return &pt
}
// Fp12MillerLoopN computes the product of Miller loops over the
// paired slices |qs| and |ps|, splitting the work across up to
// maxProcs goroutines. Panics if the slices differ in length or are
// empty.
func Fp12MillerLoopN(qs []P2Affine, ps []P1Affine) *Fp12 {
	if len(qs) != len(ps) || len(qs) == 0 {
		panic("inputs' lengths mismatch")
	}
	nElems := uint32(len(qs))
	nThreads := uint32(maxProcs)
	if nThreads == 1 || nElems == 1 {
		// Serial path.
		var pt Fp12
		C.go_miller_loop_n(&pt.cgo, &qs[0].cgo, &ps[0].cgo, C.size_t(nElems), false)
		return &pt
	}
	// Hand out work in strides of at most 16 point pairs.
	stride := (nElems + nThreads - 1) / nThreads
	if stride > 16 {
		stride = 16
	}
	strides := (nElems + stride - 1) / stride
	if nThreads > strides {
		nThreads = strides
	}
	msgsCh := make(chan Fp12, nThreads)
	curElem := uint32(0)
	for tid := uint32(0); tid < nThreads; tid++ {
		go func() {
			acc := Fp12One()
			first := true
			for {
				// Claim the next stride of work.
				work := atomic.AddUint32(&curElem, stride) - stride
				if work >= nElems {
					break
				}
				n := nElems - work
				if n > stride {
					n = stride
				}
				// The last argument makes go_miller_loop_n multiply
				// into |acc| on all but the first batch.
				C.go_miller_loop_n(&acc.cgo, &qs[work].cgo, &ps[work].cgo, C.size_t(n),
					C.bool(!first))
				first = false
			}
			msgsCh <- acc
		}()
	}
	// Collect the per-goroutine partial products and multiply them.
	var ret = make([]Fp12, nThreads)
	for i := range ret {
		ret[i] = <-msgsCh
	}
	var pt Fp12
	C.go_fp12slice_mul(&pt.cgo, &ret[0].cgo, C.size_t(nThreads))
	return &pt
}
// MulAssign multiplies pt by p in place.
func (pt *Fp12) MulAssign(p *Fp12) {
	C.blst_fp12_mul(&pt.cgo, &pt.cgo, &p.cgo)
}

// FinalExp applies the final exponentiation to pt in place.
func (pt *Fp12) FinalExp() {
	C.blst_final_exp(&pt.cgo, &pt.cgo)
}

// InGroup reports whether pt passes the blst Fp12 group check.
func (pt *Fp12) InGroup() bool {
	return bool(C.blst_fp12_in_group(&pt.cgo))
}

// ToBendian serializes pt as 12 big-endian field elements.
func (pt *Fp12) ToBendian() []byte {
	var out [BLST_FP_BYTES * 12]byte
	C.blst_bendian_from_fp12((*C.byte)(&out[0]), &pt.cgo)
	return out[:]
}

// Equals compares two Fp12 values for exact (bitwise) equality.
func (pt1 *Fp12) Equals(pt2 *Fp12) bool {
	return *pt1 == *pt2
}

// asPtr returns the underlying C pointer; deliberately safe to call
// on a nil receiver (yields a NULL C pointer, see PairingFinalVerify).
func (pt *Fp12) asPtr() *C.blst_fp12 {
	if pt != nil {
		return &pt.cgo
	}
	return nil
}

// ptrOrNil returns a pointer to the first byte of |bytes|, or nil
// when the slice is empty (avoids the &bytes[0] panic).
func ptrOrNil(bytes []byte) *C.byte {
	var ptr *C.byte
	if len(bytes) > 0 {
		ptr = (*C.byte)(&bytes[0])
	}
	return ptr
}
//
// MIN-PK
//

//
// PublicKey
//

// From derives the public key in G1 corresponding to secret key s.
func (pk *P1Affine) From(s *Scalar) *P1Affine {
	C.blst_sk_to_pk2_in_g1(nil, &pk.cgo, &s.cgo)
	return pk
}

// KeyValidate checks that pk is not the point at infinity and is in
// the G1 subgroup.
func (pk *P1Affine) KeyValidate() bool {
	return bool(C.go_p1_affine_validate(&pk.cgo, true))
}

// sigInfcheck, check for infinity, is a way to avoid going
// into resource-consuming verification. Passing 'false' is
// always cryptographically safe, but application might want
// to guard against obviously bogus individual[!] signatures.
func (sig *P2Affine) SigValidate(sigInfcheck bool) bool {
	return bool(C.go_p2_affine_validate(&sig.cgo, C.bool(sigInfcheck)))
}
//
// Sign
//

// Sign signs |msg| under domain-separation tag |dst| with |sk|,
// storing the result in sig. |optional| may carry the hash-or-encode
// flag and a single augmentation string (parsed by parseOpts);
// returns nil on invalid options.
func (sig *P2Affine) Sign(sk *SecretKey, msg []byte, dst []byte,
	optional ...interface{}) *P2Affine {
	augSingle, aug, useHash, ok := parseOpts(optional...)
	if !ok || len(aug) != 0 {
		return nil
	}
	var q *P2
	if useHash {
		q = HashToG2(msg, dst, augSingle)
	} else {
		q = EncodeToG2(msg, dst, augSingle)
	}
	C.blst_sign_pk2_in_g1(nil, &sig.cgo, &q.cgo, &sk.cgo)
	return sig
}
//
// Signature
//

// Functions to return a signature and public key+augmentation tuple.
// This enables point decompression (if needed) to happen in parallel.
type sigGetterP2 func() *P2Affine
type pkGetterP1 func(i uint32, temp *P1Affine) (*P1Affine, []byte)

// Single verify with decompressed pk
func (sig *P2Affine) Verify(sigGroupcheck bool, pk *P1Affine, pkValidate bool,
	msg Message, dst []byte,
	optional ...interface{}) bool { // useHash bool, aug []byte
	aug, _, useHash, ok := parseOpts(optional...)
	if !ok {
		return false
	}
	// Delegate to the aggregate path with a single-element batch.
	return sig.AggregateVerify(sigGroupcheck, []*P1Affine{pk}, pkValidate,
		[]Message{msg}, dst, useHash, [][]byte{aug})
}

// Single verify with compressed pk
// Uses a dummy signature to get the correct type
func (dummy *P2Affine) VerifyCompressed(sig []byte, sigGroupcheck bool,
	pk []byte, pkValidate bool, msg Message, dst []byte,
	optional ...bool) bool { // useHash bool, usePksAsAugs bool
	return dummy.AggregateVerifyCompressed(sig, sigGroupcheck,
		[][]byte{pk}, pkValidate,
		[]Message{msg}, dst, optional...)
}
// Aggregate verify with uncompressed signature and public keys
// Note that checking message uniqueness, if required, is left to the user.
// Not all signature schemes require it and this keeps the binding minimal
// and fast. Refer to the Uniq function for one method of performing
// this check.
func (sig *P2Affine) AggregateVerify(sigGroupcheck bool,
	pks []*P1Affine, pksVerify bool, msgs []Message, dst []byte,
	optional ...interface{}) bool { // useHash bool, augs [][]byte
	// sanity checks and argument parsing
	n := len(pks)
	if n == 0 || len(msgs) != n {
		return false
	}
	_, augs, useHash, ok := parseOpts(optional...)
	useAugs := len(augs) != 0
	if !ok || (useAugs && len(augs) != n) {
		return false
	}
	sigFn := func() *P2Affine {
		return sig
	}
	pkFn := func(i uint32, _ *P1Affine) (*P1Affine, []byte) {
		if useAugs {
			return pks[i], augs[i]
		}
		return pks[i], nil
	}
	return coreAggregateVerifyPkInG1(sigFn, sigGroupcheck, pkFn, pksVerify,
		msgs, dst, useHash)
}
// Aggregate verify with compressed signature and public keys
// Uses a dummy signature to get the correct type
func (*P2Affine) AggregateVerifyCompressed(sig []byte, sigGroupcheck bool,
	pks [][]byte, pksVerify bool, msgs []Message, dst []byte,
	optional ...bool) bool { // useHash bool, usePksAsAugs bool
	// sanity checks and argument parsing
	if len(pks) != len(msgs) {
		return false
	}
	useHash := true
	if len(optional) > 0 {
		useHash = optional[0]
	}
	usePksAsAugs := false
	if len(optional) > 1 {
		usePksAsAugs = optional[1]
	}
	// Signature decompression deferred into the getter so it can
	// overlap with the worker goroutines' aggregation.
	sigFn := func() *P2Affine {
		sigP := new(P2Affine)
		if sigP.Uncompress(sig) == nil {
			return nil
		}
		return sigP
	}
	// Accept either serialized (MSB clear) or compressed (MSB set)
	// public-key encodings, distinguished by length and the top bit.
	pkFn := func(i uint32, pk *P1Affine) (*P1Affine, []byte) {
		bytes := pks[i]
		if len(bytes) == BLST_P1_SERIALIZE_BYTES && (bytes[0]&0x80) == 0 {
			// Not compressed
			if pk.Deserialize(bytes) == nil {
				return nil, nil
			}
		} else if len(bytes) == BLST_P1_COMPRESS_BYTES && (bytes[0]&0x80) != 0 {
			if pk.Uncompress(bytes) == nil {
				return nil, nil
			}
		} else {
			return nil, nil
		}
		if usePksAsAugs {
			return pk, bytes
		}
		return pk, nil
	}
	return coreAggregateVerifyPkInG1(sigFn, sigGroupcheck, pkFn, pksVerify,
		msgs, dst, useHash)
}
func coreAggregateVerifyPkInG1(sigFn sigGetterP2, sigGroupcheck bool,
pkFn pkGetterP1, pkValidate bool, msgs []Message, dst []byte,
optional ...bool) bool { // useHash
n := len(msgs)
if n == 0 {
return false
}
useHash := true
if len(optional) > 0 {
useHash = optional[0]
}
numCores := runtime.GOMAXPROCS(0)
numThreads := numThreads(n)
// Each thread will determine next message to process by atomically
// incrementing curItem, process corresponding pk,msg[,aug] tuple and
// repeat until n is exceeded. The resulting accumulations will be
// fed into the msgsCh channel.
msgsCh := make(chan Pairing, numThreads)
valid := int32(1)
curItem := uint32(0)
mutex := sync.Mutex{}
mutex.Lock()
for tid := 0; tid < numThreads; tid++ {
go func() {
pairing := PairingCtx(useHash, dst)
var temp P1Affine
for atomic.LoadInt32(&valid) > 0 {
// Get a work item
work := atomic.AddUint32(&curItem, 1) - 1
if work >= uint32(n) {
break
} else if work == 0 && maxProcs == numCores-1 &&
numThreads == maxProcs {
// Avoid consuming all cores by waiting until the
// main thread has completed its miller loop before
// proceeding.
mutex.Lock()
mutex.Unlock() //nolint:staticcheck
}
// Pull Public Key and augmentation blob
curPk, aug := pkFn(work, &temp)
if curPk == nil {
atomic.StoreInt32(&valid, 0)
break
}
// Pairing and accumulate
ret := PairingAggregatePkInG1(pairing, curPk, pkValidate,
nil, false, msgs[work], aug)
if ret != C.BLST_SUCCESS {
atomic.StoreInt32(&valid, 0)
break
}
// application might have some async work to do
runtime.Gosched()
}
if atomic.LoadInt32(&valid) > 0 {
PairingCommit(pairing)
msgsCh <- pairing
} else {
msgsCh <- nil
}
}()
}
// Uncompress and check signature
var gtsig Fp12
sig := sigFn()
if sig == nil {
atomic.StoreInt32(&valid, 0)
}
if atomic.LoadInt32(&valid) > 0 && sigGroupcheck &&
!sig.SigValidate(false) {
atomic.StoreInt32(&valid, 0)
}
if atomic.LoadInt32(&valid) > 0 {
C.blst_aggregated_in_g2(>sig.cgo, &sig.cgo)
}
mutex.Unlock()
// Accumulate the thread results
var pairings Pairing
for i := 0; i < numThreads; i++ {
msg := <-msgsCh
if msg != nil {
if pairings == nil {
pairings = msg
} else {
ret := PairingMerge(pairings, msg)
if ret != C.BLST_SUCCESS {
atomic.StoreInt32(&valid, 0)
}
}
}
}
if atomic.LoadInt32(&valid) == 0 || pairings == nil {
return false
}
return PairingFinalVerify(pairings, >sig)
}
// CoreVerifyPkInG1 verifies a single (pk, sig, msg) triple and
// returns a BLST_* error code. When goroutine pressure allows, it
// reuses the parallel aggregate-verify machinery; otherwise it calls
// straight into the single-threaded C core.
func CoreVerifyPkInG1(pk *P1Affine, sig *P2Affine, hash_or_encode bool,
	msg Message, dst []byte, optional ...[]byte) int {
	var aug []byte
	if len(optional) > 0 {
		aug = optional[0]
	}
	if runtime.NumGoroutine() < maxProcs {
		sigFn := func() *P2Affine {
			return sig
		}
		pkFn := func(_ uint32, _ *P1Affine) (*P1Affine, []byte) {
			return pk, aug
		}
		if !coreAggregateVerifyPkInG1(sigFn, true, pkFn, true, []Message{msg},
			dst, hash_or_encode) {
			return C.BLST_VERIFY_FAIL
		}
		return C.BLST_SUCCESS
	}
	return int(C.blst_core_verify_pk_in_g1(&pk.cgo, &sig.cgo, C.bool(hash_or_encode),
		ptrOrNil(msg), C.size_t(len(msg)),
		ptrOrNil(dst), C.size_t(len(dst)),
		ptrOrNil(aug), C.size_t(len(aug))))
}
// pks are assumed to be verified for proof of possession,
// which implies that they are already group-checked
func (sig *P2Affine) FastAggregateVerify(sigGroupcheck bool,
	pks []*P1Affine, msg Message, dst []byte,
	optional ...interface{}) bool { // pass-through to Verify
	n := len(pks)
	// TODO: return value for length zero?
	if n == 0 {
		return false
	}
	// Sum the public keys without group checks (per the PoP
	// assumption above), then verify once against the sum.
	aggregator := new(P1Aggregate)
	if !aggregator.Aggregate(pks, false) {
		return false
	}
	pkAff := aggregator.ToAffine()
	// Verify
	return sig.Verify(sigGroupcheck, pkAff, false, msg, dst, optional...)
}
// MultipleAggregateVerify batch-verifies distinct (sig, pk, msg)
// triples using a random linear combination: |randFn| fills a fresh
// scalar for each triple, of which |randBits| bits are used.
func (*P2Affine) MultipleAggregateVerify(sigs []*P2Affine,
	sigsGroupcheck bool, pks []*P1Affine, pksVerify bool,
	msgs []Message, dst []byte, randFn func(*Scalar), randBits int,
	optional ...interface{}) bool { // useHash
	// Sanity checks and argument parsing
	n := len(pks)
	if n == 0 || len(msgs) != n || len(sigs) != n {
		return false
	}
	_, augs, useHash, ok := parseOpts(optional...)
	useAugs := len(augs) != 0
	if !ok || (useAugs && len(augs) != n) {
		return false
	}
	paramsFn :=
		func(work uint32, _ *P2Affine, _ *P1Affine, rand *Scalar) (
			*P2Affine, *P1Affine, *Scalar, []byte) {
			randFn(rand)
			var aug []byte
			if useAugs {
				aug = augs[work]
			}
			return sigs[work], pks[work], rand, aug
		}
	return multipleAggregateVerifyPkInG1(paramsFn, sigsGroupcheck, pksVerify,
		msgs, dst, randBits, useHash)
}

// mulAggGetterPkInG1 supplies the (sig, pk, rand, aug) tuple for work
// item |work|; the temp arguments are per-goroutine scratch.
type mulAggGetterPkInG1 func(work uint32, sig *P2Affine, pk *P1Affine,
	rand *Scalar) (*P2Affine, *P1Affine, *Scalar, []byte)
// multipleAggregateVerifyPkInG1 runs the randomized batch
// verification: work items are handed to goroutines through an
// atomically incremented counter and the per-goroutine pairing
// contexts are merged and final-verified at the end.
func multipleAggregateVerifyPkInG1(paramsFn mulAggGetterPkInG1,
	sigsGroupcheck bool, pksVerify bool, msgs []Message,
	dst []byte, randBits int,
	optional ...bool) bool { // useHash
	n := len(msgs)
	if n == 0 {
		return false
	}
	useHash := true
	if len(optional) > 0 {
		useHash = optional[0]
	}
	numThreads := numThreads(n)
	// Each thread will determine next message to process by atomically
	// incrementing curItem, process corresponding pk,msg[,aug] tuple and
	// repeat until n is exceeded. The resulting accumulations will be
	// fed into the msgsCh channel.
	msgsCh := make(chan Pairing, numThreads)
	valid := int32(1)
	curItem := uint32(0)
	for tid := 0; tid < numThreads; tid++ {
		go func() {
			pairing := PairingCtx(useHash, dst)
			var tempRand Scalar
			var tempPk P1Affine
			var tempSig P2Affine
			for atomic.LoadInt32(&valid) > 0 {
				// Get a work item
				work := atomic.AddUint32(&curItem, 1) - 1
				if work >= uint32(n) {
					break
				}
				curSig, curPk, curRand, aug := paramsFn(work, &tempSig,
					&tempPk, &tempRand)
				if PairingMulNAggregatePkInG1(pairing, curPk, pksVerify,
					curSig, sigsGroupcheck, curRand,
					randBits, msgs[work], aug) !=
					C.BLST_SUCCESS {
					atomic.StoreInt32(&valid, 0)
					break
				}
				// application might have some async work to do
				runtime.Gosched()
			}
			if atomic.LoadInt32(&valid) > 0 {
				PairingCommit(pairing)
				msgsCh <- pairing
			} else {
				msgsCh <- nil
			}
		}()
	}
	// Accumulate the thread results
	var pairings Pairing
	for i := 0; i < numThreads; i++ {
		msg := <-msgsCh
		if msg != nil {
			if pairings == nil {
				pairings = msg
			} else {
				ret := PairingMerge(pairings, msg)
				if ret != C.BLST_SUCCESS {
					atomic.StoreInt32(&valid, 0)
				}
			}
		}
	}
	if atomic.LoadInt32(&valid) == 0 || pairings == nil {
		return false
	}
	return PairingFinalVerify(pairings, nil)
}
//
// Aggregate P2
//

// aggGetterP2 returns the i-th element to aggregate; |temp| is
// per-goroutine scratch the getter may decompress into.
type aggGetterP2 func(i uint32, temp *P2Affine) *P2Affine

// P2Aggregate accumulates P2 points; v stays nil until the first
// point has been added.
type P2Aggregate struct {
	v *P2
}

// Aggregate uncompressed elements
func (agg *P2Aggregate) Aggregate(elmts []*P2Affine,
	groupcheck bool) bool {
	if len(elmts) == 0 {
		return true
	}
	getter := func(i uint32, _ *P2Affine) *P2Affine { return elmts[i] }
	return agg.coreAggregate(getter, groupcheck, len(elmts))
}

// AggregateWithRandomness computes the multi-scalar product of the
// given points and scalars. NOTE: it replaces (does not extend) any
// previously accumulated value in agg.v.
func (agg *P2Aggregate) AggregateWithRandomness(pointsIf interface{},
	scalarsIf interface{}, nbits int, groupcheck bool) bool {
	if groupcheck && !P2AffinesValidate(pointsIf) {
		return false
	}
	agg.v = P2AffinesMult(pointsIf, scalarsIf, nbits)
	return true
}
// Aggregate compressed elements
func (agg *P2Aggregate) AggregateCompressed(elmts [][]byte,
	groupcheck bool) bool {
	if len(elmts) == 0 {
		return true
	}
	// Decompress into the caller-provided scratch point; nil signals
	// a malformed encoding and aborts the aggregation.
	getter := func(i uint32, p *P2Affine) *P2Affine {
		bytes := elmts[i]
		if p.Uncompress(bytes) == nil {
			return nil
		}
		return p
	}
	return agg.coreAggregate(getter, groupcheck, len(elmts))
}
// AddAggregate folds another aggregate into this one. An empty
// |other| is a no-op; an empty receiver simply adopts other's sum.
func (agg *P2Aggregate) AddAggregate(other *P2Aggregate) {
	switch {
	case other.v == nil:
		// nothing to add
	case agg.v == nil:
		agg.v = other.v
	default:
		C.blst_p2_add_or_double(&agg.v.cgo, &agg.v.cgo, &other.v.cgo)
	}
}
// Add accumulates a single affine point, optionally group-checking it
// first; returns false if the check fails.
func (agg *P2Aggregate) Add(elmt *P2Affine, groupcheck bool) bool {
	if groupcheck && !bool(C.blst_p2_affine_in_g2(&elmt.cgo)) {
		return false
	}
	if agg.v == nil {
		agg.v = new(P2)
		C.blst_p2_from_affine(&agg.v.cgo, &elmt.cgo)
	} else {
		C.blst_p2_add_or_double_affine(&agg.v.cgo, &agg.v.cgo, &elmt.cgo)
	}
	return true
}

// ToAffine converts the accumulated sum to affine form; an empty
// aggregate yields a zero-value P2Affine.
func (agg *P2Aggregate) ToAffine() *P2Affine {
	if agg.v == nil {
		return new(P2Affine)
	}
	return agg.v.ToAffine()
}
// coreAggregate sums |n| points obtained through |getter| across up
// to GOMAXPROCS goroutines, then folds the partial sums (and any
// pre-existing agg.v) together. On any failure agg.v is reset to nil
// and false is returned.
func (agg *P2Aggregate) coreAggregate(getter aggGetterP2, groupcheck bool,
	n int) bool {
	if n == 0 {
		return true
	}
	// operations are considered short enough for not to care about
	// keeping one core free...
	numThreads := runtime.GOMAXPROCS(0)
	if numThreads > n {
		numThreads = n
	}
	valid := int32(1)
	// empty distinguishes "this goroutine got no work" from "work
	// failed" (agg == nil with empty == false).
	type result struct {
		agg   *P2
		empty bool
	}
	msgs := make(chan result, numThreads)
	curItem := uint32(0)
	for tid := 0; tid < numThreads; tid++ {
		go func() {
			first := true
			var agg P2
			var temp P2Affine
			for atomic.LoadInt32(&valid) > 0 {
				// Get a work item
				work := atomic.AddUint32(&curItem, 1) - 1
				if work >= uint32(n) {
					break
				}
				// Signature validate
				curElmt := getter(work, &temp)
				if curElmt == nil {
					atomic.StoreInt32(&valid, 0)
					break
				}
				if groupcheck && !bool(C.blst_p2_affine_in_g2(&curElmt.cgo)) {
					atomic.StoreInt32(&valid, 0)
					break
				}
				if first {
					C.blst_p2_from_affine(&agg.cgo, &curElmt.cgo)
					first = false
				} else {
					C.blst_p2_add_or_double_affine(&agg.cgo, &agg.cgo, &curElmt.cgo)
				}
				// application might have some async work to do
				runtime.Gosched()
			}
			if first {
				msgs <- result{nil, true}
			} else if atomic.LoadInt32(&valid) > 0 {
				msgs <- result{&agg, false}
			} else {
				msgs <- result{nil, false}
			}
		}()
	}
	// Accumulate the thread results
	first := agg.v == nil
	validLocal := true
	for i := 0; i < numThreads; i++ {
		msg := <-msgs
		if !validLocal || msg.empty {
			// do nothing
		} else if msg.agg == nil {
			validLocal = false
			// This should be unnecessary but seems safer
			atomic.StoreInt32(&valid, 0)
		} else {
			if first {
				agg.v = msg.agg
				first = false
			} else {
				C.blst_p2_add_or_double(&agg.v.cgo, &agg.v.cgo, &msg.agg.cgo)
			}
		}
	}
	if atomic.LoadInt32(&valid) == 0 {
		agg.v = nil
		return false
	}
	return true
}
//
// MIN-SIG
//
//
// PublicKey
//
// From derives the min-sig public key for secret scalar |s| (the C-side
// sk-to-pk conversion in G2), stores it in the receiver and returns it.
func (pk *P2Affine) From(s *Scalar) *P2Affine {
C.blst_sk_to_pk2_in_g2(nil, &pk.cgo, &s.cgo)
return pk
}
// KeyValidate checks the public key via the C-side helper. The 'true'
// flag presumably enables the infinity check in addition to subgroup
// membership — confirm against go_p2_affine_validate in the cgo preamble.
func (pk *P2Affine) KeyValidate() bool {
return bool(C.go_p2_affine_validate(&pk.cgo, true))
}
// sigInfcheck, check for infinity, is a way to avoid going
// into resource-consuming verification. Passing 'false' is
// always cryptographically safe, but application might want
// to guard against obviously bogus individual[!] signatures.
//
// SigValidate checks the min-sig signature point via the C-side helper;
// |sigInfcheck| additionally rejects the point at infinity.
func (sig *P1Affine) SigValidate(sigInfcheck bool) bool {
return bool(C.go_p1_affine_validate(&sig.cgo, C.bool(sigInfcheck)))
}
//
// Sign
//
// Sign produces a min-sig signature over |msg| with domain-separation
// tag |dst|, storing the result in the receiver. Optional arguments are
// parsed by parseOpts (useHash flag and a single augmentation blob);
// per-message augmentation slices are rejected here.
func (sig *P1Affine) Sign(sk *SecretKey, msg []byte, dst []byte,
    optional ...interface{}) *P1Affine {
    augSingle, aug, useHash, ok := parseOpts(optional...)
    if !ok || len(aug) != 0 {
        return nil
    }
    // Map the message to G1 either by hashing or by encoding.
    var hashed *P1
    if useHash {
        hashed = HashToG1(msg, dst, augSingle)
    } else {
        hashed = EncodeToG1(msg, dst, augSingle)
    }
    C.blst_sign_pk2_in_g2(nil, &sig.cgo, &hashed.cgo, &sk.cgo)
    return sig
}
//
// Signature
//
// Functions to return a signature and public key+augmentation tuple.
// This enables point decompression (if needed) to happen in parallel.
//
// sigGetterP1 produces the (possibly lazily decompressed) signature;
// nil signals a decode failure.
type sigGetterP1 func() *P1Affine

// pkGetterP2 produces the i-th public key and its augmentation blob;
// |temp| is scratch space the callback may decode into.
type pkGetterP2 func(i uint32, temp *P2Affine) (*P2Affine, []byte)
// Verify checks a single min-sig signature against one decompressed
// public key and message; implemented as a one-element aggregate verify.
func (sig *P1Affine) Verify(sigGroupcheck bool, pk *P2Affine, pkValidate bool,
    msg Message, dst []byte,
    optional ...interface{}) bool { // useHash bool, aug []byte
    aug, _, useHash, ok := parseOpts(optional...)
    if !ok {
        return false
    }
    pks := []*P2Affine{pk}
    msgs := []Message{msg}
    augs := [][]byte{aug}
    return sig.AggregateVerify(sigGroupcheck, pks, pkValidate,
        msgs, dst, useHash, augs)
}
// Single verify with compressed pk
// Uses a dummy signature to get the correct type
//
// VerifyCompressed is a one-element pass-through to
// AggregateVerifyCompressed; |sig| and |pk| are serialized byte blobs.
func (dummy *P1Affine) VerifyCompressed(sig []byte, sigGroupcheck bool,
pk []byte, pkValidate bool, msg Message, dst []byte,
optional ...bool) bool { // useHash bool, usePksAsAugs bool
return dummy.AggregateVerifyCompressed(sig, sigGroupcheck,
[][]byte{pk}, pkValidate,
[]Message{msg}, dst, optional...)
}
// Aggregate verify with uncompressed signature and public keys
// Note that checking message uniqueness, if required, is left to the user.
// Not all signature schemes require it and this keeps the binding minimal
// and fast. Refer to the Uniq function for one method of performing
// this check.
// AggregateVerify checks an aggregated min-sig signature against n
// (public key, message) pairs, with optional per-message augmentations.
// Message-uniqueness checking, when required, is the caller's job.
func (sig *P1Affine) AggregateVerify(sigGroupcheck bool,
    pks []*P2Affine, pksVerify bool, msgs []Message, dst []byte,
    optional ...interface{}) bool { // useHash bool, augs [][]byte
    // sanity checks and argument parsing
    n := len(pks)
    if n == 0 || len(msgs) != n {
        return false
    }
    _, augs, useHash, ok := parseOpts(optional...)
    useAugs := len(augs) != 0
    if !ok || (useAugs && len(augs) != n) {
        return false
    }
    sigFn := func() *P1Affine { return sig }
    pkFn := func(i uint32, _ *P2Affine) (*P2Affine, []byte) {
        var aug []byte
        if useAugs {
            aug = augs[i]
        }
        return pks[i], aug
    }
    return coreAggregateVerifyPkInG2(sigFn, sigGroupcheck, pkFn, pksVerify,
        msgs, dst, useHash)
}
// AggregateVerifyCompressed checks an aggregated min-sig signature where
// both the signature and the public keys arrive as serialized blobs; each
// key may be either uncompressed (96 bytes, top bit clear) or compressed
// (48... for P1; here P2 sizes, top bit set). Decoding happens lazily
// inside the verification workers.
func (*P1Affine) AggregateVerifyCompressed(sig []byte, sigGroupcheck bool,
    pks [][]byte, pksVerify bool, msgs []Message, dst []byte,
    optional ...bool) bool { // useHash bool, usePksAsAugs bool
    // sanity checks and argument parsing
    if len(pks) != len(msgs) {
        return false
    }
    useHash, usePksAsAugs := true, false
    if len(optional) > 0 {
        useHash = optional[0]
    }
    if len(optional) > 1 {
        usePksAsAugs = optional[1]
    }
    sigFn := func() *P1Affine {
        decoded := new(P1Affine)
        if decoded.Uncompress(sig) == nil {
            return nil
        }
        return decoded
    }
    pkFn := func(i uint32, pk *P2Affine) (*P2Affine, []byte) {
        raw := pks[i]
        switch {
        case len(raw) == BLST_P2_SERIALIZE_BYTES && (raw[0]&0x80) == 0:
            // Not compressed
            if pk.Deserialize(raw) == nil {
                return nil, nil
            }
        case len(raw) == BLST_P2_COMPRESS_BYTES && (raw[0]&0x80) != 0:
            if pk.Uncompress(raw) == nil {
                return nil, nil
            }
        default:
            return nil, nil
        }
        if usePksAsAugs {
            return pk, raw
        }
        return pk, nil
    }
    return coreAggregateVerifyPkInG2(sigFn, sigGroupcheck, pkFn, pksVerify,
        msgs, dst, useHash)
}
func coreAggregateVerifyPkInG2(sigFn sigGetterP1, sigGroupcheck bool,
pkFn pkGetterP2, pkValidate bool, msgs []Message, dst []byte,
optional ...bool) bool { // useHash
n := len(msgs)
if n == 0 {
return false
}
useHash := true
if len(optional) > 0 {
useHash = optional[0]
}
numCores := runtime.GOMAXPROCS(0)
numThreads := numThreads(n)
// Each thread will determine next message to process by atomically
// incrementing curItem, process corresponding pk,msg[,aug] tuple and
// repeat until n is exceeded. The resulting accumulations will be
// fed into the msgsCh channel.
msgsCh := make(chan Pairing, numThreads)
valid := int32(1)
curItem := uint32(0)
mutex := sync.Mutex{}
mutex.Lock()
for tid := 0; tid < numThreads; tid++ {
go func() {
pairing := PairingCtx(useHash, dst)
var temp P2Affine
for atomic.LoadInt32(&valid) > 0 {
// Get a work item
work := atomic.AddUint32(&curItem, 1) - 1
if work >= uint32(n) {
break
} else if work == 0 && maxProcs == numCores-1 &&
numThreads == maxProcs {
// Avoid consuming all cores by waiting until the
// main thread has completed its miller loop before
// proceeding.
mutex.Lock()
mutex.Unlock() //nolint:staticcheck
}
// Pull Public Key and augmentation blob
curPk, aug := pkFn(work, &temp)
if curPk == nil {
atomic.StoreInt32(&valid, 0)
break
}
// Pairing and accumulate
ret := PairingAggregatePkInG2(pairing, curPk, pkValidate,
nil, false, msgs[work], aug)
if ret != C.BLST_SUCCESS {
atomic.StoreInt32(&valid, 0)
break
}
// application might have some async work to do
runtime.Gosched()
}
if atomic.LoadInt32(&valid) > 0 {
PairingCommit(pairing)
msgsCh <- pairing
} else {
msgsCh <- nil
}
}()
}
// Uncompress and check signature
var gtsig Fp12
sig := sigFn()
if sig == nil {
atomic.StoreInt32(&valid, 0)
}
if atomic.LoadInt32(&valid) > 0 && sigGroupcheck &&
!sig.SigValidate(false) {
atomic.StoreInt32(&valid, 0)
}
if atomic.LoadInt32(&valid) > 0 {
C.blst_aggregated_in_g1(>sig.cgo, &sig.cgo)
}
mutex.Unlock()
// Accumulate the thread results
var pairings Pairing
for i := 0; i < numThreads; i++ {
msg := <-msgsCh
if msg != nil {
if pairings == nil {
pairings = msg
} else {
ret := PairingMerge(pairings, msg)
if ret != C.BLST_SUCCESS {
atomic.StoreInt32(&valid, 0)
}
}
}
}
if atomic.LoadInt32(&valid) == 0 || pairings == nil {
return false
}
return PairingFinalVerify(pairings, >sig)
}
// CoreVerifyPkInG2 verifies a single (pk, sig, msg[, aug]) tuple and
// returns a BLST_* error code. When the process is not already saturated
// with goroutines it reuses the parallel aggregate-verify machinery,
// otherwise it calls straight into the single-threaded C core.
func CoreVerifyPkInG2(pk *P2Affine, sig *P1Affine, hash_or_encode bool,
    msg Message, dst []byte, optional ...[]byte) int {
    var aug []byte
    if len(optional) > 0 {
        aug = optional[0]
    }
    if runtime.NumGoroutine() >= maxProcs {
        return int(C.blst_core_verify_pk_in_g2(&pk.cgo, &sig.cgo,
            C.bool(hash_or_encode),
            ptrOrNil(msg), C.size_t(len(msg)),
            ptrOrNil(dst), C.size_t(len(dst)),
            ptrOrNil(aug), C.size_t(len(aug))))
    }
    sigFn := func() *P1Affine { return sig }
    pkFn := func(_ uint32, _ *P2Affine) (*P2Affine, []byte) {
        return pk, aug
    }
    if coreAggregateVerifyPkInG2(sigFn, true, pkFn, true, []Message{msg},
        dst, hash_or_encode) {
        return C.BLST_SUCCESS
    }
    return C.BLST_VERIFY_FAIL
}
// FastAggregateVerify verifies one signature over one message against the
// aggregate of |pks|. The keys are assumed to carry proofs of possession,
// which implies they are already group-checked, so aggregation skips the
// subgroup check.
func (sig *P1Affine) FastAggregateVerify(sigGroupcheck bool,
    pks []*P2Affine, msg Message, dst []byte,
    optional ...interface{}) bool { // pass-through to Verify
    // TODO: return value for length zero?
    if len(pks) == 0 {
        return false
    }
    var agg P2Aggregate
    if !agg.Aggregate(pks, false) {
        return false
    }
    // Verify against the aggregated key.
    return sig.Verify(sigGroupcheck, agg.ToAffine(), false, msg, dst,
        optional...)
}
// MultipleAggregateVerify batch-verifies n independent (sig, pk, msg)
// triples by blinding each with a |randBits|-bit scalar drawn from
// |randFn|, then performing one combined pairing check.
func (*P1Affine) MultipleAggregateVerify(sigs []*P1Affine,
    sigsGroupcheck bool, pks []*P2Affine, pksVerify bool,
    msgs []Message, dst []byte, randFn func(*Scalar), randBits int,
    optional ...interface{}) bool { // useHash
    // Sanity checks and argument parsing
    n := len(pks)
    if n == 0 || len(msgs) != n || len(sigs) != n {
        return false
    }
    _, augs, useHash, ok := parseOpts(optional...)
    useAugs := len(augs) != 0
    if !ok || (useAugs && len(augs) != n) {
        return false
    }
    paramsFn :=
        func(work uint32, _ *P1Affine, _ *P2Affine, rand *Scalar) (
            *P1Affine, *P2Affine, *Scalar, []byte) {
            randFn(rand)
            if useAugs {
                return sigs[work], pks[work], rand, augs[work]
            }
            return sigs[work], pks[work], rand, nil
        }
    return multipleAggregateVerifyPkInG2(paramsFn, sigsGroupcheck, pksVerify,
        msgs, dst, randBits, useHash)
}
// mulAggGetterPkInG2 supplies the work-th (sig, pk, blinding scalar, aug)
// tuple; |sig|, |pk| and |rand| are caller-provided scratch space.
type mulAggGetterPkInG2 func(work uint32, sig *P1Affine, pk *P2Affine,
rand *Scalar) (*P1Affine, *P2Affine, *Scalar, []byte)
// multipleAggregateVerifyPkInG2 is the parallel core of
// MultipleAggregateVerify: workers claim indices atomically, fold each
// blinded (sig, pk, msg) tuple into a per-goroutine pairing context, and
// the contexts are merged and final-verified on the calling goroutine.
func multipleAggregateVerifyPkInG2(paramsFn mulAggGetterPkInG2,
sigsGroupcheck bool, pksVerify bool, msgs []Message,
dst []byte, randBits int,
optional ...bool) bool { // useHash
n := len(msgs)
if n == 0 {
return false
}
useHash := true
if len(optional) > 0 {
useHash = optional[0]
}
numThreads := numThreads(n)
// Each thread will determine next message to process by atomically
// incrementing curItem, process corresponding pk,msg[,aug] tuple and
// repeat until n is exceeded. The resulting accumulations will be
// fed into the msgsCh channel.
msgsCh := make(chan Pairing, numThreads)
valid := int32(1)
curItem := uint32(0)
for tid := 0; tid < numThreads; tid++ {
go func() {
pairing := PairingCtx(useHash, dst)
var tempRand Scalar
var tempPk P2Affine
var tempSig P1Affine
for atomic.LoadInt32(&valid) > 0 {
// Get a work item
work := atomic.AddUint32(&curItem, 1) - 1
if work >= uint32(n) {
break
}
curSig, curPk, curRand, aug := paramsFn(work, &tempSig,
&tempPk, &tempRand)
if PairingMulNAggregatePkInG2(pairing, curPk, pksVerify,
curSig, sigsGroupcheck, curRand,
randBits, msgs[work], aug) !=
C.BLST_SUCCESS {
atomic.StoreInt32(&valid, 0)
break
}
// application might have some async work to do
runtime.Gosched()
}
if atomic.LoadInt32(&valid) > 0 {
PairingCommit(pairing)
msgsCh <- pairing
} else {
msgsCh <- nil
}
}()
}
// Accumulate the thread results; the channel is drained fully so no
// worker blocks on send even after a failure.
var pairings Pairing
for i := 0; i < numThreads; i++ {
msg := <-msgsCh
if msg != nil {
if pairings == nil {
pairings = msg
} else {
ret := PairingMerge(pairings, msg)
if ret != C.BLST_SUCCESS {
atomic.StoreInt32(&valid, 0)
}
}
}
}
if atomic.LoadInt32(&valid) == 0 || pairings == nil {
return false
}
return PairingFinalVerify(pairings, nil)
}
//
// Aggregate P1
//
// aggGetterP1 returns the i-th element to be aggregated; |temp| is scratch
// space the callback may decode into. Returning nil aborts the aggregation.
type aggGetterP1 func(i uint32, temp *P1Affine) *P1Affine

// P1Aggregate accumulates P1 points; |v| stays nil until the first addition.
type P1Aggregate struct {
v *P1
}
// Aggregate folds a set of uncompressed points into the aggregate.
// When |groupcheck| is set every element is subgroup-checked first;
// any failure yields false. An empty slice is a successful no-op.
func (agg *P1Aggregate) Aggregate(elmts []*P1Affine,
    groupcheck bool) bool {
    n := len(elmts)
    if n == 0 {
        return true
    }
    fetch := func(i uint32, _ *P1Affine) *P1Affine { return elmts[i] }
    return agg.coreAggregate(fetch, groupcheck, n)
}
// AggregateWithRandomness sets the aggregate to the multi-scalar
// multiplication of |pointsIf| by |scalarsIf| (|nbits|-bit scalars).
// When |groupcheck| is set the points are validated first; a failed
// check returns false and leaves the aggregate untouched.
func (agg *P1Aggregate) AggregateWithRandomness(pointsIf interface{},
    scalarsIf interface{}, nbits int, groupcheck bool) bool {
    if !groupcheck || P1AffinesValidate(pointsIf) {
        agg.v = P1AffinesMult(pointsIf, scalarsIf, nbits)
        return true
    }
    return false
}
// AggregateCompressed folds a set of compressed points into the aggregate,
// uncompressing each element on the fly into per-worker scratch space.
func (agg *P1Aggregate) AggregateCompressed(elmts [][]byte,
    groupcheck bool) bool {
    n := len(elmts)
    if n == 0 {
        return true
    }
    decode := func(i uint32, scratch *P1Affine) *P1Affine {
        if scratch.Uncompress(elmts[i]) == nil {
            return nil
        }
        return scratch
    }
    return agg.coreAggregate(decode, groupcheck, n)
}
// AddAggregate folds another aggregate into the receiver.
// An empty operand leaves the receiver unchanged; an empty receiver
// simply adopts the operand's accumulator.
func (agg *P1Aggregate) AddAggregate(other *P1Aggregate) {
    switch {
    case other.v == nil:
        // nothing to add
    case agg.v == nil:
        agg.v = other.v
    default:
        C.blst_p1_add_or_double(&agg.v.cgo, &agg.v.cgo, &other.v.cgo)
    }
}
// Add folds a single affine point into the aggregate, optionally
// subgroup-checking it first; returns false if the check fails.
func (agg *P1Aggregate) Add(elmt *P1Affine, groupcheck bool) bool {
    if groupcheck && !bool(C.blst_p1_affine_in_g1(&elmt.cgo)) {
        return false
    }
    if agg.v != nil {
        C.blst_p1_add_or_double_affine(&agg.v.cgo, &agg.v.cgo, &elmt.cgo)
        return true
    }
    // First addition: promote the affine point to projective form.
    agg.v = new(P1)
    C.blst_p1_from_affine(&agg.v.cgo, &elmt.cgo)
    return true
}
// ToAffine returns the aggregate as an affine point; an empty aggregate
// yields the zero-value P1Affine.
func (agg *P1Aggregate) ToAffine() *P1Affine {
    if agg.v != nil {
        return agg.v.ToAffine()
    }
    return new(P1Affine)
}
// coreAggregate fans the n elements out over up to GOMAXPROCS goroutines,
// each accumulating a partial sum, then folds the partial sums into agg.v.
// Returns false (and resets agg.v to nil) if any element fails to decode or,
// when |groupcheck| is set, fails the G1 subgroup check.
func (agg *P1Aggregate) coreAggregate(getter aggGetterP1, groupcheck bool,
n int) bool {
if n == 0 {
return true
}
// operations are considered short enough for not caring about
// keeping one core free...
numThreads := runtime.GOMAXPROCS(0)
if numThreads > n {
numThreads = n
}
// |valid| is a shared abort flag: any worker that hits a bad element
// clears it and the others stop picking up new work.
valid := int32(1)
type result struct {
agg *P1
empty bool
}
msgs := make(chan result, numThreads)
curItem := uint32(0)
for tid := 0; tid < numThreads; tid++ {
go func() {
first := true
var agg P1
var temp P1Affine
for atomic.LoadInt32(&valid) > 0 {
// Get a work item by atomically claiming the next index.
work := atomic.AddUint32(&curItem, 1) - 1
if work >= uint32(n) {
break
}
// Fetch (and possibly decode) the element; nil means failure.
curElmt := getter(work, &temp)
if curElmt == nil {
atomic.StoreInt32(&valid, 0)
break
}
if groupcheck && !bool(C.blst_p1_affine_in_g1(&curElmt.cgo)) {
atomic.StoreInt32(&valid, 0)
break
}
// First element initializes the accumulator, the rest are added.
if first {
C.blst_p1_from_affine(&agg.cgo, &curElmt.cgo)
first = false
} else {
C.blst_p1_add_or_double_affine(&agg.cgo, &agg.cgo, &curElmt.cgo)
}
// application might have some async work to do
runtime.Gosched()
}
// Report: empty (no work claimed), a partial sum, or failure.
if first {
msgs <- result{nil, true}
} else if atomic.LoadInt32(&valid) > 0 {
msgs <- result{&agg, false}
} else {
msgs <- result{nil, false}
}
}()
}
// Accumulate the thread results; keep draining the channel even after
// a failure so no goroutine is left blocked on send.
first := agg.v == nil
validLocal := true
for i := 0; i < numThreads; i++ {
msg := <-msgs
if !validLocal || msg.empty {
// do nothing
} else if msg.agg == nil {
validLocal = false
// This should be unnecessary but seems safer
atomic.StoreInt32(&valid, 0)
} else {
if first {
agg.v = msg.agg
first = false
} else {
C.blst_p1_add_or_double(&agg.v.cgo, &agg.v.cgo, &msg.agg.cgo)
}
}
}
if atomic.LoadInt32(&valid) == 0 {
agg.v = nil
return false
}
return true
}
// PairingAggregatePkInG1 folds one (pk, sig, msg[, aug]) tuple into the
// pairing context via the C-side check-and-aggregate primitive; returns a
// BLST_* error code.
func PairingAggregatePkInG1(ctx Pairing, PK *P1Affine, pkValidate bool,
sig *P2Affine, sigGroupcheck bool, msg []byte,
optional ...[]byte) int { // aug
var aug []byte
if len(optional) > 0 {
aug = optional[0]
}
r := C.blst_pairing_chk_n_aggr_pk_in_g1(&ctx[0],
PK.asPtr(), C.bool(pkValidate),
sig.asPtr(), C.bool(sigGroupcheck),
ptrOrNil(msg), C.size_t(len(msg)),
ptrOrNil(aug), C.size_t(len(aug)))
return int(r)
}
// PairingMulNAggregatePkInG1 folds one (pk, sig, msg[, aug]) tuple into
// the pairing context, blinding it by the |randBits|-bit scalar |rand|
// (used in batch verification); returns a BLST_* error code.
func PairingMulNAggregatePkInG1(ctx Pairing, PK *P1Affine, pkValidate bool,
sig *P2Affine, sigGroupcheck bool,
rand *Scalar, randBits int, msg []byte,
optional ...[]byte) int { // aug
var aug []byte
if len(optional) > 0 {
aug = optional[0]
}
r := C.blst_pairing_chk_n_mul_n_aggr_pk_in_g1(&ctx[0],
PK.asPtr(), C.bool(pkValidate),
sig.asPtr(), C.bool(sigGroupcheck),
&rand.cgo.b[0], C.size_t(randBits),
ptrOrNil(msg), C.size_t(len(msg)),
ptrOrNil(aug), C.size_t(len(aug)))
return int(r)
}
//
// Serialization/Deserialization.
//
// P1 Serdes
//
// Serialize returns the uncompressed BLST_P1_SERIALIZE_BYTES-byte
// encoding of the point.
func (p1 *P1Affine) Serialize() []byte {
var out [BLST_P1_SERIALIZE_BYTES]byte
C.blst_p1_affine_serialize((*C.byte)(&out[0]), &p1.cgo)
return out[:]
}
// Deserialize decodes an uncompressed serialization into the receiver.
// Returns nil on wrong length or if the C decoder rejects the bytes;
// the length guard also keeps the &in[0] access in bounds.
func (p1 *P1Affine) Deserialize(in []byte) *P1Affine {
    if len(in) != BLST_P1_SERIALIZE_BYTES ||
        C.blst_p1_deserialize(&p1.cgo, (*C.byte)(&in[0])) != C.BLST_SUCCESS {
        return nil
    }
    return p1
}
// Compress returns the BLST_P1_COMPRESS_BYTES-byte compressed encoding.
func (p1 *P1Affine) Compress() []byte {
var out [BLST_P1_COMPRESS_BYTES]byte
C.blst_p1_affine_compress((*C.byte)(&out[0]), &p1.cgo)
return out[:]
}
// Uncompress decodes a compressed serialization into the receiver.
// Returns nil on wrong length or if the C decoder rejects the bytes.
func (p1 *P1Affine) Uncompress(in []byte) *P1Affine {
    if len(in) != BLST_P1_COMPRESS_BYTES ||
        C.blst_p1_uncompress(&p1.cgo, (*C.byte)(&in[0])) != C.BLST_SUCCESS {
        return nil
    }
    return p1
}
// InG1 reports whether the point lies in the G1 subgroup.
func (p1 *P1Affine) InG1() bool {
return bool(C.blst_p1_affine_in_g1(&p1.cgo))
}
// BatchUncompress decompresses a batch of encodings in parallel and
// returns pointers into one contiguous allocation; nil if any element
// fails to decode.
func (*P1Affine) BatchUncompress(in [][]byte) []*P1Affine {
// Allocate space for all of the resulting points. Later we'll save pointers
// and return those so that the result could be used in other functions,
// such as MultipleAggregateVerify.
n := len(in)
points := make([]P1Affine, n)
pointsPtrs := make([]*P1Affine, n)
numThreads := numThreads(n)
// Each thread will determine next message to process by atomically
// incrementing curItem, process corresponding point, and
// repeat until n is exceeded. Each thread will send a result (true for
// success, false for failure) into the channel when complete.
resCh := make(chan bool, numThreads)
valid := int32(1)
curItem := uint32(0)
for tid := 0; tid < numThreads; tid++ {
go func() {
for atomic.LoadInt32(&valid) > 0 {
// Get a work item
work := atomic.AddUint32(&curItem, 1) - 1
if work >= uint32(n) {
break
}
if points[work].Uncompress(in[work]) == nil {
atomic.StoreInt32(&valid, 0)
break
}
pointsPtrs[work] = &points[work]
}
if atomic.LoadInt32(&valid) > 0 {
resCh <- true
} else {
resCh <- false
}
}()
}
// Collect the threads; drain the channel fully so none blocks on send.
result := true
for i := 0; i < numThreads; i++ {
if !<-resCh {
result = false
}
}
if atomic.LoadInt32(&valid) == 0 || !result {
return nil
}
return pointsPtrs
}
// Serialize returns the uncompressed encoding of the projective point
// (converted to affine by the C routine).
func (p1 *P1) Serialize() []byte {
var out [BLST_P1_SERIALIZE_BYTES]byte
C.blst_p1_serialize((*C.byte)(&out[0]), &p1.cgo)
return out[:]
}
// Compress returns the compressed encoding of the projective point.
func (p1 *P1) Compress() []byte {
var out [BLST_P1_COMPRESS_BYTES]byte
C.blst_p1_compress((*C.byte)(&out[0]), &p1.cgo)
return out[:]
}
// MultAssign multiplies the point in place by a scalar given either as a
// little-endian []byte or a *Scalar; other types panic. The effective bit
// width defaults to 8*len for []byte and 255 for *Scalar, and may be
// overridden by the optional argument.
func (p1 *P1) MultAssign(scalarIf interface{}, optional ...int) *P1 {
    var scalar *C.byte
    nbits := 255
    switch val := scalarIf.(type) {
    case []byte:
        scalar = (*C.byte)(&val[0])
        nbits = len(val) * 8
    case *Scalar:
        scalar = &val.cgo.b[0]
    default:
        panic(fmt.Sprintf("unsupported type %T", val))
    }
    if len(optional) > 0 {
        nbits = optional[0]
    }
    C.blst_p1_mult(&p1.cgo, &p1.cgo, scalar, C.size_t(nbits))
    return p1
}
// Mult returns p1 * scalar without modifying the receiver
// (operates on a copy).
func (p1 *P1) Mult(scalarIf interface{}, optional ...int) *P1 {
ret := *p1
return ret.MultAssign(scalarIf, optional...)
}
// AddAssign adds a *P1 or *P1Affine point to the receiver in place;
// other types panic.
func (p1 *P1) AddAssign(pointIf interface{}) *P1 {
    switch val := pointIf.(type) {
    case *P1:
        C.blst_p1_add_or_double(&p1.cgo, &p1.cgo, &val.cgo)
        return p1
    case *P1Affine:
        C.blst_p1_add_or_double_affine(&p1.cgo, &p1.cgo, &val.cgo)
        return p1
    }
    panic(fmt.Sprintf("unsupported type %T", pointIf))
}
// Add returns p1 + point without modifying the receiver
// (operates on a copy).
func (p1 *P1) Add(pointIf interface{}) *P1 {
ret := *p1
return ret.AddAssign(pointIf)
}
// SubAssign subtracts a *P1 or *P1Affine point from the receiver in place;
// other types panic. The C helper receives a pointer to the operand's x
// coordinate plus a flag telling it whether the layout is affine.
func (p1 *P1) SubAssign(pointIf interface{}) *P1 {
    var x *C.blst_fp
    affine := C.bool(true)
    switch val := pointIf.(type) {
    case *P1Affine:
        x = &val.cgo.x
    case *P1:
        x = &val.cgo.x
        affine = false
    default:
        panic(fmt.Sprintf("unsupported type %T", val))
    }
    C.go_p1_sub_assign(&p1.cgo, x, affine)
    return p1
}
// Sub returns p1 - point without modifying the receiver
// (operates on a copy).
func (p1 *P1) Sub(pointIf interface{}) *P1 {
ret := *p1
return ret.SubAssign(pointIf)
}
// P1Generator returns the G1 group generator. Note the returned pointer
// aliases a shared package-level value; callers should not mutate it.
func P1Generator() *P1 {
return &cgo_p1Generator
}
// MultNAccumulate computes 'acc += point * scalar'. Passing nil for
// |pointIf| means "use the group generator point". The point may be a
// *P1 or *P1Affine, the scalar a little-endian []byte or *Scalar; other
// types panic. The scalar's effective bit width defaults to 8*len for
// []byte and 255 for *Scalar, and may be overridden via |optional|.
func (acc *P1) MultNAccumulate(pointIf interface{}, scalarIf interface{},
    optional ...int) *P1 {
    var x *C.blst_fp
    var affine C.bool
    switch val := pointIf.(type) {
    case nil:
        // generator point: x stays nil for the C helper
    case *P1:
        x = &val.cgo.x
        affine = false
    case *P1Affine:
        x = &val.cgo.x
        affine = true
    default:
        panic(fmt.Sprintf("unsupported type %T", val))
    }
    var scalar *C.byte
    nbits := 255
    switch val := scalarIf.(type) {
    case []byte:
        scalar = (*C.byte)(&val[0])
        nbits = len(val) * 8
    case *Scalar:
        scalar = &val.cgo.b[0]
    default:
        panic(fmt.Sprintf("unsupported type %T", val))
    }
    if len(optional) > 0 {
        nbits = optional[0]
    }
    C.go_p1_mult_n_acc(&acc.cgo, x, affine, scalar, C.size_t(nbits))
    return acc
}
//
// Affine
//
// ToAffine converts the projective point to a freshly allocated
// affine point.
func (p *P1) ToAffine() *P1Affine {
var pa P1Affine
C.blst_p1_to_affine(&pa.cgo, &p.cgo)
return &pa
}
// FromAffine sets the receiver to the projective form of |pa|.
func (p *P1) FromAffine(pa *P1Affine) {
C.blst_p1_from_affine(&p.cgo, &pa.cgo)
}
// HashToG1 hashes |msg| (with domain-separation tag |dst| and optional
// augmentation prefix) to a G1 point.
func HashToG1(msg []byte, dst []byte,
    optional ...[]byte) *P1 { // aug
    var aug []byte
    if len(optional) > 0 {
        aug = optional[0]
    }
    var out P1
    C.blst_hash_to_g1(&out.cgo, ptrOrNil(msg), C.size_t(len(msg)),
        ptrOrNil(dst), C.size_t(len(dst)),
        ptrOrNil(aug), C.size_t(len(aug)))
    return &out
}
// EncodeToG1 encodes |msg| (with domain-separation tag |dst| and optional
// augmentation prefix) to a G1 point; the non-uniform counterpart of
// HashToG1.
func EncodeToG1(msg []byte, dst []byte,
    optional ...[]byte) *P1 { // aug
    var aug []byte
    if len(optional) > 0 {
        aug = optional[0]
    }
    var out P1
    C.blst_encode_to_g1(&out.cgo, ptrOrNil(msg), C.size_t(len(msg)),
        ptrOrNil(dst), C.size_t(len(dst)),
        ptrOrNil(aug), C.size_t(len(aug)))
    return &out
}
//
// Multi-point/scalar operations
//
// P1sToAffine batch-converts projective points to affine via the C
// routine; |optional| can cap the number of points converted.
func P1sToAffine(points []*P1, optional ...int) P1Affines {
var npoints int
if len(optional) > 0 {
npoints = optional[0]
} else {
npoints = len(points)
}
ret := make([]P1Affine, npoints)
// Shadow cgo's generated pointer-check helper: the argument is a Go
// pointer to Go pointers, which the default check would reject.
_cgoCheckPointer := func(...interface{}) {}
C.blst_p1s_to_affine(&ret[0].cgo, (**C.blst_p1)(unsafe.Pointer(&points[0])),
C.size_t(npoints))
return ret
}
// ToAffine batch-converts the slice to affine form, splitting the work
// into up to maxProcs contiguous slices of roughly 512 points each;
// small inputs are converted in a single C call. An optional output
// slice (used in benchmarks) avoids the allocation.
func (points P1s) ToAffine(optional ...P1Affines) P1Affines {
npoints := len(points)
var ret P1Affines
if len(optional) > 0 { // used in benchmark
ret = optional[0]
if len(ret) < npoints {
panic("npoints mismatch")
}
} else {
ret = make([]P1Affine, npoints)
}
if maxProcs < 2 || npoints < 768 {
C.go_p1slice_to_affine(&ret[0].cgo, &points[0].cgo, C.size_t(npoints))
return ret
}
nslices := (npoints + 511) / 512
if nslices > maxProcs {
nslices = maxProcs
}
// First |rem| slices get |delta| points, the rest |delta|-1, so the
// slice sizes differ by at most one.
delta, rem := npoints/nslices+1, npoints%nslices
var wg sync.WaitGroup
wg.Add(nslices)
for x := 0; x < npoints; x += delta {
if rem == 0 {
delta -= 1
}
rem -= 1
go func(out *P1Affine, inp *P1, delta int) {
C.go_p1slice_to_affine(&out.cgo, &inp.cgo, C.size_t(delta))
wg.Done()
}(&ret[x], &points[x], delta)
}
wg.Wait()
return ret
}
//
// Batch addition
//
// P1AffinesAdd sums a slice of affine points in one C call; |optional|
// can cap the number of points summed.
func P1AffinesAdd(points []*P1Affine, optional ...int) *P1 {
var npoints int
if len(optional) > 0 {
npoints = optional[0]
} else {
npoints = len(points)
}
var ret P1
// Shadow cgo's generated pointer-check helper: the argument is a Go
// pointer to Go pointers, which the default check would reject.
_cgoCheckPointer := func(...interface{}) {}
C.blst_p1s_add(&ret.cgo, (**C.blst_p1_affine)(unsafe.Pointer(&points[0])),
C.size_t(npoints))
return &ret
}
// Add sums the slice, splitting it into up to maxProcs contiguous
// slices of roughly 512 points that are summed concurrently and then
// combined; small inputs are summed in a single C call.
func (points P1Affines) Add() *P1 {
npoints := len(points)
if maxProcs < 2 || npoints < 768 {
var ret P1
C.go_p1slice_add(&ret.cgo, &points[0].cgo, C.size_t(npoints))
return &ret
}
nslices := (npoints + 511) / 512
if nslices > maxProcs {
nslices = maxProcs
}
// First |rem| slices get |delta| points, the rest |delta|-1.
delta, rem := npoints/nslices+1, npoints%nslices
msgs := make(chan P1, nslices)
for x := 0; x < npoints; x += delta {
if rem == 0 {
delta -= 1
}
rem -= 1
go func(points *P1Affine, delta int) {
var ret P1
C.go_p1slice_add(&ret.cgo, &points.cgo, C.size_t(delta))
msgs <- ret
}(&points[x], delta)
}
// Fold the per-slice partial sums into the final result.
ret := <-msgs
for i := 1; i < nslices; i++ {
msg := <-msgs
C.blst_p1_add_or_double(&ret.cgo, &ret.cgo, &msg.cgo)
}
return &ret
}
// Add sums a slice of projective points by converting to affine first.
func (points P1s) Add() *P1 {
return points.ToAffine().Add()
}
//
// Multi-scalar multiplication
//
func P1AffinesMult(pointsIf interface{}, scalarsIf interface{}, nbits int) *P1 {
var npoints int
switch val := pointsIf.(type) {
case []*P1Affine:
npoints = len(val)
case []P1Affine:
npoints = len(val)
case P1Affines:
npoints = len(val)
default:
panic(fmt.Sprintf("unsupported type %T", val))
}
nbytes := (nbits + 7) / 8
var scalars []*C.byte
switch val := scalarsIf.(type) {
case []byte:
if len(val) < npoints*nbytes {
return nil
}
case [][]byte:
if len(val) < npoints {
return nil
}
scalars = make([]*C.byte, npoints)
for i := range scalars {
scalars[i] = (*C.byte)(&val[i][0])
}
case []Scalar:
if len(val) < npoints {
return nil
}
if nbits <= 248 {
scalars = make([]*C.byte, npoints)
for i := range scalars {
scalars[i] = &val[i].cgo.b[0]
}
}
case []*Scalar:
if len(val) < npoints {
return nil
}
scalars = make([]*C.byte, npoints)
for i := range scalars {
scalars[i] = &val[i].cgo.b[0]
}
default:
panic(fmt.Sprintf("unsupported type %T", val))
}
numThreads := numThreads(0)
if numThreads < 2 {
sz := int(C.blst_p1s_mult_pippenger_scratch_sizeof(C.size_t(npoints))) / 8
scratch := make([]uint64, sz)
pointsBySlice := [2]*C.blst_p1_affine{nil, nil}
var p_points **C.blst_p1_affine
switch val := pointsIf.(type) {
case []*P1Affine:
p_points = (**C.blst_p1_affine)(unsafe.Pointer(&val[0]))
case []P1Affine:
pointsBySlice[0] = &val[0].cgo
p_points = &pointsBySlice[0]
case P1Affines:
pointsBySlice[0] = &val[0].cgo
p_points = &pointsBySlice[0]
default: // type is already vetted
}
scalarsBySlice := [2]*C.byte{nil, nil}
var p_scalars **C.byte
switch val := scalarsIf.(type) {
case []byte:
scalarsBySlice[0] = (*C.byte)(&val[0])
p_scalars = &scalarsBySlice[0]
case [][]byte:
p_scalars = &scalars[0]
case []Scalar:
if nbits > 248 {
scalarsBySlice[0] = &val[0].cgo.b[0]
p_scalars = &scalarsBySlice[0]
} else {
p_scalars = &scalars[0]
}
case []*Scalar:
p_scalars = &scalars[0]
default: // type is already vetted
}
var ret P1
_cgoCheckPointer := func(...interface{}) {}
C.blst_p1s_mult_pippenger(&ret.cgo, p_points, C.size_t(npoints),
p_scalars, C.size_t(nbits),
(*C.limb_t)(&scratch[0]))
for i := range scalars {
scalars[i] = nil
}
return &ret
}
if npoints < 32 {
if numThreads > npoints {
numThreads = npoints
}
curItem := uint32(0)
msgs := make(chan P1, numThreads)
for tid := 0; tid < numThreads; tid++ {
go func() {
var acc P1
for {
workItem := int(atomic.AddUint32(&curItem, 1) - 1)
if workItem >= npoints {
break
}
var point *P1Affine
switch val := pointsIf.(type) {
case []*P1Affine:
point = val[workItem]
case []P1Affine:
point = &val[workItem]
case P1Affines:
point = &val[workItem]
default: // type is already vetted
}
var scalar *C.byte
switch val := scalarsIf.(type) {
case []byte:
scalar = (*C.byte)(&val[workItem*nbytes])
case [][]byte:
scalar = scalars[workItem]
case []Scalar:
if nbits > 248 {
scalar = &val[workItem].cgo.b[0]
} else {
scalar = scalars[workItem]
}
case []*Scalar:
scalar = scalars[workItem]
default: // type is already vetted
}
C.go_p1_mult_n_acc(&acc.cgo, &point.cgo.x, true,
scalar, C.size_t(nbits))
}
msgs <- acc
}()
}
ret := <-msgs
for tid := 1; tid < numThreads; tid++ {
point := <-msgs
C.blst_p1_add_or_double(&ret.cgo, &ret.cgo, &point.cgo)
}
for i := range scalars {
scalars[i] = nil
}
return &ret
}
// this is sizeof(scratch[0])
sz := int(C.blst_p1s_mult_pippenger_scratch_sizeof(0)) / 8
nx, ny, window := breakdown(nbits, pippenger_window_size(npoints),
numThreads)
// |grid[]| holds "coordinates" and place for result
grid := make([]struct {
x, dx, y, dy int
point P1
}, nx*ny)
dx := npoints / nx
y := window * (ny - 1)
total := 0
for ; total < nx; total++ {
grid[total].x = total * dx
grid[total].dx = dx
grid[total].y = y
grid[total].dy = nbits - y
}
grid[total-1].dx = npoints - grid[total-1].x
for y > 0 {
y -= window
for i := 0; i < nx; i++ {
grid[total].x = grid[i].x
grid[total].dx = grid[i].dx
grid[total].y = y
grid[total].dy = window
total++
}
}
if numThreads > total {
numThreads = total
}
msgsCh := make(chan int, ny)
rowSync := make([]int32, ny) // count up to |nx|
curItem := int32(0)
for tid := 0; tid < numThreads; tid++ {
go func() {
scratch := make([]uint64, sz<= total {
break
}
x := grid[workItem].x
y := grid[workItem].y
var p_points **C.blst_p1_affine
switch val := pointsIf.(type) {
case []*P1Affine:
p_points = (**C.blst_p1_affine)(unsafe.Pointer(&val[x]))
case []P1Affine:
pointsBySlice[0] = &val[x].cgo
p_points = &pointsBySlice[0]
case P1Affines:
pointsBySlice[0] = &val[x].cgo
p_points = &pointsBySlice[0]
default: // type is already vetted
}
var p_scalars **C.byte
switch val := scalarsIf.(type) {
case []byte:
scalarsBySlice[0] = (*C.byte)(&val[x*nbytes])
p_scalars = &scalarsBySlice[0]
case [][]byte:
p_scalars = &scalars[x]
case []Scalar:
if nbits > 248 {
scalarsBySlice[0] = &val[x].cgo.b[0]
p_scalars = &scalarsBySlice[0]
} else {
p_scalars = &scalars[x]
}
case []*Scalar:
p_scalars = &scalars[x]
default: // type is already vetted
}
C.blst_p1s_tile_pippenger(&grid[workItem].point.cgo,
p_points, C.size_t(grid[workItem].dx),
p_scalars, C.size_t(nbits),
(*C.limb_t)(&scratch[0]),
C.size_t(y), C.size_t(window))
if atomic.AddInt32(&rowSync[y/window], 1) == int32(nx) {
msgsCh <- y // "row" is done
} else {
runtime.Gosched() // be nice to the application
}
}
pointsBySlice[0] = nil
scalarsBySlice[0] = nil
}()
}
var ret P1
rows := make([]bool, ny)
row := 0 // actually index in |grid[]|
for i := 0; i < ny; i++ { // we expect |ny| messages, one per "row"
y := <-msgsCh
rows[y/window] = true // mark the "row"
for grid[row].y == y { // if it's current "row", process it
for row < total && grid[row].y == y {
C.blst_p1_add_or_double(&ret.cgo, &ret.cgo, &grid[row].point.cgo)
row++
}
if y == 0 {
break // one can as well 'return &ret' here
}
for j := 0; j < window; j++ {
C.blst_p1_double(&ret.cgo, &ret.cgo)
}
y -= window
if !rows[y/window] { // see if next "row" was marked already
break
}
}
}
for i := range scalars {
scalars[i] = nil
}
return &ret
}
// Mult is a convenience wrapper around P1AffinesMult.
func (points P1Affines) Mult(scalarsIf interface{}, nbits int) *P1 {
return P1AffinesMult(points, scalarsIf, nbits)
}
// Mult converts the projective points to affine and multi-scalar
// multiplies them.
func (points P1s) Mult(scalarsIf interface{}, nbits int) *P1 {
return points.ToAffine().Mult(scalarsIf, nbits)
}
//
// Group-check
//
// P1AffinesValidate group-checks every point in the container (accepted
// as []*P1Affine, []P1Affine or P1Affines; other types panic), spreading
// the work over numThreads goroutines when worthwhile. Returns false as
// soon as any point fails the C-side validation.
func P1AffinesValidate(pointsIf interface{}) bool {
var npoints int
switch val := pointsIf.(type) {
case []*P1Affine:
npoints = len(val)
case []P1Affine:
npoints = len(val)
case P1Affines:
npoints = len(val)
default:
panic(fmt.Sprintf("unsupported type %T", val))
}
numThreads := numThreads(npoints)
if numThreads < 2 {
// Sequential fallback for small inputs or a single-thread budget.
for i := 0; i < npoints; i++ {
var point *P1Affine
switch val := pointsIf.(type) {
case []*P1Affine:
point = val[i]
case []P1Affine:
point = &val[i]
case P1Affines:
point = &val[i]
default:
panic(fmt.Sprintf("unsupported type %T", val))
}
if !C.go_p1_affine_validate(&point.cgo, true) {
return false
}
}
return true
}
// |valid| is a shared abort flag: one failure stops all workers.
valid := int32(1)
curItem := uint32(0)
var wg sync.WaitGroup
wg.Add(numThreads)
for tid := 0; tid < numThreads; tid++ {
go func() {
for atomic.LoadInt32(&valid) != 0 {
// Claim the next index atomically.
work := atomic.AddUint32(&curItem, 1) - 1
if work >= uint32(npoints) {
break
}
var point *P1Affine
switch val := pointsIf.(type) {
case []*P1Affine:
point = val[work]
case []P1Affine:
point = &val[work]
case P1Affines:
point = &val[work]
default:
panic(fmt.Sprintf("unsupported type %T", val))
}
if !C.go_p1_affine_validate(&point.cgo, true) {
atomic.StoreInt32(&valid, 0)
break
}
}
wg.Done()
}()
}
wg.Wait()
return atomic.LoadInt32(&valid) != 0
}
// Validate is a convenience wrapper around P1AffinesValidate.
func (points P1Affines) Validate() bool {
return P1AffinesValidate(points)
}
// PairingAggregatePkInG2 folds one (pk, sig, msg[, aug]) tuple into the
// pairing context via the C-side check-and-aggregate primitive; returns a
// BLST_* error code.
func PairingAggregatePkInG2(ctx Pairing, PK *P2Affine, pkValidate bool,
sig *P1Affine, sigGroupcheck bool, msg []byte,
optional ...[]byte) int { // aug
var aug []byte
if len(optional) > 0 {
aug = optional[0]
}
r := C.blst_pairing_chk_n_aggr_pk_in_g2(&ctx[0],
PK.asPtr(), C.bool(pkValidate),
sig.asPtr(), C.bool(sigGroupcheck),
ptrOrNil(msg), C.size_t(len(msg)),
ptrOrNil(aug), C.size_t(len(aug)))
return int(r)
}
// PairingMulNAggregatePkInG2 folds one (pk, sig, msg[, aug]) tuple into
// the pairing context, blinding it by the |randBits|-bit scalar |rand|
// (used in batch verification); returns a BLST_* error code.
func PairingMulNAggregatePkInG2(ctx Pairing, PK *P2Affine, pkValidate bool,
sig *P1Affine, sigGroupcheck bool,
rand *Scalar, randBits int, msg []byte,
optional ...[]byte) int { // aug
var aug []byte
if len(optional) > 0 {
aug = optional[0]
}
r := C.blst_pairing_chk_n_mul_n_aggr_pk_in_g2(&ctx[0],
PK.asPtr(), C.bool(pkValidate),
sig.asPtr(), C.bool(sigGroupcheck),
&rand.cgo.b[0], C.size_t(randBits),
ptrOrNil(msg), C.size_t(len(msg)),
ptrOrNil(aug), C.size_t(len(aug)))
return int(r)
}
//
// Serialization/Deserialization.
//

// P2 Serdes

// Serialize returns the uncompressed encoding of the affine point,
// BLST_P2_SERIALIZE_BYTES long.
func (p2 *P2Affine) Serialize() []byte {
	var out [BLST_P2_SERIALIZE_BYTES]byte
	C.blst_p2_affine_serialize((*C.byte)(&out[0]), &p2.cgo)
	return out[:]
}

// Deserialize parses an uncompressed encoding into p2. Returns p2 on
// success, nil on length mismatch or when the C decoder rejects the input.
func (p2 *P2Affine) Deserialize(in []byte) *P2Affine {
	if len(in) != BLST_P2_SERIALIZE_BYTES {
		return nil
	}
	if C.blst_p2_deserialize(&p2.cgo, (*C.byte)(&in[0])) != C.BLST_SUCCESS {
		return nil
	}
	return p2
}

// Compress returns the compressed encoding of the affine point,
// BLST_P2_COMPRESS_BYTES long.
func (p2 *P2Affine) Compress() []byte {
	var out [BLST_P2_COMPRESS_BYTES]byte
	C.blst_p2_affine_compress((*C.byte)(&out[0]), &p2.cgo)
	return out[:]
}

// Uncompress parses a compressed encoding into p2. Returns p2 on
// success, nil on length mismatch or decode failure.
func (p2 *P2Affine) Uncompress(in []byte) *P2Affine {
	if len(in) != BLST_P2_COMPRESS_BYTES {
		return nil
	}
	if C.blst_p2_uncompress(&p2.cgo, (*C.byte)(&in[0])) != C.BLST_SUCCESS {
		return nil
	}
	return p2
}

// InG2 reports whether the point is in the G2 subgroup.
func (p2 *P2Affine) InG2() bool {
	return bool(C.blst_p2_affine_in_g2(&p2.cgo))
}
// BatchUncompress uncompresses a batch of encodings in parallel.
// Returns a slice of pointers into one backing array (convenient for
// functions such as MultipleAggregateVerify), or nil if any element
// fails to uncompress.
func (*P2Affine) BatchUncompress(in [][]byte) []*P2Affine {
	// Allocate space for all of the resulting points. Later we'll save pointers
	// and return those so that the result could be used in other functions,
	// such as MultipleAggregateVerify.
	n := len(in)
	points := make([]P2Affine, n)
	pointsPtrs := make([]*P2Affine, n)

	numThreads := numThreads(n)

	// Each thread will determine next message to process by atomically
	// incrementing curItem, process corresponding point, and
	// repeat until n is exceeded. Each thread will send a result (true for
	// success, false for failure) into the channel when complete.
	resCh := make(chan bool, numThreads)
	valid := int32(1) // shared early-abort flag; 0 once any decode fails
	curItem := uint32(0)
	for tid := 0; tid < numThreads; tid++ {
		go func() {
			for atomic.LoadInt32(&valid) > 0 {
				// Get a work item
				work := atomic.AddUint32(&curItem, 1) - 1
				if work >= uint32(n) {
					break
				}
				if points[work].Uncompress(in[work]) == nil {
					atomic.StoreInt32(&valid, 0)
					break
				}
				pointsPtrs[work] = &points[work]
			}
			if atomic.LoadInt32(&valid) > 0 {
				resCh <- true
			} else {
				resCh <- false
			}
		}()
	}

	// Collect the threads
	result := true
	for i := 0; i < numThreads; i++ {
		if !<-resCh {
			result = false
		}
	}
	if atomic.LoadInt32(&valid) == 0 || !result {
		return nil
	}

	return pointsPtrs
}
// Serialize returns the uncompressed encoding of the (projective) point;
// the C routine converts to affine internally.
func (p2 *P2) Serialize() []byte {
	var out [BLST_P2_SERIALIZE_BYTES]byte
	C.blst_p2_serialize((*C.byte)(&out[0]), &p2.cgo)
	return out[:]
}

// Compress returns the compressed encoding of the (projective) point.
func (p2 *P2) Compress() []byte {
	var out [BLST_P2_COMPRESS_BYTES]byte
	C.blst_p2_compress((*C.byte)(&out[0]), &p2.cgo)
	return out[:]
}
// MultAssign multiplies p2 in place by a scalar given either as big-endian?
// little-endian bytes or *Scalar. The optional int overrides the scalar
// bit-length passed to the C multiplier. Returns p2 for chaining.
// NOTE(review): byte-order of the []byte form follows blst_p2_mult's
// convention — confirm against blst.h before relying on it.
func (p2 *P2) MultAssign(scalarIf interface{}, optional ...int) *P2 {
	var nbits int
	var scalar *C.byte
	switch val := scalarIf.(type) {
	case []byte:
		scalar = (*C.byte)(&val[0])
		nbits = len(val) * 8
	case *Scalar:
		scalar = &val.cgo.b[0]
		nbits = 255 // full scalar width for BLS12-381 order
	default:
		panic(fmt.Sprintf("unsupported type %T", val))
	}
	if len(optional) > 0 {
		nbits = optional[0]
	}
	C.blst_p2_mult(&p2.cgo, &p2.cgo, scalar, C.size_t(nbits))
	return p2
}

// Mult returns p2 * scalar without modifying p2.
func (p2 *P2) Mult(scalarIf interface{}, optional ...int) *P2 {
	ret := *p2 // operate on a copy; MultAssign returns its receiver
	return ret.MultAssign(scalarIf, optional...)
}

// AddAssign adds a *P2 or *P2Affine point to p2 in place.
func (p2 *P2) AddAssign(pointIf interface{}) *P2 {
	switch val := pointIf.(type) {
	case *P2:
		C.blst_p2_add_or_double(&p2.cgo, &p2.cgo, &val.cgo)
	case *P2Affine:
		C.blst_p2_add_or_double_affine(&p2.cgo, &p2.cgo, &val.cgo)
	default:
		panic(fmt.Sprintf("unsupported type %T", val))
	}
	return p2
}

// Add returns p2 + point without modifying p2.
func (p2 *P2) Add(pointIf interface{}) *P2 {
	ret := *p2
	return ret.AddAssign(pointIf)
}

// SubAssign subtracts a *P2 or *P2Affine point from p2 in place.
// The C helper receives the address of the point's x coordinate, which is
// the first field of both struct layouts, plus an affine/projective flag.
func (p2 *P2) SubAssign(pointIf interface{}) *P2 {
	var x *C.blst_fp2
	var affine C.bool
	switch val := pointIf.(type) {
	case *P2:
		x = &val.cgo.x
		affine = false
	case *P2Affine:
		x = &val.cgo.x
		affine = true
	default:
		panic(fmt.Sprintf("unsupported type %T", val))
	}
	C.go_p2_sub_assign(&p2.cgo, x, affine)
	return p2
}

// Sub returns p2 - point without modifying p2.
func (p2 *P2) Sub(pointIf interface{}) *P2 {
	ret := *p2
	return ret.SubAssign(pointIf)
}

// P2Generator returns the cached G2 group generator.
// NOTE(review): callers share one package-level value; mutating the
// returned point would corrupt it for everyone.
func P2Generator() *P2 {
	return &cgo_p2Generator
}
// MultNAccumulate computes 'acc += point * scalar'. Passing nil for
// pointIf means "use the group generator point". The scalar may be raw
// bytes or *Scalar; the optional int overrides the scalar bit-length.
func (acc *P2) MultNAccumulate(pointIf interface{}, scalarIf interface{},
	optional ...int) *P2 {
	// x stays nil for a nil pointIf, which the C helper interprets as
	// the generator (see go_p2_mult_n_acc).
	var x *C.blst_fp2
	var affine C.bool
	if pointIf != nil {
		switch val := pointIf.(type) {
		case *P2:
			x = &val.cgo.x
			affine = false
		case *P2Affine:
			x = &val.cgo.x
			affine = true
		default:
			panic(fmt.Sprintf("unsupported type %T", val))
		}
	}

	var nbits int
	var scalar *C.byte
	switch val := scalarIf.(type) {
	case []byte:
		scalar = (*C.byte)(&val[0])
		nbits = len(val) * 8
	case *Scalar:
		scalar = &val.cgo.b[0]
		nbits = 255 // full scalar width for BLS12-381 order
	default:
		panic(fmt.Sprintf("unsupported type %T", val))
	}
	if len(optional) > 0 {
		nbits = optional[0]
	}

	C.go_p2_mult_n_acc(&acc.cgo, x, affine, scalar, C.size_t(nbits))
	return acc
}
//
// Affine
//

// ToAffine converts the projective point to affine coordinates.
func (p *P2) ToAffine() *P2Affine {
	var pa P2Affine
	C.blst_p2_to_affine(&pa.cgo, &p.cgo)
	return &pa
}

// FromAffine sets p to the projective form of pa.
func (p *P2) FromAffine(pa *P2Affine) {
	C.blst_p2_from_affine(&p.cgo, &pa.cgo)
}

// Hash

// HashToG2 hashes msg (with domain-separation tag dst and optional
// augmentation bytes) to a point in G2, per hash-to-curve.
func HashToG2(msg []byte, dst []byte,
	optional ...[]byte) *P2 { // aug
	var q P2

	var aug []byte
	if len(optional) > 0 {
		aug = optional[0]
	}

	C.blst_hash_to_g2(&q.cgo, ptrOrNil(msg), C.size_t(len(msg)),
		ptrOrNil(dst), C.size_t(len(dst)),
		ptrOrNil(aug), C.size_t(len(aug)))
	return &q
}

// EncodeToG2 is the non-uniform ("encode") variant of HashToG2.
func EncodeToG2(msg []byte, dst []byte,
	optional ...[]byte) *P2 { // aug
	var q P2

	var aug []byte
	if len(optional) > 0 {
		aug = optional[0]
	}

	C.blst_encode_to_g2(&q.cgo, ptrOrNil(msg), C.size_t(len(msg)),
		ptrOrNil(dst), C.size_t(len(dst)),
		ptrOrNil(aug), C.size_t(len(aug)))
	return &q
}
//
// Multi-point/scalar operations
//

// P2sToAffine batch-converts a slice of point pointers to affine form in
// one C call. The optional int caps the number of points converted.
func P2sToAffine(points []*P2, optional ...int) P2Affines {
	var npoints int
	if len(optional) > 0 {
		npoints = optional[0]
	} else {
		npoints = len(points)
	}
	ret := make([]P2Affine, npoints)
	// Shadow cgo's pointer-passing check: |points| holds Go pointers,
	// but the callee only reads through them during this call.
	_cgoCheckPointer := func(...interface{}) {}
	C.blst_p2s_to_affine(&ret[0].cgo, (**C.blst_p2)(unsafe.Pointer(&points[0])),
		C.size_t(npoints))
	return ret
}

// ToAffine batch-converts the slice to affine form, splitting into
// ~512-point slices across up to |maxProcs| goroutines. The optional
// output slice (used in benchmarks) avoids reallocation.
func (points P2s) ToAffine(optional ...P2Affines) P2Affines {
	npoints := len(points)
	var ret P2Affines

	if len(optional) > 0 { // used in benchmark
		ret = optional[0]
		if len(ret) < npoints {
			panic("npoints mismatch")
		}
	} else {
		ret = make([]P2Affine, npoints)
	}

	// Small inputs aren't worth the goroutine overhead.
	if maxProcs < 2 || npoints < 768 {
		C.go_p2slice_to_affine(&ret[0].cgo, &points[0].cgo, C.size_t(npoints))
		return ret
	}

	nslices := (npoints + 511) / 512
	if nslices > maxProcs {
		nslices = maxProcs
	}
	// First |rem| slices get |delta| points, the rest |delta|-1, so the
	// slices tile |npoints| exactly.
	delta, rem := npoints/nslices+1, npoints%nslices

	var wg sync.WaitGroup
	wg.Add(nslices)
	for x := 0; x < npoints; x += delta {
		if rem == 0 {
			delta -= 1
		}
		rem -= 1
		go func(out *P2Affine, inp *P2, delta int) {
			C.go_p2slice_to_affine(&out.cgo, &inp.cgo, C.size_t(delta))
			wg.Done()
		}(&ret[x], &points[x], delta)
	}
	wg.Wait()

	return ret
}
//
// Batch addition
//

// P2AffinesAdd sums a slice of affine point pointers in one C call.
// The optional int caps the number of points added.
func P2AffinesAdd(points []*P2Affine, optional ...int) *P2 {
	var npoints int
	if len(optional) > 0 {
		npoints = optional[0]
	} else {
		npoints = len(points)
	}
	var ret P2
	// Shadow cgo's pointer-passing check; callee only reads the points.
	_cgoCheckPointer := func(...interface{}) {}
	C.blst_p2s_add(&ret.cgo, (**C.blst_p2_affine)(unsafe.Pointer(&points[0])),
		C.size_t(npoints))
	return &ret
}

// Add sums the slice, splitting into ~512-point slices across up to
// |maxProcs| goroutines and combining the partial sums.
func (points P2Affines) Add() *P2 {
	npoints := len(points)
	if maxProcs < 2 || npoints < 768 {
		// Sequential path for small inputs.
		var ret P2
		C.go_p2slice_add(&ret.cgo, &points[0].cgo, C.size_t(npoints))
		return &ret
	}

	nslices := (npoints + 511) / 512
	if nslices > maxProcs {
		nslices = maxProcs
	}
	// First |rem| slices get |delta| points, the rest |delta|-1.
	delta, rem := npoints/nslices+1, npoints%nslices

	msgs := make(chan P2, nslices)
	for x := 0; x < npoints; x += delta {
		if rem == 0 {
			delta -= 1
		}
		rem -= 1
		go func(points *P2Affine, delta int) {
			var ret P2
			C.go_p2slice_add(&ret.cgo, &points.cgo, C.size_t(delta))
			msgs <- ret
		}(&points[x], delta)
	}

	// Fold the per-slice partial sums into one point.
	ret := <-msgs
	for i := 1; i < nslices; i++ {
		msg := <-msgs
		C.blst_p2_add_or_double(&ret.cgo, &ret.cgo, &msg.cgo)
	}
	return &ret
}

// Add converts to affine and sums.
func (points P2s) Add() *P2 {
	return points.ToAffine().Add()
}
//
// Multi-scalar multiplication
//
func P2AffinesMult(pointsIf interface{}, scalarsIf interface{}, nbits int) *P2 {
var npoints int
switch val := pointsIf.(type) {
case []*P2Affine:
npoints = len(val)
case []P2Affine:
npoints = len(val)
case P2Affines:
npoints = len(val)
default:
panic(fmt.Sprintf("unsupported type %T", val))
}
nbytes := (nbits + 7) / 8
var scalars []*C.byte
switch val := scalarsIf.(type) {
case []byte:
if len(val) < npoints*nbytes {
return nil
}
case [][]byte:
if len(val) < npoints {
return nil
}
scalars = make([]*C.byte, npoints)
for i := range scalars {
scalars[i] = (*C.byte)(&val[i][0])
}
case []Scalar:
if len(val) < npoints {
return nil
}
if nbits <= 248 {
scalars = make([]*C.byte, npoints)
for i := range scalars {
scalars[i] = &val[i].cgo.b[0]
}
}
case []*Scalar:
if len(val) < npoints {
return nil
}
scalars = make([]*C.byte, npoints)
for i := range scalars {
scalars[i] = &val[i].cgo.b[0]
}
default:
panic(fmt.Sprintf("unsupported type %T", val))
}
numThreads := numThreads(0)
if numThreads < 2 {
sz := int(C.blst_p2s_mult_pippenger_scratch_sizeof(C.size_t(npoints))) / 8
scratch := make([]uint64, sz)
pointsBySlice := [2]*C.blst_p2_affine{nil, nil}
var p_points **C.blst_p2_affine
switch val := pointsIf.(type) {
case []*P2Affine:
p_points = (**C.blst_p2_affine)(unsafe.Pointer(&val[0]))
case []P2Affine:
pointsBySlice[0] = &val[0].cgo
p_points = &pointsBySlice[0]
case P2Affines:
pointsBySlice[0] = &val[0].cgo
p_points = &pointsBySlice[0]
default: // type is already vetted
}
scalarsBySlice := [2]*C.byte{nil, nil}
var p_scalars **C.byte
switch val := scalarsIf.(type) {
case []byte:
scalarsBySlice[0] = (*C.byte)(&val[0])
p_scalars = &scalarsBySlice[0]
case [][]byte:
p_scalars = &scalars[0]
case []Scalar:
if nbits > 248 {
scalarsBySlice[0] = &val[0].cgo.b[0]
p_scalars = &scalarsBySlice[0]
} else {
p_scalars = &scalars[0]
}
case []*Scalar:
p_scalars = &scalars[0]
default: // type is already vetted
}
var ret P2
_cgoCheckPointer := func(...interface{}) {}
C.blst_p2s_mult_pippenger(&ret.cgo, p_points, C.size_t(npoints),
p_scalars, C.size_t(nbits),
(*C.limb_t)(&scratch[0]))
for i := range scalars {
scalars[i] = nil
}
return &ret
}
if npoints < 32 {
if numThreads > npoints {
numThreads = npoints
}
curItem := uint32(0)
msgs := make(chan P2, numThreads)
for tid := 0; tid < numThreads; tid++ {
go func() {
var acc P2
for {
workItem := int(atomic.AddUint32(&curItem, 1) - 1)
if workItem >= npoints {
break
}
var point *P2Affine
switch val := pointsIf.(type) {
case []*P2Affine:
point = val[workItem]
case []P2Affine:
point = &val[workItem]
case P2Affines:
point = &val[workItem]
default: // type is already vetted
}
var scalar *C.byte
switch val := scalarsIf.(type) {
case []byte:
scalar = (*C.byte)(&val[workItem*nbytes])
case [][]byte:
scalar = scalars[workItem]
case []Scalar:
if nbits > 248 {
scalar = &val[workItem].cgo.b[0]
} else {
scalar = scalars[workItem]
}
case []*Scalar:
scalar = scalars[workItem]
default: // type is already vetted
}
C.go_p2_mult_n_acc(&acc.cgo, &point.cgo.x, true,
scalar, C.size_t(nbits))
}
msgs <- acc
}()
}
ret := <-msgs
for tid := 1; tid < numThreads; tid++ {
point := <-msgs
C.blst_p2_add_or_double(&ret.cgo, &ret.cgo, &point.cgo)
}
for i := range scalars {
scalars[i] = nil
}
return &ret
}
// this is sizeof(scratch[0])
sz := int(C.blst_p2s_mult_pippenger_scratch_sizeof(0)) / 8
nx, ny, window := breakdown(nbits, pippenger_window_size(npoints),
numThreads)
// |grid[]| holds "coordinates" and place for result
grid := make([]struct {
x, dx, y, dy int
point P2
}, nx*ny)
dx := npoints / nx
y := window * (ny - 1)
total := 0
for ; total < nx; total++ {
grid[total].x = total * dx
grid[total].dx = dx
grid[total].y = y
grid[total].dy = nbits - y
}
grid[total-1].dx = npoints - grid[total-1].x
for y > 0 {
y -= window
for i := 0; i < nx; i++ {
grid[total].x = grid[i].x
grid[total].dx = grid[i].dx
grid[total].y = y
grid[total].dy = window
total++
}
}
if numThreads > total {
numThreads = total
}
msgsCh := make(chan int, ny)
rowSync := make([]int32, ny) // count up to |nx|
curItem := int32(0)
for tid := 0; tid < numThreads; tid++ {
go func() {
scratch := make([]uint64, sz<= total {
break
}
x := grid[workItem].x
y := grid[workItem].y
var p_points **C.blst_p2_affine
switch val := pointsIf.(type) {
case []*P2Affine:
p_points = (**C.blst_p2_affine)(unsafe.Pointer(&val[x]))
case []P2Affine:
pointsBySlice[0] = &val[x].cgo
p_points = &pointsBySlice[0]
case P2Affines:
pointsBySlice[0] = &val[x].cgo
p_points = &pointsBySlice[0]
default: // type is already vetted
}
var p_scalars **C.byte
switch val := scalarsIf.(type) {
case []byte:
scalarsBySlice[0] = (*C.byte)(&val[x*nbytes])
p_scalars = &scalarsBySlice[0]
case [][]byte:
p_scalars = &scalars[x]
case []Scalar:
if nbits > 248 {
scalarsBySlice[0] = &val[x].cgo.b[0]
p_scalars = &scalarsBySlice[0]
} else {
p_scalars = &scalars[x]
}
case []*Scalar:
p_scalars = &scalars[x]
default: // type is already vetted
}
C.blst_p2s_tile_pippenger(&grid[workItem].point.cgo,
p_points, C.size_t(grid[workItem].dx),
p_scalars, C.size_t(nbits),
(*C.limb_t)(&scratch[0]),
C.size_t(y), C.size_t(window))
if atomic.AddInt32(&rowSync[y/window], 1) == int32(nx) {
msgsCh <- y // "row" is done
} else {
runtime.Gosched() // be nice to the application
}
}
pointsBySlice[0] = nil
scalarsBySlice[0] = nil
}()
}
var ret P2
rows := make([]bool, ny)
row := 0 // actually index in |grid[]|
for i := 0; i < ny; i++ { // we expect |ny| messages, one per "row"
y := <-msgsCh
rows[y/window] = true // mark the "row"
for grid[row].y == y { // if it's current "row", process it
for row < total && grid[row].y == y {
C.blst_p2_add_or_double(&ret.cgo, &ret.cgo, &grid[row].point.cgo)
row++
}
if y == 0 {
break // one can as well 'return &ret' here
}
for j := 0; j < window; j++ {
C.blst_p2_double(&ret.cgo, &ret.cgo)
}
y -= window
if !rows[y/window] { // see if next "row" was marked already
break
}
}
}
for i := range scalars {
scalars[i] = nil
}
return &ret
}
// Mult computes the multi-scalar multiplication of the slice; see
// P2AffinesMult for accepted scalar containers.
func (points P2Affines) Mult(scalarsIf interface{}, nbits int) *P2 {
	return P2AffinesMult(points, scalarsIf, nbits)
}

// Mult converts to affine and multi-scalar multiplies.
func (points P2s) Mult(scalarsIf interface{}, nbits int) *P2 {
	return points.ToAffine().Mult(scalarsIf, nbits)
}
//
// Group-check
//

// P2AffinesValidate reports whether every point in the container is a
// member of the G2 subgroup (points at infinity are rejected). Accepts
// []*P2Affine, []P2Affine or P2Affines; panics on other types. The work
// is spread over goroutines when more than one thread is available.
func P2AffinesValidate(pointsIf interface{}) bool {
	var npoints int
	switch val := pointsIf.(type) {
	case []*P2Affine:
		npoints = len(val)
	case []P2Affine:
		npoints = len(val)
	case P2Affines:
		npoints = len(val)
	default:
		panic(fmt.Sprintf("unsupported type %T", val))
	}

	numThreads := numThreads(npoints)

	if numThreads < 2 {
		// Sequential path: stop at the first invalid point.
		for i := 0; i < npoints; i++ {
			var point *P2Affine

			switch val := pointsIf.(type) {
			case []*P2Affine:
				point = val[i]
			case []P2Affine:
				point = &val[i]
			case P2Affines:
				point = &val[i]
			default:
				panic(fmt.Sprintf("unsupported type %T", val))
			}

			if !C.go_p2_affine_validate(&point.cgo, true) {
				return false
			}
		}

		return true
	}

	valid := int32(1)  // shared early-abort flag
	curItem := uint32(0)

	var wg sync.WaitGroup
	wg.Add(numThreads)

	for tid := 0; tid < numThreads; tid++ {
		go func() {
			// Claim points one at a time until exhausted or some
			// goroutine found an invalid point.
			for atomic.LoadInt32(&valid) != 0 {
				work := atomic.AddUint32(&curItem, 1) - 1
				if work >= uint32(npoints) {
					break
				}

				var point *P2Affine

				switch val := pointsIf.(type) {
				case []*P2Affine:
					point = val[work]
				case []P2Affine:
					point = &val[work]
				case P2Affines:
					point = &val[work]
				default:
					panic(fmt.Sprintf("unsupported type %T", val))
				}

				if !C.go_p2_affine_validate(&point.cgo, true) {
					atomic.StoreInt32(&valid, 0)
					break
				}
			}

			wg.Done()
		}()
	}

	wg.Wait()

	return atomic.LoadInt32(&valid) != 0
}

// Validate reports whether every point in the slice passes the G2
// group-membership check.
func (points P2Affines) Validate() bool {
	return P2AffinesValidate(points)
}
// parseOpts unpacks the variadic options accepted by the sign/verify
// helpers. Recognized argument types:
//
//	[]byte   - single augmentation blob (augSingle)
//	[][]byte - per-message augmentation bytes (aug); default nil
//	bool     - hash-to-curve (true) vs. encode-to-curve (false)
//
// useHash defaults to true. ok is false when an argument of any other
// type is encountered; later arguments of a recognized type overwrite
// earlier ones.
func parseOpts(optional ...interface{}) (augSingle []byte, aug [][]byte,
	useHash bool, ok bool) {
	useHash = true // hash (true), encode (false)
	ok = true
	for _, opt := range optional {
		switch v := opt.(type) {
		case []byte:
			augSingle = v
		case [][]byte:
			aug = v
		case bool:
			useHash = v
		default: // unrecognized option type
			return nil, nil, useHash, false
		}
	}
	return
}
// These methods are inefficient because of cgo call overhead. For this
// reason they should be used primarily for prototyping with a goal to
// formulate interfaces that would process multiple scalars per cgo call.

// MulAssign sets a = a*b mod the group order; the bool reports whether
// the C routine accepted the operands (see blst_sk_mul_n_check).
func (a *Scalar) MulAssign(b *Scalar) (*Scalar, bool) {
	return a, bool(C.blst_sk_mul_n_check(&a.cgo, &a.cgo, &b.cgo))
}

// Mul returns a*b in a fresh Scalar, plus the C routine's check result.
func (a *Scalar) Mul(b *Scalar) (*Scalar, bool) {
	var ret Scalar
	return &ret, bool(C.blst_sk_mul_n_check(&ret.cgo, &a.cgo, &b.cgo))
}

// AddAssign sets a = a+b; bool as in MulAssign.
func (a *Scalar) AddAssign(b *Scalar) (*Scalar, bool) {
	return a, bool(C.blst_sk_add_n_check(&a.cgo, &a.cgo, &b.cgo))
}

// Add returns a+b in a fresh Scalar.
func (a *Scalar) Add(b *Scalar) (*Scalar, bool) {
	var ret Scalar
	return &ret, bool(C.blst_sk_add_n_check(&ret.cgo, &a.cgo, &b.cgo))
}

// SubAssign sets a = a-b; bool as in MulAssign.
func (a *Scalar) SubAssign(b *Scalar) (*Scalar, bool) {
	return a, bool(C.blst_sk_sub_n_check(&a.cgo, &a.cgo, &b.cgo))
}

// Sub returns a-b in a fresh Scalar.
func (a *Scalar) Sub(b *Scalar) (*Scalar, bool) {
	var ret Scalar
	return &ret, bool(C.blst_sk_sub_n_check(&ret.cgo, &a.cgo, &b.cgo))
}

// Inverse returns the modular inverse of a in a fresh Scalar.
func (a *Scalar) Inverse() *Scalar {
	var ret Scalar
	C.blst_sk_inverse(&ret.cgo, &a.cgo)
	return &ret
}
//
// Serialization/Deserialization.
//

// Scalar serdes

// Serialize returns the big-endian encoding of the scalar,
// BLST_SCALAR_BYTES long.
func (s *Scalar) Serialize() []byte {
	var out [BLST_SCALAR_BYTES]byte
	C.blst_bendian_from_scalar((*C.byte)(&out[0]), &s.cgo)
	return out[:]
}

// Deserialize parses a big-endian encoding into s; returns nil on
// length mismatch or when the value fails blst_sk_check inside the
// helper (see go_scalar_from_bendian).
func (s *Scalar) Deserialize(in []byte) *Scalar {
	if len(in) != BLST_SCALAR_BYTES ||
		!C.go_scalar_from_bendian(&s.cgo, (*C.byte)(&in[0])) {
		return nil
	}
	return s
}

// Valid reports whether the scalar passes blst_sk_check.
func (s *Scalar) Valid() bool {
	return bool(C.blst_sk_check(&s.cgo))
}

// HashTo hashes msg with domain-separation tag dst into s; reports
// success.
func (s *Scalar) HashTo(msg []byte, dst []byte) bool {
	ret := HashToScalar(msg, dst)
	if ret != nil {
		*s = *ret
		return true
	}
	return false
}

// HashToScalar expands msg with expand_message_xmd and reduces the
// result to a scalar (see go_hash_to_scalar); nil on failure.
func HashToScalar(msg []byte, dst []byte) *Scalar {
	var ret Scalar
	if C.go_hash_to_scalar(&ret.cgo, ptrOrNil(msg), C.size_t(len(msg)),
		ptrOrNil(dst), C.size_t(len(dst))) {
		return &ret
	}
	return nil
}
//
// LEndian
//

// ToLEndian returns the little-endian encoding of the scalar.
func (fr *Scalar) ToLEndian() []byte {
	var arr [BLST_SCALAR_BYTES]byte
	C.blst_lendian_from_scalar((*C.byte)(&arr[0]), &fr.cgo)
	return arr[:]
}

// ToLEndian returns the little-endian encoding of the field element.
func (fp *Fp) ToLEndian() []byte {
	var arr [BLST_FP_BYTES]byte
	C.blst_lendian_from_fp((*C.byte)(&arr[0]), &fp.cgo)
	return arr[:]
}

// FromLEndian parses little-endian bytes into fr. Inputs longer than
// BLST_SCALAR_BYTES are accepted and handed to the C routine
// (presumably reduced modulo the group order — TODO confirm against
// blst_scalar_from_le_bytes); nil on failure.
func (fr *Scalar) FromLEndian(arr []byte) *Scalar {
	nbytes := len(arr)
	if nbytes < BLST_SCALAR_BYTES ||
		!C.blst_scalar_from_le_bytes(&fr.cgo, (*C.byte)(&arr[0]), C.size_t(nbytes)) {
		return nil
	}
	return fr
}

// FromLEndian parses exactly BLST_FP_BYTES little-endian bytes into fp;
// nil on length mismatch.
func (fp *Fp) FromLEndian(arr []byte) *Fp {
	if len(arr) != BLST_FP_BYTES {
		return nil
	}
	C.blst_fp_from_lendian(&fp.cgo, (*C.byte)(&arr[0]))
	return fp
}

//
// BEndian
//

// ToBEndian returns the big-endian encoding of the scalar.
func (fr *Scalar) ToBEndian() []byte {
	var arr [BLST_SCALAR_BYTES]byte
	C.blst_bendian_from_scalar((*C.byte)(&arr[0]), &fr.cgo)
	return arr[:]
}

// ToBEndian returns the big-endian encoding of the field element.
func (fp *Fp) ToBEndian() []byte {
	var arr [BLST_FP_BYTES]byte
	C.blst_bendian_from_fp((*C.byte)(&arr[0]), &fp.cgo)
	return arr[:]
}

// FromBEndian parses big-endian bytes into fr; longer inputs accepted
// as in FromLEndian; nil on failure.
func (fr *Scalar) FromBEndian(arr []byte) *Scalar {
	nbytes := len(arr)
	if nbytes < BLST_SCALAR_BYTES ||
		!C.blst_scalar_from_be_bytes(&fr.cgo, (*C.byte)(&arr[0]), C.size_t(nbytes)) {
		return nil
	}
	return fr
}

// FromBEndian parses exactly BLST_FP_BYTES big-endian bytes into fp;
// nil on length mismatch.
func (fp *Fp) FromBEndian(arr []byte) *Fp {
	if len(arr) != BLST_FP_BYTES {
		return nil
	}
	C.blst_fp_from_bendian(&fp.cgo, (*C.byte)(&arr[0]))
	return fp
}
//
// Printing
//

// PrintBytes writes "name = <hex>" to stdout; debugging aid.
func PrintBytes(val []byte, name string) {
	fmt.Printf("%s = %02x\n", name, val)
}

// Print writes the scalar in big-endian hex.
func (s *Scalar) Print(name string) {
	arr := s.ToBEndian()
	PrintBytes(arr, name)
}

// Print writes the affine point's x and y coordinates in hex.
func (p *P1Affine) Print(name string) {
	fmt.Printf("%s:\n", name)
	x := Fp{p.cgo.x}
	arr := x.ToBEndian()
	PrintBytes(arr, "  x")
	y := Fp{p.cgo.y}
	arr = y.ToBEndian()
	PrintBytes(arr, "  y")
}

// Print converts to affine and prints.
// NOTE(review): the "name:" header is printed here and again inside
// aff.Print, so it appears twice; debug output only.
func (p *P1) Print(name string) {
	fmt.Printf("%s:\n", name)
	aff := p.ToAffine()
	aff.Print(name)
}

// Print writes both fp components of the Fp2 element in hex.
func (f *Fp2) Print(name string) {
	fmt.Printf("%s:\n", name)
	var arr [BLST_FP_BYTES]byte
	C.blst_bendian_from_fp((*C.byte)(&arr[0]), &f.cgo.fp[0])
	PrintBytes(arr[:], "  0")
	C.blst_bendian_from_fp((*C.byte)(&arr[0]), &f.cgo.fp[1])
	PrintBytes(arr[:], "  1")
}

// Print writes the affine point's x and y Fp2 coordinates.
func (p *P2Affine) Print(name string) {
	fmt.Printf("%s:\n", name)
	x := Fp2{p.cgo.x}
	x.Print("  x")
	y := Fp2{p.cgo.y}
	y.Print("  y")
}

// Print converts to affine and prints (header printed twice, as for P1).
func (p *P2) Print(name string) {
	fmt.Printf("%s:\n", name)
	aff := p.ToAffine()
	aff.Print(name)
}
//
// Equality
//

// Equals compares the raw representations byte-for-byte.
func (s1 *Scalar) Equals(s2 *Scalar) bool {
	return *s1 == *s2
}

// Equals compares the raw (Montgomery-form) representations.
func (e1 *Fp) Equals(e2 *Fp) bool {
	return *e1 == *e2
}

// Equals compares the raw representations.
func (e1 *Fp2) Equals(e2 *Fp2) bool {
	return *e1 == *e2
}

// Equals compares the points via the C library.
func (e1 *P1Affine) Equals(e2 *P1Affine) bool {
	return bool(C.blst_p1_affine_is_equal(&e1.cgo, &e2.cgo))
}

// asPtr returns the underlying C pointer, or nil for a nil receiver,
// so optional point arguments map to NULL on the C side.
func (pt *P1Affine) asPtr() *C.blst_p1_affine {
	if pt != nil {
		return &pt.cgo
	}
	return nil
}

// Equals compares projective points (coordinate-system aware).
func (e1 *P1) Equals(e2 *P1) bool {
	return bool(C.blst_p1_is_equal(&e1.cgo, &e2.cgo))
}

// Equals compares the points via the C library.
func (e1 *P2Affine) Equals(e2 *P2Affine) bool {
	return bool(C.blst_p2_affine_is_equal(&e1.cgo, &e2.cgo))
}

// asPtr returns the underlying C pointer, or nil for a nil receiver.
func (pt *P2Affine) asPtr() *C.blst_p2_affine {
	if pt != nil {
		return &pt.cgo
	}
	return nil
}

// Equals compares projective points (coordinate-system aware).
func (e1 *P2) Equals(e2 *P2) bool {
	return bool(C.blst_p2_is_equal(&e1.cgo, &e2.cgo))
}
// private thunk for testing

// expandMessageXmd runs the expand_message_xmd primitive (hash-to-curve
// building block) and returns len_in_bytes of output.
func expandMessageXmd(msg []byte, dst []byte, len_in_bytes int) []byte {
	ret := make([]byte, len_in_bytes)

	C.blst_expand_message_xmd((*C.byte)(&ret[0]), C.size_t(len(ret)),
		ptrOrNil(msg), C.size_t(len(msg)),
		ptrOrNil(dst), C.size_t(len(dst)))
	return ret
}
// breakdown picks the tiling of a Pippenger multi-scalar multiplication
// across |ncpus| goroutines: the scalar range of |nbits| bits is cut
// into |ny| rows of |wnd| bits, and the point range into |nx| slices,
// giving nx*ny independent tiles.
func breakdown(nbits, window, ncpus int) (nx int, ny int, wnd int) {
	if nbits > window*ncpus { //nolint:nestif
		// More bit-rows than CPUs even at one point-slice: keep nx = 1
		// and tune the window so rows distribute evenly over CPUs.
		nx = 1
		adjust := bits.Len(uint(ncpus) / 4)
		if window+adjust > 18 {
			wnd = window - adjust
		} else {
			rowsPerCPU := (nbits/window + ncpus - 1) / ncpus
			if (nbits/(window+1)+ncpus-1)/ncpus < rowsPerCPU {
				wnd = window + 1 // wider window lowers rows per CPU
			} else {
				wnd = window
			}
		}
	} else {
		// Grow the point-slice count until the grid covers all CPUs,
		// shrinking the window as nx grows; then back off one step.
		nx = 2
		wnd = window - 2
		for (nbits/wnd+1)*nx < ncpus {
			nx++
			wnd = window - bits.Len(3*uint(nx)/2)
		}
		nx--
		wnd = window - bits.Len(3*uint(nx)/2)
	}
	// Final row count and window width that exactly tile |nbits|.
	ny = nbits/wnd + 1
	wnd = nbits/ny + 1

	return nx, ny, wnd
}
// pippenger_window_size returns the bucket-window width (in bits) to
// use for a Pippenger run over |npoints| points, derived from the
// bit-length of the point count.
func pippenger_window_size(npoints int) int {
	switch wbits := bits.Len(uint(npoints)); {
	case wbits > 13:
		return wbits - 4
	case wbits > 5:
		return wbits - 3
	default:
		return 2
	}
}
================================================
FILE: bindings/go/blst.tgo
================================================
/*
* Copyright Supranational LLC
* Licensed under the Apache License, Version 2.0, see LICENSE for details.
* SPDX-License-Identifier: Apache-2.0
*/
package blst
// #cgo CFLAGS: -I${SRCDIR}/.. -I${SRCDIR}/../../build -I${SRCDIR}/../../src -D__BLST_CGO__ -fno-builtin-memcpy -fno-builtin-memset
// #cgo amd64 CFLAGS: -D__ADX__ -mno-avx
// // no-asm 64-bit platforms from https://go.dev/doc/install/source
// #cgo loong64 mips64 mips64le ppc64 ppc64le riscv64 s390x CFLAGS: -D__BLST_NO_ASM__
//
// #include "blst.h"
//
// #if defined(__x86_64__) && (defined(__unix__) || defined(__APPLE__))
// # include
// # include
// static void handler(int signum)
// { ssize_t n = write(2, "Caught SIGILL in blst_cgo_init, "
// "consult /bindings/go/README.md.\n", 70);
// _exit(128+SIGILL);
// (void)n;
// }
// __attribute__((constructor)) static void blst_cgo_init()
// { blst_fp temp = { 0 };
// struct sigaction act = { handler }, oact;
// sigaction(SIGILL, &act, &oact);
// blst_fp_sqr(&temp, &temp);
// sigaction(SIGILL, &oact, NULL);
// }
// #endif
//
// static void go_pairing_init(blst_pairing *new_ctx, bool hash_or_encode,
// const byte *DST, size_t DST_len)
// { if (DST != NULL) {
// byte *dst = (byte*)new_ctx + blst_pairing_sizeof();
// for(size_t i = 0; i < DST_len; i++) dst[i] = DST[i];
// DST = dst;
// }
// blst_pairing_init(new_ctx, hash_or_encode, DST, DST_len);
// }
// static void go_pairing_as_fp12(blst_fp12 *pt, blst_pairing *ctx)
// { *pt = *blst_pairing_as_fp12(ctx); }
//
// static void go_p1slice_to_affine(blst_p1_affine dst[],
// const blst_p1 points[], size_t npoints)
// { const blst_p1 *ppoints[2] = { points, NULL };
// blst_p1s_to_affine(dst, ppoints, npoints);
// }
// static void go_p1slice_add(blst_p1 *dst, const blst_p1_affine points[],
// size_t npoints)
// { const blst_p1_affine *ppoints[2] = { points, NULL };
// blst_p1s_add(dst, ppoints, npoints);
// }
// static void go_p2slice_to_affine(blst_p2_affine dst[],
// const blst_p2 points[], size_t npoints)
// { const blst_p2 *ppoints[2] = { points, NULL };
// blst_p2s_to_affine(dst, ppoints, npoints);
// }
// static void go_p2slice_add(blst_p2 *dst, const blst_p2_affine points[],
// size_t npoints)
// { const blst_p2_affine *ppoints[2] = { points, NULL };
// blst_p2s_add(dst, ppoints, npoints);
// }
//
// static void go_p1_mult_n_acc(blst_p1 *acc, const blst_fp *x, bool affine,
// const byte *scalar, size_t nbits)
// { blst_p1 m[1];
// const void *p = x;
// if (p == NULL)
// p = blst_p1_generator();
// else if (affine)
// blst_p1_from_affine(m, p), p = m;
// blst_p1_mult(m, p, scalar, nbits);
// blst_p1_add_or_double(acc, acc, m);
// }
// static void go_p2_mult_n_acc(blst_p2 *acc, const blst_fp2 *x, bool affine,
// const byte *scalar, size_t nbits)
// { blst_p2 m[1];
// const void *p = x;
// if (p == NULL)
// p = blst_p2_generator();
// else if (affine)
// blst_p2_from_affine(m, p), p = m;
// blst_p2_mult(m, p, scalar, nbits);
// blst_p2_add_or_double(acc, acc, m);
// }
//
// static void go_p1_sub_assign(blst_p1 *a, const blst_fp *x, bool affine)
// { blst_p1 minus_b;
// if (affine)
// blst_p1_from_affine(&minus_b, (const blst_p1_affine*)x);
// else
// minus_b = *(const blst_p1*)x;
// blst_p1_cneg(&minus_b, 1);
// blst_p1_add_or_double(a, a, &minus_b);
// }
//
// static void go_p2_sub_assign(blst_p2 *a, const blst_fp2 *x, bool affine)
// { blst_p2 minus_b;
// if (affine)
// blst_p2_from_affine(&minus_b, (const blst_p2_affine*)x);
// else
// minus_b = *(const blst_p2*)x;
// blst_p2_cneg(&minus_b, 1);
// blst_p2_add_or_double(a, a, &minus_b);
// }
//
// static bool go_scalar_from_bendian(blst_scalar *ret, const byte *in)
// { blst_scalar_from_bendian(ret, in);
// return blst_sk_check(ret);
// }
// static bool go_hash_to_scalar(blst_scalar *ret,
// const byte *msg, size_t msg_len,
// const byte *DST, size_t DST_len)
// { byte elem[48];
// blst_expand_message_xmd(elem, sizeof(elem), msg, msg_len, DST, DST_len);
// return blst_scalar_from_be_bytes(ret, elem, sizeof(elem));
// }
// static void go_miller_loop_n(blst_fp12 *dst, const blst_p2_affine Q[],
// const blst_p1_affine P[],
// size_t npoints, bool acc)
// { const blst_p2_affine *Qs[2] = { Q, NULL };
// const blst_p1_affine *Ps[2] = { P, NULL };
// if (acc) {
// blst_fp12 tmp;
// blst_miller_loop_n(&tmp, Qs, Ps, npoints);
// blst_fp12_mul(dst, dst, &tmp);
// } else {
// blst_miller_loop_n(dst, Qs, Ps, npoints);
// }
// }
// static void go_fp12slice_mul(blst_fp12 *dst, const blst_fp12 in[], size_t n)
// { size_t i;
// blst_fp12_mul(dst, &in[0], &in[1]);
// for (i = 2; i < n; i++)
// blst_fp12_mul(dst, dst, &in[i]);
// }
// static bool go_p1_affine_validate(const blst_p1_affine *p, bool infcheck)
// { if (infcheck && blst_p1_affine_is_inf(p))
// return 0;
// return blst_p1_affine_in_g1(p);
// }
// static bool go_p2_affine_validate(const blst_p2_affine *p, bool infcheck)
// { if (infcheck && blst_p2_affine_is_inf(p))
// return 0;
// return blst_p2_affine_in_g2(p);
// }
import "C"
import "runtime"
// Sizes of the serialized forms, in bytes.
const BLST_SCALAR_BYTES = 256 / 8
const BLST_FP_BYTES = 384 / 8
const BLST_P1_COMPRESS_BYTES = BLST_FP_BYTES
const BLST_P1_SERIALIZE_BYTES = BLST_FP_BYTES * 2

// G2 coordinates live in Fp2, so its encodings are twice as long.
const BLST_P2_COMPRESS_BYTES = BLST_FP_BYTES * 2
const BLST_P2_SERIALIZE_BYTES = BLST_FP_BYTES * 4

// Thin Go wrappers around the C structs; the single |cgo| field keeps
// the Go and C representations layout-identical.
type Scalar struct{ cgo C.blst_scalar }
type Fp struct{ cgo C.blst_fp }
type Fp2 struct{ cgo C.blst_fp2 }
type Fp6 = C.blst_fp6
type Fp12 struct{ cgo C.blst_fp12 }
type P1 struct{ cgo C.blst_p1 }
type P2 struct{ cgo C.blst_p2 }
type P1Affine struct{ cgo C.blst_p1_affine }
type P2Affine struct{ cgo C.blst_p2_affine }
type Message = []byte

// Pairing is a variable-length buffer: the C pairing context followed
// by the domain-separation tag (see go_pairing_init / pairingSizeOf).
type Pairing = []C.blst_pairing
type SecretKey = Scalar
type P1s []P1
type P2s []P2
type P1Affines []P1Affine
type P2Affines []P2Affine
//
// Configuration
//

// maxProcs caps the number of goroutines this package spawns.
var maxProcs = initMaxProcs()

// initMaxProcs derives the default cap from GOMAXPROCS, reserving one
// processor for the application on Go versions before 1.14 (whose
// scheduler lacked asynchronous preemption).
// NOTE(review): Sscanf with "go%f" parses e.g. "go1.21.0" as 1.21 and
// leaves the trailing ".0" unread — adequate for this comparison.
func initMaxProcs() int {
	maxProcs := runtime.GOMAXPROCS(0)
	var version float32
	_, err := fmt.Sscanf(runtime.Version(), "go%f", &version)
	if err != nil || version < 1.14 {
		// be cooperative and leave one processor for the application
		maxProcs -= 1
	}
	if maxProcs <= 0 {
		maxProcs = 1
	}
	return maxProcs
}

// SetMaxProcs overrides the goroutine cap; values <= 0 are clamped to 1.
func SetMaxProcs(procs int) {
	if procs <= 0 {
		procs = 1
	}
	maxProcs = procs
}

// numThreads returns how many goroutines to use for a job of
// |maxThreads| independent items (0 means "no job-imposed limit"),
// never exceeding the current GOMAXPROCS or |maxProcs|.
func numThreads(maxThreads int) int {
	numThreads := maxProcs

	// take into consideration the possility that application reduced
	// GOMAXPROCS after |maxProcs| was initialized
	numProcs := runtime.GOMAXPROCS(0)
	if maxProcs > numProcs {
		numThreads = numProcs
	}

	if maxThreads > 0 && numThreads > maxThreads {
		return maxThreads
	}
	return numThreads
}
// Constants fetched from the C library once at package initialization.
var cgo_pairingSizeOf = C.blst_pairing_sizeof()
var cgo_p1Generator = P1{*C.blst_p1_generator()}
var cgo_p2Generator = P2{*C.blst_p2_generator()}
var cgo_fp12One = Fp12{*C.blst_fp12_one()}
//
// Secret key
//

// Zeroize overwrites the secret key with zeros.
func (sk *SecretKey) Zeroize() {
	var zero SecretKey
	*sk = zero
}

// KeyGen derives a secret key from at least 32 bytes of input key
// material per the draft-irtf-cfrg-bls-signature KeyGen; optional info
// bytes are mixed in. Returns nil when ikm is too short.
func KeyGen(ikm []byte, optional ...[]byte) *SecretKey {
	var sk SecretKey
	var info []byte
	if len(optional) > 0 {
		info = optional[0]
	}
	if len(ikm) < 32 {
		return nil
	}
	C.blst_keygen(&sk.cgo, (*C.byte)(&ikm[0]), C.size_t(len(ikm)),
		ptrOrNil(info), C.size_t(len(info)))
	// Postponing secret key zeroing till garbage collection can be too
	// late to be effective, but every little bit helps...
	runtime.SetFinalizer(&sk, func(sk *SecretKey) { sk.Zeroize() })
	return &sk
}

// KeyGenV3 is the draft v3 key-generation variant.
func KeyGenV3(ikm []byte, optional ...[]byte) *SecretKey {
	if len(ikm) < 32 {
		return nil
	}
	var sk SecretKey
	var info []byte
	if len(optional) > 0 {
		info = optional[0]
	}
	C.blst_keygen_v3(&sk.cgo, (*C.byte)(&ikm[0]), C.size_t(len(ikm)),
		ptrOrNil(info), C.size_t(len(info)))
	// Postponing secret key zeroing till garbage collection can be too
	// late to be effective, but every little bit helps...
	runtime.SetFinalizer(&sk, func(sk *SecretKey) { sk.Zeroize() })
	return &sk
}

// KeyGenV45 is the draft v4.5 variant with a caller-supplied salt.
// NOTE(review): unlike KeyGenV5 this takes &salt[0] directly, so an
// empty salt panics here — presumably v4.5 requires a salt; confirm
// against blst.h before relaxing.
func KeyGenV45(ikm []byte, salt []byte, optional ...[]byte) *SecretKey {
	if len(ikm) < 32 {
		return nil
	}
	var sk SecretKey
	var info []byte
	if len(optional) > 0 {
		info = optional[0]
	}
	C.blst_keygen_v4_5(&sk.cgo, (*C.byte)(&ikm[0]), C.size_t(len(ikm)),
		(*C.byte)(&salt[0]), C.size_t(len(salt)),
		ptrOrNil(info), C.size_t(len(info)))
	// Postponing secret key zeroing till garbage collection can be too
	// late to be effective, but every little bit helps...
	runtime.SetFinalizer(&sk, func(sk *SecretKey) { sk.Zeroize() })
	return &sk
}

// KeyGenV5 is the draft v5 variant; an empty salt is mapped to a
// one-byte placeholder address with length 0 so the C call never sees
// a nil pointer.
func KeyGenV5(ikm []byte, salt []byte, optional ...[]byte) *SecretKey {
	if len(ikm) < 32 {
		return nil
	}
	var sk SecretKey
	var info []byte
	if len(optional) > 0 {
		info = optional[0]
	}
	saltLen := len(salt)
	if saltLen == 0 {
		salt = []byte{0}
	}
	C.blst_keygen_v5(&sk.cgo, (*C.byte)(&ikm[0]), C.size_t(len(ikm)),
		(*C.byte)(&salt[0]), C.size_t(saltLen),
		ptrOrNil(info), C.size_t(len(info)))
	// Postponing secret key zeroing till garbage collection can be too
	// late to be effective, but every little bit helps...
	runtime.SetFinalizer(&sk, func(sk *SecretKey) { sk.Zeroize() })
	return &sk
}

// DeriveMasterEip2333 derives an EIP-2333 master key from >= 32 bytes
// of seed material; nil when the seed is too short.
func DeriveMasterEip2333(ikm []byte) *SecretKey {
	if len(ikm) < 32 {
		return nil
	}
	var sk SecretKey
	C.blst_derive_master_eip2333(&sk.cgo, (*C.byte)(&ikm[0]), C.size_t(len(ikm)))
	// Postponing secret key zeroing till garbage collection can be too
	// late to be effective, but every little bit helps...
	runtime.SetFinalizer(&sk, func(sk *SecretKey) { sk.Zeroize() })
	return &sk
}

// DeriveChildEip2333 derives the EIP-2333 child key at child_index.
func (master *SecretKey) DeriveChildEip2333(child_index uint32) *SecretKey {
	var sk SecretKey
	C.blst_derive_child_eip2333(&sk.cgo, &master.cgo, C.uint(child_index))
	// Postponing secret key zeroing till garbage collection can be too
	// late to be effective, but every little bit helps...
	runtime.SetFinalizer(&sk, func(sk *SecretKey) { sk.Zeroize() })
	return &sk
}
//
// Pairing
//
func pairingSizeOf(DST_len C.size_t) int {
return int((cgo_pairingSizeOf + DST_len + 7) / 8)
}
func PairingCtx(hash_or_encode bool, DST []byte) Pairing {
DST_len := C.size_t(len(DST))
ctx := make([]C.blst_pairing, pairingSizeOf(DST_len))
C.go_pairing_init(&ctx[0], C.bool(hash_or_encode), ptrOrNil(DST), DST_len)
return ctx
}
func PairingCommit(ctx Pairing) {
C.blst_pairing_commit(&ctx[0])
}
func PairingMerge(ctx Pairing, ctx1 Pairing) int {
r := C.blst_pairing_merge(&ctx[0], &ctx1[0])
return int(r)
}
func PairingFinalVerify(ctx Pairing, optional ...*Fp12) bool {
var gtsig *Fp12
if len(optional) > 0 {
gtsig = optional[0]
}
return bool(C.blst_pairing_finalverify(&ctx[0], gtsig.asPtr()))
}
func PairingRawAggregate(ctx Pairing, q *P2Affine, p *P1Affine) {
C.blst_pairing_raw_aggregate(&ctx[0], &q.cgo, &p.cgo)
}
func PairingAsFp12(ctx Pairing) *Fp12 {
var pt Fp12
C.go_pairing_as_fp12(&pt.cgo, &ctx[0])
return &pt
}
// Fp12One returns the multiplicative identity of the Fp12 extension
// field (a copy of the precomputed constant).
func Fp12One() Fp12 {
	return cgo_fp12One
}
// Fp12FinalVerify reports whether pt1 and pt2 agree after final
// exponentiation, without computing the full exponentiation of each.
func Fp12FinalVerify(pt1 *Fp12, pt2 *Fp12) bool {
	return bool(C.blst_fp12_finalverify(&pt1.cgo, &pt2.cgo))
}
// Fp12MillerLoop computes the Miller loop for the single point pair
// (q, p) and returns the resulting Fp12 element.
func Fp12MillerLoop(q *P2Affine, p *P1Affine) *Fp12 {
	var pt Fp12
	C.blst_miller_loop(&pt.cgo, &q.cgo, &p.cgo)
	return &pt
}
// Fp12MillerLoopN computes the product of the Miller loops over all
// pairs (qs[i], ps[i]), distributing the work across up to maxProcs
// goroutines. Panics if the slices are empty or of different lengths.
//
// Fix: idiom cleanup — dropped the stray trailing semicolon on the
// result-slice declaration and replaced the call-style `range(ret)`
// with the conventional `range ret` clause; behavior is unchanged.
func Fp12MillerLoopN(qs []P2Affine, ps []P1Affine) *Fp12 {
	if len(qs) != len(ps) || len(qs) == 0 {
		panic("inputs' lengths mismatch")
	}
	nElems := uint32(len(qs))
	nThreads := uint32(maxProcs)
	// Single-threaded fast path: one batched C call.
	if nThreads == 1 || nElems == 1 {
		var pt Fp12
		C.go_miller_loop_n(&pt.cgo, &qs[0].cgo, &ps[0].cgo, C.size_t(nElems), false)
		return &pt
	}
	// Work is claimed in strides of at most 16 elements per grab.
	stride := (nElems + nThreads - 1) / nThreads
	if stride > 16 {
		stride = 16
	}
	strides := (nElems + stride - 1) / stride
	if nThreads > strides {
		nThreads = strides
	}
	msgsCh := make(chan Fp12, nThreads)
	curElem := uint32(0)
	for tid := uint32(0); tid < nThreads; tid++ {
		go func() {
			acc := Fp12One()
			first := true
			for {
				// Atomically claim the next stride of work.
				work := atomic.AddUint32(&curElem, stride) - stride
				if work >= nElems {
					break
				}
				n := nElems - work
				if n > stride {
					n = stride
				}
				C.go_miller_loop_n(&acc.cgo, &qs[work].cgo, &ps[work].cgo, C.size_t(n),
					C.bool(!first))
				first = false
			}
			msgsCh <- acc
		}()
	}
	// Collect the per-goroutine accumulators and multiply them together.
	ret := make([]Fp12, nThreads)
	for i := range ret {
		ret[i] = <-msgsCh
	}
	var pt Fp12
	C.go_fp12slice_mul(&pt.cgo, &ret[0].cgo, C.size_t(nThreads))
	return &pt
}
// MulAssign multiplies pt by p in place (pt *= p).
func (pt *Fp12) MulAssign(p *Fp12) {
	C.blst_fp12_mul(&pt.cgo, &pt.cgo, &p.cgo)
}
// FinalExp applies the pairing final exponentiation to pt in place.
func (pt *Fp12) FinalExp() {
	C.blst_final_exp(&pt.cgo, &pt.cgo)
}
// InGroup reports whether pt is a member of the cyclotomic subgroup
// (the target group GT).
func (pt *Fp12) InGroup() bool {
	return bool(C.blst_fp12_in_group(&pt.cgo))
}
// ToBendian serializes pt to its big-endian byte representation,
// 12 field elements of BLST_FP_BYTES each.
func (pt *Fp12) ToBendian() []byte {
	var out [BLST_FP_BYTES*12]byte
	C.blst_bendian_from_fp12((*C.byte)(&out[0]), &pt.cgo)
	return out[:]
}
// Equals reports whether the two Fp12 values are bitwise identical.
func (pt1 *Fp12) Equals(pt2 *Fp12) bool {
	return *pt1 == *pt2
}
// asPtr maps a possibly-nil *Fp12 to the matching C pointer: nil
// receivers become a C NULL, everything else points at the C struct.
func (pt *Fp12) asPtr() *C.blst_fp12 {
	if pt == nil {
		return nil
	}
	return &pt.cgo
}
// ptrOrNil returns a C pointer to the first byte of the slice, or a
// C NULL when the slice is empty (indexing an empty slice would panic).
func ptrOrNil(bytes []byte) *C.byte {
	if len(bytes) == 0 {
		return nil
	}
	return (*C.byte)(&bytes[0])
}
================================================
FILE: bindings/go/blst_htoc_test.go
================================================
/*
* Copyright Supranational LLC
* Licensed under the Apache License, Version 2.0, see LICENSE for details.
* SPDX-License-Identifier: Apache-2.0
*/
package blst
import (
"bytes"
"encoding/hex"
"encoding/json"
"fmt"
"os"
"strconv"
"strings"
"testing"
)
// decodeP1 builds a P1Affine from a JSON test-vector map with hex
// "x" and "y" fields (each prefixed with "0x", hence the [2:] slices).
// Returns nil on hex-decoding failure.
func decodeP1(m map[string]interface{}) *P1Affine {
	x, err := hex.DecodeString(m["x"].(string)[2:])
	if err != nil {
		fmt.Println(err)
		return nil
	}
	y, err := hex.DecodeString(m["y"].(string)[2:])
	if err != nil {
		fmt.Println(err)
		return nil
	}
	var p1 P1Affine
	p1.Deserialize(append(x, y...))
	return &p1
}
func readAll(file *os.File) ([]byte, error) {
defer file.Close()
stat, err := file.Stat()
if err != nil {
return nil, err //nolint:wrapcheck
}
buf := make([]byte, stat.Size())
total := 0
for total < len(buf) {
read, err := file.Read(buf[total:])
if err != nil {
return nil, err //nolint:wrapcheck
}
total += read
}
return buf, nil
}
// jsonG1HashToCurve runs the RFC 9380 G1 hash/encode-to-curve test
// vectors from the JSON file fname; the test is skipped (not failed)
// when the vector file is absent.
func jsonG1HashToCurve(t *testing.T, fname string) {
	t.Helper()
	vfile, err := os.Open(fname)
	if err != nil {
		t.Skipf("%.16s... not found", fname)
	}
	buf, err := readAll(vfile)
	if err != nil {
		t.Error(err.Error())
	}
	var vectors map[string]interface{}
	err = json.Unmarshal(buf, &vectors)
	if err != nil {
		t.Error(err.Error())
	}
	dst := []byte(vectors["dst"].(string))
	// randomOracle==true selects hash-to-curve, false encode-to-curve.
	hash_or_encode := vectors["randomOracle"].(bool)
	vectorsArr, ok := vectors["vectors"].([]interface{})
	if !ok {
		t.Error("Could not cast vectors to an array")
	}
	for _, v := range vectorsArr {
		testMap, ok := v.(map[string]interface{})
		if !ok {
			t.Error("Could not cast vector to map")
		}
		msg := []byte(testMap["msg"].(string))
		p1Expected := decodeP1(testMap["P"].(map[string]interface{}))
		var p1Hashed *P1Affine
		if hash_or_encode {
			p1Hashed = HashToG1(msg, dst).ToAffine()
		} else {
			p1Hashed = EncodeToG1(msg, dst).ToAffine()
		}
		if !p1Hashed.Equals(p1Expected) {
			t.Error("hashed != expected")
		}
	}
}
// TestG1HashToCurve checks G1 hash-to-curve (RO) and encode-to-curve
// (NU) against the bundled reference vectors.
func TestG1HashToCurve(t *testing.T) {
	t.Parallel()
	jsonG1HashToCurve(t, "../vectors/hash_to_curve/BLS12381G1_XMD_SHA-256_SSWU_RO_.json")
	jsonG1HashToCurve(t, "../vectors/hash_to_curve/BLS12381G1_XMD_SHA-256_SSWU_NU_.json")
}
// decodeP2 builds a P2Affine from a JSON test-vector map whose "x"
// and "y" fields are comma-separated pairs of "0x"-prefixed hex
// coordinates. The components are concatenated in x1,x0,y1,y0 order
// to match Deserialize's expected layout. Returns nil on bad hex.
func decodeP2(m map[string]interface{}) *P2Affine {
	xArr := strings.Split(m["x"].(string), ",")
	x0, err := hex.DecodeString(xArr[0][2:])
	if err != nil {
		fmt.Println(err)
		return nil
	}
	x1, err := hex.DecodeString(xArr[1][2:])
	if err != nil {
		fmt.Println(err)
		return nil
	}
	yArr := strings.Split(m["y"].(string), ",")
	y0, err := hex.DecodeString(yArr[0][2:])
	if err != nil {
		fmt.Println(err)
		return nil
	}
	y1, err := hex.DecodeString(yArr[1][2:])
	if err != nil {
		fmt.Println(err)
		return nil
	}
	var p2 P2Affine
	p2.Deserialize(append(x1, append(x0, append(y1, y0...)...)...))
	return &p2
}
// jsonG2HashToCurve runs the RFC 9380 G2 hash/encode-to-curve test
// vectors from the JSON file fname; skipped when the file is absent.
func jsonG2HashToCurve(t *testing.T, fname string) {
	t.Helper()
	vfile, err := os.Open(fname)
	if err != nil {
		t.Skipf("%.16s... not found", fname)
	}
	buf, err := readAll(vfile)
	if err != nil {
		t.Error(err.Error())
	}
	var vectors map[string]interface{}
	err = json.Unmarshal(buf, &vectors)
	if err != nil {
		t.Error(err.Error())
	}
	dst := []byte(vectors["dst"].(string))
	// randomOracle==true selects hash-to-curve, false encode-to-curve.
	hash_or_encode := vectors["randomOracle"].(bool)
	vectorsArr, ok := vectors["vectors"].([]interface{})
	if !ok {
		t.Error("Could not cast vectors to an array")
	}
	for _, v := range vectorsArr {
		testMap, ok := v.(map[string]interface{})
		if !ok {
			t.Error("Could not cast vector to map")
		}
		msg := []byte(testMap["msg"].(string))
		p2Expected := decodeP2(testMap["P"].(map[string]interface{}))
		var p2Hashed *P2Affine
		if hash_or_encode {
			p2Hashed = HashToG2(msg, dst).ToAffine()
		} else {
			p2Hashed = EncodeToG2(msg, dst).ToAffine()
		}
		if !p2Hashed.Equals(p2Expected) {
			t.Error("hashed != expected")
		}
	}
}
// TestG2HashToCurve checks G2 hash-to-curve (RO) and encode-to-curve
// (NU) against the bundled reference vectors.
func TestG2HashToCurve(t *testing.T) {
	t.Parallel()
	jsonG2HashToCurve(t, "../vectors/hash_to_curve/BLS12381G2_XMD_SHA-256_SSWU_RO_.json")
	jsonG2HashToCurve(t, "../vectors/hash_to_curve/BLS12381G2_XMD_SHA-256_SSWU_NU_.json")
}
// jsonExpandMessageXmd runs expand_message_xmd test vectors from the
// JSON file fname; skipped when the file is absent. Each entry gives
// a message, an output length, and the expected uniform bytes.
func jsonExpandMessageXmd(t *testing.T, fname string) {
	t.Helper()
	vfile, err := os.Open(fname)
	if err != nil {
		t.Skipf("%.16s... not found", fname)
	}
	buf, err := readAll(vfile)
	if err != nil {
		t.Error(err.Error())
	}
	var vectors map[string]interface{}
	err = json.Unmarshal(buf, &vectors)
	if err != nil {
		t.Error(err.Error())
	}
	DST := []byte(vectors["DST"].(string))
	tests, ok := vectors["tests"].([]interface{})
	if !ok {
		t.Error("Could not cast 'tests' to an array")
	}
	for _, v := range tests {
		test, ok := v.(map[string]interface{})
		if !ok {
			t.Error("Could not map 'tests[]' element")
		}
		// base 0 lets ParseInt accept both "0x.." and decimal forms.
		len_in_bytes, err := strconv.ParseInt(test["len_in_bytes"].(string), 0, 0)
		if err != nil {
			t.Error(err.Error())
		}
		msg := []byte(test["msg"].(string))
		expected, err := hex.DecodeString(test["uniform_bytes"].(string))
		if err != nil {
			t.Error(err.Error())
		}
		hashed := expandMessageXmd(msg, DST, int(len_in_bytes))
		if !bytes.Equal(hashed, expected) {
			t.Error("hashed != expected")
		}
	}
}
// TestExpandMessageXmd checks expand_message_xmd against the bundled
// SHA-256 reference vectors (256-byte and 38-byte outputs).
func TestExpandMessageXmd(t *testing.T) {
	t.Parallel()
	jsonExpandMessageXmd(t, "../vectors/hash_to_curve/expand_message_xmd_SHA256_256.json")
	jsonExpandMessageXmd(t, "../vectors/hash_to_curve/expand_message_xmd_SHA256_38.json")
}
================================================
FILE: bindings/go/blst_miller_loop_test.go
================================================
package blst
import (
"crypto/rand"
"testing"
)
// TestMillerLoopN checks Fp12MillerLoopN for self-consistency: the
// batched/parallel product over 97 random point pairs must equal the
// naive product of per-pair Miller loops.
func TestMillerLoopN(t *testing.T) {
	t.Parallel()
	const npoints = 97
	scalars := make([]byte, npoints*8)
	_, err := rand.Read(scalars)
	if err != nil {
		t.Error(err.Error())
		return
	}
	p1s := make([]P1, npoints)
	p2s := make([]P2, npoints)
	g1 := P1Generator()
	g2 := P2Generator()
	// Derive each point from a distinct 4-byte slice of randomness.
	for i := range p1s {
		p1s[i] = *g1.Mult(scalars[i*8:i*8+4], 32)
		p2s[i] = *g2.Mult(scalars[i*8+4:i*8+8], 32)
	}
	ps := P1s(p1s).ToAffine()
	qs := P2s(p2s).ToAffine()
	naive := Fp12One()
	for i := range p1s {
		naive.MulAssign(Fp12MillerLoop(&qs[i], &ps[i]))
	}
	if !naive.Equals(Fp12MillerLoopN(qs, ps)) {
		t.Error("failed self-consistency Fp12MillerLoopN test")
	}
}
================================================
FILE: bindings/go/blst_minpk.tgo
================================================
import (
"runtime"
"sync"
"sync/atomic"
)
//
// PublicKey
//
// From derives the minimal-pubkey public key (a G1 point) from the
// secret scalar s, storing it in pk and returning pk for chaining.
func (pk *P1Affine) From(s *Scalar) *P1Affine {
	C.blst_sk_to_pk2_in_g1(nil, &pk.cgo, &s.cgo)
	return pk
}
// KeyValidate reports whether pk is a valid public key: on curve,
// in the G1 subgroup, and (the 'true' flag) not the point at infinity.
func (pk *P1Affine) KeyValidate() bool {
	return bool(C.go_p1_affine_validate(&pk.cgo, true))
}
// sigInfcheck, check for infinity, is a way to avoid going
// into resource-consuming verification. Passing 'false' is
// always cryptographically safe, but application might want
// to guard against obviously bogus individual[!] signatures.

// SigValidate reports whether sig is on curve and in the G2 subgroup;
// sigInfcheck additionally rejects the point at infinity.
func (sig *P2Affine) SigValidate(sigInfcheck bool) bool {
	return bool(C.go_p2_affine_validate(&sig.cgo, C.bool(sigInfcheck)))
}
//
// Sign
//
// Sign signs msg under sk with domain separation tag dst, storing the
// result in sig and returning it. optional carries useHash (bool) and
// a single augmentation blob; per-message augmentation slices are
// rejected here (len(aug) != 0 returns nil), as is any parse failure.
func (sig *P2Affine) Sign(sk *SecretKey, msg []byte, dst []byte,
	optional ...interface{}) *P2Affine {
	augSingle, aug, useHash, ok := parseOpts(optional...)
	if !ok || len(aug) != 0 {
		return nil
	}
	var q *P2
	if useHash {
		q = HashToG2(msg, dst, augSingle)
	} else {
		q = EncodeToG2(msg, dst, augSingle)
	}
	C.blst_sign_pk2_in_g1(nil, &sig.cgo, &q.cgo, &sk.cgo)
	return sig
}
//
// Signature
//
// Functions to return a signature and public key+augmentation tuple.
// This enables point decompression (if needed) to happen in parallel.
type sigGetterP2 func() *P2Affine
type pkGetterP1 func(i uint32, temp *P1Affine) (*P1Affine, []byte)
// Single verify with decompressed pk
// Verify checks sig against a single decompressed public key and
// message by delegating to AggregateVerify with one-element slices.
func (sig *P2Affine) Verify(sigGroupcheck bool, pk *P1Affine, pkValidate bool,
	msg Message, dst []byte,
	optional ...interface{}) bool { // useHash bool, aug []byte
	aug, _, useHash, ok := parseOpts(optional...)
	if !ok {
		return false
	}
	return sig.AggregateVerify(sigGroupcheck, []*P1Affine{pk}, pkValidate,
		[]Message{msg}, dst, useHash, [][]byte{aug})
}
// Single verify with compressed pk
// Uses a dummy signature to get the correct type
// VerifyCompressed checks a compressed signature against a single
// compressed/serialized public key via AggregateVerifyCompressed.
// The receiver is only used for method dispatch ("dummy").
func (dummy *P2Affine) VerifyCompressed(sig []byte, sigGroupcheck bool,
	pk []byte, pkValidate bool, msg Message, dst []byte,
	optional ...bool) bool { // useHash bool, usePksAsAugs bool
	return dummy.AggregateVerifyCompressed(sig, sigGroupcheck,
		[][]byte{pk}, pkValidate,
		[]Message{msg}, dst, optional...)
}
// Aggregate verify with uncompressed signature and public keys
// Note that checking message uniqueness, if required, is left to the user.
// Not all signature schemes require it and this keeps the binding minimal
// and fast. Refer to the Uniq function for one method method of performing
// this check.
// AggregateVerify checks an aggregated signature over one message per
// public key. Augmentations, if supplied, must be one per key.
// Message-uniqueness checking is left to the caller (see file header).
func (sig *P2Affine) AggregateVerify(sigGroupcheck bool,
	pks []*P1Affine, pksVerify bool, msgs []Message, dst []byte,
	optional ...interface{}) bool { // useHash bool, augs [][]byte
	// sanity checks and argument parsing
	n := len(pks)
	if n == 0 || len(msgs) != n {
		return false
	}
	_, augs, useHash, ok := parseOpts(optional...)
	useAugs := len(augs) != 0
	if !ok || (useAugs && len(augs) != n) {
		return false
	}
	// The signature is already decompressed; hand it over as-is.
	sigFn := func() *P2Affine {
		return sig
	}
	pkFn := func(i uint32, _ *P1Affine) (*P1Affine, []byte) {
		if useAugs {
			return pks[i], augs[i]
		}
		return pks[i], nil
	}
	return coreAggregateVerifyPkInG1(sigFn, sigGroupcheck, pkFn, pksVerify,
		msgs, dst, useHash)
}
// Aggregate verify with compressed signature and public keys
// Uses a dummy signature to get the correct type
// AggregateVerifyCompressed checks an aggregated compressed signature
// against byte-encoded public keys. Each key may be either serialized
// (uncompressed, top bit clear) or compressed (top bit set); the
// closures defer decompression so it can run in the worker threads.
func (*P2Affine) AggregateVerifyCompressed(sig []byte, sigGroupcheck bool,
	pks [][]byte, pksVerify bool, msgs []Message, dst []byte,
	optional ...bool) bool { // useHash bool, usePksAsAugs bool
	// sanity checks and argument parsing
	if len(pks) != len(msgs) {
		return false
	}
	useHash := true
	if len(optional) > 0 {
		useHash = optional[0]
	}
	usePksAsAugs := false
	if len(optional) > 1 {
		usePksAsAugs = optional[1]
	}
	sigFn := func() *P2Affine {
		sigP := new(P2Affine)
		if sigP.Uncompress(sig) == nil {
			return nil
		}
		return sigP
	}
	pkFn := func(i uint32, pk *P1Affine) (*P1Affine, []byte) {
		bytes := pks[i]
		// Dispatch on length and the compression flag bit.
		if len(bytes) == BLST_P1_SERIALIZE_BYTES && (bytes[0] & 0x80) == 0 {
			// Not compressed
			if pk.Deserialize(bytes) == nil {
				return nil, nil
			}
		} else if len(bytes) == BLST_P1_COMPRESS_BYTES && (bytes[0] & 0x80) != 0 {
			if pk.Uncompress(bytes) == nil {
				return nil, nil
			}
		} else {
			return nil, nil
		}
		if usePksAsAugs {
			return pk, bytes
		}
		return pk, nil
	}
	return coreAggregateVerifyPkInG1(sigFn, sigGroupcheck, pkFn, pksVerify,
		msgs, dst, useHash)
}
func coreAggregateVerifyPkInG1(sigFn sigGetterP2, sigGroupcheck bool,
pkFn pkGetterP1, pkValidate bool, msgs []Message, dst []byte,
optional ...bool) bool { // useHash
n := len(msgs)
if n == 0 {
return false
}
useHash := true
if len(optional) > 0 {
useHash = optional[0]
}
numCores := runtime.GOMAXPROCS(0)
numThreads := numThreads(n)
// Each thread will determine next message to process by atomically
// incrementing curItem, process corresponding pk,msg[,aug] tuple and
// repeat until n is exceeded. The resulting accumulations will be
// fed into the msgsCh channel.
msgsCh := make(chan Pairing, numThreads)
valid := int32(1)
curItem := uint32(0)
mutex := sync.Mutex{}
mutex.Lock()
for tid := 0; tid < numThreads; tid++ {
go func() {
pairing := PairingCtx(useHash, dst)
var temp P1Affine
for atomic.LoadInt32(&valid) > 0 {
// Get a work item
work := atomic.AddUint32(&curItem, 1) - 1
if work >= uint32(n) {
break
} else if work == 0 && maxProcs == numCores-1 &&
numThreads == maxProcs {
// Avoid consuming all cores by waiting until the
// main thread has completed its miller loop before
// proceeding.
mutex.Lock()
mutex.Unlock() //nolint:staticcheck
}
// Pull Public Key and augmentation blob
curPk, aug := pkFn(work, &temp)
if curPk == nil {
atomic.StoreInt32(&valid, 0)
break
}
// Pairing and accumulate
ret := PairingAggregatePkInG1(pairing, curPk, pkValidate,
nil, false, msgs[work], aug)
if ret != C.BLST_SUCCESS {
atomic.StoreInt32(&valid, 0)
break
}
// application might have some async work to do
runtime.Gosched()
}
if atomic.LoadInt32(&valid) > 0 {
PairingCommit(pairing)
msgsCh <- pairing
} else {
msgsCh <- nil
}
}()
}
// Uncompress and check signature
var gtsig Fp12
sig := sigFn()
if sig == nil {
atomic.StoreInt32(&valid, 0)
}
if atomic.LoadInt32(&valid) > 0 && sigGroupcheck &&
!sig.SigValidate(false) {
atomic.StoreInt32(&valid, 0)
}
if atomic.LoadInt32(&valid) > 0 {
C.blst_aggregated_in_g2(>sig.cgo, &sig.cgo)
}
mutex.Unlock()
// Accumulate the thread results
var pairings Pairing
for i := 0; i < numThreads; i++ {
msg := <-msgsCh
if msg != nil {
if pairings == nil {
pairings = msg
} else {
ret := PairingMerge(pairings, msg)
if ret != C.BLST_SUCCESS {
atomic.StoreInt32(&valid, 0)
}
}
}
}
if atomic.LoadInt32(&valid) == 0 || pairings == nil {
return false
}
return PairingFinalVerify(pairings, >sig)
}
// CoreVerifyPkInG1 performs a single core-verify, returning a BLST
// error code as an int. When goroutine pressure is low it reuses the
// parallel aggregate path; otherwise it calls the single-threaded C
// routine directly.
func CoreVerifyPkInG1(pk *P1Affine, sig *P2Affine, hash_or_encode bool,
	msg Message, dst []byte, optional ...[]byte) int {
	var aug []byte
	if len(optional) > 0 {
		aug = optional[0]
	}
	if runtime.NumGoroutine() < maxProcs {
		sigFn := func() *P2Affine {
			return sig
		}
		pkFn := func(_ uint32, _ *P1Affine) (*P1Affine, []byte) {
			return pk, aug
		}
		if !coreAggregateVerifyPkInG1(sigFn, true, pkFn, true, []Message{msg},
			dst, hash_or_encode) {
			return C.BLST_VERIFY_FAIL
		}
		return C.BLST_SUCCESS
	}
	return int(C.blst_core_verify_pk_in_g1(&pk.cgo, &sig.cgo, C.bool(hash_or_encode),
		ptrOrNil(msg), C.size_t(len(msg)),
		ptrOrNil(dst), C.size_t(len(dst)),
		ptrOrNil(aug), C.size_t(len(aug))))
}
// pks are assumed to be verified for proof of possession,
// which implies that they are already group-checked
// FastAggregateVerify verifies sig over a single message signed by
// every key in pks, by aggregating the keys and calling Verify.
// Keys are assumed proof-of-possession-verified, hence no groupcheck.
func (sig *P2Affine) FastAggregateVerify(sigGroupcheck bool,
	pks []*P1Affine, msg Message, dst []byte,
	optional ...interface{}) bool { // pass-through to Verify
	n := len(pks)
	// TODO: return value for length zero?
	if n == 0 {
		return false
	}
	aggregator := new(P1Aggregate)
	if !aggregator.Aggregate(pks, false) {
		return false
	}
	pkAff := aggregator.ToAffine()
	// Verify
	return sig.Verify(sigGroupcheck, pkAff, false, msg, dst, optional...)
}
// MultipleAggregateVerify batch-verifies n (signature, key, message)
// triples in one multi-pairing, blinding each with a random scalar of
// randBits bits produced by randFn to prevent signature malleability
// across the batch.
func (*P2Affine) MultipleAggregateVerify(sigs []*P2Affine,
	sigsGroupcheck bool, pks []*P1Affine, pksVerify bool,
	msgs []Message, dst []byte, randFn func(*Scalar), randBits int,
	optional ...interface{}) bool { // useHash
	// Sanity checks and argument parsing
	n := len(pks)
	if n == 0 || len(msgs) != n || len(sigs) != n {
		return false
	}
	_, augs, useHash, ok := parseOpts(optional...)
	useAugs := len(augs) != 0
	if !ok || (useAugs && len(augs) != n) {
		return false
	}
	// Supplies the i-th triple plus a fresh random blinder on demand.
	paramsFn :=
		func(work uint32, _ *P2Affine, _ *P1Affine, rand *Scalar) (
			*P2Affine, *P1Affine, *Scalar, []byte) {
			randFn(rand)
			var aug []byte
			if useAugs {
				aug = augs[work]
			}
			return sigs[work], pks[work], rand, aug
		}
	return multipleAggregateVerifyPkInG1(paramsFn, sigsGroupcheck, pksVerify,
		msgs, dst, randBits, useHash)
}
type mulAggGetterPkInG1 func(work uint32, sig *P2Affine, pk *P1Affine,
rand *Scalar) (*P2Affine, *P1Affine, *Scalar, []byte)
// multipleAggregateVerifyPkInG1 is the worker-pool engine behind
// MultipleAggregateVerify: goroutines claim triples atomically,
// accumulate randomized pairings, and the results are merged and
// final-verified on the calling goroutine.
func multipleAggregateVerifyPkInG1(paramsFn mulAggGetterPkInG1,
	sigsGroupcheck bool, pksVerify bool, msgs []Message,
	dst []byte, randBits int,
	optional ...bool) bool { // useHash
	n := len(msgs)
	if n == 0 {
		return false
	}
	useHash := true
	if len(optional) > 0 {
		useHash = optional[0]
	}
	numThreads := numThreads(n)
	// Each thread will determine next message to process by atomically
	// incrementing curItem, process corresponding pk,msg[,aug] tuple and
	// repeat until n is exceeded. The resulting accumulations will be
	// fed into the msgsCh channel.
	msgsCh := make(chan Pairing, numThreads)
	valid := int32(1)
	curItem := uint32(0)
	for tid := 0; tid < numThreads; tid++ {
		go func() {
			pairing := PairingCtx(useHash, dst)
			// Scratch storage reused across iterations by paramsFn.
			var tempRand Scalar
			var tempPk P1Affine
			var tempSig P2Affine
			for atomic.LoadInt32(&valid) > 0 {
				// Get a work item
				work := atomic.AddUint32(&curItem, 1) - 1
				if work >= uint32(n) {
					break
				}
				curSig, curPk, curRand, aug := paramsFn(work, &tempSig,
					&tempPk, &tempRand)
				if PairingMulNAggregatePkInG1(pairing, curPk, pksVerify,
					curSig, sigsGroupcheck, curRand,
					randBits, msgs[work], aug) !=
					C.BLST_SUCCESS {
					atomic.StoreInt32(&valid, 0)
					break
				}
				// application might have some async work to do
				runtime.Gosched()
			}
			if atomic.LoadInt32(&valid) > 0 {
				PairingCommit(pairing)
				msgsCh <- pairing
			} else {
				msgsCh <- nil
			}
		}()
	}
	// Accumulate the thread results
	var pairings Pairing
	for i := 0; i < numThreads; i++ {
		msg := <-msgsCh
		if msg != nil {
			if pairings == nil {
				pairings = msg
			} else {
				ret := PairingMerge(pairings, msg)
				if ret != C.BLST_SUCCESS {
					atomic.StoreInt32(&valid, 0)
				}
			}
		}
	}
	if atomic.LoadInt32(&valid) == 0 || pairings == nil {
		return false
	}
	return PairingFinalVerify(pairings, nil)
}
//
// Aggregate P2
//
type aggGetterP2 func(i uint32, temp *P2Affine) *P2Affine
type P2Aggregate struct {
v *P2
}
// Aggregate uncompressed elements
// Aggregate adds the given uncompressed P2 points into the aggregate,
// optionally group-checking each. An empty input is a no-op success.
func (agg *P2Aggregate) Aggregate(elmts []*P2Affine,
	groupcheck bool) bool {
	if len(elmts) == 0 {
		return true
	}
	getter := func(i uint32, _ *P2Affine) *P2Affine { return elmts[i] }
	return agg.coreAggregate(getter, groupcheck, len(elmts))
}
// AggregateWithRandomness replaces the aggregate with the multi-scalar
// product of the given points and nbits-wide scalars; groupcheck
// optionally validates the points first. Note this overwrites, rather
// than adds to, any previously accumulated value.
func (agg *P2Aggregate) AggregateWithRandomness(pointsIf interface{},
	scalarsIf interface{}, nbits int, groupcheck bool) bool {
	if groupcheck && !P2AffinesValidate(pointsIf) {
		return false
	}
	agg.v = P2AffinesMult(pointsIf, scalarsIf, nbits)
	return true
}
// Aggregate compressed elements
// AggregateCompressed adds compressed P2 encodings into the aggregate,
// decompressing each in the worker threads via the getter closure.
// An empty input is a no-op success.
func (agg *P2Aggregate) AggregateCompressed(elmts [][]byte,
	groupcheck bool) bool {
	if len(elmts) == 0 {
		return true
	}
	getter := func(i uint32, p *P2Affine) *P2Affine {
		bytes := elmts[i]
		if p.Uncompress(bytes) == nil {
			return nil
		}
		return p
	}
	return agg.coreAggregate(getter, groupcheck, len(elmts))
}
// AddAggregate folds another aggregate into this one. A nil side is
// treated as the identity; note the other.v != nil branch shares the
// pointer rather than copying the point.
func (agg *P2Aggregate) AddAggregate(other *P2Aggregate) {
	if other.v == nil {
		// do nothing
	} else if agg.v == nil {
		agg.v = other.v
	} else {
		C.blst_p2_add_or_double(&agg.v.cgo, &agg.v.cgo, &other.v.cgo)
	}
}
// Add accumulates a single affine point into the aggregate, optionally
// group-checking it first; returns false only on a failed groupcheck.
func (agg *P2Aggregate) Add(elmt *P2Affine, groupcheck bool) bool {
	if groupcheck && !bool(C.blst_p2_affine_in_g2(&elmt.cgo)) {
		return false
	}
	if agg.v == nil {
		agg.v = new(P2)
		C.blst_p2_from_affine(&agg.v.cgo, &elmt.cgo)
	} else {
		C.blst_p2_add_or_double_affine(&agg.v.cgo, &agg.v.cgo, &elmt.cgo)
	}
	return true
}
// ToAffine converts the accumulated point to affine form; an empty
// aggregate yields a zero-value P2Affine.
func (agg *P2Aggregate) ToAffine() *P2Affine {
	if agg.v != nil {
		return agg.v.ToAffine()
	}
	return new(P2Affine)
}
// coreAggregate sums n points (fetched lazily via getter) into agg.v
// using a pool of goroutines, each building a partial sum that the
// calling goroutine then combines. On any failure agg.v is reset to
// nil and false is returned.
func (agg *P2Aggregate) coreAggregate(getter aggGetterP2, groupcheck bool,
	n int) bool {
	if n == 0 {
		return true
	}
	// operations are considered short enough for not to care about
	// keeping one core free...
	numThreads := runtime.GOMAXPROCS(0)
	if numThreads > n {
		numThreads = n
	}
	valid := int32(1)
	// empty distinguishes "this worker got no items" from "failed".
	type result struct {
		agg *P2
		empty bool
	}
	msgs := make(chan result, numThreads)
	curItem := uint32(0)
	for tid := 0; tid < numThreads; tid++ {
		go func() {
			first := true
			var agg P2
			var temp P2Affine
			for atomic.LoadInt32(&valid) > 0 {
				// Get a work item
				work := atomic.AddUint32(&curItem, 1) - 1
				if work >= uint32(n) {
					break
				}
				// Signature validate
				curElmt := getter(work, &temp)
				if curElmt == nil {
					atomic.StoreInt32(&valid, 0)
					break
				}
				if groupcheck && !bool(C.blst_p2_affine_in_g2(&curElmt.cgo)) {
					atomic.StoreInt32(&valid, 0)
					break
				}
				// First point initializes the accumulator, the rest add in.
				if first {
					C.blst_p2_from_affine(&agg.cgo, &curElmt.cgo)
					first = false
				} else {
					C.blst_p2_add_or_double_affine(&agg.cgo, &agg.cgo, &curElmt.cgo)
				}
				// application might have some async work to do
				runtime.Gosched()
			}
			if first {
				msgs <- result{nil, true}
			} else if atomic.LoadInt32(&valid) > 0 {
				msgs <- result{&agg, false}
			} else {
				msgs <- result{nil, false}
			}
		}()
	}
	// Accumulate the thread results
	first := agg.v == nil
	validLocal := true
	for i := 0; i < numThreads; i++ {
		msg := <-msgs
		if !validLocal || msg.empty {
			// do nothing
		} else if msg.agg == nil {
			validLocal = false
			// This should be unnecessary but seems safer
			atomic.StoreInt32(&valid, 0)
		} else {
			if first {
				agg.v = msg.agg
				first = false
			} else {
				C.blst_p2_add_or_double(&agg.v.cgo, &agg.v.cgo, &msg.agg.cgo)
			}
		}
	}
	if atomic.LoadInt32(&valid) == 0 {
		agg.v = nil
		return false
	}
	return true
}
================================================
FILE: bindings/go/blst_minpk_test.go
================================================
/*
* Copyright Supranational LLC
* Licensed under the Apache License, Version 2.0, see LICENSE for details.
* SPDX-License-Identifier: Apache-2.0
*/
package blst
import (
"crypto/rand"
"fmt"
"runtime"
"testing"
)
// Min PK.
type PublicKeyMinPk = P1Affine
type SignatureMinPk = P2Affine
type AggregateSignatureMinPk = P2Aggregate
type AggregatePublicKeyMinPk = P1Aggregate
// Names in this file must be unique to support min-sig so we can't use 'dst'
// here.
var dstMinPk = []byte("BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_NUL_")
// init configures the library to use every available core while
// testing and benchmarking.
func init() {
	// Use all cores when testing and benchmarking
	SetMaxProcs(runtime.GOMAXPROCS(0))
}
// TestInfinityMinPk exercises uncompression of the canonical
// point-at-infinity encoding (0xc0 flag byte); it only checks that
// the call does not crash.
func TestInfinityMinPk(t *testing.T) {
	t.Parallel()
	var infComp [BLST_P1_COMPRESS_BYTES]byte
	infComp[0] |= 0xc0
	new(PublicKeyMinPk).Uncompress(infComp[:])
}
// TestSerdesMinPk round-trips a secret key and its public key through
// serialize/deserialize and compress/uncompress, with a negative
// equality check on a mutated secret key.
func TestSerdesMinPk(t *testing.T) {
	t.Parallel()
	var ikm = [...]byte{
		0x93, 0xad, 0x7e, 0x65, 0xde, 0xad, 0x05, 0x2a,
		0x08, 0x3a, 0x91, 0x0c, 0x8b, 0x72, 0x85, 0x91,
		0x46, 0x4c, 0xca, 0x56, 0x60, 0x5b, 0xb0, 0x56,
		0xed, 0xfe, 0x2b, 0x60, 0xa6, 0x3c, 0x48, 0x99}
	sk := KeyGen(ikm[:])
	defer sk.Zeroize()
	// Serialize/deserialize sk
	sk2 := new(SecretKey).Deserialize(sk.Serialize())
	defer sk2.Zeroize()
	if !sk.Equals(sk2) {
		t.Error("sk2 != sk")
	}
	// Negative test equals
	sk.cgo.b[0]++
	if sk.Equals(sk2) {
		t.Error("sk2 == sk")
	}
	// pk
	pk := new(PublicKeyMinPk).From(sk)
	// Compress/decompress sk
	pk2 := new(PublicKeyMinPk).Uncompress(pk.Compress())
	if !pk.Equals(pk2) {
		t.Error("pk2 != pk")
	}
	// Serialize/deserialize sk
	pk3 := new(PublicKeyMinPk).Deserialize(pk.Serialize())
	if !pk.Equals(pk3) {
		t.Error("pk3 != pk")
	}
	// Negative test equals
	// pk.x.l[0] = pk.x.l[0] + 1
	// if pk.Equals(pk2) {
	// 	t.Error("pk2 == pk")
	// }
}
// TestSignVerifyMinPk covers the basic sign/verify matrix for the
// minimal-pubkey scheme: plain, compressed, aggregate, and serialized
// forms, plus negative cases (wrong message, key, signature, empty pk).
func TestSignVerifyMinPk(t *testing.T) {
	t.Parallel()
	var ikm = [...]byte{
		0x93, 0xad, 0x7e, 0x65, 0xde, 0xad, 0x05, 0x2a,
		0x08, 0x3a, 0x91, 0x0c, 0x8b, 0x72, 0x85, 0x91,
		0x46, 0x4c, 0xca, 0x56, 0x60, 0x5b, 0xb0, 0x56,
		0xed, 0xfe, 0x2b, 0x60, 0xa6, 0x3c, 0x48, 0x99}
	sk0 := KeyGen(ikm[:])
	ikm[0]++
	sk1 := KeyGen(ikm[:])
	// pk
	pk0 := new(PublicKeyMinPk).From(sk0)
	pk1 := new(PublicKeyMinPk).From(sk1)
	// Sign
	msg0 := []byte("hello foo")
	msg1 := []byte("hello bar!")
	sig0 := new(SignatureMinPk).Sign(sk0, msg0, dstMinPk)
	sig1 := new(SignatureMinPk).Sign(sk1, msg1, dstMinPk)
	// Verify
	if !sig0.Verify(true, pk0, false, msg0, dstMinPk) {
		t.Error("verify sig0")
	}
	if !sig1.Verify(true, pk1, false, msg1, dstMinPk) {
		t.Error("verify sig1")
	}
	if !new(SignatureMinPk).VerifyCompressed(sig1.Compress(), true,
		pk1.Compress(), false,
		msg1, dstMinPk) {
		t.Error("verify sig1")
	}
	// Batch verify
	if !sig0.AggregateVerify(true, []*PublicKeyMinPk{pk0}, false,
		[]Message{msg0}, dstMinPk) {
		t.Error("aggregate verify sig0")
	}
	// Verify compressed inputs
	if !new(SignatureMinPk).AggregateVerifyCompressed(sig0.Compress(), true,
		[][]byte{pk0.Compress()},
		false,
		[]Message{msg0}, dstMinPk) {
		t.Error("aggregate verify sig0 compressed")
	}
	// Verify serialized inputs
	if !new(SignatureMinPk).AggregateVerifyCompressed(sig0.Compress(), true,
		[][]byte{pk0.Serialize()},
		false,
		[]Message{msg0}, dstMinPk) {
		t.Error("aggregate verify sig0 serialized")
	}
	// Compressed with empty pk
	var emptyPk []byte
	if new(SignatureMinPk).VerifyCompressed(sig0.Compress(), true,
		emptyPk, false, msg0, dstMinPk) {
		t.Error("verify sig compressed inputs")
	}
	// Wrong message
	if sig0.Verify(true, pk0, false, msg1, dstMinPk) {
		t.Error("Expected Verify to return false")
	}
	// Wrong key
	if sig0.Verify(true, pk1, false, msg0, dstMinPk) {
		t.Error("Expected Verify to return false")
	}
	// Wrong sig
	if sig1.Verify(true, pk0, false, msg0, dstMinPk) {
		t.Error("Expected Verify to return false")
	}
}
// TestSignVerifyAugMinPk checks that message augmentation is bound
// into the signature: verification must fail with a different or
// missing augmentation blob.
func TestSignVerifyAugMinPk(t *testing.T) {
	t.Parallel()
	sk := genRandomKeyMinPk()
	pk := new(PublicKeyMinPk).From(sk)
	msg := []byte("hello foo")
	aug := []byte("augmentation")
	sig := new(SignatureMinPk).Sign(sk, msg, dstMinPk, aug)
	if !sig.Verify(true, pk, false, msg, dstMinPk, aug) {
		t.Error("verify sig")
	}
	aug2 := []byte("augmentation2")
	if sig.Verify(true, pk, false, msg, dstMinPk, aug2) {
		t.Error("verify sig, wrong augmentation")
	}
	if sig.Verify(true, pk, false, msg, dstMinPk) {
		t.Error("verify sig, no augmentation")
	}
	// TODO: augmentation with aggregate verify
}
// TestSignVerifyEncodeMinPk checks the encode-to-curve path (useHash
// == false): signing and verifying must agree on the hashing engine,
// and an illegal optional argument must be rejected.
func TestSignVerifyEncodeMinPk(t *testing.T) {
	t.Parallel()
	sk := genRandomKeyMinPk()
	pk := new(PublicKeyMinPk).From(sk)
	msg := []byte("hello foo")
	sig := new(SignatureMinPk).Sign(sk, msg, dstMinPk, false)
	if !sig.Verify(true, pk, false, msg, dstMinPk, false) {
		t.Error("verify sig")
	}
	if sig.Verify(true, pk, false, msg, dstMinPk) {
		t.Error("verify sig expected fail, wrong hashing engine")
	}
	if sig.Verify(true, pk, false, msg, dstMinPk, 0) {
		t.Error("verify sig expected fail, illegal argument")
	}
}
// TestSignVerifyAggregateMinPk aggregates same-message signatures for
// batch sizes 1..19, in both uncompressed and compressed form, with a
// truncated-message negative check each time.
func TestSignVerifyAggregateMinPk(t *testing.T) {
	t.Parallel()
	for size := 1; size < 20; size++ {
		sks, msgs, _, pubks, _, err :=
			generateBatchTestDataUncompressedMinPk(size)
		if err {
			t.Error("Error generating test data")
			return
		}
		// All signers sign the same message
		sigs := make([]*SignatureMinPk, 0)
		for i := 0; i < size; i++ {
			sigs = append(sigs, new(SignatureMinPk).Sign(sks[i], msgs[0],
				dstMinPk))
		}
		agProj := new(AggregateSignatureMinPk)
		if !agProj.Aggregate(sigs, false) {
			t.Error("Aggregate unexpectedly returned nil")
			return
		}
		agSig := agProj.ToAffine()
		if !agSig.FastAggregateVerify(false, pubks, msgs[0], dstMinPk) {
			t.Errorf("failed to verify size %d", size)
		}
		// Negative test
		if agSig.FastAggregateVerify(false, pubks, msgs[0][1:], dstMinPk) {
			t.Errorf("failed to not verify size %d", size)
		}
		// Test compressed signature aggregation
		compSigs := make([][]byte, size)
		for i := 0; i < size; i++ {
			compSigs[i] = sigs[i].Compress()
		}
		agProj = new(AggregateSignatureMinPk)
		if !agProj.AggregateCompressed(compSigs, false) {
			t.Error("AggregateCompressed unexpectedly returned nil")
			return
		}
		agSig = agProj.ToAffine()
		if !agSig.FastAggregateVerify(false, pubks, msgs[0], dstMinPk) {
			t.Errorf("failed to verify size %d", size)
		}
		// Negative test
		if agSig.FastAggregateVerify(false, pubks, msgs[0][1:], dstMinPk) {
			t.Errorf("failed to not verify size %d", size)
		}
	}
}
// TestSignMultipleVerifyAggregateMinPk builds, per message, an
// aggregated signature/key pair across all signers, then runs
// MultipleAggregateVerify over the per-message aggregates with
// 64-bit random blinders, plus a truncated-DST negative check.
func TestSignMultipleVerifyAggregateMinPk(t *testing.T) {
	t.Parallel()
	msgCount := 5
	for size := 1; size < 20; size++ {
		msgs := make([]Message, 0)
		sks := make([]*SecretKey, 0)
		pks := make([]*PublicKeyMinPk, 0)
		// Generate messages
		for i := 0; i < msgCount; i++ {
			msg := Message(fmt.Sprintf("blst is a blast!! %d %d", i, size))
			msgs = append(msgs, msg)
		}
		// Generate keypairs
		for i := 0; i < size; i++ {
			priv := genRandomKeyMinPk()
			sks = append(sks, priv)
			pks = append(pks, new(PublicKeyMinPk).From(priv))
		}
		// All signers sign each message
		aggSigs := make([]*SignatureMinPk, 0)
		aggPks := make([]*PublicKeyMinPk, 0)
		for i := 0; i < msgCount; i++ {
			sigsToAgg := make([]*SignatureMinPk, 0)
			pksToAgg := make([]*PublicKeyMinPk, 0)
			for j := 0; j < size; j++ {
				sigsToAgg = append(sigsToAgg,
					new(SignatureMinPk).Sign(sks[j], msgs[i],
						dstMinPk))
				pksToAgg = append(pksToAgg, pks[j])
			}
			agSig := new(AggregateSignatureMinPk)
			if !agSig.Aggregate(sigsToAgg, true) {
				t.Error("failed to aggregate")
			}
			afSig := agSig.ToAffine()
			agPk := new(AggregatePublicKeyMinPk)
			agPk.Aggregate(pksToAgg, false)
			afPk := agPk.ToAffine()
			aggSigs = append(aggSigs, afSig)
			aggPks = append(aggPks, afPk)
			// Verify aggregated signature and pk
			if !afSig.Verify(false, afPk, false, msgs[i], dstMinPk) {
				t.Errorf("failed to verify single aggregate size %d", size)
			}
		}
		// Fills s with fresh randomness for each blinder.
		randFn := func(s *Scalar) {
			var rbytes [BLST_SCALAR_BYTES]byte
			_, err := rand.Read(rbytes[:])
			if err != nil {
				t.Error(err.Error())
			}
			s.FromBEndian(rbytes[:])
		}
		// Verify
		randBits := 64
		if !new(SignatureMinPk).MultipleAggregateVerify(aggSigs, true,
			aggPks, false,
			msgs, dstMinPk,
			randFn, randBits) {
			t.Errorf("failed to verify multiple aggregate size %d", size)
		}
		// Negative test
		if new(SignatureMinPk).MultipleAggregateVerify(aggSigs, true,
			aggPks, false,
			msgs, dstMinPk[1:],
			randFn, randBits) {
			t.Errorf("failed to not verify multiple aggregate size %d", size)
		}
	}
}
// TestBatchUncompressMinPk checks that BatchUncompress recovers 128
// compressed G2 points identical to the originals.
func TestBatchUncompressMinPk(t *testing.T) {
	t.Parallel()
	size := 128
	var points []*P2Affine
	var compPoints [][]byte
	for i := 0; i < size; i++ {
		msg := Message(fmt.Sprintf("blst is a blast!! %d", i))
		p2 := HashToG2(msg, dstMinPk).ToAffine()
		points = append(points, p2)
		compPoints = append(compPoints, p2.Compress())
	}
	uncompPoints := new(SignatureMinPk).BatchUncompress(compPoints)
	if uncompPoints == nil {
		t.Errorf("BatchUncompress returned nil size %d", size)
	}
	for i := 0; i < size; i++ {
		if !points[i].Equals(uncompPoints[i]) {
			t.Errorf("Uncompressed point does not equal initial point %d", i)
		}
	}
}
// BenchmarkCoreSignMinPk measures a single minimal-pubkey signing
// operation with a fixed key.
func BenchmarkCoreSignMinPk(b *testing.B) {
	var ikm = [...]byte{
		0x93, 0xad, 0x7e, 0x65, 0xde, 0xad, 0x05, 0x2a,
		0x08, 0x3a, 0x91, 0x0c, 0x8b, 0x72, 0x85, 0x91,
		0x46, 0x4c, 0xca, 0x56, 0x60, 0x5b, 0xb0, 0x56,
		0xed, 0xfe, 0x2b, 0x60, 0xa6, 0x3c, 0x48, 0x99}
	sk := KeyGen(ikm[:])
	defer sk.Zeroize()
	msg := []byte("hello foo")
	for i := 0; i < b.N; i++ {
		new(SignatureMinPk).Sign(sk, msg, dstMinPk)
	}
}
// BenchmarkCoreVerifyMinPk measures a single verification, failing
// the benchmark outright if verification ever returns false.
func BenchmarkCoreVerifyMinPk(b *testing.B) {
	var ikm = [...]byte{
		0x93, 0xad, 0x7e, 0x65, 0xde, 0xad, 0x05, 0x2a,
		0x08, 0x3a, 0x91, 0x0c, 0x8b, 0x72, 0x85, 0x91,
		0x46, 0x4c, 0xca, 0x56, 0x60, 0x5b, 0xb0, 0x56,
		0xed, 0xfe, 0x2b, 0x60, 0xa6, 0x3c, 0x48, 0x99}
	sk := KeyGen(ikm[:])
	defer sk.Zeroize()
	pk := new(PublicKeyMinPk).From(sk)
	msg := []byte("hello foo")
	sig := new(SignatureMinPk).Sign(sk, msg, dstMinPk)
	// Verify
	for i := 0; i < b.N; i++ {
		if !sig.Verify(true, pk, false, msg, dstMinPk) {
			b.Fatal("verify sig")
		}
	}
}
// BenchmarkCoreVerifyAggregateMinPk measures aggregate verification
// with compressed inputs across a range of batch sizes.
func BenchmarkCoreVerifyAggregateMinPk(b *testing.B) {
	// run builds a sub-benchmark closure for a given batch size.
	run := func(size int) func(b *testing.B) {
		return func(b *testing.B) {
			b.Helper()
			msgs, _, pubks, agsig, err := generateBatchTestDataMinPk(size)
			if err {
				b.Fatal("Error generating test data")
			}
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				if !new(SignatureMinPk).AggregateVerifyCompressed(agsig, true,
					pubks, false,
					msgs, dstMinPk) {
					b.Fatal("failed to verify")
				}
			}
		}
	}
	b.Run("1", run(1))
	b.Run("10", run(10))
	b.Run("50", run(50))
	b.Run("100", run(100))
	b.Run("300", run(300))
	b.Run("1000", run(1000))
	b.Run("4000", run(4000))
}
// BenchmarkVerifyAggregateUncompressedMinPk benchmarks aggregate
// verification over already-uncompressed keys and signatures.
func BenchmarkVerifyAggregateUncompressedMinPk(b *testing.B) {
	bench := func(size int) func(b *testing.B) {
		return func(b *testing.B) {
			b.Helper()
			_, msgs, _, pubks, agsig, failed :=
				generateBatchTestDataUncompressedMinPk(size)
			if failed {
				b.Fatal("Error generating test data")
			}
			b.ResetTimer()
			for n := 0; n < b.N; n++ {
				if !agsig.AggregateVerify(true, pubks, false, msgs, dstMinPk) {
					b.Fatal("failed to verify")
				}
			}
		}
	}
	for _, size := range []int{1, 10, 50, 100, 300, 1000, 4000} {
		b.Run(fmt.Sprintf("%d", size), bench(size))
	}
}
// BenchmarkCoreAggregateMinPk benchmarks signature aggregation from
// compressed signatures for a range of batch sizes.
func BenchmarkCoreAggregateMinPk(b *testing.B) {
	bench := func(size int) func(b *testing.B) {
		return func(b *testing.B) {
			b.Helper()
			_, sigs, _, _, failed := generateBatchTestDataMinPk(size)
			if failed {
				b.Fatal("Error generating test data")
			}
			b.ResetTimer()
			for n := 0; n < b.N; n++ {
				var agg AggregateSignatureMinPk
				agg.AggregateCompressed(sigs, true)
			}
		}
	}
	for _, size := range []int{1, 10, 50, 100, 300, 1000, 4000} {
		b.Run(fmt.Sprintf("%d", size), bench(size))
	}
}
// genRandomKeyMinPk derives a fresh secret key from 32 bytes of system
// randomness; returns nil if the randomness source fails.
func genRandomKeyMinPk() *SecretKey {
	ikm := make([]byte, 32)
	if _, err := rand.Read(ikm); err != nil {
		return nil
	}
	return KeyGen(ikm)
}
// generateBatchTestDataMinPk produces |size| random keypairs, signs a
// distinct message with each, and returns the messages, compressed
// signatures, compressed public keys and the compressed aggregate
// signature. |err| is true when any step failed.
func generateBatchTestDataMinPk(size int) (msgs []Message,
	sigs [][]byte, pubks [][]byte, agsig []byte, err bool) {
	for i := 0; i < size; i++ {
		msg := Message(fmt.Sprintf("blst is a blast!! %d", i))
		priv := genRandomKeyMinPk()
		msgs = append(msgs, msg)
		sigs = append(sigs,
			new(SignatureMinPk).Sign(priv, msg, dstMinPk).Compress())
		pubks = append(pubks, new(PublicKeyMinPk).From(priv).Compress())
	}
	agProj := new(AggregateSignatureMinPk)
	if !agProj.AggregateCompressed(sigs, true) {
		fmt.Println("AggregateCompressed unexpectedly returned nil")
		err = true
		return //nolint:revive
	}
	agAff := agProj.ToAffine()
	if agAff == nil {
		fmt.Println("ToAffine unexpectedly returned nil")
		err = true
		return //nolint:revive
	}
	agsig = agAff.Compress()
	return //nolint:revive
}
// generateBatchTestDataUncompressedMinPk produces |size| random
// keypairs, messages and signatures in uncompressed form, plus the
// aggregate signature. |err| is true when aggregation failed.
func generateBatchTestDataUncompressedMinPk(size int) (sks []*SecretKey,
	msgs []Message, sigs []*SignatureMinPk, //nolint:unparam
	pubks []*PublicKeyMinPk, agsig *SignatureMinPk, err bool) {
	for i := 0; i < size; i++ {
		msg := Message(fmt.Sprintf("blst is a blast!! %d", i))
		priv := genRandomKeyMinPk()
		msgs = append(msgs, msg)
		sks = append(sks, priv)
		sigs = append(sigs, new(SignatureMinPk).Sign(priv, msg, dstMinPk))
		pubks = append(pubks, new(PublicKeyMinPk).From(priv))
	}
	agProj := new(AggregateSignatureMinPk)
	if !agProj.Aggregate(sigs, true) {
		fmt.Println("Aggregate unexpectedly returned nil")
		err = true
		return //nolint:revive
	}
	agsig = agProj.ToAffine()
	return //nolint:revive
}
// BenchmarkBatchUncompressMinPk compares one-at-a-time signature
// uncompression against the batched variant over 128 points.
func BenchmarkBatchUncompressMinPk(b *testing.B) {
	const size = 128
	compPoints := make([][]byte, size)
	for i := 0; i < size; i++ {
		msg := Message(fmt.Sprintf("blst is a blast!! %d", i))
		compPoints[i] = HashToG2(msg, dstMinPk).ToAffine().Compress()
	}
	b.Run("Single", func(b *testing.B) {
		b.ResetTimer()
		b.ReportAllocs()
		var tmp SignatureMinPk
		for n := 0; n < b.N; n++ {
			for _, comp := range compPoints {
				if tmp.Uncompress(comp) == nil {
					b.Fatal("could not uncompress point")
				}
			}
		}
	})
	b.Run("Batch", func(b *testing.B) {
		b.ResetTimer()
		b.ReportAllocs()
		var tmp SignatureMinPk
		for n := 0; n < b.N; n++ {
			if tmp.BatchUncompress(compPoints) == nil {
				b.Fatal("could not batch uncompress points")
			}
		}
	})
}
// TestSignVerifyAggregateValidatesInfinitePubkeyMinPk ensures that
// aggregate verification rejects the point-at-infinity public key,
// both on its own and when hidden inside a larger aggregate.
func TestSignVerifyAggregateValidatesInfinitePubkeyMinPk(t *testing.T) {
	t.Parallel()
	const size = 20
	sks, msgs, _, pubks, _, failed :=
		generateBatchTestDataUncompressedMinPk(size)
	if failed {
		t.Error("Error generating test data")
		return
	}
	sigs := make([]*SignatureMinPk, size)
	for i := range sigs {
		sigs[i] = new(SignatureMinPk).Sign(sks[i], msgs[i], dstMinPk)
	}
	// An aggregate built solely from the infinity key/signature must
	// not verify.
	zeroKey := new(PublicKeyMinPk)
	zeroSig := new(SignatureMinPk)
	agProj := new(AggregateSignatureMinPk)
	if !agProj.Aggregate([]*SignatureMinPk{zeroSig}, false) {
		t.Error("Aggregate unexpectedly returned nil")
		return
	}
	agSig := agProj.ToAffine()
	if agSig.AggregateVerify(false, []*PublicKeyMinPk{zeroKey}, false,
		[][]byte{msgs[0]}, dstMinPk) {
		t.Error("failed to NOT verify signature")
	}
	// Smuggle the infinity key/signature into an otherwise valid set.
	pubks[0] = zeroKey
	sigs[0] = zeroSig
	agProj = new(AggregateSignatureMinPk)
	if !agProj.Aggregate(sigs, false) {
		t.Error("Aggregate unexpectedly returned nil")
		return
	}
	agSig = agProj.ToAffine()
	if agSig.AggregateVerify(false, pubks, false, msgs, dstMinPk) {
		t.Error("failed to NOT verify signature")
	}
}
// TestEmptyMessageMinPk checks that a zero-length message can be
// signed and verified.
func TestEmptyMessageMinPk(t *testing.T) {
	t.Parallel()
	msg := []byte("")
	skBytes := []byte{99, 64, 58, 175, 15, 139, 113, 184, 37, 222, 127,
		204, 233, 209, 34, 8, 61, 27, 85, 251, 68, 31, 255, 214, 8, 189,
		190, 71, 198, 16, 210, 91}
	sk := new(SecretKey).Deserialize(skBytes)
	pk := new(PublicKeyMinPk).From(sk)
	sig := new(SignatureMinPk).Sign(sk, msg, dstMinPk)
	if !new(SignatureMinPk).VerifyCompressed(sig.Compress(), true,
		pk.Compress(), false, msg, dstMinPk) {
		t.Error("failed to verify empty message")
	}
}
// TestEmptySignatureMinPk checks that a nil/empty signature is
// rejected by compressed verification.
func TestEmptySignatureMinPk(t *testing.T) {
	t.Parallel()
	msg := []byte("message")
	skBytes := []byte{99, 64, 58, 175, 15, 139, 113, 184, 37, 222, 127,
		204, 233, 209, 34, 8, 61, 27, 85, 251, 68, 31, 255, 214, 8, 189,
		190, 71, 198, 16, 210, 91}
	sk := new(SecretKey).Deserialize(skBytes)
	pk := new(PublicKeyMinPk).From(sk)
	var emptySig []byte
	if new(SignatureMinPk).VerifyCompressed(emptySig, true,
		pk.Compress(), false, msg, dstMinPk) {
		t.Error("failed to NOT verify empty signature")
	}
}
// TestMultiScalarP1 cross-checks the multi-scalar multiplication path
// against a naive reference (per-point Mult followed by Add), both for
// small prefixes (exercising the non-Pippenger small-batch paths) and
// for the full 1027-point set.
func TestMultiScalarP1(t *testing.T) {
	t.Parallel()
	const npoints = 1027
	// 16 bytes (128 bits) of scalar material per point.
	scalars := make([]byte, npoints*16)
	_, err := rand.Read(scalars)
	if err != nil {
		t.Error(err.Error())
		return
	}
	points := make([]P1, npoints)
	refs := make([]P1, npoints)
	generator := P1Generator()
	for i := range points {
		// Derive a pseudo-random point from a 4-byte (32-bit) slice of
		// the scalar pool, then compute the reference value
		// points[i] * scalars[i*16..] with a 128-bit scalar.
		points[i] = *generator.Mult(scalars[i*4:(i+1)*4])
		refs[i] = *points[i].Mult(scalars[i*16:(i+1)*16], 128)
		if i < 27 {
			// Check every prefix length up to 27 points.
			ref := P1s(refs[:i+1]).Add()
			ret := P1s(points[:i+1]).Mult(scalars, 128)
			if !ref.Equals(ret) {
				t.Error("failed self-consistency multi-scalar test")
			}
		}
	}
	ref := P1s(refs).Add()
	ret := P1s(points).Mult(scalars, 128)
	if !ref.Equals(ret) {
		t.Error("failed self-consistency multi-scalar test")
	}
}
func BenchmarkMultiScalarP1(b *testing.B) {
const npoints = 200000
scalars := make([]byte, npoints*32)
_, err := rand.Read(scalars)
if err != nil {
b.Fatal(err.Error())
}
temp := make([]P1, npoints)
generator := P1Generator()
for i := range temp {
temp[i] = *generator.Mult(scalars[i*4:(i+1)*4])
}
points := P1s(temp).ToAffine()
run := func(points []P1Affine) func(b *testing.B) {
return func(b *testing.B) {
b.Helper()
for i:=0; i window*ncpus { //nolint:nestif
nx = 1
wnd = bits.Len(uint(ncpus)/4)
if (window + wnd) > 18 {
wnd = window - wnd
} else {
wnd = (nbits / window + ncpus - 1) / ncpus;
if (nbits / (window + 1) + ncpus - 1) / ncpus < wnd {
wnd = window + 1;
} else {
wnd = window;
}
}
} else {
nx = 2
wnd = window-2
for (nbits/wnd+1)*nx < ncpus {
nx += 1
wnd = window - bits.Len(3*uint(nx)/2)
}
nx -= 1
wnd = window - bits.Len(3*uint(nx)/2)
}
ny = nbits/wnd + 1
wnd = nbits/ny + 1
return nx, ny, wnd
}
// pippenger_window_size returns the bucket-window width, in bits, to
// use for a Pippenger multi-scalar multiplication over |npoints|
// points. The width grows with the bit-length of the point count.
func pippenger_window_size(npoints int) int {
	wbits := bits.Len(uint(npoints))
	switch {
	case wbits > 13:
		return wbits - 4
	case wbits > 5:
		return wbits - 3
	default:
		return 2
	}
}
================================================
FILE: bindings/go/blst_px.tgo
================================================
// PairingAggregatePkInG1 aggregates a min-pk public key/signature pair
// into the pairing context |ctx|. The optional trailing argument is
// the augmentation string. Returns the C library's status code as an
// int.
func PairingAggregatePkInG1(ctx Pairing, PK *P1Affine, pkValidate bool,
	sig *P2Affine, sigGroupcheck bool, msg []byte,
	optional ...[]byte) int { // aug
	aug := []byte(nil)
	if len(optional) > 0 {
		aug = optional[0]
	}
	return int(C.blst_pairing_chk_n_aggr_pk_in_g1(&ctx[0],
		PK.asPtr(), C.bool(pkValidate),
		sig.asPtr(), C.bool(sigGroupcheck),
		ptrOrNil(msg), C.size_t(len(msg)),
		ptrOrNil(aug), C.size_t(len(aug))))
}
// PairingMulNAggregatePkInG1 aggregates a min-pk public key/signature
// pair into the pairing context, scaling by |rand| of |randBits| bits
// (the multi-verify randomization). The optional trailing argument is
// the augmentation string. Returns the C library's status code.
func PairingMulNAggregatePkInG1(ctx Pairing, PK *P1Affine, pkValidate bool,
	sig *P2Affine, sigGroupcheck bool,
	rand *Scalar, randBits int, msg []byte,
	optional ...[]byte) int { // aug
	aug := []byte(nil)
	if len(optional) > 0 {
		aug = optional[0]
	}
	return int(C.blst_pairing_chk_n_mul_n_aggr_pk_in_g1(&ctx[0],
		PK.asPtr(), C.bool(pkValidate),
		sig.asPtr(), C.bool(sigGroupcheck),
		&rand.cgo.b[0], C.size_t(randBits),
		ptrOrNil(msg), C.size_t(len(msg)),
		ptrOrNil(aug), C.size_t(len(aug))))
}
//
// Serialization/Deserialization.
//
// P1 Serdes
// Serialize returns the BLST_P1_SERIALIZE_BYTES-byte uncompressed
// encoding of the affine point.
func (p1 *P1Affine) Serialize() []byte {
	out := make([]byte, BLST_P1_SERIALIZE_BYTES)
	C.blst_p1_affine_serialize((*C.byte)(&out[0]), &p1.cgo)
	return out
}
// Deserialize parses an uncompressed point encoding into the receiver.
// Returns the receiver on success, nil on wrong length or malformed
// input.
func (p1 *P1Affine) Deserialize(in []byte) *P1Affine {
	if len(in) != BLST_P1_SERIALIZE_BYTES ||
		C.blst_p1_deserialize(&p1.cgo, (*C.byte)(&in[0])) != C.BLST_SUCCESS {
		return nil
	}
	return p1
}
// Compress returns the BLST_P1_COMPRESS_BYTES-byte compressed encoding
// of the affine point.
func (p1 *P1Affine) Compress() []byte {
	out := make([]byte, BLST_P1_COMPRESS_BYTES)
	C.blst_p1_affine_compress((*C.byte)(&out[0]), &p1.cgo)
	return out
}
// Uncompress parses a compressed point encoding into the receiver.
// Returns the receiver on success, nil on wrong length or malformed
// input.
func (p1 *P1Affine) Uncompress(in []byte) *P1Affine {
	if len(in) != BLST_P1_COMPRESS_BYTES ||
		C.blst_p1_uncompress(&p1.cgo, (*C.byte)(&in[0])) != C.BLST_SUCCESS {
		return nil
	}
	return p1
}
// InG1 reports whether the affine point is in the G1 subgroup.
func (p1 *P1Affine) InG1() bool {
	return bool(C.blst_p1_affine_in_g1(&p1.cgo))
}
// BatchUncompress uncompresses a batch of serialized points in
// parallel. On success it returns one pointer per input, all into a
// single backing array; on any malformed input it returns nil.
func (*P1Affine) BatchUncompress(in [][]byte) []*P1Affine {
	// Allocate space for all of the resulting points. Later we'll save pointers
	// and return those so that the result could be used in other functions,
	// such as MultipleAggregateVerify.
	n := len(in)
	points := make([]P1Affine, n)
	pointsPtrs := make([]*P1Affine, n)
	numThreads := numThreads(n)
	// Each thread will determine next message to process by atomically
	// incrementing curItem, process corresponding point, and
	// repeat until n is exceeded. Each thread will send a result (true for
	// success, false for failure) into the channel when complete.
	resCh := make(chan bool, numThreads)
	valid := int32(1)    // cleared to 0 on first failed uncompress
	curItem := uint32(0) // next work-item index, claimed atomically
	for tid := 0; tid < numThreads; tid++ {
		go func() {
			for atomic.LoadInt32(&valid) > 0 {
				// Get a work item
				work := atomic.AddUint32(&curItem, 1) - 1
				if work >= uint32(n) {
					break
				}
				if points[work].Uncompress(in[work]) == nil {
					atomic.StoreInt32(&valid, 0)
					break
				}
				pointsPtrs[work] = &points[work]
			}
			if atomic.LoadInt32(&valid) > 0 {
				resCh <- true
			} else {
				resCh <- false
			}
		}()
	}
	// Collect the threads
	result := true
	for i := 0; i < numThreads; i++ {
		if ! <-resCh {
			result = false
		}
	}
	if atomic.LoadInt32(&valid) == 0 || !result {
		return nil
	}
	return pointsPtrs
}
// Serialize returns the BLST_P1_SERIALIZE_BYTES-byte uncompressed
// encoding of the projective point.
func (p1 *P1) Serialize() []byte {
	out := make([]byte, BLST_P1_SERIALIZE_BYTES)
	C.blst_p1_serialize((*C.byte)(&out[0]), &p1.cgo)
	return out
}
// Compress returns the BLST_P1_COMPRESS_BYTES-byte compressed encoding
// of the projective point.
func (p1 *P1) Compress() []byte {
	out := make([]byte, BLST_P1_COMPRESS_BYTES)
	C.blst_p1_compress((*C.byte)(&out[0]), &p1.cgo)
	return out
}
// MultAssign multiplies the point in place by a scalar. The scalar may
// be a []byte (bit-width defaults to 8*len) or a *Scalar (defaults to
// 255 bits); the optional argument overrides the bit-width.
func (p1 *P1) MultAssign(scalarIf interface{}, optional ...int) *P1 {
	var width int
	var sptr *C.byte
	switch s := scalarIf.(type) {
	case []byte:
		sptr, width = (*C.byte)(&s[0]), len(s)*8
	case *Scalar:
		sptr, width = &s.cgo.b[0], 255
	default:
		panic(fmt.Sprintf("unsupported type %T", s))
	}
	if len(optional) > 0 {
		width = optional[0]
	}
	C.blst_p1_mult(&p1.cgo, &p1.cgo, sptr, C.size_t(width))
	return p1
}
// Mult returns a new point equal to p1 times the scalar; p1 itself is
// left untouched.
func (p1 *P1) Mult(scalarIf interface{}, optional ...int) *P1 {
	tmp := *p1
	return tmp.MultAssign(scalarIf, optional...)
}
// AddAssign adds a point (projective *P1 or affine *P1Affine) to the
// receiver in place.
func (p1 *P1) AddAssign(pointIf interface{}) *P1 {
	switch point := pointIf.(type) {
	case *P1:
		C.blst_p1_add_or_double(&p1.cgo, &p1.cgo, &point.cgo)
	case *P1Affine:
		C.blst_p1_add_or_double_affine(&p1.cgo, &p1.cgo, &point.cgo)
	default:
		panic(fmt.Sprintf("unsupported type %T", point))
	}
	return p1
}
// Add returns a new point equal to p1 plus the argument; p1 itself is
// left untouched.
func (p1 *P1) Add(pointIf interface{}) *P1 {
	tmp := *p1
	return tmp.AddAssign(pointIf)
}
// SubAssign subtracts a point (projective *P1 or affine *P1Affine)
// from the receiver in place.
func (p1 *P1) SubAssign(pointIf interface{}) *P1 {
	var xCoord *C.blst_fp
	var isAffine C.bool
	switch point := pointIf.(type) {
	case *P1:
		xCoord, isAffine = &point.cgo.x, false
	case *P1Affine:
		xCoord, isAffine = &point.cgo.x, true
	default:
		panic(fmt.Sprintf("unsupported type %T", point))
	}
	C.go_p1_sub_assign(&p1.cgo, xCoord, isAffine)
	return p1
}
// Sub returns a new point equal to p1 minus the argument; p1 itself is
// left untouched.
func (p1 *P1) Sub(pointIf interface{}) *P1 {
	tmp := *p1
	return tmp.SubAssign(pointIf)
}
// P1Generator returns a pointer to the package-level G1 generator
// point. NOTE(review): all callers share the same underlying value and
// must not mutate it.
func P1Generator() *P1 {
	return &cgo_p1Generator
}
// 'acc += point * scalar', passing 'nil' for 'point' means "use the
// group generator point"
//
// The point may be a *P1 or *P1Affine. The scalar may be a []byte
// (bit-width defaults to 8*len) or a *Scalar (defaults to 255 bits);
// the optional argument overrides the bit-width.
func (acc *P1) MultNAccumulate(pointIf interface{}, scalarIf interface{},
	optional ...int) *P1 {
	// |x| stays nil when pointIf is nil, signalling "use generator"
	// to the C helper.
	var x *C.blst_fp
	var affine C.bool
	if pointIf != nil {
		switch val := pointIf.(type) {
		case *P1:
			x = &val.cgo.x
			affine = false
		case *P1Affine:
			x = &val.cgo.x
			affine = true
		default:
			panic(fmt.Sprintf("unsupported type %T", val))
		}
	}
	var nbits int
	var scalar *C.byte
	switch val := scalarIf.(type) {
	case []byte:
		scalar = (*C.byte)(&val[0])
		nbits = len(val)*8
	case *Scalar:
		scalar = &val.cgo.b[0]
		nbits = 255
	default:
		panic(fmt.Sprintf("unsupported type %T", val))
	}
	if len(optional) > 0 {
		nbits = optional[0]
	}
	C.go_p1_mult_n_acc(&acc.cgo, x, affine, scalar, C.size_t(nbits))
	return acc
}
//
// Affine
//
// ToAffine converts the projective point to a freshly allocated affine
// representation.
func (p *P1) ToAffine() *P1Affine {
	ret := new(P1Affine)
	C.blst_p1_to_affine(&ret.cgo, &p.cgo)
	return ret
}
// FromAffine overwrites the receiver with the projective form of the
// affine point |pa|.
func (p *P1) FromAffine(pa *P1Affine) {
	C.blst_p1_from_affine(&p.cgo, &pa.cgo)
}
//
// Hash
//
// HashToG1 hashes |msg| to a G1 point under the domain-separation tag
// |dst|. The optional trailing argument is the augmentation string.
func HashToG1(msg []byte, dst []byte,
	optional ...[]byte) *P1 { // aug
	aug := []byte(nil)
	if len(optional) > 0 {
		aug = optional[0]
	}
	ret := new(P1)
	C.blst_hash_to_g1(&ret.cgo, ptrOrNil(msg), C.size_t(len(msg)),
		ptrOrNil(dst), C.size_t(len(dst)),
		ptrOrNil(aug), C.size_t(len(aug)))
	return ret
}
// EncodeToG1 encodes |msg| to a G1 point under the domain-separation
// tag |dst|. The optional trailing argument is the augmentation
// string.
func EncodeToG1(msg []byte, dst []byte,
	optional ...[]byte) *P1 { // aug
	aug := []byte(nil)
	if len(optional) > 0 {
		aug = optional[0]
	}
	ret := new(P1)
	C.blst_encode_to_g1(&ret.cgo, ptrOrNil(msg), C.size_t(len(msg)),
		ptrOrNil(dst), C.size_t(len(dst)),
		ptrOrNil(aug), C.size_t(len(aug)))
	return ret
}
//
// Multi-point/scalar operations
//
// P1sToAffine batch-converts a slice of projective point pointers to
// affine form. The optional argument overrides the number of points
// taken from |points|.
func P1sToAffine(points []*P1, optional ...int) P1Affines {
	var npoints int
	if len(optional) > 0 {
		npoints = optional[0]
	} else {
		npoints = len(points)
	}
	ret := make([]P1Affine, npoints)
	// Shadowing _cgoCheckPointer suppresses cgo's runtime pointer
	// check, which would otherwise reject the Go-pointer-to-Go-pointer
	// argument below. NOTE(review): relies on cgo code-gen internals.
	_cgoCheckPointer := func(...interface{}) {}
	C.blst_p1s_to_affine(&ret[0].cgo, (**C.blst_p1)(unsafe.Pointer(&points[0])),
		C.size_t(npoints))
	return ret
}
// ToAffine batch-converts the slice of projective points to affine
// form. An optional pre-allocated destination may be supplied (used in
// benchmarks). Large batches are split into ~512-point slices that are
// converted on separate goroutines.
func (points P1s) ToAffine(optional ...P1Affines) P1Affines {
	npoints := len(points)
	var ret P1Affines
	if len(optional) > 0 { // used in benchmark
		ret = optional[0]
		if len(ret) < npoints {
			panic("npoints mismatch")
		}
	} else {
		ret = make([]P1Affine, npoints)
	}
	// Serial path for small batches or single-CPU setups.
	if maxProcs < 2 || npoints < 768 {
		C.go_p1slice_to_affine(&ret[0].cgo, &points[0].cgo, C.size_t(npoints))
		return ret
	}
	nslices := (npoints + 511) / 512
	if nslices > maxProcs {
		nslices = maxProcs
	}
	// First |rem| slices get |delta| points, the remaining ones get
	// |delta|-1, so the work divides evenly.
	delta, rem := npoints/nslices + 1, npoints%nslices
	var wg sync.WaitGroup
	wg.Add(nslices)
	for x := 0; x < npoints; x += delta {
		if rem == 0 {
			delta -= 1
		}
		rem -= 1
		go func(out *P1Affine, inp *P1, delta int) {
			C.go_p1slice_to_affine(&out.cgo, &inp.cgo, C.size_t(delta))
			wg.Done()
		}(&ret[x], &points[x], delta)
	}
	wg.Wait()
	return ret
}
//
// Batch addition
//
// P1AffinesAdd sums a slice of affine point pointers. The optional
// argument overrides the number of points taken from |points|.
func P1AffinesAdd(points []*P1Affine, optional ...int) *P1 {
	var npoints int
	if len(optional) > 0 {
		npoints = optional[0]
	} else {
		npoints = len(points)
	}
	var ret P1
	// Shadowing _cgoCheckPointer suppresses cgo's runtime pointer
	// check for the Go-pointer-to-Go-pointer argument below.
	// NOTE(review): relies on cgo code-gen internals.
	_cgoCheckPointer := func(...interface{}) {}
	C.blst_p1s_add(&ret.cgo, (**C.blst_p1_affine)(unsafe.Pointer(&points[0])),
		C.size_t(npoints))
	return &ret
}
// Add sums all points in the slice. Large batches are split into
// ~512-point slices summed on separate goroutines, then the partial
// sums are folded together.
func (points P1Affines) Add() *P1 {
	npoints := len(points)
	// Serial path for small batches or single-CPU setups.
	if maxProcs < 2 || npoints < 768 {
		var ret P1
		C.go_p1slice_add(&ret.cgo, &points[0].cgo, C.size_t(npoints))
		return &ret
	}
	nslices := (npoints + 511) / 512
	if nslices > maxProcs {
		nslices = maxProcs
	}
	// First |rem| slices get |delta| points, the remaining ones get
	// |delta|-1, so the work divides evenly.
	delta, rem := npoints/nslices + 1, npoints%nslices
	msgs := make(chan P1, nslices)
	for x := 0; x < npoints; x += delta {
		if rem == 0 {
			delta -= 1
		}
		rem -= 1
		go func(points *P1Affine, delta int) {
			var ret P1
			C.go_p1slice_add(&ret.cgo, &points.cgo, C.size_t(delta))
			msgs <- ret
		}(&points[x], delta)
	}
	// Fold the per-slice partial sums as they arrive.
	ret := <- msgs
	for i := 1; i < nslices; i++ {
		msg := <- msgs
		C.blst_p1_add_or_double(&ret.cgo, &ret.cgo, &msg.cgo)
	}
	return &ret
}
// Add sums a slice of projective points by batch-converting to affine
// first, then using the affine batch-addition path.
func (points P1s) Add() *P1 {
	return points.ToAffine().Add()
}
//
// Multi-scalar multiplication
//
// P1AffinesMult computes the multi-scalar multiplication
// sum(points[i] * scalars[i]) over |npoints| inputs, each scalar taken
// as |nbits| bits wide. Points may be passed as []*P1Affine,
// []P1Affine or P1Affines; scalars as a flat []byte, [][]byte,
// []Scalar or []*Scalar. Returns nil when the scalar slice is too
// short for the point count. Three execution strategies are used:
// single-threaded Pippenger, a naive parallel loop for fewer than 32
// points, and a parallel tiled Pippenger grid otherwise.
func P1AffinesMult(pointsIf interface{}, scalarsIf interface{}, nbits int) *P1 {
	// Determine the point count from whichever slice form was passed.
	var npoints int
	switch val := pointsIf.(type) {
	case []*P1Affine:
		npoints = len(val)
	case []P1Affine:
		npoints = len(val)
	case P1Affines:
		npoints = len(val)
	default:
		panic(fmt.Sprintf("unsupported type %T", val))
	}
	nbytes := (nbits+7)/8
	// For the indirect scalar forms, build an array of per-scalar
	// pointers up front; the C entry points consume pointer arrays.
	// For []Scalar this is only done for nbits <= 248; wider scalars
	// are passed as one contiguous run (see the nbits > 248 branches
	// below).
	var scalars []*C.byte
	switch val := scalarsIf.(type) {
	case []byte:
		if len(val) < npoints*nbytes {
			return nil
		}
	case [][]byte:
		if len(val) < npoints {
			return nil
		}
		scalars = make([]*C.byte, npoints)
		for i := range scalars {
			scalars[i] = (*C.byte)(&val[i][0])
		}
	case []Scalar:
		if len(val) < npoints {
			return nil
		}
		if nbits <= 248 {
			scalars = make([]*C.byte, npoints)
			for i := range scalars {
				scalars[i] = &val[i].cgo.b[0]
			}
		}
	case []*Scalar:
		if len(val) < npoints {
			return nil
		}
		scalars = make([]*C.byte, npoints)
		for i := range scalars {
			scalars[i] = &val[i].cgo.b[0]
		}
	default:
		panic(fmt.Sprintf("unsupported type %T",val))
	}
	numThreads := numThreads(0)
	if numThreads < 2 {
		// Single-threaded: one Pippenger call over the whole input.
		sz := int(C.blst_p1s_mult_pippenger_scratch_sizeof(C.size_t(npoints)))/8
		scratch := make([]uint64, sz)
		// A two-element pointer array emulates a "contiguous run"
		// argument for the C side (second element nil terminates).
		pointsBySlice := [2]*C.blst_p1_affine{nil, nil}
		var p_points **C.blst_p1_affine
		switch val := pointsIf.(type) {
		case []*P1Affine:
			p_points = (**C.blst_p1_affine)(unsafe.Pointer(&val[0]))
		case []P1Affine:
			pointsBySlice[0] = &val[0].cgo
			p_points = &pointsBySlice[0]
		case P1Affines:
			pointsBySlice[0] = &val[0].cgo
			p_points = &pointsBySlice[0]
		default: // type is already vetted
		}
		scalarsBySlice := [2]*C.byte{nil, nil}
		var p_scalars **C.byte
		switch val := scalarsIf.(type) {
		case []byte:
			scalarsBySlice[0] = (*C.byte)(&val[0])
			p_scalars = &scalarsBySlice[0]
		case [][]byte:
			p_scalars = &scalars[0]
		case []Scalar:
			if nbits > 248 {
				scalarsBySlice[0] = &val[0].cgo.b[0]
				p_scalars = &scalarsBySlice[0]
			} else {
				p_scalars = &scalars[0]
			}
		case []*Scalar:
			p_scalars = &scalars[0]
		default: // type is already vetted
		}
		var ret P1
		// Suppress cgo's pointer check for the nested pointers above.
		_cgoCheckPointer := func(...interface{}) {}
		C.blst_p1s_mult_pippenger(&ret.cgo, p_points, C.size_t(npoints),
			p_scalars, C.size_t(nbits),
			(*C.limb_t)(&scratch[0]))
		// Clear the C pointers so the referenced Go memory is not kept
		// reachable through this slice.
		for i := range(scalars) {
			scalars[i] = nil
		}
		return &ret
	}
	if npoints < 32 {
		// Small batch: naive parallel mult-and-accumulate, then fold
		// the per-worker accumulators.
		if numThreads > npoints {
			numThreads = npoints
		}
		curItem := uint32(0)
		msgs := make(chan P1, numThreads)
		for tid := 0; tid < numThreads; tid++ {
			go func() {
				var acc P1
				for {
					workItem := int(atomic.AddUint32(&curItem, 1) - 1)
					if workItem >= npoints {
						break
					}
					var point *P1Affine
					switch val := pointsIf.(type) {
					case []*P1Affine:
						point = val[workItem]
					case []P1Affine:
						point = &val[workItem]
					case P1Affines:
						point = &val[workItem]
					default: // type is already vetted
					}
					var scalar *C.byte
					switch val := scalarsIf.(type) {
					case []byte:
						scalar = (*C.byte)(&val[workItem*nbytes])
					case [][]byte:
						scalar = scalars[workItem]
					case []Scalar:
						if nbits > 248 {
							scalar = &val[workItem].cgo.b[0]
						} else {
							scalar = scalars[workItem]
						}
					case []*Scalar:
						scalar = scalars[workItem]
					default: // type is already vetted
					}
					C.go_p1_mult_n_acc(&acc.cgo, &point.cgo.x, true,
						scalar, C.size_t(nbits))
				}
				msgs <- acc
			}()
		}
		ret := <-msgs
		for tid := 1; tid < numThreads; tid++ {
			point := <- msgs
			C.blst_p1_add_or_double(&ret.cgo, &ret.cgo, &point.cgo);
		}
		for i := range(scalars) {
			scalars[i] = nil
		}
		return &ret
	}
	// Parallel tiled Pippenger: the (point x bit-window) space is cut
	// into an nx-by-ny grid of tiles processed by a worker pool.
	// this is sizeof(scratch[0])
	sz := int(C.blst_p1s_mult_pippenger_scratch_sizeof(0))/8
	nx, ny, window := breakdown(nbits, pippenger_window_size(npoints),
		numThreads)
	// |grid[]| holds "coordinates" and place for result
	grid := make([]struct { x, dx, y, dy int
		point P1 }, nx*ny)
	dx := npoints/nx
	y := window*(ny-1)
	total := 0
	// Top "row" first: it may be narrower than |window| bits.
	for ; total < nx; total++ {
		grid[total].x = total*dx
		grid[total].dx = dx
		grid[total].y = y
		grid[total].dy = nbits - y
	}
	grid[total-1].dx = npoints - grid[total-1].x
	for y > 0 {
		y -= window
		for i := 0; i < nx; i++ {
			grid[total].x = grid[i].x
			grid[total].dx = grid[i].dx
			grid[total].y = y
			grid[total].dy = window
			total++
		}
	}
	if numThreads > total {
		numThreads = total
	}
	msgsCh := make(chan int, ny)
	rowSync := make([]int32, ny) // count up to |nx|
	curItem := int32(0)
	for tid := 0; tid < numThreads; tid++ {
		go func() {
			scratch := make([]uint64, sz << uint(window-1))
			pointsBySlice := [2]*C.blst_p1_affine{nil, nil}
			scalarsBySlice := [2]*C.byte{nil, nil}
			// Suppress cgo's pointer check for the nested pointers.
			_cgoCheckPointer := func(...interface{}) {}
			for {
				workItem := atomic.AddInt32(&curItem, 1) - 1
				if int(workItem) >= total {
					break
				}
				x := grid[workItem].x
				y := grid[workItem].y
				var p_points **C.blst_p1_affine
				switch val := pointsIf.(type) {
				case []*P1Affine:
					p_points = (**C.blst_p1_affine)(unsafe.Pointer(&val[x]))
				case []P1Affine:
					pointsBySlice[0] = &val[x].cgo
					p_points = &pointsBySlice[0]
				case P1Affines:
					pointsBySlice[0] = &val[x].cgo
					p_points = &pointsBySlice[0]
				default: // type is already vetted
				}
				var p_scalars **C.byte
				switch val := scalarsIf.(type) {
				case []byte:
					scalarsBySlice[0] = (*C.byte)(&val[x*nbytes])
					p_scalars = &scalarsBySlice[0]
				case [][]byte:
					p_scalars = &scalars[x]
				case []Scalar:
					if nbits > 248 {
						scalarsBySlice[0] = &val[x].cgo.b[0]
						p_scalars = &scalarsBySlice[0]
					} else {
						p_scalars = &scalars[x]
					}
				case []*Scalar:
					p_scalars = &scalars[x]
				default: // type is already vetted
				}
				C.blst_p1s_tile_pippenger(&grid[workItem].point.cgo,
					p_points, C.size_t(grid[workItem].dx),
					p_scalars, C.size_t(nbits),
					(*C.limb_t)(&scratch[0]),
					C.size_t(y), C.size_t(window));
				if atomic.AddInt32(&rowSync[y/window], 1) == int32(nx) {
					msgsCh <- y // "row" is done
				} else {
					runtime.Gosched() // be nice to the application
				}
			}
			pointsBySlice[0] = nil
			scalarsBySlice[0] = nil
		}()
	}
	// Accumulate rows top-down: add a finished row's tiles, then shift
	// the accumulator left by |window| bits before the next row.
	var ret P1
	rows := make([]bool, ny)
	row := 0 // actually index in |grid[]|
	for i := 0; i < ny; i++ { // we expect |ny| messages, one per "row"
		y := <- msgsCh
		rows[y/window] = true // mark the "row"
		for grid[row].y == y { // if it's current "row", process it
			for row < total && grid[row].y == y {
				C.blst_p1_add_or_double(&ret.cgo, &ret.cgo, &grid[row].point.cgo)
				row++
			}
			if y == 0 {
				break // one can as well 'return &ret' here
			}
			for j := 0; j < window; j++ {
				C.blst_p1_double(&ret.cgo, &ret.cgo)
			}
			y -= window
			if !rows[y/window] { // see if next "row" was marked already
				break
			}
		}
	}
	for i := range(scalars) {
		scalars[i] = nil
	}
	return &ret
}
// Mult computes the multi-scalar multiplication of the slice with the
// given scalars; see P1AffinesMult for accepted scalar forms.
func (points P1Affines) Mult(scalarsIf interface{}, nbits int) *P1 {
	return P1AffinesMult(points, scalarsIf, nbits)
}
// Mult batch-converts the projective points to affine and performs the
// multi-scalar multiplication on the affine slice.
func (points P1s) Mult(scalarsIf interface{}, nbits int) *P1 {
	return points.ToAffine().Mult(scalarsIf, nbits)
}
//
// Group-check
//
// P1AffinesValidate group-checks every point, in parallel when more
// than one thread is available. Accepts []*P1Affine, []P1Affine or
// P1Affines. Returns false if any point fails validation.
func P1AffinesValidate(pointsIf interface{}) bool {
	var npoints int
	switch val := pointsIf.(type) {
	case []*P1Affine:
		npoints = len(val)
	case []P1Affine:
		npoints = len(val)
	case P1Affines:
		npoints = len(val)
	default:
		panic(fmt.Sprintf("unsupported type %T", val))
	}
	numThreads := numThreads(npoints)
	if numThreads < 2 {
		// Serial path.
		for i := 0; i < npoints; i++ {
			var point *P1Affine
			switch val := pointsIf.(type) {
			case []*P1Affine:
				point = val[i]
			case []P1Affine:
				point = &val[i]
			case P1Affines:
				point = &val[i]
			default:
				panic(fmt.Sprintf("unsupported type %T", val))
			}
			if !C.go_p1_affine_validate(&point.cgo, true) {
				return false
			}
		}
		return true
	}
	// Parallel path: workers claim indices via an atomic counter and
	// clear |valid| on the first failure, stopping the other workers.
	valid := int32(1)
	curItem := uint32(0)
	var wg sync.WaitGroup
	wg.Add(numThreads)
	for tid := 0; tid < numThreads; tid++ {
		go func() {
			for atomic.LoadInt32(&valid) != 0 {
				work := atomic.AddUint32(&curItem, 1) - 1
				if work >= uint32(npoints) {
					break
				}
				var point *P1Affine
				switch val := pointsIf.(type) {
				case []*P1Affine:
					point = val[work]
				case []P1Affine:
					point = &val[work]
				case P1Affines:
					point = &val[work]
				default:
					panic(fmt.Sprintf("unsupported type %T", val))
				}
				if !C.go_p1_affine_validate(&point.cgo, true) {
					atomic.StoreInt32(&valid, 0)
					break
				}
			}
			wg.Done()
		}()
	}
	wg.Wait()
	return atomic.LoadInt32(&valid) != 0
}
// Validate group-checks every point in the slice.
func (points P1Affines) Validate() bool {
	return P1AffinesValidate(points)
}
================================================
FILE: bindings/go/blst_wasm.go
================================================
//go:build wasm
package not_supported
================================================
FILE: bindings/go/cgo_assembly.S
================================================
#include "assembly.S"
================================================
FILE: bindings/go/cgo_server.c
================================================
#include "server.c"
================================================
FILE: bindings/go/generate.py
================================================
#!/usr/bin/env python3
import os
import sys
import re
import subprocess

# Run from the directory this script lives in, so the relative .tgo
# file names below resolve regardless of the caller's cwd.
here = re.split(r'/(?=[^/]*$)', sys.argv[0])
if len(here) > 1:
    os.chdir(here[0])

# Locate the 'goimports' binary on $GOPATH. Default an unset GOPATH to
# the empty string: re.split(None) raises TypeError otherwise.
for dir in re.split(r':', os.getenv("GOPATH") or ""):
    goimports = dir + "/bin/goimports"
    if os.path.isfile(goimports) and os.access(goimports, os.X_OK):
        break
    goimports = None

if goimports is None:
    # Not found: tell the user how to install it, with the command
    # appropriate for their Go toolchain version.
    version = subprocess.check_output(["go", "version"]).decode('ascii')
    v = re.search(r'version go([0-9]+)\.([0-9]+)', version)
    if not v:
        raise OSError(2, "unparseable output from 'go version'")
    # Compare (major, minor) as integers: a float comparison such as
    # float("1.8") < 1.17 would misclassify go1.8 as newer than go1.17.
    if (int(v.group(1)), int(v.group(2))) < (1, 17):
        advice = "'go get golang.org/x/tools/cmd/goimports'"
    else:
        advice = "'go install golang.org/x/tools/cmd/goimports@latest'"
    print("'goimports' is not found on $GOPATH, install with", file=sys.stderr)
    print(advice, file=sys.stderr)
    sys.exit(1)

outFile = 'blst.go'
def concatFile(fout, fin, removeImports):
    """Copy every line of |fin| to |fout|.

    When |removeImports| is true, any line containing 'import' is
    dropped together with all following lines up to and including the
    one containing ')' (i.e. a Go parenthesised import block).
    """
    for line in fin:
        if removeImports and 'import' in line:
            # Skip forward to the closing parenthesis of the block.
            while ')' not in line:
                line = fin.readline()
        else:
            fout.write(line)
def remap(fout, fin, mapping, dont_touch, removeImports):
    """Copy |fin| to |fout| while symmetrically swapping each (a, b)
    pair in |mapping| (a becomes b and b becomes a).

    Strings listed in |dont_touch| are protected by temporarily
    rewriting them to their unique sentinel. Parenthesised Go import
    blocks are dropped when |removeImports| is set.
    """
    def swap(text, a, b):
        # Swap a <-> b through unique "_tmp" markers so the second
        # replacement cannot clobber the result of the first.
        text = text.replace(a, a + "_tmp").replace(b, b + "_tmp")
        return text.replace(a + "_tmp", b).replace(b + "_tmp", a)

    for line in fin:
        if removeImports and 'import' in line:
            while ')' not in line:
                line = fin.readline()
            continue
        # Shield the strings that must not be renamed.
        for (a, b) in dont_touch:
            line = line.replace(a, b)
        for (a, b) in mapping:
            line = swap(line, a, b)
        # Restore the protected strings.
        for (a, b) in dont_touch:
            line = line.replace(b, a)
        fout.write(line)
# Assemble blst.go: common prelude, then the min-pk variant, then the
# min-sig variant produced by name-swapping, then shared serdes code
# for both point groups, then the trailing misc code.
fout = open(outFile, "w")
print("// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!", file=fout)
print("// DO NOT MODIFY THIS FILE!!", file=fout)
print("// The file is generated from *.tgo by " + here[-1], file=fout)
print("// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!", file=fout)
fin = open('blst.tgo', "r")
concatFile(fout, fin, False)
fin.close()
# min-pk
print("//", file=fout)
print("// MIN-PK", file=fout)
print("//", file=fout)
fin = open('blst_minpk.tgo', "r")
concatFile(fout, fin, True)
fin.close()
# These are strings that overlap with the mapping names but we don't
# actually want to change. The second value should be a unique string.
dont_touch = (('Fp12', 'foo1234'),)
# We're going to swap these names to get from min-pk to min-sig
mapping = [('P1', 'P2'),
           ('p1', 'p2'),
           ('Fp', 'Fp2'),
           ('C.blst_fp', 'C.blst_fp2'),
           ('G1', 'G2'),
           ('g1', 'g2')
           ]
# min-sig
print("//", file=fout)
print("// MIN-SIG", file=fout)
print("//", file=fout)
with open('blst_minpk.tgo', "r") as fin:
    remap(fout, fin, mapping, dont_touch, True)
# serdes and other functions
# The P1 serdes code is emitted twice: verbatim, then name-swapped to
# yield the P2 equivalents.
fin = open('blst_px.tgo', "r")
concatFile(fout, fin, True)
fin.close()
with open('blst_px.tgo', "r") as fin:
    remap(fout, fin, mapping, dont_touch, True)
# final code
fin = open('blst_misc.tgo', "r")
concatFile(fout, fin, True)
fin.close()
fout.close()
# Use goimports to generate the import list
os.system(goimports + " -w blst.go")
# Generate min-sig tests
fout = open('blst_minsig_test.go', "w")
print("// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!", file=fout)
print("// DO NOT EDIT THIS FILE!!", file=fout)
print("// The file is generated from blst_minpk_test.go by " + here[-1], file=fout)
print("// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!", file=fout)
mapping.append(('MinPk', 'MinSig'))
with open('blst_minpk_test.go', "r") as fin:
    remap(fout, fin, mapping, dont_touch, False)
fout.close()
================================================
FILE: bindings/go/rb_tree.go
================================================
/*
* Copyright Supranational LLC
* Licensed under the Apache License, Version 2.0, see LICENSE for details.
* SPDX-License-Identifier: Apache-2.0
*/
/*
* Reimplement rb_tree.c, because C.call overhead is too high in
* comparison to tree insertion subroutine.
*/
package blst
import "bytes"
/*
* Red-black tree tailored for uniqueness test. Amount of messages to be
* checked is known prior context initialization, implementation is
* insert-only, failure is returned if message is already in the tree.
*/
// Node colours.
const red, black bool = true, false

// node is a single tree node; children are indexed by direction
// (0 = left, 1 = right) so the symmetric rebalancing cases share code.
type node struct {
	leafs  [2]*node
	data   *[]byte // message bytes, owned by the caller
	colour bool    // red or black
}

// rbTree is an insert-only red-black tree. All nodes come from the
// pre-allocated |nodes| arena (sized up front by Uniq), so no
// per-insert allocation happens.
type rbTree struct {
	root   *node
	nnodes uint // next free index in |nodes|
	nodes  []node
}
// insert adds |data| to the tree, returning false if an equal message
// is already present (or if the walk exceeds the 64-level bound, which
// cannot happen for a balanced tree of the arena's size). Rebalancing
// follows the classic red-black insertion fix-up, using the recorded
// path of visited nodes and directions instead of parent pointers.
func (tree *rbTree) insert(data *[]byte) bool {
	var nodes [64]*node /* visited nodes */
	var dirs [64]byte   /* taken directions */
	var k uint          /* walked distance */
	for p := tree.root; p != nil && k < 64; k++ {
		cmp := bytes.Compare(*data, *p.data)
		if cmp == 0 {
			return false /* already in tree, no insertion */
		}
		/* record the step */
		nodes[k] = p
		if cmp > 0 {
			dirs[k] = 1
		} else {
			dirs[k] = 0
		}
		p = p.leafs[dirs[k]]
	}
	if k == 64 {
		return false
	}
	/* allocate new node */
	z := &tree.nodes[tree.nnodes]; tree.nnodes++
	z.data = data
	z.colour = red
	/* graft |z| */
	if k > 0 {
		nodes[k-1].leafs[dirs[k-1]] = z
	} else {
		tree.root = z
	}
	/* re-balance |tree| */
	for k >= 2 /* && IS_RED(y = nodes[k-1]) */ {
		y := nodes[k-1]
		if y.colour == black { //nolint:staticcheck
			break
		}
		ydir := dirs[k-2]
		x := nodes[k-2]      /* |z|'s grandparent */
		s := x.leafs[ydir^1] /* |z|'s uncle */
		if s != nil && s.colour == red { //nolint:staticcheck,revive
			/* red uncle: recolour and continue two levels up */
			x.colour = red
			y.colour = black
			s.colour = black
			k -= 2
		} else {
			if dirs[k-1] != ydir {
				/* inner child: rotate |y| to reduce to outer case */
				/* |                        |
				 * x                        x
				 * / \                      \
				 * y   s        ->         z   s
				 * \                      /
				 *  z                    y
				 * / \
				 * ?   ?
				 */
				t := y
				y = y.leafs[ydir^1]
				t.leafs[ydir^1] = y.leafs[ydir]
				y.leafs[ydir] = t
			}
			/* outer child: rotate grandparent and recolour, done */
			/* |                        |
			 * x                        y
			 * \                       / \
			 *  y   s       ->        z   x
			 * / \                       / \
			 * z   ?                    ?   s
			 */
			x.leafs[ydir] = y.leafs[ydir^1]
			y.leafs[ydir^1] = x
			x.colour = red
			y.colour = black
			if k > 2 {
				nodes[k-3].leafs[dirs[k-3]] = y
			} else {
				tree.root = y
			}
			break
		}
	}
	tree.root.colour = black
	return true
}
// Uniq reports whether all messages in |msgs| are pairwise distinct.
// One- and two-element inputs are special-cased; anything larger goes
// through an insert-only red-black tree that rejects duplicates.
func Uniq(msgs []Message) bool {
	switch n := len(msgs); n {
	case 1:
		return true
	case 2:
		return !bytes.Equal(msgs[0], msgs[1])
	default:
		var tree rbTree
		tree.nodes = make([]node, n)
		for i := range msgs {
			if !tree.insert(&msgs[i]) {
				return false
			}
		}
		return true
	}
}
================================================
FILE: bindings/rust/Cargo.toml
================================================
[package]
name = "blst"
version = "0.3.16"
authors = ["sean-sn "]
edition = "2018"
license = "Apache-2.0"
description = "Bindings for blst BLS12-381 library"
repository = "https://github.com/supranational/blst"
readme = "README.md"
categories = ["cryptography"]
keywords = ["crypto", "bls", "signature", "asm", "wasm"]
include = [
"**/*.rs",
"/Cargo.toml",
"/README.md",
"/rustfmt.toml",
"/blst/src/*.c",
"/blst/src/*.h*",
"/blst/build/**",
"/blst/bindings/blst.h",
"/blst/bindings/blst_aux.h",
"/blst/bindings/blst.hpp",
]
links = "blst"
[features]
# By default, compile with ADX extension if the host supports it.
# Binary can be executed on systems similar to the host.
default = []
# Compile in portable mode, without ISA extensions.
# Binary can be executed on all systems.
portable = []
# Enable ADX even if the host CPU doesn't support it.
# Binary can be executed on Broadwell+ and Ryzen+ systems.
force-adx = []
# Suppress multi-threading.
# Engaged on wasm32 target architecture automatically.
no-threads = []
# Add support for serializing SecretKey, not suitable for production.
serde-secret = ["serde"]
[build-dependencies]
cc = "1.0"
[target.'cfg(target_env = "msvc")'.build-dependencies]
glob = "0.3"
[dependencies]
zeroize = { version = "^1.1", features = ["zeroize_derive"] }
serde = { version = "1.0.152", optional = true }
[target.'cfg(not(any(target_arch="wasm32", target_os="none", target_os="unknown", target_os="uefi")))'.dependencies]
threadpool = "^1.8.1"
[dev-dependencies]
rand = "0.8"
rand_chacha = "0.3"
rmp-serde = "1.1.1"
# Uncomment if you want to execute the test suite with Rust 1.56 through 1.64.
#byteorder = "=1.4.3"
#rmp = "=0.8.12"
#ppv-lite86 = "=0.2.17"
[target.'cfg(any(unix, windows))'.dev-dependencies]
criterion = "0.3"
[[bench]]
name = "blst_benches"
harness = false
[profile.release]
#opt-level = 3
[badges]
maintenance = { status = "actively-developed" }
================================================
FILE: bindings/rust/README.md
================================================
# blst [![Crates.io](https://img.shields.io/crates/v/blst.svg)](https://crates.io/crates/blst)
The `blst` crate provides a rust interface to the blst BLS12-381 signature library.
## Build
[bindgen](https://github.com/rust-lang/rust-bindgen) is used to generate FFI bindings to blst.h. Then [build.rs](https://github.com/supranational/blst/blob/master/bindings/rust/build.rs) invokes C compiler to compile everything into libblst.a within the rust target build area. On Linux it's possible to choose compiler by setting `CC` environment variable.
Everything can be built and run with the typical cargo commands:
```
cargo test
cargo bench
```
If the target application crashes with an "illegal instruction" exception [after copying to an older system], activate `portable` feature when building blst. Conversely, if you compile on an older Intel system, but will execute the binary on a newer one, consider instead activating `force-adx` feature. Though keep in mind that [cc](https://crates.io/crates/cc) passes the value of `CFLAGS` environment variable to the C compiler, and if set to contain specific flags, it can interfere with feature selection. `-D__BLST_PORTABLE__` and `-D__ADX__` are the said features' equivalents.
To compile for WebAssembly, your clang has to recognize `--target=wasm32`. Alternatively you can build your project with `CC` environment variable set to `emcc`, the [Emscripten compiler](https://emscripten.org), and `AR` set to `emar`, naturally, with both commands available on your `PATH`.
While `cargo test`'s dependencies happen to require at least Rust 1.65, the library by itself can be compiled with earlier compiler versions. Though in order to use a Rust version prior to 1.56 you would need to pin `zeroize` to "=1.3.0" and `zeroize_derive` to "=1.3.3" in **your** project's Cargo.toml. Even `cc` might require pinning to "=1.0.79". And if you find yourself with Rust 1.56 through 1.64 as the only option and want to execute `cargo test`, you'd need to pin some of the `[dev-dependencies]` versions in **this** project's Cargo.toml by uncommenting the following lines and commenting out `criterion`:
```
byteorder = "=1.4.3"
ppv-lite86 = "=0.2.17"
rmp = "=0.8.12"
[target.'cfg(any(unix, windows))'.dev-dependencies]
#criterion = "0.3"
```
## Usage
There are two primary modes of operation that can be chosen based on declaration path:
For minimal-pubkey-size operations:
```rust
use blst::min_pk::*;
```
For minimal-signature-size operations:
```rust
use blst::min_sig::*;
```
There are five structs with inherent implementations that provide the BLS12-381 signature functionality.
```
SecretKey
PublicKey
AggregatePublicKey
Signature
AggregateSignature
```
A simple example for generating a key, signing a message, and verifying the message:
```rust
use blst::min_pk::SecretKey;
let mut rng = rand::thread_rng();
let mut ikm = [0u8; 32];
rng.fill_bytes(&mut ikm);
let sk = SecretKey::key_gen(&ikm, &[]).unwrap();
let pk = sk.sk_to_pk();
let dst = b"BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_NUL_";
let msg = b"blst is such a blast";
let sig = sk.sign(msg, dst, &[]);
let err = sig.verify(true, msg, dst, &[], &pk, true);
assert_eq!(err, blst::BLST_ERROR::BLST_SUCCESS);
```
See the tests in src/lib.rs and benchmarks in benches/blst_benches.rs for further examples of usage.
================================================
FILE: bindings/rust/benches/blst_benches.rs
================================================
// Copyright Supranational LLC
// Licensed under the Apache License, Version 2.0, see LICENSE for details.
// SPDX-License-Identifier: Apache-2.0
use blst::*;
// Benchmark min_pk
use blst::min_pk::*;
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use rand::{RngCore, SeedableRng};
use rand_chacha::ChaCha20Rng;
struct BenchData {
sk: SecretKey,
pk: PublicKey,
msg: Vec,
dst: Vec,
sig: Signature,
}
/// Build a benchmark fixture signing a fresh random message of 1..=64 bytes.
fn gen_bench_data(rng: &mut rand_chacha::ChaCha20Rng) -> BenchData {
    let len = (rng.next_u64() & 0x3F) as usize + 1;
    let mut msg = vec![0u8; len];
    rng.fill_bytes(&mut msg);
    gen_bench_data_for_msg(rng, &msg)
}
fn gen_bench_data_for_msg(
rng: &mut rand_chacha::ChaCha20Rng,
msg: &Vec,
) -> BenchData {
let mut ikm = [0u8; 32];
rng.fill_bytes(&mut ikm);
let sk = SecretKey::key_gen(&ikm, &[]).unwrap();
let pk = sk.sk_to_pk();
let dst = "BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_NUL_"
.as_bytes()
.to_owned();
let sig = sk.sign(&msg, &dst, &[]);
let bd = BenchData {
sk,
pk,
dst,
msg: msg.clone(),
sig,
};
bd
}
fn bench_verify_multi_aggregate(c: &mut Criterion) {
let mut group = c.benchmark_group("verify_multi_aggregate");
let dst = b"BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_";
let mut ikm = [0u8; 32];
let seed = [0u8; 32];
let mut rng = ChaCha20Rng::from_seed(seed);
let num_sigs = vec![8, 16, 32, 64, 128];
let pks_per_sig = 3;
for n in num_sigs.iter() {
let mut msgs: Vec> = vec![vec![]; *n];
let mut sigs: Vec = Vec::with_capacity(*n);
let mut pks: Vec = Vec::with_capacity(*n);
let mut rands: Vec = Vec::with_capacity(*n);
for i in 0..*n {
// Create public keys
rng.fill_bytes(&mut ikm);
let sks_i: Vec<_> = (0..pks_per_sig)
.map(|_| {
ikm[0] += 1;
SecretKey::key_gen(&ikm, &[]).unwrap()
})
.collect();
let pks_i =
sks_i.iter().map(|sk| sk.sk_to_pk()).collect::>();
let pks_refs_i: Vec<&PublicKey> =
pks_i.iter().map(|pk| pk).collect();
// Create random message for pks to all sign
let msg_len = (rng.next_u64() & 0x3F) + 1;
msgs[i] = vec![0u8; msg_len as usize];
rng.fill_bytes(&mut msgs[i]);
// Generate signature for each key pair
let sigs_i = sks_i
.iter()
.map(|sk| sk.sign(&msgs[i], dst, &[]))
.collect::>();
// Aggregate signature
let sig_refs_i =
sigs_i.iter().map(|s| s).collect::>();
let agg_i = match AggregateSignature::aggregate(&sig_refs_i, false)
{
Ok(agg_i) => agg_i,
Err(err) => panic!("aggregate failure: {:?}", err),
};
sigs.push(agg_i.to_signature());
// aggregate public keys and push into vec
let agg_pk_i =
match AggregatePublicKey::aggregate(&pks_refs_i, false) {
Ok(agg_pk_i) => agg_pk_i,
Err(err) => panic!("aggregate failure: {:?}", err),
};
pks.push(agg_pk_i.to_public_key());
// create random values
let mut vals = [0u64; 4];
vals[0] = rng.next_u64();
let mut rand_i = std::mem::MaybeUninit::::uninit();
unsafe {
blst_scalar_from_uint64(rand_i.as_mut_ptr(), vals.as_ptr());
rands.push(rand_i.assume_init());
}
}
let msgs_refs: Vec<&[u8]> = msgs.iter().map(|m| m.as_slice()).collect();
let sig_refs = sigs.iter().map(|s| s).collect::>();
let pks_refs: Vec<&PublicKey> = pks.iter().map(|pk| pk).collect();
let agg_ver = (sig_refs, pks_refs, msgs_refs, dst, rands);
group.bench_with_input(
BenchmarkId::new("verify_multi_aggregate", n),
&agg_ver,
|b, (s, p, m, d, r)| {
b.iter(|| {
let result =
Signature::verify_multiple_aggregate_signatures(
&m, *d, &p, false, &s, false, &r, 64,
);
assert_eq!(result, BLST_ERROR::BLST_SUCCESS);
});
},
);
}
group.finish();
}
/// Benchmark `fast_aggregate_verify` (all signers share one message), both
/// with per-call public-key aggregation and with a pre-aggregated key.
fn bench_fast_aggregate_verify(c: &mut Criterion) {
    let mut group = c.benchmark_group("fast_aggregate_verify");

    let seed = [0u8; 32];
    let mut rng = ChaCha20Rng::from_seed(seed);

    // One shared random message of 1..=64 bytes, signed by every key.
    let msg_len = (rng.next_u64() & 0x3F) + 1;
    let mut msg = vec![0u8; msg_len as usize];
    rng.fill_bytes(&mut msg);

    let sizes = vec![8, 16, 32, 64, 128];
    let bds: Vec<_> = (0..sizes[sizes.len() - 1])
        .map(|_| gen_bench_data_for_msg(&mut rng, &msg))
        .collect();

    for size in sizes.iter() {
        let pks_refs = bds
            .iter()
            .take(*size)
            .map(|s| &s.pk)
            .collect::<Vec<_>>();
        let sig_refs = bds
            .iter()
            .take(*size)
            .map(|s| &s.sig)
            .collect::<Vec<_>>();

        let agg = match AggregateSignature::aggregate(&sig_refs, false) {
            Ok(agg) => agg,
            Err(err) => panic!("aggregate failure: {:?}", err),
        };
        let agg_sig = agg.to_signature();

        let agg_pks = match AggregatePublicKey::aggregate(&pks_refs, false) {
            Ok(agg_pks) => agg_pks,
            Err(err) => panic!("aggregate failure: {:?}", err),
        };
        let agg_pk = agg_pks.to_public_key();

        let agg_ver = (agg_sig, pks_refs, &bds[0].msg, &bds[0].dst);
        let agg_pre_ver = (agg_sig, agg_pk, &bds[0].msg, &bds[0].dst);

        group.bench_with_input(
            BenchmarkId::new("fast_aggregate_verify", size),
            &agg_ver,
            |b, (a, p, m, d)| {
                b.iter(|| {
                    let result = a.fast_aggregate_verify(true, &m, &d, &p);
                    assert_eq!(result, BLST_ERROR::BLST_SUCCESS);
                });
            },
        );

        group.bench_with_input(
            BenchmarkId::new("fast_aggregate_verify_preagg", size),
            &agg_pre_ver,
            |b, (a, p, m, d)| {
                b.iter(|| {
                    let result = a
                        .fast_aggregate_verify_pre_aggregated(true, &m, &d, &p);
                    assert_eq!(result, BLST_ERROR::BLST_SUCCESS);
                });
            },
        );
    }

    group.finish();
}
/// Benchmark `aggregate_verify` (one distinct message per signer) for
/// batch sizes of 8..=128.
fn bench_aggregate_verify(c: &mut Criterion) {
    let mut group = c.benchmark_group("aggregate_verify");

    let seed = [0u8; 32];
    let mut rng = ChaCha20Rng::from_seed(seed);

    let sizes = vec![8, 16, 32, 64, 128];
    // [10, 50, 100, 300, 1000, 4000];

    // Generate fixtures for the largest size once; smaller sizes reuse a
    // prefix via `take` below.
    let bds: Vec<_> = (0..sizes[sizes.len() - 1])
        .map(|_| gen_bench_data(&mut rng))
        .collect();

    for size in sizes.iter() {
        let msgs_refs = bds
            .iter()
            .take(*size)
            .map(|s| s.msg.as_slice())
            .collect::<Vec<_>>();
        let pks_refs = bds
            .iter()
            .take(*size)
            .map(|s| &s.pk)
            .collect::<Vec<_>>();
        let sig_refs = bds
            .iter()
            .take(*size)
            .map(|s| &s.sig)
            .collect::<Vec<_>>();

        let agg = match AggregateSignature::aggregate(&sig_refs, false) {
            Ok(agg) => agg,
            Err(err) => panic!("aggregate failure: {:?}", err),
        };
        let agg_sig = agg.to_signature();

        let agg_ver = (agg_sig, pks_refs, msgs_refs, &bds[0].dst);

        group.bench_with_input(
            BenchmarkId::new("aggregate_verify", size),
            &agg_ver,
            |b, (a, p, m, d)| {
                b.iter(|| {
                    let result = a.aggregate_verify(true, &m, &d, &p, false);
                    assert_eq!(result, BLST_ERROR::BLST_SUCCESS);
                });
            },
        );
    }

    group.finish();
}
/// Benchmark raw signature and public-key aggregation for batch sizes of
/// 10..=4000.
fn bench_aggregate(c: &mut Criterion) {
    let mut group = c.benchmark_group("aggregate");

    let seed = [0u8; 32];
    let mut rng = ChaCha20Rng::from_seed(seed);

    let sizes: [usize; 6] = [10, 50, 100, 300, 1000, 4000];
    let bds: Vec<_> = (0..4000).map(|_| gen_bench_data(&mut rng)).collect();

    for size in sizes.iter() {
        let sig_refs = bds
            .iter()
            .take(*size)
            .map(|s| &s.sig)
            .collect::<Vec<_>>();
        group.bench_with_input(
            BenchmarkId::new("aggregate_signature", size),
            &sig_refs,
            |b, s| {
                b.iter(|| AggregateSignature::aggregate(&s, false));
            },
        );

        let pks_refs = bds
            .iter()
            .take(*size)
            .map(|s| &s.pk)
            .collect::<Vec<_>>();
        group.bench_with_input(
            BenchmarkId::new("aggregate_public_key", size),
            &pks_refs,
            |b, p| {
                b.iter(|| AggregatePublicKey::aggregate(&p, false));
            },
        );
    }

    group.finish();
}
/// Benchmark signing and verifying a single message with one key pair.
fn bench_single_message(c: &mut Criterion) {
    let mut group = c.benchmark_group("single_message");

    let mut rng = ChaCha20Rng::from_seed([0u8; 32]);
    let bd = gen_bench_data(&mut rng);

    group.bench_function("sign", |b| {
        b.iter(|| bd.sk.sign(&bd.msg, &bd.dst, &[]))
    });
    group.bench_function("verify", |b| {
        b.iter(|| bd.sig.verify(true, &bd.msg, &bd.dst, &[], &bd.pk, false))
    });

    group.finish();
}
fn bench_serdes(c: &mut Criterion) {
let mut group = c.benchmark_group("serdes");
let seed = [0u8; 32];
let mut rng = ChaCha20Rng::from_seed(seed);
let bd = gen_bench_data(&mut rng);
let sk = bd.sk;
let sk_ser = sk.serialize();
let pk = bd.pk;
let pk_comp = pk.compress();
let pk_ser = pk.serialize();
let sig = bd.sig;
let sig_comp = sig.compress();
let sig_ser = sig.serialize();
let mut pk_jac = std::mem::MaybeUninit::::uninit();
let mut sig_jac = std::mem::MaybeUninit::::uninit();
let mut p1_comp = [0; 48];
let mut p2_comp = [0; 96];
let mut p1_ser = [0; 96];
let mut p2_ser = [0; 192];
unsafe {
let mut junk = [0u8; 32];
rng.fill_bytes(&mut junk);
blst_encode_to_g1(
pk_jac.as_mut_ptr(),
junk.as_ptr(),
junk.len(),
"junk".as_ptr(),
4,
std::ptr::null(),
0,
);
blst_encode_to_g2(
sig_jac.as_mut_ptr(),
junk.as_ptr(),
junk.len(),
"junk".as_ptr(),
4,
std::ptr::null(),
0,
);
}
group.bench_function("secret_key_serialize", |b| b.iter(|| sk.serialize()));
group.bench_function("secret_key_deserialize", |b| {
b.iter(|| SecretKey::deserialize(&sk_ser));
});
group.bench_function("public_key_serialize", |b| b.iter(|| pk.serialize()));
group.bench_function("public_key_compress", |b| b.iter(|| pk.compress()));
group.bench_function("public_key_uncompress", |b| {
b.iter(|| PublicKey::uncompress(&pk_comp))
});
group.bench_function("public_key_deserialize", |b| {
b.iter(|| PublicKey::deserialize(&pk_ser));
});
group.bench_function("signature_serialize", |b| b.iter(|| sig.serialize()));
group.bench_function("signature_compress", |b| b.iter(|| sig.compress()));
group.bench_function("signature_uncompress", |b| {
b.iter(|| Signature::uncompress(&sig_comp))
});
group.bench_function("signature_deserialize", |b| {
b.iter(|| Signature::deserialize(&sig_ser))
});
group.bench_function("p1_serialize", |b| {
b.iter(|| unsafe {
blst_p1_serialize(p1_ser.as_mut_ptr(), pk_jac.as_ptr())
})
});
group.bench_function("p1_compress", |b| {
b.iter(|| unsafe {
blst_p1_compress(p1_comp.as_mut_ptr(), pk_jac.as_ptr())
})
});
group.bench_function("p2_serialize", |b| {
b.iter(|| unsafe {
blst_p2_serialize(p2_ser.as_mut_ptr(), sig_jac.as_ptr())
})
});
group.bench_function("p2_compress", |b| {
b.iter(|| unsafe {
blst_p2_compress(p2_comp.as_mut_ptr(), sig_jac.as_ptr())
})
});
group.finish();
}
/// Benchmark key generation, public-key derivation and key validation
/// using a fixed IKM so results are reproducible.
fn bench_keys(c: &mut Criterion) {
    let mut group = c.benchmark_group("keys");

    let ikm: [u8; 32] = [
        0x93, 0xad, 0x7e, 0x65, 0xde, 0xad, 0x05, 0x2a, 0x08, 0x3a, 0x91, 0x0c,
        0x8b, 0x72, 0x85, 0x91, 0x46, 0x4c, 0xca, 0x56, 0x60, 0x5b, 0xb0, 0x56,
        0xed, 0xfe, 0x2b, 0x60, 0xa6, 0x3c, 0x48, 0x99,
    ];
    let sk = SecretKey::key_gen(&ikm, &[]).unwrap();
    let pk = sk.sk_to_pk();
    let pk_comp = pk.compress();

    group.bench_function("key_gen", |b| {
        b.iter(|| SecretKey::key_gen(&ikm, &[]))
    });
    group.bench_function("sk_to_pk", |b| {
        b.iter(|| sk.sk_to_pk());
    });
    group.bench_function("key_validate", |b| {
        b.iter(|| PublicKey::key_validate(&pk_comp));
    });

    group.finish();
}
// Register every benchmark group and emit the binary entry point
// (Cargo.toml sets `harness = false` for this bench target).
criterion_group!(
    benches,
    bench_verify_multi_aggregate,
    bench_fast_aggregate_verify,
    bench_aggregate_verify,
    bench_aggregate,
    bench_single_message,
    bench_serdes,
    bench_keys
);
criterion_main!(benches);
================================================
FILE: bindings/rust/build.rs
================================================
#![allow(unused_imports)]
extern crate cc;
use std::env;
use std::path::{Path, PathBuf};
/// Append the platform assembly source(s) to `file_vec` for compilation.
///
/// When building with MSVC, the pre-translated `win64/*-<sfx>.asm` files
/// matching `_arch` are globbed from `base_dir`; on every other toolchain
/// the single `assembly.S` wrapper is used.
fn assembly(
    file_vec: &mut Vec<PathBuf>,
    base_dir: &Path,
    _arch: &str,
    _is_msvc: bool,
) {
    #[cfg(target_env = "msvc")]
    if _is_msvc {
        let sfx = match _arch {
            "x86_64" => "x86_64",
            "aarch64" => "armv8",
            _ => "unknown",
        };
        let files =
            glob::glob(&format!("{}/win64/*-{}.asm", base_dir.display(), sfx))
                .expect("unable to collect assembly files");
        for file in files {
            file_vec.push(file.unwrap());
        }
        return;
    }
    file_vec.push(base_dir.join("assembly.S"));
}
/// Build script: compiles the blst C/assembly sources into `libblst.a`
/// (unless a pre-built one is found) and emits cargo configuration.
fn main() {
    if env::var("CARGO_FEATURE_SERDE_SECRET").is_ok() {
        println!(
            "cargo:warning=blst: non-production feature serde-secret enabled"
        );
    }

    // account for cross-compilation [by examining environment variables]
    let target_os = env::var("CARGO_CFG_TARGET_OS").unwrap();
    let target_env = env::var("CARGO_CFG_TARGET_ENV").unwrap();
    let target_arch = env::var("CARGO_CFG_TARGET_ARCH").unwrap();
    let target_family = env::var("CARGO_CFG_TARGET_FAMILY").unwrap_or_default();

    let target_no_std = target_os.eq("none")
        || (target_os.eq("unknown") && target_arch.eq("wasm32"))
        || target_os.eq("uefi")
        || env::var("BLST_TEST_NO_STD").is_ok();

    if !target_no_std {
        println!("cargo:rustc-cfg=feature=\"std\"");
        if target_arch.eq("wasm32") || target_os.eq("unknown") {
            println!("cargo:rustc-cfg=feature=\"no-threads\"");
        }
    }
    println!("cargo:rerun-if-env-changed=BLST_TEST_NO_STD");

    /*
     * Use pre-built libblst.a if there is one. This is primarily
     * for trouble-shooting purposes. Idea is that libblst.a can be
     * compiled with flags independent from cargo defaults, e.g.
     * '../../build.sh -O1 ...'.
     */
    if Path::new("libblst.a").exists() {
        println!("cargo:rustc-link-search=.");
        println!("cargo:rustc-link-lib=blst");
        println!("cargo:rerun-if-changed=libblst.a");
        return;
    }

    let manifest_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR").unwrap());
    let mut blst_base_dir = manifest_dir.join("blst");
    if !blst_base_dir.exists() {
        // Reach out to ../.., which is the root of the blst repo.
        // Use an absolute path to avoid issues with relative paths
        // being treated as strings by `cc` and getting concatenated
        // in ways that reach out of the OUT_DIR.
        blst_base_dir = manifest_dir
            .parent()
            .and_then(|dir| dir.parent())
            .expect("can't access parent of parent of current directory")
            .into();
    }
    println!("Using blst source directory {}", blst_base_dir.display());

    // Set CC environment variable to choose alternative C compiler.
    // Optimization level depends on whether or not --release is passed
    // or implied.

    // UEFI targets default to clang when it's available.
    if target_os.eq("uefi") && env::var("CC").is_err() {
        match std::process::Command::new("clang")
            .arg("--version")
            .output()
        {
            Ok(_) => env::set_var("CC", "clang"),
            Err(_) => { /* no clang in sight, just ignore the error */ }
        }
    }

    // SGX targets require clang >= 11 for LVI hardening; parse the
    // version out of `clang --version` before opting in.
    if target_env.eq("sgx") && env::var("CC").is_err() {
        match std::process::Command::new("clang")
            .arg("--version")
            .output()
        {
            Ok(out) => {
                let version = String::from_utf8(out.stdout)
                    .unwrap_or("unintelligible".to_string());
                if let Some(x) = version.find("clang version ") {
                    let x = x + 14;
                    let y = version[x..].find('.').unwrap_or(0);
                    if version[x..x + y].parse::<i32>().unwrap_or(0) >= 11 {
                        env::set_var("CC", "clang");
                    }
                }
            }
            Err(_) => { /* no clang in sight, just ignore the error */ }
        }
    }

    // 32-bit MSVC builds prefer clang-cl when it targets i386.
    if target_env.eq("msvc")
        && env::var("CARGO_CFG_TARGET_POINTER_WIDTH").unwrap().eq("32")
        && env::var("CC").is_err()
    {
        match std::process::Command::new("clang-cl")
            .args(["-m32", "--version"])
            .output()
        {
            Ok(out) => {
                if String::from_utf8(out.stdout)
                    .unwrap_or("unintelligible".to_string())
                    .contains("Target: i386-pc-windows-msvc")
                {
                    env::set_var("CC", "clang-cl");
                }
            }
            Err(_) => { /* no clang-cl in sight, just ignore the error */ }
        }
    }

    let mut cc = cc::Build::new();

    let c_src_dir = blst_base_dir.join("src");
    println!("cargo:rerun-if-changed={}", c_src_dir.display());
    let mut file_vec = vec![c_src_dir.join("server.c")];

    if target_arch.eq("x86_64") || target_arch.eq("aarch64") {
        let asm_dir = blst_base_dir.join("build");
        println!("cargo:rerun-if-changed={}", asm_dir.display());
        assembly(
            &mut file_vec,
            &asm_dir,
            &target_arch,
            cc.get_compiler().is_like_msvc(),
        );
    } else {
        cc.define("__BLST_NO_ASM__", None);
    }

    // Resolve the portable/force-adx feature matrix into C defines.
    match (cfg!(feature = "portable"), cfg!(feature = "force-adx")) {
        (true, false) => {
            if target_arch.eq("x86_64") && target_env.eq("sgx") {
                panic!("'portable' is not supported on SGX target");
            }
            println!("Compiling in portable mode without ISA extensions");
            cc.define("__BLST_PORTABLE__", None);
        }
        (false, true) => {
            if target_arch.eq("x86_64") {
                println!("Enabling ADX support via `force-adx` feature");
                cc.define("__ADX__", None);
            } else {
                println!("`force-adx` is ignored for non-x86_64 targets");
            }
        }
        (false, false) => {
            if target_arch.eq("x86_64") {
                if target_env.eq("sgx") {
                    println!("Enabling ADX for Intel SGX target");
                    cc.define("__ADX__", None);
                } else if env::var("CARGO_ENCODED_RUSTFLAGS")
                    .unwrap_or_default()
                    .contains("target-cpu=")
                {
                    // If target-cpu is specified on the rustc command line,
                    // then obey the resulting target-features.
                    let feat_list = env::var("CARGO_CFG_TARGET_FEATURE")
                        .unwrap_or_default();
                    let features: Vec<_> = feat_list.split(',').collect();
                    if !features.contains(&"ssse3") {
                        println!(
                            "Compiling in portable mode without ISA extensions"
                        );
                        cc.define("__BLST_PORTABLE__", None);
                    } else if features.contains(&"adx") {
                        println!(
                            "Enabling ADX because it was set as target-feature"
                        );
                        cc.define("__ADX__", None);
                    }
                } else {
                    // Native build: probe the host CPU directly.
                    #[cfg(target_arch = "x86_64")]
                    if std::is_x86_feature_detected!("adx") {
                        println!(
                            "Enabling ADX because it was detected on the host"
                        );
                        cc.define("__ADX__", None);
                    }
                }
            }
        }
        (true, true) => panic!(
            "Cannot compile with both `portable` and `force-adx` features"
        ),
    }

    if target_env.eq("msvc") && cc.get_compiler().is_like_msvc() {
        cc.flag("-Zl");
    }
    cc.flag_if_supported("-mno-avx") // avoid costly transitions
        .flag_if_supported("-fno-builtin")
        .flag_if_supported("-Wno-unused-function")
        .flag_if_supported("-Wno-unused-command-line-argument");
    if target_arch.eq("wasm32") || target_family.is_empty() {
        cc.flag("-ffreestanding");
    }
    if target_arch.eq("wasm32") || target_no_std {
        cc.define("SCRATCH_LIMIT", "(45 * 1024)");
    }
    if target_env.eq("sgx") {
        cc.flag_if_supported("-mlvi-hardening");
        cc.define("__SGX_LVI_HARDENING__", None);
        cc.define("__BLST_NO_CPUID__", None);
        cc.define("__ELF__", None);
        cc.define("SCRATCH_LIMIT", "(45 * 1024)");
    }
    if !cfg!(debug_assertions) {
        cc.opt_level(2);
    }
    cc.files(&file_vec).compile("blst");

    // pass some DEP_BLST_* variables to dependents
    println!(
        "cargo:BINDINGS={}",
        blst_base_dir.join("bindings").to_string_lossy()
    );
    println!("cargo:C_SRC={}", c_src_dir.to_string_lossy());
}
================================================
FILE: bindings/rust/publish.sh
================================================
#!/bin/sh
#
# Publish the blst crate to crates.io with the stable toolchain.
# Run from anywhere; the script cd's to its own directory first.
HERE=`dirname $0`
cd "${HERE}"

# The packaged crate expects the blst sources under ./blst (see the
# `include` list in Cargo.toml). If that directory is absent, create a
# temporary symlink to the repository root and remove it on exit or
# interrupt (signals 0 and 2).
if [ ! -d blst ]; then
    trap '[ -h blst ] && rm -f blst' 0 2
    ln -s ../.. blst
fi

# --allow-dirty because the temporary blst symbolic link is not committed
cargo +stable publish --allow-dirty "$@"
================================================
FILE: bindings/rust/rustfmt.toml
================================================
max_width = 80
================================================
FILE: bindings/rust/src/bindings.rs
================================================
/* automatically generated by rust-bindgen 0.65.1 */
#[repr(u32)]
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
// Error codes returned by blst operations; generated by bindgen from
// blst.h, so the numeric values must stay in sync with the C enum.
pub enum BLST_ERROR {
    BLST_SUCCESS = 0,
    BLST_BAD_ENCODING = 1,
    BLST_POINT_NOT_ON_CURVE = 2,
    BLST_POINT_NOT_IN_GROUP = 3,
    BLST_AGGR_TYPE_MISMATCH = 4,
    BLST_VERIFY_FAIL = 5,
    BLST_PK_IS_INFINITY = 6,
    BLST_BAD_SCALAR = 7,
}

// C-compatible primitive aliases used throughout the generated bindings.
pub type byte = u8;
pub type limb_t = u64;
#[repr(C)]
#[derive(Debug, Default, Clone, PartialEq, Eq, Zeroize)]
#[zeroize(drop)]
// 256-bit scalar stored as 32 raw bytes; wiped on drop via Zeroize since
// instances can carry secret-key material.
pub struct blst_scalar {
    pub b: [byte; 32usize],
}
#[test]
// Verifies the Rust layout of blst_scalar (size, alignment, field offset)
// matches the C definition bindgen generated it from.
fn bindgen_test_layout_blst_scalar() {
    const UNINIT: ::core::mem::MaybeUninit<blst_scalar> =
        ::core::mem::MaybeUninit::uninit();
    let ptr = UNINIT.as_ptr();
    assert_eq!(
        ::core::mem::size_of::<blst_scalar>(),
        32usize,
        concat!("Size of: ", stringify!(blst_scalar))
    );
    assert_eq!(
        ::core::mem::align_of::<blst_scalar>(),
        1usize,
        concat!("Alignment of ", stringify!(blst_scalar))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).b) as usize - ptr as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(blst_scalar),
            "::",
            stringify!(b)
        )
    );
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq, Eq)]
// Element of the scalar field, as four 64-bit limbs (256 bits total).
pub struct blst_fr {
    pub l: [limb_t; 4usize],
}
#[test]
// Layout check for blst_fr: 32 bytes, 8-byte aligned, `l` at offset 0.
fn bindgen_test_layout_blst_fr() {
    const UNINIT: ::core::mem::MaybeUninit<blst_fr> =
        ::core::mem::MaybeUninit::uninit();
    let ptr = UNINIT.as_ptr();
    assert_eq!(
        ::core::mem::size_of::<blst_fr>(),
        32usize,
        concat!("Size of: ", stringify!(blst_fr))
    );
    assert_eq!(
        ::core::mem::align_of::<blst_fr>(),
        8usize,
        concat!("Alignment of ", stringify!(blst_fr))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).l) as usize - ptr as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(blst_fr),
            "::",
            stringify!(l)
        )
    );
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq, Eq)]
// Element of the base field, as six 64-bit limbs (384 bits total).
pub struct blst_fp {
    pub l: [limb_t; 6usize],
}
#[test]
// Layout check for blst_fp: 48 bytes, 8-byte aligned, `l` at offset 0.
fn bindgen_test_layout_blst_fp() {
    const UNINIT: ::core::mem::MaybeUninit<blst_fp> =
        ::core::mem::MaybeUninit::uninit();
    let ptr = UNINIT.as_ptr();
    assert_eq!(
        ::core::mem::size_of::<blst_fp>(),
        48usize,
        concat!("Size of: ", stringify!(blst_fp))
    );
    assert_eq!(
        ::core::mem::align_of::<blst_fp>(),
        8usize,
        concat!("Alignment of ", stringify!(blst_fp))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).l) as usize - ptr as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(blst_fp),
            "::",
            stringify!(l)
        )
    );
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq, Eq)]
// Quadratic extension field element: a pair of blst_fp values.
pub struct blst_fp2 {
    pub fp: [blst_fp; 2usize],
}
#[test]
// Layout check for blst_fp2: 96 bytes, 8-byte aligned, `fp` at offset 0.
fn bindgen_test_layout_blst_fp2() {
    const UNINIT: ::core::mem::MaybeUninit<blst_fp2> =
        ::core::mem::MaybeUninit::uninit();
    let ptr = UNINIT.as_ptr();
    assert_eq!(
        ::core::mem::size_of::<blst_fp2>(),
        96usize,
        concat!("Size of: ", stringify!(blst_fp2))
    );
    assert_eq!(
        ::core::mem::align_of::<blst_fp2>(),
        8usize,
        concat!("Alignment of ", stringify!(blst_fp2))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).fp) as usize - ptr as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(blst_fp2),
            "::",
            stringify!(fp)
        )
    );
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq, Eq)]
// Sextic extension field element: three blst_fp2 values.
pub struct blst_fp6 {
    pub fp2: [blst_fp2; 3usize],
}
#[test]
// Layout check for blst_fp6: 288 bytes, 8-byte aligned, `fp2` at offset 0.
fn bindgen_test_layout_blst_fp6() {
    const UNINIT: ::core::mem::MaybeUninit<blst_fp6> =
        ::core::mem::MaybeUninit::uninit();
    let ptr = UNINIT.as_ptr();
    assert_eq!(
        ::core::mem::size_of::<blst_fp6>(),
        288usize,
        concat!("Size of: ", stringify!(blst_fp6))
    );
    assert_eq!(
        ::core::mem::align_of::<blst_fp6>(),
        8usize,
        concat!("Alignment of ", stringify!(blst_fp6))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).fp2) as usize - ptr as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(blst_fp6),
            "::",
            stringify!(fp2)
        )
    );
}
#[repr(C)]
#[derive(Debug, Copy, Clone, Eq)]
// Twelfth-degree extension field element: two blst_fp6 values.
// NOTE(review): derives Eq but not PartialEq here — PartialEq is
// presumably implemented manually elsewhere in the bindings; confirm.
pub struct blst_fp12 {
    pub fp6: [blst_fp6; 2usize],
}
#[test]
// Layout check for blst_fp12: 576 bytes, 8-byte aligned, `fp6` at offset 0.
fn bindgen_test_layout_blst_fp12() {
    const UNINIT: ::core::mem::MaybeUninit<blst_fp12> =
        ::core::mem::MaybeUninit::uninit();
    let ptr = UNINIT.as_ptr();
    assert_eq!(
        ::core::mem::size_of::<blst_fp12>(),
        576usize,
        concat!("Size of: ", stringify!(blst_fp12))
    );
    assert_eq!(
        ::core::mem::align_of::<blst_fp12>(),
        8usize,
        concat!("Alignment of ", stringify!(blst_fp12))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).fp6) as usize - ptr as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(blst_fp12),
            "::",
            stringify!(fp6)
        )
    );
}
// --- FFI: scalar conversions and secret-key helpers ----------------------
// Declarations generated by bindgen; signatures must match blst.h.

extern "C" {
    pub fn blst_scalar_from_uint32(out: *mut blst_scalar, a: *const u32);
}
extern "C" {
    pub fn blst_uint32_from_scalar(out: *mut u32, a: *const blst_scalar);
}
extern "C" {
    pub fn blst_scalar_from_uint64(out: *mut blst_scalar, a: *const u64);
}
extern "C" {
    pub fn blst_uint64_from_scalar(out: *mut u64, a: *const blst_scalar);
}
extern "C" {
    pub fn blst_scalar_from_bendian(out: *mut blst_scalar, a: *const byte);
}
extern "C" {
    pub fn blst_bendian_from_scalar(out: *mut byte, a: *const blst_scalar);
}
extern "C" {
    pub fn blst_scalar_from_lendian(out: *mut blst_scalar, a: *const byte);
}
extern "C" {
    pub fn blst_lendian_from_scalar(out: *mut byte, a: *const blst_scalar);
}
extern "C" {
    pub fn blst_scalar_fr_check(a: *const blst_scalar) -> bool;
}
extern "C" {
    pub fn blst_sk_check(a: *const blst_scalar) -> bool;
}
extern "C" {
    pub fn blst_sk_add_n_check(
        out: *mut blst_scalar,
        a: *const blst_scalar,
        b: *const blst_scalar,
    ) -> bool;
}
extern "C" {
    pub fn blst_sk_sub_n_check(
        out: *mut blst_scalar,
        a: *const blst_scalar,
        b: *const blst_scalar,
    ) -> bool;
}
extern "C" {
    pub fn blst_sk_mul_n_check(
        out: *mut blst_scalar,
        a: *const blst_scalar,
        b: *const blst_scalar,
    ) -> bool;
}
extern "C" {
    pub fn blst_sk_inverse(out: *mut blst_scalar, a: *const blst_scalar);
}
extern "C" {
    pub fn blst_scalar_from_le_bytes(out: *mut blst_scalar, in_: *const byte, len: usize) -> bool;
}
extern "C" {
    pub fn blst_scalar_from_be_bytes(out: *mut blst_scalar, in_: *const byte, len: usize) -> bool;
}
// --- FFI: blst_fr (scalar field) arithmetic and conversions --------------

extern "C" {
    pub fn blst_fr_add(ret: *mut blst_fr, a: *const blst_fr, b: *const blst_fr);
}
extern "C" {
    pub fn blst_fr_sub(ret: *mut blst_fr, a: *const blst_fr, b: *const blst_fr);
}
extern "C" {
    pub fn blst_fr_mul_by_3(ret: *mut blst_fr, a: *const blst_fr);
}
extern "C" {
    pub fn blst_fr_lshift(ret: *mut blst_fr, a: *const blst_fr, count: usize);
}
extern "C" {
    pub fn blst_fr_rshift(ret: *mut blst_fr, a: *const blst_fr, count: usize);
}
extern "C" {
    pub fn blst_fr_mul(ret: *mut blst_fr, a: *const blst_fr, b: *const blst_fr);
}
extern "C" {
    pub fn blst_fr_sqr(ret: *mut blst_fr, a: *const blst_fr);
}
extern "C" {
    pub fn blst_fr_cneg(ret: *mut blst_fr, a: *const blst_fr, flag: bool);
}
extern "C" {
    pub fn blst_fr_eucl_inverse(ret: *mut blst_fr, a: *const blst_fr);
}
extern "C" {
    pub fn blst_fr_inverse(ret: *mut blst_fr, a: *const blst_fr);
}
extern "C" {
    pub fn blst_fr_from_uint64(ret: *mut blst_fr, a: *const u64);
}
extern "C" {
    pub fn blst_uint64_from_fr(ret: *mut u64, a: *const blst_fr);
}
extern "C" {
    pub fn blst_fr_from_scalar(ret: *mut blst_fr, a: *const blst_scalar);
}
extern "C" {
    pub fn blst_scalar_from_fr(ret: *mut blst_scalar, a: *const blst_fr);
}
// --- FFI: blst_fp (base field) arithmetic and conversions ----------------

extern "C" {
    pub fn blst_fp_add(ret: *mut blst_fp, a: *const blst_fp, b: *const blst_fp);
}
extern "C" {
    pub fn blst_fp_sub(ret: *mut blst_fp, a: *const blst_fp, b: *const blst_fp);
}
extern "C" {
    pub fn blst_fp_mul_by_3(ret: *mut blst_fp, a: *const blst_fp);
}
extern "C" {
    pub fn blst_fp_mul_by_8(ret: *mut blst_fp, a: *const blst_fp);
}
extern "C" {
    pub fn blst_fp_lshift(ret: *mut blst_fp, a: *const blst_fp, count: usize);
}
extern "C" {
    pub fn blst_fp_mul(ret: *mut blst_fp, a: *const blst_fp, b: *const blst_fp);
}
extern "C" {
    pub fn blst_fp_sqr(ret: *mut blst_fp, a: *const blst_fp);
}
extern "C" {
    pub fn blst_fp_cneg(ret: *mut blst_fp, a: *const blst_fp, flag: bool);
}
extern "C" {
    pub fn blst_fp_eucl_inverse(ret: *mut blst_fp, a: *const blst_fp);
}
extern "C" {
    pub fn blst_fp_inverse(ret: *mut blst_fp, a: *const blst_fp);
}
extern "C" {
    pub fn blst_fp_sqrt(ret: *mut blst_fp, a: *const blst_fp) -> bool;
}
extern "C" {
    pub fn blst_fp_from_uint32(ret: *mut blst_fp, a: *const u32);
}
extern "C" {
    pub fn blst_uint32_from_fp(ret: *mut u32, a: *const blst_fp);
}
extern "C" {
    pub fn blst_fp_from_uint64(ret: *mut blst_fp, a: *const u64);
}
extern "C" {
    pub fn blst_uint64_from_fp(ret: *mut u64, a: *const blst_fp);
}
extern "C" {
    pub fn blst_fp_from_bendian(ret: *mut blst_fp, a: *const byte);
}
extern "C" {
    pub fn blst_bendian_from_fp(ret: *mut byte, a: *const blst_fp);
}
extern "C" {
    pub fn blst_fp_from_lendian(ret: *mut blst_fp, a: *const byte);
}
extern "C" {
    pub fn blst_lendian_from_fp(ret: *mut byte, a: *const blst_fp);
}
// --- FFI: blst_fp2 arithmetic --------------------------------------------

extern "C" {
    pub fn blst_fp2_add(ret: *mut blst_fp2, a: *const blst_fp2, b: *const blst_fp2);
}
extern "C" {
    pub fn blst_fp2_sub(ret: *mut blst_fp2, a: *const blst_fp2, b: *const blst_fp2);
}
extern "C" {
    pub fn blst_fp2_mul_by_3(ret: *mut blst_fp2, a: *const blst_fp2);
}
extern "C" {
    pub fn blst_fp2_mul_by_8(ret: *mut blst_fp2, a: *const blst_fp2);
}
extern "C" {
    pub fn blst_fp2_lshift(ret: *mut blst_fp2, a: *const blst_fp2, count: usize);
}
extern "C" {
    pub fn blst_fp2_mul(ret: *mut blst_fp2, a: *const blst_fp2, b: *const blst_fp2);
}
extern "C" {
    pub fn blst_fp2_sqr(ret: *mut blst_fp2, a: *const blst_fp2);
}
extern "C" {
    pub fn blst_fp2_cneg(ret: *mut blst_fp2, a: *const blst_fp2, flag: bool);
}
extern "C" {
    pub fn blst_fp2_eucl_inverse(ret: *mut blst_fp2, a: *const blst_fp2);
}
extern "C" {
    pub fn blst_fp2_inverse(ret: *mut blst_fp2, a: *const blst_fp2);
}
extern "C" {
    pub fn blst_fp2_sqrt(ret: *mut blst_fp2, a: *const blst_fp2) -> bool;
}
// --- FFI: blst_fp12 arithmetic and predicates ----------------------------

extern "C" {
    pub fn blst_fp12_sqr(ret: *mut blst_fp12, a: *const blst_fp12);
}
extern "C" {
    pub fn blst_fp12_cyclotomic_sqr(ret: *mut blst_fp12, a: *const blst_fp12);
}
extern "C" {
    pub fn blst_fp12_mul(ret: *mut blst_fp12, a: *const blst_fp12, b: *const blst_fp12);
}
extern "C" {
    pub fn blst_fp12_mul_by_xy00z0(
        ret: *mut blst_fp12,
        a: *const blst_fp12,
        xy00z0: *const blst_fp6,
    );
}
extern "C" {
    // In-place operation: conjugates `a` directly.
    pub fn blst_fp12_conjugate(a: *mut blst_fp12);
}
extern "C" {
    pub fn blst_fp12_inverse(ret: *mut blst_fp12, a: *const blst_fp12);
}
extern "C" {
    pub fn blst_fp12_frobenius_map(ret: *mut blst_fp12, a: *const blst_fp12, n: usize);
}
extern "C" {
    pub fn blst_fp12_is_equal(a: *const blst_fp12, b: *const blst_fp12) -> bool;
}
extern "C" {
    pub fn blst_fp12_is_one(a: *const blst_fp12) -> bool;
}
extern "C" {
    pub fn blst_fp12_in_group(a: *const blst_fp12) -> bool;
}
extern "C" {
    pub fn blst_fp12_one() -> *const blst_fp12;
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Eq)]
// G1 point in Jacobian (x, y, z) projective coordinates.
// NOTE(review): derives Eq but not PartialEq here — PartialEq is
// presumably implemented manually elsewhere in the bindings; confirm.
pub struct blst_p1 {
    pub x: blst_fp,
    pub y: blst_fp,
    pub z: blst_fp,
}
#[test]
// Layout check for blst_p1: 144 bytes, 8-byte aligned, fields x/y/z at
// offsets 0/48/96.
fn bindgen_test_layout_blst_p1() {
    const UNINIT: ::core::mem::MaybeUninit<blst_p1> =
        ::core::mem::MaybeUninit::uninit();
    let ptr = UNINIT.as_ptr();
    assert_eq!(
        ::core::mem::size_of::<blst_p1>(),
        144usize,
        concat!("Size of: ", stringify!(blst_p1))
    );
    assert_eq!(
        ::core::mem::align_of::<blst_p1>(),
        8usize,
        concat!("Alignment of ", stringify!(blst_p1))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).x) as usize - ptr as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(blst_p1),
            "::",
            stringify!(x)
        )
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).y) as usize - ptr as usize },
        48usize,
        concat!(
            "Offset of field: ",
            stringify!(blst_p1),
            "::",
            stringify!(y)
        )
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).z) as usize - ptr as usize },
        96usize,
        concat!(
            "Offset of field: ",
            stringify!(blst_p1),
            "::",
            stringify!(z)
        )
    );
}
/// A BLS12-381 G1 point in affine (x, y) coordinates over Fp.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Eq)]
pub struct blst_p1_affine {
    pub x: blst_fp,
    pub y: blst_fp,
}
/// Verifies that `blst_p1_affine` matches the C layout: size 96,
/// alignment 8, fields at offsets 0/48. Restores the stripped
/// `::<blst_p1_affine>` type arguments.
#[test]
fn bindgen_test_layout_blst_p1_affine() {
    const UNINIT: ::core::mem::MaybeUninit<blst_p1_affine> = ::core::mem::MaybeUninit::uninit();
    let ptr = UNINIT.as_ptr();
    assert_eq!(
        ::core::mem::size_of::<blst_p1_affine>(),
        96usize,
        concat!("Size of: ", stringify!(blst_p1_affine))
    );
    assert_eq!(
        ::core::mem::align_of::<blst_p1_affine>(),
        8usize,
        concat!("Alignment of ", stringify!(blst_p1_affine))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).x) as usize - ptr as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(blst_p1_affine),
            "::",
            stringify!(x)
        )
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).y) as usize - ptr as usize },
        48usize,
        concat!(
            "Offset of field: ",
            stringify!(blst_p1_affine),
            "::",
            stringify!(y)
        )
    );
}
// ---- G1 group operations (projective and affine), C ABI ----
extern "C" {
    pub fn blst_p1_add(out: *mut blst_p1, a: *const blst_p1, b: *const blst_p1);
}
extern "C" {
    // Addition that also handles the a == b (doubling) case.
    pub fn blst_p1_add_or_double(out: *mut blst_p1, a: *const blst_p1, b: *const blst_p1);
}
extern "C" {
    pub fn blst_p1_add_affine(out: *mut blst_p1, a: *const blst_p1, b: *const blst_p1_affine);
}
extern "C" {
    pub fn blst_p1_add_or_double_affine(
        out: *mut blst_p1,
        a: *const blst_p1,
        b: *const blst_p1_affine,
    );
}
extern "C" {
    pub fn blst_p1_double(out: *mut blst_p1, a: *const blst_p1);
}
extern "C" {
    // Scalar multiplication; `scalar` is a byte string of `nbits` significant bits.
    pub fn blst_p1_mult(out: *mut blst_p1, p: *const blst_p1, scalar: *const byte, nbits: usize);
}
extern "C" {
    // In-place conditional negation controlled by `cbit`.
    pub fn blst_p1_cneg(p: *mut blst_p1, cbit: bool);
}
extern "C" {
    pub fn blst_p1_to_affine(out: *mut blst_p1_affine, in_: *const blst_p1);
}
extern "C" {
    pub fn blst_p1_from_affine(out: *mut blst_p1, in_: *const blst_p1_affine);
}
extern "C" {
    pub fn blst_p1_on_curve(p: *const blst_p1) -> bool;
}
extern "C" {
    // Prime-order subgroup membership check.
    pub fn blst_p1_in_g1(p: *const blst_p1) -> bool;
}
extern "C" {
    pub fn blst_p1_is_equal(a: *const blst_p1, b: *const blst_p1) -> bool;
}
extern "C" {
    pub fn blst_p1_is_inf(a: *const blst_p1) -> bool;
}
extern "C" {
    pub fn blst_p1_generator() -> *const blst_p1;
}
extern "C" {
    pub fn blst_p1_affine_on_curve(p: *const blst_p1_affine) -> bool;
}
extern "C" {
    pub fn blst_p1_affine_in_g1(p: *const blst_p1_affine) -> bool;
}
extern "C" {
    pub fn blst_p1_affine_is_equal(a: *const blst_p1_affine, b: *const blst_p1_affine) -> bool;
}
extern "C" {
    pub fn blst_p1_affine_is_inf(a: *const blst_p1_affine) -> bool;
}
extern "C" {
    pub fn blst_p1_affine_generator() -> *const blst_p1_affine;
}
/// A BLS12-381 G2 point in projective (x, y, z) coordinates over Fp2.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Eq)]
pub struct blst_p2 {
    pub x: blst_fp2,
    pub y: blst_fp2,
    pub z: blst_fp2,
}
/// Verifies that `blst_p2` matches the C layout: size 288, alignment 8,
/// fields at offsets 0/96/192. Restores the stripped `::<blst_p2>` type
/// arguments.
#[test]
fn bindgen_test_layout_blst_p2() {
    const UNINIT: ::core::mem::MaybeUninit<blst_p2> = ::core::mem::MaybeUninit::uninit();
    let ptr = UNINIT.as_ptr();
    assert_eq!(
        ::core::mem::size_of::<blst_p2>(),
        288usize,
        concat!("Size of: ", stringify!(blst_p2))
    );
    assert_eq!(
        ::core::mem::align_of::<blst_p2>(),
        8usize,
        concat!("Alignment of ", stringify!(blst_p2))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).x) as usize - ptr as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(blst_p2),
            "::",
            stringify!(x)
        )
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).y) as usize - ptr as usize },
        96usize,
        concat!(
            "Offset of field: ",
            stringify!(blst_p2),
            "::",
            stringify!(y)
        )
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).z) as usize - ptr as usize },
        192usize,
        concat!(
            "Offset of field: ",
            stringify!(blst_p2),
            "::",
            stringify!(z)
        )
    );
}
/// A BLS12-381 G2 point in affine (x, y) coordinates over Fp2.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Eq)]
pub struct blst_p2_affine {
    pub x: blst_fp2,
    pub y: blst_fp2,
}
/// Verifies that `blst_p2_affine` matches the C layout: size 192,
/// alignment 8, fields at offsets 0/96. Restores the stripped
/// `::<blst_p2_affine>` type arguments.
#[test]
fn bindgen_test_layout_blst_p2_affine() {
    const UNINIT: ::core::mem::MaybeUninit<blst_p2_affine> = ::core::mem::MaybeUninit::uninit();
    let ptr = UNINIT.as_ptr();
    assert_eq!(
        ::core::mem::size_of::<blst_p2_affine>(),
        192usize,
        concat!("Size of: ", stringify!(blst_p2_affine))
    );
    assert_eq!(
        ::core::mem::align_of::<blst_p2_affine>(),
        8usize,
        concat!("Alignment of ", stringify!(blst_p2_affine))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).x) as usize - ptr as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(blst_p2_affine),
            "::",
            stringify!(x)
        )
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).y) as usize - ptr as usize },
        96usize,
        concat!(
            "Offset of field: ",
            stringify!(blst_p2_affine),
            "::",
            stringify!(y)
        )
    );
}
// ---- G2 group operations (projective and affine), C ABI ----
extern "C" {
    pub fn blst_p2_add(out: *mut blst_p2, a: *const blst_p2, b: *const blst_p2);
}
extern "C" {
    // Addition that also handles the a == b (doubling) case.
    pub fn blst_p2_add_or_double(out: *mut blst_p2, a: *const blst_p2, b: *const blst_p2);
}
extern "C" {
    pub fn blst_p2_add_affine(out: *mut blst_p2, a: *const blst_p2, b: *const blst_p2_affine);
}
extern "C" {
    pub fn blst_p2_add_or_double_affine(
        out: *mut blst_p2,
        a: *const blst_p2,
        b: *const blst_p2_affine,
    );
}
extern "C" {
    pub fn blst_p2_double(out: *mut blst_p2, a: *const blst_p2);
}
extern "C" {
    // Scalar multiplication; `scalar` is a byte string of `nbits` significant bits.
    pub fn blst_p2_mult(out: *mut blst_p2, p: *const blst_p2, scalar: *const byte, nbits: usize);
}
extern "C" {
    // In-place conditional negation controlled by `cbit`.
    pub fn blst_p2_cneg(p: *mut blst_p2, cbit: bool);
}
extern "C" {
    pub fn blst_p2_to_affine(out: *mut blst_p2_affine, in_: *const blst_p2);
}
extern "C" {
    pub fn blst_p2_from_affine(out: *mut blst_p2, in_: *const blst_p2_affine);
}
extern "C" {
    pub fn blst_p2_on_curve(p: *const blst_p2) -> bool;
}
extern "C" {
    // Prime-order subgroup membership check.
    pub fn blst_p2_in_g2(p: *const blst_p2) -> bool;
}
extern "C" {
    pub fn blst_p2_is_equal(a: *const blst_p2, b: *const blst_p2) -> bool;
}
extern "C" {
    pub fn blst_p2_is_inf(a: *const blst_p2) -> bool;
}
extern "C" {
    pub fn blst_p2_generator() -> *const blst_p2;
}
extern "C" {
    pub fn blst_p2_affine_on_curve(p: *const blst_p2_affine) -> bool;
}
extern "C" {
    pub fn blst_p2_affine_in_g2(p: *const blst_p2_affine) -> bool;
}
extern "C" {
    pub fn blst_p2_affine_is_equal(a: *const blst_p2_affine, b: *const blst_p2_affine) -> bool;
}
extern "C" {
    pub fn blst_p2_affine_is_inf(a: *const blst_p2_affine) -> bool;
}
extern "C" {
    pub fn blst_p2_affine_generator() -> *const blst_p2_affine;
}
// ---- Multi-point operations: batch conversion, window precomputation
// ---- and Pippenger multi-scalar multiplication, C ABI.
// The `*const *const` arguments are arrays of pointers; `*_sizeof`
// helpers report how many bytes the caller must allocate for tables
// and scratch space.
extern "C" {
    pub fn blst_p1s_to_affine(
        dst: *mut blst_p1_affine,
        points: *const *const blst_p1,
        npoints: usize,
    );
}
extern "C" {
    pub fn blst_p1s_add(ret: *mut blst_p1, points: *const *const blst_p1_affine, npoints: usize);
}
extern "C" {
    pub fn blst_p1s_mult_wbits_precompute_sizeof(wbits: usize, npoints: usize) -> usize;
}
extern "C" {
    pub fn blst_p1s_mult_wbits_precompute(
        table: *mut blst_p1_affine,
        wbits: usize,
        points: *const *const blst_p1_affine,
        npoints: usize,
    );
}
extern "C" {
    pub fn blst_p1s_mult_wbits_scratch_sizeof(npoints: usize) -> usize;
}
extern "C" {
    pub fn blst_p1s_mult_wbits(
        ret: *mut blst_p1,
        table: *const blst_p1_affine,
        wbits: usize,
        npoints: usize,
        scalars: *const *const byte,
        nbits: usize,
        scratch: *mut limb_t,
    );
}
extern "C" {
    pub fn blst_p1s_mult_pippenger_scratch_sizeof(npoints: usize) -> usize;
}
extern "C" {
    pub fn blst_p1s_mult_pippenger(
        ret: *mut blst_p1,
        points: *const *const blst_p1_affine,
        npoints: usize,
        scalars: *const *const byte,
        nbits: usize,
        scratch: *mut limb_t,
    );
}
extern "C" {
    // One window (`bit0`..`bit0+window`) of a Pippenger run; used for
    // splitting the work across threads.
    pub fn blst_p1s_tile_pippenger(
        ret: *mut blst_p1,
        points: *const *const blst_p1_affine,
        npoints: usize,
        scalars: *const *const byte,
        nbits: usize,
        scratch: *mut limb_t,
        bit0: usize,
        window: usize,
    );
}
extern "C" {
    pub fn blst_p2s_to_affine(
        dst: *mut blst_p2_affine,
        points: *const *const blst_p2,
        npoints: usize,
    );
}
extern "C" {
    pub fn blst_p2s_add(ret: *mut blst_p2, points: *const *const blst_p2_affine, npoints: usize);
}
extern "C" {
    pub fn blst_p2s_mult_wbits_precompute_sizeof(wbits: usize, npoints: usize) -> usize;
}
extern "C" {
    pub fn blst_p2s_mult_wbits_precompute(
        table: *mut blst_p2_affine,
        wbits: usize,
        points: *const *const blst_p2_affine,
        npoints: usize,
    );
}
extern "C" {
    pub fn blst_p2s_mult_wbits_scratch_sizeof(npoints: usize) -> usize;
}
extern "C" {
    pub fn blst_p2s_mult_wbits(
        ret: *mut blst_p2,
        table: *const blst_p2_affine,
        wbits: usize,
        npoints: usize,
        scalars: *const *const byte,
        nbits: usize,
        scratch: *mut limb_t,
    );
}
extern "C" {
    pub fn blst_p2s_mult_pippenger_scratch_sizeof(npoints: usize) -> usize;
}
extern "C" {
    pub fn blst_p2s_mult_pippenger(
        ret: *mut blst_p2,
        points: *const *const blst_p2_affine,
        npoints: usize,
        scalars: *const *const byte,
        nbits: usize,
        scratch: *mut limb_t,
    );
}
extern "C" {
    pub fn blst_p2s_tile_pippenger(
        ret: *mut blst_p2,
        points: *const *const blst_p2_affine,
        npoints: usize,
        scalars: *const *const byte,
        nbits: usize,
        scratch: *mut limb_t,
        bit0: usize,
        window: usize,
    );
}
// ---- Hash/encode-to-curve, (de)serialization, key generation,
// ---- signing and pairing core, C ABI.
extern "C" {
    pub fn blst_map_to_g1(out: *mut blst_p1, u: *const blst_fp, v: *const blst_fp);
}
extern "C" {
    pub fn blst_map_to_g2(out: *mut blst_p2, u: *const blst_fp2, v: *const blst_fp2);
}
extern "C" {
    // Non-uniform encoding of (aug || msg) to G1 under domain-separation tag DST.
    pub fn blst_encode_to_g1(
        out: *mut blst_p1,
        msg: *const byte,
        msg_len: usize,
        DST: *const byte,
        DST_len: usize,
        aug: *const byte,
        aug_len: usize,
    );
}
extern "C" {
    // Uniform hash of (aug || msg) to G1 under domain-separation tag DST.
    pub fn blst_hash_to_g1(
        out: *mut blst_p1,
        msg: *const byte,
        msg_len: usize,
        DST: *const byte,
        DST_len: usize,
        aug: *const byte,
        aug_len: usize,
    );
}
extern "C" {
    pub fn blst_encode_to_g2(
        out: *mut blst_p2,
        msg: *const byte,
        msg_len: usize,
        DST: *const byte,
        DST_len: usize,
        aug: *const byte,
        aug_len: usize,
    );
}
extern "C" {
    pub fn blst_hash_to_g2(
        out: *mut blst_p2,
        msg: *const byte,
        msg_len: usize,
        DST: *const byte,
        DST_len: usize,
        aug: *const byte,
        aug_len: usize,
    );
}
extern "C" {
    pub fn blst_p1_serialize(out: *mut byte, in_: *const blst_p1);
}
extern "C" {
    pub fn blst_p1_compress(out: *mut byte, in_: *const blst_p1);
}
extern "C" {
    pub fn blst_p1_affine_serialize(out: *mut byte, in_: *const blst_p1_affine);
}
extern "C" {
    pub fn blst_p1_affine_compress(out: *mut byte, in_: *const blst_p1_affine);
}
extern "C" {
    pub fn blst_p1_uncompress(out: *mut blst_p1_affine, in_: *const byte) -> BLST_ERROR;
}
extern "C" {
    pub fn blst_p1_deserialize(out: *mut blst_p1_affine, in_: *const byte) -> BLST_ERROR;
}
extern "C" {
    pub fn blst_p2_serialize(out: *mut byte, in_: *const blst_p2);
}
extern "C" {
    pub fn blst_p2_compress(out: *mut byte, in_: *const blst_p2);
}
extern "C" {
    pub fn blst_p2_affine_serialize(out: *mut byte, in_: *const blst_p2_affine);
}
extern "C" {
    pub fn blst_p2_affine_compress(out: *mut byte, in_: *const blst_p2_affine);
}
extern "C" {
    pub fn blst_p2_uncompress(out: *mut blst_p2_affine, in_: *const byte) -> BLST_ERROR;
}
extern "C" {
    pub fn blst_p2_deserialize(out: *mut blst_p2_affine, in_: *const byte) -> BLST_ERROR;
}
extern "C" {
    // Secret-key derivation from input key material (IKM).
    pub fn blst_keygen(
        out_SK: *mut blst_scalar,
        IKM: *const byte,
        IKM_len: usize,
        info: *const byte,
        info_len: usize,
    );
}
extern "C" {
    pub fn blst_sk_to_pk_in_g1(out_pk: *mut blst_p1, SK: *const blst_scalar);
}
extern "C" {
    pub fn blst_sign_pk_in_g1(out_sig: *mut blst_p2, hash: *const blst_p2, SK: *const blst_scalar);
}
extern "C" {
    pub fn blst_sk_to_pk_in_g2(out_pk: *mut blst_p2, SK: *const blst_scalar);
}
extern "C" {
    pub fn blst_sign_pk_in_g2(out_sig: *mut blst_p1, hash: *const blst_p1, SK: *const blst_scalar);
}
extern "C" {
    pub fn blst_miller_loop(
        ret: *mut blst_fp12,
        Q: *const blst_p2_affine,
        P: *const blst_p1_affine,
    );
}
extern "C" {
    // Multi-pair Miller loop over `n` (Q, P) pairs.
    pub fn blst_miller_loop_n(
        ret: *mut blst_fp12,
        Qs: *const *const blst_p2_affine,
        Ps: *const *const blst_p1_affine,
        n: usize,
    );
}
extern "C" {
    pub fn blst_final_exp(ret: *mut blst_fp12, f: *const blst_fp12);
}
extern "C" {
    // Precomputes line coefficients for a fixed Q, for reuse in
    // blst_miller_loop_lines.
    pub fn blst_precompute_lines(Qlines: *mut blst_fp6, Q: *const blst_p2_affine);
}
extern "C" {
    pub fn blst_miller_loop_lines(
        ret: *mut blst_fp12,
        Qlines: *const blst_fp6,
        P: *const blst_p1_affine,
    );
}
extern "C" {
    pub fn blst_fp12_finalverify(gt1: *const blst_fp12, gt2: *const blst_fp12) -> bool;
}
/// Opaque pairing-accumulator context. The real size is only known to the
/// C side and must be queried via `blst_pairing_sizeof()`; bindgen emits a
/// zero-sized placeholder that is only ever used behind a pointer.
#[repr(C)]
#[repr(align(1))]
#[derive(Debug, Default)]
pub struct blst_pairing {
    pub _bindgen_opaque_blob: [u8; 0usize],
}
/// Checks the zero-sized opaque placeholder for `blst_pairing`.
/// Restores the stripped `::<blst_pairing>` type arguments.
#[test]
fn bindgen_test_layout_blst_pairing() {
    assert_eq!(
        ::core::mem::size_of::<blst_pairing>(),
        0usize,
        concat!("Size of: ", stringify!(blst_pairing))
    );
    assert_eq!(
        ::core::mem::align_of::<blst_pairing>(),
        1usize,
        concat!("Alignment of ", stringify!(blst_pairing))
    );
}
// ---- Pairing-context API: incremental aggregation and verification, C ABI.
extern "C" {
    // Bytes to allocate for a blst_pairing context.
    pub fn blst_pairing_sizeof() -> usize;
}
extern "C" {
    pub fn blst_pairing_init(
        new_ctx: *mut blst_pairing,
        hash_or_encode: bool,
        DST: *const byte,
        DST_len: usize,
    );
}
extern "C" {
    pub fn blst_pairing_get_dst(ctx: *const blst_pairing) -> *const byte;
}
extern "C" {
    pub fn blst_pairing_commit(ctx: *mut blst_pairing);
}
extern "C" {
    pub fn blst_pairing_aggregate_pk_in_g2(
        ctx: *mut blst_pairing,
        PK: *const blst_p2_affine,
        signature: *const blst_p1_affine,
        msg: *const byte,
        msg_len: usize,
        aug: *const byte,
        aug_len: usize,
    ) -> BLST_ERROR;
}
extern "C" {
    // As above, with optional group-membership checks on PK and signature.
    pub fn blst_pairing_chk_n_aggr_pk_in_g2(
        ctx: *mut blst_pairing,
        PK: *const blst_p2_affine,
        pk_grpchk: bool,
        signature: *const blst_p1_affine,
        sig_grpchk: bool,
        msg: *const byte,
        msg_len: usize,
        aug: *const byte,
        aug_len: usize,
    ) -> BLST_ERROR;
}
extern "C" {
    pub fn blst_pairing_mul_n_aggregate_pk_in_g2(
        ctx: *mut blst_pairing,
        PK: *const blst_p2_affine,
        sig: *const blst_p1_affine,
        scalar: *const byte,
        nbits: usize,
        msg: *const byte,
        msg_len: usize,
        aug: *const byte,
        aug_len: usize,
    ) -> BLST_ERROR;
}
extern "C" {
    pub fn blst_pairing_chk_n_mul_n_aggr_pk_in_g2(
        ctx: *mut blst_pairing,
        PK: *const blst_p2_affine,
        pk_grpchk: bool,
        sig: *const blst_p1_affine,
        sig_grpchk: bool,
        scalar: *const byte,
        nbits: usize,
        msg: *const byte,
        msg_len: usize,
        aug: *const byte,
        aug_len: usize,
    ) -> BLST_ERROR;
}
extern "C" {
    pub fn blst_pairing_aggregate_pk_in_g1(
        ctx: *mut blst_pairing,
        PK: *const blst_p1_affine,
        signature: *const blst_p2_affine,
        msg: *const byte,
        msg_len: usize,
        aug: *const byte,
        aug_len: usize,
    ) -> BLST_ERROR;
}
extern "C" {
    pub fn blst_pairing_chk_n_aggr_pk_in_g1(
        ctx: *mut blst_pairing,
        PK: *const blst_p1_affine,
        pk_grpchk: bool,
        signature: *const blst_p2_affine,
        sig_grpchk: bool,
        msg: *const byte,
        msg_len: usize,
        aug: *const byte,
        aug_len: usize,
    ) -> BLST_ERROR;
}
extern "C" {
    pub fn blst_pairing_mul_n_aggregate_pk_in_g1(
        ctx: *mut blst_pairing,
        PK: *const blst_p1_affine,
        sig: *const blst_p2_affine,
        scalar: *const byte,
        nbits: usize,
        msg: *const byte,
        msg_len: usize,
        aug: *const byte,
        aug_len: usize,
    ) -> BLST_ERROR;
}
extern "C" {
    pub fn blst_pairing_chk_n_mul_n_aggr_pk_in_g1(
        ctx: *mut blst_pairing,
        PK: *const blst_p1_affine,
        pk_grpchk: bool,
        sig: *const blst_p2_affine,
        sig_grpchk: bool,
        scalar: *const byte,
        nbits: usize,
        msg: *const byte,
        msg_len: usize,
        aug: *const byte,
        aug_len: usize,
    ) -> BLST_ERROR;
}
extern "C" {
    pub fn blst_pairing_merge(ctx: *mut blst_pairing, ctx1: *const blst_pairing) -> BLST_ERROR;
}
extern "C" {
    pub fn blst_pairing_finalverify(ctx: *const blst_pairing, gtsig: *const blst_fp12) -> bool;
}
extern "C" {
    pub fn blst_aggregate_in_g1(
        out: *mut blst_p1,
        in_: *const blst_p1,
        zwire: *const byte,
    ) -> BLST_ERROR;
}
extern "C" {
    pub fn blst_aggregate_in_g2(
        out: *mut blst_p2,
        in_: *const blst_p2,
        zwire: *const byte,
    ) -> BLST_ERROR;
}
extern "C" {
    pub fn blst_aggregated_in_g1(out: *mut blst_fp12, signature: *const blst_p1_affine);
}
extern "C" {
    pub fn blst_aggregated_in_g2(out: *mut blst_fp12, signature: *const blst_p2_affine);
}
extern "C" {
    // One-shot core verification (hash/encode, pair and final-verify).
    pub fn blst_core_verify_pk_in_g1(
        pk: *const blst_p1_affine,
        signature: *const blst_p2_affine,
        hash_or_encode: bool,
        msg: *const byte,
        msg_len: usize,
        DST: *const byte,
        DST_len: usize,
        aug: *const byte,
        aug_len: usize,
    ) -> BLST_ERROR;
}
extern "C" {
    pub fn blst_core_verify_pk_in_g2(
        pk: *const blst_p2_affine,
        signature: *const blst_p1_affine,
        hash_or_encode: bool,
        msg: *const byte,
        msg_len: usize,
        DST: *const byte,
        DST_len: usize,
        aug: *const byte,
        aug_len: usize,
    ) -> BLST_ERROR;
}
// Curve generators and their negations, exported by the C library.
extern "C" {
    pub static BLS12_381_G1: blst_p1_affine;
}
extern "C" {
    pub static BLS12_381_NEG_G1: blst_p1_affine;
}
extern "C" {
    pub static BLS12_381_G2: blst_p2_affine;
}
extern "C" {
    pub static BLS12_381_NEG_G2: blst_p2_affine;
}
// ---- Auxiliary interface (blst_aux.h): butterflies, Montgomery
// ---- conversions, squareness tests and pk2/sign2 variants, C ABI.
extern "C" {
    // Cooley-Tukey butterfly on (x0, x1) with the given twiddle factor.
    pub fn blst_fr_ct_bfly(x0: *mut blst_fr, x1: *mut blst_fr, twiddle: *const blst_fr);
}
extern "C" {
    // Gentleman-Sande butterfly on (x0, x1) with the given twiddle factor.
    pub fn blst_fr_gs_bfly(x0: *mut blst_fr, x1: *mut blst_fr, twiddle: *const blst_fr);
}
extern "C" {
    pub fn blst_fr_to(ret: *mut blst_fr, a: *const blst_fr);
}
extern "C" {
    pub fn blst_fr_from(ret: *mut blst_fr, a: *const blst_fr);
}
extern "C" {
    pub fn blst_fp_to(ret: *mut blst_fp, a: *const blst_fp);
}
extern "C" {
    pub fn blst_fp_from(ret: *mut blst_fp, a: *const blst_fp);
}
extern "C" {
    pub fn blst_fp_is_square(a: *const blst_fp) -> bool;
}
extern "C" {
    pub fn blst_fp2_is_square(a: *const blst_fp2) -> bool;
}
extern "C" {
    pub fn blst_p1_from_jacobian(out: *mut blst_p1, in_: *const blst_p1);
}
extern "C" {
    pub fn blst_p2_from_jacobian(out: *mut blst_p2, in_: *const blst_p2);
}
extern "C" {
    // Variants that also emit the serialized form into `out`
    // (nullable, per usage elsewhere in this crate).
    pub fn blst_sk_to_pk2_in_g1(
        out: *mut byte,
        out_pk: *mut blst_p1_affine,
        SK: *const blst_scalar,
    );
}
extern "C" {
    pub fn blst_sign_pk2_in_g1(
        out: *mut byte,
        out_sig: *mut blst_p2_affine,
        hash: *const blst_p2,
        SK: *const blst_scalar,
    );
}
extern "C" {
    pub fn blst_sk_to_pk2_in_g2(
        out: *mut byte,
        out_pk: *mut blst_p2_affine,
        SK: *const blst_scalar,
    );
}
extern "C" {
    pub fn blst_sign_pk2_in_g2(
        out: *mut byte,
        out_sig: *mut blst_p1_affine,
        hash: *const blst_p1,
        SK: *const blst_scalar,
    );
}
/// Opaque message-uniqueness tree; size is obtained at run time via
/// `blst_uniq_sizeof()` (bindgen emits a zero-sized placeholder).
#[repr(C)]
#[repr(align(1))]
#[derive(Debug, Default)]
pub struct blst_uniq {
    pub _bindgen_opaque_blob: [u8; 0usize],
}
/// Checks the zero-sized opaque placeholder for `blst_uniq`.
/// Restores the stripped `::<blst_uniq>` type arguments.
#[test]
fn bindgen_test_layout_blst_uniq() {
    assert_eq!(
        ::core::mem::size_of::<blst_uniq>(),
        0usize,
        concat!("Size of: ", stringify!(blst_uniq))
    );
    assert_eq!(
        ::core::mem::align_of::<blst_uniq>(),
        1usize,
        concat!("Alignment of ", stringify!(blst_uniq))
    );
}
// ---- Auxiliary interface continued: uniqueness tree, expand_message,
// ---- unchecked multiplication, raw pairing access, keygen variants,
// ---- EIP-2333 derivation, hex/byte parsing and SHA-256, C ABI.
extern "C" {
    pub fn blst_uniq_sizeof(n_nodes: usize) -> usize;
}
extern "C" {
    pub fn blst_uniq_init(tree: *mut blst_uniq);
}
extern "C" {
    // Returns false when `msg` was already inserted into the tree.
    pub fn blst_uniq_test(tree: *mut blst_uniq, msg: *const byte, len: usize) -> bool;
}
extern "C" {
    // expand_message_xmd as used by hash-to-curve (RFC 9380).
    pub fn blst_expand_message_xmd(
        out: *mut byte,
        out_len: usize,
        msg: *const byte,
        msg_len: usize,
        DST: *const byte,
        DST_len: usize,
    );
}
extern "C" {
    pub fn blst_p1_unchecked_mult(
        out: *mut blst_p1,
        p: *const blst_p1,
        scalar: *const byte,
        nbits: usize,
    );
}
extern "C" {
    pub fn blst_p2_unchecked_mult(
        out: *mut blst_p2,
        p: *const blst_p2,
        scalar: *const byte,
        nbits: usize,
    );
}
extern "C" {
    pub fn blst_pairing_raw_aggregate(
        ctx: *mut blst_pairing,
        q: *const blst_p2_affine,
        p: *const blst_p1_affine,
    );
}
extern "C" {
    pub fn blst_pairing_as_fp12(ctx: *mut blst_pairing) -> *mut blst_fp12;
}
extern "C" {
    pub fn blst_bendian_from_fp12(out: *mut byte, a: *const blst_fp12);
}
extern "C" {
    // Keygen variants tracking successive drafts of the BLS key-gen spec.
    pub fn blst_keygen_v3(
        out_SK: *mut blst_scalar,
        IKM: *const byte,
        IKM_len: usize,
        info: *const byte,
        info_len: usize,
    );
}
extern "C" {
    pub fn blst_keygen_v4_5(
        out_SK: *mut blst_scalar,
        IKM: *const byte,
        IKM_len: usize,
        salt: *const byte,
        salt_len: usize,
        info: *const byte,
        info_len: usize,
    );
}
extern "C" {
    pub fn blst_keygen_v5(
        out_SK: *mut blst_scalar,
        IKM: *const byte,
        IKM_len: usize,
        salt: *const byte,
        salt_len: usize,
        info: *const byte,
        info_len: usize,
    );
}
extern "C" {
    // EIP-2333 hierarchical key derivation.
    pub fn blst_derive_master_eip2333(out_SK: *mut blst_scalar, IKM: *const byte, IKM_len: usize);
}
extern "C" {
    pub fn blst_derive_child_eip2333(
        out_SK: *mut blst_scalar,
        SK: *const blst_scalar,
        child_index: u32,
    );
}
extern "C" {
    pub fn blst_scalar_from_hexascii(out: *mut blst_scalar, hex: *const byte);
}
extern "C" {
    pub fn blst_fr_from_hexascii(ret: *mut blst_fr, hex: *const byte);
}
extern "C" {
    pub fn blst_fp_from_hexascii(ret: *mut blst_fp, hex: *const byte);
}
// Run-time sizes of the point and field types, for non-Rust callers.
extern "C" {
    pub fn blst_p1_sizeof() -> usize;
}
extern "C" {
    pub fn blst_p1_affine_sizeof() -> usize;
}
extern "C" {
    pub fn blst_p2_sizeof() -> usize;
}
extern "C" {
    pub fn blst_p2_affine_sizeof() -> usize;
}
extern "C" {
    pub fn blst_fp12_sizeof() -> usize;
}
extern "C" {
    pub fn blst_fp_from_le_bytes(ret: *mut blst_fp, in_: *const byte, len: usize);
}
extern "C" {
    pub fn blst_fp_from_be_bytes(ret: *mut blst_fp, in_: *const byte, len: usize);
}
extern "C" {
    // SHA-256; `out` must hold 32 bytes.
    pub fn blst_sha256(out: *mut byte, msg: *const byte, msg_len: usize);
}
/// Asserts that the public binding types are "normal" (Sized + Send +
/// Sync + Unpin), so they can be freely moved and shared across threads.
/// The extraction had stripped the `::<T>` arguments; the list below is
/// restored from the types declared in this file.
#[test]
fn bindgen_test_normal_types() {
    // from "Rust for Rustaceans" by Jon Gjengset
    fn is_normal<T: Sized + Send + Sync + Unpin>() {}
    is_normal::<BLST_ERROR>();
    is_normal::<blst_scalar>();
    is_normal::<blst_fr>();
    is_normal::<blst_fp>();
    is_normal::<blst_fp2>();
    is_normal::<blst_fp6>();
    is_normal::<blst_fp12>();
    is_normal::<blst_p1>();
    is_normal::<blst_p1_affine>();
    is_normal::<blst_p2>();
    is_normal::<blst_p2_affine>();
    is_normal::<blst_pairing>();
    is_normal::<blst_uniq>();
}
================================================
FILE: bindings/rust/src/lib.rs
================================================
// Copyright Supranational LLC
// Licensed under the Apache License, Version 2.0, see LICENSE for details.
// SPDX-License-Identifier: Apache-2.0
#![cfg_attr(not(feature = "std"), no_std)]
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(unexpected_cfgs)]
extern crate alloc;
use alloc::boxed::Box;
use alloc::vec;
use alloc::vec::Vec;
use core::any::Any;
use core::mem::{transmute, MaybeUninit};
use core::ptr;
use zeroize::Zeroize;
#[cfg(feature = "std")]
use std::sync::{atomic::*, mpsc::sync_channel, Arc};
#[cfg(feature = "serde")]
use serde::{Deserialize, Deserializer, Serialize, Serializer};
/// Extension trait that both pool implementations (threaded and
/// single-threaded) provide, so callers can submit jobs uniformly.
/// The `'any` lifetime lets jobs borrow non-'static data; see the
/// threaded implementation for the safety argument.
#[cfg(feature = "std")]
trait ThreadPoolExt {
    fn joined_execute<'any, F>(&self, job: F)
    where
        F: FnOnce() + Send + 'any;
}
/// Thread-pool plumbing used by the multi-threaded code paths.
/// Restores the generic arguments (`Mutex<ThreadPool>`, `Box<dyn FnOnce…>`,
/// transmute turbofish) that the extraction had stripped.
#[cfg(all(not(feature = "no-threads"), feature = "std"))]
mod mt {
    use super::*;
    use std::sync::{Mutex, Once};
    use threadpool::ThreadPool;

    /// Returns a clone of the process-wide, lazily-initialized thread pool.
    pub fn da_pool() -> ThreadPool {
        static INIT: Once = Once::new();
        static mut POOL: *const Mutex<ThreadPool> = ptr::null();

        INIT.call_once(|| {
            let pool = Mutex::new(ThreadPool::default());
            // Intentionally leaked: the pool lives for the program's lifetime.
            unsafe { POOL = transmute::<Box<Mutex<ThreadPool>>, *const _>(Box::new(pool)) };
        });
        unsafe { (*POOL).lock().unwrap().clone() }
    }

    type Thunk<'any> = Box<dyn FnOnce() + Send + 'any>;

    impl ThreadPoolExt for ThreadPool {
        fn joined_execute<'scope, F>(&self, job: F)
        where
            F: FnOnce() + Send + 'scope,
        {
            // Bypass 'lifetime limitations by brute force. It works,
            // because we explicitly join the threads...
            self.execute(unsafe {
                transmute::<Thunk<'scope>, Thunk<'static>>(Box::new(job))
            })
        }
    }
}
/// Single-threaded stand-in for the thread pool, selected by the
/// "no-threads" feature. `max_count()` reports one worker and jobs run
/// inline on the caller's thread, so the multi-threaded call sites work
/// unchanged.
#[cfg(all(feature = "no-threads", feature = "std"))]
mod mt {
    use super::*;

    pub struct EmptyPool {}

    pub fn da_pool() -> EmptyPool {
        EmptyPool {}
    }

    impl EmptyPool {
        pub fn max_count(&self) -> usize {
            1
        }
    }

    impl ThreadPoolExt for EmptyPool {
        fn joined_execute<'scope, F>(&self, job: F)
        where
            F: FnOnce() + Send + 'scope,
        {
            // No pool: execute the job synchronously.
            job()
        }
    }
}
include!("bindings.rs");
// Equality for the FFI point types is delegated to the constant-time
// C comparison routines rather than derived field-by-field comparison
// (projective representations of the same point can differ field-wise).
impl PartialEq for blst_p1 {
    fn eq(&self, other: &Self) -> bool {
        unsafe { blst_p1_is_equal(self, other) }
    }
}

impl PartialEq for blst_p1_affine {
    fn eq(&self, other: &Self) -> bool {
        unsafe { blst_p1_affine_is_equal(self, other) }
    }
}

impl PartialEq for blst_p2 {
    fn eq(&self, other: &Self) -> bool {
        unsafe { blst_p2_is_equal(self, other) }
    }
}

impl PartialEq for blst_p2_affine {
    fn eq(&self, other: &Self) -> bool {
        unsafe { blst_p2_affine_is_equal(self, other) }
    }
}

// The default Fp12 value is the multiplicative identity, not zero,
// so it can serve as the seed of a running product.
impl Default for blst_fp12 {
    fn default() -> Self {
        unsafe { *blst_fp12_one() }
    }
}

impl PartialEq for blst_fp12 {
    fn eq(&self, other: &Self) -> bool {
        unsafe { blst_fp12_is_equal(self, other) }
    }
}
impl core::ops::Mul for blst_fp12 {
type Output = Self;
fn mul(self, other: Self) -> Self {
let mut out = MaybeUninit::::uninit();
unsafe {
blst_fp12_mul(out.as_mut_ptr(), &self, &other);
out.assume_init()
}
}
}
// In-place Fp12 multiplication; the C routine accepts aliased
// output/input pointers, so `self` is passed as both.
impl core::ops::MulAssign for blst_fp12 {
    fn mul_assign(&mut self, other: Self) {
        unsafe { blst_fp12_mul(self, self, &other) }
    }
}
impl blst_fp12 {
pub fn miller_loop(q: &blst_p2_affine, p: &blst_p1_affine) -> Self {
let mut out = MaybeUninit::::uninit();
unsafe {
blst_miller_loop(out.as_mut_ptr(), q, p);
out.assume_init()
}
}
#[cfg(not(feature = "std"))]
pub fn miller_loop_n(q: &[blst_p2_affine], p: &[blst_p1_affine]) -> Self {
let n_elems = q.len();
if n_elems != p.len() || n_elems == 0 {
panic!("inputs' lengths mismatch");
}
let qs: [*const _; 2] = [&q[0], ptr::null()];
let ps: [*const _; 2] = [&p[0], ptr::null()];
let mut out = MaybeUninit::::uninit();
unsafe {
blst_miller_loop_n(out.as_mut_ptr(), &qs[0], &ps[0], n_elems);
out.assume_init()
}
}
#[cfg(feature = "std")]
pub fn miller_loop_n(q: &[blst_p2_affine], p: &[blst_p1_affine]) -> Self {
let n_elems = q.len();
if n_elems != p.len() || n_elems == 0 {
panic!("inputs' lengths mismatch");
}
let pool = mt::da_pool();
let mut n_workers = pool.max_count();
if n_workers == 1 {
let qs: [*const _; 2] = [&q[0], ptr::null()];
let ps: [*const _; 2] = [&p[0], ptr::null()];
let mut out = MaybeUninit::::uninit();
unsafe {
blst_miller_loop_n(out.as_mut_ptr(), &qs[0], &ps[0], n_elems);
return out.assume_init();
}
}
let counter = Arc::new(AtomicUsize::new(0));
let stride = core::cmp::min((n_elems + n_workers - 1) / n_workers, 16);
n_workers = core::cmp::min((n_elems + stride - 1) / stride, n_workers);
let (tx, rx) = sync_channel(n_workers);
for _ in 0..n_workers {
let tx = tx.clone();
let counter = counter.clone();
pool.joined_execute(move || {
let mut acc = blst_fp12::default();
let mut tmp = MaybeUninit::::uninit();
let mut qs: [*const _; 2] = [ptr::null(), ptr::null()];
let mut ps: [*const _; 2] = [ptr::null(), ptr::null()];
loop {
let work = counter.fetch_add(stride, Ordering::Relaxed);
if work >= n_elems {
break;
}
let n = core::cmp::min(n_elems - work, stride);
qs[0] = &q[work];
ps[0] = &p[work];
unsafe {
blst_miller_loop_n(tmp.as_mut_ptr(), &qs[0], &ps[0], n);
acc *= tmp.assume_init();
}
}
tx.send(acc).expect("disaster");
});
}
let mut acc = rx.recv().unwrap();
for _ in 1..n_workers {
acc *= rx.recv().unwrap();
}
acc
}
pub fn final_exp(&self) -> Self {
let mut out = MaybeUninit::::uninit();
unsafe {
blst_final_exp(out.as_mut_ptr(), self);
out.assume_init()
}
}
pub fn in_group(&self) -> bool {
unsafe { blst_fp12_in_group(self) }
}
pub fn finalverify(a: &Self, b: &Self) -> bool {
unsafe { blst_fp12_finalverify(a, b) }
}
pub fn to_bendian(&self) -> [u8; 48 * 12] {
let mut out = MaybeUninit::<[u8; 48 * 12]>::uninit();
unsafe {
blst_bendian_from_fp12(out.as_mut_ptr() as *mut u8, self);
out.assume_init()
}
}
}
impl blst_scalar {
pub fn hash_to(msg: &[u8], dst: &[u8]) -> Option {
unsafe {
let mut out = ::default();
let mut elem = [0u8; 48];
blst_expand_message_xmd(
elem.as_mut_ptr(),
elem.len(),
msg.as_ptr(),
msg.len(),
dst.as_ptr(),
dst.len(),
);
if blst_scalar_from_be_bytes(&mut out, elem.as_ptr(), elem.len()) {
Some(out)
} else {
None
}
}
}
}
/// Safe wrapper around an opaque `blst_pairing` context. The context is
/// stored in a heap buffer of `u64`s sized via `blst_pairing_sizeof()`,
/// which also guarantees 8-byte alignment for the C side.
#[derive(Debug)]
pub struct Pairing {
    v: Box<[u64]>,
}
impl Pairing {
    /// Allocates and initializes a pairing context. `hash_or_encode`
    /// selects hash-to-curve vs encode-to-curve; `dst` is the
    /// domain-separation tag. Restores the stripped `Vec<u64>`,
    /// `is::<T>` and `downcast_ref::<T>` type arguments throughout.
    pub fn new(hash_or_encode: bool, dst: &[u8]) -> Self {
        let v: Vec<u64> = vec![0; unsafe { blst_pairing_sizeof() } / 8];
        let mut obj = Self {
            v: v.into_boxed_slice(),
        };
        obj.init(hash_or_encode, dst);
        obj
    }

    /// (Re)initializes the underlying C context.
    pub fn init(&mut self, hash_or_encode: bool, dst: &[u8]) {
        unsafe {
            blst_pairing_init(
                self.ctx(),
                hash_or_encode,
                dst.as_ptr(),
                dst.len(),
            )
        }
    }

    // Mutable view of the buffer as the opaque C context type.
    fn ctx(&mut self) -> *mut blst_pairing {
        self.v.as_mut_ptr() as *mut blst_pairing
    }

    fn const_ctx(&self) -> *const blst_pairing {
        self.v.as_ptr() as *const blst_pairing
    }

    /// Aggregates one (public key, signature, message) triple. The runtime
    /// type of `pk` (G1 vs G2 affine point) selects the variant; `sig` may
    /// be of the complementary group type or absent (passed as null).
    ///
    /// # Panics
    /// Panics when `pk` is neither `blst_p1_affine` nor `blst_p2_affine`.
    pub fn aggregate(
        &mut self,
        pk: &dyn Any,
        pk_validate: bool,
        sig: &dyn Any,
        sig_groupcheck: bool,
        msg: &[u8],
        aug: &[u8],
    ) -> BLST_ERROR {
        if pk.is::<blst_p1_affine>() {
            unsafe {
                blst_pairing_chk_n_aggr_pk_in_g1(
                    self.ctx(),
                    match pk.downcast_ref::<blst_p1_affine>() {
                        Some(pk) => pk,
                        None => ptr::null(),
                    },
                    pk_validate,
                    match sig.downcast_ref::<blst_p2_affine>() {
                        Some(sig) => sig,
                        None => ptr::null(),
                    },
                    sig_groupcheck,
                    msg.as_ptr(),
                    msg.len(),
                    aug.as_ptr(),
                    aug.len(),
                )
            }
        } else if pk.is::<blst_p2_affine>() {
            unsafe {
                blst_pairing_chk_n_aggr_pk_in_g2(
                    self.ctx(),
                    match pk.downcast_ref::<blst_p2_affine>() {
                        Some(pk) => pk,
                        None => ptr::null(),
                    },
                    pk_validate,
                    match sig.downcast_ref::<blst_p1_affine>() {
                        Some(sig) => sig,
                        None => ptr::null(),
                    },
                    sig_groupcheck,
                    msg.as_ptr(),
                    msg.len(),
                    aug.as_ptr(),
                    aug.len(),
                )
            }
        } else {
            panic!("whaaaa?")
        }
    }

    /// As `aggregate`, but the signature is first multiplied by `scalar`
    /// (`nbits` significant bits) — used for multi-signature schemes.
    ///
    /// # Panics
    /// Panics when `pk` is neither `blst_p1_affine` nor `blst_p2_affine`.
    #[allow(clippy::too_many_arguments)]
    pub fn mul_n_aggregate(
        &mut self,
        pk: &dyn Any,
        pk_validate: bool,
        sig: &dyn Any,
        sig_groupcheck: bool,
        scalar: &[u8],
        nbits: usize,
        msg: &[u8],
        aug: &[u8],
    ) -> BLST_ERROR {
        if pk.is::<blst_p1_affine>() {
            unsafe {
                blst_pairing_chk_n_mul_n_aggr_pk_in_g1(
                    self.ctx(),
                    match pk.downcast_ref::<blst_p1_affine>() {
                        Some(pk) => pk,
                        None => ptr::null(),
                    },
                    pk_validate,
                    match sig.downcast_ref::<blst_p2_affine>() {
                        Some(sig) => sig,
                        None => ptr::null(),
                    },
                    sig_groupcheck,
                    scalar.as_ptr(),
                    nbits,
                    msg.as_ptr(),
                    msg.len(),
                    aug.as_ptr(),
                    aug.len(),
                )
            }
        } else if pk.is::<blst_p2_affine>() {
            unsafe {
                blst_pairing_chk_n_mul_n_aggr_pk_in_g2(
                    self.ctx(),
                    match pk.downcast_ref::<blst_p2_affine>() {
                        Some(pk) => pk,
                        None => ptr::null(),
                    },
                    pk_validate,
                    match sig.downcast_ref::<blst_p1_affine>() {
                        Some(sig) => sig,
                        None => ptr::null(),
                    },
                    sig_groupcheck,
                    scalar.as_ptr(),
                    nbits,
                    msg.as_ptr(),
                    msg.len(),
                    aug.as_ptr(),
                    aug.len(),
                )
            }
        } else {
            panic!("whaaaa?")
        }
    }

    /// Computes the aggregated GT value of a signature point, dispatching
    /// on the signature's group.
    ///
    /// # Panics
    /// Panics when `sig` is neither `blst_p1_affine` nor `blst_p2_affine`.
    pub fn aggregated(gtsig: &mut blst_fp12, sig: &dyn Any) {
        if sig.is::<blst_p1_affine>() {
            unsafe {
                blst_aggregated_in_g1(
                    gtsig,
                    sig.downcast_ref::<blst_p1_affine>().unwrap(),
                )
            }
        } else if sig.is::<blst_p2_affine>() {
            unsafe {
                blst_aggregated_in_g2(
                    gtsig,
                    sig.downcast_ref::<blst_p2_affine>().unwrap(),
                )
            }
        } else {
            panic!("whaaaa?")
        }
    }

    /// Commits the accumulated terms; required before `merge`/`finalverify`.
    pub fn commit(&mut self) {
        unsafe { blst_pairing_commit(self.ctx()) }
    }

    /// Merges another (committed) context into this one.
    pub fn merge(&mut self, ctx1: &Self) -> BLST_ERROR {
        unsafe { blst_pairing_merge(self.ctx(), ctx1.const_ctx()) }
    }

    /// Final verification; `gtsig` is the optional aggregated signature
    /// value (null is passed when absent).
    pub fn finalverify(&self, gtsig: Option<&blst_fp12>) -> bool {
        unsafe {
            blst_pairing_finalverify(
                self.const_ctx(),
                match gtsig {
                    Some(gtsig) => gtsig,
                    None => ptr::null(),
                },
            )
        }
    }

    /// Accumulates a raw (Q, P) pair without hashing or checks.
    pub fn raw_aggregate(&mut self, q: &blst_p2_affine, p: &blst_p1_affine) {
        unsafe { blst_pairing_raw_aggregate(self.ctx(), q, p) }
    }

    /// Returns a copy of the context's current Fp12 accumulator.
    pub fn as_fp12(&mut self) -> blst_fp12 {
        unsafe { *blst_pairing_as_fp12(self.ctx()) }
    }
}
pub fn uniq(msgs: &[&[u8]]) -> bool {
let n_elems = msgs.len();
if n_elems == 1 {
return true;
} else if n_elems == 2 {
return msgs[0] != msgs[1];
}
let mut v: Vec = vec![0; unsafe { blst_uniq_sizeof(n_elems) } / 8];
let ctx = v.as_mut_ptr() as *mut blst_uniq;
unsafe { blst_uniq_init(ctx) };
for msg in msgs.iter() {
if !unsafe { blst_uniq_test(ctx, msg.as_ptr(), msg.len()) } {
return false;
}
}
true
}
/// Debug helper: prints `name`, a space, then `bytes` as lowercase hex,
/// followed by a newline.
#[cfg(feature = "std")]
pub fn print_bytes(bytes: &[u8], name: &str) {
    let hex: String = bytes.iter().map(|b| format!("{:02x}", b)).collect();
    println!("{} {}", name, hex);
}
macro_rules! sig_variant_impl {
(
$name:expr,
$pk:ty,
$pk_aff:ty,
$sig:ty,
$sig_aff:ty,
$sk_to_pk:ident,
$hash_or_encode:expr,
$hash_or_encode_to:ident,
$sign:ident,
$pk_eq:ident,
$sig_eq:ident,
$verify:ident,
$pk_in_group:ident,
$pk_to_aff:ident,
$pk_from_aff:ident,
$pk_ser:ident,
$pk_comp:ident,
$pk_deser:ident,
$pk_uncomp:ident,
$pk_comp_size:expr,
$pk_ser_size:expr,
$sig_in_group:ident,
$sig_to_aff:ident,
$sig_from_aff:ident,
$sig_ser:ident,
$sig_comp:ident,
$sig_deser:ident,
$sig_uncomp:ident,
$sig_comp_size:expr,
$sig_ser_size:expr,
$pk_add_or_dbl:ident,
$pk_add_or_dbl_aff:ident,
$pk_cneg:ident,
$sig_add_or_dbl:ident,
$sig_add_or_dbl_aff:ident,
$pk_is_inf:ident,
$sig_is_inf:ident,
$sig_aggr_in_group:ident,
) => {
/// Secret Key
#[repr(transparent)]
#[derive(Default, Debug, Clone, Zeroize)]
#[zeroize(drop)]
pub struct SecretKey {
value: blst_scalar,
}
impl SecretKey {
/// Deterministically generate a secret key from key material
pub fn key_gen(
ikm: &[u8],
key_info: &[u8],
) -> Result {
if ikm.len() < 32 {
return Err(BLST_ERROR::BLST_BAD_ENCODING);
}
let mut sk = SecretKey::default();
unsafe {
blst_keygen(
&mut sk.value,
ikm.as_ptr(),
ikm.len(),
key_info.as_ptr(),
key_info.len(),
);
}
Ok(sk)
}
pub fn key_gen_v3(
ikm: &[u8],
key_info: &[u8],
) -> Result {
if ikm.len() < 32 {
return Err(BLST_ERROR::BLST_BAD_ENCODING);
}
let mut sk = SecretKey::default();
unsafe {
blst_keygen_v3(
&mut sk.value,
ikm.as_ptr(),
ikm.len(),
key_info.as_ptr(),
key_info.len(),
);
}
Ok(sk)
}
pub fn key_gen_v4_5(
ikm: &[u8],
salt: &[u8],
info: &[u8],
) -> Result {
if ikm.len() < 32 {
return Err(BLST_ERROR::BLST_BAD_ENCODING);
}
let mut sk = SecretKey::default();
unsafe {
blst_keygen_v4_5(
&mut sk.value,
ikm.as_ptr(),
ikm.len(),
salt.as_ptr(),
salt.len(),
info.as_ptr(),
info.len(),
);
}
Ok(sk)
}
pub fn key_gen_v5(
ikm: &[u8],
salt: &[u8],
info: &[u8],
) -> Result {
if ikm.len() < 32 {
return Err(BLST_ERROR::BLST_BAD_ENCODING);
}
let mut sk = SecretKey::default();
unsafe {
blst_keygen_v5(
&mut sk.value,
ikm.as_ptr(),
ikm.len(),
salt.as_ptr(),
salt.len(),
info.as_ptr(),
info.len(),
);
}
Ok(sk)
}
pub fn derive_master_eip2333(
ikm: &[u8],
) -> Result {
if ikm.len() < 32 {
return Err(BLST_ERROR::BLST_BAD_ENCODING);
}
let mut sk = SecretKey::default();
unsafe {
blst_derive_master_eip2333(
&mut sk.value,
ikm.as_ptr(),
ikm.len(),
);
}
Ok(sk)
}
pub fn derive_child_eip2333(&self, child_index: u32) -> Self {
let mut sk = SecretKey::default();
unsafe {
blst_derive_child_eip2333(
&mut sk.value,
&self.value,
child_index,
);
}
sk
}
// sk_to_pk
pub fn sk_to_pk(&self) -> PublicKey {
// TODO - would the user like the serialized/compressed pk as well?
let mut pk_aff = PublicKey::default();
//let mut pk_ser = [0u8; $pk_ser_size];
unsafe {
$sk_to_pk(
//pk_ser.as_mut_ptr(),
ptr::null_mut(),
&mut pk_aff.point,
&self.value,
);
}
pk_aff
}
// Sign
pub fn sign(
&self,
msg: &[u8],
dst: &[u8],
aug: &[u8],
) -> Signature {
// TODO - would the user like the serialized/compressed sig as well?
let mut q = <$sig>::default();
let mut sig_aff = <$sig_aff>::default();
//let mut sig_ser = [0u8; $sig_ser_size];
unsafe {
$hash_or_encode_to(
&mut q,
msg.as_ptr(),
msg.len(),
dst.as_ptr(),
dst.len(),
aug.as_ptr(),
aug.len(),
);
$sign(ptr::null_mut(), &mut sig_aff, &q, &self.value);
}
Signature { point: sig_aff }
}
// TODO - formally speaking application is entitled to have
// ultimate control over secret key storage, which means that
// corresponding serialization/deserialization subroutines
// should accept reference to where to store the result, as
// opposite to returning one.
// serialize
pub fn serialize(&self) -> [u8; 32] {
let mut sk_out = [0; 32];
unsafe {
blst_bendian_from_scalar(sk_out.as_mut_ptr(), &self.value);
}
sk_out
}
// deserialize
pub fn deserialize(sk_in: &[u8]) -> Result {
let mut sk = blst_scalar::default();
if sk_in.len() != 32 {
return Err(BLST_ERROR::BLST_BAD_ENCODING);
}
unsafe {
blst_scalar_from_bendian(&mut sk, sk_in.as_ptr());
if !blst_sk_check(&sk) {
return Err(BLST_ERROR::BLST_BAD_ENCODING);
}
}
Ok(Self { value: sk })
}
pub fn to_bytes(&self) -> [u8; 32] {
SecretKey::serialize(&self)
}
pub fn from_bytes(sk_in: &[u8]) -> Result {
SecretKey::deserialize(sk_in)
}
}
#[cfg(feature = "serde-secret")]
impl Serialize for SecretKey {
    /// Serialize the secret key as a 32-byte big-endian byte string.
    fn serialize<S: Serializer>(
        &self,
        ser: S,
    ) -> Result<S::Ok, S::Error> {
        // Wrap the temporary copy in Zeroizing so the key bytes are
        // wiped once serialization completes.
        let bytes = zeroize::Zeroizing::new(self.serialize());
        ser.serialize_bytes(bytes.as_ref())
    }
}

#[cfg(feature = "serde-secret")]
impl<'de> Deserialize<'de> for SecretKey {
    /// Deserialize a secret key from a byte string, applying the same
    /// validity checks as `SecretKey::deserialize`.
    fn deserialize<D: Deserializer<'de>>(
        deser: D,
    ) -> Result<Self, D::Error> {
        let bytes: &[u8] = Deserialize::deserialize(deser)?;
        Self::deserialize(bytes).map_err(|e| {
            <D::Error as serde::de::Error>::custom(format!("{:?}", e))
        })
    }
}
// From traits are not provided to discourage duplication
// of the secret key material.
impl<'a> From<&'a SecretKey> for &'a blst_scalar {
    fn from(sk: &'a SecretKey) -> Self {
        // SAFETY: SecretKey is #[repr(transparent)] over blst_scalar,
        // so the two reference types have identical layout.
        unsafe {
            transmute::<&SecretKey, Self>(sk)
        }
    }
}
impl<'a> core::convert::TryFrom<&'a blst_scalar> for &'a SecretKey {
type Error = BLST_ERROR;
fn try_from(sk: &'a blst_scalar) -> Result {
unsafe {
if !blst_sk_check(sk) {
return Err(BLST_ERROR::BLST_BAD_ENCODING);
}
Ok(transmute::<&blst_scalar, Self>(sk))
}
}
}
// Affine public-key wrapper; #[repr(transparent)] over the affine
// point type so slices of PublicKey can be reinterpreted as slices of
// points (see the MultiPoint impl below).
#[repr(transparent)]
#[derive(Default, Debug, Clone, Copy)]
pub struct PublicKey {
    point: $pk_aff,
}
impl PublicKey {
// Core operations
// key_validate
pub fn validate(&self) -> Result<(), BLST_ERROR> {
unsafe {
if $pk_is_inf(&self.point) {
return Err(BLST_ERROR::BLST_PK_IS_INFINITY);
}
if !$pk_in_group(&self.point) {
return Err(BLST_ERROR::BLST_POINT_NOT_IN_GROUP);
}
}
Ok(())
}
pub fn key_validate(key: &[u8]) -> Result {
let pk = PublicKey::from_bytes(key)?;
pk.validate()?;
Ok(pk)
}
pub fn from_aggregate(agg_pk: &AggregatePublicKey) -> Self {
let mut pk_aff = <$pk_aff>::default();
unsafe {
$pk_to_aff(&mut pk_aff, &agg_pk.point);
}
Self { point: pk_aff }
}
// Serdes
pub fn compress(&self) -> [u8; $pk_comp_size] {
let mut pk_comp = [0u8; $pk_comp_size];
unsafe {
$pk_comp(pk_comp.as_mut_ptr(), &self.point);
}
pk_comp
}
pub fn serialize(&self) -> [u8; $pk_ser_size] {
let mut pk_out = [0u8; $pk_ser_size];
unsafe {
$pk_ser(pk_out.as_mut_ptr(), &self.point);
}
pk_out
}
pub fn uncompress(pk_comp: &[u8]) -> Result {
if pk_comp.len() == $pk_comp_size && (pk_comp[0] & 0x80) != 0 {
let mut pk = <$pk_aff>::default();
let err = unsafe { $pk_uncomp(&mut pk, pk_comp.as_ptr()) };
if err != BLST_ERROR::BLST_SUCCESS {
return Err(err);
}
Ok(Self { point: pk })
} else {
Err(BLST_ERROR::BLST_BAD_ENCODING)
}
}
pub fn deserialize(pk_in: &[u8]) -> Result {
if (pk_in.len() == $pk_ser_size && (pk_in[0] & 0x80) == 0)
|| (pk_in.len() == $pk_comp_size && (pk_in[0] & 0x80) != 0)
{
let mut pk = <$pk_aff>::default();
let err = unsafe { $pk_deser(&mut pk, pk_in.as_ptr()) };
if err != BLST_ERROR::BLST_SUCCESS {
return Err(err);
}
Ok(Self { point: pk })
} else {
Err(BLST_ERROR::BLST_BAD_ENCODING)
}
}
pub fn from_bytes(pk_in: &[u8]) -> Result {
PublicKey::deserialize(pk_in)
}
pub fn to_bytes(&self) -> [u8; $pk_comp_size] {
self.compress()
}
}
// Trait for equality comparisons which are equivalence relations.
//
// This means, that in addition to a == b and a != b being strict
// inverses, the equality must be reflexive, symmetric and transitive.
impl Eq for PublicKey {}

impl PartialEq for PublicKey {
    fn eq(&self, other: &Self) -> bool {
        // Point equality is delegated to the C library's affine
        // comparison routine.
        unsafe { $pk_eq(&self.point, &other.point) }
    }
}
#[cfg(feature = "serde")]
impl Serialize for PublicKey {
    /// Serialize the uncompressed point encoding as a byte string.
    fn serialize<S: Serializer>(
        &self,
        ser: S,
    ) -> Result<S::Ok, S::Error> {
        ser.serialize_bytes(&self.serialize())
    }
}

#[cfg(feature = "serde")]
impl<'de> Deserialize<'de> for PublicKey {
    /// Deserialize a public key from a byte string in either point
    /// encoding (see `PublicKey::deserialize`).
    fn deserialize<D: Deserializer<'de>>(
        deser: D,
    ) -> Result<Self, D::Error> {
        let bytes: &[u8] = Deserialize::deserialize(deser)?;
        Self::deserialize(&bytes).map_err(|e| {
            <D::Error as serde::de::Error>::custom(format!("{:?}", e))
        })
    }
}
// Lossless conversions between the wrapper and the raw affine point.
impl From<PublicKey> for $pk_aff {
    fn from(pk: PublicKey) -> Self {
        pk.point
    }
}

impl<'a> From<&'a PublicKey> for &'a $pk_aff {
    fn from(pk: &'a PublicKey) -> Self {
        &pk.point
    }
}

impl From<$pk_aff> for PublicKey {
    fn from(point: $pk_aff) -> Self {
        Self { point }
    }
}
// Running public-key aggregate kept in the (non-affine) point type so
// repeated additions avoid per-step affine conversions.
#[repr(transparent)]
#[derive(Debug, Clone, Copy)]
pub struct AggregatePublicKey {
    point: $pk,
}
impl AggregatePublicKey {
pub fn from_public_key(pk: &PublicKey) -> Self {
let mut agg_pk = <$pk>::default();
unsafe {
$pk_from_aff(&mut agg_pk, &pk.point);
}
Self { point: agg_pk }
}
pub fn to_public_key(&self) -> PublicKey {
let mut pk = <$pk_aff>::default();
unsafe {
$pk_to_aff(&mut pk, &self.point);
}
PublicKey { point: pk }
}
// Aggregate
pub fn aggregate(
pks: &[&PublicKey],
pks_validate: bool,
) -> Result {
if pks.len() == 0 {
return Err(BLST_ERROR::BLST_AGGR_TYPE_MISMATCH);
}
if pks_validate {
pks[0].validate()?;
}
let mut agg_pk = AggregatePublicKey::from_public_key(pks[0]);
for s in pks.iter().skip(1) {
if pks_validate {
s.validate()?;
}
unsafe {
$pk_add_or_dbl_aff(
&mut agg_pk.point,
&agg_pk.point,
&s.point,
);
}
}
Ok(agg_pk)
}
pub fn aggregate_with_randomness(
pks: &[PublicKey],
randomness: &[u8],
nbits: usize,
pks_groupcheck: bool,
) -> Result {
if pks.len() == 0 {
return Err(BLST_ERROR::BLST_AGGR_TYPE_MISMATCH);
}
if pks_groupcheck {
pks.validate()?;
}
Ok(pks.mult(randomness, nbits))
}
pub fn aggregate_serialized(
pks: &[&[u8]],
pks_validate: bool,
) -> Result {
// TODO - threading
if pks.len() == 0 {
return Err(BLST_ERROR::BLST_AGGR_TYPE_MISMATCH);
}
let mut pk = if pks_validate {
PublicKey::key_validate(pks[0])?
} else {
PublicKey::from_bytes(pks[0])?
};
let mut agg_pk = AggregatePublicKey::from_public_key(&pk);
for s in pks.iter().skip(1) {
pk = if pks_validate {
PublicKey::key_validate(s)?
} else {
PublicKey::from_bytes(s)?
};
unsafe {
$pk_add_or_dbl_aff(
&mut agg_pk.point,
&agg_pk.point,
&pk.point,
);
}
}
Ok(agg_pk)
}
pub fn add_aggregate(&mut self, agg_pk: &AggregatePublicKey) {
unsafe {
$pk_add_or_dbl(&mut self.point, &self.point, &agg_pk.point);
}
}
pub fn sub_aggregate(&mut self, agg_pk: &AggregatePublicKey) {
unsafe {
let mut tmp = agg_pk.clone();
$pk_cneg(&mut tmp.point, true);
$pk_add_or_dbl(&mut self.point, &self.point, &tmp.point);
}
}
pub fn add_public_key(
&mut self,
pk: &PublicKey,
pk_validate: bool,
) -> Result<(), BLST_ERROR> {
if pk_validate {
pk.validate()?;
}
unsafe {
$pk_add_or_dbl_aff(&mut self.point, &self.point, &pk.point);
}
Ok(())
}
}
// Lossless conversions between the wrapper and the raw point.
impl From<AggregatePublicKey> for $pk {
    fn from(pk: AggregatePublicKey) -> Self {
        pk.point
    }
}

impl<'a> From<&'a AggregatePublicKey> for &'a $pk {
    fn from(pk: &'a AggregatePublicKey) -> Self {
        &pk.point
    }
}

impl From<$pk> for AggregatePublicKey {
    fn from(point: $pk) -> Self {
        Self { point }
    }
}
// Affine signature wrapper; #[repr(transparent)] over the affine
// point type so slices of Signature can be reinterpreted as slices of
// points (see the MultiPoint impl below).
#[repr(transparent)]
#[derive(Debug, Clone, Copy)]
pub struct Signature {
    point: $sig_aff,
}
impl Signature {
// sig_infcheck, check for infinity, is a way to avoid going
// into resource-consuming verification. Passing 'false' is
// always cryptographically safe, but application might want
// to guard against obviously bogus individual[!] signatures.
pub fn validate(
&self,
sig_infcheck: bool,
) -> Result<(), BLST_ERROR> {
unsafe {
if sig_infcheck && $sig_is_inf(&self.point) {
return Err(BLST_ERROR::BLST_PK_IS_INFINITY);
}
if !$sig_in_group(&self.point) {
return Err(BLST_ERROR::BLST_POINT_NOT_IN_GROUP);
}
}
Ok(())
}
pub fn sig_validate(
sig: &[u8],
sig_infcheck: bool,
) -> Result {
let sig = Signature::from_bytes(sig)?;
sig.validate(sig_infcheck)?;
Ok(sig)
}
pub fn verify(
&self,
sig_groupcheck: bool,
msg: &[u8],
dst: &[u8],
aug: &[u8],
pk: &PublicKey,
pk_validate: bool,
) -> BLST_ERROR {
let aug_msg = [aug, msg].concat();
self.aggregate_verify(
sig_groupcheck,
&[aug_msg.as_slice()],
dst,
&[pk],
pk_validate,
)
}
#[cfg(not(feature = "std"))]
pub fn aggregate_verify(
&self,
sig_groupcheck: bool,
msgs: &[&[u8]],
dst: &[u8],
pks: &[&PublicKey],
pks_validate: bool,
) -> BLST_ERROR {
let n_elems = pks.len();
if n_elems == 0 || msgs.len() != n_elems {
return BLST_ERROR::BLST_VERIFY_FAIL;
}
let mut pairing = Pairing::new($hash_or_encode, dst);
let err = pairing.aggregate(
&pks[0].point,
pks_validate,
&self.point,
sig_groupcheck,
&msgs[0],
&[],
);
if err != BLST_ERROR::BLST_SUCCESS {
return err;
}
for i in 1..n_elems {
let err = pairing.aggregate(
&pks[i].point,
pks_validate,
&unsafe { ptr::null::<$sig_aff>().as_ref() },
false,
&msgs[i],
&[],
);
if err != BLST_ERROR::BLST_SUCCESS {
return err;
}
}
pairing.commit();
if pairing.finalverify(None) {
BLST_ERROR::BLST_SUCCESS
} else {
BLST_ERROR::BLST_VERIFY_FAIL
}
}
#[cfg(feature = "std")]
pub fn aggregate_verify(
&self,
sig_groupcheck: bool,
msgs: &[&[u8]],
dst: &[u8],
pks: &[&PublicKey],
pks_validate: bool,
) -> BLST_ERROR {
let n_elems = pks.len();
if n_elems == 0 || msgs.len() != n_elems {
return BLST_ERROR::BLST_VERIFY_FAIL;
}
// TODO - check msg uniqueness?
let pool = mt::da_pool();
let counter = Arc::new(AtomicUsize::new(0));
let valid = Arc::new(AtomicBool::new(true));
let n_workers = core::cmp::min(pool.max_count(), n_elems);
let (tx, rx) = sync_channel(n_workers);
for _ in 0..n_workers {
let tx = tx.clone();
let counter = counter.clone();
let valid = valid.clone();
pool.joined_execute(move || {
let mut pairing = Pairing::new($hash_or_encode, dst);
while valid.load(Ordering::Relaxed) {
let work = counter.fetch_add(1, Ordering::Relaxed);
if work >= n_elems {
break;
}
if pairing.aggregate(
&pks[work].point,
pks_validate,
&unsafe { ptr::null::<$sig_aff>().as_ref() },
false,
&msgs[work],
&[],
) != BLST_ERROR::BLST_SUCCESS
{
valid.store(false, Ordering::Relaxed);
break;
}
}
if valid.load(Ordering::Relaxed) {
pairing.commit();
}
tx.send(pairing).expect("disaster");
});
}
if sig_groupcheck && valid.load(Ordering::Relaxed) {
match self.validate(false) {
Err(_err) => valid.store(false, Ordering::Relaxed),
_ => (),
}
}
let mut gtsig = blst_fp12::default();
if valid.load(Ordering::Relaxed) {
Pairing::aggregated(&mut gtsig, &self.point);
}
let mut acc = rx.recv().unwrap();
for _ in 1..n_workers {
acc.merge(&rx.recv().unwrap());
}
if valid.load(Ordering::Relaxed)
&& acc.finalverify(Some(>sig))
{
BLST_ERROR::BLST_SUCCESS
} else {
BLST_ERROR::BLST_VERIFY_FAIL
}
}
// pks are assumed to be verified for proof of possession,
// which implies that they are already group-checked
pub fn fast_aggregate_verify(
&self,
sig_groupcheck: bool,
msg: &[u8],
dst: &[u8],
pks: &[&PublicKey],
) -> BLST_ERROR {
let agg_pk = match AggregatePublicKey::aggregate(pks, false) {
Ok(agg_sig) => agg_sig,
Err(err) => return err,
};
let pk = agg_pk.to_public_key();
self.aggregate_verify(
sig_groupcheck,
&[msg],
dst,
&[&pk],
false,
)
}
pub fn fast_aggregate_verify_pre_aggregated(
&self,
sig_groupcheck: bool,
msg: &[u8],
dst: &[u8],
pk: &PublicKey,
) -> BLST_ERROR {
self.aggregate_verify(sig_groupcheck, &[msg], dst, &[pk], false)
}
// https://ethresear.ch/t/fast-verification-of-multiple-bls-signatures/5407
#[cfg(feature = "std")]
#[allow(clippy::too_many_arguments)]
pub fn verify_multiple_aggregate_signatures(
msgs: &[&[u8]],
dst: &[u8],
pks: &[&PublicKey],
pks_validate: bool,
sigs: &[&Signature],
sigs_groupcheck: bool,
rands: &[blst_scalar],
rand_bits: usize,
) -> BLST_ERROR {
let n_elems = pks.len();
if n_elems == 0
|| msgs.len() != n_elems
|| sigs.len() != n_elems
|| rands.len() != n_elems
{
return BLST_ERROR::BLST_VERIFY_FAIL;
}
// TODO - check msg uniqueness?
let pool = mt::da_pool();
let counter = Arc::new(AtomicUsize::new(0));
let valid = Arc::new(AtomicBool::new(true));
let n_workers = core::cmp::min(pool.max_count(), n_elems);
let (tx, rx) = sync_channel(n_workers);
for _ in 0..n_workers {
let tx = tx.clone();
let counter = counter.clone();
let valid = valid.clone();
pool.joined_execute(move || {
let mut pairing = Pairing::new($hash_or_encode, dst);
// TODO - engage multi-point mul-n-add for larger
// amount of inputs...
while valid.load(Ordering::Relaxed) {
let work = counter.fetch_add(1, Ordering::Relaxed);
if work >= n_elems {
break;
}
if pairing.mul_n_aggregate(
&pks[work].point,
pks_validate,
&sigs[work].point,
sigs_groupcheck,
&rands[work].b,
rand_bits,
msgs[work],
&[],
) != BLST_ERROR::BLST_SUCCESS
{
valid.store(false, Ordering::Relaxed);
break;
}
}
if valid.load(Ordering::Relaxed) {
pairing.commit();
}
tx.send(pairing).expect("disaster");
});
}
let mut acc = rx.recv().unwrap();
for _ in 1..n_workers {
acc.merge(&rx.recv().unwrap());
}
if valid.load(Ordering::Relaxed) && acc.finalverify(None) {
BLST_ERROR::BLST_SUCCESS
} else {
BLST_ERROR::BLST_VERIFY_FAIL
}
}
#[cfg(not(feature = "std"))]
#[allow(clippy::too_many_arguments)]
pub fn verify_multiple_aggregate_signatures(
msgs: &[&[u8]],
dst: &[u8],
pks: &[&PublicKey],
pks_validate: bool,
sigs: &[&Signature],
sigs_groupcheck: bool,
rands: &[blst_scalar],
rand_bits: usize,
) -> BLST_ERROR {
let n_elems = pks.len();
if n_elems == 0
|| msgs.len() != n_elems
|| sigs.len() != n_elems
|| rands.len() != n_elems
{
return BLST_ERROR::BLST_VERIFY_FAIL;
}
// TODO - check msg uniqueness?
let mut pairing = Pairing::new($hash_or_encode, dst);
for i in 0..n_elems {
let err = pairing.mul_n_aggregate(
&pks[i].point,
pks_validate,
&sigs[i].point,
sigs_groupcheck,
&rands[i].b,
rand_bits,
msgs[i],
&[],
);
if err != BLST_ERROR::BLST_SUCCESS {
return err;
}
}
pairing.commit();
if pairing.finalverify(None) {
BLST_ERROR::BLST_SUCCESS
} else {
BLST_ERROR::BLST_VERIFY_FAIL
}
}
pub fn from_aggregate(agg_sig: &AggregateSignature) -> Self {
let mut sig_aff = <$sig_aff>::default();
unsafe {
$sig_to_aff(&mut sig_aff, &agg_sig.point);
}
Self { point: sig_aff }
}
pub fn compress(&self) -> [u8; $sig_comp_size] {
let mut sig_comp = [0; $sig_comp_size];
unsafe {
$sig_comp(sig_comp.as_mut_ptr(), &self.point);
}
sig_comp
}
pub fn serialize(&self) -> [u8; $sig_ser_size] {
let mut sig_out = [0; $sig_ser_size];
unsafe {
$sig_ser(sig_out.as_mut_ptr(), &self.point);
}
sig_out
}
pub fn uncompress(sig_comp: &[u8]) -> Result {
if sig_comp.len() == $sig_comp_size && (sig_comp[0] & 0x80) != 0
{
let mut sig = <$sig_aff>::default();
let err =
unsafe { $sig_uncomp(&mut sig, sig_comp.as_ptr()) };
if err != BLST_ERROR::BLST_SUCCESS {
return Err(err);
}
Ok(Self { point: sig })
} else {
Err(BLST_ERROR::BLST_BAD_ENCODING)
}
}
pub fn deserialize(sig_in: &[u8]) -> Result {
if (sig_in.len() == $sig_ser_size && (sig_in[0] & 0x80) == 0)
|| (sig_in.len() == $sig_comp_size
&& (sig_in[0] & 0x80) != 0)
{
let mut sig = <$sig_aff>::default();
let err = unsafe { $sig_deser(&mut sig, sig_in.as_ptr()) };
if err != BLST_ERROR::BLST_SUCCESS {
return Err(err);
}
Ok(Self { point: sig })
} else {
Err(BLST_ERROR::BLST_BAD_ENCODING)
}
}
pub fn from_bytes(sig_in: &[u8]) -> Result {
Signature::deserialize(sig_in)
}
pub fn to_bytes(&self) -> [u8; $sig_comp_size] {
self.compress()
}
pub fn subgroup_check(&self) -> bool {
unsafe { $sig_in_group(&self.point) }
}
}
// Trait for equality comparisons which are equivalence relations.
//
// This means, that in addition to a == b and a != b being strict
// inverses, the equality must be reflexive, symmetric and transitive.
impl Eq for Signature {}

impl PartialEq for Signature {
    fn eq(&self, other: &Self) -> bool {
        // Point equality is delegated to the C library's affine
        // comparison routine.
        unsafe { $sig_eq(&self.point, &other.point) }
    }
}
#[cfg(feature = "serde")]
impl Serialize for Signature {
    /// Serialize the uncompressed point encoding as a byte string.
    fn serialize<S: Serializer>(
        &self,
        ser: S,
    ) -> Result<S::Ok, S::Error> {
        ser.serialize_bytes(&self.serialize())
    }
}

#[cfg(feature = "serde")]
impl<'de> Deserialize<'de> for Signature {
    /// Deserialize a signature from a byte string in either point
    /// encoding (see `Signature::deserialize`).
    fn deserialize<D: Deserializer<'de>>(
        deser: D,
    ) -> Result<Self, D::Error> {
        let bytes: &[u8] = Deserialize::deserialize(deser)?;
        Self::deserialize(&bytes).map_err(|e| {
            <D::Error as serde::de::Error>::custom(format!("{:?}", e))
        })
    }
}
// Lossless conversions between the wrapper and the raw affine point.
impl From<Signature> for $sig_aff {
    fn from(sig: Signature) -> Self {
        sig.point
    }
}

impl<'a> From<&'a Signature> for &'a $sig_aff {
    fn from(sig: &'a Signature) -> Self {
        &sig.point
    }
}

impl From<$sig_aff> for Signature {
    fn from(point: $sig_aff) -> Self {
        Self { point }
    }
}
// Running signature aggregate kept in the (non-affine) point type so
// repeated additions avoid per-step affine conversions.
#[repr(transparent)]
#[derive(Debug, Clone, Copy)]
pub struct AggregateSignature {
    point: $sig,
}
impl AggregateSignature {
pub fn validate(&self) -> Result<(), BLST_ERROR> {
unsafe {
if !$sig_aggr_in_group(&self.point) {
return Err(BLST_ERROR::BLST_POINT_NOT_IN_GROUP);
}
}
Ok(())
}
pub fn from_signature(sig: &Signature) -> Self {
let mut agg_sig = <$sig>::default();
unsafe {
$sig_from_aff(&mut agg_sig, &sig.point);
}
Self { point: agg_sig }
}
pub fn to_signature(&self) -> Signature {
let mut sig = <$sig_aff>::default();
unsafe {
$sig_to_aff(&mut sig, &self.point);
}
Signature { point: sig }
}
// Aggregate
pub fn aggregate(
sigs: &[&Signature],
sigs_groupcheck: bool,
) -> Result {
if sigs.len() == 0 {
return Err(BLST_ERROR::BLST_AGGR_TYPE_MISMATCH);
}
if sigs_groupcheck {
// We can't actually judge if input is individual or
// aggregated signature, so we can't enforce infinity
// check.
sigs[0].validate(false)?;
}
let mut agg_sig = AggregateSignature::from_signature(sigs[0]);
for s in sigs.iter().skip(1) {
if sigs_groupcheck {
s.validate(false)?;
}
unsafe {
$sig_add_or_dbl_aff(
&mut agg_sig.point,
&agg_sig.point,
&s.point,
);
}
}
Ok(agg_sig)
}
pub fn aggregate_with_randomness(
sigs: &[Signature],
randomness: &[u8],
nbits: usize,
sigs_groupcheck: bool,
) -> Result {
if sigs.len() == 0 {
return Err(BLST_ERROR::BLST_AGGR_TYPE_MISMATCH);
}
if sigs_groupcheck {
sigs.validate()?;
}
Ok(sigs.mult(randomness, nbits))
}
pub fn aggregate_serialized(
sigs: &[&[u8]],
sigs_groupcheck: bool,
) -> Result {
// TODO - threading
if sigs.len() == 0 {
return Err(BLST_ERROR::BLST_AGGR_TYPE_MISMATCH);
}
let mut sig = if sigs_groupcheck {
Signature::sig_validate(sigs[0], false)?
} else {
Signature::from_bytes(sigs[0])?
};
let mut agg_sig = AggregateSignature::from_signature(&sig);
for s in sigs.iter().skip(1) {
sig = if sigs_groupcheck {
Signature::sig_validate(s, false)?
} else {
Signature::from_bytes(s)?
};
unsafe {
$sig_add_or_dbl_aff(
&mut agg_sig.point,
&agg_sig.point,
&sig.point,
);
}
}
Ok(agg_sig)
}
pub fn add_aggregate(&mut self, agg_sig: &AggregateSignature) {
unsafe {
$sig_add_or_dbl(
&mut self.point,
&self.point,
&agg_sig.point,
);
}
}
pub fn add_signature(
&mut self,
sig: &Signature,
sig_groupcheck: bool,
) -> Result<(), BLST_ERROR> {
if sig_groupcheck {
sig.validate(false)?;
}
unsafe {
$sig_add_or_dbl_aff(
&mut self.point,
&self.point,
&sig.point,
);
}
Ok(())
}
pub fn subgroup_check(&self) -> bool {
unsafe { $sig_aggr_in_group(&self.point) }
}
}
// Lossless conversions between the wrapper and the raw point.
impl From<AggregateSignature> for $sig {
    fn from(sig: AggregateSignature) -> Self {
        sig.point
    }
}

impl<'a> From<&'a AggregateSignature> for &'a $sig {
    fn from(sig: &'a AggregateSignature) -> Self {
        &sig.point
    }
}

impl From<$sig> for AggregateSignature {
    fn from(point: $sig) -> Self {
        Self { point }
    }
}
// Multi-scalar operations over slices of public keys. The transmutes
// are sound because PublicKey is #[repr(transparent)] over the affine
// point type, so &[PublicKey] and &[$pk_aff] have identical layout.
impl MultiPoint for [PublicKey] {
    type Output = AggregatePublicKey;

    // Multi-scalar multiplication with `nbits`-wide scalars from
    // `scalars`.
    fn mult(&self, scalars: &[u8], nbits: usize) -> Self::Output {
        Self::Output {
            point: unsafe { transmute::<&[_], &[$pk_aff]>(self) }
                .mult(scalars, nbits),
        }
    }

    // Plain sum of all points.
    fn add(&self) -> Self::Output {
        Self::Output {
            point: unsafe { transmute::<&[_], &[$pk_aff]>(self) }
                .add(),
        }
    }

    // Group-check every point in the slice.
    fn validate(&self) -> Result<(), BLST_ERROR> {
        unsafe { transmute::<&[_], &[$pk_aff]>(self) }.validate()
    }
}
// Multi-scalar operations over slices of signatures; same layout
// argument as the [PublicKey] impl above (Signature is
// #[repr(transparent)] over the affine point type).
impl MultiPoint for [Signature] {
    type Output = AggregateSignature;

    // Multi-scalar multiplication with `nbits`-wide scalars from
    // `scalars`.
    fn mult(&self, scalars: &[u8], nbits: usize) -> Self::Output {
        Self::Output {
            point: unsafe { transmute::<&[_], &[$sig_aff]>(self) }
                .mult(scalars, nbits),
        }
    }

    // Plain sum of all points.
    fn add(&self) -> Self::Output {
        Self::Output {
            point: unsafe { transmute::<&[_], &[$sig_aff]>(self) }
                .add(),
        }
    }

    // Group-check every point in the slice.
    fn validate(&self) -> Result<(), BLST_ERROR> {
        unsafe { transmute::<&[_], &[$sig_aff]>(self) }.validate()
    }
}
#[cfg(test)]
mod tests {
use super::*;
use rand::{RngCore, SeedableRng};
use rand_chacha::ChaCha20Rng;
// Testing only - do not use for production
pub fn gen_random_key(
rng: &mut rand_chacha::ChaCha20Rng,
) -> SecretKey {
let mut ikm = [0u8; 32];
rng.fill_bytes(&mut ikm);
let mut sk = ::default();
unsafe {
blst_keygen(&mut sk, ikm.as_ptr(), 32, ptr::null(), 0);
}
SecretKey { value: sk }
}
#[test]
fn test_sign_n_verify() {
    // Fixed IKM so the test is deterministic.
    let ikm: [u8; 32] = [
        0x93, 0xad, 0x7e, 0x65, 0xde, 0xad, 0x05, 0x2a, 0x08, 0x3a,
        0x91, 0x0c, 0x8b, 0x72, 0x85, 0x91, 0x46, 0x4c, 0xca, 0x56,
        0x60, 0x5b, 0xb0, 0x56, 0xed, 0xfe, 0x2b, 0x60, 0xa6, 0x3c,
        0x48, 0x99,
    ];
    let sk = SecretKey::key_gen(&ikm, &[]).unwrap();

    let pk = sk.sk_to_pk();

    // Sign and verify a single message round-trip, with both the
    // signature group check and public-key validation enabled.
    let dst = b"BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_NUL_";
    let msg = b"hello foo";
    let sig = sk.sign(msg, dst, &[]);

    let err = sig.verify(true, msg, dst, &[], &pk, true);
    assert_eq!(err, BLST_ERROR::BLST_SUCCESS);
}
#[test]
fn test_aggregate() {
    let num_msgs = 10;
    let dst = b"BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_NUL_";

    // Deterministic RNG so failures are reproducible.
    let seed = [0u8; 32];
    let mut rng = ChaCha20Rng::from_seed(seed);

    let sks: Vec<_> =
        (0..num_msgs).map(|_| gen_random_key(&mut rng)).collect();
    let pks = sks
        .iter()
        .map(|sk| sk.sk_to_pk())
        .collect::<Vec<_>>();
    let pks_refs: Vec<&PublicKey> =
        pks.iter().map(|pk| pk).collect();
    let pks_rev: Vec<&PublicKey> =
        pks.iter().rev().map(|pk| pk).collect();

    // Sanity: a compressed key round-trips.
    let pk_comp = pks[0].compress();
    let pk_uncomp = PublicKey::uncompress(&pk_comp);
    assert_eq!(pk_uncomp.is_ok(), true);

    // Random messages of 1..=64 bytes each.
    let mut msgs: Vec<Vec<u8>> = vec![vec![]; num_msgs];
    for i in 0..num_msgs {
        let msg_len = (rng.next_u64() & 0x3F) + 1;
        msgs[i] = vec![0u8; msg_len as usize];
        rng.fill_bytes(&mut msgs[i]);
    }
    let msgs_refs: Vec<&[u8]> =
        msgs.iter().map(|m| m.as_slice()).collect();

    let sigs = sks
        .iter()
        .zip(msgs.iter())
        .map(|(sk, m)| (sk.sign(m, dst, &[])))
        .collect::<Vec<_>>();

    // Each signature verifies individually against its own key.
    let mut errs = sigs
        .iter()
        .zip(msgs.iter())
        .zip(pks.iter())
        .map(|((s, m), pk)| (s.verify(true, m, dst, &[], pk, true)))
        .collect::<Vec<_>>();
    assert_eq!(errs, vec![BLST_ERROR::BLST_SUCCESS; num_msgs]);

    // Swap message/public key pairs to create bad signature
    errs = sigs
        .iter()
        .zip(msgs.iter())
        .zip(pks.iter().rev())
        .map(|((s, m), pk)| (s.verify(true, m, dst, &[], pk, true)))
        .collect::<Vec<_>>();
    assert_ne!(errs, vec![BLST_ERROR::BLST_SUCCESS; num_msgs]);

    let sig_refs =
        sigs.iter().map(|s| s).collect::<Vec<_>>();
    let agg = match AggregateSignature::aggregate(&sig_refs, true) {
        Ok(agg) => agg,
        Err(err) => panic!("aggregate failure: {:?}", err),
    };

    // The aggregate verifies against the matching key order...
    let agg_sig = agg.to_signature();
    let mut result = agg_sig
        .aggregate_verify(false, &msgs_refs, dst, &pks_refs, false);
    assert_eq!(result, BLST_ERROR::BLST_SUCCESS);

    // Swap message/public key pairs to create bad signature
    result = agg_sig
        .aggregate_verify(false, &msgs_refs, dst, &pks_rev, false);
    assert_ne!(result, BLST_ERROR::BLST_SUCCESS);
}
#[test]
fn test_multiple_agg_sigs() {
let dst = b"BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_";
let num_pks_per_sig = 10;
let num_sigs = 10;
let seed = [0u8; 32];
let mut rng = ChaCha20Rng::from_seed(seed);
let mut msgs: Vec> = vec![vec![]; num_sigs];
let mut sigs: Vec = Vec::with_capacity(num_sigs);
let mut pks: Vec