Repository: barryWhiteHat/roll_up
Branch: master
Commit: 118f3511a7a9
Files: 30
Total size: 108.4 KB
Directory structure:
gitextract_3ua0xt7z/
├── .dockerignore
├── .gitignore
├── .gitmodules
├── CMakeLists.txt
├── Dockerfile
├── README.md
├── build/
│ └── .gitkeep
├── contracts/
│ ├── Miximus.sol
│ ├── Pairing.sol
│ ├── Verifier.sol
│ ├── contract_deploy.py
│ └── roll_up.sol
├── depends/
│ └── CMakeLists.txt
├── docker-compose.yml
├── keys/
│ └── .gitkeep
├── pythonWrapper/
│ ├── helper.py
│ └── utils.py
├── requirements.txt
├── src/
│ ├── CMakeLists.txt
│ ├── ZoKrates/
│ │ ├── wraplibsnark.cpp
│ │ └── wraplibsnark.hpp
│ ├── export.cpp
│ ├── roll_up.hpp
│ ├── roll_up.tcc
│ ├── roll_up_wrapper.cpp
│ ├── roll_up_wrapper.hpp
│ ├── sha256/
│ │ └── sha256_ethereum.cpp
│ ├── tx.hpp
│ └── tx.tcc
└── tests/
└── test.py
================================================
FILE CONTENTS
================================================
================================================
FILE: .dockerignore
================================================
__pycache__
================================================
FILE: .gitignore
================================================
keys/vk.json
__pycache__
================================================
FILE: .gitmodules
================================================
[submodule "depends/baby_jubjub_ecc"]
path = depends/baby_jubjub_ecc
url = https://github.com/barrywhitehat/baby_jubjub_ecc
[submodule "depends/libsnark"]
path = depends/libsnark
url = https://github.com/scipr-lab/libsnark.git
[submodule "src/sha256_ethereum"]
path = src/sha256_ethereum
url = https://github.com/kobigurk/sha256_ethereum
================================================
FILE: CMakeLists.txt
================================================
# Top-level build configuration for the roll_up zkSNARK prover.
# NOTE(review): cmake_minimum_required(2.8) predates target-scoped usage
# requirements, which is why directory-scoped commands (add_definitions,
# include_directories) are used throughout — consider modernising.
cmake_minimum_required(VERSION 2.8)
project(roll_up)

# Elliptic curve backend for libsnark; ALT_BN128 matches Ethereum's
# pairing precompiles.
set(
  CURVE
  "ALT_BN128"
  CACHE
  STRING
  "Default curve: one of ALT_BN128, BN128, EDWARDS, MNT4, MNT6"
)

# Where the dependency submodules (baby_jubjub_ecc, libsnark) live.
set(
  DEPENDS_DIR
  "${CMAKE_CURRENT_SOURCE_DIR}/depends"
  CACHE
  STRING
  "Optionally specify the dependency installation directory relative to the source directory (default: inside dependency folder)"
)

set(
  OPT_FLAGS
  ""
  CACHE
  STRING
  "Override C++ compiler optimization flags"
)

option(
  MULTICORE
  "Enable parallelized execution, using OpenMP"
  ON
)

option(
  WITH_PROCPS
  "Use procps for memory profiling"
  ON
)

option(
  VERBOSE
  "Print internal messages"
  ON
)

# NOTE(review): DEBUG is declared but never consumed in this file — confirm
# whether a subdirectory reads it.
option(
  DEBUG
  "Enable debugging mode"
  OFF
)

option(
  CPPDEBUG
  "Enable debugging of C++ STL (does not imply DEBUG)"
  OFF
)

if(CMAKE_COMPILER_IS_GNUCXX OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
  # Common compilation flags and warning configuration
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -Wextra -Wfatal-errors -pthread")
  if("${MULTICORE}")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fopenmp")
  endif()
  # Default optimizations flags (to override, use -DOPT_FLAGS=...)
  # NOTE(review): -march=native produces binaries tied to the build host;
  # relevant when building inside Docker on one machine and running on
  # another — confirm this is acceptable.
  if("${OPT_FLAGS}" STREQUAL "")
    set(OPT_FLAGS "-ggdb3 -O2 -march=native -mtune=native")
  endif()
endif()

# Tell libsnark which curve was selected (e.g. -DCURVE_ALT_BN128).
add_definitions(-DCURVE_${CURVE})
if(${CURVE} STREQUAL "BN128")
  add_definitions(-DBN_SUPPORT_SNARK=1)
endif()

if("${VERBOSE}")
  add_definitions(-DVERBOSE=1)
endif()

if("${MULTICORE}")
  add_definitions(-DMULTICORE=1)
endif()

# Position-independent code everywhere: src/ builds a shared library
# (libroll_up_wrapper.so) that is loaded from Python via ctypes.
add_compile_options(-fPIC)

if("${CPPDEBUG}")
  # GNU libstdc++ debug-mode containers and iterator checking.
  add_definitions(-D_GLIBCXX_DEBUG -D_GLIBCXX_DEBUG_PEDANTIC)
endif()

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OPT_FLAGS}")

# procps is used by libsnark for memory profiling; compile it out otherwise.
include(FindPkgConfig)
if("${WITH_PROCPS}")
  pkg_check_modules(PROCPS REQUIRED libprocps)
else()
  add_definitions(-DNO_PROCPS)
endif()

include_directories(.)
add_subdirectory(depends)
add_subdirectory(src)
================================================
FILE: Dockerfile
================================================
# Build image for the roll_up prover and its Python test harness.
FROM ubuntu:18.04

# Toolchain, libsnark build dependencies, solc (from the ethereum PPA)
# and python3 — installed in a single layer.
RUN apt-get update && \
    apt-get install software-properties-common -y && \
    add-apt-repository ppa:ethereum/ethereum -y && \
    apt-get update && \
    apt-get install -y \
    wget unzip curl \
    build-essential cmake git libgmp3-dev libprocps-dev python-markdown libboost-all-dev libssl-dev pkg-config python3-pip solc

WORKDIR /root/roll_up
COPY . .

RUN pip3 install -r requirements.txt

# Configure and build the C++ prover, then install it under /usr/local.
# NOTE(review): the NO_PROCPS/NO_GTEST/NO_DOCS/CURVE/FEATUREFLAGS variables
# after `make install` are passed as make arguments; they look like libsnark
# build flags and may have no effect at this stage — confirm they are needed.
RUN cd build \
    && cmake .. \
    && make \
    && DESTDIR=/usr/local make install \
    NO_PROCPS=1 \
    NO_GTEST=1 \
    NO_DOCS=1 \
    CURVE=ALT_BN128 \
    FEATUREFLAGS="-DBINARY_OUTPUT=1 -DMONTGOMERY_OUTPUT=1 -DNO_PT_COMPRESSION=1"

# Let ctypes find the installed shared libraries at runtime.
ENV LD_LIBRARY_PATH $LD_LIBRARY_PATH:/usr/local/lib
================================================
FILE: README.md
================================================
# roll_up
[](https://gitter.im/barrywhitehat/roll_up?utm_source=share-link&utm_medium=link&utm_campaign=share-link)
Roll_up aggregates transactions so that only a single onchain transaction is required to validate multiple other transactions. The snark checks the signature and applies the transaction to the leaf that the signer owns.
Multiple users create signatures. Provers aggregates these signatures into a snark and use it to update a smart contract on the ethereum blockchain. A malicious prover who does not also have that leafs private key cannot change a leaf. Only the person who controls the private key can.
This is intended to be the database layer of snark-dapp (snapps) where the layers above define more rules about changing and updating the leaves
`roll_up` does not make any rules about what happens in a leaf, what kind of leaves can be created and destroyed. This is the purview of
higher level snapps. Who can add their constraints in `src/roll_up.tcc` in the function `generate_r1cs_constraints()`
## In Depth
The system uses EdDSA signatures defined in [baby_jubjub_ecc](https://github.com/barryWhiteHat/baby_jubjub_ecc), based upon [baby_jubjub](https://github.com/barryWhiteHat/baby_jubjub). It uses sha256 padded to a 512-bit input.
The leaf is defined as follows
```
LEAF
+----------------^----------------+
LHS RHS
+----------------+
Public_key_x public_key_y
```
The leaf is then injected into a merkle tree.
A transaction updates a single leaf in the merkle tree. A transaction takes the following form.
```
1. Public key x and y point
2. The message which is defined as the hash of the old leaf and the new leaf.
MESSAGE
+----------------^----------------+
OLD_LEAF NEW_LEAF
3. the point R and the integer S.
```
In order to update the merkle tree the prover needs to aggregate together X transactions. For each transaction they check
```
1. Takes the merkel root as input from the smart contract (if it is the first iteration) or from the merkle root from the previous
transaction.
2. Find the leaf that matches the message in the merkle tree.
NOTE: If there are two messages that match, both can be updated, as there is no replay protection. This should be solved on the next layer;
this is simply the read and write layer, and we do not check what is being written here.
3. Check that the proving key matches the owner of that leaf.
4. Confirm that the signature is correct.
5. Confirm that that leaf is in the merkle tree.
6. Replace it with the new leaf and calculate the new merkle root.
7. Continue until all transactions have been included in a snark
```
The snark can then be included in a transaction to update the merkle root tracked by a smart contract.
## Data availability guarantees
It is important that each prover is able to make merkle proofs for all leaves.
If they cannot these leaves are essentially locked until that information becomes available.
In order to ensure this, we pass every updated leaf to the smart contract so
that data will always be available.
Thus the system has the same data availability guarantees as ethereum.
## Scalability
Gas cost of function call: 23368
Gas cost of throwing an event with a single leaf update : 1840
Although we don't use groth16 currently, it is the cheapest proving system to our knowledge.
groth16 confirm: 560000 including tx cost and input data is ~600000.
The gas limit is 8,000,000 per block. So we can use the rest of the gas to maintain data availability.
8000000 - 600000 = 7400000
We find that 7400000 is the remaining gas in the block.
So we calculate how much we can spend on data availability
7400000 / 1840 ~= 4021.73913043478
4021.73913043478 / 15 = 268 transactions per second
## Proving time
On a laptop with 7 GB of ram and 20 GB of swap space it struggles to aggregate 20 transactions per second. This is a
combination of my hardware limits and cpp code that needs to be improved.
[Wu et al](https://eprint.iacr.org/2018/691) showed that it is possible to distribute
these computations, scaling to billions of constraints.
In order to reach the tps described above three approaches exist.
1. Improve the cpp code similar to https://github.com/HarryR/ethsnarks/issues/3 and run it on enterprise hardware.
2. Implementing the full distributed system described by Wu et al.
3. Specialized hardware to create these proofs.
## Distribution
The role of prover can be distributed but it means that each will have to purchase/rent hardware in order to be able to keep up with the longest chain.
There are a few attacks where the fastest prover is able to censor all other provers by constantly updating, so that all competing provers' proofs are constantly out of date.
These problems should be mitigated or solved at the consensus level.
## Running tests
If you want to run at noTx greater than 10 you will need more than 7GB
to add a bunch of swap space https://www.digitalocean.com/community/tutorials/how-to-add-swap-space-on-ubuntu-16-04
### Build everything
```
mkdir keys
git submodule update --init --recursive
mkdir build
cd build
cmake .. && make
```
### Run the tests
NOTE: Make sure you have a node running so the smart contract would be deployed and validate the transaction, you can use
`testrpc` or `ganache-cli`
```
cd ../tests/
python3 test.py
```
### Change the merkle tree depth and number of transactions to be aggregated
You'd need to update two files, and re-build the prover.
In `pythonWrapper/helper.py`
```
tree_depth = 2
noTx = 4
```
In `src/roll_up_wrapper.hpp`
```
const int tree_depth = 2;
```
================================================
FILE: build/.gitkeep
================================================
================================================
FILE: contracts/Miximus.sol
================================================
/*
copyright 2018 to the roll_up Authors
This file is part of roll_up.
roll_up is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
roll_up is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with roll_up. If not, see <https://www.gnu.org/licenses/>.
*/
pragma solidity ^0.4.19;
import "./Verifier.sol";
// NOTE(review): this file (Miximus.sol) defines a contract named roll_up that
// is byte-for-byte identical to the one in contracts/roll_up.sol — presumably
// a stale copy; confirm which file is canonical.
contract roll_up{
    bytes32 root;                         // merkle root currently accepted onchain
    mapping (bytes32 => bool) nullifiers; // NOTE(review): never read or written in this contract
    event Withdraw (address);
    Verifier public zksnark_verify;       // external zkSNARK verifier contract

    // Constructor (Solidity 0.4 style: function named after the contract).
    function roll_up (address _zksnark_verify, bytes32 _root) {
        zksnark_verify = Verifier(_zksnark_verify);
        root = _root;
    }

    // Verify a snark proof against the stored root and, on success, replace
    // the root with the new one carried in the proof's public inputs.
    // input[0] carries the old root and input[2] the new root; both are
    // normalised with reverse() + padZero() before comparison/storage.
    function isTrue (
        uint[2] a,
        uint[2] a_p,
        uint[2][2] b,
        uint[2] b_p,
        uint[2] c,
        uint[2] c_p,
        uint[2] h,
        uint[2] k,
        uint[] input
    ) returns (bool) {
        bytes32 _root = padZero(reverse(bytes32(input[0]))); //)merge253bitWords(input[0], input[1]);
        require(_root == padZero(root));
        require(zksnark_verify.verifyTx(a,a_p,b,b_p,c,c_p,h,k,input));
        root = padZero(reverse(bytes32(input[2])));
        return(true);
    }

    // Read back the tracked merkle root.
    function getRoot() constant returns(bytes32) {
        return(root);
    }

    // libshark only allows 253 bit chunks in its output
    // to overcome this we merge the first 253 bits (left) with the remaining 3 bits
    // in the next variable (right)
    // NOTE(review): currently unused — isTrue compares padZero'd roots instead.
    function merge253bitWords(uint left, uint right) returns(bytes32) {
        right = pad3bit(right);
        uint left_msb = uint(padZero(reverse(bytes32(left))));
        uint left_lsb = uint(getZero(reverse(bytes32(left))));
        right = right + left_lsb;
        uint res = left_msb + right;
        return(bytes32(res));
    }

    // ensure that the 3 bits on the left is actually 3 bits.
    // Appears to bit-reverse a 3-bit value (1 -> 100b = 4, 3 -> 110b = 6).
    // NOTE(review): input == 2 returns 4, but the 3-bit reversal of 2 (010b)
    // is 2 — looks like a bug; confirm the intended mapping.
    function pad3bit(uint input) constant returns(uint) {
        if (input == 0)
            return 0;
        if (input == 1)
            return 4;
        if (input == 2)
            return 4;
        if (input == 3)
            return 6;
        return(input);
    }

    // Keep only the lowest 4 bits of x.
    function getZero(bytes32 x) returns(bytes32) {
        //0x1111111111111111111111113fdc3192693e28ff6aee95320075e4c26be03308
        return(x & 0x000000000000000000000000000000000000000000000000000000000000000F);
    }

    // Zero the lowest 4 bits of x (the part compared roots may differ in).
    function padZero(bytes32 x) returns(bytes32) {
        //0x1111111111111111111111113fdc3192693e28ff6aee95320075e4c26be03308
        return(x & 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF0);
    }

    // Reverse the bit order within a single byte using a lookup table of
    // reversed nibbles packed into the constant c.
    function reverseByte(uint a) public pure returns (uint) {
        uint c = 0xf070b030d0509010e060a020c0408000;
        return (( c >> ((a & 0xF)*8)) & 0xF0) +
            (( c >> (((a >> 4)&0xF)*8) + 4) & 0xF);
    }

    //flip endinaness
    // Reverse all 256 bits of a: byte order is flipped and each byte is
    // bit-reversed via reverseByte.
    function reverse(bytes32 a) public pure returns(bytes32) {
        uint r;
        uint i;
        uint b;
        for (i=0; i<32; i++) {
            b = (uint(a) >> ((31-i)*8)) & 0xff;
            b = reverseByte(b);
            r += b << (i*8);
        }
        return bytes32(r);
    }
}
================================================
FILE: contracts/Pairing.sol
================================================
// This code is taken from https://github.com/JacobEberhardt/ZoKrates
pragma solidity ^0.4.19;
// Elliptic-curve helpers over alt_bn128, delegating the heavy math to the
// EVM precompiled contracts (addresses 6, 7 and 8).
library Pairing {
    // Affine point on G1.
    struct G1Point {
        uint X;
        uint Y;
    }
    // Encoding of field elements is: X[0] * z + X[1]
    struct G2Point {
        uint[2] X;
        uint[2] Y;
    }
    /// @return the generator of G1
    function P1() internal returns (G1Point) {
        return G1Point(1, 2);
    }
    /// @return the generator of G2
    function P2() internal returns (G2Point) {
        return G2Point(
            [11559732032986387107991004021392285783925812861821192530917403151452391805634,
             10857046999023057135944570762232829481370756359578518086990519993285655852781],
            [4082367875863433681332203403145435568316851327593401208105741076214120093531,
             8495653923123431417604973247489272438418190587263600148770280649306958101930]
        );
    }
    /// @return the negation of p, i.e. p.add(p.negate()) should be zero.
    function negate(G1Point p) internal returns (G1Point) {
        // The prime q in the base field F_q for G1
        uint q = 21888242871839275222246405745257275088696311157297823662689037894645226208583;
        if (p.X == 0 && p.Y == 0)
            return G1Point(0, 0);
        return G1Point(p.X, q - (p.Y % q));
    }
    /// @return the sum of two points of G1
    // Calls the alt_bn128 addition precompile at address 6 (EIP-196).
    function add(G1Point p1, G1Point p2) internal returns (G1Point r) {
        uint[4] memory input;
        input[0] = p1.X;
        input[1] = p1.Y;
        input[2] = p2.X;
        input[3] = p2.Y;
        bool success;
        assembly {
            // Forward all but 2000 gas; result point is written into r.
            success := call(sub(gas, 2000), 6, 0, input, 0xc0, r, 0x60)
            // Use "invalid" to make gas estimation work
            switch success case 0 { invalid }
        }
        require(success);
    }
    /// @return the product of a point on G1 and a scalar, i.e.
    /// p == p.mul(1) and p.add(p) == p.mul(2) for all points p.
    // Calls the alt_bn128 scalar-multiplication precompile at address 7.
    function mul(G1Point p, uint s) internal returns (G1Point r) {
        uint[3] memory input;
        input[0] = p.X;
        input[1] = p.Y;
        input[2] = s;
        bool success;
        assembly {
            success := call(sub(gas, 2000), 7, 0, input, 0x80, r, 0x60)
            // Use "invalid" to make gas estimation work
            switch success case 0 { invalid }
        }
        require (success);
    }
    /// @return the result of computing the pairing check
    /// e(p1[0], p2[0]) * .... * e(p1[n], p2[n]) == 1
    /// For example pairing([P1(), P1().negate()], [P2(), P2()]) should
    /// return true.
    // Calls the alt_bn128 pairing-check precompile at address 8 (EIP-197).
    function pairing(G1Point[] p1, G2Point[] p2) internal returns (bool) {
        require(p1.length == p2.length);
        uint elements = p1.length;
        uint inputSize = elements * 6;
        uint[] memory input = new uint[](inputSize);
        // Serialise each (G1, G2) pair as six 32-byte words in the order the
        // precompile expects.
        for (uint i = 0; i < elements; i++)
        {
            input[i * 6 + 0] = p1[i].X;
            input[i * 6 + 1] = p1[i].Y;
            input[i * 6 + 2] = p2[i].X[0];
            input[i * 6 + 3] = p2[i].X[1];
            input[i * 6 + 4] = p2[i].Y[0];
            input[i * 6 + 5] = p2[i].Y[1];
        }
        uint[1] memory out;
        bool success;
        assembly {
            // add(input, 0x20) skips the dynamic array's length word.
            success := call(sub(gas, 2000), 8, 0, add(input, 0x20), mul(inputSize, 0x20), out, 0x20)
            // Use "invalid" to make gas estimation work
            switch success case 0 { invalid }
        }
        require(success);
        return out[0] != 0;
    }
    /// Convenience method for a pairing check for two pairs.
    function pairingProd2(G1Point a1, G2Point a2, G1Point b1, G2Point b2) internal returns (bool) {
        G1Point[] memory p1 = new G1Point[](2);
        G2Point[] memory p2 = new G2Point[](2);
        p1[0] = a1;
        p1[1] = b1;
        p2[0] = a2;
        p2[1] = b2;
        return pairing(p1, p2);
    }
    /// Convenience method for a pairing check for three pairs.
    function pairingProd3(
        G1Point a1, G2Point a2,
        G1Point b1, G2Point b2,
        G1Point c1, G2Point c2
    ) internal returns (bool) {
        G1Point[] memory p1 = new G1Point[](3);
        G2Point[] memory p2 = new G2Point[](3);
        p1[0] = a1;
        p1[1] = b1;
        p1[2] = c1;
        p2[0] = a2;
        p2[1] = b2;
        p2[2] = c2;
        return pairing(p1, p2);
    }
    /// Convenience method for a pairing check for four pairs.
    function pairingProd4(
        G1Point a1, G2Point a2,
        G1Point b1, G2Point b2,
        G1Point c1, G2Point c2,
        G1Point d1, G2Point d2
    ) internal returns (bool) {
        G1Point[] memory p1 = new G1Point[](4);
        G2Point[] memory p2 = new G2Point[](4);
        p1[0] = a1;
        p1[1] = b1;
        p1[2] = c1;
        p1[3] = d1;
        p2[0] = a2;
        p2[1] = b2;
        p2[2] = c2;
        p2[3] = d2;
        return pairing(p1, p2);
    }
}
================================================
FILE: contracts/Verifier.sol
================================================
// this code is taken from https://github.com/JacobEberhardt/ZoKrates
pragma solidity ^0.4.19;
import "../contracts/Pairing.sol";
contract Verifier {
    using Pairing for *;
    uint sealed = 0; // becomes 1 once the full IC list has been uploaded via addIC
    uint i = 0;      // persistent IC upload cursor, shared across addIC calls
    // Verification key for the 8-element (A..K) proof system exported by
    // ZoKrates/libsnark.
    struct VerifyingKey {
        Pairing.G2Point A;
        Pairing.G1Point B;
        Pairing.G2Point C;
        Pairing.G2Point gamma;
        Pairing.G1Point gammaBeta1;
        Pairing.G2Point gammaBeta2;
        Pairing.G2Point Z;
        Pairing.G1Point[] IC; // one entry per public input, plus one (see verify)
    }
    struct Proof {
        Pairing.G1Point A;
        Pairing.G1Point A_p;
        Pairing.G2Point B;
        Pairing.G1Point B_p;
        Pairing.G1Point C;
        Pairing.G1Point C_p;
        Pairing.G1Point K;
        Pairing.G1Point H;
    }
    VerifyingKey verifyKey;
    // Constructor (Solidity 0.4 style): store every verification key element
    // except IC, which is uploaded separately through addIC so a single
    // transaction stays under the gas limit.
    function Verifier (uint[2] A1, uint[2] A2, uint[2] B, uint[2] C1, uint[2] C2,
                       uint[2] gamma1, uint[2] gamma2, uint[2] gammaBeta1,
                       uint[2] gammaBeta2_1, uint[2] gammaBeta2_2, uint[2] Z1, uint[2] Z2,
                       uint[] input) {
        verifyKey.A = Pairing.G2Point(A1,A2);
        verifyKey.B = Pairing.G1Point(B[0], B[1]);
        verifyKey.C = Pairing.G2Point(C1, C2);
        verifyKey.gamma = Pairing.G2Point(gamma1, gamma2);
        verifyKey.gammaBeta1 = Pairing.G1Point(gammaBeta1[0], gammaBeta1[1]);
        verifyKey.gammaBeta2 = Pairing.G2Point(gammaBeta2_1, gammaBeta2_2);
        verifyKey.Z = Pairing.G2Point(Z1,Z2);
        /*while (verifyKey.IC.length != input.length/2) {
            verifyKey.IC.push(Pairing.G1Point(input[i], input[i+1]));
            i += 2;
        }*/
    }
    // Append (x, y) pairs from input to the IC list, resuming from the
    // persistent cursor i, until the list is complete or remaining gas drops
    // below 200000; the deploy script calls this repeatedly.  Seals the key
    // once the list is complete.
    // NOTE(review): no access control — anyone can call this before the key
    // is sealed; confirm that is acceptable for the deployment flow.
    function addIC(uint[] input) {
        require(sealed ==0);
        while (verifyKey.IC.length != input.length/2 && msg.gas > 200000) {
            verifyKey.IC.push(Pairing.G1Point(input[i], input[i+1]));
            i += 2;
        }
        if( verifyKey.IC.length == input.length/2) {
            sealed = 1;
        }
    }
    // Read back IC entry i (parameter shadows the state cursor i).
    function getIC(uint i) returns(uint, uint) {
        return(verifyKey.IC[i].X, verifyKey.IC[i].Y);
    }
    // Number of IC entries uploaded so far (used as deployment progress check).
    function getICLen () returns (uint) {
        return(verifyKey.IC.length);
    }
    // Core verification: returns 0 on success, otherwise the index (1-5) of
    // the first pairing check that failed.
    function verify(uint[] input, Proof proof) internal returns (uint) {
        VerifyingKey memory vk = verifyKey;
        require(input.length + 1 == vk.IC.length);
        // Compute the linear combination vk_x
        Pairing.G1Point memory vk_x = Pairing.G1Point(0, 0);
        for (uint i = 0; i < input.length; i++) // local i shadows the state cursor
            vk_x = Pairing.add(vk_x, Pairing.mul(vk.IC[i + 1], input[i]));
        vk_x = Pairing.add(vk_x, vk.IC[0]);
        if (!Pairing.pairingProd2(proof.A, vk.A, Pairing.negate(proof.A_p), Pairing.P2())) return 1;
        if (!Pairing.pairingProd2(vk.B, proof.B, Pairing.negate(proof.B_p), Pairing.P2())) return 2;
        if (!Pairing.pairingProd2(proof.C, vk.C, Pairing.negate(proof.C_p), Pairing.P2())) return 3;
        if (!Pairing.pairingProd3(
            proof.K, vk.gamma,
            Pairing.negate(Pairing.add(vk_x, Pairing.add(proof.A, proof.C))), vk.gammaBeta2,
            Pairing.negate(vk.gammaBeta1), proof.B
        )) return 4;
        if (!Pairing.pairingProd3(
            Pairing.add(vk_x, proof.A), proof.B,
            Pairing.negate(proof.H), vk.Z,
            Pairing.negate(proof.C), Pairing.P2()
        )) return 5;
        return 0;
    }
    event Verified(string);
    // Public entry point: rebuild the Proof struct from the flat calldata
    // arrays, run verify(), and emit Verified on success.
    function verifyTx(
        uint[2] a,
        uint[2] a_p,
        uint[2][2] b,
        uint[2] b_p,
        uint[2] c,
        uint[2] c_p,
        uint[2] h,
        uint[2] k,
        uint[] input
    ) returns (bool) {
        Proof memory proof;
        proof.A = Pairing.G1Point(a[0], a[1]);
        proof.A_p = Pairing.G1Point(a_p[0], a_p[1]);
        proof.B = Pairing.G2Point([b[0][0], b[0][1]], [b[1][0], b[1][1]]);
        proof.B_p = Pairing.G1Point(b_p[0], b_p[1]);
        proof.C = Pairing.G1Point(c[0], c[1]);
        proof.C_p = Pairing.G1Point(c_p[0], c_p[1]);
        proof.H = Pairing.G1Point(h[0], h[1]);
        proof.K = Pairing.G1Point(k[0], k[1]);
        uint[] memory inputValues = new uint[](input.length);
        for(uint i = 0; i < input.length; i++){
            inputValues[i] = input[i];
        }
        if (verify(inputValues, proof) == 0) {
            Verified("Transaction successfully verified.");
            return true;
        } else {
            return false;
        }
    }
}
================================================
FILE: contracts/contract_deploy.py
================================================
'''
copyright 2018 to the roll_up Authors
This file is part of roll_up.
roll_up is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
roll_up is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with roll_up. If not, see <https://www.gnu.org/licenses/>.
'''
import json
import web3
from web3 import Web3, HTTPProvider, TestRPCProvider
from solc import compile_source, compile_standard, compile_files
from solc import compile_source, compile_files, link_code
from web3.contract import ConciseContract
from utils import hex2int
def compile(tree_depth):
    """Compile the roll_up, Pairing and Verifier contracts with solc.

    Returns a (rollup_interface, verifier_interface) tuple of solc
    compilation artifacts.  ``tree_depth`` is currently unused.
    """
    rollup_path = "../contracts/roll_up.sol"
    pairing_path = "../contracts/Pairing.sol"
    verifier_path = "../contracts/Verifier.sol"
    artifacts = compile_files(
        [pairing_path, verifier_path, rollup_path], allow_paths="./contracts"
    )
    rollup_iface = artifacts[rollup_path + ':roll_up']
    verifier_iface = artifacts[verifier_path + ':Verifier']
    return (rollup_iface, verifier_iface)
def contract_deploy(tree_depth, vk_dir, merkle_root, host="localhost"):
    # Deploy the Verifier and roll_up contracts and return a concise
    # roll_up contract handle.
    #
    # tree_depth  -- forwarded to compile() (currently unused there)
    # vk_dir      -- path to the verification key JSON produced by the prover
    # merkle_root -- initial merkle root stored in the roll_up contract
    # host        -- host of a running ethereum RPC node on port 8545
    w3 = Web3(HTTPProvider("http://" + host + ":8545"))
    rollup_interface , verifier_interface = compile(tree_depth)
    with open(vk_dir) as json_data:
        vk = json.load(json_data)
    # Flatten the verification key into the positional argument order the
    # Verifier constructor expects; the trailing element is the flattened
    # IC list, uploaded separately below.
    vk = [hex2int(vk["a"][0]),
          hex2int(vk["a"][1]),
          hex2int(vk["b"]),
          hex2int(vk["c"][0]),
          hex2int(vk["c"][1]),
          hex2int(vk["g"][0]),
          hex2int(vk["g"][1]),
          hex2int(vk["gb1"]),
          hex2int(vk["gb2"][0]),
          hex2int(vk["gb2"][1]),
          hex2int(vk["z"][0]),
          hex2int(vk["z"][1]),
          hex2int(sum(vk["IC"], []))
          ]
    # Instantiate and deploy contract
    rollup = w3.eth.contract(abi=rollup_interface['abi'], bytecode=rollup_interface['bin'])
    verifier = w3.eth.contract(abi=verifier_interface['abi'], bytecode=verifier_interface['bin'])
    # Get transaction hash from deployed contract
    tx_hash = verifier.deploy(args=vk, transaction={'from': w3.eth.accounts[0], 'gas': 4000000})
    # Get tx receipt to get contract address
    tx_receipt = w3.eth.waitForTransactionReceipt(tx_hash, 10000)
    verifier_address = tx_receipt['contractAddress']
    # add IC
    # The IC list is too large for a single transaction, so Verifier.addIC is
    # called repeatedly (the contract keeps its own upload cursor and seals
    # itself once the list is complete).
    verifier = w3.eth.contract(address=verifier_address, abi=verifier_interface['abi'],ContractFactoryClass=ConciseContract)
    while verifier.getICLen() != (len(vk[-1]))//2:
        tx_hash = verifier.addIC(vk[-1] , transact={'from': w3.eth.accounts[0], 'gas': 4000000})
        tx_receipt = w3.eth.waitForTransactionReceipt(tx_hash, 100000)
    tx_hash = rollup.deploy(transaction={'from': w3.eth.accounts[0], 'gas': 4000000}, args=[verifier_address, merkle_root])
    # Get tx receipt to get contract address
    tx_receipt = w3.eth.waitForTransactionReceipt(tx_hash, 10000)
    rollup_address = tx_receipt['contractAddress']
    # Contract instance in concise mode
    abi = rollup_interface['abi']
    rollup = w3.eth.contract(address=rollup_address, abi=abi,ContractFactoryClass=ConciseContract)
    return(rollup)
def verify(contract, proof, host="localhost"):
    """Submit a proof to the roll_up contract's isTrue and wait for the
    transaction receipt, which is returned."""
    w3 = Web3(HTTPProvider("http://" + host + ":8545"))
    sender = {'from': w3.eth.accounts[0], 'gas': 4000000}
    tx_hash = contract.isTrue(
        proof["a"], proof["a_p"], proof["b"], proof["b_p"],
        proof["c"], proof["c_p"], proof["h"], proof["k"],
        proof["input"], transact=sender)
    receipt = w3.eth.waitForTransactionReceipt(tx_hash, 10000)
    return receipt
================================================
FILE: contracts/roll_up.sol
================================================
/*
copyright 2018 to the roll_up Authors
This file is part of roll_up.
roll_up is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
roll_up is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with roll_up. If not, see <https://www.gnu.org/licenses/>.
*/
pragma solidity ^0.4.19;
import "../contracts/Verifier.sol";
// NOTE(review): this contract is byte-for-byte identical to the one defined
// in contracts/Miximus.sol — confirm which file is canonical and remove the
// duplicate.
contract roll_up{
    bytes32 root;                         // merkle root currently accepted onchain
    mapping (bytes32 => bool) nullifiers; // NOTE(review): never read or written in this contract
    event Withdraw (address);
    Verifier public zksnark_verify;       // external zkSNARK verifier contract

    // Constructor (Solidity 0.4 style: function named after the contract).
    function roll_up (address _zksnark_verify, bytes32 _root) {
        zksnark_verify = Verifier(_zksnark_verify);
        root = _root;
    }

    // Verify a snark proof against the stored root and, on success, replace
    // the root with the new one carried in the proof's public inputs.
    // input[0] carries the old root and input[2] the new root; both are
    // normalised with reverse() + padZero() before comparison/storage.
    function isTrue (
        uint[2] a,
        uint[2] a_p,
        uint[2][2] b,
        uint[2] b_p,
        uint[2] c,
        uint[2] c_p,
        uint[2] h,
        uint[2] k,
        uint[] input
    ) returns (bool) {
        bytes32 _root = padZero(reverse(bytes32(input[0]))); //)merge253bitWords(input[0], input[1]);
        require(_root == padZero(root));
        require(zksnark_verify.verifyTx(a,a_p,b,b_p,c,c_p,h,k,input));
        root = padZero(reverse(bytes32(input[2])));
        return(true);
    }

    // Read back the tracked merkle root.
    function getRoot() constant returns(bytes32) {
        return(root);
    }

    // libshark only allows 253 bit chunks in its output
    // to overcome this we merge the first 253 bits (left) with the remaining 3 bits
    // in the next variable (right)
    // NOTE(review): currently unused — isTrue compares padZero'd roots instead.
    function merge253bitWords(uint left, uint right) returns(bytes32) {
        right = pad3bit(right);
        uint left_msb = uint(padZero(reverse(bytes32(left))));
        uint left_lsb = uint(getZero(reverse(bytes32(left))));
        right = right + left_lsb;
        uint res = left_msb + right;
        return(bytes32(res));
    }

    // ensure that the 3 bits on the left is actually 3 bits.
    // Appears to bit-reverse a 3-bit value (1 -> 100b = 4, 3 -> 110b = 6).
    // NOTE(review): input == 2 returns 4, but the 3-bit reversal of 2 (010b)
    // is 2 — looks like a bug; confirm the intended mapping.
    function pad3bit(uint input) constant returns(uint) {
        if (input == 0)
            return 0;
        if (input == 1)
            return 4;
        if (input == 2)
            return 4;
        if (input == 3)
            return 6;
        return(input);
    }

    // Keep only the lowest 4 bits of x.
    function getZero(bytes32 x) returns(bytes32) {
        //0x1111111111111111111111113fdc3192693e28ff6aee95320075e4c26be03308
        return(x & 0x000000000000000000000000000000000000000000000000000000000000000F);
    }

    // Zero the lowest 4 bits of x (the part compared roots may differ in).
    function padZero(bytes32 x) returns(bytes32) {
        //0x1111111111111111111111113fdc3192693e28ff6aee95320075e4c26be03308
        return(x & 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF0);
    }

    // Reverse the bit order within a single byte using a lookup table of
    // reversed nibbles packed into the constant c.
    function reverseByte(uint a) public pure returns (uint) {
        uint c = 0xf070b030d0509010e060a020c0408000;
        return (( c >> ((a & 0xF)*8)) & 0xF0) +
            (( c >> (((a >> 4)&0xF)*8) + 4) & 0xF);
    }

    //flip endinaness
    // Reverse all 256 bits of a: byte order is flipped and each byte is
    // bit-reversed via reverseByte.
    function reverse(bytes32 a) public pure returns(bytes32) {
        uint r;
        uint i;
        uint b;
        for (i=0; i<32; i++) {
            b = (uint(a) >> ((31-i)*8)) & 0xff;
            b = reverseByte(b);
            r += b << (i*8);
        }
        return bytes32(r);
    }
}
================================================
FILE: depends/CMakeLists.txt
================================================
# Build the baby_jubjub_ecc git submodule (EdDSA gadgets used by the prover).
# NOTE(review): the libsnark submodule declared in .gitmodules is not added
# here — presumably pulled in transitively by baby_jubjub_ecc; confirm.
add_subdirectory(baby_jubjub_ecc)
================================================
FILE: docker-compose.yml
================================================
version: "3"
services:
testrpc:
image: trufflesuite/ganache-cli:v6.1.8
ports:
- 8545
networks:
- blockchain
test:
build: .
working_dir: /root/roll_up/tests
command: python3 test.py testrpc
depends_on:
- testrpc
networks:
- blockchain
volumes:
- ./tests:/root/roll_up/tests
- ./pythonWrapper:/root/roll_up/pythonWrapper
- ./keys:/root/roll_up/keys
- ./contracts/contract_deploy.py:/root/roll_up/contracts/contract_deploy.py
networks:
blockchain:
================================================
FILE: keys/.gitkeep
================================================
================================================
FILE: pythonWrapper/helper.py
================================================
'''
copyright 2018 to the roll_up Authors
This file is part of roll_up.
roll_up is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
roll_up is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with roll_up. If not, see <https://www.gnu.org/licenses/>.
'''
import pdb
import json
from solc import compile_source, compile_files, link_code
from bitstring import BitArray
import random
from ctypes import cdll
import ctypes as c
import sys
sys.path.insert(0, '../pythonWrapper')
import utils
from utils import libsnark2python
# Circuit dimensions.  Per the README these must match
# `const int tree_depth` in src/roll_up_wrapper.hpp; changing either
# requires rebuilding the native prover.
tree_depth = 2
noTx = 4
# Load the native prover shared library built by CMake into build/src.
lib = cdll.LoadLibrary('../build/src/libroll_up_wrapper.so')
prove = lib.prove
# prove(paths, pub_key_x, pub_key_y, merkle_roots, address_bits, old_leaf,
#       new_leaf, r_x, r_y, s, tree_depth, noTx) -> proof as JSON bytes.
# Each 256-bit value is passed as a c_bool[256] array, one per transaction.
prove.argtypes = [((c.c_bool*256)*(tree_depth)*(noTx)), (c.c_bool*256 * noTx), (c.c_bool*256 * noTx), (c.c_bool*256* noTx),
                  (((c.c_bool*tree_depth) * noTx)), (c.c_bool*256 * noTx), (c.c_bool*256 * noTx), (c.c_bool*256 * noTx),
                  (c.c_bool*256 * noTx) , (c.c_bool*256* noTx),c.c_int, c.c_int]
prove.restype = c.c_char_p
# genKeys(tree_depth, proving_key_path, verification_key_path)
genKeys = lib.genKeys
genKeys.argtypes = [c.c_int, c.c_char_p, c.c_char_p]
# NOTE(review): commented-out binding for a native verify() kept for
# reference — verification is currently done onchain instead.
#verify = lib.verify
#verify.argtypes = [c.c_char_p, c.c_char_p , c.c_char_p , c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p , c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p ]
#verify.restype = c.c_bool
def binary2ctypes(out):
    """Pack a 256-element bit sequence into a ctypes c_bool[256] array."""
    array_type = c.c_bool * 256
    return array_type(*out)
def hexToBinary(hexString):
    """Expand a hex string into a 256-element list of 0/1 ints, MSB first."""
    value = int(hexString, 16)
    return [int(bit) for bit in format(value, "0256b")]
def genWitness(leaves, public_key_x, public_key_y, address, tree_depth, _rhs_leaf, _new_leaf,r_x, r_y, s):
    # Marshal noTx transactions (module-level constant) into the nested
    # ctypes arrays declared in prove.argtypes and call the native prover.
    #
    # leaves          -- per-tx list of leaf sets to build each merkle tree from
    # public_key_x/y  -- per-tx signer public key coordinates (hex strings)
    # address         -- per-tx index of the leaf being updated
    # tree_depth      -- merkle depth (shadows the module-level constant)
    # _rhs_leaf       -- per-tx old leaf (hex); _new_leaf -- per-tx new leaf (hex)
    # r_x, r_y, s     -- per-tx EdDSA signature components (R point and scalar S)
    #
    # Returns (proof, root): the proof dict decoded from the prover's JSON
    # output and the merkle root recomputed from the first transaction's leaves.
    path = []
    fee = 0  # NOTE(review): unused
    address_bits = []
    pub_key_x = []
    pub_key_y = []
    roots = []
    paths = []
    old_leaf = []
    new_leaf = []
    r_x_bin_array = []
    r_y_bin_array = []
    s_bin_array = []
    for i in range(noTx):
        # Merkle tree and authentication path for this transaction's leaf.
        root , merkle_tree = utils.genMerkelTree(tree_depth, leaves[i])
        path , address_bit = utils.getMerkelProof(leaves[i], address[i], tree_depth)
        path = [binary2ctypes(hexToBinary(x)) for x in path]
        # Both the path and address bits are reversed before being handed to
        # the native prover — presumably it expects root-first order; confirm.
        address_bit = address_bit[::-1]
        path = path[::-1]
        paths.append(((c.c_bool*256)*(tree_depth))(*path))
        pub_key_x.append(binary2ctypes(hexToBinary(public_key_x[i])))
        pub_key_y.append(binary2ctypes(hexToBinary(public_key_y[i])))
        roots.append(binary2ctypes(hexToBinary(root)))
        address_bits.append((c.c_bool*tree_depth)(*address_bit))
        old_leaf.append(binary2ctypes(hexToBinary(_rhs_leaf[i])))
        new_leaf.append(binary2ctypes(hexToBinary(_new_leaf[i])))
        r_x_bin_array.append(binary2ctypes(hexToBinary(r_x[i])))
        r_y_bin_array.append(binary2ctypes(hexToBinary(r_y[i])))
        s_bin_array.append(binary2ctypes(hexToBinary(hex(s[i]))))
    # Pack the per-transaction lists into the fixed-size nested ctypes
    # arrays matching prove.argtypes.
    pub_key_x_array = ((c.c_bool*256)*(noTx))(*pub_key_x)
    pub_key_y_array = ((c.c_bool*256)*(noTx))(*pub_key_y)
    merkle_roots = ((c.c_bool*256)*(noTx))(*roots)
    old_leaf = ((c.c_bool*256)*(noTx))(*old_leaf)
    new_leaf = ((c.c_bool*256)*(noTx))(*new_leaf)
    r_x_bin = ((c.c_bool*256)*(noTx))(*r_x_bin_array)
    r_y_bin = ((c.c_bool*256)*(noTx))(*r_y_bin_array)
    s_bin = ((c.c_bool*256)*(noTx))(*s_bin_array)
    paths = ((c.c_bool*256)*(tree_depth) * noTx)(*paths)
    address_bits = ((c.c_bool)*(tree_depth) * noTx)(*address_bits)
    # Call into libroll_up_wrapper.so; the proof comes back as JSON bytes.
    proof = prove(paths, pub_key_x_array, pub_key_y_array, merkle_roots, address_bits, old_leaf, new_leaf, r_x_bin, r_y_bin, s_bin, tree_depth, noTx)
    proof = json.loads(proof.decode("utf-8"))
    root , merkle_tree = utils.genMerkelTree(tree_depth, leaves[0])
    return(proof, root)
def genSalt(i):
    """Return a random lowercase-hex string of length ``i``."""
    hex_digits = "0123456789abcdef"
    return "".join(random.choice(hex_digits) for _ in range(i))
def genNullifier(recvAddress):
    """Append a fresh 24-character random hex salt to the receiver address."""
    return recvAddress + genSalt(24)
================================================
FILE: pythonWrapper/utils.py
================================================
'''
copyright 2018 to the roll_up Authors
This file is part of roll_up.
roll_up is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
roll_up is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with roll_up. If not, see <https://www.gnu.org/licenses/>.
'''
import pdb
import hashlib
import sys
sys.path.insert(0, "../depends/baby_jubjub_ecc/tests")
import ed25519 as ed
def hex2int(elements):
    """Parse every base-16 string in ``elements`` into an int."""
    return [int(element, 16) for element in elements]
def normalize_proof(proof):
    """Convert every hex-encoded proof element to an int, in place.

    ``b`` is a pair of coordinate lists; all other keys map to flat lists
    of hex strings. Returns the same (mutated) dict.
    """
    for key in ("a", "a_p", "b_p", "c", "c_p", "h", "k", "input"):
        proof[key] = [int(element, 16) for element in proof[key]]
    proof["b"] = [[int(element, 16) for element in proof["b"][0]],
                  [int(element, 16) for element in proof["b"][1]]]
    return proof
def getSignature(m,sk,pk):
    # Sign message m with secret key sk / public key pk via the
    # baby_jubjub_ecc ed25519 test module; returns the (R, S) pair.
    # NOTE(review): ed.signature's exact return shape is defined outside
    # this file — the unpack assumes a 2-element result; confirm.
    R,S = ed.signature(m,sk,pk)
    return(R,S)
def createLeaf(public_key, message):
    """Hash the encoded public key with the message and strip the "0x" prefix."""
    encoded_key = ed.encodepoint(public_key)
    return hashPadded(encoded_key, message)[2:]
def libsnark2python(inputs):
    """Re-encode libsnark public-input ints as 0x-prefixed 32-byte hex words.

    Each input's binary representation is reversed (libsnark's bit order);
    values longer than 100 bits are right-padded to 253 bits — presumably
    full field elements, TODO confirm the threshold. The concatenated bit
    stream is zero-padded to 5 * 256 bits and re-chunked into 256-bit words.
    """
    bit_stream = ""
    for value in inputs:
        bits = bin(value)[2:][::-1]
        if len(bits) > 100:
            bits = bits.ljust(253, "0")
        bit_stream += bits
    bit_stream += "0" * (256 * 5 - len(bit_stream))
    words = []
    for start in range(0, len(bit_stream), 256):
        chunk = int(bit_stream[start:start + 256], 2)
        # 64 hex digits, zero-padded, with a 0x prefix (66 chars total).
        words.append("0x" + format(chunk, "064x"))
    return words
def hashPadded(left, right):
    """SHA256 of the two hex inputs, each left-padded to a 32-byte block.

    Returns the digest as a 0x-prefixed hex string.
    """
    payload = int(left, 16).to_bytes(32, "big") + int(right, 16).to_bytes(32, "big")
    return "0x" + hashlib.sha256(payload).hexdigest()
def sha256(data):
    """0x-prefixed SHA256 hex digest of ``str(data)`` (UTF-8 encoded)."""
    digest = hashlib.sha256(str(data).encode()).hexdigest()
    return "0x" + digest
def getUniqueLeaf(depth):
    """Hash of the all-zero subtree of the given depth.

    Depth 0 is the 32-byte zero leaf; each further level hashes the
    previous value with itself.
    """
    node = "0x0000000000000000000000000000000000000000000000000000000000000000"
    for _ in range(depth):
        node = hashPadded(node, node)
    return node
def genMerkelTree(tree_depth, leaves):
    """Build a Merkle tree of ``tree_depth`` levels over ``leaves``.

    Any odd-sized layer is padded with the canonical empty-subtree hash for
    that depth (so ``leaves`` itself may be mutated). Returns
    ``(root, layers)`` where ``layers[0]`` is the (possibly padded) leaf
    layer and ``layers[tree_depth][0]`` is the root.

    The layer list was previously hard-coded to 33 entries, which silently
    raised IndexError for tree_depth > 32; it is now sized from tree_depth.
    """
    tree_layers = [leaves] + [[] for _ in range(tree_depth)]
    for i in range(tree_depth):
        if len(tree_layers[i]) % 2 != 0:
            # Pad odd layers with the empty-subtree hash of this depth.
            tree_layers[i].append(getUniqueLeaf(i))
        for j in range(0, len(tree_layers[i]), 2):
            tree_layers[i + 1].append(hashPadded(tree_layers[i][j], tree_layers[i][j + 1]))
    return (tree_layers[tree_depth][0], tree_layers)
def getMerkelRoot(tree_depth, leaves):
    """Return only the Merkle root of ``leaves``.

    Fixes the original, which computed the tree but had no return
    statement and therefore always returned None.
    """
    root, _tree = genMerkelTree(tree_depth, leaves)
    return root
def getMerkelProof(leaves, index, tree_depth):
    """Merkle authentication path for ``leaves[index]``.

    Returns ``(proof, address_bits)``: ``proof[i]`` is the sibling node at
    level i (leaf level first) and ``address_bits[i]`` is the i-th bit of
    the leaf index (LSB first).
    """
    address_bits = []
    merkelProof = []
    _root, tree = genMerkelTree(tree_depth, leaves)
    for level in range(tree_depth):
        bit = index % 2
        address_bits.append(bit)
        if bit == 0:
            merkelProof.append(tree[level][index + 1])
        else:
            merkelProof.append(tree[level][index - 1])
        # was: int(index / 2) — float division, which loses precision for
        # indices above 2**53; use integer floor division instead.
        index = index // 2
    return (merkelProof, address_bits)
def testHashPadded():
    """hashPadded over two zero blocks must give the well-known zero-pair digest."""
    zero = "0x0000000000000000000000000000000000000000000000000000000000000000"
    expected = "0xf5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b"
    assert hashPadded(zero, zero) == expected
def testGenMerkelTree():
    """Check roots of small zero-leaf trees against known digests."""
    zero = "0x0000000000000000000000000000000000000000000000000000000000000000"
    mr1, tree = genMerkelTree(1, [zero, zero])
    mr2, tree = genMerkelTree(2, [zero, zero, zero, zero])
    # Depth-29 build is a smoke test only; its root is not asserted.
    mr3, tree = genMerkelTree(29, [zero, zero])
    assert mr1 == "0xf5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b"
    assert mr2 == "0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71"
def testlibsnarkTopython():
    """Check libsnark2python against a recorded prover output vector.

    The original function assigned a first ``inputs`` list that was
    immediately overwritten (dead code); it has been removed.
    """
    inputs = [9782619478414927069440250629401329418138703122237912437975467993246167708418,
              2077680306600520305813581592038078188768881965413185699798221798985779874888,
              4414150718664423886727710960459764220828063162079089958392546463165678021703,
              7513790795222206681892855620762680219484336729153939269867138100414707910106,
              902]
    output = libsnark2python(inputs)
    print(output)
    assert(output[0] == "0x40cde80490e78bc7d1035cbc78d3e6be3e41b2fdfad473782e02e226cc2305a8")
    assert(output[1] == "0x918e88a16d0624cd5ca4695bd84e23e4a6c8a202ce85560d3c66d4ed39bf4938")
    assert(output[2] == "0x8dd3ea28fe8d04f3e15b787fec7e805e152fe7d3302d0122c8522bee1290e4b7")
    assert(output[3] == "0x47a6bbcf8fa3667431e895f08cbd8ec2869a31698d9cf91e5bfd94cbca72161c")
def testgetMissingLeaf():
    """Check the empty-subtree hashes for depths 0-4.

    The original called ``getMissingLeaf``, which does not exist anywhere
    in the project, so the test always raised NameError. The helper under
    test is ``getUniqueLeaf`` (its values match these constants: depth 1 is
    hashPadded(zero, zero), depth 2 the depth-2 zero-tree root, ...).
    """
    assert (getUniqueLeaf(0) == "0x0000000000000000000000000000000000000000000000000000000000000000")
    assert (getUniqueLeaf(1) == "0xf5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b")
    assert (getUniqueLeaf(2) == "0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71")
    assert (getUniqueLeaf(3) == "0xc78009fdf07fc56a11f122370658a353aaa542ed63e44c4bc15ff4cd105ab33c")
    assert (getUniqueLeaf(4) == "0x536d98837f2dd165a55d5eeae91485954472d56f246df256bf3cae19352a123c")
def testgetMerkelProof():
    """Check the authentication path for leaf 0 of a depth-2 zero tree.

    Fixes the second assertion: proof nodes are produced by hashPadded and
    always carry the "0x" prefix, but the original expected value omitted
    it, so the assert could never pass.
    """
    zero = "0x0000000000000000000000000000000000000000000000000000000000000000"
    proof1, address1 = getMerkelProof([zero, zero, zero, zero], 0, 2)
    assert (proof1[0] == zero)
    assert (proof1[1] == "0xf5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b")
    assert (address1[0] == 0)
    assert (address1[1] == 0)
================================================
FILE: requirements.txt
================================================
web3==4.6.0
py-solc==3.1.0
bitstring==3.1.5
================================================
FILE: src/CMakeLists.txt
================================================
# Shared library loaded from Python (pythonWrapper/helper.py) via ctypes.
add_library(
    roll_up_wrapper
    SHARED
    roll_up_wrapper.cpp
)

# All include paths are target-scoped (the old directory-scoped
# `include_directories(.)` leaked into every target below this dir).
# PUBLIC because roll_up_wrapper's own headers include these.
target_include_directories(
    roll_up_wrapper
    PUBLIC
    ${CMAKE_CURRENT_SOURCE_DIR}
    ${DEPENDS_DIR}/baby_jubjub_ecc/src
    ${DEPENDS_DIR}/baby_jubjub_ecc
    ${DEPENDS_DIR}/baby_jubjub_ecc/baby_jubjub_ecc
    ${DEPENDS_DIR}/baby_jubjub_ecc/depends/libsnark
    ${DEPENDS_DIR}/baby_jubjub_ecc/depends/libsnark/depends/libff
    ${DEPENDS_DIR}/baby_jubjub_ecc/depends/libsnark/depends/libfqfft
)

# libsnark and baby_jubjub_ecc types appear in the public headers
# (roll_up.hpp, tx.hpp), so propagate the link dependency.
target_link_libraries(
    roll_up_wrapper
    PUBLIC
    snark
    baby_jubjub_ecc
)

# Required so the shared object can be dlopen()ed from Python.
set_property(TARGET roll_up_wrapper PROPERTY POSITION_INDEPENDENT_CODE ON)
================================================
FILE: src/ZoKrates/wraplibsnark.cpp
================================================
/**
* @file wraplibsnark.cpp
* @author Jacob Eberhardt <jacob.eberhardt@tu-berlin.de
* @author Dennis Kuhnert <dennis.kuhnert@campus.tu-berlin.de>
* @date 2017
*/
#include "wraplibsnark.hpp"
#include <fstream>
#include <iostream>
#include <cassert>
#include <iomanip>
// contains definition of alt_bn128 ec public parameters
//#include "libsnark/libsnark/algebra/curves/alt_bn128/alt_bn128_pp.hpp"
#include "libff/algebra/curves/alt_bn128/alt_bn128_pp.hpp"
// contains required interfaces and types (keypair, proof, generator, prover, verifier)
#include <libsnark/zk_proof_systems/ppzksnark/r1cs_ppzksnark/r1cs_ppzksnark.hpp>
typedef long integer_coeff_t;
using namespace std;
using namespace libsnark;
// conversion byte[32] <-> libsnark bigint.
// Deserialize a 32-byte big-endian buffer into a libsnark bigint.
// Byte k of the input lands in 64-bit limb (3 - k/8) at bit
// position 8 * (7 - k%8) — i.e. limb 3 holds the most significant bytes.
libff::bigint<libff::alt_bn128_r_limbs> libsnarkBigintFromBytes(const uint8_t* _x)
{
    libff::bigint<libff::alt_bn128_r_limbs> result;
    for (unsigned k = 0; k < 32; k++) {
        result.data[3 - (k / 8)] |= uint64_t(_x[k]) << (8 * (7 - (k % 8)));
    }
    return result;
}
// Hex-encode a bigint big-endian with leading zeros stripped (at least
// one digit is always kept, so zero renders as "0"). No "0x" prefix.
std::string HexStringFromLibsnarkBigint(libff::bigint<libff::alt_bn128_r_limbs> _x){
    // Serialize the four 64-bit limbs into 32 big-endian bytes.
    uint8_t x[32];
    for (unsigned i = 0; i < 4; i++)
        for (unsigned j = 0; j < 8; j++)
            x[i * 8 + j] = uint8_t(uint64_t(_x.data[3 - i]) >> (8 * (7 - j)));

    std::stringstream ss;
    ss << std::setfill('0');
    for (unsigned i = 0; i<32; i++) {
        ss << std::hex << std::setw(2) << (int)x[i];
    }
    // Fix: the original read "std:string str" — a stray `std:` label plus
    // an unqualified `string` that only compiled via `using namespace std`.
    std::string str = ss.str();
    return str.erase(0, min(str.find_first_not_of('0'), str.size()-1));
}
// Render a G1 point's affine coordinates as two quoted hex values:
//   "X", "0xY"
// NOTE(review): the X value carries no "0x" prefix while Y does; the
// Python consumers parse both via int(el, 16), so this is tolerated —
// kept as-is to avoid breaking downstream JSON comparisons.
std::string outputPointG1AffineAsHex(libff::alt_bn128_G1 _p)
{
    libff::alt_bn128_G1 aff = _p;
    aff.to_affine_coordinates();
    // Fix: removed a stringstream that was filled (forcing extra
    // as_bigint conversions) and then discarded — dead code.
    return "\"" +
           HexStringFromLibsnarkBigint(aff.X.as_bigint()) +
           "\", \"0x" +
           HexStringFromLibsnarkBigint(aff.Y.as_bigint()) +
           "\"";
}
// Render a G1 point as comma-separated decimal bigints "X,Y,Z"
// (Z is 1 after normalization to affine coordinates).
std::string outputPointG1AffineAsInt(libff::alt_bn128_G1 _p)
{
    libff::alt_bn128_G1 affine = _p;
    affine.to_affine_coordinates();
    std::stringstream out;
    out << affine.X.as_bigint() << "," << affine.Y.as_bigint() << "," << affine.Z.as_bigint();
    return out.str();
}
// Render a G2 point as nested JSON-style hex pairs, imaginary component
// (c1) first:
//   ["0x X.c1", "0x X.c0"],\n ["0x Y.c1", "0x Y.c0"]
std::string outputPointG2AffineAsHex(libff::alt_bn128_G2 _p)
{
    libff::alt_bn128_G2 aff = _p;
    // Comparing bigints to "0" relies on libff's bigint(const char*)
    // conversion. NOTE(review): normalization is skipped unless BOTH Z
    // components are non-zero, so some non-normalized points may be
    // printed with projective coordinates — confirm callers only pass
    // points for which this is safe.
    if (aff.Z.c0.as_bigint() != "0" && aff.Z.c1.as_bigint() != "0" ) {
        aff.to_affine_coordinates();
    }
    return "[\"0x" +
        HexStringFromLibsnarkBigint(aff.X.c1.as_bigint()) + "\", \"0x" +
        HexStringFromLibsnarkBigint(aff.X.c0.as_bigint()) + "\"],\n [\"0x" +
        HexStringFromLibsnarkBigint(aff.Y.c1.as_bigint()) + "\", \"0x" +
        HexStringFromLibsnarkBigint(aff.Y.c0.as_bigint()) + "\"]";
}
// Render a G2 point as comma-separated decimal bigints,
// imaginary components (c1) first: X.c1,X.c0,Y.c1,Y.c0,Z.c1,Z.c0.
std::string outputPointG2AffineAsInt(libff::alt_bn128_G2 _p)
{
    libff::alt_bn128_G2 aff = _p;
    // Same conditional normalization as outputPointG2AffineAsHex: skipped
    // unless both Z components are non-zero.
    if (aff.Z.c0.as_bigint() != "0" && aff.Z.c1.as_bigint() != "0" ) {
        aff.to_affine_coordinates();
    }
    std::stringstream ss;
    ss << "" << aff.X.c1.as_bigint() << "," << aff.X.c0.as_bigint() << "," << aff.Y.c1.as_bigint() << "," << aff.Y.c0.as_bigint() << "," << aff.Z.c1.as_bigint() << "," <<aff.Z.c0.as_bigint() ;
    return ss.str();
}
//takes input and puts it into constraint system
// Build an R1CS from dense 32-byte big-endian coefficient matrices A, B, C
// of shape constraints x variables.
// NOTE(review): the add_term/add_constraint calls are still commented out,
// so the returned system currently has its sizes set but NO constraints —
// this function looks like unfinished scaffolding; confirm before relying
// on its output.
r1cs_ppzksnark_constraint_system<libff::alt_bn128_pp> createConstraintSystem(const uint8_t* A, const uint8_t* B, const uint8_t* C, int constraints, int variables, int inputs)
{
    // Fix: curve parameter initialization was inside the innermost loop,
    // re-running constraints * variables times; it is loop-invariant.
    libff::alt_bn128_pp::init_public_params();
    r1cs_ppzksnark_constraint_system<libff::alt_bn128_pp> cs;
    cs.primary_input_size = inputs;
    cs.auxiliary_input_size = variables - inputs - 1; // ~one not included
    cout << "num variables: " << variables <<endl;
    cout << "num constraints: " << constraints <<endl;
    cout << "num inputs: " << inputs <<endl;
    for (int row = 0; row < constraints; row++) {
        linear_combination<libff::alt_bn128_pp> lin_comb_A, lin_comb_B, lin_comb_C;
        for (int idx=0; idx<variables; idx++) {
            libff::bigint<libff::alt_bn128_r_limbs> value = libsnarkBigintFromBytes(A+row*variables*32 + idx*32);
            // Fix: this debug line was mislabeled "C entry" while reading A.
            cout << "A entry " << idx << " in row " << row << ": " << value << endl;
            if (!value.is_zero()) {
                //lin_comb_A.add_term(idx,value);
            }
        }
        for (int idx=0; idx<variables; idx++) {
            libff::bigint<libff::alt_bn128_r_limbs> value = libsnarkBigintFromBytes(B+row*variables*32 + idx*32);
            cout << "B entry " << idx << " in row " << row << ": " << value << endl;
            if (!value.is_zero()) {
                cout << "B(" << idx << ", " << value << ")" << endl;
                //lin_comb_B.add_term(idx, value);
            }
        }
        for (int idx=0; idx<variables; idx++) {
            libff::bigint<libff::alt_bn128_r_limbs> value = libsnarkBigintFromBytes(C+row*variables*32 + idx*32);
            if (!value.is_zero()) {
                //lin_comb_C.add_term(idx, value);
            }
        }
        //cs.add_constraint(r1cs_constraint<libff::alt_bn128_pp>(lin_comb_A, lin_comb_B, lin_comb_C));
    }
    return cs;
}
// keypair generateKeypair(constraints)
// Run the PGHR13 trusted setup over the constraint system, producing a
// proving/verification keypair. This is the expensive setup phase.
r1cs_ppzksnark_keypair<libff::alt_bn128_pp> generateKeypair(const r1cs_ppzksnark_constraint_system<libff::alt_bn128_pp> &cs){
    // from r1cs_ppzksnark.hpp
    return r1cs_ppzksnark_generator<libff::alt_bn128_pp>(cs);
}
// Serialize obj (via its stream operator<<) into a memory buffer, then
// dump that buffer to `path` in binary mode.
template<typename T>
void writeToFile(std::string path, T& obj) {
    std::stringstream buffer;
    buffer << obj;
    buffer.rdbuf()->pubseekpos(0, std::ios_base::out);

    std::ofstream file;
    file.open(path, std::ios::binary);
    file << buffer.rdbuf();
    file.flush();
    file.close();
}
// Read the whole file at `path` into memory and deserialize a T from it
// via the stream operator>>. Asserts (debug builds only) that the file
// opened successfully.
template<typename T>
T loadFromFile(std::string path) {
    std::ifstream file(path, std::ios::binary);
    assert(file.is_open());

    std::stringstream buffer;
    buffer << file.rdbuf();
    file.close();
    buffer.rdbuf()->pubseekpos(0, std::ios_base::in);

    T value;
    buffer >> value;
    return value;
}
// Persist the proving key to pk_path using libsnark's stream serialization.
// Note: the key is taken by value, so a full copy is made on each call.
void serializeProvingKeyToFile(r1cs_ppzksnark_proving_key<libff::alt_bn128_pp> pk, const char* pk_path){
    writeToFile(pk_path, pk);
}
// Load a proving key previously written by serializeProvingKeyToFile.
// Asserts (inside loadFromFile, debug builds only) if the file is missing.
r1cs_ppzksnark_proving_key<libff::alt_bn128_pp> deserializeProvingKeyFromFile(const char* pk_path){
    return loadFromFile<r1cs_ppzksnark_proving_key<libff::alt_bn128_pp>>(pk_path);
}
// Write the verification key to vk_path in a human-readable, Solidity-like
// text format ("vk.A = ..." lines). This is NOT the binary format read
// back by loadFromFile — it is meant for manual inspection / contract
// generation.
void serializeVerificationKeyToFile(r1cs_ppzksnark_verification_key<libff::alt_bn128_pp> vk, const char* vk_path){
    std::stringstream ss;

    // IC has one fixed first element plus one entry per public input.
    unsigned icLength = vk.encoded_IC_query.rest.indices.size() + 1;

    ss << "\t\tvk.A = " << outputPointG2AffineAsHex(vk.alphaA_g2) << endl;
    ss << "\t\tvk.B = " << outputPointG1AffineAsHex(vk.alphaB_g1) << endl;
    ss << "\t\tvk.C = " << outputPointG2AffineAsHex(vk.alphaC_g2) << endl;
    ss << "\t\tvk.gamma = " << outputPointG2AffineAsHex(vk.gamma_g2) << endl;
    ss << "\t\tvk.gammaBeta1 = " << outputPointG1AffineAsHex(vk.gamma_beta_g1) << endl;
    ss << "\t\tvk.gammaBeta2 = " << outputPointG2AffineAsHex(vk.gamma_beta_g2) << endl;
    ss << "\t\tvk.Z = " << outputPointG2AffineAsHex(vk.rC_Z_g2) << endl;
    ss << "\t\tvk.IC.len() = " << icLength << endl;
    ss << "\t\tvk.IC[0] = " << outputPointG1AffineAsHex(vk.encoded_IC_query.first) << endl;
    for (size_t i = 1; i < icLength; ++i)
    {
        auto vkICi = outputPointG1AffineAsHex(vk.encoded_IC_query.rest.values[i - 1]);
        ss << "\t\tvk.IC[" << i << "] = " << vkICi << endl;
    }

    std::ofstream fh;
    fh.open(vk_path, std::ios::binary);
    ss.rdbuf()->pubseekpos(0, std::ios_base::out);
    fh << ss.rdbuf();
    fh.flush();
    fh.close();
}
// compliant with solidty verification example
// Print the verification key to stdout as Solidity assignment statements
// (Pairing.G1Point/G2Point constructor calls) suitable for pasting into
// the Verifier contract.
void exportVerificationKey(r1cs_ppzksnark_keypair<libff::alt_bn128_pp> keypair){
    // IC has one fixed first element plus one entry per public input.
    unsigned icLength = keypair.vk.encoded_IC_query.rest.indices.size() + 1;

    cout << "\tVerification key in Solidity compliant format:{" << endl;
    cout << "\t\tvk.A = Pairing.G2Point(" << outputPointG2AffineAsHex(keypair.vk.alphaA_g2) << ");" << endl;
    cout << "\t\tvk.B = Pairing.G1Point(" << outputPointG1AffineAsHex(keypair.vk.alphaB_g1) << ");" << endl;
    cout << "\t\tvk.C = Pairing.G2Point(" << outputPointG2AffineAsHex(keypair.vk.alphaC_g2) << ");" << endl;
    cout << "\t\tvk.gamma = Pairing.G2Point(" << outputPointG2AffineAsHex(keypair.vk.gamma_g2) << ");" << endl;
    cout << "\t\tvk.gammaBeta1 = Pairing.G1Point(" << outputPointG1AffineAsHex(keypair.vk.gamma_beta_g1) << ");" << endl;
    cout << "\t\tvk.gammaBeta2 = Pairing.G2Point(" << outputPointG2AffineAsHex(keypair.vk.gamma_beta_g2) << ");" << endl;
    cout << "\t\tvk.Z = Pairing.G2Point(" << outputPointG2AffineAsHex(keypair.vk.rC_Z_g2) << ");" << endl;
    cout << "\t\tvk.IC = new Pairing.G1Point[](" << icLength << ");" << endl;
    cout << "\t\tvk.IC[0] = Pairing.G1Point(" << outputPointG1AffineAsHex(keypair.vk.encoded_IC_query.first) << ");" << endl;
    for (size_t i = 1; i < icLength; ++i)
    {
        auto vkICi = outputPointG1AffineAsHex(keypair.vk.encoded_IC_query.rest.values[i - 1]);
        cout << "\t\tvk.IC[" << i << "] = Pairing.G1Point(" << vkICi << ");" << endl;
    }
    cout << "\t\t}" << endl;
}
// compliant with solidty verification example
/*
void exportInput(r1cs_primary_input<libff::alt_bn128_pp> input){
cout << "\tInput in Solidity compliant format:{" << endl;
for (size_t i = 0; i < input.size(); ++i)
{
cout << "\t\tinput[" << i << "] = " << HexStringFromLibsnarkBigint(input[i].as_bigint()) << ";" << endl;
}
cout << "\t\t}" << endl;
} */
// Print all eight PGHR13 proof elements to stdout as Solidity
// Pairing.G1Point/G2Point constructor calls (B is the only G2 element).
void printProof(r1cs_ppzksnark_proof<libff::alt_bn128_pp> proof){
    cout << "Proof:"<< endl;
    cout << "proof.A = Pairing.G1Point(" << outputPointG1AffineAsHex(proof.g_A.g)<< ");" << endl;
    cout << "proof.A_p = Pairing.G1Point(" << outputPointG1AffineAsHex(proof.g_A.h)<< ");" << endl;
    cout << "proof.B = Pairing.G2Point(" << outputPointG2AffineAsHex(proof.g_B.g)<< ");" << endl;
    cout << "proof.B_p = Pairing.G1Point(" << outputPointG1AffineAsHex(proof.g_B.h)<<");" << endl;
    cout << "proof.C = Pairing.G1Point(" << outputPointG1AffineAsHex(proof.g_C.g)<< ");" << endl;
    cout << "proof.C_p = Pairing.G1Point(" << outputPointG1AffineAsHex(proof.g_C.h)<<");" << endl;
    cout << "proof.H = Pairing.G1Point(" << outputPointG1AffineAsHex(proof.g_H)<<");"<< endl;
    cout << "proof.K = Pairing.G1Point(" << outputPointG1AffineAsHex(proof.g_K)<<");"<< endl;
}
/*bool _setup(const uint8_t* A, const uint8_t* B, const uint8_t* C, int constraints, int variables, int inputs, const char* pk_path, const char* vk_path)
{
//libsnark::inhibit_profiling_info = true;
//libsnark::inhibit_profiling_counters = true;
//initialize curve parameters
libff::alt_bn128_pp::init_public_params();
r1cs_constraint_system<libff::alt_bn128_pp> cs;
cs = createConstraintSystem(A, B ,C , constraints, variables, inputs);
assert(cs.num_variables() >= inputs);
assert(cs.num_inputs() == inputs);
assert(cs.num_constraints() == constraints);
// create keypair
r1cs_ppzksnark_keypair<alt_bn128_pp> keypair = r1cs_ppzksnark_generator<alt_bn128_pp>(cs);
// Export vk and pk to files
serializeProvingKeyToFile(keypair.pk, pk_path);
serializeVerificationKeyToFile(keypair.vk, vk_path);
// Print VerificationKey in Solidity compatible format
exportVerificationKey(keypair);
return true;
}*/
/*
bool _generate_proof(const char* pk_path, const uint8_t* public_inputs, int public_inputs_length, const uint8_t* private_inputs, int private_inputs_length)
{
// libsnark::inhibit_profiling_info = true;
// libsnark::inhibit_profiling_counters = true;
//initialize curve parameters
libff::alt_bn128_pp::init_public_params();
r1cs_ppzksnark_proving_key<libff::alt_bn128_pp> pk = deserializeProvingKeyFromFile(pk_path);
// assign variables based on witness values, excludes ~one
r1cs_variable_assignment<libff::alt_bn128_pp> full_variable_assignment;
for (int i = 1; i < public_inputs_length; i++) {
full_variable_assignment.push_back(libff::alt_bn128_pp(libsnarkBigintFromBytes(public_inputs + i*32)));
}
for (int i = 0; i < private_inputs_length; i++) {
full_variable_assignment.push_back(<libff::alt_bn128_pp>(libsnarkBigintFromBytes(private_inputs + i*32)));
}
// split up variables into primary and auxiliary inputs. Does *NOT* include the constant 1
// Public variables belong to primary input, private variables are auxiliary input.
r1cs_primary_input<libff::alt_bn128_pp> primary_input(full_variable_assignment.begin(), full_variable_assignment.begin() + public_inputs_length-1);
r1cs_primary_input<libff::alt_bn128_pp> auxiliary_input(full_variable_assignment.begin() + public_inputs_length-1, full_variable_assignment.end());
// for debugging
// cout << "full variable assignment:"<< endl << full_variable_assignment;
// cout << "primary input:"<< endl << primary_input;
// cout << "auxiliary input:"<< endl << auxiliary_input;
// Proof Generation
r1cs_ppzksnark_proof<alt_bn128_pp> proof = r1cs_ppzksnark_prover<alt_bn128_pp>(pk, primary_input, auxiliary_input);
// print proof
printProof(proof);
// TODO? print inputs
return true;
} */
================================================
FILE: src/ZoKrates/wraplibsnark.hpp
================================================
/**
* @file wraplibsnark.hpp
* @author Jacob Eberhardt <jacob.eberhardt@tu-berlin.de
* @author Dennis Kuhnert <dennis.kuhnert@campus.tu-berlin.de>
* @date 2017
*/
// C-compatible FFI surface of the ZoKrates libsnark wrapper. extern "C"
// keeps the symbol names unmangled for callers such as Python's ctypes.
#ifdef __cplusplus
extern "C" {
#endif

#include <stdbool.h>
#include <stdint.h>

// Build an R1CS from dense coefficient matrices A, B, C (each
// constraints x variables entries of 32 big-endian bytes), run the
// trusted setup, and write the keys to pk_path / vk_path.
// NOTE(review): the corresponding definition in wraplibsnark.cpp is
// currently commented out — confirm which translation unit provides it.
bool _setup(const uint8_t* A,
            const uint8_t* B,
            const uint8_t* C,
            int constraints,
            int variables,
            int inputs,
            const char* pk_path,
            const char* vk_path
);

// Load the proving key from pk_path and produce a proof for the given
// public/private witness values (32 big-endian bytes each).
// NOTE(review): definition in wraplibsnark.cpp is also commented out.
bool _generate_proof(const char* pk_path,
                     const uint8_t* public_inputs,
                     int public_inputs_length,
                     const uint8_t* private_inputs,
                     int private_inputs_length
);

#ifdef __cplusplus
} // extern "C"
#endif
================================================
FILE: src/export.cpp
================================================
/*
copyright 2018 to the roll_up Authors
This file is part of roll_up.
roll_up is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
roll_up is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with roll_up. If not, see <https://www.gnu.org/licenses/>.
*/
#include <fstream>
#include <iostream>
#include <cassert>
#include <iomanip>
#include <libsnark/zk_proof_systems/ppzksnark/r1cs_ppzksnark/r1cs_ppzksnark.hpp>
// ZoKrates
#include <ZoKrates/wraplibsnark.cpp>
//key gen
#include "libff/algebra/curves/alt_bn128/alt_bn128_pp.hpp" //hold key
#include "libff/algebra/curves/bn128/bn128_pp.hpp" //hold key
#include <libff/algebra/curves/bn128/bn128_pp.hpp>
#include <libff/algebra/curves/edwards/edwards_pp.hpp>
#include <libsnark/common/data_structures/merkle_tree.hpp>
#include <libsnark/gadgetlib1/gadget.hpp>
#include <libsnark/gadgetlib1/gadgets/hashes/crh_gadget.hpp>
#include <libsnark/gadgetlib1/gadgets/hashes/digest_selector_gadget.hpp>
#include <libsnark/gadgetlib1/gadgets/hashes/hash_io.hpp>
#include <libsnark/gadgetlib1/gadgets/merkle_tree/merkle_authentication_path_variable.hpp>
#include <libsnark/gadgetlib1/gadgets/merkle_tree/merkle_tree_check_read_gadget.hpp>
// tmp
//#include <libsnark/gadgetlib1/gadgets/hashes/sha256/sha256_gadget.hpp>
#include <libsnark/gadgetlib1/gadgets/merkle_tree/merkle_tree_check_read_gadget.hpp>
#include <libsnark/gadgetlib1/gadgets/merkle_tree/merkle_tree_check_update_gadget.hpp>
using namespace libsnark;
using namespace libff;
// Serialize one linear combination as a JSON object mapping variable
// index to coefficient, e.g. {"0":1,"3":-1}.
// NOTE(review): any coefficient other than 0 or 1 is emitted as the
// literal -1. That is only correct for circuits whose coefficients are
// limited to {-1, 0, 1}; a field-encoded -1 would otherwise print as a
// huge positive bigint. Confirm this assumption holds for the roll_up
// circuit before reusing this exporter.
template<typename FieldT>
void constraint_to_json(linear_combination<FieldT> constraints, std::stringstream &ss)
{
    ss << "{";
    uint count = 0;
    for (const linear_term<FieldT>& lt : constraints.terms)
    {
        // Comma-separate entries after the first.
        if (count != 0) {
            ss << ",";
        }
        if (lt.coeff != 0 && lt.coeff != 1) {
            ss << '"' << lt.index << '"' << ":" << "-1";
        }
        else {
            ss << '"' << lt.index << '"' << ":" << lt.coeff;
        }
        count++;
    }
    ss << "}";
}
// Dump the protoboard's full variable assignment to `path` as
// {"TestVariables":[v0,v1,...]} with decimal bigint values.
// NOTE(review): the input_variables parameter is currently unused —
// confirm whether it was meant to limit the dump.
template <typename FieldT>
void array_to_json(protoboard<FieldT> pb, uint input_variables, std::string path)
{
    r1cs_variable_assignment<FieldT> assignment = pb.full_variable_assignment();

    std::stringstream json;
    json << "\n{\"TestVariables\":[";
    for (size_t i = 0; i < assignment.size(); ++i)
    {
        json << assignment[i].as_bigint();
        if (i + 1 < assignment.size()) {
            json << ",";
        }
    }
    json << "]}\n";
    json.rdbuf()->pubseekpos(0, std::ios_base::out);

    std::ofstream file;
    file.open(path, std::ios::binary);
    file << json.rdbuf();
    file.flush();
    file.close();
}
// Export the protoboard's constraint system to `path` as JSON:
// {"variables":[annotations...], "constraints":[[A,B,C], ...]} where each
// of A/B/C is a {"index":coeff} object from constraint_to_json.
template<typename FieldT>
void r1cs_to_json(protoboard<FieldT> pb, uint input_variables, std::string path)
{
    // output inputs, right now need to compile with debug flag so that the `variable_annotations`
    // exists. Having trouble setting that up so will leave for now.
    r1cs_constraint_system<FieldT> constraints = pb.get_constraint_system();
    std::stringstream ss;
    std::ofstream fh;
    fh.open(path, std::ios::binary);
    ss << "\n{\"variables\":[";
    // input_variables + 1 annotations are emitted — presumably to include
    // the constant ~one at index 0 alongside the public inputs; TODO confirm.
    for (size_t i = 0; i < input_variables + 1; ++i)
    {
        ss << '"' << constraints.variable_annotations[i].c_str() << '"';
        // Comma after every entry except the last (i == input_variables).
        if (i < input_variables ) {
            ss << ", ";
        }
    }
    ss << "],\n";
    ss << "\"constraints\":[";
    for (size_t c = 0; c < constraints.num_constraints(); ++c)
    {
        ss << "[";
        constraint_to_json(constraints.constraints[c].a, ss);
        ss << ",";
        constraint_to_json(constraints.constraints[c].b, ss);
        ss << ",";
        constraint_to_json(constraints.constraints[c].c, ss);
        // No trailing comma after the final constraint triple.
        if (c == constraints.num_constraints()-1 ) {
            ss << "]\n";
        } else {
            ss << "],\n";
        }
    }
    ss << "]}";
    ss.rdbuf()->pubseekpos(0, std::ios_base::out);
    fh << ss.rdbuf();
    fh.flush();
    fh.close();
}
// Serialize the proof plus its public inputs to ../zksnark_element/proof.json
// and return the JSON string. With isInt the coordinates are emitted as
// bare decimal integers; otherwise (the default) as quoted "0x..." hex
// strings — which is what the Python wrapper's int(el, 16) parsing expects.
// Fix: isInt now defaults to false, because dump_key (same file) calls
// proof_to_json with only two arguments.
// Also echoes the proof to stdout in Solidity assignment form.
template<typename FieldT>
string proof_to_json(r1cs_ppzksnark_proof<libff::alt_bn128_pp> proof, r1cs_primary_input<FieldT> input, bool isInt = false) {
    std::cout << "proof.A = Pairing.G1Point(" << outputPointG1AffineAsHex(proof.g_A.g)<< ");" << endl;
    std::cout << "proof.A_p = Pairing.G1Point(" << outputPointG1AffineAsHex(proof.g_A.h)<< ");" << endl;
    std::cout << "proof.B = Pairing.G2Point(" << outputPointG2AffineAsHex(proof.g_B.g)<< ");" << endl;
    std::cout << "proof.B_p = Pairing.G1Point(" << outputPointG1AffineAsHex(proof.g_B.h)<<");" << endl;
    std::cout << "proof.C = Pairing.G1Point(" << outputPointG1AffineAsHex(proof.g_C.g)<< ");" << endl;
    std::cout << "proof.C_p = Pairing.G1Point(" << outputPointG1AffineAsHex(proof.g_C.h)<<");" << endl;
    std::cout << "proof.H = Pairing.G1Point(" << outputPointG1AffineAsHex(proof.g_H)<<");"<< endl;
    std::cout << "proof.K = Pairing.G1Point(" << outputPointG1AffineAsHex(proof.g_K)<<");"<< endl;

    std::string path = "../zksnark_element/proof.json";
    std::stringstream ss;
    std::ofstream fh;
    fh.open(path, std::ios::binary);
    if(isInt) {
        ss << "{\n";
        ss << " \"a\" :[" << outputPointG1AffineAsInt(proof.g_A.g) << "],\n";
        ss << " \"a_p\" :[" << outputPointG1AffineAsInt(proof.g_A.h)<< "],\n";
        ss << " \"b\" :[" << outputPointG2AffineAsInt(proof.g_B.g)<< "],\n";
        ss << " \"b_p\" :[" << outputPointG1AffineAsInt(proof.g_B.h)<< "],\n";
        ss << " \"c\" :[" << outputPointG1AffineAsInt(proof.g_C.g)<< "],\n";
        ss << " \"c_p\" :[" << outputPointG1AffineAsInt(proof.g_C.h)<< "],\n";
        ss << " \"h\" :[" << outputPointG1AffineAsInt(proof.g_H)<< "],\n";
        ss << " \"k\" :[" << outputPointG1AffineAsInt(proof.g_K)<< "],\n";
        ss << " \"input\" :" << "["; //1 should always be the first variavle passed
        for (size_t i = 0; i < input.size(); ++i)
        {
            ss << input[i].as_bigint() ;
            if ( i < input.size() - 1 ) {
                ss<< ", ";
            }
        }
        ss << "]\n";
        ss << "}";
    }
    else {
        ss << "{\n";
        ss << " \"a\" :[" << outputPointG1AffineAsHex(proof.g_A.g) << "],\n";
        ss << " \"a_p\" :[" << outputPointG1AffineAsHex(proof.g_A.h)<< "],\n";
        ss << " \"b\" :[" << outputPointG2AffineAsHex(proof.g_B.g)<< "],\n";
        ss << " \"b_p\" :[" << outputPointG1AffineAsHex(proof.g_B.h)<< "],\n";
        ss << " \"c\" :[" << outputPointG1AffineAsHex(proof.g_C.g)<< "],\n";
        ss << " \"c_p\" :[" << outputPointG1AffineAsHex(proof.g_C.h)<< "],\n";
        ss << " \"h\" :[" << outputPointG1AffineAsHex(proof.g_H)<< "],\n";
        ss << " \"k\" :[" << outputPointG1AffineAsHex(proof.g_K)<< "],\n";
        ss << " \"input\" :" << "["; //1 should always be the first variavle passed
        for (size_t i = 0; i < input.size(); ++i)
        {
            ss << "\"0x" << HexStringFromLibsnarkBigint(input[i].as_bigint()) << "\"";
            if ( i < input.size() - 1 ) {
                ss<< ", ";
            }
        }
        ss << "]\n";
        ss << "}";
    }
    ss.rdbuf()->pubseekpos(0, std::ios_base::out);
    fh << ss.rdbuf();
    fh.flush();
    fh.close();
    return(ss.str());
}
// Write the verification key to `path` as JSON with hex-encoded points,
// keys matching what contracts/contract_deploy.py expects
// (a, b, c, g, gb1, gb2, z, IC).
void vk2json(r1cs_ppzksnark_keypair<libff::alt_bn128_pp> keypair, std::string path ) {
    std::stringstream ss;
    std::ofstream fh;
    fh.open(path, std::ios::binary);
    // IC has one fixed first element plus one entry per public input.
    unsigned icLength = keypair.vk.encoded_IC_query.rest.indices.size() + 1;

    ss << "{\n";
    ss << " \"a\" :[" << outputPointG2AffineAsHex(keypair.vk.alphaA_g2) << "],\n";
    ss << " \"b\"  :[" << outputPointG1AffineAsHex(keypair.vk.alphaB_g1) << "],\n";
    ss << " \"c\" :[" << outputPointG2AffineAsHex(keypair.vk.alphaC_g2) << "],\n";
    ss << " \"g\" :[" << outputPointG2AffineAsHex(keypair.vk.gamma_g2)<< "],\n";
    ss << " \"gb1\" :[" << outputPointG1AffineAsHex(keypair.vk.gamma_beta_g1)<< "],\n";
    ss << " \"gb2\" :[" << outputPointG2AffineAsHex(keypair.vk.gamma_beta_g2)<< "],\n";
    ss << " \"z\" :[" << outputPointG2AffineAsHex(keypair.vk.rC_Z_g2)<< "],\n";
    ss << "\"IC\" :[[" << outputPointG1AffineAsHex(keypair.vk.encoded_IC_query.first) << "]";
    for (size_t i = 1; i < icLength; ++i)
    {
        auto vkICi = outputPointG1AffineAsHex(keypair.vk.encoded_IC_query.rest.values[i - 1]);
        ss << ",[" << vkICi << "]";
    }
    ss << "]";
    ss << "}";
    ss.rdbuf()->pubseekpos(0, std::ios_base::out);
    fh << ss.rdbuf();
    fh.flush();
    fh.close();
}
// Run the trusted setup for the protoboard's constraint system, persist
// the keys (vk.json + raw pk/vk), generate a proof for the board's current
// assignment, and return the proof JSON as a heap-allocated C string.
// The caller owns the returned buffer (freed from Python via the FFI).
// Fixes: (1) the result buffer was allocated one byte short
// (new char[size] with a memcpy of size+1 — heap overflow);
// (2) proof_to_json was called without its isInt argument; hex output is
// passed explicitly since that is what the Python side parses;
// (3) removed discarded pb.primary_input()/pb.auxiliary_input() calls and
// the unused `constraints` local.
template<typename FieldT>
char* dump_key(protoboard<FieldT> pb, std::string path)
{
    std::stringstream ss;
    std::ofstream fh;
    fh.open(path, std::ios::binary);

    r1cs_ppzksnark_keypair<libff::alt_bn128_pp> keypair = generateKeypair(pb.get_constraint_system());
    // save keys
    vk2json(keypair, "vk.json");
    writeToFile("../zksnark_element/pk.raw", keypair.pk);
    writeToFile("../zksnark_element/vk.raw", keypair.vk);

    r1cs_primary_input<FieldT> primary_input = pb.primary_input();
    r1cs_auxiliary_input<FieldT> auxiliary_input = pb.auxiliary_input();
    ss << "primaryinputs" << primary_input;
    ss << "aux input" << auxiliary_input;

    r1cs_ppzksnark_proof<libff::alt_bn128_pp> proof = r1cs_ppzksnark_prover<libff::alt_bn128_pp>(keypair.pk, primary_input, auxiliary_input);
    auto json = proof_to_json(proof, primary_input, false); // hex output for the Python wrapper

    ss.rdbuf()->pubseekpos(0, std::ios_base::out);
    fh << ss.rdbuf();
    fh.flush();
    fh.close();

    // +1 for the NUL terminator copied below.
    auto result = new char[json.size() + 1];
    memcpy(result, json.c_str(), json.size() + 1);
    return result;
}
================================================
FILE: src/roll_up.hpp
================================================
/*
copyright 2018 to the roll_up Authors
This file is part of roll_up.
roll_up is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
roll_up is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with roll_up. If not, see <https://www.gnu.org/licenses/>.
*/
#include <cassert>
#include <memory>
#include <libsnark/gadgetlib1/gadget.hpp>
#include <tx.hpp>
typedef sha256_ethereum HashT;
namespace libsnark {
// Gadget that proves a batch of noTx roll_up transactions: each
// transaction (see tx.hpp) verifies a signature and a Merkle-tree leaf
// update, and the packed old/new roots and data-availability leaves are
// exposed as public inputs via the multipacking gadgets.
template<typename FieldT>
class roll_up: public gadget<FieldT> {
private:
    /* no internal variables */
public:
    // Packers that turn bit-level public inputs into field elements.
    std::shared_ptr<multipacking_gadget<FieldT>> unpacker_old_root;
    std::shared_ptr<multipacking_gadget<FieldT>> unpacker_new_root;
    std::shared_ptr<multipacking_gadget<FieldT>> unpacker_leaf_addresses;
    std::shared_ptr<multipacking_gadget<FieldT>> unpacker_leaf_hashes;
    // NOTE(review): a and d are allocated here but their role is not
    // visible in this header — see roll_up.tcc.
    pb_variable<FieldT> a;
    pb_variable<FieldT> d;
    pb_variable_array<FieldT> unpacked_addresses;
    pb_variable_array<FieldT> unpacked_leaves;
    std::string annotation_prefix = "roll up";
    // Number of transactions in the batch.
    int noTx;
    // One tx gadget per transaction (HashT = sha256_ethereum).
    std::vector<std::shared_ptr<tx<FieldT, HashT>>> transactions;

    // Wires up the batch: per-transaction public keys (as bit arrays),
    // Merkle paths/roots before and after each update, signature parts
    // (r_x, r_y, S), and the packed public-input arrays.
    roll_up(protoboard<FieldT> &pb,
            std::vector<pb_variable_array<FieldT>> &pub_key_x_bin,
            std::vector<pb_variable_array<FieldT>> &pub_key_y_bin,
            int tree_depth, std::vector<pb_variable_array<FieldT>> address_bits_va,
            std::vector<std::shared_ptr<digest_variable<FieldT>>> root_digest_old,
            std::vector<std::shared_ptr<digest_variable<FieldT>>> root_digest_new,
            std::vector<std::vector<merkle_authentication_node>> path_old, std::vector<std::vector<merkle_authentication_node>> path_new,
            std::vector<pb_variable_array<FieldT>> rhs_leaf,
            std::vector<pb_variable_array<FieldT>> S, std::vector<std::shared_ptr<digest_variable<FieldT>>> new_leaf,
            std::vector<pb_variable_array<FieldT>> r_x_bin, std::vector<pb_variable_array<FieldT>> r_y_bin,
            pb_variable_array<FieldT> old_root , pb_variable_array<FieldT> new_root, pb_variable_array<FieldT> leaves_data_availability,
            pb_variable_array<FieldT> leaves_addresses_data_availability,
            int noTx,
            const std::string &annotation_prefix);
    void generate_r1cs_constraints();
    void generate_r1cs_witness();
};
} // libsnark
#include <roll_up.tcc>
================================================
FILE: src/roll_up.tcc
================================================
/*
copyright 2018 to the roll_up Authors
This file is part of roll_up.
roll_up is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
roll_up is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with roll_up. If not, see <https://www.gnu.org/licenses/>.
*/
namespace libsnark {
// Wires up the batched circuit: concatenates per-transaction data for the
// data-availability packing gadgets, then instantiates one tx gadget per
// transaction, chaining root_digest_new[i-1] into transaction i.
template<typename FieldT>
roll_up<FieldT>::roll_up(protoboard<FieldT> &pb,
std::vector<pb_variable_array<FieldT>> &pub_key_x_bin,
std::vector<pb_variable_array<FieldT>> &pub_key_y_bin,
int tree_depth, std::vector<pb_variable_array<FieldT>> address_bits_va, std::vector<std::shared_ptr<digest_variable<FieldT>>> root_digest_old,
std::vector<std::shared_ptr<digest_variable<FieldT>>> root_digest_new, std::vector<std::vector<merkle_authentication_node>> path_old,
std::vector<std::vector<merkle_authentication_node>> path_new, std::vector<pb_variable_array<FieldT>> rhs_leaf,
std::vector<pb_variable_array<FieldT>> S, std::vector<std::shared_ptr<digest_variable<FieldT>>> new_leaf,
std::vector<pb_variable_array<FieldT>> r_x_bin, std::vector<pb_variable_array<FieldT>> r_y_bin,
pb_variable_array<FieldT> old_root , pb_variable_array<FieldT> new_root,
pb_variable_array<FieldT> leaves_data_availability, pb_variable_array<FieldT> leaves_addresses_data_availability,
int noTx,
const std::string &annotation_prefix): gadget<FieldT>(pb, annotation_prefix) , noTx(noTx) {
// Concatenate per-tx address bits and new-leaf bits for data availability.
// NOTE(review): the loop stops at noTx-1, so the last transaction's address
// and leaf are never packed -- looks like an off-by-one; confirm.
// NOTE(review): `i` is unsigned while `noTx-1` is int; for noTx == 0 the
// bound wraps around -- confirm callers guarantee noTx >= 1.
for(uint i = 0; i < noTx-1; i++) {
unpacked_addresses.insert(unpacked_addresses.end(), address_bits_va[i].begin(), address_bits_va[i].end());
unpacked_leaves.insert(unpacked_leaves.end(), new_leaf[i]->bits.begin(), new_leaf[i]->bits.end());
}
// Pack the pre-batch root (the first transaction's old root) into `old_root`.
unpacker_old_root.reset(new multipacking_gadget<FieldT>(
pb,
root_digest_old[0]->bits,
old_root,
FieldT::capacity(),
"old root"
));
// Pack the post-batch root into `new_root`.
// NOTE(review): the last transaction writes root_digest_new[noTx-1], but
// this packs root_digest_new[noTx-2] -- suspected off-by-one; confirm.
unpacker_new_root.reset(new multipacking_gadget<FieldT>(
pb,
root_digest_new[noTx-2]->bits,
new_root,
FieldT::capacity(),
"new_root"
));
// Pack the concatenated leaf addresses for data availability.
// NOTE(review): annotation string "new_root" here and below is a
// copy-paste leftover.
unpacker_leaf_addresses.reset(new multipacking_gadget<FieldT>(
pb,
unpacked_addresses,
leaves_addresses_data_availability,
FieldT::capacity(),
"new_root"
));
// Pack the concatenated new-leaf hashes for data availability.
unpacker_leaf_hashes.reset(new multipacking_gadget<FieldT>(
pb,
unpacked_leaves,
leaves_data_availability,
FieldT::capacity(),
"new_root"
));
// Declare the first protoboard variables as the public input.
// NOTE(review): prove() allocates 2 + 2 + noTx*256 + noTx*256 variables as
// public inputs, which does not obviously match 6 -- confirm this count.
pb.set_input_sizes(6);
transactions.resize(noTx);
// First transaction starts from the externally supplied old root...
transactions[0].reset(new tx<FieldT, HashT>(pb,
pub_key_x_bin[0], pub_key_y_bin[0], tree_depth,address_bits_va[0],root_digest_old[0],
root_digest_new[0],path_old[0],path_new[0], rhs_leaf[0], S[0] , new_leaf[0] , r_x_bin[0], r_y_bin[0],
"tx i"
));
// ...and every later transaction starts from the previous one's new root.
for (int i =1; i<noTx; i++) {
transactions[i].reset(new tx<FieldT, HashT>(pb,
pub_key_x_bin[i], pub_key_y_bin[i], tree_depth,address_bits_va[i],root_digest_new[i-1],
root_digest_new[i],path_old[i],path_new[i], rhs_leaf[i], S[i] , new_leaf[i] , r_x_bin[i], r_y_bin[i],
"tx i"
));
}
}
// Adds every constraint in the batch: each chained tx gadget first (in batch
// order), then the four packing gadgets, with bitness enforcement enabled.
template<typename FieldT>
void roll_up<FieldT>::generate_r1cs_constraints() {
    for (auto &transaction : transactions) {
        transaction->generate_r1cs_constraints();
    }
    unpacker_old_root->generate_r1cs_constraints(true);
    unpacker_new_root->generate_r1cs_constraints(true);
    unpacker_leaf_addresses->generate_r1cs_constraints(true);
    unpacker_leaf_hashes->generate_r1cs_constraints(true);
}
// Computes the witness for the batch: each tx gadget in chain order, then the
// packed public-input values derived from the resulting bit assignments.
template<typename FieldT>
void roll_up<FieldT>::generate_r1cs_witness() {
    for (auto &transaction : transactions) {
        transaction->generate_r1cs_witness();
    }
    unpacker_old_root->generate_r1cs_witness_from_bits();
    unpacker_new_root->generate_r1cs_witness_from_bits();
    unpacker_leaf_addresses->generate_r1cs_witness_from_bits();
    unpacker_leaf_hashes->generate_r1cs_witness_from_bits();
}
}
================================================
FILE: src/roll_up_wrapper.cpp
================================================
/*
copyright 2018 to the roll_up Authors
This file is part of roll_up.
roll_up is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
roll_up is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with roll_up. If not, see <https://www.gnu.org/licenses/>.
*/
//hash
#include "roll_up_wrapper.hpp"
#include <export.cpp>
#include <roll_up.hpp>
using namespace libsnark;
using namespace libff;
typedef sha256_ethereum HashT;
#include <iostream>
// Intended trusted-setup entry point: build the circuit for `noTx`
// transactions and write out the proving / verification keys.
// NOTE(review): the circuit construction and key generation are commented
// out below, so this function currently only allocates protoboard variables
// and ignores pkOutput / vkOuput entirely.
void genKeys(int noTx, char* pkOutput, char* vkOuput) {
libff::alt_bn128_pp::init_public_params();
protoboard<FieldT> pb;
// Constant-zero protoboard variable, constrained-by-assignment to 0.
pb_variable<FieldT> ZERO;
ZERO.allocate(pb, "ZERO");
pb.val(ZERO) = 0;
std::shared_ptr<roll_up<FieldT>> transactions;
// NOTE(review): path is constructed with size noTx, so the resize below is
// a no-op.
std::vector<std::vector<merkle_authentication_node>> path(noTx);
path.resize(noTx);
// Per-transaction digests and bit arrays; one entry per transaction.
std::vector<std::shared_ptr<digest_variable<FieldT>>> root_digest_old(noTx);
std::vector<std::shared_ptr<digest_variable<FieldT>>> root_digest_new(noTx);
std::vector<std::shared_ptr<digest_variable<FieldT>>> new_leaf(noTx);
std::vector<pb_variable_array<FieldT>> pub_key_x_bin(noTx);
std::vector<pb_variable_array<FieldT>> pub_key_y_bin(noTx);
std::vector<pb_variable_array<FieldT>> address_bits_va(noTx);
std::vector<pb_variable_array<FieldT>> rhs_leaf(noTx);
// Signature-related variables.
std::vector<pb_variable_array<FieldT>> S(noTx);
std::vector<pb_variable_array<FieldT>> pk_x_bin(noTx);
std::vector<pb_variable_array<FieldT>> pk_y_bin(noTx);
std::vector<pb_variable_array<FieldT>> r_x_bin(noTx);
std::vector<pb_variable_array<FieldT>> r_y_bin(noTx);
for(int k = 0 ; k < noTx; k++) {
root_digest_old[k].reset(new digest_variable<FieldT>(pb, 256, "root_digest_old"));
root_digest_new[k].reset(new digest_variable<FieldT>(pb, 256, "root_digest_new"));
new_leaf[k].reset(new digest_variable<FieldT>(pb, 256, "new leaf"));
pub_key_x_bin[k].allocate(pb,256,"pub_key_x_bin");
pub_key_y_bin[k].allocate(pb,256,"pub_key_y_bin");
address_bits_va[k].allocate(pb, 256, "address_bits");
// NOTE(review): annotation "pub_key_y_bin" is a copy-paste leftover.
rhs_leaf[k].allocate(pb,256,"pub_key_y_bin");
S[k].allocate(pb, 256, FMT("annotation_prefix", " scaler to multiply by"));
pk_x_bin[k].allocate(pb, 256, FMT("annotation_prefix", " scaler to multiply by"));
pk_y_bin[k].allocate(pb, 256, FMT("annotation_prefix", " scaler to multiply by"));
r_x_bin[k].allocate(pb, 256, FMT("annotation_prefix", " scaler to multiply by"));
r_y_bin[k].allocate(pb, 256, FMT("annotation_prefix", " scaler to multiply by"));
}
/* transactions.reset( new roll_up <FieldT> (pb, pub_key_x_bin, pub_key_y_bin, tree_depth,
address_bits_va, root_digest_old, root_digest_new,
path, path, rhs_leaf, S, new_leaf, r_x_bin, r_y_bin, noTx ,"Confirm tx"));
transactions->generate_r1cs_constraints();
r1cs_ppzksnark_keypair<libff::alt_bn128_pp> keypair = generateKeypair(pb.get_constraint_system());
//save keys
vk2json(keypair, "../keys/vk.json");
writeToFile("../keys/pk.raw", keypair.pk);
writeToFile("../keys/vk.raw", keypair.vk); */
}
// Builds the full roll_up circuit for `noTx` chained transactions, loads the
// caller-supplied witness bits, generates a fresh keypair, produces a proof
// and returns it (with the primary input) serialized as a JSON C-string.
//
// Parameters (C ABI, filled by the Python wrapper):
//   _path          per-tx Merkle authentication path: tree_depth nodes x 256 bits
//   _pub_key_x/_y  per-tx EdDSA public-key coordinates, 256 bits each
//   _root          per-tx Merkle root before the transaction is applied
//   _address_bits  per-tx leaf address, one bit per tree level
//   _rhs_leaf      per-tx right-hand side of the leaf preimage
//   _new_leaf      per-tx replacement leaf digest
//   _r_x/_r_y/_S   per-tx EdDSA signature components
//   _tree_depth    unused; the compile-time constant `tree_depth` is used instead
//   noTx           number of transactions in the batch
// Returns a heap-allocated, NUL-terminated JSON string owned by the caller.
// Side effects: writes ../keys/vk.json and logs progress to stdout.
char* prove(bool _path[][tree_depth][256], bool _pub_key_x[][256], bool _pub_key_y[][256] , bool _root[][256],
bool _address_bits[][tree_depth], bool _rhs_leaf[][256],
bool _new_leaf[][256], bool _r_x[][256], bool _r_y[][256] , bool _S[][256], int _tree_depth, int noTx) {
    libff::alt_bn128_pp::init_public_params();
    // Zeroed 256-bit template used to size each Merkle path node before the
    // real bits are copied in.
    libff::bit_vector init(256, false);
    // Per-transaction witness bits, converted from the raw C arrays.
    std::vector<libff::bit_vector> pub_key_x(noTx);
    std::vector<libff::bit_vector> pub_key_y(noTx);
    std::vector<libff::bit_vector> root(noTx);
    std::vector<libff::bit_vector> rhs_leaf_bits(noTx);
    std::vector<libff::bit_vector> new_leaf_bits(noTx);
    std::vector<libff::bit_vector> r_x_bits(noTx);
    std::vector<libff::bit_vector> r_y_bits(noTx);
    std::vector<libff::bit_vector> S_bits(noTx);
    std::vector<libff::bit_vector> address_bits(noTx);
    std::vector<std::vector<merkle_authentication_node>> path(noTx);
    for(int k = 0 ; k < noTx; k++) {
        pub_key_x[k].resize(256);
        pub_key_y[k].resize(256);
        root[k].resize(256);
        rhs_leaf_bits[k].resize(256);
        new_leaf_bits[k].resize(256);
        r_x_bits[k].resize(256);
        r_y_bits[k].resize(256);
        S_bits[k].resize(256);
        path[k].resize(tree_depth);
        // Copy the Merkle authentication path, one 256-bit node per level.
        for (int i = tree_depth - 1; i >= 0; i--) {
            path[k][i] = init;
            for (int j = 0; j < 256; j++) {
                path[k][i][j] = _path[k][i][j];
            }
        }
        for (int j = 0; j < 256; j++) {
            pub_key_x[k][j] = _pub_key_x[k][j];
            pub_key_y[k][j] = _pub_key_y[k][j];
            root[k][j] = _root[k][j];
            rhs_leaf_bits[k][j] = _rhs_leaf[k][j];
            new_leaf_bits[k][j] = _new_leaf[k][j];
            r_x_bits[k][j] = _r_x[k][j];
            r_y_bits[k][j] = _r_y[k][j];
            S_bits[k][j] = _S[k][j];
        }
        // Leaf address bits, deepest tree level first (the order the tx
        // gadget consumes them in).
        for (long level = tree_depth - 1; level >= 0; level--) {
            address_bits[k].push_back(_address_bits[k][level]);
        }
    }
    protoboard<FieldT> pb;
    // Public inputs must be allocated first: the packed old/new roots (two
    // field elements each) and the packed data-availability bit strings.
    pb_variable_array<FieldT> old_root;
    pb_variable_array<FieldT> new_root;
    pb_variable_array<FieldT> leaves_data_availability;
    pb_variable_array<FieldT> leaves_addresses_data_availability;
    old_root.allocate(pb, 2, "old_root");
    new_root.allocate(pb, 2, "new_root");
    leaves_data_availability.allocate(pb, noTx*256, "packed");
    leaves_addresses_data_availability.allocate(pb, noTx*256, "packed");
    // Constant-zero variable used when mapping raw bits onto the protoboard.
    pb_variable<FieldT> ZERO;
    ZERO.allocate(pb, "ZERO");
    pb.val(ZERO) = 0;
    // Per-transaction circuit variables.
    std::shared_ptr<roll_up<FieldT>> transactions;
    std::vector<std::shared_ptr<digest_variable<FieldT>>> root_digest_old(noTx);
    std::vector<std::shared_ptr<digest_variable<FieldT>>> root_digest_new(noTx);
    std::vector<std::shared_ptr<digest_variable<FieldT>>> new_leaf(noTx);
    std::vector<pb_variable_array<FieldT>> pub_key_x_bin(noTx);
    std::vector<pb_variable_array<FieldT>> pub_key_y_bin(noTx);
    std::vector<pb_variable_array<FieldT>> address_bits_va(noTx);
    std::vector<pb_variable_array<FieldT>> rhs_leaf(noTx);
    // Signature-related variables.
    std::vector<pb_variable_array<FieldT>> S(noTx);
    std::vector<pb_variable_array<FieldT>> pk_x_bin(noTx);
    std::vector<pb_variable_array<FieldT>> pk_y_bin(noTx);
    std::vector<pb_variable_array<FieldT>> r_x_bin(noTx);
    std::vector<pb_variable_array<FieldT>> r_y_bin(noTx);
    for(int k = 0 ; k < noTx; k++) {
        root_digest_old[k].reset(new digest_variable<FieldT>(pb, 256, "root_digest_old"));
        root_digest_new[k].reset(new digest_variable<FieldT>(pb, 256, "root_digest_new"));
        new_leaf[k].reset(new digest_variable<FieldT>(pb, 256, "new leaf"));
        pub_key_x_bin[k].allocate(pb,256,"pub_key_x_bin");
        pub_key_y_bin[k].allocate(pb,256,"pub_key_y_bin");
        address_bits_va[k].allocate(pb, 256, "address_bits");
        rhs_leaf[k].allocate(pb,256,"rhs_leaf"); // annotation fixed (was a "pub_key_y_bin" copy-paste)
        S[k].allocate(pb, 256, FMT("annotation_prefix", " scaler to multiply by"));
        pk_x_bin[k].allocate(pb, 256, FMT("annotation_prefix", " scaler to multiply by"));
        pk_y_bin[k].allocate(pb, 256, FMT("annotation_prefix", " scaler to multiply by"));
        r_x_bin[k].allocate(pb, 256, FMT("annotation_prefix", " scaler to multiply by"));
        r_y_bin[k].allocate(pb, 256, FMT("annotation_prefix", " scaler to multiply by"));
        // Load the witness bits into the freshly allocated variables.
        S[k].fill_with_bits(pb, S_bits[k]);
        r_x_bin[k].fill_with_bits(pb, r_x_bits[k]);
        r_y_bin[k].fill_with_bits(pb, r_y_bits[k]);
        root_digest_old[k]->bits.fill_with_bits(pb, root[k]);
        pub_key_x_bin[k].fill_with_bits(pb, pub_key_x[k]);
        pub_key_y_bin[k].fill_with_bits(pb, pub_key_y[k]);
        address_bits_va[k] = from_bits(address_bits[k], ZERO);
        rhs_leaf[k].fill_with_bits(pb, rhs_leaf_bits[k]);
        new_leaf[k]->bits.fill_with_bits(pb, new_leaf_bits[k]);
    }
    // The same path array is passed for old and new paths.
    transactions.reset( new roll_up <FieldT> (pb, pub_key_x_bin, pub_key_y_bin, tree_depth,
        address_bits_va, root_digest_old, root_digest_new,
        path, path, rhs_leaf, S, new_leaf, r_x_bin, r_y_bin, old_root, new_root, leaves_data_availability, leaves_addresses_data_availability , noTx ,"Confirm tx"));
    transactions->generate_r1cs_constraints();
    transactions->generate_r1cs_witness();
    std::cout << "is satisfied: " << pb.is_satisfied() << std::endl;
    // NOTE(review): a brand-new keypair is generated on every call; the
    // proving key is never persisted or reused.
    r1cs_ppzksnark_keypair<libff::alt_bn128_pp> keypair = generateKeypair(pb.get_constraint_system());
    vk2json(keypair, "../keys/vk.json");
    r1cs_primary_input <FieldT> primary_input = pb.primary_input();
    std::cout << "primary_input " << primary_input;
    r1cs_auxiliary_input <FieldT> auxiliary_input = pb.auxiliary_input();
    r1cs_ppzksnark_proof<libff::alt_bn128_pp> proof = r1cs_ppzksnark_prover<libff::alt_bn128_pp>(keypair.pk, primary_input, auxiliary_input);
    auto json = proof_to_json (proof, primary_input, false);
    // Allocate json.size() + 1 bytes: memcpy below copies the NUL terminator
    // too.  The original allocated only json.size() and wrote one byte past
    // the end of the buffer (heap overflow).
    auto result = new char[json.size() + 1];
    memcpy(result, json.c_str(), json.size() + 1);
    return result;
}
================================================
FILE: src/roll_up_wrapper.hpp
================================================
/*
copyright 2018 to the roll_up Authors
This file is part of roll_up.
roll_up is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
roll_up is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with roll_up. If not, see <https://www.gnu.org/licenses/>.
*/
// C ABI exposed to the Python wrapper (loaded via ctypes).
#ifdef __cplusplus
extern "C" {
#endif
#include <stdbool.h>
#include <stdint.h>
// Merkle tree depth baked into the array dimensions below; must match the
// depth used by the Python caller.
const int tree_depth = 2;
char* _sha256Constraints();
char* _sha256Witness();
// Builds the circuit for noTx transactions from raw witness bits, proves it,
// and returns a heap-allocated JSON proof string (ownership passes to caller).
// NOTE(review): the trailing `int tree_depth` parameter shadows the global
// constant in this declaration and is unused by the implementation
// (which names it `_tree_depth`) -- confirm.
char* prove(bool path[][tree_depth][256], bool _pub_key_x[][256], bool _pub_key_y[][256] , bool _root[][256],
bool _address_bits[][tree_depth], bool _rhs_leaf[][256],
bool _new_leaf[][256], bool _r_x[][256], bool _r_y[][256] , bool _S[][256], int tree_depth, int noTx);
// Intended to generate and persist proving / verification keys; the
// implementation is currently a stub (see roll_up_wrapper.cpp).
void genKeys(int noTx, char* pkOutput, char* vkOuput );
// Verifies a proof given its group elements and public inputs, all passed as
// decimal strings.
bool verify( char* vk, char* _g_A_0, char* _g_A_1, char* _g_A_2 , char* _g_A_P_0, char* _g_A_P_1, char* _g_A_P_2,
char* _g_B_1, char* _g_B_0, char* _g_B_3, char* _g_B_2, char* _g_B_5 , char* _g_B_4, char* _g_B_P_0, char* _g_B_P_1, char* _g_B_P_2,
char* _g_C_0, char* _g_C_1, char* _g_C_2, char* _g_C_P_0, char* _g_C_P_1, char* _g_C_P_2,
char* _g_H_0, char* _g_H_1, char* _g_H_2, char* _g_K_0, char* _g_K_1, char* _g_K_2, char* _input0 , char* _input1 , char* _input2, char* _input3,
char* _input4, char* _input5
) ;
#ifdef __cplusplus
} // extern "C"
#endif
================================================
FILE: src/sha256/sha256_ethereum.cpp
================================================
/*
copyright 2018 to the Kobigurk
https://github.com/kobigurk/sha256_ethereum
MIT
*/
#include <iostream>
#include "libsnark/gadgetlib1/gadget.hpp"
#include "libsnark/gadgetlib1/protoboard.hpp"
#include "libff/common/default_types/ec_pp.hpp"
#include <libsnark/common/data_structures/merkle_tree.hpp>
#include <libsnark/gadgetlib1/gadgets/basic_gadgets.hpp>
#include <libsnark/gadgetlib1/gadgets/hashes/hash_io.hpp>
#include <libsnark/gadgetlib1/gadgets/hashes/sha256/sha256_components.hpp>
#include <libsnark/gadgetlib1/gadgets/hashes/sha256/sha256_gadget.hpp>
using namespace libsnark;
using namespace libff;
using std::vector;
//typedef libff::Fr<libff::default_ec_pp> FieldT;
typedef libff::Fr<alt_bn128_pp> FieldT;
// Maps a vector of constant bits onto protoboard variables: each 1-bit
// becomes the global ONE variable, each 0-bit the caller's ZERO variable.
pb_variable_array<FieldT> from_bits(std::vector<bool> bits, pb_variable<FieldT>& ZERO) {
    pb_variable_array<FieldT> result;
    for (const bool bit : bits) {
        result.emplace_back(bit ? ONE : ZERO);
    }
    return result;
}
// Ethereum-compatible SHA-256 gadget for a fixed 512-bit message.
// The message fills exactly one compression block, so the second (padding)
// block is a compile-time constant:
//   hasher1: compress input_block.bits with the standard SHA-256 IV
//   hasher2: compress the constant padding+length block, using hasher1's
//            digest as the IV, producing `output`.
class sha256_ethereum : gadget<FieldT> {
private:
std::shared_ptr<block_variable<FieldT>> block1;
std::shared_ptr<block_variable<FieldT>> block2;
std::shared_ptr<sha256_compression_function_gadget<FieldT>> hasher1;
// Digest after the first compression; feeds hasher2 as its IV.
std::shared_ptr<digest_variable<FieldT>> intermediate_hash;
std::shared_ptr<sha256_compression_function_gadget<FieldT>> hasher2;
public:
// block_length is not used; the gadget always hashes one 512-bit block.
sha256_ethereum(protoboard<FieldT> &pb,
const size_t block_length,
const block_variable<FieldT> &input_block,
const digest_variable<FieldT> &output,
const std::string &annotation_prefix) : gadget<FieldT>(pb, "sha256_ethereum") {
intermediate_hash.reset(new digest_variable<FieldT>(pb, 256, "intermediate"));
// Constant-zero variable used to encode the 0-bits of the padding block.
pb_variable<FieldT> ZERO;
ZERO.allocate(pb, "ZERO");
pb.val(ZERO) = 0;
// Second compression block: SHA-256 padding (a single 1-bit then zeros)
// followed by the 64-bit big-endian message length (512).
pb_variable_array<FieldT> length_padding =
from_bits({
// padding
1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
// length of message: 512 bits, 64-bit big-endian (the 1 below is bit 2^9)
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,
0,0,0,0,0,0,0,0
}, ZERO);
/* block2.reset(new block_variable<FieldT>(pb, {
length_padding
}, "block2"));
*/
// Standard SHA-256 initial hash values.
pb_linear_combination_array<FieldT> IV = SHA256_default_IV(pb);
hasher1.reset(new sha256_compression_function_gadget<FieldT>(
pb,
IV,
input_block.bits,
*intermediate_hash,
"hasher1"));
// Chain the first digest in as the IV of the second compression.
pb_linear_combination_array<FieldT> IV2(intermediate_hash->bits);
hasher2.reset(new sha256_compression_function_gadget<FieldT>(
pb,
IV2,
length_padding,
output,
"hasher2"));
}
// Adds both compression gadgets' constraints.  ensure_output_bitness is
// deliberately ignored (see the UNUSED call).
void generate_r1cs_constraints(const bool ensure_output_bitness) {
libff::UNUSED(ensure_output_bitness);
hasher1->generate_r1cs_constraints();
hasher2->generate_r1cs_constraints();
}
// Computes both compression digests from the already-assigned input bits.
void generate_r1cs_witness() {
hasher1->generate_r1cs_witness();
hasher2->generate_r1cs_witness();
}
static size_t get_digest_len()
{
return 256;
}
// Convenience: hash a raw 512-bit vector on a throwaway protoboard and
// return the resulting digest bits.
static libff::bit_vector get_hash(const libff::bit_vector &input)
{
protoboard<FieldT> pb;
block_variable<FieldT> input_variable(pb, SHA256_block_size, "input");
digest_variable<FieldT> output_variable(pb, SHA256_digest_size, "output");
sha256_ethereum f(pb, SHA256_block_size, input_variable, output_variable, "f");
input_variable.generate_r1cs_witness(input);
f.generate_r1cs_witness();
return output_variable.get_digest();
}
static size_t expected_constraints(const bool ensure_output_bitness)
{
libff::UNUSED(ensure_output_bitness);
return 54560; /* hardcoded for now */
}
};
// Packs a big-endian bit list into words of `wordsize` bits each; a final
// partial word is right-padded with zeros.
// Note: when bit_list.size() is an exact multiple of wordsize, one trailing
// all-zero word is still emitted (iterations = size/wordsize + 1) -- this
// matches the original upstream behaviour and is relied upon by callers.
vector<unsigned long> bit_list_to_ints(vector<bool> bit_list, const size_t wordsize) {
    vector<unsigned long> words;
    const size_t n_words = bit_list.size() / wordsize + 1;
    for (size_t w = 0; w < n_words; ++w) {
        unsigned long word = 0;
        for (size_t b = 0; b < wordsize; ++b) {
            const size_t idx = w * wordsize + b;
            if (idx == bit_list.size()) break;
            if (bit_list[idx]) {
                word += 1ul << (wordsize - 1 - b);
            }
        }
        words.push_back(word);
    }
    return words;
}
================================================
FILE: src/tx.hpp
================================================
/*
copyright 2018 to the roll_up Authors
This file is part of roll_up.
roll_up is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
roll_up is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with roll_up. If not, see <https://www.gnu.org/licenses/>.
*/
#include <cassert>
#include <memory>
#include <libsnark/gadgetlib1/gadget.hpp>
#include "baby_jubjub_ecc/main.cpp"
namespace libsnark {
// Single roll_up transaction gadget.
// Ties together: hashing the public key, hashing it with the rhs data into
// the current leaf, hashing old leaf || new leaf into the signed message,
// verifying the EdDSA signature over that message, and proving a Merkle
// tree update from root_digest_old to root_digest_new at address_bits_va.
template<typename FieldT, typename HashT>
class tx: public gadget<FieldT> {
private:
/* no internal variables */
public:
// NOTE(review): a and d are shadowed by constructor locals of the same
// name in tx.tcc and appear unused as members -- confirm before removing.
pb_variable<FieldT> a;
pb_variable<FieldT> d;
int tree_depth;
//intermediate variables
pb_variable_array<FieldT> pub_key_x_bin;
pb_variable_array<FieldT> pub_key_y_bin;
std::string annotation_prefix = "roll up";
// Internal gadgets and digests built by the constructor.
std::shared_ptr<HashT> public_key_hash;
std::shared_ptr<HashT> leaf_hash;
std::shared_ptr<HashT> message_hash;
std::shared_ptr<digest_variable<FieldT>> lhs_leaf;
pb_variable_array<FieldT> rhs_leaf;
std::shared_ptr<digest_variable<FieldT>> leaf;
std::shared_ptr<digest_variable<FieldT>> root_digest_old;
std::shared_ptr<digest_variable<FieldT>> root_digest_calculated;
std::shared_ptr<digest_variable<FieldT>> root_digest_new;
std::shared_ptr<digest_variable<FieldT>> message;
std::shared_ptr<merkle_authentication_path_variable<FieldT, HashT>> path_var_old;
std::shared_ptr<merkle_authentication_path_variable<FieldT, HashT>> path_var_new;
// NOTE(review): naming is swapped relative to the types -- `ml` is the
// update gadget, `ml_update` is a read gadget that is never initialized
// in tx.tcc; confirm.
std::shared_ptr<merkle_tree_check_update_gadget<FieldT, HashT>> ml;
std::shared_ptr<merkle_tree_check_read_gadget<FieldT, HashT>> ml_update;
std::vector<merkle_authentication_node> path_old;
std::vector<merkle_authentication_node> path_new;
pb_variable_array<FieldT> address_bits_va;
// EdDSA signature verification gadget (from the baby_jubjub_ecc dependency).
std::shared_ptr<eddsa<FieldT, HashT>> jubjub_eddsa;
std::shared_ptr<multipacking_gadget<FieldT>> unpacker_pub_key_x;
std::shared_ptr<multipacking_gadget<FieldT>> unpacker_pub_key_y;
std::shared_ptr <block_variable<FieldT>> input_variable;
std::shared_ptr <block_variable<FieldT>> input_variable2;
std::shared_ptr <block_variable<FieldT>> input_variable3;
// Packed (field-element) forms of the public key coordinates.
pb_variable_array<FieldT> pub_key_x;
pb_variable_array<FieldT> pub_key_y;
std::shared_ptr<digest_variable<FieldT>> new_leaf;
pb_variable<FieldT> ZERO;
pb_variable<FieldT> ONE_test;
tx(protoboard<FieldT> &pb,
pb_variable_array<FieldT> &pub_key_x_bin,
pb_variable_array<FieldT> &pub_key_y_bin,
int tree_depth, pb_variable_array<FieldT> address_bits_va, std::shared_ptr<digest_variable<FieldT>> root_digest_old,
std::shared_ptr<digest_variable<FieldT>> root_digest_new,
std::vector<merkle_authentication_node> path_old, std::vector<merkle_authentication_node> path_new, pb_variable_array<FieldT> rhs_leaf,
pb_variable_array<FieldT> S, std::shared_ptr<digest_variable<FieldT>> new_leaf, pb_variable_array<FieldT> r_x_bin, pb_variable_array<FieldT> r_y_bin,
const std::string &annotation_prefix);
void generate_r1cs_constraints();
void generate_r1cs_witness();
};
} // libsnark
#include <tx.tcc>
================================================
FILE: src/tx.tcc
================================================
/*
copyright 2018 to the roll_up Authors
This file is part of roll_up.
roll_up is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
roll_up is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with roll_up. If not, see <https://www.gnu.org/licenses/>.
*/
namespace libsnark {
// Builds the per-transaction sub-circuit.  Hash chain (all HashT, i.e.
// sha256_ethereum over two 256-bit halves):
//   lhs_leaf = H(pub_key_x_bin || pub_key_y_bin)
//   leaf     = H(lhs_leaf || rhs_leaf)
//   message  = H(leaf || new_leaf)
// followed by an EdDSA check of `message` and a Merkle update proof.
template<typename FieldT, typename HashT>
tx<FieldT,HashT>::tx(protoboard<FieldT> &pb,
pb_variable_array<FieldT> &pub_key_x_bin,
pb_variable_array<FieldT> &pub_key_y_bin,
int tree_depth, pb_variable_array<FieldT> address_bits_va, std::shared_ptr<digest_variable<FieldT>> root_digest_old,
std::shared_ptr<digest_variable<FieldT>> root_digest_new,
std::vector<merkle_authentication_node> path_old, std::vector<merkle_authentication_node> path_new, pb_variable_array<FieldT> rhs_leaf,
pb_variable_array<FieldT> S, std::shared_ptr<digest_variable<FieldT>> new_leaf, pb_variable_array<FieldT> r_x_bin, pb_variable_array<FieldT> r_y_bin,
const std::string &annotation_prefix): gadget<FieldT>(pb, annotation_prefix) ,
pub_key_x_bin(pub_key_x_bin),
pub_key_y_bin(pub_key_y_bin) , tree_depth(tree_depth), path_old(path_old),
path_new(path_new), address_bits_va(address_bits_va), rhs_leaf(rhs_leaf),
root_digest_old(root_digest_old), root_digest_new(root_digest_new), new_leaf(new_leaf) {
// NOTE(review): these locals shadow the members tx::a / tx::d declared in
// tx.hpp; the eddsa gadget below receives the locals.
pb_variable<FieldT> base_x;
pb_variable<FieldT> base_y;
pb_variable<FieldT> a;
pb_variable<FieldT> d;
//public key
pb_variable<FieldT> pub_x;
pb_variable<FieldT> pub_y;
base_x.allocate(pb, "base x");
base_y.allocate(pb, "base y");
pub_x.allocate(pb, "pub_x");
pub_y.allocate(pb, "pub_y");
a.allocate(pb, "a");
d.allocate(pb, "d");
// Baby Jubjub twisted-Edwards base point and curve coefficients
// (presumably matching the baby_jubjub_ecc dependency -- confirm there).
pb.val(base_x) = FieldT("17777552123799933955779906779655732241715742912184938656739573121738514868268");
pb.val(base_y) = FieldT("2626589144620713026669568689430873010625803728049924121243784502389097019475");
pb.val(a) = FieldT("168700");
pb.val(d) = FieldT("168696");
// Two field elements each for the packed public key coordinates.
// NOTE(review): annotation "ZERO" is a copy-paste leftover.
pub_key_x.allocate(pb,2, "ZERO");
pub_key_y.allocate(pb,2, "ZERO");
ZERO.allocate(pb, "ZERO");
pb.val(ZERO) = 0;
lhs_leaf.reset(new digest_variable<FieldT>(pb, 256, "lhs_leaf"));
// NOTE(review): annotation "lhs_leaf" reused for `leaf` -- copy-paste.
leaf.reset(new digest_variable<FieldT>(pb, 256, "lhs_leaf"));
message.reset(new digest_variable<FieldT>(pb, 256, "message digest"));
// Hash inputs: each block is two 256-bit halves concatenated.
input_variable.reset(new block_variable<FieldT>(pb, {pub_key_x_bin, pub_key_y_bin}, "input_variable"));
input_variable2.reset(new block_variable<FieldT>(pb, {lhs_leaf->bits, rhs_leaf}, "input_variable"));
public_key_hash.reset(new sha256_ethereum(pb, 256, *input_variable, *lhs_leaf, "pub key hash"));
leaf_hash.reset(new sha256_ethereum(pb, 256, *input_variable2, *leaf, "pub key hash"));
input_variable3.reset(new block_variable<FieldT>(pb, {leaf->bits, new_leaf->bits}, "input_variable"));
message_hash.reset(new sha256_ethereum(pb, 256, *input_variable3, *message, "pub key hash"));
// Pack the 256 public-key bits into two field elements.
// NOTE(review): chunk size is FieldT::capacity() + 1 bits here, unlike the
// FieldT::capacity() used in roll_up.tcc -- confirm this is intentional.
unpacker_pub_key_x.reset(new multipacking_gadget<FieldT>(
pb,
pub_key_x_bin,
pub_key_x,
FieldT::capacity() + 1,
"pack pub key x into var"
));
unpacker_pub_key_y.reset(new multipacking_gadget<FieldT>(
pb,
pub_key_y_bin,
pub_key_y,
FieldT::capacity() + 1,
"pack pub key y into var"
));
path_var_old.reset(new merkle_authentication_path_variable<FieldT, HashT> (pb, tree_depth, "path_var" ));
path_var_new.reset(new merkle_authentication_path_variable<FieldT, HashT> (pb, tree_depth, "path_var" ));
// Merkle update proof: replacing `leaf` with `new_leaf` at address_bits_va
// takes the tree from root_digest_old to root_digest_new.
ml.reset(new merkle_tree_check_update_gadget<FieldT, HashT>(pb, tree_depth, address_bits_va, *leaf, *root_digest_old, *path_var_old, *new_leaf, *root_digest_new, *path_var_new, ONE, "ml"));
// EdDSA verification of (r, S) over `message` under the public key.
jubjub_eddsa.reset(new eddsa<FieldT, HashT> (pb,a,d, pub_key_x_bin, pub_key_y_bin, base_x,base_y,r_x_bin, r_y_bin, message->bits, S));
}
// Adds all constraints for one transaction: EdDSA signature check, the three
// hash preimage gadgets, public-key packing (with bitness), path/digest
// bitness, and the Merkle update proof.
template<typename FieldT, typename HashT>
void tx<FieldT, HashT>::generate_r1cs_constraints() {
jubjub_eddsa->generate_r1cs_constraints();
public_key_hash->generate_r1cs_constraints(true);
leaf_hash->generate_r1cs_constraints(true);
message_hash->generate_r1cs_constraints(true);
unpacker_pub_key_x->generate_r1cs_constraints(true);
unpacker_pub_key_y->generate_r1cs_constraints(true);
path_var_old->generate_r1cs_constraints();
path_var_new->generate_r1cs_constraints();
root_digest_old->generate_r1cs_constraints();
root_digest_new->generate_r1cs_constraints();
ml->generate_r1cs_constraints();
// NOTE(review): the explicit root-equality constraints below were disabled;
// `ml` is constructed over root_digest_old/new so it plausibly already ties
// them together, but confirm that was the intent before deleting this.
//for(int i = 0 ; i < 255; i++) {
// this->pb.add_r1cs_constraint(r1cs_constraint<FieldT>(1, root_digest_old->bits[i], root_digest_calculated->bits[i]),
// FMT(annotation_prefix, " root digests equal"));
//}
}
// Computes the witness for one transaction.  Order matters: the hash gadgets
// run first so that leaf / new_leaf / message bits are available to the
// Merkle and signature gadgets below.
template<typename FieldT, typename HashT>
void tx<FieldT, HashT>::generate_r1cs_witness() {
public_key_hash->generate_r1cs_witness();
leaf_hash->generate_r1cs_witness();
message_hash->generate_r1cs_witness();
unpacker_pub_key_x->generate_r1cs_witness_from_bits();
unpacker_pub_key_y->generate_r1cs_witness_from_bits();
// Recover the integer leaf address from its bit decomposition.
// NOTE(review): as_ulong() truncates to 64 bits -- fine while
// tree_depth <= 64; confirm no deeper trees are planned.
auto address = address_bits_va.get_field_element_from_bits(this->pb);
path_var_old->generate_r1cs_witness(address.as_ulong(), path_old);
path_var_new->generate_r1cs_witness(address.as_ulong(), path_new);
ml->generate_r1cs_witness();
jubjub_eddsa->generate_r1cs_witness();
// Disabled debug dumps kept from development.
/*
std::cout << " leaf " ;
for(uint i =0;i<256;i++) {
std::cout << " , " << this->pb.lc_val(leaf->bits[i]);
}
std::cout << "new leaf " ;
for(uint i =0;i<256;i++) {
std::cout << " , " << this->pb.lc_val(new_leaf->bits[i]);
}
std::cout << "message " ;
for(uint i =0;i<256;i++) {
std::cout << " , " << this->pb.lc_val(message->bits[i]);
}
std::cout << " pub_key_x " << this->pb.lc_val(pub_key_x[0]) << " " << this->pb.lc_val(pub_key_x[1]) << std::endl;
std::cout << " pub_key_y " << this->pb.lc_val(pub_key_y[0]) << " " << this->pb.lc_val(pub_key_y[1]) << std::endl;
*/
// NOTE(review): this live debug print of all 256 pubkey bits runs on every
// witness generation; consider removing for production.
std::cout << "pub_key_x " ;
for(uint i =0;i<256;i++) {
std::cout << " , " << this->pb.lc_val(pub_key_x_bin[i]);
}
}
}
================================================
FILE: tests/test.py
================================================
'''
copyright 2018 to the roll_up Authors
This file is part of roll_up.
roll_up is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
roll_up is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with roll_up. If not, see <https://www.gnu.org/licenses/>.
'''
import sys
sys.path.insert(0, '../pythonWrapper')
sys.path.insert(0, "../depends/baby_jubjub_ecc/tests")
sys.path.insert(0, '../contracts')
from contract_deploy import contract_deploy, verify
from helper import *
from utils import getSignature, createLeaf, hashPadded, libsnark2python, normalize_proof, hex2int
import ed25519 as ed
from web3 import Web3, HTTPProvider, TestRPCProvider
# Target RPC host: first CLI argument when given, otherwise localhost.
if len(sys.argv) > 1:
    host = sys.argv[1]
else:
    host = "localhost"
# Web3 client pointed at the node's JSON-RPC endpoint on port 8545.
w3 = Web3(HTTPProvider("http://" + host + ":8545"))
if __name__ == "__main__":
    # End-to-end roll_up test: build a chain of signed leaf transitions,
    # produce a zkSNARK proof via the native wrapper, check the proof's
    # public inputs against locally computed Merkle roots, then deploy the
    # verifier contract and verify on-chain.
    pk_output = "../zksnark_element/pk.raw"   # Prover key
    vk_output = "../zksnark_element/vk.json"  # Verifier key
    #genKeys(c.c_int(noTx), c.c_char_p(pk_output.encode()) , c.c_char_p(vk_output.encode()))

    # Per-transaction accumulators. noTx and tree_depth are presumably
    # provided by the `helper` star import above — TODO confirm.
    pub_x = []
    pub_y = []
    leaves = []
    R_x = []
    R_y = []
    S = []
    old_leaf = []
    new_leaf = []
    rhs_leaf = []  # Message
    address = []
    public_key = []
    sk = []
    fee = 0

    # Generate random private key
    sk.append(genSalt(64))
    # Public key from private key
    public_key.append(ed.publickey(sk[0]))
    # Empty right handside of first leaf
    rhs_leaf.append(hashPadded("0"*64 , "0"*64)[2:])

    # Iterate over transactions on the merkle tree
    for j in range (1,noTx + 1):
        leaves.append([])
        # create a random pub key from priv key
        sk.append(genSalt(64))
        public_key.append(ed.publickey(sk[j]))
        # create a random new leaf
        # This is just a filler message for test purpose (e.g. 11111111... , 22222211111...)
        rhs_leaf.append(hashPadded(hex(j)[2]*64 , "1"*64)[2:])
        # The old leaf is previous pubkey + previous message
        old_leaf.append(createLeaf(public_key[j-1], rhs_leaf[j-1]))
        # The new leaf is current pubkey with current message
        new_leaf.append(createLeaf(public_key[j], rhs_leaf[j]))
        # The message to sign is the previous leaf with the new leaf
        message = hashPadded(old_leaf[j-1], new_leaf[j-1])
        # Remove '0x' from byte
        message = message[2:]
        # Obtain Signature
        r,s = getSignature(message, sk[j - 1], public_key[j-1])
        # check the signature is correct
        ed.checkvalid(r, s, message, public_key[j-1])
        # Now we reverse the public key by bit:
        # we have to reverse the bits so that the
        # unpacker in libsnark will return us the
        # correct field element
        # To put into little-endian
        pub_key_x = hex(int(''.join(str(e) for e in hexToBinary(hex(public_key[j-1][0]))[::-1]),2))
        pub_key_y = hex(int(''.join(str(e) for e in hexToBinary(hex(public_key[j-1][1]))[::-1]),2))
        r[0] = hex(int(''.join(str(e) for e in hexToBinary(hex(r[0]))[::-1]),2))
        r[1] = hex(int(''.join(str(e) for e in hexToBinary(hex(r[1]))[::-1]),2))
        # Two r on x and y axis of curve
        R_x.append(r[0])
        R_y.append(r[1])
        # Store s
        S.append(s)
        # Store public key
        pub_x.append(pub_key_x)
        pub_y.append(pub_key_y)
        leaves[j-1].append(old_leaf[j-1])
        address.append(0)

    # Get zk proof and merkle root
    proof, root = genWitness(leaves, pub_x, pub_y, address, tree_depth,
                             rhs_leaf, new_leaf , R_x, R_y, S)
    proof = normalize_proof(proof)
    #root , merkle_tree = utils.genMerkelTree(tree_depth, leaves[0])
    try:
        # Public inputs of the proof, pairwise-packed: [0:2] initial root,
        # [2:4] final root, [4:6] first leaf — TODO confirm layout against
        # the C++ wrapper.
        inputs = libsnark2python(proof["input"])
        proof_input_root = libsnark2python(proof["input"][:2])[0]
        assert proof_input_root == root, "Proof input {} not matching the root {}".format(proof_input_root, root)
        # calculate final root
        root_final , merkle_tree = utils.genMerkelTree(tree_depth, leaves[-1])
        proof_input_root_final = libsnark2python(proof["input"][2:4])[0]
        # BUGFIX: the format string was missing the second placeholder, so
        # root_final was silently dropped from the assertion message.
        assert proof_input_root_final == root_final, "Proof input final root {} not matching the final root {}".format(proof_input_root_final, root_final)
        first_leaf = libsnark2python(proof["input"][4:6])[0]
        # BUGFIX: same missing-placeholder problem as above.
        assert first_leaf == "0x" + leaves[1][0], "First leaf {} is not matching the leaf {}".format(first_leaf, leaves[1][0])
        contract = contract_deploy(1, "../keys/vk.json", root, host)
        result = verify(contract, proof, host)
        print(result)
        assert result["status"] == 1, "Result status of the verify function not equal to 1, but equals to {}".format(result['status'])
        contract_root = w3.toHex(contract.getRoot())[:65]
        assert contract_root == root_final[:65], "contract root {} not equals to root_final {}".format(contract_root, root_final)
    except Exception as err:
        # BUGFIX: pdb was never imported at module level; without this local
        # import the handler raised NameError and masked the real failure.
        import pdb
        pdb.set_trace()
        raise
gitextract_3ua0xt7z/
├── .dockerignore
├── .gitignore
├── .gitmodules
├── CMakeLists.txt
├── Dockerfile
├── README.md
├── build/
│ └── .gitkeep
├── contracts/
│ ├── Miximus.sol
│ ├── Pairing.sol
│ ├── Verifier.sol
│ ├── contract_deploy.py
│ └── roll_up.sol
├── depends/
│ └── CMakeLists.txt
├── docker-compose.yml
├── keys/
│ └── .gitkeep
├── pythonWrapper/
│ ├── helper.py
│ └── utils.py
├── requirements.txt
├── src/
│ ├── CMakeLists.txt
│ ├── ZoKrates/
│ │ ├── wraplibsnark.cpp
│ │ └── wraplibsnark.hpp
│ ├── export.cpp
│ ├── roll_up.hpp
│ ├── roll_up.tcc
│ ├── roll_up_wrapper.cpp
│ ├── roll_up_wrapper.hpp
│ ├── sha256/
│ │ └── sha256_ethereum.cpp
│ ├── tx.hpp
│ └── tx.tcc
└── tests/
└── test.py
SYMBOL INDEX (58 symbols across 9 files)
FILE: contracts/contract_deploy.py
function compile (line 30) | def compile(tree_depth):
function contract_deploy (line 43) | def contract_deploy(tree_depth, vk_dir, merkle_root, host="localhost"):
function verify (line 95) | def verify(contract, proof, host="localhost"):
FILE: pythonWrapper/helper.py
function binary2ctypes (line 56) | def binary2ctypes(out):
function hexToBinary (line 59) | def hexToBinary(hexString):
function genWitness (line 65) | def genWitness(leaves, public_key_x, public_key_y, address, tree_depth, ...
function genSalt (line 129) | def genSalt(i):
function genNullifier (line 134) | def genNullifier(recvAddress):
FILE: pythonWrapper/utils.py
function hex2int (line 29) | def hex2int(elements):
function normalize_proof (line 35) | def normalize_proof(proof):
function getSignature (line 48) | def getSignature(m,sk,pk):
function createLeaf (line 54) | def createLeaf(public_key , message):
function libsnark2python (line 60) | def libsnark2python (inputs):
function hashPadded (line 86) | def hashPadded(left, right):
function sha256 (line 93) | def sha256(data):
function getUniqueLeaf (line 97) | def getUniqueLeaf(depth):
function genMerkelTree (line 103) | def genMerkelTree(tree_depth, leaves):
function getMerkelRoot (line 115) | def getMerkelRoot(tree_depth, leaves):
function getMerkelProof (line 118) | def getMerkelProof(leaves, index, tree_depth):
function testHashPadded (line 131) | def testHashPadded():
function testGenMerkelTree (line 137) | def testGenMerkelTree():
function testlibsnarkTopython (line 145) | def testlibsnarkTopython():
function testgetMissingLeaf (line 164) | def testgetMissingLeaf():
function testgetMerkelProof (line 171) | def testgetMerkelProof():
FILE: src/ZoKrates/wraplibsnark.cpp
function libsnarkBigintFromBytes (line 26) | libff::bigint<libff::alt_bn128_r_limbs> libsnarkBigintFromBytes(const ui...
function HexStringFromLibsnarkBigint (line 38) | std::string HexStringFromLibsnarkBigint(libff::bigint<libff::alt_bn128_r...
function outputPointG1AffineAsHex (line 54) | std::string outputPointG1AffineAsHex(libff::alt_bn128_G1 _p)
function outputPointG1AffineAsInt (line 68) | std::string outputPointG1AffineAsInt(libff::alt_bn128_G1 _p)
function outputPointG2AffineAsHex (line 78) | std::string outputPointG2AffineAsHex(libff::alt_bn128_G2 _p)
function outputPointG2AffineAsInt (line 91) | std::string outputPointG2AffineAsInt(libff::alt_bn128_G2 _p)
function createConstraintSystem (line 105) | r1cs_ppzksnark_constraint_system<libff::alt_bn128_pp> createConstraintSy...
function generateKeypair (line 150) | r1cs_ppzksnark_keypair<libff::alt_bn128_pp> generateKeypair(const r1cs_p...
function writeToFile (line 156) | void writeToFile(std::string path, T& obj) {
function T (line 168) | T loadFromFile(std::string path) {
function serializeProvingKeyToFile (line 185) | void serializeProvingKeyToFile(r1cs_ppzksnark_proving_key<libff::alt_bn1...
function deserializeProvingKeyFromFile (line 189) | r1cs_ppzksnark_proving_key<libff::alt_bn128_pp> deserializeProvingKeyFro...
function serializeVerificationKeyToFile (line 193) | void serializeVerificationKeyToFile(r1cs_ppzksnark_verification_key<libf...
function exportVerificationKey (line 222) | void exportVerificationKey(r1cs_ppzksnark_keypair<libff::alt_bn128_pp> k...
function printProof (line 256) | void printProof(r1cs_ppzksnark_proof<libff::alt_bn128_pp> proof){
FILE: src/export.cpp
function constraint_to_json (line 68) | void constraint_to_json(linear_combination<FieldT> constraints, std::str...
function array_to_json (line 89) | void array_to_json(protoboard<FieldT> pb, uint input_variables, std::st...
function r1cs_to_json (line 114) | void r1cs_to_json(protoboard<FieldT> pb, uint input_variables, std::stri...
function string (line 157) | string proof_to_json(r1cs_ppzksnark_proof<libff::alt_bn128_pp> proof, r1...
function vk2json (line 226) | void vk2json(r1cs_ppzksnark_keypair<libff::alt_bn128_pp> keypair, std::s...
FILE: src/roll_up.hpp
type libsnark (line 32) | namespace libsnark {
class roll_up (line 35) | class roll_up: public gadget<FieldT> {
FILE: src/roll_up_wrapper.cpp
function genKeys (line 38) | void genKeys(int noTx, char* pkOutput, char* vkOuput) {
FILE: src/sha256/sha256_ethereum.cpp
function from_bits (line 30) | pb_variable_array<FieldT> from_bits(std::vector<bool> bits, pb_variable<...
class sha256_ethereum (line 41) | class sha256_ethereum : gadget<FieldT> {
method sha256_ethereum (line 51) | sha256_ethereum(protoboard<FieldT> &pb,
method generate_r1cs_constraints (line 161) | void generate_r1cs_constraints(const bool ensure_output_bitness) {
method generate_r1cs_witness (line 167) | void generate_r1cs_witness() {
method get_digest_len (line 172) | static size_t get_digest_len()
method get_hash (line 179) | static libff::bit_vector get_hash(const libff::bit_vector &input)
method expected_constraints (line 196) | static size_t expected_constraints(const bool ensure_output_bitness)
function bit_list_to_ints (line 206) | vector<unsigned long> bit_list_to_ints(vector<bool> bit_list, const size...
FILE: src/tx.hpp
type libsnark (line 29) | namespace libsnark {
class tx (line 32) | class tx: public gadget<FieldT> {
Condensed preview — 30 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (117K chars).
[
{
"path": ".dockerignore",
"chars": 11,
"preview": "__pycache__"
},
{
"path": ".gitignore",
"chars": 24,
"preview": "keys/vk.json\n__pycache__"
},
{
"path": ".gitmodules",
"chars": 344,
"preview": "[submodule \"depends/baby_jubjub_ecc\"]\n\tpath = depends/baby_jubjub_ecc\n\turl = https://github.com/barrywhitehat/baby_jubju"
},
{
"path": "CMakeLists.txt",
"chars": 1901,
"preview": "cmake_minimum_required(VERSION 2.8)\n\nproject(roll_up)\n\nset(\n CURVE\n \"ALT_BN128\"\n CACHE\n STRING\n \"Default curve: one"
},
{
"path": "Dockerfile",
"chars": 737,
"preview": "FROM ubuntu:18.04\n\nRUN apt-get update && \\\n apt-get install software-properties-common -y && \\\n add-apt-repository"
},
{
"path": "README.md",
"chars": 6026,
"preview": "# roll_up \n\n[\n\n\n"
},
{
"path": "docker-compose.yml",
"chars": 538,
"preview": "version: \"3\"\n\nservices:\n\n testrpc:\n image: trufflesuite/ganache-cli:v6.1.8\n ports:\n - 8545\n networks:\n "
},
{
"path": "keys/.gitkeep",
"chars": 0,
"preview": ""
},
{
"path": "pythonWrapper/helper.py",
"chars": 4548,
"preview": "\n'''\n copyright 2018 to the roll_up Authors\n\n This file is part of roll_up.\n\n roll_up is free software: you can"
},
{
"path": "pythonWrapper/utils.py",
"chars": 7326,
"preview": "'''\n copyright 2018 to the roll_up Authors\n\n This file is part of roll_up.\n\n roll_up is free software: you can "
},
{
"path": "requirements.txt",
"chars": 43,
"preview": "web3==4.6.0\npy-solc==3.1.0\nbitstring==3.1.5"
},
{
"path": "src/CMakeLists.txt",
"chars": 641,
"preview": "include_directories(.)\n\n\nadd_library(\n roll_up_wrapper\n SHARED\n roll_up_wrapper.cpp\n)\n\ntarget_link_libraries(\n roll_"
},
{
"path": "src/ZoKrates/wraplibsnark.cpp",
"chars": 13673,
"preview": "/**\n * @file wraplibsnark.cpp\n * @author Jacob Eberhardt <jacob.eberhardt@tu-berlin.de\n * @author Dennis Kuhnert <dennis"
},
{
"path": "src/ZoKrates/wraplibsnark.hpp",
"chars": 759,
"preview": "/**\n * @file wraplibsnark.hpp\n * @author Jacob Eberhardt <jacob.eberhardt@tu-berlin.de\n * @author Dennis Kuhnert <dennis"
},
{
"path": "src/export.cpp",
"chars": 10267,
"preview": "/* \n copyright 2018 to the roll_up Authors\n\n This file is part of roll_up.\n\n roll_up is free software: you c"
},
{
"path": "src/roll_up.hpp",
"chars": 2930,
"preview": "/* \n copyright 2018 to the roll_up Authors\n\n This file is part of roll_up.\n\n roll_up is free software: you c"
},
{
"path": "src/roll_up.tcc",
"chars": 4945,
"preview": "/* \n copyright 2018 to the roll_up Authors\n\n This file is part of roll_up.\n\n roll_up is free software: you c"
},
{
"path": "src/roll_up_wrapper.cpp",
"chars": 10481,
"preview": "/* \n copyright 2018 to the roll_up Authors\n\n This file is part of roll_up.\n\n roll_up is free software: you c"
},
{
"path": "src/roll_up_wrapper.hpp",
"chars": 1868,
"preview": "/* \n copyright 2018 to the roll_up Authors\n\n This file is part of roll_up.\n\n roll_up is free software: you c"
},
{
"path": "src/sha256/sha256_ethereum.cpp",
"chars": 6528,
"preview": "/* \n copyright 2018 to the Kobigurk \n https://github.com/kobigurk/sha256_ethereum\n MIT\n*/\n\n\n#include <iostre"
},
{
"path": "src/tx.hpp",
"chars": 3726,
"preview": "/* \n copyright 2018 to the roll_up Authors\n\n This file is part of roll_up.\n\n roll_up is free software: you c"
},
{
"path": "src/tx.tcc",
"chars": 7570,
"preview": "/* \n copyright 2018 to the roll_up Authors\n\n This file is part of roll_up.\n\n roll_up is free software: you c"
},
{
"path": "tests/test.py",
"chars": 5628,
"preview": "''' \n copyright 2018 to the roll_up Authors\n\n This file is part of roll_up.\n\n roll_up is free software: you c"
}
]
About this extraction
This page contains the full source code of the barryWhiteHat/roll_up GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 30 files (108.4 KB), approximately 32.5k tokens, and a symbol index with 58 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.