Repository: barryWhiteHat/roll_up
Branch: master
Commit: 118f3511a7a9
Files: 30
Total size: 108.4 KB
Directory structure:
gitextract_3ua0xt7z/
├── .dockerignore
├── .gitignore
├── .gitmodules
├── CMakeLists.txt
├── Dockerfile
├── README.md
├── build/
│ └── .gitkeep
├── contracts/
│ ├── Miximus.sol
│ ├── Pairing.sol
│ ├── Verifier.sol
│ ├── contract_deploy.py
│ └── roll_up.sol
├── depends/
│ └── CMakeLists.txt
├── docker-compose.yml
├── keys/
│ └── .gitkeep
├── pythonWrapper/
│ ├── helper.py
│ └── utils.py
├── requirements.txt
├── src/
│ ├── CMakeLists.txt
│ ├── ZoKrates/
│ │ ├── wraplibsnark.cpp
│ │ └── wraplibsnark.hpp
│ ├── export.cpp
│ ├── roll_up.hpp
│ ├── roll_up.tcc
│ ├── roll_up_wrapper.cpp
│ ├── roll_up_wrapper.hpp
│ ├── sha256/
│ │ └── sha256_ethereum.cpp
│ ├── tx.hpp
│ └── tx.tcc
└── tests/
└── test.py
================================================
FILE CONTENTS
================================================
================================================
FILE: .dockerignore
================================================
__pycache__
================================================
FILE: .gitignore
================================================
keys/vk.json
__pycache__
================================================
FILE: .gitmodules
================================================
[submodule "depends/baby_jubjub_ecc"]
path = depends/baby_jubjub_ecc
url = https://github.com/barrywhitehat/baby_jubjub_ecc
[submodule "depends/libsnark"]
path = depends/libsnark
url = https://github.com/scipr-lab/libsnark.git
[submodule "src/sha256_ethereum"]
path = src/sha256_ethereum
url = https://github.com/kobigurk/sha256_ethereum
================================================
FILE: CMakeLists.txt
================================================
# Top-level build for the roll_up prover.
# NOTE(review): legacy (CMake 2.8) style throughout — directory-scoped
# add_definitions/include_directories and CMAKE_CXX_FLAGS string appends.
# Modernising to target-based CMake would also require changes in src/ and
# depends/, so this pass only documents the existing behaviour.
cmake_minimum_required(VERSION 2.8)
project(roll_up)

# Elliptic curve libsnark is compiled for; ALT_BN128 matches the ethereum
# pairing precompiles used by contracts/Verifier.sol.
set(
  CURVE
  "ALT_BN128"
  CACHE
  STRING
  "Default curve: one of ALT_BN128, BN128, EDWARDS, MNT4, MNT6"
)

# Location of the git submodules (libsnark, baby_jubjub_ecc).
set(
  DEPENDS_DIR
  "${CMAKE_CURRENT_SOURCE_DIR}/depends"
  CACHE
  STRING
  "Optionally specify the dependency installation directory relative to the source directory (default: inside dependency folder)"
)

# Left empty to get the compiler-specific default chosen below.
set(
  OPT_FLAGS
  ""
  CACHE
  STRING
  "Override C++ compiler optimization flags"
)

option(
  MULTICORE
  "Enable parallelized execution, using OpenMP"
  ON
)
option(
  WITH_PROCPS
  "Use procps for memory profiling"
  ON
)
option(
  VERBOSE
  "Print internal messages"
  ON
)
# NOTE(review): DEBUG is declared but never referenced in this file —
# confirm whether a subdirectory consumes it or whether it is dead.
option(
  DEBUG
  "Enable debugging mode"
  OFF
)
option(
  CPPDEBUG
  "Enable debugging of C++ STL (does not imply DEBUG)"
  OFF
)

if(CMAKE_COMPILER_IS_GNUCXX OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
  # Common compilation flags and warning configuration
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -Wextra -Wfatal-errors -pthread")
  if("${MULTICORE}")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fopenmp")
  endif()
  # Default optimizations flags (to override, use -DOPT_FLAGS=...)
  # NOTE(review): -march=native produces binaries that only run on the build
  # machine's CPU family — acceptable for a prover built in place (and in the
  # Dockerfile), but worth guarding behind an option if binaries are shipped.
  if("${OPT_FLAGS}" STREQUAL "")
    set(OPT_FLAGS "-ggdb3 -O2 -march=native -mtune=native")
  endif()
endif()

# Tell libsnark/baby_jubjub_ecc which curve to instantiate.
add_definitions(-DCURVE_${CURVE})
# NOTE(review): unquoted ${CURVE} works only because the cache guarantees a
# non-empty value; if("${CURVE}" STREQUAL ...) would be the safe form.
if(${CURVE} STREQUAL "BN128")
  add_definitions(-DBN_SUPPORT_SNARK=1)
endif()
if("${VERBOSE}")
  add_definitions(-DVERBOSE=1)
endif()
if("${MULTICORE}")
  add_definitions(-DMULTICORE=1)
endif()
# Required everywhere because src/ builds a shared library that is loaded
# from python via ctypes (libroll_up_wrapper.so).
add_compile_options(-fPIC)
if("${CPPDEBUG}")
  add_definitions(-D_GLIBCXX_DEBUG -D_GLIBCXX_DEBUG_PEDANTIC)
endif()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OPT_FLAGS}")

# procps is used by libsnark for memory profiling; compile it out otherwise.
include(FindPkgConfig)
if("${WITH_PROCPS}")
  pkg_check_modules(PROCPS REQUIRED libprocps)
else()
  add_definitions(-DNO_PROCPS)
endif()

include_directories(.)
add_subdirectory(depends)
add_subdirectory(src)
================================================
FILE: Dockerfile
================================================
# Build environment for the roll_up prover and its python test harness.
FROM ubuntu:18.04

# solc comes from the ethereum PPA; the rest are toolchain and libsnark
# build dependencies (gmp, procps, boost, openssl).
RUN apt-get update && \
    apt-get install software-properties-common -y && \
    add-apt-repository ppa:ethereum/ethereum -y && \
    apt-get update && \
    apt-get install -y \
    wget unzip curl \
    build-essential cmake git libgmp3-dev libprocps-dev python-markdown libboost-all-dev libssl-dev pkg-config python3-pip solc

WORKDIR /root/roll_up
COPY . .
RUN pip3 install -r requirements.txt

# Configure and build the prover out of tree.
# NOTE(review): because of the line continuations, NO_PROCPS=1 ... FEATUREFLAGS
# are passed as make variables to `make install`, and DESTDIR=/usr/local
# normally *prefixes* the install paths (yielding /usr/local/usr/local/...) —
# confirm the intended install location.
RUN cd build \
    && cmake .. \
    && make \
    && DESTDIR=/usr/local make install \
    NO_PROCPS=1 \
    NO_GTEST=1 \
    NO_DOCS=1 \
    CURVE=ALT_BN128 \
    FEATUREFLAGS="-DBINARY_OUTPUT=1 -DMONTGOMERY_OUTPUT=1 -DNO_PT_COMPRESSION=1"

# Make the freshly installed shared libraries visible to ctypes/ld.
ENV LD_LIBRARY_PATH $LD_LIBRARY_PATH:/usr/local/lib
================================================
FILE: README.md
================================================
# roll_up
[](https://gitter.im/barrywhitehat/roll_up?utm_source=share-link&utm_medium=link&utm_campaign=share-link)
Roll_up aggregates transactions so that only a single onchain transaction is required to validate multiple other transactions. The snark checks the signature and applies the transaction to the leaf that the signer owns.
Multiple users create signatures. Provers aggregates these signatures into a snark and use it to update a smart contract on the ethereum blockchain. A malicious prover who does not also have that leafs private key cannot change a leaf. Only the person who controls the private key can.
This is intended to be the database layer of snark-dapp (snapps) where the layers above define more rules about changing and updating the leaves
`roll_up` does not make any rules about what happens in a leaf, what kind of leaves can be created and destroyed. This is the purview of
higher level snapps. Who can add their constraints in `src/roll_up.tcc` in the function `generate_r1cs_constraints()`
## In Depth
The system uses EdDSA signatures defined in [baby_jubjub_ecc](https://github.com/barryWhiteHat/baby_jubjub_ecc) based upon [baby_jubjub](https://github.com/barryWhiteHat/baby_jubjub). It uses sha256 padded to a 512-bit input.
The leaf is defined as follows
```
LEAF
+----------------^----------------+
LHS RHS
+----------------+
Public_key_x public_key_y
```
The leaf is then injected into a merkle tree.
A transaction updates a single leaf in the merkle tree. A transaction takes the following form.
```
1. Public key x and y point
2. The message which is defined as the hash of the old leaf and the new leaf.
MESSAGE
+----------------^----------------+
OLD_LEAF NEW_LEAF
3. the point R and the integer S.
```
In order to update the merkle tree the prover needs to aggregate together X transactions. For each transaction they check
```
1. Takes the merkel root as input from the smart contract (if it is the first iteration) or from the merkle root from the previous
transaction.
2. Find the leaf that matches the message in the merkle tree.
NOTE: If there are two messages that match, both can be updated, as there is no replay protection; this should be solved on the next layer —
this is simply the read and write layer, we do not check what is being written here.
3. Check that the proving key matches the owner of that leaf.
4. Confirm that the signature is correct.
5. Confirm that that leaf is in the merkle tree.
6. Replace it with the new leaf and calculate the new merkle root.
7. Continue until all transactions have been included in a snark
```
The snark can then be included in a transaction to update the merkle root tracked by a smart contract.
## Data availability guarantees
It is important that each prover is able to make merkle proofs for all leaves.
If they cannot these leaves are essentially locked until that information becomes available.
In order to ensure this, we pass every updated leaf to the smart contract so
that data will always be available.
Thus the system has the same data availability guarantees as ethereum.
## Scalability
Gas cost of function call: 23368
Gas cost of throwing an event with a single leaf update : 1840
Although we don't use groth16 currently. This is the cheapest proving system to our knowledge.
groth16 confirm: 560000 including tx cost and input data is ~600000.
The gas limit is 8,000,000 per block. So we can use the rest of the gas to maintain data availability.
8000000 - 600000 = 7400000
We find that 7400000 is the remaining gas in the block.
So we calculate how much we can spend on data availability
7400000 / 1840 ~= 4021.73913043478
4021.73913043478 / 15 = 268 transactions per second
## Proving time
On a laptop with 7 GB of ram and 20 GB of swap space it struggles to aggregate 20 transactions per second. This is a
combination of my hardware limits and cpp code that needs to be improved.
[Wu et al](https://eprint.iacr.org/2018/691) showed that it is possible to distribute
these computations in a way that scales to billions of constraints.
In order to reach the tps described above three approaches exist.
1. Improve the cpp code similar to https://github.com/HarryR/ethsnarks/issues/3 and run it on enterprise hardware.
2. Implementing the full distributed system described by Wu et al.
3. Specialized hardware to create these proofs.
## Distribution
The role of prover can be distributed but it means that each will have to purchase/rent hardware in order to be able to keep up with the longest chain.
There are a few attacks where the fastest prover is able to censor all other provers by constantly updating, so that all competing provers' proofs are constantly out of date.
These problem should be mitigated or solved at the consensus level.
## Running tests
If you want to run at noTx greater than 10 you will need more than 7GB
to add a bunch of swap space https://www.digitalocean.com/community/tutorials/how-to-add-swap-space-on-ubuntu-16-04
### Build everything
```
mkdir keys
git submodule update --init --recursive
mkdir build
cd build
cmake .. && make
```
### Run the tests
NOTE: Make sure you have a node running so the smart contract would be deployed and validate the transaction, you can use
`testrpc` or `ganache-cli`
```
cd ../tests/
python3 test.py
```
### Change the merkle tree depth and number of transactions to be aggregated
You'd need to update two files, and re-build the prover.
In `pythonWrapper/helper.py`
```
tree_depth = 2
noTx = 4
```
In `src/roll_up_wrapper.hpp`
```
const int tree_depth = 2;
```
================================================
FILE: build/.gitkeep
================================================
================================================
FILE: contracts/Miximus.sol
================================================
/*
copyright 2018 to the roll_up Authors
This file is part of roll_up.
roll_up is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
roll_up is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with roll_up. If not, see .
*/
pragma solidity ^0.4.19;
import "./Verifier.sol";
// Tracks a merkle root on chain and lets a prover replace it by submitting a
// zkSNARK proof. NOTE(review): this contract is named roll_up and appears
// byte-identical to the one in contracts/roll_up.sol despite living in
// Miximus.sol — confirm which copy is actually deployed; the duplicate is a
// maintenance hazard.
contract roll_up{
    // Current merkle root of the leaf tree.
    bytes32 root;
    // NOTE(review): never read or written in this contract — presumably a
    // Miximus leftover; candidate for removal.
    mapping (bytes32 => bool) nullifiers;
    // NOTE(review): declared but never emitted here.
    event Withdraw (address);
    // zkSNARK verifier consulted by isTrue().
    Verifier public zksnark_verify;
    // Constructor (pre-0.5 style: function named after the contract).
    function roll_up (address _zksnark_verify, bytes32 _root) {
        zksnark_verify = Verifier(_zksnark_verify);
        root = _root;
    }
    // Checks that the proof's public input carries the current root, asks the
    // verifier to validate the snark, then stores the new root from input[2].
    // libsnark outputs are bit-reversed and limited to 253-bit chunks, hence
    // the reverse()/padZero() massaging on both sides of the comparison.
    function isTrue (
        uint[2] a,
        uint[2] a_p,
        uint[2][2] b,
        uint[2] b_p,
        uint[2] c,
        uint[2] c_p,
        uint[2] h,
        uint[2] k,
        uint[] input
    ) returns (bool) {
        bytes32 _root = padZero(reverse(bytes32(input[0]))); //)merge253bitWords(input[0], input[1]);
        require(_root == padZero(root));
        require(zksnark_verify.verifyTx(a,a_p,b,b_p,c,c_p,h,k,input));
        root = padZero(reverse(bytes32(input[2])));
        return(true);
    }
    // Read accessor for the stored root.
    function getRoot() constant returns(bytes32) {
        return(root);
    }
    // libshark only allows 253 bit chunks in its output
    // to overcome this we merge the first 253 bits (left) with the remaining 3 bits
    // in the next variable (right)
    function merge253bitWords(uint left, uint right) returns(bytes32) {
        right = pad3bit(right);
        uint left_msb = uint(padZero(reverse(bytes32(left))));
        uint left_lsb = uint(getZero(reverse(bytes32(left))));
        right = right + left_lsb;
        uint res = left_msb + right;
        return(bytes32(res));
    }
    // ensure that the 3 bits on the left is actually 3 bits.
    // NOTE(review): the table reads like a 3-bit bit-reversal (1 -> 4,
    // 3 -> 6), but 2 maps to 4 whereas reversing 010 yields 2 — confirm
    // whether the `input == 2` branch is a typo. Only reachable through
    // merge253bitWords, which isTrue currently does not call.
    function pad3bit(uint input) constant returns(uint) {
        if (input == 0)
            return 0;
        if (input == 1)
            return 4;
        if (input == 2)
            return 4;
        if (input == 3)
            return 6;
        return(input);
    }
    // Mask keeping only the lowest 4 bits of x.
    function getZero(bytes32 x) returns(bytes32) {
        //0x1111111111111111111111113fdc3192693e28ff6aee95320075e4c26be03308
        return(x & 0x000000000000000000000000000000000000000000000000000000000000000F);
    }
    // Mask clearing the lowest 4 bits of x.
    function padZero(bytes32 x) returns(bytes32) {
        //0x1111111111111111111111113fdc3192693e28ff6aee95320075e4c26be03308
        return(x & 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF0);
    }
    // Reverses the bit order within one byte using a packed nibble
    // lookup table in `c`.
    function reverseByte(uint a) public pure returns (uint) {
        uint c = 0xf070b030d0509010e060a020c0408000;
        return (( c >> ((a & 0xF)*8)) & 0xF0) +
            (( c >> (((a >> 4)&0xF)*8) + 4) & 0xF);
    }
    // Full bit-order reversal of a 256-bit word: byte order is swapped and
    // the bits inside each byte are reversed via reverseByte().
    function reverse(bytes32 a) public pure returns(bytes32) {
        uint r;
        uint i;
        uint b;
        for (i=0; i<32; i++) {
            b = (uint(a) >> ((31-i)*8)) & 0xff;
            b = reverseByte(b);
            r += b << (i*8);
        }
        return bytes32(r);
    }
}
================================================
FILE: contracts/Pairing.sol
================================================
// This code is taken from https://github.com/JacobEberhardt/ZoKrates
pragma solidity ^0.4.19;
// alt_bn128 curve arithmetic implemented on top of the Byzantium
// precompiled contracts: address 0x06 (point add), 0x07 (scalar mul)
// and 0x08 (pairing check). Taken from ZoKrates.
library Pairing {
    // Affine point on G1.
    struct G1Point {
        uint X;
        uint Y;
    }
    // Encoding of field elements is: X[0] * z + X[1]
    struct G2Point {
        uint[2] X;
        uint[2] Y;
    }
    /// @return the generator of G1
    function P1() internal returns (G1Point) {
        return G1Point(1, 2);
    }
    /// @return the generator of G2
    function P2() internal returns (G2Point) {
        return G2Point(
            [11559732032986387107991004021392285783925812861821192530917403151452391805634,
             10857046999023057135944570762232829481370756359578518086990519993285655852781],
            [4082367875863433681332203403145435568316851327593401208105741076214120093531,
             8495653923123431417604973247489272438418190587263600148770280649306958101930]
        );
    }
    /// @return the negation of p, i.e. p.add(p.negate()) should be zero.
    function negate(G1Point p) internal returns (G1Point) {
        // The prime q in the base field F_q for G1
        uint q = 21888242871839275222246405745257275088696311157297823662689037894645226208583;
        if (p.X == 0 && p.Y == 0)
            return G1Point(0, 0);
        // Negation is reflection over the x axis: (X, -Y mod q).
        return G1Point(p.X, q - (p.Y % q));
    }
    /// @return the sum of two points of G1
    function add(G1Point p1, G1Point p2) internal returns (G1Point r) {
        // Precompile input layout: x1, y1, x2, y2 (4 words).
        uint[4] memory input;
        input[0] = p1.X;
        input[1] = p1.Y;
        input[2] = p2.X;
        input[3] = p2.Y;
        bool success;
        assembly {
            // Static call to the bn128-add precompile at address 6; the
            // result point is written straight into r's memory.
            success := call(sub(gas, 2000), 6, 0, input, 0xc0, r, 0x60)
            // Use "invalid" to make gas estimation work
            switch success case 0 { invalid }
        }
        require(success);
    }
    /// @return the product of a point on G1 and a scalar, i.e.
    /// p == p.mul(1) and p.add(p) == p.mul(2) for all points p.
    function mul(G1Point p, uint s) internal returns (G1Point r) {
        // Precompile input layout: x, y, scalar (3 words).
        uint[3] memory input;
        input[0] = p.X;
        input[1] = p.Y;
        input[2] = s;
        bool success;
        assembly {
            // bn128 scalar-multiplication precompile at address 7.
            success := call(sub(gas, 2000), 7, 0, input, 0x80, r, 0x60)
            // Use "invalid" to make gas estimation work
            switch success case 0 { invalid }
        }
        require (success);
    }
    /// @return the result of computing the pairing check
    /// e(p1[0], p2[0]) * .... * e(p1[n], p2[n]) == 1
    /// For example pairing([P1(), P1().negate()], [P2(), P2()]) should
    /// return true.
    function pairing(G1Point[] p1, G2Point[] p2) internal returns (bool) {
        require(p1.length == p2.length);
        uint elements = p1.length;
        // Each (G1, G2) pair is serialised as 6 words for the precompile.
        uint inputSize = elements * 6;
        uint[] memory input = new uint[](inputSize);
        for (uint i = 0; i < elements; i++)
        {
            input[i * 6 + 0] = p1[i].X;
            input[i * 6 + 1] = p1[i].Y;
            input[i * 6 + 2] = p2[i].X[0];
            input[i * 6 + 3] = p2[i].X[1];
            input[i * 6 + 4] = p2[i].Y[0];
            input[i * 6 + 5] = p2[i].Y[1];
        }
        uint[1] memory out;
        bool success;
        assembly {
            // Pairing-check precompile at address 8; skips the dynamic
            // array's length word with add(input, 0x20). Writes 1 word:
            // non-zero iff the product of pairings equals one.
            success := call(sub(gas, 2000), 8, 0, add(input, 0x20), mul(inputSize, 0x20), out, 0x20)
            // Use "invalid" to make gas estimation work
            switch success case 0 { invalid }
        }
        require(success);
        return out[0] != 0;
    }
    /// Convenience method for a pairing check for two pairs.
    function pairingProd2(G1Point a1, G2Point a2, G1Point b1, G2Point b2) internal returns (bool) {
        G1Point[] memory p1 = new G1Point[](2);
        G2Point[] memory p2 = new G2Point[](2);
        p1[0] = a1;
        p1[1] = b1;
        p2[0] = a2;
        p2[1] = b2;
        return pairing(p1, p2);
    }
    /// Convenience method for a pairing check for three pairs.
    function pairingProd3(
        G1Point a1, G2Point a2,
        G1Point b1, G2Point b2,
        G1Point c1, G2Point c2
    ) internal returns (bool) {
        G1Point[] memory p1 = new G1Point[](3);
        G2Point[] memory p2 = new G2Point[](3);
        p1[0] = a1;
        p1[1] = b1;
        p1[2] = c1;
        p2[0] = a2;
        p2[1] = b2;
        p2[2] = c2;
        return pairing(p1, p2);
    }
    /// Convenience method for a pairing check for four pairs.
    function pairingProd4(
        G1Point a1, G2Point a2,
        G1Point b1, G2Point b2,
        G1Point c1, G2Point c2,
        G1Point d1, G2Point d2
    ) internal returns (bool) {
        G1Point[] memory p1 = new G1Point[](4);
        G2Point[] memory p2 = new G2Point[](4);
        p1[0] = a1;
        p1[1] = b1;
        p1[2] = c1;
        p1[3] = d1;
        p2[0] = a2;
        p2[1] = b2;
        p2[2] = c2;
        p2[3] = d2;
        return pairing(p1, p2);
    }
}
================================================
FILE: contracts/Verifier.sol
================================================
// this code is taken from https://github.com/JacobEberhardt/ZoKrates
pragma solidity ^0.4.19;
import "../contracts/Pairing.sol";
// PGHR13 zkSNARK verifier (ZoKrates export), extended so the IC
// (input-consistency) points are uploaded in gas-bounded chunks after
// deployment via addIC().
contract Verifier {
    using Pairing for *;
    // Set to 1 once all IC points have been pushed; blocks further addIC calls.
    uint sealed = 0; //IC parameater add counter.
    // Storage cursor into the flat IC coordinate list; persists across
    // successive addIC() transactions.
    uint i = 0;
    struct VerifyingKey {
        Pairing.G2Point A;
        Pairing.G1Point B;
        Pairing.G2Point C;
        Pairing.G2Point gamma;
        Pairing.G1Point gammaBeta1;
        Pairing.G2Point gammaBeta2;
        Pairing.G2Point Z;
        Pairing.G1Point[] IC;
    }
    struct Proof {
        Pairing.G1Point A;
        Pairing.G1Point A_p;
        Pairing.G2Point B;
        Pairing.G1Point B_p;
        Pairing.G1Point C;
        Pairing.G1Point C_p;
        Pairing.G1Point K;
        Pairing.G1Point H;
    }
    VerifyingKey verifyKey;
    // Constructor (pre-0.5 style). Stores every verification-key element
    // except IC, which would blow the deployment gas limit — IC is pushed
    // afterwards through addIC(); `input` is unused here (see the
    // commented-out loop kept for reference).
    function Verifier (uint[2] A1, uint[2] A2, uint[2] B, uint[2] C1, uint[2] C2,
        uint[2] gamma1, uint[2] gamma2, uint[2] gammaBeta1,
        uint[2] gammaBeta2_1, uint[2] gammaBeta2_2, uint[2] Z1, uint[2] Z2,
        uint[] input) {
        verifyKey.A = Pairing.G2Point(A1,A2);
        verifyKey.B = Pairing.G1Point(B[0], B[1]);
        verifyKey.C = Pairing.G2Point(C1, C2);
        verifyKey.gamma = Pairing.G2Point(gamma1, gamma2);
        verifyKey.gammaBeta1 = Pairing.G1Point(gammaBeta1[0], gammaBeta1[1]);
        verifyKey.gammaBeta2 = Pairing.G2Point(gammaBeta2_1, gammaBeta2_2);
        verifyKey.Z = Pairing.G2Point(Z1,Z2);
        /*while (verifyKey.IC.length != input.length/2) {
        verifyKey.IC.push(Pairing.G1Point(input[i], input[i+1]));
        i += 2;
        }*/
    }
    // Pushes (X, Y) pairs from `input` into verifyKey.IC, stopping early when
    // remaining gas drops below 200000 so the full key can be uploaded over
    // several transactions (the caller re-sends the same list until
    // getICLen() == input.length/2). Seals the key once complete.
    // NOTE(review): msg.gas is deprecated in favour of gasleft() (0.4.21+).
    function addIC(uint[] input) {
        require(sealed ==0);
        while (verifyKey.IC.length != input.length/2 && msg.gas > 200000) {
            verifyKey.IC.push(Pairing.G1Point(input[i], input[i+1]));
            i += 2;
        }
        if( verifyKey.IC.length == input.length/2) {
            sealed = 1;
        }
    }
    // Returns the (X, Y) coordinates of the i-th stored IC point.
    function getIC(uint i) returns(uint, uint) {
        return(verifyKey.IC[i].X, verifyKey.IC[i].Y);
    }
    // Number of IC points stored so far; the deployer polls this to know
    // when the chunked upload is finished.
    function getICLen () returns (uint) {
        return(verifyKey.IC.length);
    }
    // Core PGHR13 check. Returns 0 on success, otherwise the index (1-5) of
    // the first failing pairing equation. Note the local loop variable `i`
    // shadows the storage cursor `i` declared above.
    function verify(uint[] input, Proof proof) internal returns (uint) {
        VerifyingKey memory vk = verifyKey;
        require(input.length + 1 == vk.IC.length);
        // Compute the linear combination vk_x
        Pairing.G1Point memory vk_x = Pairing.G1Point(0, 0);
        for (uint i = 0; i < input.length; i++)
            vk_x = Pairing.add(vk_x, Pairing.mul(vk.IC[i + 1], input[i]));
        vk_x = Pairing.add(vk_x, vk.IC[0]);
        if (!Pairing.pairingProd2(proof.A, vk.A, Pairing.negate(proof.A_p), Pairing.P2())) return 1;
        if (!Pairing.pairingProd2(vk.B, proof.B, Pairing.negate(proof.B_p), Pairing.P2())) return 2;
        if (!Pairing.pairingProd2(proof.C, vk.C, Pairing.negate(proof.C_p), Pairing.P2())) return 3;
        if (!Pairing.pairingProd3(
            proof.K, vk.gamma,
            Pairing.negate(Pairing.add(vk_x, Pairing.add(proof.A, proof.C))), vk.gammaBeta2,
            Pairing.negate(vk.gammaBeta1), proof.B
        )) return 4;
        if (!Pairing.pairingProd3(
            Pairing.add(vk_x, proof.A), proof.B,
            Pairing.negate(proof.H), vk.Z,
            Pairing.negate(proof.C), Pairing.P2()
        )) return 5;
        return 0;
    }
    event Verified(string);
    // Public entry point: repackages the flat proof arrays into a Proof
    // struct, runs verify(), and emits Verified on success.
    function verifyTx(
        uint[2] a,
        uint[2] a_p,
        uint[2][2] b,
        uint[2] b_p,
        uint[2] c,
        uint[2] c_p,
        uint[2] h,
        uint[2] k,
        uint[] input
    ) returns (bool) {
        Proof memory proof;
        proof.A = Pairing.G1Point(a[0], a[1]);
        proof.A_p = Pairing.G1Point(a_p[0], a_p[1]);
        proof.B = Pairing.G2Point([b[0][0], b[0][1]], [b[1][0], b[1][1]]);
        proof.B_p = Pairing.G1Point(b_p[0], b_p[1]);
        proof.C = Pairing.G1Point(c[0], c[1]);
        proof.C_p = Pairing.G1Point(c_p[0], c_p[1]);
        proof.H = Pairing.G1Point(h[0], h[1]);
        proof.K = Pairing.G1Point(k[0], k[1]);
        // Copy calldata input into memory for the internal call.
        uint[] memory inputValues = new uint[](input.length);
        for(uint i = 0; i < input.length; i++){
            inputValues[i] = input[i];
        }
        if (verify(inputValues, proof) == 0) {
            Verified("Transaction successfully verified.");
            return true;
        } else {
            return false;
        }
    }
}
================================================
FILE: contracts/contract_deploy.py
================================================
'''
copyright 2018 to the roll_up Authors
This file is part of roll_up.
roll_up is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
roll_up is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with roll_up. If not, see .
'''
import json
import web3
from web3 import Web3, HTTPProvider, TestRPCProvider
from solc import compile_source, compile_standard, compile_files
from solc import compile_source, compile_files, link_code
from web3.contract import ConciseContract
from utils import hex2int
def compile(tree_depth):
    """Compile the Pairing, Verifier and roll_up contracts with solc.

    ``tree_depth`` is accepted for call-site compatibility but not used.
    Returns a tuple ``(rollup_interface, verifier_interface)`` of compiled
    contract interfaces. Note: this intentionally keeps the historical name,
    which shadows the ``compile`` builtin.
    """
    rollup_path = "../contracts/roll_up.sol"
    pairing_path = "../contracts/Pairing.sol"
    verifier_path = "../contracts/Verifier.sol"
    compiled = compile_files(
        [pairing_path, verifier_path, rollup_path], allow_paths="./contracts"
    )
    return (
        compiled[rollup_path + ':roll_up'],
        compiled[verifier_path + ':Verifier'],
    )
def contract_deploy(tree_depth, vk_dir, merkle_root, host="localhost"):
    """Deploy the Verifier and roll_up contracts to the node at ``host``.

    tree_depth  -- forwarded to compile() (currently unused there)
    vk_dir      -- path to the verification-key JSON written by the prover
    merkle_root -- initial root stored in the roll_up contract
    Returns a ConciseContract handle for the deployed roll_up contract.
    """
    w3 = Web3(HTTPProvider("http://" + host + ":8545"))
    rollup_interface , verifier_interface = compile(tree_depth)
    with open(vk_dir) as json_data:
        vk = json.load(json_data)
    # Flatten the key into the positional constructor arguments expected by
    # Verifier.sol; the last element is the flat IC list fed to addIC().
    vk = [hex2int(vk["a"][0]),
          hex2int(vk["a"][1]),
          hex2int(vk["b"]),
          hex2int(vk["c"][0]),
          hex2int(vk["c"][1]),
          hex2int(vk["g"][0]),
          hex2int(vk["g"][1]),
          hex2int(vk["gb1"]),
          hex2int(vk["gb2"][0]),
          hex2int(vk["gb2"][1]),
          hex2int(vk["z"][0]),
          hex2int(vk["z"][1]),
          hex2int(sum(vk["IC"], []))
          ]
    # Instantiate and deploy contract
    rollup = w3.eth.contract(abi=rollup_interface['abi'], bytecode=rollup_interface['bin'])
    verifier = w3.eth.contract(abi=verifier_interface['abi'], bytecode=verifier_interface['bin'])
    # Get transaction hash from deployed contract
    tx_hash = verifier.deploy(args=vk, transaction={'from': w3.eth.accounts[0], 'gas': 4000000})
    # Get tx receipt to get contract address
    tx_receipt = w3.eth.waitForTransactionReceipt(tx_hash, 10000)
    verifier_address = tx_receipt['contractAddress']
    # Push the IC points in gas-bounded chunks (Verifier.addIC consumes as
    # many as it can per call) until the contract reports the full length.
    verifier = w3.eth.contract(address=verifier_address, abi=verifier_interface['abi'],ContractFactoryClass=ConciseContract)
    while verifier.getICLen() != (len(vk[-1]))//2:
        tx_hash = verifier.addIC(vk[-1] , transact={'from': w3.eth.accounts[0], 'gas': 4000000})
        tx_receipt = w3.eth.waitForTransactionReceipt(tx_hash, 100000)
    # Deploy roll_up pointing at the sealed verifier.
    tx_hash = rollup.deploy(transaction={'from': w3.eth.accounts[0], 'gas': 4000000}, args=[verifier_address, merkle_root])
    # Get tx receipt to get contract address
    tx_receipt = w3.eth.waitForTransactionReceipt(tx_hash, 10000)
    rollup_address = tx_receipt['contractAddress']
    # Contract instance in concise mode
    abi = rollup_interface['abi']
    rollup = w3.eth.contract(address=rollup_address, abi=abi,ContractFactoryClass=ConciseContract)
    return(rollup)
def verify(contract, proof, host="localhost"):
    """Submit ``proof`` to the deployed roll_up contract's isTrue() and
    return the mined transaction receipt (blocks until mined)."""
    w3 = Web3(HTTPProvider("http://" + host + ":8545"))
    # Argument order must match roll_up.sol's isTrue(a, a_p, b, b_p, c, c_p, h, k, input).
    tx_hash = contract.isTrue(proof["a"] , proof["a_p"], proof["b"], proof["b_p"] , proof["c"], proof["c_p"] , proof["h"] , proof["k"], proof["input"] , transact={'from': w3.eth.accounts[0], 'gas': 4000000})
    tx_receipt = w3.eth.waitForTransactionReceipt(tx_hash, 10000)
    return(tx_receipt)
================================================
FILE: contracts/roll_up.sol
================================================
/*
copyright 2018 to the roll_up Authors
This file is part of roll_up.
roll_up is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
roll_up is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with roll_up. If not, see .
*/
pragma solidity ^0.4.19;
import "../contracts/Verifier.sol";
// Tracks a merkle root on chain and lets a prover replace it by submitting a
// zkSNARK proof. NOTE(review): contracts/Miximus.sol contains an apparently
// identical copy of this contract — confirm which one is deployed and delete
// the other to avoid divergence.
contract roll_up{
    // Current merkle root of the leaf tree.
    bytes32 root;
    // NOTE(review): never read or written in this contract — presumably a
    // Miximus leftover; candidate for removal.
    mapping (bytes32 => bool) nullifiers;
    // NOTE(review): declared but never emitted here.
    event Withdraw (address);
    // zkSNARK verifier consulted by isTrue().
    Verifier public zksnark_verify;
    // Constructor (pre-0.5 style: function named after the contract).
    function roll_up (address _zksnark_verify, bytes32 _root) {
        zksnark_verify = Verifier(_zksnark_verify);
        root = _root;
    }
    // Checks that the proof's public input carries the current root, asks the
    // verifier to validate the snark, then stores the new root from input[2].
    // libsnark outputs are bit-reversed and limited to 253-bit chunks, hence
    // the reverse()/padZero() massaging on both sides of the comparison.
    function isTrue (
        uint[2] a,
        uint[2] a_p,
        uint[2][2] b,
        uint[2] b_p,
        uint[2] c,
        uint[2] c_p,
        uint[2] h,
        uint[2] k,
        uint[] input
    ) returns (bool) {
        bytes32 _root = padZero(reverse(bytes32(input[0]))); //)merge253bitWords(input[0], input[1]);
        require(_root == padZero(root));
        require(zksnark_verify.verifyTx(a,a_p,b,b_p,c,c_p,h,k,input));
        root = padZero(reverse(bytes32(input[2])));
        return(true);
    }
    // Read accessor for the stored root.
    function getRoot() constant returns(bytes32) {
        return(root);
    }
    // libshark only allows 253 bit chunks in its output
    // to overcome this we merge the first 253 bits (left) with the remaining 3 bits
    // in the next variable (right)
    function merge253bitWords(uint left, uint right) returns(bytes32) {
        right = pad3bit(right);
        uint left_msb = uint(padZero(reverse(bytes32(left))));
        uint left_lsb = uint(getZero(reverse(bytes32(left))));
        right = right + left_lsb;
        uint res = left_msb + right;
        return(bytes32(res));
    }
    // ensure that the 3 bits on the left is actually 3 bits.
    // NOTE(review): the table reads like a 3-bit bit-reversal (1 -> 4,
    // 3 -> 6), but 2 maps to 4 whereas reversing 010 yields 2 — confirm
    // whether the `input == 2` branch is a typo. Only reachable through
    // merge253bitWords, which isTrue currently does not call.
    function pad3bit(uint input) constant returns(uint) {
        if (input == 0)
            return 0;
        if (input == 1)
            return 4;
        if (input == 2)
            return 4;
        if (input == 3)
            return 6;
        return(input);
    }
    // Mask keeping only the lowest 4 bits of x.
    function getZero(bytes32 x) returns(bytes32) {
        //0x1111111111111111111111113fdc3192693e28ff6aee95320075e4c26be03308
        return(x & 0x000000000000000000000000000000000000000000000000000000000000000F);
    }
    // Mask clearing the lowest 4 bits of x.
    function padZero(bytes32 x) returns(bytes32) {
        //0x1111111111111111111111113fdc3192693e28ff6aee95320075e4c26be03308
        return(x & 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF0);
    }
    // Reverses the bit order within one byte using a packed nibble
    // lookup table in `c`.
    function reverseByte(uint a) public pure returns (uint) {
        uint c = 0xf070b030d0509010e060a020c0408000;
        return (( c >> ((a & 0xF)*8)) & 0xF0) +
            (( c >> (((a >> 4)&0xF)*8) + 4) & 0xF);
    }
    // Full bit-order reversal of a 256-bit word: byte order is swapped and
    // the bits inside each byte are reversed via reverseByte().
    function reverse(bytes32 a) public pure returns(bytes32) {
        uint r;
        uint i;
        uint b;
        for (i=0; i<32; i++) {
            b = (uint(a) >> ((31-i)*8)) & 0xff;
            b = reverseByte(b);
            r += b << (i*8);
        }
        return bytes32(r);
    }
}
================================================
FILE: depends/CMakeLists.txt
================================================
# Build the bundled baby_jubjub_ecc submodule (EdDSA gadgets used by the prover).
add_subdirectory(baby_jubjub_ecc)
================================================
FILE: docker-compose.yml
================================================
version: "3"

services:
  # Local ethereum test chain the contracts are deployed to.
  testrpc:
    image: trufflesuite/ganache-cli:v6.1.8
    ports:
      - 8545
    networks:
      - blockchain

  # Builds the prover image and runs the python integration tests against
  # the testrpc service (test.py receives the host name as an argument).
  test:
    build: .
    working_dir: /root/roll_up/tests
    command: python3 test.py testrpc
    depends_on:
      - testrpc
    networks:
      - blockchain
    # Mount the host's python harness and keys so edits don't require a rebuild.
    volumes:
      - ./tests:/root/roll_up/tests
      - ./pythonWrapper:/root/roll_up/pythonWrapper
      - ./keys:/root/roll_up/keys
      - ./contracts/contract_deploy.py:/root/roll_up/contracts/contract_deploy.py

networks:
  blockchain:
================================================
FILE: keys/.gitkeep
================================================
================================================
FILE: pythonWrapper/helper.py
================================================
'''
copyright 2018 to the roll_up Authors
This file is part of roll_up.
roll_up is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
roll_up is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with roll_up. If not, see .
'''
import pdb
import json
from solc import compile_source, compile_files, link_code
from bitstring import BitArray
import random
from ctypes import cdll
import ctypes as c
import sys
sys.path.insert(0, '../pythonWrapper')
import utils
from utils import libsnark2python
# Circuit dimensions. Must match `const int tree_depth` in
# src/roll_up_wrapper.hpp (see README); changing either value requires
# rebuilding the native prover.
tree_depth = 2
noTx = 4
# Native prover shared library built by CMake from src/.
lib = cdll.LoadLibrary('../build/src/libroll_up_wrapper.so')
prove = lib.prove
# Argument layout mirrors the genWitness() call below: merkle paths,
# pub_key_x, pub_key_y, merkle roots, address bits, old leaves, new leaves,
# signature r_x / r_y / s (each a 256-bit bool array per transaction),
# then tree_depth and noTx as plain ints.
prove.argtypes = [((c.c_bool*256)*(tree_depth)*(noTx)), (c.c_bool*256 * noTx), (c.c_bool*256 * noTx), (c.c_bool*256* noTx),
                  (((c.c_bool*tree_depth) * noTx)), (c.c_bool*256 * noTx), (c.c_bool*256 * noTx), (c.c_bool*256 * noTx),
                  (c.c_bool*256 * noTx) , (c.c_bool*256* noTx),c.c_int, c.c_int]
# The prover returns its proof as a JSON string (decoded in genWitness).
prove.restype = c.c_char_p
# genKeys(int, path, path) — presumably (tree_depth, pk_path, vk_path);
# TODO confirm against src/roll_up_wrapper.cpp.
genKeys = lib.genKeys
genKeys.argtypes = [c.c_int, c.c_char_p, c.c_char_p]
#verify = lib.verify
#verify.argtypes = [c.c_char_p, c.c_char_p , c.c_char_p , c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p , c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p, c.c_char_p ]
#verify.restype = c.c_bool
def binary2ctypes(out):
    """Pack a 256-element bit sequence into a ctypes ``c_bool * 256`` array."""
    bit_array_type = c.c_bool * 256
    return bit_array_type(*out)
def hexToBinary(hexString):
    """Expand a hex string into a list of 256 ints (0/1), most significant bit first."""
    padded_bits = format(int(hexString, 16), "0256b")
    return [int(bit) for bit in padded_bits]
def genWitness(leaves, public_key_x, public_key_y, address, tree_depth, _rhs_leaf, _new_leaf,r_x, r_y, s):
    """Marshal noTx transactions into ctypes arrays and call the native prover.

    Per transaction i the caller supplies:
      leaves[i]           -- the tree's leaf set for that transaction
      public_key_x/y[i]   -- hex-encoded owner public-key coordinates
      address[i]          -- index of the leaf being updated
      _rhs_leaf[i]        -- old leaf (hex); _new_leaf[i] -- replacement (hex)
      r_x[i], r_y[i], s[i]-- EdDSA signature parts (r hex strings, s an int)
    Returns (proof, root): the JSON proof dict emitted by
    libroll_up_wrapper.prove and the merkle root recomputed for leaves[0].
    NOTE(review): `fee` and the initial `path` assignment are unused
    leftovers; noTx is taken from the module level, not a parameter.
    """
    path = []
    fee = 0
    address_bits = []
    pub_key_x = []
    pub_key_y = []
    roots = []
    paths = []
    old_leaf = []
    new_leaf = []
    r_x_bin_array = []
    r_y_bin_array = []
    s_bin_array = []
    for i in range(noTx):
        # Rebuild the tree and the membership proof for this transaction.
        root , merkle_tree = utils.genMerkelTree(tree_depth, leaves[i])
        path , address_bit = utils.getMerkelProof(leaves[i], address[i], tree_depth)
        path = [binary2ctypes(hexToBinary(x)) for x in path]
        # The native circuit consumes the proof leaf-to-root, so reverse both
        # the path and its address bits.
        address_bit = address_bit[::-1]
        path = path[::-1]
        paths.append(((c.c_bool*256)*(tree_depth))(*path))
        # Convert every hex field to a 256-bit bool array.
        pub_key_x.append(binary2ctypes(hexToBinary(public_key_x[i])))
        pub_key_y.append(binary2ctypes(hexToBinary(public_key_y[i])))
        roots.append(binary2ctypes(hexToBinary(root)))
        address_bits.append((c.c_bool*tree_depth)(*address_bit))
        old_leaf.append(binary2ctypes(hexToBinary(_rhs_leaf[i])))
        new_leaf.append(binary2ctypes(hexToBinary(_new_leaf[i])))
        r_x_bin_array.append(binary2ctypes(hexToBinary(r_x[i])))
        r_y_bin_array.append(binary2ctypes(hexToBinary(r_y[i])))
        s_bin_array.append(binary2ctypes(hexToBinary(hex(s[i]))))
    # Aggregate the per-transaction arrays into the noTx-sized outer arrays
    # matching prove.argtypes (see module level).
    pub_key_x_array = ((c.c_bool*256)*(noTx))(*pub_key_x)
    pub_key_y_array = ((c.c_bool*256)*(noTx))(*pub_key_y)
    merkle_roots = ((c.c_bool*256)*(noTx))(*roots)
    old_leaf = ((c.c_bool*256)*(noTx))(*old_leaf)
    new_leaf = ((c.c_bool*256)*(noTx))(*new_leaf)
    r_x_bin = ((c.c_bool*256)*(noTx))(*r_x_bin_array)
    r_y_bin = ((c.c_bool*256)*(noTx))(*r_y_bin_array)
    s_bin = ((c.c_bool*256)*(noTx))(*s_bin_array)
    paths = ((c.c_bool*256)*(tree_depth) * noTx)(*paths)
    address_bits = ((c.c_bool)*(tree_depth) * noTx)(*address_bits)
    # The prover returns a JSON string (c_char_p) — decode it into a dict.
    proof = prove(paths, pub_key_x_array, pub_key_y_array, merkle_roots, address_bits, old_leaf, new_leaf, r_x_bin, r_y_bin, s_bin, tree_depth, noTx)
    proof = json.loads(proof.decode("utf-8"))
    root , merkle_tree = utils.genMerkelTree(tree_depth, leaves[0])
    return(proof, root)
def genSalt(i):
    """Return a random lowercase-hex string of length `i`."""
    return "".join(random.choice("0123456789abcdef") for _ in range(i))
def genNullifier(recvAddress):
    """Nullifier = receiver address concatenated with 24 fresh hex chars of salt."""
    return recvAddress + genSalt(24)
================================================
FILE: pythonWrapper/utils.py
================================================
'''
copyright 2018 to the roll_up Authors
This file is part of roll_up.
roll_up is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
roll_up is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with roll_up. If not, see <https://www.gnu.org/licenses/>.
'''
import pdb
import hashlib
import sys
sys.path.insert(0, "../depends/baby_jubjub_ecc/tests")
import ed25519 as ed
def hex2int(elements):
    """Parse each base-16 string in `elements` into an int, preserving order."""
    return [int(element, 16) for element in elements]
def normalize_proof(proof):
    """Convert the hex-string fields of a proof dict to ints, in place.

    "b" is a pair of G2 coordinate lists and is converted element-wise;
    every other field is a flat list of hex strings. Returns the same dict.
    """
    for key in ("a", "a_p", "b_p", "c", "c_p", "h", "k", "input"):
        proof[key] = hex2int(proof[key])
    proof["b"] = [hex2int(proof["b"][0]), hex2int(proof["b"][1])]
    return proof
def getSignature(m, sk, pk):
    """Sign message `m` with secret key `sk` / public key `pk`; returns (R, S)."""
    r_point, s_scalar = ed.signature(m, sk, pk)
    return (r_point, s_scalar)
def createLeaf(public_key, message):
    """Leaf = sha256(encoded_pubkey || message), returned WITHOUT the '0x' prefix."""
    encoded = ed.encodepoint(public_key)
    return hashPadded(encoded, message)[2:]
def libsnark2python(inputs):
    """Repack libsnark public-input field elements into 0x-prefixed 32-byte words.

    Each element's bit string is reversed (libsnark emits bits in the
    opposite order), elements longer than 100 bits are right-padded to the
    253 usable bits of a field element, and the concatenated stream is
    zero-padded to 5*256 bits and cut into 64-hex-digit words.
    """
    flipped = []
    for value in inputs:
        bits = bin(value)[2:][::-1]
        # Short elements (<=100 bits) are deliberately left unpadded,
        # mirroring the original packing heuristic.
        if len(bits) > 100:
            bits = bits.ljust(253, "0")
        flipped.append(bits)
    raw = "".join(flipped)
    raw += "0" * (256 * 5 - len(raw))
    words = []
    for start in range(0, len(raw), 256):
        word = hex(int(raw[start:start + 256], 2))
        # zero-pad to 64 hex digits after the "0x" prefix (66 chars total)
        word = word[:2] + "0" * (66 - len(word)) + word[2:]
        words.append(word)
    return words
def hashPadded(left, right):
    """sha256 over the 32-byte big-endian encodings of two hex values.

    Returns a '0x'-prefixed hex digest. Both arguments are hex strings
    (with or without a '0x' prefix is irrelevant to int(..., 16)).
    """
    payload = int(left, 16).to_bytes(32, "big") + int(right, 16).to_bytes(32, "big")
    return "0x" + hashlib.sha256(payload).hexdigest()
def sha256(data):
    """'0x'-prefixed sha256 hex digest of str(data) encoded as UTF-8."""
    encoded = str(data).encode()
    return "0x" + hashlib.sha256(encoded).hexdigest()
def getUniqueLeaf(depth):
    """Default (empty) subtree hash at `depth`: the zero leaf hashed with itself `depth` times."""
    node = "0x" + "0" * 64
    for _ in range(depth):
        node = hashPadded(node, node)
    return node
def genMerkelTree(tree_depth, leaves):
    """Build a Merkle tree of `tree_depth` levels over `leaves`.

    Odd-sized layers are padded with the default subtree hash for that
    depth; note layer 0 is the caller's `leaves` list itself, which is
    mutated in place when padding is needed (preserved behavior).

    Returns (root_hex, tree_layers) where tree_layers[0] is the leaf layer
    and tree_layers[tree_depth][0] is the root.

    Fix: the original used a hard-coded list of 33 empty layers, silently
    capping the supported depth at 32; layers are now built dynamically for
    any depth.
    """
    tree_layers = [leaves]
    for i in range(tree_depth):
        layer = tree_layers[i]
        if len(layer) % 2 != 0:
            # pad so every node has a sibling
            layer.append(getUniqueLeaf(i))
        tree_layers.append(
            [hashPadded(layer[j], layer[j + 1]) for j in range(0, len(layer), 2)]
        )
    return (tree_layers[tree_depth][0], tree_layers)
def getMerkelRoot(tree_depth, leaves):
    """Return just the Merkle root for `leaves`.

    Fix: the original called genMerkelTree and discarded the result,
    always returning None.
    """
    root, _tree = genMerkelTree(tree_depth, leaves)
    return root
def getMerkelProof(leaves, index, tree_depth):
    """Merkle authentication path for leaf `index`, ordered leaf-to-root.

    Returns (proof, address_bits) where address_bits[d] is the low bit of
    the node index at depth d (0 = node is a left child).
    """
    proof = []
    address_bits = []
    _root, tree = genMerkelTree(tree_depth, leaves)
    for depth in range(tree_depth):
        bit = index % 2
        address_bits.append(bit)
        # sibling is to the right for a left child, to the left otherwise
        sibling = index + 1 if bit == 0 else index - 1
        proof.append(tree[depth][sibling])
        index = index // 2
    return (proof, address_bits)
def testHashPadded():
    """hashPadded of two zero words must equal sha256 of 64 zero bytes."""
    zero = "0x" + "0" * 64
    res = hashPadded(zero, zero)
    assert res == "0xf5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b"
def testGenMerkelTree():
    """Known roots for all-zero trees of depth 1 and 2; depth 29 is a smoke test."""
    zero = "0x" + "0" * 64
    mr1, _tree1 = genMerkelTree(1, [zero, zero])
    mr2, _tree2 = genMerkelTree(2, [zero, zero, zero, zero])
    mr3, _tree3 = genMerkelTree(29, [zero, zero])
    assert mr1 == "0xf5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b"
    assert mr2 == "0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71"
def testlibsnarkTopython():
    """Pin libsnark2python against known field-element packing vectors.

    Fix: a first `inputs` list was assigned and immediately overwritten by
    the second; the dead assignment has been removed.
    """
    inputs = [9782619478414927069440250629401329418138703122237912437975467993246167708418,
              2077680306600520305813581592038078188768881965413185699798221798985779874888,
              4414150718664423886727710960459764220828063162079089958392546463165678021703,
              7513790795222206681892855620762680219484336729153939269867138100414707910106,
              902]
    output = libsnark2python(inputs)
    print(output)
    assert output[0] == "0x40cde80490e78bc7d1035cbc78d3e6be3e41b2fdfad473782e02e226cc2305a8"
    assert output[1] == "0x918e88a16d0624cd5ca4695bd84e23e4a6c8a202ce85560d3c66d4ed39bf4938"
    assert output[2] == "0x8dd3ea28fe8d04f3e15b787fec7e805e152fe7d3302d0122c8522bee1290e4b7"
    assert output[3] == "0x47a6bbcf8fa3667431e895f08cbd8ec2869a31698d9cf91e5bfd94cbca72161c"
def testgetMissingLeaf():
    """Check the default (empty) subtree hashes per depth.

    Fix: the helper is named getUniqueLeaf; `getMissingLeaf` is not defined
    anywhere in this file, so running this test raised NameError. The
    expected values match getUniqueLeaf: depth 0 is the zero leaf, each
    further depth hashes the previous value with itself.
    """
    assert (getUniqueLeaf(0) == "0x0000000000000000000000000000000000000000000000000000000000000000")
    assert (getUniqueLeaf(1) == "0xf5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b")
    assert (getUniqueLeaf(2) == "0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71")
    assert (getUniqueLeaf(3) == "0xc78009fdf07fc56a11f122370658a353aaa542ed63e44c4bc15ff4cd105ab33c")
    assert (getUniqueLeaf(4) == "0x536d98837f2dd165a55d5eeae91485954472d56f246df256bf3cae19352a123c")
def testgetMerkelProof():
    """Check the authentication path for leaf 0 of a depth-2 all-zero tree.

    Fix: the depth-1 sibling comes from hashPadded and therefore carries a
    '0x' prefix; the expected value previously lacked the prefix, so the
    second assert could never pass.
    """
    zero = "0x0000000000000000000000000000000000000000000000000000000000000000"
    proof1, address1 = getMerkelProof([zero, zero, zero, zero], 0, 2)
    assert (proof1[0] == zero)
    assert (proof1[1] == "0xf5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b")
    assert (address1[0] == 0)
    assert (address1[1] == 0)
================================================
FILE: requirements.txt
================================================
web3==4.6.0
py-solc==3.1.0
bitstring==3.1.5
================================================
FILE: src/CMakeLists.txt
================================================
# Shared library loaded from Python via ctypes (pythonWrapper/helper.py),
# so it must be built SHARED with position-independent code.
add_library(
  roll_up_wrapper
  SHARED
  roll_up_wrapper.cpp
)
set_property(TARGET roll_up_wrapper PROPERTY POSITION_INDEPENDENT_CODE ON)

# Explicit visibility keyword instead of the legacy keyword-less signature.
target_link_libraries(
  roll_up_wrapper
  PUBLIC
  snark
  baby_jubjub_ecc
)

# All include paths attached to the target: replaces the directory-scoped
# include_directories(.) and merges the two separate
# target_include_directories() calls; relative paths made explicit.
target_include_directories(
  roll_up_wrapper
  PRIVATE
  ${CMAKE_CURRENT_SOURCE_DIR}
  PUBLIC
  ${CMAKE_CURRENT_SOURCE_DIR}/../depends/baby_jubjub_ecc/src
  ${DEPENDS_DIR}/baby_jubjub_ecc
  ${DEPENDS_DIR}/baby_jubjub_ecc/baby_jubjub_ecc
  ${DEPENDS_DIR}/baby_jubjub_ecc/depends/libsnark
  ${DEPENDS_DIR}/baby_jubjub_ecc/depends/libsnark/depends/libff
  ${DEPENDS_DIR}/baby_jubjub_ecc/depends/libsnark/depends/libfqfft
)
================================================
FILE: src/ZoKrates/wraplibsnark.cpp
================================================
/**
* @file wraplibsnark.cpp
* @author Jacob Eberhardt
* @date 2017
*/
#include "wraplibsnark.hpp"
#include
#include
#include
#include
// contains definition of alt_bn128 ec public parameters
//#include "libsnark/libsnark/algebra/curves/alt_bn128/alt_bn128_pp.hpp"
#include "libff/algebra/curves/alt_bn128/alt_bn128_pp.hpp"
// contains required interfaces and types (keypair, proof, generator, prover, verifier)
#include
typedef long integer_coeff_t;
using namespace std;
using namespace libsnark;
// conversion byte[32] <-> libsnark bigint.
// Deserialize a 32-byte big-endian buffer into a libsnark bigint.
// NOTE(review): template argument lists were stripped from this file during
// extraction (e.g. `libff::bigint` without `<...>`); text preserved as found.
// Assumes the default-constructed bigint is zero-initialized -- TODO confirm.
libff::bigint libsnarkBigintFromBytes(const uint8_t* _x)
{
libff::bigint x;
// Limb 3 receives the most-significant 8 bytes, limb 0 the least.
for (unsigned i = 0; i < 4; i++) {
for (unsigned j = 0; j < 8; j++) {
x.data[3 - i] |= uint64_t(_x[i * 8 + j]) << (8 * (7-j));
}
}
return x;
}
// Hex-encode a libsnark bigint, trimming leading zeros but always keeping
// at least one digit. No "0x" prefix is added; callers supply it.
std::string HexStringFromLibsnarkBigint(libff::bigint _x){
uint8_t x[32];
// Serialize limbs most-significant first into a 32-byte big-endian buffer.
for (unsigned i = 0; i < 4; i++)
for (unsigned j = 0; j < 8; j++)
x[i * 8 + j] = uint8_t(uint64_t(_x.data[3 - i]) >> (8 * (7 - j)));
std::stringstream ss;
ss << std::setfill('0');
for (unsigned i = 0; i<32; i++) {
ss << std::hex << std::setw(2) << (int)x[i];
}
// NOTE(review): `std:string` (single colon) parses as a label plus an
// unqualified `string`; it only compiles because of `using namespace std`.
std:string str = ss.str();
return str.erase(0, min(str.find_first_not_of('0'), str.size()-1));
}
// Render an alt_bn128 G1 point as `"X", "0xY"` hex fragments for embedding
// in Solidity/JSON output; coordinates are normalized to affine form first.
// NOTE(review): the X fragment carries no "0x" prefix here -- callers emit
// the leading `"0x` themselves.
// Fix: removed a stringstream that was populated and then discarded.
std::string outputPointG1AffineAsHex(libff::alt_bn128_G1 _p)
{
    libff::alt_bn128_G1 aff = _p;
    aff.to_affine_coordinates();
    return "\"" +
           HexStringFromLibsnarkBigint(aff.X.as_bigint()) +
           "\", \"0x" +
           HexStringFromLibsnarkBigint(aff.Y.as_bigint()) +
           "\"";
}
// Render an alt_bn128 G1 point as comma-separated decimal "X,Y,Z"
// (Z is normalized by to_affine_coordinates before printing).
std::string outputPointG1AffineAsInt(libff::alt_bn128_G1 _p)
{
    libff::alt_bn128_G1 affine_point = _p;
    affine_point.to_affine_coordinates();
    std::stringstream stream;
    stream << affine_point.X.as_bigint() << ","
           << affine_point.Y.as_bigint() << ","
           << affine_point.Z.as_bigint();
    return stream.str();
}
// Render an alt_bn128 G2 point as nested hex arrays
// [["0xXc1","0xXc0"], ["0xYc1","0xYc0"]] -- Fp2 limb c1 before c0, matching
// the EVM pairing precompile's encoding.
std::string outputPointG2AffineAsHex(libff::alt_bn128_G2 _p)
{
libff::alt_bn128_G2 aff = _p;
// Presumably guards the point at infinity (Z == 0) from normalization --
// TODO(review): confirm against libff semantics.
if (aff.Z.c0.as_bigint() != "0" && aff.Z.c1.as_bigint() != "0" ) {
aff.to_affine_coordinates();
}
return "[\"0x" +
HexStringFromLibsnarkBigint(aff.X.c1.as_bigint()) + "\", \"0x" +
HexStringFromLibsnarkBigint(aff.X.c0.as_bigint()) + "\"],\n [\"0x" +
HexStringFromLibsnarkBigint(aff.Y.c1.as_bigint()) + "\", \"0x" +
HexStringFromLibsnarkBigint(aff.Y.c0.as_bigint()) + "\"]";
}
// Decimal rendering of a G2 point, followed by a constraint-system builder
// whose add_term/add_constraint calls are commented out (it only logs the
// nonzero entries of the A/B/C matrices).
// NOTE(review): this region was mangled during extraction -- the end of
// outputPointG2AffineAsInt and the signature of createConstraintSystem are
// fused onto one line, and template parameters / loop headers were
// stripped. Text preserved exactly as found; do not edit without the
// original source.
std::string outputPointG2AffineAsInt(libff::alt_bn128_G2 _p)
{
libff::alt_bn128_G2 aff = _p;
if (aff.Z.c0.as_bigint() != "0" && aff.Z.c1.as_bigint() != "0" ) {
aff.to_affine_coordinates();
}
std::stringstream ss;
ss << "" << aff.X.c1.as_bigint() << "," << aff.X.c0.as_bigint() << "," << aff.Y.c1.as_bigint() << "," << aff.Y.c0.as_bigint() << "," << aff.Z.c1.as_bigint() << "," < createConstraintSystem(const uint8_t* A, const uint8_t* B, const uint8_t* C, int constraints, int variables, int inputs)
{
r1cs_ppzksnark_constraint_system cs;
cs.primary_input_size = inputs;
cs.auxiliary_input_size = variables - inputs - 1; // ~one not included
cout << "num variables: " << variables < lin_comb_A, lin_comb_B, lin_comb_C;
for (int idx=0; idx value = libsnarkBigintFromBytes(A+row*variables*32 + idx*32);
libff::alt_bn128_pp::init_public_params();
cout << "C entry " << idx << " in row " << row << ": " << value << endl;
if (!value.is_zero()) {
//cout << "A(" << idx << ", " << value << ")" << endl;
//lin_comb_A.add_term(idx,value);
//linear_term(0);
}
}
for (int idx=0; idx value = libsnarkBigintFromBytes(B+row*variables*32 + idx*32);
cout << "B entry " << idx << " in row " << row << ": " << value << endl;
if (!value.is_zero()) {
cout << "B(" << idx << ", " << value << ")" << endl;
//lin_comb_B.add_term(idx, value);
}
}
for (int idx=0; idx value = libsnarkBigintFromBytes(C+row*variables*32 + idx*32);
// cout << "C entry " << idx << " in row " << row << ": " << value << endl;
if (!value.is_zero()) {
// cout << "C(" << idx << ", " << value << ")" << endl;
//lin_comb_C.add_term(idx, value);
}
}
//cs.add_constraint(r1cs_constraint(lin_comb_A, lin_comb_B, lin_comb_C));
}
return cs;
}
// keypair generateKeypair(constraints)
// Run the ppzksnark trusted-setup generator over `cs`, producing the
// proving/verification keypair.
r1cs_ppzksnark_keypair generateKeypair(const r1cs_ppzksnark_constraint_system &cs){
// from r1cs_ppzksnark.hpp
return r1cs_ppzksnark_generator(cs);
}
// Serialize `obj` to `path` (binary mode) via its stream operator<<.
// NOTE(review): the bare `template` line lost its parameter list during
// extraction; originally `template<typename T>`.
template
void writeToFile(std::string path, T& obj) {
std::stringstream ss;
ss << obj;
std::ofstream fh;
fh.open(path, std::ios::binary);
// rewind the buffered stream before dumping it into the file
ss.rdbuf()->pubseekpos(0, std::ios_base::out);
fh << ss.rdbuf();
fh.flush();
fh.close();
}
// Deserialize a T from `path` via its stream operator>>; asserts that the
// file opened (so a missing file aborts in debug builds only).
// NOTE(review): bare `template` line -- parameter list stripped by extraction.
template
T loadFromFile(std::string path) {
std::stringstream ss;
std::ifstream fh(path, std::ios::binary);
assert(fh.is_open());
ss << fh.rdbuf();
fh.close();
ss.rdbuf()->pubseekpos(0, std::ios_base::in);
T obj;
ss >> obj;
return obj;
}
// Write the proving key to disk using libsnark's streaming operators.
void serializeProvingKeyToFile(r1cs_ppzksnark_proving_key pk, const char* pk_path){
writeToFile(pk_path, pk);
}
// Read a proving key back from disk; counterpart of serializeProvingKeyToFile.
// NOTE(review): `loadFromFile>` lost its template argument to extraction.
r1cs_ppzksnark_proving_key deserializeProvingKeyFromFile(const char* pk_path){
return loadFromFile>(pk_path);
}
// Write the verification key to `vk_path` as human-readable "vk.X = ..."
// lines (the same shape exportVerificationKey prints to stdout).
void serializeVerificationKeyToFile(r1cs_ppzksnark_verification_key vk, const char* vk_path){
std::stringstream ss;
// IC has one entry per primary input plus the constant term.
unsigned icLength = vk.encoded_IC_query.rest.indices.size() + 1;
ss << "\t\tvk.A = " << outputPointG2AffineAsHex(vk.alphaA_g2) << endl;
ss << "\t\tvk.B = " << outputPointG1AffineAsHex(vk.alphaB_g1) << endl;
ss << "\t\tvk.C = " << outputPointG2AffineAsHex(vk.alphaC_g2) << endl;
ss << "\t\tvk.gamma = " << outputPointG2AffineAsHex(vk.gamma_g2) << endl;
ss << "\t\tvk.gammaBeta1 = " << outputPointG1AffineAsHex(vk.gamma_beta_g1) << endl;
ss << "\t\tvk.gammaBeta2 = " << outputPointG2AffineAsHex(vk.gamma_beta_g2) << endl;
ss << "\t\tvk.Z = " << outputPointG2AffineAsHex(vk.rC_Z_g2) << endl;
ss << "\t\tvk.IC.len() = " << icLength << endl;
ss << "\t\tvk.IC[0] = " << outputPointG1AffineAsHex(vk.encoded_IC_query.first) << endl;
for (size_t i = 1; i < icLength; ++i)
{
auto vkICi = outputPointG1AffineAsHex(vk.encoded_IC_query.rest.values[i - 1]);
ss << "\t\tvk.IC[" << i << "] = " << vkICi << endl;
}
std::ofstream fh;
fh.open(vk_path, std::ios::binary);
ss.rdbuf()->pubseekpos(0, std::ios_base::out);
fh << ss.rdbuf();
fh.flush();
fh.close();
}
// compliant with solidty verification example
// Print the verification key to stdout as Solidity assignments ready to
// paste into the Verifier contract's verifyingKey() function.
void exportVerificationKey(r1cs_ppzksnark_keypair keypair){
unsigned icLength = keypair.vk.encoded_IC_query.rest.indices.size() + 1;
cout << "\tVerification key in Solidity compliant format:{" << endl;
cout << "\t\tvk.A = Pairing.G2Point(" << outputPointG2AffineAsHex(keypair.vk.alphaA_g2) << ");" << endl;
cout << "\t\tvk.B = Pairing.G1Point(" << outputPointG1AffineAsHex(keypair.vk.alphaB_g1) << ");" << endl;
cout << "\t\tvk.C = Pairing.G2Point(" << outputPointG2AffineAsHex(keypair.vk.alphaC_g2) << ");" << endl;
cout << "\t\tvk.gamma = Pairing.G2Point(" << outputPointG2AffineAsHex(keypair.vk.gamma_g2) << ");" << endl;
cout << "\t\tvk.gammaBeta1 = Pairing.G1Point(" << outputPointG1AffineAsHex(keypair.vk.gamma_beta_g1) << ");" << endl;
cout << "\t\tvk.gammaBeta2 = Pairing.G2Point(" << outputPointG2AffineAsHex(keypair.vk.gamma_beta_g2) << ");" << endl;
cout << "\t\tvk.Z = Pairing.G2Point(" << outputPointG2AffineAsHex(keypair.vk.rC_Z_g2) << ");" << endl;
cout << "\t\tvk.IC = new Pairing.G1Point[](" << icLength << ");" << endl;
cout << "\t\tvk.IC[0] = Pairing.G1Point(" << outputPointG1AffineAsHex(keypair.vk.encoded_IC_query.first) << ");" << endl;
for (size_t i = 1; i < icLength; ++i)
{
auto vkICi = outputPointG1AffineAsHex(keypair.vk.encoded_IC_query.rest.values[i - 1]);
cout << "\t\tvk.IC[" << i << "] = Pairing.G1Point(" << vkICi << ");" << endl;
}
cout << "\t\t}" << endl;
}
// compliant with solidty verification example
/*
void exportInput(r1cs_primary_input input){
cout << "\tInput in Solidity compliant format:{" << endl;
for (size_t i = 0; i < input.size(); ++i)
{
cout << "\t\tinput[" << i << "] = " << HexStringFromLibsnarkBigint(input[i].as_bigint()) << ";" << endl;
}
cout << "\t\t}" << endl;
} */
// Print a PGHR13 proof's eight group elements to stdout as Solidity
// Pairing.G1Point/G2Point assignments (A/A_p, B/B_p, C/C_p, H, K).
void printProof(r1cs_ppzksnark_proof proof){
cout << "Proof:"<< endl;
cout << "proof.A = Pairing.G1Point(" << outputPointG1AffineAsHex(proof.g_A.g)<< ");" << endl;
cout << "proof.A_p = Pairing.G1Point(" << outputPointG1AffineAsHex(proof.g_A.h)<< ");" << endl;
cout << "proof.B = Pairing.G2Point(" << outputPointG2AffineAsHex(proof.g_B.g)<< ");" << endl;
cout << "proof.B_p = Pairing.G1Point(" << outputPointG1AffineAsHex(proof.g_B.h)<<");" << endl;
cout << "proof.C = Pairing.G1Point(" << outputPointG1AffineAsHex(proof.g_C.g)<< ");" << endl;
cout << "proof.C_p = Pairing.G1Point(" << outputPointG1AffineAsHex(proof.g_C.h)<<");" << endl;
cout << "proof.H = Pairing.G1Point(" << outputPointG1AffineAsHex(proof.g_H)<<");"<< endl;
cout << "proof.K = Pairing.G1Point(" << outputPointG1AffineAsHex(proof.g_K)<<");"<< endl;
}
/*bool _setup(const uint8_t* A, const uint8_t* B, const uint8_t* C, int constraints, int variables, int inputs, const char* pk_path, const char* vk_path)
{
//libsnark::inhibit_profiling_info = true;
//libsnark::inhibit_profiling_counters = true;
//initialize curve parameters
libff::alt_bn128_pp::init_public_params();
r1cs_constraint_system cs;
cs = createConstraintSystem(A, B ,C , constraints, variables, inputs);
assert(cs.num_variables() >= inputs);
assert(cs.num_inputs() == inputs);
assert(cs.num_constraints() == constraints);
// create keypair
r1cs_ppzksnark_keypair keypair = r1cs_ppzksnark_generator(cs);
// Export vk and pk to files
serializeProvingKeyToFile(keypair.pk, pk_path);
serializeVerificationKeyToFile(keypair.vk, vk_path);
// Print VerificationKey in Solidity compatible format
exportVerificationKey(keypair);
return true;
}*/
/*
bool _generate_proof(const char* pk_path, const uint8_t* public_inputs, int public_inputs_length, const uint8_t* private_inputs, int private_inputs_length)
{
// libsnark::inhibit_profiling_info = true;
// libsnark::inhibit_profiling_counters = true;
//initialize curve parameters
libff::alt_bn128_pp::init_public_params();
r1cs_ppzksnark_proving_key pk = deserializeProvingKeyFromFile(pk_path);
// assign variables based on witness values, excludes ~one
r1cs_variable_assignment full_variable_assignment;
for (int i = 1; i < public_inputs_length; i++) {
full_variable_assignment.push_back(libff::alt_bn128_pp(libsnarkBigintFromBytes(public_inputs + i*32)));
}
for (int i = 0; i < private_inputs_length; i++) {
full_variable_assignment.push_back((libsnarkBigintFromBytes(private_inputs + i*32)));
}
// split up variables into primary and auxiliary inputs. Does *NOT* include the constant 1
// Public variables belong to primary input, private variables are auxiliary input.
r1cs_primary_input primary_input(full_variable_assignment.begin(), full_variable_assignment.begin() + public_inputs_length-1);
r1cs_primary_input auxiliary_input(full_variable_assignment.begin() + public_inputs_length-1, full_variable_assignment.end());
// for debugging
// cout << "full variable assignment:"<< endl << full_variable_assignment;
// cout << "primary input:"<< endl << primary_input;
// cout << "auxiliary input:"<< endl << auxiliary_input;
// Proof Generation
r1cs_ppzksnark_proof proof = r1cs_ppzksnark_prover(pk, primary_input, auxiliary_input);
// print proof
printProof(proof);
// TODO? print inputs
return true;
} */
================================================
FILE: src/ZoKrates/wraplibsnark.hpp
================================================
/**
* @file wraplibsnark.hpp
* @author Jacob Eberhardt
* @date 2017
*/
#ifdef __cplusplus
extern "C" {
#endif
#include
#include
bool _setup(const uint8_t* A,
const uint8_t* B,
const uint8_t* C,
int constraints,
int variables,
int inputs,
const char* pk_path,
const char* vk_path
);
bool _generate_proof(const char* pk_path,
const uint8_t* public_inputs,
int public_inputs_length,
const uint8_t* private_inputs,
int private_inputs_length
);
#ifdef __cplusplus
} // extern "C"
#endif
================================================
FILE: src/export.cpp
================================================
/*
copyright 2018 to the roll_up Authors
This file is part of roll_up.
roll_up is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
roll_up is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with roll_up. If not, see <https://www.gnu.org/licenses/>.
*/
#include
#include
#include
#include
#include
// ZoKrates
#include
//key gen
#include "libff/algebra/curves/alt_bn128/alt_bn128_pp.hpp" //hold key
#include "libff/algebra/curves/bn128/bn128_pp.hpp" //hold key
#include
#include
#include
#include
#include
#include
#include
#include
#include
// tmp
//#include
#include
#include
using namespace libsnark;
using namespace libff;
// Emit one R1CS linear combination as a JSON object {"index": coeff, ...}.
// NOTE(review): the bare `template` line and missing `<FieldT>` arguments
// are extraction damage; text preserved as found.
template
void constraint_to_json(linear_combination constraints, std::stringstream &ss)
{
ss << "{";
uint count = 0;
for (const linear_term& lt : constraints.terms)
{
if (count != 0) {
ss << ",";
}
// Any coefficient other than 0/1 is emitted as "-1" -- presumably these
// circuits only use coefficients in {-1, 0, 1}; TODO(review) confirm.
if (lt.coeff != 0 && lt.coeff != 1) {
ss << '"' << lt.index << '"' << ":" << "-1";
}
else {
ss << '"' << lt.index << '"' << ":" << lt.coeff;
}
count++;
}
ss << "}";
}
// Dump the protoboard's full variable assignment to `path` as
// {"TestVariables":[...]} with each value in decimal.
// NOTE(review): bare `template` line and stripped `<FieldT>` arguments are
// extraction damage. `input_variables` is accepted but unused here.
template
void array_to_json(protoboard pb, uint input_variables, std::string path)
{
std::stringstream ss;
std::ofstream fh;
fh.open(path, std::ios::binary);
r1cs_variable_assignment values = pb.full_variable_assignment();
ss << "\n{\"TestVariables\":[";
for (size_t i = 0; i < values.size(); ++i)
{
ss << values[i].as_bigint();
if (i < values.size() - 1) { ss << ",";}
}
ss << "]}\n";
ss.rdbuf()->pubseekpos(0, std::ios_base::out);
fh << ss.rdbuf();
fh.flush();
fh.close();
}
// Dump the protoboard's constraint system to `path` as JSON:
// {"variables":[annotations...], "constraints":[[A,B,C], ...]} where each
// of A/B/C is rendered by constraint_to_json.
// NOTE(review): bare `template` line -- parameter list stripped by
// extraction. variable_annotations is only populated in DEBUG builds (see
// the original comment below).
template
void r1cs_to_json(protoboard pb, uint input_variables, std::string path)
{
// output inputs, right now need to compile with debug flag so that the `variable_annotations`
// exists. Having trouble setting that up so will leave for now.
r1cs_constraint_system constraints = pb.get_constraint_system();
std::stringstream ss;
std::ofstream fh;
fh.open(path, std::ios::binary);
ss << "\n{\"variables\":[";
for (size_t i = 0; i < input_variables + 1; ++i)
{
ss << '"' << constraints.variable_annotations[i].c_str() << '"';
if (i < input_variables ) {
ss << ", ";
}
}
ss << "],\n";
ss << "\"constraints\":[";
for (size_t c = 0; c < constraints.num_constraints(); ++c)
{
ss << "[";// << "\"A\"=";
constraint_to_json(constraints.constraints[c].a, ss);
ss << ",";// << "\"B\"=";
constraint_to_json(constraints.constraints[c].b, ss);
ss << ",";// << "\"A\"=";;
constraint_to_json(constraints.constraints[c].c, ss);
if (c == constraints.num_constraints()-1 ) {
ss << "]\n";
} else {
ss << "],\n";
}
}
ss << "]}";
ss.rdbuf()->pubseekpos(0, std::ios_base::out);
fh << ss.rdbuf();
fh.flush();
fh.close();
}
// Print the proof to stdout in Solidity form, write it to the hard-coded
// path ../zksnark_element/proof.json, and return the JSON string.
// `isInt` selects decimal (true) or 0x-hex (false) encoding of the group
// elements and public inputs.
// NOTE(review): bare `template` line -- parameter list stripped by
// extraction. The output path assumes the process CWD is the build dir.
template
string proof_to_json(r1cs_ppzksnark_proof proof, r1cs_primary_input input, bool isInt) {
std::cout << "proof.A = Pairing.G1Point(" << outputPointG1AffineAsHex(proof.g_A.g)<< ");" << endl;
std::cout << "proof.A_p = Pairing.G1Point(" << outputPointG1AffineAsHex(proof.g_A.h)<< ");" << endl;
std::cout << "proof.B = Pairing.G2Point(" << outputPointG2AffineAsHex(proof.g_B.g)<< ");" << endl;
std::cout << "proof.B_p = Pairing.G1Point(" << outputPointG1AffineAsHex(proof.g_B.h)<<");" << endl;
std::cout << "proof.C = Pairing.G1Point(" << outputPointG1AffineAsHex(proof.g_C.g)<< ");" << endl;
std::cout << "proof.C_p = Pairing.G1Point(" << outputPointG1AffineAsHex(proof.g_C.h)<<");" << endl;
std::cout << "proof.H = Pairing.G1Point(" << outputPointG1AffineAsHex(proof.g_H)<<");"<< endl;
std::cout << "proof.K = Pairing.G1Point(" << outputPointG1AffineAsHex(proof.g_K)<<");"<< endl;
std::string path = "../zksnark_element/proof.json";
std::stringstream ss;
std::ofstream fh;
fh.open(path, std::ios::binary);
if(isInt) {
ss << "{\n";
ss << " \"a\" :[" << outputPointG1AffineAsInt(proof.g_A.g) << "],\n";
ss << " \"a_p\" :[" << outputPointG1AffineAsInt(proof.g_A.h)<< "],\n";
ss << " \"b\" :[" << outputPointG2AffineAsInt(proof.g_B.g)<< "],\n";
ss << " \"b_p\" :[" << outputPointG1AffineAsInt(proof.g_B.h)<< "],\n";
ss << " \"c\" :[" << outputPointG1AffineAsInt(proof.g_C.g)<< "],\n";
ss << " \"c_p\" :[" << outputPointG1AffineAsInt(proof.g_C.h)<< "],\n";
ss << " \"h\" :[" << outputPointG1AffineAsInt(proof.g_H)<< "],\n";
ss << " \"k\" :[" << outputPointG1AffineAsInt(proof.g_K)<< "],\n";
ss << " \"input\" :" << "["; //1 should always be the first variavle passed
for (size_t i = 0; i < input.size(); ++i)
{
ss << input[i].as_bigint() ;
if ( i < input.size() - 1 ) {
ss<< ", ";
}
}
ss << "]\n";
ss << "}";
}
else {
ss << "{\n";
ss << " \"a\" :[" << outputPointG1AffineAsHex(proof.g_A.g) << "],\n";
ss << " \"a_p\" :[" << outputPointG1AffineAsHex(proof.g_A.h)<< "],\n";
ss << " \"b\" :[" << outputPointG2AffineAsHex(proof.g_B.g)<< "],\n";
ss << " \"b_p\" :[" << outputPointG1AffineAsHex(proof.g_B.h)<< "],\n";
ss << " \"c\" :[" << outputPointG1AffineAsHex(proof.g_C.g)<< "],\n";
ss << " \"c_p\" :[" << outputPointG1AffineAsHex(proof.g_C.h)<< "],\n";
ss << " \"h\" :[" << outputPointG1AffineAsHex(proof.g_H)<< "],\n";
ss << " \"k\" :[" << outputPointG1AffineAsHex(proof.g_K)<< "],\n";
ss << " \"input\" :" << "["; //1 should always be the first variavle passed
for (size_t i = 0; i < input.size(); ++i)
{
ss << "\"0x" << HexStringFromLibsnarkBigint(input[i].as_bigint()) << "\"";
if ( i < input.size() - 1 ) {
ss<< ", ";
}
}
ss << "]\n";
ss << "}";
}
ss.rdbuf()->pubseekpos(0, std::ios_base::out);
fh << ss.rdbuf();
fh.flush();
fh.close();
return(ss.str());
}
// Write the verification key to `path` as JSON with short field names
// (a/b/c/g/gb1/gb2/z/IC), the format consumed by contracts/contract_deploy.py.
void vk2json(r1cs_ppzksnark_keypair keypair, std::string path ) {
std::stringstream ss;
std::ofstream fh;
fh.open(path, std::ios::binary);
// IC has one entry per primary input plus the constant term.
unsigned icLength = keypair.vk.encoded_IC_query.rest.indices.size() + 1;
ss << "{\n";
ss << " \"a\" :[" << outputPointG2AffineAsHex(keypair.vk.alphaA_g2) << "],\n";
ss << " \"b\"  :[" << outputPointG1AffineAsHex(keypair.vk.alphaB_g1) << "],\n";
ss << " \"c\" :[" << outputPointG2AffineAsHex(keypair.vk.alphaC_g2) << "],\n";
ss << " \"g\" :[" << outputPointG2AffineAsHex(keypair.vk.gamma_g2)<< "],\n";
ss << " \"gb1\" :[" << outputPointG1AffineAsHex(keypair.vk.gamma_beta_g1)<< "],\n";
ss << " \"gb2\" :[" << outputPointG2AffineAsHex(keypair.vk.gamma_beta_g2)<< "],\n";
ss << " \"z\" :[" << outputPointG2AffineAsHex(keypair.vk.rC_Z_g2)<< "],\n";
ss << "\"IC\" :[[" << outputPointG1AffineAsHex(keypair.vk.encoded_IC_query.first) << "]";
for (size_t i = 1; i < icLength; ++i)
{
auto vkICi = outputPointG1AffineAsHex(keypair.vk.encoded_IC_query.rest.values[i - 1]);
ss << ",[" << vkICi << "]";
}
ss << "]";
ss << "}";
ss.rdbuf()->pubseekpos(0, std::ios_base::out);
fh << ss.rdbuf();
fh.flush();
fh.close();
}
// Generate the keypair and a proof for the given protoboard, persist the
// keys (vk.json, pk.raw, vk.raw) and debug inputs to disk, and return the
// proof JSON as a heap-allocated, NUL-terminated C string. Ownership of the
// buffer passes to the caller (the Python ctypes wrapper).
// NOTE(review): the bare `template` line and the `proof_to_json (proof,
// primary_input)` call lost template/argument text during extraction;
// that text is preserved as found.
template
//void dump_key(r1cs_constraint_system cs)
char* dump_key(protoboard pb, std::string path)
{
r1cs_constraint_system constraints = pb.get_constraint_system();
std::stringstream ss;
std::ofstream fh;
fh.open(path, std::ios::binary);
r1cs_ppzksnark_keypair keypair = generateKeypair(pb.get_constraint_system());
//save keys
vk2json(keypair, "vk.json");
writeToFile("../zksnark_element/pk.raw", keypair.pk);
writeToFile("../zksnark_element/vk.raw", keypair.vk);
pb.primary_input();
pb.auxiliary_input();
r1cs_primary_input primary_input = pb.primary_input();
r1cs_auxiliary_input auxiliary_input = pb.auxiliary_input();
ss << "primaryinputs" << primary_input;
ss << "aux input" << auxiliary_input;
r1cs_ppzksnark_proof proof = r1cs_ppzksnark_prover(keypair.pk, primary_input, auxiliary_input);
auto json = proof_to_json (proof, primary_input);
ss.rdbuf()->pubseekpos(0, std::ios_base::out);
fh << ss.rdbuf();
fh.flush();
fh.close();
// FIX: allocate size() + 1 bytes -- the memcpy below copies the NUL
// terminator too, which previously overflowed the buffer by one byte.
auto result = new char[json.size() + 1];
memcpy(result, json.c_str(), json.size() + 1);
return result;
}
================================================
FILE: src/roll_up.hpp
================================================
/*
copyright 2018 to the roll_up Authors
This file is part of roll_up.
roll_up is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
roll_up is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with roll_up. If not, see <https://www.gnu.org/licenses/>.
*/
#include
#include
#include
#include
typedef sha256_ethereum HashT;
// Gadget that proves a batch of `noTx` roll_up transactions: each tx checks
// a signature and a Merkle-path update, and the batch chains each tx's new
// root into the next tx's old root (see roll_up.tcc).
// NOTE(review): template parameter lists (e.g. `gadget<FieldT>`,
// `pb_variable<FieldT>`) were stripped from this file during extraction;
// declarations preserved as found.
namespace libsnark {
template
class roll_up: public gadget {
//greater than gadget
private:
/* no internal variables */
public:
// Packers that expose bit vectors as packed public field elements.
std::shared_ptr> unpacker_old_root;
std::shared_ptr> unpacker_new_root;
std::shared_ptr> unpacker_leaf_addresses;
std::shared_ptr> unpacker_leaf_hashes;
pb_variable a;
pb_variable d;
// Concatenated address bits / new-leaf bits across transactions,
// packed for data availability.
pb_variable_array unpacked_addresses;
pb_variable_array unpacked_leaves;
std::string annotation_prefix = "roll up";
int noTx;
// One tx sub-gadget per transaction in the batch.
std::vector>> transactions;
roll_up(protoboard &pb,
std::vector> &pub_key_x_bin,
std::vector> &pub_key_y_bin,
int tree_depth, std::vector> address_bits_va,
std::vector>> root_digest_old,
std::vector>> root_digest_new,
std::vector> path_old, std::vector> path_new,
std::vector> rhs_leaf,
std::vector> S, std::vector>> new_leaf,
std::vector> r_x_bin, std::vector> r_y_bin,
pb_variable_array old_root , pb_variable_array new_root, pb_variable_array leaves_data_availability,
pb_variable_array leaves_addresses_data_availability,
int noTx,
const std::string &annotation_prefix);
void generate_r1cs_constraints();
void generate_r1cs_witness();
};
} // libsnark
#include
================================================
FILE: src/roll_up.tcc
================================================
/*
copyright 2018 to the roll_up Authors
This file is part of roll_up.
roll_up is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
roll_up is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with roll_up. If not, see <https://www.gnu.org/licenses/>.
*/
namespace libsnark {
// Constructor: wires packers for the public inputs and chains the per-tx
// sub-gadgets (tx i takes tx i-1's new root as its old root).
// NOTE(review): template parameter lists were stripped from this file
// during extraction; text preserved as found.
template
roll_up::roll_up(protoboard &pb,
std::vector> &pub_key_x_bin,
std::vector> &pub_key_y_bin,
int tree_depth, std::vector> address_bits_va, std::vector>> root_digest_old,
std::vector>> root_digest_new, std::vector> path_old,
std::vector> path_new, std::vector> rhs_leaf,
std::vector> S, std::vector>> new_leaf,
std::vector> r_x_bin, std::vector> r_y_bin,
pb_variable_array old_root , pb_variable_array new_root,
pb_variable_array leaves_data_availability, pb_variable_array leaves_addresses_data_availability,
int noTx,
const std::string &annotation_prefix): gadget(pb, annotation_prefix) , noTx(noTx) {
// NOTE(review): only the first noTx-1 transactions' address/leaf bits are
// packed here -- the last tx is excluded; confirm whether intentional.
for(uint i = 0; i < noTx-1; i++) {
unpacked_addresses.insert(unpacked_addresses.end(), address_bits_va[i].begin(), address_bits_va[i].end());
unpacked_leaves.insert(unpacked_leaves.end(), new_leaf[i]->bits.begin(), new_leaf[i]->bits.end());
}
unpacker_old_root.reset(new multipacking_gadget(
pb,
root_digest_old[0]->bits,
old_root,
FieldT::capacity(),
"old root"
));
// NOTE(review): index noTx-2 assumes noTx >= 2; noTx == 1 would read out
// of bounds here.
unpacker_new_root.reset(new multipacking_gadget(
pb,
root_digest_new[noTx-2]->bits,
new_root,
FieldT::capacity(),
"new_root"
));
// NOTE(review): the two packers below reuse the "new_root" annotation.
unpacker_leaf_addresses.reset(new multipacking_gadget(
pb,
unpacked_addresses,
leaves_addresses_data_availability,
FieldT::capacity(),
"new_root"
));
unpacker_leaf_hashes.reset(new multipacking_gadget(
pb,
unpacked_leaves,
leaves_data_availability,
FieldT::capacity(),
"new_root"
));
//5 for the old root , new root
// noTx*2 for address, leaf
// noTx*2*253/256 for the left over bits
// that do not fit in a 253 bit field element.
pb.set_input_sizes(6);
transactions.resize(noTx);
// First tx starts from the supplied old root; each later tx i chains off
// tx i-1's resulting root.
transactions[0].reset(new tx(pb,
pub_key_x_bin[0], pub_key_y_bin[0], tree_depth,address_bits_va[0],root_digest_old[0],
root_digest_new[0],path_old[0],path_new[0], rhs_leaf[0], S[0] , new_leaf[0] , r_x_bin[0], r_y_bin[0],
"tx i"
));
for (int i =1; i(pb,
pub_key_x_bin[i], pub_key_y_bin[i], tree_depth,address_bits_va[i],root_digest_new[i-1],
root_digest_new[i],path_old[i],path_new[i], rhs_leaf[i], S[i] , new_leaf[i] , r_x_bin[i], r_y_bin[i],
"tx i"
));
}
}
template
// Emit R1CS constraints for every chained transaction gadget, then for the
// four multipacking gadgets. Passing `true` makes each unpacker also enforce
// booleanity of its bit inputs.
void roll_up::generate_r1cs_constraints() {
// NOTE(review): loop header garbled by extraction; presumably
// "for (int i = 0; i < noTx; i++) { transactions[i]->generate_r1cs_constraints();".
for (int i =0; igenerate_r1cs_constraints();
}
unpacker_old_root->generate_r1cs_constraints(true);
unpacker_new_root->generate_r1cs_constraints(true);
unpacker_leaf_addresses->generate_r1cs_constraints(true);
unpacker_leaf_hashes->generate_r1cs_constraints(true);
}
template
// Populate the witness: each transaction gadget fills its own variables
// (Merkle paths, signatures, roots), then the packers derive the packed
// primary-input field elements from the already-assigned bits.
void roll_up::generate_r1cs_witness() {
// NOTE(review): loop header garbled by extraction; presumably
// "for (int i = 0; i < noTx; i++) { transactions[i]->generate_r1cs_witness();".
for (int i =0; igenerate_r1cs_witness();
}
unpacker_old_root->generate_r1cs_witness_from_bits();
unpacker_new_root->generate_r1cs_witness_from_bits();
unpacker_leaf_addresses->generate_r1cs_witness_from_bits();
unpacker_leaf_hashes->generate_r1cs_witness_from_bits();
}
}
================================================
FILE: src/roll_up_wrapper.cpp
================================================
/*
copyright 2018 to the roll_up Authors
This file is part of roll_up.
roll_up is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
roll_up is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with roll_up. If not, see <https://www.gnu.org/licenses/>.
*/
//hash
#include "roll_up_wrapper.hpp"
#include
#include
using namespace libsnark;
using namespace libff;
typedef sha256_ethereum HashT;
#include
// Intended to generate and persist the proving/verifying keypair for a batch
// of `noTx` transactions. NOTE(review): as written this function only
// allocates protoboard variables and then returns — the circuit construction,
// keypair generation, and key serialization are all commented out below, and
// the pkOutput/vkOuput parameters are never used. The hard-coded "../keys/"
// paths in the commented code also ignore those parameters. TODO: restore or
// remove.
void genKeys(int noTx, char* pkOutput, char* vkOuput) {
libff::alt_bn128_pp::init_public_params();
protoboard pb;
// Constant-zero wire shared by bit gadgets.
pb_variable ZERO;
ZERO.allocate(pb, "ZERO");
pb.val(ZERO) = 0;
//make sure we constrain ZERO to zero.
// NOTE(review): no constraint is actually emitted for ZERO here — only its
// witness value is set; confirm a booleanity/zero constraint exists elsewhere.
std::shared_ptr> transactions;
// Per-transaction circuit inputs (256-bit digests, public keys, addresses,
// EdDSA signature components). All sized by noTx.
std::vector> path(noTx);
path.resize(noTx);
std::vector>> root_digest_old(noTx);
std::vector>> root_digest_new(noTx);
std::vector>> new_leaf(noTx);
std::vector> pub_key_x_bin(noTx);
std::vector> pub_key_y_bin(noTx);
std::vector> address_bits_va(noTx);
std::vector> rhs_leaf(noTx);
//signatures setup
std::vector> S(noTx);
std::vector> pk_x_bin(noTx);
std::vector> pk_y_bin(noTx);
std::vector> r_x_bin(noTx);
std::vector> r_y_bin(noTx);
for(int k = 0 ; k < noTx; k++) {
root_digest_old[k].reset(new digest_variable(pb, 256, "root_digest_old"));
root_digest_new[k].reset(new digest_variable(pb, 256, "root_digest_new"));
new_leaf[k].reset(new digest_variable(pb, 256, "new leaf"));
pub_key_x_bin[k].allocate(pb,256,"pub_key_x_bin");
pub_key_y_bin[k].allocate(pb,256,"pub_key_y_bin");
address_bits_va[k].allocate(pb, 256, "address_bits");
// NOTE(review): annotation string "pub_key_y_bin" reused for rhs_leaf.
rhs_leaf[k].allocate(pb,256,"pub_key_y_bin");
S[k].allocate(pb, 256, FMT("annotation_prefix", " scaler to multiply by"));
pk_x_bin[k].allocate(pb, 256, FMT("annotation_prefix", " scaler to multiply by"));
pk_y_bin[k].allocate(pb, 256, FMT("annotation_prefix", " scaler to multiply by"));
r_x_bin[k].allocate(pb, 256, FMT("annotation_prefix", " scaler to multiply by"));
r_y_bin[k].allocate(pb, 256, FMT("annotation_prefix", " scaler to multiply by"));
}
/* transactions.reset( new roll_up (pb, pub_key_x_bin, pub_key_y_bin, tree_depth,
address_bits_va, root_digest_old, root_digest_new,
path, path, rhs_leaf, S, new_leaf, r_x_bin, r_y_bin, noTx ,"Confirm tx"));
transactions->generate_r1cs_constraints();
r1cs_ppzksnark_keypair keypair = generateKeypair(pb.get_constraint_system());
//save keys
vk2json(keypair, "../keys/vk.json");
writeToFile("../keys/pk.raw", keypair.pk);
writeToFile("../keys/vk.raw", keypair.vk); */
}
// Build the batched roll_up circuit from raw boolean inputs, generate a fresh
// keypair, prove the batch, and return the proof (plus primary input) as a
// heap-allocated, NUL-terminated JSON C string. The caller owns the returned
// buffer (allocated with new[]; free with delete[]).
//
// Fix: the result buffer was allocated with json.size() bytes while the
// memcpy below copies json.size() + 1 bytes (to include the terminating NUL
// from c_str()) — a one-byte heap overflow. Allocation now reserves the
// terminator as well.
//
// NOTE(review): template argument lists in this file appear stripped by text
// extraction; tokens such as "protoboard pb" presumably read
// "protoboard<FieldT> pb" in the original source.
char* prove(bool _path[][tree_depth][256], bool _pub_key_x[][256], bool _pub_key_y[][256] , bool _root[][256],
bool _address_bits[][tree_depth], bool _rhs_leaf[][256],
bool _new_leaf[][256], bool _r_x[][256], bool _r_y[][256] , bool _S[][256], int _tree_depth, int noTx) {
libff::alt_bn128_pp::init_public_params();
// Scratch 256-bit vector used to initialise each Merkle-path level.
// (Constructed empty, then resized to 256 zero bits below.)
libff::bit_vector init(0,256);
// Host-side bit buffers, one entry per transaction.
std::vector pub_key_x(noTx);
std::vector pub_key_y(noTx);
std::vector root(noTx);
std::vector rhs_leaf_bits(noTx);
std::vector new_leaf_bits(noTx);
std::vector r_x_bits(noTx);
std::vector r_y_bits(noTx);
std::vector S_bits(noTx);
std::vector address_bits(noTx);
std::vector> path(noTx);
init.resize(256);
path.resize(noTx);
pub_key_x.resize(noTx);
pub_key_y.resize(noTx);
root.resize(noTx);
rhs_leaf_bits.resize(noTx);
new_leaf_bits.resize(noTx);
r_x_bits.resize(noTx);
r_y_bits.resize(noTx);
S_bits.resize(noTx);
// Copy the raw C arrays into the per-transaction bit vectors and fold the
// address bits into both a packed integer and a bit list.
for(int k = 0 ; k < noTx; k++) {
pub_key_x[k].resize(256);
pub_key_y[k].resize(256);
root[k].resize(256);
rhs_leaf_bits[k].resize(256);
new_leaf_bits[k].resize(256);
r_x_bits[k].resize(256);
r_y_bits[k].resize(256);
S_bits[k].resize(256);
path[k].resize(tree_depth);
// NOTE(review): the inner loop below is garbled by extraction — it fuses
// what were presumably two loops (copying _path into path[k][i], and
// walking levels from tree_depth-1 down to 0 for the address bits).
for (int i =tree_depth - 1; i>=0 ; i--) {
path[k][i] = init;
for (int j =0; j= 0; level--)
{
const bool computed_is_right = _address_bits[k][level];
address |= (computed_is_right ? 1ul << (tree_depth-1-level) : 0);
address_bits[k].push_back(computed_is_right);
}
}
protoboard pb;
// Packed primary-input variables: two field elements per root, plus the
// data-availability packings (sized in bits here; the gadget packs them).
pb_variable_array old_root;
pb_variable_array new_root;
pb_variable_array leaves_data_availability;
pb_variable_array leaves_addresses_data_availability;
old_root.allocate(pb, 2, "old_root");
new_root.allocate(pb, 2, "new_root");
leaves_data_availability.allocate(pb, noTx*256, "packed");
leaves_addresses_data_availability.allocate(pb, noTx*256, "packed");
// Shared constant-zero wire.
pb_variable ZERO;
ZERO.allocate(pb, "ZERO");
pb.val(ZERO) = 0;
//make sure we constrain ZERO to zero.
std::shared_ptr> transactions;
std::vector>> root_digest_old(noTx);
std::vector>> root_digest_new(noTx);
std::vector>> new_leaf(noTx);
std::vector> pub_key_x_bin(noTx);
std::vector> pub_key_y_bin(noTx);
std::vector> address_bits_va(noTx);
std::vector> rhs_leaf(noTx);
//signatures setup
std::vector> S(noTx);
std::vector> pk_x_bin(noTx);
std::vector> pk_y_bin(noTx);
std::vector> r_x_bin(noTx);
std::vector> r_y_bin(noTx);
// Allocate circuit variables and load the witness bits gathered above.
for(int k = 0 ; k < noTx; k++) {
root_digest_old[k].reset(new digest_variable(pb, 256, "root_digest_old"));
root_digest_new[k].reset(new digest_variable(pb, 256, "root_digest_new"));
new_leaf[k].reset(new digest_variable(pb, 256, "new leaf"));
pub_key_x_bin[k].allocate(pb,256,"pub_key_x_bin");
pub_key_y_bin[k].allocate(pb,256,"pub_key_y_bin");
address_bits_va[k].allocate(pb, 256, "address_bits");
rhs_leaf[k].allocate(pb,256,"pub_key_y_bin");
S[k].allocate(pb, 256, FMT("annotation_prefix", " scaler to multiply by"));
pk_x_bin[k].allocate(pb, 256, FMT("annotation_prefix", " scaler to multiply by"));
pk_y_bin[k].allocate(pb, 256, FMT("annotation_prefix", " scaler to multiply by"));
r_x_bin[k].allocate(pb, 256, FMT("annotation_prefix", " scaler to multiply by"));
r_y_bin[k].allocate(pb, 256, FMT("annotation_prefix", " scaler to multiply by"));
S[k].fill_with_bits(pb, S_bits[k]);
r_x_bin[k].fill_with_bits(pb, r_x_bits[k]);
r_y_bin[k].fill_with_bits(pb, r_y_bits[k]);
root_digest_old[k]->bits.fill_with_bits(pb, root[k]);
pub_key_x_bin[k].fill_with_bits(pb, pub_key_x[k]);
pub_key_y_bin[k].fill_with_bits(pb, pub_key_y[k]);
address_bits_va[k] = from_bits(address_bits[k], ZERO);
rhs_leaf[k].fill_with_bits(pb, rhs_leaf_bits[k]);
new_leaf[k]->bits.fill_with_bits(pb, new_leaf_bits[k]);
}
// Build the batched gadget (note: `path` is passed for both old and new
// paths) and generate constraints + witness.
transactions.reset( new roll_up (pb, pub_key_x_bin, pub_key_y_bin, tree_depth,
address_bits_va, root_digest_old, root_digest_new,
path, path, rhs_leaf, S, new_leaf, r_x_bin, r_y_bin, old_root, new_root, leaves_data_availability, leaves_addresses_data_availability , noTx ,"Confirm tx"));
transactions->generate_r1cs_constraints();
transactions->generate_r1cs_witness();
std::cout << "is satisfied: " << pb.is_satisfied() << std::endl;
// NOTE(review): the two calls below discard their return values — no-ops.
pb.primary_input();
pb.auxiliary_input();
// NOTE(review): this regenerates a keypair on every proof instead of loading
// the persisted proving key — expensive, and proofs from different calls
// verify against different keys. TODO confirm intended.
r1cs_ppzksnark_keypair keypair = generateKeypair(pb.get_constraint_system());
//save keys
vk2json(keypair, "../keys/vk.json");
r1cs_primary_input primary_input = pb.primary_input();
std::cout << "primary_input " << primary_input;
r1cs_auxiliary_input auxiliary_input = pb.auxiliary_input();
r1cs_ppzksnark_proof proof = r1cs_ppzksnark_prover(keypair.pk, primary_input, auxiliary_input);
auto json = proof_to_json (proof, primary_input, false);
// +1 so the buffer has room for the NUL terminator copied by memcpy below
// (previously allocated json.size() bytes — a one-byte heap overflow).
auto result = new char[json.size() + 1];
memcpy(result, json.c_str(), json.size() + 1);
return result;
}
================================================
FILE: src/roll_up_wrapper.hpp
================================================
/*
copyright 2018 to the roll_up Authors
This file is part of roll_up.
roll_up is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
roll_up is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with roll_up. If not, see <https://www.gnu.org/licenses/>.
*/
#ifdef __cplusplus
extern "C" {
#endif
// C ABI surface of the roll_up wrapper, intended for consumption from Python
// (ctypes/cffi). All string returns are heap-allocated C strings.
#include
#include
// Merkle tree depth baked into the fixed-size array parameters below.
const int tree_depth = 2;
char* _sha256Constraints();
char* _sha256Witness();
// Produce a proof JSON string for a batch of noTx transactions.
// NOTE(review): the trailing `int tree_depth` parameter shadows the global
// const above (which sizes the array dimensions) and is named `_tree_depth`
// in the definition — confirm which one callers are expected to honour.
char* prove(bool path[][tree_depth][256], bool _pub_key_x[][256], bool _pub_key_y[][256] , bool _root[][256],
bool _address_bits[][tree_depth], bool _rhs_leaf[][256],
bool _new_leaf[][256], bool _r_x[][256], bool _r_y[][256] , bool _S[][256], int tree_depth, int noTx);
// Generate and persist proving/verifying keys for a batch of noTx transactions.
void genKeys(int noTx, char* pkOutput, char* vkOuput );
// Verify a proof given its G1/G2 coordinates and the six packed primary
// inputs, all passed as decimal strings.
bool verify( char* vk, char* _g_A_0, char* _g_A_1, char* _g_A_2 , char* _g_A_P_0, char* _g_A_P_1, char* _g_A_P_2,
char* _g_B_1, char* _g_B_0, char* _g_B_3, char* _g_B_2, char* _g_B_5 , char* _g_B_4, char* _g_B_P_0, char* _g_B_P_1, char* _g_B_P_2,
char* _g_C_0, char* _g_C_1, char* _g_C_2, char* _g_C_P_0, char* _g_C_P_1, char* _g_C_P_2,
char* _g_H_0, char* _g_H_1, char* _g_H_2, char* _g_K_0, char* _g_K_1, char* _g_K_2, char* _input0 , char* _input1 , char* _input2, char* _input3,
char* _input4, char* _input5
) ;
#ifdef __cplusplus
} // extern "C"
#endif
================================================
FILE: src/sha256/sha256_ethereum.cpp
================================================
/*
copyright 2018 to the Kobigurk
https://github.com/kobigurk/sha256_ethereum
MIT
*/
#include
#include "libsnark/gadgetlib1/gadget.hpp"
#include "libsnark/gadgetlib1/protoboard.hpp"
#include "libff/common/default_types/ec_pp.hpp"
#include
#include
#include
#include
#include
using namespace libsnark;
using namespace libff;
using std::vector;
//typedef libff::Fr FieldT;
typedef libff::Fr FieldT;
// Convert a list of booleans into a protoboard variable array by mapping each
// true bit to the constant-one wire (ONE) and each false bit to the caller's
// pre-allocated zero wire (ZERO). The result mirrors `bits` element-for-element.
pb_variable_array from_bits(std::vector bits, pb_variable& ZERO) {
pb_variable_array acc;
for (const bool b : bits) {
acc.emplace_back(b ? ONE : ZERO);
}
return acc;
}
// SHA-256 gadget matching Ethereum's sha256 of a 512-bit message: it runs two
// compression rounds — one over the caller's 512-bit input block with the
// standard IV, and one over a fixed padding block (0x80 then the 512-bit
// length) chained from the first digest.
// NOTE(review): template argument lists appear stripped by text extraction
// throughout this class (e.g. "std::shared_ptr>", "gadget" with no <FieldT>).
class sha256_ethereum : gadget {
private:
// block1/block2 are unused leftovers (block2's use is commented out below).
std::shared_ptr> block1;
std::shared_ptr> block2;
// First compression: standard IV over the input block.
std::shared_ptr> hasher1;
// Digest produced by hasher1, fed as the chaining value of hasher2.
std::shared_ptr> intermediate_hash;
// Second compression: fixed padding block, producing the final digest.
std::shared_ptr> hasher2;
public:
// block_length and annotation_prefix are accepted but not used; the gadget
// always hashes exactly one 512-bit block and annotates as "sha256_ethereum".
sha256_ethereum(protoboard &pb,
const size_t block_length,
const block_variable &input_block,
const digest_variable &output,
const std::string &annotation_prefix) : gadget(pb, "sha256_ethereum") {
intermediate_hash.reset(new digest_variable(pb, 256, "intermediate"));
// Local zero wire for the constant padding bits.
// NOTE(review): allocated per gadget instance and only witness-assigned —
// no constraint pins it to zero here; confirm booleanity elsewhere.
pb_variable ZERO;
ZERO.allocate(pb, "ZERO");
pb.val(ZERO) = 0;
// final padding
// SHA-256 padding for a 512-bit message: a single 1 bit, 447 zero bits,
// then the 64-bit big-endian message length (512 = 0x200, hence the lone
// 1 in the second-to-last row below).
pb_variable_array length_padding =
from_bits({
// padding
1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
// length of message (512 bits)
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,
0,0,0,0,0,0,0,0
}, ZERO);
/* block2.reset(new block_variable(pb, {
length_padding
}, "block2"));
*/
// First round: standard SHA-256 IV over the caller's 512-bit block.
pb_linear_combination_array IV = SHA256_default_IV(pb);
hasher1.reset(new sha256_compression_function_gadget(
pb,
IV,
input_block.bits,
*intermediate_hash,
"hasher1"));
// Second round: chain from the intermediate digest over the padding block.
pb_linear_combination_array IV2(intermediate_hash->bits);
// std::cout << block2->bits;
// std::cout << intermediate_hash;
hasher2.reset(new sha256_compression_function_gadget(
pb,
IV2,
length_padding,
output,
"hasher2"));
}
// Constraints come entirely from the two compression gadgets; the
// ensure_output_bitness flag is intentionally ignored.
void generate_r1cs_constraints(const bool ensure_output_bitness) {
libff::UNUSED(ensure_output_bitness);
hasher1->generate_r1cs_constraints();
hasher2->generate_r1cs_constraints();
}
// Witness the two rounds in order (hasher2 reads hasher1's digest).
void generate_r1cs_witness() {
hasher1->generate_r1cs_witness();
hasher2->generate_r1cs_witness();
}
static size_t get_digest_len()
{
return 256;
}
// Hash a bit vector on a throwaway protoboard (witness only; no constraint
// generation or satisfaction check — callers get the raw digest bits).
static libff::bit_vector get_hash(const libff::bit_vector &input)
{
protoboard pb;
block_variable input_variable(pb, SHA256_block_size, "input");
digest_variable output_variable(pb, SHA256_digest_size, "output");
sha256_ethereum f(pb, SHA256_block_size, input_variable, output_variable, "f");
input_variable.generate_r1cs_witness(input);
f.generate_r1cs_witness();
return output_variable.get_digest();
}
static size_t expected_constraints(const bool ensure_output_bitness)
{
libff::UNUSED(ensure_output_bitness);
return 54560; /* hardcoded for now */
}
};
// Pack a big-endian bit list into words of `wordsize` bits each (first bit of
// each chunk becomes the word's most significant bit). A trailing partial
// chunk is zero-padded on the right by virtue of the early break.
// NOTE(review): element types appear stripped by extraction — presumably
// vector<bool> in and vector<unsigned long> out.
// NOTE(review): iterations = size/wordsize + 1 appends an extra all-zero word
// whenever bit_list.size() is an exact multiple of wordsize — confirm callers
// rely on (or tolerate) that trailing word before changing it.
vector bit_list_to_ints(vector bit_list, const size_t wordsize) {
vector res;
size_t iterations = bit_list.size()/wordsize+1;
for (size_t i = 0; i < iterations; ++i) {
unsigned long current = 0;
for (size_t j = 0; j < wordsize; ++j) {
// Stop at the end of the bit list; remaining low bits stay zero.
if (bit_list.size() == (i*wordsize+j)) break;
current += (bit_list[i*wordsize+j] * (1ul<<(wordsize-1-j)));
}
res.push_back(current);
}
return res;
}
================================================
FILE: src/tx.hpp
================================================
/*
copyright 2018 to the roll_up Authors
This file is part of roll_up.
roll_up is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
roll_up is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with roll_up. If not, see <https://www.gnu.org/licenses/>.
*/
#include
#include
#include
#include "baby_jubjub_ecc/main.cpp"
namespace libsnark {
template
class tx: public gadget {
//greater than gadget
private:
/* no internal variables */
public:
pb_variable a;
pb_variable