Showing preview only (361K chars total). Download the full file or copy to clipboard to get everything.
Repository: ddkang/zkml
Branch: main
Commit: 43789582671f
Files: 93
Total size: 13.7 MB
Directory structure:
gitextract_ixvshx2q/
├── .gitignore
├── Cargo.toml
├── LICENSE
├── README.md
├── backwards/
│ ├── README.md
│ └── backward.py
├── python/
│ ├── converter.py
│ ├── input_converter.py
│ └── training_converter.py
├── rustfmt.toml
├── src/
│ ├── bin/
│ │ ├── test_circuit.rs
│ │ ├── time_circuit.rs
│ │ ├── verify_circuit.rs
│ │ └── verify_wav.rs
│ ├── commitments/
│ │ ├── commit.rs
│ │ ├── packer.rs
│ │ └── poseidon_commit.rs
│ ├── commitments.rs
│ ├── gadgets/
│ │ ├── add_pairs.rs
│ │ ├── adder.rs
│ │ ├── bias_div_floor_relu6.rs
│ │ ├── bias_div_round_relu6.rs
│ │ ├── dot_prod.rs
│ │ ├── gadget.rs
│ │ ├── input_lookup.rs
│ │ ├── max.rs
│ │ ├── mul_pairs.rs
│ │ ├── nonlinear/
│ │ │ ├── exp.rs
│ │ │ ├── logistic.rs
│ │ │ ├── non_linearity.rs
│ │ │ ├── pow.rs
│ │ │ ├── relu.rs
│ │ │ ├── rsqrt.rs
│ │ │ ├── sqrt.rs
│ │ │ └── tanh.rs
│ │ ├── nonlinear.rs
│ │ ├── sqrt_big.rs
│ │ ├── square.rs
│ │ ├── squared_diff.rs
│ │ ├── sub_pairs.rs
│ │ ├── update.rs
│ │ ├── var_div.rs
│ │ ├── var_div_big.rs
│ │ └── var_div_big3.rs
│ ├── gadgets.rs
│ ├── layers/
│ │ ├── arithmetic/
│ │ │ ├── add.rs
│ │ │ ├── div_var.rs
│ │ │ ├── mul.rs
│ │ │ └── sub.rs
│ │ ├── arithmetic.rs
│ │ ├── averager.rs
│ │ ├── avg_pool_2d.rs
│ │ ├── batch_mat_mul.rs
│ │ ├── conv2d.rs
│ │ ├── dag.rs
│ │ ├── div_fixed.rs
│ │ ├── fully_connected.rs
│ │ ├── layer.rs
│ │ ├── logistic.rs
│ │ ├── max_pool_2d.rs
│ │ ├── mean.rs
│ │ ├── noop.rs
│ │ ├── pow.rs
│ │ ├── rsqrt.rs
│ │ ├── shape/
│ │ │ ├── broadcast.rs
│ │ │ ├── concatenation.rs
│ │ │ ├── mask_neg_inf.rs
│ │ │ ├── pack.rs
│ │ │ ├── pad.rs
│ │ │ ├── permute.rs
│ │ │ ├── reshape.rs
│ │ │ ├── resize_nn.rs
│ │ │ ├── rotate.rs
│ │ │ ├── slice.rs
│ │ │ ├── split.rs
│ │ │ └── transpose.rs
│ │ ├── shape.rs
│ │ ├── softmax.rs
│ │ ├── sqrt.rs
│ │ ├── square.rs
│ │ ├── squared_diff.rs
│ │ ├── tanh.rs
│ │ └── update.rs
│ ├── layers.rs
│ ├── lib.rs
│ ├── model.rs
│ ├── utils/
│ │ ├── helpers.rs
│ │ ├── loader.rs
│ │ ├── proving_ipa.rs
│ │ └── proving_kzg.rs
│ └── utils.rs
└── testing/
└── circuits/
├── last_two_layers.py
└── v2_1.0_224.tflite
================================================
FILE CONTENTS
================================================
================================================
FILE: .gitignore
================================================
# Generated by Cargo
# will have compiled files and executables
/target/
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
Cargo.lock
# These are backup files generated by rustfmt
**/*.rs.bk
# VSCode
.vscode
**/.DS_Store
*.swp
# Proof stuff
out.msgpack
proof_size_check
pkey
vkey
proof
params_kzg
params_ipa
examples
testing/data
*.diff
================================================
FILE: Cargo.toml
================================================
[package]
name = "zkml"
version = "0.0.1"
edition = "2021"
description = "Zero-knowledge machine learning"
license = "LICENSE"
homepage = "https://github.com/ddkang/zkml"
repository = "https://github.com/ddkang/zkml-public.git"
readme = "README.md"
exclude = [
"params",
"params_kzg",
"python",
]
[profile.dev]
opt-level = 3
[profile.test]
opt-level = 3
[dependencies]
bitvec = "1.0.1"
halo2 = { git="https://github.com/privacy-scaling-explorations/halo2", package="halo2", rev="17e9765c199670534c0299c96128d0464a188d0b" }
halo2_gadgets = { git="https://github.com/privacy-scaling-explorations/halo2", package="halo2_gadgets", rev="17e9765c199670534c0299c96128d0464a188d0b", features = ["circuit-params"] }
halo2_proofs = { git="https://github.com/privacy-scaling-explorations/halo2", package="halo2_proofs", rev="17e9765c199670534c0299c96128d0464a188d0b", features = ["circuit-params"] }
lazy_static = "1.4.0"
ndarray = "0.15.6"
num-bigint = "0.4.3"
num-traits = "0.2.15"
once_cell = "1.15.0"
rand = "0.8.5"
rmp-serde = "1.1.1"
rounded-div = "0.1.2"
serde = "1.0.152"
serde_derive = "1.0.152"
serde_json = "1.0.85"
wav = "1.0.0"
================================================
FILE: LICENSE
================================================
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
FILE: README.md
================================================
# zkml
zkml is a framework for constructing proofs of ML model execution in ZK-SNARKs.
Read our [blog
post](https://medium.com/@danieldkang/trustless-verification-of-machine-learning-6f648fd8ba88)
and [paper](https://arxiv.org/abs/2210.08674) for implementation details.
zkml requires the nightly build of Rust:
```
rustup override set nightly
```
## Quickstart
Run the following commands:
```sh
# Installs rust, skip if you already have rust installed
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
git clone https://github.com/ddkang/zkml.git
cd zkml
rustup override set nightly
cargo build --release
mkdir params_kzg
mkdir params_ipa
# This should take ~16s to run the first time
# and ~8s to run the second time
./target/release/time_circuit examples/mnist/model.msgpack examples/mnist/inp.msgpack kzg
```
This will prove an MNIST circuit! It will require around 2GB of memory and take
around 8 seconds to run.
## Converting your own model and data
To convert your own model and data, you will need to convert the model and data to the format zkml
expects. Currently, we accept TFLite models. We show an example below.
1. First `cd examples/mnist`
2. We've already created a model that achieves high accuracy on MNIST (`model.tflite`). You will
need to create your own TFLite model. One way is to [convert a model from Keras](https://stackoverflow.com/questions/53256877/how-to-convert-kerash5-file-to-a-tflite-file).
3. You will need to convert the model:
```bash
python ../../python/converter.py --model model.tflite --model_output converted_model.msgpack --config_output config.msgpack --scale_factor 512 --k 17 --num_cols 10 --num_randoms 1024
```
There are several parameters that need to be changed depending on the model (`scale_factor`, `k`,
`num_cols`, and `num_randoms`).
4. You will first need to serialize the model input to numpy's serialization format `npy`. We've
written a small script to do this for the first test data point in MNIST:
```bash
python data_to_npy.py
```
5. You will then need to convert the input to the model:
```bash
python ../../python/input_converter.py --model_config converted_model.msgpack --inputs 7.npy --output example_inp.msgpack
```
6. Once you've converted the model and input, you can run the model as above! However, we generally
recommend testing the model before proving (you will need to build zkml before running the next
line):
```bash
cd ../../
./target/release/test_circuit examples/mnist/converted_model.msgpack examples/mnist/example_inp.msgpack
```
## Contact us
If you're interested in extending or using zkml, please contact us at `ddkang
[at] g.illinois.edu`.
================================================
FILE: backwards/README.md
================================================
### About
Takes in a feed-forward TF model and outputs a new computational graph for back-propagation.
================================================
FILE: backwards/backward.py
================================================
#
# A script for generating a backprop computational graph from forward
#
import argparse
import ast
from typing import Literal, Union
import msgpack
import numpy as np
class CircuitConfig():
  """Allocates tensor indices while building the backward-pass graph.

  Tracks the next free tensor index, the forward-tensor -> gradient-tensor
  mapping, the label tensor index, and the weight-update tensor index.
  """

  def __init__(self, starting_index):
    self.next_index = starting_index
    self.outp_to_grad = {}
    self.label_tensor_idx = None
    self.weights_update = None

  def _allocate(self):
    # Hands out the current free index and advances the counter.
    allocated = self.next_index
    self.next_index += 1
    return allocated

  def new_gradient_tensor(self, tensor_idx):
    """Allocates (exactly once) the gradient tensor index for `tensor_idx`."""
    if tensor_idx in self.outp_to_grad:
      raise Exception("Tensor already allocated")
    grad_idx = self._allocate()
    self.outp_to_grad[tensor_idx] = grad_idx
    return grad_idx

  def new_tensor(self):
    """Allocates a fresh intermediate tensor index."""
    return self._allocate()

  def new_label_tensor(self):
    """Allocates (exactly once) the tensor index that will hold the labels."""
    if self.label_tensor_idx is not None:
      raise Exception("Label tensor already allocated")
    self.label_tensor_idx = self._allocate()
    return self.label_tensor_idx

  def gradient_tensor_idx(self, tensor_idx):
    """Looks up the previously allocated gradient index for `tensor_idx`."""
    return self.outp_to_grad[tensor_idx]
# TODO: Put these in enums
# Integer codes used in layer 'params' lists — presumably mirroring the
# TFLite enum encodings used by python/converter.py (TODO confirm).
NO_ACTIVATION = 0
SAME = 0
VALID = 1
CONV2D = 0
CONV2D_DEPTHWISE = 1
class Conv2D():
  """Backward pass for a 2D convolution.

  Appends transcript layers that compute the weight gradient (dW) via a
  convolution of permuted inputs with permuted output gradients, and the
  input gradient (dX) via a "full" convolution of the padded output
  gradients with the rotated, permuted weights. The weight-update layer is
  built but intentionally not appended (see the commented-out line below).
  """

  def __init__(self, layer):
    params = layer['params']
    # params[0] is the conv type (CONV2D / CONV2D_DEPTHWISE) and is unused here.
    self.padding = params[1]
    self.activation_type = params[2]
    self.stride_h = params[3]
    self.stride_w = params[4]

  def backward(self, layer, transcript, config):
    inputs_idx, inputs_shape = layer['inp_idxes'][0], layer['inp_shapes'][0]
    weights_idx, weights_shape = layer['inp_idxes'][1], layer['inp_shapes'][1]
    bias_idx, bias_shape = layer['inp_idxes'][2], layer['inp_shapes'][2]
    output_idx, output_shape = layer['out_idxes'][0], layer['out_shapes'][0]

    # ---- dW: conv(permute(inputs), permute(dOut)) ----
    permuted_inputs_idx = config.new_tensor()
    permutation = [3, 1, 2, 0]
    permuted_inputs_shape = [inputs_shape[p] for p in permutation]
    inputs_permute_layer = {
      'layer_type': 'Permute',
      'params': permutation,
      'inp_idxes': [inputs_idx],
      'out_idxes': [permuted_inputs_idx],
      'inp_shapes': [inputs_shape],
      'out_shapes': [permuted_inputs_shape],
      'mask': [],
    }
    transcript.append(inputs_permute_layer)

    permuted_outputs_idx = config.new_tensor()
    permuted_outputs_shape = [output_shape[p] for p in permutation]
    inputs_permute_layer = {
      'layer_type': 'Permute',
      'params': permutation,
      'inp_idxes': [config.gradient_tensor_idx(output_idx)],
      'out_idxes': [permuted_outputs_idx],
      'inp_shapes': [output_shape],
      'out_shapes': [permuted_outputs_shape],
      'mask': [],
    }
    transcript.append(inputs_permute_layer)

    dw_idx, dw_shape = config.new_tensor(), weights_shape
    dw_conv = {
      'layer_type': 'Conv2D',
      'params': [CONV2D, VALID, NO_ACTIVATION, self.stride_h, self.stride_w],
      'inp_idxes': [permuted_inputs_idx, permuted_outputs_idx],
      'out_idxes': [dw_idx],
      'inp_shapes': [permuted_inputs_shape, permuted_outputs_shape],
      'out_shapes': [dw_shape],
      'mask': [],
    }
    transcript.append(dw_conv)
    config.weights_update = dw_idx

    # ---- dX: conv(pad(dOut), rotate(permute(weights))) ----
    permutation = [3, 1, 2, 0]
    permutation_weights_idx = config.new_tensor()
    permutation_weights_shape = [weights_shape[p] for p in permutation]
    permute_weights = {
      'layer_type': 'Permute',
      'params': permutation,
      'inp_idxes': [weights_idx],
      'out_idxes': [permutation_weights_idx],
      'inp_shapes': [weights_shape],
      'out_shapes': [permutation_weights_shape],
      'mask': [],
    }
    transcript.append(permute_weights)

    # 180-degree rotation of the kernel over the spatial axes.
    rotated_weights_idx, rotated_weights_shape = config.new_tensor(), permutation_weights_shape
    rotate_layer = {
      'layer_type': 'Rotate',
      'params': [1, 2],
      'inp_idxes': [permutation_weights_idx],
      'out_idxes': [rotated_weights_idx],
      'inp_shapes': [permutation_weights_shape],
      'out_shapes': [rotated_weights_shape],
      'mask': [],
    }
    transcript.append(rotate_layer)

    # BUG FIX: copy output_shape before mutating. The original aliased
    # `padded_gradients_shape = output_shape`, so the in-place `+=` below
    # also corrupted layer['out_shapes'][0] and the 'inp_shapes' of the
    # gradient Permute layer appended above (they share the same list).
    padded_gradients_idx = config.new_tensor()
    padded_gradients_shape = list(output_shape)
    padded_gradients_shape[1] += (rotated_weights_shape[1] - 1) * 2
    padded_gradients_shape[2] += (rotated_weights_shape[2] - 1) * 2
    pad_layer = {
      'layer_type': 'Pad',
      'params': [
        0, 0,
        rotated_weights_shape[1] - 1, rotated_weights_shape[1] - 1,
        rotated_weights_shape[2] - 1, rotated_weights_shape[2] - 1,
        0, 0
      ],
      'inp_idxes': [config.gradient_tensor_idx(output_idx)],
      'out_idxes': [padded_gradients_idx],
      'inp_shapes': [],
      'out_shapes': [],
      'mask': [],
    }
    transcript.append(pad_layer)

    dx_idx, dx_shape = config.new_gradient_tensor(inputs_idx), inputs_shape
    input_conv_layer = {
      'layer_type': 'Conv2D',
      'params': [CONV2D, VALID, NO_ACTIVATION, self.stride_h, self.stride_w],
      'inp_idxes': [padded_gradients_idx, rotated_weights_idx],
      'out_idxes': [dx_idx],
      'inp_shapes': [padded_gradients_shape, rotated_weights_shape],
      'out_shapes': [dx_shape],
      'mask': [],
    }
    transcript.append(input_conv_layer)

    # ---- Weight update (currently disabled) ----
    permutation = [3, 1, 2, 0]
    permuted_dw_idx = config.new_tensor()
    permuted_dw_shape = [dw_shape[p] for p in permutation]
    permute_dw = {
      'layer_type': 'Permute',
      'params': permutation,
      'inp_idxes': [dw_idx],
      'out_idxes': [permuted_dw_idx],
      'inp_shapes': [dw_shape],
      'out_shapes': [permuted_dw_shape],
      'mask': [],
    }
    transcript.append(permute_dw)

    updated_weights_idx, updated_weights_shape = config.new_tensor(), dw_shape
    # Call a layer to update the outputs of the convolution
    update_weights_layer = {
      'layer_type': 'Update',
      'params': [],
      'inp_idxes': [weights_idx, permuted_dw_idx],
      'out_idxes': [updated_weights_idx],
      'inp_shapes': [weights_shape, permuted_dw_shape],
      'out_shapes': [updated_weights_shape],
      'mask': [],
    }
    # Intentionally not appended yet — the Update layer is built but disabled.
    # transcript.append(update_weights_layer)
class Softmax():
  """Backward pass for a softmax fused with cross-entropy loss."""

  def __init__(self, layer):
    return

  # TODO: Make this generalizable to all neural networks
  # (do not assume that softmax is the last layer, fused with CE-loss)
  def backward(self, layer, transcript, config):
    out_shape = layer['out_shapes'][0]
    # For softmax + CE loss, dL/dx = y_hat - y.
    grad_idx = config.new_gradient_tensor(layer['inp_idxes'][0])
    transcript.append({
      'layer_type': 'Sub',
      'params': [],
      # y_hat - y
      'inp_idxes': [layer['out_idxes'][0], config.label_tensor_idx],
      'out_idxes': [grad_idx],
      'inp_shapes': [out_shape, out_shape],
      'out_shapes': [out_shape],
      'mask': [],
    })
class AveragePool2D():
  """Backward pass for an average-pooling layer."""

  def __init__(self, layer):
    return

  def backward(self, layer, transcript, config):
    # TODO: This is very model specific, must rewrite to be accurate
    # We just broadcast dx across 3 axes
    # 1 x 3 x 3 x 1 -> 1 x 1 x 1 x 1280
    inp_shape = layer['inp_shapes'][0]
    # Number of elements averaged per window (spatial H * W of the input).
    pool_area = inp_shape[1] * inp_shape[2]

    broadcast_idx = config.new_tensor()
    transcript.append({
      'layer_type': 'Broadcast',
      'params': [],
      'inp_idxes': [config.gradient_tensor_idx(layer['out_idxes'][0])],
      'out_idxes': [broadcast_idx],
      'inp_shapes': [layer['out_shapes'][0]],
      'out_shapes': [inp_shape],
      'mask': [],
    })

    # Each input contributed 1/pool_area to the mean, so divide the
    # broadcast gradient by the pool area.
    grad_idx = config.new_gradient_tensor(layer['inp_idxes'][0])
    transcript.append({
      'layer_type': 'Div',
      'params': [pool_area],
      'inp_idxes': [broadcast_idx],
      'out_idxes': [grad_idx],
      'inp_shapes': [inp_shape],
      'out_shapes': [inp_shape],
      'mask': [],
    })
class Reshape():
  """Backward pass for a shape-only layer: the gradient is reshaped back
  to the input's shape, no values change."""

  def __init__(self, layer):
    return

  def backward(self, layer, transcript, config):
    src_grad = config.gradient_tensor_idx(layer['out_idxes'][0])
    dst_grad = config.new_gradient_tensor(layer['inp_idxes'][0])
    transcript.append({
      'layer_type': 'Reshape',
      'params': [],
      'inp_idxes': [src_grad],
      'out_idxes': [dst_grad],
      'inp_shapes': [layer['out_shapes'][0]],
      'out_shapes': [layer['inp_shapes'][0]],
      'mask': [],
    })
def produce_graph():
# Read msgpack file
with open("examples/v2_1.0_224_truncated/model.msgpack", "rb") as data_file:
byte_data = data_file.read()
model = msgpack.unpackb(byte_data)
# TODO: I'm unsure whether the circuit output is always the last indexed tensor
softmax_output_index = int(np.max(
[[out for out in layer['out_idxes']] for layer in model['layers']] +
[[inp for inp in layer['inp_idxes']] for layer in model['layers']]
)[0])
circuit_config = CircuitConfig(softmax_output_index + 1)
circuit_config.new_label_tensor()
transcript = []
for layer in reversed(model['layers']):
fetched_layer = None
match layer['layer_type']:
case "Conv2D":
fetched_layer = Conv2D(layer)
case "AveragePool2D":
fetched_layer = AveragePool2D(layer)
case "Softmax":
fetched_layer = Softmax(layer)
case _:
fetched_layer = Reshape(layer)
print(layer['layer_type'])
fetched_layer.backward(layer, transcript, circuit_config)
print('----------------')
model['layers'] += transcript
model['inp_idxes'].append(circuit_config.label_tensor_idx)
model['out_idxes'] = [31]
packed = msgpack.packb(model, use_bin_type=True)
with open("./examples/train_graph/train.msgpack", 'wb') as f:
f.write(packed)
print(model.keys())
return model
# Script entry point: build and persist the training graph, then print a summary.
model = produce_graph()
print(model.keys())
# Blank out the (large) tensor payload so the summary print below stays small.
model['tensors'] = ""
print(model['inp_idxes'], model['out_idxes'])
================================================
FILE: python/converter.py
================================================
import argparse
import ast
from typing import Literal, Union
import tensorflow as tf
import numpy as np
import tflite
import msgpack
def get_shape(interpreter: tf.lite.Interpreter, tensor_idx):
  """Returns a tensor's shape as a Python list; [] for the sentinel index -1."""
  if tensor_idx == -1:
    return []
  return list(interpreter.get_tensor(tensor_idx).shape)
def handle_numpy_or_literal(inp: Union[np.ndarray, Literal[0]]):
  """Normalizes flatbuffer vector accessors to an ndarray.

  TFLite's generated accessors can return the literal int 0 instead of an
  array (presumably when the vector is absent — TODO confirm); wrap such
  scalars into a 1-element ndarray so callers handle one type.
  """
  return np.array([inp]) if isinstance(inp, int) else inp
def get_inputs(op: tflite.Operator):
  """Returns the operator's input tensor indices, dropping -1 (absent) slots."""
  idxes = handle_numpy_or_literal(op.InputsAsNumpy()).tolist()
  return [idx for idx in idxes if idx != -1]
class Converter:
def __init__(
  self, model_path, scale_factor, k, num_cols, num_randoms, use_selectors, commit,
  expose_output
):
  """Loads a TFLite model and records circuit-generation parameters.

  Args:
    model_path: path to the .tflite model file.
    scale_factor: fixed-point scale applied during quantization — TODO
      confirm exact semantics against the circuit code.
    k: circuit size parameter (halo2 log2 row count, per README usage) —
      TODO confirm.
    num_cols: number of circuit columns — TODO confirm.
    num_randoms: number of random values — TODO confirm.
    use_selectors: whether to emit selectors — TODO confirm.
    commit: whether to commit to model/inputs — TODO confirm.
    expose_output: whether the model output is exposed — TODO confirm.
  """
  self.model_path = model_path
  self.scale_factor = scale_factor
  self.k = k
  self.num_cols = num_cols
  self.num_randoms = num_randoms
  self.use_selectors = use_selectors
  self.commit = commit
  self.expose_output = expose_output
  # The TF interpreter runs the model; preserving all tensors lets us read
  # intermediate tensor values/shapes after invoke().
  self.interpreter = tf.lite.Interpreter(
    model_path=self.model_path,
    experimental_preserve_all_tensors=True
  )
  self.interpreter.allocate_tensors()
  # Also parse the raw flatbuffer so operators/options can be walked directly.
  with open(self.model_path, 'rb') as f:
    buf = f.read()
  self.model = tflite.Model.GetRootAsModel(buf, 0)
  self.graph = self.model.Subgraphs(0)
def valid_activations(self):
  """Returns the fused-activation function types the converter supports."""
  supported = ('NONE', 'RELU', 'RELU6')
  return [getattr(tflite.ActivationFunctionType, name) for name in supported]
def _convert_add(self, op: tflite.Operator, generated_tensors: set):
  """Converts a TFLite ADD op to either an 'Add' or a 'MaskNegInf' layer.

  When exactly one operand is a constant tensor made up solely of -inf and
  0 values (an additive mask), the op is emitted as 'MaskNegInf' with the
  mask encoded in params; otherwise a plain 'Add' is emitted.

  Returns:
    A (layer_type, params) tuple.
  Raises:
    RuntimeError: if options are missing, the op does not have exactly 2
      inputs, neither/both inputs are constants when a constant is required,
      or the constant mask contains values other than -inf and 0.
  """
  # Get params
  op_opt = op.BuiltinOptions()
  if op_opt is None:
    raise RuntimeError('Add options is None')
  opt = tflite.AddOptions()
  opt.Init(op_opt.Bytes, op_opt.Pos)
  params = [opt.FusedActivationFunction()]
  # Get inputs
  inputs = get_inputs(op)
  print(generated_tensors)
  print('Add inputs: ', inputs)
  if len(inputs) != 2:
    raise RuntimeError('Add must have 2 inputs')
  # If both tensors are generated, do nothing
  print(inputs[0] in generated_tensors, inputs[1] in generated_tensors)
  if (inputs[0] in generated_tensors) and (inputs[1] in generated_tensors):
    return ('Add', params)
  nb_generated = (inputs[0] in generated_tensors) + (inputs[1] in generated_tensors)
  if nb_generated != 1:
    raise RuntimeError('Add must have 1 generated tensor')
  # Check if there are any negative infinities
  # The constant operand is the one NOT produced by a previous op.
  const_tensor = self.interpreter.get_tensor(inputs[0]) if inputs[0] not in generated_tensors else self.interpreter.get_tensor(inputs[1])
  if np.any(const_tensor == -np.inf):
    # Ensure that the constant tensor is all -inf and 0
    if not np.all(np.logical_or(np.isneginf(const_tensor), const_tensor == 0)):
      raise RuntimeError('Add constant tensor must be -inf and 0 only')
    mask = (const_tensor == -np.inf).astype(np.int64)
    # params layout: [rank, *shape, *flattened 0/1 mask] (1 marks -inf cells).
    params = [len(mask.shape)] + list(mask.shape)
    params += mask.flatten().tolist()
    return ('MaskNegInf', params)
  else:
    return ('Add', params)
def to_dict(self, start_layer, end_layer):
    """Convert the loaded TFLite model into a plain dict describing the circuit.

    Runs the interpreter once on all-zero inputs so every preserved
    intermediate tensor is materialized, then walks the graph's operators in
    order and translates each supported op into a (layer_type, params)
    record. Only operators with index in [start_layer, end_layer] are
    emitted, but bookkeeping (generated-tensor tracking) is done for all ops.

    Args:
      start_layer: index of the first operator to include.
      end_layer: index of the last operator to include (inclusive).

    Returns:
      A dict with the global scale factor, circuit size parameters, layer
      descriptions, constant tensor data, and commitment bookkeeping, ready
      to be serialized with msgpack.
    """
    interpreter = self.interpreter
    model = self.model
    graph = self.graph
    if graph is None:
        raise RuntimeError('Graph is None')
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    # Feed all-zero inputs so interpreter.get_tensor() below returns data for
    # every preserved tensor (experimental_preserve_all_tensors=True).
    for inp_detail in input_details:
        inp = np.zeros(inp_detail['shape'], dtype=inp_detail['dtype'])
        interpreter.set_tensor(inp_detail['index'], inp)
    # for i, inp in enumerate(inps):
    #   interpreter.set_tensor(input_details[i]['index'], inp)
    interpreter.invoke()
    # Get layers
    # Tensors produced at runtime (graph inputs or op outputs), as opposed to
    # stored constants/weights.
    generated_tensor_idxes = set()
    for inp in input_details:
        generated_tensor_idxes.add(inp['index'])
    layers = []
    keep_tensors = set()
    # Tensors whose data is rewritten during conversion (e.g. rescaled weights).
    adjusted_tensors = {}
    for op_idx in range(graph.OperatorsLength()):
        op = graph.Operators(op_idx)
        if op is None:
            raise RuntimeError('Operator is None')
        model_opcode = model.OperatorCodes(op.OpcodeIndex())
        if model_opcode is None:
            raise RuntimeError('Operator code is None')
        op_code = model_opcode.BuiltinCode()
        # Skip generated tensors
        for output in handle_numpy_or_literal(op.OutputsAsNumpy()):
            generated_tensor_idxes.add(output)
        if op_idx < start_layer:
            continue
        if op_idx > end_layer:
            break
        # Keep the input tensors
        for input in handle_numpy_or_literal(op.InputsAsNumpy()):
            keep_tensors.add(input)
        # AvgPool2D
        if op_code == tflite.BuiltinOperator.AVERAGE_POOL_2D:
            layer_type = 'AveragePool2D'
            op_opt = op.BuiltinOptions()
            if op_opt is None:
                raise RuntimeError('AvgPool2D options is None')
            opt = tflite.Pool2DOptions()
            opt.Init(op_opt.Bytes, op_opt.Pos)
            params = [opt.FilterHeight(), opt.FilterWidth(), opt.StrideH(), opt.StrideW()]
        elif op_code == tflite.BuiltinOperator.MAX_POOL_2D:
            layer_type = 'MaxPool2D'
            op_opt = op.BuiltinOptions()
            if op_opt is None:
                raise RuntimeError('MaxPool2D options is None')
            opt = tflite.Pool2DOptions()
            opt.Init(op_opt.Bytes, op_opt.Pos)
            if opt.Padding() == tflite.Padding.SAME:
                raise NotImplementedError('SAME padding is not supported')
            if opt.FusedActivationFunction() != tflite.ActivationFunctionType.NONE:
                raise NotImplementedError('Fused activation is not supported')
            params = [opt.FilterHeight(), opt.FilterWidth(), opt.StrideH(), opt.StrideW()]
        # FIXME: hack for Keras... not sure why this isn't being converted properly
        elif op_code == tflite.BuiltinOperator.CUSTOM:
            layer_type = 'Conv2D'
            activation = 0
            # Reorder weights to (out_channels, h, w, in_channels) and quantize.
            weights = self.interpreter.get_tensor(op.Inputs(1))
            weights = np.transpose(weights, (3, 0, 1, 2))
            weights = (weights * self.scale_factor).round().astype(np.int64)
            adjusted_tensors[op.Inputs(1)] = weights
            params = [0, 1, activation, 1, 1]
        # Conv2D
        elif op_code == tflite.BuiltinOperator.CONV_2D:
            layer_type = 'Conv2D'
            op_opt = op.BuiltinOptions()
            if op_opt is None:
                raise RuntimeError('Conv2D options is None')
            opt = tflite.Conv2DOptions()
            opt.Init(op_opt.Bytes, op_opt.Pos)
            if opt.DilationHFactor() != 1 or opt.DilationWFactor() != 1:
                raise NotImplementedError('Dilation is not supported')
            if opt.FusedActivationFunction() not in self.valid_activations():
                # NOTE(review): message lacks the f prefix, so {op_idx} is
                # printed literally — likely meant to be an f-string.
                raise NotImplementedError('Unsupported activation function at layer {op_idx}')
            # 0 is Conv2D
            params = \
                [0] + \
                [opt.Padding()] + \
                [opt.FusedActivationFunction()] + \
                [opt.StrideH(), opt.StrideW()]
        # DepthwiseConv2D
        elif op_code == tflite.BuiltinOperator.DEPTHWISE_CONV_2D:
            layer_type = 'Conv2D'
            op_opt = op.BuiltinOptions()
            if op_opt is None:
                raise RuntimeError('DepthwiseConv2D options is None')
            opt = tflite.DepthwiseConv2DOptions()
            opt.Init(op_opt.Bytes, op_opt.Pos)
            if opt.DilationHFactor() != 1 or opt.DilationWFactor() != 1:
                raise NotImplementedError('Dilation is not supported')
            if opt.FusedActivationFunction() not in self.valid_activations():
                # NOTE(review): same missing f prefix as the Conv2D branch.
                raise NotImplementedError('Unsupported activation function at layer {op_idx}')
            # 1 is DepthwiseConv2D
            params = \
                [1] + \
                [opt.Padding()] + \
                [opt.FusedActivationFunction()] + \
                [opt.StrideH(), opt.StrideW()]
        # Fully connected
        elif op_code == tflite.BuiltinOperator.FULLY_CONNECTED:
            layer_type = 'FullyConnected'
            op_opt = op.BuiltinOptions()
            if op_opt is None:
                raise RuntimeError('Fully connected options is None')
            opt = tflite.FullyConnectedOptions()
            opt.Init(op_opt.Bytes, op_opt.Pos)
            if opt.FusedActivationFunction() not in self.valid_activations():
                raise NotImplementedError(f'Unsupported activation function at layer {op_idx}')
            params = [opt.FusedActivationFunction()]
        elif op_code == tflite.BuiltinOperator.BATCH_MATMUL:
            layer_type = 'BatchMatMul'
            op_opt = op.BuiltinOptions()
            if op_opt is None:
                raise RuntimeError('BatchMatMul options is None')
            opt = tflite.BatchMatMulOptions()
            opt.Init(op_opt.Bytes, op_opt.Pos)
            if opt.AdjX() is True: raise NotImplementedError('AdjX is not supported')
            params = [int(opt.AdjX()), int(opt.AdjY())]
        ## Arithmetic
        # Add
        elif op_code == tflite.BuiltinOperator.ADD:
            layer_type, params = self._convert_add(op, generated_tensor_idxes)
        # Mul
        elif op_code == tflite.BuiltinOperator.MUL:
            layer_type = 'Mul'
            params = []
        # Sub
        elif op_code == tflite.BuiltinOperator.SUB:
            sub_val = interpreter.get_tensor(op.Inputs(1))
            # TODO: this is a bit of a hack
            # A subtrahend containing the magic value 10000 is treated as an
            # attention mask rather than a true subtraction.
            if np.any(np.isin(sub_val, 10000)):
                layer_type = 'MaskNegInf'
                mask = (sub_val == 10000).astype(np.int64)
                params = [len(mask.shape)] + list(mask.shape)
                params += mask.flatten().tolist()
            else:
                layer_type = 'Sub'
                params = []
        # Div
        elif op_code == tflite.BuiltinOperator.DIV:
            # Implement division as multiplication by the inverse
            layer_type = 'Mul'
            div_val = interpreter.get_tensor(op.Inputs(1))
            if type(div_val) != np.float32: raise NotImplementedError('Only support one divisor')
            adjusted_tensors[op.Inputs(1)] = np.array([(self.scale_factor / div_val).round().astype(np.int64)])
            params = []
        # Pad
        elif op_code == tflite.BuiltinOperator.PAD:
            layer_type = 'Pad'
            tensor_idx = op.Inputs(1)
            tensor = interpreter.get_tensor(tensor_idx).flatten().astype(np.int64)
            params = tensor.tolist()
        # Softmax
        elif op_code == tflite.BuiltinOperator.SOFTMAX:
            layer_type = 'Softmax'
            # TODO: conditionally determine whether or not to subtract the max
            # It should depend on the input to the softmax
            # Reuse the mask params of a preceding MaskNegInf layer if present.
            if layers[-1]['layer_type'] == 'MaskNegInf':
                params = layers[-1]['params']
            elif layers[-2]['layer_type'] == 'MaskNegInf':
                params = layers[-2]['params']
                params = [params[0] - 1] + params[2:]
            else:
                params = []
        # Mean
        elif op_code == tflite.BuiltinOperator.MEAN:
            layer_type = 'Mean'
            inp_shape = interpreter.get_tensor(op.Inputs(0)).shape
            mean_idxes = interpreter.get_tensor(op.Inputs(1)).flatten().astype(np.int64)
            if len(mean_idxes) + 2 != len(inp_shape):
                raise NotImplementedError(f'Only mean over all but one axis is supported: {op_idx}')
            params = mean_idxes.tolist()
        elif op_code == tflite.BuiltinOperator.SQUARE:
            layer_type = 'Square'
            params = []
        # Squared difference
        elif op_code == tflite.BuiltinOperator.SQUARED_DIFFERENCE:
            layer_type = 'SquaredDifference'
            params = []
        # Pointwise
        elif op_code == tflite.BuiltinOperator.RSQRT:
            layer_type = 'Rsqrt'
            params = []
        elif op_code == tflite.BuiltinOperator.LOGISTIC:
            layer_type = 'Logistic'
            params = []
        elif op_code == tflite.BuiltinOperator.TANH:
            layer_type = 'Tanh'
            params = []
        elif op_code == tflite.BuiltinOperator.POW:
            layer_type = 'Pow'
            power = interpreter.get_tensor(op.Inputs(1)).flatten().astype(np.float32)
            if power != 3.: raise NotImplementedError(f'Only support power 3')
            power = power.round().astype(np.int64)
            if len(power) != 1: raise NotImplementedError(f'Only scalar power is supported: {op_idx}')
            params = power.tolist()
        # The following are no-ops in the sense that they don't change the tensor
        # However, we need to pass along the right tensors
        # The param says which input to pass along
        elif op_code == tflite.BuiltinOperator.SHAPE:
            layer_type = 'Noop'
            params = [0]
        elif op_code == tflite.BuiltinOperator.GATHER:
            layer_type = 'Noop'
            params = [0]
        elif op_code == tflite.BuiltinOperator.REDUCE_PROD:
            # TODO: not sure if this is in general a no-op
            layer_type = 'Noop'
            params = [0]
        elif op_code == tflite.BuiltinOperator.STRIDED_SLICE:
            # FIXME: this is not in general a no-op
            layer_type = 'Noop'
            params = [0]
        elif op_code == tflite.BuiltinOperator.BROADCAST_ARGS:
            layer_type = 'Noop'
            params = [0]
        elif op_code == tflite.BuiltinOperator.BROADCAST_TO:
            layer_type = 'Noop'
            params = [0]
        ## Shape
        elif op_code == tflite.BuiltinOperator.RESHAPE:
            layer_type = 'Reshape'
            params = []
        elif op_code == tflite.BuiltinOperator.TRANSPOSE:
            layer_type = 'Transpose'
            # Params are [input shape..., permutation...].
            params = get_shape(interpreter, op.Inputs(0)) + interpreter.get_tensor(op.Inputs(1)).flatten().astype(np.int64).tolist()
        elif op_code == tflite.BuiltinOperator.CONCATENATION:
            # FIXME: This is not in general a no-op
            layer_type = 'Concatenation'
            op_opt = op.BuiltinOptions()
            if op_opt is None:
                raise RuntimeError('Concatenation options is None')
            opt = tflite.ConcatenationOptions()
            opt.Init(op_opt.Bytes, op_opt.Pos)
            params = [opt.Axis()]
        elif op_code == tflite.BuiltinOperator.PACK:
            layer_type = 'Pack'
            op_opt = op.BuiltinOptions()
            if op_opt is None:
                raise RuntimeError('Pack options is None')
            opt = tflite.PackOptions()
            opt.Init(op_opt.Bytes, op_opt.Pos)
            params = [opt.Axis()]
            if params[0] > 1: raise NotImplementedError(f'Only axis=0,1 supported at layer {op_idx}')
        elif op_code == tflite.BuiltinOperator.SPLIT:
            layer_type = 'Split'
            op_opt = op.BuiltinOptions()
            if op_opt is None:
                raise RuntimeError('Split options is None')
            opt = tflite.SplitOptions()
            opt.Init(op_opt.Bytes, op_opt.Pos)
            # For SPLIT, input 0 is the axis tensor and input 1 is the data.
            axis = interpreter.get_tensor(op.Inputs(0)).flatten().astype(np.int64)[0]
            num_splits = opt.NumSplits()
            inp = interpreter.get_tensor(op.Inputs(1))
            if inp.shape[axis] % num_splits != 0:
                raise NotImplementedError(f'Only equal splits supported at layer {op_idx}')
            params = [int(axis), num_splits]
        elif op_code == tflite.BuiltinOperator.SLICE:
            layer_type = 'Slice'
            begin = interpreter.get_tensor(op.Inputs(1)).flatten().astype(np.int64).tolist()
            size = interpreter.get_tensor(op.Inputs(2)).flatten().astype(np.int64).tolist()
            params = begin + size
        elif op_code == tflite.BuiltinOperator.RESIZE_NEAREST_NEIGHBOR:
            layer_type = 'ResizeNearestNeighbor'
            op_opt = op.BuiltinOptions()
            if op_opt is None:
                raise RuntimeError('ResizeNearestNeighbor options is None')
            opt = tflite.ResizeNearestNeighborOptions()
            opt.Init(op_opt.Bytes, op_opt.Pos)
            if opt.AlignCorners():
                raise NotImplementedError(f'Align corners not supported at layer {op_idx}')
            if not opt.HalfPixelCenters():
                raise NotImplementedError(f'Half pixel centers not supported at layer {op_idx}')
            # Can take the out shape directly from the tensor
            params = [int(opt.AlignCorners()), int(opt.HalfPixelCenters())]
        # Not implemented
        else:
            # Resolve the op code to its symbolic name for the error message.
            op_name = None
            for attr in dir(tflite.BuiltinOperator):
                if not attr.startswith('__'):
                    if getattr(tflite.BuiltinOperator, attr) == op_code:
                        op_name = attr
            raise NotImplementedError('Unsupported operator at layer {}: {}, {}'.format(op_idx, op_code, op_name))
        inp_idxes = get_inputs(op)
        # FIXME: hack for testing
        # Hard-coded layer indices whose Rsqrt is known to overflow; the mask
        # tells the circuit which outputs to special-case.
        rsqrt_overflows = [99, 158, 194, 253, 289, 348]
        if op_idx in rsqrt_overflows:
            if op_code == tflite.BuiltinOperator.RSQRT:
                mask = [0, 1]
            else:
                mask = []
        else:
            mask = []
        layers.append({
            'layer_type': layer_type,
            'inp_idxes': inp_idxes,
            'inp_shapes': [get_shape(interpreter, inp_idx) for inp_idx in inp_idxes],
            'out_idxes': [op.Outputs(i) for i in range(op.OutputsLength())],
            'out_shapes': [get_shape(interpreter, op.Outputs(i)) for i in range(op.OutputsLength())],
            'params': params,
            'mask': mask,
        })
    print(layers)
    print()
    # Get tensors
    print('keep tensors:', keep_tensors)
    tensors = []
    for tensor_idx in range(graph.TensorsLength()):
        if tensor_idx not in keep_tensors:
            continue
        tensor = graph.Tensors(tensor_idx)
        if tensor is None:
            raise NotImplementedError('Tensor is None')
        # Runtime-generated tensors are not serialized as constants.
        if tensor_idx in generated_tensor_idxes:
            print(f'skipping generated tensor: {format(tensor_idx)}, {tensor.Name()}')
            continue
        shape = []
        for i in range(tensor.ShapeLength()):
            shape.append(int(tensor.Shape(i)))
        if shape == []:
            shape = [1]
        tensor_data = interpreter.get_tensor(tensor_idx)
        # Quantize floats by the global scale factor; widen ints to int64.
        if tensor.Type() == tflite.TensorType.FLOAT32:
            tensor_data = (tensor_data * self.scale_factor).round().astype(np.int64)
        elif tensor.Type() == tflite.TensorType.INT32:
            tensor_data = tensor_data.astype(np.int64)
        elif tensor.Type() == tflite.TensorType.INT64:
            continue
        else:
            raise NotImplementedError('Unsupported tensor type: {}'.format(tensor.Type()))
        # Overridden tensors (rescaled weights, inverted divisors) win.
        if tensor_idx in adjusted_tensors:
            tensor_data = adjusted_tensors[tensor_idx]
            shape = tensor_data.shape
        tensors.append({
            'idx': tensor_idx,
            'shape': shape,
            'data': tensor_data.flatten().tolist(),
        })
        # print(tensor_idx, tensor.Type(), tensor.Name(), tensors[-1]['shape'])
        # print(np.abs(tensor_data).max())
    commit_before = []
    commit_after = []
    if self.commit:
        # Commit to weights and inputs before proving, outputs after.
        input_tensors = [inp['index'] for inp in input_details]
        weight_tensors = [tensor['idx'] for tensor in tensors if tensor['idx'] not in input_tensors]
        commit_before = [weight_tensors, input_tensors]
        output_tensors = [out['index'] for out in output_details]
        commit_after = [output_tensors]
    out_idxes = layers[-1]['out_idxes'] if self.expose_output else []
    d = {
        'global_sf': self.scale_factor,
        'k': self.k,
        'num_cols': self.num_cols,
        'num_random': self.num_randoms,
        'inp_idxes': [inp['index'] for inp in input_details],
        # 'out_idxes': [out['index'] for out in output_details],
        'out_idxes': out_idxes,
        'layers': layers,
        'tensors': tensors,
        'use_selectors': self.use_selectors,
        'commit_before': commit_before,
        'commit_after': commit_after,
    }
    print()
    print(d['layers'][-1])
    # d['out_idxes'] = [14]
    print(d.keys())
    print(d['out_idxes'])
    return d
def to_msgpack(self, start_layer, end_layer, use_selectors=True):
    """Serialize the converted model into two msgpack blobs.

    Returns a (model_packed, config_packed) pair: the model blob contains
    the full dict including tensor data; the config blob is the same dict
    with 'tensors' emptied.

    NOTE(review): the `use_selectors` parameter is accepted but never used
    here — `to_dict` reads `self.use_selectors` instead. Confirm whether
    this parameter should be wired through or removed.
    """
    d = self.to_dict(start_layer, end_layer)
    model_packed = msgpack.packb(d, use_bin_type=True)
    # The config is the model dict minus the (potentially large) tensor data.
    d['tensors'] = []
    config_packed = msgpack.packb(d, use_bin_type=True)
    return model_packed, config_packed
def main():
    """CLI entry point: convert a TFLite model into packed model/config msgpack files."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--model', type=str, required=True)
    arg_parser.add_argument('--model_output', type=str, required=True)
    arg_parser.add_argument('--config_output', type=str, required=True)
    arg_parser.add_argument('--scale_factor', type=int, default=2**16)
    arg_parser.add_argument('--k', type=int, default=19)
    arg_parser.add_argument('--eta', type=float, default=0.001)
    arg_parser.add_argument('--num_cols', type=int, default=6)
    arg_parser.add_argument('--use_selectors', action=argparse.BooleanOptionalAction, required=False, default=True)
    arg_parser.add_argument('--commit', action=argparse.BooleanOptionalAction, required=False, default=False)
    arg_parser.add_argument('--expose_output', action=argparse.BooleanOptionalAction, required=False, default=True)
    arg_parser.add_argument('--start_layer', type=int, default=0)
    arg_parser.add_argument('--end_layer', type=int, default=10000)
    arg_parser.add_argument('--num_randoms', type=int, default=20001)
    opts = arg_parser.parse_args()
    converter = Converter(
        opts.model,
        opts.scale_factor,
        opts.k,
        opts.num_cols,
        opts.num_randoms,
        opts.use_selectors,
        opts.commit,
        opts.expose_output,
    )
    model_packed, config_packed = converter.to_msgpack(
        start_layer=opts.start_layer,
        end_layer=opts.end_layer,
    )
    if model_packed is None:
        raise Exception('Failed to convert model')
    # Write both artifacts: full model (with tensors) and config (without).
    for out_path, payload in ((opts.model_output, model_packed), (opts.config_output, config_packed)):
        with open(out_path, 'wb') as f:
            f.write(payload)


if __name__ == '__main__':
    main()
================================================
FILE: python/input_converter.py
================================================
import argparse
import ast
import numpy as np
import msgpack
def main():
    """Convert comma-separated .npy input files into a single msgpack tensor list.

    Shapes are resolved from the model config's layer descriptions, and the
    float data is quantized by the model's global scale factor.
    """
    cli = argparse.ArgumentParser()
    cli.add_argument('--model_config', type=str, required=True)
    cli.add_argument('--inputs', type=str, required=True)
    cli.add_argument('--output', type=str, required=True)
    args = cli.parse_args()
    input_paths = args.inputs.split(',')
    with open(args.model_config, 'rb') as f:
        model_config = msgpack.unpackb(f.read())
    input_idxes = model_config['inp_idxes']
    scale_factor = model_config['global_sf']
    # Resolve each input tensor's shape by scanning the layer descriptions.
    input_shapes = [[0] for _ in input_idxes]
    for layer in model_config['layers']:
        for layer_inp_idx, layer_shape in zip(layer['inp_idxes'], layer['inp_shapes']):
            for position, inp_idx in enumerate(input_idxes):
                if inp_idx == layer_inp_idx:
                    input_shapes[position] = layer_shape
    tensors = []
    for path, shape, idx in zip(input_paths, input_shapes, input_idxes):
        # Quantize to fixed point with the model's global scale factor.
        scaled = (np.load(path).reshape(shape) * scale_factor).round().astype(np.int64)
        tensors.append({
            'idx': idx,
            'shape': shape,
            'data': scaled.flatten().tolist(),
        })
    with open(args.output, 'wb') as f:
        f.write(msgpack.packb(tensors, use_bin_type=True))


if __name__ == '__main__':
    main()
================================================
FILE: python/training_converter.py
================================================
# A converter for training data
# Performs the conversion npy -> msgpack
# TODO: Ensure that training works with models that take in multiple input shapes
#
# Shortcut:
# `python3 python/training_converter.py --input_shapes 7,7,320 --input_idxes 1,0 --output training_data/inputs.msgpack --labels_output training_data/labels.msgpack`
#
import argparse
import ast
import numpy as np
import msgpack
import os
# Number of training examples to pack into the output file.
NUM_LOADS = 1
# Fixed-point scale factor (2^17) applied to float data before rounding.
SF = 1 << 17
def main():
    """Pack .npy training examples (and one-hot labels) into a msgpack file.

    Reads up to NUM_LOADS examples from TRAINING_DIRECTORY. Each file is
    named '<label>.npy'; its data is quantized by SF and emitted as tensor
    idx 0, followed by a one-hot label (scaled by SF) as tensor idx 11.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_shapes', type=str, required=True)
    parser.add_argument('--output', type=str, required=True)
    TRAINING_DIRECTORY = './testing/data/pre_last_conv/flowers/train'
    args = parser.parse_args()
    input_shape = ast.literal_eval(args.input_shapes)
    loaded = 0
    tensors = []
    # Sorted for a deterministic selection of which NUM_LOADS files are used.
    # (Fixes the original bug where the loop variable was ignored and
    # '0.npy' was loaded on every iteration.)
    for file_name in sorted(os.listdir(TRAINING_DIRECTORY)):
        if loaded == NUM_LOADS:
            break
        # File names encode the class label, e.g. '3.npy' -> label 3.
        label = int(file_name[:-4])
        data_array = np.load(os.path.join(TRAINING_DIRECTORY, file_name))
        for idx in range(data_array.shape[0]):
            tensors.append({
                'idx': 0,
                'shape': input_shape,
                'data': [int(x) for x in (data_array[idx] * SF).round().astype(np.int64).flatten()],
            })
            # Represent the label as a one-hot encoding scaled by SF.
            one_hot = np.zeros(102)
            one_hot[label] = SF
            tensors.append({
                'idx': 11,
                'shape': (1, 102),
                'data': [int(x) for x in one_hot],
            })
            loaded += 1
            if loaded == NUM_LOADS:
                break
    packed_inputs = msgpack.packb(tensors, use_bin_type=True)
    with open(args.output, 'wb') as f:
        f.write(packed_inputs)


if __name__ == '__main__':
    main()
================================================
FILE: rustfmt.toml
================================================
tab_spaces = 2
max_width = 100
================================================
FILE: src/bin/test_circuit.rs
================================================
use halo2_proofs::{dev::MockProver, halo2curves::bn256::Fr};
use zkml::{
model::ModelCircuit,
utils::{
helpers::get_public_values,
loader::{load_model_msgpack, ModelMsgpack},
},
};
/// Runs the model circuit twice under the mock prover: a first pass with an
/// empty instance to collect the public values, then a real verification pass.
fn main() {
  let argv: Vec<String> = std::env::args().collect();
  let config_fname = argv.get(1).expect("config file path");
  let inp_fname = argv.get(2).expect("input file path");

  let config: ModelMsgpack = load_model_msgpack(config_fname, inp_fname);
  let circuit = ModelCircuit::<Fr>::generate_from_file(config_fname, inp_fname);

  let k: u32 = config.k.try_into().unwrap();
  // First run only populates the public values as a side effect.
  let _prover = MockProver::run(k, &circuit, vec![vec![]]).unwrap();
  let public_vals = get_public_values();
  let prover = MockProver::run(k, &circuit, vec![public_vals]).unwrap();
  assert_eq!(prover.verify(), Ok(()));
}
================================================
FILE: src/bin/time_circuit.rs
================================================
use halo2_proofs::halo2curves::{bn256::Fr, pasta::Fp};
use zkml::{
model::ModelCircuit,
utils::{proving_ipa::time_circuit_ipa, proving_kzg::time_circuit_kzg},
};
/// Times proving/verification of the model circuit with either the KZG or
/// the IPA commitment scheme, selected by the third CLI argument.
fn main() {
  let mut argv = std::env::args().skip(1);
  let config_fname = argv.next().expect("config file path");
  let inp_fname = argv.next().expect("input file path");
  let kzg_or_ipa = argv.next().expect("kzg or ipa");

  match kzg_or_ipa.as_str() {
    "kzg" => {
      let circuit = ModelCircuit::<Fr>::generate_from_file(&config_fname, &inp_fname);
      time_circuit_kzg(circuit);
    }
    "ipa" => {
      let circuit = ModelCircuit::<Fp>::generate_from_file(&config_fname, &inp_fname);
      time_circuit_ipa(circuit);
    }
    _ => panic!("Must specify kzg or ipa"),
  }
}
================================================
FILE: src/bin/verify_circuit.rs
================================================
use halo2_proofs::halo2curves::bn256::Fr;
use zkml::{
model::ModelCircuit,
utils::{loader::load_config_msgpack, proving_kzg::verify_circuit_kzg},
};
/// Verifies a previously generated KZG proof against a serialized
/// verification key and public values file.
fn main() {
  let mut argv = std::env::args().skip(1);
  let config_fname = argv.next().expect("config file path");
  let vkey_fname = argv.next().expect("verification key file path");
  let proof_fname = argv.next().expect("proof file path");
  let public_vals_fname = argv.next().expect("public values file path");
  let kzg_or_ipa = argv.next().expect("kzg or ipa");

  match kzg_or_ipa.as_str() {
    "kzg" => {
      let config = load_config_msgpack(&config_fname);
      let circuit = ModelCircuit::<Fr>::generate_from_msgpack(config, false);
      println!("Loaded configuration");
      verify_circuit_kzg(circuit, &vkey_fname, &proof_fname, &public_vals_fname);
    }
    // Serialization of the verification key doesn't seem to be supported for IPA
    "ipa" => panic!("Not implemented"),
    _ => panic!("Must specify kzg or ipa"),
  }
}
================================================
FILE: src/bin/verify_wav.rs
================================================
use std::fs::File;
use halo2_proofs::{dev::MockProver, halo2curves::bn256::Fr};
use zkml::{
model::ModelCircuit,
utils::{
helpers::get_public_values,
loader::{load_config_msgpack, ModelMsgpack, TensorMsgpack},
},
};
/// Reads a 16-bit PCM WAV file, loads its samples as the model's single
/// input tensor, and runs the circuit in the mock prover, printing the
/// resulting public values (which include the commitment to the audio).
fn main() {
  let config_fname = std::env::args().nth(1).expect("config file path");
  let wav_fname = std::env::args().nth(2).expect("wav file path");

  let mut wav_file = File::open(wav_fname).unwrap();
  let (_header, data) = wav::read(&mut wav_file).unwrap();
  // Only 16-bit samples are supported.
  let data = match data {
    wav::BitDepth::Sixteen(data) => data,
    _ => panic!("Unsupported bit depth"),
  };
  let data: Vec<i64> = data.iter().map(|x| *x as i64).collect();

  // Override the base config: the WAV samples become tensor 0 (the only
  // input), no layers are run, and the commitment is over that tensor.
  let base_config = load_config_msgpack(&config_fname);
  let config = ModelMsgpack {
    tensors: vec![TensorMsgpack {
      idx: 0,
      shape: vec![1, data.len().try_into().unwrap()],
      data, // field shorthand; was the redundant `data: data`
    }],
    inp_idxes: vec![0],
    out_idxes: vec![],
    layers: vec![],
    commit_before: Some(vec![]),
    commit_after: Some(vec![vec![0]]),
    ..base_config
  };
  println!("Config: {:?}", config);

  let k = config.k;
  let circuit = ModelCircuit::<Fr>::generate_from_msgpack(config, false);
  // The mock-prover pass populates the public values as a side effect.
  let _prover = MockProver::run(k.try_into().unwrap(), &circuit, vec![vec![]]).unwrap();
  let public_vals: Vec<Fr> = get_public_values();
  println!("Public values: {:?}", public_vals);
}
================================================
FILE: src/commitments/commit.rs
================================================
use std::{collections::HashMap, rc::Rc};
use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};
use crate::{gadgets::gadget::GadgetConfig, layers::layer::CellRc};
/// Interface for committing to a list of assigned cells inside the circuit.
pub trait Commit<F: PrimeField> {
  /// Commits to `values` with the given `blinding` cell, returning the cells
  /// that hold the commitment. `constants` maps small integers to their
  /// pre-assigned constant cells (implementations use them for padding).
  fn commit(
    &self,
    layouter: impl Layouter<F>,
    gadget_config: Rc<GadgetConfig>,
    constants: &HashMap<i64, CellRc<F>>,
    values: &Vec<CellRc<F>>,
    blinding: CellRc<F>,
  ) -> Result<Vec<CellRc<F>>, Error>;
}
================================================
FILE: src/commitments/packer.rs
================================================
use std::{
cmp::{max, min},
collections::{BTreeMap, HashMap},
marker::PhantomData,
rc::Rc,
};
use halo2_proofs::{
circuit::{AssignedCell, Layouter, Value},
halo2curves::ff::PrimeField,
plonk::{ConstraintSystem, Error, Expression},
poly::Rotation,
};
use ndarray::{Array, IxDyn};
use crate::{
gadgets::gadget::{GadgetConfig, GadgetType},
layers::layer::{AssignedTensor, CellRc},
};
const NUM_BITS_PER_FIELD_ELEM: usize = 254;
/// Configuration for packing many small field elements into single field
/// elements (to reduce the number of cells that must be committed to).
pub struct PackerConfig<F: PrimeField> {
  // Bit width of each packed element (before the +shift recentering).
  pub num_bits_per_elem: usize,
  // How many elements are packed into one output field element.
  pub num_elem_per_packed: usize,
  // How many packed outputs are produced per advice row.
  pub num_packed_per_row: usize,
  // Precomputed powers of 2^num_bits_per_elem used as packing weights.
  pub exponents: Vec<F>,
  _marker: PhantomData<F>,
}
/// Chip that assigns/copies values into rows and constrains their packed sums.
pub struct PackerChip<F: PrimeField> {
  pub config: PackerConfig<F>,
}
impl<F: PrimeField> PackerChip<F> {
/// Returns the packing weights `[1, B, B^2, ...]` where `B = 2^num_bits_per_elem`.
/// Always returns at least one element (matching the original behavior for
/// `num_exponents == 0`).
pub fn get_exponents(num_bits_per_elem: usize, num_exponents: usize) -> Vec<F> {
  let base = F::from(1 << num_bits_per_elem);
  let mut exponents = Vec::with_capacity(num_exponents);
  let mut cur = F::ONE;
  exponents.push(cur);
  for _ in 1..num_exponents {
    cur = cur * base;
    exponents.push(cur);
  }
  exponents
}
pub fn construct(num_bits_per_elem: usize, gadget_config: &GadgetConfig) -> PackerConfig<F> {
let columns = &gadget_config.columns;
let num_elem_per_packed = if NUM_BITS_PER_FIELD_ELEM / num_bits_per_elem > columns.len() - 1 {
columns.len() - 1
} else {
// TODO: for many columns, pack many in a single row
NUM_BITS_PER_FIELD_ELEM / num_bits_per_elem
};
println!("column len: {}", columns.len());
println!("num_bits_per_elem: {}", num_bits_per_elem);
println!("NUM_BITS_PER_FIELD_ELEM: {}", NUM_BITS_PER_FIELD_ELEM);
println!("num_elem_per_packed: {}", num_elem_per_packed);
let num_packed_per_row = max(
1,
columns.len() / (num_elem_per_packed * (num_bits_per_elem + 1)),
);
println!("num_packed_per_row: {}", num_packed_per_row);
let exponents = Self::get_exponents(num_bits_per_elem, num_elem_per_packed);
let config = PackerConfig {
num_bits_per_elem,
num_elem_per_packed,
num_packed_per_row,
exponents,
_marker: PhantomData,
};
config
}
/// Registers the packing gate and range-check lookups with the constraint
/// system and records the packer's selector in the returned `GadgetConfig`.
///
/// The gate enforces, for each pack in a row, that
/// `sum_j (inp_j + shift) * B^j == outp`, where `shift = 2^(bits-1)`
/// recenters signed values into the non-negative range and `B = 2^bits`.
pub fn configure(
  meta: &mut ConstraintSystem<F>,
  packer_config: PackerConfig<F>,
  gadget_config: GadgetConfig,
) -> GadgetConfig {
  let selector = meta.complex_selector();
  let columns = gadget_config.columns;
  let lookup = gadget_config.tables.get(&GadgetType::InputLookup).unwrap()[0];

  let exponents = &packer_config.exponents;
  let num_bits_per_elem = packer_config.num_bits_per_elem;
  // Shift recenters signed inputs: x + 2^(bits-1) is non-negative.
  let shift_val = 1 << (num_bits_per_elem - 1);
  let shift_val = Expression::Constant(F::from(shift_val as u64));

  meta.create_gate("packer", |meta| {
    let s = meta.query_selector(selector);
    let mut constraints = vec![];
    for i in 0..packer_config.num_packed_per_row {
      // Each pack uses num_elem_per_packed input columns plus one output column.
      let offset = i * (packer_config.num_elem_per_packed + 1);
      let inps = columns[offset..offset + packer_config.num_elem_per_packed]
        .iter()
        .map(|col| meta.query_advice(*col, Rotation::cur()))
        .collect::<Vec<_>>();
      let outp = meta.query_advice(
        columns[offset + packer_config.num_elem_per_packed],
        Rotation::cur(),
      );
      // Weighted sum of shifted inputs must equal the packed output.
      let res = inps
        .into_iter()
        .zip(exponents.iter())
        .map(|(inp, exp)| (inp + shift_val.clone()) * (*exp))
        .fold(Expression::Constant(F::ZERO), |acc, prod| acc + prod);
      constraints.push(s.clone() * (res - outp));
      // constraints.push(s.clone() * Expression::Constant(F::zero()));
    }

    constraints
  });

  // Ensure that the weights/inputs are in the correct range
  for i in 0..packer_config.num_packed_per_row {
    let offset = i * (packer_config.num_elem_per_packed + 1);
    for j in 0..packer_config.num_elem_per_packed {
      meta.lookup("packer lookup", |meta| {
        let s = meta.query_selector(selector);
        let inp = meta.query_advice(columns[offset + j], Rotation::cur());
        // The shifted value must lie in the input lookup table's range.
        vec![(s * (inp + shift_val.clone()), lookup)]
      });
    }
  }

  let mut selectors = gadget_config.selectors;
  selectors.insert(GadgetType::Packer, vec![selector]);

  GadgetConfig {
    columns,
    selectors,
    ..gadget_config
  }
}
/// Copies one row's worth of already-assigned cells into the packer columns,
/// pads with the `zero` constant cell, and assigns the packed output cells.
/// Returns one packed cell per pack in the row.
pub fn copy_and_pack_row(
  &self,
  mut layouter: impl Layouter<F>,
  gadget_config: Rc<GadgetConfig>,
  cells: Vec<CellRc<F>>,
  zero: &AssignedCell<F, F>,
) -> Result<Vec<CellRc<F>>, Error> {
  let columns = &gadget_config.columns;
  let selector = gadget_config.selectors.get(&GadgetType::Packer).unwrap()[0];

  let num_bits_per_elem = gadget_config.num_bits_per_elem;
  // Same recentering shift as in `configure`: x + 2^(bits-1).
  let shift_val = 1 << (num_bits_per_elem - 1);
  let shift_val = F::from(shift_val as u64);

  let outp = layouter.assign_region(
    || "pack row",
    |mut region| {
      if gadget_config.use_selectors {
        selector.enable(&mut region, 0)?;
      }

      let mut packed = vec![];
      for i in 0..self.config.num_packed_per_row {
        let val_offset = i * self.config.num_elem_per_packed;
        let col_offset = i * (self.config.num_elem_per_packed + 1);

        // Copy this pack's chunk of cells into consecutive advice columns.
        let mut vals = cells
          [val_offset..min(val_offset + self.config.num_elem_per_packed, cells.len())]
          .iter()
          .enumerate()
          .map(|(i, x)| {
            x.copy_advice(|| "", &mut region, columns[col_offset + i], 0)
              .unwrap();
            x.value().copied()
          })
          .collect::<Vec<_>>();

        // Pad the remaining columns with the zero constant.
        // NOTE(review): this range is based on the TOTAL row length
        // (`cells.len()`), not this pack's chunk length — unlike
        // `assign_and_pack_row`, which pads per chunk. For
        // num_packed_per_row > 1 with a partial final chunk, no padding
        // would be produced here; confirm whether multi-pack rows ever
        // reach this path.
        let zero_copied = (cells.len()..self.config.num_elem_per_packed)
          .map(|i| {
            zero
              .copy_advice(|| "", &mut region, columns[col_offset + i], 0)
              .unwrap();
            zero.value().copied()
          })
          .collect::<Vec<_>>();
        vals.extend(zero_copied);

        // Compute the packed value: sum of (val + shift) * exponent.
        let res = vals.iter().zip(self.config.exponents.iter()).fold(
          Value::known(F::ZERO),
          |acc, (inp, exp)| {
            let res = acc + (*inp + Value::known(shift_val)) * Value::known(*exp);
            res
          },
        );
        let outp = region.assign_advice(
          || "",
          columns[col_offset + self.config.num_elem_per_packed],
          0,
          || res,
        )?;
        packed.push(Rc::new(outp));
      }

      Ok(packed)
    },
  )?;

  Ok(outp)
}
/// Assigns one row's worth of raw field values into the packer columns,
/// pads with the `zero` constant cell, and assigns the packed outputs.
/// Returns (packed cells, the newly assigned input cells).
pub fn assign_and_pack_row(
  &self,
  mut layouter: impl Layouter<F>,
  gadget_config: Rc<GadgetConfig>,
  values: Vec<&F>,
  zero: &AssignedCell<F, F>,
) -> Result<(Vec<CellRc<F>>, Vec<CellRc<F>>), Error> {
  let columns = &gadget_config.columns;
  let selector = gadget_config.selectors.get(&GadgetType::Packer).unwrap()[0];

  let num_bits_per_elem = gadget_config.num_bits_per_elem;
  // Same recentering shift as in `configure`: x + 2^(bits-1).
  let shift_val = 1 << (num_bits_per_elem - 1);
  let shift_val = F::from(shift_val as u64);

  let outp = layouter.assign_region(
    || "pack row",
    |mut region| {
      if gadget_config.use_selectors {
        selector.enable(&mut region, 0)?;
      }

      let mut packed = vec![];
      let mut assigned = vec![];
      for i in 0..self.config.num_packed_per_row {
        let val_offset = i * self.config.num_elem_per_packed;
        let col_offset = i * (self.config.num_elem_per_packed + 1);

        // `values` is shadowed by this pack's chunk of raw field values.
        let mut values = values
          [val_offset..min(val_offset + self.config.num_elem_per_packed, values.len())]
          .iter()
          .map(|x| **x)
          .collect::<Vec<_>>();

        // Assign the chunk into consecutive advice columns.
        let vals = values
          .iter()
          .enumerate()
          .map(|(i, x)| {
            let tmp = region
              .assign_advice(|| "", columns[col_offset + i], 0, || Value::known(*x))
              .unwrap();
            Rc::new(tmp)
          })
          .collect::<Vec<_>>();
        assigned.extend(vals);

        // Pad the remaining columns of this pack with the zero constant
        // (copy constraint to the shared zero cell; the local value list
        // gets a plain F::ZERO).
        let zero_vals = (values.len()..self.config.num_elem_per_packed)
          .map(|i| {
            zero
              .copy_advice(|| "", &mut region, columns[col_offset + i], 0)
              .unwrap();
            F::ZERO
          })
          .collect::<Vec<_>>();
        values.extend(zero_vals);

        // Compute the packed value: sum of (val + shift) * exponent.
        let res =
          values
            .iter()
            .zip(self.config.exponents.iter())
            .fold(F::ZERO, |acc, (inp, exp)| {
              let res = acc + (*inp + shift_val) * (*exp);
              res
            });
        let outp = region.assign_advice(
          || "",
          columns[col_offset + self.config.num_elem_per_packed],
          0,
          || Value::known(res),
        )?;
        packed.push(Rc::new(outp));
      }

      Ok((packed, assigned))
    },
  )?;

  Ok(outp)
}
/// Flattens all tensors (in ascending id order), assigns and packs them row
/// by row, then re-slices the assigned cells back into tensors of the
/// original shapes. Returns (assigned tensors, packed commitment cells).
pub fn assign_and_pack(
  &self,
  mut layouter: impl Layouter<F>,
  gadget_config: Rc<GadgetConfig>,
  constants: &HashMap<i64, CellRc<F>>,
  tensors: &BTreeMap<i64, Array<F, IxDyn>>,
) -> Result<(BTreeMap<i64, AssignedTensor<F>>, Vec<CellRc<F>>), Error> {
  // Flatten every tensor's values, BTreeMap iteration order (ascending id).
  let flat: Vec<&F> = tensors.values().flat_map(|t| t.iter()).collect();

  let zero = constants.get(&0).unwrap().clone();
  let row_len = self.config.num_packed_per_row * self.config.num_elem_per_packed;

  let mut packed = vec![];
  let mut assigned = vec![];
  for start in (0..flat.len()).step_by(row_len) {
    let end = min(start + row_len, flat.len());
    let (row_packed, row_assigned) = self
      .assign_and_pack_row(
        layouter.namespace(|| "pack row"),
        gadget_config.clone(),
        flat[start..end].to_vec(),
        zero.as_ref(),
      )
      .unwrap();
    packed.extend(row_packed);
    assigned.extend(row_assigned);
  }

  // Rebuild each tensor from its slice of the assigned cells.
  let mut assigned_tensors = BTreeMap::new();
  let mut cursor = 0;
  for (tensor_id, tensor) in tensors {
    let n = tensor.len();
    let cells = assigned[cursor..cursor + n].to_vec();
    assigned_tensors.insert(
      *tensor_id,
      Array::from_shape_vec(tensor.raw_dim(), cells).unwrap(),
    );
    cursor += n;
  }

  Ok((assigned_tensors, packed))
}
/// Flattens all already-assigned tensors (in ascending id order) and packs
/// them row by row via copy constraints. Returns the packed cells.
pub fn copy_and_pack(
  &self,
  mut layouter: impl Layouter<F>,
  gadget_config: Rc<GadgetConfig>,
  constants: &HashMap<i64, CellRc<F>>,
  tensors: &BTreeMap<i64, AssignedTensor<F>>,
) -> Result<Vec<CellRc<F>>, Error> {
  // Flatten every tensor's cells, BTreeMap iteration order (ascending id).
  let flat: Vec<CellRc<F>> = tensors
    .values()
    .flat_map(|t| t.iter().cloned())
    .collect();

  let zero = constants.get(&0).unwrap().clone();
  let row_len = self.config.num_packed_per_row * self.config.num_elem_per_packed;

  let mut packed = vec![];
  for start in (0..flat.len()).step_by(row_len) {
    let end = min(start + row_len, flat.len());
    let row_packed = self
      .copy_and_pack_row(
        layouter.namespace(|| "pack row"),
        gadget_config.clone(),
        flat[start..end].to_vec(),
        zero.as_ref(),
      )
      .unwrap();
    packed.extend(row_packed);
  }

  Ok(packed)
}
}
================================================
FILE: src/commitments/poseidon_commit.rs
================================================
use std::{collections::HashMap, marker::PhantomData, rc::Rc};
use halo2_gadgets::poseidon::{
primitives::{generate_constants, Absorbing, ConstantLength, Domain, Mds, Spec},
PaddedWord, PoseidonSpongeInstructions, Pow5Chip, Pow5Config, Sponge,
};
use halo2_proofs::{
circuit::Layouter,
halo2curves::ff::{FromUniformBytes, PrimeField},
plonk::{Advice, Column, ConstraintSystem, Error},
};
use crate::{gadgets::gadget::GadgetConfig, layers::layer::CellRc};
use super::commit::Commit;
// Poseidon sponge parameters: state width 3, rate 2.
pub const WIDTH: usize = 3;
pub const RATE: usize = 2;
// Message chunk length absorbed per permutation (evaluates to 4 here).
pub const L: usize = 8 - WIDTH - 1;
/// Chip that commits to a vector of assigned cells with a Poseidon sponge
/// (Pow5 implementation from `halo2_gadgets`).
#[derive(Clone, Debug)]
pub struct PoseidonCommitChip<
  F: PrimeField + Ord + FromUniformBytes<64>,
  const WIDTH: usize,
  const RATE: usize,
  const L: usize,
> {
  // Configuration of the underlying Pow5 Poseidon permutation.
  pub poseidon_config: Pow5Config<F, WIDTH, RATE>,
}
/// Zero-sized marker implementing the P128Pow5T3 Poseidon spec with a
/// configurable `SECURE_MDS` index.
#[derive(Debug)]
pub struct P128Pow5T3Gen<F: PrimeField, const SECURE_MDS: usize>(PhantomData<F>);
impl<F: PrimeField, const SECURE_MDS: usize> P128Pow5T3Gen<F, SECURE_MDS> {
  /// Creates the (zero-sized) spec marker.
  pub fn new() -> Self {
    // `PhantomData` is a unit struct; constructing it directly is clearer
    // than `PhantomData::default()`.
    P128Pow5T3Gen(PhantomData)
  }
}
impl<F: FromUniformBytes<64> + Ord, const SECURE_MDS: usize> Spec<F, 3, 2>
for P128Pow5T3Gen<F, SECURE_MDS>
{
  // 8 full rounds — the standard P128Pow5T3 parameterization.
  fn full_rounds() -> usize {
    8
  }

  // 56 partial rounds.
  fn partial_rounds() -> usize {
    56
  }

  // S-box: x^5.
  fn sbox(val: F) -> F {
    val.pow_vartime([5])
  }

  fn secure_mds() -> usize {
    SECURE_MDS
  }

  // Round constants and MDS matrices are derived from this spec's parameters.
  fn constants() -> (Vec<[F; 3]>, Mds<F, 3>, Mds<F, 3>) {
    generate_constants::<_, Self, 3, 2>()
  }
}
/// A Poseidon hash function, built around a sponge.
///
/// Thin wrapper exposing the sponge directly so `commit` can drive the
/// absorb/squeeze phases manually.
#[derive(Debug)]
pub struct MyHash<
  F: PrimeField,
  PoseidonChip: PoseidonSpongeInstructions<F, S, D, T, RATE>,
  S: Spec<F, T, RATE>,
  D: Domain<F, RATE>,
  const T: usize,
  const RATE: usize,
> {
  pub sponge: Sponge<F, PoseidonChip, S, Absorbing<PaddedWord<F>, RATE>, D, T, RATE>,
}
impl<F: PrimeField + Ord + FromUniformBytes<64>> PoseidonCommitChip<F, WIDTH, RATE, L> {
  /// Configures the Pow5 Poseidon chip: allocates fixed columns for the two
  /// round-constant banks and wires the permutation over `state`.
  pub fn configure(
    meta: &mut ConstraintSystem<F>,
    // TODO: ??
    _input: [Column<Advice>; L],
    state: [Column<Advice>; WIDTH],
    partial_sbox: Column<Advice>,
  ) -> PoseidonCommitChip<F, WIDTH, RATE, L> {
    // One fixed column per state element, per bank. Allocation order
    // (all of rc_a, then all of rc_b) is preserved.
    let mut round_constant_bank = || (0..WIDTH).map(|_| meta.fixed_column()).collect::<Vec<_>>();
    let rc_a = round_constant_bank();
    let rc_b = round_constant_bank();
    meta.enable_constant(rc_b[0]);
    let poseidon_config = Pow5Chip::configure::<P128Pow5T3Gen<F, 0>>(
      meta,
      state,
      partial_sbox,
      rc_a.try_into().unwrap(),
      rc_b.try_into().unwrap(),
    );
    PoseidonCommitChip { poseidon_config }
  }
}
impl<F: PrimeField + Ord + FromUniformBytes<64>> Commit<F>
for PoseidonCommitChip<F, WIDTH, RATE, L>
{
  /// Commits to `values` by absorbing them into a Poseidon sponge and
  /// squeezing a single output cell.
  ///
  /// The blinding cell is appended after the values and then reused to pad
  /// the message to a multiple of `L`.
  fn commit(
    &self,
    mut layouter: impl Layouter<F>,
    _gadget_config: Rc<GadgetConfig>,
    _constants: &HashMap<i64, CellRc<F>>,
    values: &Vec<CellRc<F>>,
    blinding: CellRc<F>,
  ) -> Result<Vec<CellRc<F>>, Error> {
    let chip = Pow5Chip::construct(self.poseidon_config.clone());
    let mut hasher: MyHash<F, Pow5Chip<F, 3, 2>, P128Pow5T3Gen<F, 0>, ConstantLength<L>, 3, 2> =
      Sponge::new(chip, layouter.namespace(|| "sponge"))
        .map(|sponge| MyHash { sponge })
        .unwrap();
    // Message = values || blinding, padded with the blinding cell up to a
    // multiple of L.
    let mut new_vals = values
      .iter()
      .map(|x| x.clone())
      .chain(vec![blinding.clone()])
      .collect::<Vec<_>>();
    while new_vals.len() % L != 0 {
      new_vals.push(blinding.clone());
    }
    // Absorb the message words, then the domain padding.
    // NOTE(review): the ConstantLength<L> domain padding is generated for
    // length L regardless of how many L-sized chunks were absorbed — confirm
    // this matches the intended domain separation.
    for (i, value) in new_vals
      .iter()
      .map(|x| PaddedWord::Message((**x).clone()))
      .chain(<ConstantLength<L> as Domain<F, RATE>>::padding(L).map(PaddedWord::Padding))
      .enumerate()
    {
      hasher
        .sponge
        .absorb(layouter.namespace(|| format!("absorb {}", i)), value)
        .unwrap();
    }
    // Switch the sponge to squeezing mode and take one output element.
    let outp = hasher
      .sponge
      .finish_absorbing(layouter.namespace(|| "finish absorbing"))
      .unwrap()
      .squeeze(layouter.namespace(|| "squeeze"))
      .unwrap();
    let outp = Rc::new(outp);
    Ok(vec![outp])
  }
}
================================================
FILE: src/commitments.rs
================================================
pub mod commit;
pub mod packer;
pub mod poseidon_commit;
================================================
FILE: src/gadgets/add_pairs.rs
================================================
use std::{marker::PhantomData, rc::Rc};
use halo2_proofs::{
circuit::{AssignedCell, Layouter, Region},
halo2curves::ff::PrimeField,
plonk::{ConstraintSystem, Error},
poly::Rotation,
};
use super::gadget::{Gadget, GadgetConfig, GadgetType};
type AddPairsConfig = GadgetConfig;
/// Gadget performing element-wise addition of two equal-length vectors of
/// assigned cells.
pub struct AddPairsChip<F: PrimeField> {
  config: Rc<AddPairsConfig>,
  _marker: PhantomData<F>,
}
impl<F: PrimeField> AddPairsChip<F> {
  /// Wraps a shared gadget configuration.
  pub fn construct(config: Rc<AddPairsConfig>) -> Self {
    Self {
      config,
      _marker: PhantomData,
    }
  }

  /// Each addition uses three advice cells: inp1, inp2, outp.
  pub fn num_cols_per_op() -> usize {
    3
  }

  /// Registers the pairwise-addition gate: for every 3-column group,
  /// `inp1 + inp2 = outp` when the selector is enabled.
  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {
    let selector = meta.selector();
    let columns = gadget_config.columns;
    meta.create_gate("add pair", |meta| {
      let s = meta.query_selector(selector);
      let mut constraints = vec![];
      for i in 0..columns.len() / Self::num_cols_per_op() {
        let offset = i * Self::num_cols_per_op();
        let inp1 = meta.query_advice(columns[offset], Rotation::cur());
        let inp2 = meta.query_advice(columns[offset + 1], Rotation::cur());
        let outp = meta.query_advice(columns[offset + 2], Rotation::cur());
        // Push the single constraint directly instead of appending a
        // temporary one-element Vec.
        constraints.push(s.clone() * (inp1 + inp2 - outp));
      }
      constraints
    });
    let mut selectors = gadget_config.selectors;
    selectors.insert(GadgetType::AddPairs, vec![selector]);
    GadgetConfig {
      columns,
      selectors,
      ..gadget_config
    }
  }
}
impl<F: PrimeField> Gadget<F> for AddPairsChip<F> {
  fn name(&self) -> String {
    "add pairs chip".to_string()
  }

  fn num_cols_per_op(&self) -> usize {
    Self::num_cols_per_op()
  }

  fn num_inputs_per_row(&self) -> usize {
    self.config.columns.len() / self.num_cols_per_op()
  }

  fn num_outputs_per_row(&self) -> usize {
    self.config.columns.len() / self.num_cols_per_op()
  }

  /// Assigns one row of pairwise additions: copies each (a, b) pair into its
  /// column group and witnesses `a + b` in the group's output column.
  fn op_row_region(
    &self,
    region: &mut Region<F>,
    row_offset: usize,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    _single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let lhs = &vec_inputs[0];
    let rhs = &vec_inputs[1];
    assert_eq!(lhs.len(), rhs.len());

    let columns = &self.config.columns;
    if self.config.use_selectors {
      let selector = self.config.selectors.get(&GadgetType::AddPairs).unwrap()[0];
      selector.enable(region, row_offset)?;
    }

    let mut outps = vec![];
    for (idx, (a, b)) in lhs.iter().zip(rhs.iter()).enumerate() {
      let base = idx * self.num_cols_per_op();
      let a = a.copy_advice(|| "", region, columns[base], row_offset)?;
      let b = b.copy_advice(|| "", region, columns[base + 1], row_offset)?;
      let sum = a.value().map(|x: &F| x.to_owned()) + b.value().map(|x: &F| x.to_owned());
      outps.push(region.assign_advice(|| "", columns[base + 2], row_offset, || sum)?);
    }
    Ok(outps)
  }

  /// Pads both operand vectors with the assigned zero cell to a whole number
  /// of rows, runs the aligned assignment, then truncates back to the
  /// original length.
  fn forward(
    &self,
    mut layouter: impl Layouter<F>,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let zero = &single_inputs[0];
    let mut lhs = vec_inputs[0].clone();
    let mut rhs = vec_inputs[1].clone();
    let initial_len = lhs.len();
    while lhs.len() % self.num_inputs_per_row() != 0 {
      lhs.push(zero);
      rhs.push(zero);
    }
    let padded = vec![lhs, rhs];
    let res = self.op_aligned_rows(
      layouter.namespace(|| format!("forward row {}", self.name())),
      &padded,
      single_inputs,
    )?;
    Ok(res[0..initial_len].to_vec())
  }
}
================================================
FILE: src/gadgets/adder.rs
================================================
use std::{marker::PhantomData, rc::Rc};
use halo2_proofs::{
circuit::{AssignedCell, Layouter, Region, Value},
halo2curves::ff::PrimeField,
plonk::{ConstraintSystem, Error, Expression},
poly::Rotation,
};
use super::gadget::{Gadget, GadgetConfig, GadgetType};
type AdderConfig = GadgetConfig;
/// Gadget that sums a vector of assigned cells into a single cell (see the
/// NOTE on the `Gadget` impl below).
pub struct AdderChip<F: PrimeField> {
  config: Rc<AdderConfig>,
  _marker: PhantomData<F>,
}
impl<F: PrimeField> AdderChip<F> {
  /// Wraps a shared gadget configuration.
  pub fn construct(config: Rc<AdderConfig>) -> Self {
    Self {
      config,
      _marker: PhantomData,
    }
  }

  /// Registers the adder gate: the last column must equal the sum of all
  /// preceding columns in the row when the selector is enabled.
  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {
    let selector = meta.selector();
    let columns = gadget_config.columns;
    meta.create_gate("adder gate", |meta| {
      let s = meta.query_selector(selector);
      let summands = columns[0..columns.len() - 1]
        .iter()
        .map(|col| meta.query_advice(*col, Rotation::cur()))
        .collect::<Vec<_>>();
      let total = meta.query_advice(*columns.last().unwrap(), Rotation::cur());
      let mut sum = Expression::Constant(F::ZERO);
      for term in summands.iter() {
        sum = sum + term.clone();
      }
      vec![s * (sum - total)]
    });
    let mut selectors = gadget_config.selectors;
    selectors.insert(GadgetType::Adder, vec![selector]);
    GadgetConfig {
      columns,
      selectors,
      ..gadget_config
    }
  }
}
// NOTE: The forward pass of the adder adds _everything_ into one cell
impl<F: PrimeField> Gadget<F> for AdderChip<F> {
  fn name(&self) -> String {
    "adder".to_string()
  }

  fn num_cols_per_op(&self) -> usize {
    self.config.columns.len()
  }

  // All columns except the last are summands; the last holds the sum.
  fn num_inputs_per_row(&self) -> usize {
    self.config.columns.len() - 1
  }

  fn num_outputs_per_row(&self) -> usize {
    1
  }

  /// Assigns one adder row: copies the inputs into the summand columns and
  /// witnesses their sum in the final column.
  fn op_row_region(
    &self,
    region: &mut Region<F>,
    row_offset: usize,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    _single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    assert_eq!(vec_inputs.len(), 1);
    let inp = &vec_inputs[0];
    if self.config.use_selectors {
      let selector = self.config.selectors.get(&GadgetType::Adder).unwrap()[0];
      selector.enable(region, row_offset)?;
    }
    // Copy each input into its summand column (equality-constrained).
    inp
      .iter()
      .enumerate()
      .map(|(i, cell)| cell.copy_advice(|| "", region, self.config.columns[i], row_offset))
      .collect::<Result<Vec<_>, _>>()?;
    // Witness value: sum of all inputs in this row.
    let e = inp.iter().fold(Value::known(F::ZERO), |a, b| {
      a + b.value().map(|x: &F| x.to_owned())
    });
    let res = region.assign_advice(
      || "",
      *self.config.columns.last().unwrap(),
      row_offset,
      || e,
    )?;
    Ok(vec![res])
  }

  /// Tree-reduces all inputs to a single cell: sums one row's worth of
  /// summands at a time, then repeats over the partial sums until only one
  /// output remains. `single_inputs[0]` must be the assigned zero cell (used
  /// for padding).
  fn forward(
    &self,
    mut layouter: impl Layouter<F>,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    assert_eq!(single_inputs.len(), 1);
    let mut inputs = vec_inputs[0].clone();
    let zero = single_inputs[0].clone();
    // Pad to a whole number of rows.
    while inputs.len() % self.num_inputs_per_row() != 0 {
      inputs.push(&zero);
    }
    let mut outputs = self.op_aligned_rows(
      layouter.namespace(|| "adder forward"),
      &vec![inputs],
      single_inputs,
    )?;
    // Keep reducing the partial sums until a single cell remains.
    while outputs.len() != 1 {
      while outputs.len() % self.num_inputs_per_row() != 0 {
        outputs.push(zero.clone());
      }
      let tmp = outputs.iter().map(|x| x).collect::<Vec<_>>();
      outputs = self.op_aligned_rows(
        layouter.namespace(|| "adder forward"),
        &vec![tmp],
        single_inputs,
      )?;
    }
    Ok(outputs)
  }
}
================================================
FILE: src/gadgets/bias_div_floor_relu6.rs
================================================
use std::{collections::HashMap, marker::PhantomData};
use halo2_proofs::{
circuit::{AssignedCell, Layouter, Region},
halo2curves::ff::PrimeField,
plonk::{ConstraintSystem, Error, Expression},
poly::Rotation,
};
use crate::gadgets::gadget::convert_to_u64;
use super::gadget::{Gadget, GadgetConfig, GadgetType};
type BiasDivFloorRelu6Config = GadgetConfig;
// Magnitude bound used to shift values non-negative before integer division.
const SHIFT_MIN_VAL: i64 = -(1 << 30);
/// Gadget computing (per the gate: inp = sf * (div - bias) + mod) the floor
/// division of the input by the scale factor, plus bias, clamped by relu6.
pub struct BiasDivFloorRelu6Chip<F: PrimeField> {
  config: BiasDivFloorRelu6Config,
  _marker: PhantomData<F>,
}
impl<F: PrimeField> BiasDivFloorRelu6Chip<F> {
  /// Takes ownership of the gadget configuration.
  pub fn construct(config: BiasDivFloorRelu6Config) -> Self {
    Self {
      config,
      _marker: PhantomData,
    }
  }

  /// Builds the relu6 lookup map: row index `i` represents the shifted value
  /// `i + div_outp_min_val`, clamped to `[0, 6 * scale_factor]`.
  pub fn get_map(scale_factor: u64, num_rows: i64, div_outp_min_val: i64) -> HashMap<i64, i64> {
    let div_val = scale_factor;
    let mut map = HashMap::new();
    // `i` is already i64; the former `let div_outp_min_val = div_outp_min_val;`
    // rebinding and `i as i64` cast were redundant.
    for i in 0..num_rows {
      let shifted = i + div_outp_min_val;
      let val = shifted.clamp(0, 6 * div_val as i64);
      map.insert(i, val);
    }
    map
  }

  /// Columns per op: inp, bias, div_res, mod_res, outp.
  pub fn num_cols_per_op() -> usize {
    5
  }

  /// Registers the floor-division gate (`inp = sf * (div - bias) + mod`) and
  /// the range/relu6 lookups for each column group.
  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {
    let selector = meta.complex_selector();
    let sf = Expression::Constant(F::from(gadget_config.scale_factor));
    let columns = gadget_config.columns;
    let mod_lookup = meta.lookup_table_column();
    let relu_lookup = meta.lookup_table_column();
    let div_lookup = meta.lookup_table_column();
    meta.create_gate("bias_mul", |meta| {
      let s = meta.query_selector(selector);
      let mut constraints = vec![];
      for op_idx in 0..columns.len() / Self::num_cols_per_op() {
        let offset = op_idx * Self::num_cols_per_op();
        let inp = meta.query_advice(columns[offset + 0], Rotation::cur());
        let bias = meta.query_advice(columns[offset + 1], Rotation::cur());
        let div_res = meta.query_advice(columns[offset + 2], Rotation::cur());
        let mod_res = meta.query_advice(columns[offset + 3], Rotation::cur());
        constraints.push(s.clone() * (inp - (sf.clone() * (div_res - bias) + mod_res)));
      }
      constraints
    });
    for op_idx in 0..columns.len() / Self::num_cols_per_op() {
      let offset = op_idx * Self::num_cols_per_op();
      meta.lookup("bias_div_relu6 lookup", |meta| {
        let s = meta.query_selector(selector);
        let mod_res = meta.query_advice(columns[offset + 3], Rotation::cur());
        // Constrains that the modulus \in [0, DIV_VAL)
        vec![(s.clone() * mod_res.clone(), mod_lookup)]
      });
      meta.lookup("bias_div_relu6 lookup", |meta| {
        let s = meta.query_selector(selector);
        let div = meta.query_advice(columns[offset + 2], Rotation::cur());
        let outp = meta.query_advice(columns[offset + 4], Rotation::cur());
        let div_outp_min_val = Expression::Constant(F::from((-SHIFT_MIN_VAL) as u64));
        // Constrains that output \in [0, 6 * SF]
        vec![
          (s.clone() * outp, relu_lookup),
          (s * (div + div_outp_min_val), div_lookup),
        ]
      });
    }
    let mut selectors = gadget_config.selectors;
    selectors.insert(GadgetType::BiasDivFloorRelu6, vec![selector]);
    let mut tables = gadget_config.tables;
    tables.insert(
      GadgetType::BiasDivFloorRelu6,
      vec![mod_lookup, relu_lookup, div_lookup],
    );
    let mut maps = gadget_config.maps;
    let relu_map = Self::get_map(
      gadget_config.scale_factor,
      gadget_config.num_rows as i64,
      gadget_config.div_outp_min_val,
    );
    maps.insert(GadgetType::BiasDivFloorRelu6, vec![relu_map]);
    GadgetConfig {
      columns,
      selectors,
      tables,
      maps,
      ..gadget_config
    }
  }
}
impl<F: PrimeField> Gadget<F> for BiasDivFloorRelu6Chip<F> {
  fn name(&self) -> String {
    "BiasDivRelu6".to_string()
  }

  fn num_cols_per_op(&self) -> usize {
    Self::num_cols_per_op()
  }

  fn num_inputs_per_row(&self) -> usize {
    self.config.columns.len() / self.num_cols_per_op()
  }

  // `op_row_region` pushes TWO cells (outp, div_res) per op, so each row
  // yields twice as many outputs as inputs. Returning `num_inputs_per_row()`
  // here (as before) trips the per-row output-length assertion in
  // `op_aligned_rows`; compare with BiasDivRoundRelu6Chip.
  fn num_outputs_per_row(&self) -> usize {
    self.num_inputs_per_row() * 2
  }

  /// Assigns one row: for each (inp, bias) pair, witnesses the floor division
  /// of the shifted input by the scale factor, its remainder, and the
  /// relu6-clamped output. Emits (outp, div_res) per op.
  fn op_row_region(
    &self,
    region: &mut Region<F>,
    row_offset: usize,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    _single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let div_val = self.config.scale_factor as i64;
    let div_outp_min_val_i64 = -self.config.div_outp_min_val;
    // Shift inputs by -SHIFT_MIN_VAL so the field value is a small positive
    // integer before converting to u64 for the integer division.
    let div_inp_min_val_pos_i64 = -SHIFT_MIN_VAL;
    let div_inp_min_val_pos = F::from(div_inp_min_val_pos_i64 as u64);
    let inp = &vec_inputs[0];
    let bias = &vec_inputs[1];
    assert_eq!(inp.len(), bias.len());
    assert_eq!(inp.len() % self.num_inputs_per_row(), 0);
    let relu_map = &self
      .config
      .maps
      .get(&GadgetType::BiasDivFloorRelu6)
      .unwrap()[0];
    if self.config.use_selectors {
      let selector = self
        .config
        .selectors
        .get(&GadgetType::BiasDivFloorRelu6)
        .unwrap()[0];
      selector.enable(region, row_offset)?;
    }
    let mut outp_cells = vec![];
    for (i, (inp, bias)) in inp.iter().zip(bias.iter()).enumerate() {
      let offset = i * self.num_cols_per_op();
      let inp_f = inp.value().map(|x: &F| x.to_owned());
      // Recover the (possibly negative) bias as an i64.
      let bias_f = bias.value().map(|x: &F| {
        let a = *x + div_inp_min_val_pos;
        let a = convert_to_u64(&a) as i64 - div_inp_min_val_pos_i64;
        a
      });
      // Floor division and remainder of the shifted input.
      let div_mod_res = inp_f.map(|x: F| {
        let x_pos = x + div_inp_min_val_pos;
        let inp = convert_to_u64(&x_pos);
        let div_res = inp as i64 / div_val - (div_inp_min_val_pos_i64 / div_val);
        let mod_res = inp as i64 % div_val;
        (div_res, mod_res)
      });
      let div_res = div_mod_res.map(|x: (i64, i64)| x.0) + bias_f;
      let mod_res = div_mod_res.map(|x: (i64, i64)| x.1);
      // Look up the relu6-clamped output; out-of-range values fall back to 0
      // (and are logged) — the lookup constraint will reject them anyway.
      let outp = div_res.map(|x: i64| {
        let mut x_pos = x - div_outp_min_val_i64;
        if !relu_map.contains_key(&(x_pos)) {
          println!("x: {}, x_pos: {}", x, x_pos);
          x_pos = 0;
        }
        let outp_val = relu_map.get(&(x_pos)).unwrap();
        F::from(*outp_val as u64)
      });
      // Assign inp, bias
      inp.copy_advice(|| "", region, self.config.columns[offset + 0], row_offset)?;
      bias.copy_advice(|| "", region, self.config.columns[offset + 1], row_offset)?;
      // Assign div_res, mod_res (div_res may be negative, hence the
      // shift-and-subtract when mapping into the field).
      let div_res_cell = region
        .assign_advice(
          || "div_res",
          self.config.columns[offset + 2],
          row_offset,
          || {
            div_res.map(|x: i64| {
              F::from((x - div_outp_min_val_i64) as u64) - F::from(-div_outp_min_val_i64 as u64)
            })
          },
        )
        .unwrap();
      let _mod_res_cell = region
        .assign_advice(
          || "mod_res",
          self.config.columns[offset + 3],
          row_offset,
          || mod_res.map(|x: i64| F::from(x as u64)),
        )
        .unwrap();
      let outp_cell = region
        .assign_advice(
          || "outp",
          self.config.columns[offset + 4],
          row_offset,
          || outp.map(|x: F| x.to_owned()),
        )
        .unwrap();
      outp_cells.push(outp_cell);
      outp_cells.push(div_res_cell);
    }
    Ok(outp_cells)
  }

  /// Pads the (inp, bias) vectors with the first bias cell (bias - bias = 0)
  /// to a whole number of rows, then assigns.
  /// NOTE(review): unlike BiasDivRoundRelu6Chip, the result is NOT truncated
  /// back to 2 * initial_len — confirm callers expect the padded outputs.
  fn forward(
    &self,
    mut layouter: impl Layouter<F>,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let mut inps = vec_inputs[0].clone();
    let mut biases = vec_inputs[1].clone();
    // Needed to pad: bias - bias = 0
    let default = biases[0].clone();
    while inps.len() % self.num_inputs_per_row() != 0 {
      inps.push(&default);
      biases.push(&default);
    }
    let res = self.op_aligned_rows(
      layouter.namespace(|| "bias_div_relu6"),
      &vec![inps, biases],
      single_inputs,
    )?;
    Ok(res)
  }
}
================================================
FILE: src/gadgets/bias_div_round_relu6.rs
================================================
use std::{collections::HashMap, marker::PhantomData, rc::Rc};
use halo2_proofs::{
circuit::{AssignedCell, Layouter, Region, Value},
halo2curves::ff::PrimeField,
plonk::{ConstraintSystem, Error, Expression},
poly::Rotation,
};
use crate::gadgets::gadget::convert_to_u64;
use super::gadget::{Gadget, GadgetConfig, GadgetType};
type BiasDivRoundRelu6Config = GadgetConfig;
// Columns per op: inp, bias, div_res, mod_res, outp.
const NUM_COLS_PER_OP: usize = 5;
/// Gadget computing (per the gate: 2*inp + sf = 2*sf*(div - bias) + mod) the
/// rounding division of the input by the scale factor, plus bias, clamped by
/// relu6. Emits both the clamped output and the raw division result.
pub struct BiasDivRoundRelu6Chip<F: PrimeField> {
  config: Rc<BiasDivRoundRelu6Config>,
  _marker: PhantomData<F>,
}
impl<F: PrimeField> BiasDivRoundRelu6Chip<F> {
  /// Wraps a shared gadget configuration.
  pub fn construct(config: Rc<BiasDivRoundRelu6Config>) -> Self {
    Self {
      config,
      _marker: PhantomData,
    }
  }

  /// Builds the relu6 lookup map: row index `i` represents the shifted value
  /// `i + min_val`, clamped to `[0, 6 * scale_factor]`.
  pub fn get_map(scale_factor: u64, min_val: i64, num_rows: i64) -> HashMap<i64, i64> {
    let div_val = scale_factor;
    let mut map = HashMap::new();
    for i in 0..num_rows {
      let shifted = i + min_val;
      let val = shifted.clamp(0, 6 * div_val as i64);
      map.insert(i as i64, val);
    }
    map
  }

  /// Registers the rounding-division gate and its lookups. The existing
  /// InputLookup table is reused for the range checks; only the relu6 table
  /// is newly allocated.
  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {
    let selector = meta.complex_selector();
    let sf = Expression::Constant(F::from(gadget_config.scale_factor));
    let two = Expression::Constant(F::from(2));
    let columns = gadget_config.columns;
    let mut tables = gadget_config.tables;
    let div_lookup = tables.get(&GadgetType::InputLookup).unwrap()[0];
    let relu_lookup = meta.lookup_table_column();
    meta.create_gate("bias_mul", |meta| {
      let s = meta.query_selector(selector);
      let mut constraints = vec![];
      for op_idx in 0..columns.len() / NUM_COLS_PER_OP {
        let offset = op_idx * NUM_COLS_PER_OP;
        let inp = meta.query_advice(columns[offset + 0], Rotation::cur());
        let bias = meta.query_advice(columns[offset + 1], Rotation::cur());
        let div_res = meta.query_advice(columns[offset + 2], Rotation::cur());
        let mod_res = meta.query_advice(columns[offset + 3], Rotation::cur());
        // ((div - bias) * 2 + mod) * sf = 2 * inp + sf
        constraints.push(
          s.clone()
            * (two.clone() * inp + sf.clone()
              - (sf.clone() * two.clone() * (div_res - bias) + mod_res)),
        );
      }
      constraints
    });
    for op_idx in 0..columns.len() / NUM_COLS_PER_OP {
      let offset = op_idx * NUM_COLS_PER_OP;
      meta.lookup("bias_div_relu6 lookup", |meta| {
        let s = meta.query_selector(selector);
        let mod_res = meta.query_advice(columns[offset + 3], Rotation::cur());
        // Constrains that the modulus \in [0, DIV_VAL)
        // div_val - mod_res \in [0, max_val)
        vec![(s.clone() * (two.clone() * sf.clone() - mod_res), div_lookup)]
      });
      meta.lookup("bias_div_relu6 lookup", |meta| {
        let s = meta.query_selector(selector);
        let div = meta.query_advice(columns[offset + 2], Rotation::cur());
        let outp = meta.query_advice(columns[offset + 4], Rotation::cur());
        let div_outp_min_val = gadget_config.div_outp_min_val;
        let div_outp_min_val = Expression::Constant(F::from((-div_outp_min_val) as u64));
        // Constrains that output \in [0, 6 * SF]
        vec![
          (s.clone() * (div + div_outp_min_val), div_lookup),
          (s.clone() * outp, relu_lookup),
        ]
      });
    }
    let mut selectors = gadget_config.selectors;
    selectors.insert(GadgetType::BiasDivRoundRelu6, vec![selector]);
    tables.insert(GadgetType::BiasDivRoundRelu6, vec![relu_lookup]);
    let mut maps = gadget_config.maps;
    let relu_map = Self::get_map(
      gadget_config.scale_factor,
      gadget_config.min_val,
      gadget_config.num_rows as i64,
    );
    maps.insert(GadgetType::BiasDivRoundRelu6, vec![relu_map]);
    GadgetConfig {
      columns,
      selectors,
      tables,
      maps,
      ..gadget_config
    }
  }
}
impl<F: PrimeField> Gadget<F> for BiasDivRoundRelu6Chip<F> {
  fn name(&self) -> String {
    "BiasDivRelu6".to_string()
  }

  fn num_cols_per_op(&self) -> usize {
    NUM_COLS_PER_OP
  }

  fn num_inputs_per_row(&self) -> usize {
    self.config.columns.len() / NUM_COLS_PER_OP
  }

  // Each op emits two cells: the relu6 output and the raw division result.
  fn num_outputs_per_row(&self) -> usize {
    self.num_inputs_per_row() * 2
  }

  /// Loads the relu6 lookup table: row `i` holds relu6(i + min_val).
  fn load_lookups(&self, mut layouter: impl Layouter<F>) -> Result<(), Error> {
    let map = &self.config.maps[&GadgetType::BiasDivRoundRelu6][0];
    let relu_lookup = self.config.tables[&GadgetType::BiasDivRoundRelu6][0];
    layouter
      .assign_table(
        || "bdr round div/relu lookup",
        |mut table| {
          for i in 0..self.config.num_rows {
            let i = i as i64;
            let val = map.get(&i).unwrap();
            table
              .assign_cell(
                || "relu lookup",
                relu_lookup,
                i as usize,
                || Value::known(F::from(*val as u64)),
              )
              .unwrap();
          }
          Ok(())
        },
      )
      .unwrap();
    Ok(())
  }

  /// Assigns one row: for each (inp, bias) pair, witnesses the rounding
  /// division of the shifted input, its remainder, and the relu6-clamped
  /// output. Emits (outp, div_res) per op.
  fn op_row_region(
    &self,
    region: &mut Region<F>,
    row_offset: usize,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    _single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let div_val = self.config.scale_factor as i64;
    let div_outp_min_val_i64 = self.config.div_outp_min_val;
    // Shift inputs by -shift_min_val so the field value is a small positive
    // integer before converting to u64 for the integer division.
    let div_inp_min_val_pos_i64 = -self.config.shift_min_val;
    let div_inp_min_val_pos = F::from(div_inp_min_val_pos_i64 as u64);
    let inp = &vec_inputs[0];
    let bias = &vec_inputs[1];
    assert_eq!(inp.len(), bias.len());
    assert_eq!(inp.len() % self.num_inputs_per_row(), 0);
    let relu_map = &self
      .config
      .maps
      .get(&GadgetType::BiasDivRoundRelu6)
      .unwrap()[0];
    if self.config.use_selectors {
      let selector = self
        .config
        .selectors
        .get(&GadgetType::BiasDivRoundRelu6)
        .unwrap()[0];
      selector.enable(region, row_offset).unwrap();
    }
    let mut outp_cells = vec![];
    for (i, (inp, bias)) in inp.iter().zip(bias.iter()).enumerate() {
      let offset = i * NUM_COLS_PER_OP;
      let inp_f = inp.value().map(|x: &F| x.to_owned());
      // Recover the (possibly negative) bias as an i64.
      let bias_f = bias.value().map(|x: &F| {
        let a = *x + div_inp_min_val_pos;
        let a = convert_to_u64(&a) as i64 - div_inp_min_val_pos_i64;
        a
      });
      // Round-to-nearest division: (2x + sf) / (2 sf), remainder mod 2 sf.
      let div_mod_res = inp_f.map(|x: F| {
        let x_pos = x + div_inp_min_val_pos;
        let inp = convert_to_u64(&x_pos) as i64;
        let div_inp = 2 * inp + div_val;
        let div_res = div_inp / (2 * div_val) - div_inp_min_val_pos_i64 / div_val;
        let mod_res = div_inp % (2 * div_val);
        (div_res, mod_res)
      });
      let div_res = div_mod_res.map(|x: (i64, i64)| x.0) + bias_f;
      let mod_res = div_mod_res.map(|x: (i64, i64)| x.1);
      // Look up the relu6-clamped output; out-of-range values fall back to 0
      // (and are logged) — the lookup constraint will reject them anyway.
      let outp = div_res.map(|x: i64| {
        let mut x_pos = x - div_outp_min_val_i64;
        if !relu_map.contains_key(&(x_pos)) {
          println!("x: {}, x_pos: {}", x, x_pos);
          x_pos = 0;
        }
        let outp_val = relu_map.get(&(x_pos)).unwrap();
        F::from(*outp_val as u64)
      });
      // Assign inp, bias
      inp
        .copy_advice(|| "", region, self.config.columns[offset + 0], row_offset)
        .unwrap();
      bias
        .copy_advice(|| "", region, self.config.columns[offset + 1], row_offset)
        .unwrap();
      // Assign div_res, mod_res (div_res may be negative, hence the
      // shift-and-subtract when mapping into the field).
      let div_res_cell = region
        .assign_advice(
          || "div_res",
          self.config.columns[offset + 2],
          row_offset,
          || {
            div_res.map(|x: i64| {
              F::from((x - div_outp_min_val_i64) as u64) - F::from(-div_outp_min_val_i64 as u64)
            })
          },
        )
        .unwrap();
      let _mod_res_cell = region
        .assign_advice(
          || "mod_res",
          self.config.columns[offset + 3],
          row_offset,
          || mod_res.map(|x: i64| F::from(x as u64)),
        )
        .unwrap();
      let outp_cell = region
        .assign_advice(
          || "outp",
          self.config.columns[offset + 4],
          row_offset,
          || outp.map(|x: F| x.to_owned()),
        )
        .unwrap();
      outp_cells.push(outp_cell);
      outp_cells.push(div_res_cell);
    }
    Ok(outp_cells)
  }

  /// Pads the (inp, bias) vectors with the first bias cell (bias - bias = 0)
  /// to a whole number of rows, assigns, and truncates the interleaved
  /// (outp, div_res) results back to 2 * initial_len.
  fn forward(
    &self,
    mut layouter: impl Layouter<F>,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let mut inps = vec_inputs[0].clone();
    let mut biases = vec_inputs[1].clone();
    let initial_len = inps.len();
    // Needed to pad: bias - bias = 0
    let default = biases[0].clone();
    while inps.len() % self.num_inputs_per_row() != 0 {
      inps.push(&default);
      biases.push(&default);
    }
    let res = self
      .op_aligned_rows(
        layouter.namespace(|| "bias_div_relu6"),
        &vec![inps, biases],
        single_inputs,
      )
      .unwrap();
    Ok(res[0..initial_len * 2].to_vec())
  }
}
================================================
FILE: src/gadgets/dot_prod.rs
================================================
use std::{marker::PhantomData, rc::Rc};
use halo2_proofs::{
circuit::{AssignedCell, Layouter, Region},
halo2curves::ff::PrimeField,
plonk::{Advice, Column, ConstraintSystem, Error, Expression},
poly::Rotation,
};
use crate::gadgets::adder::AdderChip;
use super::gadget::{Gadget, GadgetConfig, GadgetType};
type DotProductConfig = GadgetConfig;
/// Gadget computing the dot product of an input vector and a weight vector,
/// one row of partial products at a time, reduced via `AdderChip`.
pub struct DotProductChip<F: PrimeField> {
  config: Rc<DotProductConfig>,
  _marker: PhantomData<F>,
}
impl<F: PrimeField> DotProductChip<F> {
  /// Wraps a shared gadget configuration.
  pub fn construct(config: Rc<DotProductConfig>) -> Self {
    Self {
      config,
      _marker: PhantomData,
    }
  }

  /// The first half of the non-output columns holds the inputs.
  pub fn get_input_columns(config: &GadgetConfig) -> Vec<Column<Advice>> {
    let num_inputs = (config.columns.len() - 1) / 2;
    config.columns[0..num_inputs].to_vec()
  }

  /// The second half of the non-output columns holds the weights.
  pub fn get_weight_columns(config: &GadgetConfig) -> Vec<Column<Advice>> {
    let num_inputs = (config.columns.len() - 1) / 2;
    config.columns[num_inputs..config.columns.len() - 1].to_vec()
  }

  /// Registers the dot-product gate: sum_i inp_i * w_i must equal the last
  /// column when the selector is enabled.
  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {
    let selector = meta.selector();
    let columns = &gadget_config.columns;
    meta.create_gate("dot product gate", |meta| {
      let s = meta.query_selector(selector);
      let inputs = DotProductChip::<F>::get_input_columns(&gadget_config)
        .iter()
        .map(|col| meta.query_advice(*col, Rotation::cur()))
        .collect::<Vec<_>>();
      let weights = DotProductChip::<F>::get_weight_columns(&gadget_config)
        .iter()
        .map(|col| meta.query_advice(*col, Rotation::cur()))
        .collect::<Vec<_>>();
      let output = meta.query_advice(columns[columns.len() - 1], Rotation::cur());
      let mut acc = Expression::Constant(F::ZERO);
      for (a, w) in inputs.iter().zip(weights) {
        acc = acc + a.clone() * w;
      }
      vec![s * (acc - output)]
    });
    let mut selectors = gadget_config.selectors;
    selectors.insert(GadgetType::DotProduct, vec![selector]);
    GadgetConfig {
      columns: gadget_config.columns,
      selectors,
      ..gadget_config
    }
  }
}
impl<F: PrimeField> Gadget<F> for DotProductChip<F> {
  fn name(&self) -> String {
    "dot product".to_string()
  }

  fn num_cols_per_op(&self) -> usize {
    self.config.columns.len()
  }

  // Half of the non-output columns per row are inputs (the other half are
  // weights).
  fn num_inputs_per_row(&self) -> usize {
    (self.config.columns.len() - 1) / 2
  }

  fn num_outputs_per_row(&self) -> usize {
    1
  }

  // The caller is expected to pad the inputs
  /// Assigns one dot-product row: copies inputs and weights into their
  /// columns and witnesses the partial dot product in the last column.
  fn op_row_region(
    &self,
    region: &mut Region<F>,
    row_offset: usize,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    assert_eq!(vec_inputs.len(), 2);
    let inp = &vec_inputs[0];
    let weights = &vec_inputs[1];
    assert_eq!(inp.len(), weights.len());
    assert_eq!(inp.len(), self.num_inputs_per_row());
    let zero = &single_inputs[0];
    if self.config.use_selectors {
      let selector = self.config.selectors.get(&GadgetType::DotProduct).unwrap()[0];
      selector.enable(region, row_offset).unwrap();
    }
    let inp_cols = DotProductChip::<F>::get_input_columns(&self.config);
    inp
      .iter()
      .enumerate()
      .map(|(i, cell)| cell.copy_advice(|| "", region, inp_cols[i], row_offset))
      .collect::<Result<Vec<_>, _>>()
      .unwrap();
    let weight_cols = DotProductChip::<F>::get_weight_columns(&self.config);
    weights
      .iter()
      .enumerate()
      .map(|(i, cell)| cell.copy_advice(|| "", region, weight_cols[i], row_offset))
      .collect::<Result<Vec<_>, _>>()
      .unwrap();
    // All columns need to be assigned
    // With an even column count, integer division leaves one column between
    // the weights and the output unused; fill it with zero.
    if self.config.columns.len() % 2 == 0 {
      zero
        .copy_advice(
          || "",
          region,
          self.config.columns[self.config.columns.len() - 2],
          row_offset,
        )
        .unwrap();
    }
    // Witness value: sum of elementwise products.
    let e = inp
      .iter()
      .zip(weights.iter())
      .map(|(a, b)| a.value().map(|x: &F| *x) * b.value())
      .reduce(|a, b| a + b)
      .unwrap();
    let res = region
      .assign_advice(
        || "",
        self.config.columns[self.config.columns.len() - 1],
        row_offset,
      || e,
      )
      .unwrap();
    Ok(vec![res])
  }

  /// Pads inputs/weights with zero to a whole number of rows, computes one
  /// partial dot product per row, then sums the partials with `AdderChip`.
  fn forward(
    &self,
    mut layouter: impl Layouter<F>,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    assert_eq!(vec_inputs.len(), 2);
    assert_eq!(single_inputs.len(), 1);
    let zero = &single_inputs[0];
    let mut inputs = vec_inputs[0].clone();
    let mut weights = vec_inputs[1].clone();
    while inputs.len() % self.num_inputs_per_row() != 0 {
      inputs.push(&zero);
      weights.push(&zero);
    }
    let outputs = layouter
      .assign_region(
        || "dot prod rows",
        |mut region| {
          let mut outputs = vec![];
          for i in 0..inputs.len() / self.num_inputs_per_row() {
            let inp =
              inputs[i * self.num_inputs_per_row()..(i + 1) * self.num_inputs_per_row()].to_vec();
            let weights =
              weights[i * self.num_inputs_per_row()..(i + 1) * self.num_inputs_per_row()].to_vec();
            let res = self
              .op_row_region(&mut region, i, &vec![inp, weights], &vec![zero.clone()])
              .unwrap();
            outputs.push(res[0].clone());
          }
          Ok(outputs)
        },
      )
      .unwrap();
    // Reduce the per-row partial sums to a single cell.
    let adder_chip = AdderChip::<F>::construct(self.config.clone());
    let tmp = outputs.iter().map(|x| x).collect::<Vec<_>>();
    Ok(
      adder_chip
        .forward(
          layouter.namespace(|| "dot prod adder"),
          &vec![tmp],
          single_inputs,
        )
        .unwrap(),
    )
  }
}
================================================
FILE: src/gadgets/gadget.rs
================================================
use std::{
collections::{BTreeSet, HashMap},
sync::Arc,
};
use halo2_proofs::{
circuit::{AssignedCell, Layouter, Region},
halo2curves::group::ff::PrimeField,
plonk::{Advice, Column, Error, Fixed, Selector, TableColumn},
};
use num_bigint::{BigUint, ToBigUint};
use num_traits::cast::ToPrimitive;
/// Identifies each gadget; used as the key for the selectors, lookup tables,
/// and witness maps stored in `GadgetConfig`.
#[derive(Clone, Copy, Debug, Hash, Eq, PartialEq, PartialOrd, Ord)]
pub enum GadgetType {
  AddPairs,
  Adder,
  BiasDivRoundRelu6,
  BiasDivFloorRelu6,
  DotProduct,
  Exp,
  Logistic,
  Max,
  Pow,
  Relu,
  Rsqrt,
  Sqrt,
  SqrtBig,
  Square,
  SquaredDiff,
  SubPairs,
  Tanh,
  MulPairs,
  VarDivRound,
  VarDivRoundBig,
  VarDivRoundBig3,
  Packer,      // This is a special case
  InputLookup, // Dummy placeholder for the input lookup
  Update,
}
/// Configuration shared by all gadgets: the advice columns, per-gadget
/// selectors/tables/maps, and the fixed-point parameters of the circuit.
#[derive(Clone, Debug, Default)]
pub struct GadgetConfig {
  pub used_gadgets: Arc<BTreeSet<GadgetType>>,
  // Advice columns shared by every gadget.
  pub columns: Vec<Column<Advice>>,
  pub fixed_columns: Vec<Column<Fixed>>,
  pub selectors: HashMap<GadgetType, Vec<Selector>>,
  pub tables: HashMap<GadgetType, Vec<TableColumn>>,
  // Witness-generation lookup maps (e.g. relu tables), keyed by gadget.
  pub maps: HashMap<GadgetType, Vec<HashMap<i64, i64>>>,
  // Fixed-point scale factor applied to quantized values.
  pub scale_factor: u64,
  pub shift_min_val: i64, // MUST be divisible by 2 * scale_factor
  pub num_rows: usize,
  pub num_cols: usize,
  // NOTE(review): presumably the circuit size parameter (2^k rows) — confirm.
  pub k: usize,
  // NOTE(review): presumably a learning rate for the update gadget — confirm.
  pub eta: f64,
  pub min_val: i64,
  pub max_val: i64,
  pub div_outp_min_val: i64,
  pub use_selectors: bool,
  pub commit_before: Vec<Vec<i64>>,
  pub commit_after: Vec<Vec<i64>>,
  pub num_bits_per_elem: i64,
}
// TODO: refactor
/// Converts a field element to `u64`.
///
/// Interprets the canonical little-endian byte representation of `x` as an
/// unsigned integer. Panics if the value does not fit in a single 64-bit
/// limb (i.e. `x >= 2^64`); previously this was a bare `panic!()` with no
/// message and a redundant `as u64` cast on an already-`u64` digit.
pub fn convert_to_u64<F: PrimeField>(x: &F) -> u64 {
  let big = BigUint::from_bytes_le(x.to_repr().as_ref());
  let big_digits = big.to_u64_digits();
  // Preserved debug aid: dump the limbs when the value is wildly out of range.
  if big_digits.len() > 2 {
    println!("big_digits: {:?}", big_digits);
  }
  match big_digits.len() {
    // An empty digit list means the value is zero.
    0 => 0,
    1 => big_digits[0],
    _ => panic!("field element does not fit in u64: {:?}", big_digits),
  }
}
/// Converts a field element to `u128`.
///
/// Panics if the canonical representation of `x` does not fit in 128 bits.
pub fn convert_to_u128<F: PrimeField>(x: &F) -> u128 {
  // `big` is already a `BigUint`, so the previous `.to_biguint().unwrap()`
  // round-trip was redundant.
  let big = BigUint::from_bytes_le(x.to_repr().as_ref());
  big.to_u128().unwrap()
}
/// Common interface for all low-level circuit gadgets.
///
/// A gadget packs one or more identical operations side-by-side into each
/// circuit row. `op_row_region` lays out a single row; the provided
/// `op_aligned_rows`/`forward` methods tile it over row-aligned inputs.
pub trait Gadget<F: PrimeField> {
  /// Human-readable gadget name, used in region/namespace labels.
  fn name(&self) -> String;

  /// Number of advice columns consumed by a single operation.
  fn num_cols_per_op(&self) -> usize;

  /// Number of input cells consumed per circuit row.
  fn num_inputs_per_row(&self) -> usize;

  /// Number of output cells produced per circuit row.
  fn num_outputs_per_row(&self) -> usize;

  /// Loads any lookup tables this gadget needs; no-op by default.
  fn load_lookups(&self, _layouter: impl Layouter<F>) -> Result<(), Error> {
    Ok(())
  }

  /// Lays out one row of the gadget at `row_offset` inside `region` and
  /// returns the assigned output cells for that row.
  fn op_row_region(
    &self,
    region: &mut Region<F>,
    row_offset: usize,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error>;

  // The caller is required to ensure that the inputs are of the correct length.
  /// Tiles `op_row_region` over inputs whose length is an exact multiple of
  /// `num_inputs_per_row()`, concatenating the per-row outputs in order.
  fn op_aligned_rows(
    &self,
    mut layouter: impl Layouter<F>,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    // Sanity check inputs
    for inp in vec_inputs.iter() {
      assert_eq!(inp.len() % self.num_inputs_per_row(), 0);
    }
    let outputs = layouter.assign_region(
      || format!("gadget {}", self.name()),
      |mut region| {
        let mut outputs = vec![];
        // One iteration per circuit row; slice this row's chunk out of every
        // input vector before handing it to op_row_region.
        for i in 0..vec_inputs[0].len() / self.num_inputs_per_row() {
          let mut vec_inputs_row = vec![];
          for inp in vec_inputs.iter() {
            vec_inputs_row.push(
              inp[i * self.num_inputs_per_row()..(i + 1) * self.num_inputs_per_row()].to_vec(),
            );
          }
          let row_outputs = self.op_row_region(&mut region, i, &vec_inputs_row, &single_inputs)?;
          assert_eq!(row_outputs.len(), self.num_outputs_per_row());
          outputs.extend(row_outputs);
        }
        Ok(outputs)
      },
    )?;
    Ok(outputs)
  }

  /// Default forward pass: namespaces the layouter and delegates to
  /// `op_aligned_rows`.
  fn forward(
    &self,
    mut layouter: impl Layouter<F>,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    self.op_aligned_rows(
      layouter.namespace(|| format!("forward row {}", self.name())),
      vec_inputs,
      single_inputs,
    )
  }
}
================================================
FILE: src/gadgets/input_lookup.rs
================================================
use std::{marker::PhantomData, rc::Rc};
use halo2_proofs::{
circuit::{AssignedCell, Layouter, Region, Value},
halo2curves::ff::PrimeField,
plonk::{ConstraintSystem, Error},
};
use super::gadget::{Gadget, GadgetConfig, GadgetType};
/// Dummy gadget that owns the shared `[0, num_rows)` range-check table used
/// by other gadgets' lookups. It only loads the table; all other `Gadget`
/// methods panic because this chip is never laid out directly.
pub struct InputLookupChip<F: PrimeField> {
  config: Rc<GadgetConfig>,
  _marker: PhantomData<F>,
}
impl<F: PrimeField> InputLookupChip<F> {
pub fn construct(config: Rc<GadgetConfig>) -> Self {
Self {
config,
_marker: PhantomData,
}
}
pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {
let lookup = meta.lookup_table_column();
let mut tables = gadget_config.tables;
tables.insert(GadgetType::InputLookup, vec![lookup]);
GadgetConfig {
tables,
..gadget_config
}
}
}
impl<F: PrimeField> Gadget<F> for InputLookupChip<F> {
  /// Fills the shared range-check table with the values `0..num_rows`
  /// (row `i` holds the field element `i`).
  ///
  /// Assignment errors are now propagated with `?` instead of panicking via
  /// `unwrap()`, and the needless `i64` round-trip on the loop index is gone.
  fn load_lookups(&self, mut layouter: impl Layouter<F>) -> Result<(), Error> {
    let lookup = self.config.tables[&GadgetType::InputLookup][0];
    layouter.assign_table(
      || "input lookup",
      |mut table| {
        for i in 0..self.config.num_rows {
          table.assign_cell(|| "mod lookup", lookup, i, || Value::known(F::from(i as u64)))?;
        }
        Ok(())
      },
    )?;
    Ok(())
  }

  // This chip only provides the lookup table; it has no layout of its own.
  fn name(&self) -> String {
    panic!("InputLookupChip should not be called directly")
  }

  fn num_cols_per_op(&self) -> usize {
    panic!("InputLookupChip should not be called directly")
  }

  fn num_inputs_per_row(&self) -> usize {
    panic!("InputLookupChip should not be called directly")
  }

  fn num_outputs_per_row(&self) -> usize {
    panic!("InputLookupChip should not be called directly")
  }

  fn op_row_region(
    &self,
    _region: &mut Region<F>,
    _row_offset: usize,
    _vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    _single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    panic!("InputLookupChip should not be called directly")
  }
}
================================================
FILE: src/gadgets/max.rs
================================================
use std::{marker::PhantomData, rc::Rc};
use halo2_proofs::{
circuit::{AssignedCell, Layouter, Region},
halo2curves::ff::PrimeField,
plonk::{ConstraintSystem, Error},
poly::Rotation,
};
use crate::gadgets::gadget::convert_to_u64;
use super::gadget::{Gadget, GadgetConfig, GadgetType};
/// Gadget computing the elementwise (and, via `forward`, global) maximum of
/// its inputs using a multiplicative gate plus two range-check lookups.
pub struct MaxChip<F: PrimeField> {
  config: Rc<GadgetConfig>,
  _marker: PhantomData<F>,
}
impl<F: PrimeField> MaxChip<F> {
  /// Creates a chip from a shared gadget configuration.
  pub fn construct(config: Rc<GadgetConfig>) -> Self {
    Self {
      config,
      _marker: PhantomData,
    }
  }

  /// Columns per max op: two inputs and one output.
  pub fn num_cols_per_op() -> usize {
    3
  }

  /// Registers the max constraints for every column triple:
  /// - gate: `(inp1 - outp) * (inp2 - outp) == 0`, so `outp` equals one of
  ///   the two inputs;
  /// - lookups: `outp - inp1` and `outp - inp2` must be in the input range
  ///   table (i.e. non-negative), so `outp` is the larger of the two.
  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {
    // Complex selector: it participates in lookups, not just gates.
    let selector = meta.complex_selector();
    let columns = gadget_config.columns;
    let tables = gadget_config.tables;
    let inp_lookup = tables.get(&GadgetType::InputLookup).unwrap()[0];
    meta.create_gate("max arithmetic", |meta| {
      let s = meta.query_selector(selector);
      let mut constraints = vec![];
      for i in 0..columns.len() / Self::num_cols_per_op() {
        let offset = i * Self::num_cols_per_op();
        let inp1 = meta.query_advice(columns[offset + 0], Rotation::cur());
        let inp2 = meta.query_advice(columns[offset + 1], Rotation::cur());
        let outp = meta.query_advice(columns[offset + 2], Rotation::cur());
        constraints.push(s.clone() * (inp1 - outp.clone()) * (inp2 - outp))
      }
      constraints
    });
    for idx in 0..columns.len() / Self::num_cols_per_op() {
      meta.lookup("max inp1", |meta| {
        let s = meta.query_selector(selector);
        let offset = idx * Self::num_cols_per_op();
        let inp1 = meta.query_advice(columns[offset + 0], Rotation::cur());
        let outp = meta.query_advice(columns[offset + 2], Rotation::cur());
        vec![(s * (outp - inp1), inp_lookup)]
      });
      meta.lookup("max inp2", |meta| {
        let s = meta.query_selector(selector);
        let offset = idx * Self::num_cols_per_op();
        let inp2 = meta.query_advice(columns[offset + 1], Rotation::cur());
        let outp = meta.query_advice(columns[offset + 2], Rotation::cur());
        vec![(s * (outp - inp2), inp_lookup)]
      });
    }
    let mut selectors = gadget_config.selectors;
    selectors.insert(GadgetType::Max, vec![selector]);
    GadgetConfig {
      columns,
      selectors,
      tables,
      ..gadget_config
    }
  }
}
impl<F: PrimeField> Gadget<F> for MaxChip<F> {
  fn name(&self) -> String {
    "max".to_string()
  }

  fn num_cols_per_op(&self) -> usize {
    3
  }

  // Each op consumes two inputs, so a row takes twice as many inputs as it
  // produces outputs.
  fn num_inputs_per_row(&self) -> usize {
    self.config.columns.len() / self.num_cols_per_op() * 2
  }

  fn num_outputs_per_row(&self) -> usize {
    self.config.columns.len() / self.num_cols_per_op()
  }

  /// Lays out one row of pairwise maxes. The row's inputs are split in two
  /// halves: element i of the first half is paired with element i of the
  /// second half.
  fn op_row_region(
    &self,
    region: &mut Region<F>,
    row_offset: usize,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    _single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    assert_eq!(vec_inputs.len(), 1);
    let inp = &vec_inputs[0];
    if self.config.use_selectors {
      let selector = self.config.selectors.get(&GadgetType::Max).unwrap()[0];
      selector.enable(region, row_offset)?;
    }
    // Offset added to make (possibly negative) values non-negative so they
    // can be compared as u64s below.
    let min_val_pos = F::from((-self.config.shift_min_val) as u64);
    let mut outp = vec![];
    // Two chunks per row: first operands, then second operands.
    let chunks: Vec<&[&AssignedCell<F, F>]> = inp.chunks(self.num_outputs_per_row()).collect();
    let i1 = chunks[0];
    let i2 = chunks[1];
    for (idx, (inp1, inp2)) in i1.iter().zip(i2.iter()).enumerate() {
      let offset = idx * self.num_cols_per_op();
      inp1
        .copy_advice(|| "", region, self.config.columns[offset + 0], row_offset)
        .unwrap();
      inp2
        .copy_advice(|| "", region, self.config.columns[offset + 1], row_offset)
        .unwrap();
      // Witness: shift both values into u64 range, take the max, shift back.
      let max = inp1.value().zip(inp2.value()).map(|(a, b)| {
        let a = convert_to_u64(&(*a + min_val_pos));
        let b = convert_to_u64(&(*b + min_val_pos));
        let max = a.max(b);
        let max = F::from(max) - min_val_pos;
        max
      });
      let res = region
        .assign_advice(|| "", self.config.columns[offset + 2], row_offset, || max)
        .unwrap();
      outp.push(res);
    }
    Ok(outp)
  }

  /// Reduces the whole input vector to a single maximum via repeated
  /// pairwise-max passes. Padding with the first element is harmless since
  /// `max(x, x) == x`.
  fn forward(
    &self,
    mut layouter: impl Layouter<F>,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let mut inputs = vec_inputs[0].clone();
    let first = inputs[0];
    while inputs.len() % self.num_inputs_per_row() != 0 {
      inputs.push(first);
    }
    // TODO: pretty sure this is correct but check
    // NOTE(review): this iteration count looks conservative (each pass halves
    // the element count, so ~log2 passes should suffice) — confirm before
    // changing, since it affects the circuit layout.
    let num_iters = inputs.len().div_ceil(self.num_inputs_per_row()) + self.num_inputs_per_row();
    let mut outputs = self.op_aligned_rows(
      layouter.namespace(|| "max forward"),
      &vec![inputs],
      single_inputs,
    )?;
    for _ in 0..num_iters {
      while outputs.len() % self.num_inputs_per_row() != 0 {
        outputs.push(first.clone());
      }
      let tmp = outputs.iter().map(|x| x).collect::<Vec<_>>();
      outputs = self.op_aligned_rows(
        layouter.namespace(|| "max forward"),
        &vec![tmp],
        single_inputs,
      )?;
    }
    // After the reduction passes only the first element is the true maximum.
    outputs = vec![outputs.into_iter().next().unwrap()];
    Ok(outputs)
  }
}
================================================
FILE: src/gadgets/mul_pairs.rs
================================================
use std::{marker::PhantomData, rc::Rc};
use halo2_proofs::{
circuit::{AssignedCell, Layouter, Region},
halo2curves::ff::PrimeField,
plonk::{ConstraintSystem, Error},
poly::Rotation,
};
use super::gadget::{Gadget, GadgetConfig, GadgetType};
type MulPairsConfig = GadgetConfig;

/// Gadget computing the elementwise product of two vectors, three columns
/// (inp1, inp2, outp) per multiply.
pub struct MulPairsChip<F: PrimeField> {
  config: Rc<MulPairsConfig>,
  _marker: PhantomData<F>,
}
impl<F: PrimeField> MulPairsChip<F> {
  /// Creates a chip from a shared gadget configuration.
  pub fn construct(config: Rc<MulPairsConfig>) -> Self {
    Self {
      config,
      _marker: PhantomData,
    }
  }

  /// Columns per multiply: two inputs and one output.
  pub fn num_cols_per_op() -> usize {
    3
  }

  /// Registers the `inp1 * inp2 == outp` gate for every column triple.
  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {
    let selector = meta.selector();
    let columns = gadget_config.columns;
    meta.create_gate("mul pair", |meta| {
      let s = meta.query_selector(selector);
      let mut constraints = vec![];
      for i in 0..columns.len() / Self::num_cols_per_op() {
        let offset = i * Self::num_cols_per_op();
        let inp1 = meta.query_advice(columns[offset], Rotation::cur());
        let inp2 = meta.query_advice(columns[offset + 1], Rotation::cur());
        let outp = meta.query_advice(columns[offset + 2], Rotation::cur());
        let res = inp1 * inp2;
        // `push` replaces the old `append(&mut vec![..])`, which allocated a
        // throwaway singleton Vec per constraint.
        constraints.push(s.clone() * (res - outp));
      }
      constraints
    });
    let mut selectors = gadget_config.selectors;
    selectors.insert(GadgetType::MulPairs, vec![selector]);
    GadgetConfig {
      columns,
      selectors,
      ..gadget_config
    }
  }
}
impl<F: PrimeField> Gadget<F> for MulPairsChip<F> {
  fn name(&self) -> String {
    String::from("MulPairs")
  }

  fn num_cols_per_op(&self) -> usize {
    Self::num_cols_per_op()
  }

  fn num_inputs_per_row(&self) -> usize {
    self.config.columns.len() / self.num_cols_per_op()
  }

  fn num_outputs_per_row(&self) -> usize {
    self.config.columns.len() / self.num_cols_per_op()
  }

  // TODO: This + below is basically copied from add pairs - make arithmetic generic
  /// Lays out one row of pairwise products: copies each operand pair into
  /// its column pair and assigns the witnessed product into the third column.
  fn op_row_region(
    &self,
    region: &mut Region<F>,
    row_offset: usize,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    _single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let lhs = &vec_inputs[0];
    let rhs = &vec_inputs[1];
    assert_eq!(lhs.len(), rhs.len());
    let columns = &self.config.columns;
    if self.config.use_selectors {
      let selector = self.config.selectors.get(&GadgetType::MulPairs).unwrap()[0];
      selector.enable(region, row_offset)?;
    }
    let mut assigned = Vec::with_capacity(lhs.len());
    for (idx, (a, b)) in lhs.iter().zip(rhs.iter()).enumerate() {
      let base = idx * self.num_cols_per_op();
      let a = a.copy_advice(|| "", region, columns[base], row_offset)?;
      let b = b.copy_advice(|| "", region, columns[base + 1], row_offset)?;
      let prod = a.value().map(|x: &F| x.to_owned()) * b.value().map(|x: &F| x.to_owned());
      assigned.push(region.assign_advice(|| "", columns[base + 2], row_offset, || prod)?);
    }
    Ok(assigned)
  }

  /// Pads both operand vectors with zeros up to a whole number of rows, lays
  /// out every row, then truncates the outputs back to the original length.
  fn forward(
    &self,
    mut layouter: impl Layouter<F>,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let zero = &single_inputs[0];
    let mut lhs = vec_inputs[0].clone();
    let mut rhs = vec_inputs[1].clone();
    let original_len = lhs.len();
    while lhs.len() % self.num_inputs_per_row() != 0 {
      lhs.push(zero);
      rhs.push(zero);
    }
    let padded = vec![lhs, rhs];
    let outp = self.op_aligned_rows(
      layouter.namespace(|| format!("forward row {}", self.name())),
      &padded,
      single_inputs,
    )?;
    Ok(outp[0..original_len].to_vec())
  }
}
================================================
FILE: src/gadgets/nonlinear/exp.rs
================================================
use std::{collections::HashMap, marker::PhantomData, rc::Rc};
use halo2_proofs::{
circuit::{AssignedCell, Layouter, Region},
halo2curves::ff::PrimeField,
plonk::{ConstraintSystem, Error},
};
use super::{
super::gadget::{Gadget, GadgetConfig, GadgetType},
non_linearity::NonLinearGadget,
};
type ExpGadgetConfig = GadgetConfig;

// IMPORTANT: this returns exp(x) * SF
/// Lookup-based exponentiation gadget (output is scaled by SF^2 in the map,
/// i.e. exp(x) * SF relative to an SF-scaled input).
pub struct ExpGadgetChip<F: PrimeField> {
  config: Rc<ExpGadgetConfig>,
  _marker: PhantomData<F>,
}
impl<F: PrimeField> ExpGadgetChip<F> {
  /// Creates a chip from a shared gadget configuration.
  pub fn construct(config: Rc<ExpGadgetConfig>) -> Self {
    Self {
      config,
      _marker: PhantomData,
    }
  }

  /// Registers the exp lookup (selector, tables, value map) via the shared
  /// non-linear gadget configuration.
  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {
    <ExpGadgetChip<F> as NonLinearGadget<F>>::configure(meta, gadget_config, GadgetType::Exp)
  }
}
impl<F: PrimeField> NonLinearGadget<F> for ExpGadgetChip<F> {
  /// Table: row i -> round(exp((i + min_val) / SF) * SF^2).
  fn generate_map(scale_factor: u64, min_val: i64, num_rows: i64) -> HashMap<i64, i64> {
    (0..num_rows)
      .map(|i| {
        let x = ((i + min_val) as f64) / (scale_factor as f64);
        (i, (x.exp() * ((scale_factor * scale_factor) as f64)).round() as i64)
      })
      .collect()
  }

  fn get_map(&self) -> &HashMap<i64, i64> {
    &self.config.maps[&GadgetType::Exp][0]
  }

  fn get_selector(&self) -> halo2_proofs::plonk::Selector {
    self.config.selectors[&GadgetType::Exp][0]
  }
}
/// `Gadget` plumbing for the exp chip: everything delegates to the shared
/// `NonLinearGadget` lookup implementation.
impl<F: PrimeField> Gadget<F> for ExpGadgetChip<F> {
  fn name(&self) -> String {
    "Exp".to_string()
  }

  fn num_cols_per_op(&self) -> usize {
    <ExpGadgetChip<F> as NonLinearGadget<F>>::num_cols_per_op()
  }

  fn num_inputs_per_row(&self) -> usize {
    self.config.columns.len() / self.num_cols_per_op()
  }

  fn num_outputs_per_row(&self) -> usize {
    self.config.columns.len() / self.num_cols_per_op()
  }

  fn load_lookups(&self, layouter: impl Layouter<F>) -> Result<(), Error> {
    NonLinearGadget::load_lookups(self, layouter, self.config.clone(), GadgetType::Exp)?;
    Ok(())
  }

  fn op_row_region(
    &self,
    region: &mut Region<F>,
    row_offset: usize,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    NonLinearGadget::op_row_region(
      self,
      region,
      row_offset,
      vec_inputs,
      single_inputs,
      self.config.clone(),
    )
  }

  fn forward(
    &self,
    layouter: impl halo2_proofs::circuit::Layouter<F>,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    NonLinearGadget::forward(self, layouter, vec_inputs, single_inputs)
  }
}
================================================
FILE: src/gadgets/nonlinear/logistic.rs
================================================
use std::{collections::HashMap, marker::PhantomData, rc::Rc};
use halo2_proofs::{
circuit::{AssignedCell, Layouter, Region},
halo2curves::ff::PrimeField,
plonk::{ConstraintSystem, Error},
};
use super::{
super::gadget::{Gadget, GadgetConfig, GadgetType},
non_linearity::NonLinearGadget,
};
/// Lookup-based logistic (sigmoid) gadget.
pub struct LogisticGadgetChip<F: PrimeField> {
  config: Rc<GadgetConfig>,
  _marker: PhantomData<F>,
}
impl<F: PrimeField> LogisticGadgetChip<F> {
  /// Creates a chip from a shared gadget configuration.
  pub fn construct(config: Rc<GadgetConfig>) -> Self {
    Self {
      config,
      _marker: PhantomData,
    }
  }

  /// Registers the logistic lookup via the shared non-linear gadget
  /// configuration.
  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {
    <LogisticGadgetChip<F> as NonLinearGadget<F>>::configure(
      meta,
      gadget_config,
      GadgetType::Logistic,
    )
  }
}
impl<F: PrimeField> NonLinearGadget<F> for LogisticGadgetChip<F> {
  /// Table: row i -> round(sigmoid((i + min_val) / SF) * SF).
  fn generate_map(scale_factor: u64, min_val: i64, num_rows: i64) -> HashMap<i64, i64> {
    (0..num_rows)
      .map(|i| {
        let x = ((i + min_val) as f64) / (scale_factor as f64);
        let logistic = 1. / (1. + (-x).exp());
        (i, (logistic * ((scale_factor) as f64)).round() as i64)
      })
      .collect()
  }

  fn get_map(&self) -> &HashMap<i64, i64> {
    &self.config.maps[&GadgetType::Logistic][0]
  }

  fn get_selector(&self) -> halo2_proofs::plonk::Selector {
    self.config.selectors[&GadgetType::Logistic][0]
  }
}
/// `Gadget` plumbing for the logistic chip: everything delegates to the
/// shared `NonLinearGadget` lookup implementation.
impl<F: PrimeField> Gadget<F> for LogisticGadgetChip<F> {
  fn name(&self) -> String {
    "LogisticChip".to_string()
  }

  fn num_cols_per_op(&self) -> usize {
    <LogisticGadgetChip<F> as NonLinearGadget<F>>::num_cols_per_op()
  }

  fn num_inputs_per_row(&self) -> usize {
    self.config.columns.len() / self.num_cols_per_op()
  }

  fn num_outputs_per_row(&self) -> usize {
    self.config.columns.len() / self.num_cols_per_op()
  }

  fn load_lookups(&self, layouter: impl Layouter<F>) -> Result<(), Error> {
    NonLinearGadget::load_lookups(self, layouter, self.config.clone(), GadgetType::Logistic)?;
    Ok(())
  }

  fn op_row_region(
    &self,
    region: &mut Region<F>,
    row_offset: usize,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    NonLinearGadget::op_row_region(
      self,
      region,
      row_offset,
      vec_inputs,
      single_inputs,
      self.config.clone(),
    )
  }

  fn forward(
    &self,
    layouter: impl halo2_proofs::circuit::Layouter<F>,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    NonLinearGadget::forward(self, layouter, vec_inputs, single_inputs)
  }
}
================================================
FILE: src/gadgets/nonlinear/non_linearity.rs
================================================
use std::{collections::HashMap, rc::Rc};
use halo2_proofs::{
circuit::{AssignedCell, Layouter, Region, Value},
halo2curves::ff::PrimeField,
plonk::{ConstraintSystem, Error, Expression, Selector},
poly::Rotation,
};
use crate::gadgets::gadget::convert_to_u128;
use super::super::gadget::Gadget;
use super::super::gadget::{GadgetConfig, GadgetType};
// Every non-linearity uses two advice columns per op: input and output.
const NUM_COLS_PER_OP: usize = 2;

/// Shared machinery for lookup-table based elementwise non-linearities
/// (exp, logistic, pow, relu, rsqrt, sqrt, tanh, ...).
///
/// Concrete gadgets supply `generate_map` (the quantized value table) plus
/// accessors for their map and selector; configuration, table loading, row
/// layout, and the forward pass are implemented here once.
pub trait NonLinearGadget<F: PrimeField>: Gadget<F> {
  /// Builds the map from shifted input index (0..num_rows) to the quantized
  /// output value of the non-linearity.
  fn generate_map(scale_factor: u64, min_val: i64, num_rows: i64) -> HashMap<i64, i64>;

  /// Returns the precomputed input->output map for this gadget.
  fn get_map(&self) -> &HashMap<i64, i64>;

  /// Returns the selector gating this gadget's lookups.
  fn get_selector(&self) -> Selector;

  fn num_cols_per_op() -> usize {
    NUM_COLS_PER_OP
  }

  /// Registers, for every (input, output) column pair, a lookup asserting
  /// that `inp - min_val` is in the input range table and that `outp` is in
  /// this gadget's output table; also generates and stores the value map.
  fn configure(
    meta: &mut ConstraintSystem<F>,
    gadget_config: GadgetConfig,
    gadget_type: GadgetType,
  ) -> GadgetConfig {
    let selector = meta.complex_selector();
    let columns = gadget_config.columns;
    let mut tables = gadget_config.tables;
    let inp_lookup = tables.get(&GadgetType::InputLookup).unwrap()[0];
    let outp_lookup = meta.lookup_table_column();
    for op_idx in 0..columns.len() / NUM_COLS_PER_OP {
      let offset = op_idx * NUM_COLS_PER_OP;
      meta.lookup("non-linear lookup", |meta| {
        let s = meta.query_selector(selector);
        let inp = meta.query_advice(columns[offset + 0], Rotation::cur());
        let outp = meta.query_advice(columns[offset + 1], Rotation::cur());
        // Shift the (possibly negative) input into [0, num_rows) for the
        // range check against the input table.
        let shift_val = gadget_config.min_val;
        let shift_val_pos = Expression::Constant(F::from((-shift_val) as u64));
        vec![
          (s.clone() * (inp + shift_val_pos), inp_lookup),
          (s.clone() * outp, outp_lookup),
        ]
      });
    }
    let mut selectors = gadget_config.selectors;
    selectors.insert(gadget_type, vec![selector]);
    tables.insert(gadget_type, vec![inp_lookup, outp_lookup]);
    let mut maps = gadget_config.maps;
    let non_linear_map = Self::generate_map(
      gadget_config.scale_factor,
      gadget_config.min_val,
      gadget_config.num_rows as i64,
    );
    maps.insert(gadget_type, vec![non_linear_map]);
    GadgetConfig {
      columns,
      selectors,
      tables,
      maps,
      ..gadget_config
    }
  }

  /// Loads the output-value table: row i holds the map's value for i,
  /// encoded into the field (negative values via the shift trick below).
  fn load_lookups(
    &self,
    mut layouter: impl Layouter<F>,
    config: Rc<GadgetConfig>,
    gadget_type: GadgetType,
  ) -> Result<(), Error> {
    let map = self.get_map();
    let table_col = config.tables.get(&gadget_type).unwrap()[1];
    let shift_pos_i64 = -config.shift_min_val;
    let shift_pos = F::from(shift_pos_i64 as u64);
    layouter.assign_table(
      || "non linear table",
      |mut table| {
        for i in 0..config.num_rows {
          let i = i as i64;
          // FIXME: refactor this
          let tmp = *map.get(&i).unwrap();
          // Row 0 is pinned to zero regardless of the map value.
          // NOTE(review): presumably so zero-padded inputs look up to zero —
          // confirm against op_row_region's matching x == 0 branch.
          let val = if i == 0 {
            F::ZERO
          } else {
            if tmp >= 0 {
              F::from(tmp as u64)
            } else {
              // Encode a negative value: (tmp + shift) fits in u64, then
              // subtract the shift back out in the field.
              let tmp = tmp + shift_pos_i64;
              F::from(tmp as u64) - shift_pos
            }
          };
          table.assign_cell(
            || "non linear cell",
            table_col,
            i as usize,
            || Value::known(val),
          )?;
        }
        Ok(())
      },
    )?;
    Ok(())
  }

  /// Lays out one row: copies each input into its input column and assigns
  /// the looked-up output value into the adjacent output column.
  fn op_row_region(
    &self,
    region: &mut Region<F>,
    row_offset: usize,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    _single_inputs: &Vec<&AssignedCell<F, F>>,
    gadget_config: Rc<GadgetConfig>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let columns = &gadget_config.columns;
    let inp = &vec_inputs[0];
    let map = self.get_map();
    let shift_val_pos_i64 = -gadget_config.shift_min_val;
    let shift_val_pos = F::from(shift_val_pos_i64 as u64);
    let min_val = gadget_config.min_val;
    if gadget_config.use_selectors {
      let selector = self.get_selector();
      selector.enable(region, row_offset)?;
    }
    let mut outps = vec![];
    for i in 0..inp.len() {
      let offset = i * 2;
      inp[i].copy_advice(|| "", region, columns[offset + 0], row_offset)?;
      let outp = inp[i].value().map(|x: &F| {
        // Recover the signed integer behind the field element, then shift it
        // into the map's [0, num_rows) index space.
        let pos = convert_to_u128(&(*x + shift_val_pos)) as i128 - shift_val_pos_i64 as i128;
        let x = pos as i64 - min_val;
        let val = *map.get(&x).unwrap();
        // Mirrors load_lookups: the x == 0 table entry is pinned to zero.
        if x == 0 {
          F::ZERO
        } else {
          if val >= 0 {
            F::from(val as u64)
          } else {
            let val_pos = val + shift_val_pos_i64;
            F::from(val_pos as u64) - F::from(shift_val_pos_i64 as u64)
          }
        }
      });
      let outp =
        region.assign_advice(|| "nonlinearity", columns[offset + 1], row_offset, || outp)?;
      outps.push(outp);
    }
    Ok(outps)
  }

  /// Pads the input with zeros up to a whole number of rows, lays out all
  /// rows, and truncates the outputs back to the original length.
  fn forward(
    &self,
    mut layouter: impl Layouter<F>,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let zero = &single_inputs[0];
    let inp_len = vec_inputs[0].len();
    let mut inp = vec_inputs[0].clone();
    while inp.len() % self.num_inputs_per_row() != 0 {
      inp.push(zero);
    }
    let vec_inputs = vec![inp];
    let outp = self.op_aligned_rows(
      layouter.namespace(|| format!("forward row {}", self.name())),
      &vec_inputs,
      &single_inputs,
    )?;
    Ok(outp[0..inp_len].to_vec())
  }
}
================================================
FILE: src/gadgets/nonlinear/pow.rs
================================================
use std::{collections::HashMap, marker::PhantomData, rc::Rc};
use halo2_proofs::{
circuit::{AssignedCell, Layouter, Region},
halo2curves::ff::PrimeField,
plonk::{ConstraintSystem, Error},
};
use super::{
super::gadget::{Gadget, GadgetConfig, GadgetType},
non_linearity::NonLinearGadget,
};
// IMPORTANT: PowGadget assumes a single power across the entire DAG
/// Lookup-based power gadget; the exponent is currently hard-coded to 3 in
/// `generate_map` (see the FIXME there).
pub struct PowGadgetChip<F: PrimeField> {
  config: Rc<GadgetConfig>,
  _marker: PhantomData<F>,
}
impl<F: PrimeField> PowGadgetChip<F> {
  /// Creates a chip from a shared gadget configuration.
  pub fn construct(config: Rc<GadgetConfig>) -> Self {
    Self {
      config,
      _marker: PhantomData,
    }
  }

  /// Registers the pow lookup via the shared non-linear gadget configuration.
  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {
    <PowGadgetChip<F> as NonLinearGadget<F>>::configure(meta, gadget_config, GadgetType::Pow)
  }
}
impl<F: PrimeField> NonLinearGadget<F> for PowGadgetChip<F> {
  /// Table: row i -> round(((i + min_val) / SF)^power * SF).
  fn generate_map(scale_factor: u64, min_val: i64, num_rows: i64) -> HashMap<i64, i64> {
    let power = 3.; // FIXME: need to make this variable somehow...
    (0..num_rows)
      .map(|i| {
        let x = ((i + min_val) as f64) / (scale_factor as f64);
        (i, (x.powf(power) * ((scale_factor) as f64)).round() as i64)
      })
      .collect()
  }

  fn get_map(&self) -> &HashMap<i64, i64> {
    &self.config.maps[&GadgetType::Pow][0]
  }

  fn get_selector(&self) -> halo2_proofs::plonk::Selector {
    self.config.selectors[&GadgetType::Pow][0]
  }
}
/// `Gadget` plumbing for the pow chip: everything delegates to the shared
/// `NonLinearGadget` lookup implementation.
impl<F: PrimeField> Gadget<F> for PowGadgetChip<F> {
  fn name(&self) -> String {
    "PowGadgetChip".to_string()
  }

  fn num_cols_per_op(&self) -> usize {
    <PowGadgetChip<F> as NonLinearGadget<F>>::num_cols_per_op()
  }

  fn num_inputs_per_row(&self) -> usize {
    self.config.columns.len() / self.num_cols_per_op()
  }

  fn num_outputs_per_row(&self) -> usize {
    self.config.columns.len() / self.num_cols_per_op()
  }

  fn load_lookups(&self, layouter: impl Layouter<F>) -> Result<(), Error> {
    NonLinearGadget::load_lookups(self, layouter, self.config.clone(), GadgetType::Pow)?;
    Ok(())
  }

  fn op_row_region(
    &self,
    region: &mut Region<F>,
    row_offset: usize,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    NonLinearGadget::op_row_region(
      self,
      region,
      row_offset,
      vec_inputs,
      single_inputs,
      self.config.clone(),
    )
  }

  fn forward(
    &self,
    layouter: impl halo2_proofs::circuit::Layouter<F>,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    NonLinearGadget::forward(self, layouter, vec_inputs, single_inputs)
  }
}
================================================
FILE: src/gadgets/nonlinear/relu.rs
================================================
use std::{collections::HashMap, marker::PhantomData, rc::Rc};
use halo2_proofs::{
circuit::{AssignedCell, Layouter, Region},
halo2curves::ff::PrimeField,
plonk::{ConstraintSystem, Error},
};
use super::{
super::gadget::{Gadget, GadgetConfig, GadgetType},
non_linearity::NonLinearGadget,
};
/// Lookup-based ReLU gadget.
pub struct ReluChip<F: PrimeField> {
  config: Rc<GadgetConfig>,
  _marker: PhantomData<F>,
}
impl<F: PrimeField> ReluChip<F> {
  /// Creates a chip from a shared gadget configuration.
  pub fn construct(config: Rc<GadgetConfig>) -> Self {
    Self {
      config,
      _marker: PhantomData,
    }
  }

  /// Registers the relu lookup via the shared non-linear gadget configuration.
  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {
    <ReluChip<F> as NonLinearGadget<F>>::configure(meta, gadget_config, GadgetType::Relu)
  }
}
impl<F: PrimeField> NonLinearGadget<F> for ReluChip<F> {
  /// Table: row i -> max(i + min_val, 0). The scale factor is irrelevant
  /// because relu is scale-equivariant.
  fn generate_map(_scale_factor: u64, min_val: i64, num_rows: i64) -> HashMap<i64, i64> {
    (0..num_rows).map(|i| (i, (i + min_val).max(0))).collect()
  }

  fn get_map(&self) -> &HashMap<i64, i64> {
    &self.config.maps[&GadgetType::Relu][0]
  }

  fn get_selector(&self) -> halo2_proofs::plonk::Selector {
    self.config.selectors[&GadgetType::Relu][0]
  }
}
/// `Gadget` plumbing for the relu chip: everything delegates to the shared
/// `NonLinearGadget` lookup implementation.
impl<F: PrimeField> Gadget<F> for ReluChip<F> {
  fn name(&self) -> String {
    "Relu".to_string()
  }

  fn num_cols_per_op(&self) -> usize {
    <ReluChip<F> as NonLinearGadget<F>>::num_cols_per_op()
  }

  fn num_inputs_per_row(&self) -> usize {
    self.config.columns.len() / self.num_cols_per_op()
  }

  fn num_outputs_per_row(&self) -> usize {
    self.config.columns.len() / self.num_cols_per_op()
  }

  fn load_lookups(&self, layouter: impl Layouter<F>) -> Result<(), Error> {
    NonLinearGadget::load_lookups(self, layouter, self.config.clone(), GadgetType::Relu)?;
    Ok(())
  }

  fn op_row_region(
    &self,
    region: &mut Region<F>,
    row_offset: usize,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    NonLinearGadget::op_row_region(
      self,
      region,
      row_offset,
      vec_inputs,
      single_inputs,
      self.config.clone(),
    )
  }

  fn forward(
    &self,
    layouter: impl halo2_proofs::circuit::Layouter<F>,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    NonLinearGadget::forward(self, layouter, vec_inputs, single_inputs)
  }
}
================================================
FILE: src/gadgets/nonlinear/rsqrt.rs
================================================
use std::{collections::HashMap, marker::PhantomData, rc::Rc};
use halo2_proofs::{
circuit::{AssignedCell, Layouter, Region},
halo2curves::ff::PrimeField,
plonk::{ConstraintSystem, Error},
};
use super::{
super::gadget::{Gadget, GadgetConfig, GadgetType},
non_linearity::NonLinearGadget,
};
/// Lookup-based reciprocal-square-root gadget.
pub struct RsqrtGadgetChip<F: PrimeField> {
  config: Rc<GadgetConfig>,
  _marker: PhantomData<F>,
}
impl<F: PrimeField> RsqrtGadgetChip<F> {
  /// Creates a chip from a shared gadget configuration.
  pub fn construct(config: Rc<GadgetConfig>) -> Self {
    Self {
      config,
      _marker: PhantomData,
    }
  }

  /// Registers the rsqrt lookup via the shared non-linear gadget
  /// configuration.
  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {
    <RsqrtGadgetChip<F> as NonLinearGadget<F>>::configure(meta, gadget_config, GadgetType::Rsqrt)
  }
}
impl<F: PrimeField> NonLinearGadget<F> for RsqrtGadgetChip<F> {
  /// Table: row i -> round(SF / sqrt((i + min_val) / SF)).
  /// For non-positive shifted inputs `1.0 / sqrt(x)` is NaN/inf, which the
  /// saturating `as i64` cast maps to 0 / i64::MAX exactly as before.
  fn generate_map(scale_factor: u64, min_val: i64, num_rows: i64) -> HashMap<i64, i64> {
    (0..num_rows)
      .map(|i| {
        let x = ((i + min_val) as f64) / (scale_factor as f64);
        let rsqrt = 1.0 / x.sqrt();
        (i, (rsqrt * (scale_factor as f64)).round() as i64)
      })
      .collect()
  }

  fn get_map(&self) -> &HashMap<i64, i64> {
    &self.config.maps[&GadgetType::Rsqrt][0]
  }

  fn get_selector(&self) -> halo2_proofs::plonk::Selector {
    self.config.selectors[&GadgetType::Rsqrt][0]
  }
}
/// `Gadget` plumbing for the rsqrt chip: everything delegates to the shared
/// `NonLinearGadget` lookup implementation.
impl<F: PrimeField> Gadget<F> for RsqrtGadgetChip<F> {
  fn name(&self) -> String {
    "RsqrtGadget".to_string()
  }

  fn num_cols_per_op(&self) -> usize {
    <RsqrtGadgetChip<F> as NonLinearGadget<F>>::num_cols_per_op()
  }

  fn num_inputs_per_row(&self) -> usize {
    self.config.columns.len() / self.num_cols_per_op()
  }

  fn num_outputs_per_row(&self) -> usize {
    self.config.columns.len() / self.num_cols_per_op()
  }

  fn load_lookups(&self, layouter: impl Layouter<F>) -> Result<(), Error> {
    NonLinearGadget::load_lookups(self, layouter, self.config.clone(), GadgetType::Rsqrt)?;
    Ok(())
  }

  fn op_row_region(
    &self,
    region: &mut Region<F>,
    row_offset: usize,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    NonLinearGadget::op_row_region(
      self,
      region,
      row_offset,
      vec_inputs,
      single_inputs,
      self.config.clone(),
    )
  }

  fn forward(
    &self,
    layouter: impl halo2_proofs::circuit::Layouter<F>,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    NonLinearGadget::forward(self, layouter, vec_inputs, single_inputs)
  }
}
================================================
FILE: src/gadgets/nonlinear/sqrt.rs
================================================
use std::{collections::HashMap, marker::PhantomData, rc::Rc};
use halo2_proofs::{
circuit::{AssignedCell, Layouter, Region},
halo2curves::ff::PrimeField,
plonk::{ConstraintSystem, Error},
};
use super::{
super::gadget::{Gadget, GadgetConfig, GadgetType},
non_linearity::NonLinearGadget,
};
/// Lookup-based square-root gadget.
pub struct SqrtGadgetChip<F: PrimeField> {
  config: Rc<GadgetConfig>,
  _marker: PhantomData<F>,
}
impl<F: PrimeField> SqrtGadgetChip<F> {
  /// Creates a chip from a shared gadget configuration.
  pub fn construct(config: Rc<GadgetConfig>) -> Self {
    Self {
      config,
      _marker: PhantomData,
    }
  }

  /// Registers the sqrt lookup via the shared non-linear gadget
  /// configuration.
  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {
    <SqrtGadgetChip<F> as NonLinearGadget<F>>::configure(meta, gadget_config, GadgetType::Sqrt)
  }
}
impl<F: PrimeField> NonLinearGadget<F> for SqrtGadgetChip<F> {
  /// Table: row i -> round(sqrt((i + min_val) / SF) * SF).
  fn generate_map(scale_factor: u64, min_val: i64, num_rows: i64) -> HashMap<i64, i64> {
    (0..num_rows)
      .map(|i| {
        let x = ((i + min_val) as f64) / (scale_factor as f64);
        let sqrt = x.sqrt();
        (i, (sqrt * (scale_factor as f64)).round() as i64)
      })
      .collect()
  }

  fn get_map(&self) -> &HashMap<i64, i64> {
    &self.config.maps[&GadgetType::Sqrt][0]
  }

  fn get_selector(&self) -> halo2_proofs::plonk::Selector {
    self.config.selectors[&GadgetType::Sqrt][0]
  }
}
/// `Gadget` plumbing for the sqrt chip: everything delegates to the shared
/// `NonLinearGadget` lookup implementation.
impl<F: PrimeField> Gadget<F> for SqrtGadgetChip<F> {
  fn name(&self) -> String {
    "SqrtGadget".to_string()
  }

  fn num_cols_per_op(&self) -> usize {
    <SqrtGadgetChip<F> as NonLinearGadget<F>>::num_cols_per_op()
  }

  fn num_inputs_per_row(&self) -> usize {
    self.config.columns.len() / self.num_cols_per_op()
  }

  fn num_outputs_per_row(&self) -> usize {
    self.config.columns.len() / self.num_cols_per_op()
  }

  fn load_lookups(&self, layouter: impl Layouter<F>) -> Result<(), Error> {
    NonLinearGadget::load_lookups(self, layouter, self.config.clone(), GadgetType::Sqrt)?;
    Ok(())
  }

  fn op_row_region(
    &self,
    region: &mut Region<F>,
    row_offset: usize,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    NonLinearGadget::op_row_region(
      self,
      region,
      row_offset,
      vec_inputs,
      single_inputs,
      self.config.clone(),
    )
  }

  fn forward(
    &self,
    layouter: impl halo2_proofs::circuit::Layouter<F>,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    NonLinearGadget::forward(self, layouter, vec_inputs, single_inputs)
  }
}
================================================
FILE: src/gadgets/nonlinear/tanh.rs
================================================
use std::{collections::HashMap, marker::PhantomData, rc::Rc};
use halo2_proofs::{
circuit::{AssignedCell, Layouter, Region},
halo2curves::ff::PrimeField,
plonk::{ConstraintSystem, Error},
};
use super::{
super::gadget::{Gadget, GadgetConfig, GadgetType},
non_linearity::NonLinearGadget,
};
/// Lookup-based fixed-point hyperbolic tangent gadget.
pub struct TanhGadgetChip<F: PrimeField> {
  // Shared circuit configuration: columns, selectors, and lookup maps.
  config: Rc<GadgetConfig>,
  // Binds the chip to its field type without storing a field element.
  _marker: PhantomData<F>,
}

impl<F: PrimeField> TanhGadgetChip<F> {
  /// Builds the chip from a shared gadget configuration.
  pub fn construct(config: Rc<GadgetConfig>) -> Self {
    Self {
      config,
      _marker: PhantomData,
    }
  }

  /// Registers the tanh lookup under `GadgetType::Tanh`; the gate and table
  /// wiring are shared with all other non-linear gadgets.
  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {
    <TanhGadgetChip<F> as NonLinearGadget<F>>::configure(meta, gadget_config, GadgetType::Tanh)
  }
}
impl<F: PrimeField> NonLinearGadget<F> for TanhGadgetChip<F> {
  /// Tabulates `round(tanh(x) * scale)` for every table row; row index `i`
  /// encodes the shifted fixed-point input `i + min_val`.
  fn generate_map(scale_factor: u64, min_val: i64, num_rows: i64) -> HashMap<i64, i64> {
    let scale = scale_factor as f64;
    (0..num_rows)
      .map(|idx| {
        let real = ((idx + min_val) as f64) / scale;
        (idx, (real.tanh() * scale).round() as i64)
      })
      .collect()
  }

  /// The precomputed tanh lookup table.
  fn get_map(&self) -> &HashMap<i64, i64> {
    &self.config.maps.get(&GadgetType::Tanh).unwrap()[0]
  }

  /// The selector gating the tanh lookup rows.
  fn get_selector(&self) -> halo2_proofs::plonk::Selector {
    self.config.selectors.get(&GadgetType::Tanh).unwrap()[0]
  }
}
// All assignment logic is inherited from the shared `NonLinearGadget`
// defaults; this impl only wires the `Gadget` trait through to them.
impl<F: PrimeField> Gadget<F> for TanhGadgetChip<F> {
  fn name(&self) -> String {
    "TanhGadgetChip".to_string()
  }

  fn num_cols_per_op(&self) -> usize {
    <TanhGadgetChip<F> as NonLinearGadget<F>>::num_cols_per_op()
  }

  // One lookup op per column group, so a full row handles
  // columns.len() / num_cols_per_op() inputs (and as many outputs).
  fn num_inputs_per_row(&self) -> usize {
    self.config.columns.len() / self.num_cols_per_op()
  }

  fn num_outputs_per_row(&self) -> usize {
    self.config.columns.len() / self.num_cols_per_op()
  }

  // Loads the tanh table produced by `generate_map` into the lookup.
  fn load_lookups(&self, layouter: impl Layouter<F>) -> Result<(), Error> {
    NonLinearGadget::load_lookups(self, layouter, self.config.clone(), GadgetType::Tanh)?;
    Ok(())
  }

  fn op_row_region(
    &self,
    region: &mut Region<F>,
    row_offset: usize,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    NonLinearGadget::op_row_region(
      self,
      region,
      row_offset,
      vec_inputs,
      single_inputs,
      self.config.clone(),
    )
  }

  fn forward(
    &self,
    layouter: impl halo2_proofs::circuit::Layouter<F>,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    NonLinearGadget::forward(self, layouter, vec_inputs, single_inputs)
  }
}
================================================
FILE: src/gadgets/nonlinear.rs
================================================
pub mod exp;
pub mod logistic;
pub mod non_linearity;
pub mod pow;
pub mod relu;
pub mod rsqrt;
pub mod sqrt;
pub mod tanh;
================================================
FILE: src/gadgets/sqrt_big.rs
================================================
use std::{marker::PhantomData, rc::Rc};
use halo2_proofs::{
circuit::{AssignedCell, Layouter, Region},
halo2curves::ff::PrimeField,
plonk::{ConstraintSystem, Error, Expression},
poly::Rotation,
};
use crate::gadgets::gadget::convert_to_u64;
use super::gadget::{Gadget, GadgetConfig, GadgetType};
type SqrtBigConfig = GadgetConfig;
/// Integer square root gadget for inputs too large for the lookup table.
/// Witnesses `sqrt` and a remainder such that `inp = sqrt^2 + rem`.
pub struct SqrtBigChip<F: PrimeField> {
  // Shared circuit configuration: columns, selectors, and lookup tables.
  config: Rc<SqrtBigConfig>,
  // Binds the chip to its field type without storing a field element.
  _marker: PhantomData<F>,
}

impl<F: PrimeField> SqrtBigChip<F> {
  /// Builds the chip from a shared gadget configuration.
  pub fn construct(config: Rc<SqrtBigConfig>) -> Self {
    Self {
      config,
      _marker: PhantomData,
    }
  }

  /// Columns per op: (input, sqrt, remainder).
  pub fn num_cols_per_op() -> usize {
    3
  }

  /// Registers the sqrt_big gate and range lookups.
  ///
  /// Per column triple the gate enforces `inp = sqrt^2 + rem`. The lookups
  /// bound `sqrt`, `rem + sqrt`, and `2*sqrt - rem` to [0, 2^N), which pins
  /// `rem` to [-sqrt, 2*sqrt] — the intent being that `sqrt` is the nearest
  /// integer square root of `inp` (see the TODO below re: soundness proof).
  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {
    let selector = meta.complex_selector();
    let two = Expression::Constant(F::from(2));

    let columns = gadget_config.columns;
    let tables = gadget_config.tables;

    let inp_lookup = tables.get(&GadgetType::InputLookup).unwrap()[0];

    // TODO: prove that these constraints work
    meta.create_gate("sqrt_big arithm", |meta| {
      let s = meta.query_selector(selector);
      let mut constraints = vec![];
      for op_idx in 0..columns.len() / Self::num_cols_per_op() {
        let offset = op_idx * Self::num_cols_per_op();
        let inp = meta.query_advice(columns[offset + 0], Rotation::cur());
        let sqrt = meta.query_advice(columns[offset + 1], Rotation::cur());
        let rem = meta.query_advice(columns[offset + 2], Rotation::cur());

        // inp = sqrt^2 + rem
        let lhs = inp.clone();
        let rhs = sqrt.clone() * sqrt.clone() + rem.clone();
        constraints.push(s.clone() * (lhs - rhs));
      }
      constraints
    });

    for op_idx in 0..columns.len() / Self::num_cols_per_op() {
      let offset = op_idx * Self::num_cols_per_op();
      // sqrt \in [0, 2^N)
      meta.lookup("sqrt_big sqrt lookup", |meta| {
        let s = meta.query_selector(selector);
        let sqrt = meta.query_advice(columns[offset + 1], Rotation::cur());
        vec![(s.clone() * sqrt, inp_lookup)]
      });
      // rem + sqrt \in [0, 2^N)  =>  rem >= -sqrt
      meta.lookup("sqrt_big rem lookup", |meta| {
        let s = meta.query_selector(selector);
        let sqrt = meta.query_advice(columns[offset + 1], Rotation::cur());
        let rem = meta.query_advice(columns[offset + 2], Rotation::cur());
        vec![(s.clone() * (rem + sqrt), inp_lookup)]
      });
      // 2*sqrt - rem \in [0, 2^N)  =>  rem <= 2*sqrt
      meta.lookup("sqrt_big sqrt - rem lookup", |meta| {
        let s = meta.query_selector(selector);
        let sqrt = meta.query_advice(columns[offset + 1], Rotation::cur());
        let rem = meta.query_advice(columns[offset + 2], Rotation::cur());
        vec![(s.clone() * (two.clone() * sqrt - rem), inp_lookup)]
      });
    }

    let mut selectors = gadget_config.selectors;
    selectors.insert(GadgetType::SqrtBig, vec![selector]);

    GadgetConfig {
      columns,
      tables,
      selectors,
      ..gadget_config
    }
  }
}
impl<F: PrimeField> Gadget<F> for SqrtBigChip<F> {
  fn name(&self) -> String {
    "sqrt_big".to_string()
  }

  fn num_cols_per_op(&self) -> usize {
    Self::num_cols_per_op()
  }

  fn num_inputs_per_row(&self) -> usize {
    self.config.columns.len() / self.num_cols_per_op()
  }

  fn num_outputs_per_row(&self) -> usize {
    self.num_inputs_per_row()
  }

  /// Assigns one row of square roots: copies each input into its column
  /// triple and witnesses (sqrt, rem) with inp = sqrt^2 + rem.
  fn op_row_region(
    &self,
    region: &mut Region<F>,
    row_offset: usize,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    _single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let inps = &vec_inputs[0];

    if self.config.use_selectors {
      let selector = self.config.selectors.get(&GadgetType::SqrtBig).unwrap()[0];
      selector.enable(region, row_offset)?;
    }

    let mut outp_cells = vec![];
    for (i, inp) in inps.iter().enumerate() {
      let offset = i * self.num_cols_per_op();

      inp.copy_advice(
        || "sqrt_big",
        region,
        self.config.columns[offset],
        row_offset,
      )?;

      // Nearest-integer sqrt via f64; rem = inp - sqrt^2 may be negative.
      // NOTE(review): f64 sqrt has 53-bit precision — assumes inputs are small
      // enough that rounding gives the exact nearest integer root; confirm.
      let outp = inp.value().map(|x: &F| {
        let inp_val = convert_to_u64(x) as i64;
        let fsqrt = (inp_val as f64).sqrt();
        let sqrt = fsqrt.round() as i64;
        let rem = inp_val - sqrt * sqrt;
        (sqrt, rem)
      });

      let sqrt_cell = region.assign_advice(
        || "sqrt_big",
        self.config.columns[offset + 1],
        row_offset,
        || outp.map(|x| F::from(x.0 as u64)),
      )?;

      // rem may be negative: encode it as F(rem + sqrt) - F(sqrt) so the
      // field value is rem without casting a negative i64 to u64.
      let _rem_cell = region.assign_advice(
        || "sqrt_big",
        self.config.columns[offset + 2],
        row_offset,
        || {
          outp.map(|x| {
            let rem_pos = x.1 + x.0;
            F::from(rem_pos as u64) - F::from(x.0 as u64)
          })
        },
      )?;

      outp_cells.push(sqrt_cell);
    }

    Ok(outp_cells)
  }

  /// Pads the input with `zero` up to a full row, assigns the sqrt rows,
  /// and truncates the padding off the result.
  fn forward(
    &self,
    mut layouter: impl Layouter<F>,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let zero = &single_inputs[0];
    let mut inp = vec_inputs[0].clone();
    let inp_len = inp.len();
    while inp.len() % self.num_inputs_per_row() != 0 {
      inp.push(zero);
    }

    let vec_inputs = vec![inp];
    let outp = self.op_aligned_rows(
      layouter.namespace(|| format!("forward row {}", self.name())),
      &vec_inputs,
      single_inputs,
    )?;

    Ok(outp[0..inp_len].to_vec())
  }
}
================================================
FILE: src/gadgets/square.rs
================================================
use std::{marker::PhantomData, rc::Rc};
use halo2_proofs::{
circuit::{AssignedCell, Region},
halo2curves::ff::PrimeField,
plonk::{ConstraintSystem, Error},
poly::Rotation,
};
use super::gadget::{Gadget, GadgetConfig, GadgetType};
/// Elementwise squaring gadget: witnesses `out = inp * inp` per column pair.
pub struct SquareGadgetChip<F: PrimeField> {
  // Shared circuit configuration: columns and selectors.
  config: Rc<GadgetConfig>,
  // Binds the chip to its field type without storing a field element.
  _marker: PhantomData<F>,
}

impl<F: PrimeField> SquareGadgetChip<F> {
  /// Builds the chip from a shared gadget configuration.
  pub fn construct(config: Rc<GadgetConfig>) -> Self {
    Self {
      config,
      _marker: PhantomData,
    }
  }

  // TODO: it would be more efficient to do the division here directly
  /// Registers the squaring gate.
  ///
  /// The gate enforces `out = inp * inp` for EVERY (input, output) column
  /// pair in the row. Bug fix: the previous gate only constrained
  /// `columns[0]` / `columns[1]`, while `op_row_region` packs
  /// `columns.len() / 2` squarings into a single row — leaving every op
  /// after the first unconstrained (a soundness hole).
  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {
    let selector = meta.selector();
    let columns = gadget_config.columns;

    meta.create_gate("square gate", |meta| {
      let s = meta.query_selector(selector);
      let mut constraints = vec![];
      // 2 columns per op: (input, output).
      for op_idx in 0..columns.len() / 2 {
        let offset = op_idx * 2;
        let gate_inp = meta.query_advice(columns[offset], Rotation::cur());
        let gate_output = meta.query_advice(columns[offset + 1], Rotation::cur());
        let res = gate_inp.clone() * gate_inp;
        constraints.push(s.clone() * (res - gate_output));
      }
      constraints
    });

    let mut selectors = gadget_config.selectors;
    selectors.insert(GadgetType::Square, vec![selector]);

    GadgetConfig {
      columns,
      selectors,
      ..gadget_config
    }
  }
}
impl<F: PrimeField> Gadget<F> for SquareGadgetChip<F> {
  fn name(&self) -> String {
    "SquareChip".to_string()
  }

  // Columns per op: (input, output).
  fn num_cols_per_op(&self) -> usize {
    2
  }

  fn num_inputs_per_row(&self) -> usize {
    self.config.columns.len() / self.num_cols_per_op()
  }

  fn num_outputs_per_row(&self) -> usize {
    self.num_inputs_per_row()
  }

  /// Assigns one row of squarings: copies each input into its column pair
  /// and witnesses its square in the adjacent column.
  fn op_row_region(
    &self,
    region: &mut Region<F>,
    row_offset: usize,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    _single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    assert_eq!(vec_inputs.len(), 1);
    if self.config.use_selectors {
      let selector = self.config.selectors.get(&GadgetType::Square).unwrap()[0];
      selector.enable(region, row_offset)?;
    }

    let inps = &vec_inputs[0];
    let mut outp = vec![];
    for (i, inp) in inps.iter().enumerate() {
      let offset = i * self.num_cols_per_op();

      inp.copy_advice(|| "", region, self.config.columns[offset], row_offset)?;
      let outp_val = inp.value().map(|x: &F| x.to_owned() * x.to_owned());
      let outp_cell = region.assign_advice(
        || "square output",
        self.config.columns[offset + 1],
        row_offset,
        || outp_val,
      )?;
      outp.push(outp_cell);
    }

    Ok(outp)
  }

  /// Pads the input with `zero` up to a full row, assigns the squaring rows,
  /// and truncates the padding off the result.
  fn forward(
    &self,
    mut layouter: impl halo2_proofs::circuit::Layouter<F>,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let zero = &single_inputs[0];

    let mut inp = vec_inputs[0].clone();
    let initial_len = inp.len();
    while inp.len() % self.num_inputs_per_row() != 0 {
      inp.push(zero);
    }

    let vec_inputs = vec![inp];
    let res = self.op_aligned_rows(
      layouter.namespace(|| format!("forward row {}", self.name())),
      &vec_inputs,
      single_inputs,
    )?;
    Ok(res[0..initial_len].to_vec())
  }
}
================================================
FILE: src/gadgets/squared_diff.rs
================================================
use std::{marker::PhantomData, rc::Rc};
use halo2_proofs::{
circuit::{AssignedCell, Layouter, Region},
halo2curves::ff::PrimeField,
plonk::{ConstraintSystem, Error},
poly::Rotation,
};
use super::gadget::{Gadget, GadgetConfig, GadgetType};
type SquaredDiffConfig = GadgetConfig;
/// Elementwise squared-difference gadget: witnesses `(a - b)^2` per column
/// triple.
pub struct SquaredDiffGadgetChip<F: PrimeField> {
  // Shared circuit configuration: columns and selectors.
  config: Rc<SquaredDiffConfig>,
  // Binds the chip to its field type without storing a field element.
  _marker: PhantomData<F>,
}

impl<F: PrimeField> SquaredDiffGadgetChip<F> {
  /// Builds the chip from a shared gadget configuration.
  pub fn construct(config: Rc<SquaredDiffConfig>) -> Self {
    Self {
      config,
      _marker: PhantomData,
    }
  }

  /// Columns per op: (input 1, input 2, output).
  pub fn num_cols_per_op() -> usize {
    3
  }

  /// Registers the squared-difference gate: out = (inp1 - inp2)^2 for every
  /// column triple in the row.
  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {
    let selector = meta.selector();
    let columns = gadget_config.columns;

    meta.create_gate("squared diff", |meta| {
      let s = meta.query_selector(selector);
      let mut constraints = vec![];
      for i in 0..columns.len() / Self::num_cols_per_op() {
        let offset = i * Self::num_cols_per_op();
        let inp1 = meta.query_advice(columns[offset + 0], Rotation::cur());
        let inp2 = meta.query_advice(columns[offset + 1], Rotation::cur());
        let outp = meta.query_advice(columns[offset + 2], Rotation::cur());

        let res = (inp1 - inp2).square();
        constraints.append(&mut vec![s.clone() * (res - outp)])
      }

      constraints
    });

    let mut selectors = gadget_config.selectors;
    selectors.insert(GadgetType::SquaredDiff, vec![selector]);

    GadgetConfig {
      columns,
      selectors,
      ..gadget_config
    }
  }
}
}
impl<F: PrimeField> Gadget<F> for SquaredDiffGadgetChip<F> {
  fn name(&self) -> String {
    "SquaredDiff".to_string()
  }

  fn num_cols_per_op(&self) -> usize {
    Self::num_cols_per_op()
  }

  fn num_inputs_per_row(&self) -> usize {
    self.config.columns.len() / self.num_cols_per_op()
  }

  fn num_outputs_per_row(&self) -> usize {
    self.config.columns.len() / self.num_cols_per_op()
  }

  /// Assigns one row of squared differences: copies each input pair into its
  /// column triple and witnesses (inp1 - inp2)^2 in the third column.
  fn op_row_region(
    &self,
    region: &mut Region<F>,
    row_offset: usize,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    _single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let inp1 = &vec_inputs[0];
    let inp2 = &vec_inputs[1];
    assert_eq!(inp1.len(), inp2.len());

    let columns = &self.config.columns;

    if self.config.use_selectors {
      let selector = self.config.selectors.get(&GadgetType::SquaredDiff).unwrap()[0];
      selector.enable(region, row_offset)?;
    }

    let mut outps = vec![];
    for i in 0..inp1.len() {
      let offset = i * self.num_cols_per_op();
      let inp1 = inp1[i].copy_advice(|| "", region, columns[offset + 0], row_offset)?;
      let inp2 = inp2[i].copy_advice(|| "", region, columns[offset + 1], row_offset)?;
      // Value arithmetic mirrors the gate: (inp1 - inp2)^2.
      let outp = inp1.value().map(|x: &F| x.to_owned()) - inp2.value().map(|x: &F| x.to_owned());
      let outp = outp * outp;
      let outp = region.assign_advice(|| "", columns[offset + 2], row_offset, || outp)?;
      outps.push(outp);
    }

    Ok(outps)
  }

  /// Pads both operand vectors with `zero` up to a full row (0 - 0 = 0, so
  /// the padding satisfies the gate), then truncates the padding off.
  fn forward(
    &self,
    mut layouter: impl Layouter<F>,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let zero = &single_inputs[0];

    let mut inp1 = vec_inputs[0].clone();
    let mut inp2 = vec_inputs[1].clone();
    let initial_len = inp1.len();
    while inp1.len() % self.num_inputs_per_row() != 0 {
      inp1.push(zero);
      inp2.push(zero);
    }

    let vec_inputs = vec![inp1, inp2];
    let res = self.op_aligned_rows(
      layouter.namespace(|| format!("forward row {}", self.name())),
      &vec_inputs,
      single_inputs,
    )?;
    Ok(res[0..initial_len].to_vec())
  }
}
================================================
FILE: src/gadgets/sub_pairs.rs
================================================
use std::{marker::PhantomData, rc::Rc};
use halo2_proofs::{
circuit::{AssignedCell, Layouter, Region},
halo2curves::ff::PrimeField,
plonk::{ConstraintSystem, Error},
poly::Rotation,
};
use super::gadget::{Gadget, GadgetConfig, GadgetType};
type SubPairsConfig = GadgetConfig;
/// Elementwise subtraction gadget: witnesses `a - b` per column triple.
pub struct SubPairsChip<F: PrimeField> {
  // Shared circuit configuration: columns and selectors.
  config: Rc<SubPairsConfig>,
  // Binds the chip to its field type without storing a field element.
  _marker: PhantomData<F>,
}

impl<F: PrimeField> SubPairsChip<F> {
  /// Builds the chip from a shared gadget configuration.
  pub fn construct(config: Rc<SubPairsConfig>) -> Self {
    Self {
      config,
      _marker: PhantomData,
    }
  }

  /// Columns per op: (input 1, input 2, output).
  pub fn num_cols_per_op() -> usize {
    3
  }

  /// Registers the subtraction gate: out = inp1 - inp2 for every column
  /// triple in the row.
  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {
    let selector = meta.selector();
    let columns = gadget_config.columns;

    meta.create_gate("sub pair", |meta| {
      let s = meta.query_selector(selector);
      let mut constraints = vec![];
      for i in 0..columns.len() / Self::num_cols_per_op() {
        let offset = i * Self::num_cols_per_op();
        let inp1 = meta.query_advice(columns[offset + 0], Rotation::cur());
        let inp2 = meta.query_advice(columns[offset + 1], Rotation::cur());
        let outp = meta.query_advice(columns[offset + 2], Rotation::cur());

        let res = inp1 - inp2;
        constraints.append(&mut vec![s.clone() * (res - outp)])
      }

      constraints
    });

    let mut selectors = gadget_config.selectors;
    selectors.insert(GadgetType::SubPairs, vec![selector]);

    GadgetConfig {
      columns,
      selectors,
      ..gadget_config
    }
  }
}
}
impl<F: PrimeField> Gadget<F> for SubPairsChip<F> {
  fn name(&self) -> String {
    "sub pairs chip".to_string()
  }

  fn num_cols_per_op(&self) -> usize {
    Self::num_cols_per_op()
  }

  fn num_inputs_per_row(&self) -> usize {
    self.config.columns.len() / self.num_cols_per_op()
  }

  fn num_outputs_per_row(&self) -> usize {
    self.config.columns.len() / self.num_cols_per_op()
  }

  /// Assigns one row of pairwise subtractions: copies each (lhs, rhs) pair
  /// into its column triple and witnesses the difference in the third column.
  fn op_row_region(
    &self,
    region: &mut Region<F>,
    row_offset: usize,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    _single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let lhs_cells = &vec_inputs[0];
    let rhs_cells = &vec_inputs[1];
    assert_eq!(lhs_cells.len(), rhs_cells.len());

    let columns = &self.config.columns;
    if self.config.use_selectors {
      let selector = self.config.selectors.get(&GadgetType::SubPairs).unwrap()[0];
      selector.enable(region, row_offset)?;
    }

    let mut outps = vec![];
    for (idx, (lhs, rhs)) in lhs_cells.iter().zip(rhs_cells.iter()).enumerate() {
      let offset = idx * self.num_cols_per_op();
      let lhs = lhs.copy_advice(|| "", region, columns[offset], row_offset)?;
      let rhs = rhs.copy_advice(|| "", region, columns[offset + 1], row_offset)?;
      // Value arithmetic mirrors the gate: out = lhs - rhs.
      let diff = lhs.value().map(|x: &F| x.to_owned()) - rhs.value().map(|x: &F| x.to_owned());
      let outp = region.assign_advice(|| "", columns[offset + 2], row_offset, || diff)?;
      outps.push(outp);
    }

    Ok(outps)
  }

  /// Pads both operand vectors with `zero` up to a full row (0 - 0 = 0, so
  /// the padding satisfies the gate), then truncates the padding off.
  fn forward(
    &self,
    mut layouter: impl Layouter<F>,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let zero = &single_inputs[0];
    let mut lhs = vec_inputs[0].clone();
    let mut rhs = vec_inputs[1].clone();
    let initial_len = lhs.len();
    while lhs.len() % self.num_inputs_per_row() != 0 {
      lhs.push(zero);
      rhs.push(zero);
    }

    let padded = vec![lhs, rhs];
    let res = self.op_aligned_rows(
      layouter.namespace(|| format!("forward row {}", self.name())),
      &padded,
      single_inputs,
    )?;
    Ok(res[0..initial_len].to_vec())
  }
}
================================================
FILE: src/gadgets/update.rs
================================================
use std::marker::PhantomData;
use halo2_proofs::{
circuit::{AssignedCell, Layouter, Region},
halo2curves::ff::PrimeField,
plonk::{ConstraintSystem, Error, Expression},
poly::Rotation,
};
use crate::gadgets::gadget::{convert_to_u64, GadgetConfig};
use super::gadget::{Gadget, GadgetType};
type UpdateConfig = GadgetConfig;
/// Gradient-update gadget: witnesses w' = (w * SF - dw * eta) / SF with an
/// explicit quotient/remainder decomposition.
#[derive(Clone, Debug)]
pub struct UpdateGadgetChip<F: PrimeField> {
  // NOTE: stored by value, unlike most chips which hold Rc<GadgetConfig>.
  config: UpdateConfig,
  _marker: PhantomData<F>,
}

impl<F: PrimeField> UpdateGadgetChip<F> {
  /// Builds the chip from a gadget configuration (taken by value).
  pub fn construct(config: UpdateConfig) -> Self {
    Self {
      config,
      _marker: PhantomData,
    }
  }

  /// Columns per op: (w, dw, quotient, remainder).
  pub fn num_cols_per_op() -> usize {
    4
  }

  /// Registers the update gate and the remainder range lookup.
  ///
  /// Per column quadruple the gate enforces
  ///   w * SF - dw * eta = div * SF + mod,
  /// and the lookup bounds mod to [0, 2^N).
  /// NOTE(review): eta here is derived from `gadget_config.eta`, while
  /// `op_row_region` hard-codes eta = SF / 1000 — confirm these agree.
  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> UpdateConfig {
    let tables = &gadget_config.tables;

    let mod_lookup = tables.get(&GadgetType::InputLookup).unwrap()[0];

    let columns = gadget_config.columns;
    let selector = meta.complex_selector();

    let div_val = gadget_config.scale_factor;
    let eta: u64 = (gadget_config.scale_factor as f64 * gadget_config.eta) as u64;

    meta.create_gate("updater_arith", |meta| {
      let s = meta.query_selector(selector);
      let sf = Expression::Constant(F::from(div_val as u64));
      let eta = Expression::Constant(F::from(eta as u64));

      let mut constraints = vec![];
      for op_idx in 0..columns.len() / Self::num_cols_per_op() {
        let offset = op_idx * Self::num_cols_per_op();
        let w = meta.query_advice(columns[offset], Rotation::cur());
        let dw = meta.query_advice(columns[offset + 1], Rotation::cur());
        let div = meta.query_advice(columns[offset + 2], Rotation::cur());
        let mod_res = meta.query_advice(columns[offset + 3], Rotation::cur());

        // w * SF - dw * eta = div * SF + mod
        let expr = (w * sf.clone() - dw * eta.clone()) - (div * sf.clone() + mod_res);
        constraints.push(s.clone() * expr);
      }

      constraints
    });

    for op_idx in 0..columns.len() / Self::num_cols_per_op() {
      let offset = op_idx * Self::num_cols_per_op();
      // Check that mod is smaller than SF
      meta.lookup("max inp1", |meta| {
        let s = meta.query_selector(selector);
        let mod_res = meta.query_advice(columns[offset + 3], Rotation::cur());

        // Constrains that the modulus \in [0, DIV_VAL)
        vec![(s.clone() * mod_res.clone(), mod_lookup)]
      });
    }

    let mut selectors = gadget_config.selectors;
    selectors.insert(GadgetType::Update, vec![selector]);

    UpdateConfig {
      columns,
      selectors,
      ..gadget_config
    }
  }
}
}
impl<F: PrimeField + Ord> Gadget<F> for UpdateGadgetChip<F> {
  fn name(&self) -> String {
    "updater chip".to_string()
  }

  fn num_cols_per_op(&self) -> usize {
    Self::num_cols_per_op()
  }

  fn num_inputs_per_row(&self) -> usize {
    self.config.columns.len() / self.num_cols_per_op()
  }

  fn num_outputs_per_row(&self) -> usize {
    self.config.columns.len() / self.num_cols_per_op()
  }

  /// Assigns one row of updates: for each (w, dw) pair, copies the operands
  /// and witnesses the floor quotient and remainder of (w*SF - dw*eta) / SF.
  fn op_row_region(
    &self,
    region: &mut Region<F>,
    row_offset: usize,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    _single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let div_val = self.config.scale_factor as i64;
    let div_val_f = F::from(div_val as u64);
    // NOTE(review): eta is hard-coded as SF / 1000 here, while configure()
    // derives it from gadget_config.eta — confirm these agree.
    let eta = div_val / 1000;
    let eta = F::from(eta as u64);

    let div_outp_min_val = self.config.div_outp_min_val;
    // Shift added before division so the dividend is non-negative.
    let div_inp_min_val_pos_i64 = -self.config.shift_min_val;
    let div_inp_min_val_pos = F::from(div_inp_min_val_pos_i64 as u64);

    let columns = &self.config.columns;

    if self.config.use_selectors {
      let selector = self.config.selectors.get(&GadgetType::Update).unwrap()[0];
      selector.enable(region, row_offset)?;
    }

    let w = &vec_inputs[0];
    let dw = &vec_inputs[1];

    let mut output_cells = vec![];
    for i in 0..w.len() {
      let offset = i * self.num_cols_per_op();
      let _w_cell = w[i].copy_advice(|| "", region, columns[offset + 0], row_offset)?;
      let _dw_cell = dw[i].copy_advice(|| "", region, columns[offset + 1], row_offset)?;

      let w_val = w[i].value().map(|x: &F| x.to_owned());
      let dw_val = dw[i].value().map(|x: &F| x.to_owned());

      // Scaled update before division: w * SF - dw * eta.
      let out_scaled = w_val.zip(dw_val).map(|(w, dw)| w * div_val_f - dw * eta);
      let div_mod = out_scaled.map(|x| {
        // Shift into the non-negative range so convert_to_u64 is valid,
        // then undo the shift on the quotient below.
        let x_pos = x + div_inp_min_val_pos;
        let x_pos = if x_pos > F::ZERO {
          x_pos
        } else {
          x_pos + div_val_f
        };
        let inp = convert_to_u64(&x_pos);
        let div_res = inp as i64 / div_val - (div_inp_min_val_pos_i64 as i64 / div_val);
        let mod_res = inp as i64 % div_val;
        (div_res, mod_res)
      });

      // Quotient may be negative: encode it as F(q - min) - F(-min).
      let div_res_cell = region
        .assign_advice(
          || "div_res",
          self.config.columns[offset + 2],
          row_offset,
          || {
            div_mod.map(|(x, _): (i64, i64)| {
              F::from((x - div_outp_min_val as i64) as u64) - F::from(-div_outp_min_val as u64)
            })
          },
        )
        .unwrap();

      let _mod_res_cell = region
        .assign_advice(
          || "mod_res",
          self.config.columns[offset + 3],
          row_offset,
          || div_mod.map(|(_, x): (i64, i64)| F::from(x as u64)),
        )
        .unwrap();

      output_cells.push(div_res_cell);
    }

    Ok(output_cells)
  }

  /// Pads `w` and `dw` with `zero` to a whole number of rows, assigns the
  /// update rows, and strips the padding from the output.
  fn forward(
    &self,
    mut layouter: impl Layouter<F>,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let zero = &single_inputs[0];

    let mut w = vec_inputs[0].clone();
    let mut dw = vec_inputs[1].clone();
    let initial_len = w.len();
    // Bug fix: the previous condition `!w.len() % self.num_cols_per_op() == 0`
    // applied bitwise NOT to the length (`!` binds tighter than `%`), so the
    // padding loop essentially never ran correctly; it also padded to a
    // multiple of the columns-per-op instead of ops-per-row, and padded w and
    // dw independently. Pad both in lockstep to a full row, as the other
    // gadget chips do.
    while w.len() % self.num_inputs_per_row() != 0 {
      w.push(zero);
      dw.push(zero);
    }

    let res = self.op_aligned_rows(
      layouter.namespace(|| format!("forward row {}", self.name())),
      &vec![w, dw],
      single_inputs,
    )?;
    Ok(res[0..initial_len].to_vec())
  }
}
================================================
FILE: src/gadgets/var_div.rs
================================================
use std::{marker::PhantomData, rc::Rc};
use halo2_proofs::{
circuit::{AssignedCell, Layouter, Region},
halo2curves::ff::PrimeField,
plonk::{ConstraintSystem, Error, Expression},
poly::Rotation,
};
use rounded_div::RoundedDiv;
use super::gadget::{convert_to_u128, Gadget, GadgetConfig, GadgetType};
type VarDivRoundConfig = GadgetConfig;
/// Rounded division by a circuit variable: witnesses c = round(a / b) for a
/// divisor b shared by the whole row.
pub struct VarDivRoundChip<F: PrimeField> {
  // Shared circuit configuration: columns, selectors, and lookup tables.
  config: Rc<VarDivRoundConfig>,
  // Binds the chip to its field type without storing a field element.
  _marker: PhantomData<F>,
}

impl<F: PrimeField> VarDivRoundChip<F> {
  /// Builds the chip from a shared gadget configuration.
  pub fn construct(config: Rc<VarDivRoundConfig>) -> Self {
    Self {
      config,
      _marker: PhantomData,
    }
  }

  /// Columns per op: (a, c, r); the divisor b lives in the last row column.
  pub fn num_cols_per_op() -> usize {
    3
  }

  /// Registers the rounded-division gate and range lookups.
  ///
  /// Rounding is encoded as (2a + b) = (2b)c + r with 0 <= r < 2b, which
  /// makes c = round(a / b). r and 2b - r are range-checked via the input
  /// lookup, as is b itself.
  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {
    let columns = gadget_config.columns;
    let selector = meta.complex_selector();
    let two = Expression::Constant(F::from(2));

    let tables = gadget_config.tables;

    let lookup = tables.get(&GadgetType::InputLookup).unwrap()[0];

    // a | c | r | ... | b
    // (2 * a + b) = (2 * b) * c + r
    // b - r \in [0, 2^N) <-- forces b > r
    meta.create_gate("var_div_arithm", |meta| {
      let s = meta.query_selector(selector);
      let mut constraints = vec![];

      // The divisor is shared: it occupies the final column of the row.
      let b = meta.query_advice(columns[columns.len() - 1], Rotation::cur());
      for i in 0..(columns.len() - 1) / Self::num_cols_per_op() {
        let offset = i * Self::num_cols_per_op();
        let a = meta.query_advice(columns[offset], Rotation::cur());
        let c = meta.query_advice(columns[offset + 1], Rotation::cur());
        let r = meta.query_advice(columns[offset + 2], Rotation::cur());

        // 2a + b = 2bc + r
        let lhs = a.clone() * two.clone() + b.clone();
        let rhs = b.clone() * two.clone() * c + r;
        constraints.push(s.clone() * (lhs - rhs));
      }

      constraints
    });

    for i in 0..(columns.len() - 1) / Self::num_cols_per_op() {
      let offset = i * Self::num_cols_per_op();
      // r \in [0, 2^N)
      meta.lookup("var div range checks r", |meta| {
        let s = meta.query_selector(selector);
        let r = meta.query_advice(columns[offset + 2], Rotation::cur());

        vec![(s.clone() * r, lookup)]
      });
      // 2 * b - r \in [0, 2^N)
      meta.lookup("var div range checks 2b-r", |meta| {
        let s = meta.query_selector(selector);
        let b = meta.query_advice(columns[columns.len() - 1], Rotation::cur());
        let r = meta.query_advice(columns[offset + 2], Rotation::cur());

        vec![(s.clone() * (two.clone() * b - r), lookup)]
      });
    }
    // b \in [0, 2^N)
    meta.lookup("var div range checks b", |meta| {
      let s = meta.query_selector(selector);
      let b = meta.query_advice(columns[columns.len() - 1], Rotation::cur());

      vec![(s.clone() * b, lookup)]
    });

    let mut selectors = gadget_config.selectors;
    selectors.insert(GadgetType::VarDivRound, vec![selector]);

    GadgetConfig {
      columns,
      tables,
      selectors,
      ..gadget_config
    }
  }
}
impl<F: PrimeField> Gadget<F> for VarDivRoundChip<F> {
  fn name(&self) -> String {
    "VarDivRoundChip".to_string()
  }

  fn num_cols_per_op(&self) -> usize {
    Self::num_cols_per_op()
  }

  // The last column holds the shared divisor, hence the -1.
  fn num_inputs_per_row(&self) -> usize {
    (self.config.columns.len() - 1) / self.num_cols_per_op()
  }

  fn num_outputs_per_row(&self) -> usize {
    self.num_inputs_per_row()
  }

  /// Assigns one row of rounded divisions: copies the shared divisor b into
  /// the last column, then for each dividend a witnesses c = round(a / b)
  /// and the rounding remainder r of (2a + b) mod (2b).
  fn op_row_region(
    &self,
    region: &mut Region<F>,
    row_offset: usize,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let a_vec = &vec_inputs[0];
    // let zero = single_inputs[0].clone();
    let b = &single_inputs[1];

    let div_outp_min_val_i64 = self.config.div_outp_min_val;

    // Shift added to a so the dividend is non-negative before converting
    // to an unsigned integer.
    let div_inp_min_val_pos_i64 = -self.config.shift_min_val;

    if self.config.use_selectors {
      let selector = self.config.selectors.get(&GadgetType::VarDivRound).unwrap()[0];
      selector.enable(region, row_offset)?;
    }

    b.copy_advice(
      || "",
      region,
      self.config.columns[self.config.columns.len() - 1],
      row_offset,
    )?;

    let mut div_out = vec![];
    for (i, a) in a_vec.iter().enumerate() {
      let offset = i * self.num_cols_per_op();
      a.copy_advice(|| "", region, self.config.columns[offset], row_offset)?;

      let div_mod = a.value().zip(b.value()).map(|(a, b)| {
        let b = convert_to_u128(b);
        // Needs to be divisible by b
        let div_inp_min_val_pos_i64 = div_inp_min_val_pos_i64 / (b as i64) * (b as i64);
        let div_inp_min_val_pos = F::from(div_inp_min_val_pos_i64 as u64);

        // Shift a into the non-negative range, divide, then subtract the
        // (exactly divisible) shift back out of the quotient.
        let a_pos = *a + div_inp_min_val_pos;
        let a = convert_to_u128(&a_pos);
        // c = (2 * a + b) / (2 * b)
        let c_pos = a.rounded_div(b);
        let c = (c_pos as i128 - (div_inp_min_val_pos_i64 as u128 / b) as i128) as i64;

        // r = (2 * a + b) % (2 * b)
        let rem_floor = (a as i128) - (c_pos * b) as i128;
        let r = 2 * rem_floor + (b as i128);
        let r = r as i64;
        (c, r)
      });

      // Quotient may be negative: encode it as F(c - min) - F(-min).
      let div_cell = region.assign_advice(
        || "",
        self.config.columns[offset + 1],
        row_offset,
        || {
          div_mod.map(|(c, _)| {
            let offset = F::from(-div_outp_min_val_i64 as u64);
            let c = F::from((c - div_outp_min_val_i64) as u64);
            c - offset
          })
        },
      )?;

      let _mod_cell = region.assign_advice(
        || "",
        self.config.columns[offset + 2],
        row_offset,
        || div_mod.map(|(_, r)| F::from(r as u64)),
      )?;

      div_out.push(div_cell);
    }

    Ok(div_out)
  }

  /// Pads the dividends with the `default` cell up to a full row, then
  /// truncates the padding off the quotients.
  fn forward(
    &self,
    mut layouter: impl Layouter<F>,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let mut inps = vec_inputs[0].clone();
    let initial_len = inps.len();

    // Needed to pad: bias - bias = 0
    let default = &single_inputs[0];
    while inps.len() % self.num_inputs_per_row() != 0 {
      inps.push(&default);
    }

    let res = self.op_aligned_rows(layouter.namespace(|| "var_div"), &vec![inps], single_inputs)?;
    Ok(res[..initial_len].to_vec())
  }
}
================================================
FILE: src/gadgets/var_div_big.rs
================================================
use std::{marker::PhantomData, rc::Rc};
use halo2_proofs::{
circuit::{AssignedCell, Layouter, Region},
halo2curves::ff::PrimeField,
plonk::{ConstraintSystem, Error, Expression},
poly::Rotation,
};
use rounded_div::RoundedDiv;
use super::gadget::{convert_to_u128, Gadget, GadgetConfig, GadgetType};
/// Rounded division by a circuit variable for operands too large for a single
/// range lookup: the remainder checks are decomposed into two base-`num_rows`
/// limbs.
pub struct VarDivRoundBigChip<F: PrimeField> {
  // Shared circuit configuration: columns, selectors, and lookup tables.
  config: Rc<GadgetConfig>,
  // Binds the chip to its field type without storing a field element.
  _marker: PhantomData<F>,
}

impl<F: PrimeField> VarDivRoundBigChip<F> {
  /// Builds the chip from a shared gadget configuration.
  pub fn construct(config: Rc<GadgetConfig>) -> Self {
    Self {
      config,
      _marker: PhantomData,
    }
  }

  /// Columns per op: (a, c, r, (2b - r)_1, (2b - r)_0, r_1, r_0).
  pub fn num_cols_per_op() -> usize {
    7
  }

  /// Registers the rounded-division gate and the limb range lookups.
  ///
  /// As in `VarDivRoundChip`, rounding is (2a + b) = (2b)c + r, but r and
  /// 2b - r exceed the lookup range, so each is split into hi/lo limbs in
  /// base `num_rows` and the four limbs are range-checked individually.
  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {
    let columns = gadget_config.columns;
    let selector = meta.complex_selector();
    let two = Expression::Constant(F::from(2));
    // Limb base for the hi/lo decomposition of r and 2b - r.
    let range = Expression::Constant(F::from(gadget_config.num_rows as u64));

    let tables = gadget_config.tables;

    let lookup = tables.get(&GadgetType::InputLookup).unwrap()[0];

    // a | c | r | (2 b - r)_1 | (2 b - r)_0 | r_1 | r_0 | ... | b
    // a / b = c
    meta.create_gate("var_div_arithm", |meta| {
      let s = meta.query_selector(selector);
      let mut constraints = vec![];

      // The divisor is shared: it occupies the final column of the row.
      let b = meta.query_advice(columns[columns.len() - 1], Rotation::cur());
      for i in 0..(columns.len() - 1) / Self::num_cols_per_op() {
        let offset = i * Self::num_cols_per_op();
        // Constrain that (2 * a + b) = (2 * b) * c + r
        let a = meta.query_advice(columns[offset], Rotation::cur());
        let c = meta.query_advice(columns[offset + 1], Rotation::cur());
        let r = meta.query_advice(columns[offset + 2], Rotation::cur());

        let lhs = a.clone() * two.clone() + b.clone();
        let rhs = b.clone() * two.clone() * c + r.clone();
        constraints.push(s.clone() * (lhs - rhs));

        // Constrain that (2 * b - r) = br1 * max_val + br0
        let br1 = meta.query_advice(columns[offset + 3], Rotation::cur());
        let br0 = meta.query_advice(columns[offset + 4], Rotation::cur());
        let lhs = b.clone() * two.clone() - r.clone();
        let rhs = br1 * range.clone() + br0;
        constraints.push(s.clone() * (lhs - rhs));

        // Constrains that r = r1 * max_val + r0
        let r1 = meta.query_advice(columns[offset + 5], Rotation::cur());
        let r0 = meta.query_advice(columns[offset + 6], Rotation::cur());
        let lhs = r.clone();
        let rhs = r1 * range.clone() + r0;
        constraints.push(s.clone() * (lhs - rhs));
      }

      constraints
    });

    // For var div big, we assume that a, b > 0 and are outputs of the previous layer
    // r must be constrained to be in [0, b)
    for i in 0..(columns.len() - 1) / Self::num_cols_per_op() {
      let offset = i * Self::num_cols_per_op();
      // (2 * b - r)_{1, 0} \in [0, 2^N)
      meta.lookup("var div big br1", |meta| {
        let s = meta.query_selector(selector);
        let br1 = meta.query_advice(columns[offset + 3], Rotation::cur());
        vec![(s * br1, lookup)]
      });
      meta.lookup("var div big br0", |meta| {
        let s = meta.query_selector(selector);
        let br0 = meta.query_advice(columns[offset + 4], Rotation::cur());
        vec![(s * br0, lookup)]
      });
      // r_{1, 0} \in [0, 2^N)
      meta.lookup("var div big r1", |meta| {
        let s = meta.query_selector(selector);
        let r1 = meta.query_advice(columns[offset + 5], Rotation::cur());
        vec![(s * r1, lookup)]
      });
      meta.lookup("var div big r0", |meta| {
        let s = meta.query_selector(selector);
        let r0 = meta.query_advice(columns[offset + 6], Rotation::cur());
        vec![(s * r0, lookup)]
      });
    }

    let mut selectors = gadget_config.selectors;
    selectors.insert(GadgetType::VarDivRoundBig, vec![selector]);

    GadgetConfig {
      columns,
      tables,
      selectors,
      ..gadget_config
    }
  }
}
// `Gadget` implementation for variable-divisor rounded division where the
// remainder is too large for a single range lookup and is therefore split
// into two base-`num_rows` limbs (range-checked by the lookups registered in
// `configure`).
impl<F: PrimeField> Gadget<F> for VarDivRoundBigChip<F> {
  fn name(&self) -> String {
    // NOTE(review): word order ("BigRound") differs from the type name
    // (`VarDivRoundBigChip`); kept as-is in case the string is matched elsewhere.
    "VarDivBigRoundChip".to_string()
  }
  fn num_cols_per_op(&self) -> usize {
    // 7 columns per op: a | c | r | (2b - r)_1 | (2b - r)_0 | r_1 | r_0
    Self::num_cols_per_op()
  }
  fn num_inputs_per_row(&self) -> usize {
    // The last column is reserved for the shared divisor b.
    (self.config.columns.len() - 1) / self.num_cols_per_op()
  }
  fn num_outputs_per_row(&self) -> usize {
    // One quotient per dividend.
    self.num_inputs_per_row()
  }
  /// Assigns one row of rounded divisions `c = round(a / b)`.
  ///
  /// * `vec_inputs[0]` — the dividends `a` (one 7-column group each).
  /// * `single_inputs[1]` — the divisor `b`, shared across the row
  ///   (`single_inputs[0]` is a zero cell used only for padding in `forward`).
  ///
  /// For each `a` this fills the group with the quotient `c`, the remainder
  /// `r` of `(2a + b) / (2b)`, and the base-`num_rows` limb splits of
  /// `2b - r` and `r` that the configured lookups range-check.
  fn op_row_region(
    &self,
    region: &mut Region<F>,
    row_offset: usize,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let a_vec = &vec_inputs[0];
    // let zero = single_inputs[0].clone();
    let b = &single_inputs[1];
    let div_outp_min_val_i64 = self.config.div_outp_min_val;
    // Positive shift added to `a` so the witness division is computed over
    // non-negative values; it is undone on the quotient below.
    let div_inp_min_val_pos_i64 = -self.config.shift_min_val;
    let num_rows = self.config.num_rows as i64;
    if self.config.use_selectors {
      let selector = self
        .config
        .selectors
        .get(&GadgetType::VarDivRoundBig)
        .unwrap()[0];
      selector.enable(region, row_offset)?;
    }
    // The shared divisor lives in the last column of the row.
    b.copy_advice(
      || "",
      region,
      self.config.columns[self.config.columns.len() - 1],
      row_offset,
    )?;
    let mut div_out = vec![];
    for (i, a) in a_vec.iter().enumerate() {
      let offset = i * self.num_cols_per_op();
      a.copy_advice(|| "", region, self.config.columns[offset], row_offset)
        .unwrap();
      // Compute the witness (quotient, remainder) pair for this dividend.
      let div_mod = a.value().zip(b.value()).map(|(a, b)| {
        let b = convert_to_u128(b);
        // Needs to be divisible by b
        let div_inp_min_val_pos_i64 = div_inp_min_val_pos_i64 / (b as i64) * (b as i64);
        let div_inp_min_val_pos = F::from(div_inp_min_val_pos_i64 as u64);
        let a_pos = *a + div_inp_min_val_pos;
        let a = convert_to_u128(&a_pos);
        // c = (2 * a + b) / (2 * b)
        let c_pos = a.rounded_div(b);
        // Undo the shift: the shift was made divisible by b above, so
        // subtracting (shift / b) recovers the true (possibly negative) quotient.
        let c = (c_pos as i128 - (div_inp_min_val_pos_i64 as u128 / b) as i128) as i64;
        // r = (2 * a + b) % (2 * b)
        let rem_floor = (a as i128) - (c_pos * b) as i128;
        let r = 2 * rem_floor + (b as i128);
        let r = r as i64;
        (c, r)
      });
      // Split 2b - r into limbs: val = p1 * num_rows + p0.
      let br_split = div_mod.zip(b.value()).map(|((_, r), b)| {
        let b = convert_to_u128(b) as i64;
        let val = 2 * b - r;
        let p1 = val / num_rows;
        let p0 = val % num_rows;
        // val = p1 * max_val + p0
        (p1, p0)
      });
      // Split r the same way: r = p1 * num_rows + p0.
      let r_split = div_mod.map(|(_, r)| {
        let p1 = r / num_rows;
        let p0 = r % num_rows;
        // val = p1 * max_val + p0
        (p1, p0)
      });
      let div_cell = region.assign_advice(
        || "",
        self.config.columns[offset + 1],
        row_offset,
        || {
          div_mod.map(|(c, _)| {
            // c may be negative; encode it as the field difference
            // F(c - min) - F(-min), both operands of which are non-negative.
            let offset = F::from(-div_outp_min_val_i64 as u64);
            let c = F::from((c - div_outp_min_val_i64) as u64);
            c - offset
          })
        },
      )?;
      let _mod_cell = region.assign_advice(
        || "",
        self.config.columns[offset + 2],
        row_offset,
        || div_mod.map(|(_, r)| F::from(r as u64)),
      )?;
      // Assign 2 * b - r to the next 2 columns
      let _br_split_cell_1 = region.assign_advice(
        || "",
        self.config.columns[offset + 3],
        row_offset,
        || br_split.map(|(p1, _)| F::from(p1 as u64)),
      )?;
      let _br_split_cell_2 = region.assign_advice(
        || "",
        self.config.columns[offset + 4],
        row_offset,
        || br_split.map(|(_, p0)| F::from(p0 as u64)),
      )?;
      // Assign r to the next 2 columns
      let _r_split_cell_1 = region.assign_advice(
        || "",
        self.config.columns[offset + 5],
        row_offset,
        || r_split.map(|(p1, _)| F::from(p1 as u64)),
      )?;
      let _r_split_cell_2 = region.assign_advice(
        || "",
        self.config.columns[offset + 6],
        row_offset,
        || r_split.map(|(_, p0)| F::from(p0 as u64)),
      )?;
      div_out.push(div_cell);
    }
    Ok(div_out)
  }
  /// Pads the inputs with `single_inputs[0]` up to a whole number of rows,
  /// runs the aligned row assignment, and truncates the result back to the
  /// original input count.
  fn forward(
    &self,
    mut layouter: impl Layouter<F>,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let mut inps = vec_inputs[0].clone();
    let initial_len = inps.len();
    // Needed to pad
    let default = &single_inputs[0];
    while inps.len() % self.num_inputs_per_row() != 0 {
      inps.push(&default);
    }
    let res = self.op_aligned_rows(
      layouter.namespace(|| "var_div_big"),
      &vec![inps],
      single_inputs,
    )?;
    Ok(res[..initial_len].to_vec())
  }
}
================================================
FILE: src/gadgets/var_div_big3.rs
================================================
use std::{marker::PhantomData, rc::Rc};
use halo2_proofs::{
circuit::{AssignedCell, Layouter, Region},
halo2curves::ff::PrimeField,
plonk::{ConstraintSystem, Error, Expression},
poly::Rotation,
};
use rounded_div::RoundedDiv;
use super::gadget::{convert_to_u128, Gadget, GadgetConfig, GadgetType};
/// Variable-divisor rounded-division gadget for "big" operands whose
/// remainders need a three-limb (base `num_rows`) decomposition for
/// range checking.
pub struct VarDivRoundBig3Chip<F: PrimeField> {
  config: Rc<GadgetConfig>,
  // Ties the chip to its field type without storing a field element.
  _marker: PhantomData<F>,
}
impl<F: PrimeField> VarDivRoundBig3Chip<F> {
  /// Wraps a shared `GadgetConfig` into a chip instance.
  pub fn construct(config: Rc<GadgetConfig>) -> Self {
    Self {
      config,
      _marker: PhantomData,
    }
  }
  /// 9 columns per op: a | c | r | three limbs of (2b - r) | three limbs of r.
  pub fn num_cols_per_op() -> usize {
    9
  }
  /// Registers the rounded-division gate and the limb range lookups, and
  /// records this gadget's selector in the returned config.
  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {
    let columns = gadget_config.columns;
    let selector = meta.complex_selector();
    let two = Expression::Constant(F::from(2));
    // Limb base: each limb must lie in [0, num_rows), enforced via InputLookup.
    let range = Expression::Constant(F::from(gadget_config.num_rows as u64));
    let range_sq = range.clone() * range.clone();
    let tables = gadget_config.tables;
    let lookup = tables.get(&GadgetType::InputLookup).unwrap()[0];
    // a | c | r | (2b - r)_2 | (2 b - r)_1 | (2 b - r)_0 | r_2 | r_1 | r_0 | ... | b
    // a / b = c
    meta.create_gate("var_div_big3_arithm", |meta| {
      let s = meta.query_selector(selector);
      let mut constraints = vec![];
      // The divisor b is shared by all ops in the row (last column).
      let b = meta.query_advice(columns[columns.len() - 1], Rotation::cur());
      for i in 0..(columns.len() - 1) / Self::num_cols_per_op() {
        let offset = i * Self::num_cols_per_op();
        // Constrain that (2 * a + b) = (2 * b) * c + r
        let a = meta.query_advice(columns[offset], Rotation::cur());
        let c = meta.query_advice(columns[offset + 1], Rotation::cur());
        let r = meta.query_advice(columns[offset + 2], Rotation::cur());
        let lhs = a.clone() * two.clone() + b.clone();
        let rhs = b.clone() * two.clone() * c + r.clone();
        constraints.push(s.clone() * (lhs - rhs));
        // Constrain that (2 * b - r) = br2 * range^2 + br1 * range + br0
        let br2 = meta.query_advice(columns[offset + 3], Rotation::cur());
        let br1 = meta.query_advice(columns[offset + 4], Rotation::cur());
        let br0 = meta.query_advice(columns[offset + 5], Rotation::cur());
        let lhs = b.clone() * two.clone() - r.clone();
        let rhs = br2 * range_sq.clone() + br1 * range.clone() + br0;
        constraints.push(s.clone() * (lhs - rhs));
        // Constrain that r = r2 * range^2 + r1 * range + r0
        let r2 = meta.query_advice(columns[offset + 6], Rotation::cur());
        let r1 = meta.query_advice(columns[offset + 7], Rotation::cur());
        let r0 = meta.query_advice(columns[offset + 8], Rotation::cur());
        let lhs = r.clone();
        let rhs = r2 * range_sq.clone() + r1 * range.clone() + r0;
        constraints.push(s.clone() * (lhs - rhs));
      }
      constraints
    });
    // For var div big, we assume that a, b > 0 and are outputs of the previous layer
    // r must be constrained to be in [0, b)
    for i in 0..(columns.len() - 1) / Self::num_cols_per_op() {
      let offset = i * Self::num_cols_per_op();
      // (2 * b - r)_{2, 1, 0} \in [0, 2^N)
      meta.lookup("var div big br2", |meta| {
        let s = meta.query_selector(selector);
        let br2 = meta.query_advice(columns[offset + 3], Rotation::cur());
        vec![(s * br2, lookup)]
      });
      meta.lookup("var div big br1", |meta| {
        let s = meta.query_selector(selector);
        let br1 = meta.query_advice(columns[offset + 4], Rotation::cur());
        vec![(s * br1, lookup)]
      });
      meta.lookup("var div big br0", |meta| {
        let s = meta.query_selector(selector);
        let br0 = meta.query_advice(columns[offset + 5], Rotation::cur());
        vec![(s * br0, lookup)]
      });
      // r_{2, 1, 0} \in [0, 2^N)
      meta.lookup("var div big r2", |meta| {
        let s = meta.query_selector(selector);
        let r2 = meta.query_advice(columns[offset + 6], Rotation::cur());
        vec![(s * r2, lookup)]
      });
      meta.lookup("var div big r1", |meta| {
        let s = meta.query_selector(selector);
        let r1 = meta.query_advice(columns[offset + 7], Rotation::cur());
        vec![(s * r1, lookup)]
      });
      meta.lookup("var div big r0", |meta| {
        let s = meta.query_selector(selector);
        let r0 = meta.query_advice(columns[offset + 8], Rotation::cur());
        vec![(s * r0, lookup)]
      });
    }
    let mut selectors = gadget_config.selectors;
    selectors.insert(GadgetType::VarDivRoundBig3, vec![selector]);
    GadgetConfig {
      columns,
      tables,
      selectors,
      ..gadget_config
    }
  }
}
// `Gadget` implementation for the three-limb variant of variable-divisor
// rounded division: remainders are split into three base-`num_rows` limbs
// (range-checked by the lookups registered in `configure`).
impl<F: PrimeField> Gadget<F> for VarDivRoundBig3Chip<F> {
  fn name(&self) -> String {
    // NOTE(review): word order ("Big3Round") differs from the type name
    // (`VarDivRoundBig3Chip`); kept as-is in case the string is matched elsewhere.
    "VarDivBig3RoundChip".to_string()
  }
  fn num_cols_per_op(&self) -> usize {
    // 9 columns per op: a | c | r | (2b-r)_2 | (2b-r)_1 | (2b-r)_0 | r_2 | r_1 | r_0
    Self::num_cols_per_op()
  }
  fn num_inputs_per_row(&self) -> usize {
    // The last column is reserved for the shared divisor b.
    (self.config.columns.len() - 1) / self.num_cols_per_op()
  }
  fn num_outputs_per_row(&self) -> usize {
    // One quotient per dividend.
    self.num_inputs_per_row()
  }
  /// Assigns one row of rounded divisions `c = round(a / b)`.
  ///
  /// * `vec_inputs[0]` — the dividends `a` (one 9-column group each).
  /// * `single_inputs[1]` — the divisor `b`, shared across the row
  ///   (`single_inputs[0]` is a zero cell used only for padding in `forward`).
  ///
  /// Unlike the two-limb chip, the quotient shift here is a fixed 2^62
  /// (rounded down to a multiple of `b`) rather than `config.shift_min_val`.
  fn op_row_region(
    &self,
    region: &mut Region<F>,
    row_offset: usize,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let a_vec = &vec_inputs[0];
    // let zero = single_inputs[0].clone();
    let b = &single_inputs[1];
    // Base shift of -2^62; made divisible by b per-value below so it can be
    // exactly undone on the quotient.
    let c_shift_base = (-(1_i64 << 62)) as i128;
    let num_rows = self.config.num_rows as i128;
    if self.config.use_selectors {
      let selector = self
        .config
        .selectors
        .get(&GadgetType::VarDivRoundBig3)
        .unwrap()[0];
      selector.enable(region, row_offset)?;
    }
    // The shared divisor lives in the last column of the row.
    b.copy_advice(
      || "",
      region,
      self.config.columns[self.config.columns.len() - 1],
      row_offset,
    )?;
    let mut div_out = vec![];
    for (i, a) in a_vec.iter().enumerate() {
      let offset = i * self.num_cols_per_op();
      a.copy_advice(|| "", region, self.config.columns[offset], row_offset)
        .unwrap();
      // Compute the witness (quotient, remainder) pair for this dividend.
      let div_mod = a.value().zip(b.value()).map(|(a, b)| {
        let b = convert_to_u128(b);
        // Round the shift down to a multiple of b so (c_shift / b) is exact.
        let c_shift = (-c_shift_base) as u128 / b * b;
        let div_inp_min_val_pos = F::from(c_shift as u64);
        let a_pos = *a + div_inp_min_val_pos;
        let a = convert_to_u128(&a_pos);
        // c = (2 * a + b) / (2 * b)
        let c_pos = a.rounded_div(b);
        // Undo the shift to recover the true (possibly negative) quotient.
        let c = c_pos as i128 - (c_shift / b) as i128;
        // r = (2 * a + b) % (2 * b)
        let rem_floor = (a as i128) - (c_pos * b) as i128;
        let r = 2 * rem_floor + (b as i128);
        (c, r)
      });
      // Split 2b - r into three limbs: val = p2 * num_rows^2 + p1 * num_rows + p0.
      let br_split = div_mod.zip(b.value()).map(|((_, r), b)| {
        let b = convert_to_u128(b) as i128;
        let val = 2 * b - r;
        let p2 = val / (num_rows * num_rows);
        let p1 = (val % (num_rows * num_rows)) / num_rows;
        let p0 = val % num_rows;
        // val = p2 * max_val^2 + p1 * max_val + p0
        (p2, p1, p0)
      });
      // Split r the same way: r = p2 * num_rows^2 + p1 * num_rows + p0.
      let r_split = div_mod.map(|(_, r)| {
        let p2 = r / (num_rows * num_rows);
        let p1 = (r % (num_rows * num_rows)) / num_rows;
        let p0 = r % num_rows;
        // val = p1 * max_val + p0
        (p2, p1, p0)
      });
      let div_cell = region.assign_advice(
        || "",
        self.config.columns[offset + 1],
        row_offset,
        || {
          div_mod.map(|(c, _)| {
            // c may be negative; encode it as the field difference
            // F(c - base) - F(-base), both operands of which are non-negative.
            let offset = F::from(-c_shift_base as u64);
            let c = F::from((c - c_shift_base) as u64);
            c - offset
          })
        },
      )?;
      let _mod_cell = region.assign_advice(
        || "",
        self.config.columns[offset + 2],
        row_offset,
        || div_mod.map(|(_, r)| F::from(r as u64)),
      )?;
      // Assign 2 * b - r to the next 3 columns
      let _br_split_cell_2 = region.assign_advice(
        || "",
        self.config.columns[offset + 3],
        row_offset,
        || br_split.map(|(p2, _, _)| F::from(p2 as u64)),
      )?;
      let _br_split_cell_1 = region.assign_advice(
        || "",
        self.config.columns[offset + 4],
        row_offset,
        || br_split.map(|(_, p1, _)| F::from(p1 as u64)),
      )?;
      let _br_split_cell_0 = region.assign_advice(
        || "",
        self.config.columns[offset + 5],
        row_offset,
        || br_split.map(|(_, _, p0)| F::from(p0 as u64)),
      )?;
      // Assign r to the next 3 columns
      let _r_split_cell_2 = region.assign_advice(
        || "",
        self.config.columns[offset + 6],
        row_offset,
        || r_split.map(|(p2, _, _)| F::from(p2 as u64)),
      )?;
      let _r_split_cell_1 = region.assign_advice(
        || "",
        self.config.columns[offset + 7],
        row_offset,
        || r_split.map(|(_, p1, _)| F::from(p1 as u64)),
      )?;
      let _r_split_cell_0 = region.assign_advice(
        || "",
        self.config.columns[offset + 8],
        row_offset,
        || r_split.map(|(_, _, p0)| F::from(p0 as u64)),
      )?;
      div_out.push(div_cell);
    }
    Ok(div_out)
  }
  /// Pads the inputs with `single_inputs[0]` up to a whole number of rows,
  /// runs the aligned row assignment, and truncates the result back to the
  /// original input count.
  fn forward(
    &self,
    mut layouter: impl Layouter<F>,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let mut inps = vec_inputs[0].clone();
    let initial_len = inps.len();
    // Needed to pad
    let default = &single_inputs[0];
    while inps.len() % self.num_inputs_per_row() != 0 {
      inps.push(&default);
    }
    let res = self.op_aligned_rows(
      layouter.namespace(|| "var_div_big3"),
      &vec![inps],
      single_inputs,
    )?;
    Ok(res[..initial_len].to_vec())
  }
}
================================================
FILE: src/gadgets.rs
================================================
// Gadget modules: low-level constraint-system building blocks that the
// higher-level layer implementations compose.
pub mod add_pairs;
pub mod adder;
pub mod bias_div_floor_relu6;
pub mod bias_div_round_relu6;
pub mod dot_prod;
pub mod gadget;
pub mod input_lookup;
pub mod max;
pub mod mul_pairs;
pub mod sqrt_big;
pub mod square;
pub mod squared_diff;
pub mod sub_pairs;
pub mod update;
pub mod var_div;
pub mod var_div_big;
pub mod var_div_big3;
// Generics
pub mod nonlinear;
================================================
FILE: src/layers/arithmetic/add.rs
================================================
use std::{collections::HashMap, rc::Rc, vec};
use halo2_proofs::{
circuit::{AssignedCell, Layouter},
halo2curves::ff::PrimeField,
plonk::Error,
};
use ndarray::{Array, IxDyn};
use crate::{
gadgets::{
add_pairs::AddPairsChip,
gadget::{Gadget, GadgetConfig, GadgetType},
nonlinear::relu::ReluChip,
},
layers::layer::{ActivationType, AssignedTensor, CellRc, GadgetConsumer},
};
use super::{
super::layer::{Layer, LayerConfig},
Arithmetic,
};
/// Element-wise tensor addition layer with an optional fused ReLU activation
/// (selected by `layer_params[0]`).
#[derive(Clone, Debug)]
pub struct AddChip {}
impl AddChip {
  /// Decodes the fused activation from the layer parameters.
  ///
  /// `layer_params[0]` is the activation code: `0` => none, `1` => ReLU.
  /// Panics on any other code (and on empty `layer_params`, via indexing).
  ///
  /// Takes `&[i64]` rather than `&Vec<i64>` (the idiomatic borrowed-slice
  /// form); existing `&Vec<i64>` call sites coerce transparently.
  fn get_activation(&self, layer_params: &[i64]) -> ActivationType {
    match layer_params[0] {
      0 => ActivationType::None,
      1 => ActivationType::Relu,
      _ => panic!("Unsupported activation type for add"),
    }
  }
}
impl<F: PrimeField> Arithmetic<F> for AddChip {
  /// Runs the pairwise-addition gadget over the flattened inputs and
  /// returns the sum cells.
  fn gadget_forward(
    &self,
    mut layouter: impl Layouter<F>,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    constants: &Vec<&AssignedCell<F, F>>,
    gadget_config: Rc<GadgetConfig>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let adder = AddPairsChip::<F>::construct(gadget_config);
    adder.forward(layouter.namespace(|| "add chip"), vec_inputs, constants)
  }
}
impl<F: PrimeField> Layer<F> for AddChip {
  /// Adds the input tensors element-wise, applies the fused activation from
  /// `layer_config.layer_params`, and reshapes the cells back into a tensor.
  fn forward(
    &self,
    mut layouter: impl Layouter<F>,
    tensors: &Vec<AssignedTensor<F>>,
    constants: &HashMap<i64, CellRc<F>>,
    gadget_config: Rc<GadgetConfig>,
    layer_config: &LayerConfig,
  ) -> Result<Vec<AssignedTensor<F>>, Error> {
    let activation = self.get_activation(&layer_config.layer_params);
    // Element-wise sum of the inputs.
    let (summed, out_shape) = self.arithmetic_forward(
      layouter.namespace(|| ""),
      tensors,
      constants,
      gadget_config.clone(),
    )?;
    // Apply the fused activation, if any.
    let activated = match activation {
      ActivationType::Relu => {
        let zero = constants.get(&0).unwrap();
        let single_inps = vec![zero.as_ref()];
        let relu_inp = summed.iter().map(|c| c.as_ref()).collect::<Vec<_>>();
        let relu_chip = ReluChip::<F>::construct(gadget_config);
        relu_chip
          .forward(layouter.namespace(|| "relu"), &vec![relu_inp], &single_inps)?
          .into_iter()
          .map(Rc::new)
          .collect::<Vec<_>>()
      }
      ActivationType::None => summed,
      _ => panic!("Unsupported activation type for add"),
    };
    let result = Array::from_shape_vec(IxDyn(out_shape.as_slice()), activated).unwrap();
    Ok(vec![result])
  }
}
impl GadgetConsumer for AddChip {
  /// Lists the gadgets this layer uses: always `AddPairs`, plus `Relu` when
  /// the fused activation is ReLU.
  fn used_gadgets(&self, layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {
    match self.get_activation(&layer_params) {
      ActivationType::None => vec![GadgetType::AddPairs],
      ActivationType::Relu => vec![GadgetType::AddPairs, GadgetType::Relu],
      _ => panic!("Unsupported activation type for add"),
    }
  }
}
================================================
FILE: src/layers/arithmetic/div_var.rs
================================================
use std::{collections::HashMap, rc::Rc, vec};
use halo2_proofs::{
circuit::{AssignedCell, Layouter},
halo2curves::ff::PrimeField,
plonk::Error,
};
use ndarray::{Array, IxDyn};
use crate::{
gadgets::{
gadget::{Gadget, GadgetConfig, GadgetType},
mul_pairs::MulPairsChip,
var_div::VarDivRoundChip,
},
layers::layer::{AssignedTensor, CellRc, GadgetConsumer, Layer},
};
use super::Arithmetic;
/// Layer that divides a tensor element-wise by a single runtime-variable
/// scalar (the second input tensor).
pub struct DivVarChip {}
// TODO: hack. Used for multiplying by the scale factor
impl<F: PrimeField> Arithmetic<F> for DivVarChip {
  /// Runs the pairwise-multiplication gadget over the flattened inputs
  /// (the division itself is performed later by the var-div gadget).
  fn gadget_forward(
    &self,
    mut layouter: impl Layouter<F>,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    constants: &Vec<&AssignedCell<F, F>>,
    gadget_config: Rc<GadgetConfig>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let multiplier = MulPairsChip::<F>::construct(gadget_config.clone());
    multiplier.forward(
      layouter.namespace(|| "mul pairs chip"),
      vec_inputs,
      constants,
    )
  }
}
impl<F: PrimeField> Layer<F> for DivVarChip {
fn forward(
&self,
mut layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
constants: &HashMap<i64, CellRc<F>>,
gadget_config: Rc<GadgetConfig>,
_layer_config: &crate::layers::layer::LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
assert_eq!(tensors.len(), 2);
// TODO: We only support dividing by a single number for now
assert_eq!(tensors[1].shape().len(), 1);
assert_eq!(tensors[1].shape()[0], 1);
let sf = constants
.get(&(gadget_config.scale_factor as i64))
.unwrap()
.as_ref();
let sf_tensor = Array::from_shape_vec(IxDyn(&[1]), vec![Rc::new(sf.clone())]).unwrap();
// out = inp * SF
let (out, out_shape) = self.arithmetic_forward(
layouter.namespace(|| ""),
&vec![tensors[0].clone(), sf_tensor],
constants,
gadget_config.clone(),
)?;
let var_div_chip = VarDivRoundChip::<F>::construct(gadget_config.
gitextract_ixvshx2q/
├── .gitignore
├── Cargo.toml
├── LICENSE
├── README.md
├── backwards/
│ ├── README.md
│ └── backward.py
├── python/
│ ├── converter.py
│ ├── input_converter.py
│ └── training_converter.py
├── rustfmt.toml
├── src/
│ ├── bin/
│ │ ├── test_circuit.rs
│ │ ├── time_circuit.rs
│ │ ├── verify_circuit.rs
│ │ └── verify_wav.rs
│ ├── commitments/
│ │ ├── commit.rs
│ │ ├── packer.rs
│ │ └── poseidon_commit.rs
│ ├── commitments.rs
│ ├── gadgets/
│ │ ├── add_pairs.rs
│ │ ├── adder.rs
│ │ ├── bias_div_floor_relu6.rs
│ │ ├── bias_div_round_relu6.rs
│ │ ├── dot_prod.rs
│ │ ├── gadget.rs
│ │ ├── input_lookup.rs
│ │ ├── max.rs
│ │ ├── mul_pairs.rs
│ │ ├── nonlinear/
│ │ │ ├── exp.rs
│ │ │ ├── logistic.rs
│ │ │ ├── non_linearity.rs
│ │ │ ├── pow.rs
│ │ │ ├── relu.rs
│ │ │ ├── rsqrt.rs
│ │ │ ├── sqrt.rs
│ │ │ └── tanh.rs
│ │ ├── nonlinear.rs
│ │ ├── sqrt_big.rs
│ │ ├── square.rs
│ │ ├── squared_diff.rs
│ │ ├── sub_pairs.rs
│ │ ├── update.rs
│ │ ├── var_div.rs
│ │ ├── var_div_big.rs
│ │ └── var_div_big3.rs
│ ├── gadgets.rs
│ ├── layers/
│ │ ├── arithmetic/
│ │ │ ├── add.rs
│ │ │ ├── div_var.rs
│ │ │ ├── mul.rs
│ │ │ └── sub.rs
│ │ ├── arithmetic.rs
│ │ ├── averager.rs
│ │ ├── avg_pool_2d.rs
│ │ ├── batch_mat_mul.rs
│ │ ├── conv2d.rs
│ │ ├── dag.rs
│ │ ├── div_fixed.rs
│ │ ├── fully_connected.rs
│ │ ├── layer.rs
│ │ ├── logistic.rs
│ │ ├── max_pool_2d.rs
│ │ ├── mean.rs
│ │ ├── noop.rs
│ │ ├── pow.rs
│ │ ├── rsqrt.rs
│ │ ├── shape/
│ │ │ ├── broadcast.rs
│ │ │ ├── concatenation.rs
│ │ │ ├── mask_neg_inf.rs
│ │ │ ├── pack.rs
│ │ │ ├── pad.rs
│ │ │ ├── permute.rs
│ │ │ ├── reshape.rs
│ │ │ ├── resize_nn.rs
│ │ │ ├── rotate.rs
│ │ │ ├── slice.rs
│ │ │ ├── split.rs
│ │ │ └── transpose.rs
│ │ ├── shape.rs
│ │ ├── softmax.rs
│ │ ├── sqrt.rs
│ │ ├── square.rs
│ │ ├── squared_diff.rs
│ │ ├── tanh.rs
│ │ └── update.rs
│ ├── layers.rs
│ ├── lib.rs
│ ├── model.rs
│ ├── utils/
│ │ ├── helpers.rs
│ │ ├── loader.rs
│ │ ├── proving_ipa.rs
│ │ └── proving_kzg.rs
│ └── utils.rs
└── testing/
└── circuits/
├── last_two_layers.py
└── v2_1.0_224.tflite
SYMBOL INDEX (543 symbols across 79 files)
FILE: backwards/backward.py
class CircuitConfig (line 11) | class CircuitConfig():
method __init__ (line 12) | def __init__(self, starting_index):
method new_gradient_tensor (line 19) | def new_gradient_tensor(self, tensor_idx):
method new_tensor (line 27) | def new_tensor(self):
method new_label_tensor (line 32) | def new_label_tensor(self):
method gradient_tensor_idx (line 40) | def gradient_tensor_idx(self, tensor_idx):
class Conv2D (line 52) | class Conv2D():
method __init__ (line 53) | def __init__(self, layer):
method backward (line 60) | def backward(self, layer, transcript, config):
class Softmax (line 193) | class Softmax():
method __init__ (line 194) | def __init__(self, layer):
method backward (line 199) | def backward(self, layer, transcript, config):
class AveragePool2D (line 212) | class AveragePool2D():
method __init__ (line 213) | def __init__(self, layer):
method backward (line 216) | def backward(self, layer, transcript, config):
class Reshape (line 248) | class Reshape():
method __init__ (line 249) | def __init__(self, layer):
method backward (line 252) | def backward(self, layer, transcript, config):
function produce_graph (line 265) | def produce_graph():
FILE: python/converter.py
function get_shape (line 9) | def get_shape(interpreter: tf.lite.Interpreter, tensor_idx):
function handle_numpy_or_literal (line 15) | def handle_numpy_or_literal(inp: Union[np.ndarray, Literal[0]]):
function get_inputs (line 20) | def get_inputs(op: tflite.Operator):
class Converter (line 26) | class Converter:
method __init__ (line 27) | def __init__(
method valid_activations (line 52) | def valid_activations(self):
method _convert_add (line 59) | def _convert_add(self, op: tflite.Operator, generated_tensors: set):
method to_dict (line 98) | def to_dict(self, start_layer, end_layer):
method to_msgpack (line 500) | def to_msgpack(self, start_layer, end_layer, use_selectors=True):
function main (line 508) | def main():
FILE: python/input_converter.py
function main (line 6) | def main():
FILE: python/training_converter.py
function main (line 18) | def main():
FILE: src/bin/test_circuit.rs
function main (line 10) | fn main() {
FILE: src/bin/time_circuit.rs
function main (line 7) | fn main() {
FILE: src/bin/verify_circuit.rs
function main (line 7) | fn main() {
FILE: src/bin/verify_wav.rs
function main (line 12) | fn main() {
FILE: src/commitments/commit.rs
type Commit (line 7) | pub trait Commit<F: PrimeField> {
method commit (line 8) | fn commit(
FILE: src/commitments/packer.rs
constant NUM_BITS_PER_FIELD_ELEM (line 21) | const NUM_BITS_PER_FIELD_ELEM: usize = 254;
type PackerConfig (line 23) | pub struct PackerConfig<F: PrimeField> {
type PackerChip (line 31) | pub struct PackerChip<F: PrimeField> {
function get_exponents (line 36) | pub fn get_exponents(num_bits_per_elem: usize, num_exponents: usize) -> ...
function construct (line 45) | pub fn construct(num_bits_per_elem: usize, gadget_config: &GadgetConfig)...
function configure (line 77) | pub fn configure(
function copy_and_pack_row (line 142) | pub fn copy_and_pack_row(
function assign_and_pack_row (line 213) | pub fn assign_and_pack_row(
function assign_and_pack (line 292) | pub fn assign_and_pack(
function copy_and_pack (line 339) | pub fn copy_and_pack(
FILE: src/commitments/poseidon_commit.rs
constant WIDTH (line 17) | pub const WIDTH: usize = 3;
constant RATE (line 18) | pub const RATE: usize = 2;
constant L (line 19) | pub const L: usize = 8 - WIDTH - 1;
type PoseidonCommitChip (line 23) | pub struct PoseidonCommitChip<
type P128Pow5T3Gen (line 33) | pub struct P128Pow5T3Gen<F: PrimeField, const SECURE_MDS: usize>(Phantom...
function new (line 36) | pub fn new() -> Self {
function full_rounds (line 44) | fn full_rounds() -> usize {
function partial_rounds (line 48) | fn partial_rounds() -> usize {
function sbox (line 52) | fn sbox(val: F) -> F {
function secure_mds (line 56) | fn secure_mds() -> usize {
function constants (line 60) | fn constants() -> (Vec<[F; 3]>, Mds<F, 3>, Mds<F, 3>) {
type MyHash (line 67) | pub struct MyHash<
function configure (line 79) | pub fn configure(
function commit (line 106) | fn commit(
FILE: src/gadgets/add_pairs.rs
type AddPairsConfig (line 12) | type AddPairsConfig = GadgetConfig;
type AddPairsChip (line 14) | pub struct AddPairsChip<F: PrimeField> {
function construct (line 20) | pub fn construct(config: Rc<AddPairsConfig>) -> Self {
function num_cols_per_op (line 27) | pub fn num_cols_per_op() -> usize {
function configure (line 31) | pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetCo...
function name (line 63) | fn name(&self) -> String {
function num_cols_per_op (line 67) | fn num_cols_per_op(&self) -> usize {
function num_inputs_per_row (line 71) | fn num_inputs_per_row(&self) -> usize {
function num_outputs_per_row (line 75) | fn num_outputs_per_row(&self) -> usize {
function op_row_region (line 79) | fn op_row_region(
function forward (line 110) | fn forward(
FILE: src/gadgets/adder.rs
type AdderConfig (line 12) | type AdderConfig = GadgetConfig;
type AdderChip (line 14) | pub struct AdderChip<F: PrimeField> {
function construct (line 20) | pub fn construct(config: Rc<AdderConfig>) -> Self {
function configure (line 27) | pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetCo...
function name (line 59) | fn name(&self) -> String {
function num_cols_per_op (line 63) | fn num_cols_per_op(&self) -> usize {
function num_inputs_per_row (line 67) | fn num_inputs_per_row(&self) -> usize {
function num_outputs_per_row (line 71) | fn num_outputs_per_row(&self) -> usize {
function op_row_region (line 75) | fn op_row_region(
function forward (line 109) | fn forward(
FILE: src/gadgets/bias_div_floor_relu6.rs
type BiasDivFloorRelu6Config (line 14) | type BiasDivFloorRelu6Config = GadgetConfig;
constant SHIFT_MIN_VAL (line 16) | const SHIFT_MIN_VAL: i64 = -(1 << 30);
type BiasDivFloorRelu6Chip (line 18) | pub struct BiasDivFloorRelu6Chip<F: PrimeField> {
function construct (line 24) | pub fn construct(config: BiasDivFloorRelu6Config) -> Self {
function get_map (line 31) | pub fn get_map(scale_factor: u64, num_rows: i64, div_outp_min_val: i64) ...
function num_cols_per_op (line 44) | pub fn num_cols_per_op() -> usize {
function configure (line 48) | pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetCo...
function name (line 125) | fn name(&self) -> String {
function num_cols_per_op (line 129) | fn num_cols_per_op(&self) -> usize {
function num_inputs_per_row (line 133) | fn num_inputs_per_row(&self) -> usize {
function num_outputs_per_row (line 137) | fn num_outputs_per_row(&self) -> usize {
function op_row_region (line 141) | fn op_row_region(
function forward (line 251) | fn forward(
FILE: src/gadgets/bias_div_round_relu6.rs
type BiasDivRoundRelu6Config (line 14) | type BiasDivRoundRelu6Config = GadgetConfig;
constant NUM_COLS_PER_OP (line 16) | const NUM_COLS_PER_OP: usize = 5;
type BiasDivRoundRelu6Chip (line 18) | pub struct BiasDivRoundRelu6Chip<F: PrimeField> {
function construct (line 24) | pub fn construct(config: Rc<BiasDivRoundRelu6Config>) -> Self {
function get_map (line 31) | pub fn get_map(scale_factor: u64, min_val: i64, num_rows: i64) -> HashMa...
function configure (line 43) | pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetCo...
function name (line 124) | fn name(&self) -> String {
function num_cols_per_op (line 128) | fn num_cols_per_op(&self) -> usize {
function num_inputs_per_row (line 132) | fn num_inputs_per_row(&self) -> usize {
function num_outputs_per_row (line 136) | fn num_outputs_per_row(&self) -> usize {
function load_lookups (line 140) | fn load_lookups(&self, mut layouter: impl Layouter<F>) -> Result<(), Err...
function op_row_region (line 169) | fn op_row_region(
function forward (line 281) | fn forward(
FILE: src/gadgets/dot_prod.rs
type DotProductConfig (line 14) | type DotProductConfig = GadgetConfig;
type DotProductChip (line 16) | pub struct DotProductChip<F: PrimeField> {
function construct (line 22) | pub fn construct(config: Rc<DotProductConfig>) -> Self {
function get_input_columns (line 29) | pub fn get_input_columns(config: &GadgetConfig) -> Vec<Column<Advice>> {
function get_weight_columns (line 34) | pub fn get_weight_columns(config: &GadgetConfig) -> Vec<Column<Advice>> {
function configure (line 39) | pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetCo...
function name (line 76) | fn name(&self) -> String {
function num_cols_per_op (line 80) | fn num_cols_per_op(&self) -> usize {
function num_inputs_per_row (line 84) | fn num_inputs_per_row(&self) -> usize {
function num_outputs_per_row (line 88) | fn num_outputs_per_row(&self) -> usize {
function op_row_region (line 93) | fn op_row_region(
function forward (line 161) | fn forward(
FILE: src/gadgets/gadget.rs
type GadgetType (line 15) | pub enum GadgetType {
type GadgetConfig (line 43) | pub struct GadgetConfig {
function convert_to_u64 (line 66) | pub fn convert_to_u64<F: PrimeField>(x: &F) -> u64 {
function convert_to_u128 (line 81) | pub fn convert_to_u128<F: PrimeField>(x: &F) -> u128 {
type Gadget (line 86) | pub trait Gadget<F: PrimeField> {
method name (line 87) | fn name(&self) -> String;
method num_cols_per_op (line 89) | fn num_cols_per_op(&self) -> usize;
method num_inputs_per_row (line 91) | fn num_inputs_per_row(&self) -> usize;
method num_outputs_per_row (line 93) | fn num_outputs_per_row(&self) -> usize;
method load_lookups (line 95) | fn load_lookups(&self, _layouter: impl Layouter<F>) -> Result<(), Erro...
method op_row_region (line 99) | fn op_row_region(
method op_aligned_rows (line 108) | fn op_aligned_rows(
method forward (line 141) | fn forward(
FILE: src/gadgets/input_lookup.rs
type InputLookupChip (line 11) | pub struct InputLookupChip<F: PrimeField> {
function construct (line 17) | pub fn construct(config: Rc<GadgetConfig>) -> Self {
function configure (line 24) | pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetCo...
function load_lookups (line 37) | fn load_lookups(&self, mut layouter: impl Layouter<F>) -> Result<(), Err...
function name (line 62) | fn name(&self) -> String {
function num_cols_per_op (line 66) | fn num_cols_per_op(&self) -> usize {
function num_inputs_per_row (line 70) | fn num_inputs_per_row(&self) -> usize {
function num_outputs_per_row (line 74) | fn num_outputs_per_row(&self) -> usize {
function op_row_region (line 78) | fn op_row_region(
FILE: src/gadgets/max.rs
type MaxChip (line 14) | pub struct MaxChip<F: PrimeField> {
function construct (line 20) | pub fn construct(config: Rc<GadgetConfig>) -> Self {
function num_cols_per_op (line 27) | pub fn num_cols_per_op() -> usize {
function configure (line 31) | pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetCo...
function name (line 84) | fn name(&self) -> String {
function num_cols_per_op (line 88) | fn num_cols_per_op(&self) -> usize {
function num_inputs_per_row (line 92) | fn num_inputs_per_row(&self) -> usize {
function num_outputs_per_row (line 96) | fn num_outputs_per_row(&self) -> usize {
function op_row_region (line 100) | fn op_row_region(
function forward (line 148) | fn forward(
FILE: src/gadgets/mul_pairs.rs
type MulPairsConfig (line 12) | type MulPairsConfig = GadgetConfig;
type MulPairsChip (line 14) | pub struct MulPairsChip<F: PrimeField> {
function construct (line 20) | pub fn construct(config: Rc<MulPairsConfig>) -> Self {
function num_cols_per_op (line 27) | pub fn num_cols_per_op() -> usize {
function configure (line 31) | pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetCo...
function name (line 63) | fn name(&self) -> String {
function num_cols_per_op (line 67) | fn num_cols_per_op(&self) -> usize {
function num_inputs_per_row (line 71) | fn num_inputs_per_row(&self) -> usize {
function num_outputs_per_row (line 75) | fn num_outputs_per_row(&self) -> usize {
function op_row_region (line 80) | fn op_row_region(
function forward (line 111) | fn forward(
FILE: src/gadgets/nonlinear/exp.rs
type ExpGadgetConfig (line 14) | type ExpGadgetConfig = GadgetConfig;
type ExpGadgetChip (line 17) | pub struct ExpGadgetChip<F: PrimeField> {
function construct (line 23) | pub fn construct(config: Rc<ExpGadgetConfig>) -> Self {
function configure (line 30) | pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetCo...
function generate_map (line 36) | fn generate_map(scale_factor: u64, min_val: i64, num_rows: i64) -> HashM...
function get_map (line 48) | fn get_map(&self) -> &HashMap<i64, i64> {
function get_selector (line 52) | fn get_selector(&self) -> halo2_proofs::plonk::Selector {
function name (line 58) | fn name(&self) -> String {
function num_cols_per_op (line 62) | fn num_cols_per_op(&self) -> usize {
function num_inputs_per_row (line 66) | fn num_inputs_per_row(&self) -> usize {
function num_outputs_per_row (line 70) | fn num_outputs_per_row(&self) -> usize {
function load_lookups (line 74) | fn load_lookups(&self, layouter: impl Layouter<F>) -> Result<(), Error> {
function op_row_region (line 79) | fn op_row_region(
function forward (line 96) | fn forward(
FILE: src/gadgets/nonlinear/logistic.rs
type LogisticGadgetChip (line 14) | pub struct LogisticGadgetChip<F: PrimeField> {
function construct (line 20) | pub fn construct(config: Rc<GadgetConfig>) -> Self {
function configure (line 27) | pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetCo...
function generate_map (line 37) | fn generate_map(scale_factor: u64, min_val: i64, num_rows: i64) -> HashM...
function get_map (line 50) | fn get_map(&self) -> &HashMap<i64, i64> {
function get_selector (line 54) | fn get_selector(&self) -> halo2_proofs::plonk::Selector {
function name (line 60) | fn name(&self) -> String {
function num_cols_per_op (line 64) | fn num_cols_per_op(&self) -> usize {
function num_inputs_per_row (line 68) | fn num_inputs_per_row(&self) -> usize {
function num_outputs_per_row (line 72) | fn num_outputs_per_row(&self) -> usize {
function load_lookups (line 76) | fn load_lookups(&self, layouter: impl Layouter<F>) -> Result<(), Error> {
function op_row_region (line 81) | fn op_row_region(
function forward (line 98) | fn forward(
FILE: src/gadgets/nonlinear/non_linearity.rs
constant NUM_COLS_PER_OP (line 15) | const NUM_COLS_PER_OP: usize = 2;
type NonLinearGadget (line 17) | pub trait NonLinearGadget<F: PrimeField>: Gadget<F> {
method generate_map (line 18) | fn generate_map(scale_factor: u64, min_val: i64, num_rows: i64) -> Has...
method get_map (line 20) | fn get_map(&self) -> &HashMap<i64, i64>;
method get_selector (line 22) | fn get_selector(&self) -> Selector;
method num_cols_per_op (line 24) | fn num_cols_per_op() -> usize {
method configure (line 28) | fn configure(
method load_lookups (line 78) | fn load_lookups(
method op_row_region (line 119) | fn op_row_region(
method forward (line 167) | fn forward(
FILE: src/gadgets/nonlinear/pow.rs
type PowGadgetChip (line 15) | pub struct PowGadgetChip<F: PrimeField> {
function construct (line 21) | pub fn construct(config: Rc<GadgetConfig>) -> Self {
function configure (line 28) | pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetCo...
function generate_map (line 34) | fn generate_map(scale_factor: u64, min_val: i64, num_rows: i64) -> HashM...
function get_map (line 49) | fn get_map(&self) -> &HashMap<i64, i64> {
function get_selector (line 53) | fn get_selector(&self) -> halo2_proofs::plonk::Selector {
function name (line 59) | fn name(&self) -> String {
function num_cols_per_op (line 63) | fn num_cols_per_op(&self) -> usize {
function num_inputs_per_row (line 67) | fn num_inputs_per_row(&self) -> usize {
function num_outputs_per_row (line 71) | fn num_outputs_per_row(&self) -> usize {
function load_lookups (line 75) | fn load_lookups(&self, layouter: impl Layouter<F>) -> Result<(), Error> {
function op_row_region (line 80) | fn op_row_region(
function forward (line 97) | fn forward(
FILE: src/gadgets/nonlinear/relu.rs
type ReluChip (line 14) | pub struct ReluChip<F: PrimeField> {
function construct (line 20) | pub fn construct(config: Rc<GadgetConfig>) -> Self {
function configure (line 27) | pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetCo...
function generate_map (line 33) | fn generate_map(_scale_factor: u64, min_val: i64, num_rows: i64) -> Hash...
function get_map (line 44) | fn get_map(&self) -> &HashMap<i64, i64> {
function get_selector (line 48) | fn get_selector(&self) -> halo2_proofs::plonk::Selector {
function name (line 54) | fn name(&self) -> String {
function num_cols_per_op (line 58) | fn num_cols_per_op(&self) -> usize {
function num_inputs_per_row (line 62) | fn num_inputs_per_row(&self) -> usize {
function num_outputs_per_row (line 66) | fn num_outputs_per_row(&self) -> usize {
function load_lookups (line 70) | fn load_lookups(&self, layouter: impl Layouter<F>) -> Result<(), Error> {
function op_row_region (line 75) | fn op_row_region(
function forward (line 92) | fn forward(
FILE: src/gadgets/nonlinear/rsqrt.rs
type RsqrtGadgetChip (line 14) | pub struct RsqrtGadgetChip<F: PrimeField> {
function construct (line 20) | pub fn construct(config: Rc<GadgetConfig>) -> Self {
function configure (line 27) | pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetCo...
function generate_map (line 33) | fn generate_map(scale_factor: u64, min_val: i64, num_rows: i64) -> HashM...
function get_map (line 46) | fn get_map(&self) -> &HashMap<i64, i64> {
function get_selector (line 50) | fn get_selector(&self) -> halo2_proofs::plonk::Selector {
function name (line 56) | fn name(&self) -> String {
function num_cols_per_op (line 60) | fn num_cols_per_op(&self) -> usize {
function num_inputs_per_row (line 64) | fn num_inputs_per_row(&self) -> usize {
function num_outputs_per_row (line 68) | fn num_outputs_per_row(&self) -> usize {
function load_lookups (line 72) | fn load_lookups(&self, layouter: impl Layouter<F>) -> Result<(), Error> {
function op_row_region (line 77) | fn op_row_region(
function forward (line 94) | fn forward(
FILE: src/gadgets/nonlinear/sqrt.rs
type SqrtGadgetChip (line 14) | pub struct SqrtGadgetChip<F: PrimeField> {
function construct (line 20) | pub fn construct(config: Rc<GadgetConfig>) -> Self {
function configure (line 27) | pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetCo...
function generate_map (line 33) | fn generate_map(scale_factor: u64, min_val: i64, num_rows: i64) -> HashM...
function get_map (line 45) | fn get_map(&self) -> &HashMap<i64, i64> {
function get_selector (line 49) | fn get_selector(&self) -> halo2_proofs::plonk::Selector {
function name (line 55) | fn name(&self) -> String {
function num_cols_per_op (line 59) | fn num_cols_per_op(&self) -> usize {
function num_inputs_per_row (line 63) | fn num_inputs_per_row(&self) -> usize {
function num_outputs_per_row (line 67) | fn num_outputs_per_row(&self) -> usize {
function load_lookups (line 71) | fn load_lookups(&self, layouter: impl Layouter<F>) -> Result<(), Error> {
function op_row_region (line 76) | fn op_row_region(
function forward (line 93) | fn forward(
FILE: src/gadgets/nonlinear/tanh.rs
type TanhGadgetChip (line 14) | pub struct TanhGadgetChip<F: PrimeField> {
function construct (line 20) | pub fn construct(config: Rc<GadgetConfig>) -> Self {
function configure (line 27) | pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetCo...
function generate_map (line 33) | fn generate_map(scale_factor: u64, min_val: i64, num_rows: i64) -> HashM...
function get_map (line 48) | fn get_map(&self) -> &HashMap<i64, i64> {
function get_selector (line 52) | fn get_selector(&self) -> halo2_proofs::plonk::Selector {
function name (line 58) | fn name(&self) -> String {
function num_cols_per_op (line 62) | fn num_cols_per_op(&self) -> usize {
function num_inputs_per_row (line 66) | fn num_inputs_per_row(&self) -> usize {
function num_outputs_per_row (line 70) | fn num_outputs_per_row(&self) -> usize {
function load_lookups (line 74) | fn load_lookups(&self, layouter: impl Layouter<F>) -> Result<(), Error> {
function op_row_region (line 79) | fn op_row_region(
function forward (line 96) | fn forward(
FILE: src/gadgets/sqrt_big.rs
type SqrtBigConfig (line 14) | type SqrtBigConfig = GadgetConfig;
type SqrtBigChip (line 16) | pub struct SqrtBigChip<F: PrimeField> {
function construct (line 22) | pub fn construct(config: Rc<SqrtBigConfig>) -> Self {
function num_cols_per_op (line 29) | pub fn num_cols_per_op() -> usize {
function configure (line 33) | pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetCo...
function name (line 99) | fn name(&self) -> String {
function num_cols_per_op (line 103) | fn num_cols_per_op(&self) -> usize {
function num_inputs_per_row (line 107) | fn num_inputs_per_row(&self) -> usize {
function num_outputs_per_row (line 111) | fn num_outputs_per_row(&self) -> usize {
function op_row_region (line 115) | fn op_row_region(
function forward (line 171) | fn forward(
FILE: src/gadgets/square.rs
type SquareGadgetChip (line 12) | pub struct SquareGadgetChip<F: PrimeField> {
function construct (line 18) | pub fn construct(config: Rc<GadgetConfig>) -> Self {
function configure (line 26) | pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetCo...
function name (line 52) | fn name(&self) -> String {
function num_cols_per_op (line 56) | fn num_cols_per_op(&self) -> usize {
function num_inputs_per_row (line 60) | fn num_inputs_per_row(&self) -> usize {
function num_outputs_per_row (line 64) | fn num_outputs_per_row(&self) -> usize {
function op_row_region (line 68) | fn op_row_region(
function forward (line 100) | fn forward(
FILE: src/gadgets/squared_diff.rs
type SquaredDiffConfig (line 12) | type SquaredDiffConfig = GadgetConfig;
type SquaredDiffGadgetChip (line 14) | pub struct SquaredDiffGadgetChip<F: PrimeField> {
function construct (line 20) | pub fn construct(config: Rc<SquaredDiffConfig>) -> Self {
function num_cols_per_op (line 27) | pub fn num_cols_per_op() -> usize {
function configure (line 31) | pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetCo...
function name (line 63) | fn name(&self) -> String {
function num_cols_per_op (line 67) | fn num_cols_per_op(&self) -> usize {
function num_inputs_per_row (line 71) | fn num_inputs_per_row(&self) -> usize {
function num_outputs_per_row (line 75) | fn num_outputs_per_row(&self) -> usize {
function op_row_region (line 79) | fn op_row_region(
function forward (line 111) | fn forward(
FILE: src/gadgets/sub_pairs.rs
type SubPairsConfig (line 12) | type SubPairsConfig = GadgetConfig;
type SubPairsChip (line 14) | pub struct SubPairsChip<F: PrimeField> {
function construct (line 20) | pub fn construct(config: Rc<SubPairsConfig>) -> Self {
function num_cols_per_op (line 27) | pub fn num_cols_per_op() -> usize {
function configure (line 31) | pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetCo...
function name (line 63) | fn name(&self) -> String {
function num_cols_per_op (line 67) | fn num_cols_per_op(&self) -> usize {
function num_inputs_per_row (line 71) | fn num_inputs_per_row(&self) -> usize {
function num_outputs_per_row (line 75) | fn num_outputs_per_row(&self) -> usize {
function op_row_region (line 79) | fn op_row_region(
function forward (line 110) | fn forward(
FILE: src/gadgets/update.rs
type UpdateConfig (line 14) | type UpdateConfig = GadgetConfig;
type UpdateGadgetChip (line 17) | pub struct UpdateGadgetChip<F: PrimeField> {
function construct (line 23) | pub fn construct(config: UpdateConfig) -> Self {
function num_cols_per_op (line 30) | pub fn num_cols_per_op() -> usize {
function configure (line 34) | pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetCo...
function name (line 89) | fn name(&self) -> String {
function num_cols_per_op (line 93) | fn num_cols_per_op(&self) -> usize {
function num_inputs_per_row (line 97) | fn num_inputs_per_row(&self) -> usize {
function num_outputs_per_row (line 101) | fn num_outputs_per_row(&self) -> usize {
function op_row_region (line 105) | fn op_row_region(
function forward (line 183) | fn forward(
FILE: src/gadgets/var_div.rs
type VarDivRoundConfig (line 13) | type VarDivRoundConfig = GadgetConfig;
type VarDivRoundChip (line 15) | pub struct VarDivRoundChip<F: PrimeField> {
function construct (line 21) | pub fn construct(config: Rc<VarDivRoundConfig>) -> Self {
function num_cols_per_op (line 28) | pub fn num_cols_per_op() -> usize {
function configure (line 32) | pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetCo...
function name (line 102) | fn name(&self) -> String {
function num_cols_per_op (line 106) | fn num_cols_per_op(&self) -> usize {
function num_inputs_per_row (line 110) | fn num_inputs_per_row(&self) -> usize {
function num_outputs_per_row (line 114) | fn num_outputs_per_row(&self) -> usize {
function op_row_region (line 118) | fn op_row_region(
function forward (line 192) | fn forward(
FILE: src/gadgets/var_div_big.rs
type VarDivRoundBigChip (line 13) | pub struct VarDivRoundBigChip<F: PrimeField> {
function construct (line 19) | pub fn construct(config: Rc<GadgetConfig>) -> Self {
function num_cols_per_op (line 26) | pub fn num_cols_per_op() -> usize {
function configure (line 30) | pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetCo...
function name (line 117) | fn name(&self) -> String {
function num_cols_per_op (line 121) | fn num_cols_per_op(&self) -> usize {
function num_inputs_per_row (line 125) | fn num_inputs_per_row(&self) -> usize {
function num_outputs_per_row (line 129) | fn num_outputs_per_row(&self) -> usize {
function op_row_region (line 133) | fn op_row_region(
function forward (line 256) | fn forward(
FILE: src/gadgets/var_div_big3.rs
type VarDivRoundBig3Chip (line 13) | pub struct VarDivRoundBig3Chip<F: PrimeField> {
function construct (line 19) | pub fn construct(config: Rc<GadgetConfig>) -> Self {
function num_cols_per_op (line 26) | pub fn num_cols_per_op() -> usize {
function configure (line 30) | pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetCo...
function name (line 130) | fn name(&self) -> String {
function num_cols_per_op (line 134) | fn num_cols_per_op(&self) -> usize {
function num_inputs_per_row (line 138) | fn num_inputs_per_row(&self) -> usize {
function num_outputs_per_row (line 142) | fn num_outputs_per_row(&self) -> usize {
function op_row_region (line 146) | fn op_row_region(
function forward (line 280) | fn forward(
FILE: src/layers/arithmetic.rs
type Arithmetic (line 18) | pub trait Arithmetic<F: PrimeField> {
method gadget_forward (line 19) | fn gadget_forward(
method arithmetic_forward (line 27) | fn arithmetic_forward(
FILE: src/layers/arithmetic/add.rs
type AddChip (line 25) | pub struct AddChip {}
method get_activation (line 28) | fn get_activation(&self, layer_params: &Vec<i64>) -> ActivationType {
method gadget_forward (line 39) | fn gadget_forward(
method forward (line 53) | fn forward(
method used_gadgets (line 95) | fn used_gadgets(&self, layer_params: Vec<i64>) -> Vec<crate::gadgets::ga...
FILE: src/layers/arithmetic/div_var.rs
type DivVarChip (line 21) | pub struct DivVarChip {}
method gadget_forward (line 25) | fn gadget_forward(
method forward (line 44) | fn forward(
method used_gadgets (line 86) | fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::g...
FILE: src/layers/arithmetic/mul.rs
type MulChip (line 25) | pub struct MulChip {}
method gadget_forward (line 28) | fn gadget_forward(
method forward (line 48) | fn forward(
method used_gadgets (line 80) | fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::g...
FILE: src/layers/arithmetic/sub.rs
type SubChip (line 24) | pub struct SubChip {}
method gadget_forward (line 27) | fn gadget_forward(
method forward (line 41) | fn forward(
method used_gadgets (line 62) | fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::g...
FILE: src/layers/averager.rs
type Averager (line 14) | pub trait Averager<F: PrimeField> {
method splat (line 15) | fn splat(&self, input: &AssignedTensor<F>, layer_config: &LayerConfig)...
method get_div_val (line 17) | fn get_div_val(
method avg_forward (line 25) | fn avg_forward(
FILE: src/layers/avg_pool_2d.rs
type AvgPool2DChip (line 20) | pub struct AvgPool2DChip {}
method splat (line 23) | fn splat(&self, input: &AssignedTensor<F>, layer_config: &LayerConfig)...
method get_div_val (line 32) | fn get_div_val(
method forward (line 64) | fn forward(
method used_gadgets (line 88) | fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::g...
FILE: src/layers/batch_mat_mul.rs
type BatchMatMulChip (line 16) | pub struct BatchMatMulChip {}
method forward (line 19) | fn forward(
method used_gadgets (line 87) | fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::g...
FILE: src/layers/conv2d.rs
type PaddingEnum (line 28) | pub enum PaddingEnum {
type ConvLayerEnum (line 35) | pub enum ConvLayerEnum {
type Conv2DConfig (line 41) | pub struct Conv2DConfig {
type Conv2DChip (line 48) | pub struct Conv2DChip<F: PrimeField> {
function param_vec_to_config (line 55) | pub fn param_vec_to_config(layer_params: Vec<i64>) -> Conv2DConfig {
function get_padding (line 81) | pub fn get_padding(
function out_hw (line 102) | pub fn out_hw(
function splat (line 126) | pub fn splat<G: Clone>(
function splat_depthwise (line 223) | pub fn splat_depthwise<G: Clone>(
function forward (line 289) | fn forward(
method used_gadgets (line 437) | fn used_gadgets(&self, layer_params: Vec<i64>) -> Vec<crate::gadgets::ga...
FILE: src/layers/dag.rs
type DAGLayerConfig (line 41) | pub struct DAGLayerConfig {
type DAGLayerChip (line 48) | pub struct DAGLayerChip<F: PrimeField + Ord> {
function construct (line 54) | pub fn construct(dag_config: DAGLayerConfig) -> Self {
function forward (line 62) | pub fn forward(
method used_gadgets (line 481) | fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::g...
FILE: src/layers/div_fixed.rs
type DivFixedChip (line 18) | pub struct DivFixedChip {}
method get_div_val (line 21) | fn get_div_val<F: PrimeField>(
method forward (line 54) | fn forward(
method used_gadgets (line 90) | fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::g...
FILE: src/layers/fully_connected.rs
type FullyConnectedConfig (line 24) | pub struct FullyConnectedConfig {
method construct (line 29) | pub fn construct(normalize: bool) -> Self {
type FullyConnectedChip (line 34) | pub struct FullyConnectedChip<F: PrimeField> {
function compute_mm (line 40) | pub fn compute_mm(
function assign_array (line 64) | pub fn assign_array(
function random_vector (line 85) | pub fn random_vector(
function get_activation (line 102) | fn get_activation(&self, layer_params: &Vec<i64>) -> ActivationType {
function forward (line 113) | fn forward(
method used_gadgets (line 310) | fn used_gadgets(&self, layer_params: Vec<i64>) -> Vec<crate::gadgets::ga...
FILE: src/layers/layer.rs
type LayerType (line 13) | pub enum LayerType {
type ActivationType (line 53) | pub enum ActivationType {
type LayerConfig (line 64) | pub struct LayerConfig {
type CellRc (line 72) | pub type CellRc<F> = Rc<AssignedCell<F, F>>;
type AssignedTensor (line 73) | pub type AssignedTensor<F> = Array<CellRc<F>, IxDyn>;
type Layer (line 76) | pub trait Layer<F: PrimeField> {
method forward (line 77) | fn forward(
type GadgetConsumer (line 87) | pub trait GadgetConsumer {
method used_gadgets (line 88) | fn used_gadgets(&self, layer_params: Vec<i64>) -> Vec<GadgetType>;
FILE: src/layers/logistic.rs
type LogisticChip (line 14) | pub struct LogisticChip {}
method forward (line 17) | fn forward(
method used_gadgets (line 46) | fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::g...
FILE: src/layers/max_pool_2d.rs
type MaxPool2DChip (line 16) | pub struct MaxPool2DChip<F: PrimeField> {
function shape (line 21) | pub fn shape(inp: &AssignedTensor<F>, layer_config: &LayerConfig) -> (us...
function splat (line 44) | pub fn splat(
function forward (line 83) | fn forward(
method used_gadgets (line 121) | fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<GadgetType> {
FILE: src/layers/mean.rs
type MeanChip (line 17) | pub struct MeanChip {}
method get_keep_axis (line 20) | pub fn get_keep_axis(&self, layer_config: &LayerConfig) -> usize {
method splat (line 56) | fn splat(&self, input: &AssignedTensor<F>, layer_config: &LayerConfig)...
method get_div_val (line 75) | fn get_div_val(
method forward (line 111) | fn forward(
method used_gadgets (line 132) | fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::g...
FILE: src/layers/noop.rs
type NoopChip (line 9) | pub struct NoopChip {}
method forward (line 12) | fn forward(
method used_gadgets (line 26) | fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::g...
FILE: src/layers/pow.rs
type PowChip (line 14) | pub struct PowChip {}
method forward (line 17) | fn forward(
method used_gadgets (line 42) | fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::g...
FILE: src/layers/rsqrt.rs
type RsqrtChip (line 14) | pub struct RsqrtChip {}
method forward (line 17) | fn forward(
method used_gadgets (line 68) | fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::g...
FILE: src/layers/shape/broadcast.rs
type BroadcastChip (line 18) | pub struct BroadcastChip {}
method forward (line 22) | fn forward(
method used_gadgets (line 68) | fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::g...
FILE: src/layers/shape/concatenation.rs
type ConcatenationChip (line 13) | pub struct ConcatenationChip {}
method forward (line 16) | fn forward(
method used_gadgets (line 34) | fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<GadgetType> {
FILE: src/layers/shape/mask_neg_inf.rs
type MaskNegInfChip (line 13) | pub struct MaskNegInfChip {}
method forward (line 16) | fn forward(
method used_gadgets (line 52) | fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::g...
FILE: src/layers/shape/pack.rs
type PackChip (line 13) | pub struct PackChip {}
method forward (line 16) | fn forward(
method used_gadgets (line 43) | fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<GadgetType> {
FILE: src/layers/shape/pad.rs
function pad (line 18) | pub fn pad<G: Clone>(
type PadChip (line 54) | pub struct PadChip {}
method param_vec_to_config (line 61) | pub fn param_vec_to_config(layer_params: Vec<i64>) -> PadConfig {
method forward (line 73) | fn forward(
type PadConfig (line 56) | pub struct PadConfig {
method used_gadgets (line 94) | fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::g...
FILE: src/layers/shape/permute.rs
type PermuteChip (line 13) | pub struct PermuteChip {}
method forward (line 16) | fn forward(
method used_gadgets (line 40) | fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::g...
FILE: src/layers/shape/reshape.rs
type ReshapeChip (line 13) | pub struct ReshapeChip {}
method forward (line 16) | fn forward(
method used_gadgets (line 35) | fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::g...
FILE: src/layers/shape/resize_nn.rs
type ResizeNNChip (line 13) | pub struct ResizeNNChip {}
method forward (line 17) | fn forward(
method used_gadgets (line 53) | fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::g...
FILE: src/layers/shape/rotate.rs
type RotateChip (line 14) | pub struct RotateChip {}
method forward (line 26) | fn forward(
method used_gadgets (line 71) | fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::g...
FILE: src/layers/shape/slice.rs
type SliceChip (line 13) | pub struct SliceChip {}
method forward (line 16) | fn forward(
method used_gadgets (line 45) | fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<GadgetType> {
FILE: src/layers/shape/split.rs
type SplitChip (line 13) | pub struct SplitChip {}
method forward (line 16) | fn forward(
method used_gadgets (line 44) | fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<GadgetType> {
FILE: src/layers/shape/transpose.rs
type TransposeChip (line 13) | pub struct TransposeChip {}
method forward (line 16) | fn forward(
method used_gadgets (line 49) | fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::g...
FILE: src/layers/softmax.rs
type SoftmaxChip (line 22) | pub struct SoftmaxChip {}
method softmax_flat (line 25) | pub fn softmax_flat<F: PrimeField>(
method forward (line 119) | fn forward(
method used_gadgets (line 202) | fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::g...
FILE: src/layers/sqrt.rs
type SqrtChip (line 14) | pub struct SqrtChip {}
method forward (line 17) | fn forward(
method used_gadgets (line 68) | fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::g...
FILE: src/layers/square.rs
type SquareChip (line 15) | pub struct SquareChip {}
method forward (line 18) | fn forward(
method used_gadgets (line 62) | fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::g...
FILE: src/layers/squared_diff.rs
type SquaredDiffChip (line 18) | pub struct SquaredDiffChip {}
method forward (line 21) | fn forward(
method used_gadgets (line 70) | fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::g...
FILE: src/layers/tanh.rs
type TanhChip (line 14) | pub struct TanhChip {}
method forward (line 17) | fn forward(
method used_gadgets (line 42) | fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::g...
FILE: src/layers/update.rs
type UpdateChip (line 14) | pub struct UpdateChip {}
method forward (line 17) | fn forward(
method used_gadgets (line 48) | fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::g...
FILE: src/model.rs
type ModelCircuit (line 82) | pub struct ModelCircuit<F: PrimeField> {
type ModelConfig (line 95) | pub struct ModelConfig<F: PrimeField + Ord + FromUniformBytes<64>> {
function assign_tensors_map (line 103) | pub fn assign_tensors_map(
function tensor_map_to_vec (line 142) | pub fn tensor_map_to_vec(
function assign_tensors_vec (line 166) | pub fn assign_tensors_vec(
function assign_constants (line 182) | pub fn assign_constants(
function assign_constants2 (line 233) | pub fn assign_constants2(
function generate_from_file (line 292) | pub fn generate_from_file(config_file: &str, inp_file: &str) -> ModelCir...
function generate_from_msgpack (line 297) | pub fn generate_from_msgpack(config: ModelMsgpack, panic_empty_tensor: b...
function assign_and_commit (line 482) | pub fn assign_and_commit(
function copy_and_commit (line 520) | pub fn copy_and_commit(
type Config (line 560) | type Config = ModelConfig<F>;
type FloorPlanner (line 561) | type FloorPlanner = SimpleFloorPlanner;
type Params (line 562) | type Params = ();
function without_witnesses (line 564) | fn without_witnesses(&self) -> Self {
function configure (line 568) | fn configure(meta: &mut ConstraintSystem<F>) -> Self::Config {
function synthesize (line 644) | fn synthesize(&self, config: Self::Config, mut layouter: impl Layouter<F...
FILE: src/utils/helpers.rs
constant RAND_START_IDX (line 11) | pub const RAND_START_IDX: i64 = i64::MIN;
constant NUM_RANDOMS (line 12) | pub const NUM_RANDOMS: i64 = 20001;
function convert_to_bigint (line 15) | pub fn convert_to_bigint<F: PrimeField>(x: Value<F>) -> BigUint {
function convert_pos_int (line 23) | pub fn convert_pos_int<F: PrimeField>(x: Value<F>) -> i128 {
function print_pos_int (line 35) | pub fn print_pos_int<F: PrimeField>(prefix: &str, x: Value<F>, scale_fac...
function print_assigned_arr (line 41) | pub fn print_assigned_arr<F: PrimeField>(
function get_public_values (line 56) | pub fn get_public_values<F: PrimeField>() -> Vec<F> {
function shape_dominates (line 66) | fn shape_dominates(s1: &[usize], s2: &[usize]) -> bool {
function intermediate_shape (line 81) | fn intermediate_shape(s1: &[usize], s2: &[usize]) -> Vec<usize> {
function final_shape (line 89) | fn final_shape(s1: &[usize], s2: &[usize]) -> Vec<usize> {
function broadcast (line 97) | pub fn broadcast<G: Clone>(
FILE: src/utils/loader.rs
type TensorMsgpack (line 6) | pub struct TensorMsgpack {
type LayerMsgpack (line 13) | pub struct LayerMsgpack {
type ModelMsgpack (line 24) | pub struct ModelMsgpack {
function load_config_msgpack (line 39) | pub fn load_config_msgpack(config_path: &str) -> ModelMsgpack {
function load_model_msgpack (line 48) | pub fn load_model_msgpack(config_path: &str, inp_path: &str) -> ModelMsg...
FILE: src/utils/proving_ipa.rs
function get_ipa_params (line 28) | pub fn get_ipa_params(params_dir: &str, degree: u32) -> ParamsIPA<EqAffi...
function time_circuit_ipa (line 48) | pub fn time_circuit_ipa(circuit: ModelCircuit<Fp>) {
FILE: src/utils/proving_kzg.rs
function get_kzg_params (line 28) | pub fn get_kzg_params(params_dir: &str, degree: u32) -> ParamsKZG<Bn256> {
function serialize (line 48) | pub fn serialize(data: &Vec<u8>, path: &str) -> u64 {
function verify_kzg (line 54) | pub fn verify_kzg(
function time_circuit_kzg (line 74) | pub fn time_circuit_kzg(circuit: ModelCircuit<Fr>) {
function verify_circuit_kzg (line 171) | pub fn verify_circuit_kzg(
FILE: testing/circuits/last_two_layers.py
class LastTwoLayers (line 16) | class LastTwoLayers(tf.keras.Model):
method __init__ (line 17) | def __init__(self, name=None):
method call (line 40) | def call(self, x):
Condensed preview — 93 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (363K chars).
[
{
"path": ".gitignore",
"chars": 470,
"preview": "# Generated by Cargo\n# will have compiled files and executables\n/target/\n\n# Remove Cargo.lock from gitignore if creating"
},
{
"path": "Cargo.toml",
"chars": 1140,
"preview": "[package]\nname = \"zkml\"\nversion = \"0.0.1\"\nedition = \"2021\"\ndescription = \"Zero-knowledge machine learning\"\nlicense = \"LI"
},
{
"path": "LICENSE",
"chars": 11357,
"preview": " Apache License\n Version 2.0, January 2004\n "
},
{
"path": "README.md",
"chars": 2676,
"preview": "# zkml\n\nzkml is a framework for constructing proofs of ML model execution in ZK-SNARKs.\nRead our [blog\npost](https://med"
},
{
"path": "backwards/README.md",
"chars": 103,
"preview": "### About\n\nTakes in a feed-forward TF model and outputs a new computational graph for back-propagation."
},
{
"path": "backwards/backward.py",
"chars": 10873,
"preview": "#\n# A script for generating a backprop computational graph from forward\n#\n\nimport argparse\nimport ast\nfrom typing import"
},
{
"path": "python/converter.py",
"chars": 21091,
"preview": "import argparse\nimport ast\nfrom typing import Literal, Union\nimport tensorflow as tf\nimport numpy as np\nimport tflite\nim"
},
{
"path": "python/input_converter.py",
"chars": 1315,
"preview": "import argparse\nimport ast\nimport numpy as np\nimport msgpack\n\ndef main():\n parser = argparse.ArgumentParser()\n parser."
},
{
"path": "python/training_converter.py",
"chars": 2018,
"preview": "# A converter for training data\n# Performs the conversion npy -> msgpack\n# TODO: Ensure that training works with models "
},
{
"path": "rustfmt.toml",
"chars": 31,
"preview": "tab_spaces = 2\nmax_width = 100\n"
},
{
"path": "src/bin/test_circuit.rs",
"chars": 791,
"preview": "use halo2_proofs::{dev::MockProver, halo2curves::bn256::Fr};\nuse zkml::{\n model::ModelCircuit,\n utils::{\n helpers::"
},
{
"path": "src/bin/time_circuit.rs",
"chars": 757,
"preview": "use halo2_proofs::halo2curves::{bn256::Fr, pasta::Fp};\nuse zkml::{\n model::ModelCircuit,\n utils::{proving_ipa::time_ci"
},
{
"path": "src/bin/verify_circuit.rs",
"chars": 1040,
"preview": "use halo2_proofs::halo2curves::bn256::Fr;\nuse zkml::{\n model::ModelCircuit,\n utils::{loader::load_config_msgpack, prov"
},
{
"path": "src/bin/verify_wav.rs",
"chars": 1375,
"preview": "use std::fs::File;\n\nuse halo2_proofs::{dev::MockProver, halo2curves::bn256::Fr};\nuse zkml::{\n model::ModelCircuit,\n ut"
},
{
"path": "src/commitments/commit.rs",
"chars": 455,
"preview": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Erro"
},
{
"path": "src/commitments/packer.rs",
"chars": 11351,
"preview": "use std::{\n cmp::{max, min},\n collections::{BTreeMap, HashMap},\n marker::PhantomData,\n rc::Rc,\n};\n\nuse halo2_proofs:"
},
{
"path": "src/commitments/poseidon_commit.rs",
"chars": 3977,
"preview": "use std::{collections::HashMap, marker::PhantomData, rc::Rc};\n\nuse halo2_gadgets::poseidon::{\n primitives::{generate_co"
},
{
"path": "src/commitments.rs",
"chars": 57,
"preview": "pub mod commit;\npub mod packer;\npub mod poseidon_commit;\n"
},
{
"path": "src/gadgets/add_pairs.rs",
"chars": 3712,
"preview": "use std::{marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter, Region},\n halo2curves:"
},
{
"path": "src/gadgets/adder.rs",
"chars": 3627,
"preview": "use std::{marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter, Region, Value},\n halo2"
},
{
"path": "src/gadgets/bias_div_floor_relu6.rs",
"chars": 8287,
"preview": "use std::{collections::HashMap, marker::PhantomData};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter, Region},\n"
},
{
"path": "src/gadgets/bias_div_round_relu6.rs",
"chars": 9147,
"preview": "use std::{collections::HashMap, marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter, R"
},
{
"path": "src/gadgets/dot_prod.rs",
"chars": 5870,
"preview": "use std::{marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter, Region},\n halo2curves:"
},
{
"path": "src/gadgets/gadget.rs",
"chars": 4018,
"preview": "use std::{\n collections::{BTreeSet, HashMap},\n sync::Arc,\n};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter, "
},
{
"path": "src/gadgets/input_lookup.rs",
"chars": 2165,
"preview": "use std::{marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter, Region, Value},\n halo2"
},
{
"path": "src/gadgets/max.rs",
"chars": 5345,
"preview": "use std::{marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter, Region},\n halo2curves:"
},
{
"path": "src/gadgets/mul_pairs.rs",
"chars": 3791,
"preview": "use std::{marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter, Region},\n halo2curves:"
},
{
"path": "src/gadgets/nonlinear/exp.rs",
"chars": 2816,
"preview": "use std::{collections::HashMap, marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter, R"
},
{
"path": "src/gadgets/nonlinear/logistic.rs",
"chars": 2839,
"preview": "use std::{collections::HashMap, marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter, R"
},
{
"path": "src/gadgets/nonlinear/non_linearity.rs",
"chars": 5358,
"preview": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter, Region, Value},\n halo"
},
{
"path": "src/gadgets/nonlinear/pow.rs",
"chars": 2866,
"preview": "use std::{collections::HashMap, marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter, R"
},
{
"path": "src/gadgets/nonlinear/relu.rs",
"chars": 2585,
"preview": "use std::{collections::HashMap, marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter, R"
},
{
"path": "src/gadgets/nonlinear/rsqrt.rs",
"chars": 2783,
"preview": "use std::{collections::HashMap, marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter, R"
},
{
"path": "src/gadgets/nonlinear/sqrt.rs",
"chars": 2739,
"preview": "use std::{collections::HashMap, marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter, R"
},
{
"path": "src/gadgets/nonlinear/tanh.rs",
"chars": 2759,
"preview": "use std::{collections::HashMap, marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter, R"
},
{
"path": "src/gadgets/nonlinear.rs",
"chars": 124,
"preview": "pub mod exp;\npub mod logistic;\npub mod non_linearity;\npub mod pow;\npub mod relu;\npub mod rsqrt;\npub mod sqrt;\npub mod ta"
},
{
"path": "src/gadgets/sqrt_big.rs",
"chars": 5363,
"preview": "use std::{marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter, Region},\n halo2curves:"
},
{
"path": "src/gadgets/square.rs",
"chars": 3218,
"preview": "use std::{marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Region},\n halo2curves::ff::Prime"
},
{
"path": "src/gadgets/squared_diff.rs",
"chars": 3797,
"preview": "use std::{marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter, Region},\n halo2curves:"
},
{
"path": "src/gadgets/sub_pairs.rs",
"chars": 3712,
"preview": "use std::{marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter, Region},\n halo2curves:"
},
{
"path": "src/gadgets/update.rs",
"chars": 6099,
"preview": "use std::marker::PhantomData;\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter, Region},\n halo2curves::ff::Prime"
},
{
"path": "src/gadgets/var_div.rs",
"chars": 6286,
"preview": "use std::{marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter, Region},\n halo2curves:"
},
{
"path": "src/gadgets/var_div_big.rs",
"chars": 8648,
"preview": "use std::{marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter, Region},\n halo2curves:"
},
{
"path": "src/gadgets/var_div_big3.rs",
"chars": 9676,
"preview": "use std::{marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter, Region},\n halo2curves:"
},
{
"path": "src/gadgets.rs",
"chars": 365,
"preview": "pub mod add_pairs;\npub mod adder;\npub mod bias_div_floor_relu6;\npub mod bias_div_round_relu6;\npub mod dot_prod;\npub mod "
},
{
"path": "src/layers/arithmetic/add.rs",
"chars": 2982,
"preview": "use std::{collections::HashMap, rc::Rc, vec};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter},\n halo2curves::f"
},
{
"path": "src/layers/arithmetic/div_var.rs",
"chars": 2752,
"preview": "use std::{collections::HashMap, rc::Rc, vec};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter},\n halo2curves::f"
},
{
"path": "src/layers/arithmetic/mul.rs",
"chars": 2384,
"preview": "use std::{collections::HashMap, rc::Rc, vec};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter},\n halo2curves::f"
},
{
"path": "src/layers/arithmetic/sub.rs",
"chars": 1675,
"preview": "use std::{collections::HashMap, rc::Rc, vec};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter},\n halo2curves::f"
},
{
"path": "src/layers/arithmetic.rs",
"chars": 1631,
"preview": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter},\n halo2curves::ff::Pr"
},
{
"path": "src/layers/averager.rs",
"chars": 2114,
"preview": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter},\n halo2curves::ff::Pr"
},
{
"path": "src/layers/avg_pool_2d.rs",
"chars": 2522,
"preview": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter, Value},\n halo2curves:"
},
{
"path": "src/layers/batch_mat_mul.rs",
"chars": 2875,
"preview": "use std::{collections::HashMap, marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::Pr"
},
{
"path": "src/layers/conv2d.rs",
"chars": 13161,
"preview": "// TODO: Speed up Depthwise operations with Freivald's algorithm\n\nuse std::{collections::HashMap, marker::PhantomData, r"
},
{
"path": "src/layers/dag.rs",
"chars": 14370,
"preview": "use std::{collections::HashMap, fs::File, io::BufWriter, marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{circuit::Layo"
},
{
"path": "src/layers/div_fixed.rs",
"chars": 2375,
"preview": "use std::{collections::HashMap, rc::Rc, vec};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter, Value},\n halo2cu"
},
{
"path": "src/layers/fully_connected.rs",
"chars": 9991,
"preview": "use std::{collections::HashMap, marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter, R"
},
{
"path": "src/layers/layer.rs",
"chars": 1842,
"preview": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter},\n halo2curves::ff::Pr"
},
{
"path": "src/layers/logistic.rs",
"chars": 1502,
"preview": "use std::{collections::HashMap, rc::Rc, vec};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk:"
},
{
"path": "src/layers/max_pool_2d.rs",
"chars": 3367,
"preview": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Erro"
},
{
"path": "src/layers/mean.rs",
"chars": 3539,
"preview": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter, Value},\n halo2curves:"
},
{
"path": "src/layers/noop.rs",
"chars": 822,
"preview": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Erro"
},
{
"path": "src/layers/pow.rs",
"chars": 1427,
"preview": "use std::{collections::HashMap, rc::Rc, vec};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk:"
},
{
"path": "src/layers/rsqrt.rs",
"chars": 2193,
"preview": "use std::{collections::HashMap, rc::Rc, vec};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk:"
},
{
"path": "src/layers/shape/broadcast.rs",
"chars": 1990,
"preview": "//\n// Broadcast is used as a temporary measure to represent a the backprop\n// of a full-kernel AvgPool2D\n//\n\nuse std::{c"
},
{
"path": "src/layers/shape/concatenation.rs",
"chars": 1077,
"preview": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Erro"
},
{
"path": "src/layers/shape/mask_neg_inf.rs",
"chars": 1637,
"preview": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Erro"
},
{
"path": "src/layers/shape/pack.rs",
"chars": 1314,
"preview": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Erro"
},
{
"path": "src/layers/shape/pad.rs",
"chars": 2725,
"preview": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter},\n halo2curves::ff::Pr"
},
{
"path": "src/layers/shape/permute.rs",
"chars": 1076,
"preview": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Erro"
},
{
"path": "src/layers/shape/reshape.rs",
"chars": 1060,
"preview": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Erro"
},
{
"path": "src/layers/shape/resize_nn.rs",
"chars": 1688,
"preview": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Erro"
},
{
"path": "src/layers/shape/rotate.rs",
"chars": 1771,
"preview": "// TODO: The implementation is not ideal.\n\nuse std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{circuit::Layoute"
},
{
"path": "src/layers/shape/slice.rs",
"chars": 1282,
"preview": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Erro"
},
{
"path": "src/layers/shape/split.rs",
"chars": 1240,
"preview": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Erro"
},
{
"path": "src/layers/shape/transpose.rs",
"chars": 1479,
"preview": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Erro"
},
{
"path": "src/layers/shape.rs",
"chars": 209,
"preview": "pub mod broadcast;\npub mod concatenation;\npub mod mask_neg_inf;\npub mod pack;\npub mod pad;\npub mod permute;\npub mod resh"
},
{
"path": "src/layers/softmax.rs",
"chars": 6361,
"preview": "use std::{collections::HashMap, rc::Rc, vec};\n\nuse halo2_proofs::{\n circuit::{AssignedCell, Layouter},\n halo2curves::f"
},
{
"path": "src/layers/sqrt.rs",
"chars": 2183,
"preview": "use std::{collections::HashMap, rc::Rc, vec};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk:"
},
{
"path": "src/layers/square.rs",
"chars": 2022,
"preview": "use std::{collections::HashMap, rc::Rc, vec};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk:"
},
{
"path": "src/layers/squared_diff.rs",
"chars": 2311,
"preview": "use std::{collections::HashMap, rc::Rc, vec};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk:"
},
{
"path": "src/layers/tanh.rs",
"chars": 1437,
"preview": "use std::{collections::HashMap, rc::Rc, vec};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk:"
},
{
"path": "src/layers/update.rs",
"chars": 1706,
"preview": "use std::{collections::HashMap, rc::Rc, vec};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk:"
},
{
"path": "src/layers.rs",
"chars": 457,
"preview": "// Generics\npub mod averager;\n\npub mod arithmetic;\npub mod shape;\n\n// Concrete implementations\npub mod avg_pool_2d;\npub "
},
{
"path": "src/lib.rs",
"chars": 112,
"preview": "#![feature(int_roundings)]\n\npub mod commitments;\npub mod gadgets;\npub mod layers;\npub mod model;\npub mod utils;\n"
},
{
"path": "src/model.rs",
"chars": 30336,
"preview": "use std::{\n collections::{BTreeMap, BTreeSet, HashMap},\n marker::PhantomData,\n rc::Rc,\n sync::{Arc, Mutex},\n};\n\nuse "
},
{
"path": "src/utils/helpers.rs",
"chars": 3524,
"preview": "use halo2_proofs::{\n circuit::{AssignedCell, Value},\n halo2curves::ff::PrimeField,\n};\nuse ndarray::{Array, IxDyn};\nuse"
},
{
"path": "src/utils/loader.rs",
"chars": 2075,
"preview": "use std::{fs::File, io::BufReader};\n\nuse serde_derive::{Deserialize, Serialize};\n\n#[derive(Clone, Debug, Serialize, Dese"
},
{
"path": "src/utils/proving_ipa.rs",
"chars": 3576,
"preview": "use std::{\n fs::File,\n io::{BufReader, Write},\n path::Path,\n time::Instant,\n};\n\nuse halo2_proofs::{\n dev::MockProve"
},
{
"path": "src/utils/proving_kzg.rs",
"chars": 6058,
"preview": "use std::{\n fs::File,\n io::{BufReader, Write},\n path::Path,\n time::Instant,\n};\n\nuse halo2_proofs::{\n dev::MockProve"
},
{
"path": "src/utils.rs",
"chars": 75,
"preview": "pub mod helpers;\npub mod loader;\npub mod proving_ipa;\npub mod proving_kzg;\n"
},
{
"path": "testing/circuits/last_two_layers.py",
"chars": 2383,
"preview": "import tensorflow as tf\nimport os \nimport numpy as np\n\ninterpreter = tf.lite.Interpreter(\n model_path=f'./testing/cir"
}
]
// ... and 1 more file (download for full content)
About this extraction
This page contains the full source code of the ddkang/zkml GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 93 files (13.7 MB), approximately 95.6k tokens, and a symbol index with 543 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.