[
  {
    "path": ".gitignore",
    "content": "# Generated by Cargo\n# will have compiled files and executables\n/target/\n\n# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries\n# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html\nCargo.lock\n\n# These are backup files generated by rustfmt\n**/*.rs.bk\n\n# VSCode\n.vscode\n\n**/.DS_Store\n*.swp\n\n# Proof stuff\nout.msgpack\nproof_size_check\npkey\nvkey\nproof\n\nparams_kzg\nparams_ipa\nexamples\ntesting/data\n*.diff\n"
  },
  {
    "path": "Cargo.toml",
    "content": "[package]\nname = \"zkml\"\nversion = \"0.0.1\"\nedition = \"2021\"\ndescription = \"Zero-knowledge machine learning\"\nlicense = \"LICENSE\"\nhomepage = \"https://github.com/ddkang/zkml\"\nrepository = \"https://github.com/ddkang/zkml-public.git\"\nreadme = \"README.md\"\nexclude = [\n  \"params\",\n  \"params_kzg\",\n  \"python\",\n]\n\n[profile.dev]\nopt-level = 3\n\n[profile.test]\nopt-level = 3\n\n[dependencies]\nbitvec = \"1.0.1\"\nhalo2 = { git=\"https://github.com/privacy-scaling-explorations/halo2\", package=\"halo2\", rev=\"17e9765c199670534c0299c96128d0464a188d0b\" }\nhalo2_gadgets = { git=\"https://github.com/privacy-scaling-explorations/halo2\", package=\"halo2_gadgets\", rev=\"17e9765c199670534c0299c96128d0464a188d0b\", features = [\"circuit-params\"] }\nhalo2_proofs = { git=\"https://github.com/privacy-scaling-explorations/halo2\", package=\"halo2_proofs\", rev=\"17e9765c199670534c0299c96128d0464a188d0b\", features = [\"circuit-params\"] }\nlazy_static = \"1.4.0\"\nndarray = \"0.15.6\"\nnum-bigint = \"0.4.3\"\nnum-traits = \"0.2.15\"\nonce_cell = \"1.15.0\"\nrand = \"0.8.5\"\nrmp-serde = \"1.1.1\"\nrounded-div = \"0.1.2\"\nserde = \"1.0.152\"\nserde_derive = \"1.0.152\"\nserde_json = \"1.0.85\"\nwav = \"1.0.0\"\n\n"
  },
  {
    "path": "LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. 
For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. 
Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. (Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "README.md",
    "content": "# zkml\n\nzkml is a framework for constructing proofs of ML model execution in ZK-SNARKs.\nRead our [blog\npost](https://medium.com/@danieldkang/trustless-verification-of-machine-learning-6f648fd8ba88)\nand [paper](https://arxiv.org/abs/2210.08674) for implementation details.\n\nzkml requires the nightly build of Rust:\n\n```\nrustup override set nightly\n```\n\n## Quickstart\n\nRun the following commands:\n\n```sh\n# Installs rust, skip if you already have rust installed\ncurl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh\n\ngit clone https://github.com/ddkang/zkml.git\ncd zkml\nrustup override set nightly\ncargo build --release\nmkdir params_kzg\nmkdir params_ipa\n\n# This should take ~16s to run the first time\n# and ~8s to run the second time\n./target/release/time_circuit examples/mnist/model.msgpack examples/mnist/inp.msgpack kzg\n```\n\nThis will prove an MNIST circuit! It will require around 2GB of memory and take\naround 8 seconds to run.\n\n\n\n## Converting your own model and data\n\nTo convert your own model and data, you will need to convert the model and data to the format zkml\nexpects. Currently, we accept TFLite models. We show an example below.\n\n1. First `cd examples/mnist`\n\n2. We've already created a model that achieves high accuracy on MNIST (`model.tflite`). You will\n   need to create your own TFLite model. One way is to [convert a model from Keras](https://stackoverflow.com/questions/53256877/how-to-convert-kerash5-file-to-a-tflite-file).\n\n3. You will need to convert the model:\n```bash\npython ../../python/converter.py --model model.tflite --model_output converted_model.msgpack --config_output config.msgpack --scale_factor 512 --k 17 --num_cols 10 --num_randoms 1024\n```\n\nThere are several parameters that need to be changed depending on the model (`scale_factor`, `k`,\n`num_cols`, and `num_randoms`).\n\n4. You will first need to serialize the model input to numpy's serialization format `npy`. We've\n   written a small script to do this for the first test data point in MNIST:\n```bash\npython data_to_npy.py\n```\n\n5. You will then need to convert the input to the model:\n```bash\npython ../../python/input_converter.py --model_config converted_model.msgpack --inputs 7.npy --output example_inp.msgpack\n```\n\n6. Once you've converted the model and input, you can run the model as above! However, we generally\n   recommend testing the model before proving (you will need to build zkml before running the next\n   line):\n```bash\ncd ../../\n./target/release/test_circuit examples/mnist/converted_model.msgpack examples/mnist/example_inp.msgpack\n```\n\n\n## Contact us\n\nIf you're interested in extending or using zkml, please contact us at `ddkang\n[at] g.illinois.edu`.\n"
  },
  {
    "path": "backwards/README.md",
    "content": "### About\n\nTakes in a feed-forward TF model and outputs a new computational graph for back-propagation."
  },
  {
    "path": "backwards/backward.py",
    "content": "#\n# A script for generating a backprop computational graph from forward\n#\n\nimport argparse\nimport ast\nfrom typing import Literal, Union\nimport msgpack\nimport numpy as np\n\nclass CircuitConfig():\n    def __init__(self, starting_index):\n        self.next_index = starting_index\n        self.outp_to_grad = {}\n        self.label_tensor_idx = None\n        self.weights_update = None\n\n    # Allocates an index for a gradient tensor and returns\n    def new_gradient_tensor(self, tensor_idx):\n        if tensor_idx in self.outp_to_grad:\n            raise Exception(\"Tensor already allocated\")\n        self.outp_to_grad[tensor_idx] = self.next_index\n        self.next_index += 1\n        return self.outp_to_grad[tensor_idx]\n\n    # Allocates an index for a tensor\n    def new_tensor(self):\n        new_index = self.next_index\n        self.next_index += 1\n        return new_index\n\n    def new_label_tensor(self):\n        if self.label_tensor_idx is not None:\n            raise Exception(\"Label tensor already allocated\")\n        self.label_tensor_idx = self.next_index\n        self.next_index += 1\n        return self.label_tensor_idx\n\n    # Allocates an index for a gradient tensor and returns\n    def gradient_tensor_idx(self, tensor_idx):\n        return self.outp_to_grad[tensor_idx]\n\n# TODO: Put these in enums\nNO_ACTIVATION = 0\n\nSAME = 0\nVALID = 1\n\nCONV2D = 0\nCONV2D_DEPTHWISE = 1\n\nclass Conv2D():\n    def __init__(self, layer):\n        params = layer['params']\n        self.padding = params[1]\n        self.activation_type = params[2]\n        self.stride_h = params[3]\n        self.stride_w = params[4]\n\n    def backward(self, layer, transcript, config):\n        inputs_idx, inputs_shape = layer['inp_idxes'][0], layer['inp_shapes'][0]\n        weights_idx, weights_shape = layer['inp_idxes'][1], layer['inp_shapes'][1]\n        bias_idx, bias_shape = layer['inp_idxes'][2], layer['inp_shapes'][2]\n        output_idx, output_shape = layer['out_idxes'][0], layer['out_shapes'][0]\n\n        permuted_inputs_idx = config.new_tensor()\n        permutation = [3, 1, 2, 0]\n        permuted_inputs_shape = [inputs_shape[p] for p in permutation]\n        inputs_permute_layer = {\n            'layer_type': 'Permute',\n            'params': permutation,\n            'inp_idxes': [inputs_idx],\n            'out_idxes': [permuted_inputs_idx],\n            'inp_shapes': [inputs_shape],\n            'out_shapes': [permuted_inputs_shape],\n            'mask': [],\n        }\n        transcript.append(inputs_permute_layer)\n\n        permuted_outputs_idx = config.new_tensor()\n        permuted_outputs_shape = [output_shape[p] for p in permutation]\n        inputs_permute_layer = {\n            'layer_type': 'Permute',\n            'params': permutation,\n            'inp_idxes': [config.gradient_tensor_idx(output_idx)],\n            'out_idxes': [permuted_outputs_idx],\n            'inp_shapes': [output_shape],\n            'out_shapes': [permuted_outputs_shape],\n            'mask': [],\n        }\n        transcript.append(inputs_permute_layer)\n\n\n        dw_idx, dw_shape = config.new_tensor(), weights_shape\n        dw_conv = {\n            'layer_type': 'Conv2D',\n            'params': [CONV2D, VALID, NO_ACTIVATION, self.stride_h, self.stride_w],\n            'inp_idxes': [permuted_inputs_idx, permuted_outputs_idx],\n            'out_idxes': [dw_idx],\n            'inp_shapes': [permuted_inputs_shape, permuted_outputs_shape],\n            'out_shapes': 
            'out_shapes': [dw_shape],\n            'mask': [],\n        }\n        transcript.append(dw_conv)\n        config.weights_update = dw_idx\n\n        permutation = [3, 1, 2, 0]\n        permutation_weights_idx = config.new_tensor()\n        permutation_weights_shape = [weights_shape[p] for p in permutation]\n\n        permute_weights = {\n            'layer_type': 'Permute',\n            'params': permutation,\n            'inp_idxes': [weights_idx],\n            'out_idxes': [permutation_weights_idx],\n            'inp_shapes': [weights_shape],\n            'out_shapes': [permutation_weights_shape],\n            'mask': [],\n        }\n        transcript.append(permute_weights)\n\n        rotated_weights_idx, rotated_weights_shape = config.new_tensor(), permutation_weights_shape\n        rotate_layer = {\n            'layer_type': 'Rotate',\n            'params': [1, 2],\n            'inp_idxes': [permutation_weights_idx],\n            'out_idxes': [rotated_weights_idx],\n            'inp_shapes': [permutation_weights_shape],\n            'out_shapes': [rotated_weights_shape],\n            'mask': [],\n        }\n        transcript.append(rotate_layer)\n\n        # Copy the output shape so we don't mutate the forward layer's metadata in place\n        padded_gradients_idx, padded_gradients_shape = config.new_tensor(), list(output_shape)\n        padded_gradients_shape[1] += (rotated_weights_shape[1] - 1) * 2\n        padded_gradients_shape[2] += (rotated_weights_shape[2] - 1) * 2\n        pad_layer = {\n            'layer_type': 'Pad',\n            'params': [\n                0, 0,\n                rotated_weights_shape[1] - 1, rotated_weights_shape[1] - 1,\n                rotated_weights_shape[2] - 1, rotated_weights_shape[2] - 1,\n                0, 0\n            ],\n            'inp_idxes': [config.gradient_tensor_idx(output_idx)],\n            'out_idxes': [padded_gradients_idx],\n            'inp_shapes': [],\n            'out_shapes': [],\n            'mask': [],\n        }\n        transcript.append(pad_layer)\n\n        # dX: convolve the padded output gradients with the rotated weights\n        dx_idx, dx_shape = config.new_gradient_tensor(inputs_idx), inputs_shape\n        input_conv_layer = {\n            'layer_type': 'Conv2D',\n            'params': [CONV2D, VALID, NO_ACTIVATION, self.stride_h, self.stride_w],\n            'inp_idxes': [padded_gradients_idx, rotated_weights_idx],\n            'out_idxes': [dx_idx],\n            'inp_shapes': [padded_gradients_shape, rotated_weights_shape],\n            'out_shapes': [dx_shape],\n            'mask': [],\n        }\n        transcript.append(input_conv_layer)\n\n        permutation = [3, 1, 2, 0]\n        permuted_dw_idx = config.new_tensor()\n        permuted_dw_shape = [dw_shape[p] for p in permutation]\n\n        permute_dw = {\n            'layer_type': 'Permute',\n            'params': permutation,\n            'inp_idxes': [dw_idx],\n            'out_idxes': [permuted_dw_idx],\n            'inp_shapes': [dw_shape],\n            'out_shapes': [permuted_dw_shape],\n            'mask': [],\n        }\n        transcript.append(permute_dw)\n\n        updated_weights_idx, updated_weights_shape = config.new_tensor(), dw_shape\n        # Emit a layer that applies the gradient update to the weights (currently disabled)\n        update_weights_layer = {\n            'layer_type': 'Update',\n            'params': [],\n            'inp_idxes': [weights_idx, permuted_dw_idx],\n            'out_idxes': [updated_weights_idx],\n            'inp_shapes': [weights_shape, permuted_dw_shape],\n            'out_shapes': [updated_weights_shape],\n            'mask': [],\n        }\n        # transcript.append(update_weights_layer)\n\n
class Softmax():\n    def __init__(self, layer):\n        return\n\n    # TODO: Make this generalizable to all neural networks\n    # (do not assume that softmax is the last layer, fused with CE-loss)\n    def backward(self, layer, transcript, config):\n        sub_layer = {\n            'layer_type': 'Sub',\n            'params': [],\n            # y_hat - y\n            'inp_idxes': [layer['out_idxes'][0], config.label_tensor_idx],\n            'out_idxes': [config.new_gradient_tensor(layer['inp_idxes'][0])],\n            'inp_shapes': [layer['out_shapes'][0], layer['out_shapes'][0]],\n            'out_shapes': [layer['out_shapes'][0]],\n            'mask': [],\n        }\n        transcript.append(sub_layer)\n\nclass AveragePool2D():\n    def __init__(self, layer):\n        return\n\n    def backward(self, layer, transcript, config):\n        # TODO: This is very model specific, must rewrite to be accurate\n        # We just broadcast dx across 3 axes\n        # 1 x 3 x 3 x 1 -> 1 x 1 x 1 x 1280\n\n        div_idx = config.new_tensor()\n        reshape_layer = {\n            'layer_type': 'Broadcast',\n            'params': [],\n            'inp_idxes': [config.gradient_tensor_idx(layer['out_idxes'][0])],\n            'out_idxes': [div_idx],\n            'inp_shapes': [layer['out_shapes'][0]],\n            'out_shapes': [layer['inp_shapes'][0]],\n            'mask': [],\n        }\n        transcript.append(reshape_layer)\n\n        out_idx = config.new_gradient_tensor(layer['inp_idxes'][0])\n        out_shape = layer['inp_shapes'][0]\n\n        # Divide by the pooling window size (H * W of the input)\n        div = {\n            'layer_type': 'Div',\n            'params': [layer['inp_shapes'][0][1] * layer['inp_shapes'][0][2]],\n            'inp_idxes': [div_idx],\n            'out_idxes': [out_idx],\n            'inp_shapes': [out_shape],\n            'out_shapes': [out_shape],\n            'mask': [],\n        }\n        transcript.append(div)\n\n\nclass Reshape():\n    def __init__(self, layer):\n        return\n\n    def backward(self, layer, transcript, config):\n        reshape_layer = {\n            'layer_type': 'Reshape',\n            'params': [],\n            'inp_idxes': [config.gradient_tensor_idx(layer['out_idxes'][0])],\n            'out_idxes': [config.new_gradient_tensor(layer['inp_idxes'][0])],\n            'inp_shapes': [layer['out_shapes'][0]],\n            'out_shapes': [layer['inp_shapes'][0]],\n            'mask': [],\n        }\n        transcript.append(reshape_layer)\n\n\ndef produce_graph():\n    # Read msgpack file\n    with open(\"examples/v2_1.0_224_truncated/model.msgpack\", \"rb\") as data_file:\n        byte_data = data_file.read()\n    model = msgpack.unpackb(byte_data)\n\n    # TODO: I'm unsure whether the circuit output is always the last indexed tensor\n    all_idxes = [\n        idx\n        for layer in model['layers']\n        for idx in list(layer['out_idxes']) + list(layer['inp_idxes'])\n    ]\n    softmax_output_index = max(all_idxes)\n    circuit_config = CircuitConfig(softmax_output_index + 1)\n    circuit_config.new_label_tensor()\n\n    transcript = []\n    for layer in reversed(model['layers']):\n        fetched_layer = None\n        match layer['layer_type']:\n            case \"Conv2D\":\n                fetched_layer = Conv2D(layer)\n            case \"AveragePool2D\":\n                fetched_layer = AveragePool2D(layer)\n            case \"Softmax\":\n                fetched_layer = Softmax(layer)\n            case _:\n                # Fall back to a pure reshape for layers without a bespoke backward pass\n                fetched_layer = Reshape(layer)\n        print(layer['layer_type'])\n        fetched_layer.backward(layer, transcript, circuit_config)\n        print('----------------')\n\n    model['layers'] += transcript\n    model['inp_idxes'].append(circuit_config.label_tensor_idx)\n    # FIXME: hardcoded output tensor index for the example model\n    model['out_idxes'] = [31]\n\n    packed = msgpack.packb(model, use_bin_type=True)\n    with open(\"./examples/train_graph/train.msgpack\", 'wb') as f:\n        f.write(packed)\n    print(model.keys())\n    return model\n\nmodel = produce_graph()\n\nprint(model.keys())\nmodel['tensors'] = \"\"\nprint(model['inp_idxes'], model['out_idxes'])\n"
  },
  {
    "path": "python/converter.py",
    "content": "import argparse\nimport ast\nfrom typing import Literal, Union\nimport tensorflow as tf\nimport numpy as np\nimport tflite\nimport msgpack\n\ndef get_shape(interpreter: tf.lite.Interpreter, tensor_idx):\n  if tensor_idx == -1:\n    return []\n  tensor = interpreter.get_tensor(tensor_idx)\n  return list(tensor.shape)\n\ndef handle_numpy_or_literal(inp: Union[np.ndarray, Literal[0]]):\n  if isinstance(inp, int):\n    return np.array([inp])\n  return inp\n\ndef get_inputs(op: tflite.Operator):\n  idxes = handle_numpy_or_literal(op.InputsAsNumpy())\n  idxes = idxes.tolist()\n  idxes = list(filter(lambda x: x != -1, idxes))\n  return idxes\n\nclass Converter:\n  def __init__(\n      self, model_path, scale_factor, k, num_cols, num_randoms, use_selectors, commit,\n      expose_output\n    ):\n    self.model_path = model_path\n    self.scale_factor = scale_factor\n    self.k = k\n    self.num_cols = num_cols\n    self.num_randoms = num_randoms\n    self.use_selectors = use_selectors\n    self.commit = commit\n    self.expose_output = expose_output\n\n    self.interpreter = tf.lite.Interpreter(\n      model_path=self.model_path,\n      experimental_preserve_all_tensors=True\n    )\n    self.interpreter.allocate_tensors()\n\n    with open(self.model_path, 'rb') as f:\n      buf = f.read()\n      self.model = tflite.Model.GetRootAsModel(buf, 0)\n    self.graph = self.model.Subgraphs(0)\n\n\n  def valid_activations(self):\n    return [\n      tflite.ActivationFunctionType.NONE,\n      tflite.ActivationFunctionType.RELU,\n      tflite.ActivationFunctionType.RELU6,\n    ]\n\n  def _convert_add(self, op: tflite.Operator, generated_tensors: set):\n    # Get params\n    op_opt = op.BuiltinOptions()\n    if op_opt is None:\n      raise RuntimeError('Add options is None')\n    opt = tflite.AddOptions()\n    opt.Init(op_opt.Bytes, op_opt.Pos)\n    params = [opt.FusedActivationFunction()]\n\n    # Get inputs\n    inputs = get_inputs(op)\n    print(generated_tensors)\n    print('Add inputs: ', inputs)\n    if len(inputs) != 2:\n      raise RuntimeError('Add must have 2 inputs')\n\n    # If both tensors are generated, do nothing\n    print(inputs[0] in generated_tensors, inputs[1] in generated_tensors)\n    if (inputs[0] in generated_tensors) and (inputs[1] in generated_tensors):\n      return ('Add', params)\n\n    nb_generated = (inputs[0] in generated_tensors) + (inputs[1] in generated_tensors)\n    if nb_generated != 1:\n      raise RuntimeError('Add must have 1 generated tensor')\n\n    # Check if there are any negative infinities\n    const_tensor = self.interpreter.get_tensor(inputs[0]) if inputs[0] not in generated_tensors else self.interpreter.get_tensor(inputs[1])\n    if np.any(const_tensor == -np.inf):\n      # Ensure that the constant tensor is all -inf and 0\n      if not np.all(np.logical_or(np.isneginf(const_tensor), const_tensor == 0)):\n        raise RuntimeError('Add constant tensor must be -inf and 0 only')\n      mask = (const_tensor == -np.inf).astype(np.int64)\n      params = [len(mask.shape)] + list(mask.shape)\n      params += mask.flatten().tolist()\n      return ('MaskNegInf', params)\n    else:\n      return ('Add', params)\n\n\n  def to_dict(self, start_layer, end_layer):\n    interpreter = self.interpreter\n    model = self.model\n    graph = self.graph\n    if graph is None:\n      raise RuntimeError('Graph is None')\n\n    input_details = interpreter.get_input_details()\n    output_details = interpreter.get_output_details()\n\n    for inp_detail in 
input_details:\n      inp = np.zeros(inp_detail['shape'], dtype=inp_detail['dtype'])\n      interpreter.set_tensor(inp_detail['index'], inp)\n    # for i, inp in enumerate(inps):\n    #   interpreter.set_tensor(input_details[i]['index'], inp)\n    interpreter.invoke()\n\n    # Get layers\n    generated_tensor_idxes = set()\n    for inp in input_details:\n      generated_tensor_idxes.add(inp['index'])\n\n    layers = []\n    keep_tensors = set()\n    adjusted_tensors = {}\n    for op_idx in range(graph.OperatorsLength()):\n      op = graph.Operators(op_idx)\n      if op is None:\n        raise RuntimeError('Operator is None')\n      model_opcode = model.OperatorCodes(op.OpcodeIndex())\n      if model_opcode is None:\n        raise RuntimeError('Operator code is None')\n      op_code = model_opcode.BuiltinCode()\n\n      # Skip generated tensors\n      for output in handle_numpy_or_literal(op.OutputsAsNumpy()):\n        generated_tensor_idxes.add(output)\n\n      if op_idx < start_layer:\n        continue\n      if op_idx > end_layer:\n        break\n\n      # Keep the input tensors\n      for input in handle_numpy_or_literal(op.InputsAsNumpy()):\n        keep_tensors.add(input)\n\n      # AvgPool2D\n      if op_code == tflite.BuiltinOperator.AVERAGE_POOL_2D:\n        layer_type = 'AveragePool2D'\n        op_opt = op.BuiltinOptions()\n        if op_opt is None:\n          raise RuntimeError('AvgPool2D options is None')\n        opt = tflite.Pool2DOptions()\n        opt.Init(op_opt.Bytes, op_opt.Pos)\n        params = [opt.FilterHeight(), opt.FilterWidth(), opt.StrideH(), opt.StrideW()]\n      elif op_code == tflite.BuiltinOperator.MAX_POOL_2D:\n        layer_type = 'MaxPool2D'\n        op_opt = op.BuiltinOptions()\n        if op_opt is None:\n          raise RuntimeError('MaxPool2D options is None')\n        opt = tflite.Pool2DOptions()\n        opt.Init(op_opt.Bytes, op_opt.Pos)\n        if opt.Padding() == tflite.Padding.SAME:\n          raise NotImplementedError('SAME padding is not supported')\n        if opt.FusedActivationFunction() != tflite.ActivationFunctionType.NONE:\n          raise NotImplementedError('Fused activation is not supported')\n        params = [opt.FilterHeight(), opt.FilterWidth(), opt.StrideH(), opt.StrideW()]\n      # FIXME: hack for Keras... 
not sure why this isn't being converted properly\n      elif op_code == tflite.BuiltinOperator.CUSTOM:\n        layer_type = 'Conv2D'\n        activation = 0\n        weights = self.interpreter.get_tensor(op.Inputs(1))\n        weights = np.transpose(weights, (3, 0, 1, 2))\n        weights = (weights * self.scale_factor).round().astype(np.int64)\n        adjusted_tensors[op.Inputs(1)] = weights\n        params = [0, 1, activation, 1, 1]\n      # Conv2D\n      elif op_code == tflite.BuiltinOperator.CONV_2D:\n        layer_type = 'Conv2D'\n        op_opt = op.BuiltinOptions()\n        if op_opt is None:\n          raise RuntimeError('Conv2D options is None')\n        opt = tflite.Conv2DOptions()\n        opt.Init(op_opt.Bytes, op_opt.Pos)\n        if opt.DilationHFactor() != 1 or opt.DilationWFactor() != 1:\n          raise NotImplementedError('Dilation is not supported')\n        if opt.FusedActivationFunction() not in self.valid_activations():\n          raise NotImplementedError(f'Unsupported activation function at layer {op_idx}')\n        # 0 is Conv2D\n        params = \\\n          [0] + \\\n          [opt.Padding()] + \\\n          [opt.FusedActivationFunction()] + \\\n          [opt.StrideH(), opt.StrideW()]\n      # DepthwiseConv2D\n      elif op_code == tflite.BuiltinOperator.DEPTHWISE_CONV_2D:\n        layer_type = 'Conv2D'\n        op_opt = op.BuiltinOptions()\n        if op_opt is None:\n          raise RuntimeError('DepthwiseConv2D options is None')\n        opt = tflite.DepthwiseConv2DOptions()\n        opt.Init(op_opt.Bytes, op_opt.Pos)\n        if opt.DilationHFactor() != 1 or opt.DilationWFactor() != 1:\n          raise NotImplementedError('Dilation is not supported')\n        if opt.FusedActivationFunction() not in self.valid_activations():\n          raise NotImplementedError(f'Unsupported activation function at layer {op_idx}')\n        # 1 is DepthwiseConv2D\n        params = \\\n          [1] + \\\n          [opt.Padding()] + \\\n          [opt.FusedActivationFunction()] + \\\n          [opt.StrideH(), opt.StrideW()]\n      # Fully connected\n      elif op_code == tflite.BuiltinOperator.FULLY_CONNECTED:\n        layer_type = 'FullyConnected'\n        op_opt = op.BuiltinOptions()\n        if op_opt is None:\n          raise RuntimeError('Fully connected options is None')\n        opt = tflite.FullyConnectedOptions()\n        opt.Init(op_opt.Bytes, op_opt.Pos)\n        if opt.FusedActivationFunction() not in self.valid_activations():\n          raise NotImplementedError(f'Unsupported activation function at layer {op_idx}')\n        params = [opt.FusedActivationFunction()]\n      elif op_code == tflite.BuiltinOperator.BATCH_MATMUL:\n        layer_type = 'BatchMatMul'\n        op_opt = op.BuiltinOptions()\n        if op_opt is None:\n          raise RuntimeError('BatchMatMul options is None')\n        opt = tflite.BatchMatMulOptions()\n        opt.Init(op_opt.Bytes, op_opt.Pos)\n        if opt.AdjX(): raise NotImplementedError('AdjX is not supported')\n        params = [int(opt.AdjX()), int(opt.AdjY())]\n\n      ## Arithmetic\n      # Add\n      elif op_code == tflite.BuiltinOperator.ADD:\n        layer_type, params = self._convert_add(op, generated_tensor_idxes)\n      # Mul\n      elif op_code == tflite.BuiltinOperator.MUL:\n        layer_type = 'Mul'\n        params = []\n      # Sub\n      elif op_code == tflite.BuiltinOperator.SUB:\n        sub_val = interpreter.get_tensor(op.Inputs(1))\n        # TODO: this is a bit of a hack\n
        if np.any(np.isin(sub_val, 10000)):\n          layer_type = 'MaskNegInf'\n          mask = (sub_val == 10000).astype(np.int64)\n          params = [len(mask.shape)] + list(mask.shape)\n          params += mask.flatten().tolist()\n        else:\n          layer_type = 'Sub'\n          params = []\n      # Div\n      elif op_code == tflite.BuiltinOperator.DIV:\n        # Implement division as multiplication by the inverse\n        layer_type = 'Mul'\n        div_val = interpreter.get_tensor(op.Inputs(1))\n        if not isinstance(div_val, np.float32): raise NotImplementedError('Only support one divisor')\n        adjusted_tensors[op.Inputs(1)] = np.array([(self.scale_factor / div_val).round().astype(np.int64)])\n        params = []\n      # Pad\n      elif op_code == tflite.BuiltinOperator.PAD:\n        layer_type = 'Pad'\n        tensor_idx = op.Inputs(1)\n        tensor = interpreter.get_tensor(tensor_idx).flatten().astype(np.int64)\n        params = tensor.tolist()\n      # Softmax\n      elif op_code == tflite.BuiltinOperator.SOFTMAX:\n        layer_type = 'Softmax'\n        # TODO: conditionally determine whether or not to subtract the max\n        # It should depend on the input to the softmax\n        if layers[-1]['layer_type'] == 'MaskNegInf':\n          params = layers[-1]['params']\n        elif layers[-2]['layer_type'] == 'MaskNegInf':\n          params = layers[-2]['params']\n          params = [params[0] - 1] + params[2:]\n        else:\n          params = []\n      # Mean\n      elif op_code == tflite.BuiltinOperator.MEAN:\n        layer_type = 'Mean'\n        inp_shape = interpreter.get_tensor(op.Inputs(0)).shape\n        mean_idxes = interpreter.get_tensor(op.Inputs(1)).flatten().astype(np.int64)\n        if len(mean_idxes) + 2 != len(inp_shape):\n          raise NotImplementedError(f'Only mean over all but one axis is supported: {op_idx}')\n        params = mean_idxes.tolist()\n      elif op_code == tflite.BuiltinOperator.SQUARE:\n        layer_type = 'Square'\n        params = []\n      # Squared difference\n      elif op_code == tflite.BuiltinOperator.SQUARED_DIFFERENCE:\n        layer_type = 'SquaredDifference'\n        params = []\n\n      # Pointwise\n      elif op_code == tflite.BuiltinOperator.RSQRT:\n        layer_type = 'Rsqrt'\n        params = []\n      elif op_code == tflite.BuiltinOperator.LOGISTIC:\n        layer_type = 'Logistic'\n        params = []\n      elif op_code == tflite.BuiltinOperator.TANH:\n        layer_type = 'Tanh'\n        params = []\n      elif op_code == tflite.BuiltinOperator.POW:\n        layer_type = 'Pow'\n        power = interpreter.get_tensor(op.Inputs(1)).flatten().astype(np.float32)\n        if len(power) != 1: raise NotImplementedError(f'Only scalar power is supported: {op_idx}')\n        if power[0] != 3.: raise NotImplementedError('Only power 3 is supported')\n        power = power.round().astype(np.int64)\n        params = power.tolist()\n\n      # The following are no-ops in the sense that they don't change the tensor\n      # However, we need to pass along the right tensors\n      # The param says which input to pass along\n      elif op_code == tflite.BuiltinOperator.SHAPE:\n        layer_type = 'Noop'\n        params = [0]\n      elif op_code == tflite.BuiltinOperator.GATHER:\n        layer_type = 'Noop'\n        params = [0]\n      elif op_code == tflite.BuiltinOperator.REDUCE_PROD:\n        # TODO: not sure if this is in general a no-op\n        layer_type = 'Noop'\n        params = [0]\n
      elif op_code == tflite.BuiltinOperator.STRIDED_SLICE:\n        # FIXME: this is not in general a no-op\n        layer_type = 'Noop'\n        params = [0]\n      elif op_code == tflite.BuiltinOperator.BROADCAST_ARGS:\n        layer_type = 'Noop'\n        params = [0]\n      elif op_code == tflite.BuiltinOperator.BROADCAST_TO:\n        layer_type = 'Noop'\n        params = [0]\n\n      ## Shape\n      elif op_code == tflite.BuiltinOperator.RESHAPE:\n        layer_type = 'Reshape'\n        params = []\n      elif op_code == tflite.BuiltinOperator.TRANSPOSE:\n        layer_type = 'Transpose'\n        params = get_shape(interpreter, op.Inputs(0)) + interpreter.get_tensor(op.Inputs(1)).flatten().astype(np.int64).tolist()\n      elif op_code == tflite.BuiltinOperator.CONCATENATION:\n        # FIXME: This is not in general a no-op\n        layer_type = 'Concatenation'\n        op_opt = op.BuiltinOptions()\n        if op_opt is None:\n          raise RuntimeError('Concatenation options is None')\n        opt = tflite.ConcatenationOptions()\n        opt.Init(op_opt.Bytes, op_opt.Pos)\n        params = [opt.Axis()]\n      elif op_code == tflite.BuiltinOperator.PACK:\n        layer_type = 'Pack'\n        op_opt = op.BuiltinOptions()\n        if op_opt is None:\n          raise RuntimeError('Pack options is None')\n        opt = tflite.PackOptions()\n        opt.Init(op_opt.Bytes, op_opt.Pos)\n        params = [opt.Axis()]\n        if params[0] > 1: raise NotImplementedError(f'Only axis=0,1 supported at layer {op_idx}')\n      elif op_code == tflite.BuiltinOperator.SPLIT:\n        layer_type = 'Split'\n        op_opt = op.BuiltinOptions()\n        if op_opt is None:\n          raise RuntimeError('Split options is None')\n        opt = tflite.SplitOptions()\n        opt.Init(op_opt.Bytes, op_opt.Pos)\n        axis = interpreter.get_tensor(op.Inputs(0)).flatten().astype(np.int64)[0]\n        num_splits = opt.NumSplits()\n        inp = interpreter.get_tensor(op.Inputs(1))\n        if inp.shape[axis] % num_splits != 0:\n          raise NotImplementedError(f'Only equal splits supported at layer {op_idx}')\n        params = [int(axis), num_splits]\n      elif op_code == tflite.BuiltinOperator.SLICE:\n        layer_type = 'Slice'\n        begin = interpreter.get_tensor(op.Inputs(1)).flatten().astype(np.int64).tolist()\n        size = interpreter.get_tensor(op.Inputs(2)).flatten().astype(np.int64).tolist()\n        params = begin + size\n      elif op_code == tflite.BuiltinOperator.RESIZE_NEAREST_NEIGHBOR:\n        layer_type = 'ResizeNearestNeighbor'\n        op_opt = op.BuiltinOptions()\n        if op_opt is None:\n          raise RuntimeError('ResizeNearestNeighbor options is None')\n        opt = tflite.ResizeNearestNeighborOptions()\n        opt.Init(op_opt.Bytes, op_opt.Pos)\n        if opt.AlignCorners():\n          raise NotImplementedError(f'Align corners not supported at layer {op_idx}')\n        if not opt.HalfPixelCenters():\n          raise NotImplementedError(f'Only half pixel centers are supported at layer {op_idx}')\n        # Can take the out shape directly from the tensor\n        params = [int(opt.AlignCorners()), int(opt.HalfPixelCenters())]\n\n      # Not implemented\n      else:\n        op_name = None\n        for attr in dir(tflite.BuiltinOperator):\n          if not attr.startswith('__'):\n            if getattr(tflite.BuiltinOperator, attr) == op_code:\n              op_name = attr\n        raise NotImplementedError('Unsupported operator at layer {}: {}, {}'.format(op_idx, op_code, op_name))\n\n
      inp_idxes = get_inputs(op)\n      # FIXME: hack for testing\n      rsqrt_overflows = [99, 158, 194, 253, 289, 348]\n      if op_idx in rsqrt_overflows:\n        if op_code == tflite.BuiltinOperator.RSQRT:\n          mask = [0, 1]\n        else:\n          mask = []\n      else:\n        mask = []\n      layers.append({\n        'layer_type': layer_type,\n        'inp_idxes': inp_idxes,\n        'inp_shapes': [get_shape(interpreter, inp_idx) for inp_idx in inp_idxes],\n        'out_idxes': [op.Outputs(i) for i in range(op.OutputsLength())],\n        'out_shapes': [get_shape(interpreter, op.Outputs(i)) for i in range(op.OutputsLength())],\n        'params': params,\n        'mask': mask,\n      })\n    print(layers)\n    print()\n\n\n    # Get tensors\n    print('keep tensors:', keep_tensors)\n    tensors = []\n    for tensor_idx in range(graph.TensorsLength()):\n      if tensor_idx not in keep_tensors:\n        continue\n\n      tensor = graph.Tensors(tensor_idx)\n      if tensor is None:\n        raise NotImplementedError('Tensor is None')\n\n      if tensor_idx in generated_tensor_idxes:\n        print(f'skipping generated tensor: {tensor_idx}, {tensor.Name()}')\n        continue\n\n      shape = []\n      for i in range(tensor.ShapeLength()):\n        shape.append(int(tensor.Shape(i)))\n      if shape == []:\n        shape = [1]\n\n      tensor_data = interpreter.get_tensor(tensor_idx)\n      if tensor.Type() == tflite.TensorType.FLOAT32:\n        tensor_data = (tensor_data * self.scale_factor).round().astype(np.int64)\n      elif tensor.Type() == tflite.TensorType.INT32:\n        tensor_data = tensor_data.astype(np.int64)\n      elif tensor.Type() == tflite.TensorType.INT64:\n        # INT64 tensors (e.g. axes and indices) are consumed as params above; skip them\n        continue\n      else:\n        raise NotImplementedError('Unsupported tensor type: {}'.format(tensor.Type()))\n\n      if tensor_idx in adjusted_tensors:\n        tensor_data = adjusted_tensors[tensor_idx]\n        shape = list(tensor_data.shape)\n\n      tensors.append({\n        'idx': tensor_idx,\n        'shape': shape,\n        'data': tensor_data.flatten().tolist(),\n      })\n      # print(tensor_idx, tensor.Type(), tensor.Name(), tensors[-1]['shape'])\n      # print(np.abs(tensor_data).max())\n\n    commit_before = []\n    commit_after = []\n    if self.commit:\n      input_tensors = [inp['index'] for inp in input_details]\n      weight_tensors = [tensor['idx'] for tensor in tensors if tensor['idx'] not in input_tensors]\n      commit_before = [weight_tensors, input_tensors]\n\n      output_tensors = [out['index'] for out in output_details]\n      commit_after = [output_tensors]\n\n    out_idxes = layers[-1]['out_idxes'] if self.expose_output else []\n    d = {\n      'global_sf': self.scale_factor,\n      'k': self.k,\n      'num_cols': self.num_cols,\n      'num_random': self.num_randoms,\n      'inp_idxes': [inp['index'] for inp in input_details],\n      # 'out_idxes': [out['index'] for out in output_details],\n      'out_idxes': out_idxes,\n      'layers': layers,\n      'tensors': tensors,\n      'use_selectors': self.use_selectors,\n      'commit_before': commit_before,\n      'commit_after': commit_after,\n    }\n    print()\n    print(d['layers'][-1])\n    # d['out_idxes'] = [14]\n    print(d.keys())\n    print(d['out_idxes'])\n    return d\n\n  def to_msgpack(self, start_layer, end_layer, use_selectors=True):\n    d = self.to_dict(start_layer, end_layer)\n    model_packed = msgpack.packb(d, use_bin_type=True)\n    d['tensors'] = []\n    config_packed = msgpack.packb(d, 
use_bin_type=True)\n    return model_packed, config_packed\n\n\ndef main():\n  parser = argparse.ArgumentParser()\n  parser.add_argument('--model', type=str, required=True)\n  parser.add_argument('--model_output', type=str, required=True)\n  parser.add_argument('--config_output', type=str, required=True)\n  parser.add_argument('--scale_factor', type=int, default=2**16)\n  parser.add_argument('--k', type=int, default=19)\n  parser.add_argument('--eta', type=float, default=0.001)\n  parser.add_argument('--num_cols', type=int, default=6)\n  parser.add_argument('--use_selectors', action=argparse.BooleanOptionalAction, required=False, default=True)\n  parser.add_argument('--commit', action=argparse.BooleanOptionalAction, required=False, default=False)\n  parser.add_argument('--expose_output', action=argparse.BooleanOptionalAction, required=False, default=True)\n  parser.add_argument('--start_layer', type=int, default=0)\n  parser.add_argument('--end_layer', type=int, default=10000)\n  parser.add_argument('--num_randoms', type=int, default=20001)\n  args = parser.parse_args()\n\n  converter = Converter(\n    args.model,\n    args.scale_factor,\n    args.k,\n    args.num_cols,\n    args.num_randoms,\n    args.use_selectors,\n    args.commit,\n    args.expose_output,\n  )\n\n  model_packed, config_packed = converter.to_msgpack(\n    start_layer=args.start_layer,\n    end_layer=args.end_layer,\n  )\n  if model_packed is None:\n    raise Exception('Failed to convert model')\n\n  with open(args.model_output, 'wb') as f:\n    f.write(model_packed)\n  with open(args.config_output, 'wb') as f:\n    f.write(config_packed)\n\nif __name__ == '__main__':\n  main()\n"
  },
  {
    "path": "python/input_converter.py",
    "content": "import argparse\nimport ast\nimport numpy as np\nimport msgpack\n\ndef main():\n  parser = argparse.ArgumentParser()\n  parser.add_argument('--model_config', type=str, required=True)\n  parser.add_argument('--inputs', type=str, required=True)\n  parser.add_argument('--output', type=str, required=True)\n  args = parser.parse_args()\n\n  inputs = args.inputs.split(',')\n  with open(args.model_config, 'rb') as f:\n    model_config = msgpack.unpackb(f.read())\n  input_idxes = model_config['inp_idxes']\n  scale_factor = model_config['global_sf']\n\n  # Get the input shapes from the layers\n  input_shapes = [[0] for _ in input_idxes]\n  for layer in model_config['layers']:\n    for layer_inp_idx, layer_shape in zip(layer['inp_idxes'], layer['inp_shapes']):\n      for index, inp_idx in enumerate(input_idxes):\n        if layer_inp_idx == inp_idx:\n          input_shapes[index] = layer_shape\n\n  tensors = []\n  for inp, shape, idx in zip(inputs, input_shapes, input_idxes):\n    tensor = np.load(inp).reshape(shape)\n    tensor = (tensor * scale_factor).round().astype(np.int64)\n    tensors.append({\n      'idx': idx,\n      'shape': shape,\n      'data': tensor.flatten().tolist(),\n    })\n\n  packed = msgpack.packb(tensors, use_bin_type=True)\n\n  with open(args.output, 'wb') as f:\n    f.write(packed)\n\n\nif __name__ == '__main__':\n  main()"
  },
  {
    "path": "python/training_converter.py",
    "content": "# A converter for training data\n# Performs the conversion npy -> msgpack\n# TODO: Ensure that training works with models that take in multiple input shapes\n# \n# Shortcut: \n# `python3 python/training_converter.py --input_shapes 7,7,320 --input_idxes 1,0 --output training_data/inputs.msgpack --labels_output training_data/labels.msgpack`\n#\n\nimport argparse\nimport ast\nimport numpy as np\nimport msgpack\nimport os\n\nNUM_LOADS = 1\nSF = 1 << 17\n\ndef main():\n  parser = argparse.ArgumentParser()\n  parser.add_argument('--input_shapes', type=str, required=True)\n  parser.add_argument('--output', type=str, required=True)\n\n  TRAINING_DIRECTORY = './testing/data/pre_last_conv/flowers/train'\n  args = parser.parse_args()\n\n  input_shapes = ast.literal_eval(args.input_shapes)\n\n  loaded = 0\n  tensors = []\n  num_classes = os.listdir(TRAINING_DIRECTORY)\n\n  first_file = \"0.npy\"\n  for file_name in os.listdir(TRAINING_DIRECTORY):\n    if loaded == NUM_LOADS:\n      break\n\n    label = int(first_file[:-4])\n    data_array = np.load(TRAINING_DIRECTORY + '/' + first_file)\n\n    input_shape = input_shapes\n\n    for idx in range(data_array.shape[0]):\n      print(SF)\n      print((np.vstack(data_array) * SF).round().astype(np.int64))\n      tensors.append({\n        'idx': 0,\n        'shape': input_shape,\n        'data': list(map(lambda x: int(x), list((data_array[idx] * SF).round().astype(np.int64).flatten()))),\n      })\n      # represent the label as a one hot encoding\n      one_hot = np.zeros(102)\n      one_hot[label] = SF\n      print(\"IMPORTANT LABEL\", label)\n      print(\"IMPORTANT LABEL\", data_array[idx].flatten()[:500])\n      # print(one_hot.shape())\n      tensors.append({\n        'idx': 11,\n        'shape': (1, 102),\n        'data': list(map(lambda x: int(x), one_hot)),\n      })\n      loaded += 1\n\n      if loaded == NUM_LOADS:\n        break\n\n  packed_inputs = msgpack.packb(tensors, use_bin_type=True)\n\n  # print(tensors)\n  with open(args.output, 'wb') as f:\n    f.write(packed_inputs)\n\nif __name__ == '__main__':\n  main()\n"
  },
  {
    "path": "rustfmt.toml",
    "content": "tab_spaces = 2\nmax_width = 100\n"
  },
  {
    "path": "src/bin/test_circuit.rs",
    "content": "use halo2_proofs::{dev::MockProver, halo2curves::bn256::Fr};\nuse zkml::{\n  model::ModelCircuit,\n  utils::{\n    helpers::get_public_values,\n    loader::{load_model_msgpack, ModelMsgpack},\n  },\n};\n\nfn main() {\n  let config_fname = std::env::args().nth(1).expect(\"config file path\");\n  let inp_fname = std::env::args().nth(2).expect(\"input file path\");\n\n  let config: ModelMsgpack = load_model_msgpack(&config_fname, &inp_fname);\n\n  let circuit = ModelCircuit::<Fr>::generate_from_file(&config_fname, &inp_fname);\n\n  let _prover = MockProver::run(config.k.try_into().unwrap(), &circuit, vec![vec![]]).unwrap();\n  let public_vals = get_public_values();\n\n  let prover = MockProver::run(config.k.try_into().unwrap(), &circuit, vec![public_vals]).unwrap();\n  assert_eq!(prover.verify(), Ok(()));\n}\n"
  },
  {
    "path": "src/bin/time_circuit.rs",
    "content": "use halo2_proofs::halo2curves::{bn256::Fr, pasta::Fp};\nuse zkml::{\n  model::ModelCircuit,\n  utils::{proving_ipa::time_circuit_ipa, proving_kzg::time_circuit_kzg},\n};\n\nfn main() {\n  let config_fname = std::env::args().nth(1).expect(\"config file path\");\n  let inp_fname = std::env::args().nth(2).expect(\"input file path\");\n  let kzg_or_ipa = std::env::args().nth(3).expect(\"kzg or ipa\");\n\n  if kzg_or_ipa != \"kzg\" && kzg_or_ipa != \"ipa\" {\n    panic!(\"Must specify kzg or ipa\");\n  }\n\n  if kzg_or_ipa == \"kzg\" {\n    let circuit = ModelCircuit::<Fr>::generate_from_file(&config_fname, &inp_fname);\n    time_circuit_kzg(circuit);\n  } else {\n    let circuit = ModelCircuit::<Fp>::generate_from_file(&config_fname, &inp_fname);\n    time_circuit_ipa(circuit);\n  }\n}\n"
  },
  {
    "path": "src/bin/verify_circuit.rs",
    "content": "use halo2_proofs::halo2curves::bn256::Fr;\nuse zkml::{\n  model::ModelCircuit,\n  utils::{loader::load_config_msgpack, proving_kzg::verify_circuit_kzg},\n};\n\nfn main() {\n  let config_fname = std::env::args().nth(1).expect(\"config file path\");\n  let vkey_fname = std::env::args().nth(2).expect(\"verification key file path\");\n  let proof_fname = std::env::args().nth(3).expect(\"proof file path\");\n  let public_vals_fname = std::env::args().nth(4).expect(\"public values file path\");\n  let kzg_or_ipa = std::env::args().nth(5).expect(\"kzg or ipa\");\n\n  if kzg_or_ipa != \"kzg\" && kzg_or_ipa != \"ipa\" {\n    panic!(\"Must specify kzg or ipa\");\n  }\n\n  if kzg_or_ipa == \"kzg\" {\n    let config = load_config_msgpack(&config_fname);\n    let circuit = ModelCircuit::<Fr>::generate_from_msgpack(config, false);\n    println!(\"Loaded configuration\");\n    verify_circuit_kzg(circuit, &vkey_fname, &proof_fname, &public_vals_fname);\n  } else {\n    // Serialization of the verification key doesn't seem to be supported for IPA\n    panic!(\"Not implemented\");\n  }\n}\n"
  },
  {
    "path": "src/bin/verify_wav.rs",
    "content": "use std::fs::File;\n\nuse halo2_proofs::{dev::MockProver, halo2curves::bn256::Fr};\nuse zkml::{\n  model::ModelCircuit,\n  utils::{\n    helpers::get_public_values,\n    loader::{load_config_msgpack, ModelMsgpack, TensorMsgpack},\n  },\n};\n\nfn main() {\n  let config_fname = std::env::args().nth(1).expect(\"config file path\");\n  let wav_fname = std::env::args().nth(2).expect(\"wav file path\");\n\n  let mut wav_file = File::open(wav_fname).unwrap();\n  let (_header, data) = wav::read(&mut wav_file).unwrap();\n  let data = match data {\n    wav::BitDepth::Sixteen(data) => data,\n    _ => panic!(\"Unsupported bit depth\"),\n  };\n  let data: Vec<i64> = data.iter().map(|x| *x as i64).collect();\n\n  let base_config = load_config_msgpack(&config_fname);\n\n  let config = ModelMsgpack {\n    tensors: vec![TensorMsgpack {\n      idx: 0,\n      shape: vec![1, data.len().try_into().unwrap()],\n      data: data,\n    }],\n    inp_idxes: vec![0],\n    out_idxes: vec![],\n    layers: vec![],\n    commit_before: Some(vec![]),\n    commit_after: Some(vec![vec![0]]),\n    ..base_config\n  };\n  println!(\"Config: {:?}\", config);\n  let k = config.k;\n  let circuit = ModelCircuit::<Fr>::generate_from_msgpack(config, false);\n\n  let _prover = MockProver::run(k.try_into().unwrap(), &circuit, vec![vec![]]).unwrap();\n  let public_vals: Vec<Fr> = get_public_values();\n  println!(\"Public values: {:?}\", public_vals);\n}\n"
  },
  {
    "path": "src/commitments/commit.rs",
    "content": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};\n\nuse crate::{gadgets::gadget::GadgetConfig, layers::layer::CellRc};\n\npub trait Commit<F: PrimeField> {\n  fn commit(\n    &self,\n    layouter: impl Layouter<F>,\n    gadget_config: Rc<GadgetConfig>,\n    constants: &HashMap<i64, CellRc<F>>,\n    values: &Vec<CellRc<F>>,\n    blinding: CellRc<F>,\n  ) -> Result<Vec<CellRc<F>>, Error>;\n}\n"
  },
  {
    "path": "src/commitments/packer.rs",
    "content": "use std::{\n  cmp::{max, min},\n  collections::{BTreeMap, HashMap},\n  marker::PhantomData,\n  rc::Rc,\n};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter, Value},\n  halo2curves::ff::PrimeField,\n  plonk::{ConstraintSystem, Error, Expression},\n  poly::Rotation,\n};\nuse ndarray::{Array, IxDyn};\n\nuse crate::{\n  gadgets::gadget::{GadgetConfig, GadgetType},\n  layers::layer::{AssignedTensor, CellRc},\n};\n\nconst NUM_BITS_PER_FIELD_ELEM: usize = 254;\n\npub struct PackerConfig<F: PrimeField> {\n  pub num_bits_per_elem: usize,\n  pub num_elem_per_packed: usize,\n  pub num_packed_per_row: usize,\n  pub exponents: Vec<F>,\n  _marker: PhantomData<F>,\n}\n\npub struct PackerChip<F: PrimeField> {\n  pub config: PackerConfig<F>,\n}\n\nimpl<F: PrimeField> PackerChip<F> {\n  pub fn get_exponents(num_bits_per_elem: usize, num_exponents: usize) -> Vec<F> {\n    let mul_val = F::from(1 << num_bits_per_elem);\n    let mut exponents = vec![F::ONE];\n    for _ in 1..num_exponents {\n      exponents.push(exponents[exponents.len() - 1] * mul_val);\n    }\n    exponents\n  }\n\n  pub fn construct(num_bits_per_elem: usize, gadget_config: &GadgetConfig) -> PackerConfig<F> {\n    let columns = &gadget_config.columns;\n\n    let num_elem_per_packed = if NUM_BITS_PER_FIELD_ELEM / num_bits_per_elem > columns.len() - 1 {\n      columns.len() - 1\n    } else {\n      // TODO: for many columns, pack many in a single row\n      NUM_BITS_PER_FIELD_ELEM / num_bits_per_elem\n    };\n    println!(\"column len: {}\", columns.len());\n    println!(\"num_bits_per_elem: {}\", num_bits_per_elem);\n    println!(\"NUM_BITS_PER_FIELD_ELEM: {}\", NUM_BITS_PER_FIELD_ELEM);\n    println!(\"num_elem_per_packed: {}\", num_elem_per_packed);\n\n    let num_packed_per_row = max(\n      1,\n      columns.len() / (num_elem_per_packed * (num_bits_per_elem + 1)),\n    );\n    println!(\"num_packed_per_row: {}\", num_packed_per_row);\n\n    let exponents = Self::get_exponents(num_bits_per_elem, num_elem_per_packed);\n\n    let config = PackerConfig {\n      num_bits_per_elem,\n      num_elem_per_packed,\n      num_packed_per_row,\n      exponents,\n      _marker: PhantomData,\n    };\n    config\n  }\n\n  pub fn configure(\n    meta: &mut ConstraintSystem<F>,\n    packer_config: PackerConfig<F>,\n    gadget_config: GadgetConfig,\n  ) -> GadgetConfig {\n    let selector = meta.complex_selector();\n    let columns = gadget_config.columns;\n    let lookup = gadget_config.tables.get(&GadgetType::InputLookup).unwrap()[0];\n\n    let exponents = &packer_config.exponents;\n\n    let num_bits_per_elem = packer_config.num_bits_per_elem;\n    let shift_val = 1 << (num_bits_per_elem - 1);\n    let shift_val = Expression::Constant(F::from(shift_val as u64));\n\n    meta.create_gate(\"packer\", |meta| {\n      let s = meta.query_selector(selector);\n      let mut constraints = vec![];\n      for i in 0..packer_config.num_packed_per_row {\n        let offset = i * (packer_config.num_elem_per_packed + 1);\n        let inps = columns[offset..offset + packer_config.num_elem_per_packed]\n          .iter()\n          .map(|col| meta.query_advice(*col, Rotation::cur()))\n          .collect::<Vec<_>>();\n\n        let outp = meta.query_advice(\n          columns[offset + packer_config.num_elem_per_packed],\n          Rotation::cur(),\n        );\n\n        let res = inps\n          .into_iter()\n          .zip(exponents.iter())\n          .map(|(inp, exp)| (inp + shift_val.clone()) * (*exp))\n          
.fold(Expression::Constant(F::ZERO), |acc, prod| acc + prod);\n        constraints.push(s.clone() * (res - outp));\n      }\n\n      constraints\n    });\n\n    // Ensure that the weights/inputs are in the correct range\n    for i in 0..packer_config.num_packed_per_row {\n      let offset = i * (packer_config.num_elem_per_packed + 1);\n      for j in 0..packer_config.num_elem_per_packed {\n        meta.lookup(\"packer lookup\", |meta| {\n          let s = meta.query_selector(selector);\n          let inp = meta.query_advice(columns[offset + j], Rotation::cur());\n\n          vec![(s * (inp + shift_val.clone()), lookup)]\n        });\n      }\n    }\n\n    let mut selectors = gadget_config.selectors;\n    selectors.insert(GadgetType::Packer, vec![selector]);\n\n    GadgetConfig {\n      columns,\n      selectors,\n      ..gadget_config\n    }\n  }\n\n  pub fn copy_and_pack_row(\n    &self,\n    mut layouter: impl Layouter<F>,\n    gadget_config: Rc<GadgetConfig>,\n    cells: Vec<CellRc<F>>,\n    zero: &AssignedCell<F, F>,\n  ) -> Result<Vec<CellRc<F>>, Error> {\n    let columns = &gadget_config.columns;\n    let selector = gadget_config.selectors.get(&GadgetType::Packer).unwrap()[0];\n\n    let num_bits_per_elem = gadget_config.num_bits_per_elem;\n    let shift_val = 1 << (num_bits_per_elem - 1);\n    let shift_val = F::from(shift_val as u64);\n\n    let outp = layouter.assign_region(\n      || \"pack row\",\n      |mut region| {\n        if gadget_config.use_selectors {\n          selector.enable(&mut region, 0)?;\n        }\n\n        let mut packed = vec![];\n        for i in 0..self.config.num_packed_per_row {\n          let val_offset = i * self.config.num_elem_per_packed;\n          let col_offset = i * (self.config.num_elem_per_packed + 1);\n\n          let mut vals = cells\n            [val_offset..min(val_offset + self.config.num_elem_per_packed, cells.len())]\n            .iter()\n            .enumerate()\n            .map(|(i, x)| {\n              x.copy_advice(|| \"\", &mut region, columns[col_offset + i], 0)\n                .unwrap();\n              x.value().copied()\n            })\n            .collect::<Vec<_>>();\n\n          // Pad the unfilled slots of this pack with copies of the zero cell. The\n          // range starts at the number of values actually copied into this pack\n          // (vals.len()), not at the full row length, so short rows with more\n          // than one pack per row are padded correctly.\n          let zero_copied = (vals.len()..self.config.num_elem_per_packed)\n            .map(|i| {\n              zero\n                .copy_advice(|| \"\", &mut region, columns[col_offset + i], 0)\n                .unwrap();\n              zero.value().copied()\n            })\n            .collect::<Vec<_>>();\n          vals.extend(zero_copied);\n\n          let res = vals.iter().zip(self.config.exponents.iter()).fold(\n            Value::known(F::ZERO),\n            |acc, (inp, exp)| {\n              let res = acc + (*inp + Value::known(shift_val)) * Value::known(*exp);\n              res\n            },\n          );\n\n          let outp = region.assign_advice(\n            || \"\",\n            columns[col_offset + self.config.num_elem_per_packed],\n            0,\n            || res,\n          )?;\n          packed.push(Rc::new(outp));\n        }\n\n        Ok(packed)\n      },\n    )?;\n\n    Ok(outp)\n  }\n\n  pub fn assign_and_pack_row(\n    &self,\n    mut layouter: impl Layouter<F>,\n    gadget_config: Rc<GadgetConfig>,\n    values: Vec<&F>,\n    zero: &AssignedCell<F, F>,\n  ) -> Result<(Vec<CellRc<F>>, Vec<CellRc<F>>), Error> {\n    let columns = &gadget_config.columns;\n    let selector = gadget_config.selectors.get(&GadgetType::Packer).unwrap()[0];\n\n    let 
num_bits_per_elem = gadget_config.num_bits_per_elem;\n    let shift_val = 1 << (num_bits_per_elem - 1);\n    let shift_val = F::from(shift_val as u64);\n\n    let outp = layouter.assign_region(\n      || \"pack row\",\n      |mut region| {\n        if gadget_config.use_selectors {\n          selector.enable(&mut region, 0)?;\n        }\n\n        let mut packed = vec![];\n        let mut assigned = vec![];\n        for i in 0..self.config.num_packed_per_row {\n          let val_offset = i * self.config.num_elem_per_packed;\n          let col_offset = i * (self.config.num_elem_per_packed + 1);\n\n          let mut values = values\n            [val_offset..min(val_offset + self.config.num_elem_per_packed, values.len())]\n            .iter()\n            .map(|x| **x)\n            .collect::<Vec<_>>();\n          let vals = values\n            .iter()\n            .enumerate()\n            .map(|(i, x)| {\n              let tmp = region\n                .assign_advice(|| \"\", columns[col_offset + i], 0, || Value::known(*x))\n                .unwrap();\n              Rc::new(tmp)\n            })\n            .collect::<Vec<_>>();\n          assigned.extend(vals);\n\n          let zero_vals = (values.len()..self.config.num_elem_per_packed)\n            .map(|i| {\n              zero\n                .copy_advice(|| \"\", &mut region, columns[col_offset + i], 0)\n                .unwrap();\n              F::ZERO\n            })\n            .collect::<Vec<_>>();\n          values.extend(zero_vals);\n\n          let res =\n            values\n              .iter()\n              .zip(self.config.exponents.iter())\n              .fold(F::ZERO, |acc, (inp, exp)| {\n                let res = acc + (*inp + shift_val) * (*exp);\n                res\n              });\n\n          let outp = region.assign_advice(\n            || \"\",\n            columns[col_offset + self.config.num_elem_per_packed],\n            0,\n            || Value::known(res),\n          )?;\n          packed.push(Rc::new(outp));\n        }\n\n        Ok((packed, assigned))\n      },\n    )?;\n\n    Ok(outp)\n  }\n\n  pub fn assign_and_pack(\n    &self,\n    mut layouter: impl Layouter<F>,\n    gadget_config: Rc<GadgetConfig>,\n    constants: &HashMap<i64, CellRc<F>>,\n    tensors: &BTreeMap<i64, Array<F, IxDyn>>,\n  ) -> Result<(BTreeMap<i64, AssignedTensor<F>>, Vec<CellRc<F>>), Error> {\n    let mut values = vec![];\n    for (_, tensor) in tensors {\n      for value in tensor.iter() {\n        values.push(value);\n      }\n    }\n\n    let mut packed = vec![];\n    let mut assigned = vec![];\n    let zero = constants.get(&0).unwrap().clone();\n\n    let num_elems_per_row = self.config.num_packed_per_row * self.config.num_elem_per_packed;\n    for i in 0..(values.len().div_ceil(num_elems_per_row)) {\n      let row =\n        values[i * num_elems_per_row..min((i + 1) * num_elems_per_row, values.len())].to_vec();\n      let (row_packed, row_assigned) = self\n        .assign_and_pack_row(\n          layouter.namespace(|| \"pack row\"),\n          gadget_config.clone(),\n          row,\n          zero.as_ref(),\n        )\n        .unwrap();\n      packed.extend(row_packed);\n      assigned.extend(row_assigned);\n    }\n\n    let mut assigned_tensors = BTreeMap::new();\n    let mut start_idx = 0;\n    for (tensor_id, tensor) in tensors {\n      let num_el = tensor.len();\n      let v = assigned[start_idx..start_idx + num_el].to_vec();\n      let new_tensor = Array::from_shape_vec(tensor.raw_dim(), v).unwrap();\n      
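// Record the reshaped tensor under its id and advance the flat cursor.\n      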
assigned_tensors.insert(*tensor_id, new_tensor);\n      start_idx += num_el;\n    }\n\n    Ok((assigned_tensors, packed))\n  }\n\n  pub fn copy_and_pack(\n    &self,\n    mut layouter: impl Layouter<F>,\n    gadget_config: Rc<GadgetConfig>,\n    constants: &HashMap<i64, CellRc<F>>,\n    tensors: &BTreeMap<i64, AssignedTensor<F>>,\n  ) -> Result<Vec<CellRc<F>>, Error> {\n    let mut values = vec![];\n    for (_, tensor) in tensors {\n      for value in tensor.iter() {\n        values.push(value.clone());\n      }\n    }\n\n    let mut packed = vec![];\n    let zero = constants.get(&0).unwrap().clone();\n\n    let num_elems_per_row = self.config.num_packed_per_row * self.config.num_elem_per_packed;\n    for i in 0..(values.len().div_ceil(num_elems_per_row)) {\n      let row =\n        values[i * num_elems_per_row..min((i + 1) * num_elems_per_row, values.len())].to_vec();\n      let row_packed = self\n        .copy_and_pack_row(\n          layouter.namespace(|| \"pack row\"),\n          gadget_config.clone(),\n          row,\n          zero.as_ref(),\n        )\n        .unwrap();\n      packed.extend(row_packed);\n    }\n\n    Ok(packed)\n  }\n}\n"
  },
  {
    "path": "src/commitments/poseidon_commit.rs",
    "content": "use std::{collections::HashMap, marker::PhantomData, rc::Rc};\n\nuse halo2_gadgets::poseidon::{\n  primitives::{generate_constants, Absorbing, ConstantLength, Domain, Mds, Spec},\n  PaddedWord, PoseidonSpongeInstructions, Pow5Chip, Pow5Config, Sponge,\n};\nuse halo2_proofs::{\n  circuit::Layouter,\n  halo2curves::ff::{FromUniformBytes, PrimeField},\n  plonk::{Advice, Column, ConstraintSystem, Error},\n};\n\nuse crate::{gadgets::gadget::GadgetConfig, layers::layer::CellRc};\n\nuse super::commit::Commit;\n\npub const WIDTH: usize = 3;\npub const RATE: usize = 2;\npub const L: usize = 8 - WIDTH - 1;\n\n#[derive(Clone, Debug)]\n\npub struct PoseidonCommitChip<\n  F: PrimeField + Ord + FromUniformBytes<64>,\n  const WIDTH: usize,\n  const RATE: usize,\n  const L: usize,\n> {\n  pub poseidon_config: Pow5Config<F, WIDTH, RATE>,\n}\n\n#[derive(Debug)]\npub struct P128Pow5T3Gen<F: PrimeField, const SECURE_MDS: usize>(PhantomData<F>);\n\nimpl<F: PrimeField, const SECURE_MDS: usize> P128Pow5T3Gen<F, SECURE_MDS> {\n  pub fn new() -> Self {\n    P128Pow5T3Gen(PhantomData::default())\n  }\n}\n\nimpl<F: FromUniformBytes<64> + Ord, const SECURE_MDS: usize> Spec<F, 3, 2>\n  for P128Pow5T3Gen<F, SECURE_MDS>\n{\n  fn full_rounds() -> usize {\n    8\n  }\n\n  fn partial_rounds() -> usize {\n    56\n  }\n\n  fn sbox(val: F) -> F {\n    val.pow_vartime([5])\n  }\n\n  fn secure_mds() -> usize {\n    SECURE_MDS\n  }\n\n  fn constants() -> (Vec<[F; 3]>, Mds<F, 3>, Mds<F, 3>) {\n    generate_constants::<_, Self, 3, 2>()\n  }\n}\n\n/// A Poseidon hash function, built around a sponge.\n#[derive(Debug)]\npub struct MyHash<\n  F: PrimeField,\n  PoseidonChip: PoseidonSpongeInstructions<F, S, D, T, RATE>,\n  S: Spec<F, T, RATE>,\n  D: Domain<F, RATE>,\n  const T: usize,\n  const RATE: usize,\n> {\n  pub sponge: Sponge<F, PoseidonChip, S, Absorbing<PaddedWord<F>, RATE>, D, T, RATE>,\n}\n\nimpl<F: PrimeField + Ord + FromUniformBytes<64>> PoseidonCommitChip<F, WIDTH, RATE, L> {\n  pub fn configure(\n    meta: &mut ConstraintSystem<F>,\n    // TODO: ??\n    _input: [Column<Advice>; L],\n    state: [Column<Advice>; WIDTH],\n    partial_sbox: Column<Advice>,\n  ) -> PoseidonCommitChip<F, WIDTH, RATE, L> {\n    let rc_a = (0..WIDTH).map(|_| meta.fixed_column()).collect::<Vec<_>>();\n    let rc_b = (0..WIDTH).map(|_| meta.fixed_column()).collect::<Vec<_>>();\n\n    meta.enable_constant(rc_b[0]);\n\n    PoseidonCommitChip {\n      poseidon_config: Pow5Chip::configure::<P128Pow5T3Gen<F, 0>>(\n        meta,\n        state.try_into().unwrap(),\n        partial_sbox,\n        rc_a.try_into().unwrap(),\n        rc_b.try_into().unwrap(),\n      ),\n    }\n  }\n}\n\nimpl<F: PrimeField + Ord + FromUniformBytes<64>> Commit<F>\n  for PoseidonCommitChip<F, WIDTH, RATE, L>\n{\n  fn commit(\n    &self,\n    mut layouter: impl Layouter<F>,\n    _gadget_config: Rc<GadgetConfig>,\n    _constants: &HashMap<i64, CellRc<F>>,\n    values: &Vec<CellRc<F>>,\n    blinding: CellRc<F>,\n  ) -> Result<Vec<CellRc<F>>, Error> {\n    let chip = Pow5Chip::construct(self.poseidon_config.clone());\n    let mut hasher: MyHash<F, Pow5Chip<F, 3, 2>, P128Pow5T3Gen<F, 0>, ConstantLength<L>, 3, 2> =\n      Sponge::new(chip, layouter.namespace(|| \"sponge\"))\n        .map(|sponge| MyHash { sponge })\n        .unwrap();\n\n    let mut new_vals = values\n      .iter()\n      .map(|x| x.clone())\n      .chain(vec![blinding.clone()])\n      .collect::<Vec<_>>();\n    while new_vals.len() % L != 0 {\n      new_vals.push(blinding.clone());\n    }\n    for 
(i, value) in new_vals\n      .iter()\n      .map(|x| PaddedWord::Message((**x).clone()))\n      .chain(<ConstantLength<L> as Domain<F, RATE>>::padding(L).map(PaddedWord::Padding))\n      .enumerate()\n    {\n      hasher\n        .sponge\n        .absorb(layouter.namespace(|| format!(\"absorb {}\", i)), value)\n        .unwrap();\n    }\n    let outp = hasher\n      .sponge\n      .finish_absorbing(layouter.namespace(|| \"finish absorbing\"))\n      .unwrap()\n      .squeeze(layouter.namespace(|| \"squeeze\"))\n      .unwrap();\n    let outp = Rc::new(outp);\n\n    Ok(vec![outp])\n  }\n}\n"
  },
  {
    "path": "src/commitments.rs",
    "content": "pub mod commit;\npub mod packer;\npub mod poseidon_commit;\n"
  },
  {
    "path": "src/gadgets/add_pairs.rs",
    "content": "use std::{marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter, Region},\n  halo2curves::ff::PrimeField,\n  plonk::{ConstraintSystem, Error},\n  poly::Rotation,\n};\n\nuse super::gadget::{Gadget, GadgetConfig, GadgetType};\n\ntype AddPairsConfig = GadgetConfig;\n\npub struct AddPairsChip<F: PrimeField> {\n  config: Rc<AddPairsConfig>,\n  _marker: PhantomData<F>,\n}\n\nimpl<F: PrimeField> AddPairsChip<F> {\n  pub fn construct(config: Rc<AddPairsConfig>) -> Self {\n    Self {\n      config,\n      _marker: PhantomData,\n    }\n  }\n\n  pub fn num_cols_per_op() -> usize {\n    3\n  }\n\n  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {\n    let selector = meta.selector();\n    let columns = gadget_config.columns;\n\n    meta.create_gate(\"add pair\", |meta| {\n      let s = meta.query_selector(selector);\n      let mut constraints = vec![];\n      for i in 0..columns.len() / Self::num_cols_per_op() {\n        let offset = i * Self::num_cols_per_op();\n        let inp1 = meta.query_advice(columns[offset + 0], Rotation::cur());\n        let inp2 = meta.query_advice(columns[offset + 1], Rotation::cur());\n        let outp = meta.query_advice(columns[offset + 2], Rotation::cur());\n\n        let res = inp1 + inp2;\n        constraints.append(&mut vec![s.clone() * (res - outp)])\n      }\n\n      constraints\n    });\n\n    let mut selectors = gadget_config.selectors;\n    selectors.insert(GadgetType::AddPairs, vec![selector]);\n\n    GadgetConfig {\n      columns,\n      selectors,\n      ..gadget_config\n    }\n  }\n}\n\nimpl<F: PrimeField> Gadget<F> for AddPairsChip<F> {\n  fn name(&self) -> String {\n    \"add pairs chip\".to_string()\n  }\n\n  fn num_cols_per_op(&self) -> usize {\n    Self::num_cols_per_op()\n  }\n\n  fn num_inputs_per_row(&self) -> usize {\n    self.config.columns.len() / self.num_cols_per_op()\n  }\n\n  fn num_outputs_per_row(&self) -> usize {\n    self.config.columns.len() / self.num_cols_per_op()\n  }\n\n  fn op_row_region(\n    &self,\n    region: &mut Region<F>,\n    row_offset: usize,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    _single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    let inp1 = &vec_inputs[0];\n    let inp2 = &vec_inputs[1];\n    assert_eq!(inp1.len(), inp2.len());\n\n    let columns = &self.config.columns;\n\n    if self.config.use_selectors {\n      let selector = self.config.selectors.get(&GadgetType::AddPairs).unwrap()[0];\n      selector.enable(region, row_offset)?;\n    }\n\n    let mut outps = vec![];\n    for i in 0..inp1.len() {\n      let offset = i * self.num_cols_per_op();\n      let inp1 = inp1[i].copy_advice(|| \"\", region, columns[offset + 0], row_offset)?;\n      let inp2 = inp2[i].copy_advice(|| \"\", region, columns[offset + 1], row_offset)?;\n      let outp = inp1.value().map(|x: &F| x.to_owned()) + inp2.value().map(|x: &F| x.to_owned());\n\n      let outp = region.assign_advice(|| \"\", columns[offset + 2], row_offset, || outp)?;\n      outps.push(outp);\n    }\n    Ok(outps)\n  }\n\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    let zero = &single_inputs[0];\n\n    let mut inp1 = vec_inputs[0].clone();\n    let mut inp2 = vec_inputs[1].clone();\n    let initial_len = inp1.len();\n    while inp1.len() % 
self.num_inputs_per_row() != 0 {\n      inp1.push(zero);\n      inp2.push(zero);\n    }\n\n    let vec_inputs = vec![inp1, inp2];\n\n    let res = self.op_aligned_rows(\n      layouter.namespace(|| format!(\"forward row {}\", self.name())),\n      &vec_inputs,\n      single_inputs,\n    )?;\n    Ok(res[0..initial_len].to_vec())\n  }\n}\n"
  },
  {
    "path": "src/gadgets/adder.rs",
    "content": "use std::{marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter, Region, Value},\n  halo2curves::ff::PrimeField,\n  plonk::{ConstraintSystem, Error, Expression},\n  poly::Rotation,\n};\n\nuse super::gadget::{Gadget, GadgetConfig, GadgetType};\n\ntype AdderConfig = GadgetConfig;\n\npub struct AdderChip<F: PrimeField> {\n  config: Rc<AdderConfig>,\n  _marker: PhantomData<F>,\n}\n\nimpl<F: PrimeField> AdderChip<F> {\n  pub fn construct(config: Rc<AdderConfig>) -> Self {\n    Self {\n      config,\n      _marker: PhantomData,\n    }\n  }\n\n  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {\n    let selector = meta.selector();\n    let columns = gadget_config.columns;\n\n    meta.create_gate(\"adder gate\", |meta| {\n      let s = meta.query_selector(selector);\n      let gate_inp = columns[0..columns.len() - 1]\n        .iter()\n        .map(|col| meta.query_advice(*col, Rotation::cur()))\n        .collect::<Vec<_>>();\n      let gate_output = meta.query_advice(*columns.last().unwrap(), Rotation::cur());\n\n      let res = gate_inp\n        .iter()\n        .fold(Expression::Constant(F::ZERO), |a, b| a + b.clone());\n\n      vec![s * (res - gate_output)]\n    });\n\n    let mut selectors = gadget_config.selectors;\n    selectors.insert(GadgetType::Adder, vec![selector]);\n\n    GadgetConfig {\n      columns,\n      selectors,\n      ..gadget_config\n    }\n  }\n}\n\n// NOTE: The forward pass of the adder adds _everything_ into one cell\nimpl<F: PrimeField> Gadget<F> for AdderChip<F> {\n  fn name(&self) -> String {\n    \"adder\".to_string()\n  }\n\n  fn num_cols_per_op(&self) -> usize {\n    self.config.columns.len()\n  }\n\n  fn num_inputs_per_row(&self) -> usize {\n    self.config.columns.len() - 1\n  }\n\n  fn num_outputs_per_row(&self) -> usize {\n    1\n  }\n\n  fn op_row_region(\n    &self,\n    region: &mut Region<F>,\n    row_offset: usize,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    _single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    assert_eq!(vec_inputs.len(), 1);\n    let inp = &vec_inputs[0];\n\n    if self.config.use_selectors {\n      let selector = self.config.selectors.get(&GadgetType::Adder).unwrap()[0];\n      selector.enable(region, row_offset)?;\n    }\n\n    inp\n      .iter()\n      .enumerate()\n      .map(|(i, cell)| cell.copy_advice(|| \"\", region, self.config.columns[i], row_offset))\n      .collect::<Result<Vec<_>, _>>()?;\n\n    let e = inp.iter().fold(Value::known(F::ZERO), |a, b| {\n      a + b.value().map(|x: &F| x.to_owned())\n    });\n    let res = region.assign_advice(\n      || \"\",\n      *self.config.columns.last().unwrap(),\n      row_offset,\n      || e,\n    )?;\n\n    Ok(vec![res])\n  }\n\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    assert_eq!(single_inputs.len(), 1);\n\n    let mut inputs = vec_inputs[0].clone();\n    let zero = single_inputs[0].clone();\n\n    while inputs.len() % self.num_inputs_per_row() != 0 {\n      inputs.push(&zero);\n    }\n\n    let mut outputs = self.op_aligned_rows(\n      layouter.namespace(|| \"adder forward\"),\n      &vec![inputs],\n      single_inputs,\n    )?;\n    while outputs.len() != 1 {\n      while outputs.len() % self.num_inputs_per_row() != 0 {\n        outputs.push(zero.clone());\n      }\n 
     let tmp = outputs.iter().collect::<Vec<_>>();\n      outputs = self.op_aligned_rows(\n        layouter.namespace(|| \"adder forward\"),\n        &vec![tmp],\n        single_inputs,\n      )?;\n    }\n\n    Ok(outputs)\n  }\n}\n"
  },
  {
    "path": "src/gadgets/bias_div_floor_relu6.rs",
    "content": "use std::{collections::HashMap, marker::PhantomData};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter, Region},\n  halo2curves::ff::PrimeField,\n  plonk::{ConstraintSystem, Error, Expression},\n  poly::Rotation,\n};\n\nuse crate::gadgets::gadget::convert_to_u64;\n\nuse super::gadget::{Gadget, GadgetConfig, GadgetType};\n\ntype BiasDivFloorRelu6Config = GadgetConfig;\n\nconst SHIFT_MIN_VAL: i64 = -(1 << 30);\n\npub struct BiasDivFloorRelu6Chip<F: PrimeField> {\n  config: BiasDivFloorRelu6Config,\n  _marker: PhantomData<F>,\n}\n\nimpl<F: PrimeField> BiasDivFloorRelu6Chip<F> {\n  pub fn construct(config: BiasDivFloorRelu6Config) -> Self {\n    Self {\n      config,\n      _marker: PhantomData,\n    }\n  }\n\n  pub fn get_map(scale_factor: u64, num_rows: i64, div_outp_min_val: i64) -> HashMap<i64, i64> {\n    let div_val = scale_factor;\n    let div_outp_min_val = div_outp_min_val;\n\n    let mut map = HashMap::new();\n    for i in 0..num_rows {\n      let shifted = i + div_outp_min_val;\n      let val = shifted.clamp(0, 6 * div_val as i64);\n      map.insert(i as i64, val);\n    }\n    map\n  }\n\n  pub fn num_cols_per_op() -> usize {\n    5\n  }\n\n  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {\n    let selector = meta.complex_selector();\n    let sf = Expression::Constant(F::from(gadget_config.scale_factor));\n    let columns = gadget_config.columns;\n\n    let mod_lookup = meta.lookup_table_column();\n    let relu_lookup = meta.lookup_table_column();\n    let div_lookup = meta.lookup_table_column();\n\n    meta.create_gate(\"bias_mul\", |meta| {\n      let s = meta.query_selector(selector);\n\n      let mut constraints = vec![];\n      for op_idx in 0..columns.len() / Self::num_cols_per_op() {\n        let offset = op_idx * Self::num_cols_per_op();\n        let inp = meta.query_advice(columns[offset + 0], Rotation::cur());\n        let bias = meta.query_advice(columns[offset + 1], Rotation::cur());\n        let div_res = meta.query_advice(columns[offset + 2], Rotation::cur());\n        let mod_res = meta.query_advice(columns[offset + 3], Rotation::cur());\n\n        constraints.push(s.clone() * (inp - (sf.clone() * (div_res - bias) + mod_res)));\n      }\n\n      constraints\n    });\n\n    for op_idx in 0..columns.len() / Self::num_cols_per_op() {\n      let offset = op_idx * Self::num_cols_per_op();\n      meta.lookup(\"bias_div_relu6 lookup\", |meta| {\n        let s = meta.query_selector(selector);\n        let mod_res = meta.query_advice(columns[offset + 3], Rotation::cur());\n\n        // Constrains that the modulus \\in [0, DIV_VAL)\n        vec![(s.clone() * mod_res.clone(), mod_lookup)]\n      });\n      meta.lookup(\"bias_div_relu6 lookup\", |meta| {\n        let s = meta.query_selector(selector);\n        let div = meta.query_advice(columns[offset + 2], Rotation::cur());\n        let outp = meta.query_advice(columns[offset + 4], Rotation::cur());\n        let div_outp_min_val = Expression::Constant(F::from((-SHIFT_MIN_VAL) as u64));\n\n        // Constrains that output \\in [0, 6 * SF]\n        vec![\n          (s.clone() * outp, relu_lookup),\n          (s * (div + div_outp_min_val), div_lookup),\n        ]\n      });\n    }\n\n    let mut selectors = gadget_config.selectors;\n    selectors.insert(GadgetType::BiasDivFloorRelu6, vec![selector]);\n\n    let mut tables = gadget_config.tables;\n    tables.insert(\n      GadgetType::BiasDivFloorRelu6,\n      vec![mod_lookup, relu_lookup, div_lookup],\n    
);\n\n    let mut maps = gadget_config.maps;\n    let relu_map = Self::get_map(\n      gadget_config.scale_factor,\n      gadget_config.num_rows as i64,\n      gadget_config.div_outp_min_val,\n    );\n    maps.insert(GadgetType::BiasDivFloorRelu6, vec![relu_map]);\n\n    GadgetConfig {\n      columns,\n      selectors,\n      tables,\n      maps,\n      ..gadget_config\n    }\n  }\n}\n\nimpl<F: PrimeField> Gadget<F> for BiasDivFloorRelu6Chip<F> {\n  fn name(&self) -> String {\n    \"BiasDivRelu6\".to_string()\n  }\n\n  fn num_cols_per_op(&self) -> usize {\n    Self::num_cols_per_op()\n  }\n\n  fn num_inputs_per_row(&self) -> usize {\n    self.config.columns.len() / self.num_cols_per_op()\n  }\n\n  fn num_outputs_per_row(&self) -> usize {\n    self.num_inputs_per_row()\n  }\n\n  fn op_row_region(\n    &self,\n    region: &mut Region<F>,\n    row_offset: usize,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    _single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    let div_val = self.config.scale_factor as i64;\n\n    let div_outp_min_val_i64 = -self.config.div_outp_min_val;\n\n    let div_inp_min_val_pos_i64 = -SHIFT_MIN_VAL;\n    let div_inp_min_val_pos = F::from(div_inp_min_val_pos_i64 as u64);\n\n    let inp = &vec_inputs[0];\n    let bias = &vec_inputs[1];\n    assert_eq!(inp.len(), bias.len());\n    assert_eq!(inp.len() % self.num_inputs_per_row(), 0);\n\n    let relu_map = &self\n      .config\n      .maps\n      .get(&GadgetType::BiasDivFloorRelu6)\n      .unwrap()[0];\n\n    if self.config.use_selectors {\n      let selector = self\n        .config\n        .selectors\n        .get(&GadgetType::BiasDivFloorRelu6)\n        .unwrap()[0];\n      selector.enable(region, row_offset)?;\n    }\n\n    let mut outp_cells = vec![];\n    for (i, (inp, bias)) in inp.iter().zip(bias.iter()).enumerate() {\n      let offset = i * self.num_cols_per_op();\n\n      let inp_f = inp.value().map(|x: &F| x.to_owned());\n      let bias_f = bias.value().map(|x: &F| {\n        let a = *x + div_inp_min_val_pos;\n        let a = convert_to_u64(&a) as i64 - div_inp_min_val_pos_i64;\n        a\n      });\n      let div_mod_res = inp_f.map(|x: F| {\n        let x_pos = x + div_inp_min_val_pos;\n        let inp = convert_to_u64(&x_pos);\n        // println!(\"inp: {:?}, bias: {:?}, x_pos: {:?}\", inp, bias, x_pos);\n        let div_res = inp as i64 / div_val - (div_inp_min_val_pos_i64 / div_val);\n        let mod_res = inp as i64 % div_val;\n        // println!(\"div_res: {:?}, mod_res: {:?}\", div_res, mod_res);\n        (div_res, mod_res)\n      });\n      let div_res = div_mod_res.map(|x: (i64, i64)| x.0) + bias_f;\n      let mod_res = div_mod_res.map(|x: (i64, i64)| x.1);\n\n      let outp = div_res.map(|x: i64| {\n        let mut x_pos = x - div_outp_min_val_i64;\n        if !relu_map.contains_key(&(x_pos)) {\n          println!(\"x: {}, x_pos: {}\", x, x_pos);\n          x_pos = 0;\n        }\n        let outp_val = relu_map.get(&(x_pos)).unwrap();\n        // println!(\"x: {}, x_pos: {}, outp_val: {}\", x, x_pos, outp_val);\n        F::from(*outp_val as u64)\n      });\n\n      // Assign inp, bias\n      inp.copy_advice(|| \"\", region, self.config.columns[offset + 0], row_offset)?;\n      bias.copy_advice(|| \"\", region, self.config.columns[offset + 1], row_offset)?;\n\n      // Assign div_res, mod_res\n      let div_res_cell = region\n        .assign_advice(\n          || \"div_res\",\n          self.config.columns[offset + 2],\n          row_offset,\n        
  || {\n            div_res.map(|x: i64| {\n              F::from((x - div_outp_min_val_i64) as u64) - F::from(-div_outp_min_val_i64 as u64)\n            })\n          },\n        )\n        .unwrap();\n      let _mod_res_cell = region\n        .assign_advice(\n          || \"mod_res\",\n          self.config.columns[offset + 3],\n          row_offset,\n          || mod_res.map(|x: i64| F::from(x as u64)),\n        )\n        .unwrap();\n\n      let outp_cell = region\n        .assign_advice(\n          || \"outp\",\n          self.config.columns[offset + 4],\n          row_offset,\n          || outp.map(|x: F| x.to_owned()),\n        )\n        .unwrap();\n\n      // outp_cells.push((outp_cell, div_res_cell));\n      outp_cells.push(outp_cell);\n      outp_cells.push(div_res_cell);\n    }\n\n    Ok(outp_cells)\n  }\n\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    let mut inps = vec_inputs[0].clone();\n    let mut biases = vec_inputs[1].clone();\n\n    // Needed to pad: bias - bias = 0\n    let default = biases[0].clone();\n    while inps.len() % self.num_inputs_per_row() != 0 {\n      inps.push(&default);\n      biases.push(&default);\n    }\n\n    let res = self.op_aligned_rows(\n      layouter.namespace(|| \"bias_div_relu6\"),\n      &vec![inps, biases],\n      single_inputs,\n    )?;\n    Ok(res)\n  }\n}\n"
  },
  {
    "path": "src/gadgets/bias_div_round_relu6.rs",
    "content": "use std::{collections::HashMap, marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter, Region, Value},\n  halo2curves::ff::PrimeField,\n  plonk::{ConstraintSystem, Error, Expression},\n  poly::Rotation,\n};\n\nuse crate::gadgets::gadget::convert_to_u64;\n\nuse super::gadget::{Gadget, GadgetConfig, GadgetType};\n\ntype BiasDivRoundRelu6Config = GadgetConfig;\n\nconst NUM_COLS_PER_OP: usize = 5;\n\npub struct BiasDivRoundRelu6Chip<F: PrimeField> {\n  config: Rc<BiasDivRoundRelu6Config>,\n  _marker: PhantomData<F>,\n}\n\nimpl<F: PrimeField> BiasDivRoundRelu6Chip<F> {\n  pub fn construct(config: Rc<BiasDivRoundRelu6Config>) -> Self {\n    Self {\n      config,\n      _marker: PhantomData,\n    }\n  }\n\n  pub fn get_map(scale_factor: u64, min_val: i64, num_rows: i64) -> HashMap<i64, i64> {\n    let div_val = scale_factor;\n\n    let mut map = HashMap::new();\n    for i in 0..num_rows {\n      let shifted = i + min_val;\n      let val = shifted.clamp(0, 6 * div_val as i64);\n      map.insert(i as i64, val);\n    }\n    map\n  }\n\n  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {\n    let selector = meta.complex_selector();\n    let sf = Expression::Constant(F::from(gadget_config.scale_factor));\n    let two = Expression::Constant(F::from(2));\n    let columns = gadget_config.columns;\n\n    let mut tables = gadget_config.tables;\n    let div_lookup = tables.get(&GadgetType::InputLookup).unwrap()[0];\n    let relu_lookup = meta.lookup_table_column();\n\n    meta.create_gate(\"bias_mul\", |meta| {\n      let s = meta.query_selector(selector);\n\n      let mut constraints = vec![];\n      for op_idx in 0..columns.len() / NUM_COLS_PER_OP {\n        let offset = op_idx * NUM_COLS_PER_OP;\n        let inp = meta.query_advice(columns[offset + 0], Rotation::cur());\n        let bias = meta.query_advice(columns[offset + 1], Rotation::cur());\n        let div_res = meta.query_advice(columns[offset + 2], Rotation::cur());\n        let mod_res = meta.query_advice(columns[offset + 3], Rotation::cur());\n\n        // ((div - bias) * 2 + mod) * sf = 2 * inp + sf\n        constraints.push(\n          s.clone()\n            * (two.clone() * inp + sf.clone()\n              - (sf.clone() * two.clone() * (div_res - bias) + mod_res)),\n        );\n      }\n\n      constraints\n    });\n\n    for op_idx in 0..columns.len() / NUM_COLS_PER_OP {\n      let offset = op_idx * NUM_COLS_PER_OP;\n      meta.lookup(\"bias_div_relu6 lookup\", |meta| {\n        let s = meta.query_selector(selector);\n        let mod_res = meta.query_advice(columns[offset + 3], Rotation::cur());\n\n        // Constrains that the modulus \\in [0, DIV_VAL)\n        // div_val - mod_res \\in [0, max_val)\n        vec![(s.clone() * (two.clone() * sf.clone() - mod_res), div_lookup)]\n      });\n      meta.lookup(\"bias_div_relu6 lookup\", |meta| {\n        let s = meta.query_selector(selector);\n        let div = meta.query_advice(columns[offset + 2], Rotation::cur());\n        let outp = meta.query_advice(columns[offset + 4], Rotation::cur());\n        let div_outp_min_val = gadget_config.div_outp_min_val;\n        let div_outp_min_val = Expression::Constant(F::from((-div_outp_min_val) as u64));\n\n        // Constrains that output \\in [0, 6 * SF]\n        vec![\n          (s.clone() * (div + div_outp_min_val), div_lookup),\n          (s.clone() * outp, relu_lookup),\n        ]\n      });\n    }\n\n    let mut selectors = gadget_config.selectors;\n  
  selectors.insert(GadgetType::BiasDivRoundRelu6, vec![selector]);\n\n    tables.insert(GadgetType::BiasDivRoundRelu6, vec![relu_lookup]);\n\n    let mut maps = gadget_config.maps;\n    let relu_map = Self::get_map(\n      gadget_config.scale_factor,\n      gadget_config.min_val,\n      gadget_config.num_rows as i64,\n    );\n    maps.insert(GadgetType::BiasDivRoundRelu6, vec![relu_map]);\n\n    GadgetConfig {\n      columns,\n      selectors,\n      tables,\n      maps,\n      ..gadget_config\n    }\n  }\n}\n\nimpl<F: PrimeField> Gadget<F> for BiasDivRoundRelu6Chip<F> {\n  fn name(&self) -> String {\n    \"BiasDivRelu6\".to_string()\n  }\n\n  fn num_cols_per_op(&self) -> usize {\n    NUM_COLS_PER_OP\n  }\n\n  fn num_inputs_per_row(&self) -> usize {\n    self.config.columns.len() / NUM_COLS_PER_OP\n  }\n\n  fn num_outputs_per_row(&self) -> usize {\n    self.num_inputs_per_row() * 2\n  }\n\n  fn load_lookups(&self, mut layouter: impl Layouter<F>) -> Result<(), Error> {\n    let map = &self.config.maps[&GadgetType::BiasDivRoundRelu6][0];\n\n    let relu_lookup = self.config.tables[&GadgetType::BiasDivRoundRelu6][0];\n\n    layouter\n      .assign_table(\n        || \"bdr round div/relu lookup\",\n        |mut table| {\n          for i in 0..self.config.num_rows {\n            let i = i as i64;\n            let val = map.get(&i).unwrap();\n            table\n              .assign_cell(\n                || \"relu lookup\",\n                relu_lookup,\n                i as usize,\n                || Value::known(F::from(*val as u64)),\n              )\n              .unwrap();\n          }\n          Ok(())\n        },\n      )\n      .unwrap();\n\n    Ok(())\n  }\n\n  fn op_row_region(\n    &self,\n    region: &mut Region<F>,\n    row_offset: usize,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    _single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    let div_val = self.config.scale_factor as i64;\n\n    let div_outp_min_val_i64 = self.config.div_outp_min_val;\n\n    let div_inp_min_val_pos_i64 = -self.config.shift_min_val;\n    let div_inp_min_val_pos = F::from(div_inp_min_val_pos_i64 as u64);\n\n    let inp = &vec_inputs[0];\n    let bias = &vec_inputs[1];\n    assert_eq!(inp.len(), bias.len());\n    assert_eq!(inp.len() % self.num_inputs_per_row(), 0);\n\n    let relu_map = &self\n      .config\n      .maps\n      .get(&GadgetType::BiasDivRoundRelu6)\n      .unwrap()[0];\n\n    if self.config.use_selectors {\n      let selector = self\n        .config\n        .selectors\n        .get(&GadgetType::BiasDivRoundRelu6)\n        .unwrap()[0];\n      selector.enable(region, row_offset).unwrap();\n    }\n\n    let mut outp_cells = vec![];\n    for (i, (inp, bias)) in inp.iter().zip(bias.iter()).enumerate() {\n      let offset = i * NUM_COLS_PER_OP;\n\n      let inp_f = inp.value().map(|x: &F| x.to_owned());\n      let bias_f = bias.value().map(|x: &F| {\n        let a = *x + div_inp_min_val_pos;\n        let a = convert_to_u64(&a) as i64 - div_inp_min_val_pos_i64;\n        a\n      });\n      let div_mod_res = inp_f.map(|x: F| {\n        let x_pos = x + div_inp_min_val_pos;\n        let inp = convert_to_u64(&x_pos) as i64;\n        let div_inp = 2 * inp + div_val;\n        let div_res = div_inp / (2 * div_val) - div_inp_min_val_pos_i64 / div_val;\n        let mod_res = div_inp % (2 * div_val);\n        (div_res, mod_res)\n      });\n      let div_res = div_mod_res.map(|x: (i64, i64)| x.0) + bias_f;\n      let mod_res = div_mod_res.map(|x: (i64, 
i64)| x.1);\n\n      let outp = div_res.map(|x: i64| {\n        let mut x_pos = x - div_outp_min_val_i64;\n        if !relu_map.contains_key(&(x_pos)) {\n          println!(\"x: {}, x_pos: {}\", x, x_pos);\n          x_pos = 0;\n        }\n        let outp_val = relu_map.get(&(x_pos)).unwrap();\n        F::from(*outp_val as u64)\n      });\n\n      // Assign inp, bias\n      inp\n        .copy_advice(|| \"\", region, self.config.columns[offset + 0], row_offset)\n        .unwrap();\n      bias\n        .copy_advice(|| \"\", region, self.config.columns[offset + 1], row_offset)\n        .unwrap();\n\n      // Assign div_res, mod_res\n      let div_res_cell = region\n        .assign_advice(\n          || \"div_res\",\n          self.config.columns[offset + 2],\n          row_offset,\n          || {\n            div_res.map(|x: i64| {\n              F::from((x - div_outp_min_val_i64) as u64) - F::from(-div_outp_min_val_i64 as u64)\n            })\n          },\n        )\n        .unwrap();\n      let _mod_res_cell = region\n        .assign_advice(\n          || \"mod_res\",\n          self.config.columns[offset + 3],\n          row_offset,\n          || mod_res.map(|x: i64| F::from(x as u64)),\n        )\n        .unwrap();\n\n      let outp_cell = region\n        .assign_advice(\n          || \"outp\",\n          self.config.columns[offset + 4],\n          row_offset,\n          || outp.map(|x: F| x.to_owned()),\n        )\n        .unwrap();\n\n      // outp_cells.push((outp_cell, div_res_cell));\n      outp_cells.push(outp_cell);\n      outp_cells.push(div_res_cell);\n    }\n\n    Ok(outp_cells)\n  }\n\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    let mut inps = vec_inputs[0].clone();\n    let mut biases = vec_inputs[1].clone();\n    let initial_len = inps.len();\n\n    // Needed to pad: bias - bias = 0\n    let default = biases[0].clone();\n    while inps.len() % self.num_inputs_per_row() != 0 {\n      inps.push(&default);\n      biases.push(&default);\n    }\n\n    let res = self\n      .op_aligned_rows(\n        layouter.namespace(|| \"bias_div_relu6\"),\n        &vec![inps, biases],\n        single_inputs,\n      )\n      .unwrap();\n    Ok(res[0..initial_len * 2].to_vec())\n  }\n}\n"
  },
  {
    "path": "src/gadgets/dot_prod.rs",
    "content": "use std::{marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter, Region},\n  halo2curves::ff::PrimeField,\n  plonk::{Advice, Column, ConstraintSystem, Error, Expression},\n  poly::Rotation,\n};\n\nuse crate::gadgets::adder::AdderChip;\n\nuse super::gadget::{Gadget, GadgetConfig, GadgetType};\n\ntype DotProductConfig = GadgetConfig;\n\npub struct DotProductChip<F: PrimeField> {\n  config: Rc<DotProductConfig>,\n  _marker: PhantomData<F>,\n}\n\nimpl<F: PrimeField> DotProductChip<F> {\n  pub fn construct(config: Rc<DotProductConfig>) -> Self {\n    Self {\n      config,\n      _marker: PhantomData,\n    }\n  }\n\n  pub fn get_input_columns(config: &GadgetConfig) -> Vec<Column<Advice>> {\n    let num_inputs = (config.columns.len() - 1) / 2;\n    config.columns[0..num_inputs].to_vec()\n  }\n\n  pub fn get_weight_columns(config: &GadgetConfig) -> Vec<Column<Advice>> {\n    let num_inputs = (config.columns.len() - 1) / 2;\n    config.columns[num_inputs..config.columns.len() - 1].to_vec()\n  }\n\n  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {\n    let selector = meta.selector();\n    let columns = &gadget_config.columns;\n\n    meta.create_gate(\"dot product gate\", |meta| {\n      let s = meta.query_selector(selector);\n      let gate_inp = DotProductChip::<F>::get_input_columns(&gadget_config)\n        .iter()\n        .map(|col| meta.query_advice(*col, Rotation::cur()))\n        .collect::<Vec<_>>();\n      let gate_weights = DotProductChip::<F>::get_weight_columns(&gadget_config)\n        .iter()\n        .map(|col| meta.query_advice(*col, Rotation::cur()))\n        .collect::<Vec<_>>();\n      let gate_output = meta.query_advice(columns[columns.len() - 1], Rotation::cur());\n\n      let res = gate_inp\n        .iter()\n        .zip(gate_weights)\n        .map(|(a, b)| a.clone() * b.clone())\n        .fold(Expression::Constant(F::ZERO), |a, b| a + b);\n\n      vec![s * (res - gate_output)]\n    });\n\n    let mut selectors = gadget_config.selectors;\n    selectors.insert(GadgetType::DotProduct, vec![selector]);\n\n    GadgetConfig {\n      columns: gadget_config.columns,\n      selectors,\n      ..gadget_config\n    }\n  }\n}\n\nimpl<F: PrimeField> Gadget<F> for DotProductChip<F> {\n  fn name(&self) -> String {\n    \"dot product\".to_string()\n  }\n\n  fn num_cols_per_op(&self) -> usize {\n    self.config.columns.len()\n  }\n\n  fn num_inputs_per_row(&self) -> usize {\n    (self.config.columns.len() - 1) / 2\n  }\n\n  fn num_outputs_per_row(&self) -> usize {\n    1\n  }\n\n  // The caller is expected to pad the inputs\n  fn op_row_region(\n    &self,\n    region: &mut Region<F>,\n    row_offset: usize,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    assert_eq!(vec_inputs.len(), 2);\n\n    let inp = &vec_inputs[0];\n    let weights = &vec_inputs[1];\n    assert_eq!(inp.len(), weights.len());\n    assert_eq!(inp.len(), self.num_inputs_per_row());\n\n    let zero = &single_inputs[0];\n\n    if self.config.use_selectors {\n      let selector = self.config.selectors.get(&GadgetType::DotProduct).unwrap()[0];\n      selector.enable(region, row_offset).unwrap();\n    }\n\n    let inp_cols = DotProductChip::<F>::get_input_columns(&self.config);\n    inp\n      .iter()\n      .enumerate()\n      .map(|(i, cell)| cell.copy_advice(|| \"\", region, inp_cols[i], row_offset))\n      
.collect::<Result<Vec<_>, _>>()\n      .unwrap();\n\n    let weight_cols = DotProductChip::<F>::get_weight_columns(&self.config);\n    weights\n      .iter()\n      .enumerate()\n      .map(|(i, cell)| cell.copy_advice(|| \"\", region, weight_cols[i], row_offset))\n      .collect::<Result<Vec<_>, _>>()\n      .unwrap();\n\n    // All columns need to be assigned\n    if self.config.columns.len() % 2 == 0 {\n      zero\n        .copy_advice(\n          || \"\",\n          region,\n          self.config.columns[self.config.columns.len() - 2],\n          row_offset,\n        )\n        .unwrap();\n    }\n\n    let e = inp\n      .iter()\n      .zip(weights.iter())\n      .map(|(a, b)| a.value().map(|x: &F| *x) * b.value())\n      .reduce(|a, b| a + b)\n      .unwrap();\n\n    let res = region\n      .assign_advice(\n        || \"\",\n        self.config.columns[self.config.columns.len() - 1],\n        row_offset,\n        || e,\n      )\n      .unwrap();\n\n    Ok(vec![res])\n  }\n\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    assert_eq!(vec_inputs.len(), 2);\n    assert_eq!(single_inputs.len(), 1);\n    let zero = &single_inputs[0];\n\n    let mut inputs = vec_inputs[0].clone();\n    let mut weights = vec_inputs[1].clone();\n    while inputs.len() % self.num_inputs_per_row() != 0 {\n      inputs.push(&zero);\n      weights.push(&zero);\n    }\n\n    let outputs = layouter\n      .assign_region(\n        || \"dot prod rows\",\n        |mut region| {\n          let mut outputs = vec![];\n          for i in 0..inputs.len() / self.num_inputs_per_row() {\n            let inp =\n              inputs[i * self.num_inputs_per_row()..(i + 1) * self.num_inputs_per_row()].to_vec();\n            let weights =\n              weights[i * self.num_inputs_per_row()..(i + 1) * self.num_inputs_per_row()].to_vec();\n            let res = self\n              .op_row_region(&mut region, i, &vec![inp, weights], &vec![zero.clone()])\n              .unwrap();\n            outputs.push(res[0].clone());\n          }\n          Ok(outputs)\n        },\n      )\n      .unwrap();\n\n    let adder_chip = AdderChip::<F>::construct(self.config.clone());\n    let tmp = outputs.iter().map(|x| x).collect::<Vec<_>>();\n    Ok(\n      adder_chip\n        .forward(\n          layouter.namespace(|| \"dot prod adder\"),\n          &vec![tmp],\n          single_inputs,\n        )\n        .unwrap(),\n    )\n  }\n}\n"
  },
  {
    "path": "src/gadgets/gadget.rs",
    "content": "use std::{\n  collections::{BTreeSet, HashMap},\n  sync::Arc,\n};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter, Region},\n  halo2curves::group::ff::PrimeField,\n  plonk::{Advice, Column, Error, Fixed, Selector, TableColumn},\n};\nuse num_bigint::{BigUint, ToBigUint};\nuse num_traits::cast::ToPrimitive;\n\n#[derive(Clone, Copy, Debug, Hash, Eq, PartialEq, PartialOrd, Ord)]\npub enum GadgetType {\n  AddPairs,\n  Adder,\n  BiasDivRoundRelu6,\n  BiasDivFloorRelu6,\n  DotProduct,\n  Exp,\n  Logistic,\n  Max,\n  Pow,\n  Relu,\n  Rsqrt,\n  Sqrt,\n  SqrtBig,\n  Square,\n  SquaredDiff,\n  SubPairs,\n  Tanh,\n  MulPairs,\n  VarDivRound,\n  VarDivRoundBig,\n  VarDivRoundBig3,\n  Packer,      // This is a special case\n  InputLookup, // Dummy placeholder for the input lookup\n  Update,\n}\n\n#[derive(Clone, Debug, Default)]\npub struct GadgetConfig {\n  pub used_gadgets: Arc<BTreeSet<GadgetType>>,\n  pub columns: Vec<Column<Advice>>,\n  pub fixed_columns: Vec<Column<Fixed>>,\n  pub selectors: HashMap<GadgetType, Vec<Selector>>,\n  pub tables: HashMap<GadgetType, Vec<TableColumn>>,\n  pub maps: HashMap<GadgetType, Vec<HashMap<i64, i64>>>,\n  pub scale_factor: u64,\n  pub shift_min_val: i64, // MUST be divisible by 2 * scale_factor\n  pub num_rows: usize,\n  pub num_cols: usize,\n  pub k: usize,\n  pub eta: f64,\n  pub min_val: i64,\n  pub max_val: i64,\n  pub div_outp_min_val: i64,\n  pub use_selectors: bool,\n  pub commit_before: Vec<Vec<i64>>,\n  pub commit_after: Vec<Vec<i64>>,\n  pub num_bits_per_elem: i64,\n}\n\n// TODO: refactor\npub fn convert_to_u64<F: PrimeField>(x: &F) -> u64 {\n  let big = BigUint::from_bytes_le(x.to_repr().as_ref());\n  let big_digits = big.to_u64_digits();\n  if big_digits.len() > 2 {\n    println!(\"big_digits: {:?}\", big_digits);\n  }\n  if big_digits.len() == 1 {\n    big_digits[0] as u64\n  } else if big_digits.len() == 0 {\n    0\n  } else {\n    panic!();\n  }\n}\n\npub fn convert_to_u128<F: PrimeField>(x: &F) -> u128 {\n  let big = BigUint::from_bytes_le(x.to_repr().as_ref());\n  big.to_biguint().unwrap().to_u128().unwrap()\n}\n\npub trait Gadget<F: PrimeField> {\n  fn name(&self) -> String;\n\n  fn num_cols_per_op(&self) -> usize;\n\n  fn num_inputs_per_row(&self) -> usize;\n\n  fn num_outputs_per_row(&self) -> usize;\n\n  fn load_lookups(&self, _layouter: impl Layouter<F>) -> Result<(), Error> {\n    Ok(())\n  }\n\n  fn op_row_region(\n    &self,\n    region: &mut Region<F>,\n    row_offset: usize,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error>;\n\n  // The caller is required to ensure that the inputs are of the correct length.\n  fn op_aligned_rows(\n    &self,\n    mut layouter: impl Layouter<F>,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    // Sanity check inputs\n    for inp in vec_inputs.iter() {\n      assert_eq!(inp.len() % self.num_inputs_per_row(), 0);\n    }\n\n    let outputs = layouter.assign_region(\n      || format!(\"gadget {}\", self.name()),\n      |mut region| {\n        let mut outputs = vec![];\n        for i in 0..vec_inputs[0].len() / self.num_inputs_per_row() {\n          let mut vec_inputs_row = vec![];\n          for inp in vec_inputs.iter() {\n            vec_inputs_row.push(\n              inp[i * self.num_inputs_per_row()..(i + 1) * self.num_inputs_per_row()].to_vec(),\n            );\n          }\n          
let row_outputs = self.op_row_region(&mut region, i, &vec_inputs_row, &single_inputs)?;\n          assert_eq!(row_outputs.len(), self.num_outputs_per_row());\n          outputs.extend(row_outputs);\n        }\n        Ok(outputs)\n      },\n    )?;\n\n    Ok(outputs)\n  }\n\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    self.op_aligned_rows(\n      layouter.namespace(|| format!(\"forward row {}\", self.name())),\n      vec_inputs,\n      single_inputs,\n    )\n  }\n}\n"
  },
  {
    "path": "src/gadgets/input_lookup.rs",
    "content": "use std::{marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter, Region, Value},\n  halo2curves::ff::PrimeField,\n  plonk::{ConstraintSystem, Error},\n};\n\nuse super::gadget::{Gadget, GadgetConfig, GadgetType};\n\npub struct InputLookupChip<F: PrimeField> {\n  config: Rc<GadgetConfig>,\n  _marker: PhantomData<F>,\n}\n\nimpl<F: PrimeField> InputLookupChip<F> {\n  pub fn construct(config: Rc<GadgetConfig>) -> Self {\n    Self {\n      config,\n      _marker: PhantomData,\n    }\n  }\n\n  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {\n    let lookup = meta.lookup_table_column();\n    let mut tables = gadget_config.tables;\n    tables.insert(GadgetType::InputLookup, vec![lookup]);\n\n    GadgetConfig {\n      tables,\n      ..gadget_config\n    }\n  }\n}\n\nimpl<F: PrimeField> Gadget<F> for InputLookupChip<F> {\n  fn load_lookups(&self, mut layouter: impl Layouter<F>) -> Result<(), Error> {\n    let lookup = self.config.tables[&GadgetType::InputLookup][0];\n\n    layouter\n      .assign_table(\n        || \"input lookup\",\n        |mut table| {\n          for i in 0..self.config.num_rows as i64 {\n            table\n              .assign_cell(\n                || \"mod lookup\",\n                lookup,\n                i as usize,\n                || Value::known(F::from(i as u64)),\n              )\n              .unwrap();\n          }\n          Ok(())\n        },\n      )\n      .unwrap();\n\n    Ok(())\n  }\n\n  fn name(&self) -> String {\n    panic!(\"InputLookupChip should not be called directly\")\n  }\n\n  fn num_cols_per_op(&self) -> usize {\n    panic!(\"InputLookupChip should not be called directly\")\n  }\n\n  fn num_inputs_per_row(&self) -> usize {\n    panic!(\"InputLookupChip should not be called directly\")\n  }\n\n  fn num_outputs_per_row(&self) -> usize {\n    panic!(\"InputLookupChip should not be called directly\")\n  }\n\n  fn op_row_region(\n    &self,\n    _region: &mut Region<F>,\n    _row_offset: usize,\n    _vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    _single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    panic!(\"InputLookupChip should not be called directly\")\n  }\n}\n"
  },
  {
    "path": "src/gadgets/max.rs",
    "content": "use std::{marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter, Region},\n  halo2curves::ff::PrimeField,\n  plonk::{ConstraintSystem, Error},\n  poly::Rotation,\n};\n\nuse crate::gadgets::gadget::convert_to_u64;\n\nuse super::gadget::{Gadget, GadgetConfig, GadgetType};\n\npub struct MaxChip<F: PrimeField> {\n  config: Rc<GadgetConfig>,\n  _marker: PhantomData<F>,\n}\n\nimpl<F: PrimeField> MaxChip<F> {\n  pub fn construct(config: Rc<GadgetConfig>) -> Self {\n    Self {\n      config,\n      _marker: PhantomData,\n    }\n  }\n\n  pub fn num_cols_per_op() -> usize {\n    3\n  }\n\n  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {\n    let selector = meta.complex_selector();\n    let columns = gadget_config.columns;\n    let tables = gadget_config.tables;\n\n    let inp_lookup = tables.get(&GadgetType::InputLookup).unwrap()[0];\n\n    meta.create_gate(\"max arithmetic\", |meta| {\n      let s = meta.query_selector(selector);\n      let mut constraints = vec![];\n      for i in 0..columns.len() / Self::num_cols_per_op() {\n        let offset = i * Self::num_cols_per_op();\n        let inp1 = meta.query_advice(columns[offset + 0], Rotation::cur());\n        let inp2 = meta.query_advice(columns[offset + 1], Rotation::cur());\n        let outp = meta.query_advice(columns[offset + 2], Rotation::cur());\n\n        constraints.push(s.clone() * (inp1 - outp.clone()) * (inp2 - outp))\n      }\n      constraints\n    });\n\n    for idx in 0..columns.len() / Self::num_cols_per_op() {\n      meta.lookup(\"max inp1\", |meta| {\n        let s = meta.query_selector(selector);\n        let offset = idx * Self::num_cols_per_op();\n        let inp1 = meta.query_advice(columns[offset + 0], Rotation::cur());\n        let outp = meta.query_advice(columns[offset + 2], Rotation::cur());\n\n        vec![(s * (outp - inp1), inp_lookup)]\n      });\n      meta.lookup(\"max inp2\", |meta| {\n        let s = meta.query_selector(selector);\n        let offset = idx * Self::num_cols_per_op();\n        let inp2 = meta.query_advice(columns[offset + 1], Rotation::cur());\n        let outp = meta.query_advice(columns[offset + 2], Rotation::cur());\n\n        vec![(s * (outp - inp2), inp_lookup)]\n      });\n    }\n\n    let mut selectors = gadget_config.selectors;\n    selectors.insert(GadgetType::Max, vec![selector]);\n\n    GadgetConfig {\n      columns,\n      selectors,\n      tables,\n      ..gadget_config\n    }\n  }\n}\n\nimpl<F: PrimeField> Gadget<F> for MaxChip<F> {\n  fn name(&self) -> String {\n    \"max\".to_string()\n  }\n\n  fn num_cols_per_op(&self) -> usize {\n    3\n  }\n\n  fn num_inputs_per_row(&self) -> usize {\n    self.config.columns.len() / self.num_cols_per_op() * 2\n  }\n\n  fn num_outputs_per_row(&self) -> usize {\n    self.config.columns.len() / self.num_cols_per_op()\n  }\n\n  fn op_row_region(\n    &self,\n    region: &mut Region<F>,\n    row_offset: usize,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    _single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    assert_eq!(vec_inputs.len(), 1);\n    let inp = &vec_inputs[0];\n\n    if self.config.use_selectors {\n      let selector = self.config.selectors.get(&GadgetType::Max).unwrap()[0];\n      selector.enable(region, row_offset)?;\n    }\n\n    let min_val_pos = F::from((-self.config.shift_min_val) as u64);\n\n    let mut outp = vec![];\n\n    let chunks: Vec<&[&AssignedCell<F, F>]> = 
inp.chunks(self.num_outputs_per_row()).collect();\n    let i1 = chunks[0];\n    let i2 = chunks[1];\n    for (idx, (inp1, inp2)) in i1.iter().zip(i2.iter()).enumerate() {\n      let offset = idx * self.num_cols_per_op();\n      inp1\n        .copy_advice(|| \"\", region, self.config.columns[offset + 0], row_offset)\n        .unwrap();\n      inp2\n        .copy_advice(|| \"\", region, self.config.columns[offset + 1], row_offset)\n        .unwrap();\n\n      let max = inp1.value().zip(inp2.value()).map(|(a, b)| {\n        let a = convert_to_u64(&(*a + min_val_pos));\n        let b = convert_to_u64(&(*b + min_val_pos));\n        let max = a.max(b);\n        let max = F::from(max) - min_val_pos;\n        max\n      });\n\n      let res = region\n        .assign_advice(|| \"\", self.config.columns[offset + 2], row_offset, || max)\n        .unwrap();\n      outp.push(res);\n    }\n\n    Ok(outp)\n  }\n\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    let mut inputs = vec_inputs[0].clone();\n    let first = inputs[0];\n\n    while inputs.len() % self.num_inputs_per_row() != 0 {\n      inputs.push(first);\n    }\n\n    // TODO: pretty sure this is correct but check\n    let num_iters = inputs.len().div_ceil(self.num_inputs_per_row()) + self.num_inputs_per_row();\n\n    let mut outputs = self.op_aligned_rows(\n      layouter.namespace(|| \"max forward\"),\n      &vec![inputs],\n      single_inputs,\n    )?;\n    for _ in 0..num_iters {\n      while outputs.len() % self.num_inputs_per_row() != 0 {\n        outputs.push(first.clone());\n      }\n      let tmp = outputs.iter().map(|x| x).collect::<Vec<_>>();\n      outputs = self.op_aligned_rows(\n        layouter.namespace(|| \"max forward\"),\n        &vec![tmp],\n        single_inputs,\n      )?;\n    }\n\n    outputs = vec![outputs.into_iter().next().unwrap()];\n\n    Ok(outputs)\n  }\n}\n"
  },
  {
    "path": "src/gadgets/mul_pairs.rs",
    "content": "use std::{marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter, Region},\n  halo2curves::ff::PrimeField,\n  plonk::{ConstraintSystem, Error},\n  poly::Rotation,\n};\n\nuse super::gadget::{Gadget, GadgetConfig, GadgetType};\n\ntype MulPairsConfig = GadgetConfig;\n\npub struct MulPairsChip<F: PrimeField> {\n  config: Rc<MulPairsConfig>,\n  _marker: PhantomData<F>,\n}\n\nimpl<F: PrimeField> MulPairsChip<F> {\n  pub fn construct(config: Rc<MulPairsConfig>) -> Self {\n    Self {\n      config,\n      _marker: PhantomData,\n    }\n  }\n\n  pub fn num_cols_per_op() -> usize {\n    3\n  }\n\n  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {\n    let selector = meta.selector();\n    let columns = gadget_config.columns;\n\n    meta.create_gate(\"mul pair\", |meta| {\n      let s = meta.query_selector(selector);\n      let mut constraints = vec![];\n      for i in 0..columns.len() / Self::num_cols_per_op() {\n        let offset = i * Self::num_cols_per_op();\n        let inp1 = meta.query_advice(columns[offset + 0], Rotation::cur());\n        let inp2 = meta.query_advice(columns[offset + 1], Rotation::cur());\n        let outp = meta.query_advice(columns[offset + 2], Rotation::cur());\n\n        let res = inp1 * inp2;\n        constraints.append(&mut vec![s.clone() * (res - outp)])\n      }\n\n      constraints\n    });\n\n    let mut selectors = gadget_config.selectors;\n    selectors.insert(GadgetType::MulPairs, vec![selector]);\n\n    GadgetConfig {\n      columns,\n      selectors,\n      ..gadget_config\n    }\n  }\n}\n\nimpl<F: PrimeField> Gadget<F> for MulPairsChip<F> {\n  fn name(&self) -> String {\n    \"MulPairs\".to_string()\n  }\n\n  fn num_cols_per_op(&self) -> usize {\n    Self::num_cols_per_op()\n  }\n\n  fn num_inputs_per_row(&self) -> usize {\n    self.config.columns.len() / self.num_cols_per_op()\n  }\n\n  fn num_outputs_per_row(&self) -> usize {\n    self.config.columns.len() / self.num_cols_per_op()\n  }\n\n  // TODO: This + below is basically copied from add pairs - make arithmetic generic\n  fn op_row_region(\n    &self,\n    region: &mut Region<F>,\n    row_offset: usize,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    _single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    let inp1 = &vec_inputs[0];\n    let inp2 = &vec_inputs[1];\n    assert_eq!(inp1.len(), inp2.len());\n\n    let columns = &self.config.columns;\n\n    if self.config.use_selectors {\n      let selector = self.config.selectors.get(&GadgetType::MulPairs).unwrap()[0];\n      selector.enable(region, row_offset)?;\n    }\n\n    let mut outps = vec![];\n    for i in 0..inp1.len() {\n      let offset = i * self.num_cols_per_op();\n      let inp1 = inp1[i].copy_advice(|| \"\", region, columns[offset + 0], row_offset)?;\n      let inp2 = inp2[i].copy_advice(|| \"\", region, columns[offset + 1], row_offset)?;\n      let outp = inp1.value().map(|x: &F| x.to_owned()) * inp2.value().map(|x: &F| x.to_owned());\n\n      let outp = region.assign_advice(|| \"\", columns[offset + 2], row_offset, || outp)?;\n      outps.push(outp);\n    }\n    Ok(outps)\n  }\n\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    let zero = &single_inputs[0];\n\n    let mut inp1 = vec_inputs[0].clone();\n    let mut inp2 = 
vec_inputs[1].clone();\n    let initial_len = inp1.len();\n    while inp1.len() % self.num_inputs_per_row() != 0 {\n      inp1.push(zero);\n      inp2.push(zero);\n    }\n\n    let vec_inputs = vec![inp1, inp2];\n\n    let res = self.op_aligned_rows(\n      layouter.namespace(|| format!(\"forward row {}\", self.name())),\n      &vec_inputs,\n      single_inputs,\n    )?;\n    Ok(res[0..initial_len].to_vec())\n  }\n}\n"
  },
  {
    "path": "src/gadgets/nonlinear/exp.rs",
    "content": "use std::{collections::HashMap, marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter, Region},\n  halo2curves::ff::PrimeField,\n  plonk::{ConstraintSystem, Error},\n};\n\nuse super::{\n  super::gadget::{Gadget, GadgetConfig, GadgetType},\n  non_linearity::NonLinearGadget,\n};\n\ntype ExpGadgetConfig = GadgetConfig;\n\n// IMPORTANT: this return exp(x) * SF\npub struct ExpGadgetChip<F: PrimeField> {\n  config: Rc<ExpGadgetConfig>,\n  _marker: PhantomData<F>,\n}\n\nimpl<F: PrimeField> ExpGadgetChip<F> {\n  pub fn construct(config: Rc<ExpGadgetConfig>) -> Self {\n    Self {\n      config,\n      _marker: PhantomData,\n    }\n  }\n\n  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {\n    <ExpGadgetChip<F> as NonLinearGadget<F>>::configure(meta, gadget_config, GadgetType::Exp)\n  }\n}\n\nimpl<F: PrimeField> NonLinearGadget<F> for ExpGadgetChip<F> {\n  fn generate_map(scale_factor: u64, min_val: i64, num_rows: i64) -> HashMap<i64, i64> {\n    let mut map = HashMap::new();\n    for i in 0..num_rows {\n      let shifted = i + min_val;\n      let x = (shifted as f64) / (scale_factor as f64);\n      let exp = x.exp();\n      let exp = (exp * ((scale_factor * scale_factor) as f64)).round() as i64;\n      map.insert(i as i64, exp);\n    }\n    map\n  }\n\n  fn get_map(&self) -> &HashMap<i64, i64> {\n    &self.config.maps.get(&GadgetType::Exp).unwrap()[0]\n  }\n\n  fn get_selector(&self) -> halo2_proofs::plonk::Selector {\n    self.config.selectors.get(&GadgetType::Exp).unwrap()[0]\n  }\n}\n\nimpl<F: PrimeField> Gadget<F> for ExpGadgetChip<F> {\n  fn name(&self) -> String {\n    \"Exp\".to_string()\n  }\n\n  fn num_cols_per_op(&self) -> usize {\n    <ExpGadgetChip<F> as NonLinearGadget<F>>::num_cols_per_op()\n  }\n\n  fn num_inputs_per_row(&self) -> usize {\n    self.config.columns.len() / self.num_cols_per_op()\n  }\n\n  fn num_outputs_per_row(&self) -> usize {\n    self.config.columns.len() / self.num_cols_per_op()\n  }\n\n  fn load_lookups(&self, layouter: impl Layouter<F>) -> Result<(), Error> {\n    NonLinearGadget::load_lookups(self, layouter, self.config.clone(), GadgetType::Exp)?;\n    Ok(())\n  }\n\n  fn op_row_region(\n    &self,\n    region: &mut Region<F>,\n    row_offset: usize,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    NonLinearGadget::op_row_region(\n      self,\n      region,\n      row_offset,\n      vec_inputs,\n      single_inputs,\n      self.config.clone(),\n    )\n  }\n\n  fn forward(\n    &self,\n    layouter: impl halo2_proofs::circuit::Layouter<F>,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    NonLinearGadget::forward(self, layouter, vec_inputs, single_inputs)\n  }\n}\n"
  },
  {
    "path": "src/gadgets/nonlinear/logistic.rs",
    "content": "use std::{collections::HashMap, marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter, Region},\n  halo2curves::ff::PrimeField,\n  plonk::{ConstraintSystem, Error},\n};\n\nuse super::{\n  super::gadget::{Gadget, GadgetConfig, GadgetType},\n  non_linearity::NonLinearGadget,\n};\n\npub struct LogisticGadgetChip<F: PrimeField> {\n  config: Rc<GadgetConfig>,\n  _marker: PhantomData<F>,\n}\n\nimpl<F: PrimeField> LogisticGadgetChip<F> {\n  pub fn construct(config: Rc<GadgetConfig>) -> Self {\n    Self {\n      config,\n      _marker: PhantomData,\n    }\n  }\n\n  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {\n    <LogisticGadgetChip<F> as NonLinearGadget<F>>::configure(\n      meta,\n      gadget_config,\n      GadgetType::Logistic,\n    )\n  }\n}\n\nimpl<F: PrimeField> NonLinearGadget<F> for LogisticGadgetChip<F> {\n  fn generate_map(scale_factor: u64, min_val: i64, num_rows: i64) -> HashMap<i64, i64> {\n    let mut map = HashMap::new();\n    for i in 0..num_rows {\n      let shifted = i + min_val;\n      let x = (shifted as f64) / (scale_factor as f64);\n      let logistic = 1. / (1. + (-x).exp());\n      let logistic = (logistic * ((scale_factor) as f64)).round() as i64;\n      map.insert(i as i64, logistic);\n    }\n\n    map\n  }\n\n  fn get_map(&self) -> &HashMap<i64, i64> {\n    &self.config.maps.get(&GadgetType::Logistic).unwrap()[0]\n  }\n\n  fn get_selector(&self) -> halo2_proofs::plonk::Selector {\n    self.config.selectors.get(&GadgetType::Logistic).unwrap()[0]\n  }\n}\n\nimpl<F: PrimeField> Gadget<F> for LogisticGadgetChip<F> {\n  fn name(&self) -> String {\n    \"LogisticChip\".to_string()\n  }\n\n  fn num_cols_per_op(&self) -> usize {\n    <LogisticGadgetChip<F> as NonLinearGadget<F>>::num_cols_per_op()\n  }\n\n  fn num_inputs_per_row(&self) -> usize {\n    self.config.columns.len() / self.num_cols_per_op()\n  }\n\n  fn num_outputs_per_row(&self) -> usize {\n    self.config.columns.len() / self.num_cols_per_op()\n  }\n\n  fn load_lookups(&self, layouter: impl Layouter<F>) -> Result<(), Error> {\n    NonLinearGadget::load_lookups(self, layouter, self.config.clone(), GadgetType::Logistic)?;\n    Ok(())\n  }\n\n  fn op_row_region(\n    &self,\n    region: &mut Region<F>,\n    row_offset: usize,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    NonLinearGadget::op_row_region(\n      self,\n      region,\n      row_offset,\n      vec_inputs,\n      single_inputs,\n      self.config.clone(),\n    )\n  }\n\n  fn forward(\n    &self,\n    layouter: impl halo2_proofs::circuit::Layouter<F>,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    NonLinearGadget::forward(self, layouter, vec_inputs, single_inputs)\n  }\n}\n"
  },
  {
    "path": "src/gadgets/nonlinear/non_linearity.rs",
    "content": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter, Region, Value},\n  halo2curves::ff::PrimeField,\n  plonk::{ConstraintSystem, Error, Expression, Selector},\n  poly::Rotation,\n};\n\nuse crate::gadgets::gadget::convert_to_u128;\n\nuse super::super::gadget::Gadget;\nuse super::super::gadget::{GadgetConfig, GadgetType};\n\nconst NUM_COLS_PER_OP: usize = 2;\n\npub trait NonLinearGadget<F: PrimeField>: Gadget<F> {\n  fn generate_map(scale_factor: u64, min_val: i64, num_rows: i64) -> HashMap<i64, i64>;\n\n  fn get_map(&self) -> &HashMap<i64, i64>;\n\n  fn get_selector(&self) -> Selector;\n\n  fn num_cols_per_op() -> usize {\n    NUM_COLS_PER_OP\n  }\n\n  fn configure(\n    meta: &mut ConstraintSystem<F>,\n    gadget_config: GadgetConfig,\n    gadget_type: GadgetType,\n  ) -> GadgetConfig {\n    let selector = meta.complex_selector();\n    let columns = gadget_config.columns;\n\n    let mut tables = gadget_config.tables;\n    let inp_lookup = tables.get(&GadgetType::InputLookup).unwrap()[0];\n    let outp_lookup = meta.lookup_table_column();\n\n    for op_idx in 0..columns.len() / NUM_COLS_PER_OP {\n      let offset = op_idx * NUM_COLS_PER_OP;\n      meta.lookup(\"non-linear lookup\", |meta| {\n        let s = meta.query_selector(selector);\n        let inp = meta.query_advice(columns[offset + 0], Rotation::cur());\n        let outp = meta.query_advice(columns[offset + 1], Rotation::cur());\n        let shift_val = gadget_config.min_val;\n        let shift_val_pos = Expression::Constant(F::from((-shift_val) as u64));\n\n        vec![\n          (s.clone() * (inp + shift_val_pos), inp_lookup),\n          (s.clone() * outp, outp_lookup),\n        ]\n      });\n    }\n\n    let mut selectors = gadget_config.selectors;\n    selectors.insert(gadget_type, vec![selector]);\n\n    tables.insert(gadget_type, vec![inp_lookup, outp_lookup]);\n\n    let mut maps = gadget_config.maps;\n    let non_linear_map = Self::generate_map(\n      gadget_config.scale_factor,\n      gadget_config.min_val,\n      gadget_config.num_rows as i64,\n    );\n    maps.insert(gadget_type, vec![non_linear_map]);\n\n    GadgetConfig {\n      columns,\n      selectors,\n      tables,\n      maps,\n      ..gadget_config\n    }\n  }\n\n  fn load_lookups(\n    &self,\n    mut layouter: impl Layouter<F>,\n    config: Rc<GadgetConfig>,\n    gadget_type: GadgetType,\n  ) -> Result<(), Error> {\n    let map = self.get_map();\n    let table_col = config.tables.get(&gadget_type).unwrap()[1];\n\n    let shift_pos_i64 = -config.shift_min_val;\n    let shift_pos = F::from(shift_pos_i64 as u64);\n    layouter.assign_table(\n      || \"non linear table\",\n      |mut table| {\n        for i in 0..config.num_rows {\n          let i = i as i64;\n          // FIXME: refactor this\n          let tmp = *map.get(&i).unwrap();\n          let val = if i == 0 {\n            F::ZERO\n          } else {\n            if tmp >= 0 {\n              F::from(tmp as u64)\n            } else {\n              let tmp = tmp + shift_pos_i64;\n              F::from(tmp as u64) - shift_pos\n            }\n          };\n          table.assign_cell(\n            || \"non linear cell\",\n            table_col,\n            i as usize,\n            || Value::known(val),\n          )?;\n        }\n        Ok(())\n      },\n    )?;\n    Ok(())\n  }\n\n  fn op_row_region(\n    &self,\n    region: &mut Region<F>,\n    row_offset: usize,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    _single_inputs: 
&Vec<&AssignedCell<F, F>>,\n    gadget_config: Rc<GadgetConfig>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    let columns = &gadget_config.columns;\n    let inp = &vec_inputs[0];\n    let map = self.get_map();\n    let shift_val_pos_i64 = -gadget_config.shift_min_val;\n    let shift_val_pos = F::from(shift_val_pos_i64 as u64);\n    let min_val = gadget_config.min_val;\n\n    if gadget_config.use_selectors {\n      let selector = self.get_selector();\n      selector.enable(region, row_offset)?;\n    }\n\n    let mut outps = vec![];\n    for i in 0..inp.len() {\n      let offset = i * 2;\n      inp[i].copy_advice(|| \"\", region, columns[offset + 0], row_offset)?;\n      let outp = inp[i].value().map(|x: &F| {\n        let pos = convert_to_u128(&(*x + shift_val_pos)) as i128 - shift_val_pos_i64 as i128;\n        let x = pos as i64 - min_val;\n        let val = *map.get(&x).unwrap();\n        if x == 0 {\n          F::ZERO\n        } else {\n          if val >= 0 {\n            F::from(val as u64)\n          } else {\n            let val_pos = val + shift_val_pos_i64;\n            F::from(val_pos as u64) - F::from(shift_val_pos_i64 as u64)\n          }\n        }\n      });\n\n      let outp =\n        region.assign_advice(|| \"nonlinearity\", columns[offset + 1], row_offset, || outp)?;\n      outps.push(outp);\n    }\n\n    Ok(outps)\n  }\n\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    let zero = &single_inputs[0];\n    let inp_len = vec_inputs[0].len();\n    let mut inp = vec_inputs[0].clone();\n\n    while inp.len() % self.num_inputs_per_row() != 0 {\n      inp.push(zero);\n    }\n\n    let vec_inputs = vec![inp];\n    let outp = self.op_aligned_rows(\n      layouter.namespace(|| format!(\"forward row {}\", self.name())),\n      &vec_inputs,\n      &single_inputs,\n    )?;\n\n    Ok(outp[0..inp_len].to_vec())\n  }\n}\n"
  },
  {
    "path": "src/gadgets/nonlinear/pow.rs",
    "content": "use std::{collections::HashMap, marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter, Region},\n  halo2curves::ff::PrimeField,\n  plonk::{ConstraintSystem, Error},\n};\n\nuse super::{\n  super::gadget::{Gadget, GadgetConfig, GadgetType},\n  non_linearity::NonLinearGadget,\n};\n\n// IMPORTANT: PowGadget assumes a single power across the entire DAG\npub struct PowGadgetChip<F: PrimeField> {\n  config: Rc<GadgetConfig>,\n  _marker: PhantomData<F>,\n}\n\nimpl<F: PrimeField> PowGadgetChip<F> {\n  pub fn construct(config: Rc<GadgetConfig>) -> Self {\n    Self {\n      config,\n      _marker: PhantomData,\n    }\n  }\n\n  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {\n    <PowGadgetChip<F> as NonLinearGadget<F>>::configure(meta, gadget_config, GadgetType::Pow)\n  }\n}\n\nimpl<F: PrimeField> NonLinearGadget<F> for PowGadgetChip<F> {\n  fn generate_map(scale_factor: u64, min_val: i64, num_rows: i64) -> HashMap<i64, i64> {\n    let power = 3.; // FIXME: need to make this variable somehow...\n\n    let mut map = HashMap::new();\n    for i in 0..num_rows {\n      let shifted = i + min_val;\n      let x = (shifted as f64) / (scale_factor as f64);\n      let y = x.powf(power);\n      let y = (y * ((scale_factor) as f64)).round() as i64;\n      map.insert(i as i64, y);\n    }\n\n    map\n  }\n\n  fn get_map(&self) -> &HashMap<i64, i64> {\n    &self.config.maps.get(&GadgetType::Pow).unwrap()[0]\n  }\n\n  fn get_selector(&self) -> halo2_proofs::plonk::Selector {\n    self.config.selectors.get(&GadgetType::Pow).unwrap()[0]\n  }\n}\n\nimpl<F: PrimeField> Gadget<F> for PowGadgetChip<F> {\n  fn name(&self) -> String {\n    \"PowGadgetChip\".to_string()\n  }\n\n  fn num_cols_per_op(&self) -> usize {\n    <PowGadgetChip<F> as NonLinearGadget<F>>::num_cols_per_op()\n  }\n\n  fn num_inputs_per_row(&self) -> usize {\n    self.config.columns.len() / self.num_cols_per_op()\n  }\n\n  fn num_outputs_per_row(&self) -> usize {\n    self.config.columns.len() / self.num_cols_per_op()\n  }\n\n  fn load_lookups(&self, layouter: impl Layouter<F>) -> Result<(), Error> {\n    NonLinearGadget::load_lookups(self, layouter, self.config.clone(), GadgetType::Pow)?;\n    Ok(())\n  }\n\n  fn op_row_region(\n    &self,\n    region: &mut Region<F>,\n    row_offset: usize,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    NonLinearGadget::op_row_region(\n      self,\n      region,\n      row_offset,\n      vec_inputs,\n      single_inputs,\n      self.config.clone(),\n    )\n  }\n\n  fn forward(\n    &self,\n    layouter: impl halo2_proofs::circuit::Layouter<F>,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    NonLinearGadget::forward(self, layouter, vec_inputs, single_inputs)\n  }\n}\n"
  },
  {
    "path": "src/gadgets/nonlinear/relu.rs",
    "content": "use std::{collections::HashMap, marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter, Region},\n  halo2curves::ff::PrimeField,\n  plonk::{ConstraintSystem, Error},\n};\n\nuse super::{\n  super::gadget::{Gadget, GadgetConfig, GadgetType},\n  non_linearity::NonLinearGadget,\n};\n\npub struct ReluChip<F: PrimeField> {\n  config: Rc<GadgetConfig>,\n  _marker: PhantomData<F>,\n}\n\nimpl<F: PrimeField> ReluChip<F> {\n  pub fn construct(config: Rc<GadgetConfig>) -> Self {\n    Self {\n      config,\n      _marker: PhantomData,\n    }\n  }\n\n  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {\n    <ReluChip<F> as NonLinearGadget<F>>::configure(meta, gadget_config, GadgetType::Relu)\n  }\n}\n\nimpl<F: PrimeField> NonLinearGadget<F> for ReluChip<F> {\n  fn generate_map(_scale_factor: u64, min_val: i64, num_rows: i64) -> HashMap<i64, i64> {\n    let mut map = HashMap::new();\n    for i in 0..num_rows {\n      let shifted = i + min_val;\n      let relu = shifted.max(0);\n      map.insert(i as i64, relu);\n    }\n\n    map\n  }\n\n  fn get_map(&self) -> &HashMap<i64, i64> {\n    &self.config.maps.get(&GadgetType::Relu).unwrap()[0]\n  }\n\n  fn get_selector(&self) -> halo2_proofs::plonk::Selector {\n    self.config.selectors.get(&GadgetType::Relu).unwrap()[0]\n  }\n}\n\nimpl<F: PrimeField> Gadget<F> for ReluChip<F> {\n  fn name(&self) -> String {\n    \"Relu\".to_string()\n  }\n\n  fn num_cols_per_op(&self) -> usize {\n    <ReluChip<F> as NonLinearGadget<F>>::num_cols_per_op()\n  }\n\n  fn num_inputs_per_row(&self) -> usize {\n    self.config.columns.len() / self.num_cols_per_op()\n  }\n\n  fn num_outputs_per_row(&self) -> usize {\n    self.config.columns.len() / self.num_cols_per_op()\n  }\n\n  fn load_lookups(&self, layouter: impl Layouter<F>) -> Result<(), Error> {\n    NonLinearGadget::load_lookups(self, layouter, self.config.clone(), GadgetType::Relu)?;\n    Ok(())\n  }\n\n  fn op_row_region(\n    &self,\n    region: &mut Region<F>,\n    row_offset: usize,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    NonLinearGadget::op_row_region(\n      self,\n      region,\n      row_offset,\n      vec_inputs,\n      single_inputs,\n      self.config.clone(),\n    )\n  }\n\n  fn forward(\n    &self,\n    layouter: impl halo2_proofs::circuit::Layouter<F>,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    NonLinearGadget::forward(self, layouter, vec_inputs, single_inputs)\n  }\n}\n"
  },
  {
    "path": "src/gadgets/nonlinear/rsqrt.rs",
    "content": "use std::{collections::HashMap, marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter, Region},\n  halo2curves::ff::PrimeField,\n  plonk::{ConstraintSystem, Error},\n};\n\nuse super::{\n  super::gadget::{Gadget, GadgetConfig, GadgetType},\n  non_linearity::NonLinearGadget,\n};\n\npub struct RsqrtGadgetChip<F: PrimeField> {\n  config: Rc<GadgetConfig>,\n  _marker: PhantomData<F>,\n}\n\nimpl<F: PrimeField> RsqrtGadgetChip<F> {\n  pub fn construct(config: Rc<GadgetConfig>) -> Self {\n    Self {\n      config,\n      _marker: PhantomData,\n    }\n  }\n\n  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {\n    <RsqrtGadgetChip<F> as NonLinearGadget<F>>::configure(meta, gadget_config, GadgetType::Rsqrt)\n  }\n}\n\nimpl<F: PrimeField> NonLinearGadget<F> for RsqrtGadgetChip<F> {\n  fn generate_map(scale_factor: u64, min_val: i64, num_rows: i64) -> HashMap<i64, i64> {\n    let mut map = HashMap::new();\n    for i in 0..num_rows {\n      let shifted = i + min_val;\n      let x = (shifted as f64) / (scale_factor as f64);\n      let sqrt = x.sqrt();\n      let rsqrt = 1.0 / sqrt;\n      let rsqrt = (rsqrt * (scale_factor as f64)).round() as i64;\n      map.insert(i as i64, rsqrt);\n    }\n    map\n  }\n\n  fn get_map(&self) -> &HashMap<i64, i64> {\n    &self.config.maps.get(&GadgetType::Rsqrt).unwrap()[0]\n  }\n\n  fn get_selector(&self) -> halo2_proofs::plonk::Selector {\n    self.config.selectors.get(&GadgetType::Rsqrt).unwrap()[0]\n  }\n}\n\nimpl<F: PrimeField> Gadget<F> for RsqrtGadgetChip<F> {\n  fn name(&self) -> String {\n    \"RsqrtGadget\".to_string()\n  }\n\n  fn num_cols_per_op(&self) -> usize {\n    <RsqrtGadgetChip<F> as NonLinearGadget<F>>::num_cols_per_op()\n  }\n\n  fn num_inputs_per_row(&self) -> usize {\n    self.config.columns.len() / self.num_cols_per_op()\n  }\n\n  fn num_outputs_per_row(&self) -> usize {\n    self.config.columns.len() / self.num_cols_per_op()\n  }\n\n  fn load_lookups(&self, layouter: impl Layouter<F>) -> Result<(), Error> {\n    NonLinearGadget::load_lookups(self, layouter, self.config.clone(), GadgetType::Rsqrt)?;\n    Ok(())\n  }\n\n  fn op_row_region(\n    &self,\n    region: &mut Region<F>,\n    row_offset: usize,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    NonLinearGadget::op_row_region(\n      self,\n      region,\n      row_offset,\n      vec_inputs,\n      single_inputs,\n      self.config.clone(),\n    )\n  }\n\n  fn forward(\n    &self,\n    layouter: impl halo2_proofs::circuit::Layouter<F>,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    NonLinearGadget::forward(self, layouter, vec_inputs, single_inputs)\n  }\n}\n"
  },
  {
    "path": "src/gadgets/nonlinear/sqrt.rs",
    "content": "use std::{collections::HashMap, marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter, Region},\n  halo2curves::ff::PrimeField,\n  plonk::{ConstraintSystem, Error},\n};\n\nuse super::{\n  super::gadget::{Gadget, GadgetConfig, GadgetType},\n  non_linearity::NonLinearGadget,\n};\n\npub struct SqrtGadgetChip<F: PrimeField> {\n  config: Rc<GadgetConfig>,\n  _marker: PhantomData<F>,\n}\n\nimpl<F: PrimeField> SqrtGadgetChip<F> {\n  pub fn construct(config: Rc<GadgetConfig>) -> Self {\n    Self {\n      config,\n      _marker: PhantomData,\n    }\n  }\n\n  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {\n    <SqrtGadgetChip<F> as NonLinearGadget<F>>::configure(meta, gadget_config, GadgetType::Sqrt)\n  }\n}\n\nimpl<F: PrimeField> NonLinearGadget<F> for SqrtGadgetChip<F> {\n  fn generate_map(scale_factor: u64, min_val: i64, num_rows: i64) -> HashMap<i64, i64> {\n    let mut map = HashMap::new();\n    for i in 0..num_rows {\n      let shifted = i + min_val;\n      let x = (shifted as f64) / (scale_factor as f64);\n      let sqrt = x.sqrt();\n      let sqrt = (sqrt * (scale_factor as f64)).round() as i64;\n      map.insert(i as i64, sqrt);\n    }\n    map\n  }\n\n  fn get_map(&self) -> &HashMap<i64, i64> {\n    &self.config.maps.get(&GadgetType::Sqrt).unwrap()[0]\n  }\n\n  fn get_selector(&self) -> halo2_proofs::plonk::Selector {\n    self.config.selectors.get(&GadgetType::Sqrt).unwrap()[0]\n  }\n}\n\nimpl<F: PrimeField> Gadget<F> for SqrtGadgetChip<F> {\n  fn name(&self) -> String {\n    \"SqrtGadget\".to_string()\n  }\n\n  fn num_cols_per_op(&self) -> usize {\n    <SqrtGadgetChip<F> as NonLinearGadget<F>>::num_cols_per_op()\n  }\n\n  fn num_inputs_per_row(&self) -> usize {\n    self.config.columns.len() / self.num_cols_per_op()\n  }\n\n  fn num_outputs_per_row(&self) -> usize {\n    self.config.columns.len() / self.num_cols_per_op()\n  }\n\n  fn load_lookups(&self, layouter: impl Layouter<F>) -> Result<(), Error> {\n    NonLinearGadget::load_lookups(self, layouter, self.config.clone(), GadgetType::Sqrt)?;\n    Ok(())\n  }\n\n  fn op_row_region(\n    &self,\n    region: &mut Region<F>,\n    row_offset: usize,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    NonLinearGadget::op_row_region(\n      self,\n      region,\n      row_offset,\n      vec_inputs,\n      single_inputs,\n      self.config.clone(),\n    )\n  }\n\n  fn forward(\n    &self,\n    layouter: impl halo2_proofs::circuit::Layouter<F>,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    NonLinearGadget::forward(self, layouter, vec_inputs, single_inputs)\n  }\n}\n"
  },
  {
    "path": "src/gadgets/nonlinear/tanh.rs",
    "content": "use std::{collections::HashMap, marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter, Region},\n  halo2curves::ff::PrimeField,\n  plonk::{ConstraintSystem, Error},\n};\n\nuse super::{\n  super::gadget::{Gadget, GadgetConfig, GadgetType},\n  non_linearity::NonLinearGadget,\n};\n\npub struct TanhGadgetChip<F: PrimeField> {\n  config: Rc<GadgetConfig>,\n  _marker: PhantomData<F>,\n}\n\nimpl<F: PrimeField> TanhGadgetChip<F> {\n  pub fn construct(config: Rc<GadgetConfig>) -> Self {\n    Self {\n      config,\n      _marker: PhantomData,\n    }\n  }\n\n  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {\n    <TanhGadgetChip<F> as NonLinearGadget<F>>::configure(meta, gadget_config, GadgetType::Tanh)\n  }\n}\n\nimpl<F: PrimeField> NonLinearGadget<F> for TanhGadgetChip<F> {\n  fn generate_map(scale_factor: u64, min_val: i64, num_rows: i64) -> HashMap<i64, i64> {\n    let scale_factor = scale_factor as f64;\n\n    let mut map = HashMap::new();\n    for i in 0..num_rows {\n      let shifted = i + min_val;\n      let x = (shifted as f64) / scale_factor;\n      let y = x.tanh();\n      let y = (y * scale_factor).round() as i64;\n      map.insert(i as i64, y);\n    }\n\n    map\n  }\n\n  fn get_map(&self) -> &HashMap<i64, i64> {\n    &self.config.maps.get(&GadgetType::Tanh).unwrap()[0]\n  }\n\n  fn get_selector(&self) -> halo2_proofs::plonk::Selector {\n    self.config.selectors.get(&GadgetType::Tanh).unwrap()[0]\n  }\n}\n\nimpl<F: PrimeField> Gadget<F> for TanhGadgetChip<F> {\n  fn name(&self) -> String {\n    \"TanhGadgetChip\".to_string()\n  }\n\n  fn num_cols_per_op(&self) -> usize {\n    <TanhGadgetChip<F> as NonLinearGadget<F>>::num_cols_per_op()\n  }\n\n  fn num_inputs_per_row(&self) -> usize {\n    self.config.columns.len() / self.num_cols_per_op()\n  }\n\n  fn num_outputs_per_row(&self) -> usize {\n    self.config.columns.len() / self.num_cols_per_op()\n  }\n\n  fn load_lookups(&self, layouter: impl Layouter<F>) -> Result<(), Error> {\n    NonLinearGadget::load_lookups(self, layouter, self.config.clone(), GadgetType::Tanh)?;\n    Ok(())\n  }\n\n  fn op_row_region(\n    &self,\n    region: &mut Region<F>,\n    row_offset: usize,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    NonLinearGadget::op_row_region(\n      self,\n      region,\n      row_offset,\n      vec_inputs,\n      single_inputs,\n      self.config.clone(),\n    )\n  }\n\n  fn forward(\n    &self,\n    layouter: impl halo2_proofs::circuit::Layouter<F>,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    NonLinearGadget::forward(self, layouter, vec_inputs, single_inputs)\n  }\n}\n"
  },
  {
    "path": "src/gadgets/nonlinear.rs",
    "content": "pub mod exp;\npub mod logistic;\npub mod non_linearity;\npub mod pow;\npub mod relu;\npub mod rsqrt;\npub mod sqrt;\npub mod tanh;\n"
  },
  {
    "path": "src/gadgets/sqrt_big.rs",
    "content": "use std::{marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter, Region},\n  halo2curves::ff::PrimeField,\n  plonk::{ConstraintSystem, Error, Expression},\n  poly::Rotation,\n};\n\nuse crate::gadgets::gadget::convert_to_u64;\n\nuse super::gadget::{Gadget, GadgetConfig, GadgetType};\n\ntype SqrtBigConfig = GadgetConfig;\n\npub struct SqrtBigChip<F: PrimeField> {\n  config: Rc<SqrtBigConfig>,\n  _marker: PhantomData<F>,\n}\n\nimpl<F: PrimeField> SqrtBigChip<F> {\n  pub fn construct(config: Rc<SqrtBigConfig>) -> Self {\n    Self {\n      config,\n      _marker: PhantomData,\n    }\n  }\n\n  pub fn num_cols_per_op() -> usize {\n    3\n  }\n\n  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {\n    let selector = meta.complex_selector();\n    let two = Expression::Constant(F::from(2));\n    let columns = gadget_config.columns;\n\n    let tables = gadget_config.tables;\n\n    let inp_lookup = tables.get(&GadgetType::InputLookup).unwrap()[0];\n\n    // TODO: prove that these constraints work\n    meta.create_gate(\"sqrt_big arithm\", |meta| {\n      let s = meta.query_selector(selector);\n\n      let mut constraints = vec![];\n      for op_idx in 0..columns.len() / Self::num_cols_per_op() {\n        let offset = op_idx * Self::num_cols_per_op();\n        let inp = meta.query_advice(columns[offset + 0], Rotation::cur());\n        let sqrt = meta.query_advice(columns[offset + 1], Rotation::cur());\n        let rem = meta.query_advice(columns[offset + 2], Rotation::cur());\n\n        let lhs = inp.clone();\n        let rhs = sqrt.clone() * sqrt.clone() + rem.clone();\n        constraints.push(s.clone() * (lhs - rhs));\n      }\n      constraints\n    });\n\n    for op_idx in 0..columns.len() / Self::num_cols_per_op() {\n      let offset = op_idx * Self::num_cols_per_op();\n      meta.lookup(\"sqrt_big sqrt lookup\", |meta| {\n        let s = meta.query_selector(selector);\n        let sqrt = meta.query_advice(columns[offset + 1], Rotation::cur());\n\n        vec![(s.clone() * sqrt, inp_lookup)]\n      });\n\n      meta.lookup(\"sqrt_big rem lookup\", |meta| {\n        let s = meta.query_selector(selector);\n        let sqrt = meta.query_advice(columns[offset + 1], Rotation::cur());\n        let rem = meta.query_advice(columns[offset + 2], Rotation::cur());\n\n        vec![(s.clone() * (rem + sqrt), inp_lookup)]\n      });\n\n      meta.lookup(\"sqrt_big sqrt - rem lookup\", |meta| {\n        let s = meta.query_selector(selector);\n        let sqrt = meta.query_advice(columns[offset + 1], Rotation::cur());\n        let rem = meta.query_advice(columns[offset + 2], Rotation::cur());\n\n        vec![(s.clone() * (two.clone() * sqrt - rem), inp_lookup)]\n      });\n    }\n\n    let mut selectors = gadget_config.selectors;\n    selectors.insert(GadgetType::SqrtBig, vec![selector]);\n\n    GadgetConfig {\n      columns,\n      tables,\n      selectors,\n      ..gadget_config\n    }\n  }\n}\n\nimpl<F: PrimeField> Gadget<F> for SqrtBigChip<F> {\n  fn name(&self) -> String {\n    \"sqrt_big\".to_string()\n  }\n\n  fn num_cols_per_op(&self) -> usize {\n    Self::num_cols_per_op()\n  }\n\n  fn num_inputs_per_row(&self) -> usize {\n    self.config.columns.len() / self.num_cols_per_op()\n  }\n\n  fn num_outputs_per_row(&self) -> usize {\n    self.num_inputs_per_row()\n  }\n\n  fn op_row_region(\n    &self,\n    region: &mut Region<F>,\n    row_offset: usize,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    
_single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    let inps = &vec_inputs[0];\n\n    if self.config.use_selectors {\n      let selector = self.config.selectors.get(&GadgetType::SqrtBig).unwrap()[0];\n      selector.enable(region, row_offset)?;\n    }\n\n    let mut outp_cells = vec![];\n    for (i, inp) in inps.iter().enumerate() {\n      let offset = i * self.num_cols_per_op();\n      inp.copy_advice(\n        || \"sqrt_big\",\n        region,\n        self.config.columns[offset],\n        row_offset,\n      )?;\n\n      let outp = inp.value().map(|x: &F| {\n        let inp_val = convert_to_u64(x) as i64;\n        let fsqrt = (inp_val as f64).sqrt();\n        let sqrt = fsqrt.round() as i64;\n        let rem = inp_val - sqrt * sqrt;\n        (sqrt, rem)\n      });\n\n      let sqrt_cell = region.assign_advice(\n        || \"sqrt_big\",\n        self.config.columns[offset + 1],\n        row_offset,\n        || outp.map(|x| F::from(x.0 as u64)),\n      )?;\n\n      let _rem_cell = region.assign_advice(\n        || \"sqrt_big\",\n        self.config.columns[offset + 2],\n        row_offset,\n        || {\n          outp.map(|x| {\n            let rem_pos = x.1 + x.0;\n            F::from(rem_pos as u64) - F::from(x.0 as u64)\n          })\n        },\n      )?;\n      outp_cells.push(sqrt_cell);\n    }\n\n    Ok(outp_cells)\n  }\n\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    let zero = &single_inputs[0];\n\n    let mut inp = vec_inputs[0].clone();\n    let inp_len = inp.len();\n    while inp.len() % self.num_inputs_per_row() != 0 {\n      inp.push(zero);\n    }\n\n    let vec_inputs = vec![inp];\n    let outp = self.op_aligned_rows(\n      layouter.namespace(|| format!(\"forward row {}\", self.name())),\n      &vec_inputs,\n      single_inputs,\n    )?;\n\n    Ok(outp[0..inp_len].to_vec())\n  }\n}\n"
  },
  {
    "path": "src/gadgets/square.rs",
    "content": "use std::{marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Region},\n  halo2curves::ff::PrimeField,\n  plonk::{ConstraintSystem, Error},\n  poly::Rotation,\n};\n\nuse super::gadget::{Gadget, GadgetConfig, GadgetType};\n\npub struct SquareGadgetChip<F: PrimeField> {\n  config: Rc<GadgetConfig>,\n  _marker: PhantomData<F>,\n}\n\nimpl<F: PrimeField> SquareGadgetChip<F> {\n  pub fn construct(config: Rc<GadgetConfig>) -> Self {\n    Self {\n      config,\n      _marker: PhantomData,\n    }\n  }\n\n  // TODO: it would be more efficient to do the division here directly\n  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {\n    let selector = meta.selector();\n    let columns = gadget_config.columns;\n\n    meta.create_gate(\"square gate\", |meta| {\n      let s = meta.query_selector(selector);\n      let gate_inp = meta.query_advice(columns[0], Rotation::cur());\n      let gate_output = meta.query_advice(columns[1], Rotation::cur());\n\n      let res = gate_inp.clone() * gate_inp;\n\n      vec![s * (res - gate_output)]\n    });\n\n    let mut selectors = gadget_config.selectors;\n    selectors.insert(GadgetType::Square, vec![selector]);\n\n    GadgetConfig {\n      columns,\n      selectors,\n      ..gadget_config\n    }\n  }\n}\n\nimpl<F: PrimeField> Gadget<F> for SquareGadgetChip<F> {\n  fn name(&self) -> String {\n    \"SquareChip\".to_string()\n  }\n\n  fn num_cols_per_op(&self) -> usize {\n    2\n  }\n\n  fn num_inputs_per_row(&self) -> usize {\n    self.config.columns.len() / self.num_cols_per_op()\n  }\n\n  fn num_outputs_per_row(&self) -> usize {\n    self.num_inputs_per_row()\n  }\n\n  fn op_row_region(\n    &self,\n    region: &mut Region<F>,\n    row_offset: usize,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    _single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    assert_eq!(vec_inputs.len(), 1);\n\n    if self.config.use_selectors {\n      let selector = self.config.selectors.get(&GadgetType::Square).unwrap()[0];\n      selector.enable(region, row_offset)?;\n    }\n\n    let inps = &vec_inputs[0];\n    let mut outp = vec![];\n    for (i, inp) in inps.iter().enumerate() {\n      let offset = i * self.num_cols_per_op();\n      inp.copy_advice(|| \"\", region, self.config.columns[offset], row_offset)?;\n      let outp_val = inp.value().map(|x: &F| x.to_owned() * x.to_owned());\n      let outp_cell = region.assign_advice(\n        || \"square output\",\n        self.config.columns[offset + 1],\n        row_offset,\n        || outp_val,\n      )?;\n      outp.push(outp_cell);\n    }\n\n    Ok(outp)\n  }\n\n  fn forward(\n    &self,\n    mut layouter: impl halo2_proofs::circuit::Layouter<F>,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    let zero = &single_inputs[0];\n\n    let mut inp = vec_inputs[0].clone();\n    let initial_len = inp.len();\n    while inp.len() % self.num_inputs_per_row() != 0 {\n      inp.push(zero);\n    }\n\n    let vec_inputs = vec![inp];\n    let res = self.op_aligned_rows(\n      layouter.namespace(|| format!(\"forward row {}\", self.name())),\n      &vec_inputs,\n      single_inputs,\n    )?;\n    Ok(res[0..initial_len].to_vec())\n  }\n}\n"
  },
  {
    "path": "src/gadgets/squared_diff.rs",
    "content": "use std::{marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter, Region},\n  halo2curves::ff::PrimeField,\n  plonk::{ConstraintSystem, Error},\n  poly::Rotation,\n};\n\nuse super::gadget::{Gadget, GadgetConfig, GadgetType};\n\ntype SquaredDiffConfig = GadgetConfig;\n\npub struct SquaredDiffGadgetChip<F: PrimeField> {\n  config: Rc<SquaredDiffConfig>,\n  _marker: PhantomData<F>,\n}\n\nimpl<F: PrimeField> SquaredDiffGadgetChip<F> {\n  pub fn construct(config: Rc<SquaredDiffConfig>) -> Self {\n    Self {\n      config,\n      _marker: PhantomData,\n    }\n  }\n\n  pub fn num_cols_per_op() -> usize {\n    3\n  }\n\n  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {\n    let selector = meta.selector();\n    let columns = gadget_config.columns;\n\n    meta.create_gate(\"squared diff\", |meta| {\n      let s = meta.query_selector(selector);\n      let mut constraints = vec![];\n      for i in 0..columns.len() / Self::num_cols_per_op() {\n        let offset = i * Self::num_cols_per_op();\n        let inp1 = meta.query_advice(columns[offset + 0], Rotation::cur());\n        let inp2 = meta.query_advice(columns[offset + 1], Rotation::cur());\n        let outp = meta.query_advice(columns[offset + 2], Rotation::cur());\n\n        let res = (inp1 - inp2).square();\n        constraints.append(&mut vec![s.clone() * (res - outp)])\n      }\n\n      constraints\n    });\n\n    let mut selectors = gadget_config.selectors;\n    selectors.insert(GadgetType::SquaredDiff, vec![selector]);\n\n    GadgetConfig {\n      columns,\n      selectors,\n      ..gadget_config\n    }\n  }\n}\n\nimpl<F: PrimeField> Gadget<F> for SquaredDiffGadgetChip<F> {\n  fn name(&self) -> String {\n    \"SquaredDiff\".to_string()\n  }\n\n  fn num_cols_per_op(&self) -> usize {\n    Self::num_cols_per_op()\n  }\n\n  fn num_inputs_per_row(&self) -> usize {\n    self.config.columns.len() / self.num_cols_per_op()\n  }\n\n  fn num_outputs_per_row(&self) -> usize {\n    self.config.columns.len() / self.num_cols_per_op()\n  }\n\n  fn op_row_region(\n    &self,\n    region: &mut Region<F>,\n    row_offset: usize,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    _single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    let inp1 = &vec_inputs[0];\n    let inp2 = &vec_inputs[1];\n    assert_eq!(inp1.len(), inp2.len());\n\n    let columns = &self.config.columns;\n\n    if self.config.use_selectors {\n      let selector = self.config.selectors.get(&GadgetType::SquaredDiff).unwrap()[0];\n      selector.enable(region, row_offset)?;\n    }\n\n    let mut outps = vec![];\n    for i in 0..inp1.len() {\n      let offset = i * self.num_cols_per_op();\n      let inp1 = inp1[i].copy_advice(|| \"\", region, columns[offset + 0], row_offset)?;\n      let inp2 = inp2[i].copy_advice(|| \"\", region, columns[offset + 1], row_offset)?;\n      let outp = inp1.value().map(|x: &F| x.to_owned()) - inp2.value().map(|x: &F| x.to_owned());\n      let outp = outp * outp;\n\n      let outp = region.assign_advice(|| \"\", columns[offset + 2], row_offset, || outp)?;\n      outps.push(outp);\n    }\n    Ok(outps)\n  }\n\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    let zero = &single_inputs[0];\n\n    let mut inp1 = vec_inputs[0].clone();\n    let mut inp2 = 
vec_inputs[1].clone();\n    let initial_len = inp1.len();\n    while inp1.len() % self.num_inputs_per_row() != 0 {\n      inp1.push(zero);\n      inp2.push(zero);\n    }\n\n    let vec_inputs = vec![inp1, inp2];\n\n    let res = self.op_aligned_rows(\n      layouter.namespace(|| format!(\"forward row {}\", self.name())),\n      &vec_inputs,\n      single_inputs,\n    )?;\n\n    Ok(res[0..initial_len].to_vec())\n  }\n}\n"
  },
  {
    "path": "src/gadgets/sub_pairs.rs",
    "content": "use std::{marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter, Region},\n  halo2curves::ff::PrimeField,\n  plonk::{ConstraintSystem, Error},\n  poly::Rotation,\n};\n\nuse super::gadget::{Gadget, GadgetConfig, GadgetType};\n\ntype SubPairsConfig = GadgetConfig;\n\npub struct SubPairsChip<F: PrimeField> {\n  config: Rc<SubPairsConfig>,\n  _marker: PhantomData<F>,\n}\n\nimpl<F: PrimeField> SubPairsChip<F> {\n  pub fn construct(config: Rc<SubPairsConfig>) -> Self {\n    Self {\n      config,\n      _marker: PhantomData,\n    }\n  }\n\n  pub fn num_cols_per_op() -> usize {\n    3\n  }\n\n  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {\n    let selector = meta.selector();\n    let columns = gadget_config.columns;\n\n    meta.create_gate(\"sub pair\", |meta| {\n      let s = meta.query_selector(selector);\n      let mut constraints = vec![];\n      for i in 0..columns.len() / Self::num_cols_per_op() {\n        let offset = i * Self::num_cols_per_op();\n        let inp1 = meta.query_advice(columns[offset + 0], Rotation::cur());\n        let inp2 = meta.query_advice(columns[offset + 1], Rotation::cur());\n        let outp = meta.query_advice(columns[offset + 2], Rotation::cur());\n\n        let res = inp1 - inp2;\n        constraints.append(&mut vec![s.clone() * (res - outp)])\n      }\n\n      constraints\n    });\n\n    let mut selectors = gadget_config.selectors;\n    selectors.insert(GadgetType::SubPairs, vec![selector]);\n\n    GadgetConfig {\n      columns,\n      selectors,\n      ..gadget_config\n    }\n  }\n}\n\nimpl<F: PrimeField> Gadget<F> for SubPairsChip<F> {\n  fn name(&self) -> String {\n    \"sub pairs chip\".to_string()\n  }\n\n  fn num_cols_per_op(&self) -> usize {\n    Self::num_cols_per_op()\n  }\n\n  fn num_inputs_per_row(&self) -> usize {\n    self.config.columns.len() / self.num_cols_per_op()\n  }\n\n  fn num_outputs_per_row(&self) -> usize {\n    self.config.columns.len() / self.num_cols_per_op()\n  }\n\n  fn op_row_region(\n    &self,\n    region: &mut Region<F>,\n    row_offset: usize,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    _single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    let inp1 = &vec_inputs[0];\n    let inp2 = &vec_inputs[1];\n    assert_eq!(inp1.len(), inp2.len());\n\n    let columns = &self.config.columns;\n\n    if self.config.use_selectors {\n      let selector = self.config.selectors.get(&GadgetType::SubPairs).unwrap()[0];\n      selector.enable(region, row_offset)?;\n    }\n\n    let mut outps = vec![];\n    for i in 0..inp1.len() {\n      let offset = i * self.num_cols_per_op();\n      let inp1 = inp1[i].copy_advice(|| \"\", region, columns[offset + 0], row_offset)?;\n      let inp2 = inp2[i].copy_advice(|| \"\", region, columns[offset + 1], row_offset)?;\n      let outp = inp1.value().map(|x: &F| x.to_owned()) - inp2.value().map(|x: &F| x.to_owned());\n\n      let outp = region.assign_advice(|| \"\", columns[offset + 2], row_offset, || outp)?;\n      outps.push(outp);\n    }\n    Ok(outps)\n  }\n\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    let zero = &single_inputs[0];\n\n    let mut inp1 = vec_inputs[0].clone();\n    let mut inp2 = vec_inputs[1].clone();\n    let initial_len = inp1.len();\n    while inp1.len() % 
self.num_inputs_per_row() != 0 {\n      inp1.push(zero);\n      inp2.push(zero);\n    }\n\n    let vec_inputs = vec![inp1, inp2];\n\n    let res = self.op_aligned_rows(\n      layouter.namespace(|| format!(\"forward row {}\", self.name())),\n      &vec_inputs,\n      single_inputs,\n    )?;\n    Ok(res[0..initial_len].to_vec())\n  }\n}\n"
  },
  {
    "path": "src/gadgets/update.rs",
    "content": "use std::marker::PhantomData;\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter, Region},\n  halo2curves::ff::PrimeField,\n  plonk::{ConstraintSystem, Error, Expression},\n  poly::Rotation,\n};\n\nuse crate::gadgets::gadget::{convert_to_u64, GadgetConfig};\n\nuse super::gadget::{Gadget, GadgetType};\n\ntype UpdateConfig = GadgetConfig;\n\n#[derive(Clone, Debug)]\npub struct UpdateGadgetChip<F: PrimeField> {\n  config: UpdateConfig,\n  _marker: PhantomData<F>,\n}\n\nimpl<F: PrimeField> UpdateGadgetChip<F> {\n  pub fn construct(config: UpdateConfig) -> Self {\n    Self {\n      config,\n      _marker: PhantomData,\n    }\n  }\n\n  pub fn num_cols_per_op() -> usize {\n    4\n  }\n\n  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> UpdateConfig {\n    let tables = &gadget_config.tables;\n    let mod_lookup = tables.get(&GadgetType::InputLookup).unwrap()[0];\n\n    let columns = gadget_config.columns;\n    let selector = meta.complex_selector();\n\n    let div_val = gadget_config.scale_factor;\n    let eta: u64 = (gadget_config.scale_factor as f64 * gadget_config.eta) as u64;\n\n    meta.create_gate(\"updater_arith\", |meta| {\n      let s = meta.query_selector(selector);\n\n      let sf = Expression::Constant(F::from(div_val as u64));\n      let eta = Expression::Constant(F::from(eta as u64));\n\n      let mut constraints = vec![];\n      for op_idx in 0..columns.len() / Self::num_cols_per_op() {\n        let offset = op_idx * Self::num_cols_per_op();\n        let w = meta.query_advice(columns[offset], Rotation::cur());\n        let dw = meta.query_advice(columns[offset + 1], Rotation::cur());\n        let div = meta.query_advice(columns[offset + 2], Rotation::cur());\n        let mod_res = meta.query_advice(columns[offset + 3], Rotation::cur());\n\n        let expr = (w * sf.clone() - dw * eta.clone()) - (div * sf.clone() + mod_res);\n        constraints.push(s.clone() * expr);\n      }\n      constraints\n    });\n\n    for op_idx in 0..columns.len() / Self::num_cols_per_op() {\n      let offset = op_idx * Self::num_cols_per_op();\n\n      // Check that mod is smaller than SF\n      meta.lookup(\"max inp1\", |meta| {\n        let s = meta.query_selector(selector);\n        let mod_res = meta.query_advice(columns[offset + 3], Rotation::cur());\n\n        // Constrains that the modulus \\in [0, DIV_VAL)\n        vec![(s.clone() * mod_res.clone(), mod_lookup)]\n      });\n    }\n\n    let mut selectors = gadget_config.selectors;\n    selectors.insert(GadgetType::Update, vec![selector]);\n\n    UpdateConfig {\n      columns,\n      selectors,\n      ..gadget_config\n    }\n  }\n}\n\nimpl<F: PrimeField + Ord> Gadget<F> for UpdateGadgetChip<F> {\n  fn name(&self) -> String {\n    \"updater chip\".to_string()\n  }\n\n  fn num_cols_per_op(&self) -> usize {\n    Self::num_cols_per_op()\n  }\n\n  fn num_inputs_per_row(&self) -> usize {\n    self.config.columns.len() / self.num_cols_per_op()\n  }\n\n  fn num_outputs_per_row(&self) -> usize {\n    self.config.columns.len() / self.num_cols_per_op()\n  }\n\n  fn op_row_region(\n    &self,\n    region: &mut Region<F>,\n    row_offset: usize,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    _single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    let div_val = self.config.scale_factor as i64;\n    let div_val_f = F::from(div_val as u64);\n    let eta = div_val / 1000;\n    let eta = F::from(eta as u64);\n\n    let div_outp_min_val = 
self.config.div_outp_min_val;\n    let div_inp_min_val_pos_i64 = -self.config.shift_min_val;\n    let div_inp_min_val_pos = F::from(div_inp_min_val_pos_i64 as u64);\n\n    let columns = &self.config.columns;\n\n    if self.config.use_selectors {\n      let selector = self.config.selectors.get(&GadgetType::Update).unwrap()[0];\n      selector.enable(region, row_offset)?;\n    }\n\n    let w = &vec_inputs[0];\n    let dw = &vec_inputs[1];\n\n    let mut output_cells = vec![];\n\n    for i in 0..w.len() {\n      let offset = i * self.num_cols_per_op();\n      let _w_cell = w[i].copy_advice(|| \"\", region, columns[offset + 0], row_offset)?;\n      let _dw_cell = dw[i].copy_advice(|| \"\", region, columns[offset + 1], row_offset)?;\n\n      let w_val = w[i].value().map(|x: &F| x.to_owned());\n      let dw_val = dw[i].value().map(|x: &F| x.to_owned());\n      let out_scaled = w_val.zip(dw_val).map(|(w, dw)| w * div_val_f - dw * eta);\n\n      let div_mod = out_scaled.map(|x| {\n        let x_pos = x + div_inp_min_val_pos;\n        let x_pos = if x_pos > F::ZERO {\n          x_pos\n        } else {\n          x_pos + div_val_f\n        };\n        let inp = convert_to_u64(&x_pos);\n\n        let div_res = inp as i64 / div_val - (div_inp_min_val_pos_i64 as i64 / div_val);\n        let mod_res = inp as i64 % div_val;\n        (div_res, mod_res)\n      });\n\n      let div_res_cell = region\n        .assign_advice(\n          || \"div_res\",\n          self.config.columns[offset + 2],\n          row_offset,\n          || {\n            div_mod.map(|(x, _): (i64, i64)| {\n              F::from((x - div_outp_min_val as i64) as u64) - F::from(-div_outp_min_val as u64)\n            })\n          },\n        )\n        .unwrap();\n\n      let _mod_res_cell = region\n        .assign_advice(\n          || \"mod_res\",\n          self.config.columns[offset + 3],\n          row_offset,\n          || div_mod.map(|(_, x): (i64, i64)| F::from(x as u64)),\n        )\n        .unwrap();\n\n      output_cells.push(div_res_cell);\n    }\n    Ok(output_cells)\n  }\n\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    let zero = &single_inputs[0];\n    let mut w = vec_inputs[0].clone();\n    let mut dw = vec_inputs[1].clone();\n\n    let initial_len = w.len();\n    // Pad both inputs with the zero cell to a whole number of rows\n    while w.len() % self.num_inputs_per_row() != 0 {\n      w.push(zero);\n    }\n    while dw.len() % self.num_inputs_per_row() != 0 {\n      dw.push(zero);\n    }\n\n    let res = self.op_aligned_rows(\n      layouter.namespace(|| format!(\"forward row {}\", self.name())),\n      &vec![w, dw],\n      single_inputs,\n    )?;\n\n    Ok(res[0..initial_len].to_vec())\n  }\n}\n"
  },
  {
    "path": "src/gadgets/var_div.rs",
    "content": "use std::{marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter, Region},\n  halo2curves::ff::PrimeField,\n  plonk::{ConstraintSystem, Error, Expression},\n  poly::Rotation,\n};\nuse rounded_div::RoundedDiv;\n\nuse super::gadget::{convert_to_u128, Gadget, GadgetConfig, GadgetType};\n\ntype VarDivRoundConfig = GadgetConfig;\n\npub struct VarDivRoundChip<F: PrimeField> {\n  config: Rc<VarDivRoundConfig>,\n  _marker: PhantomData<F>,\n}\n\nimpl<F: PrimeField> VarDivRoundChip<F> {\n  pub fn construct(config: Rc<VarDivRoundConfig>) -> Self {\n    Self {\n      config,\n      _marker: PhantomData,\n    }\n  }\n\n  pub fn num_cols_per_op() -> usize {\n    3\n  }\n\n  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {\n    let columns = gadget_config.columns;\n    let selector = meta.complex_selector();\n    let two = Expression::Constant(F::from(2));\n\n    let tables = gadget_config.tables;\n    let lookup = tables.get(&GadgetType::InputLookup).unwrap()[0];\n\n    // a | c | r | ... | b\n    // (2 * a + b) = (2 * b) * c + r\n    // b - r \\in [0, 2^N) <-- forces b > r\n    meta.create_gate(\"var_div_arithm\", |meta| {\n      let s = meta.query_selector(selector);\n      let mut constraints = vec![];\n\n      let b = meta.query_advice(columns[columns.len() - 1], Rotation::cur());\n      for i in 0..(columns.len() - 1) / Self::num_cols_per_op() {\n        let offset = i * Self::num_cols_per_op();\n        let a = meta.query_advice(columns[offset], Rotation::cur());\n        let c = meta.query_advice(columns[offset + 1], Rotation::cur());\n        let r = meta.query_advice(columns[offset + 2], Rotation::cur());\n\n        let lhs = a.clone() * two.clone() + b.clone();\n        let rhs = b.clone() * two.clone() * c + r;\n        constraints.push(s.clone() * (lhs - rhs));\n      }\n\n      constraints\n    });\n\n    for i in 0..(columns.len() - 1) / Self::num_cols_per_op() {\n      let offset = i * Self::num_cols_per_op();\n      // r \\in [0, 2^N)\n      meta.lookup(\"var div range checks r\", |meta| {\n        let s = meta.query_selector(selector);\n        let r = meta.query_advice(columns[offset + 2], Rotation::cur());\n\n        vec![(s.clone() * r, lookup)]\n      });\n\n      // 2 * b - r \\in [0, 2^N)\n      meta.lookup(\"var div range checks 2b-r\", |meta| {\n        let s = meta.query_selector(selector);\n        let b = meta.query_advice(columns[columns.len() - 1], Rotation::cur());\n        let r = meta.query_advice(columns[offset + 2], Rotation::cur());\n\n        vec![(s.clone() * (two.clone() * b - r), lookup)]\n      });\n    }\n    // b \\in [0, 2^N)\n    meta.lookup(\"var div range checks b\", |meta| {\n      let s = meta.query_selector(selector);\n      let b = meta.query_advice(columns[columns.len() - 1], Rotation::cur());\n\n      vec![(s.clone() * b, lookup)]\n    });\n\n    let mut selectors = gadget_config.selectors;\n    selectors.insert(GadgetType::VarDivRound, vec![selector]);\n\n    GadgetConfig {\n      columns,\n      tables,\n      selectors,\n      ..gadget_config\n    }\n  }\n}\n\nimpl<F: PrimeField> Gadget<F> for VarDivRoundChip<F> {\n  fn name(&self) -> String {\n    \"VarDivRoundChip\".to_string()\n  }\n\n  fn num_cols_per_op(&self) -> usize {\n    Self::num_cols_per_op()\n  }\n\n  fn num_inputs_per_row(&self) -> usize {\n    (self.config.columns.len() - 1) / self.num_cols_per_op()\n  }\n\n  fn num_outputs_per_row(&self) -> usize {\n    self.num_inputs_per_row()\n  }\n\n 
 fn op_row_region(\n    &self,\n    region: &mut Region<F>,\n    row_offset: usize,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    let a_vec = &vec_inputs[0];\n    // single_inputs[0] is the zero constant; it is only used for padding in forward\n    let b = &single_inputs[1];\n\n    let div_outp_min_val_i64 = self.config.div_outp_min_val;\n    let div_inp_min_val_pos_i64 = -self.config.shift_min_val;\n\n    if self.config.use_selectors {\n      let selector = self.config.selectors.get(&GadgetType::VarDivRound).unwrap()[0];\n      selector.enable(region, row_offset)?;\n    }\n\n    b.copy_advice(\n      || \"\",\n      region,\n      self.config.columns[self.config.columns.len() - 1],\n      row_offset,\n    )?;\n\n    let mut div_out = vec![];\n    for (i, a) in a_vec.iter().enumerate() {\n      let offset = i * self.num_cols_per_op();\n      a.copy_advice(|| \"\", region, self.config.columns[offset], row_offset)?;\n\n      let div_mod = a.value().zip(b.value()).map(|(a, b)| {\n        let b = convert_to_u128(b);\n        // The shift must be divisible by b so that it cancels exactly after division\n        let div_inp_min_val_pos_i64 = div_inp_min_val_pos_i64 / (b as i64) * (b as i64);\n        let div_inp_min_val_pos = F::from(div_inp_min_val_pos_i64 as u64);\n\n        let a_pos = *a + div_inp_min_val_pos;\n        let a = convert_to_u128(&a_pos);\n        // c = (2 * a + b) / (2 * b)\n        let c_pos = a.rounded_div(b);\n        let c = (c_pos as i128 - (div_inp_min_val_pos_i64 as u128 / b) as i128) as i64;\n\n        // r = (2 * a + b) % (2 * b)\n        let rem_floor = (a as i128) - (c_pos * b) as i128;\n        let r = 2 * rem_floor + (b as i128);\n        let r = r as i64;\n        (c, r)\n      });\n\n      let div_cell = region.assign_advice(\n        || \"\",\n        self.config.columns[offset + 1],\n        row_offset,\n        || {\n          div_mod.map(|(c, _)| {\n            let offset = F::from(-div_outp_min_val_i64 as u64);\n            let c = F::from((c - div_outp_min_val_i64) as u64);\n            c - offset\n          })\n        },\n      )?;\n      let _mod_cell = region.assign_advice(\n        || \"\",\n        self.config.columns[offset + 2],\n        row_offset,\n        || div_mod.map(|(_, r)| F::from(r as u64)),\n      )?;\n      div_out.push(div_cell);\n    }\n\n    Ok(div_out)\n  }\n\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    let mut inps = vec_inputs[0].clone();\n    let initial_len = inps.len();\n\n    // Pad the row out with the zero constant: round(0 / b) = 0, so padding is harmless\n    let default = single_inputs[0];\n    while inps.len() % self.num_inputs_per_row() != 0 {\n      inps.push(default);\n    }\n\n    let res = self.op_aligned_rows(layouter.namespace(|| \"var_div\"), &vec![inps], single_inputs)?;\n    Ok(res[..initial_len].to_vec())\n  }\n}\n"
  },
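For intuition, the identity `var_div.rs` enforces can be checked outside the circuit: once `r` and `2b - r` are both range-checked to be non-negative, the equation `2a + b = 2b*c + r` has exactly one solution with `c = round(a / b)`. A standalone sketch, not part of the crate, written over `u128` for clarity:

```rust
// Standalone sketch of the identity VarDivRoundChip constrains:
//   (2 * a + b) = (2 * b) * c + r,  with 0 <= r < 2 * b,
// which pins c down to the rounded quotient round(a / b).
fn var_div_round(a: u128, b: u128) -> (u128, u128) {
    let c = (2 * a + b) / (2 * b); // floor division computes the rounded quotient
    let r = (2 * a + b) - (2 * b) * c; // remainder witnessed in the circuit
    (c, r)
}

fn main() {
    for (a, b) in [(7u128, 3u128), (8, 3), (100, 7), (0, 5)] {
        let (c, r) = var_div_round(a, b);
        assert_eq!(2 * a + b, 2 * b * c + r); // the custom gate
        assert!(r < 2 * b); // enforced by the r and 2b - r lookups
        println!("round({a} / {b}) = {c}, r = {r}");
    }
}
```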
  {
    "path": "src/gadgets/var_div_big.rs",
    "content": "use std::{marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter, Region},\n  halo2curves::ff::PrimeField,\n  plonk::{ConstraintSystem, Error, Expression},\n  poly::Rotation,\n};\nuse rounded_div::RoundedDiv;\n\nuse super::gadget::{convert_to_u128, Gadget, GadgetConfig, GadgetType};\n\npub struct VarDivRoundBigChip<F: PrimeField> {\n  config: Rc<GadgetConfig>,\n  _marker: PhantomData<F>,\n}\n\nimpl<F: PrimeField> VarDivRoundBigChip<F> {\n  pub fn construct(config: Rc<GadgetConfig>) -> Self {\n    Self {\n      config,\n      _marker: PhantomData,\n    }\n  }\n\n  pub fn num_cols_per_op() -> usize {\n    7\n  }\n\n  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {\n    let columns = gadget_config.columns;\n    let selector = meta.complex_selector();\n    let two = Expression::Constant(F::from(2));\n    let range = Expression::Constant(F::from(gadget_config.num_rows as u64));\n\n    let tables = gadget_config.tables;\n    let lookup = tables.get(&GadgetType::InputLookup).unwrap()[0];\n\n    // a | c | r | (2 b - r)_1 | (2 b - r)_0 | r_1 | r_0 | ... | b\n    // a / b = c\n    meta.create_gate(\"var_div_arithm\", |meta| {\n      let s = meta.query_selector(selector);\n      let mut constraints = vec![];\n\n      let b = meta.query_advice(columns[columns.len() - 1], Rotation::cur());\n      for i in 0..(columns.len() - 1) / Self::num_cols_per_op() {\n        let offset = i * Self::num_cols_per_op();\n        // Constrain that (2 * a + b) = (2 * b) * c + r\n        let a = meta.query_advice(columns[offset], Rotation::cur());\n        let c = meta.query_advice(columns[offset + 1], Rotation::cur());\n        let r = meta.query_advice(columns[offset + 2], Rotation::cur());\n\n        let lhs = a.clone() * two.clone() + b.clone();\n        let rhs = b.clone() * two.clone() * c + r.clone();\n        constraints.push(s.clone() * (lhs - rhs));\n\n        // Constrain that (2 * b - r) = br1 * max_val + br0\n        let br1 = meta.query_advice(columns[offset + 3], Rotation::cur());\n        let br0 = meta.query_advice(columns[offset + 4], Rotation::cur());\n        let lhs = b.clone() * two.clone() - r.clone();\n        let rhs = br1 * range.clone() + br0;\n        constraints.push(s.clone() * (lhs - rhs));\n\n        // Constrains that r = r1 * max_val + r0\n        let r1 = meta.query_advice(columns[offset + 5], Rotation::cur());\n        let r0 = meta.query_advice(columns[offset + 6], Rotation::cur());\n        let lhs = r.clone();\n        let rhs = r1 * range.clone() + r0;\n        constraints.push(s.clone() * (lhs - rhs));\n      }\n\n      constraints\n    });\n\n    // For var div big, we assume that a, b > 0 and are outputs of the previous layer\n    // r must be constrained to be in [0, b)\n    for i in 0..(columns.len() - 1) / Self::num_cols_per_op() {\n      let offset = i * Self::num_cols_per_op();\n\n      // (2 * b - r)_{1, 0} \\in [0, 2^N)\n      meta.lookup(\"var div big br1\", |meta| {\n        let s = meta.query_selector(selector);\n        let br1 = meta.query_advice(columns[offset + 3], Rotation::cur());\n        vec![(s * br1, lookup)]\n      });\n      meta.lookup(\"var div big br0\", |meta| {\n        let s = meta.query_selector(selector);\n        let br0 = meta.query_advice(columns[offset + 4], Rotation::cur());\n        vec![(s * br0, lookup)]\n      });\n      // r_{1, 0} \\in [0, 2^N)\n      meta.lookup(\"var div big r1\", |meta| {\n        let s = meta.query_selector(selector);\n 
       let r1 = meta.query_advice(columns[offset + 5], Rotation::cur());\n        vec![(s * r1, lookup)]\n      });\n      meta.lookup(\"var div big r0\", |meta| {\n        let s = meta.query_selector(selector);\n        let r0 = meta.query_advice(columns[offset + 6], Rotation::cur());\n        vec![(s * r0, lookup)]\n      });\n    }\n\n    let mut selectors = gadget_config.selectors;\n    selectors.insert(GadgetType::VarDivRoundBig, vec![selector]);\n\n    GadgetConfig {\n      columns,\n      tables,\n      selectors,\n      ..gadget_config\n    }\n  }\n}\n\nimpl<F: PrimeField> Gadget<F> for VarDivRoundBigChip<F> {\n  fn name(&self) -> String {\n    \"VarDivRoundBigChip\".to_string()\n  }\n\n  fn num_cols_per_op(&self) -> usize {\n    Self::num_cols_per_op()\n  }\n\n  fn num_inputs_per_row(&self) -> usize {\n    (self.config.columns.len() - 1) / self.num_cols_per_op()\n  }\n\n  fn num_outputs_per_row(&self) -> usize {\n    self.num_inputs_per_row()\n  }\n\n  fn op_row_region(\n    &self,\n    region: &mut Region<F>,\n    row_offset: usize,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    let a_vec = &vec_inputs[0];\n    // single_inputs[0] is the zero constant; it is only used for padding in forward\n    let b = &single_inputs[1];\n\n    let div_outp_min_val_i64 = self.config.div_outp_min_val;\n    let div_inp_min_val_pos_i64 = -self.config.shift_min_val;\n    let num_rows = self.config.num_rows as i64;\n\n    if self.config.use_selectors {\n      let selector = self\n        .config\n        .selectors\n        .get(&GadgetType::VarDivRoundBig)\n        .unwrap()[0];\n      selector.enable(region, row_offset)?;\n    }\n\n    b.copy_advice(\n      || \"\",\n      region,\n      self.config.columns[self.config.columns.len() - 1],\n      row_offset,\n    )?;\n\n    let mut div_out = vec![];\n    for (i, a) in a_vec.iter().enumerate() {\n      let offset = i * self.num_cols_per_op();\n      a.copy_advice(|| \"\", region, self.config.columns[offset], row_offset)?;\n\n      let div_mod = a.value().zip(b.value()).map(|(a, b)| {\n        let b = convert_to_u128(b);\n        // The shift must be divisible by b so that it cancels exactly after division\n        let div_inp_min_val_pos_i64 = div_inp_min_val_pos_i64 / (b as i64) * (b as i64);\n        let div_inp_min_val_pos = F::from(div_inp_min_val_pos_i64 as u64);\n\n        let a_pos = *a + div_inp_min_val_pos;\n        let a = convert_to_u128(&a_pos);\n        // c = (2 * a + b) / (2 * b)\n        let c_pos = a.rounded_div(b);\n        let c = (c_pos as i128 - (div_inp_min_val_pos_i64 as u128 / b) as i128) as i64;\n\n        // r = (2 * a + b) % (2 * b)\n        let rem_floor = (a as i128) - (c_pos * b) as i128;\n        let r = 2 * rem_floor + (b as i128);\n        let r = r as i64;\n        (c, r)\n      });\n\n      let br_split = div_mod.zip(b.value()).map(|((_, r), b)| {\n        let b = convert_to_u128(b) as i64;\n        let val = 2 * b - r;\n        let p1 = val / num_rows;\n        let p0 = val % num_rows;\n        // val = p1 * num_rows + p0\n        (p1, p0)\n      });\n\n      let r_split = div_mod.map(|(_, r)| {\n        let p1 = r / num_rows;\n        let p0 = r % num_rows;\n        // val = p1 * num_rows + p0\n        (p1, p0)\n      });\n\n      let div_cell = region.assign_advice(\n        || \"\",\n        self.config.columns[offset + 1],\n        row_offset,\n        || {\n          div_mod.map(|(c, _)| {\n            let offset = F::from(-div_outp_min_val_i64 as u64);\n            let c = 
F::from((c - div_outp_min_val_i64) as u64);\n            c - offset\n          })\n        },\n      )?;\n      let _mod_cell = region.assign_advice(\n        || \"\",\n        self.config.columns[offset + 2],\n        row_offset,\n        || div_mod.map(|(_, r)| F::from(r as u64)),\n      )?;\n      // Assign 2 * b - r to the next 2 columns\n      let _br_split_cell_1 = region.assign_advice(\n        || \"\",\n        self.config.columns[offset + 3],\n        row_offset,\n        || br_split.map(|(p1, _)| F::from(p1 as u64)),\n      )?;\n      let _br_split_cell_2 = region.assign_advice(\n        || \"\",\n        self.config.columns[offset + 4],\n        row_offset,\n        || br_split.map(|(_, p0)| F::from(p0 as u64)),\n      )?;\n      // Assign r to the next 2 columns\n      let _r_split_cell_1 = region.assign_advice(\n        || \"\",\n        self.config.columns[offset + 5],\n        row_offset,\n        || r_split.map(|(p1, _)| F::from(p1 as u64)),\n      )?;\n      let _r_split_cell_2 = region.assign_advice(\n        || \"\",\n        self.config.columns[offset + 6],\n        row_offset,\n        || r_split.map(|(_, p0)| F::from(p0 as u64)),\n      )?;\n\n      div_out.push(div_cell);\n    }\n\n    Ok(div_out)\n  }\n\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    let mut inps = vec_inputs[0].clone();\n    let initial_len = inps.len();\n\n    // Needed to pad\n    let default = &single_inputs[0];\n    while inps.len() % self.num_inputs_per_row() != 0 {\n      inps.push(&default);\n    }\n\n    let res = self.op_aligned_rows(\n      layouter.namespace(|| \"var_div_big\"),\n      &vec![inps],\n      single_inputs,\n    )?;\n    Ok(res[..initial_len].to_vec())\n  }\n}\n"
  },
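The "big" variant exists because the single lookup table only covers `[0, num_rows)`; here `r` and `2b - r` can exceed that bound, so each is split into two limbs that are range-checked individually. A standalone sketch of the decomposition (the table size `n` is illustrative, not the crate's actual value):

```rust
// Standalone sketch: splitting a value into two limbs base n
// (n = num_rows, the size of the lookup table). Checking
// val = p1 * n + p0 with p0, p1 in [0, n) proves val in [0, n^2).
fn split2(val: u128, n: u128) -> (u128, u128) {
    (val / n, val % n)
}

fn main() {
    let n: u128 = 1 << 16; // illustrative lookup-table size
    let val: u128 = 3_000_000_000; // too big for a single lookup
    let (p1, p0) = split2(val, n);
    assert_eq!(val, p1 * n + p0); // the linear constraint in the gate
    assert!(p0 < n && p1 < n);    // each limb fits one lookup
    assert!(val < n * n);         // the bound the two lookups imply
    println!("{val} = {p1} * {n} + {p0}");
}
```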
  {
    "path": "src/gadgets/var_div_big3.rs",
    "content": "use std::{marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter, Region},\n  halo2curves::ff::PrimeField,\n  plonk::{ConstraintSystem, Error, Expression},\n  poly::Rotation,\n};\nuse rounded_div::RoundedDiv;\n\nuse super::gadget::{convert_to_u128, Gadget, GadgetConfig, GadgetType};\n\npub struct VarDivRoundBig3Chip<F: PrimeField> {\n  config: Rc<GadgetConfig>,\n  _marker: PhantomData<F>,\n}\n\nimpl<F: PrimeField> VarDivRoundBig3Chip<F> {\n  pub fn construct(config: Rc<GadgetConfig>) -> Self {\n    Self {\n      config,\n      _marker: PhantomData,\n    }\n  }\n\n  pub fn num_cols_per_op() -> usize {\n    9\n  }\n\n  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {\n    let columns = gadget_config.columns;\n    let selector = meta.complex_selector();\n    let two = Expression::Constant(F::from(2));\n    let range = Expression::Constant(F::from(gadget_config.num_rows as u64));\n    let range_sq = range.clone() * range.clone();\n\n    let tables = gadget_config.tables;\n    let lookup = tables.get(&GadgetType::InputLookup).unwrap()[0];\n\n    // a | c | r | (2b - r)_2 | (2 b - r)_1 | (2 b - r)_0 | r_2 | r_1 | r_0 | ... | b\n    // a / b = c\n    meta.create_gate(\"var_div_big3_arithm\", |meta| {\n      let s = meta.query_selector(selector);\n      let mut constraints = vec![];\n\n      let b = meta.query_advice(columns[columns.len() - 1], Rotation::cur());\n      for i in 0..(columns.len() - 1) / Self::num_cols_per_op() {\n        let offset = i * Self::num_cols_per_op();\n        // Constrain that (2 * a + b) = (2 * b) * c + r\n        let a = meta.query_advice(columns[offset], Rotation::cur());\n        let c = meta.query_advice(columns[offset + 1], Rotation::cur());\n        let r = meta.query_advice(columns[offset + 2], Rotation::cur());\n\n        let lhs = a.clone() * two.clone() + b.clone();\n        let rhs = b.clone() * two.clone() * c + r.clone();\n        constraints.push(s.clone() * (lhs - rhs));\n\n        // Constrain that (2 * b - r) = br1 * max_val + br0\n        let br2 = meta.query_advice(columns[offset + 3], Rotation::cur());\n        let br1 = meta.query_advice(columns[offset + 4], Rotation::cur());\n        let br0 = meta.query_advice(columns[offset + 5], Rotation::cur());\n        let lhs = b.clone() * two.clone() - r.clone();\n        let rhs = br2 * range_sq.clone() + br1 * range.clone() + br0;\n        constraints.push(s.clone() * (lhs - rhs));\n\n        // Constrains that r = r1 * max_val + r0\n        let r2 = meta.query_advice(columns[offset + 6], Rotation::cur());\n        let r1 = meta.query_advice(columns[offset + 7], Rotation::cur());\n        let r0 = meta.query_advice(columns[offset + 8], Rotation::cur());\n        let lhs = r.clone();\n        let rhs = r2 * range_sq.clone() + r1 * range.clone() + r0;\n        constraints.push(s.clone() * (lhs - rhs));\n      }\n\n      constraints\n    });\n\n    // For var div big, we assume that a, b > 0 and are outputs of the previous layer\n    // r must be constrained to be in [0, b)\n    for i in 0..(columns.len() - 1) / Self::num_cols_per_op() {\n      let offset = i * Self::num_cols_per_op();\n\n      // (2 * b - r)_{1, 0} \\in [0, 2^N)\n      meta.lookup(\"var div big br2\", |meta| {\n        let s = meta.query_selector(selector);\n        let br2 = meta.query_advice(columns[offset + 3], Rotation::cur());\n        vec![(s * br2, lookup)]\n      });\n      meta.lookup(\"var div big br1\", |meta| {\n        let s = 
meta.query_selector(selector);\n        let br1 = meta.query_advice(columns[offset + 4], Rotation::cur());\n        vec![(s * br1, lookup)]\n      });\n      meta.lookup(\"var div big br0\", |meta| {\n        let s = meta.query_selector(selector);\n        let br0 = meta.query_advice(columns[offset + 5], Rotation::cur());\n        vec![(s * br0, lookup)]\n      });\n      // r_{2, 1, 0} \in [0, 2^N)\n      meta.lookup(\"var div big r2\", |meta| {\n        let s = meta.query_selector(selector);\n        let r2 = meta.query_advice(columns[offset + 6], Rotation::cur());\n        vec![(s * r2, lookup)]\n      });\n      meta.lookup(\"var div big r1\", |meta| {\n        let s = meta.query_selector(selector);\n        let r1 = meta.query_advice(columns[offset + 7], Rotation::cur());\n        vec![(s * r1, lookup)]\n      });\n      meta.lookup(\"var div big r0\", |meta| {\n        let s = meta.query_selector(selector);\n        let r0 = meta.query_advice(columns[offset + 8], Rotation::cur());\n        vec![(s * r0, lookup)]\n      });\n    }\n\n    let mut selectors = gadget_config.selectors;\n    selectors.insert(GadgetType::VarDivRoundBig3, vec![selector]);\n\n    GadgetConfig {\n      columns,\n      tables,\n      selectors,\n      ..gadget_config\n    }\n  }\n}\n\nimpl<F: PrimeField> Gadget<F> for VarDivRoundBig3Chip<F> {\n  fn name(&self) -> String {\n    \"VarDivRoundBig3Chip\".to_string()\n  }\n\n  fn num_cols_per_op(&self) -> usize {\n    Self::num_cols_per_op()\n  }\n\n  fn num_inputs_per_row(&self) -> usize {\n    (self.config.columns.len() - 1) / self.num_cols_per_op()\n  }\n\n  fn num_outputs_per_row(&self) -> usize {\n    self.num_inputs_per_row()\n  }\n\n  fn op_row_region(\n    &self,\n    region: &mut Region<F>,\n    row_offset: usize,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    let a_vec = &vec_inputs[0];\n    // single_inputs[0] is the zero constant; it is only used for padding in forward\n    let b = &single_inputs[1];\n\n    let c_shift_base = (-(1_i64 << 62)) as i128;\n    let num_rows = self.config.num_rows as i128;\n\n    if self.config.use_selectors {\n      let selector = self\n        .config\n        .selectors\n        .get(&GadgetType::VarDivRoundBig3)\n        .unwrap()[0];\n      selector.enable(region, row_offset)?;\n    }\n\n    b.copy_advice(\n      || \"\",\n      region,\n      self.config.columns[self.config.columns.len() - 1],\n      row_offset,\n    )?;\n\n    let mut div_out = vec![];\n    for (i, a) in a_vec.iter().enumerate() {\n      let offset = i * self.num_cols_per_op();\n      a.copy_advice(|| \"\", region, self.config.columns[offset], row_offset)?;\n\n      let div_mod = a.value().zip(b.value()).map(|(a, b)| {\n        let b = convert_to_u128(b);\n        // The shift must be divisible by b so that it cancels exactly after division\n        let c_shift = (-c_shift_base) as u128 / b * b;\n        let div_inp_min_val_pos = F::from(c_shift as u64);\n\n        let a_pos = *a + div_inp_min_val_pos;\n        let a = convert_to_u128(&a_pos);\n        // c = (2 * a + b) / (2 * b)\n        let c_pos = a.rounded_div(b);\n        let c = c_pos as i128 - (c_shift / b) as i128;\n\n        // r = (2 * a + b) % (2 * b)\n        let rem_floor = (a as i128) - (c_pos * b) as i128;\n        let r = 2 * rem_floor + (b as i128);\n        (c, r)\n      });\n\n      let br_split = div_mod.zip(b.value()).map(|((_, r), b)| {\n        let b = convert_to_u128(b) as i128;\n        let val = 2 * b - r;\n        let p2 = val / (num_rows * num_rows);\n        let p1 = (val % 
(num_rows * num_rows)) / num_rows;\n        let p0 = val % num_rows;\n        // val = p2 * num_rows^2 + p1 * num_rows + p0\n        (p2, p1, p0)\n      });\n\n      let r_split = div_mod.map(|(_, r)| {\n        let p2 = r / (num_rows * num_rows);\n        let p1 = (r % (num_rows * num_rows)) / num_rows;\n        let p0 = r % num_rows;\n        // val = p2 * num_rows^2 + p1 * num_rows + p0\n        (p2, p1, p0)\n      });\n\n      let div_cell = region.assign_advice(\n        || \"\",\n        self.config.columns[offset + 1],\n        row_offset,\n        || {\n          div_mod.map(|(c, _)| {\n            let offset = F::from(-c_shift_base as u64);\n            let c = F::from((c - c_shift_base) as u64);\n            c - offset\n          })\n        },\n      )?;\n      let _mod_cell = region.assign_advice(\n        || \"\",\n        self.config.columns[offset + 2],\n        row_offset,\n        || div_mod.map(|(_, r)| F::from(r as u64)),\n      )?;\n      // Assign 2 * b - r to the next 3 columns\n      let _br_split_cell_2 = region.assign_advice(\n        || \"\",\n        self.config.columns[offset + 3],\n        row_offset,\n        || br_split.map(|(p2, _, _)| F::from(p2 as u64)),\n      )?;\n      let _br_split_cell_1 = region.assign_advice(\n        || \"\",\n        self.config.columns[offset + 4],\n        row_offset,\n        || br_split.map(|(_, p1, _)| F::from(p1 as u64)),\n      )?;\n      let _br_split_cell_0 = region.assign_advice(\n        || \"\",\n        self.config.columns[offset + 5],\n        row_offset,\n        || br_split.map(|(_, _, p0)| F::from(p0 as u64)),\n      )?;\n      // Assign r to the next 3 columns\n      let _r_split_cell_2 = region.assign_advice(\n        || \"\",\n        self.config.columns[offset + 6],\n        row_offset,\n        || r_split.map(|(p2, _, _)| F::from(p2 as u64)),\n      )?;\n      let _r_split_cell_1 = region.assign_advice(\n        || \"\",\n        self.config.columns[offset + 7],\n        row_offset,\n        || r_split.map(|(_, p1, _)| F::from(p1 as u64)),\n      )?;\n      let _r_split_cell_0 = region.assign_advice(\n        || \"\",\n        self.config.columns[offset + 8],\n        row_offset,\n        || r_split.map(|(_, _, p0)| F::from(p0 as u64)),\n      )?;\n\n      div_out.push(div_cell);\n    }\n\n    Ok(div_out)\n  }\n\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    single_inputs: &Vec<&AssignedCell<F, F>>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    let mut inps = vec_inputs[0].clone();\n    let initial_len = inps.len();\n\n    // Pad the row out with the zero constant: round(0 / b) = 0, so padding is harmless\n    let default = single_inputs[0];\n    while inps.len() % self.num_inputs_per_row() != 0 {\n      inps.push(default);\n    }\n\n    let res = self.op_aligned_rows(\n      layouter.namespace(|| \"var_div_big3\"),\n      &vec![inps],\n      single_inputs,\n    )?;\n    Ok(res[..initial_len].to_vec())\n  }\n}\n"
  },
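The three-limb chip replaces the config-driven shift with a fixed `c_shift_base = 2^62`, but the trick for signed dividends is the same in all three chips: shift `a` into non-negative territory by a multiple of `b`, divide, then subtract the shifted-in quotient. A standalone `i128` sketch, assuming `b > 0`:

```rust
// Standalone sketch of the shift trick for signed dividends: round the
// shift down to a multiple of b so that it cancels exactly.
fn rounded_div_signed(a: i128, b: i128) -> i128 {
    assert!(b > 0);
    let shift_base: i128 = 1 << 62;
    let shift = shift_base / b * b; // divisible by b by construction
    let a_pos = (a + shift) as u128; // non-negative after shifting
    let b_u = b as u128;
    let c_pos = (2 * a_pos + b_u) / (2 * b_u); // rounded division, as in the gate
    c_pos as i128 - shift / b // undo the shift on the quotient
}

fn main() {
    assert_eq!(rounded_div_signed(7, 3), 2);
    assert_eq!(rounded_div_signed(-7, 3), -2);
    assert_eq!(rounded_div_signed(-8, 3), -3);
    assert_eq!(rounded_div_signed(5, 2), 3); // exact halves round up
}
```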
  {
    "path": "src/gadgets.rs",
    "content": "pub mod add_pairs;\npub mod adder;\npub mod bias_div_floor_relu6;\npub mod bias_div_round_relu6;\npub mod dot_prod;\npub mod gadget;\npub mod input_lookup;\npub mod max;\npub mod mul_pairs;\npub mod sqrt_big;\npub mod square;\npub mod squared_diff;\npub mod sub_pairs;\npub mod update;\npub mod var_div;\npub mod var_div_big;\npub mod var_div_big3;\n\n// Generics\npub mod nonlinear;\n"
  },
  {
    "path": "src/layers/arithmetic/add.rs",
    "content": "use std::{collections::HashMap, rc::Rc, vec};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter},\n  halo2curves::ff::PrimeField,\n  plonk::Error,\n};\nuse ndarray::{Array, IxDyn};\n\nuse crate::{\n  gadgets::{\n    add_pairs::AddPairsChip,\n    gadget::{Gadget, GadgetConfig, GadgetType},\n    nonlinear::relu::ReluChip,\n  },\n  layers::layer::{ActivationType, AssignedTensor, CellRc, GadgetConsumer},\n};\n\nuse super::{\n  super::layer::{Layer, LayerConfig},\n  Arithmetic,\n};\n\n#[derive(Clone, Debug)]\npub struct AddChip {}\n\nimpl AddChip {\n  fn get_activation(&self, layer_params: &Vec<i64>) -> ActivationType {\n    let activation = layer_params[0];\n    match activation {\n      0 => ActivationType::None,\n      1 => ActivationType::Relu,\n      _ => panic!(\"Unsupported activation type for add\"),\n    }\n  }\n}\n\nimpl<F: PrimeField> Arithmetic<F> for AddChip {\n  fn gadget_forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    constants: &Vec<&AssignedCell<F, F>>,\n    gadget_config: Rc<GadgetConfig>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    let add_pairs_chip = AddPairsChip::<F>::construct(gadget_config);\n    let out = add_pairs_chip.forward(layouter.namespace(|| \"add chip\"), &vec_inputs, constants)?;\n    Ok(out)\n  }\n}\n\nimpl<F: PrimeField> Layer<F> for AddChip {\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    constants: &HashMap<i64, CellRc<F>>,\n    gadget_config: Rc<GadgetConfig>,\n    layer_config: &LayerConfig,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    let activation = self.get_activation(&layer_config.layer_params);\n\n    // Do the addition\n    let (out, out_shape) = self.arithmetic_forward(\n      layouter.namespace(|| \"\"),\n      tensors,\n      constants,\n      gadget_config.clone(),\n    )?;\n\n    // Do the fused activation\n    let out = if activation == ActivationType::Relu {\n      let zero = constants.get(&0).unwrap();\n      let single_inps = vec![zero.as_ref()];\n\n      let out = out.iter().map(|x| x.as_ref()).collect::<Vec<_>>();\n\n      let relu_chip = ReluChip::<F>::construct(gadget_config);\n      let out = relu_chip.forward(layouter.namespace(|| \"relu\"), &vec![out], &single_inps)?;\n      let out = out.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>();\n      out\n    } else if activation == ActivationType::None {\n      out\n    } else {\n      panic!(\"Unsupported activation type for add\");\n    };\n\n    let out = Array::from_shape_vec(IxDyn(out_shape.as_slice()), out).unwrap();\n\n    Ok(vec![out])\n  }\n}\n\nimpl GadgetConsumer for AddChip {\n  fn used_gadgets(&self, layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {\n    let activation = self.get_activation(&layer_params);\n    let mut outp = vec![GadgetType::AddPairs];\n\n    match activation {\n      ActivationType::Relu => outp.push(GadgetType::Relu),\n      ActivationType::None => (),\n      _ => panic!(\"Unsupported activation type for add\"),\n    }\n    outp\n  }\n}\n"
  },
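Note that `AddChip` has no division step: both operands are quantized at the same scale factor, so their sum is already at that scale (contrast `MulChip` below). A standalone fixed-point sketch of the layer's arithmetic, including the `layer_params[0]` activation flag; the scale factor here is illustrative:

```rust
// Standalone fixed-point sketch: add two tensors quantized at the same
// scale, then apply the activation selected by a flag (0 = none,
// 1 = ReLU), mirroring layer_params[0] in AddChip.
const SF: i64 = 1 << 10; // illustrative scale factor

fn add_layer(a: &[i64], b: &[i64], activation: i64) -> Vec<i64> {
    a.iter()
        .zip(b)
        .map(|(x, y)| {
            let s = x + y; // same scale on both sides: no division by SF
            match activation {
                0 => s,
                1 => s.max(0), // fused ReLU
                _ => panic!("Unsupported activation type for add"),
            }
        })
        .collect()
}

fn main() {
    let a = [(3 * SF) / 2];  // 1.5 in fixed point
    let b = [(-9 * SF) / 4]; // -2.25 in fixed point
    assert_eq!(add_layer(&a, &b, 0), vec![-(3 * SF) / 4]); // -0.75
    assert_eq!(add_layer(&a, &b, 1), vec![0]);             // ReLU clamps
}
```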
  {
    "path": "src/layers/arithmetic/div_var.rs",
    "content": "use std::{collections::HashMap, rc::Rc, vec};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter},\n  halo2curves::ff::PrimeField,\n  plonk::Error,\n};\nuse ndarray::{Array, IxDyn};\n\nuse crate::{\n  gadgets::{\n    gadget::{Gadget, GadgetConfig, GadgetType},\n    mul_pairs::MulPairsChip,\n    var_div::VarDivRoundChip,\n  },\n  layers::layer::{AssignedTensor, CellRc, GadgetConsumer, Layer},\n};\n\nuse super::Arithmetic;\n\npub struct DivVarChip {}\n\n// TODO: hack. Used for multiplying by the scale factor\nimpl<F: PrimeField> Arithmetic<F> for DivVarChip {\n  fn gadget_forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    constants: &Vec<&AssignedCell<F, F>>,\n    gadget_config: Rc<GadgetConfig>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    let mul_pairs_chip = MulPairsChip::<F>::construct(gadget_config.clone());\n\n    let out = mul_pairs_chip.forward(\n      layouter.namespace(|| \"mul pairs chip\"),\n      &vec_inputs,\n      constants,\n    )?;\n    Ok(out)\n  }\n}\n\nimpl<F: PrimeField> Layer<F> for DivVarChip {\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    constants: &HashMap<i64, CellRc<F>>,\n    gadget_config: Rc<GadgetConfig>,\n    _layer_config: &crate::layers::layer::LayerConfig,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    assert_eq!(tensors.len(), 2);\n    // TODO: We only support dividing by a single number for now\n    assert_eq!(tensors[1].shape().len(), 1);\n    assert_eq!(tensors[1].shape()[0], 1);\n\n    let sf = constants\n      .get(&(gadget_config.scale_factor as i64))\n      .unwrap()\n      .as_ref();\n\n    let sf_tensor = Array::from_shape_vec(IxDyn(&[1]), vec![Rc::new(sf.clone())]).unwrap();\n\n    // out = inp * SF\n    let (out, out_shape) = self.arithmetic_forward(\n      layouter.namespace(|| \"\"),\n      &vec![tensors[0].clone(), sf_tensor],\n      constants,\n      gadget_config.clone(),\n    )?;\n\n    let var_div_chip = VarDivRoundChip::<F>::construct(gadget_config.clone());\n    let div = tensors[1].iter().next().unwrap().as_ref();\n    let zero = constants.get(&0).unwrap().as_ref();\n    let single_inputs = vec![zero, div];\n    let out = out.iter().map(|x| x.as_ref()).collect::<Vec<_>>();\n    let out = var_div_chip.forward(layouter.namespace(|| \"mul div\"), &vec![out], &single_inputs)?;\n\n    let out = out.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>();\n    let out = Array::from_shape_vec(IxDyn(out_shape.as_slice()), out).unwrap();\n    Ok(vec![out])\n  }\n}\n\nimpl GadgetConsumer for DivVarChip {\n  fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {\n    vec![\n      GadgetType::MulPairs,\n      GadgetType::VarDivRound,\n      GadgetType::InputLookup,\n    ]\n  }\n}\n"
  },
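The `out = inp * SF` step above is what keeps the result in fixed point: `inp` and the divisor both carry a factor of SF, so dividing directly would cancel it. A standalone sketch of the arithmetic; SF and the rounding helper are illustrative stand-ins for the gadget:

```rust
// Standalone sketch of DivVarChip's arithmetic: with values quantized
// as x_q = round(x * SF), computing round((x_q * SF) / y_q) returns
// the quotient x / y back at scale SF.
const SF: i64 = 1 << 10;

fn rounded_div(a: i64, b: i64) -> i64 {
    (2 * a + b).div_euclid(2 * b) // round-half-up division for b > 0
}

fn main() {
    let (x, y) = (3.0_f64, 4.0_f64);
    let x_q = (x * SF as f64).round() as i64;
    let y_q = (y * SF as f64).round() as i64;

    // out = (x_q * SF) / y_q  ==>  (x / y) at scale SF
    let out = rounded_div(x_q * SF, y_q);
    assert_eq!(out, (x / y * SF as f64).round() as i64); // 0.75 * 1024 = 768
    println!("{x} / {y} -> {out} at scale {SF}");
}
```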
  {
    "path": "src/layers/arithmetic/mul.rs",
    "content": "use std::{collections::HashMap, rc::Rc, vec};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter},\n  halo2curves::ff::PrimeField,\n  plonk::Error,\n};\nuse ndarray::{Array, IxDyn};\n\nuse crate::{\n  gadgets::{\n    gadget::{Gadget, GadgetConfig, GadgetType},\n    mul_pairs::MulPairsChip,\n    var_div::VarDivRoundChip,\n  },\n  layers::layer::{AssignedTensor, CellRc, GadgetConsumer},\n};\n\nuse super::{\n  super::layer::{Layer, LayerConfig},\n  Arithmetic,\n};\n\n#[derive(Clone, Debug)]\npub struct MulChip {}\n\nimpl<F: PrimeField> Arithmetic<F> for MulChip {\n  fn gadget_forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    constants: &Vec<&AssignedCell<F, F>>,\n    gadget_config: Rc<GadgetConfig>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    let mul_pairs_chip = MulPairsChip::<F>::construct(gadget_config.clone());\n\n    let out = mul_pairs_chip.forward(\n      layouter.namespace(|| \"mul pairs chip\"),\n      &vec_inputs,\n      constants,\n    )?;\n    Ok(out)\n  }\n}\n\n// FIXME: move this + add to an arithmetic layer\nimpl<F: PrimeField> Layer<F> for MulChip {\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    constants: &HashMap<i64, CellRc<F>>,\n    gadget_config: Rc<GadgetConfig>,\n    _layer_config: &LayerConfig,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    let (out, out_shape) = self.arithmetic_forward(\n      layouter.namespace(|| \"\"),\n      tensors,\n      constants,\n      gadget_config.clone(),\n    )?;\n\n    let var_div_chip = VarDivRoundChip::<F>::construct(gadget_config.clone());\n    let div = constants\n      .get(&(gadget_config.scale_factor as i64))\n      .unwrap()\n      .as_ref();\n    let zero = constants.get(&0).unwrap().as_ref();\n    let single_inputs = vec![zero, div];\n    let out = out.iter().map(|x| x.as_ref()).collect::<Vec<_>>();\n    let out = var_div_chip.forward(layouter.namespace(|| \"mul div\"), &vec![out], &single_inputs)?;\n\n    let out = out.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>();\n    let out = Array::from_shape_vec(IxDyn(out_shape.as_slice()), out).unwrap();\n    Ok(vec![out])\n  }\n}\n\nimpl GadgetConsumer for MulChip {\n  fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {\n    vec![\n      GadgetType::MulPairs,\n      GadgetType::VarDivRound,\n      GadgetType::InputLookup,\n    ]\n  }\n}\n"
  },
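`MulChip` needs the `VarDivRoundChip` step for the opposite reason: the elementwise product of two scale-SF values carries SF squared, so it is divided once by SF to renormalize. A standalone sketch:

```rust
// Standalone sketch of MulChip's arithmetic: the product of two
// scale-SF values is at scale SF^2, so one rounded division by SF
// brings it back to scale SF.
const SF: i64 = 1 << 10;

fn rounded_div(a: i64, b: i64) -> i64 {
    (2 * a + b).div_euclid(2 * b)
}

fn main() {
    let a_q = (1.5 * SF as f64) as i64;  // 1.5 at scale SF
    let b_q = (0.25 * SF as f64) as i64; // 0.25 at scale SF
    let prod = a_q * b_q;                // 0.375 at scale SF^2
    let out = rounded_div(prod, SF);     // back to scale SF
    assert_eq!(out, (0.375 * SF as f64) as i64); // 384
}
```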
  {
    "path": "src/layers/arithmetic/sub.rs",
    "content": "use std::{collections::HashMap, rc::Rc, vec};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter},\n  halo2curves::ff::PrimeField,\n  plonk::Error,\n};\nuse ndarray::{Array, IxDyn};\n\nuse crate::{\n  gadgets::{\n    gadget::{Gadget, GadgetConfig, GadgetType},\n    sub_pairs::SubPairsChip,\n  },\n  layers::layer::{AssignedTensor, CellRc, GadgetConsumer},\n};\n\nuse super::{\n  super::layer::{Layer, LayerConfig},\n  Arithmetic,\n};\n\n#[derive(Clone, Debug)]\npub struct SubChip {}\n\nimpl<F: PrimeField> Arithmetic<F> for SubChip {\n  fn gadget_forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    constants: &Vec<&AssignedCell<F, F>>,\n    gadget_config: Rc<GadgetConfig>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    let sub_pairs_chip = SubPairsChip::<F>::construct(gadget_config);\n    let out = sub_pairs_chip.forward(layouter.namespace(|| \"sub chip\"), &vec_inputs, constants)?;\n    Ok(out)\n  }\n}\n\nimpl<F: PrimeField> Layer<F> for SubChip {\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    constants: &HashMap<i64, CellRc<F>>,\n    gadget_config: Rc<GadgetConfig>,\n    _layer_config: &LayerConfig,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    let (out, out_shape) = self.arithmetic_forward(\n      layouter.namespace(|| \"\"),\n      tensors,\n      constants,\n      gadget_config.clone(),\n    )?;\n    let out = Array::from_shape_vec(IxDyn(out_shape.as_slice()), out).unwrap();\n\n    Ok(vec![out])\n  }\n}\n\nimpl GadgetConsumer for SubChip {\n  fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {\n    vec![GadgetType::SubPairs]\n  }\n}\n"
  },
  {
    "path": "src/layers/arithmetic.rs",
    "content": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter},\n  halo2curves::ff::PrimeField,\n  plonk::Error,\n};\n\nuse crate::{gadgets::gadget::GadgetConfig, utils::helpers::broadcast};\n\nuse super::layer::{AssignedTensor, CellRc};\n\npub mod add;\npub mod div_var;\npub mod mul;\npub mod sub;\n\npub trait Arithmetic<F: PrimeField> {\n  fn gadget_forward(\n    &self,\n    layouter: impl Layouter<F>,\n    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,\n    constants: &Vec<&AssignedCell<F, F>>,\n    gadget_config: Rc<GadgetConfig>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error>;\n\n  fn arithmetic_forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    constants: &HashMap<i64, CellRc<F>>,\n    gadget_config: Rc<GadgetConfig>,\n  ) -> Result<(Vec<CellRc<F>>, Vec<usize>), Error> {\n    assert_eq!(tensors.len(), 2);\n    // println!(\"tensors: {:?} {:?}\", tensors[0].shape(), tensors[1].shape());\n    let (inp1, inp2) = broadcast(&tensors[0], &tensors[1]);\n    let out_shape = inp1.shape().clone();\n    assert_eq!(inp1.shape(), inp2.shape());\n\n    let zero = constants.get(&0).unwrap().as_ref();\n\n    let inp1_vec = inp1.iter().map(|x| x.as_ref()).collect::<Vec<_>>();\n    let inp2_vec = inp2.iter().map(|x| x.as_ref()).collect::<Vec<_>>();\n    let vec_inputs = vec![inp1_vec, inp2_vec];\n    let constants = vec![zero];\n    let out = self.gadget_forward(\n      layouter.namespace(|| \"\"),\n      &vec_inputs,\n      &constants,\n      gadget_config.clone(),\n    )?;\n    let out = out.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>();\n    Ok((out, out_shape.to_vec()))\n  }\n}\n"
  },
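`arithmetic_forward` relies on the crate's `broadcast` helper to align the two operand shapes before the pairwise gadget runs. A standalone sketch of the shape contract, using ndarray's built-in broadcasting in place of that helper:

```rust
// Standalone sketch of the broadcasting contract in arithmetic_forward:
// both operands are expanded to a common shape, then combined
// elementwise. Uses ndarray's own broadcast for brevity.
use ndarray::{array, Array, IxDyn};

fn main() {
    let a = array![[1i64, 2, 3], [4, 5, 6]].into_dyn(); // shape [2, 3]
    let b = array![10i64, 20, 30].into_dyn();           // shape [3]

    // Expand b to a's shape, then add elementwise.
    let b_bc = b.broadcast(a.raw_dim()).unwrap().to_owned();
    assert_eq!(a.shape(), b_bc.shape());
    let out: Array<i64, IxDyn> = &a + &b_bc;
    assert_eq!(out, array![[11i64, 22, 33], [14, 25, 36]].into_dyn());
}
```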
  {
    "path": "src/layers/averager.rs",
    "content": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter},\n  halo2curves::ff::PrimeField,\n  plonk::Error,\n};\n\nuse crate::gadgets::gadget::Gadget;\nuse crate::gadgets::{adder::AdderChip, gadget::GadgetConfig, var_div::VarDivRoundChip};\n\nuse super::layer::{AssignedTensor, CellRc, LayerConfig};\n\npub trait Averager<F: PrimeField> {\n  fn splat(&self, input: &AssignedTensor<F>, layer_config: &LayerConfig) -> Vec<Vec<CellRc<F>>>;\n\n  fn get_div_val(\n    &self,\n    layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    gadget_config: Rc<GadgetConfig>,\n    layer_config: &LayerConfig,\n  ) -> Result<AssignedCell<F, F>, Error>;\n\n  fn avg_forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    constants: &HashMap<i64, CellRc<F>>,\n    gadget_config: Rc<GadgetConfig>,\n    layer_config: &LayerConfig,\n  ) -> Result<Vec<CellRc<F>>, Error> {\n    // Due to Mean BS\n    // assert_eq!(tensors.len(), 1);\n    let zero = constants.get(&0).unwrap().as_ref();\n\n    let inp = &tensors[0];\n    let splat_inp = self.splat(inp, layer_config);\n\n    let adder_chip = AdderChip::<F>::construct(gadget_config.clone());\n    let single_inputs = vec![zero];\n    let mut added = vec![];\n    for i in 0..splat_inp.len() {\n      let tmp = splat_inp[i].iter().map(|x| x.as_ref()).collect::<Vec<_>>();\n      let tmp = adder_chip.forward(\n        layouter.namespace(|| format!(\"average {}\", i)),\n        &vec![tmp],\n        &single_inputs,\n      )?;\n      added.push(tmp[0].clone());\n    }\n\n    let div = self.get_div_val(\n      layouter.namespace(|| \"average div\"),\n      tensors,\n      gadget_config.clone(),\n      layer_config,\n    )?;\n    let var_div_chip = VarDivRoundChip::<F>::construct(gadget_config.clone());\n\n    let single_inputs = vec![zero, &div];\n    let added = added.iter().map(|x| x).collect::<Vec<_>>();\n    let dived = var_div_chip.forward(\n      layouter.namespace(|| \"average div\"),\n      &vec![added],\n      &single_inputs,\n    )?;\n    let dived = dived.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>();\n\n    Ok(dived)\n  }\n}\n"
  },
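Every `Averager` implementation reduces to the same three steps: splat the input into groups, sum each group with the adder gadget, and divide the sums by a single divisor with `VarDivRoundChip`. A standalone integer sketch of that pipeline:

```rust
// Standalone sketch of the Averager pipeline: splat -> sum -> rounded
// divide. Each inner Vec plays the role of one splatted group.
fn rounded_div(a: i64, b: i64) -> i64 {
    (2 * a + b).div_euclid(2 * b)
}

fn avg_forward(splat_inp: &[Vec<i64>], div: i64) -> Vec<i64> {
    splat_inp
        .iter()
        .map(|group| rounded_div(group.iter().sum(), div)) // adder, then var-div
        .collect()
}

fn main() {
    // Two 2x2 pooling windows; div = window size = 4.
    let groups = vec![vec![1, 2, 3, 4], vec![10, 10, 11, 13]];
    assert_eq!(avg_forward(&groups, 4), vec![3, 11]); // round(2.5) = 3, round(11) = 11
}
```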
  {
    "path": "src/layers/avg_pool_2d.rs",
    "content": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter, Value},\n  halo2curves::ff::PrimeField,\n  plonk::Error,\n};\nuse ndarray::{Array, IxDyn};\n\nuse crate::{\n  gadgets::gadget::{GadgetConfig, GadgetType},\n  layers::max_pool_2d::MaxPool2DChip,\n};\n\nuse super::{\n  averager::Averager,\n  layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig},\n};\n\npub struct AvgPool2DChip {}\n\nimpl<F: PrimeField> Averager<F> for AvgPool2DChip {\n  fn splat(&self, input: &AssignedTensor<F>, layer_config: &LayerConfig) -> Vec<Vec<CellRc<F>>> {\n    assert_eq!(input.shape().len(), 4);\n    // Don't support batch size > 1 yet\n    assert_eq!(input.shape()[0], 1);\n\n    // TODO: refactor this\n    MaxPool2DChip::splat(input, layer_config).unwrap()\n  }\n\n  fn get_div_val(\n    &self,\n    mut layouter: impl Layouter<F>,\n    _tensors: &Vec<AssignedTensor<F>>,\n    gadget_config: Rc<GadgetConfig>,\n    layer_config: &LayerConfig,\n  ) -> Result<AssignedCell<F, F>, Error> {\n    // FIXME: this needs to be revealed\n    let div = layer_config.layer_params[0] * layer_config.layer_params[1];\n    let div = F::from(div as u64);\n    let div = layouter\n      .assign_region(\n        || \"avg pool 2d div\",\n        |mut region| {\n          let div = region\n            .assign_advice(\n              || \"avg pool 2d div\",\n              gadget_config.columns[0],\n              0,\n              || Value::known(div),\n            )\n            .unwrap();\n          Ok(div)\n        },\n      )\n      .unwrap();\n\n    Ok(div)\n  }\n}\n\nimpl<F: PrimeField> Layer<F> for AvgPool2DChip {\n  fn forward(\n    &self,\n    layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    constants: &HashMap<i64, CellRc<F>>,\n    gadget_config: Rc<GadgetConfig>,\n    layer_config: &LayerConfig,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    let dived = self\n      .avg_forward(layouter, tensors, constants, gadget_config, layer_config)\n      .unwrap();\n\n    let inp = &tensors[0];\n    // TODO: refactor this\n    let out_xy = MaxPool2DChip::shape(inp, layer_config);\n    let out_shape = vec![1, out_xy.0, out_xy.1, inp.shape()[3]];\n    println!(\"out_shape: {:?}\", out_shape);\n\n    let out = Array::from_shape_vec(IxDyn(&out_shape), dived).unwrap();\n    Ok(vec![out])\n  }\n}\n\nimpl GadgetConsumer for AvgPool2DChip {\n  fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {\n    vec![\n      GadgetType::Adder,\n      GadgetType::VarDivRound,\n      GadgetType::InputLookup,\n    ]\n  }\n}\n"
  },
  {
    "path": "src/layers/batch_mat_mul.rs",
    "content": "use std::{collections::HashMap, marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};\nuse ndarray::{Array, Axis, IxDyn};\n\nuse crate::{\n  gadgets::gadget::{GadgetConfig, GadgetType},\n  layers::fully_connected::FullyConnectedConfig,\n};\n\nuse super::{\n  fully_connected::FullyConnectedChip,\n  layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig},\n};\n\npub struct BatchMatMulChip {}\n\nimpl<F: PrimeField> Layer<F> for BatchMatMulChip {\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    constants: &HashMap<i64, CellRc<F>>,\n    gadget_config: Rc<GadgetConfig>,\n    layer_config: &LayerConfig,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    let inp1 = &tensors[0];\n    let inp2 = &tensors[1];\n    println!(\"inp1: {:?}\", inp1.shape());\n    println!(\"inp2: {:?}\", inp2.shape());\n\n    assert_eq!(inp1.ndim(), 3);\n    assert_eq!(inp2.ndim(), 3);\n    assert_eq!(inp1.shape()[0], inp2.shape()[0]);\n\n    let adj_y = layer_config.layer_params[1] == 1;\n    if adj_y {\n      assert_eq!(inp1.shape()[2], inp2.shape()[2]);\n    } else {\n      assert_eq!(inp1.shape()[2], inp2.shape()[1]);\n    }\n\n    let out_shape = if adj_y {\n      vec![inp1.shape()[0], inp1.shape()[1], inp2.shape()[1]]\n    } else {\n      vec![inp1.shape()[0], inp1.shape()[1], inp2.shape()[2]]\n    };\n\n    let fc_chip = FullyConnectedChip::<F> {\n      _marker: PhantomData,\n      config: FullyConnectedConfig::construct(true),\n    };\n\n    let mut outp: Vec<CellRc<F>> = vec![];\n    for i in 0..inp1.shape()[0] {\n      let inp1_slice = inp1.index_axis(Axis(0), i).to_owned();\n      // Due to tensorflow BS, transpose the \"weights\"\n      let inp2_slice = if adj_y {\n        inp2.index_axis(Axis(0), i).to_owned()\n      } else {\n        inp2.index_axis(Axis(0), i).t().to_owned()\n      };\n      println!(\"inp1_slice: {:?}\", inp1_slice.shape());\n      println!(\"inp2_slice: {:?}\", inp2_slice.shape());\n      // Batch MM doesn't have a fused activation, so insert it here\n      // TODO: consider putting this in the converter?\n      let tmp_config = LayerConfig {\n        layer_params: vec![0],\n        ..layer_config.clone()\n      };\n      let outp_slice = fc_chip.forward(\n        layouter.namespace(|| \"\"),\n        &vec![inp1_slice, inp2_slice],\n        constants,\n        gadget_config.clone(),\n        &tmp_config,\n      )?;\n      outp.extend(outp_slice[0].iter().map(|x| x.clone()).collect::<Vec<_>>());\n    }\n\n    let outp = Array::from_shape_vec(IxDyn(out_shape.as_slice()), outp).unwrap();\n    Ok(vec![outp])\n  }\n}\n\nimpl GadgetConsumer for BatchMatMulChip {\n  fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {\n    vec![\n      GadgetType::Adder,\n      GadgetType::DotProduct,\n      GadgetType::VarDivRound,\n      GadgetType::InputLookup,\n    ]\n  }\n}\n"
  },
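The transpose dance above comes from `FullyConnectedChip` expecting its "weights" operand pre-transposed, so the non-`adj_y` branch transposes `inp2` and the `adj_y` branch passes it through. A standalone ndarray sketch of the two shape cases for one batch slice:

```rust
// Standalone sketch of the adj_y shape logic in BatchMatMulChip:
//   out[i] = inp1[i] x inp2[i]      (adj_y = false)
//   out[i] = inp1[i] x inp2[i]^T    (adj_y = true)
use ndarray::array;

fn main() {
    let inp1 = array![[1i64, 2], [3, 4]]; // one batch slice, [M, K] = [2, 2]
    let inp2 = array![[5i64, 6], [7, 8]]; // [K, N] or [N, K] depending on adj_y

    // adj_y = false: plain matmul, output [M, N]
    let out = inp1.dot(&inp2);
    assert_eq!(out, array![[19, 22], [43, 50]]);

    // adj_y = true: contract against inp2 transposed
    let out_t = inp1.dot(&inp2.t());
    assert_eq!(out_t, array![[17, 23], [39, 53]]);
}
```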
  {
    "path": "src/layers/conv2d.rs",
    "content": "// TODO: Speed up Depthwise operations with Freivald's algorithm\n\nuse std::{collections::HashMap, marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter},\n  halo2curves::ff::PrimeField,\n  plonk::Error,\n};\nuse ndarray::{Array, IxDyn};\n\nuse crate::{\n  gadgets::{\n    bias_div_round_relu6::BiasDivRoundRelu6Chip,\n    dot_prod::DotProductChip,\n    gadget::{Gadget, GadgetConfig, GadgetType},\n    nonlinear::relu::ReluChip,\n  },\n  layers::{\n    fully_connected::{FullyConnectedChip, FullyConnectedConfig},\n    shape::pad::pad,\n  },\n};\n\nuse super::layer::{ActivationType, AssignedTensor, GadgetConsumer, Layer, LayerConfig};\n\n#[derive(Default, Clone, Copy, Eq, PartialEq)]\npub enum PaddingEnum {\n  #[default]\n  Same,\n  Valid,\n}\n\n#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]\npub enum ConvLayerEnum {\n  #[default]\n  Conv2D,\n  DepthwiseConv2D,\n}\n\npub struct Conv2DConfig {\n  pub conv_type: ConvLayerEnum,\n  pub padding: PaddingEnum,\n  pub activation: ActivationType,\n  pub stride: (usize, usize),\n}\n\npub struct Conv2DChip<F: PrimeField> {\n  pub config: LayerConfig,\n  pub _marker: PhantomData<F>,\n}\n\nimpl<F: PrimeField> Conv2DChip<F> {\n  // TODO: this is horrible. What's the best way to fix this?\n  pub fn param_vec_to_config(layer_params: Vec<i64>) -> Conv2DConfig {\n    let conv_type = match layer_params[0] {\n      0 => ConvLayerEnum::Conv2D,\n      1 => ConvLayerEnum::DepthwiseConv2D,\n      _ => panic!(\"Invalid conv type\"),\n    };\n    let padding = match layer_params[1] {\n      0 => PaddingEnum::Same,\n      1 => PaddingEnum::Valid,\n      _ => panic!(\"Invalid padding\"),\n    };\n    let activation = match layer_params[2] {\n      0 => ActivationType::None,\n      1 => ActivationType::Relu,\n      3 => ActivationType::Relu6,\n      _ => panic!(\"Invalid activation type\"),\n    };\n    let stride = (layer_params[3] as usize, layer_params[4] as usize);\n    Conv2DConfig {\n      conv_type,\n      padding,\n      activation,\n      stride,\n    }\n  }\n\n  pub fn get_padding(\n    h: usize,\n    w: usize,\n    si: usize,\n    sj: usize,\n    ci: usize,\n    cj: usize,\n  ) -> ((usize, usize), (usize, usize)) {\n    let ph = if h % si == 0 {\n      (ci as i64 - sj as i64).max(0)\n    } else {\n      (ci as i64 - (h % si) as i64).max(0)\n    } as usize;\n    let pw = if w % sj == 0 {\n      (cj as i64 - sj as i64).max(0)\n    } else {\n      (cj as i64 - (w % sj) as i64).max(0)\n    } as usize;\n    ((ph / 2, ph - ph / 2), (pw / 2, pw - pw / 2))\n  }\n\n  pub fn out_hw(\n    h: usize,\n    w: usize,\n    si: usize,\n    sj: usize,\n    ch: usize,\n    cw: usize,\n    padding: PaddingEnum,\n  ) -> (usize, usize) {\n    /*\n    println!(\n      \"H: {}, W: {}, SI: {}, SJ: {}, CH: {}, CW: {}\",\n      h, w, si, sj, ch, cw\n    );\n    */\n    // https://iq.opengenus.org/same-and-valid-padding/\n    match padding {\n      PaddingEnum::Same => ((h + si - 1) / si, (w + sj - 1) / sj),\n      // TODO: the above is probably correct, but we always have valid paddings\n      // PaddingEnum::Same => (h / si, w / sj),\n      PaddingEnum::Valid => ((h - ch) / si + 1, (w - cw) / sj + 1),\n    }\n  }\n\n  pub fn splat<G: Clone>(\n    &self,\n    tensors: &Vec<Array<Rc<G>, IxDyn>>,\n    zero: Rc<G>,\n  ) -> (Vec<Vec<Rc<G>>>, Vec<Vec<Rc<G>>>, Vec<Rc<G>>) {\n    // assert_eq!(tensors.len(), 3);\n    assert!(tensors.len() <= 3);\n\n    let conv_config = &Self::param_vec_to_config(self.config.layer_params.clone());\n\n    
let inp = &tensors[0];\n    let weights = &tensors[1];\n    let zero_arr = Array::from_elem(IxDyn(&vec![1]), zero.clone());\n    let biases = if tensors.len() == 3 {\n      &tensors[2]\n    } else {\n      &zero_arr\n    };\n\n    let h: usize = inp.shape()[1];\n    let w: usize = inp.shape()[2];\n\n    let ch: usize = weights.shape()[1];\n    let cw: usize = weights.shape()[2];\n\n    let (si, sj) = conv_config.stride;\n\n    // B, H, W, C\n    assert_eq!(inp.shape().len(), 4);\n\n    let (ph, pw) = if conv_config.padding == PaddingEnum::Same {\n      Self::get_padding(h, w, si, sj, ch, cw)\n    } else {\n      ((0, 0), (0, 0))\n    };\n    // println!(\"Padding: {:?}\", (ph, pw));\n    let padding = vec![[0, 0], [ph.0, ph.1], [pw.0, pw.1], [0, 0]];\n\n    let inp_pad = pad(&inp, padding, &zero);\n\n    let (oh, ow) = Self::out_hw(h, w, si, sj, ch, cw, conv_config.padding);\n\n    let mut inp_cells = vec![];\n    let mut weights_cells = vec![];\n    let mut biases_cells = vec![];\n    let mut input_row_idx = 0;\n    let mut weight_row_idx = 0;\n\n    // (output_channels x inp_channels * C_H * C_W)\n    for chan_out in 0..weights.shape()[0] {\n      weights_cells.push(vec![]);\n      for ci in 0..weights.shape()[1] {\n        for cj in 0..weights.shape()[2] {\n          for ck in 0..weights.shape()[3] {\n            weights_cells[weight_row_idx].push(weights[[chan_out, ci, cj, ck]].clone());\n          }\n        }\n      }\n      weight_row_idx += 1;\n    }\n\n    // (O_H * O_W x inp_channels * C_H * C_W)\n    for batch in 0..inp.shape()[0] {\n      for i in 0..oh {\n        for j in 0..ow {\n          inp_cells.push(vec![]);\n          for ci in 0..weights.shape()[1] {\n            for cj in 0..weights.shape()[2] {\n              for ck in 0..weights.shape()[3] {\n                let idx_i = i * si + ci;\n                let idx_j = j * sj + cj;\n                inp_cells[input_row_idx].push(inp_pad[[batch, idx_i, idx_j, ck]].clone());\n              }\n            }\n          }\n          input_row_idx += 1;\n        }\n      }\n    }\n\n    for _batch in 0..inp.shape()[0] {\n      for _ in 0..oh {\n        for _ in 0..ow {\n          for chan_out in 0..weights.shape()[0] {\n            if tensors.len() == 3 {\n              biases_cells.push(biases[chan_out].clone());\n            } else {\n              biases_cells.push(zero.clone());\n            }\n          }\n        }\n      }\n    }\n\n    (inp_cells, weights_cells, biases_cells)\n  }\n\n  pub fn splat_depthwise<G: Clone>(\n    &self,\n    tensors: &Vec<Array<Rc<G>, IxDyn>>,\n    zero: Rc<G>,\n  ) -> (Vec<Vec<Rc<G>>>, Vec<Vec<Rc<G>>>, Vec<Rc<G>>) {\n    let input = &tensors[0];\n    let weights = &tensors[1];\n    let biases = &tensors[2];\n\n    assert_eq!(tensors.len(), 3);\n    assert_eq!(input.shape().len(), 4);\n    assert_eq!(weights.shape().len(), 4);\n    assert_eq!(input.shape()[0], 1);\n\n    let conv_config = &Self::param_vec_to_config(self.config.layer_params.clone());\n    let strides = conv_config.stride;\n\n    let h: usize = input.shape()[1];\n    let w: usize = input.shape()[2];\n    let ch: usize = weights.shape()[1];\n    let cw: usize = weights.shape()[2];\n    let (si, sj) = conv_config.stride;\n    let (oh, ow) = Self::out_hw(h, w, si, sj, ch, cw, conv_config.padding);\n\n    let (ph, pw) = if conv_config.padding == PaddingEnum::Same {\n      Self::get_padding(h, w, si, sj, ch, cw)\n    } else {\n      ((0, 0), (0, 0))\n    };\n\n    let padding = vec![[0, 0], [ph.0, ph.1], [pw.0, pw.1], [0, 0]];\n\n    
let inp_pad = pad(&input, padding, &zero);\n\n    let mut inp_cells = vec![];\n    let mut weight_cells = vec![];\n    let mut biases_cells = vec![];\n    let mut row_idx = 0;\n\n    for i in 0..oh {\n      for j in 0..ow {\n        for chan_out in 0..weights.shape()[3] {\n          inp_cells.push(vec![]);\n          weight_cells.push(vec![]);\n          biases_cells.push(biases[[chan_out]].clone());\n\n          for ci in 0..weights.shape()[1] {\n            for cj in 0..weights.shape()[2] {\n              let idx_i = i * strides.0 + ci;\n              let idx_j = j * strides.1 + cj;\n\n              inp_cells[row_idx].push(inp_pad[[0, idx_i, idx_j, chan_out]].clone());\n              weight_cells[row_idx].push(weights[[0, ci, cj, chan_out]].clone());\n            }\n          }\n\n          row_idx += 1;\n        }\n      }\n    }\n\n    (inp_cells, weight_cells, biases_cells)\n  }\n}\n\nimpl<F: PrimeField> Layer<F> for Conv2DChip<F> {\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    constants: &HashMap<i64, Rc<AssignedCell<F, F>>>,\n    gadget_config: Rc<GadgetConfig>,\n    layer_config: &LayerConfig,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    let conv_config = &Self::param_vec_to_config(self.config.layer_params.clone());\n    let zero = constants.get(&0).unwrap();\n\n    let inp = &tensors[0];\n    let weights = &tensors[1];\n\n    let (oh, ow) = Self::out_hw(\n      inp.shape()[1],\n      inp.shape()[2],\n      conv_config.stride.0,\n      conv_config.stride.1,\n      weights.shape()[1],\n      weights.shape()[2],\n      conv_config.padding,\n    );\n    let batch_size = inp.shape()[0];\n\n    let (splat_inp, splat_weights, splat_biases) = match conv_config.conv_type {\n      ConvLayerEnum::Conv2D => self.splat(tensors, zero.clone()),\n      ConvLayerEnum::DepthwiseConv2D => self.splat_depthwise(tensors, zero.clone()),\n    };\n\n    let outp_flat: Vec<AssignedCell<F, F>> = match conv_config.conv_type {\n      ConvLayerEnum::Conv2D => {\n        let fc_chip = FullyConnectedChip::<F> {\n          _marker: PhantomData,\n          config: FullyConnectedConfig::construct(false),\n        };\n\n        let conv_size = splat_inp[0].len();\n        let flattened_inp: Vec<_> = splat_inp.into_iter().flat_map(|x| x.into_iter()).collect();\n        let flattened_weights = splat_weights\n          .into_iter()\n          .flat_map(|x| x.into_iter())\n          .collect::<Vec<_>>();\n\n        let out_channels = weights.shape()[0];\n        let inp_array =\n          Array::from_shape_vec(IxDyn(&vec![batch_size * oh * ow, conv_size]), flattened_inp)\n            .unwrap();\n        let weights_array =\n          Array::from_shape_vec(IxDyn(&vec![out_channels, conv_size]), flattened_weights).unwrap();\n\n        let outp_slice = fc_chip\n          .forward(\n            layouter.namespace(|| \"\"),\n            &vec![weights_array, inp_array],\n            constants,\n            gadget_config.clone(),\n            layer_config,\n          )\n          .unwrap();\n\n        let outp_flat = outp_slice[0]\n          .t()\n          .into_iter()\n          .map(|x| (**x).clone())\n          .collect::<Vec<_>>();\n        outp_flat\n      }\n      ConvLayerEnum::DepthwiseConv2D => {\n        // Do the dot products\n        let dot_prod_chip = DotProductChip::<F>::construct(gadget_config.clone());\n        let mut outp_flat = vec![];\n        for (inp_vec, weight_vec) in splat_inp.iter().zip(splat_weights.iter()) {\n          let 
inp_vec = inp_vec.iter().map(|x| x.as_ref()).collect::<Vec<_>>();\n          let weight_vec = weight_vec.iter().map(|x| x.as_ref()).collect::<Vec<_>>();\n          let vec_inputs = vec![inp_vec, weight_vec];\n          let constants = vec![zero.as_ref()];\n          let outp = dot_prod_chip\n            .forward(layouter.namespace(|| \"dot_prod\"), &vec_inputs, &constants)\n            .unwrap();\n          outp_flat.push(outp[0].clone());\n        }\n        // println!(\"outp_flat: {:?}\", outp_flat.len());\n\n        outp_flat\n      }\n    };\n\n    let mut biases = vec![];\n    for bias in splat_biases.iter() {\n      biases.push(bias.as_ref());\n    }\n\n    // Compute the bias + div + relu\n    let bdr_chip = BiasDivRoundRelu6Chip::<F>::construct(gadget_config.clone());\n    let tmp = vec![zero.as_ref()];\n    let outp_flat = outp_flat.iter().map(|x| x).collect::<Vec<_>>();\n    let outp = bdr_chip\n      .forward(\n        layouter.namespace(|| \"bias_div_relu\"),\n        &vec![outp_flat, biases],\n        &tmp,\n      )\n      .unwrap();\n\n    // TODO: this is also horrible. The bdr chip outputs interleaved [(relu'd, div'd), (relu'd, div'd), ...]\n    // Uninterleave depending on whether or not we're doing the relu\n    let outp = if conv_config.activation == ActivationType::Relu6 {\n      outp\n        .into_iter()\n        .step_by(2)\n        .map(|x| Rc::new(x))\n        .collect::<Vec<_>>()\n    } else if conv_config.activation == ActivationType::None {\n      outp\n        .into_iter()\n        .skip(1)\n        .step_by(2)\n        .map(|x| Rc::new(x))\n        .collect::<Vec<_>>()\n    } else if conv_config.activation == ActivationType::Relu {\n      let dived = outp.iter().skip(1).step_by(2).collect::<Vec<_>>();\n      let relu_chip = ReluChip::<F>::construct(gadget_config.clone());\n      let relu_outp = relu_chip\n        .forward(layouter.namespace(|| \"relu\"), &vec![dived], &tmp)\n        .unwrap();\n      let relu_outp = relu_outp\n        .into_iter()\n        .map(|x| Rc::new(x))\n        .collect::<Vec<_>>();\n      relu_outp\n    } else {\n      panic!(\"Unsupported activation type\");\n    };\n\n    let oc = match conv_config.conv_type {\n      ConvLayerEnum::Conv2D => weights.shape()[0],\n      ConvLayerEnum::DepthwiseConv2D => weights.shape()[3],\n    };\n\n    let out_shape = vec![batch_size, oh, ow, oc];\n    let outp = Array::from_shape_vec(IxDyn(&out_shape), outp).unwrap();\n\n    Ok(vec![outp])\n  }\n}\n\nimpl<F: PrimeField> GadgetConsumer for Conv2DChip<F> {\n  fn used_gadgets(&self, layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {\n    let conv_config = &Self::param_vec_to_config(layer_params.clone());\n    let mut outp = vec![\n      GadgetType::Adder,\n      GadgetType::DotProduct,\n      GadgetType::InputLookup,\n      GadgetType::BiasDivRoundRelu6,\n    ];\n\n    if conv_config.activation == ActivationType::Relu {\n      outp.push(GadgetType::Relu);\n    }\n\n    outp\n  }\n}\n"
  },
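`Conv2DChip::splat` is an im2col transformation: each output position becomes one row of flattened patch values, so the whole convolution is handled as a single matrix product by the fully-connected chip. A standalone sketch of the resulting shapes for an illustrative SAME-padding layer (all dimensions below are made up for the example):

```rust
// Standalone sketch of the shapes produced by Conv2DChip::splat for a
// Conv2D with SAME padding: the conv becomes a single matmul.
fn out_hw_same(h: usize, w: usize, si: usize, sj: usize) -> (usize, usize) {
    // SAME padding: ceil(h / si), ceil(w / sj)
    ((h + si - 1) / si, (w + sj - 1) / sj)
}

fn main() {
    let (b, h, w, ic) = (1, 28, 28, 8); // input   B x H x W x IC
    let (oc, kh, kw) = (16, 3, 3);      // weights OC x KH x KW x IC
    let (si, sj) = (2, 2);

    let (oh, ow) = out_hw_same(h, w, si, sj);
    assert_eq!((oh, ow), (14, 14));

    // splat output: one row per output position / per output channel
    let inp_rows = b * oh * ow; // rows of the "activation" matrix
    let row_len = ic * kh * kw; // one flattened patch
    let weight_rows = oc;       // rows of the "weights" matrix
    println!(
        "matmul: ({inp_rows} x {row_len}) x ({weight_rows} x {row_len})^T -> ({inp_rows} x {weight_rows})"
    );
}
```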
  {
    "path": "src/layers/dag.rs",
    "content": "use std::{collections::HashMap, fs::File, io::BufWriter, marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};\n\nuse crate::{\n  gadgets::gadget::{convert_to_u64, GadgetConfig},\n  layers::{\n    arithmetic::{add::AddChip, div_var::DivVarChip, mul::MulChip, sub::SubChip},\n    batch_mat_mul::BatchMatMulChip,\n    div_fixed::DivFixedChip,\n    fully_connected::{FullyConnectedChip, FullyConnectedConfig},\n    logistic::LogisticChip,\n    max_pool_2d::MaxPool2DChip,\n    mean::MeanChip,\n    noop::NoopChip,\n    pow::PowChip,\n    rsqrt::RsqrtChip,\n    shape::{\n      broadcast::BroadcastChip, concatenation::ConcatenationChip, mask_neg_inf::MaskNegInfChip,\n      pack::PackChip, pad::PadChip, permute::PermuteChip, reshape::ReshapeChip,\n      resize_nn::ResizeNNChip, rotate::RotateChip, slice::SliceChip, split::SplitChip,\n      transpose::TransposeChip,\n    },\n    softmax::SoftmaxChip,\n    sqrt::SqrtChip,\n    square::SquareChip,\n    squared_diff::SquaredDiffChip,\n    tanh::TanhChip,\n    update::UpdateChip,\n  },\n  utils::helpers::print_assigned_arr,\n};\n\nuse super::{\n  avg_pool_2d::AvgPool2DChip,\n  conv2d::Conv2DChip,\n  layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig, LayerType},\n};\n\n#[derive(Clone, Debug, Default)]\npub struct DAGLayerConfig {\n  pub ops: Vec<LayerConfig>,\n  pub inp_idxes: Vec<Vec<usize>>,\n  pub out_idxes: Vec<Vec<usize>>,\n  pub final_out_idxes: Vec<usize>,\n}\n\npub struct DAGLayerChip<F: PrimeField + Ord> {\n  dag_config: DAGLayerConfig,\n  _marker: PhantomData<F>,\n}\n\nimpl<F: PrimeField + Ord> DAGLayerChip<F> {\n  pub fn construct(dag_config: DAGLayerConfig) -> Self {\n    Self {\n      dag_config,\n      _marker: PhantomData,\n    }\n  }\n\n  // IMPORTANT: Assumes input tensors are in order. 
Output tensors can be in any order.\n  pub fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    constants: &HashMap<i64, CellRc<F>>,\n    gadget_config: Rc<GadgetConfig>,\n    _layer_config: &LayerConfig,\n  ) -> Result<(HashMap<usize, AssignedTensor<F>>, Vec<AssignedTensor<F>>), Error> {\n    // Tensor map\n    let mut tensor_map = HashMap::new();\n    for (idx, tensor) in tensors.iter().enumerate() {\n      tensor_map.insert(idx, tensor.clone());\n    }\n\n    // Compute the dag\n    for (layer_idx, layer_config) in self.dag_config.ops.iter().enumerate() {\n      let layer_type = &layer_config.layer_type;\n      let inp_idxes = &self.dag_config.inp_idxes[layer_idx];\n      let out_idxes = &self.dag_config.out_idxes[layer_idx];\n      println!(\n        \"Processing layer {}, type: {:?}, inp_idxes: {:?}, out_idxes: {:?}, layer_params: {:?}\",\n        layer_idx, layer_type, inp_idxes, out_idxes, layer_config.layer_params\n      );\n      let vec_inps = inp_idxes\n        .iter()\n        .map(|idx| tensor_map.get(idx).unwrap().clone())\n        .collect::<Vec<_>>();\n\n      let out = match layer_type {\n        LayerType::Add => {\n          let add_chip = AddChip {};\n          add_chip.forward(\n            layouter.namespace(|| \"dag add\"),\n            &vec_inps,\n            constants,\n            gadget_config.clone(),\n            &layer_config,\n          )?\n        }\n        LayerType::AvgPool2D => {\n          let avg_pool_2d_chip = AvgPool2DChip {};\n          avg_pool_2d_chip.forward(\n            layouter.namespace(|| \"dag avg pool 2d\"),\n            &vec_inps,\n            constants,\n            gadget_config.clone(),\n            &layer_config,\n          )?\n        }\n        LayerType::MaxPool2D => {\n          let max_pool_2d_chip = MaxPool2DChip {\n            marker: PhantomData::<F>,\n          };\n          max_pool_2d_chip.forward(\n            layouter.namespace(|| \"dag max pool 2d\"),\n            &vec_inps,\n            constants,\n            gadget_config.clone(),\n            &layer_config,\n          )?\n        }\n        LayerType::BatchMatMul => {\n          let batch_mat_mul_chip = BatchMatMulChip {};\n          batch_mat_mul_chip.forward(\n            layouter.namespace(|| \"dag batch mat mul\"),\n            &vec_inps,\n            constants,\n            gadget_config.clone(),\n            &layer_config,\n          )?\n        }
\n        LayerType::Broadcast => {\n          let broadcast_chip = BroadcastChip {};\n          broadcast_chip.forward(\n            layouter.namespace(|| \"dag broadcast\"),\n            &vec_inps,\n            constants,\n            gadget_config.clone(),\n            &layer_config,\n          )?\n        }\n        LayerType::Conv2D => {\n          let conv_2d_chip = Conv2DChip {\n            config: layer_config.clone(),\n            _marker: PhantomData,\n          };\n          conv_2d_chip.forward(\n            layouter.namespace(|| \"dag conv 2d\"),\n            &vec_inps,\n            constants,\n            gadget_config.clone(),\n            &layer_config,\n          )?\n        }\n        LayerType::DivFixed => {\n          let div_fixed_chip = DivFixedChip {};\n          div_fixed_chip.forward(\n            layouter.namespace(|| \"dag div\"),\n            &vec_inps,\n            constants,\n            gadget_config.clone(),\n            &layer_config,\n          )?\n        }\n        LayerType::DivVar => {\n          let div_var_chip = DivVarChip {};\n          div_var_chip.forward(\n            layouter.namespace(|| \"dag div var\"),\n            &vec_inps,\n            constants,\n            gadget_config.clone(),\n            &layer_config,\n          )?\n        }\n        LayerType::FullyConnected => {\n          let fc_chip = FullyConnectedChip {\n            _marker: PhantomData,\n            config: FullyConnectedConfig::construct(true),\n          };\n          fc_chip.forward(\n            layouter.namespace(|| \"dag fully connected\"),\n            &vec_inps,\n            constants,\n            gadget_config.clone(),\n            &layer_config,\n          )?\n        }
\n        LayerType::Softmax => {\n          let softmax_chip = SoftmaxChip {};\n          softmax_chip.forward(\n            layouter.namespace(|| \"dag softmax\"),\n            &vec_inps,\n            constants,\n            gadget_config.clone(),\n            &layer_config,\n          )?\n        }\n        LayerType::Mean => {\n          let mean_chip = MeanChip {};\n          mean_chip.forward(\n            layouter.namespace(|| \"dag mean\"),\n            &vec_inps,\n            constants,\n            gadget_config.clone(),\n            &layer_config,\n          )?\n        }\n        LayerType::Pad => {\n          let pad_chip = PadChip {};\n          pad_chip.forward(\n            layouter.namespace(|| \"dag pad\"),\n            &vec_inps,\n            constants,\n            gadget_config.clone(),\n            &layer_config,\n          )?\n        }\n        LayerType::Permute => {\n          let permute_chip = PermuteChip {};\n          permute_chip.forward(\n            layouter.namespace(|| \"dag permute\"),\n            &vec_inps,\n            constants,\n            gadget_config.clone(),\n            &layer_config,\n          )?\n        }\n        LayerType::SquaredDifference => {\n          let squared_diff_chip = SquaredDiffChip {};\n          squared_diff_chip.forward(\n            layouter.namespace(|| \"dag squared diff\"),\n            &vec_inps,\n            constants,\n            gadget_config.clone(),\n            &layer_config,\n          )?\n        }\n        LayerType::Rsqrt => {\n          let rsqrt_chip = RsqrtChip {};\n          rsqrt_chip.forward(\n            layouter.namespace(|| \"dag rsqrt\"),\n            &vec_inps,\n            constants,\n            gadget_config.clone(),\n            &layer_config,\n          )?\n        }\n        LayerType::Sqrt => {\n          let sqrt_chip = SqrtChip {};\n          sqrt_chip.forward(\n            layouter.namespace(|| \"dag sqrt\"),\n            &vec_inps,\n            constants,\n            gadget_config.clone(),\n            &layer_config,\n          )?\n        }\n        LayerType::Logistic => {\n          let logistic_chip = LogisticChip {};\n          logistic_chip.forward(\n            layouter.namespace(|| \"dag logistic\"),\n            &vec_inps,\n            constants,\n            gadget_config.clone(),\n            &layer_config,\n          )?\n        }\n        LayerType::Pow => {\n          let pow_chip = PowChip {};\n          pow_chip.forward(\n            layouter.namespace(|| \"dag pow\"),\n            &vec_inps,\n            constants,\n            gadget_config.clone(),\n            &layer_config,\n          )?\n        }\n        LayerType::Tanh => {\n          let tanh_chip = TanhChip {};\n          tanh_chip.forward(\n            layouter.namespace(|| \"dag tanh\"),\n            &vec_inps,\n            constants,\n            gadget_config.clone(),\n            &layer_config,\n          )?\n        }
\n        LayerType::Mul => {\n          let mul_chip = MulChip {};\n          mul_chip.forward(\n            layouter.namespace(|| \"dag mul\"),\n            &vec_inps,\n            constants,\n            gadget_config.clone(),\n            &layer_config,\n          )?\n        }\n        LayerType::Sub => {\n          let sub_chip = SubChip {};\n          sub_chip.forward(\n            layouter.namespace(|| \"dag sub\"),\n            &vec_inps,\n            constants,\n            gadget_config.clone(),\n            &layer_config,\n          )?\n        }\n        LayerType::Noop => {\n          let noop_chip = NoopChip {};\n          noop_chip.forward(\n            layouter.namespace(|| \"dag noop\"),\n            &vec_inps,\n            constants,\n            gadget_config.clone(),\n            &layer_config,\n          )?\n        }\n        LayerType::Transpose => {\n          let transpose_chip = TransposeChip {};\n          transpose_chip.forward(\n            layouter.namespace(|| \"dag transpose\"),\n            &vec_inps,\n            constants,\n            gadget_config.clone(),\n            &layer_config,\n          )?\n        }\n        LayerType::Reshape => {\n          let reshape_chip = ReshapeChip {};\n          reshape_chip.forward(\n            layouter.namespace(|| \"dag reshape\"),\n            &vec_inps,\n            constants,\n            gadget_config.clone(),\n            &layer_config,\n          )?\n        }\n        LayerType::ResizeNN => {\n          let resize_nn_chip = ResizeNNChip {};\n          resize_nn_chip.forward(\n            layouter.namespace(|| \"dag resize nn\"),\n            &vec_inps,\n            constants,\n            gadget_config.clone(),\n            &layer_config,\n          )?\n        }\n        LayerType::Rotate => {\n          let rotate_chip = RotateChip {};\n          rotate_chip.forward(\n            layouter.namespace(|| \"dag rotate\"),\n            &vec_inps,\n            constants,\n            gadget_config.clone(),\n            &layer_config,\n          )?\n        }\n        LayerType::Concatenation => {\n          let concat_chip = ConcatenationChip {};\n          concat_chip.forward(\n            layouter.namespace(|| \"dag concatenation\"),\n            &vec_inps,\n            constants,\n            gadget_config.clone(),\n            &layer_config,\n          )?\n        }\n        LayerType::Pack => {\n          let pack_chip = PackChip {};\n          pack_chip.forward(\n            layouter.namespace(|| \"dag pack\"),\n            &vec_inps,\n            constants,\n            gadget_config.clone(),\n            &layer_config,\n          )?\n        }\n        LayerType::Split => {\n          let split_chip = SplitChip {};\n          split_chip.forward(\n            layouter.namespace(|| \"dag split\"),\n            &vec_inps,\n            constants,\n            gadget_config.clone(),\n            &layer_config,\n          )?\n        }\n        LayerType::Update => {\n          let update_chip = UpdateChip {};\n          update_chip.forward(\n            layouter.namespace(|| \"dag update\"),\n            &vec_inps,\n            constants,\n            gadget_config.clone(),\n            &layer_config,\n          )?\n        }\n        LayerType::Slice => {\n          let slice_chip = SliceChip {};\n          slice_chip.forward(\n            layouter.namespace(|| \"dag slice\"),\n            &vec_inps,\n            constants,\n            gadget_config.clone(),\n            &layer_config,\n          )?\n        }
\n        LayerType::MaskNegInf => {\n          let mask_neg_inf_chip = MaskNegInfChip {};\n          mask_neg_inf_chip.forward(\n            layouter.namespace(|| \"dag mask neg inf\"),\n            &vec_inps,\n            constants,\n            gadget_config.clone(),\n            &layer_config,\n          )?\n        }\n        LayerType::Square => {\n          let square_chip = SquareChip {};\n          square_chip.forward(\n            layouter.namespace(|| \"dag square\"),\n            &vec_inps,\n            constants,\n            gadget_config.clone(),\n            &layer_config,\n          )?\n        }\n      };\n\n      for (idx, tensor_idx) in out_idxes.iter().enumerate() {\n        println!(\"Out {} shape: {:?}\", idx, out[idx].shape());\n        tensor_map.insert(*tensor_idx, out[idx].clone());\n      }\n      println!();\n    }\n\n    let mut final_out = vec![];\n    for idx in self.dag_config.final_out_idxes.iter() {\n      final_out.push(tensor_map.get(idx).unwrap().clone());\n    }\n\n    let print_arr = if !final_out.is_empty() {\n      &final_out[0]\n    } else if !self.dag_config.ops.is_empty() {\n      let last_layer_idx = self.dag_config.ops.len() - 1;\n      let out_idx = self.dag_config.out_idxes[last_layer_idx][0];\n      tensor_map.get(&out_idx).unwrap()\n    } else {\n      tensor_map.get(&0).unwrap()\n    };\n\n    let tmp = print_arr.iter().map(|x| x.as_ref()).collect::<Vec<_>>();\n    print_assigned_arr(\"final out\", &tmp, gadget_config.scale_factor);\n    println!(\"final out idxes: {:?}\", self.dag_config.final_out_idxes);\n\n    let mut x = vec![];\n    for cell in print_arr.iter() {\n      cell.value().map(|v| {\n        // Shift by 2^60 so that negative field elements convert to i64 correctly\n        let bias: i64 = 1 << 60;\n        let v_pos = *v + F::from(bias as u64);\n        let v = convert_to_u64(&v_pos) as i64 - bias;\n        x.push(v);\n      });\n    }\n    if !x.is_empty() {\n      let out_fname = \"out.msgpack\";\n      let f = File::create(out_fname).unwrap();\n      let mut buf = BufWriter::new(f);\n      rmp_serde::encode::write_named(&mut buf, &x).unwrap();\n    }\n\n    Ok((tensor_map, final_out))\n  }\n}\n\nimpl<F: PrimeField + Ord> GadgetConsumer for DAGLayerChip<F> {\n  // Special case: DAG doesn't do anything\n  fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {\n    vec![]\n  }\n}\n"
  },
  {
    "path": "src/layers/div_fixed.rs",
    "content": "use std::{collections::HashMap, rc::Rc, vec};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter, Value},\n  halo2curves::ff::PrimeField,\n  plonk::Error,\n};\nuse ndarray::{Array, IxDyn};\n\nuse crate::gadgets::{\n  gadget::{Gadget, GadgetConfig, GadgetType},\n  var_div::VarDivRoundChip,\n};\n\nuse super::layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig};\n\n#[derive(Clone, Debug)]\npub struct DivFixedChip {}\n\nimpl DivFixedChip {\n  fn get_div_val<F: PrimeField>(\n    &self,\n    mut layouter: impl Layouter<F>,\n    _tensors: &Vec<AssignedTensor<F>>,\n    gadget_config: Rc<GadgetConfig>,\n    layer_config: &LayerConfig,\n  ) -> Result<AssignedCell<F, F>, Error> {\n    // FIXME: this needs to be revealed\n    let div = layer_config.layer_params[0];\n    let div = F::from(div as u64);\n\n    let div = layouter\n      .assign_region(\n        || \"division\",\n        |mut region| {\n          let div = region\n            .assign_advice(\n              || \"avg pool 2d div\",\n              gadget_config.columns[0],\n              0,\n              || Value::known(div),\n            )\n            .unwrap();\n          Ok(div)\n        },\n      )\n      .unwrap();\n\n    Ok(div)\n  }\n}\n\nimpl<F: PrimeField> Layer<F> for DivFixedChip {\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    constants: &HashMap<i64, CellRc<F>>,\n    gadget_config: Rc<GadgetConfig>,\n    layer_config: &LayerConfig,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    let inp = &tensors[0];\n    let inp_flat = inp.iter().map(|x| x.as_ref()).collect::<Vec<_>>();\n\n    let zero = constants.get(&0).unwrap().as_ref();\n    let shape = inp.shape();\n\n    let div = self.get_div_val(\n      layouter.namespace(|| \"average div\"),\n      tensors,\n      gadget_config.clone(),\n      layer_config,\n    )?;\n\n    let var_div_chip = VarDivRoundChip::<F>::construct(gadget_config.clone());\n\n    let dived = var_div_chip.forward(\n      layouter.namespace(|| \"average div\"),\n      &vec![inp_flat],\n      &vec![zero, &div],\n    )?;\n    let dived = dived.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>();\n    let out = Array::from_shape_vec(IxDyn(shape), dived).unwrap();\n\n    Ok(vec![out])\n  }\n}\n\nimpl GadgetConsumer for DivFixedChip {\n  fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {\n    vec![GadgetType::VarDivRound]\n  }\n}\n"
  },
  {
    "path": "src/layers/fully_connected.rs",
    "content": "use std::{collections::HashMap, marker::PhantomData, rc::Rc};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter, Region, Value},\n  halo2curves::ff::PrimeField,\n  plonk::{Advice, Column, Error},\n};\nuse ndarray::{Array, ArrayView, Axis, IxDyn};\n\nuse crate::{\n  gadgets::{\n    add_pairs::AddPairsChip,\n    dot_prod::DotProductChip,\n    gadget::{Gadget, GadgetConfig, GadgetType},\n    nonlinear::relu::ReluChip,\n    var_div::VarDivRoundChip,\n  },\n  layers::layer::ActivationType,\n  utils::helpers::RAND_START_IDX,\n};\n\nuse super::layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig};\n\npub struct FullyConnectedConfig {\n  pub normalize: bool, // Should be true\n}\n\nimpl FullyConnectedConfig {\n  pub fn construct(normalize: bool) -> Self {\n    Self { normalize }\n  }\n}\n\npub struct FullyConnectedChip<F: PrimeField> {\n  pub _marker: PhantomData<F>,\n  pub config: FullyConnectedConfig,\n}\n\nimpl<F: PrimeField> FullyConnectedChip<F> {\n  pub fn compute_mm(\n    // input: &AssignedTensor<F>,\n    input: &ArrayView<CellRc<F>, IxDyn>,\n    weight: &AssignedTensor<F>,\n  ) -> Array<Value<F>, IxDyn> {\n    assert_eq!(input.ndim(), 2);\n    assert_eq!(weight.ndim(), 2);\n    assert_eq!(input.shape()[1], weight.shape()[0]);\n\n    let mut outp = vec![];\n    for i in 0..input.shape()[0] {\n      for j in 0..weight.shape()[1] {\n        let mut sum = input[[i, 0]].value().map(|x: &F| *x) * weight[[0, j]].value();\n        for k in 1..input.shape()[1] {\n          sum = sum + input[[i, k]].value().map(|x: &F| *x) * weight[[k, j]].value();\n        }\n        outp.push(sum);\n      }\n    }\n\n    let out_shape = [input.shape()[0], weight.shape()[1]];\n    Array::from_shape_vec(IxDyn(out_shape.as_slice()), outp).unwrap()\n  }\n\n  pub fn assign_array(\n    columns: &Vec<Column<Advice>>,\n    region: &mut Region<F>,\n    array: &Array<Value<F>, IxDyn>,\n  ) -> Result<Array<AssignedCell<F, F>, IxDyn>, Error> {\n    assert_eq!(array.ndim(), 2);\n\n    let mut outp = vec![];\n    for (idx, val) in array.iter().enumerate() {\n      let row_idx = idx / columns.len();\n      let col_idx = idx % columns.len();\n      let cell = region\n        .assign_advice(|| \"assign array\", columns[col_idx], row_idx, || *val)\n        .unwrap();\n      outp.push(cell);\n    }\n\n    let out_shape = [array.shape()[0], array.shape()[1]];\n    Ok(Array::from_shape_vec(IxDyn(out_shape.as_slice()), outp).unwrap())\n  }\n\n  pub fn random_vector(\n    constants: &HashMap<i64, CellRc<F>>,\n    size: usize,\n  ) -> Result<Vec<CellRc<F>>, Error> {\n    let mut outp = vec![];\n    for idx in 0..size {\n      let idx = RAND_START_IDX + (idx as i64);\n      if !constants.contains_key(&idx) {\n        println!(\"Random vector is too small: {:?}\", size);\n      }\n      let cell = constants.get(&idx).unwrap().clone();\n      outp.push(cell);\n    }\n\n    Ok(outp)\n  }\n\n  fn get_activation(&self, layer_params: &Vec<i64>) -> ActivationType {\n    let activation = layer_params[0];\n    match activation {\n      0 => ActivationType::None,\n      1 => ActivationType::Relu,\n      _ => panic!(\"Unsupported activation type for fully connected\"),\n    }\n  }\n}\n\nimpl<F: PrimeField> Layer<F> for FullyConnectedChip<F> {\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    constants: &HashMap<i64, CellRc<F>>,\n    gadget_config: Rc<GadgetConfig>,\n    layer_config: &LayerConfig,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    
assert!(tensors.len() <= 3);\n    let activation = self.get_activation(&layer_config.layer_params);\n\n    let input = &tensors[0];\n    let ndim = input.ndim();\n    let input = if ndim == 2 {\n      ArrayView::from(input)\n    } else {\n      input.index_axis(Axis(0), 0)\n    };\n    let weight = &tensors[1].t().into_owned();\n    let zero = constants.get(&0).unwrap().as_ref();\n\n    // Compute and assign the result\n    let mm_result = layouter\n      .assign_region(\n        || \"compute and assign mm\",\n        |mut region| {\n          let mm_result = Self::compute_mm(&input, weight);\n          let mm_result =\n            Self::assign_array(&gadget_config.columns, &mut region, &mm_result).unwrap();\n\n          Ok(mm_result)\n        },\n      )\n      .unwrap();\n\n    // Generate random vectors\n    let r1 = Self::random_vector(constants, mm_result.shape()[0]).unwrap();\n    let r2 = Self::random_vector(constants, mm_result.shape()[1]).unwrap();\n\n    let dot_prod_chip = DotProductChip::<F>::construct(gadget_config.clone());\n    let r1_ref = r1.iter().map(|x| x.as_ref()).collect::<Vec<_>>();\n    let r2_ref = r2.iter().map(|x| x.as_ref()).collect::<Vec<_>>();\n\n    // Compute r1 * result\n    let mut r1_res = vec![];\n    // println!(\"r1_ref: {:?}\", r1_ref.len());\n    // println!(\"r2_ref: {:?}\", r2_ref.len());\n    // println!(\"mm_result: {:?}\", mm_result.shape());\n    for i in 0..mm_result.shape()[1] {\n      let tmp = mm_result.index_axis(Axis(1), i);\n      let mm_ci = tmp.iter().collect::<Vec<_>>();\n      let r1_res_i = dot_prod_chip\n        .forward(\n          layouter.namespace(|| format!(\"r1_res_{}\", i)),\n          &vec![mm_ci, r1_ref.clone()],\n          &vec![zero],\n        )\n        .unwrap();\n      r1_res.push(r1_res_i[0].clone());\n    }\n\n    // Compute r1 * result * r2\n    let r1_res_ref = r1_res.iter().collect::<Vec<_>>();\n    let r1_res_r2 = dot_prod_chip\n      .forward(\n        layouter.namespace(|| \"r1_res_r2\"),\n        &vec![r1_res_ref, r2_ref.clone()],\n        &vec![zero],\n      )\n      .unwrap();\n    let r1_res_r2 = r1_res_r2[0].clone();\n    // println!(\"r1_res_r2: {:?}\", r1_res_r2);\n\n    // Compute r1 * input\n    let mut r1_input = vec![];\n    // println!(\"input: {:?}\", input.shape());\n    // println!(\"r1_ref: {:?}\", r1_ref.len());\n    for i in 0..input.shape()[1] {\n      let tmp = input.index_axis(Axis(1), i);\n      let input_ci = tmp.iter().map(|x| x.as_ref()).collect::<Vec<_>>();\n      let r1_input_i = dot_prod_chip\n        .forward(\n          layouter.namespace(|| format!(\"r1_input_{}\", i)),\n          &vec![input_ci, r1_ref.clone()],\n          &vec![zero],\n        )\n        .unwrap();\n      r1_input.push(r1_input_i[0].clone());\n    }\n\n    // Compute weight * r2\n    let mut weight_r2 = vec![];\n    for i in 0..weight.shape()[0] {\n      let tmp = weight.index_axis(Axis(0), i);\n      let weight_ci = tmp.iter().map(|x| x.as_ref()).collect::<Vec<_>>();\n      let weight_r2_i = dot_prod_chip\n        .forward(\n          layouter.namespace(|| format!(\"weight_r2_{}\", i)),\n          &vec![weight_ci, r2_ref.clone()],\n          &vec![zero],\n        )\n        .unwrap();\n      weight_r2.push(weight_r2_i[0].clone());\n    }\n\n    // Compute (r1 * input) * (weight * r2)\n    let r1_input_ref = r1_input.iter().collect::<Vec<_>>();\n    let weight_r2_ref = weight_r2.iter().collect::<Vec<_>>();\n    let r1_inp_weight_r2 = dot_prod_chip\n      .forward(\n        layouter.namespace(|| 
\"r1_inp_weight_r2\"),\n        &vec![r1_input_ref, weight_r2_ref],\n        &vec![zero],\n      )\n      .unwrap();\n\n    let r1_inp_weight_r2 = r1_inp_weight_r2[0].clone();\n    // println!(\"r1_inp_weight_r2: {:?}\", r1_inp_weight_r2);\n\n    layouter\n      .assign_region(\n        || \"fc equality check\",\n        |mut region| {\n          let t1 = r1_res_r2\n            .copy_advice(|| \"\", &mut region, gadget_config.columns[0], 0)\n            .unwrap();\n          let t2 = r1_inp_weight_r2\n            .copy_advice(|| \"\", &mut region, gadget_config.columns[0], 1)\n            .unwrap();\n\n          region.constrain_equal(t1.cell(), t2.cell()).unwrap();\n\n          Ok(())\n        },\n      )\n      .unwrap();\n\n    let shape = [mm_result.shape()[0], mm_result.shape()[1]];\n    let final_result_flat = if self.config.normalize {\n      let mm_flat = mm_result.iter().collect::<Vec<_>>();\n      let var_div_chip = VarDivRoundChip::<F>::construct(gadget_config.clone());\n      let sf = constants\n        .get(&(gadget_config.scale_factor as i64))\n        .unwrap()\n        .as_ref();\n      let mm_div = var_div_chip\n        .forward(\n          layouter.namespace(|| \"mm_div\"),\n          &vec![mm_flat],\n          &vec![zero, sf],\n        )\n        .unwrap();\n\n      let mm_div = if tensors.len() == 3 {\n        let bias = tensors[2].broadcast(shape.clone()).unwrap();\n        let bias = bias.iter().map(|x| x.as_ref()).collect::<Vec<_>>();\n        let mm_div = mm_div.iter().collect::<Vec<_>>();\n        let adder_chip = AddPairsChip::<F>::construct(gadget_config.clone());\n        let mm_bias = adder_chip\n          .forward(\n            layouter.namespace(|| \"mm_bias\"),\n            &vec![mm_div, bias],\n            &vec![zero],\n          )\n          .unwrap();\n        mm_bias\n      } else {\n        mm_div\n      };\n\n      let mm_div = if activation == ActivationType::Relu {\n        let relu_chip = ReluChip::<F>::construct(gadget_config.clone());\n        let mm_div = mm_div.iter().collect::<Vec<_>>();\n        let vec_inputs = vec![mm_div];\n        relu_chip\n          .forward(layouter.namespace(|| \"relu\"), &vec_inputs, &vec![zero])\n          .unwrap()\n      } else if activation == ActivationType::None {\n        mm_div\n      } else {\n        panic!(\"Unsupported activation type\");\n      };\n\n      mm_div.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>()\n    } else {\n      mm_result\n        .into_iter()\n        .map(|x| Rc::new(x))\n        .collect::<Vec<_>>()\n    };\n    let final_result = Array::from_shape_vec(IxDyn(&shape), final_result_flat).unwrap();\n\n    Ok(vec![final_result])\n  }\n}\n\nimpl<F: PrimeField> GadgetConsumer for FullyConnectedChip<F> {\n  fn used_gadgets(&self, layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {\n    let activation = self.get_activation(&layer_params);\n    let mut outp = vec![\n      GadgetType::Adder,\n      GadgetType::AddPairs,\n      GadgetType::DotProduct,\n      GadgetType::VarDivRound,\n      GadgetType::InputLookup,\n    ];\n    match activation {\n      ActivationType::Relu => outp.push(GadgetType::Relu),\n      ActivationType::None => (),\n      _ => panic!(\"Unsupported activation type\"),\n    }\n    outp\n  }\n}\n"
  },
  {
    "path": "src/layers/layer.rs",
    "content": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter},\n  halo2curves::ff::PrimeField,\n  plonk::Error,\n};\nuse ndarray::{Array, IxDyn};\n\nuse crate::gadgets::gadget::{GadgetConfig, GadgetType};\n\n#[derive(Clone, Copy, Debug, Default, Hash, Eq, PartialEq)]\npub enum LayerType {\n  Add,\n  AvgPool2D,\n  BatchMatMul,\n  Broadcast,\n  Concatenation,\n  Conv2D,\n  DivVar,\n  DivFixed,\n  FullyConnected,\n  Logistic,\n  MaskNegInf,\n  MaxPool2D,\n  Mean,\n  Mul,\n  #[default]\n  Noop,\n  Pack,\n  Pad,\n  Pow,\n  Permute,\n  Reshape,\n  ResizeNN,\n  Rotate,\n  Rsqrt,\n  Slice,\n  Softmax,\n  Split,\n  Sqrt,\n  Square,\n  SquaredDifference,\n  Sub,\n  Tanh,\n  Transpose,\n  Update,\n}\n\n// NOTE: This is the same order as the TFLite schema\n// Must not be changed\n#[derive(Clone, Debug, Default, Hash, Eq, PartialEq)]\npub enum ActivationType {\n  #[default]\n  None,\n  Relu,\n  ReluN1To1,\n  Relu6,\n  Tanh,\n  SignBit,\n}\n\n#[derive(Clone, Debug, Default)]\npub struct LayerConfig {\n  pub layer_type: LayerType,\n  pub layer_params: Vec<i64>, // This is turned into layer specific configurations at runtime\n  pub inp_shapes: Vec<Vec<usize>>,\n  pub out_shapes: Vec<Vec<usize>>,\n  pub mask: Vec<i64>,\n}\n\npub type CellRc<F> = Rc<AssignedCell<F, F>>;\npub type AssignedTensor<F> = Array<CellRc<F>, IxDyn>;\n// General issue with rust: I'm not sure how to pass named arguments to a trait...\n// Currently, the caller must be aware of the order of the tensors and results\npub trait Layer<F: PrimeField> {\n  fn forward(\n    &self,\n    layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    constants: &HashMap<i64, CellRc<F>>,\n    gadget_config: Rc<GadgetConfig>,\n    layer_config: &LayerConfig,\n  ) -> Result<Vec<AssignedTensor<F>>, Error>;\n}\n\npub trait GadgetConsumer {\n  fn used_gadgets(&self, layer_params: Vec<i64>) -> Vec<GadgetType>;\n}\n"
  },
  {
    "path": "src/layers/logistic.rs",
    "content": "use std::{collections::HashMap, rc::Rc, vec};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};\nuse ndarray::{Array, IxDyn};\n\nuse crate::gadgets::{\n  gadget::{Gadget, GadgetConfig, GadgetType},\n  nonlinear::logistic::LogisticGadgetChip,\n};\n\nuse super::layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig};\n\n#[derive(Clone, Debug)]\npub struct LogisticChip {}\n\nimpl<F: PrimeField> Layer<F> for LogisticChip {\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    constants: &HashMap<i64, CellRc<F>>,\n    gadget_config: Rc<GadgetConfig>,\n    _layer_config: &LayerConfig,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    let inp = &tensors[0];\n    let inp_vec = inp.iter().map(|x| x.as_ref()).collect::<Vec<_>>();\n    let zero = constants.get(&0).unwrap().as_ref();\n\n    let logistic_chip = LogisticGadgetChip::<F>::construct(gadget_config.clone());\n    let vec_inps = vec![inp_vec];\n    let constants = vec![zero];\n    let out = logistic_chip.forward(\n      layouter.namespace(|| \"logistic chip\"),\n      &vec_inps,\n      &constants,\n    )?;\n\n    let out = out.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>();\n    let out = Array::from_shape_vec(IxDyn(inp.shape()), out).unwrap();\n\n    Ok(vec![out])\n  }\n}\n\nimpl GadgetConsumer for LogisticChip {\n  fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {\n    vec![GadgetType::Logistic, GadgetType::InputLookup]\n  }\n}\n"
  },
  {
    "path": "src/layers/max_pool_2d.rs",
    "content": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};\nuse ndarray::{Array, IxDyn};\n\nuse crate::{\n  gadgets::{\n    gadget::{Gadget, GadgetConfig, GadgetType},\n    max::MaxChip,\n  },\n  layers::conv2d::{Conv2DChip, PaddingEnum},\n};\n\nuse super::layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig};\n\npub struct MaxPool2DChip<F: PrimeField> {\n  pub marker: std::marker::PhantomData<F>,\n}\n\nimpl<F: PrimeField> MaxPool2DChip<F> {\n  pub fn shape(inp: &AssignedTensor<F>, layer_config: &LayerConfig) -> (usize, usize) {\n    let params = &layer_config.layer_params;\n    let (fx, fy) = (params[0], params[1]);\n    let (fx, fy) = (fx as usize, fy as usize);\n    let (sx, sy) = (params[2], params[3]);\n    let (sx, sy) = (sx as usize, sy as usize);\n\n    // Only support batch size 1 for now\n    assert_eq!(inp.shape()[0], 1);\n\n    let out_shape = Conv2DChip::<F>::out_hw(\n      inp.shape()[1],\n      inp.shape()[2],\n      sx,\n      sy,\n      fx,\n      fy,\n      PaddingEnum::Valid,\n    );\n\n    out_shape\n  }\n\n  pub fn splat(\n    inp: &AssignedTensor<F>,\n    layer_config: &LayerConfig,\n  ) -> Result<Vec<Vec<CellRc<F>>>, Error> {\n    let params = &layer_config.layer_params;\n    let (fx, fy) = (params[0], params[1]);\n    let (fx, fy) = (fx as usize, fy as usize);\n    let (sx, sy) = (params[2], params[3]);\n    let (sx, sy) = (sx as usize, sy as usize);\n\n    // Only support batch size 1 for now\n    assert_eq!(inp.shape()[0], 1);\n\n    let out_shape = Self::shape(inp, layer_config);\n\n    let mut splat = vec![];\n    for i in 0..out_shape.0 {\n      for j in 0..out_shape.1 {\n        for k in 0..inp.shape()[3] {\n          let mut tmp = vec![];\n          for x in 0..fx {\n            for y in 0..fy {\n              let x = i * sx + x;\n              let y = j * sy + y;\n              if x < inp.shape()[1] && y < inp.shape()[2] {\n                tmp.push(inp[[0, x, y, k]].clone());\n              }\n            }\n          }\n          splat.push(tmp);\n        }\n      }\n    }\n\n    Ok(splat)\n  }\n}\n\nimpl<F: PrimeField> Layer<F> for MaxPool2DChip<F> {\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    _constants: &HashMap<i64, CellRc<F>>,\n    gadget_config: Rc<GadgetConfig>,\n    layer_config: &LayerConfig,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    let inp = &tensors[0];\n    let splat = Self::splat(inp, layer_config).unwrap();\n\n    let max_chip = MaxChip::<F>::construct(gadget_config.clone());\n    let mut out = vec![];\n    for i in 0..splat.len() {\n      let inps = &splat[i];\n      let inps = inps.iter().map(|x| x.as_ref()).collect();\n      let max = max_chip\n        .forward(\n          layouter.namespace(|| format!(\"max {}\", i)),\n          &vec![inps],\n          &vec![],\n        )\n        .unwrap();\n      out.push(max[0].clone());\n    }\n    let out = out.into_iter().map(|x| Rc::new(x)).collect();\n\n    // TODO: refactor this\n    let out_xy = Self::shape(inp, layer_config);\n    let out_shape = vec![1, out_xy.0, out_xy.1, inp.shape()[3]];\n\n    let out = Array::from_shape_vec(IxDyn(&out_shape), out).unwrap();\n\n    Ok(vec![out])\n  }\n}\n\nimpl<F: PrimeField> GadgetConsumer for MaxPool2DChip<F> {\n  fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<GadgetType> {\n    vec![GadgetType::Max, GadgetType::InputLookup]\n  }\n}\n"
  },
  {
    "path": "src/layers/mean.rs",
    "content": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter, Value},\n  halo2curves::ff::PrimeField,\n  plonk::Error,\n};\nuse ndarray::{Array, Axis, IxDyn};\n\nuse crate::gadgets::gadget::{GadgetConfig, GadgetType};\n\nuse super::{\n  averager::Averager,\n  layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig},\n};\n\npub struct MeanChip {}\n\nimpl MeanChip {\n  pub fn get_keep_axis(&self, layer_config: &LayerConfig) -> usize {\n    let inp_shape = &layer_config.inp_shapes[0];\n    let out_shape = &layer_config.out_shapes[0];\n    assert_eq!(inp_shape[0], 1);\n    assert_eq!(out_shape[0], 1);\n\n    // Skip the batch axis\n    let mut keep_axes = (1..inp_shape.len()).collect::<Vec<_>>();\n    for mean_axis in layer_config.layer_params.iter() {\n      keep_axes.retain(|&x| x != *mean_axis as usize);\n    }\n    assert_eq!(keep_axes.len(), 1);\n    keep_axes[0]\n\n    /*\n    let mut num_same = 0;\n    let mut keep_axis: i64 = -1;\n    for i in 1..inp_shape.len() {\n      if inp_shape[i] == out_shape[i] {\n        keep_axis = i as i64;\n        num_same += 1;\n      }\n    }\n\n    if keep_axis == -1 {\n      panic!(\"All axes are different\");\n    }\n    if num_same > 1 {\n      panic!(\"More than one axis is the same\");\n    }\n    keep_axis as usize\n    */\n  }\n}\n\nimpl<F: PrimeField> Averager<F> for MeanChip {\n  fn splat(&self, input: &AssignedTensor<F>, layer_config: &LayerConfig) -> Vec<Vec<CellRc<F>>> {\n    // Only support batch size = 1\n    assert_eq!(input.shape()[0], 1);\n    // Only support batch + 2D, summing over one axis\n    // assert_eq!(input.shape().len(), 3);\n    let keep_axis = self.get_keep_axis(layer_config);\n\n    let mut splat = vec![];\n    for i in 0..input.shape()[keep_axis] {\n      let mut tmp = vec![];\n      for x in input.index_axis(Axis(keep_axis), i).iter() {\n        tmp.push(x.clone());\n      }\n      splat.push(tmp);\n    }\n\n    splat\n  }\n\n  fn get_div_val(\n    &self,\n    mut layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    gadget_config: Rc<GadgetConfig>,\n    layer_config: &LayerConfig,\n  ) -> Result<AssignedCell<F, F>, Error> {\n    let inp = &tensors[0];\n    let keep_axis = self.get_keep_axis(layer_config);\n    let mut div = 1;\n    for i in 0..inp.shape().len() {\n      if i != keep_axis {\n        div *= inp.shape()[i];\n      }\n    }\n\n    let div = F::from(div as u64);\n    // FIXME: put this in the fixed column\n    let div = layouter.assign_region(\n      || \"mean div\",\n      |mut region| {\n        let div = region.assign_advice(\n          || \"mean div\",\n          gadget_config.columns[0],\n          0,\n          || Value::known(div),\n        )?;\n        Ok(div)\n      },\n    )?;\n\n    Ok(div)\n  }\n}\n\nimpl<F: PrimeField> Layer<F> for MeanChip {\n  fn forward(\n    &self,\n    layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    constants: &HashMap<i64, CellRc<F>>,\n    gadget_config: Rc<GadgetConfig>,\n    layer_config: &LayerConfig,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    let dived = self.avg_forward(layouter, tensors, constants, gadget_config, layer_config)?;\n\n    let out_shape = layer_config.out_shapes[0]\n      .iter()\n      .map(|x| *x as usize)\n      .collect::<Vec<_>>();\n\n    let out = Array::from_shape_vec(IxDyn(&out_shape), dived).unwrap();\n    Ok(vec![out])\n  }\n}\n\nimpl GadgetConsumer for MeanChip {\n  fn used_gadgets(&self, _layer_params: Vec<i64>) -> 
Vec<crate::gadgets::gadget::GadgetType> {\n    vec![\n      GadgetType::Adder,\n      GadgetType::VarDivRound,\n      GadgetType::InputLookup,\n    ]\n  }\n}\n"
  },
  {
    "path": "src/layers/noop.rs",
    "content": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};\n\nuse crate::gadgets::gadget::GadgetConfig;\n\nuse super::layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig};\n\npub struct NoopChip {}\n\nimpl<F: PrimeField> Layer<F> for NoopChip {\n  fn forward(\n    &self,\n    _layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    _constants: &HashMap<i64, CellRc<F>>,\n    _gadget_config: Rc<GadgetConfig>,\n    layer_config: &LayerConfig,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    let ret_idx = layer_config.layer_params[0] as usize;\n    Ok(vec![tensors[ret_idx].clone()])\n  }\n}\n\nimpl GadgetConsumer for NoopChip {\n  fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {\n    vec![]\n  }\n}\n"
  },
  {
    "path": "src/layers/pow.rs",
    "content": "use std::{collections::HashMap, rc::Rc, vec};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};\nuse ndarray::{Array, IxDyn};\n\nuse crate::gadgets::{\n  gadget::{Gadget, GadgetConfig, GadgetType},\n  nonlinear::pow::PowGadgetChip,\n};\n\nuse super::layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig};\n\n#[derive(Clone, Debug)]\npub struct PowChip {}\n\nimpl<F: PrimeField> Layer<F> for PowChip {\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    constants: &HashMap<i64, CellRc<F>>,\n    gadget_config: Rc<GadgetConfig>,\n    _layer_config: &LayerConfig,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    let inp = &tensors[0];\n    let inp_vec = inp.iter().map(|x| x.as_ref()).collect::<Vec<_>>();\n    let zero = constants.get(&0).unwrap().as_ref();\n\n    let pow_chip = PowGadgetChip::<F>::construct(gadget_config.clone());\n    let vec_inps = vec![inp_vec];\n    let constants = vec![zero];\n    let out = pow_chip.forward(layouter.namespace(|| \"pow chip\"), &vec_inps, &constants)?;\n\n    let out = out.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>();\n    let out = Array::from_shape_vec(IxDyn(inp.shape()), out).unwrap();\n\n    Ok(vec![out])\n  }\n}\n\nimpl GadgetConsumer for PowChip {\n  fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {\n    vec![GadgetType::Pow, GadgetType::InputLookup]\n  }\n}\n"
  },
  {
    "path": "src/layers/rsqrt.rs",
    "content": "use std::{collections::HashMap, rc::Rc, vec};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};\nuse ndarray::{Array, IxDyn};\n\nuse crate::gadgets::{\n  gadget::{Gadget, GadgetConfig, GadgetType},\n  nonlinear::rsqrt::RsqrtGadgetChip,\n};\n\nuse super::layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig};\n\n#[derive(Clone, Debug)]\npub struct RsqrtChip {}\n\nimpl<F: PrimeField> Layer<F> for RsqrtChip {\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    constants: &HashMap<i64, CellRc<F>>,\n    gadget_config: Rc<GadgetConfig>,\n    layer_config: &LayerConfig,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    let inp = &tensors[0];\n    let mut inp_vec = vec![];\n\n    let mask = &layer_config.mask;\n    let mut mask_map = HashMap::new();\n    for i in 0..mask.len() / 2 {\n      mask_map.insert(mask[2 * i], mask[2 * i + 1]);\n    }\n\n    let min_val = gadget_config.min_val;\n    let min_val = constants.get(&min_val).unwrap().as_ref();\n    let max_val = gadget_config.max_val;\n    let max_val = constants.get(&max_val).unwrap().as_ref();\n    for (i, val) in inp.iter().enumerate() {\n      let i = i as i64;\n      if mask_map.contains_key(&i) {\n        let mask_val = *mask_map.get(&i).unwrap();\n        if mask_val == 1 {\n          inp_vec.push(max_val);\n        } else if mask_val == -1 {\n          inp_vec.push(min_val);\n        } else {\n          panic!();\n        }\n      } else {\n        inp_vec.push(val.as_ref());\n      }\n    }\n\n    let zero = constants.get(&0).unwrap().as_ref();\n    let rsqrt_chip = RsqrtGadgetChip::<F>::construct(gadget_config.clone());\n    let vec_inps = vec![inp_vec];\n    let constants = vec![zero, min_val, max_val];\n    let out = rsqrt_chip.forward(layouter.namespace(|| \"rsqrt chip\"), &vec_inps, &constants)?;\n\n    let out = out.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>();\n    let out = Array::from_shape_vec(IxDyn(inp.shape()), out).unwrap();\n\n    Ok(vec![out])\n  }\n}\n\nimpl GadgetConsumer for RsqrtChip {\n  fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {\n    vec![GadgetType::Rsqrt, GadgetType::InputLookup]\n  }\n}\n"
  },
  {
    "path": "src/layers/shape/broadcast.rs",
    "content": "//\n// Broadcast is used as a temporary measure to represent a the backprop\n// of a full-kernel AvgPool2D\n//\n\nuse std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};\nuse ndarray::Array;\n\nuse crate::{\n  gadgets::gadget::GadgetConfig,\n  layers::layer::{AssignedTensor, CellRc, GadgetConsumer},\n};\n\nuse super::super::layer::{Layer, LayerConfig};\n\npub struct BroadcastChip {}\n\n// TODO: Fix this after demo\nimpl<F: PrimeField> Layer<F> for BroadcastChip {\n  fn forward(\n    &self,\n    _layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    _constants: &HashMap<i64, CellRc<F>>,\n    _gadget_config: Rc<GadgetConfig>,\n    layer_config: &LayerConfig,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    let inp = &tensors[0];\n    let shape = inp.shape();\n    let output_shape = layer_config.out_shapes[0].clone();\n\n    // Check that we only broadcast dimensions with shape 1\n    assert!(shape.len() == output_shape.len());\n    assert!(shape.len() == 4);\n\n    for (inp, outp) in shape.iter().zip(output_shape.iter()) {\n      if *inp != *outp && !(*inp == 1) {\n        panic!();\n      }\n    }\n\n    let mut output_flat = vec![];\n\n    for i in 0..output_shape[0] {\n      for j in 0..output_shape[1] {\n        for k in 0..output_shape[2] {\n          for l in 0..output_shape[3] {\n            let indexes = [i, j, k, l]\n              .iter()\n              .enumerate()\n              .map(|(idx, x)| if shape[idx] == 1 { 0 } else { *x })\n              .collect::<Vec<_>>();\n            output_flat.push(inp[[indexes[0], indexes[1], indexes[2], indexes[3]]].clone());\n          }\n        }\n      }\n    }\n\n    println!(\"Broadcast : {:?} -> {:?}\", inp.shape(), output_shape);\n    let out = Array::from_shape_vec(output_shape, output_flat).unwrap();\n    Ok(vec![out])\n  }\n}\n\nimpl GadgetConsumer for BroadcastChip {\n  fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {\n    vec![]\n  }\n}\n"
  },
  {
    "path": "src/layers/shape/concatenation.rs",
    "content": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};\nuse ndarray::{concatenate, Axis};\n\nuse crate::{\n  gadgets::gadget::{GadgetConfig, GadgetType},\n  layers::layer::{AssignedTensor, CellRc, GadgetConsumer},\n};\n\nuse super::super::layer::{Layer, LayerConfig};\n\npub struct ConcatenationChip {}\n\nimpl<F: PrimeField> Layer<F> for ConcatenationChip {\n  fn forward(\n    &self,\n    _layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    _constants: &HashMap<i64, CellRc<F>>,\n    _gadget_config: Rc<GadgetConfig>,\n    layer_config: &LayerConfig,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    let axis = layer_config.layer_params[0] as usize;\n    let views = tensors.iter().map(|x| x.view()).collect::<Vec<_>>();\n    // TODO: this is a bit of a hack\n    let out = concatenate(Axis(axis), views.as_slice()).unwrap_or(tensors[0].clone());\n\n    Ok(vec![out])\n  }\n}\n\nimpl GadgetConsumer for ConcatenationChip {\n  fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<GadgetType> {\n    vec![]\n  }\n}\n"
  },
  {
    "path": "src/layers/shape/mask_neg_inf.rs",
    "content": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};\nuse ndarray::{Array, IxDyn};\n\nuse crate::{\n  gadgets::gadget::GadgetConfig,\n  layers::layer::{AssignedTensor, CellRc, GadgetConsumer},\n};\n\nuse super::super::layer::{Layer, LayerConfig};\n\npub struct MaskNegInfChip {}\n\nimpl<F: PrimeField> Layer<F> for MaskNegInfChip {\n  fn forward(\n    &self,\n    _layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    constants: &HashMap<i64, CellRc<F>>,\n    gadget_config: Rc<GadgetConfig>,\n    layer_config: &LayerConfig,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    let inp = &tensors[0];\n    let mask_ndim = layer_config.layer_params[0] as usize;\n    let mask_shape = layer_config.layer_params[1..mask_ndim + 1]\n      .iter()\n      .map(|x| *x as usize)\n      .collect::<Vec<_>>();\n\n    let mask_vec = layer_config.layer_params[mask_ndim + 1..].to_vec();\n    let mask = Array::from_shape_vec(IxDyn(&mask_shape), mask_vec).unwrap();\n    let mask = mask.broadcast(inp.raw_dim()).unwrap();\n\n    let min_val = gadget_config.min_val;\n    let min_val = constants.get(&min_val).unwrap().clone();\n    let mut out_vec = vec![];\n    for (val, to_mask) in inp.iter().zip(mask.iter()) {\n      if *to_mask == 0 {\n        out_vec.push(val.clone());\n      } else {\n        out_vec.push(min_val.clone());\n      }\n    }\n\n    let outp = Array::from_shape_vec(inp.raw_dim(), out_vec).unwrap();\n    Ok(vec![outp])\n  }\n}\n\nimpl GadgetConsumer for MaskNegInfChip {\n  fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {\n    vec![]\n  }\n}\n"
  },
  {
    "path": "src/layers/shape/pack.rs",
    "content": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};\nuse ndarray::{concatenate, Axis};\n\nuse crate::{\n  gadgets::gadget::{GadgetConfig, GadgetType},\n  layers::layer::{AssignedTensor, CellRc, GadgetConsumer},\n};\n\nuse super::super::layer::{Layer, LayerConfig};\n\npub struct PackChip {}\n\nimpl<F: PrimeField> Layer<F> for PackChip {\n  fn forward(\n    &self,\n    _layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    _constants: &HashMap<i64, CellRc<F>>,\n    _gadget_config: Rc<GadgetConfig>,\n    layer_config: &LayerConfig,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    let axis = layer_config.layer_params[0] as usize;\n    if axis > 1 {\n      panic!(\"Pack only supports axis=0 or axis=1\");\n    }\n\n    let expanded = tensors\n      .into_iter()\n      .map(|x| x.clone().insert_axis(Axis(axis)))\n      .collect::<Vec<_>>();\n    let views = expanded.iter().map(|x| x.view()).collect::<Vec<_>>();\n\n    // TODO: in some cases, the pack is unnecessary. Simply return the first tensor in this case\n    let out = concatenate(Axis(axis), views.as_slice()).unwrap_or(tensors[0].clone());\n\n    Ok(vec![out])\n  }\n}\n\nimpl GadgetConsumer for PackChip {\n  fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<GadgetType> {\n    vec![]\n  }\n}\n"
  },
  {
    "path": "src/layers/shape/pad.rs",
    "content": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter},\n  halo2curves::ff::PrimeField,\n  plonk::Error,\n};\nuse ndarray::{Array, Axis, IxDyn, Slice};\n\nuse crate::{\n  gadgets::gadget::GadgetConfig,\n  layers::layer::{AssignedTensor, GadgetConsumer},\n};\n\nuse super::super::layer::{Layer, LayerConfig};\n\n// TODO: figure out where to put this\npub fn pad<G: Clone>(\n  input: &Array<Rc<G>, IxDyn>,\n  padding: Vec<[usize; 2]>,\n  pad_val: &Rc<G>,\n) -> Array<Rc<G>, IxDyn> {\n  let tmp = input.iter().collect();\n  let input = Array::from_shape_vec(input.raw_dim(), tmp).unwrap();\n  assert_eq!(input.ndim(), padding.len());\n  let mut padded_shape = input.raw_dim();\n  for (ax, (&ax_len, &[pad_lo, pad_hi])) in input.shape().iter().zip(&padding).enumerate() {\n    padded_shape[ax] = ax_len + pad_lo + pad_hi;\n  }\n\n  let mut padded = Array::from_elem(padded_shape, pad_val);\n  let padded_dim = padded.raw_dim();\n  {\n    // Select portion of padded array that needs to be copied from the\n    // original array.\n    let mut orig_portion = padded.view_mut();\n    for (ax, &[pad_lo, pad_hi]) in padding.iter().enumerate() {\n      orig_portion.slice_axis_inplace(\n        Axis(ax),\n        Slice::from(pad_lo as isize..padded_dim[ax] as isize - (pad_hi as isize)),\n      );\n    }\n    // Copy the data from the original array.\n    orig_portion.assign(&input.view());\n  }\n\n  let dim = padded.raw_dim();\n  let tmp = padded.into_iter().map(|x| x.clone()).collect();\n  let padded = Array::from_shape_vec(dim, tmp).unwrap();\n\n  padded\n}\n\npub struct PadChip {}\n\npub struct PadConfig {\n  pub padding: Vec<[usize; 2]>,\n}\n\nimpl PadChip {\n  pub fn param_vec_to_config(layer_params: Vec<i64>) -> PadConfig {\n    assert!(layer_params.len() % 2 == 0);\n\n    let padding = layer_params\n      .chunks(2)\n      .map(|chunk| [chunk[0] as usize, chunk[1] as usize])\n      .collect();\n    PadConfig { padding }\n  }\n}\n\nimpl<F: PrimeField> Layer<F> for PadChip {\n  fn forward(\n    &self,\n    _layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    constants: &HashMap<i64, Rc<AssignedCell<F, F>>>,\n    _gadget_config: Rc<GadgetConfig>,\n    layer_config: &LayerConfig,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    // FIXME: the pad from tflite is actually two, but mine is one\n    // assert_eq!(tensors.len(), 1);\n    let input = &tensors[0];\n\n    let zero = constants.get(&0).unwrap().clone();\n    let padding = PadChip::param_vec_to_config(layer_config.layer_params.clone());\n    let padded = pad(input, padding.padding, &zero);\n\n    Ok(vec![padded])\n  }\n}\n\nimpl GadgetConsumer for PadChip {\n  fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {\n    vec![]\n  }\n}\n"
  },
  {
    "path": "src/layers/shape/permute.rs",
    "content": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};\nuse ndarray::IxDyn;\n\nuse crate::{\n  gadgets::gadget::GadgetConfig,\n  layers::layer::{AssignedTensor, CellRc, GadgetConsumer},\n};\n\nuse super::super::layer::{Layer, LayerConfig};\n\npub struct PermuteChip {}\n\nimpl<F: PrimeField> Layer<F> for PermuteChip {\n  fn forward(\n    &self,\n    _layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    _constants: &HashMap<i64, CellRc<F>>,\n    _gadget_config: Rc<GadgetConfig>,\n    layer_config: &LayerConfig,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    let inp = &tensors[0];\n    let params = &layer_config\n      .layer_params\n      .iter()\n      .map(|x| *x as usize)\n      .collect::<Vec<_>>()[..];\n\n    assert!(inp.ndim() == params.len());\n\n    let out = inp.clone();\n    let out = out.permuted_axes(IxDyn(params));\n    Ok(vec![out])\n  }\n}\n\nimpl GadgetConsumer for PermuteChip {\n  fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {\n    vec![]\n  }\n}\n"
  },
  {
    "path": "src/layers/shape/reshape.rs",
    "content": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};\nuse ndarray::Array;\n\nuse crate::{\n  gadgets::gadget::GadgetConfig,\n  layers::layer::{AssignedTensor, CellRc, GadgetConsumer},\n};\n\nuse super::super::layer::{Layer, LayerConfig};\n\npub struct ReshapeChip {}\n\nimpl<F: PrimeField> Layer<F> for ReshapeChip {\n  fn forward(\n    &self,\n    _layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    _constants: &HashMap<i64, CellRc<F>>,\n    _gadget_config: Rc<GadgetConfig>,\n    layer_config: &LayerConfig,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    let inp = &tensors[0];\n    let shape = layer_config.out_shapes[0].clone();\n\n    println!(\"Reshape: {:?} -> {:?}\", inp.shape(), shape);\n    let flat = inp.iter().map(|x| x.clone()).collect();\n    let out = Array::from_shape_vec(shape, flat).unwrap();\n    Ok(vec![out])\n  }\n}\n\nimpl GadgetConsumer for ReshapeChip {\n  fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {\n    vec![]\n  }\n}\n"
  },
  {
    "path": "src/layers/shape/resize_nn.rs",
    "content": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};\nuse ndarray::{Array, IxDyn};\n\nuse crate::{\n  gadgets::gadget::GadgetConfig,\n  layers::layer::{AssignedTensor, CellRc, GadgetConsumer},\n};\n\nuse super::super::layer::{Layer, LayerConfig};\n\npub struct ResizeNNChip {}\n\n// TODO: this does not work in general\nimpl<F: PrimeField> Layer<F> for ResizeNNChip {\n  fn forward(\n    &self,\n    _layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    _constants: &HashMap<i64, CellRc<F>>,\n    _gadget_config: Rc<GadgetConfig>,\n    layer_config: &LayerConfig,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    let inp = &tensors[0];\n    let output_shape = layer_config.out_shapes[0].clone();\n\n    assert_eq!(inp.ndim(), 4);\n    assert_eq!(inp.shape()[0], 1);\n    assert_eq!(inp.shape()[3], output_shape[3]);\n\n    let mut flat = vec![];\n    // Do nearest neighbor interpolation over batch, h, w, c\n    // The interpolation is over h and w\n    for b in 0..inp.shape()[0] {\n      for h in 0..output_shape[1] {\n        let h_in = (h as f64 * (inp.shape()[1] as f64 / output_shape[1] as f64)) as usize;\n        for w in 0..output_shape[2] {\n          let w_in = (w as f64 * (inp.shape()[2] as f64 / output_shape[2] as f64)) as usize;\n          for c in 0..inp.shape()[3] {\n            flat.push(inp[[b, h_in, w_in, c]].clone());\n          }\n        }\n      }\n    }\n\n    let outp = Array::from_shape_vec(IxDyn(&output_shape), flat).unwrap();\n    Ok(vec![outp])\n  }\n}\n\nimpl GadgetConsumer for ResizeNNChip {\n  fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {\n    vec![]\n  }\n}\n"
  },
  {
    "path": "src/layers/shape/rotate.rs",
    "content": "// TODO: The implementation is not ideal.\n\nuse std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};\n\nuse crate::{\n  gadgets::gadget::GadgetConfig,\n  layers::layer::{AssignedTensor, CellRc, GadgetConsumer},\n};\n\nuse super::super::layer::{Layer, LayerConfig};\n\npub struct RotateChip {}\n\n// Example:\n// input:\n// [1 2 3 4]\n// [5 6 7 8]\n//\n// params: [1] -- flip axis 1 only\n// output:\n// [4 3 2 1]\n// [8 7 6 5]\nimpl<F: PrimeField> Layer<F> for RotateChip {\n  fn forward(\n    &self,\n    _layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    _constants: &HashMap<i64, CellRc<F>>,\n    _gadget_config: Rc<GadgetConfig>,\n    layer_config: &LayerConfig,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    let inp = &tensors[0];\n    let params = &layer_config.layer_params;\n\n    assert!(inp.shape().len() == 4);\n\n    let mut flip = vec![false; 4];\n    for p in params {\n      flip[*p as usize] = true;\n    }\n    let shape = inp.shape();\n\n    println!(\"Rotate: {:?} -> {:?}\", inp.shape(), shape);\n\n    let mut out = inp.clone();\n\n    for i in 0..shape[0] {\n      for j in 0..shape[1] {\n        for k in 0..shape[2] {\n          for l in 0..shape[3] {\n            let [ix, jx, kx, lx]: [usize; 4] = [i, j, k, l]\n              .iter()\n              .enumerate()\n              .map(|(idx, x)| if flip[idx] { shape[idx] - 1 - *x } else { *x })\n              .collect::<Vec<_>>()\n              .try_into()\n              .unwrap();\n            out[[ix, jx, kx, lx]] = inp[[i, j, k, l]].clone();\n          }\n        }\n      }\n    }\n\n    Ok(vec![out])\n  }\n}\n\nimpl GadgetConsumer for RotateChip {\n  fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {\n    vec![]\n  }\n}\n"
  },
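  {
    "path": "docs/sketches/rotate_sketch.rs",
    "content": "// Illustrative sketch, NOT part of the zkml crate: the flip-axis index map\n// used by RotateChip, shown on a plain 2D matrix (the chip works on 4D\n// tensors). flip[d] mirrors axis d; all names are made up for the demo.\n\nfn rotate(inp: &[Vec<i64>], flip: [bool; 2]) -> Vec<Vec<i64>> {\n  let (h, w) = (inp.len(), inp[0].len());\n  let mut out = vec![vec![0; w]; h];\n  for i in 0..h {\n    for j in 0..w {\n      let ix = if flip[0] { h - 1 - i } else { i };\n      let jx = if flip[1] { w - 1 - j } else { j };\n      out[ix][jx] = inp[i][j];\n    }\n  }\n  out\n}\n\nfn main() {\n  let inp = vec![vec![1, 2, 3, 4], vec![5, 6, 7, 8]];\n  // params = [1] (flip axis 1 only): [[4, 3, 2, 1], [8, 7, 6, 5]]\n  println!(\"{:?}\", rotate(&inp, [false, true]));\n}\n"
  },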
  {
    "path": "src/layers/shape/slice.rs",
    "content": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};\nuse ndarray::Slice;\n\nuse crate::{\n  gadgets::gadget::{GadgetConfig, GadgetType},\n  layers::layer::{AssignedTensor, CellRc, GadgetConsumer},\n};\n\nuse super::super::layer::{Layer, LayerConfig};\n\npub struct SliceChip {}\n\nimpl<F: PrimeField> Layer<F> for SliceChip {\n  fn forward(\n    &self,\n    _layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    _constants: &HashMap<i64, CellRc<F>>,\n    _gadget_config: Rc<GadgetConfig>,\n    layer_config: &LayerConfig,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    let params = &layer_config.layer_params;\n    assert_eq!(params.len() % 2, 0);\n    let num_axes = params.len() / 2;\n    let starts = &params[0..num_axes];\n    let sizes = &params[num_axes..];\n\n    let inp = &tensors[0];\n    let outp = inp.slice_each_axis(|ax| {\n      let start = starts[ax.axis.0] as usize;\n      let size = sizes[ax.axis.0];\n      if size == -1 {\n        Slice::from(start..)\n      } else {\n        Slice::from(start..(start + size as usize))\n      }\n    });\n    Ok(vec![outp.to_owned()])\n  }\n}\n\nimpl GadgetConsumer for SliceChip {\n  fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<GadgetType> {\n    vec![]\n  }\n}\n"
  },
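  {
    "path": "docs/sketches/slice_sketch.rs",
    "content": "// Illustrative sketch, NOT part of the zkml crate: the (start, size) slicing\n// convention SliceChip decodes from layer_params, shown on one axis. A size\n// of -1 means \"take everything from start to the end of the axis\".\n\nfn slice_axis(inp: &[i64], start: i64, size: i64) -> Vec<i64> {\n  let start = start as usize;\n  let end = if size == -1 { inp.len() } else { start + size as usize };\n  inp[start..end].to_vec()\n}\n\nfn main() {\n  let inp = vec![10, 11, 12, 13, 14];\n  assert_eq!(slice_axis(&inp, 1, 2), vec![11, 12]);\n  // size == -1: open-ended slice\n  assert_eq!(slice_axis(&inp, 2, -1), vec![12, 13, 14]);\n  println!(\"ok\");\n}\n"
  },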
  {
    "path": "src/layers/shape/split.rs",
    "content": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};\nuse ndarray::{Axis, Slice};\n\nuse crate::{\n  gadgets::gadget::{GadgetConfig, GadgetType},\n  layers::layer::{AssignedTensor, CellRc, GadgetConsumer},\n};\n\nuse super::super::layer::{Layer, LayerConfig};\n\npub struct SplitChip {}\n\nimpl<F: PrimeField> Layer<F> for SplitChip {\n  fn forward(\n    &self,\n    _layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    _constants: &HashMap<i64, CellRc<F>>,\n    _gadget_config: Rc<GadgetConfig>,\n    layer_config: &LayerConfig,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    let axis = layer_config.layer_params[0] as usize;\n    let num_splits = layer_config.layer_params[1] as usize;\n    let inp = &tensors[1];\n\n    let mut out = vec![];\n    let split_len = inp.shape()[axis] / num_splits;\n    for i in 0..num_splits {\n      let slice = inp\n        .slice_axis(\n          Axis(axis),\n          Slice::from((i * split_len)..((i + 1) * split_len)),\n        )\n        .to_owned();\n      out.push(slice.to_owned());\n    }\n    Ok(out)\n  }\n}\n\nimpl GadgetConsumer for SplitChip {\n  fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<GadgetType> {\n    vec![]\n  }\n}\n"
  },
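  {
    "path": "docs/sketches/split_sketch.rs",
    "content": "// Illustrative sketch, NOT part of the zkml crate: SplitChip's even split\n// along an axis, shown on a 1D vector. Like the chip, split_len is computed\n// with integer division, so any remainder elements are silently dropped.\n\nfn split(inp: &[i64], num_splits: usize) -> Vec<Vec<i64>> {\n  let split_len = inp.len() / num_splits;\n  (0..num_splits)\n    .map(|i| inp[i * split_len..(i + 1) * split_len].to_vec())\n    .collect()\n}\n\nfn main() {\n  let inp = vec![1, 2, 3, 4, 5, 6];\n  // Three equal chunks: [[1, 2], [3, 4], [5, 6]]\n  println!(\"{:?}\", split(&inp, 3));\n}\n"
  },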
  {
    "path": "src/layers/shape/transpose.rs",
    "content": "use std::{collections::HashMap, rc::Rc};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};\nuse ndarray::{Array, IxDyn};\n\nuse crate::{\n  gadgets::gadget::GadgetConfig,\n  layers::layer::{AssignedTensor, CellRc, GadgetConsumer},\n};\n\nuse super::super::layer::{Layer, LayerConfig};\n\npub struct TransposeChip {}\n\nimpl<F: PrimeField> Layer<F> for TransposeChip {\n  fn forward(\n    &self,\n    _layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    _constants: &HashMap<i64, CellRc<F>>,\n    _gadget_config: Rc<GadgetConfig>,\n    layer_config: &LayerConfig,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    assert_eq!(layer_config.layer_params.len() % 2, 0);\n    let ndim = layer_config.layer_params.len() / 2;\n    let inp_shape = layer_config.layer_params[0..ndim]\n      .to_vec()\n      .iter()\n      .map(|x| *x as usize)\n      .collect::<Vec<_>>();\n    let permutation = layer_config.layer_params[ndim..]\n      .to_vec()\n      .iter()\n      .map(|x| *x as usize)\n      .collect::<Vec<_>>();\n\n    let inp = &tensors[0];\n    // Required because of memory layout issues\n    let inp_flat = inp.iter().cloned().collect::<Vec<_>>();\n    let inp = Array::from_shape_vec(IxDyn(&inp_shape), inp_flat).unwrap();\n\n    let inp = inp.permuted_axes(IxDyn(&permutation));\n\n    Ok(vec![inp])\n  }\n}\n\nimpl GadgetConsumer for TransposeChip {\n  fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {\n    vec![]\n  }\n}\n"
  },
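  {
    "path": "docs/sketches/transpose_sketch.rs",
    "content": "// Illustrative sketch, NOT part of the zkml crate: what permuted_axes does,\n// replayed with explicit strides on a flat row-major buffer. All names here\n// are made up for the demo.\n\nfn row_major_strides(shape: &[usize]) -> Vec<usize> {\n  let mut strides = vec![1; shape.len()];\n  for d in (0..shape.len() - 1).rev() {\n    strides[d] = strides[d + 1] * shape[d + 1];\n  }\n  strides\n}\n\nfn transpose(inp: &[i64], shape: &[usize], perm: &[usize]) -> Vec<i64> {\n  let in_strides = row_major_strides(shape);\n  let out_shape: Vec<usize> = perm.iter().map(|&p| shape[p]).collect();\n  let out_strides = row_major_strides(&out_shape);\n  let mut out = vec![0; inp.len()];\n  for flat in 0..inp.len() {\n    // Decompose the output index, then map each axis back through perm.\n    let mut in_off = 0;\n    for d in 0..perm.len() {\n      let coord = (flat / out_strides[d]) % out_shape[d];\n      in_off += coord * in_strides[perm[d]];\n    }\n    out[flat] = inp[in_off];\n  }\n  out\n}\n\nfn main() {\n  // A 2x3 matrix [[1, 2, 3], [4, 5, 6]] transposed to 3x2.\n  let out = transpose(&[1, 2, 3, 4, 5, 6], &[2, 3], &[1, 0]);\n  println!(\"{:?}\", out); // [1, 4, 2, 5, 3, 6]\n}\n"
  },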
  {
    "path": "src/layers/shape.rs",
    "content": "pub mod broadcast;\npub mod concatenation;\npub mod mask_neg_inf;\npub mod pack;\npub mod pad;\npub mod permute;\npub mod reshape;\npub mod resize_nn;\npub mod rotate;\npub mod slice;\npub mod split;\npub mod transpose;\n"
  },
  {
    "path": "src/layers/softmax.rs",
    "content": "use std::{collections::HashMap, rc::Rc, vec};\n\nuse halo2_proofs::{\n  circuit::{AssignedCell, Layouter},\n  halo2curves::ff::PrimeField,\n  plonk::Error,\n};\nuse ndarray::{s, Array, IxDyn};\n\nuse crate::gadgets::{\n  adder::AdderChip,\n  gadget::{Gadget, GadgetConfig, GadgetType},\n  max::MaxChip,\n  nonlinear::exp::ExpGadgetChip,\n  sub_pairs::SubPairsChip,\n  var_div_big3::VarDivRoundBig3Chip,\n};\n\nuse super::layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig};\n\n#[derive(Clone, Debug)]\npub struct SoftmaxChip {}\n\nimpl SoftmaxChip {\n  pub fn softmax_flat<F: PrimeField>(\n    mut layouter: impl Layouter<F>,\n    constants: &HashMap<i64, CellRc<F>>,\n    inp_flat: Vec<&AssignedCell<F, F>>,\n    gadget_config: Rc<GadgetConfig>,\n    mask: &Vec<i64>,\n  ) -> Result<Vec<AssignedCell<F, F>>, Error> {\n    let exp_chip = ExpGadgetChip::<F>::construct(gadget_config.clone());\n    let adder_chip = AdderChip::<F>::construct(gadget_config.clone());\n    let sub_pairs_chip = SubPairsChip::<F>::construct(gadget_config.clone());\n    let max_chip = MaxChip::<F>::construct(gadget_config.clone());\n    let var_div_big_chip = VarDivRoundBig3Chip::<F>::construct(gadget_config.clone());\n\n    let zero = constants.get(&0).unwrap().as_ref();\n    let sf = constants\n      .get(&(gadget_config.scale_factor as i64))\n      .unwrap()\n      .as_ref();\n\n    // Mask the input for max computation and subtraction\n    let inp_take = inp_flat\n      .iter()\n      .enumerate()\n      .filter(|(i, _)| mask[*i] == 0) // Awkwardly, 1 = take negative infinity\n      .map(|(_, x)| *x)\n      .collect::<Vec<_>>();\n\n    // Compute the max\n    let max = max_chip\n      .forward(\n        layouter.namespace(|| format!(\"max\")),\n        &vec![inp_take.clone()],\n        &vec![zero],\n      )\n      .unwrap();\n    let max = &max[0];\n\n    // Subtract the max\n    let max_flat = vec![max; inp_take.len()];\n    let sub = sub_pairs_chip.forward(\n      layouter.namespace(|| format!(\"sub\")),\n      &vec![inp_take, max_flat],\n      &vec![zero],\n    )?;\n\n    let sub = sub.iter().collect::<Vec<_>>();\n\n    // Compute the exp\n    let exp_slice = exp_chip.forward(\n      layouter.namespace(|| format!(\"exp\")),\n      &vec![sub],\n      &vec![zero],\n    )?;\n\n    // Compute the sum\n    let sum = adder_chip.forward(\n      layouter.namespace(|| format!(\"sum\")),\n      &vec![exp_slice.iter().collect()],\n      &vec![zero],\n    )?;\n    let sum = sum[0].clone();\n    let sum_div_sf = var_div_big_chip.forward(\n      layouter.namespace(|| format!(\"sum div sf\")),\n      &vec![vec![&sum]],\n      &vec![zero, sf],\n    )?;\n    let sum_div_sf = sum_div_sf[0].clone();\n\n    let dived = var_div_big_chip.forward(\n      layouter.namespace(|| format!(\"div\")),\n      &vec![exp_slice.iter().collect()],\n      &vec![zero, &sum_div_sf],\n    )?;\n\n    // Take either zero (softmax(-inf)) or the result\n    let mut div_idx = 0;\n    let dived = mask\n      .iter()\n      .map(|x| {\n        if *x == 1 {\n          zero.clone()\n        } else {\n          let tmp = dived[div_idx].clone();\n          div_idx = div_idx + 1;\n          tmp\n        }\n      })\n      .collect();\n\n    Ok(dived)\n  }\n}\n\nimpl<F: PrimeField> Layer<F> for SoftmaxChip {\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    constants: &HashMap<i64, CellRc<F>>,\n    gadget_config: Rc<GadgetConfig>,\n    layer_config: &LayerConfig,\n  ) -> 
Result<Vec<AssignedTensor<F>>, Error> {\n    let inp = &tensors[0];\n    assert!(inp.ndim() == 2 || inp.ndim() == 3 || inp.ndim() == 4);\n    if inp.ndim() == 4 {\n      assert_eq!(inp.shape()[0], 1);\n    }\n\n    let inp_shape = inp.shape().iter().map(|x| *x).collect::<Vec<_>>();\n    let mask = if layer_config.layer_params.len() == 0 {\n      Array::from_shape_fn(IxDyn(&inp_shape), |_| 0)\n    } else {\n      let mask_shape_len = layer_config.layer_params[0] as usize;\n      let mask_shape = layer_config.layer_params[1..(1 + mask_shape_len)]\n        .iter()\n        .map(|x| *x as usize)\n        .collect::<Vec<_>>();\n      let mask = layer_config.layer_params[(1 + mask_shape_len)..].to_vec();\n      let mask = Array::from_shape_vec(IxDyn(&mask_shape), mask).unwrap();\n      let mask = mask.broadcast(IxDyn(&inp_shape)).unwrap().to_owned();\n      mask\n    };\n\n    let shape = if inp.ndim() == 2 || inp.ndim() == 3 {\n      inp.shape().iter().map(|x| *x).collect::<Vec<_>>()\n    } else {\n      vec![inp.shape()[1], inp.shape()[2], inp.shape()[3]]\n    };\n    let inp = inp.to_owned().into_shape(shape.clone()).unwrap();\n    let mask = mask.into_shape(shape.clone()).unwrap();\n\n    let mut outp = vec![];\n    if inp.ndim() == 2 {\n      for i in 0..shape[0] {\n        let inp_slice = inp.slice(s![i, ..]);\n        let inp_flat = inp_slice.iter().map(|x| x.as_ref()).collect::<Vec<_>>();\n        let mask_slice = mask.slice(s![i, ..]);\n        let mask_flat = mask_slice.iter().map(|x| *x as i64).collect::<Vec<_>>();\n        let dived = Self::softmax_flat(\n          layouter.namespace(|| format!(\"softmax {}\", i)),\n          constants,\n          inp_flat,\n          gadget_config.clone(),\n          &mask_flat,\n        )\n        .unwrap();\n        outp.extend(dived);\n      }\n    } else if inp.ndim() == 3 {\n      for i in 0..shape[0] {\n        for j in 0..shape[1] {\n          let inp_slice = inp.slice(s![i, j, ..]);\n          let inp_flat = inp_slice.iter().map(|x| x.as_ref()).collect::<Vec<_>>();\n          let mask_slice = mask.slice(s![i, j, ..]);\n          let mask_flat = mask_slice.iter().map(|x| *x as i64).collect::<Vec<_>>();\n          let dived = Self::softmax_flat(\n            layouter.namespace(|| format!(\"softmax {} {}\", i, j)),\n            constants,\n            inp_flat,\n            gadget_config.clone(),\n            &mask_flat,\n          )\n          .unwrap();\n          outp.extend(dived);\n        }\n      }\n    } else {\n      panic!(\"Not implemented\");\n    }\n\n    let outp = outp.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>();\n    let outp = Array::from_shape_vec(IxDyn(inp.shape()), outp).unwrap();\n    Ok(vec![outp])\n  }\n}\n\nimpl GadgetConsumer for SoftmaxChip {\n  fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {\n    vec![\n      GadgetType::Exp,\n      GadgetType::Adder,\n      GadgetType::VarDivRoundBig3,\n      GadgetType::Max,\n      GadgetType::SubPairs,\n      GadgetType::InputLookup,\n    ]\n  }\n}\n"
  },
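  {
    "path": "docs/sketches/softmax_fixed_point_sketch.rs",
    "content": "// Illustrative sketch, NOT part of the zkml crate: the fixed-point softmax\n// pipeline SoftmaxChip arithmetizes, replayed in plain i64/f64. A value x is\n// stored as round(x * sf). The Exp gadget's lookup is approximated with f64\n// here; the two rounded divisions mirror the VarDivRoundBig3 calls, so the\n// outputs land back at scale sf (out_i ~= exp_i / (sum / sf)).\n\nfn div_round(a: i64, b: i64) -> i64 {\n  (a + b / 2) / b\n}\n\nfn softmax_fixed(inp: &[i64], sf: i64) -> Vec<i64> {\n  // Subtract the max for numerical range, as the circuit does.\n  let max = *inp.iter().max().unwrap();\n  // exp((x - max) / sf), re-quantized to scale sf.\n  let exps: Vec<i64> = inp\n    .iter()\n    .map(|&x| (((x - max) as f64 / sf as f64).exp() * sf as f64).round() as i64)\n    .collect();\n  let sum: i64 = exps.iter().sum();\n  let sum_div_sf = div_round(sum, sf);\n  exps.iter().map(|&e| div_round(e, sum_div_sf)).collect()\n}\n\nfn main() {\n  let sf = 1 << 10;\n  // Quantized logits for [0.0, 1.0, 2.0]. Prints [70, 189, 512]; the float\n  // softmax at this scale is ~[92, 251, 681]. The gap comes from rounding\n  // sum / sf (~1.50) up to 2, a rounding the circuit itself performs.\n  println!(\"{:?}\", softmax_fixed(&[0, sf, 2 * sf], sf));\n}\n"
  },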
  {
    "path": "src/layers/sqrt.rs",
    "content": "use std::{collections::HashMap, rc::Rc, vec};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};\nuse ndarray::{Array, IxDyn};\n\nuse crate::gadgets::{\n  gadget::{Gadget, GadgetConfig, GadgetType},\n  nonlinear::sqrt::SqrtGadgetChip,\n};\n\nuse super::layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig};\n\n#[derive(Clone, Debug)]\npub struct SqrtChip {}\n\nimpl<F: PrimeField> Layer<F> for SqrtChip {\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    constants: &HashMap<i64, CellRc<F>>,\n    gadget_config: Rc<GadgetConfig>,\n    layer_config: &LayerConfig,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    let inp = &tensors[0];\n    let mut inp_vec = vec![];\n\n    let mask = &layer_config.mask;\n    let mut mask_map = HashMap::new();\n    for i in 0..mask.len() / 2 {\n      mask_map.insert(mask[2 * i], mask[2 * i + 1]);\n    }\n\n    let min_val = gadget_config.min_val;\n    let min_val = constants.get(&min_val).unwrap().as_ref();\n    let max_val = gadget_config.max_val;\n    let max_val = constants.get(&max_val).unwrap().as_ref();\n    for (i, val) in inp.iter().enumerate() {\n      let i = i as i64;\n      if mask_map.contains_key(&i) {\n        let mask_val = *mask_map.get(&i).unwrap();\n        if mask_val == 1 {\n          inp_vec.push(max_val);\n        } else if mask_val == -1 {\n          inp_vec.push(min_val);\n        } else {\n          panic!();\n        }\n      } else {\n        inp_vec.push(val.as_ref());\n      }\n    }\n\n    let zero = constants.get(&0).unwrap().as_ref();\n    let sqrt_chip = SqrtGadgetChip::<F>::construct(gadget_config.clone());\n    let vec_inps = vec![inp_vec];\n    let constants = vec![zero, min_val, max_val];\n    let out = sqrt_chip.forward(layouter.namespace(|| \"sqrt chip\"), &vec_inps, &constants)?;\n\n    let out = out.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>();\n    let out = Array::from_shape_vec(IxDyn(inp.shape()), out).unwrap();\n\n    Ok(vec![out])\n  }\n}\n\nimpl GadgetConsumer for SqrtChip {\n  fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {\n    vec![GadgetType::Sqrt, GadgetType::InputLookup]\n  }\n}\n"
  },
  {
    "path": "src/layers/square.rs",
    "content": "use std::{collections::HashMap, rc::Rc, vec};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};\nuse ndarray::{Array, IxDyn};\n\nuse crate::gadgets::{\n  gadget::{Gadget, GadgetConfig, GadgetType},\n  square::SquareGadgetChip,\n  var_div::VarDivRoundChip,\n};\n\nuse super::layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig};\n\n#[derive(Clone, Debug)]\npub struct SquareChip {}\n\nimpl<F: PrimeField> Layer<F> for SquareChip {\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    constants: &HashMap<i64, CellRc<F>>,\n    gadget_config: Rc<GadgetConfig>,\n    _layer_config: &LayerConfig,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    assert_eq!(tensors.len(), 1);\n\n    let inp = &tensors[0];\n    let zero = constants.get(&0).unwrap().as_ref();\n\n    let square_chip = SquareGadgetChip::<F>::construct(gadget_config.clone());\n    let inp_vec = inp.iter().map(|x| x.as_ref()).collect::<Vec<_>>();\n    let vec_inputs = vec![inp_vec];\n    let single_inps = vec![zero];\n    let out = square_chip.forward(\n      layouter.namespace(|| \"square chip\"),\n      &vec_inputs,\n      &single_inps,\n    )?;\n\n    let var_div_chip = VarDivRoundChip::<F>::construct(gadget_config.clone());\n    let div = constants\n      .get(&(gadget_config.scale_factor as i64))\n      .unwrap()\n      .as_ref();\n    let single_inps = vec![zero, div];\n    let out = out.iter().collect::<Vec<_>>();\n    let vec_inputs = vec![out];\n    let out = var_div_chip.forward(\n      layouter.namespace(|| \"var div chip\"),\n      &vec_inputs,\n      &single_inps,\n    )?;\n\n    let out = out.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>();\n    let out = Array::from_shape_vec(IxDyn(inp.shape()), out).unwrap();\n    Ok(vec![out])\n  }\n}\n\nimpl GadgetConsumer for SquareChip {\n  fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {\n    vec![\n      GadgetType::Square,\n      GadgetType::VarDivRound,\n      GadgetType::InputLookup,\n    ]\n  }\n}\n"
  },
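  {
    "path": "docs/sketches/fixed_point_rescale_sketch.rs",
    "content": "// Illustrative sketch, NOT part of the zkml crate: why SquareChip (and the\n// squared-difference layer that follows) divide by the scale factor after\n// multiplying. With x stored as q = round(x * sf), the product q * q carries\n// scale sf^2, so one rounded division by sf returns it to scale sf.\n\nfn quantize(x: f64, sf: i64) -> i64 {\n  (x * sf as f64).round() as i64\n}\n\nfn square_rescaled(q: i64, sf: i64) -> i64 {\n  let prod = q * q; // scale sf^2\n  (prod + sf / 2) / sf // rounded division back to scale sf\n}\n\nfn main() {\n  let sf = 1 << 12;\n  let q = quantize(1.5, sf);\n  let sq = square_rescaled(q, sf);\n  // 1.5^2 = 2.25, so this prints ~2.25 after de-quantizing.\n  println!(\"{} ({})\", sq, sq as f64 / sf as f64);\n}\n"
  },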
  {
    "path": "src/layers/squared_diff.rs",
    "content": "use std::{collections::HashMap, rc::Rc, vec};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};\nuse ndarray::{Array, IxDyn};\n\nuse crate::{\n  gadgets::{\n    gadget::{Gadget, GadgetConfig, GadgetType},\n    squared_diff::SquaredDiffGadgetChip,\n    var_div::VarDivRoundChip,\n  },\n  utils::helpers::broadcast,\n};\n\nuse super::layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig};\n\n#[derive(Clone, Debug)]\npub struct SquaredDiffChip {}\n\nimpl<F: PrimeField> Layer<F> for SquaredDiffChip {\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    constants: &HashMap<i64, CellRc<F>>,\n    gadget_config: Rc<GadgetConfig>,\n    _layer_config: &LayerConfig,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    assert_eq!(tensors.len(), 2);\n    let inp1 = &tensors[0];\n    let inp2 = &tensors[1];\n    // Broadcoasting allowed... can't check shapes easily\n    let (inp1, inp2) = broadcast(inp1, inp2);\n\n    let zero = constants.get(&0).unwrap().as_ref();\n\n    let sq_diff_chip = SquaredDiffGadgetChip::<F>::construct(gadget_config.clone());\n    let inp1_vec = inp1.iter().map(|x| x.as_ref()).collect::<Vec<_>>();\n    let inp2_vec = inp2.iter().map(|x| x.as_ref()).collect::<Vec<_>>();\n    let vec_inputs = vec![inp1_vec, inp2_vec];\n    let tmp_constants = vec![zero];\n    let out = sq_diff_chip.forward(\n      layouter.namespace(|| \"sq diff chip\"),\n      &vec_inputs,\n      &tmp_constants,\n    )?;\n\n    let var_div_chip = VarDivRoundChip::<F>::construct(gadget_config.clone());\n    let div = constants\n      .get(&(gadget_config.scale_factor as i64))\n      .unwrap()\n      .as_ref();\n\n    let single_inputs = vec![zero, div];\n    let out = out.iter().map(|x| x).collect::<Vec<_>>();\n    let out = var_div_chip.forward(\n      layouter.namespace(|| \"sq diff div\"),\n      &vec![out],\n      &single_inputs,\n    )?;\n\n    let out = out.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>();\n    let out = Array::from_shape_vec(IxDyn(inp1.shape()), out).unwrap();\n\n    Ok(vec![out])\n  }\n}\n\nimpl GadgetConsumer for SquaredDiffChip {\n  fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {\n    vec![\n      GadgetType::SquaredDiff,\n      GadgetType::VarDivRound,\n      GadgetType::InputLookup,\n    ]\n  }\n}\n"
  },
  {
    "path": "src/layers/tanh.rs",
    "content": "use std::{collections::HashMap, rc::Rc, vec};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};\nuse ndarray::{Array, IxDyn};\n\nuse crate::gadgets::{\n  gadget::{Gadget, GadgetConfig, GadgetType},\n  nonlinear::tanh::TanhGadgetChip,\n};\n\nuse super::layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig};\n\n#[derive(Clone, Debug)]\npub struct TanhChip {}\n\nimpl<F: PrimeField> Layer<F> for TanhChip {\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    constants: &HashMap<i64, CellRc<F>>,\n    gadget_config: Rc<GadgetConfig>,\n    _layer_config: &LayerConfig,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    let inp = &tensors[0];\n    let inp_vec = inp.iter().map(|x| x.as_ref()).collect::<Vec<_>>();\n    let zero = constants.get(&0).unwrap().as_ref();\n\n    let tanh_chip = TanhGadgetChip::<F>::construct(gadget_config.clone());\n    let vec_inps = vec![inp_vec];\n    let constants = vec![zero];\n    let out = tanh_chip.forward(layouter.namespace(|| \"tanh chip\"), &vec_inps, &constants)?;\n\n    let out = out.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>();\n    let out = Array::from_shape_vec(IxDyn(inp.shape()), out).unwrap();\n\n    Ok(vec![out])\n  }\n}\n\nimpl GadgetConsumer for TanhChip {\n  fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {\n    vec![GadgetType::Tanh, GadgetType::InputLookup]\n  }\n}\n"
  },
  {
    "path": "src/layers/update.rs",
    "content": "use std::{collections::HashMap, rc::Rc, vec};\n\nuse halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};\nuse ndarray::{Array, IxDyn};\n\nuse crate::gadgets::{\n  gadget::{Gadget, GadgetConfig, GadgetType},\n  update::UpdateGadgetChip,\n};\n\nuse super::layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig};\n\n#[derive(Clone, Debug)]\npub struct UpdateChip {}\n\nimpl<F: PrimeField + Ord> Layer<F> for UpdateChip {\n  fn forward(\n    &self,\n    mut layouter: impl Layouter<F>,\n    tensors: &Vec<AssignedTensor<F>>,\n    constants: &HashMap<i64, CellRc<F>>,\n    gadget_config: Rc<GadgetConfig>,\n    _layer_config: &LayerConfig,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    let w = &tensors[0];\n    let dw = &tensors[1];\n\n    let zero = constants.get(&0).unwrap().as_ref();\n    let update_chip = UpdateGadgetChip::<F>::construct((*gadget_config).clone());\n\n    let flattened_w = w.into_iter().map(|x| (**x).clone()).collect::<Vec<_>>();\n    let flattened_dw = dw.into_iter().map(|x| (**x).clone()).collect::<Vec<_>>();\n    let flattened_w_ref = flattened_w.iter().collect::<Vec<_>>();\n    let flattened_dw_ref = flattened_dw.iter().collect::<Vec<_>>();\n\n    let vec_inps = vec![flattened_w_ref, flattened_dw_ref];\n    let constants = vec![zero];\n    let out = update_chip.forward(layouter.namespace(|| \"update chip\"), &vec_inps, &constants)?;\n\n    let out = out.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>();\n    let out = Array::from_shape_vec(IxDyn(w.shape()), out).unwrap();\n\n    Ok(vec![out])\n  }\n}\n\nimpl GadgetConsumer for UpdateChip {\n  fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {\n    vec![GadgetType::Update]\n  }\n}\n"
  },
  {
    "path": "src/layers.rs",
    "content": "// Generics\npub mod averager;\n\npub mod arithmetic;\npub mod shape;\n\n// Concrete implementations\npub mod avg_pool_2d;\npub mod batch_mat_mul;\npub mod conv2d;\npub mod div_fixed;\npub mod fully_connected;\npub mod logistic;\npub mod max_pool_2d;\npub mod mean;\npub mod noop;\npub mod pow;\npub mod rsqrt;\npub mod softmax;\npub mod sqrt;\npub mod square;\npub mod squared_diff;\npub mod tanh;\npub mod update;\n\n// Special: dag\npub mod dag;\n\n// Special: layer\npub mod layer;\n"
  },
  {
    "path": "src/lib.rs",
    "content": "#![feature(int_roundings)]\n\npub mod commitments;\npub mod gadgets;\npub mod layers;\npub mod model;\npub mod utils;\n"
  },
  {
    "path": "src/model.rs",
    "content": "use std::{\n  collections::{BTreeMap, BTreeSet, HashMap},\n  marker::PhantomData,\n  rc::Rc,\n  sync::{Arc, Mutex},\n};\n\nuse halo2_proofs::{\n  circuit::{Layouter, SimpleFloorPlanner, Value},\n  halo2curves::ff::{FromUniformBytes, PrimeField},\n  plonk::{Advice, Circuit, Column, ConstraintSystem, Error, Instance},\n};\nuse lazy_static::lazy_static;\nuse ndarray::{Array, IxDyn};\nuse num_bigint::BigUint;\n\nuse crate::{\n  commitments::{\n    commit::Commit,\n    packer::PackerChip,\n    poseidon_commit::{PoseidonCommitChip, L, RATE, WIDTH},\n  },\n  gadgets::{\n    add_pairs::AddPairsChip,\n    adder::AdderChip,\n    bias_div_round_relu6::BiasDivRoundRelu6Chip,\n    dot_prod::DotProductChip,\n    gadget::{Gadget, GadgetConfig, GadgetType},\n    input_lookup::InputLookupChip,\n    max::MaxChip,\n    mul_pairs::MulPairsChip,\n    nonlinear::{exp::ExpGadgetChip, pow::PowGadgetChip, relu::ReluChip, tanh::TanhGadgetChip},\n    nonlinear::{logistic::LogisticGadgetChip, rsqrt::RsqrtGadgetChip, sqrt::SqrtGadgetChip},\n    sqrt_big::SqrtBigChip,\n    square::SquareGadgetChip,\n    squared_diff::SquaredDiffGadgetChip,\n    sub_pairs::SubPairsChip,\n    update::UpdateGadgetChip,\n    var_div::VarDivRoundChip,\n    var_div_big::VarDivRoundBigChip,\n    var_div_big3::VarDivRoundBig3Chip,\n  },\n  layers::{\n    arithmetic::{add::AddChip, div_var::DivVarChip, mul::MulChip, sub::SubChip},\n    avg_pool_2d::AvgPool2DChip,\n    batch_mat_mul::BatchMatMulChip,\n    conv2d::Conv2DChip,\n    dag::{DAGLayerChip, DAGLayerConfig},\n    fully_connected::{FullyConnectedChip, FullyConnectedConfig},\n    layer::{AssignedTensor, CellRc, GadgetConsumer, LayerConfig, LayerType},\n    logistic::LogisticChip,\n    max_pool_2d::MaxPool2DChip,\n    mean::MeanChip,\n    noop::NoopChip,\n    pow::PowChip,\n    rsqrt::RsqrtChip,\n    shape::{\n      broadcast::BroadcastChip, concatenation::ConcatenationChip, mask_neg_inf::MaskNegInfChip,\n      pack::PackChip, pad::PadChip, permute::PermuteChip, reshape::ReshapeChip,\n      resize_nn::ResizeNNChip, rotate::RotateChip, slice::SliceChip, split::SplitChip,\n      transpose::TransposeChip,\n    },\n    softmax::SoftmaxChip,\n    sqrt::SqrtChip,\n    square::SquareChip,\n    squared_diff::SquaredDiffChip,\n    tanh::TanhChip,\n    update::UpdateChip,\n  },\n  utils::{\n    helpers::{convert_to_bigint, RAND_START_IDX},\n    loader::{load_model_msgpack, ModelMsgpack},\n  },\n};\n\nlazy_static! 
{\n  pub static ref GADGET_CONFIG: Mutex<GadgetConfig> = Mutex::new(GadgetConfig::default());\n  pub static ref PUBLIC_VALS: Mutex<Vec<BigUint>> = Mutex::new(vec![]);\n}\n\n#[derive(Clone, Debug, Default)]\npub struct ModelCircuit<F: PrimeField> {\n  pub used_gadgets: Arc<BTreeSet<GadgetType>>,\n  pub dag_config: DAGLayerConfig,\n  pub tensors: BTreeMap<i64, Array<F, IxDyn>>,\n  pub commit_before: Vec<Vec<i64>>,\n  pub commit_after: Vec<Vec<i64>>,\n  pub k: usize,\n  pub bits_per_elem: usize,\n  pub inp_idxes: Vec<i64>,\n  pub num_random: i64,\n}\n\n#[derive(Clone, Debug)]\npub struct ModelConfig<F: PrimeField + Ord + FromUniformBytes<64>> {\n  pub gadget_config: Rc<GadgetConfig>,\n  pub public_col: Column<Instance>,\n  pub hasher: Option<PoseidonCommitChip<F, WIDTH, RATE, L>>,\n  pub _marker: PhantomData<F>,\n}\n\nimpl<F: PrimeField + Ord + FromUniformBytes<64>> ModelCircuit<F> {\n  pub fn assign_tensors_map(\n    &self,\n    mut layouter: impl Layouter<F>,\n    columns: &Vec<Column<Advice>>,\n    tensors: &BTreeMap<i64, Array<F, IxDyn>>,\n  ) -> Result<BTreeMap<i64, AssignedTensor<F>>, Error> {\n    let tensors = layouter.assign_region(\n      || \"assignment\",\n      |mut region| {\n        let mut cell_idx = 0;\n        let mut assigned_tensors = BTreeMap::new();\n\n        for (tensor_idx, tensor) in tensors.iter() {\n          let mut flat = vec![];\n          for val in tensor.iter() {\n            let row_idx = cell_idx / columns.len();\n            let col_idx = cell_idx % columns.len();\n            let cell = region\n              .assign_advice(\n                || \"assignment\",\n                columns[col_idx],\n                row_idx,\n                || Value::known(*val),\n              )\n              .unwrap();\n            flat.push(Rc::new(cell));\n            cell_idx += 1;\n          }\n          let tensor = Array::from_shape_vec(tensor.shape(), flat).unwrap();\n          assigned_tensors.insert(*tensor_idx, tensor);\n        }\n\n        Ok(assigned_tensors)\n      },\n    )?;\n\n    Ok(tensors)\n  }\n\n  pub fn tensor_map_to_vec(\n    &self,\n    tensor_map: &BTreeMap<i64, Array<CellRc<F>, IxDyn>>,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    let smallest_tensor = tensor_map\n      .iter()\n      .min_by_key(|(_, tensor)| tensor.len())\n      .unwrap()\n      .1;\n    let max_tensor_key = tensor_map\n      .iter()\n      .max_by_key(|(key, _)| *key)\n      .unwrap()\n      .0\n      .clone();\n    let mut tensors = vec![];\n    // Missing indices are padded with the smallest assigned tensor as a cheap placeholder\n    for i in 0..max_tensor_key + 1 {\n      let tensor = tensor_map.get(&i).unwrap_or(smallest_tensor);\n      tensors.push(tensor.clone());\n    }\n\n    Ok(tensors)\n  }\n\n  pub fn assign_tensors_vec(\n    &self,\n    mut layouter: impl Layouter<F>,\n    columns: &Vec<Column<Advice>>,\n    tensors: &BTreeMap<i64, Array<F, IxDyn>>,\n  ) -> Result<Vec<AssignedTensor<F>>, Error> {\n    let tensor_map = self\n      .assign_tensors_map(\n        layouter.namespace(|| \"assign_tensors_map\"),\n        columns,\n        tensors,\n      )\n      .unwrap();\n    self.tensor_map_to_vec(&tensor_map)\n  }\n\n  pub fn assign_constants(\n    &self,\n    mut layouter: impl Layouter<F>,\n    gadget_config: Rc<GadgetConfig>,\n  ) -> Result<HashMap<i64, CellRc<F>>, Error> {\n    let sf = gadget_config.scale_factor;\n    let min_val = gadget_config.min_val;\n    let max_val = gadget_config.max_val;\n\n    let constants = layouter.assign_region(\n      || \"constants\",\n      |mut region| {\n        let mut constants: HashMap<i64, CellRc<F>> = 
HashMap::new();\n\n        let vals = vec![0 as i64, 1, sf as i64, min_val, max_val];\n        let shift_val_i64 = -min_val * 2; // FIXME\n        let shift_val_f = F::from(shift_val_i64 as u64);\n        for (i, val) in vals.iter().enumerate() {\n          let cell = region.assign_fixed(\n            || format!(\"constant_{}\", i),\n            gadget_config.fixed_columns[0],\n            i,\n            || Value::known(F::from((val + shift_val_i64) as u64) - shift_val_f),\n          )?;\n          constants.insert(*val, Rc::new(cell));\n        }\n\n        // TODO: I've made some very bad life decisions\n        // TODO: this needs to be a random oracle\n        let r_base = F::from(0x123456789abcdef);\n        let mut r = r_base.clone();\n        for i in 0..self.num_random {\n          let rand = region.assign_fixed(\n            || format!(\"rand_{}\", i),\n            gadget_config.fixed_columns[0],\n            constants.len(),\n            || Value::known(r),\n          )?;\n          r = r * r_base;\n          constants.insert(RAND_START_IDX + (i as i64), Rc::new(rand));\n        }\n\n        Ok(constants)\n      },\n    )?;\n    Ok(constants)\n  }\n\n  // TODO: for some horrifying reason, assigning to fixed columns causes everything to blow up\n  // Currently get around this by assigning to advice columns\n  // This is secure because of the equality checks but EXTREMELY STUPID\n  pub fn assign_constants2(\n    &self,\n    mut layouter: impl Layouter<F>,\n    gadget_config: Rc<GadgetConfig>,\n    fixed_constants: &HashMap<i64, CellRc<F>>,\n  ) -> Result<HashMap<i64, CellRc<F>>, Error> {\n    let sf = gadget_config.scale_factor;\n    let min_val = gadget_config.min_val;\n    let max_val = gadget_config.max_val;\n\n    let constants = layouter.assign_region(\n      || \"constants\",\n      |mut region| {\n        let mut constants: HashMap<i64, CellRc<F>> = HashMap::new();\n\n        let vals = vec![0 as i64, 1, sf as i64, min_val, max_val];\n        let shift_val_i64 = -min_val * 2; // FIXME\n        let shift_val_f = F::from(shift_val_i64 as u64);\n        for (i, val) in vals.iter().enumerate() {\n          let assignment_idx = i as usize;\n          let row_idx = assignment_idx / gadget_config.columns.len();\n          let col_idx = assignment_idx % gadget_config.columns.len();\n          let cell = region.assign_advice(\n            || format!(\"constant_{}\", i),\n            gadget_config.columns[col_idx],\n            row_idx,\n            || Value::known(F::from((val + shift_val_i64) as u64) - shift_val_f),\n          )?;\n          constants.insert(*val, Rc::new(cell));\n        }\n\n        // TODO: I've made some very bad life decisions\n        // TODO: this needs to be a random oracle\n        let r_base = F::from(0x123456789abcdef);\n        let mut r = r_base.clone();\n        for i in 0..self.num_random {\n          let assignment_idx = constants.len();\n          let row_idx = assignment_idx / gadget_config.columns.len();\n          let col_idx = assignment_idx % gadget_config.columns.len();\n          let rand = region.assign_advice(\n            || format!(\"rand_{}\", i),\n            gadget_config.columns[col_idx],\n            row_idx,\n            || Value::known(r),\n          )?;\n          r = r * r_base;\n          constants.insert(RAND_START_IDX + (i as i64), Rc::new(rand));\n        }\n\n        for (k, v) in fixed_constants.iter() {\n          let v2 = constants.get(k).unwrap();\n          region.constrain_equal(v.cell(), v2.cell()).unwrap();\n        
}\n        Ok(constants)\n      },\n    )?;\n    Ok(constants)\n  }\n\n  pub fn generate_from_file(config_file: &str, inp_file: &str) -> ModelCircuit<F> {\n    let config = load_model_msgpack(config_file, inp_file);\n    Self::generate_from_msgpack(config, true)\n  }\n\n  pub fn generate_from_msgpack(config: ModelMsgpack, panic_empty_tensor: bool) -> ModelCircuit<F> {\n    let to_field = |x: i64| {\n      let bias = 1 << 31;\n      let x_pos = x + bias;\n      F::from(x_pos as u64) - F::from(bias as u64)\n    };\n\n    let match_layer = |x: &str| match x {\n      \"AveragePool2D\" => LayerType::AvgPool2D,\n      \"Add\" => LayerType::Add,\n      \"BatchMatMul\" => LayerType::BatchMatMul,\n      \"Broadcast\" => LayerType::Broadcast,\n      \"Concatenation\" => LayerType::Concatenation,\n      \"Conv2D\" => LayerType::Conv2D,\n      \"Div\" => LayerType::DivFixed, // TODO: rename to DivFixed\n      \"DivVar\" => LayerType::DivVar,\n      \"FullyConnected\" => LayerType::FullyConnected,\n      \"Logistic\" => LayerType::Logistic,\n      \"MaskNegInf\" => LayerType::MaskNegInf,\n      \"MaxPool2D\" => LayerType::MaxPool2D,\n      \"Mean\" => LayerType::Mean,\n      \"Mul\" => LayerType::Mul,\n      \"Noop\" => LayerType::Noop,\n      \"Pack\" => LayerType::Pack,\n      \"Pad\" => LayerType::Pad,\n      \"Pow\" => LayerType::Pow,\n      \"Permute\" => LayerType::Permute,\n      \"Reshape\" => LayerType::Reshape,\n      \"ResizeNearestNeighbor\" => LayerType::ResizeNN,\n      \"Rotate\" => LayerType::Rotate,\n      \"Rsqrt\" => LayerType::Rsqrt,\n      \"Slice\" => LayerType::Slice,\n      \"Softmax\" => LayerType::Softmax,\n      \"Split\" => LayerType::Split,\n      \"Sqrt\" => LayerType::Sqrt,\n      \"Square\" => LayerType::Square,\n      \"SquaredDifference\" => LayerType::SquaredDifference,\n      \"Sub\" => LayerType::Sub,\n      \"Tanh\" => LayerType::Tanh,\n      \"Transpose\" => LayerType::Transpose,\n      \"Update\" => LayerType::Update,\n      _ => panic!(\"unknown op: {}\", x),\n    };\n\n    let mut tensors = BTreeMap::new();\n    for flat in config.tensors {\n      let value_flat = flat.data.iter().map(|x| to_field(*x)).collect::<Vec<_>>();\n      let shape = flat.shape.iter().map(|x| *x as usize).collect::<Vec<_>>();\n      let num_el: usize = shape.iter().product();\n      if panic_empty_tensor && num_el != value_flat.len() {\n        panic!(\"tensor shape and data length mismatch\");\n      }\n      if num_el == value_flat.len() {\n        let tensor = Array::from_shape_vec(IxDyn(&shape), value_flat).unwrap();\n        tensors.insert(flat.idx, tensor);\n      } else {\n        // Do nothing here since we're loading the config\n      };\n    }\n\n    let i64_to_usize = |x: &Vec<i64>| x.iter().map(|x| *x as usize).collect::<Vec<_>>();\n\n    let mut used_gadgets = BTreeSet::new();\n\n    let dag_config = {\n      let ops = config\n        .layers\n        .iter()\n        .map(|layer| {\n          let layer_type = match_layer(&layer.layer_type);\n          let layer_gadgets = match layer_type {\n            LayerType::Add => Box::new(AddChip {}) as Box<dyn GadgetConsumer>,\n            LayerType::AvgPool2D => Box::new(AvgPool2DChip {}) as Box<dyn GadgetConsumer>,\n            LayerType::BatchMatMul => Box::new(BatchMatMulChip {}) as Box<dyn GadgetConsumer>,\n            LayerType::Broadcast => Box::new(BroadcastChip {}) as Box<dyn GadgetConsumer>,\n            LayerType::Concatenation => Box::new(ConcatenationChip {}) as Box<dyn GadgetConsumer>,\n            LayerType::DivFixed 
=> Box::new(ConcatenationChip {}) as Box<dyn GadgetConsumer>,\n            LayerType::DivVar => Box::new(DivVarChip {}) as Box<dyn GadgetConsumer>,\n            LayerType::Conv2D => Box::new(Conv2DChip {\n              config: LayerConfig::default(),\n              _marker: PhantomData::<F>,\n            }) as Box<dyn GadgetConsumer>,\n            LayerType::FullyConnected => Box::new(FullyConnectedChip {\n              config: FullyConnectedConfig { normalize: true },\n              _marker: PhantomData::<F>,\n            }) as Box<dyn GadgetConsumer>,\n            LayerType::Logistic => Box::new(LogisticChip {}) as Box<dyn GadgetConsumer>,\n            LayerType::MaskNegInf => Box::new(MaskNegInfChip {}) as Box<dyn GadgetConsumer>,\n            LayerType::MaxPool2D => Box::new(MaxPool2DChip {\n              marker: PhantomData::<F>,\n            }) as Box<dyn GadgetConsumer>,\n            LayerType::Mean => Box::new(MeanChip {}) as Box<dyn GadgetConsumer>,\n            LayerType::Mul => Box::new(MulChip {}) as Box<dyn GadgetConsumer>,\n            LayerType::Noop => Box::new(NoopChip {}) as Box<dyn GadgetConsumer>,\n            LayerType::Pack => Box::new(PackChip {}) as Box<dyn GadgetConsumer>,\n            LayerType::Pad => Box::new(PadChip {}) as Box<dyn GadgetConsumer>,\n            LayerType::Pow => Box::new(PowChip {}) as Box<dyn GadgetConsumer>,\n            LayerType::Permute => Box::new(PermuteChip {}) as Box<dyn GadgetConsumer>,\n            LayerType::Reshape => Box::new(ReshapeChip {}) as Box<dyn GadgetConsumer>,\n            LayerType::ResizeNN => Box::new(ResizeNNChip {}) as Box<dyn GadgetConsumer>,\n            LayerType::Rotate => Box::new(RotateChip {}) as Box<dyn GadgetConsumer>,\n            LayerType::Rsqrt => Box::new(RsqrtChip {}) as Box<dyn GadgetConsumer>,\n            LayerType::Slice => Box::new(SliceChip {}) as Box<dyn GadgetConsumer>,\n            LayerType::Softmax => Box::new(SoftmaxChip {}) as Box<dyn GadgetConsumer>,\n            LayerType::Split => Box::new(SplitChip {}) as Box<dyn GadgetConsumer>,\n            LayerType::Sqrt => Box::new(SqrtChip {}) as Box<dyn GadgetConsumer>,\n            LayerType::Square => Box::new(SquareChip {}) as Box<dyn GadgetConsumer>,\n            LayerType::SquaredDifference => Box::new(SquaredDiffChip {}) as Box<dyn GadgetConsumer>,\n            LayerType::Sub => Box::new(SubChip {}) as Box<dyn GadgetConsumer>,\n            LayerType::Tanh => Box::new(TanhChip {}) as Box<dyn GadgetConsumer>,\n            LayerType::Transpose => Box::new(TransposeChip {}) as Box<dyn GadgetConsumer>,\n            LayerType::Update => Box::new(UpdateChip {}) as Box<dyn GadgetConsumer>,\n          }\n          .used_gadgets(layer.params.clone());\n          for gadget in layer_gadgets {\n            used_gadgets.insert(gadget);\n          }\n\n          LayerConfig {\n            layer_type,\n            layer_params: layer.params.clone(),\n            inp_shapes: layer.inp_shapes.iter().map(|x| i64_to_usize(x)).collect(),\n            out_shapes: layer.out_shapes.iter().map(|x| i64_to_usize(x)).collect(),\n            mask: layer.mask.clone(),\n          }\n        })\n        .collect::<Vec<_>>();\n      let inp_idxes = config\n        .layers\n        .iter()\n        .map(|layer| i64_to_usize(&layer.inp_idxes))\n        .collect::<Vec<_>>();\n      let out_idxes = config\n        .layers\n        .iter()\n        .map(|layer| i64_to_usize(&layer.out_idxes))\n        .collect::<Vec<_>>();\n      let final_out_idxes = config\n        
.out_idxes\n        .iter()\n        .map(|x| *x as usize)\n        .collect::<Vec<_>>();\n      DAGLayerConfig {\n        inp_idxes,\n        out_idxes,\n        ops,\n        final_out_idxes,\n      }\n    };\n\n    // The input lookup is always used\n    used_gadgets.insert(GadgetType::InputLookup);\n    let used_gadgets = Arc::new(used_gadgets);\n    let gadget = &GADGET_CONFIG;\n    let cloned_gadget = gadget.lock().unwrap().clone();\n    *gadget.lock().unwrap() = GadgetConfig {\n      scale_factor: config.global_sf as u64,\n      shift_min_val: -(config.global_sf * config.global_sf * (1 << 17)),\n      div_outp_min_val: -(1 << (config.k - 1)),\n      min_val: -(1 << (config.k - 1)),\n      max_val: (1 << (config.k - 1)) - 10,\n      k: config.k as usize,\n      num_rows: (1 << config.k) - 10 + 1,\n      num_cols: config.num_cols as usize,\n      used_gadgets: used_gadgets.clone(),\n      commit_before: config.commit_before.clone().unwrap_or(vec![]),\n      commit_after: config.commit_after.clone().unwrap_or(vec![]),\n      use_selectors: config.use_selectors.unwrap_or(true),\n      num_bits_per_elem: config.bits_per_elem.unwrap_or(config.k),\n      ..cloned_gadget\n    };\n\n    ModelCircuit {\n      tensors,\n      dag_config,\n      used_gadgets,\n      k: config.k as usize,\n      bits_per_elem: config.bits_per_elem.unwrap_or(config.k) as usize,\n      inp_idxes: config.inp_idxes.clone(),\n      commit_after: config.commit_after.unwrap_or(vec![]),\n      commit_before: config.commit_before.unwrap_or(vec![]),\n      num_random: config.num_random.unwrap_or(0),\n    }\n  }\n\n  pub fn assign_and_commit(\n    &self,\n    mut layouter: impl Layouter<F>,\n    constants: &HashMap<i64, CellRc<F>>,\n    config: &ModelConfig<F>,\n    tensors: &BTreeMap<i64, Array<F, IxDyn>>,\n  ) -> (BTreeMap<i64, AssignedTensor<F>>, CellRc<F>) {\n    let num_bits = self.bits_per_elem;\n    let packer_config = PackerChip::<F>::construct(num_bits, config.gadget_config.as_ref());\n    let packer_chip = PackerChip::<F> {\n      config: packer_config,\n    };\n    let (tensor_map, packed) = packer_chip\n      .assign_and_pack(\n        layouter.namespace(|| \"packer\"),\n        config.gadget_config.clone(),\n        constants,\n        tensors,\n      )\n      .unwrap();\n\n    let zero = constants.get(&0).unwrap().clone();\n    let commit_chip = config.hasher.clone().unwrap();\n\n    let commitments = commit_chip\n      .commit(\n        layouter.namespace(|| \"commit\"),\n        config.gadget_config.clone(),\n        constants,\n        &packed,\n        zero.clone(),\n      )\n      .unwrap();\n    assert_eq!(commitments.len(), 1);\n\n    (tensor_map, commitments[0].clone())\n  }\n\n  pub fn copy_and_commit(\n    &self,\n    mut layouter: impl Layouter<F>,\n    constants: &HashMap<i64, CellRc<F>>,\n    config: &ModelConfig<F>,\n    tensors: &BTreeMap<i64, AssignedTensor<F>>,\n  ) -> CellRc<F> {\n    let num_bits = self.bits_per_elem;\n    let packer_config = PackerChip::<F>::construct(num_bits, config.gadget_config.as_ref());\n    let packer_chip = PackerChip::<F> {\n      config: packer_config,\n    };\n    let packed = packer_chip\n      .copy_and_pack(\n        layouter.namespace(|| \"packer\"),\n        config.gadget_config.clone(),\n        constants,\n        tensors,\n      )\n      .unwrap();\n\n    let zero = constants.get(&0).unwrap().clone();\n    let commit_chip = config.hasher.clone().unwrap();\n\n    let commitments = commit_chip\n      .commit(\n        layouter.namespace(|| \"commit\"),\n   
     config.gadget_config.clone(),\n        constants,\n        &packed,\n        zero.clone(),\n      )\n      .unwrap();\n    assert_eq!(commitments.len(), 1);\n\n    commitments[0].clone()\n  }\n}\n\nimpl<F: PrimeField + Ord + FromUniformBytes<64>> Circuit<F> for ModelCircuit<F> {\n  type Config = ModelConfig<F>;\n  type FloorPlanner = SimpleFloorPlanner;\n  type Params = ();\n\n  fn without_witnesses(&self) -> Self {\n    todo!()\n  }\n\n  fn configure(meta: &mut ConstraintSystem<F>) -> Self::Config {\n    let mut gadget_config = crate::model::GADGET_CONFIG.lock().unwrap().clone();\n    let columns = (0..gadget_config.num_cols)\n      .map(|_| meta.advice_column())\n      .collect::<Vec<_>>();\n    for col in columns.iter() {\n      meta.enable_equality(*col);\n    }\n    gadget_config.columns = columns;\n\n    let public_col = meta.instance_column();\n    meta.enable_equality(public_col);\n\n    gadget_config.fixed_columns = vec![meta.fixed_column()];\n    meta.enable_equality(gadget_config.fixed_columns[0]);\n\n    // The input lookup is always loaded\n    gadget_config = InputLookupChip::<F>::configure(meta, gadget_config);\n\n    let used_gadgets = gadget_config.used_gadgets.clone();\n    for gadget_type in used_gadgets.iter() {\n      gadget_config = match gadget_type {\n        GadgetType::AddPairs => AddPairsChip::<F>::configure(meta, gadget_config),\n        GadgetType::Adder => AdderChip::<F>::configure(meta, gadget_config),\n        GadgetType::BiasDivRoundRelu6 => BiasDivRoundRelu6Chip::<F>::configure(meta, gadget_config),\n        GadgetType::BiasDivFloorRelu6 => panic!(),\n        GadgetType::DotProduct => DotProductChip::<F>::configure(meta, gadget_config),\n        GadgetType::Exp => ExpGadgetChip::<F>::configure(meta, gadget_config),\n        GadgetType::Logistic => LogisticGadgetChip::<F>::configure(meta, gadget_config),\n        GadgetType::Max => MaxChip::<F>::configure(meta, gadget_config),\n        GadgetType::MulPairs => MulPairsChip::<F>::configure(meta, gadget_config),\n        GadgetType::Pow => PowGadgetChip::<F>::configure(meta, gadget_config),\n        GadgetType::Relu => ReluChip::<F>::configure(meta, gadget_config),\n        GadgetType::Rsqrt => RsqrtGadgetChip::<F>::configure(meta, gadget_config),\n        GadgetType::Sqrt => SqrtGadgetChip::<F>::configure(meta, gadget_config),\n        GadgetType::SqrtBig => SqrtBigChip::<F>::configure(meta, gadget_config),\n        GadgetType::Square => SquareGadgetChip::<F>::configure(meta, gadget_config),\n        GadgetType::SquaredDiff => SquaredDiffGadgetChip::<F>::configure(meta, gadget_config),\n        GadgetType::SubPairs => SubPairsChip::<F>::configure(meta, gadget_config),\n        GadgetType::Tanh => TanhGadgetChip::<F>::configure(meta, gadget_config),\n        GadgetType::VarDivRound => VarDivRoundChip::<F>::configure(meta, gadget_config),\n        GadgetType::VarDivRoundBig => VarDivRoundBigChip::<F>::configure(meta, gadget_config),\n        GadgetType::VarDivRoundBig3 => VarDivRoundBig3Chip::<F>::configure(meta, gadget_config),\n        GadgetType::InputLookup => gadget_config, // This is always loaded\n        GadgetType::Update => UpdateGadgetChip::<F>::configure(meta, gadget_config),\n        GadgetType::Packer => panic!(),\n      };\n    }\n\n    let hasher = if gadget_config.commit_before.len() + gadget_config.commit_after.len() > 0 {\n      let packer_config =\n        PackerChip::<F>::construct(gadget_config.num_bits_per_elem as usize, &gadget_config);\n      gadget_config = 
PackerChip::<F>::configure(meta, packer_config, gadget_config);\n\n      // TODO\n      let input = gadget_config.columns[0..L].try_into().unwrap();\n      let state = gadget_config.columns[L..L + WIDTH].try_into().unwrap();\n      let partial_sbox = gadget_config.columns[L + WIDTH].into();\n      Some(PoseidonCommitChip::<F, WIDTH, RATE, L>::configure(\n        meta,\n        input,\n        state,\n        partial_sbox,\n      ))\n    } else {\n      None\n    };\n\n    ModelConfig {\n      gadget_config: gadget_config.into(),\n      public_col,\n      hasher,\n      _marker: PhantomData,\n    }\n  }\n\n  fn synthesize(&self, config: Self::Config, mut layouter: impl Layouter<F>) -> Result<(), Error> {\n    // Assign tables\n    let gadget_rc: Rc<GadgetConfig> = config.gadget_config.clone().into();\n    for gadget in self.used_gadgets.iter() {\n      match gadget {\n        GadgetType::AddPairs => {\n          let chip = AddPairsChip::<F>::construct(gadget_rc.clone());\n          chip.load_lookups(layouter.namespace(|| \"add pairs lookup\"))?;\n        }\n        GadgetType::Adder => {\n          let chip = AdderChip::<F>::construct(gadget_rc.clone());\n          chip.load_lookups(layouter.namespace(|| \"adder lookup\"))?;\n        }\n        GadgetType::BiasDivRoundRelu6 => {\n          let chip = BiasDivRoundRelu6Chip::<F>::construct(gadget_rc.clone());\n          chip.load_lookups(layouter.namespace(|| \"bias div round relu6 lookup\"))?;\n        }\n        GadgetType::DotProduct => {\n          let chip = DotProductChip::<F>::construct(gadget_rc.clone());\n          chip.load_lookups(layouter.namespace(|| \"dot product lookup\"))?;\n        }\n        GadgetType::VarDivRound => {\n          let chip = VarDivRoundChip::<F>::construct(gadget_rc.clone());\n          chip.load_lookups(layouter.namespace(|| \"var div lookup\"))?;\n        }\n        GadgetType::Pow => {\n          let chip = PowGadgetChip::<F>::construct(gadget_rc.clone());\n          chip.load_lookups(layouter.namespace(|| \"pow lookup\"))?;\n        }\n        GadgetType::Relu => {\n          let chip = ReluChip::<F>::construct(gadget_rc.clone());\n          chip.load_lookups(layouter.namespace(|| \"relu lookup\"))?;\n        }\n        GadgetType::Rsqrt => {\n          let chip = RsqrtGadgetChip::<F>::construct(gadget_rc.clone());\n          chip.load_lookups(layouter.namespace(|| \"rsqrt lookup\"))?;\n        }\n        GadgetType::Sqrt => {\n          let chip = SqrtGadgetChip::<F>::construct(gadget_rc.clone());\n          chip.load_lookups(layouter.namespace(|| \"sqrt lookup\"))?;\n        }\n        GadgetType::Tanh => {\n          let chip = TanhGadgetChip::<F>::construct(gadget_rc.clone());\n          chip.load_lookups(layouter.namespace(|| \"tanh lookup\"))?;\n        }\n        GadgetType::Exp => {\n          let chip = ExpGadgetChip::<F>::construct(gadget_rc.clone());\n          chip.load_lookups(layouter.namespace(|| \"exp lookup\"))?;\n        }\n        GadgetType::Logistic => {\n          let chip = LogisticGadgetChip::<F>::construct(gadget_rc.clone());\n          chip.load_lookups(layouter.namespace(|| \"logistic lookup\"))?;\n        }\n        GadgetType::InputLookup => {\n          let chip = InputLookupChip::<F>::construct(gadget_rc.clone());\n          chip.load_lookups(layouter.namespace(|| \"input lookup\"))?;\n        }\n        GadgetType::VarDivRoundBig => {}\n        GadgetType::VarDivRoundBig3 => {}\n        GadgetType::Max => {}\n        GadgetType::MulPairs => {}\n        GadgetType::SqrtBig 
=> {}\n        GadgetType::Square => {}\n        GadgetType::SquaredDiff => {}\n        GadgetType::SubPairs => {}\n        GadgetType::Update => {}\n        _ => panic!(\"unsupported gadget {:?}\", gadget),\n      }\n    }\n\n    // Assign weights and constants\n    let constants_base = self\n      .assign_constants(\n        layouter.namespace(|| \"constants\"),\n        config.gadget_config.clone(),\n      )\n      .unwrap();\n    // Some halo2 cancer\n    let constants = self\n      .assign_constants2(\n        layouter.namespace(|| \"constants 2\"),\n        config.gadget_config.clone(),\n        &constants_base,\n      )\n      .unwrap();\n\n    let mut commitments = vec![];\n    let tensors = if self.commit_before.len() > 0 {\n      // Commit to the tensors before the DAG\n      let mut tensor_map = BTreeMap::new();\n      let mut ignore_idxes: Vec<i64> = vec![];\n      for commit_idxes in self.commit_before.iter() {\n        let to_commit = BTreeMap::from_iter(\n          commit_idxes\n            .iter()\n            .map(|idx| (*idx, self.tensors.get(idx).unwrap().clone())),\n        );\n        let (mut committed_tensors, commitment) = self.assign_and_commit(\n          layouter.namespace(|| \"commit\"),\n          &constants,\n          &config,\n          &to_commit,\n        );\n        commitments.push(commitment);\n        tensor_map.append(&mut committed_tensors);\n        ignore_idxes.extend(commit_idxes.iter());\n      }\n\n      // Assign the remainder of the tensors\n      let mut assign_map = BTreeMap::new();\n      for (idx, tensor) in self.tensors.iter() {\n        if ignore_idxes.contains(idx) {\n          continue;\n        }\n        assign_map.insert(*idx, tensor.clone());\n      }\n      let mut remainder_tensor_map = self\n        .assign_tensors_map(\n          layouter.namespace(|| \"assignment\"),\n          &config.gadget_config.columns,\n          &assign_map,\n        )\n        .unwrap();\n\n      // Merge the two maps\n      tensor_map.append(&mut remainder_tensor_map);\n\n      // Return the tensors\n      self.tensor_map_to_vec(&tensor_map).unwrap()\n    } else {\n      self\n        .assign_tensors_vec(\n          layouter.namespace(|| \"assignment\"),\n          &config.gadget_config.columns,\n          &self.tensors,\n        )\n        .unwrap()\n    };\n\n    // Perform the dag\n    let dag_chip = DAGLayerChip::<F>::construct(self.dag_config.clone());\n    let (final_tensor_map, result) = dag_chip.forward(\n      layouter.namespace(|| \"dag\"),\n      &tensors,\n      &constants,\n      config.gadget_config.clone(),\n      &LayerConfig::default(),\n    )?;\n\n    if self.commit_after.len() > 0 {\n      for commit_idxes in self.commit_after.iter() {\n        let to_commit = BTreeMap::from_iter(commit_idxes.iter().map(|idx| {\n          (\n            *idx,\n            final_tensor_map.get(&(*idx as usize)).unwrap().clone(),\n          )\n        }));\n        let commitment = self.copy_and_commit(\n          layouter.namespace(|| \"commit\"),\n          &constants,\n          &config,\n          &to_commit,\n        );\n        commitments.push(commitment);\n      }\n    }\n\n    let mut pub_layouter = layouter.namespace(|| \"public\");\n    let mut total_idx = 0;\n    let mut new_public_vals = vec![];\n    for cell in commitments.iter() {\n      pub_layouter\n        .constrain_instance(cell.as_ref().cell(), config.public_col, total_idx)\n        .unwrap();\n      let val = convert_to_bigint(cell.value().map(|x| x.to_owned()));\n      
new_public_vals.push(val);\n      total_idx += 1;\n    }\n    for tensor in result {\n      for cell in tensor.iter() {\n        pub_layouter\n          .constrain_instance(cell.as_ref().cell(), config.public_col, total_idx)\n          .unwrap();\n        let val = convert_to_bigint(cell.value().map(|x| x.to_owned()));\n        new_public_vals.push(val);\n        total_idx += 1;\n      }\n    }\n    *PUBLIC_VALS.lock().unwrap() = new_public_vals;\n\n    Ok(())\n  }\n}\n"
  },
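  {
    "path": "docs/sketches/advice_layout_sketch.rs",
    "content": "// Illustrative sketch, NOT part of the zkml crate: the row-major layout\n// assign_tensors_map uses when packing flattened tensor values into advice\n// columns. A running cell_idx maps element i to (row, col) =\n// (i / num_cols, i % num_cols), concatenating all tensors in index order.\n\nfn main() {\n  let num_cols = 4;\n  let tensor_lens = [3, 5]; // flattened lengths of two tensors (made up)\n  let mut cell_idx = 0;\n  for (t, len) in tensor_lens.iter().enumerate() {\n    for i in 0..*len {\n      let (row, col) = (cell_idx / num_cols, cell_idx % num_cols);\n      println!(\"tensor {} element {} -> row {}, col {}\", t, i, row, col);\n      cell_idx += 1;\n    }\n  }\n}\n"
  },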
  {
    "path": "src/utils/helpers.rs",
    "content": "use halo2_proofs::{\n  circuit::{AssignedCell, Value},\n  halo2curves::ff::PrimeField,\n};\nuse ndarray::{Array, IxDyn};\nuse num_bigint::BigUint;\n\nuse crate::{gadgets::gadget::convert_to_u128, model::PUBLIC_VALS};\n\n// TODO: this is very bad\npub const RAND_START_IDX: i64 = i64::MIN;\npub const NUM_RANDOMS: i64 = 20001;\n\n// Conversion / printing functions\npub fn convert_to_bigint<F: PrimeField>(x: Value<F>) -> BigUint {\n  let mut big = Default::default();\n  x.map(|x| {\n    big = BigUint::from_bytes_le(x.to_repr().as_ref());\n  });\n  big\n}\n\npub fn convert_pos_int<F: PrimeField>(x: Value<F>) -> i128 {\n  let bias = 1 << 60;\n  let x_pos = x + Value::known(F::from(bias as u64));\n  let mut outp: i128 = 0;\n  x_pos.map(|x| {\n    let x_pos = convert_to_u128(&x);\n    let tmp = x_pos as i128 - bias;\n    outp = tmp;\n  });\n  return outp;\n}\n\npub fn print_pos_int<F: PrimeField>(prefix: &str, x: Value<F>, scale_factor: u64) {\n  let tmp = convert_pos_int(x);\n  let tmp_float = tmp as f64 / scale_factor as f64;\n  println!(\"{} x: {} ({})\", prefix, tmp, tmp_float);\n}\n\npub fn print_assigned_arr<F: PrimeField>(\n  prefix: &str,\n  arr: &Vec<&AssignedCell<F, F>>,\n  scale_factor: u64,\n) {\n  for (idx, x) in arr.iter().enumerate() {\n    print_pos_int(\n      &format!(\"{}[{}]\", prefix, idx),\n      x.value().map(|x: &F| x.to_owned()),\n      scale_factor,\n    );\n  }\n}\n\n// Get the public values\npub fn get_public_values<F: PrimeField>() -> Vec<F> {\n  let mut public_vals = vec![];\n  for val in PUBLIC_VALS.lock().unwrap().iter() {\n    let val = F::from_str_vartime(&val.to_str_radix(10));\n    public_vals.push(val.unwrap());\n  }\n  public_vals\n}\n\n// Broadcast\nfn shape_dominates(s1: &[usize], s2: &[usize]) -> bool {\n  if s1.len() != s2.len() {\n    return false;\n  }\n\n  for (x1, x2) in s1.iter().zip(s2.iter()) {\n    if x1 < x2 {\n      return false;\n    }\n  }\n\n  true\n}\n\n// Precondition: s1.len() < s2.len()\nfn intermediate_shape(s1: &[usize], s2: &[usize]) -> Vec<usize> {\n  let mut res = vec![1; s2.len() - s1.len()];\n  for s in s1.iter() {\n    res.push(*s);\n  }\n  res\n}\n\nfn final_shape(s1: &[usize], s2: &[usize]) -> Vec<usize> {\n  let mut res = vec![];\n  for (x1, x2) in s1.iter().zip(s2.iter()) {\n    res.push(std::cmp::max(*x1, *x2));\n  }\n  res\n}\n\npub fn broadcast<G: Clone>(\n  x1: &Array<G, IxDyn>,\n  x2: &Array<G, IxDyn>,\n) -> (Array<G, IxDyn>, Array<G, IxDyn>) {\n  if x1.shape() == x2.shape() {\n    return (x1.clone(), x2.clone());\n  }\n\n  if x1.ndim() == x2.ndim() {\n    let s1 = x1.shape();\n    let s2 = x2.shape();\n    if shape_dominates(s1, s2) {\n      return (x1.clone(), x2.broadcast(s1).unwrap().into_owned());\n    } else if shape_dominates(x2.shape(), x1.shape()) {\n      return (x1.broadcast(s2).unwrap().into_owned(), x2.clone());\n    }\n  }\n\n  let (tmp1, tmp2) = if x1.ndim() < x2.ndim() {\n    (x1, x2)\n  } else {\n    (x2, x1)\n  };\n\n  // tmp1.ndim() < tmp2.ndim()\n  let s1 = tmp1.shape();\n  let s2 = tmp2.shape();\n  let s = intermediate_shape(s1, s2);\n  let final_shape = final_shape(s2, s.as_slice());\n\n  let tmp1 = tmp1.broadcast(s.clone()).unwrap().into_owned();\n  let tmp1 = tmp1.broadcast(final_shape.as_slice()).unwrap().into_owned();\n  let tmp2 = tmp2.broadcast(final_shape.as_slice()).unwrap().into_owned();\n  // println!(\"x1: {:?} x2: {:?}\", x1.shape(), x2.shape());\n  // println!(\"s1: {:?} s2: {:?} s: {:?}\", s1, s2, s);\n  // println!(\"tmp1 shape: {:?}\", tmp1.shape());\n  // 
println!(\"tmp2 shape: {:?}\", tmp2.shape());\n\n  if x1.ndim() < x2.ndim() {\n    return (tmp1, tmp2);\n  } else {\n    return (tmp2, tmp1);\n  }\n}\n"
  },
  {
    "path": "src/utils/loader.rs",
    "content": "use std::{fs::File, io::BufReader};\n\nuse serde_derive::{Deserialize, Serialize};\n\n#[derive(Clone, Debug, Serialize, Deserialize)]\npub struct TensorMsgpack {\n  pub idx: i64,\n  pub shape: Vec<i64>,\n  pub data: Vec<i64>,\n}\n\n#[derive(Clone, Debug, Serialize, Deserialize)]\npub struct LayerMsgpack {\n  pub layer_type: String,\n  pub params: Vec<i64>,\n  pub inp_idxes: Vec<i64>,\n  pub inp_shapes: Vec<Vec<i64>>,\n  pub out_idxes: Vec<i64>,\n  pub out_shapes: Vec<Vec<i64>>,\n  pub mask: Vec<i64>,\n}\n\n#[derive(Clone, Debug, Serialize, Deserialize)]\npub struct ModelMsgpack {\n  pub global_sf: i64,\n  pub k: i64,\n  pub num_cols: i64,\n  pub inp_idxes: Vec<i64>,\n  pub out_idxes: Vec<i64>,\n  pub tensors: Vec<TensorMsgpack>,\n  pub layers: Vec<LayerMsgpack>,\n  pub use_selectors: Option<bool>,\n  pub commit_before: Option<Vec<Vec<i64>>>,\n  pub commit_after: Option<Vec<Vec<i64>>>,\n  pub bits_per_elem: Option<i64>, // Specifically for packing for the commitments\n  pub num_random: Option<i64>,\n}\n\npub fn load_config_msgpack(config_path: &str) -> ModelMsgpack {\n  let model: ModelMsgpack = {\n    let file = File::open(config_path).unwrap();\n    let mut reader = BufReader::new(file);\n    rmp_serde::from_read(&mut reader).unwrap()\n  };\n  model\n}\n\npub fn load_model_msgpack(config_path: &str, inp_path: &str) -> ModelMsgpack {\n  let mut model = load_config_msgpack(config_path);\n  let inp: Vec<TensorMsgpack> = {\n    let file = File::open(inp_path).unwrap();\n    let mut reader = BufReader::new(file);\n    rmp_serde::from_read(&mut reader).unwrap()\n  };\n  for tensor in inp {\n    model.tensors.push(tensor);\n  }\n\n  // Default to using selectors, commit if use_selectors is not specified\n  if model.use_selectors.is_none() {\n    model.use_selectors = Some(true)\n  };\n  if model.commit_before.is_none() {\n    model.commit_before = Some(vec![])\n  };\n  if model.commit_after.is_none() {\n    model.commit_after = Some(vec![])\n  };\n  if model.bits_per_elem.is_none() {\n    model.bits_per_elem = Some(model.k)\n  };\n  if model.num_random.is_none() {\n    model.num_random = Some(20001)\n  };\n\n  model\n}\n"
  },
  {
    "path": "src/utils/proving_ipa.rs",
    "content": "use std::{\n  fs::File,\n  io::{BufReader, Write},\n  path::Path,\n  time::Instant,\n};\n\nuse halo2_proofs::{\n  dev::MockProver,\n  halo2curves::pasta::{EqAffine, Fp},\n  plonk::{create_proof, keygen_pk, keygen_vk, verify_proof},\n  poly::{\n    commitment::{Params, ParamsProver},\n    ipa::{\n      commitment::{IPACommitmentScheme, ParamsIPA},\n      multiopen::ProverIPA,\n      strategy::SingleStrategy,\n    },\n    VerificationStrategy,\n  },\n  transcript::{\n    Blake2bRead, Blake2bWrite, Challenge255, TranscriptReadBuffer, TranscriptWriterBuffer,\n  },\n};\n\nuse crate::{model::ModelCircuit, utils::helpers::get_public_values};\n\npub fn get_ipa_params(params_dir: &str, degree: u32) -> ParamsIPA<EqAffine> {\n  let path = format!(\"{}/{}.params\", params_dir, degree);\n  let params_path = Path::new(&path);\n  if File::open(&params_path).is_err() {\n    let params: ParamsIPA<EqAffine> = ParamsIPA::new(degree);\n    let mut buf = Vec::new();\n\n    params.write(&mut buf).expect(\"Failed to write params\");\n    let mut file = File::create(&params_path).expect(\"Failed to create params file\");\n    file\n      .write_all(&buf[..])\n      .expect(\"Failed to write params to file\");\n  }\n\n  let params_fs = File::open(&params_path).expect(\"couldn't load params\");\n  let params: ParamsIPA<EqAffine> =\n    Params::read::<_>(&mut BufReader::new(params_fs)).expect(\"Failed to read params\");\n  params\n}\n\npub fn time_circuit_ipa(circuit: ModelCircuit<Fp>) {\n  let rng = rand::thread_rng();\n  let start = Instant::now();\n\n  let degree = circuit.k as u32;\n  let empty_circuit = circuit.clone();\n  let proof_circuit = circuit;\n\n  let params = get_ipa_params(\"./params_ipa\", degree);\n\n  let circuit_duration = start.elapsed();\n  println!(\n    \"Time elapsed in params construction: {:?}\",\n    circuit_duration\n  );\n\n  let vk = keygen_vk(&params, &empty_circuit).unwrap();\n  let vk_duration = start.elapsed();\n  println!(\n    \"Time elapsed in generating vkey: {:?}\",\n    vk_duration - circuit_duration\n  );\n\n  let pk = keygen_pk(&params, vk, &empty_circuit).unwrap();\n  let pk_duration = start.elapsed();\n  println!(\n    \"Time elapsed in generating pkey: {:?}\",\n    pk_duration - vk_duration\n  );\n  drop(empty_circuit);\n\n  let fill_duration = start.elapsed();\n  let _prover = MockProver::run(degree, &proof_circuit, vec![vec![]]).unwrap();\n  let public_vals = get_public_values();\n  println!(\n    \"Time elapsed in filling circuit: {:?}\",\n    fill_duration - pk_duration\n  );\n\n  let mut transcript = Blake2bWrite::<_, _, Challenge255<_>>::init(vec![]);\n  create_proof::<IPACommitmentScheme<EqAffine>, ProverIPA<EqAffine>, _, _, _, _>(\n    &params,\n    &pk,\n    &[proof_circuit],\n    &[&[&public_vals]],\n    rng,\n    &mut transcript,\n  )\n  .unwrap();\n  let proof = transcript.finalize();\n  let proof_duration = start.elapsed();\n  println!(\"Proving time: {:?}\", proof_duration - fill_duration);\n\n  let proof_size = {\n    let mut folder = std::path::PathBuf::new();\n    folder.push(\"proof\");\n    let mut fd = std::fs::File::create(folder.as_path()).unwrap();\n    folder.pop();\n    fd.write_all(&proof).unwrap();\n    fd.metadata().unwrap().len()\n  };\n  println!(\"Proof size: {} bytes\", proof_size);\n\n  let strategy = SingleStrategy::new(&params);\n  let mut transcript = Blake2bRead::<_, _, Challenge255<_>>::init(&proof[..]);\n  assert!(\n    verify_proof(\n      &params,\n      pk.get_vk(),\n      strategy,\n      &[&[&public_vals]],\n    
  &mut transcript\n    )\n    .is_ok(),\n    \"proof did not verify\"\n  );\n  let verify_duration = start.elapsed();\n  println!(\"Verifying time: {:?}\", verify_duration - proof_duration);\n}\n"
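#[cfg(test)]\nmod tests {\n  use super::*;\n\n  // Illustrative sketch, not part of the original module: generates (or\n  // reloads) a tiny degree-3 parameter set. The scratch directory and the\n  // degree are arbitrary values chosen for this test.\n  #[test]\n  fn ipa_params_round_trip() {\n    std::fs::create_dir_all(\"/tmp/zkml_test_params_ipa\").unwrap();\n    let params = get_ipa_params(\"/tmp/zkml_test_params_ipa\", 3);\n    assert_eq!(params.k(), 3);\n  }\n}\n"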
  },
  {
    "path": "src/utils/proving_kzg.rs",
    "content": "use std::{\n  fs::File,\n  io::{BufReader, Write},\n  path::Path,\n  time::Instant,\n};\n\nuse halo2_proofs::{\n  dev::MockProver,\n  halo2curves::bn256::{Bn256, Fr, G1Affine},\n  plonk::{create_proof, keygen_pk, keygen_vk, verify_proof, VerifyingKey},\n  poly::{\n    commitment::Params,\n    kzg::{\n      commitment::{KZGCommitmentScheme, ParamsKZG},\n      multiopen::{ProverSHPLONK, VerifierSHPLONK},\n      strategy::SingleStrategy,\n    },\n  },\n  transcript::{\n    Blake2bRead, Blake2bWrite, Challenge255, TranscriptReadBuffer, TranscriptWriterBuffer,\n  },\n  SerdeFormat,\n};\n\nuse crate::{model::ModelCircuit, utils::helpers::get_public_values};\n\npub fn get_kzg_params(params_dir: &str, degree: u32) -> ParamsKZG<Bn256> {\n  let rng = rand::thread_rng();\n  let path = format!(\"{}/{}.params\", params_dir, degree);\n  let params_path = Path::new(&path);\n  if File::open(&params_path).is_err() {\n    let params = ParamsKZG::<Bn256>::setup(degree, rng);\n    let mut buf = Vec::new();\n\n    params.write(&mut buf).expect(\"Failed to write params\");\n    let mut file = File::create(&params_path).expect(\"Failed to create params file\");\n    file\n      .write_all(&buf[..])\n      .expect(\"Failed to write params to file\");\n  }\n\n  let mut params_fs = File::open(&params_path).expect(\"couldn't load params\");\n  let params = ParamsKZG::<Bn256>::read(&mut params_fs).expect(\"Failed to read params\");\n  params\n}\n\npub fn serialize(data: &Vec<u8>, path: &str) -> u64 {\n  let mut file = File::create(path).unwrap();\n  file.write_all(data).unwrap();\n  file.metadata().unwrap().len()\n}\n\npub fn verify_kzg(\n  params: &ParamsKZG<Bn256>,\n  vk: &VerifyingKey<G1Affine>,\n  strategy: SingleStrategy<Bn256>,\n  public_vals: &Vec<Fr>,\n  mut transcript: Blake2bRead<&[u8], G1Affine, Challenge255<G1Affine>>,\n) {\n  assert!(\n    verify_proof::<\n      KZGCommitmentScheme<Bn256>,\n      VerifierSHPLONK<'_, Bn256>,\n      Challenge255<G1Affine>,\n      Blake2bRead<&[u8], G1Affine, Challenge255<G1Affine>>,\n      halo2_proofs::poly::kzg::strategy::SingleStrategy<'_, Bn256>,\n    >(&params, &vk, strategy, &[&[&public_vals]], &mut transcript)\n    .is_ok(),\n    \"proof did not verify\"\n  );\n}\n\npub fn time_circuit_kzg(circuit: ModelCircuit<Fr>) {\n  let rng = rand::thread_rng();\n  let start = Instant::now();\n\n  let degree = circuit.k as u32;\n  let params = get_kzg_params(\"./params_kzg\", degree);\n\n  let circuit_duration = start.elapsed();\n  println!(\n    \"Time elapsed in params construction: {:?}\",\n    circuit_duration\n  );\n\n  let vk_circuit = circuit.clone();\n  let vk = keygen_vk(&params, &vk_circuit).unwrap();\n  drop(vk_circuit);\n  let vk_duration = start.elapsed();\n  println!(\n    \"Time elapsed in generating vkey: {:?}\",\n    vk_duration - circuit_duration\n  );\n\n  let vkey_size = serialize(&vk.to_bytes(SerdeFormat::RawBytes), \"vkey\");\n  println!(\"vkey size: {} bytes\", vkey_size);\n\n  let pk_circuit = circuit.clone();\n  let pk = keygen_pk(&params, vk, &pk_circuit).unwrap();\n  let pk_duration = start.elapsed();\n  println!(\n    \"Time elapsed in generating pkey: {:?}\",\n    pk_duration - vk_duration\n  );\n  drop(pk_circuit);\n\n  let pkey_size = serialize(&pk.to_bytes(SerdeFormat::RawBytes), \"pkey\");\n  println!(\"pkey size: {} bytes\", pkey_size);\n\n  let fill_duration = start.elapsed();\n  let proof_circuit = circuit.clone();\n  let _prover = MockProver::run(degree, &proof_circuit, vec![vec![]]).unwrap();\n  let public_vals = 
get_public_values();\n  println!(\n    \"Time elapsed in filling circuit: {:?}\",\n    fill_duration - pk_duration\n  );\n\n  // Convert public vals to serializable format\n  let public_vals_u8: Vec<u8> = public_vals\n    .iter()\n    .map(|v: &Fr| v.to_bytes().to_vec())\n    .flatten()\n    .collect();\n  let public_vals_u8_size = serialize(&public_vals_u8, \"public_vals\");\n  println!(\"Public vals size: {} bytes\", public_vals_u8_size);\n\n  let mut transcript = Blake2bWrite::<_, G1Affine, Challenge255<_>>::init(vec![]);\n  create_proof::<\n    KZGCommitmentScheme<Bn256>,\n    ProverSHPLONK<'_, Bn256>,\n    Challenge255<G1Affine>,\n    _,\n    Blake2bWrite<Vec<u8>, G1Affine, Challenge255<G1Affine>>,\n    ModelCircuit<Fr>,\n  >(\n    &params,\n    &pk,\n    &[proof_circuit],\n    &[&[&public_vals]],\n    rng,\n    &mut transcript,\n  )\n  .unwrap();\n  let proof = transcript.finalize();\n  let proof_duration = start.elapsed();\n  println!(\"Proving time: {:?}\", proof_duration - fill_duration);\n\n  let proof_size = serialize(&proof, \"proof\");\n  let proof = std::fs::read(\"proof\").unwrap();\n\n  println!(\"Proof size: {} bytes\", proof_size);\n\n  let strategy = SingleStrategy::new(&params);\n  let transcript_read = Blake2bRead::<_, _, Challenge255<_>>::init(&proof[..]);\n\n  println!(\"public vals: {:?}\", public_vals);\n  verify_kzg(\n    &params,\n    &pk.get_vk(),\n    strategy,\n    &public_vals,\n    transcript_read,\n  );\n  let verify_duration = start.elapsed();\n  println!(\"Verifying time: {:?}\", verify_duration - proof_duration);\n}\n\n// Standalone verification\npub fn verify_circuit_kzg(\n  circuit: ModelCircuit<Fr>,\n  vkey_fname: &str,\n  proof_fname: &str,\n  public_vals_fname: &str,\n) {\n  let degree = circuit.k as u32;\n  let params = get_kzg_params(\"./params_kzg\", degree);\n  println!(\"Loaded the parameters\");\n\n  let vk = VerifyingKey::read::<BufReader<File>, ModelCircuit<Fr>>(\n    &mut BufReader::new(File::open(vkey_fname).unwrap()),\n    SerdeFormat::RawBytes,\n    (),\n  )\n  .unwrap();\n  println!(\"Loaded vkey\");\n\n  let proof = std::fs::read(proof_fname).unwrap();\n\n  let public_vals_u8 = std::fs::read(&public_vals_fname).unwrap();\n  let public_vals: Vec<Fr> = public_vals_u8\n    .chunks(32)\n    .map(|chunk| Fr::from_bytes(chunk.try_into().expect(\"conversion failed\")).unwrap())\n    .collect();\n\n  let strategy = SingleStrategy::new(&params);\n  let transcript = Blake2bRead::<_, _, Challenge255<_>>::init(&proof[..]);\n\n  let start = Instant::now();\n  let verify_start = start.elapsed();\n  verify_kzg(&params, &vk, strategy, &public_vals, transcript);\n  let verify_duration = start.elapsed();\n  println!(\"Verifying time: {:?}\", verify_duration - verify_start);\n  println!(\"Proof verified!\")\n}\n"
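#[cfg(test)]\nmod tests {\n  use super::*;\n\n  // Illustrative sketch, not part of the original module: checks that\n  // `serialize` reports the length it wrote and that public values survive\n  // the 32-byte chunk encoding used above. The temp path is arbitrary.\n  #[test]\n  fn public_vals_round_trip() {\n    let vals = vec![Fr::from(1u64), Fr::from(42u64)];\n    let bytes: Vec<u8> = vals.iter().flat_map(|v| v.to_bytes()).collect();\n    let written = serialize(&bytes, \"/tmp/zkml_test_public_vals\");\n    assert_eq!(written, bytes.len() as u64);\n    let decoded: Vec<Fr> = bytes\n      .chunks(32)\n      .map(|chunk| Fr::from_bytes(chunk.try_into().unwrap()).unwrap())\n      .collect();\n    assert_eq!(decoded, vals);\n  }\n}\n"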
  },
  {
    "path": "src/utils.rs",
    "content": "pub mod helpers;\npub mod loader;\npub mod proving_ipa;\npub mod proving_kzg;\n"
  },
  {
    "path": "testing/circuits/last_two_layers.py",
    "content": "import tensorflow as tf\nimport os \nimport numpy as np\n\ninterpreter = tf.lite.Interpreter(\n    model_path=f'./testing/circuits/v2_1.0_224.tflite'\n)\ninterpreter.allocate_tensors()\n\nNAME_TO_TENSOR = {}\nfor tensor_details in interpreter.get_tensor_details():\n    NAME_TO_TENSOR[tensor_details['name']] = tensor_details\nWc = interpreter.get_tensor(NAME_TO_TENSOR['Const_71']['index'])\nBc = interpreter.get_tensor(NAME_TO_TENSOR['MobilenetV2/Conv_1/Conv2D_bias']['index'])\n\nclass LastTwoLayers(tf.keras.Model):\n    def __init__(self, name=None):\n        super().__init__(name = name)\n        self.a_variable = tf.Variable(5.0, name=\"train_me\")\n        self.conv1 = tf.keras.layers.Conv2D(\n            1280,\n            (1, 1),\n            activation='relu6',\n            padding='same',\n            input_shape=(1, 7, 7, 320)\n        )\n        self.avg_pool = tf.keras.layers.AveragePooling2D(\n            pool_size=(7, 7),\n            padding='valid',\n            strides=(1, 1)\n        )\n        self.conv2 = tf.keras.layers.Conv2D(\n            102,\n            (1, 1),\n            padding='valid',\n            input_shape=(1, 1, 1, 1280)\n        )\n        self.softmax = tf.keras.layers.Softmax()\n\n    def call(self, x):\n        x = self.conv1(x)\n        x = self.avg_pool(x)\n        x = self.conv2(x)\n        x = tf.reshape(x, [1, 102])\n        x = self.softmax(x)\n        return x\n\nmy_sequential_model = LastTwoLayers(name=\"the_model\")\nmy_sequential_model.compile(optimizer='sgd', loss='categorical_crossentropy')\n\nx = np.random.random((1, 7, 7, 320))\n\nmy_sequential_model.predict(x)\n\nmy_sequential_model.conv1.set_weights([np.transpose(Wc, [1,2,3,0]), Bc])\n# x, y, chan_in, chan_out\n\n# 1 batch, 7 height, 320 width, 7 channels\n# 7 height, 1 width, 7 channels, 1280 out channels\n# 1 height, 320 width, 1280 out channels\n\n# 1 Batch, 7 height, 7 width, 320 channels () [This is the input to the layer]\n# 1 Batch, 7 height, 7 width, 1280 channels (dout) [This is the output of another layer]\n\n# We want to transform this so that we rotate the input by 90 degrees\n\nW = np.zeros([1, 1, 1280, 102])\n\nmy_sequential_model.conv2.set_weights([\n    W,\n    np.zeros([102])\n])\n\nconverter = tf.lite.TFLiteConverter.from_keras_model(my_sequential_model)\ntflite_model = converter.convert()\n\nwith open('./examples/v2_1.0_224_truncated/v2_1.0_224_truncated.tflite', 'wb') as f:\n  f.write(tflite_model)\n"
  }
]