Repository: KhronosGroup/NNEF-Tools Branch: main Commit: 6ca6fd2a0e3c Files: 269 Total size: 1.9 MB Directory structure: gitextract_brhyl024/ ├── .github/ │ └── workflows/ │ ├── build_nnef.yml │ └── build_nnef_tools.yml ├── .gitignore ├── CODE_OF_CONDUCT.md ├── README.md ├── _config.yml ├── fix_nnef_binary_size.py ├── models/ │ └── README.md ├── nnef-pyproject/ │ ├── README.md │ ├── cpp_api.md │ ├── examples/ │ │ ├── alexnet.txt │ │ ├── googlenet.txt │ │ ├── resnet.txt │ │ ├── samples/ │ │ │ ├── sample.py │ │ │ ├── sample_ext.py │ │ │ └── sample_gen.py │ │ └── vgg.txt │ ├── nnef/ │ │ ├── __init__.py │ │ ├── binary.py │ │ ├── cpp/ │ │ │ ├── CMakeLists.txt │ │ │ ├── include/ │ │ │ │ ├── cnnef.h │ │ │ │ ├── nnef/ │ │ │ │ │ ├── common/ │ │ │ │ │ │ ├── binary.h │ │ │ │ │ │ ├── dictionary.h │ │ │ │ │ │ ├── error.h │ │ │ │ │ │ ├── lexer.h │ │ │ │ │ │ ├── parser.h │ │ │ │ │ │ ├── prototype.h │ │ │ │ │ │ ├── shapes.h │ │ │ │ │ │ ├── typespec.h │ │ │ │ │ │ ├── typeutils.h │ │ │ │ │ │ └── value.h │ │ │ │ │ ├── comp/ │ │ │ │ │ │ ├── comp_parser.h │ │ │ │ │ │ ├── evaluation.h │ │ │ │ │ │ ├── expression.h │ │ │ │ │ │ ├── fragment.h │ │ │ │ │ │ └── stdlib_source.h │ │ │ │ │ ├── flat/ │ │ │ │ │ │ ├── flat_parser.h │ │ │ │ │ │ ├── quant_parser.h │ │ │ │ │ │ └── stdlib_protos.h │ │ │ │ │ └── runtime/ │ │ │ │ │ ├── execution.h │ │ │ │ │ ├── ndrange.h │ │ │ │ │ └── operations.h │ │ │ │ └── nnef.h │ │ │ ├── infer.cpp │ │ │ ├── sample.cpp │ │ │ └── src/ │ │ │ ├── cnnef.cpp │ │ │ └── nnef.cpp │ │ ├── nnef.cpp │ │ ├── parser.py │ │ ├── printer.py │ │ ├── shapes.py │ │ └── validate.py │ ├── package_info.md │ ├── pyproject.toml │ ├── setup.py │ ├── stdlib.nnef │ └── tests/ │ └── test.py └── nnef_tools-pyproject/ ├── LICENSE ├── README.md ├── custom/ │ ├── composite_export_example.py │ ├── custom_operators_example.py │ ├── custom_optimizers_example.py │ ├── custom_transforms_example.py │ ├── onnx_custom_export_example.py │ └── onnx_custom_transforms_example.py ├── nnef_tools/ │ ├── 
__init__.py │ ├── conversion/ │ │ ├── __init__.py │ │ ├── converter.py │ │ ├── nnef_to_onnx.py │ │ ├── nnef_to_tf.py │ │ ├── nnef_to_tflite.py │ │ ├── onnx_to_nnef.py │ │ ├── tf_to_nnef.py │ │ └── tflite_to_nnef.py │ ├── convert.py │ ├── execute.py │ ├── execution/ │ │ ├── __init__.py │ │ └── tvm/ │ │ ├── __init__.py │ │ └── nnef_frontend/ │ │ ├── __init__.py │ │ ├── relax/ │ │ │ ├── __init__.py │ │ │ ├── nnef_frontend.py │ │ │ └── nnef_ops.py │ │ └── relay/ │ │ ├── __init__.py │ │ ├── from_nnef.py │ │ └── nnef_ops.py │ ├── generate.py │ ├── gmac.py │ ├── image_tensor.py │ ├── interpreter/ │ │ ├── __init__.py │ │ └── pytorch/ │ │ ├── __init__.py │ │ ├── nnef_module.py │ │ └── nnef_operators.py │ ├── io/ │ │ ├── __init__.py │ │ ├── caffe2/ │ │ │ ├── __init__.py │ │ │ ├── caffe/ │ │ │ │ ├── __init__.py │ │ │ │ └── proto/ │ │ │ │ ├── __init__.py │ │ │ │ ├── caffe.proto │ │ │ │ └── caffe_pb2.py │ │ │ ├── reader.py │ │ │ └── writer.py │ │ ├── nnef/ │ │ │ ├── __init__.py │ │ │ ├── helpers.py │ │ │ ├── reader.py │ │ │ └── writer.py │ │ ├── onnx/ │ │ │ ├── __init__.py │ │ │ ├── reader.py │ │ │ └── writer.py │ │ └── tf/ │ │ ├── __init__.py │ │ ├── graphdef/ │ │ │ ├── __init__.py │ │ │ ├── composite.py │ │ │ ├── protobuf.py │ │ │ ├── reader.py │ │ │ ├── utils.py │ │ │ └── writer.py │ │ └── lite/ │ │ ├── __init__.py │ │ ├── flatbuffers/ │ │ │ ├── AbsOptions.py │ │ │ ├── ActivationFunctionType.py │ │ │ ├── AddNOptions.py │ │ │ ├── AddOptions.py │ │ │ ├── ArgMaxOptions.py │ │ │ ├── ArgMinOptions.py │ │ │ ├── BatchMatMulOptions.py │ │ │ ├── BatchToSpaceNDOptions.py │ │ │ ├── BidirectionalSequenceLSTMOptions.py │ │ │ ├── BidirectionalSequenceRNNOptions.py │ │ │ ├── Buffer.py │ │ │ ├── BuiltinOperator.py │ │ │ ├── BuiltinOptions.py │ │ │ ├── CallOptions.py │ │ │ ├── CastOptions.py │ │ │ ├── CombinerType.py │ │ │ ├── ConcatEmbeddingsOptions.py │ │ │ ├── ConcatenationOptions.py │ │ │ ├── Conv2DOptions.py │ │ │ ├── CosOptions.py │ │ │ ├── CustomOptionsFormat.py │ │ │ ├── 
CustomQuantization.py │ │ │ ├── DensifyOptions.py │ │ │ ├── DepthToSpaceOptions.py │ │ │ ├── DepthwiseConv2DOptions.py │ │ │ ├── DequantizeOptions.py │ │ │ ├── DimensionMetadata.py │ │ │ ├── DimensionType.py │ │ │ ├── DivOptions.py │ │ │ ├── EmbeddingLookupSparseOptions.py │ │ │ ├── EqualOptions.py │ │ │ ├── ExpOptions.py │ │ │ ├── ExpandDimsOptions.py │ │ │ ├── FakeQuantOptions.py │ │ │ ├── FillOptions.py │ │ │ ├── FloorDivOptions.py │ │ │ ├── FloorModOptions.py │ │ │ ├── FullyConnectedOptions.py │ │ │ ├── FullyConnectedOptionsWeightsFormat.py │ │ │ ├── GatherNdOptions.py │ │ │ ├── GatherOptions.py │ │ │ ├── GreaterEqualOptions.py │ │ │ ├── GreaterOptions.py │ │ │ ├── HardSwishOptions.py │ │ │ ├── IfOptions.py │ │ │ ├── Int32Vector.py │ │ │ ├── L2NormOptions.py │ │ │ ├── LSHProjectionOptions.py │ │ │ ├── LSHProjectionType.py │ │ │ ├── LSTMKernelType.py │ │ │ ├── LSTMOptions.py │ │ │ ├── LeakyReluOptions.py │ │ │ ├── LessEqualOptions.py │ │ │ ├── LessOptions.py │ │ │ ├── LocalResponseNormalizationOptions.py │ │ │ ├── LogSoftmaxOptions.py │ │ │ ├── LogicalAndOptions.py │ │ │ ├── LogicalNotOptions.py │ │ │ ├── LogicalOrOptions.py │ │ │ ├── MatrixDiagOptions.py │ │ │ ├── MatrixSetDiagOptions.py │ │ │ ├── MaximumMinimumOptions.py │ │ │ ├── Metadata.py │ │ │ ├── MirrorPadMode.py │ │ │ ├── MirrorPadOptions.py │ │ │ ├── Model.py │ │ │ ├── MulOptions.py │ │ │ ├── NegOptions.py │ │ │ ├── NonMaxSuppressionV4Options.py │ │ │ ├── NonMaxSuppressionV5Options.py │ │ │ ├── NotEqualOptions.py │ │ │ ├── OneHotOptions.py │ │ │ ├── Operator.py │ │ │ ├── OperatorCode.py │ │ │ ├── PackOptions.py │ │ │ ├── PadOptions.py │ │ │ ├── PadV2Options.py │ │ │ ├── Padding.py │ │ │ ├── Pool2DOptions.py │ │ │ ├── PowOptions.py │ │ │ ├── QuantizationDetails.py │ │ │ ├── QuantizationParameters.py │ │ │ ├── QuantizeOptions.py │ │ │ ├── RNNOptions.py │ │ │ ├── RangeOptions.py │ │ │ ├── RankOptions.py │ │ │ ├── ReducerOptions.py │ │ │ ├── ReshapeOptions.py │ │ │ ├── ResizeBilinearOptions.py │ │ │ ├── 
ResizeNearestNeighborOptions.py │ │ │ ├── ReverseSequenceOptions.py │ │ │ ├── ReverseV2Options.py │ │ │ ├── SVDFOptions.py │ │ │ ├── ScatterNdOptions.py │ │ │ ├── SegmentSumOptions.py │ │ │ ├── SelectOptions.py │ │ │ ├── SelectV2Options.py │ │ │ ├── SequenceRNNOptions.py │ │ │ ├── ShapeOptions.py │ │ │ ├── SkipGramOptions.py │ │ │ ├── SliceOptions.py │ │ │ ├── SoftmaxOptions.py │ │ │ ├── SpaceToBatchNDOptions.py │ │ │ ├── SpaceToDepthOptions.py │ │ │ ├── SparseIndexVector.py │ │ │ ├── SparseToDenseOptions.py │ │ │ ├── SparsityParameters.py │ │ │ ├── SplitOptions.py │ │ │ ├── SplitVOptions.py │ │ │ ├── SquareOptions.py │ │ │ ├── SquaredDifferenceOptions.py │ │ │ ├── SqueezeOptions.py │ │ │ ├── StridedSliceOptions.py │ │ │ ├── SubGraph.py │ │ │ ├── SubOptions.py │ │ │ ├── Tensor.py │ │ │ ├── TensorType.py │ │ │ ├── TileOptions.py │ │ │ ├── TopKV2Options.py │ │ │ ├── TransposeConvOptions.py │ │ │ ├── TransposeOptions.py │ │ │ ├── Uint16Vector.py │ │ │ ├── Uint8Vector.py │ │ │ ├── UnidirectionalSequenceLSTMOptions.py │ │ │ ├── UniqueOptions.py │ │ │ ├── UnpackOptions.py │ │ │ ├── WhereOptions.py │ │ │ ├── WhileOptions.py │ │ │ ├── ZerosLikeOptions.py │ │ │ ├── __init__.py │ │ │ └── schema.fbs │ │ ├── helpers.py │ │ ├── reader.py │ │ └── writer.py │ ├── model/ │ │ ├── __init__.py │ │ ├── graph.py │ │ └── utils.py │ ├── operation_mapping.md │ ├── optimization/ │ │ ├── __init__.py │ │ ├── nnef_optimizer.py │ │ ├── onnx_optimizer.py │ │ ├── tf_optimizer.py │ │ └── tflite_optimizer.py │ ├── quantize.py │ ├── random_tensor.py │ ├── utils/ │ │ ├── __init__.py │ │ ├── stdio.py │ │ └── types.py │ └── visualize.py ├── package_info.md ├── pyproject.toml └── tests/ └── conversion/ ├── graphdef_test.py ├── onnx_test.py └── tflite_test.py ================================================ FILE CONTENTS ================================================ ================================================ FILE: .github/workflows/build_nnef.yml ================================================ 
name: Build, test and publish nnef on: push: tags: - 'nnef-v[0-9]+.[0-9]+.[0-9]+' jobs: build_wheels: name: Build nnef wheels on ${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: matrix: os: - ubuntu-latest - windows-latest - macos-latest - macos-14 steps: - uses: actions/checkout@v4 - name: Build wheels for nnef uses: pypa/cibuildwheel@v3.4.0 with: package-dir: nnef-pyproject output-dir: dist/ config-file: nnef-pyproject/pyproject.toml env: CIBW_BUILD: "cp38-* cp39-* cp310-* cp311-* cp312-* cp313-* cp314-*" - uses: actions/upload-artifact@v4 with: name: dist-${{ matrix.os }}-${{ github.ref_name }} path: ./dist/*.whl build_sdist: name: Build nnef sdist runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v4 - name: Set up Python uses: actions/setup-python@v5 with: python-version: "3.7" - name: Install dependencies run: | python -m pip install --upgrade pip pip install build - name: Build package run: python -m build ./nnef-pyproject/ --sdist --outdir ./dist - uses: actions/upload-artifact@v4 with: name: dist-${{ github.ref_name }} path: ./dist/*.tar.gz publish: name: Publish nnef runs-on: ubuntu-latest needs: [build_wheels, build_sdist] steps: - name: Download dist/ uses: actions/download-artifact@v4 with: path: dist merge-multiple: true - name: publish to PyPI uses: pypa/gh-action-pypi-publish@v1.13.0 with: user: __token__ password: ${{ secrets.PYPI_TOKEN }} ================================================ FILE: .github/workflows/build_nnef_tools.yml ================================================ name: Build, test and publish nnef_tools on: push: tags: - 'nnef_tools-v[0-9]+.[0-9]+.[0-9]+' jobs: build_nnef_tools: name: Build and publish nnef_tools runs-on: "ubuntu-latest" steps: - uses: actions/checkout@v4 - name: Set up Python uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install dependencies run: | python -m pip install --upgrade pip pip install build pytest pytest-xdist - name: Build package run: python -m build ./nnef_tools-pyproject/ 
--outdir ./dist/ - name: Publish artifacts uses: actions/upload-artifact@v4 with: name: dist-${{ github.ref_name }} path: ./dist/* - name: Install run: python -m pip install ./nnef-pyproject/ ./nnef_tools-pyproject[full] - name: Test run: python -m pytest ./nnef_tools-pyproject/tests/ -n auto - name: Publish to PyPI uses: pypa/gh-action-pypi-publish@v1.13.0 with: user: __token__ password: ${{ secrets.PYPI_TOKEN }} ================================================ FILE: .gitignore ================================================ *.pyc __pycache__ .idea /_models /out /*/build /*/dist *.egg-info ================================================ FILE: CODE_OF_CONDUCT.md ================================================ A reminder that this issue tracker is managed by the Khronos Group. Interactions here should follow the Khronos Code of Conduct (https://www.khronos.org/developers/code-of-conduct), which prohibits aggressive or derogatory language. Please keep the discussion friendly and civil. ================================================ FILE: README.md ================================================ [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)

**Development of the latest tools related to version 2.0 of the NNEF specification draft can be found on branch [v2.0](https://github.com/KhronosGroup/NNEF-Tools/tree/v2.0).** # NNEF-Tools NNEF reduces machine learning deployment fragmentation by enabling a rich mix of neural network training tools and inference engines to be used by applications across a diverse range of devices and platforms. This repository contains tools to generate and consume NNEF documents, such as a parser (C++ and Python) that can be included in consumer applications and converters for deep learning frameworks. * [NNEF Model Zoo](models#nnef-model-zoo) * [NNEF Tools](nnef_tools-pyproject#nnef-tools) * [NNEF Parser](nnef-pyproject#nnef-parser---repository) ## NNEF Model Zoo A **Model Zoo** is now available; the 'models' folder contains a variety of [NNEF models](models#nnef-model-zoo) converted from various sources. ## NNEF Tools [NNEF Tools](nnef_tools-pyproject#nnef-tools) folder contains tools to convert pre-trained models in `TensorFlow`/`caffe`/`caffe2`/`ONNX` to NNEF format. ## NNEF Parser [NNEF Parser](nnef-pyproject#nnef-parser---repository) folder contains `C++` and `Python` source code for a sample NNEF graph parser. ## Release Notes ### Added new operators in spec version 1.0.4 (06.15.2021) Following the update of the NNEF specification to version 1.0.4, conversion for the corresponding operators has been added. Furthermore, error handling of non-convertible models has been greatly enhanced with error messages detailing the exact cause of failure listed for all non-convertible operations before conversion is started. ### Reworked NNEF Tools (10.21.2020) The tools for converting models to NNEF and transforming NNEF models have been thoroughly reworked to make them more robust and unified and easier to maintain.
The basic functionality of the main scripts has been kept, however their parameterization has been simplified and unified in some places; please refer to the readme and the help (`-h` option) of the respective scripts for more details. The scripts cover the following major areas of functionality: model conversion, optimization, execution and visualization. A GMAC calculator is also provided, and further utility scripts may be added in the future. ### Change in quantization information in binary files (06.12.2020) According to the change in version 1.0.3 of the NNEF specification, quantization algorithm information has been deprecated in the tensor binary file format. The tensor binary only stores the item-type of the tensor data, and the binary reader does not return quantization information (also used to be called 'compression' info). Furthermore, the mapping between stored item-types and data-types in the structural description has been clarified, so that the reader of a tensor binary can tell what the data-type of the read tensor is. This enhances the reader as it can now properly map the binary data to C++ or Python numpy types upon reading. The C++ code has been updated to perform such a mapping, and is now able to return a typed array instead of just plain bytes. ### Change in shape inference compared to previous version (04.10.2019) According to a change in version 1.0.1 of the NNEF specification, the `shape_of` operator in NNEF syntax is deprecated, and the parser does not support it. This enables the decoupling of parsing from shape inference, allowing parsing to succeed even if shape information is not available for all operations, such as custom defined operations before the graph definition. Shape inference can still be run after parsing, furthermore it can be customized (via function pointers) for custom defined operations.
### TENSOR BINARY BUG FIX (10.19.2018) There was a bug in the Python code that reads/writes the tensor binary files (the header contained 4 extra padding bytes therefore not conforming to the spec). The code has been updated to read/write and _check_ the proper header size. As a consequence, any files written out with the code that contained the bug cannot be read back with the updated code. To aid the usage of such existing files, a script was created called `fix_nnef_binary_size.py` that can be used to remove the excess 4 bytes from existing NNEF files. The script is located in the root folder of this repo, it has no dependencies (not even the NNEF parser). It can be run on the main folder of an NNEF model, and it fixes all binary files in the folder. In case one runs it on an NNEF model that does not contain the bug, it does nothing. It can be used as follows: ``` python fix_nnef_binary_size.py my_nnef_model_folder ``` Such an invocation fixes the files in place. Optionally, a second argument can be supplied to the script to write the fixed files to a different output path. In this case, the script copies all non-binary files (such as graph.nnef) to the target folder, so the resulting folder contains the whole valid model. ================================================ FILE: _config.yml ================================================ theme: jekyll-theme-slate ================================================ FILE: fix_nnef_binary_size.py ================================================ # Copyright (c) 2017 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. import sys import os import struct def fix_nnef_binary(in_fn, out_fn): header_size = 128 with open(in_fn, 'rb') as file: file_size = os.fstat(file.fileno()).st_size header = file.read(header_size) excess = file.read(4) data = file.read() [magic1, magic2, major, minor] = bytearray(header[:4]) if magic1 != 0x4E or magic2 != 0xEF or major != 1 or minor != 0: return False data_length, = struct.unpack('i', header[4:8]) if file_size != header_size + data_length + 4: return False with open(out_fn, 'wb') as file: file.write(header) file.write(data) return True def fix_nnef_binaries(in_path, out_path): for root, dirs, files in os.walk(in_path): for filename in files: if not filename.startswith('.'): in_fn = os.path.join(root, filename) out_fn = os.path.join(out_path, os.path.relpath(in_fn, in_path)) if os.path.splitext(filename)[1] == '.dat': if fix_nnef_binary(in_fn, out_fn): print('Fixed file: ' + in_fn) elif out_fn != in_fn: with open(in_fn, 'rb') as in_file, open(out_fn, 'wb') as out_file: out_file.write(in_file.read()) if __name__ == "__main__": if len(sys.argv) < 2: print('input path must be provided') exit(-1) elif len(sys.argv) > 3: print('too many arguments provided') exit(-1) fix_nnef_binaries(in_path=sys.argv[1], out_path=sys.argv[2] if len(sys.argv) == 3 else sys.argv[1]) ================================================ FILE: models/README.md ================================================ NNEF model zoo ============== The following collection of models were compiled by running the converter tools in this repository on publicly available models. Each entry provides a link to the original and the converted model. 
* TensorFlow models have been acquired from [https://www.tensorflow.org/lite/guide/hosted_models] * ONNX models have been acquired from [https://github.com/onnx/models] * Caffe models have been acquired from [https://github.com/BVLC/caffe/wiki/Model-Zoo] * Caffe2 models have been acquired from [https://github.com/caffe2/models] AlexNet ------- _Floating point models_ Name | Size | Original | Converted --- | --- | --- | --- BVLC AlexNet | 244 Mb | [Caffe](https://github.com/BVLC/caffe/tree/master/models/bvlc_alexnet) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/bvlc_alexnet.caffemodel.nnef.tgz) BVLC AlexNet | 244 Mb | [ONNX](https://s3.amazonaws.com/download.onnx/models/opset_9/bvlc_alexnet.tar.gz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/bvlc_alexnet.onnx.nnef.tgz) VGG --- _Floating point models_ Name | Size | Original | Converted --- | --- | --- | --- VGG-16 | 553.6 MB Mb | [Caffe](https://gist.github.com/ksimonyan/211839e770f7b538e2d8) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/vgg16.caffemodel.nnef.tgz) VGG-19 | 574.8 MB Mb | [Caffe](https://gist.github.com/ksimonyan/3785162f95cd2d5fee77) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/vgg19.caffemodel.nnef.tgz) VGG-16 | 527.8 MB Mb | [ONNX](https://s3.amazonaws.com/onnx-model-zoo/vgg/vgg16/vgg16.onnx) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/vgg16.onnx.nnef.tgz) VGG-19 | 548.1 MB Mb | [ONNX](https://s3.amazonaws.com/onnx-model-zoo/vgg/vgg19/vgg19.onnx) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/vgg19.onnx.nnef.tgz) GoogleNet --------- _Floating point models_ Name | Size | Original | Converted --- | --- | --- | --- Inception v1 | 28 Mb | [Caffe2](https://github.com/caffe2/models/tree/master/inception_v1) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/inception_v1.caffe2.nnef.tgz) Inception v1 | 28 Mb | [ONNX](https://s3.amazonaws.com/download.onnx/models/opset_9/inception_v1.tar.gz) | 
[NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/inception_v1.onnx.nnef.tgz) Inception v2 | 45 Mb | [Caffe2](https://github.com/caffe2/models/tree/master/inception_v2) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/inception_v2.caffe2.nnef.tgz) Inception v2 | 45 Mb | [ONNX](https://s3.amazonaws.com/download.onnx/models/opset_9/inception_v2.tar.gz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/inception_v2.onnx.nnef.tgz) Inception v3 | 95.3 Mb | [TensorFlow](https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v3_2018_04_27.tgz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/inception_v3.tfpb.nnef.tgz) Inception v4 | 170.7 Mb | [TensorFlow](https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v4_2018_04_27.tgz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/inception_v4.tfpb.nnef.tgz) BVLC GoogleNet | 28 Mb | [Caffe](https://github.com/BVLC/caffe/tree/master/models/bvlc_googlenet) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/bvlc_googlenet.caffemodel.nnef.tgz) BVLC GoogleNet | 28 Mb | [ONNX](https://s3.amazonaws.com/download.onnx/models/opset_9/bvlc_googlenet.tar.gz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/bvlc_googlenet.onnx.nnef.tgz) _Quantized models_ Name | Size | Original | Converted --- | --- | --- | --- Inception v1 | 6.4 Mb | [TensorFlow-Lite](http://download.tensorflow.org/models/inception_v1_224_quant_20181026.tgz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/inception_v1_quant.tflite.nnef.tgz) Inception v2 | 11 Mb | [TensorFlow-Lite](http://download.tensorflow.org/models/inception_v2_224_quant_20181026.tgz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/inception_v2_quant.tflite.nnef.tgz) Inception v3 | 23 Mb | [TensorFlow-Lite](http://download.tensorflow.org/models/tflite_11_05_08/inception_v3_quant.tgz) | 
[NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/inception_v3_quant.tflite.nnef.tgz) Inception v4 | 41 Mb | [TensorFlow-Lite](http://download.tensorflow.org/models/inception_v4_299_quant_20181026.tgz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/inception_v4_quant.tflite.nnef.tgz) ResNet ------ _Floating point models_ Name | Size | Original | Converted --- | --- | --- | --- Resnet v1-18 | 44.7 Mb | [ONNX](https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet18v1/resnet18v1.onnx) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/resnet_v1_18.onnx.nnef.tgz) Resnet v1-34 | 83.3 Mb | [ONNX](https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet34v1/resnet34v1.onnx) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/resnet_v1_34.onnx.nnef.tgz) Resnet v1-50 | 97.8 Mb | [ONNX](https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet50v1/resnet50v1.onnx) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/resnet_v1_50.onnx.nnef.tgz) Resnet v1-101 | 170.6 MB Mb | [ONNX](https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet101v1/resnet101v1.onnx) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/resnet_v1_101.onnx.nnef.tgz) Resnet v1-152 | 242.3 Mb | [Caffe](https://github.com/KaimingHe/deep-residual-networks) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/resnet_v1_152.caffemodel.nnef.tgz) Resnet v2-18 | 44.6 Mb | [ONNX](https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet18v2/resnet18v2.onnx) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/resnet_v2_18.onnx.nnef.tgz) Resnet v2-34 | 83.2 Mb | [ONNX](https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet34v2/resnet34v2.onnx) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/resnet_v2_34.onnx.nnef.tgz) Resnet v2-50 | 97.7 Mb | [ONNX](https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet50v2/resnet50v2.onnx) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/resnet_v2_50.onnx.nnef.tgz) Resnet v2-101 | 170.4 Mb | 
[ONNX](https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet101v2/resnet101v2.onnx) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/resnet_v2_101.onnx.nnef.tgz) Inception-Resnet v2 | 121 Mb | [TensorFlow](https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_resnet_v2_2018_04_27.tgz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/inception_resnet_v2.tfpb.nnef.tgz) MobileNet --------- _Floating point models_ Name | Size | Original | Converted --- | --- | --- | --- MobileNet v1-1.0 | 16.9 Mb | [TensorFlow](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_224.tgz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/mobilenet_v1_1.0.tfpb.nnef.tgz) MobileNet v1-1.0 | 17.2 Mb | [Caffe](https://github.com/shicai/MobileNet-Caffe) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/mobilenet_v1_1.0.caffemodel.nnef.tgz) MobileNet v2-1.0 | 14.0 Mb | [TensorFlow](http://download.tensorflow.org/models/tflite_11_05_08/mobilenet_v2_1.0_224.tgz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/mobilenet_v2_1.0.tfpb.nnef.tgz) MobileNet v2-1.0 | 14.4 Mb | [Caffe](https://github.com/shicai/MobileNet-Caffe) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/mobilenet_v2_1.0.caffemodel.nnef.tgz) MobileNet v2-1.0 | 13.6 Mb | [ONNX](https://s3.amazonaws.com/onnx-model-zoo/mobilenet/mobilenetv2-1.0/mobilenetv2-1.0.onnx) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/mobilenet_v2_1.0.onnx.nnef.tgz) _Quantized models_ Name | Size | Original | Converted --- | --- | --- | --- MobileNet v1-1.0 | 4.3 Mb | [TensorFlow-Lite](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/mobilenet_v1_1.0_quant.tflite.nnef.tgz) MobileNet v2-1.0 | 3.4 Mb | [TensorFlow-Lite](http://download.tensorflow.org/models/tflite_11_05_08/mobilenet_v2_1.0_224_quant.tgz) | 
[NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/mobilenet_v2_1.0_quant.tflite.nnef.tgz) SqueezeNet ---------- _Floating point models_ Name | Size | Original | Converted --- | --- | --- | --- SqueezeNet | 5.0 Mb | [TensorFlow](https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/squeezenet_2018_04_27.tgz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/squeezenet.tfpb.nnef.tgz) SqueezeNet 1.0 | 4.7 Mb | [ONNX](https://s3.amazonaws.com/download.onnx/models/opset_9/squeezenet.tar.gz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/squeezenet_v1.0.onnx.nnef.tgz) SqueezeNet 1.1 | 4.7 Mb | [ONNX](https://s3.amazonaws.com/onnx-model-zoo/squeezenet/squeezenet1.1/squeezenet1.1.onnx) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/squeezenet_v1.1.onnx.nnef.tgz) SqueezeNet 1.0 | 4.7 Mb | [Caffe](https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.0) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/squeezenet_v1.0.caffemodel.nnef.tgz) SqueezeNet 1.1 | 4.7 Mb | [Caffe](https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/squeezenet_v1.1.caffemodel.nnef.tgz) ShuffleNet ---------- _Floating point models_ Name | Size | Original | Converted --- | --- | --- | --- ShuffleNet | 5.3 Mb | [ONNX](https://s3.amazonaws.com/download.onnx/models/opset_9/shufflenet.tar.gz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/shufflenet.onnx.nnef.tgz) NASNet ------ _Floating point models_ Name | Size | Original | Converted --- | --- | --- | --- NasNet mobile | 21.4 Mb | [TensorFlow](https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/nasnet_mobile_2018_04_27.tgz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/nasnet_mobile.tfpb.nnef.tgz) ================================================ FILE: nnef-pyproject/README.md ================================================ NNEF 
Parser - repository =================== Introduction ------------ The code consists of a C++ library that contains two example parsers (one for flat and one for compositional NNEF syntax). This library can be used to build tools that require parsing NNEF files. It requires a C++11 compatible compiler. The Python code wraps the C++ parser and adds some further utilities to load and save NNEF documents easily. It also contains a script to validate NNEF documents (`validate.py`) and optionally print a lowered version of the graph. If the tool encounters an invalid document, it prints the first error and stops parsing. Type `python validate.py -h` to show the usage help. C++ Library ----------- Documentation of the library: [cpp_api.md](cpp_api.md) Python Package -------------- Documentation of the Python package: [package_info.md](package_info.md) ================================================ FILE: nnef-pyproject/cpp_api.md ================================================ Building the C++ library ------------------------ The C++ library can be compiled with cmake. The `examples/samples/sample.cpp` contains a minimal example that showcases the use of the parser. Example of build commands under Linux: ```` $ cd nnef/cpp $ mkdir build && cd build $ cmake .. $ make ```` Using the C++ library --------------------- Using the C++ parser is as simple as follows: ``` #include "nnef.h" nnef::Graph graph; std::string error; bool success = nnef::load_graph("path/to/NNEF/folder", graph, error); ``` Upon success, the graph structure is filled, while in case of an error, the error string is filled. The fields inside the graph structure, and further parameters to the `load_graph` function are documented in `nnef.h`. After the graph is successfully loaded, shape inference can be performed in a subsequent call if required: ``` success = nnef::infer_shapes(graph, error); ``` Upon success, the shape fields of tensors are filled in.
================================================ FILE: nnef-pyproject/examples/alexnet.txt ================================================ # Copyright (c) 2017 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. version 1.0; graph alexnet( input ) -> ( output ) { input = external(shape = [1, 3, 224, 224]); kernel1 = variable(shape = [64, 3, 11, 11], label = 'alexnet_v2/conv1/kernel'); bias1 = variable(shape = [1, 64], label = 'alexnet_v2/conv1/bias'); conv1 = conv(input, kernel1, bias1, padding = [(0, 0), (0, 0)], border = 'constant', stride = [4, 4], dilation = [1, 1]); relu1 = relu(conv1); pool1 = max_pool(relu1, size = [1, 1, 3, 3], padding = [(0, 0), (0, 0), (0, 0), (0, 0)], border = 'ignore', stride = [1, 1, 2, 2]); kernel2 = variable(shape = [192, 64, 5, 5], label = 'alexnet_v2/conv2/kernel'); bias2 = variable(shape = [1, 192], label = 'alexnet_v2/conv2/bias'); conv2 = conv(pool1, kernel2, bias2, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu2 = relu(conv2); pool2 = max_pool(relu2, size = [1, 1, 3, 3], padding = [(0, 0), (0, 0), (0, 0), (0, 0)], border = 'ignore', stride = [1, 1, 2, 2]); kernel3 = variable(shape = [384, 192, 3, 3], label = 'alexnet_v2/conv3/kernel'); bias3 = variable(shape = [1, 384], label = 'alexnet_v2/conv3/bias'); conv3 = conv(pool2, kernel3, bias3, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu3 = relu(conv3); kernel4 = variable(shape = [384, 384, 3, 3], label = 
'alexnet_v2/conv4/kernel'); bias4 = variable(shape = [1, 384], label = 'alexnet_v2/conv4/bias'); conv4 = conv(relu3, kernel4, bias4, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu4 = relu(conv4); kernel5 = variable(shape = [256, 384, 3, 3], label = 'alexnet_v2/conv5/kernel'); bias5 = variable(shape = [1, 256], label = 'alexnet_v2/conv5/bias'); conv5 = conv(relu4, kernel5, bias5, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu5 = relu(conv5); pool3 = max_pool(relu5, size = [1, 1, 3, 3], padding = [(0, 0), (0, 0), (0, 0), (0, 0)], border = 'ignore', stride = [1, 1, 2, 2]); kernel6 = variable(shape = [4096, 256, 5, 5], label = 'alexnet_v2/fc6/kernel'); bias6 = variable(shape = [1, 4096], label = 'alexnet_v2/fc6/bias'); conv6 = conv(pool3, kernel6, bias6, padding = [(0, 0), (0, 0)], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu6 = relu(conv6); kernel7 = variable(shape = [4096, 4096, 1, 1], label = 'alexnet_v2/fc7/kernel'); bias7 = variable(shape = [1, 4096], label = 'alexnet_v2/fc7/bias'); conv7 = conv(relu6, kernel7, bias7, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu7 = relu(conv7); kernel8 = variable(shape = [1000, 4096, 1, 1], label = 'alexnet_v2/fc8/kernel'); bias8 = variable(shape = [1, 1000], label = 'alexnet_v2/fc8/bias'); output = conv(relu7, kernel8, bias8, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); } ================================================ FILE: nnef-pyproject/examples/googlenet.txt ================================================ # Copyright (c) 2017 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. version 1.0; graph googlenet( input ) -> ( output ) { input = external(shape = [1, 3, 224, 224]); kernel1 = variable(shape = [64, 3, 7, 7], label = 'InceptionV1/Conv2d_1a_7x7/kernel'); bias1 = variable(shape = [1, 64], label = 'InceptionV1/Conv2d_1a_7x7/bias'); conv1 = conv(input, kernel1, bias1, padding = [], border = 'constant', stride = [2, 2], dilation = [1, 1]); relu1 = relu(conv1); pool1 = max_pool(relu1, size = [1, 1, 3, 3], padding = [], border = 'ignore', stride = [1, 1, 2, 2]); kernel2 = variable(shape = [64, 64, 1, 1], label = 'InceptionV1/Conv2d_2b_1x1/kernel'); bias2 = variable(shape = [1, 64], label = 'InceptionV1/Conv2d_2b_1x1/bias'); conv2 = conv(pool1, kernel2, bias2, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu2 = relu(conv2); kernel3 = variable(shape = [192, 64, 3, 3], label = 'InceptionV1/Conv2d_2c_3x3/kernel'); bias3 = variable(shape = [1, 192], label = 'InceptionV1/Conv2d_2c_3x3/bias'); conv3 = conv(relu2, kernel3, bias3, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu3 = relu(conv3); pool2 = max_pool(relu3, size = [1, 1, 3, 3], padding = [], border = 'ignore', stride = [1, 1, 2, 2]); kernel4 = variable(shape = [64, 192, 1, 1], label = 'InceptionV1/Mixed_3b/Branch_0/Conv2d_0a_1x1/kernel'); bias4 = variable(shape = [1, 64], label = 'InceptionV1/Mixed_3b/Branch_0/Conv2d_0a_1x1/bias'); conv4 = conv(pool2, kernel4, bias4, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu4 = relu(conv4); kernel5 = variable(shape = [96, 192, 1, 1], label = 
'InceptionV1/Mixed_3b/Branch_1/Conv2d_0a_1x1/kernel'); bias5 = variable(shape = [1, 96], label = 'InceptionV1/Mixed_3b/Branch_1/Conv2d_0a_1x1/bias'); conv5 = conv(pool2, kernel5, bias5, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu5 = relu(conv5); kernel6 = variable(shape = [128, 96, 3, 3], label = 'InceptionV1/Mixed_3b/Branch_1/Conv2d_0b_3x3/kernel'); bias6 = variable(shape = [1, 128], label = 'InceptionV1/Mixed_3b/Branch_1/Conv2d_0b_3x3/bias'); conv6 = conv(relu5, kernel6, bias6, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu6 = relu(conv6); kernel7 = variable(shape = [16, 192, 1, 1], label = 'InceptionV1/Mixed_3b/Branch_2/Conv2d_0a_1x1/kernel'); bias7 = variable(shape = [1, 16], label = 'InceptionV1/Mixed_3b/Branch_2/Conv2d_0a_1x1/bias'); conv7 = conv(pool2, kernel7, bias7, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu7 = relu(conv7); kernel8 = variable(shape = [32, 16, 3, 3], label = 'InceptionV1/Mixed_3b/Branch_2/Conv2d_0b_3x3/kernel'); bias8 = variable(shape = [1, 32], label = 'InceptionV1/Mixed_3b/Branch_2/Conv2d_0b_3x3/bias'); conv8 = conv(relu7, kernel8, bias8, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu8 = relu(conv8); pool3 = max_pool(pool2, size = [1, 1, 3, 3], padding = [], border = 'ignore', stride = [1, 1, 1, 1]); kernel9 = variable(shape = [32, 192, 1, 1], label = 'InceptionV1/Mixed_3b/Branch_3/Conv2d_0b_1x1/kernel'); bias9 = variable(shape = [1, 32], label = 'InceptionV1/Mixed_3b/Branch_3/Conv2d_0b_1x1/bias'); conv9 = conv(pool3, kernel9, bias9, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu9 = relu(conv9); concat1 = concat([relu4,relu6,relu8,relu9], axis = 1); kernel10 = variable(shape = [128, 256, 1, 1], label = 'InceptionV1/Mixed_3c/Branch_0/Conv2d_0a_1x1/kernel'); bias10 = variable(shape = [1, 128], label = 'InceptionV1/Mixed_3c/Branch_0/Conv2d_0a_1x1/bias'); conv10 = conv(concat1, kernel10, 
bias10, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu10 = relu(conv10); kernel11 = variable(shape = [128, 256, 1, 1], label = 'InceptionV1/Mixed_3c/Branch_1/Conv2d_0a_1x1/kernel'); bias11 = variable(shape = [1, 128], label = 'InceptionV1/Mixed_3c/Branch_1/Conv2d_0a_1x1/bias'); conv11 = conv(concat1, kernel11, bias11, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu11 = relu(conv11); kernel12 = variable(shape = [192, 128, 3, 3], label = 'InceptionV1/Mixed_3c/Branch_1/Conv2d_0b_3x3/kernel'); bias12 = variable(shape = [1, 192], label = 'InceptionV1/Mixed_3c/Branch_1/Conv2d_0b_3x3/bias'); conv12 = conv(relu11, kernel12, bias12, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu12 = relu(conv12); kernel13 = variable(shape = [32, 256, 1, 1], label = 'InceptionV1/Mixed_3c/Branch_2/Conv2d_0a_1x1/kernel'); bias13 = variable(shape = [1, 32], label = 'InceptionV1/Mixed_3c/Branch_2/Conv2d_0a_1x1/bias'); conv13 = conv(concat1, kernel13, bias13, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu13 = relu(conv13); kernel14 = variable(shape = [96, 32, 3, 3], label = 'InceptionV1/Mixed_3c/Branch_2/Conv2d_0b_3x3/kernel'); bias14 = variable(shape = [1, 96], label = 'InceptionV1/Mixed_3c/Branch_2/Conv2d_0b_3x3/bias'); conv14 = conv(relu13, kernel14, bias14, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu14 = relu(conv14); pool4 = max_pool(concat1, size = [1, 1, 3, 3], padding = [], border = 'ignore', stride = [1, 1, 1, 1]); kernel15 = variable(shape = [64, 256, 1, 1], label = 'InceptionV1/Mixed_3c/Branch_3/Conv2d_0b_1x1/kernel'); bias15 = variable(shape = [1, 64], label = 'InceptionV1/Mixed_3c/Branch_3/Conv2d_0b_1x1/bias'); conv15 = conv(pool4, kernel15, bias15, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu15 = relu(conv15); concat2 = concat([relu10,relu12,relu14,relu15], axis = 1); pool5 = max_pool(concat2, size = 
[1, 1, 3, 3], padding = [], border = 'ignore', stride = [1, 1, 2, 2]); kernel16 = variable(shape = [192, 480, 1, 1], label = 'InceptionV1/Mixed_4b/Branch_0/Conv2d_0a_1x1/kernel'); bias16 = variable(shape = [1, 192], label = 'InceptionV1/Mixed_4b/Branch_0/Conv2d_0a_1x1/bias'); conv16 = conv(pool5, kernel16, bias16, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu16 = relu(conv16); kernel17 = variable(shape = [96, 480, 1, 1], label = 'InceptionV1/Mixed_4b/Branch_1/Conv2d_0a_1x1/kernel'); bias17 = variable(shape = [1, 96], label = 'InceptionV1/Mixed_4b/Branch_1/Conv2d_0a_1x1/bias'); conv17 = conv(pool5, kernel17, bias17, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu17 = relu(conv17); kernel18 = variable(shape = [208, 96, 3, 3], label = 'InceptionV1/Mixed_4b/Branch_1/Conv2d_0b_3x3/kernel'); bias18 = variable(shape = [1, 208], label = 'InceptionV1/Mixed_4b/Branch_1/Conv2d_0b_3x3/bias'); conv18 = conv(relu17, kernel18, bias18, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu18 = relu(conv18); kernel19 = variable(shape = [16, 480, 1, 1], label = 'InceptionV1/Mixed_4b/Branch_2/Conv2d_0a_1x1/kernel'); bias19 = variable(shape = [1, 16], label = 'InceptionV1/Mixed_4b/Branch_2/Conv2d_0a_1x1/bias'); conv19 = conv(pool5, kernel19, bias19, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu19 = relu(conv19); kernel20 = variable(shape = [48, 16, 3, 3], label = 'InceptionV1/Mixed_4b/Branch_2/Conv2d_0b_3x3/kernel'); bias20 = variable(shape = [1, 48], label = 'InceptionV1/Mixed_4b/Branch_2/Conv2d_0b_3x3/bias'); conv20 = conv(relu19, kernel20, bias20, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu20 = relu(conv20); pool6 = max_pool(pool5, size = [1, 1, 3, 3], padding = [], border = 'ignore', stride = [1, 1, 1, 1]); kernel21 = variable(shape = [64, 480, 1, 1], label = 'InceptionV1/Mixed_4b/Branch_3/Conv2d_0b_1x1/kernel'); bias21 = variable(shape 
= [1, 64], label = 'InceptionV1/Mixed_4b/Branch_3/Conv2d_0b_1x1/bias'); conv21 = conv(pool6, kernel21, bias21, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu21 = relu(conv21); concat3 = concat([relu16,relu18,relu20,relu21], axis = 1); kernel22 = variable(shape = [160, 512, 1, 1], label = 'InceptionV1/Mixed_4c/Branch_0/Conv2d_0a_1x1/kernel'); bias22 = variable(shape = [1, 160], label = 'InceptionV1/Mixed_4c/Branch_0/Conv2d_0a_1x1/bias'); conv22 = conv(concat3, kernel22, bias22, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu22 = relu(conv22); kernel23 = variable(shape = [112, 512, 1, 1], label = 'InceptionV1/Mixed_4c/Branch_1/Conv2d_0a_1x1/kernel'); bias23 = variable(shape = [1, 112], label = 'InceptionV1/Mixed_4c/Branch_1/Conv2d_0a_1x1/bias'); conv23 = conv(concat3, kernel23, bias23, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu23 = relu(conv23); kernel24 = variable(shape = [224, 112, 3, 3], label = 'InceptionV1/Mixed_4c/Branch_1/Conv2d_0b_3x3/kernel'); bias24 = variable(shape = [1, 224], label = 'InceptionV1/Mixed_4c/Branch_1/Conv2d_0b_3x3/bias'); conv24 = conv(relu23, kernel24, bias24, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu24 = relu(conv24); kernel25 = variable(shape = [24, 512, 1, 1], label = 'InceptionV1/Mixed_4c/Branch_2/Conv2d_0a_1x1/kernel'); bias25 = variable(shape = [1, 24], label = 'InceptionV1/Mixed_4c/Branch_2/Conv2d_0a_1x1/bias'); conv25 = conv(concat3, kernel25, bias25, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu25 = relu(conv25); kernel26 = variable(shape = [64, 24, 3, 3], label = 'InceptionV1/Mixed_4c/Branch_2/Conv2d_0b_3x3/kernel'); bias26 = variable(shape = [1, 64], label = 'InceptionV1/Mixed_4c/Branch_2/Conv2d_0b_3x3/bias'); conv26 = conv(relu25, kernel26, bias26, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu26 = relu(conv26); pool7 = max_pool(concat3, size 
= [1, 1, 3, 3], padding = [], border = 'ignore', stride = [1, 1, 1, 1]); kernel27 = variable(shape = [64, 512, 1, 1], label = 'InceptionV1/Mixed_4c/Branch_3/Conv2d_0b_1x1/kernel'); bias27 = variable(shape = [1, 64], label = 'InceptionV1/Mixed_4c/Branch_3/Conv2d_0b_1x1/bias'); conv27 = conv(pool7, kernel27, bias27, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu27 = relu(conv27); concat4 = concat([relu22,relu24,relu26,relu27], axis = 1); kernel28 = variable(shape = [128, 512, 1, 1], label = 'InceptionV1/Mixed_4d/Branch_0/Conv2d_0a_1x1/kernel'); bias28 = variable(shape = [1, 128], label = 'InceptionV1/Mixed_4d/Branch_0/Conv2d_0a_1x1/bias'); conv28 = conv(concat4, kernel28, bias28, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu28 = relu(conv28); kernel29 = variable(shape = [128, 512, 1, 1], label = 'InceptionV1/Mixed_4d/Branch_1/Conv2d_0a_1x1/kernel'); bias29 = variable(shape = [1, 128], label = 'InceptionV1/Mixed_4d/Branch_1/Conv2d_0a_1x1/bias'); conv29 = conv(concat4, kernel29, bias29, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu29 = relu(conv29); kernel30 = variable(shape = [256, 128, 3, 3], label = 'InceptionV1/Mixed_4d/Branch_1/Conv2d_0b_3x3/kernel'); bias30 = variable(shape = [1, 256], label = 'InceptionV1/Mixed_4d/Branch_1/Conv2d_0b_3x3/bias'); conv30 = conv(relu29, kernel30, bias30, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu30 = relu(conv30); kernel31 = variable(shape = [24, 512, 1, 1], label = 'InceptionV1/Mixed_4d/Branch_2/Conv2d_0a_1x1/kernel'); bias31 = variable(shape = [1, 24], label = 'InceptionV1/Mixed_4d/Branch_2/Conv2d_0a_1x1/bias'); conv31 = conv(concat4, kernel31, bias31, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu31 = relu(conv31); kernel32 = variable(shape = [64, 24, 3, 3], label = 'InceptionV1/Mixed_4d/Branch_2/Conv2d_0b_3x3/kernel'); bias32 = variable(shape = [1, 64], label = 
'InceptionV1/Mixed_4d/Branch_2/Conv2d_0b_3x3/bias'); conv32 = conv(relu31, kernel32, bias32, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu32 = relu(conv32); pool8 = max_pool(concat4, size = [1, 1, 3, 3], padding = [], border = 'ignore', stride = [1, 1, 1, 1]); kernel33 = variable(shape = [64, 512, 1, 1], label = 'InceptionV1/Mixed_4d/Branch_3/Conv2d_0b_1x1/kernel'); bias33 = variable(shape = [1, 64], label = 'InceptionV1/Mixed_4d/Branch_3/Conv2d_0b_1x1/bias'); conv33 = conv(pool8, kernel33, bias33, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu33 = relu(conv33); concat5 = concat([relu28,relu30,relu32,relu33], axis = 1); kernel34 = variable(shape = [112, 512, 1, 1], label = 'InceptionV1/Mixed_4e/Branch_0/Conv2d_0a_1x1/kernel'); bias34 = variable(shape = [1, 112], label = 'InceptionV1/Mixed_4e/Branch_0/Conv2d_0a_1x1/bias'); conv34 = conv(concat5, kernel34, bias34, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu34 = relu(conv34); kernel35 = variable(shape = [144, 512, 1, 1], label = 'InceptionV1/Mixed_4e/Branch_1/Conv2d_0a_1x1/kernel'); bias35 = variable(shape = [1, 144], label = 'InceptionV1/Mixed_4e/Branch_1/Conv2d_0a_1x1/bias'); conv35 = conv(concat5, kernel35, bias35, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu35 = relu(conv35); kernel36 = variable(shape = [288, 144, 3, 3], label = 'InceptionV1/Mixed_4e/Branch_1/Conv2d_0b_3x3/kernel'); bias36 = variable(shape = [1, 288], label = 'InceptionV1/Mixed_4e/Branch_1/Conv2d_0b_3x3/bias'); conv36 = conv(relu35, kernel36, bias36, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu36 = relu(conv36); kernel37 = variable(shape = [32, 512, 1, 1], label = 'InceptionV1/Mixed_4e/Branch_2/Conv2d_0a_1x1/kernel'); bias37 = variable(shape = [1, 32], label = 'InceptionV1/Mixed_4e/Branch_2/Conv2d_0a_1x1/bias'); conv37 = conv(concat5, kernel37, bias37, padding = [], border = 'constant', 
stride = [1, 1], dilation = [1, 1]); relu37 = relu(conv37); kernel38 = variable(shape = [64, 32, 3, 3], label = 'InceptionV1/Mixed_4e/Branch_2/Conv2d_0b_3x3/kernel'); bias38 = variable(shape = [1, 64], label = 'InceptionV1/Mixed_4e/Branch_2/Conv2d_0b_3x3/bias'); conv38 = conv(relu37, kernel38, bias38, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu38 = relu(conv38); pool9 = max_pool(concat5, size = [1, 1, 3, 3], padding = [], border = 'ignore', stride = [1, 1, 1, 1]); kernel39 = variable(shape = [64, 512, 1, 1], label = 'InceptionV1/Mixed_4e/Branch_3/Conv2d_0b_1x1/kernel'); bias39 = variable(shape = [1, 64], label = 'InceptionV1/Mixed_4e/Branch_3/Conv2d_0b_1x1/bias'); conv39 = conv(pool9, kernel39, bias39, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu39 = relu(conv39); concat6 = concat([relu34,relu36,relu38,relu39], axis = 1); kernel40 = variable(shape = [256, 528, 1, 1], label = 'InceptionV1/Mixed_4f/Branch_0/Conv2d_0a_1x1/kernel'); bias40 = variable(shape = [1, 256], label = 'InceptionV1/Mixed_4f/Branch_0/Conv2d_0a_1x1/bias'); conv40 = conv(concat6, kernel40, bias40, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu40 = relu(conv40); kernel41 = variable(shape = [160, 528, 1, 1], label = 'InceptionV1/Mixed_4f/Branch_1/Conv2d_0a_1x1/kernel'); bias41 = variable(shape = [1, 160], label = 'InceptionV1/Mixed_4f/Branch_1/Conv2d_0a_1x1/bias'); conv41 = conv(concat6, kernel41, bias41, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu41 = relu(conv41); kernel42 = variable(shape = [320, 160, 3, 3], label = 'InceptionV1/Mixed_4f/Branch_1/Conv2d_0b_3x3/kernel'); bias42 = variable(shape = [1, 320], label = 'InceptionV1/Mixed_4f/Branch_1/Conv2d_0b_3x3/bias'); conv42 = conv(relu41, kernel42, bias42, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu42 = relu(conv42); kernel43 = variable(shape = [32, 528, 1, 1], label = 
'InceptionV1/Mixed_4f/Branch_2/Conv2d_0a_1x1/kernel'); bias43 = variable(shape = [1, 32], label = 'InceptionV1/Mixed_4f/Branch_2/Conv2d_0a_1x1/bias'); conv43 = conv(concat6, kernel43, bias43, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu43 = relu(conv43); kernel44 = variable(shape = [128, 32, 3, 3], label = 'InceptionV1/Mixed_4f/Branch_2/Conv2d_0b_3x3/kernel'); bias44 = variable(shape = [1, 128], label = 'InceptionV1/Mixed_4f/Branch_2/Conv2d_0b_3x3/bias'); conv44 = conv(relu43, kernel44, bias44, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu44 = relu(conv44); pool10 = max_pool(concat6, size = [1, 1, 3, 3], padding = [], border = 'ignore', stride = [1, 1, 1, 1]); kernel45 = variable(shape = [128, 528, 1, 1], label = 'InceptionV1/Mixed_4f/Branch_3/Conv2d_0b_1x1/kernel'); bias45 = variable(shape = [1, 128], label = 'InceptionV1/Mixed_4f/Branch_3/Conv2d_0b_1x1/bias'); conv45 = conv(pool10, kernel45, bias45, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu45 = relu(conv45); concat7 = concat([relu40,relu42,relu44,relu45], axis = 1); pool11 = max_pool(concat7, size = [1, 1, 2, 2], padding = [], border = 'ignore', stride = [1, 1, 2, 2]); kernel46 = variable(shape = [256, 832, 1, 1], label = 'InceptionV1/Mixed_5b/Branch_0/Conv2d_0a_1x1/kernel'); bias46 = variable(shape = [1, 256], label = 'InceptionV1/Mixed_5b/Branch_0/Conv2d_0a_1x1/bias'); conv46 = conv(pool11, kernel46, bias46, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu46 = relu(conv46); kernel47 = variable(shape = [160, 832, 1, 1], label = 'InceptionV1/Mixed_5b/Branch_1/Conv2d_0a_1x1/kernel'); bias47 = variable(shape = [1, 160], label = 'InceptionV1/Mixed_5b/Branch_1/Conv2d_0a_1x1/bias'); conv47 = conv(pool11, kernel47, bias47, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu47 = relu(conv47); kernel48 = variable(shape = [320, 160, 3, 3], label = 
'InceptionV1/Mixed_5b/Branch_1/Conv2d_0b_3x3/kernel'); bias48 = variable(shape = [1, 320], label = 'InceptionV1/Mixed_5b/Branch_1/Conv2d_0b_3x3/bias'); conv48 = conv(relu47, kernel48, bias48, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu48 = relu(conv48); kernel49 = variable(shape = [32, 832, 1, 1], label = 'InceptionV1/Mixed_5b/Branch_2/Conv2d_0a_1x1/kernel'); bias49 = variable(shape = [1, 32], label = 'InceptionV1/Mixed_5b/Branch_2/Conv2d_0a_1x1/bias'); conv49 = conv(pool11, kernel49, bias49, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu49 = relu(conv49); kernel50 = variable(shape = [128, 32, 3, 3], label = 'InceptionV1/Mixed_5b/Branch_2/Conv2d_0a_3x3/kernel'); bias50 = variable(shape = [1, 128], label = 'InceptionV1/Mixed_5b/Branch_2/Conv2d_0a_3x3/bias'); conv50 = conv(relu49, kernel50, bias50, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu50 = relu(conv50); pool12 = max_pool(pool11, size = [1, 1, 3, 3], padding = [], border = 'ignore', stride = [1, 1, 1, 1]); kernel51 = variable(shape = [128, 832, 1, 1], label = 'InceptionV1/Mixed_5b/Branch_3/Conv2d_0b_1x1/kernel'); bias51 = variable(shape = [1, 128], label = 'InceptionV1/Mixed_5b/Branch_3/Conv2d_0b_1x1/bias'); conv51 = conv(pool12, kernel51, bias51, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu51 = relu(conv51); concat8 = concat([relu46,relu48,relu50,relu51], axis = 1); kernel52 = variable(shape = [384, 832, 1, 1], label = 'InceptionV1/Mixed_5c/Branch_0/Conv2d_0a_1x1/kernel'); bias52 = variable(shape = [1, 384], label = 'InceptionV1/Mixed_5c/Branch_0/Conv2d_0a_1x1/bias'); conv52 = conv(concat8, kernel52, bias52, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu52 = relu(conv52); kernel53 = variable(shape = [192, 832, 1, 1], label = 'InceptionV1/Mixed_5c/Branch_1/Conv2d_0a_1x1/kernel'); bias53 = variable(shape = [1, 192], label = 
'InceptionV1/Mixed_5c/Branch_1/Conv2d_0a_1x1/bias'); conv53 = conv(concat8, kernel53, bias53, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu53 = relu(conv53); kernel54 = variable(shape = [384, 192, 3, 3], label = 'InceptionV1/Mixed_5c/Branch_1/Conv2d_0b_3x3/kernel'); bias54 = variable(shape = [1, 384], label = 'InceptionV1/Mixed_5c/Branch_1/Conv2d_0b_3x3/bias'); conv54 = conv(relu53, kernel54, bias54, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu54 = relu(conv54); kernel55 = variable(shape = [48, 832, 1, 1], label = 'InceptionV1/Mixed_5c/Branch_2/Conv2d_0a_1x1/kernel'); bias55 = variable(shape = [1, 48], label = 'InceptionV1/Mixed_5c/Branch_2/Conv2d_0a_1x1/bias'); conv55 = conv(concat8, kernel55, bias55, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu55 = relu(conv55); kernel56 = variable(shape = [128, 48, 3, 3], label = 'InceptionV1/Mixed_5c/Branch_2/Conv2d_0b_3x3/kernel'); bias56 = variable(shape = [1, 128], label = 'InceptionV1/Mixed_5c/Branch_2/Conv2d_0b_3x3/bias'); conv56 = conv(relu55, kernel56, bias56, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu56 = relu(conv56); pool13 = max_pool(concat8, size = [1, 1, 3, 3], padding = [], border = 'ignore', stride = [1, 1, 1, 1]); kernel57 = variable(shape = [128, 832, 1, 1], label = 'InceptionV1/Mixed_5c/Branch_3/Conv2d_0b_1x1/kernel'); bias57 = variable(shape = [1, 128], label = 'InceptionV1/Mixed_5c/Branch_3/Conv2d_0b_1x1/bias'); conv57 = conv(pool13, kernel57, bias57, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu57 = relu(conv57); concat9 = concat([relu52,relu54,relu56,relu57], axis = 1); pool14 = avg_pool(concat9, size = [1, 1, 7, 7], padding = [(0, 0), (0, 0), (0, 0), (0, 0)], border = 'ignore', stride = [1, 1, 1, 1]); kernel58 = variable(shape = [1000, 1024, 1, 1], label = 'InceptionV1/Logits/Conv2d_0c_1x1/kernel'); bias58 = variable(shape = [1, 1000], label = 
'InceptionV1/Logits/Conv2d_0c_1x1/bias'); output = conv(pool14, kernel58, bias58, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); } ================================================ FILE: nnef-pyproject/examples/resnet.txt ================================================ # Copyright (c) 2017 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. version 1.0; graph resnet_v2_50( input ) -> ( output ) { input = external(shape = [1, 3, 224, 224]); kernel1 = variable(shape = [64, 3, 7, 7], label = 'resnet_v2_50/conv1/kernel'); bias1 = variable(shape = [1, 64], label = 'resnet_v2_50/conv1/bias'); conv1 = conv(input, kernel1, bias1, padding = [(3, 3), (3, 3)], border = 'constant', stride = [2, 2], dilation = [1, 1]); pool1 = max_pool(conv1, size = [1, 1, 3, 3], padding = [(0, 0), (0, 0), (0, 0), (0, 0)], border = 'ignore', stride = [1, 1, 2, 2]); beta1 = variable(shape = [1, 64], label = 'resnet_v2_50/block1/unit_1/bottleneck_v2/preact/beta'); moving_mean1 = variable(shape = [1, 64], label = 'resnet_v2_50/block1/unit_1/bottleneck_v2/preact/moving_mean'); moving_variance1 = variable(shape = [1, 64], label = 'resnet_v2_50/block1/unit_1/bottleneck_v2/preact/moving_variance'); norm1 = batch_normalization(pool1, mean = moving_mean1, variance = moving_variance1, offset = beta1, scale = 1.0, epsilon = 0.001); relu1 = relu(norm1); kernel2 = variable(shape = [256, 64, 1, 1], label = 'resnet_v2_50/block1/unit_1/bottleneck_v2/shortcut/kernel'); bias2 = 
variable(shape = [1, 256], label = 'resnet_v2_50/block1/unit_1/bottleneck_v2/shortcut/bias'); conv2 = conv(relu1, kernel2, bias2, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); kernel3 = variable(shape = [64, 64, 1, 1], label = 'resnet_v2_50/block1/unit_1/bottleneck_v2/conv1/kernel'); bias3 = variable(shape = [1, 64], label = 'resnet_v2_50/block1/unit_1/bottleneck_v2/conv1/bias'); conv3 = conv(relu1, kernel3, bias3, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu2 = relu(conv3); kernel4 = variable(shape = [64, 64, 3, 3], label = 'resnet_v2_50/block1/unit_1/bottleneck_v2/conv2/kernel'); bias4 = variable(shape = [1, 64], label = 'resnet_v2_50/block1/unit_1/bottleneck_v2/conv2/bias'); conv4 = conv(relu2, kernel4, bias4, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu3 = relu(conv4); kernel5 = variable(shape = [256, 64, 1, 1], label = 'resnet_v2_50/block1/unit_1/bottleneck_v2/conv3/kernel'); bias5 = variable(shape = [1, 256], label = 'resnet_v2_50/block1/unit_1/bottleneck_v2/conv3/bias'); conv5 = conv(relu3, kernel5, bias5, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); add1 = add(conv2, conv5); beta2 = variable(shape = [1, 256], label = 'resnet_v2_50/block1/unit_2/bottleneck_v2/preact/beta'); moving_mean2 = variable(shape = [1, 256], label = 'resnet_v2_50/block1/unit_2/bottleneck_v2/preact/moving_mean'); moving_variance2 = variable(shape = [1, 256], label = 'resnet_v2_50/block1/unit_2/bottleneck_v2/preact/moving_variance'); norm2 = batch_normalization(add1, mean = moving_mean2, variance = moving_variance2, offset = beta2, scale = 1.0, epsilon = 0.001); relu4 = relu(norm2); kernel6 = variable(shape = [64, 256, 1, 1], label = 'resnet_v2_50/block1/unit_2/bottleneck_v2/conv1/kernel'); bias6 = variable(shape = [1, 64], label = 'resnet_v2_50/block1/unit_2/bottleneck_v2/conv1/bias'); conv6 = conv(relu4, kernel6, bias6, padding = [], border = 'constant', stride = [1, 
1], dilation = [1, 1]); relu5 = relu(conv6); kernel7 = variable(shape = [64, 64, 3, 3], label = 'resnet_v2_50/block1/unit_2/bottleneck_v2/conv2/kernel'); bias7 = variable(shape = [1, 64], label = 'resnet_v2_50/block1/unit_2/bottleneck_v2/conv2/bias'); conv7 = conv(relu5, kernel7, bias7, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu6 = relu(conv7); kernel8 = variable(shape = [256, 64, 1, 1], label = 'resnet_v2_50/block1/unit_2/bottleneck_v2/conv3/kernel'); bias8 = variable(shape = [1, 256], label = 'resnet_v2_50/block1/unit_2/bottleneck_v2/conv3/bias'); conv8 = conv(relu6, kernel8, bias8, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); add2 = add(add1, conv8); beta3 = variable(shape = [1, 256], label = 'resnet_v2_50/block1/unit_3/bottleneck_v2/preact/beta'); moving_mean3 = variable(shape = [1, 256], label = 'resnet_v2_50/block1/unit_3/bottleneck_v2/preact/moving_mean'); moving_variance3 = variable(shape = [1, 256], label = 'resnet_v2_50/block1/unit_3/bottleneck_v2/preact/moving_variance'); norm3 = batch_normalization(add2, mean = moving_mean3, variance = moving_variance3, offset = beta3, scale = 1.0, epsilon = 0.001); relu7 = relu(norm3); pool2 = max_pool(add2, size = [1, 1, 1, 1], padding = [(0, 0), (0, 0), (0, 0), (0, 0)], border = 'ignore', stride = [1, 1, 2, 2]); kernel9 = variable(shape = [64, 256, 1, 1], label = 'resnet_v2_50/block1/unit_3/bottleneck_v2/conv1/kernel'); bias9 = variable(shape = [1, 64], label = 'resnet_v2_50/block1/unit_3/bottleneck_v2/conv1/bias'); conv9 = conv(relu7, kernel9, bias9, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu8 = relu(conv9); kernel10 = variable(shape = [64, 64, 3, 3], label = 'resnet_v2_50/block1/unit_3/bottleneck_v2/conv2/kernel'); bias10 = variable(shape = [1, 64], label = 'resnet_v2_50/block1/unit_3/bottleneck_v2/conv2/bias'); conv10 = conv(relu8, kernel10, bias10, padding = [(1, 1), (1, 1)], border = 'constant', stride = [2, 2], 
dilation = [1, 1]); relu9 = relu(conv10); kernel11 = variable(shape = [256, 64, 1, 1], label = 'resnet_v2_50/block1/unit_3/bottleneck_v2/conv3/kernel'); bias11 = variable(shape = [1, 256], label = 'resnet_v2_50/block1/unit_3/bottleneck_v2/conv3/bias'); conv11 = conv(relu9, kernel11, bias11, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); add3 = add(pool2, conv11); beta4 = variable(shape = [1, 256], label = 'resnet_v2_50/block2/unit_1/bottleneck_v2/preact/beta'); moving_mean4 = variable(shape = [1, 256], label = 'resnet_v2_50/block2/unit_1/bottleneck_v2/preact/moving_mean'); moving_variance4 = variable(shape = [1, 256], label = 'resnet_v2_50/block2/unit_1/bottleneck_v2/preact/moving_variance'); norm4 = batch_normalization(add3, mean = moving_mean4, variance = moving_variance4, offset = beta4, scale = 1.0, epsilon = 0.001); relu10 = relu(norm4); kernel12 = variable(shape = [512, 256, 1, 1], label = 'resnet_v2_50/block2/unit_1/bottleneck_v2/shortcut/kernel'); bias12 = variable(shape = [1, 512], label = 'resnet_v2_50/block2/unit_1/bottleneck_v2/shortcut/bias'); conv12 = conv(relu10, kernel12, bias12, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); kernel13 = variable(shape = [128, 256, 1, 1], label = 'resnet_v2_50/block2/unit_1/bottleneck_v2/conv1/kernel'); bias13 = variable(shape = [1, 128], label = 'resnet_v2_50/block2/unit_1/bottleneck_v2/conv1/bias'); conv13 = conv(relu10, kernel13, bias13, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu11 = relu(conv13); kernel14 = variable(shape = [128, 128, 3, 3], label = 'resnet_v2_50/block2/unit_1/bottleneck_v2/conv2/kernel'); bias14 = variable(shape = [1, 128], label = 'resnet_v2_50/block2/unit_1/bottleneck_v2/conv2/bias'); conv14 = conv(relu11, kernel14, bias14, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu12 = relu(conv14); kernel15 = variable(shape = [512, 128, 1, 1], label = 
'resnet_v2_50/block2/unit_1/bottleneck_v2/conv3/kernel'); bias15 = variable(shape = [1, 512], label = 'resnet_v2_50/block2/unit_1/bottleneck_v2/conv3/bias'); conv15 = conv(relu12, kernel15, bias15, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); add4 = add(conv12, conv15); beta5 = variable(shape = [1, 512], label = 'resnet_v2_50/block2/unit_2/bottleneck_v2/preact/beta'); moving_mean5 = variable(shape = [1, 512], label = 'resnet_v2_50/block2/unit_2/bottleneck_v2/preact/moving_mean'); moving_variance5 = variable(shape = [1, 512], label = 'resnet_v2_50/block2/unit_2/bottleneck_v2/preact/moving_variance'); norm5 = batch_normalization(add4, mean = moving_mean5, variance = moving_variance5, offset = beta5, scale = 1.0, epsilon = 0.001); relu13 = relu(norm5); kernel16 = variable(shape = [128, 512, 1, 1], label = 'resnet_v2_50/block2/unit_2/bottleneck_v2/conv1/kernel'); bias16 = variable(shape = [1, 128], label = 'resnet_v2_50/block2/unit_2/bottleneck_v2/conv1/bias'); conv16 = conv(relu13, kernel16, bias16, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu14 = relu(conv16); kernel17 = variable(shape = [128, 128, 3, 3], label = 'resnet_v2_50/block2/unit_2/bottleneck_v2/conv2/kernel'); bias17 = variable(shape = [1, 128], label = 'resnet_v2_50/block2/unit_2/bottleneck_v2/conv2/bias'); conv17 = conv(relu14, kernel17, bias17, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu15 = relu(conv17); kernel18 = variable(shape = [512, 128, 1, 1], label = 'resnet_v2_50/block2/unit_2/bottleneck_v2/conv3/kernel'); bias18 = variable(shape = [1, 512], label = 'resnet_v2_50/block2/unit_2/bottleneck_v2/conv3/bias'); conv18 = conv(relu15, kernel18, bias18, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); add5 = add(add4, conv18); beta6 = variable(shape = [1, 512], label = 'resnet_v2_50/block2/unit_3/bottleneck_v2/preact/beta'); moving_mean6 = variable(shape = [1, 512], label = 
'resnet_v2_50/block2/unit_3/bottleneck_v2/preact/moving_mean'); moving_variance6 = variable(shape = [1, 512], label = 'resnet_v2_50/block2/unit_3/bottleneck_v2/preact/moving_variance'); norm6 = batch_normalization(add5, mean = moving_mean6, variance = moving_variance6, offset = beta6, scale = 1.0, epsilon = 0.001); relu16 = relu(norm6); kernel19 = variable(shape = [128, 512, 1, 1], label = 'resnet_v2_50/block2/unit_3/bottleneck_v2/conv1/kernel'); bias19 = variable(shape = [1, 128], label = 'resnet_v2_50/block2/unit_3/bottleneck_v2/conv1/bias'); conv19 = conv(relu16, kernel19, bias19, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu17 = relu(conv19); kernel20 = variable(shape = [128, 128, 3, 3], label = 'resnet_v2_50/block2/unit_3/bottleneck_v2/conv2/kernel'); bias20 = variable(shape = [1, 128], label = 'resnet_v2_50/block2/unit_3/bottleneck_v2/conv2/bias'); conv20 = conv(relu17, kernel20, bias20, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu18 = relu(conv20); kernel21 = variable(shape = [512, 128, 1, 1], label = 'resnet_v2_50/block2/unit_3/bottleneck_v2/conv3/kernel'); bias21 = variable(shape = [1, 512], label = 'resnet_v2_50/block2/unit_3/bottleneck_v2/conv3/bias'); conv21 = conv(relu18, kernel21, bias21, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); add6 = add(add5, conv21); beta7 = variable(shape = [1, 512], label = 'resnet_v2_50/block2/unit_4/bottleneck_v2/preact/beta'); moving_mean7 = variable(shape = [1, 512], label = 'resnet_v2_50/block2/unit_4/bottleneck_v2/preact/moving_mean'); moving_variance7 = variable(shape = [1, 512], label = 'resnet_v2_50/block2/unit_4/bottleneck_v2/preact/moving_variance'); norm7 = batch_normalization(add6, mean = moving_mean7, variance = moving_variance7, offset = beta7, scale = 1.0, epsilon = 0.001); relu19 = relu(norm7); pool3 = max_pool(add6, size = [1, 1, 1, 1], padding = [(0, 0), (0, 0), (0, 0), (0, 0)], border = 'ignore', stride = [1, 1, 2, 
2]); kernel22 = variable(shape = [128, 512, 1, 1], label = 'resnet_v2_50/block2/unit_4/bottleneck_v2/conv1/kernel'); bias22 = variable(shape = [1, 128], label = 'resnet_v2_50/block2/unit_4/bottleneck_v2/conv1/bias'); conv22 = conv(relu19, kernel22, bias22, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu20 = relu(conv22); kernel23 = variable(shape = [128, 128, 3, 3], label = 'resnet_v2_50/block2/unit_4/bottleneck_v2/conv2/kernel'); bias23 = variable(shape = [1, 128], label = 'resnet_v2_50/block2/unit_4/bottleneck_v2/conv2/bias'); conv23 = conv(relu20, kernel23, bias23, padding = [(1, 1), (1, 1)], border = 'constant', stride = [2, 2], dilation = [1, 1]); relu21 = relu(conv23); kernel24 = variable(shape = [512, 128, 1, 1], label = 'resnet_v2_50/block2/unit_4/bottleneck_v2/conv3/kernel'); bias24 = variable(shape = [1, 512], label = 'resnet_v2_50/block2/unit_4/bottleneck_v2/conv3/bias'); conv24 = conv(relu21, kernel24, bias24, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); add7 = add(pool3, conv24); beta8 = variable(shape = [1, 512], label = 'resnet_v2_50/block3/unit_1/bottleneck_v2/preact/beta'); moving_mean8 = variable(shape = [1, 512], label = 'resnet_v2_50/block3/unit_1/bottleneck_v2/preact/moving_mean'); moving_variance8 = variable(shape = [1, 512], label = 'resnet_v2_50/block3/unit_1/bottleneck_v2/preact/moving_variance'); norm8 = batch_normalization(add7, mean = moving_mean8, variance = moving_variance8, offset = beta8, scale = 1.0, epsilon = 0.001); relu22 = relu(norm8); kernel25 = variable(shape = [1024, 512, 1, 1], label = 'resnet_v2_50/block3/unit_1/bottleneck_v2/shortcut/kernel'); bias25 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_1/bottleneck_v2/shortcut/bias'); conv25 = conv(relu22, kernel25, bias25, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); kernel26 = variable(shape = [256, 512, 1, 1], label = 'resnet_v2_50/block3/unit_1/bottleneck_v2/conv1/kernel'); 
bias26 = variable(shape = [1, 256], label = 'resnet_v2_50/block3/unit_1/bottleneck_v2/conv1/bias'); conv26 = conv(relu22, kernel26, bias26, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu23 = relu(conv26); kernel27 = variable(shape = [256, 256, 3, 3], label = 'resnet_v2_50/block3/unit_1/bottleneck_v2/conv2/kernel'); bias27 = variable(shape = [1, 256], label = 'resnet_v2_50/block3/unit_1/bottleneck_v2/conv2/bias'); conv27 = conv(relu23, kernel27, bias27, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu24 = relu(conv27); kernel28 = variable(shape = [1024, 256, 1, 1], label = 'resnet_v2_50/block3/unit_1/bottleneck_v2/conv3/kernel'); bias28 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_1/bottleneck_v2/conv3/bias'); conv28 = conv(relu24, kernel28, bias28, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); add8 = add(conv25, conv28); beta9 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_2/bottleneck_v2/preact/beta'); moving_mean9 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_2/bottleneck_v2/preact/moving_mean'); moving_variance9 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_2/bottleneck_v2/preact/moving_variance'); norm9 = batch_normalization(add8, mean = moving_mean9, variance = moving_variance9, offset = beta9, scale = 1.0, epsilon = 0.001); relu25 = relu(norm9); kernel29 = variable(shape = [256, 1024, 1, 1], label = 'resnet_v2_50/block3/unit_2/bottleneck_v2/conv1/kernel'); bias29 = variable(shape = [1, 256], label = 'resnet_v2_50/block3/unit_2/bottleneck_v2/conv1/bias'); conv29 = conv(relu25, kernel29, bias29, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu26 = relu(conv29); kernel30 = variable(shape = [256, 256, 3, 3], label = 'resnet_v2_50/block3/unit_2/bottleneck_v2/conv2/kernel'); bias30 = variable(shape = [1, 256], label = 'resnet_v2_50/block3/unit_2/bottleneck_v2/conv2/bias'); conv30 = 
conv(relu26, kernel30, bias30, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu27 = relu(conv30); kernel31 = variable(shape = [1024, 256, 1, 1], label = 'resnet_v2_50/block3/unit_2/bottleneck_v2/conv3/kernel'); bias31 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_2/bottleneck_v2/conv3/bias'); conv31 = conv(relu27, kernel31, bias31, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); add9 = add(add8, conv31); beta10 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_3/bottleneck_v2/preact/beta'); moving_mean10 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_3/bottleneck_v2/preact/moving_mean'); moving_variance10 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_3/bottleneck_v2/preact/moving_variance'); norm10 = batch_normalization(add9, mean = moving_mean10, variance = moving_variance10, offset = beta10, scale = 1.0, epsilon = 0.001); relu28 = relu(norm10); kernel32 = variable(shape = [256, 1024, 1, 1], label = 'resnet_v2_50/block3/unit_3/bottleneck_v2/conv1/kernel'); bias32 = variable(shape = [1, 256], label = 'resnet_v2_50/block3/unit_3/bottleneck_v2/conv1/bias'); conv32 = conv(relu28, kernel32, bias32, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu29 = relu(conv32); kernel33 = variable(shape = [256, 256, 3, 3], label = 'resnet_v2_50/block3/unit_3/bottleneck_v2/conv2/kernel'); bias33 = variable(shape = [1, 256], label = 'resnet_v2_50/block3/unit_3/bottleneck_v2/conv2/bias'); conv33 = conv(relu29, kernel33, bias33, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu30 = relu(conv33); kernel34 = variable(shape = [1024, 256, 1, 1], label = 'resnet_v2_50/block3/unit_3/bottleneck_v2/conv3/kernel'); bias34 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_3/bottleneck_v2/conv3/bias'); conv34 = conv(relu30, kernel34, bias34, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); 
add10 = add(add9, conv34); beta11 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_4/bottleneck_v2/preact/beta'); moving_mean11 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_4/bottleneck_v2/preact/moving_mean'); moving_variance11 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_4/bottleneck_v2/preact/moving_variance'); norm11 = batch_normalization(add10, mean = moving_mean11, variance = moving_variance11, offset = beta11, scale = 1.0, epsilon = 0.001); relu31 = relu(norm11); kernel35 = variable(shape = [256, 1024, 1, 1], label = 'resnet_v2_50/block3/unit_4/bottleneck_v2/conv1/kernel'); bias35 = variable(shape = [1, 256], label = 'resnet_v2_50/block3/unit_4/bottleneck_v2/conv1/bias'); conv35 = conv(relu31, kernel35, bias35, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu32 = relu(conv35); kernel36 = variable(shape = [256, 256, 3, 3], label = 'resnet_v2_50/block3/unit_4/bottleneck_v2/conv2/kernel'); bias36 = variable(shape = [1, 256], label = 'resnet_v2_50/block3/unit_4/bottleneck_v2/conv2/bias'); conv36 = conv(relu32, kernel36, bias36, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu33 = relu(conv36); kernel37 = variable(shape = [1024, 256, 1, 1], label = 'resnet_v2_50/block3/unit_4/bottleneck_v2/conv3/kernel'); bias37 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_4/bottleneck_v2/conv3/bias'); conv37 = conv(relu33, kernel37, bias37, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); add11 = add(add10, conv37); beta12 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_5/bottleneck_v2/preact/beta'); moving_mean12 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_5/bottleneck_v2/preact/moving_mean'); moving_variance12 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_5/bottleneck_v2/preact/moving_variance'); norm12 = batch_normalization(add11, mean = moving_mean12, variance = 
moving_variance12, offset = beta12, scale = 1.0, epsilon = 0.001); relu34 = relu(norm12); kernel38 = variable(shape = [256, 1024, 1, 1], label = 'resnet_v2_50/block3/unit_5/bottleneck_v2/conv1/kernel'); bias38 = variable(shape = [1, 256], label = 'resnet_v2_50/block3/unit_5/bottleneck_v2/conv1/bias'); conv38 = conv(relu34, kernel38, bias38, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu35 = relu(conv38); kernel39 = variable(shape = [256, 256, 3, 3], label = 'resnet_v2_50/block3/unit_5/bottleneck_v2/conv2/kernel'); bias39 = variable(shape = [1, 256], label = 'resnet_v2_50/block3/unit_5/bottleneck_v2/conv2/bias'); conv39 = conv(relu35, kernel39, bias39, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu36 = relu(conv39); kernel40 = variable(shape = [1024, 256, 1, 1], label = 'resnet_v2_50/block3/unit_5/bottleneck_v2/conv3/kernel'); bias40 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_5/bottleneck_v2/conv3/bias'); conv40 = conv(relu36, kernel40, bias40, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); add12 = add(add11, conv40); beta13 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_6/bottleneck_v2/preact/beta'); moving_mean13 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_6/bottleneck_v2/preact/moving_mean'); moving_variance13 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_6/bottleneck_v2/preact/moving_variance'); norm13 = batch_normalization(add12, mean = moving_mean13, variance = moving_variance13, offset = beta13, scale = 1.0, epsilon = 0.001); relu37 = relu(norm13); pool4 = max_pool(add12, size = [1, 1, 1, 1], padding = [(0, 0), (0, 0), (0, 0), (0, 0)], border = 'ignore', stride = [1, 1, 2, 2]); kernel41 = variable(shape = [256, 1024, 1, 1], label = 'resnet_v2_50/block3/unit_6/bottleneck_v2/conv1/kernel'); bias41 = variable(shape = [1, 256], label = 'resnet_v2_50/block3/unit_6/bottleneck_v2/conv1/bias'); conv41 = 
conv(relu37, kernel41, bias41, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu38 = relu(conv41); kernel42 = variable(shape = [256, 256, 3, 3], label = 'resnet_v2_50/block3/unit_6/bottleneck_v2/conv2/kernel'); bias42 = variable(shape = [1, 256], label = 'resnet_v2_50/block3/unit_6/bottleneck_v2/conv2/bias'); conv42 = conv(relu38, kernel42, bias42, padding = [(1, 1), (1, 1)], border = 'constant', stride = [2, 2], dilation = [1, 1]); relu39 = relu(conv42); kernel43 = variable(shape = [1024, 256, 1, 1], label = 'resnet_v2_50/block3/unit_6/bottleneck_v2/conv3/kernel'); bias43 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_6/bottleneck_v2/conv3/bias'); conv43 = conv(relu39, kernel43, bias43, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); add13 = add(pool4, conv43); beta14 = variable(shape = [1, 1024], label = 'resnet_v2_50/block4/unit_1/bottleneck_v2/preact/beta'); moving_mean14 = variable(shape = [1, 1024], label = 'resnet_v2_50/block4/unit_1/bottleneck_v2/preact/moving_mean'); moving_variance14 = variable(shape = [1, 1024], label = 'resnet_v2_50/block4/unit_1/bottleneck_v2/preact/moving_variance'); norm14 = batch_normalization(add13, mean = moving_mean14, variance = moving_variance14, offset = beta14, scale = 1.0, epsilon = 0.001); relu40 = relu(norm14); kernel44 = variable(shape = [2048, 1024, 1, 1], label = 'resnet_v2_50/block4/unit_1/bottleneck_v2/shortcut/kernel'); bias44 = variable(shape = [1, 2048], label = 'resnet_v2_50/block4/unit_1/bottleneck_v2/shortcut/bias'); conv44 = conv(relu40, kernel44, bias44, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); kernel45 = variable(shape = [512, 1024, 1, 1], label = 'resnet_v2_50/block4/unit_1/bottleneck_v2/conv1/kernel'); bias45 = variable(shape = [1, 512], label = 'resnet_v2_50/block4/unit_1/bottleneck_v2/conv1/bias'); conv45 = conv(relu40, kernel45, bias45, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); 
relu41 = relu(conv45); kernel46 = variable(shape = [512, 512, 3, 3], label = 'resnet_v2_50/block4/unit_1/bottleneck_v2/conv2/kernel'); bias46 = variable(shape = [1, 512], label = 'resnet_v2_50/block4/unit_1/bottleneck_v2/conv2/bias'); conv46 = conv(relu41, kernel46, bias46, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu42 = relu(conv46); kernel47 = variable(shape = [2048, 512, 1, 1], label = 'resnet_v2_50/block4/unit_1/bottleneck_v2/conv3/kernel'); bias47 = variable(shape = [1, 2048], label = 'resnet_v2_50/block4/unit_1/bottleneck_v2/conv3/bias'); conv47 = conv(relu42, kernel47, bias47, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); add14 = add(conv44, conv47); beta15 = variable(shape = [1, 2048], label = 'resnet_v2_50/block4/unit_2/bottleneck_v2/preact/beta'); moving_mean15 = variable(shape = [1, 2048], label = 'resnet_v2_50/block4/unit_2/bottleneck_v2/preact/moving_mean'); moving_variance15 = variable(shape = [1, 2048], label = 'resnet_v2_50/block4/unit_2/bottleneck_v2/preact/moving_variance'); norm15 = batch_normalization(add14, mean = moving_mean15, variance = moving_variance15, offset = beta15, scale = 1.0, epsilon = 0.001); relu43 = relu(norm15); kernel48 = variable(shape = [512, 2048, 1, 1], label = 'resnet_v2_50/block4/unit_2/bottleneck_v2/conv1/kernel'); bias48 = variable(shape = [1, 512], label = 'resnet_v2_50/block4/unit_2/bottleneck_v2/conv1/bias'); conv48 = conv(relu43, kernel48, bias48, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu44 = relu(conv48); kernel49 = variable(shape = [512, 512, 3, 3], label = 'resnet_v2_50/block4/unit_2/bottleneck_v2/conv2/kernel'); bias49 = variable(shape = [1, 512], label = 'resnet_v2_50/block4/unit_2/bottleneck_v2/conv2/bias'); conv49 = conv(relu44, kernel49, bias49, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu45 = relu(conv49); kernel50 = variable(shape = [2048, 512, 1, 1], label = 
'resnet_v2_50/block4/unit_2/bottleneck_v2/conv3/kernel'); bias50 = variable(shape = [1, 2048], label = 'resnet_v2_50/block4/unit_2/bottleneck_v2/conv3/bias'); conv50 = conv(relu45, kernel50, bias50, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); add15 = add(add14, conv50); beta16 = variable(shape = [1, 2048], label = 'resnet_v2_50/block4/unit_3/bottleneck_v2/preact/beta'); moving_mean16 = variable(shape = [1, 2048], label = 'resnet_v2_50/block4/unit_3/bottleneck_v2/preact/moving_mean'); moving_variance16 = variable(shape = [1, 2048], label = 'resnet_v2_50/block4/unit_3/bottleneck_v2/preact/moving_variance'); norm16 = batch_normalization(add15, mean = moving_mean16, variance = moving_variance16, offset = beta16, scale = 1.0, epsilon = 0.001); relu46 = relu(norm16); kernel51 = variable(shape = [512, 2048, 1, 1], label = 'resnet_v2_50/block4/unit_3/bottleneck_v2/conv1/kernel'); bias51 = variable(shape = [1, 512], label = 'resnet_v2_50/block4/unit_3/bottleneck_v2/conv1/bias'); conv51 = conv(relu46, kernel51, bias51, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu47 = relu(conv51); kernel52 = variable(shape = [512, 512, 3, 3], label = 'resnet_v2_50/block4/unit_3/bottleneck_v2/conv2/kernel'); bias52 = variable(shape = [1, 512], label = 'resnet_v2_50/block4/unit_3/bottleneck_v2/conv2/bias'); conv52 = conv(relu47, kernel52, bias52, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu48 = relu(conv52); kernel53 = variable(shape = [2048, 512, 1, 1], label = 'resnet_v2_50/block4/unit_3/bottleneck_v2/conv3/kernel'); bias53 = variable(shape = [1, 2048], label = 'resnet_v2_50/block4/unit_3/bottleneck_v2/conv3/bias'); conv53 = conv(relu48, kernel53, bias53, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); add16 = add(add15, conv53); beta17 = variable(shape = [1, 2048], label = 'resnet_v2_50/postnorm/beta'); moving_mean17 = variable(shape = [1, 2048], label = 
'resnet_v2_50/postnorm/moving_mean'); moving_variance17 = variable(shape = [1, 2048], label = 'resnet_v2_50/postnorm/moving_variance'); norm17 = batch_normalization(add16, mean = moving_mean17, variance = moving_variance17, offset = beta17, scale = 1.0, epsilon = 0.001); relu49 = relu(norm17); reduce1 = mean_reduce(relu49, axes = [2, 3]); kernel54 = variable(shape = [1000, 2048, 1, 1], label = 'resnet_v2_50/logits/kernel'); bias54 = variable(shape = [1, 1000], label = 'resnet_v2_50/logits/bias'); output = conv(reduce1, kernel54, bias54, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); } ================================================ FILE: nnef-pyproject/examples/samples/sample.py ================================================ # Copyright (c) 2017 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import nnef graph = nnef.parse_string( """ version 1.0; graph Net( input ) -> ( output ) { input = external(shape = [1,3,224,224]); filter = variable(shape = [32,3,5,5], label = 'conv/filter'); output = conv(input, filter); } """ ) print(nnef.format_graph(graph.name, graph.inputs, graph.outputs, graph.operations, graph.tensors)) ================================================ FILE: nnef-pyproject/examples/samples/sample_ext.py ================================================ # Copyright (c) 2017 The Khronos Group Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import nnef def shuffle_shape(input, groups): assert input[1] % groups == 0, "input channels ({}) is not divisible by groups ({})".format(input[1], groups) return input graph = nnef.parse_string( """ version 1.0; extension KHR_enable_fragment_definitions; fragment shuffle( input: tensor, groups: integer ) -> ( output: tensor ); graph Net( input ) -> ( output ) { input = external(shape = [1,3,224,224]); filter = variable(shape = [32,3,5,5], label = 'conv/filter'); conv = conv(input, filter); output = shuffle(conv, groups = 4); } """ ) nnef.infer_shapes(graph, custom_shapes={'shuffle': shuffle_shape}) print(nnef.format_graph(graph.name, graph.inputs, graph.outputs, graph.operations, graph.tensors)) ================================================ FILE: nnef-pyproject/examples/samples/sample_gen.py ================================================ # Copyright (c) 2017 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import nnef import numpy as np from collections import OrderedDict input = nnef.Tensor('input', dtype='scalar') filter = nnef.Tensor('filter', dtype='scalar', data=np.random.randn(32,3,5,5)) output = nnef.Tensor('output', dtype='scalar') external = nnef.Operation('external', attribs={'shape': [1,3,224,224]}, inputs=OrderedDict(), outputs=OrderedDict([('output', nnef.Identifier('input'))])) variable = nnef.Operation('variable', attribs={'shape': [32,3,5,5], 'label': 'conv/filter'}, inputs=OrderedDict(), outputs=OrderedDict([('output', nnef.Identifier('filter'))])) conv = nnef.Operation('conv', attribs={}, inputs=OrderedDict([('input', nnef.Identifier('input')), ('filter', nnef.Identifier('filter'))]), outputs=OrderedDict([('output', nnef.Identifier('output'))])) graph = nnef.Graph('G', inputs=['input'], outputs=['output'], operations=[external, variable, conv], tensors={'input': input, 'filter': filter, 'output': output}) nnef.save_graph(graph, 'G', annotate_shapes=True) ================================================ FILE: nnef-pyproject/examples/vgg.txt ================================================ # Copyright (c) 2017 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
version 1.0; graph vgg_19( input ) -> ( output ) { input = external(shape = [1, 3, 224, 224]); kernel1 = variable(shape = [64, 3, 3, 3], label = 'vgg_19/conv1/conv1_1/kernel'); bias1 = variable(shape = [1, 64], label = 'vgg_19/conv1/conv1_1/bias'); conv1 = conv(input, kernel1, bias1, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu1 = relu(conv1); kernel2 = variable(shape = [64, 64, 3, 3], label = 'vgg_19/conv1/conv1_2/kernel'); bias2 = variable(shape = [1, 64], label = 'vgg_19/conv1/conv1_2/bias'); conv2 = conv(relu1, kernel2, bias2, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu2 = relu(conv2); pool1 = max_pool(relu2, size = [1, 1, 2, 2], padding = [(0, 0), (0, 0), (0, 0), (0, 0)], border = 'ignore', stride = [1, 1, 2, 2]); kernel3 = variable(shape = [128, 64, 3, 3], label = 'vgg_19/conv2/conv2_1/kernel'); bias3 = variable(shape = [1, 128], label = 'vgg_19/conv2/conv2_1/bias'); conv3 = conv(pool1, kernel3, bias3, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu3 = relu(conv3); kernel4 = variable(shape = [128, 128, 3, 3], label = 'vgg_19/conv2/conv2_2/kernel'); bias4 = variable(shape = [1, 128], label = 'vgg_19/conv2/conv2_2/bias'); conv4 = conv(relu3, kernel4, bias4, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu4 = relu(conv4); pool2 = max_pool(relu4, size = [1, 1, 2, 2], padding = [(0, 0), (0, 0), (0, 0), (0, 0)], border = 'ignore', stride = [1, 1, 2, 2]); kernel5 = variable(shape = [256, 128, 3, 3], label = 'vgg_19/conv3/conv3_1/kernel'); bias5 = variable(shape = [1, 256], label = 'vgg_19/conv3/conv3_1/bias'); conv5 = conv(pool2, kernel5, bias5, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu5 = relu(conv5); kernel6 = variable(shape = [256, 256, 3, 3], label = 'vgg_19/conv3/conv3_2/kernel'); bias6 = variable(shape = [1, 256], label = 'vgg_19/conv3/conv3_2/bias'); conv6 = conv(relu5, kernel6, bias6, padding = [], 
border = 'constant', stride = [1, 1], dilation = [1, 1]); relu6 = relu(conv6); kernel7 = variable(shape = [256, 256, 3, 3], label = 'vgg_19/conv3/conv3_3/kernel'); bias7 = variable(shape = [1, 256], label = 'vgg_19/conv3/conv3_3/bias'); conv7 = conv(relu6, kernel7, bias7, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu7 = relu(conv7); kernel8 = variable(shape = [256, 256, 3, 3], label = 'vgg_19/conv3/conv3_4/kernel'); bias8 = variable(shape = [1, 256], label = 'vgg_19/conv3/conv3_4/bias'); conv8 = conv(relu7, kernel8, bias8, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu8 = relu(conv8); pool3 = max_pool(relu8, size = [1, 1, 2, 2], padding = [(0, 0), (0, 0), (0, 0), (0, 0)], border = 'ignore', stride = [1, 1, 2, 2]); kernel9 = variable(shape = [512, 256, 3, 3], label = 'vgg_19/conv4/conv4_1/kernel'); bias9 = variable(shape = [1, 512], label = 'vgg_19/conv4/conv4_1/bias'); conv9 = conv(pool3, kernel9, bias9, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu9 = relu(conv9); kernel10 = variable(shape = [512, 512, 3, 3], label = 'vgg_19/conv4/conv4_2/kernel'); bias10 = variable(shape = [1, 512], label = 'vgg_19/conv4/conv4_2/bias'); conv10 = conv(relu9, kernel10, bias10, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu10 = relu(conv10); kernel11 = variable(shape = [512, 512, 3, 3], label = 'vgg_19/conv4/conv4_3/kernel'); bias11 = variable(shape = [1, 512], label = 'vgg_19/conv4/conv4_3/bias'); conv11 = conv(relu10, kernel11, bias11, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu11 = relu(conv11); kernel12 = variable(shape = [512, 512, 3, 3], label = 'vgg_19/conv4/conv4_4/kernel'); bias12 = variable(shape = [1, 512], label = 'vgg_19/conv4/conv4_4/bias'); conv12 = conv(relu11, kernel12, bias12, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu12 = relu(conv12); pool4 = max_pool(relu12, size = [1, 1, 2, 
2], padding = [(0, 0), (0, 0), (0, 0), (0, 0)], border = 'ignore', stride = [1, 1, 2, 2]); kernel13 = variable(shape = [512, 512, 3, 3], label = 'vgg_19/conv5/conv5_1/kernel'); bias13 = variable(shape = [1, 512], label = 'vgg_19/conv5/conv5_1/bias'); conv13 = conv(pool4, kernel13, bias13, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu13 = relu(conv13); kernel14 = variable(shape = [512, 512, 3, 3], label = 'vgg_19/conv5/conv5_2/kernel'); bias14 = variable(shape = [1, 512], label = 'vgg_19/conv5/conv5_2/bias'); conv14 = conv(relu13, kernel14, bias14, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu14 = relu(conv14); kernel15 = variable(shape = [512, 512, 3, 3], label = 'vgg_19/conv5/conv5_3/kernel'); bias15 = variable(shape = [1, 512], label = 'vgg_19/conv5/conv5_3/bias'); conv15 = conv(relu14, kernel15, bias15, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu15 = relu(conv15); kernel16 = variable(shape = [512, 512, 3, 3], label = 'vgg_19/conv5/conv5_4/kernel'); bias16 = variable(shape = [1, 512], label = 'vgg_19/conv5/conv5_4/bias'); conv16 = conv(relu15, kernel16, bias16, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu16 = relu(conv16); pool5 = max_pool(relu16, size = [1, 1, 2, 2], padding = [(0, 0), (0, 0), (0, 0), (0, 0)], border = 'ignore', stride = [1, 1, 2, 2]); kernel17 = variable(shape = [4096, 512, 7, 7], label = 'vgg_19/fc6/kernel'); bias17 = variable(shape = [1, 4096], label = 'vgg_19/fc6/bias'); conv17 = conv(pool5, kernel17, bias17, padding = [(0, 0), (0, 0)], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu17 = relu(conv17); kernel18 = variable(shape = [4096, 4096, 1, 1], label = 'vgg_19/fc7/kernel'); bias18 = variable(shape = [1, 4096], label = 'vgg_19/fc7/bias'); conv18 = conv(relu17, kernel18, bias18, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); relu18 = relu(conv18); kernel19 = variable(shape = 
[1000, 4096, 1, 1], label = 'vgg_19/fc8/kernel'); bias19 = variable(shape = [1, 1000], label = 'vgg_19/fc8/bias'); output = conv(relu18, kernel19, bias19, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]); } ================================================ FILE: nnef-pyproject/nnef/__init__.py ================================================ # Copyright (c) 2017 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import _nnef from .parser import * from .printer import * from .binary import read_tensor, write_tensor from .shapes import infer_shapes, _StandardShapeFuncs import os Identifier = _nnef.Identifier # subclass of str Error = _nnef.Error # subclass of exception Graph = _nnef.Graph # namedtuple('Graph', ['name': str, 'tensors': typing.Dict[str, Tensor], 'operations': typing.List[Operation], # 'inputs': typing.List[str], 'outputs': typing.List['str']]) Tensor = _nnef.Tensor # namedtuple('Tensor', ['name': str, 'dtype': str, 'shape': typing.List[int], 'data': numpy.ndarray, # 'quantization': Dict[str, object]]) Operation = _nnef.Operation # namedtuple('Operation', ['name': str, 'attribs': OrderedDict[str, object], 'inputs': OrderedDict[str, object], # 'outputs': OrderedDict[str, object], 'dtype': str]) Tensor.__new__.__defaults__ = (None, None, None) Operation.__new__.__defaults__ = (None,) StandardOperations = set(_StandardShapeFuncs.keys()) def load_graph(path, stdlib=None, lowered=None, load_variables=True): if os.path.isfile(path): 
return parse_file(path, stdlib=stdlib, lowered=lowered) graph_fn = os.path.join(path, 'graph.nnef') quant_fn = os.path.join(path, 'graph.quant') graph = parse_file(graph_fn, quant_fn if os.path.isfile(quant_fn) else None, stdlib=stdlib, lowered=lowered) if load_variables: for operation in graph.operations: if operation.name == 'variable': variable_filename = operation.attribs['label'] + '.dat' if variable_filename.startswith('/'): variable_filename = variable_filename[1:] variable_filename = os.path.join(path, variable_filename) tensor_name = operation.outputs['output'] with open(variable_filename) as variable_file: data = read_tensor(variable_file) data_shape = list(data.shape) shape = operation.attribs['shape'] if data_shape != shape: raise _nnef.Error('shape {} in variable file does not match shape {} defined in network structure' .format(data_shape, shape)) tensor = graph.tensors[tensor_name] graph.tensors[tensor_name] = _nnef.Tensor(tensor.name, tensor.dtype, data_shape, data, tensor.quantization) return graph def save_graph(graph, path, annotate_shapes=False): if os.path.exists(path): raise RuntimeError("folder already exists: '{}'".format(path)) os.makedirs(path) text = format_graph(graph.name, graph.inputs, graph.outputs, graph.operations, graph.tensors, annotate_shapes=annotate_shapes) with open(os.path.join(path, 'graph.nnef'), mode='w') as file: file.write('version 1.0;\n\n') file.write(text) for operation in graph.operations: if operation.name == 'variable': variable_filename = operation.attribs['label'] + '.dat' if variable_filename.startswith('/'): variable_filename = variable_filename[1:] variable_filename = os.path.join(path, variable_filename) os.makedirs(os.path.split(variable_filename)[0], exist_ok=True) tensor_name = operation.outputs['output'] tensor = graph.tensors[tensor_name] if tensor.data is not None: with open(variable_filename, 'wb') as variable_file: write_tensor(variable_file, tensor.data, quantized=bool(tensor.quantization)) class 
Session: def __init__(self, path, stdlib=None, lowered=None): self._handle = _nnef.create_session(path, stdlib=stdlib, lowered=lowered) def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): _nnef.cleanup_session(self._handle) def __call__(self, *inputs): return _nnef.execute_session(self._handle, tuple(inputs)) ================================================ FILE: nnef-pyproject/nnef/binary.py ================================================ # Copyright (c) 2017 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import os import sys import numpy as np class ItemType: FLOAT = 0 UINT = 1 QUINT = 2 QINT = 3 INT = 4 BOOL = 5 def _numpy_dtype_split(dtype): splits = { np.float16: (ItemType.FLOAT, 16), np.float32: (ItemType.FLOAT, 32), np.float64: (ItemType.FLOAT, 64), np.int8: (ItemType.INT, 8), np.uint8: (ItemType.UINT, 8), np.int16: (ItemType.INT, 16), np.uint16: (ItemType.UINT, 16), np.int32: (ItemType.INT, 32), np.uint32: (ItemType.UINT, 32), np.int64: (ItemType.INT, 64), np.uint64: (ItemType.UINT, 64), np.bool_: (ItemType.BOOL, 1), } split = splits.get(dtype.type) if split is None: raise TypeError('unsupported tensor dtype: ' + str(dtype)) return split def _numpy_dtype_make(item_type, bits): dtypes = { (ItemType.FLOAT, 16): np.float16, (ItemType.FLOAT, 32): np.float32, (ItemType.FLOAT, 64): np.float64, (ItemType.INT, 8): np.int8, (ItemType.INT, 16): np.int16, (ItemType.INT, 32): np.int32, (ItemType.INT, 64): np.int64, (ItemType.UINT, 8): np.uint8, (ItemType.UINT, 16): np.uint16, (ItemType.UINT, 32): np.uint32, (ItemType.UINT, 64): np.uint64, (ItemType.QINT, 8): np.int8, (ItemType.QINT, 16): np.int16, (ItemType.QINT, 32): np.int32, (ItemType.QINT, 64): np.int64, (ItemType.QUINT, 8): np.uint8, (ItemType.QUINT, 16): np.uint16, (ItemType.QUINT, 32): np.uint32, (ItemType.QUINT, 64): np.uint64, (ItemType.BOOL, 1): np.bool_, } dtype = dtypes.get((item_type, bits)) if dtype is None: raise ValueError('unsupported combination of item type ({}) and bits per item ({})'.format(item_type, bits)) return dtype MaxTensorRank = 8 def _rank_of(shape): rank = len(shape) while rank > 1 and shape[rank - 1] == 1: rank -= 1 return rank _is_little_endian = sys.byteorder == 'little' def _tofile(data, file): if not _is_little_endian and data.dtype != np.uint8 and data.dtype != np.int8: data = data.byteswap() if file.seekable(): data.tofile(file) else: file.write(data.tobytes()) def _fromfile(file, dtype, count): if file.seekable(): data = np.fromfile(file, dtype, count) else: data = 
np.frombuffer(file.read(count * np.dtype(dtype).itemsize), dtype, count) if not _is_little_endian and data.dtype != np.uint8 and data.dtype != np.int8: data = data.byteswap() return data def write_tensor(file, tensor, quantized=False, version=(1, 0)): if isinstance(file, str): raise ValueError('file parameter must be a file object not a file name') _tofile(np.asarray([0x4E, 0xEF, version[0], version[1]], dtype=np.uint8), file) item_type, bits = _numpy_dtype_split(tensor.dtype) if quantized: if item_type == ItemType.INT: item_type = ItemType.QINT elif item_type == ItemType.UINT: item_type = ItemType.QUINT else: raise ValueError("invalid tensor dtype '{}' for quantized tensor".format(tensor.dtype)) count = int(np.prod(tensor.shape)) data_length = (count + 7) // 8 if bits == 1 else count * (bits // 8) _tofile(np.asarray([data_length, tensor.ndim], dtype=np.uint32), file) if tensor.ndim > MaxTensorRank: raise ValueError('tensor rank exceeds maximum possible value of {}'.format(MaxTensorRank)) _tofile(np.asarray(tensor.shape, dtype=np.uint32), file) _tofile(np.asarray([0] * (MaxTensorRank - tensor.ndim), dtype=np.uint32), file) _tofile(np.asarray([bits, item_type], dtype=np.uint32), file) _tofile(np.asarray([0] * 19, dtype=np.uint32), file) data = np.packbits(tensor) if bits == 1 else tensor _tofile(data, file) def read_tensor(file, return_quantization=False): if isinstance(file, str): raise ValueError('file parameter must be a file object not a file name') [magic1, magic2, major, minor] = _fromfile(file, dtype=np.uint8, count=4) if magic1 != 0x4E or magic2 != 0xEF: raise ValueError('not a valid NNEF file') if major > 1 or minor > 0: raise ValueError('unsupported file version') [data_length, rank] = _fromfile(file, dtype=np.uint32, count=2) if file.seekable(): header_size = 128 file_size = os.fstat(file.fileno()).st_size if file_size != header_size + data_length: raise ValueError('invalid tensor file; size does not match header info') if rank > MaxTensorRank: raise 
ValueError('tensor rank exceeds maximum possible value of {}'.format(MaxTensorRank)) shape = _fromfile(file, dtype=np.uint32, count=MaxTensorRank) shape = shape[:rank] [bits, item_type] = _fromfile(file, dtype=np.uint32, count=2) _reserved = _fromfile(file, dtype=np.uint32, count=19) if item_type == ItemType.UINT and _reserved[0] != 0: item_type = ItemType.INT quantized = item_type == ItemType.QINT or item_type == ItemType.QUINT count = int(np.prod(shape)) if bits == 1: byte_count = int((count + 7) // 8) data = _fromfile(file, dtype=np.uint8, count=byte_count) if len(data) != byte_count: raise ValueError('could not read tensor data') data = np.unpackbits(data).astype(bool)[:count] else: data = _fromfile(file, dtype=_numpy_dtype_make(item_type, bits), count=count) if len(data) != count: raise ValueError('could not read tensor data') tensor = data.reshape(shape) return (tensor, quantized) if return_quantization else tensor def _write_tensor_provisional(file, tensor, version=(1, 0)): _tofile(np.asarray([0x4E, 0xEF, version[0], version[1]], dtype=np.uint8), file) header_length = 4 + 4 + (tensor.ndim + 1) * 4 + 4 _tofile(np.asarray([header_length], dtype=np.uint32), file) _tofile(np.asarray([tensor.ndim], dtype=np.uint32), file) _tofile(np.asarray(tensor.shape, dtype=np.uint32), file) dtype, bits = _numpy_dtype_split(tensor.dtype) _tofile(np.asarray([dtype, bits], dtype=np.uint8), file) _tofile(np.asarray([0], dtype=np.uint16), file) _tofile(tensor, file) def _read_tensor_provisional(file): [magic1, magic2, major, minor] = _fromfile(file, dtype=np.uint8, count=4) if magic1 != 0x4E or magic2 != 0xEF: raise ValueError('not a valid NNEF file') if major > 1 or minor > 0: raise ValueError('unsupported file version') [_header_length] = _fromfile(file, dtype=np.uint32, count=1) [rank] = _fromfile(file, dtype=np.uint32, count=1) shape = _fromfile(file, dtype=np.uint32, count=rank) [code, bits] = _fromfile(file, dtype=np.uint8, count=2) [qlen] = _fromfile(file, dtype=np.uint16, 
count=1) assert (code == 0) assert (bits == 32) assert (qlen == 0) return _fromfile(file, dtype=np.float32, count=int(np.prod(shape))).reshape(shape) ================================================ FILE: nnef-pyproject/nnef/cpp/CMakeLists.txt ================================================ # Copyright (c) 2017 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. cmake_minimum_required(VERSION 3.0) project(nnef CXX) # build information message(STATUS "Build Configuration: ${CMAKE_BUILD_TYPE}") message(STATUS "Build executables in: ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}") # nnef library add_library(${PROJECT_NAME} include/cnnef.h include/nnef.h include/nnef/common/binary.h include/nnef/common/dictionary.h include/nnef/common/error.h include/nnef/common/lexer.h include/nnef/common/parser.h include/nnef/common/prototype.h include/nnef/common/shapes.h include/nnef/common/typespec.h include/nnef/common/typeutils.h include/nnef/common/value.h include/nnef/comp/comp_parser.h include/nnef/comp/evaluation.h include/nnef/comp/expression.h include/nnef/comp/fragment.h include/nnef/comp/stdlib_source.h include/nnef/flat/flat_parser.h include/nnef/flat/quant_parser.h include/nnef/flat/stdlib_protos.h src/nnef.cpp src/cnnef.cpp ) # build interface include dir is used when this cmake is included into # a larger project # install interface include dir will be put into the generated cmake config file # during install step target_include_directories(${PROJECT_NAME} PUBLIC $ PUBLIC $) 
# require C++11 and mark debug builds with a _d suffix
set_target_properties(${PROJECT_NAME} PROPERTIES CXX_STANDARD 11)
set_target_properties(${PROJECT_NAME} PROPERTIES DEBUG_POSTFIX _d)

target_link_libraries(${PROJECT_NAME})

# install the library
install(TARGETS ${PROJECT_NAME} EXPORT ${PROJECT_NAME}
        ARCHIVE DESTINATION lib
        LIBRARY DESTINATION lib
        RUNTIME DESTINATION bin)
# then the headers
install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/include DESTINATION .)
# generate and install cmake config file for find_package
install(EXPORT ${PROJECT_NAME} DESTINATION lib/cmake/${PROJECT_NAME})
# generate an auxiliary config file also needed by find_package
# it just includes the previously generated nnef.cmake
file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake "include(\${CMAKE_CURRENT_LIST_DIR}/${PROJECT_NAME}.cmake)")
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake DESTINATION lib/cmake/${PROJECT_NAME})


================================================
FILE: nnef-pyproject/nnef/cpp/include/cnnef.h
================================================

/*
 * Copyright (c) 2017 The Khronos Group Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/

/* C API wrapper around the C++ NNEF parser/runtime: opaque graph and tensor
   handles plus load/query/execute entry points, exported for dynamic linking */

#ifndef _CNNEF_H_
#define _CNNEF_H_

/* NOTE(review): the include target was lost in extraction (bare '#include');
   confirm against the original header */
#include

#ifdef __cplusplus
extern "C" {
#endif

#ifdef __cplusplus
#if _WIN32
#define EXPORTDLL extern "C" __declspec(dllexport)
#else
#define EXPORTDLL extern "C"
#endif
#else // __cplusplus
#if _WIN32
#define EXPORTDLL __declspec(dllexport)
#else
#define EXPORTDLL
#endif
#endif // __cplusplus

/* opaque handles to a parsed NNEF graph and to a tensor inside it */
typedef void* nnef_graph_t;
typedef void* nnef_tensor_t;

/*
 * Load NNEF graph from file
 *
 * @param path: the path to the NNEF model folder
 * @param error: the string to store the error message if any
 *
 * @return NNEF graph
 */
EXPORTDLL nnef_graph_t nnef_graph_load( const char* path, char *error );

/*
 * Copy an NNEF graph
 *
 * @param graph: NNEF graph
 *
 * @return the copy of NNEF graph
 */
EXPORTDLL nnef_graph_t nnef_graph_copy( nnef_graph_t graph );

/*
 * Release NNEF graph
 *
 * @param graph: NNEF graph
 */
EXPORTDLL void nnef_graph_release( nnef_graph_t graph );

/*
 * Perform shape inference on the graph
 *
 * @param graph: the graph object
 * @param error: the string to store the error message if any
 *
 * @return true if there were no errors, false otherwise
 */
EXPORTDLL int nnef_graph_infer_shapes( nnef_graph_t graph, char *error );

/*
 * Allocate tensor buffers in the graph
 *
 * @param graph: the graph object
 * @param error: the string to store the error message if any
 *
 * @return true if there were no errors, false otherwise
 */
EXPORTDLL int nnef_graph_allocate_buffers( nnef_graph_t graph, char *error );

/*
 * Execute a graph
 *
 * @param graph: the graph object
 * @param error: the string to store the error message if any
 *
 * @return true if there were no errors, false otherwise
 */
EXPORTDLL int nnef_graph_execute( nnef_graph_t graph, char *error );

/*
 * Query input names from NNEF graph
 *
 * @param graph: NNEF graph
 * @param inputs: input names
 *
 * @return input count
 */
EXPORTDLL size_t nnef_graph_input_names( nnef_graph_t graph, const char** inputs );

/*
 * Query output names from NNEF graph
 *
 * @param graph: NNEF graph
 * @param outputs: output names
 *
 * @return output count
 */
EXPORTDLL size_t nnef_graph_output_names( nnef_graph_t graph, const char** outputs );

/*
 * Find tensor in NNEF graph by name
 *
 * @param graph: NNEF graph
 * @param tensor_name: tensor name
 *
 * @return tensor
 */
EXPORTDLL nnef_tensor_t nnef_graph_find_tensor( nnef_graph_t graph, const char* tensor_name );

/*
 * Query name of an NNEF graph
 *
 * @param graph: NNEF graph
 *
 * @return graph name
 */
EXPORTDLL const char* nnef_graph_name( nnef_graph_t graph );

/*
 * Create a new tensor
 *
 * @return tensor
 */
EXPORTDLL nnef_tensor_t nnef_tensor_create(void);

/*
 * Release a tensor
 */
EXPORTDLL void nnef_tensor_release( nnef_tensor_t tensor );

/*
 * Query tensor name
 *
 * @param tensor: tensor
 *
 * @return tensor name
 */
EXPORTDLL const char* nnef_tensor_name( nnef_tensor_t tensor );

/*
 * Query tensor data-type
 *
 * @param tensor: tensor
 *
 * @return data-type name
 */
EXPORTDLL const char* nnef_tensor_dtype( nnef_tensor_t tensor );

/*
 * Query tensor rank
 *
 * @param tensor: tensor
 *
 * @return tensor rank
 */
EXPORTDLL size_t nnef_tensor_rank( nnef_tensor_t tensor );

/*
 * Query tensor dims
 *
 * @param tensor: tensor
 *
 * @return tensor dimensions (extents) array
 */
EXPORTDLL const int* nnef_tensor_dims( nnef_tensor_t tensor );

/*
 * Query tensor data
 *
 * @param tensor: tensor
 *
 * @return tensor data
 */
EXPORTDLL void* nnef_tensor_data( nnef_tensor_t tensor );

/*
 * Read tensor from binary file
 *
 * @param url: the name of the file to read from
 * @param tensor: tensor
 * @param error: the string to store the error message if any
 *
 * @return true if there were no errors, false otherwise
 */
EXPORTDLL int nnef_tensor_read( const char* path, nnef_tensor_t tensor, char *error );

/*
 * Write tensor to binary file
 *
 * @param url: the name of the file to write to
 * @param tensor: tensor
 * @param error: the string to store the error message if any
 *
 * @return true if there were no errors, false otherwise
 */
EXPORTDLL int nnef_tensor_write( const char* path, nnef_tensor_t tensor, char *error );
#ifdef __cplusplus } #endif #endif ================================================ FILE: nnef-pyproject/nnef/cpp/include/nnef/common/binary.h ================================================ /* * Copyright (c) 2017 The Khronos Group Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef _NNEF_BINARY_H_ #define _NNEF_BINARY_H_ #include "error.h" #include #include #include #include #include #include namespace nnef { struct TensorHeader { enum { MaxRank = 8 }; enum ItemType { Float, Uint, Quint, Qint, Int, Bool }; uint8_t magic[2]; uint8_t version[2]; uint32_t data_length; uint32_t rank; uint32_t extents[MaxRank]; uint32_t bits_per_item; uint32_t item_type; uint32_t reserved[19]; }; template void copy_and_cast_n( In* input, size_t n, Out* output ) { for ( size_t i = 0; i < n; ++i ) { *output++ = (Out)*input++; } } template inline void fill_tensor_header( TensorHeader& header, const size_t version[2], const size_t rank, const T* extents, const size_t bits_per_item, const TensorHeader::ItemType item_type ) { const char* magic = "N\xEF"; std::fill_n((uint8_t*)&header, sizeof(header), (uint8_t)0); header.magic[0] = (uint8_t)magic[0]; header.magic[1] = (uint8_t)magic[1]; header.version[0] = (uint8_t)version[0]; header.version[1] = (uint8_t)version[1]; if ( rank > TensorHeader::MaxRank ) { throw Error("tensor rank %d exceeds maximum possible value (%d)", (int)rank, (int)TensorHeader::MaxRank); } const uint32_t item_count = std::accumulate(extents, extents + rank, (uint32_t)1, 
std::multiplies()); header.data_length = (uint32_t)((item_count * bits_per_item + 7) / 8); header.bits_per_item = (uint32_t)bits_per_item; header.rank = (uint32_t)rank; header.item_type = item_type; std::copy_n(extents, rank, header.extents); } inline void validate_tensor_header( const TensorHeader& header ) { if ( header.magic[0] != 'N' || header.magic[1] != 0xEF ) { throw Error("invliad magic number in tensor binary"); } if ( header.version[0] != 1 || header.version[1] != 0 ) { throw Error("unknown version number %d.%d", (int)header.version[0], (int)header.version[1]); } if ( header.rank > TensorHeader::MaxRank ) { throw Error("tensor rank %d exceeds maximum allowed rank (%d)", (int)header.rank, (int)TensorHeader::MaxRank); } const size_t item_count = std::accumulate(header.extents, header.extents + header.rank, (size_t)1, std::multiplies()); if ( (size_t)header.data_length != (item_count * header.bits_per_item + 7) / 8 ) { throw Error("data length is not compatible with extents and bits per item"); } if ( (header.item_type & 0xffff0000) == 0 ) // Khronos-defined item type { const uint32_t code = (header.item_type & 0x0000ffff); switch ( code ) { case TensorHeader::Float: { if ( header.bits_per_item != 16 && header.bits_per_item != 32 && header.bits_per_item != 64 ) { throw Error("invalid bits per item for float item type: %d", (int)header.bits_per_item); } break; } case TensorHeader::Int: case TensorHeader::Uint: case TensorHeader::Quint: case TensorHeader::Qint: { if ( header.bits_per_item > 64 ) { throw Error("invalid bits per item for integer item type: %d", (int)header.bits_per_item); } break; } case TensorHeader::Bool: { if ( header.bits_per_item != 1 && header.bits_per_item != 8 ) { throw Error("invalid bits per item for bool item type: %d", (int)header.bits_per_item); } break; } default: { throw Error("unkown Khronos-defined item type code: %x", (int)code); } } } } inline void pack_bits( const size_t n, const bool* data, char* bytes ) { for ( size_t i = 
0; i < n; ++i ) { bytes[i / 8] |= (data[i] << (7 - (i % 8))); } } inline void unpack_bits( const size_t n, const char* bytes, bool* data ) { for ( size_t i = 0; i < n; ++i ) { data[i] = (bytes[i / 8] >> (7 - (i % 8))) & 0x01; } } inline void from_bytes( const char* bytes, const size_t count, const size_t bits_per_item, float* data ) { if ( bits_per_item == 32 ) { copy_and_cast_n((const float*)bytes, count, data); } else if ( bits_per_item == 64 ) { copy_and_cast_n((const double*)bytes, count, data); } else { throw std::runtime_error("cannot load float data of " + std::to_string(bits_per_item) + " bits per item"); } } inline void from_bytes( const char* bytes, const size_t count, const size_t bits_per_item, int* data, const bool is_signed ) { if ( bits_per_item == 8 ) { if ( is_signed ) { copy_and_cast_n((const int8_t*)bytes, count, data); } else { copy_and_cast_n((const uint8_t*)bytes, count, data); } } else if ( bits_per_item == 16 ) { if ( is_signed ) { copy_and_cast_n((const int16_t*)bytes, count, data); } else { copy_and_cast_n((const uint16_t*)bytes, count, data); } } else if ( bits_per_item == 32 ) { if ( is_signed ) { copy_and_cast_n((const int32_t*)bytes, count, data); } else { copy_and_cast_n((const uint32_t*)bytes, count, data); } } else if ( bits_per_item == 64 ) { if ( is_signed ) { copy_and_cast_n((const int64_t*)bytes, count, data); } else { copy_and_cast_n((const uint64_t*)bytes, count, data); } } else { throw std::runtime_error("cannot load int data of " + std::to_string(bits_per_item) + " bits per item"); } } inline void from_bytes( const char* bytes, const size_t count, const size_t bits_per_item, bool* data ) { if ( bits_per_item == 1 ) { unpack_bits(count, bytes, data); } else if ( bits_per_item == 8 ) { copy_and_cast_n((const int8_t*)bytes, count, data); } else { throw std::runtime_error("cannot load bool data of " + std::to_string(bits_per_item) + " bits per item"); } } inline void to_bytes( const float* data, const size_t count, char* bytes ) 
{ copy_and_cast_n(data, count, (float*)bytes); } inline void to_bytes( const int* data, const size_t count, char* bytes, const bool as_signed ) { if ( as_signed ) { copy_and_cast_n(data, count, (int32_t*)bytes); } else { copy_and_cast_n(data, count, (uint32_t*)bytes); } } inline void to_bytes( const bool* data, const size_t count, char* bytes ) { pack_bits(count, data, bytes); } } // namespace nnef #endif ================================================ FILE: nnef-pyproject/nnef/cpp/include/nnef/common/dictionary.h ================================================ /* * Copyright (c) 2017 The Khronos Group Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef _NNEF_DICTIONARY_H_ #define _NNEF_DICTIONARY_H_ #include #include namespace nnef { template using Dictionary = std::map; } // namespace nnef #endif ================================================ FILE: nnef-pyproject/nnef/cpp/include/nnef/common/error.h ================================================ /* * Copyright (c) 2017 The Khronos Group Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef _NNEF_ERROR_H_
#define _NNEF_ERROR_H_

/* NOTE(review): include targets were lost in extraction (bare '#include');
   confirm against the original header */
#include
#include
#include

namespace nnef
{

    /* exception type carrying a printf-style formatted message and an
       optional source position (line/column/filename/origin chain) */
    class Error : public std::exception
    {
    public:

        struct Position
        {
            unsigned line;
            unsigned column;
            const char* filename;
            const Position* origin;
        };

    public:

        /* NOTE(review): template parameter lists ('template') were lost in
           extraction throughout this class; confirm against the original */
        template Error( const Position& position, const char* format, Args&&... args )
        : _position(position), _message(formatString(format, std::forward(args)...))
        {
        }

        /* position-less variant; position fields are zero/null */
        template Error( const char* format, Args&&... args )
        : _position({0,0,nullptr,nullptr}), _message(formatString(format, std::forward(args)...))
        {
        }

        virtual const char* what() const noexcept
        {
            return _message.c_str();
        }

        const Position& position() const
        {
            return _position;
        }

    public:

        /* printf-style formatting into a std::string: first vsnprintf call
           measures the required length, the second writes into the buffer */
        static std::string formatString( const char* fmt, ... )
        {
            va_list args;
            va_start(args, fmt);
            auto length = vsnprintf(nullptr, 0, fmt, args);
            va_end(args);

            if ( length < 0 )
            {
                throw std::logic_error("string formatting error");
            }

            std::string str(length, '\0');

            va_start(args, fmt);
            vsnprintf((char*)str.data(), length + 1, fmt, args);
            va_end(args);

            return str;
        }

    private:

        Position _position;
        std::string _message;
    };

}   // namespace nnef

#endif


================================================
FILE: nnef-pyproject/nnef/cpp/include/nnef/common/lexer.h
================================================

/*
 * Copyright (c) 2017 The Khronos Group Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/

#ifndef _NNEF_LEXER_H_
#define _NNEF_LEXER_H_

#include "error.h"
/* NOTE(review): include targets were lost in extraction (bare '#include');
   confirm against the original header */
#include
#include
#include
#include

namespace nnef
{

    /* hand-written single-token-lookahead lexer for NNEF text; tracks
       line/column positions for error reporting */
    class Lexer
    {
    public:

        typedef Error::Position Position;

    public:

        /* token codes; single-character operators use their char value,
           so multi-char tokens live below the printable ASCII range */
        enum Token
        {
            Eof, Version, Extension, Identifier, Characters, Decimal, Fractional,
            Graph, Fragment, Tensor, Integer, Scalar, Logical, String, True, False,
            For, In, If, Else, Yield, LengthOf, ShapeOf, RangeOf,
            Arrow, And, Or, Le, Ge, Eq, Ne,
        };

        /* printable name of a token; values above Ne are single characters */
        static std::string tokenString( int token )
        {
            static const std::string strings[] =
            {
                "eof", "version", "extension", "identifier", "literal", "decimal", "fractional",
                "graph", "fragment", "tensor", "integer", "scalar", "logical", "string", "true", "false",
                "for", "in", "if", "else", "yield", "length_of", "shape_of", "range_of",
                "->", "&&", "||", "<=", ">=", "==", "!=",
            };
            char ch = (char)token;
            return token <= Ne ? strings[token] : std::string(&ch, 1);
        }

        static bool isType( int token )
        {
            return token >= Tensor && token <= String;
        }

        static bool isKeyword( int token )
        {
            return token >= Fragment && token <= False;
        }

        static bool isOperator( int token )
        {
            return token >= LengthOf;
        }

    public:

        Lexer( std::istream& input, const char* filename )
        : _input(input), _position({1,1,filename,nullptr}), _token(Eof)
        {
        }

        /* advance to the next token; column advances past the previous
           token's text (+2 for the quotes of a character literal) */
        void next()
        {
            _position.column += (unsigned)_string.length() + 2 * (_token == Characters);

            skipSpace();
            skipComment();

            _string.clear();

            if ( _input.peek() == EOF )
            {
                _token = Eof;
            }
            else if ( _input.peek() == '\'' || _input.peek() == '\"' )
            {
                _token = getCharacters();
            }
            else if ( std::isalpha(_input.peek()) || _input.peek() == '_' )
            {
                _token = getIdentifier();
            }
            else if ( std::isdigit(_input.peek()) )
            {
                _token = getNumber();
            }
            else
            {
                _token = getOperator();
            }
        }

        int token() const
        {
            return _token;
        }

        const std::string& string() const
        {
            return _string;
        }

        const Position& position() const
        {
            return _position;
        }

        /* consume the expected token or throw a positioned Error */
        void readToken( int token )
        {
            if ( _token != token )
            {
                throw Error(_position, "expected token '%s', found '%s'", tokenString(token).c_str(), tokenString(_token).c_str());
            }
            next();
        }

        /* consume the token only if it matches; returns whether it did */
        bool readIfToken( int token )
        {
            if ( _token == token )
            {
                next();
                return true;
            }
            return false;
        }

    private:

        /* quoted string literal; either quote character may delimit it */
        Token getCharacters()
        {
            char delim = _input.get();
            while ( _input.peek() != delim && _input.peek() != EOF )
            {
                _string += (char)_input.get();
            }
            if ( _input.peek() == EOF )
            {
                const Position position = { _position.line, _position.column + (unsigned)_string.length() + 1, _position.filename, nullptr };
                throw Error(position, "expected %c", delim);
            }
            _input.get();

            return Token::Characters;
        }

        /* identifier or keyword; keywords are resolved via a lookup table */
        Token getIdentifier()
        {
            static const std::map keywords =
            {
                std::make_pair("version", Token::Version),
                std::make_pair("extension", Token::Extension),
                std::make_pair("graph", Token::Graph),
                std::make_pair("fragment", Token::Fragment),
                std::make_pair("tensor", Token::Tensor),
                std::make_pair("integer", Token::Integer),
                std::make_pair("scalar", Token::Scalar),
                std::make_pair("logical", Token::Logical),
                std::make_pair("string", Token::String),
                std::make_pair("true", Token::True),
                std::make_pair("false", Token::False),
                std::make_pair("for", Token::For),
                std::make_pair("in", Token::In),
                std::make_pair("if", Token::If),
                std::make_pair("else", Token::Else),
                std::make_pair("yield", Token::Yield),
                std::make_pair("length_of", Token::LengthOf),
                std::make_pair("shape_of", Token::ShapeOf),
                std::make_pair("range_of", Token::RangeOf),
            };

            do
            {
                _string += _input.get();
            }
            while ( std::isalnum(_input.peek()) || _input.peek() == '_' );

            auto it = keywords.find(_string);
            return it == keywords.end() ? Token::Identifier : it->second;
        }

        /* integer or real number with optional fraction and exponent */
        Token getNumber()
        {
            bool real = false;
            do
            {
                _string += _input.get();
                if ( _input.peek() == '.' && !real )
                {
                    _string += _input.get();
                    real = true;
                }
            }
            while ( std::isdigit(_input.peek()) );

            if ( _input.peek() == 'e' || _input.peek() == 'E' )
            {
                _string += _input.get();
                if ( _input.peek() == '+' || _input.peek() == '-' )
                {
                    _string += _input.get();
                }
                if ( !std::isdigit(_input.peek()) )
                {
                    const Position position = { _position.line, _position.column + (unsigned)_string.length(), _position.filename, nullptr };
                    throw Error(position, "expected digit");
                }
                while ( std::isdigit(_input.peek()) )
                {
                    _string += _input.get();
                }
                real = true;
            }

            return real ? Token::Fractional : Token::Decimal;
        }

        /* single- or two-character operator (<= >= == != && || ->) */
        int getOperator()
        {
            int token = _input.get();
            _string += (char)token;

            if ( _input.peek() == '=' )
            {
                if ( token == '<' )
                {
                    _string += (char)_input.get();
                    token = Le;
                }
                else if ( token == '>' )
                {
                    _string += (char)_input.get();
                    token = Ge;
                }
                else if ( token == '=' )
                {
                    _string += (char)_input.get();
                    token = Eq;
                }
                else if ( token == '!' )
                {
                    _string += (char)_input.get();
                    token = Ne;
                }
            }

            if ( token == '&' && _input.peek() == '&' )
            {
                _string += (char)_input.get();
                token = And;
            }
            else if ( token == '|' && _input.peek() == '|' )
            {
                _string += (char)_input.get();
                token = Or;
            }
            else if ( token == '-' && _input.peek() == '>' )
            {
                _string += (char)_input.get();
                token = Arrow;
            }

            return token;
        }

        /* consume whitespace, tracking line/column (handles \r, \n, \r\n) */
        void skipSpace()
        {
            while ( std::isspace(_input.peek()) )
            {
                ++_position.column;

                char ch = _input.get();
                if ( ch == '\r' || ch == '\n' )
                {
                    ++_position.line;
                    _position.column = 1;
                }
                if ( ch == '\r' && _input.peek() == '\n' )
                {
                    _input.get();
                }
            }
        }

        /* consume '#' line comments (and any whitespace between them) */
        void skipComment()
        {
            while ( _input.peek() == '#' )
            {
                while ( _input.peek() != '\n' && _input.peek() != '\r' && _input.peek() != EOF )
                {
                    _input.get();
                    ++_position.column;
                }
                skipSpace();
            }
        }

    private:

        std::istream& _input;
        std::string _string;
        Position _position;
        int _token;
    };


    inline float getScalarValue( Lexer& lexer )
    {
        return (float)std::atof(lexer.string().c_str());
    }

    inline int getIntegerValue( Lexer& lexer )
    {
        return std::atoi(lexer.string().c_str());
    }

} //
namespace nnef #endif ================================================ FILE: nnef-pyproject/nnef/cpp/include/nnef/common/parser.h ================================================ /* * Copyright (c) 2017 The Khronos Group Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef _NNEF_PARSER_H_ #define _NNEF_PARSER_H_ #include "value.h" #include "lexer.h" #include "prototype.h" #include "dictionary.h" #include namespace nnef { class Parser { public: typedef std::pair version_t; typedef std::vector extensions_t; enum Flags { KHR_ENABLE_FRAGMENT_DEFINITIONS = 0x1, KHR_ENABLE_OPERATOR_EXPRESSIONS = 0x2 }; public: struct Callback { virtual ~Callback() {} virtual void beginDocument( const std::string& filename, const version_t& version ) {} virtual void endDocument( const std::string& filename ) {} virtual bool handleExtension( const std::string& extension ) { return false; } virtual void beginGraph( const Prototype& proto, const Dictionary& fragments ) {} virtual void endGraph( const Prototype& proto, const Dictionary& dtypes ) {} virtual void operation( const Prototype& proto, const Dictionary& args, const Dictionary& dtypes ) = 0; }; public: virtual ~Parser() {} virtual void parse( std::istream& is, const char* filename, Callback& callback ) = 0; protected: static Typename getTypename( Lexer& lexer ) { switch ( lexer.token() ) { case Lexer::Integer: return Typename::Integer; case Lexer::Scalar: return Typename::Scalar; case Lexer::Logical: return Typename::Logical; case 
Lexer::String: return Typename::String; case '?': return Typename::Generic; default: throw Error(lexer.position(), "expected type name, found '%s'", Lexer::tokenString(lexer.token()).c_str()); } } static version_t readVersion( Lexer& lexer ) { lexer.readToken(Lexer::Version); if ( lexer.token() != Lexer::Fractional ) { throw Error(lexer.position(), "expected version number"); } auto str = lexer.string(); const size_t dots = std::count(str.begin(), str.end(), '.'); bool isdigits = std::all_of(str.begin(), str.end(), []( char ch ){ return std::isdigit(ch) || ch == '.'; }); if ( !isdigits || dots != 1 ) { throw Error(lexer.position(), "invalid version number format: %s", str.c_str()); } lexer.next(); auto dot = str.find('.'); auto major = std::atoi(str.substr(0,dot).c_str()); auto minor = std::atoi(str.substr(dot+1).c_str()); static const version_t MaxSupportedVersion(1,0); auto version = version_t(major,minor); if ( version > MaxSupportedVersion ) { throw Error(lexer.position(), "unsupported version %d.%d; maximum supported version is %d.%d", (int)major, (int)minor, (int)MaxSupportedVersion.first, (int)MaxSupportedVersion.second); } lexer.readToken(';'); return version; } static extensions_t readExtensions( Lexer& lexer, std::function handler ) { extensions_t extensions; while ( lexer.readIfToken(Lexer::Extension) ) { do { auto position = lexer.position(); extensions.push_back(lexer.string()); lexer.readToken(Lexer::Identifier); if ( !handler(extensions.back()) ) { throw Error(position, "could not handle extension '%s'", extensions.back().c_str()); } } while ( lexer.readIfToken(',') ); lexer.readToken(';'); } return extensions; } }; } // namespace nnef #endif ================================================ FILE: nnef-pyproject/nnef/cpp/include/nnef/common/prototype.h ================================================ /* * Copyright (c) 2017 The Khronos Group Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef _NNEF_PROTOTYPE_H_ #define _NNEF_PROTOTYPE_H_ #include "typespec.h" #include "value.h" #include #include #include namespace nnef { class Typed { public: Typed( const std::string& name, const Type* type ) : _name(name), _type(type) { } const std::string& name() const { return _name; } const Type* type() const { return _type; } private: std::string _name; const Type* _type; }; class Param : public Typed { public: Param( const std::string& name, const Type* type, const Value& defaultValue = Value::none() ) : Typed(name,type), _default(defaultValue) { } const Value& defaultValue() const { return _default; } private: Value _default; }; typedef Typed Result; class Prototype { private: void initGeneric() { auto isGeneric = []( const Typed& typed ){ return typed.type()->isGeneric(); }; _hasGenericParams = std::any_of(_params.begin(), _params.end(), isGeneric); _hasGenericResults = std::any_of(_results.begin(), _results.end(), isGeneric); } public: Prototype( const std::string& name, std::initializer_list params, std::initializer_list results, const PrimitiveType* genericParamDefault = nullptr ) : _name(name), _params(params), _results(results), _genericParamDefault(genericParamDefault) { initGeneric(); } Prototype( const std::string& name, std::vector& params, std::vector& results, const PrimitiveType* genericParamDefault = nullptr ) : _name(name), _params(std::move(params)), _results(std::move(results)), 
_genericParamDefault(genericParamDefault) { initGeneric(); } const std::string& name() const { return _name; } const PrimitiveType* genericParamDefault() const { return _genericParamDefault; } size_t paramCount() const { return _params.size(); } const Param& param( const size_t i ) const { return _params[i]; } const Param* param( const std::string& name ) const { for ( auto& param : _params ) { if ( param.name() == name ) { return ¶m; } } return nullptr; } size_t resultCount() const { return _results.size(); } const Result& result( const size_t i ) const { return _results[i]; } const Result* result( const std::string& name ) const { for ( auto& result : _results ) { if ( result.name() == name ) { return &result; } } return nullptr; } bool hasGenericParams() const { return _hasGenericParams; } bool hasGenericResults() const { return _hasGenericResults; } bool isGeneric() const { return _hasGenericParams || _hasGenericResults; } private: std::string _name; std::vector _params; std::vector _results; bool _hasGenericParams; bool _hasGenericResults; const PrimitiveType* _genericParamDefault; }; inline std::ostream& operator<<( std::ostream& os, const Typed& typed ) { os << typed.name() << ": " << typed.type()->toString(); return os; } inline std::ostream& operator<<( std::ostream& os, const Prototype& proto ) { os << proto.name(); if ( proto.isGeneric() ) { os << "toString(); } os << ">"; } os << "( "; for ( size_t i = 0; i < proto.paramCount(); ++i ) { if ( i ) { os << ", "; } os << proto.param(i); } os << " )"; os << " -> "; os << "( "; for ( size_t i = 0; i < proto.resultCount(); ++i ) { if ( i ) { os << ", "; } os << proto.result(i); } os << " )"; return os; } } // namespace nnef #endif ================================================ FILE: nnef-pyproject/nnef/cpp/include/nnef/common/shapes.h ================================================ /* * Copyright (c) 2017 The Khronos Group Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef _NNEF_SHAPES_H_ #define _NNEF_SHAPES_H_ #include #include #include #include #include #include #include #include "value.h" #include "error.h" namespace nnef { typedef std::vector Shape; inline std::string to_string( const Shape& shape ) { std::string str; str += '['; for ( size_t i = 0; i < shape.size(); ++i ) { if ( i ) { str += ','; } str += std::to_string(shape[i]); } str += ']'; return str; } inline Shape make_shape( const Value& arg, const size_t offset = 0 ) { Shape shape = Shape(offset + arg.size(), 1); for ( size_t i = 0; i < arg.size(); ++i ) { shape[i + offset] = arg[i].integer(); } return shape; } inline Shape make_padding_shape( const Value& arg, const size_t offset = 0 ) { Shape padding(offset + arg.size(), 0); for ( size_t i = 0; i < arg.size(); ++i ) { padding[i + offset] = arg[i][0].integer() + arg[i][1].integer(); } return padding; } inline size_t volume_of( const Shape& shape ) { return std::accumulate(shape.begin(), shape.end(), (size_t)1, std::multiplies()); } inline size_t volume_of( const Shape& shape, const size_t offset, const size_t length ) { return std::accumulate(shape.begin() + offset, shape.begin() + offset + length, (size_t)1, std::multiplies()); } inline bool broadcastable( const Shape& xShape, const Shape& yShape, const size_t n ) { for ( size_t i = 0; i < n; ++i ) { auto xi = i < xShape.size() ? xShape[i] : 1; auto yi = i < yShape.size() ? 
yShape[i] : 1; if ( !(xi == yi || xi == 1) ) { return false; } } return true; } inline bool broadcastable( const Shape& xShape, const Shape& yShape ) { const size_t rank = std::max(xShape.size(), yShape.size()); return broadcastable(xShape, yShape, rank); } inline bool broadcast_compatible( const Shape& xShape, const Shape& yShape, const size_t n ) { for ( size_t i = 0; i < n; ++i ) { auto xi = i < xShape.size() ? xShape[i] : 1; auto yi = i < yShape.size() ? yShape[i] : 1; if ( !(xi == yi || xi == 1 || yi == 1) ) { return false; } } return true; } inline bool broadcast_compatible( const Shape& xShape, const Shape& yShape ) { const size_t rank = std::max(xShape.size(), yShape.size()); return broadcast_compatible(xShape, yShape, rank); } inline bool axes_compatible_with_rank( const Value& axes, const size_t rank ) { for ( size_t i = 0; i < axes.size(); ++i ) { auto axis = axes[i].integer(); if ( axis < 0 || axis >= (Value::integer_t)rank ) { return false; } } return true; } inline bool contains_axis( const Value& axes, const size_t axis ) { for ( size_t i = 0; i < axes.size(); ++i ) { if ( axes[i].integer() == (Value::integer_t)axis ) { return true; } } return false; } template inline int sign( T val ) { return (T(0) < val) - (val < T(0)); } inline int ceil_div( int x, int y ) { return y > 0 ? 
(x + y - 1) / y : (x + y + 1) / y; } template inline T downsize( const T input, const T size, const T padding, const T stride, const T dilation ) { const T window = 1 + (size - 1) * dilation; return sign(input) * ((std::abs(input) + padding - window) / stride + 1); } template inline T downsize( const T input, const T stride ) { return sign(input) * ((std::abs(input) + stride - 1) / stride); } template inline T upsize( const T input, const T size, const T padding, const T stride, const T dilation ) { const T window = 1 + (size - 1) * dilation; return sign(input) * ((std::abs(input) - 1) * stride + window - padding); } template inline T upsize( const T input, const T stride ) { return input * stride; } template inline void check( bool condition, const char* message, Args&&... args ) { if ( !condition ) { throw std::logic_error(Error::formatString(message, std::forward(args)...)); } } inline void check_axis_compatible_with_rank( const Value& axis, const size_t rank ) { check(axis.integer() >= 0 && axis.integer() < (Value::integer_t)rank, "axis must be in range [0,%d); found %d", (int)rank, (int)axis.integer()); } inline void check_axes_compatible_with_rank( const Value& axes, const size_t rank ) { check(axes_compatible_with_rank(axes, rank), "axes must be in range [0,%d); found %s", (int)rank, axes.toString().c_str()); } inline void check_range( const char* name, const Value& value, const Value::integer_t min ) { if ( value.kind() == Value::Array || value.kind() == Value::Tuple ) { for ( size_t i = 0; i < value.size(); ++i ) { check_range(name, value[i], min); } } else if ( value.kind() == Value::Integer ) { check(value.integer() >= min, "'%s' must be >= %d (found %d)", name, min, (int)value.integer()); } } inline void check_rank( const char* name, const Value& value, const size_t rank ) { check(value.size() == rank, "length of array '%s' must be %d to match rank of operation (found %d)", name, (int)rank, (int)value.size()); } inline Shape broadcast_shape( const 
Shape& xShape, const Shape& yShape, const size_t n )
    {
        // Dims beyond n are left zero; all callers overwrite them afterwards.
        const size_t rank = std::max(xShape.size(), yShape.size());
        Shape zShape(rank);
        for ( size_t i = 0; i < n; ++i )
        {
            auto xi = i < xShape.size() ? xShape[i] : 1;
            auto yi = i < yShape.size() ? yShape[i] : 1;
            zShape[i] = std::max(xi, yi);
        }
        return zShape;
    }

    inline Shape broadcast_shape( const Shape& xShape, const Shape& yShape )
    {
        const size_t rank = std::max(xShape.size(), yShape.size());
        return broadcast_shape(xShape, yShape, rank);
    }

    inline Shape nullary_shape( const Value& shape )
    {
        return make_shape(shape);
    }

    // Shape of a constant tensor; the value count must equal the volume or be 1 (splat).
    inline Shape constant_shape( const Value& shape, const Value& value )
    {
        auto result = nullary_shape(shape);
        check(value.size() == volume_of(result) || value.size() == 1,
              "shape volume (%d) does not match number of values (%d)", (int)volume_of(result), (int)value.size());
        return result;
    }

    inline Shape unary_shape( const Shape& shape )
    {
        return shape;
    }

    // Element-wise binary op: shapes must be mutually broadcast-compatible.
    inline Shape binary_shape( const Shape& shape1, const Shape& shape2 )
    {
        check(broadcast_compatible(shape1, shape2), "incompatible tensor shapes for broadcasting (%s vs %s)",
              to_string(shape1).c_str(), to_string(shape2).c_str());
        return broadcast_shape(shape1, shape2);
    }

    // One-directional broadcast: only the second argument may be broadcast to the first.
    inline Shape asymmetric_binary_shape( const Shape& shape1, const Shape& shape2 )
    {
        check(broadcastable(shape2, shape1), "cannot broadcast second argument shape (%s) to first argument shape (%s)",
              to_string(shape2).c_str(), to_string(shape1).c_str());
        return shape1;
    }

    inline Shape ternary_shape( const Shape& shape1, const Shape& shape2, const Shape& shape3 )
    {
        return binary_shape(binary_shape(shape1, shape2), shape3);
    }

    // Reduction collapses the listed axes to extent 1.
    inline Shape reduce_shape( const Shape& input, const Value& axes )
    {
        check_axes_compatible_with_rank(axes, input.size());

        Shape output = input;
        for ( size_t i = 0; i < axes.size(); ++i )
        {
            auto axis = axes[i].integer();
            output[axis] = 1;
        }
        return output;
    }

    // Spatial extents (dims 2..) are divided by the per-dimension factors.
    inline Shape downsample_shape( const Shape& input, const Value& factor )
    {
        for ( size_t i = 0; i < factor.size(); ++i )
        {
            auto scale = factor[i].integer();
            check(input[i+2] % scale == 0, "input extent (%d) must be divisible by factor (%d)",
                  (int)input[i+2], (int)scale);
        }

        Shape output = input;
        for ( size_t i = 0; i < factor.size(); ++i )
        {
            output[i+2] /= factor[i].integer();
        }
        return output;
    }

    // Spatial extents (dims 2..) are multiplied by the per-dimension factors.
    inline Shape upsample_shape( const Shape& input, const Value& factor )
    {
        check_rank("factor", factor, input.size() - 2);

        Shape output = input;
        for ( size_t i = 0; i < factor.size(); ++i )
        {
            output[i+2] *= factor[i].integer();
        }
        return output;
    }

    // Apply downsize() per dimension starting at `offset`; empty padding selects
    // the stride-only ('same' padding) variant. Dims before `offset` are left zero
    // and are filled in by the caller.
    inline Shape downsize_shape( const Shape& input, const Shape& kernel, const Shape& padding, const Shape& stride,
                                 const Shape& dilation, const size_t offset )
    {
        Shape output(input.size());
        for ( size_t i = offset; i < output.size(); ++i )
        {
            output[i] = padding.size() ? downsize(input[i], kernel[i], padding[i], stride[i], dilation[i]) :
                                         downsize(input[i], stride[i]);
        }
        return output;
    }

    // Transposed counterpart of downsize_shape.
    inline Shape upsize_shape( const Shape& input, const Shape& kernel, const Shape& padding, const Shape& stride,
                               const Shape& dilation, const size_t offset )
    {
        Shape output(input.size());
        for ( size_t i = offset; i < output.size(); ++i )
        {
            output[i] = padding.size() ? upsize(input[i], kernel[i], padding[i], stride[i], dilation[i]) :
                                         upsize(input[i], stride[i]);
        }
        return output;
    }

    // Shared shape inference of conv and deconv (transposed conv).
    // Layout: input = [batch, channels, spatial...], filter = [batch, channels, spatial...].
    inline Shape conv_like_shape( const Shape& input, const Shape& filter, const Shape& bias,
                                  const Value& /*border*/, const Value& padding, const Value& stride,
                                  const Value& dilation, const Value& groups, const Value& output_shape,
                                  const bool transposed )
    {
        auto rank = input.size();
        if ( padding.size() )
        {
            check_rank("padding", padding, rank - 2);
        }
        if ( stride.size() )
        {
            check_rank("stride", stride, rank - 2);
        }
        if ( dilation.size() )
        {
            check_rank("dilation", dilation, rank - 2);
        }
        check_range("stride", stride, 1);
        check_range("dilation", dilation, 1);
        check_range("groups", groups, 0);

        // groups == 0 means depth-wise: one group per (output) channel
        auto groupCount = groups.integer() != 0 ? groups.integer() :
                          transposed && output_shape && output_shape.size() ? output_shape[1].integer() : input[1];

        if ( transposed )
        {
            check(input[1] == filter[0], "filter batch (%d) does not match input channels (%d)",
                  (int)filter[0], (int)input[1]);
        }
        else
        {
            check(input[1] == filter[1] * groupCount,
                  "filter channels (%d) does not match input channels (%d) times groups (%d)",
                  (int)filter[1], (int)input[1], (int)groupCount);
        }
        check(filter[0] % groupCount == 0, "filter batch (%d) must be divisible by groups (%d)",
              (int)filter[0], (int)groupCount);

        check(bias.size() <= 2, "bias shape must be of rank at most 2, found %d", (int)bias.size());
        if ( bias.size() == 2 )
        {
            check(bias[0] == 1, "bias shape must be singular for the batch dimension");
        }
        if ( bias.size() > 0 )
        {
            auto channels = transposed ? filter[1] * groupCount : filter[0];
            check(bias.back() == channels || bias.back() == 1,
                  "bias channels (%d) does not match output channels (%d)", (int)bias.back(), (int)channels);
        }

        // Empty stride/dilation expand to all-ones of full rank; padding stays empty
        // to select the stride-only downsize/upsize variant.
        const Shape strideShape = make_shape(stride, stride.size() ? 2 : rank);
        const Shape dilationShape = make_shape(dilation, dilation.size() ? 2 : rank);
        const Shape paddingShape = padding.size() ? make_padding_shape(padding, 2) : Shape();

        if ( output_shape && output_shape.size() )
        {
            const Shape outputShape = make_shape(output_shape);
            check_rank("output_shape", output_shape, rank);
            check_range("output_shape", output_shape, 1);
            check(outputShape[0] == input[0], "output batch (%d) does not match input batch (%d)",
                  (int)outputShape[0], (int)input[0]);
            check(outputShape[1] == filter[1] * groupCount,
                  "output channels (%d) does not match filter channels (%d) times groups (%d)",
                  (int)outputShape[1], (int)filter[1], (int)groupCount);

            // Validate by deriving the input shape back from the requested output shape.
            Shape expected = downsize_shape(outputShape, filter, paddingShape, strideShape, dilationShape, 2);
            std::copy_n(input.begin(), 2, expected.begin());
            check(input == expected,
                  "expected input shape %s derived from output shape is incompatible with actual input shape %s",
                  to_string(expected).c_str(), to_string(input).c_str());

            return outputShape;
        }

        if ( transposed )
        {
            auto output = upsize_shape(input, filter, paddingShape, strideShape, dilationShape, 2);
            output[0] = input[0];
            output[1] = filter[1] * groupCount;
            return output;
        }
        else
        {
            auto output = downsize_shape(input, filter, paddingShape, strideShape, dilationShape, 2);
            output[0] = input[0];
            output[1] = filter[0];
            return output;
        }
    }

    // Separable convolution: validate the plane/point filter pair, then reduce to
    // an equivalent single-filter conv_like_shape call.
    inline Shape separable_conv_like_shape( const Shape& input, const Shape& plane_filter, const Shape& point_filter,
                                            const Shape& bias, const Value& border, const Value& padding,
                                            const Value& stride, const Value& dilation, const Value& groups,
                                            const Value& output_shape, const bool transposed )
    {
        for ( size_t i = 2; i < point_filter.size(); ++i )
        {
            check(point_filter[i] == 1, "point filter must have singular extents in spatial dimensions");
        }
        check(point_filter[1] == plane_filter[0],
              "channel dimension of point filter must equal batch dimension of plane filter");
        check(plane_filter[1] == 1, "channel dimension of plane filter must be singular");

        Shape filter = plane_filter;
        filter[0] = point_filter[0];
        filter[1] = transposed ?
point_filter[1] : input[1]; return conv_like_shape(input, filter, bias, border, padding, stride, dilation, groups, output_shape, transposed); } inline Shape conv_shape( const Shape& input, const Shape& filter, const Shape& bias, const Value& border, const Value& padding, const Value& stride, const Value& dilation, const Value& groups ) { return conv_like_shape(input, filter, bias, border, padding, stride, dilation, groups, Value::none(), false); } inline Shape deconv_shape( const Shape& input, const Shape& filter, const Shape& bias, const Value& border, const Value& padding, const Value& stride, const Value& dilation, const Value& output_shape, const Value& groups ) { return conv_like_shape(input, filter, bias, border, padding, stride, dilation, groups, output_shape, true); } inline Shape separable_conv_shape( const Shape& input, const Shape& plane_filter, const Shape& point_filter, const Shape& bias, const Value& border, const Value& padding, const Value& stride, const Value& dilation, const Value& groups ) { return separable_conv_like_shape(input, plane_filter, point_filter, bias, border, padding, stride, dilation, groups, Value::none(), false); } inline Shape separable_deconv_shape( const Shape& input, const Shape& plane_filter, const Shape& point_filter, const Shape& bias, const Value& border, const Value& padding, const Value& stride, const Value& dilation, const Value& output_shape, const Value& groups ) { return separable_conv_like_shape(input, plane_filter, point_filter, bias, border, padding, stride, dilation, groups, output_shape, true); } inline Shape pool_like_shape( const Shape& input, const Value& size, const Value& /*border*/, const Value& padding, const Value& stride, const Value& dilation, const Value& output_shape, const bool transposed ) { auto rank = input.size(); check_rank("size", size, rank); if ( padding.size() ) { check_rank("padding", padding, rank); } if ( stride.size() ) { check_rank("stride", stride, rank); } if ( dilation.size() ) { 
check_rank("dilation", dilation, rank); } check_range("size", size, 1); check_range("stride", stride, 1); check_range("dilation", dilation, 1); auto kernelShape = make_shape(size); auto strideShape = make_shape(stride, stride.size() ? 0 : rank); auto dilationShape = make_shape(dilation, dilation.size() ? 0 : rank); auto paddingShape = padding.size() ? make_padding_shape(padding) : Shape(); if ( output_shape && output_shape.size() ) { const Shape outputShape = make_shape(output_shape); check_rank("output_shape", output_shape, rank); check_range("output_shape", output_shape, 1); const Shape expected = downsize_shape(outputShape, kernelShape, paddingShape, strideShape, dilationShape, 0); check(input == expected, "expected input shape %s derived from output shape is incompatible with actual input shape %s", to_string(expected).c_str(), to_string(input).c_str()); return outputShape; } if ( transposed ) { return upsize_shape(input, kernelShape, paddingShape, strideShape, dilationShape, 0); } else { return downsize_shape(input, kernelShape, paddingShape, strideShape, dilationShape, 0); } } inline Shape sample_like_shape( const Shape& input, const Shape& index, const Value& size, const Value& border, const Value& padding, const Value& stride, const Value& dilation, const Value& output_shape, const bool transposed ) { check(index == input, "index shape incompatible with input shape (%s vs %s)", to_string(index).c_str(), to_string(input).c_str()); return pool_like_shape(input, size, border, padding, stride, dilation, output_shape, transposed); } inline Shape pool_shape( const Shape& input, const Value& size, const Value& border, const Value& padding, const Value& stride, const Value& dilation ) { return pool_like_shape(input, size, border, padding, stride, dilation, Value::none(), false); } inline Shape unpool_shape( const Shape& input, const Value& size, const Value& border, const Value& padding, const Value& stride, const Value& dilation, const Value& output_shape ) { 
return pool_like_shape(input, size, border, padding, stride, dilation, output_shape, true); } inline Shape sample_shape( const Shape& input, const Shape& index, const Value& size, const Value& border, const Value& padding, const Value& stride, const Value& dilation ) { return sample_like_shape(input, index, size, border, padding, stride, dilation, Value::none(), false); } inline Shape desample_shape( const Shape& input, const Shape& index, const Value& size, const Value& border, const Value& padding, const Value& stride, const Value& dilation, const Value& output_shape ) { return sample_like_shape(input, index, size, border, padding, stride, dilation, output_shape, true); } inline Shape normalize_shape_axes( const Shape& input, const Value& axes ) { check_axes_compatible_with_rank(axes, input.size()); return input; } inline Shape normalize_shape_size( const Shape& input, const Value& size ) { check_rank("size", size, input.size()); check_range("size", size, 1); return input; } inline Shape batchnorm_shape( const Shape& input, const Shape& mean, const Shape& variance, const Shape& offset, const Shape& scale, const Value& /*epsilon*/ ) { check(broadcastable(mean, input), "cannot broadcast 'mean' shape (%s) to 'input' shape (%s)", to_string(mean).c_str(), to_string(input).c_str()); check(broadcastable(variance, input), "cannot broadcast 'variance' shape (%s) to 'input' shape (%s)", to_string(variance).c_str(), to_string(input).c_str()); check(broadcastable(offset, input), "cannot broadcast 'offset' shape (%s) to 'input' shape (%s)", to_string(offset).c_str(), to_string(input).c_str()); check(broadcastable(scale, input), "cannot broadcast 'scale' shape (%s) to 'input' shape (%s)", to_string(scale).c_str(), to_string(input).c_str()); return input; } inline Shape roi_shape( const Shape& input, const Shape& rois, const Shape& index, const Value& size ) { check_rank("output_size", size, input.size() - 2); check_range("output_size", size, 1); check(rois.size() == 2, "'rois' 
must be a rank-2 tensor"); check(index.size() == 1, "'batch_index' must be a rank-1 tensor"); check(rois[1] == 4, "rois must be of extent 4 along dimension 1 (found %d)", (int)rois[1]); check(index[0] == rois[0], "'batch_index' must be of same length as dimension 0 of rois; found (%d vs %d)", (int)index[0], (int)rois[0]); Shape output(input.size()); output[0] = rois[0]; output[1] = input[1]; for ( size_t i = 0; i < size.size(); ++i ) { output[i+2] = (Shape::value_type)size[i].integer(); } return output; } inline Shape roi_shape_resample( const Shape& input, const Shape& rois, const Shape& index, const Value& size, const Value& rate ) { check_rank("sampling_rate", rate, input.size() - 2); check_range("sampling_rate", rate, 1); return roi_shape(input, rois, index, size); } inline Shape reshape_shape( const Shape& input, const Value& shape, const Value& axis_start, const Value& axis_count ) { check_axis_compatible_with_rank(axis_start, input.size() + 1); check_range("axis_count", axis_start, -1); const size_t offset = axis_start.integer(); const size_t length = axis_count.integer() == -1 ? 
input.size() - axis_start.integer() : axis_count.integer(); check(offset + length <= input.size(), "'axis_start' + 'axis_count' must be in range [0,%d], found %d", (int)input.size(), (int)(offset + length)); Shape output(input.begin(), input.begin() + offset); size_t autoAxis = std::numeric_limits::max(); for ( size_t i = 0; i < shape.size(); ++i ) { auto s = shape[i].integer(); if ( s == 0 ) { s = input[i + offset]; } else if ( s == -1 ) { check(autoAxis == std::numeric_limits::max(), "shape may only contain at most one -1 value"); s = 1; autoAxis = i + offset; } output.push_back(s); } output.insert(output.end(), input.begin() + offset + length, input.end()); auto inputVolume = volume_of(input, offset, length); auto outputVolume = volume_of(output, offset, shape.size()); if ( autoAxis != std::numeric_limits::max() ) { check(inputVolume % outputVolume == 0, "automatic output shape (%s) incompatible with input shape (%s)", (int)outputVolume, (int)inputVolume); output[autoAxis] = (Shape::value_type)(inputVolume / outputVolume); } else { check(inputVolume == outputVolume, "input volume (%d) does not equal output volume (%d)", (int)inputVolume, (int)outputVolume); } return output; } inline Shape transpose_shape( const Shape& input, const Value& axes ) { std::vector perm(axes.size()); for ( size_t i = 0; i < axes.size(); ++i ) { perm[i] = axes[i].integer(); } std::sort(perm.begin(), perm.end()); for ( size_t i = 0; i < perm.size(); ++i ) { check(perm[i] == i, "'axes' array must contain a permutation of dimensions from 0 to %d-1", (int)perm.size()); } Shape output = input; for ( size_t i = 0; i < axes.size(); ++i ) { auto j = axes[i].integer(); output[i] = input[j]; } return output; } inline std::vector split_shape( const Shape& value, const Value& axis, const Value& ratios ) { check_axis_compatible_with_rank(axis, value.size()); check_range("ratios", ratios, 1); auto idx = axis.integer(); Value::integer_t total = 0; for ( size_t i = 0; i < ratios.size(); ++i ) { total 
+= ratios[i].integer(); } check(value[idx] % total == 0, "sum of split ratios (%d) does not divide whole extent (%d)", (int)total, (int)value[idx]); const Value::integer_t unit = value[idx] / total; std::vector values(ratios.size()); for ( size_t i = 0; i < values.size(); ++i ) { Shape item = value; item[idx] = unit * ratios[i].integer(); values[i] = item; } return values; } inline Shape concat_shape( const std::vector& valuesShape, const Value& axis ) { check(valuesShape.size() != 0, "input array must be non-empty"); Shape outputShape = valuesShape[0]; check_axis_compatible_with_rank(axis, outputShape.size()); const size_t idx = axis.integer(); bool compatibleShape = true; for ( size_t i = 1; i < valuesShape.size(); ++i ) { auto& partShape = valuesShape[i]; if ( partShape.size() != outputShape.size() ) { compatibleShape = false; break; } for ( size_t i = 0; i < outputShape.size(); ++i ) { if ( i == idx ) { outputShape[i] += partShape[i]; } else { compatibleShape &= outputShape[i] == partShape[i]; } } } check(compatibleShape, "incompatible tensor shapes in input array"); return outputShape; } inline Shape slice_shape( const Shape& input, const Value& axes, const Value& begin, const Value& end, const Value& stride ) { check(begin.size() == axes.size() && end.size() == axes.size(), "'axes', 'begin' and 'end' arrays must have the same length"); check(stride.size() == 0 || stride.size() == axes.size(), "'stride' must have the same length as 'axes'"); check_axes_compatible_with_rank(axes, input.size()); Shape output = input; for ( size_t i = 0; i < axes.size(); ++i ) { auto axis = axes[i].integer(); auto extent = input[axis]; auto str = stride.size() ? 
stride[i].integer() : 1; auto first = begin[i].integer(); if ( first < 0 ) { first += extent; } auto last = end[i].integer(); if ( last < 0 ) { last += extent; } else if ( last == 0 && str == 1 ) { last = extent; } if ( first < 0 ) { first = -1; } if ( first > extent ) { first = extent; } if ( last < 0 ) { last = -1; } if ( last > extent ) { last = extent; } check(str != 0, "'stride' must be non-zero"); if ( str > 0 ) { check(first >= 0 && last >= first, "slice range (%d:%d:%d) is invalid for axis %d", (int)first, (int)last, (int)str, (int)axis); } else { check(first < extent && last <= first, "slice range (%d:%d:%d) is invalid for axis %d", (int)first, (int)last, (int)str, (int)axis); } output[axis] = ceil_div(last - first, str); } return output; } inline Shape stack_shape( const std::vector& inputs, const Value& axis ) { auto& input = inputs[0]; bool compatibleShapes = std::all_of(inputs.begin() + 1, inputs.end(), [&]( const Shape& shape ){ return shape == input; }); check(compatibleShapes, "incompatible tensor shapes in input array"); Shape output(input.size() + 1); check_axis_compatible_with_rank(axis, output.size()); const size_t idx = axis.integer(); for ( size_t i = 0; i < idx; ++i ) { output[i] = input[i]; } output[idx] = (Shape::value_type)inputs.size(); for ( size_t i = idx + 1; i < output.size(); ++i ) { output[i] = input[i-1]; } return output; } inline std::vector unstack_shape( const Shape& input, const Value& axis ) { check_axis_compatible_with_rank(axis, input.size()); const size_t idx = axis.integer(); Shape output(input.size() - 1); for ( size_t i = 0; i < idx; ++i ) { output[i] = input[i]; } for ( size_t i = idx; i < output.size(); ++i ) { output[i] = input[i+1]; } return std::vector(input[idx], output); } inline Shape squeeze_shape( const Shape& input, const Value& axes ) { check_axes_compatible_with_rank(axes, input.size()); for ( size_t i = 0; i < axes.size(); ++i ) { auto axis = axes[i].integer(); check(input[axis] == 1, "squeezed dimension is 
not singleton (has extent %d)", (int)input[axis]); } Shape output(input.size() - axes.size()); for ( size_t i = 0, k = 0; i < input.size(); ++i ) { if ( !contains_axis(axes, i) ) { output[k++] = input[i]; } } return output; } inline Shape unsqueeze_shape( const Shape& input, const Value& axes ) { Shape output(input.size() + axes.size()); check_axes_compatible_with_rank(axes, output.size()); for ( size_t i = 0, k = 0; i < output.size(); ++i ) { output[i] = contains_axis(axes, i) ? (Shape::value_type)1 : input[k++]; } return output; } inline Shape tile_shape( const Shape& input, const Value& repeats ) { check_rank("repeats", repeats, input.size()); check_range("repeats", repeats, 1); Shape output(input.size()); for ( size_t i = 0; i < output.size(); ++i ) { output[i] = input[i] * repeats[i].integer(); } return output; } inline Shape pad_shape( const Shape& input, const Value& padding ) { check_rank("padding", padding, input.size()); Shape output(input.size()); for ( size_t i = 0; i < output.size(); ++i ) { output[i] = padding[i][0].integer() + input[i] + padding[i][1].integer(); } return output; } inline Shape gather_shape( const Shape& input, const Shape& indices, const Value& axis ) { check_axis_compatible_with_rank(axis, input.size()); const size_t idx = axis.integer(); Shape output(input.size() + indices.size() - 1); std::copy_n(input.begin(), idx, output.begin()); std::copy_n(indices.begin(), indices.size(), output.begin() + idx); std::copy(input.begin() + idx + 1, input.end(), output.begin() + idx + indices.size()); return output; } inline Shape matmul_shape( const Shape& A, const Shape& B, const Value& trA, const Value& trB ) { check(A.size() == B.size(), "rank mismatch for A and B (%d vs %d)", (int)A.size(), (int)B.size()); auto rank = A.size(); check(rank >= 2, "rank of A and B must be at least 2, found %d", (int)rank); auto batch_dims = rank - 2; check(broadcast_compatible(A, B, batch_dims), "incompatible tensor shapes for broadcasting first %d dimensions 
(%s vs %s)", (int)batch_dims, to_string(A).c_str(), to_string(B).c_str()); auto i0 = batch_dims + 0; auto i1 = batch_dims + 1; auto m = trA.logical() ? A[i1] : A[i0]; auto n = trB.logical() ? B[i0] : B[i1]; auto kA = trA.logical() ? A[i0] : A[i1]; auto kB = trB.logical() ? B[i1] : B[i0]; check(kA == kB, "inner dimensions must agree (%d vs %d)", (int)kA, (int)kB); Shape C = broadcast_shape(A, B, batch_dims); C[i0] = m; C[i1] = n; return C; } inline Shape linear_shape( const Shape& input, const Shape& filter, const Shape& bias ) { check(input.size() == 2, "input shape must be of rank 2 (found %d)", (int)input.size()); check(filter.size() == 2, "filter shape must be of rank 2 (found %d)", (int)filter.size()); check(input[1] == filter[1], "inner dimensions must agree (%d vs %d)", (int)input[1], (int)filter[1]); if ( bias.size() ) { check(bias[1] == filter[0], "bias channels (%d) does not match filter count (%d)", (int)bias[1], (int)filter[0]); } return Shape({ input[0], filter[0] }); } inline Shape update_shape( const Shape& variable, const Shape& value ) { check(value == variable, "updated shape %s does not equal variable shape %s", to_string(value).c_str(), to_string(variable).c_str()); return variable; } inline Shape softmax_shape( const Shape& inputShape, const Value& axes ) { check_axes_compatible_with_rank(axes, inputShape.size()); return inputShape; } inline std::vector copy_n_shape( const Shape& shape, const Value& times ) { check_range("times", times, 1); return std::vector(times.integer(), shape); } inline Shape add_n_shape( const std::vector& inputs ) { check(inputs.size() != 0, "input array must be non-empty"); auto& shape = inputs[0]; for ( size_t i = 1; i < inputs.size(); ++i ) { check(inputs[i] == shape, "incompatible item shapes in array (%s vs %s)", to_string(shape).c_str(), to_string(inputs[i]).c_str()); } return shape; } inline Shape quantize_shape( const Shape& input, const Shape& min, const Shape& max, const Value& bits ) { check(broadcastable(min, 
input), "cannot broadcast 'min' shape (%s) to 'input' shape (%s)", to_string(min).c_str(), to_string(input).c_str()); check(broadcastable(max, input), "cannot broadcast 'max' shape (%s) to 'input' shape (%s)", to_string(max).c_str(), to_string(input).c_str()); check_range("bits", bits, 0); return input; } inline Shape linear_quantize_shape( const Shape& input, const Shape& min, const Shape& max, const Value& bits ) { return quantize_shape(input, min, max, bits); } inline Shape logarithmic_quantize_shape( const Shape& input, const Shape& max, const Value& bits ) { return quantize_shape(input, Shape(), max, bits); } inline Shape zero_point_linear_quantize_shape( const Shape& input, const Shape& zero_point, const Shape& scale, const Value& bits ) { check(broadcastable(zero_point, input), "cannot broadcast 'zero_point' shape (%s) to 'input' shape (%s)", to_string(zero_point).c_str(), to_string(input).c_str()); check(broadcastable(scale, input), "cannot broadcast 'scale' shape (%s) to 'input' shape (%s)", to_string(scale).c_str(), to_string(input).c_str()); check_range("bits", bits, 0); return input; } } // namespace nnef #endif ================================================ FILE: nnef-pyproject/nnef/cpp/include/nnef/common/typespec.h ================================================ /* * Copyright (c) 2017 The Khronos Group Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #ifndef _NNEF_TYPESPEC_H_ #define _NNEF_TYPESPEC_H_ #include #include #include #include #include #include #include namespace nnef { enum class Typename { Integer, Scalar, Logical, String, Generic }; inline const char* toString( const Typename& name ) { static const char* strings[] = { "integer", "scalar", "logical", "string", "?" }; return strings[(size_t)name]; } inline Typename fromString( const std::string& str ) { static const std::map typenames = { { "integer", Typename::Integer }, { "scalar", Typename::Scalar }, { "logical", Typename::Logical }, { "string", Typename::String }, }; return typenames.at(str); } class Type { public: enum Kind { Primitive, Tensor, Array, Tuple }; public: virtual ~Type() {} virtual Kind kind() const = 0; virtual bool isAttribute() const = 0; virtual bool isGeneric() const = 0; virtual std::string toString() const = 0; }; class PrimitiveType : public Type { public: PrimitiveType( const Typename name ) : _name(name) { } Typename name() const { return _name; } virtual Kind kind() const { return Primitive; } virtual bool isAttribute() const { return true; } virtual bool isGeneric() const { return _name == Typename::Generic; } virtual std::string toString() const { return nnef::toString(_name); } private: Typename _name; }; class TensorType : public Type { public: TensorType( const Type* dataType ) : _dataType(dataType) { } const Type* dataType() const { return _dataType; } virtual Kind kind() const { return Tensor; } virtual std::string toString() const { return _dataType ? 
"tensor<" + _dataType->toString() + ">" : "tensor<>"; } virtual bool isAttribute() const { return false; } virtual bool isGeneric() const { return _dataType && _dataType->isGeneric(); } private: const Type* _dataType; }; class ArrayType : public Type { public: ArrayType( const Type* itemType ) : _itemType(itemType) { } const Type* itemType() const { return _itemType; } virtual Kind kind() const { return Array; } virtual std::string toString() const { return _itemType ? _itemType->toString() + "[]" : "[]"; } virtual bool isAttribute() const { return _itemType && _itemType->isAttribute(); } virtual bool isGeneric() const { return _itemType && _itemType->isGeneric(); } private: const Type* _itemType; }; class TupleType : public Type { public: TupleType( const std::vector& itemTypes ) : _itemTypes(itemTypes) { } TupleType( const std::initializer_list& itemTypes ) : _itemTypes(itemTypes) { } size_t size() const { return _itemTypes.size(); } const Type* itemType( const size_t i ) const { return _itemTypes[i]; } virtual Kind kind() const { return Tuple; } virtual bool isAttribute() const { return std::all_of(_itemTypes.begin(), _itemTypes.end(), []( const Type* type ){ return type->isAttribute(); }); } virtual bool isGeneric() const { return std::any_of(_itemTypes.begin(), _itemTypes.end(), []( const Type* type ){ return type->isGeneric(); }); } virtual std::string toString() const { std::string str; str += '('; for ( size_t i = 0; i < _itemTypes.size(); ++i ) { if ( i ) { str += ','; } str += _itemTypes[i]->toString(); } str += ')'; return str; } private: std::vector _itemTypes; }; inline const PrimitiveType* primitiveType( const Typename name ) { static const PrimitiveType types[] = { PrimitiveType(Typename::Integer), PrimitiveType(Typename::Scalar), PrimitiveType(Typename::Logical), PrimitiveType(Typename::String), PrimitiveType(Typename::Generic), }; return &types[(size_t)name]; } inline const TensorType* tensorType( const Typename name ) { static const TensorType 
types[] = { TensorType(primitiveType(Typename::Integer)), TensorType(primitiveType(Typename::Scalar)), TensorType(primitiveType(Typename::Logical)), TensorType(primitiveType(Typename::String)), TensorType(primitiveType(Typename::Generic)), }; return &types[(size_t)name]; } inline const TensorType* tensorType() { static const TensorType type(nullptr); return &type; } inline const Type* arrayType( const Type* itemType ) { static std::map types; auto it = types.lower_bound(itemType); if ( it == types.end() || it->first != itemType ) { it = types.emplace_hint(it, itemType, itemType); } return &it->second; } inline const Type* tupleType( const std::vector& itemTypes ) { static std::map,TupleType> types; auto it = types.lower_bound(itemTypes); if ( it == types.end() || it->first != itemTypes ) { it = types.emplace_hint(it, itemTypes, itemTypes); } return &it->second; } } // namespace nnef #endif ================================================ FILE: nnef-pyproject/nnef/cpp/include/nnef/common/typeutils.h ================================================ /* * Copyright (c) 2017 The Khronos Group Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #ifndef _NNEF_TYPEUTILS_H_ #define _NNEF_TYPEUTILS_H_ #include "typespec.h" #include "prototype.h" #include "dictionary.h" #include namespace nnef { inline bool isCastable( const Type* type1, const Type* type2, bool allowPrimitiveToTensor = true, bool allowArrayToTensor = false ) { if ( type1 == type2 ) { return true; } if ( type1->kind() == type2->kind() ) { switch ( type1->kind() ) { case Type::Primitive: { auto primitiveType1 = static_cast(type1); auto primitiveType2 = static_cast(type2); return primitiveType1->name() == primitiveType2->name() || primitiveType2->name() == Typename::Generic; } case Type::Tensor: { auto tensorType1 = static_cast(type1); auto tensorType2 = static_cast(type2); if ( tensorType1->dataType() && tensorType2->dataType() ) { return isCastable(tensorType1->dataType(), tensorType2->dataType(), allowPrimitiveToTensor, allowArrayToTensor); } else { return !tensorType2->dataType(); } } case Type::Array: { auto arrayType1 = static_cast(type1); auto arrayType2 = static_cast(type2); if ( arrayType1->itemType() && arrayType2->itemType() ) { return isCastable(arrayType1->itemType(), arrayType2->itemType(), allowPrimitiveToTensor, allowArrayToTensor); } else { return !arrayType1->itemType(); } } case Type::Tuple: { auto tupleType1 = static_cast(type1); auto tupleType2 = static_cast(type2); if ( tupleType1->size() != tupleType2->size() ) { return false; } for ( size_t i = 0; i < tupleType1->size(); ++i ) { if ( !isCastable(tupleType1->itemType(i), tupleType2->itemType(i), allowPrimitiveToTensor, allowArrayToTensor) ) { return false; } } return true; } } } else if ( type1->kind() == Type::Primitive && type2->kind() == Type::Tensor && allowPrimitiveToTensor ) { auto tensorType = static_cast(type2); return !tensorType->dataType() || isCastable(type1, tensorType->dataType()); } else if ( type1->kind() == Type::Array && type2->kind() == Type::Tensor && allowArrayToTensor ) { auto arrayType = static_cast(type1); auto itemType = arrayType->itemType(); 
while ( itemType->kind() != Type::Primitive ) { if ( itemType->kind() != Type::Array ) { return false; } itemType = static_cast(itemType)->itemType(); } auto tensorType = static_cast(type2); return !tensorType->dataType() || isCastable(itemType, tensorType->dataType()); } return false; } inline const Type* commonType( const Type* type1, const Type* type2 ) { if ( isCastable(type1, type2) ) { return type2; } else if ( isCastable(type2, type1) ) { return type1; } return nullptr; } inline const Type* bindDataType( const Type* paramType, const PrimitiveType* dataType ) { if ( !paramType->isGeneric() || dataType == primitiveType(Typename::Generic) ) { return paramType; } switch ( paramType->kind() ) { case Type::Primitive: { return paramType == primitiveType(Typename::Generic) ? dataType : paramType; } case Type::Tensor: { auto tensor = static_cast(paramType); return tensor->dataType() == primitiveType(Typename::Generic) ? tensorType(dataType->name()) : paramType; } case Type::Array: { auto array = static_cast(paramType); return array->itemType() ? 
arrayType(bindDataType(array->itemType(), dataType)) : paramType; } case Type::Tuple: { auto tuple = static_cast(paramType); std::vector itemTypes(tuple->size()); for ( size_t i = 0; i < tuple->size(); ++i ) { itemTypes[i] = bindDataType(tuple->itemType(i), dataType); } return tupleType(itemTypes); } } assert(false); return nullptr; } inline void deduceDataType( const Type* paramType, const Type* argType, const PrimitiveType*& dataType ) { if ( paramType->kind() == argType->kind() ) { switch ( paramType->kind() ) { case Type::Primitive: { if ( paramType->isGeneric() ) { auto primitiveType = static_cast(argType); if ( !dataType ) { dataType = primitiveType; } else if ( dataType != argType ) { throw std::make_pair(dataType->name(), primitiveType->name()); } } break; } case Type::Tensor: { auto tensorType1 = static_cast(paramType); auto tensorType2 = static_cast(argType); if ( tensorType1->dataType() && tensorType2->dataType() ) { deduceDataType(tensorType1->dataType(), tensorType2->dataType(), dataType); } break; } case Type::Array: { auto arrayType1 = static_cast(paramType); auto arrayType2 = static_cast(argType); if ( arrayType1->itemType() && arrayType2->itemType() ) { deduceDataType(arrayType1->itemType(), arrayType2->itemType(), dataType); } break; } case Type::Tuple: { auto tupleType1 = static_cast(paramType); auto tupleType2 = static_cast(argType); assert(tupleType1->size() == tupleType2->size()); for ( size_t i = 0; i < tupleType1->size(); ++i ) { deduceDataType(tupleType1->itemType(i), tupleType2->itemType(i), dataType); } break; } } } else if ( paramType->kind() == Type::Tensor && argType->kind() == Type::Primitive ) { auto tensorType = static_cast(paramType); deduceDataType(tensorType->dataType(), argType, dataType); } } inline bool deduceDataType( const Prototype& proto, const Dictionary& types, const PrimitiveType*& dataType ) { for ( size_t i = 0; i < proto.paramCount(); ++i ) { auto& param = proto.param(i); if ( param.type()->isGeneric() ) { auto 
argType = types.at(param.name()); deduceDataType(param.type(), argType, dataType); } } return dataType != nullptr; } } // namespace nnef #endif ================================================ FILE: nnef-pyproject/nnef/cpp/include/nnef/common/value.h ================================================ /* * Copyright (c) 2017 The Khronos Group Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef _NNEF_VALUE_H_ #define _NNEF_VALUE_H_ #define CHECKS_SHOULD_THROW 1 #include #include #include namespace nnef { class Value; std::ostream& operator<<( std::ostream& os, const Value& arg ); class Value { public: typedef int integer_t; typedef float scalar_t; typedef bool logical_t; typedef std::string string_t; typedef std::vector items_t; struct identifier_t : public std::string { explicit identifier_t( const std::string& s ) : std::string(s) {} }; enum Kind { None, Integer, Scalar, Logical, String, Identifier, Array, Tuple }; private: Value( const Kind kind, const integer_t& value ) : _kind(kind), _integer(value) { } Value( const Kind kind, const scalar_t& value ) : _kind(kind), _scalar(value) { } Value( const Kind kind, const logical_t& value ) : _kind(kind), _logical(value) { } Value( const Kind kind, const string_t& value ) : _kind(kind), _string(value) { } Value( const Kind kind, const identifier_t& value ) : _kind(kind), _identifier(value) { } Value( const Kind kind, const items_t& value ) : _kind(kind), _items(value) { } Value( const Kind kind, items_t&& items ) : 
_kind(kind), _items(std::forward(items)) { } public: static const Value& none() { static const Value none; return none; } static Value integer( const integer_t& value ) { return Value(Integer, value); } static Value scalar( const scalar_t& value ) { return Value(Scalar, value); } static Value logical( const logical_t& value ) { return Value(Logical, value); } static Value string( const string_t& value ) { return Value(String, value); } static Value identifier( const std::string& value ) { return Value(Identifier, (identifier_t)value); } static Value array( const items_t& value ) { return Value(Array, value); } static Value tuple( const items_t& value ) { return Value(Tuple, value); } static Value array( items_t&& items ) { return Value(Array, std::forward(items)); } static Value tuple( items_t&& items ) { return Value(Tuple, std::forward(items)); } static Value make( const integer_t& value ) { return Value(Integer, value); } static Value make( const scalar_t& value ) { return Value(Scalar, value); } static Value make( const logical_t& value ) { return Value(Logical, value); } static Value make( const string_t& value ) { return Value(String, value); } static Value make( const identifier_t& value ) { return Value(Identifier, value); } public: Value() : _kind(None) { } Value( const Value& other ) { if ( &other != this ) { construct(other); } } Value( Value&& other ) { if ( &other != this ) { move(other); } } ~Value() { destroy(); } Value& operator=( const Value& other ) { if ( &other != this ) { destroy(); construct(other); } return *this; } Value& operator=( Value&& other ) { if ( &other != this ) { destroy(); move(other); } return *this; } explicit operator bool() const { return _kind != None; } Kind kind() const { return _kind; } const integer_t& integer() const { checkKind(Integer); return _integer; } const scalar_t& scalar() const { checkKind(Scalar); return _scalar; } const logical_t& logical() const { checkKind(Logical); return _logical; } const string_t& 
string() const { checkKind(String); return _string; } const identifier_t& identifier() const { checkKind(Identifier); return _identifier; } const items_t& array() const { checkKind(Array); return _items; } const items_t& tuple() const { checkKind(Tuple); return _items; } const items_t& items() const { checkItems(); return _items; } template const T& get() const { return get(T()); } size_t size() const { checkItems(); return _items.size(); } const Value& operator[]( const size_t i ) const { checkItems(); return _items[i]; } bool operator==( const Value& other ) const { return equals(other); } bool operator!=( const Value& other ) const { return !equals(other); } std::string toString() const { std::stringstream ss; ss << *this; return ss.str(); } private: const scalar_t& get( scalar_t ) const { return scalar(); } const integer_t& get( integer_t ) const { return integer(); } const logical_t& get( logical_t ) const { return logical(); } const string_t& get( string_t ) const { return string(); } const identifier_t& get( identifier_t ) const { return identifier(); } private: void checkKind( const Kind kind ) const { #if CHECKS_SHOULD_THROW if ( _kind != kind ) { throw std::invalid_argument("Value: kind mismatch"); } #endif } void checkItems() const { #if CHECKS_SHOULD_THROW if ( _kind != Array && _kind != Tuple ) { throw std::invalid_argument("Value: expected items"); } #endif } void move( Value& other ) { _kind = other._kind; switch ( _kind ) { case Array: case Tuple: { new(&_items) items_t(std::move(other._items)); break; } case String: { new(&_string) string_t(std::move(other._string)); break; } case Identifier: { new(&_identifier) identifier_t(std::move(other._identifier)); break; } case Integer: { _integer = other._integer; break; } case Scalar: { _scalar = other._scalar; break; } case Logical: { _logical = other._logical; break; } case None: { break; } } } void construct( const Value& other ) { _kind = other._kind; switch ( _kind ) { case Array: case Tuple: { 
new(&_items) items_t(other._items); break; } case String: { new(&_string) string_t(other._string); break; } case Identifier: { new(&_identifier) identifier_t(other._identifier); break; } case Integer: { _integer = other._integer; break; } case Scalar: { _scalar = other._scalar; break; } case Logical: { _logical = other._logical; break; } case None: { break; } } } void destroy() { switch ( _kind ) { case Array: case Tuple: { _items.~items_t(); break; } case String: { _string.~string_t(); break; } case Identifier: { _identifier.~identifier_t(); break; } default: { break; } } } bool equals( const Value& other ) const { if ( _kind != other._kind ) { return false; } switch ( _kind ) { case Array: case Tuple: { return _items == other._items; } case String: { return _string == other._string; } case Identifier: { return _identifier == other._identifier; } case Integer: { return _integer == other._integer; } case Scalar: { return _scalar == other._scalar; } case Logical: { return _logical == other._logical; } case None: { return true; } } return false; } private: Kind _kind; union { integer_t _integer; scalar_t _scalar; logical_t _logical; string_t _string; identifier_t _identifier; items_t _items; }; }; inline std::ostream& operator<<( std::ostream& os, const Value& arg ) { switch ( arg.kind() ) { case Value::None: { os << "none"; break; } case Value::Integer: { os << arg.integer(); break; } case Value::Scalar: { os << arg.scalar(); if ( (Value::integer_t)arg.scalar() == arg.scalar() ) { os << ".0"; } break; } case Value::Logical: { os << std::boolalpha << arg.logical(); break; } case Value::String: { os << '\'' << arg.string() << '\''; break; } case Value::Identifier: { os << arg.identifier(); break; } case Value::Array: { os << '['; for ( size_t i = 0; i < arg.size(); ++i ) { if ( i ) { os << ','; } os << arg[i]; } os << ']'; break; } case Value::Tuple: { os << '('; for ( size_t i = 0; i < arg.size(); ++i ) { if ( i ) { os << ','; } os << arg[i]; } os << ')'; break; } } 
return os; } inline std::vector nestedArrayShape( const Value& value ) { if ( value.kind() != Value::Array ) { return {}; } size_t rank = 1; for ( const Value* v = &value; v->size() > 0 && v->items().data()->kind() == Value::Array; v = v->items().data() ) { rank += 1; } std::vector shape(rank); const Value* v = &value; for ( size_t i = 0; i < rank; ++i, v = v->items().data() ) { shape[i] = (int)v->size(); } return shape; } } // namespace nnef #undef CHECKS_SHOULD_THROW #endif ================================================ FILE: nnef-pyproject/nnef/cpp/include/nnef/comp/comp_parser.h ================================================ /* * Copyright (c) 2017 The Khronos Group Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef _NNEF_COMP_PARSER_H_ #define _NNEF_COMP_PARSER_H_ #include "../common/dictionary.h" #include "../common/prototype.h" #include "../common/typeutils.h" #include "../common/parser.h" #include "../common/value.h" #include "../common/lexer.h" #include "../common/error.h" #include "stdlib_source.h" #include "expression.h" #include "evaluation.h" #include "fragment.h" #include #include #include #include namespace nnef { class CompParser : public Parser { public: typedef Error::Position Position; private: typedef Dictionary Fragments; typedef Dictionary Prototypes; typedef Dictionary Declarations; public: CompParser( const std::string& stdlib, const std::set& lowered = {} ) : _stdlib_source(!stdlib.empty() ? 
stdlib : stdlib_source()), _lowered(lowered), _flags(0) { } virtual void parse( std::istream& is, const char* filename, Callback& callback ) { Lexer lexer(is, filename); lexer.next(); auto version = readVersion(lexer); callback.beginDocument(filename, version); _flags = 0; auto extensions = readExtensions(lexer, [&]( const std::string& ext ) { return callback.handleExtension(ext) || handleExtension(ext); }); Prototypes prototypes; Fragments fragments; parseFragments(_stdlib_source, "stdlib", prototypes, fragments); if ( _flags & KHR_ENABLE_FRAGMENT_DEFINITIONS ) { while ( lexer.token() == Lexer::Fragment ) { auto fragment = parseFragment(lexer, prototypes, (_flags & KHR_ENABLE_OPERATOR_EXPRESSIONS) != 0); fragments.emplace(fragment.prototype().name(), std::move(fragment)); } } lexer.readToken(Lexer::Graph); auto graph = parsePrototype(lexer, prototypes, false, true); auto assignments = parseAssignments(lexer, graph, prototypes, (_flags & KHR_ENABLE_OPERATOR_EXPRESSIONS) != 0, true); callback.beginGraph(graph, prototypes); Dictionary values; Dictionary dtypes; std::set vars; Evaluation evaluation(assignments, fragments, _lowered); for ( auto& assignment : assignments ) { checkExternalsAndVariables(assignment.lhs(), assignment.rhs(), graph, vars); const Value context = evaluation.evaluateLvalue(assignment.lhs(), Dictionary(), true); evaluation.evaluateAssign(assignment.lhs(), assignment.rhs(), values, dtypes, callback, nullptr, context); } callback.endGraph(graph, dtypes); callback.endDocument(filename); lexer.readToken(Lexer::Eof); } private: bool handleExtension( const std::string& ext ) { if ( ext == "KHR_enable_fragment_definitions" ) { _flags |= KHR_ENABLE_FRAGMENT_DEFINITIONS; return true; } else if ( ext == "KHR_enable_operator_expressions" ) { _flags |= KHR_ENABLE_OPERATOR_EXPRESSIONS; return true; } return false; } static void parseFragments( const std::string& text, const char* filename, Prototypes& prototypes, Fragments& fragments ) { std::stringstream 
ss(text); Lexer lexer(ss, filename); lexer.next(); while ( lexer.token() != Lexer::Eof ) { auto fragment = parseFragment(lexer, prototypes, true); fragments.emplace(fragment.prototype().name(), std::move(fragment)); } } static Prototype parsePrototype( Lexer& lexer, const Prototypes& prototypes, bool allowTypespec, bool graph ) { auto position = lexer.position(); const std::string name = lexer.string(); lexer.readToken(Lexer::Identifier); if ( prototypes.count(name) ) { throw Error(position, "operation '%s' already defined", name.c_str()); } bool isGenericDecl = false; const PrimitiveType* genericParamDefault = nullptr; if ( !graph && lexer.readIfToken('<') ) { isGenericDecl = true; lexer.readToken('?'); if ( lexer.readIfToken('=') ) { genericParamDefault = primitiveType(getTypename(lexer)); lexer.next(); } lexer.readToken('>'); } std::vector params = parseParams(lexer, name, allowTypespec, graph); lexer.readToken(Lexer::Arrow); std::vector results = parseResults(lexer, name, allowTypespec, !graph); for ( auto& result : results ) { if ( std::find_if(params.begin(), params.end(), [&]( const Param& param ){ return param.name() == result.name(); }) != params.end() ) { throw Error(position, "invalid definition of operation '%s'; '%s' is defined both as parameter and as result", name.c_str(), result.name().c_str()); } } bool attribute = results.front().type()->isAttribute(); for ( size_t i = 1; i < results.size(); ++i ) { if ( results[i].type()->isAttribute() != attribute ) { throw Error(position, "result types of fragment must be all tensor types or all attribute types"); } } auto isGenericTyped = []( const Typed& typed ){ return typed.type()->isGeneric(); }; bool hasGenericParams = std::any_of(params.begin(), params.end(), isGenericTyped); bool hasGenericResults = std::any_of(results.begin(), results.end(), isGenericTyped); if ( (hasGenericParams || hasGenericResults) && !isGenericDecl ) { throw Error(position, "fragment with generic parameter or result types must be 
declared generic using "); } else if ( isGenericDecl && !hasGenericParams && !hasGenericResults ) { throw Error(position, "fragment declared as generic must have at least one generic parameter or result type"); } return Prototype(name, params, results, genericParamDefault); } static std::vector parseParams( Lexer& lexer, const std::string& op, bool allowTypespec, bool forceDefaults ) { std::vector params; lexer.readToken('('); bool expectAttribute = false; do { auto position = lexer.position(); auto name = lexer.string(); lexer.readToken(Lexer::Identifier); const Type* type = tensorType(); if ( allowTypespec ) { lexer.readToken(':'); type = parseTypespec(lexer, true); } if ( expectAttribute && !type->isAttribute() ) { throw Error(position, "expected attribute, found parameter of type '%s'", type->toString().c_str()); } expectAttribute |= type->isAttribute(); auto defaultValue = Value::none(); if ( lexer.token() == '=' ) { lexer.next(); auto expr = parseExpression(lexer, nullptr, nullptr, true, false, false, false); if ( !isCastable(expr->type(), type) ) { throw Error(expr->position(), "default value type '%s' cannot be cast to parameter type '%s'", expr->type()->toString().c_str(), type->toString().c_str()); } defaultValue = Evaluation::evaluateRvalue(*expr); } else if ( forceDefaults && type->isAttribute() ) { throw Error(position, "expected default value for parameter '%s'", name.c_str()); } if ( std::find_if(params.begin(), params.end(), [&]( const Param& param ){ return param.name() == name; }) != params.end() ) { throw Error(position, "duplicate parameter definition for fragment '%s'; parameter '%s' is already defined", op.c_str(), name.c_str()); } params.emplace_back(name, type, defaultValue); } while ( lexer.readIfToken(',') ); lexer.readToken(')'); return params; } static std::vector parseResults( Lexer& lexer, const std::string& op, bool allowTypespec, bool allowAttribute ) { std::vector results; lexer.readToken('('); do { auto position = lexer.position(); 
auto name = lexer.string(); lexer.readToken(Lexer::Identifier); const Type* type = tensorType(); if ( allowTypespec ) { lexer.readToken(':'); type = parseTypespec(lexer, false); if ( !allowAttribute && type->isAttribute() ) { throw Error(position, "non-tensor type not allowed in this context"); } } if ( std::find_if(results.begin(), results.end(), [&]( const Result& result ){ return result.name() == name; }) != results.end() ) { throw Error(position, "duplicate result definition for operation '%s'; result '%s' is already defined", op.c_str(), name.c_str()); } results.emplace_back(name, type); } while ( lexer.readIfToken(',') ); lexer.readToken(')'); return results; } static Fragment parseFragment( Lexer& lexer, Prototypes& prototypes, bool allowOperator ) { lexer.readToken(Lexer::Fragment); auto prototype = parsePrototype(lexer, prototypes, true, false); auto& proto = prototypes.emplace(prototype.name(), prototype).first->second; std::vector assignments; if ( !lexer.readIfToken(';') ) { assignments = parseAssignments(lexer, proto, prototypes, allowOperator, false); } return Fragment(proto, assignments); } static std::vector parseAssignments( Lexer& lexer, const Prototype& proto, const Prototypes& prototypes, bool allowOperator, bool graph ) { Declarations decls; for ( size_t i = 0; i < proto.paramCount(); ++i ) { auto& param = proto.param(i); if ( !graph || param.type()->isAttribute() ) { decls[param.name()] = param.type(); } } std::vector assignments; lexer.readToken('{'); do { auto lhs = parseTuple(lexer, nullptr, nullptr, false, true, false); lexer.readToken('='); auto rhs = allowOperator ? 
parseExpression(lexer, &prototypes, &decls, true, true, true) : parseInvocation(lexer, &prototypes, &decls); lexer.readToken(';'); declare(*lhs, rhs->type(), decls); if ( !graph ) { checkOperationsAllowed(*rhs); } assignments.emplace_back(lhs, rhs); } while ( lexer.token() != '}' ); if ( graph ) { for ( size_t i = 0; i < proto.paramCount(); ++i ) { auto& param = proto.param(i); if ( !decls.count(param.name()) ) { throw Error(lexer.position(), "graph parameter '%s' is not assigned", param.name().c_str()); } } } for ( size_t i = 0; i < proto.resultCount(); ++i ) { auto& result = proto.result(i); auto decl = decls[result.name()]; if ( !decl ) { throw Error(lexer.position(), "result '%s' of operation '%s' is not assigned", result.name().c_str(), proto.name().c_str()); } else if ( !isCastable(decl, result.type(), true) ) { throw Error(lexer.position(), "result '%s' of operation '%s' is declared as '%s' but assignment has incompatible type '%s'", result.name().c_str(), proto.name().c_str(), result.type()->toString().c_str(), decl->toString().c_str()); } } lexer.readToken('}'); return assignments; } static void checkOperationsAllowed( const Expr& rhs ) { traverse(rhs, []( const Expr& expr ) { if ( expr.kind() == Expr::Invocation ) { auto& invocation = static_cast(expr); if ( invocation.target() == "external" || invocation.target() == "variable" || invocation.target() == "update" ) { throw Error(invocation.position(), "operation '%s' not allowed inside fragments", invocation.target().c_str()); } } }); } void checkExternalsAndVariables( const Expr& lhs, const Expr& rhs, const Prototype& graph, std::set& vars ) { if ( (lhs.kind() == Expr::Array || lhs.kind() == Expr::Tuple) && rhs.kind() == lhs.kind() ) { auto& left = static_cast(lhs); auto& right = static_cast(rhs); for ( size_t i = 0; i < left.size(); ++i ) { checkExternalsAndVariables(left.item(i), right.item(i), graph, vars); } } else if ( rhs.kind() == Expr::Invocation && lhs.kind() == Expr::Identifier ) { auto& 
identifier = static_cast(lhs); auto& invocation = static_cast(rhs); if ( invocation.target() == "external" ) { if ( !graph.param(identifier.name()) ) { throw Error(identifier.position(), "identifiers assigned by operation 'external' must be graph parameters"); } } else { if ( graph.param(identifier.name()) ) { throw Error(identifier.position(), "graph parameter '%s' can only be assigned by operation 'external'", identifier.name().c_str()); } } if ( invocation.target() == "variable" ) { vars.insert(identifier.name()); } if ( invocation.target() == "update" ) { auto& arg = *invocation.arg("variable"); if ( arg.kind() != Expr::Identifier || !vars.count(static_cast(arg).name()) ) { throw Error(arg.position(), "first argument to operation 'update' must be a variable"); } } } } static void traverse( const Expr& expr, std::function func ) { func(expr); switch ( expr.kind() ) { case Expr::Literal: case Expr::Identifier: { break; } case Expr::Builtin: { auto& builtin = static_cast(expr); traverse(builtin.arg(), func); break; } case Expr::Array: case Expr::Tuple: { auto& items = static_cast(expr); for ( size_t i = 0; i < items.size(); ++i ) { traverse(items.item(i), func); } break; } case Expr::Subscript: { auto& subscript = static_cast(expr); traverse(subscript.sequence(), func); if ( subscript.begin() ) { traverse(*subscript.begin(), func); } if ( subscript.end() ) { traverse(*subscript.end(), func); } break; } case Expr::Comprehension: { auto& comprehension = static_cast(expr); for ( size_t i = 0; i < comprehension.iteratorCount(); ++i ) { traverse(comprehension.iterator(i), func); traverse(comprehension.iterable(i), func); } if ( comprehension.condition() ) { traverse(*comprehension.condition(), func); } traverse(comprehension.item(), func); break; } case Expr::Unary: { auto& unary = static_cast(expr); traverse(unary.right(), func); break; } case Expr::Binary: { auto& binary = static_cast(expr); traverse(binary.left(), func); traverse(binary.right(), func); break; } case 
Expr::Select: { auto& select = static_cast(expr); traverse(select.condition(), func); traverse(select.trueValue(), func); traverse(select.falseValue(), func); break; } case Expr::Invocation: { auto& invocation = static_cast(expr); for ( auto it = invocation.begin(); it != invocation.end(); ++it ) { traverse(*it->second, func); } break; } } } private: static const Type* parseArrayTypespec( Lexer& lexer, const Type* type ) { while ( lexer.readIfToken('[') ) { lexer.readToken(']'); type = arrayType(type); } return type; } static const Type* parseTupleTypespec( Lexer& lexer, bool allowUnboundTensor ) { auto position = lexer.position(); lexer.next(); std::vector items; do { items.push_back(parseTypespec(lexer, allowUnboundTensor)); } while ( lexer.readIfToken(',') ); lexer.readToken(')'); bool attribute = items.front()->isAttribute(); for ( size_t i = 1; i < items.size(); ++i ) { if ( items[i]->isAttribute() != attribute ) { throw Error(position, "item types in tuple type must be all attribute types or all tensor types"); } } return parseArrayTypespec(lexer, tupleType(items)); } static const Type* parseTypespec( Lexer& lexer, bool allowUnboundTensor ) { if ( lexer.token() == '(' ) { return parseTupleTypespec(lexer, allowUnboundTensor); } const Type* type = nullptr; if ( lexer.readIfToken(Lexer::Tensor) ) { lexer.readToken('<'); type = tensorType(); if ( lexer.token() != '>' ) { type = tensorType(getTypename(lexer)); lexer.next(); } else if ( !allowUnboundTensor ) { throw Error(lexer.position(), "unbound tensor not allowed in this context"); } lexer.readToken('>'); } else { const Typename name = getTypename(lexer); lexer.next(); type = primitiveType(name); } return parseArrayTypespec(lexer, type); } private: static Shared parseExpression( Lexer& lexer, const Prototypes* prototypes, Declarations* decls, bool allowLiteral, bool allowIdentifier, bool allowOperator, bool allowSelect = true ) { auto expr = parsePrimary(lexer, prototypes, decls, allowLiteral, allowIdentifier, 
allowOperator); if ( expr->kind() != Expr::Literal && allowOperator ) { expr = parseSubscripts(lexer, prototypes, decls, expr); } if ( allowOperator ) { expr = parseBinary(lexer, prototypes, decls, expr); if ( lexer.token() == Lexer::If && allowSelect ) { expr = parseSelect(lexer, prototypes, decls, expr); } } return expr; } static Shared parsePrimary( Lexer& lexer, const Prototypes* prototypes, Declarations* decls, bool allowLiteral, bool allowIdentifier, bool allowOperator ) { switch ( lexer.token() ) { case Lexer::True: case Lexer::False: { if ( allowLiteral ) { return parseLogical(lexer); } break; } case Lexer::Fractional: { if ( allowLiteral ) { return parseScalar(lexer); } break; } case Lexer::Decimal: { if ( allowLiteral ) { return parseInteger(lexer); } break; } case Lexer::Characters: { if ( allowLiteral ) { return parseString(lexer); } break; } case Lexer::Identifier: { if ( allowIdentifier ) { return parseIdentifier(lexer, prototypes, decls, allowLiteral, allowIdentifier, allowOperator); } break; } case '[': { return parseArray(lexer, prototypes, decls, allowLiteral, allowIdentifier, allowOperator); } case '(': { return parseTuple(lexer, prototypes, decls, allowLiteral, allowIdentifier, allowOperator); } case '-': { return parseUnary(lexer, prototypes, decls); } case '!': { if ( allowOperator ) { return parseUnary(lexer, prototypes, decls); } break; } case Lexer::ShapeOf: { throw Error(lexer.position(), "the use of operator 'shape_of' is deprecated and is not supported"); } case Lexer::LengthOf: case Lexer::RangeOf: case Lexer::Integer: case Lexer::Scalar: case Lexer::Logical: case Lexer::String: { if ( allowOperator ) { return parseBuiltin(lexer, prototypes, decls); } break; } default: { throw Error(lexer.position(), "unexpected token '%s'", Lexer::tokenString(lexer.token()).c_str()); } } throw Error(lexer.position(), "token '%s' not allowed in this context", Lexer::tokenString(lexer.token()).c_str()); } static Shared parseInteger( Lexer& lexer ) { auto 
position = lexer.position(); auto value = getIntegerValue(lexer); lexer.next(); return std::make_shared(position, value, primitiveType(Typename::Integer)); } /* Parse a fractional literal token into a scalar-typed Literal expression. */ static Shared parseScalar( Lexer& lexer ) { auto position = lexer.position(); auto value = getScalarValue(lexer); lexer.next(); return std::make_shared(position, value, primitiveType(Typename::Scalar)); } /* Parse 'true'/'false' into a logical-typed Literal expression. */ static Shared parseLogical( Lexer& lexer ) { auto position = lexer.position(); auto value = lexer.token() == Lexer::True; lexer.next(); return std::make_shared(position, value, primitiveType(Typename::Logical)); } /* Parse a quoted character sequence into a string-typed Literal expression. */ static Shared parseString( Lexer& lexer ) { auto position = lexer.position(); auto value = lexer.string(); lexer.next(); return std::make_shared(position, value, primitiveType(Typename::String)); } /* Parse an identifier, disambiguating between a plain reference and an invocation: a following '(' — or '<' when the name is a known prototype (generic argument list) — means invocation. */ static Shared parseIdentifier( Lexer& lexer, const Prototypes* prototypes, Declarations* decls, bool allowLiteral, bool allowIdentifier, bool allowOperator ) { auto position = lexer.position(); auto string = lexer.string(); lexer.readToken(Lexer::Identifier); if ( lexer.token() == '(' || (lexer.token() == '<' && prototypes && prototypes->count(string)) ) { return parseInvocation(lexer, prototypes, decls, position, string, allowLiteral, allowIdentifier, allowOperator); } else { return makeIdentifier(position, string, decls); } } /* Build an Identifier expression, resolving its type from the declaration map when one is supplied; an unknown name is an error. When decls is null (e.g. lhs parsing) the type is left null. */ static Shared makeIdentifier( const Position& position, const std::string& name, Declarations* decls ) { const Type* type = nullptr; if ( decls ) { type = (*decls)[name]; if ( !type ) { throw Error(position, "undeclared identifier '%s'", name.c_str()); } } return std::make_shared(position, name, type); } /* Parse a '[...]' array literal or — when it opens with 'for' — an array comprehension; continues on the next physical line. */ static Shared parseArray( Lexer& lexer, const Prototypes* prototypes, Declarations* decls, bool allowLiteral, bool allowIdentifier, bool allowOperator ) { auto position = lexer.position(); lexer.next(); std::vector> items; const Type* type = nullptr; if ( lexer.token() != ']' ) { if ( lexer.token() == Lexer::For ) { return parseComprehension(lexer, prototypes, decls, position); }
auto first = parseExpression(lexer, prototypes, decls, allowLiteral, allowIdentifier, allowOperator); items = { first }; type = first->type(); while ( lexer.readIfToken(',') ) { auto item = parseExpression(lexer, prototypes, decls, allowLiteral, allowIdentifier, allowOperator); items.push_back(item); if ( decls ) { type = commonType(type, item->type()); if ( !type ) { throw Error(position, "incompatible item types (%s vs %s) in array", first->type()->toString().c_str(), item->type()->toString().c_str()); } } } } lexer.readToken(']'); return std::make_shared(position, items, arrayType(type)); } static Shared parseTuple( Lexer& lexer, const Prototypes* prototypes, Declarations* decls, bool allowLiteral, bool allowIdentifier, bool allowOperator ) { auto position = lexer.position(); bool parenthesized = lexer.token() == '('; if ( parenthesized ) { lexer.next(); } std::vector> items; std::vector types; auto first = parseExpression(lexer, prototypes, decls, allowLiteral, allowIdentifier, allowOperator); if ( lexer.token() == ',' ) { items = { first }; types = { first->type() }; while ( lexer.readIfToken(',') ) { auto item = parseExpression(lexer, prototypes, decls, allowLiteral, allowIdentifier, allowOperator); items.push_back(item); types.push_back(item->type()); } } if ( parenthesized ) { lexer.readToken(')'); } return items.empty() ? 
first : std::make_shared(position, items, tupleType(types)); } static Shared parseInvocation( Lexer& lexer, const Prototypes* prototypes, Declarations* decls ) { auto position = lexer.position(); auto string = lexer.string(); lexer.readToken(Lexer::Identifier); if ( lexer.token() != '(' && lexer.token() != '<' ) { throw Error(position, "expected operation invocation"); } return parseInvocation(lexer, prototypes, decls, position, string, true, true, false); } static Shared parseInvocation( Lexer& lexer, const Prototypes* prototypes, Declarations* decls, const Position& position, const std::string& target, bool allowLiteral, bool allowIdentifier, bool allowOperator ) { auto it = prototypes->find(target); if ( it == prototypes->end() ) { throw Error(position, "undefined operation '%s'", target.c_str()); } const Prototype& proto = it->second; const PrimitiveType* dataType = proto.genericParamDefault(); if ( lexer.readIfToken('<') ) { dataType = primitiveType(getTypename(lexer)); lexer.next(); lexer.readToken('>'); } lexer.readToken('('); Dictionary> args; bool expectNamed = false; do { auto position = lexer.position(); if ( args.size() >= proto.paramCount() ) { throw Error(position, "too many positional arguments; definition of '%s' has only %d parameters", proto.name().c_str(), (int)proto.paramCount()); } const Param* param = nullptr; Shared arg; bool named = false; if ( lexer.token() == Lexer::Identifier ) { auto string = lexer.string(); lexer.next(); if ( lexer.readIfToken('=') ) { param = proto.param(string); if ( !param ) { throw Error(position, "operation '%s' has no parameter called '%s'", proto.name().c_str(), string.c_str()); } arg = parseExpression(lexer, prototypes, decls, allowLiteral, allowIdentifier, allowOperator); named = true; } else { param = &proto.param(args.size()); if ( lexer.token() == '(' ) { arg = parseInvocation(lexer, prototypes, decls, position, string, allowLiteral, allowIdentifier, allowOperator); } else { arg = makeIdentifier(position, 
string, decls); } arg = parseSubscripts(lexer, prototypes, decls, arg); arg = parseBinary(lexer, prototypes, decls, arg); if ( lexer.token() == Lexer::If ) { arg = parseSelect(lexer, prototypes, decls, arg); } } } else { param = &proto.param(args.size()); arg = parseExpression(lexer, prototypes, decls, allowLiteral, allowIdentifier, allowOperator); } auto paramType = dataType ? bindDataType(param->type(), dataType) : param->type(); if ( !isCastable(arg->type(), paramType) ) { throw Error(position, "argument of type '%s' cannot be cast to type '%s' for parameter '%s'", arg->type()->toString().c_str(), paramType->toString().c_str(), param->name().c_str()); } expectNamed |= named || paramType->isAttribute(); if ( expectNamed && !named ) { throw Error(position, "expected named argument"); } auto contained = args[param->name()]; if ( contained ) { auto& pos = contained->position(); throw Error(position, "duplicate arguments: parameter '%s' already assigned (%u,%u)", param->name().c_str(), pos.line, pos.column); } args[param->name()] = arg; } while ( lexer.readIfToken(',') ); for ( size_t i = 0; i < proto.paramCount(); ++i ) { auto& param = proto.param(i); if ( !args.count(param.name()) ) { if ( !param.defaultValue() ) { throw Error(lexer.position(), "missing argument for fragment '%s'; parameter '%s' not assigned", proto.name().c_str(), param.name().c_str()); } else if ( param.type()->isGeneric() ) { auto valueType = typeOf(param.defaultValue()); auto paramType = dataType ? 
bindDataType(param.type(), dataType) : param.type(); if ( !isCastable(valueType, paramType) ) { throw Error(lexer.position(), "default value type '%s' cannot be cast to type '%s' for parameter '%s'", valueType->toString().c_str(), paramType->toString().c_str(), param.name().c_str()); } } } } lexer.readToken(')'); if ( proto.isGeneric() && !dataType && !deduceDataType(proto, args, dataType, position) ) { throw Error(position, "could not deduce generic data-type"); } const Type* type = resultType(proto, dataType); return std::make_shared(position, target, args, type, dataType); } static Shared parseUnary( Lexer& lexer, const Prototypes* prototypes, Declarations* decls ) { auto position = lexer.position(); int op = lexer.token(); lexer.next(); auto rhs = parseExpression(lexer, prototypes, decls, true, true, true); auto type = unaryResultType(rhs->type(), op); if ( !type ) { throw Error(position, "invalid operand type '%s' for operation '%s'", rhs->type()->toString().c_str(), Lexer::tokenString(op).c_str()); } if ( type->kind() == Type::Tensor ) { auto target = unaryOpName(op); auto args = makeUnaryOpArgs(rhs); return std::make_shared(position, target, args, type); } else { return std::make_shared(position, rhs, op, type); } } static Shared parseBinary( Lexer& lexer, const Prototypes* prototypes, Declarations* decls, Shared lhs, int exprPrec = 0 ) { auto position = lhs->position(); while (true) { int tokPrec = tokenPrecedence(lexer.token()); if ( tokPrec < exprPrec ) { return lhs; } int op = lexer.token(); lexer.next(); auto rhs = parsePrimary(lexer, prototypes, decls, true, true, true); rhs = parseSubscripts(lexer, prototypes, decls, rhs); int nextPrec = tokenPrecedence(lexer.token()); if ( tokPrec < nextPrec ) { rhs = parseBinary(lexer, prototypes, decls, rhs, tokPrec + 1); } auto type = binaryResultType(lhs->type(), rhs->type(), op); if ( !type ) { throw Error(position, "invalid operand types '%s' and '%s' for operation '%s'", lhs->type()->toString().c_str(), 
rhs->type()->toString().c_str(), Lexer::tokenString(op).c_str()); } if ( type->kind() == Type::Tensor ) { auto target = binaryOpName(op); auto args = makeBinaryOpArgs(lhs, rhs); lhs = std::make_shared(position, target, args, type); } else { lhs = std::make_shared(position, lhs, rhs, op, type); } } } static Shared parseBuiltin( Lexer& lexer, const Prototypes* prototypes, Declarations* decls ) { auto position = lexer.position(); int op = lexer.token(); lexer.next(); lexer.readToken('('); auto arg = parseExpression(lexer, prototypes, decls, true, true, true); auto type = builtinResultType(op); if ( !type ) { throw Error(position, "invalid operand type '%s' for operation '%s'", arg->type()->toString().c_str(), Lexer::tokenString(op).c_str()); } lexer.readToken(')'); if ( op == Lexer::LengthOf ) { if ( arg->type()->kind() != Type::Array && arg->type() != primitiveType(Typename::String) ) { throw Error(position, "argument of length_of() must be an array or string (found %s)", arg->type()->toString().c_str()); } } if ( op == Lexer::ShapeOf ) { if ( arg->type()->kind() != Type::Tensor && arg->type()->kind() != Type::Primitive ) { throw Error(position, "argument of shape_of() must be of tensor or primitive type (found %s)", arg->type()->toString().c_str()); } } else if ( op == Lexer::RangeOf && arg->type() != primitiveType(Typename::String) ) { if ( arg->type()->kind() != Type::Array ) { throw Error(position, "argument of range_of() must be an array or string (found %s)", arg->type()->toString().c_str()); } } else if ( op == Lexer::Integer || op == Lexer::Scalar || op == Lexer::Logical || op == Lexer::String ) { if ( arg->type()->kind() != Type::Primitive ) { throw Error(position, "argument of %s() must be of non-tensor primitive type (found %s)", Lexer::tokenString(op).c_str(), arg->type()->toString().c_str()); } } return std::make_shared(position, arg, op, type); } static Shared parseSubscript( Lexer& lexer, const Prototypes* prototypes, Declarations* decls, const Shared 
sequence ) { lexer.next(); Shared beg, end; const Type* type = nullptr; if ( sequence->type()->kind() == Type::Tuple ) { beg = parseExpression(lexer, prototypes, decls, true, true, true); if ( beg->kind() != Expr::Literal || beg->type() != primitiveType(Typename::Integer) ) { throw Error(beg->position(), "tuple index must be an integer literal"); } auto idx = static_cast(*beg).value(); lexer.readToken(']'); type = static_cast(sequence->type())->itemType(idx); } else if ( sequence->type()->kind() == Type::Array || sequence->type() == primitiveType(Typename::String) ) { if ( lexer.token() != ':' ) { beg = parseExpression(lexer, prototypes, decls, true, true, true); if ( beg->type() != primitiveType(Typename::Integer) ) { throw Error(beg->position(), "array index must be of type integer, found '%s'", beg->type()->toString().c_str()); } } bool range = false; if ( lexer.readIfToken(':') ) { range = true; if ( lexer.token() != ']' ) { end = parseExpression(lexer, prototypes, decls, true, true, true); if ( end->type() != primitiveType(Typename::Integer) ) { throw Error(end->position(), "array index must be of type integer, found '%s'", end->type()->toString().c_str()); } } } else { end = beg; } lexer.readToken(']'); if ( sequence->type()->kind() == Type::Array ) { auto arrayType = static_cast(sequence->type()); type = range ? 
arrayType : arrayType->itemType(); } else { type = primitiveType(Typename::String); } } else { throw Error(sequence->position(), "subscripted expression must be of type array, tuple, or string; found '%s'", sequence->type()->toString().c_str()); } return std::make_shared(sequence->position(), sequence, beg, end, type); } static Shared parseSubscripts( Lexer& lexer, const Prototypes* prototypes, Declarations* decls, Shared sequence ) { while ( lexer.token() == '[' ) { sequence = parseSubscript(lexer, prototypes, decls, sequence); } return sequence; } static Shared parseSelect( Lexer& lexer, const Prototypes* prototypes, Declarations* decls, Shared trueValue ) { lexer.readToken(Lexer::If); auto condition = parseExpression(lexer, prototypes, decls, true, true, true); if ( condition->type() != primitiveType(Typename::Logical) ) { throw Error(condition->position(), "condition must be a logical value"); } lexer.readToken(Lexer::Else); auto falseValue = parseExpression(lexer, prototypes, decls, true, true, true); const Type* type = commonType(trueValue->type(), falseValue->type()); if ( !type ) { throw Error(trueValue->position(), "incompatible types in if-else expression (%s vs %s)", trueValue->type()->toString().c_str(), falseValue->type()->toString().c_str()); } return std::make_shared(trueValue->position(), condition, trueValue, falseValue, type); } static Shared parseComprehension( Lexer& lexer, const Prototypes* prototypes, Declarations* decls, const Position& position ) { lexer.readToken(Lexer::For); std::vector> iterators, iterables; do { auto iterator = parseIterator(lexer, decls); lexer.readToken(Lexer::In); auto iterable = parseExpression(lexer, prototypes, decls, true, true, true, false); if ( iterable->type()->kind() != Type::Array ) { throw Error(iterable->position(), "expression not iterable"); } iterators.push_back(iterator); iterables.push_back(iterable); auto itemType = static_cast(iterable->type())->itemType(); declare(*iterator, itemType, *decls); } 
while ( lexer.readIfToken(',') ); Shared condition = nullptr; if ( lexer.readIfToken(Lexer::If) ) { condition = parseExpression(lexer, prototypes, decls, true, true, true); if ( condition->type() != primitiveType(Typename::Logical) ) { throw Error(condition->position(), "condition in comprehension expression must be a logical expression"); } } lexer.readToken(Lexer::Yield); auto item = parseExpression(lexer, prototypes, decls, true, true, true); const Type* type = arrayType(item->type()); for ( size_t i = 0; i < iterators.size(); ++i ) { undeclare(*iterators[i], *decls); } lexer.readToken(']'); return std::make_shared(position, iterators, iterables, condition, item, type); } private: static Shared parseIterator( Lexer& lexer, const Declarations* decls ) { if ( lexer.token() == Lexer::Identifier ) { auto iterator = std::make_shared(lexer.position(), lexer.string(), nullptr); lexer.readToken(Lexer::Identifier); return iterator; } if ( lexer.token() != '(' ) { throw Error(lexer.position(), "expected tuple or identifier"); } lexer.next(); auto position = lexer.position(); std::vector> items; std::vector types; auto first = parseIterator(lexer, decls); if ( lexer.token() == ',' ) { items = { first }; types = { first->type() }; while ( lexer.readIfToken(',') ) { auto item = parseIterator(lexer, decls); items.push_back(item); types.push_back(item->type()); } } lexer.readToken(')'); return items.empty() ? 
first : std::make_shared(position, items, tupleType(types)); } private: /* Record the identifiers of an assignment lhs into the declaration map, recursing structurally: an array lhs requires an array-typed rhs, a tuple lhs a tuple-typed rhs of matching size; re-declaring a name is an error. Anything other than identifier/array/tuple cannot appear on an lhs. */ static void declare( const Expr& expr, const Type* type, Declarations& declared ) { switch ( expr.kind() ) { case Expr::Identifier: { auto& identifier = static_cast(expr); if ( declared.count(identifier.name()) ) { throw Error(expr.position(), "identifier '%s' is already declared", identifier.name().c_str()); } declared.emplace(identifier.name(), type); break; } case Expr::Array: { if ( type->kind() != Type::Array ) { throw Error(expr.position(), "cannot assign result of type '%s' to array", type->toString().c_str()); } auto& array = static_cast(expr); auto arrayType = static_cast(type); /* each array item is declared with the array's item type */ for ( size_t i = 0; i < array.size(); ++i ) { declare(array.item(i), arrayType->itemType(), declared); } break; } case Expr::Tuple: { if ( type->kind() != Type::Tuple ) { throw Error(expr.position(), "cannot assign result of type '%s' to tuple", type->toString().c_str()); } auto& tuple = static_cast(expr); auto tupleType = static_cast(type); if ( tupleType->size() != tuple.size() ) { throw Error(expr.position(), "cannot assign result of type '%s' to a tuple of size %d", type->toString().c_str(), (int)tuple.size()); } /* tuple items are declared position-wise with the corresponding item type */ for ( size_t i = 0; i < tuple.size(); ++i ) { declare(tuple.item(i), tupleType->itemType(i), declared); } break; } default: { throw Error(expr.position(), "expression not allowed in this context"); } } } /* Inverse of declare(): remove comprehension iterator names from the declaration map when their scope ends (used by parseComprehension). */ static void undeclare( const Expr& expr, Declarations& declared ) { switch ( expr.kind() ) { case Expr::Identifier: { auto& identifier = static_cast(expr); declared.erase(identifier.name()); break; } case Expr::Array: case Expr::Tuple: { auto& items = static_cast(expr); for ( size_t i = 0; i < items.size(); ++i ) { undeclare(items.item(i), declared); } break; } default: { throw Error(expr.position(), "expression not allowed in this context"); } } } private: /* Deduce the generic data-type of an invocation from its argument types; continues on the next physical line. */ static bool deduceDataType( const Prototype& proto, const Dictionary>& args, const PrimitiveType*& dataType, const Position& position ) {
Dictionary types; for ( auto& arg : args ) { types[arg.first] = arg.second->type(); } for ( size_t i = 0; i < proto.paramCount(); ++i ) { auto& param = proto.param(i); if ( !types.count(param.name()) ) { assert(param.defaultValue()); types[param.name()] = typeOf(param.defaultValue()); } } try { return nnef::deduceDataType(proto, types, dataType); } catch ( std::pair e ) { throw Error(position, "could not deduce data-type: ambiguous candidates '%s' vs '%s'", toString(e.first), toString(e.second)); } } static const Type* resultType( const Prototype& proto, const PrimitiveType* dataType ) { if ( proto.resultCount() == 1 ) { return dataType ? bindDataType(proto.result(0).type(), dataType) : proto.result(0).type(); } std::vector types(proto.resultCount()); for ( size_t i = 0; i < proto.resultCount(); ++i ) { types[i] = dataType ? bindDataType(proto.result(i).type(), dataType) : proto.result(i).type(); } return tupleType(types); } static const Type* unaryResultType( const Type* argType, int op ) { switch ( op ) { case '-': case '+': { if ( argType == primitiveType(Typename::Integer) || argType == primitiveType(Typename::Scalar) || argType == tensorType(Typename::Scalar) ) { return argType; } break; } case '!': { if ( argType == primitiveType(Typename::Logical) || argType == tensorType(Typename::Scalar) ) { return argType; } break; } } return nullptr; } static const Type* binaryResultType( const Type* lhsType, const Type* rhsType, int op ) { if ( op == Lexer::In && rhsType->kind() == Type::Array ) { return primitiveType(Typename::Logical); } else if ( op == '+' && lhsType->kind() == Type::Array && rhsType == lhsType ) { return lhsType; } else if ( op == '*' ) { if ( lhsType->kind() == Type::Array && rhsType == primitiveType(Typename::Integer) ) { return lhsType; } if ( rhsType->kind() == Type::Array && lhsType == primitiveType(Typename::Integer) ) { return rhsType; } } const Type* argType = commonType(lhsType, rhsType); switch ( op ) { case '<': case '>': case Lexer::Le: 
case Lexer::Ge: case Lexer::Eq: case Lexer::Ne: { return argType == tensorType(Typename::Scalar) ? (const Type*)tensorType(Typename::Logical) : (const Type*)primitiveType(Typename::Logical); } case '+': case '*': { if ( argType == primitiveType(Typename::String) ) { return argType; } } case '-': case '/': case '^': { if ( argType == primitiveType(Typename::Integer) || argType == primitiveType(Typename::Scalar) || argType == tensorType(Typename::Scalar) ) { return argType; } break; } case Lexer::And: case Lexer::Or: { if ( argType == primitiveType(Typename::Logical) || argType == tensorType(Typename::Scalar) ) { return argType; } break; } } return nullptr; } static const Type* builtinResultType( int op ) { switch ( op ) { case Lexer::LengthOf: { return primitiveType(Typename::Integer); } case Lexer::ShapeOf: { return arrayType(primitiveType(Typename::Integer)); } case Lexer::RangeOf: { return arrayType(primitiveType(Typename::Integer)); } case Lexer::Integer: { return primitiveType(Typename::Integer); } case Lexer::Scalar: { return primitiveType(Typename::Scalar); } case Lexer::String: { return primitiveType(Typename::String); } case Lexer::Logical: { return primitiveType(Typename::Logical); } } return nullptr; } static const Type* typeOf( const Value& value ) { switch ( value.kind() ) { case Value::Integer: { return primitiveType(Typename::Integer); } case Value::Scalar: { return primitiveType(Typename::Scalar); } case Value::Logical: { return primitiveType(Typename::Logical); } case Value::String: { return primitiveType(Typename::String); } case Value::Array: { auto itemType = value.size() ? 
typeOf(value[0]) : nullptr; return arrayType(itemType); } case Value::Tuple: { std::vector itemTypes(value.size()); for ( size_t i = 0; i < value.size(); ++i ) { itemTypes[i] = typeOf(value[i]); } return tupleType(itemTypes); } case Value::Identifier: case Value::None: { return nullptr; } } assert(false); return nullptr; } /* Binary-operator precedence table used by parseBinary(); higher binds tighter ('in' lowest at 10, '^' highest at 60). Returns -1 for tokens that are not binary operators, which terminates precedence climbing. */ static int tokenPrecedence( int token ) { static const std::map precedence = { std::make_pair(Lexer::In, 10), std::make_pair(Lexer::And, 20), std::make_pair(Lexer::Or, 20), std::make_pair(Lexer::Le, 30), std::make_pair(Lexer::Ge, 30), std::make_pair(Lexer::Eq, 30), std::make_pair(Lexer::Ne, 30), std::make_pair('<', 30), std::make_pair('>', 30), std::make_pair('+', 40), std::make_pair('-', 40), std::make_pair('*', 50), std::make_pair('/', 50), std::make_pair('^', 60), }; auto it = precedence.find(token); return it != precedence.end() ? it->second : -1; } private: /* Map a unary operator token to the NNEF fragment name it lowers to on tensors. */ static const char* unaryOpName( int op ) { switch (op) { case '+': return "copy"; case '-': return "neg"; case '!': return "not"; default: return nullptr; } } /* Map a binary operator token to the NNEF fragment name it lowers to on tensors. */ static const char* binaryOpName( int op ) { switch (op) { case '+': return "add"; case '-': return "sub"; case '*': return "mul"; case '/': return "div"; case '^': return "pow"; case '<': return "lt"; case '>': return "gt"; case Lexer::Le: return "le"; case Lexer::Ge: return "ge"; case Lexer::Eq: return "eq"; case Lexer::Ne: return "ne"; case Lexer::And: return "and"; case Lexer::Or: return "or"; default: return nullptr; } } /* Build the {"x": rhs} argument dictionary for a lowered unary tensor op. */ static Dictionary> makeUnaryOpArgs( const Shared& right ) { const Dictionary> args = { { "x", right }, }; return args; } /* Build the {"x": lhs, "y": rhs} argument dictionary for a lowered binary tensor op. */ static Dictionary> makeBinaryOpArgs( const Shared left, const Shared right ) { const Dictionary> args = { { "x", left }, { "y", right }, }; return args; } private: /* Check that a graph-parameter value structurally matches a declared type (identifiers stand for tensors); continues on the next physical line. */ static bool checkGraphParamType( const Value& value, const Type* type ) { switch ( value.kind() ) { case Value::Integer: { return type == primitiveType(Typename::Integer); } case Value::Scalar: { return type ==
primitiveType(Typename::Scalar); } case Value::Logical: { return type == primitiveType(Typename::Logical); } case Value::String: { return type == primitiveType(Typename::String); } case Value::Identifier: { return type == tensorType(); } case Value::Array: { if ( type->kind() != Type::Array ) { return false; } auto arrayType = static_cast(type); for ( size_t i = 0; i < value.size(); ++i ) { if ( !checkGraphParamType(value[i], arrayType->itemType()) ) { return false; } } return true; } case Value::Tuple: { if ( type->kind() != Type::Tuple ) { return false; } auto tupleType = static_cast(type); for ( size_t i = 0; i < value.size(); ++i ) { if ( !checkGraphParamType(value[i], tupleType->itemType(i)) ) { return false; } } return true; } case Value::None: { return false; } } } private: const std::string _stdlib_source; const std::set& _lowered; size_t _flags; }; } // namespace nnef #endif ================================================ FILE: nnef-pyproject/nnef/cpp/include/nnef/comp/evaluation.h ================================================ /* * Copyright (c) 2017 The Khronos Group Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #ifndef _NNEF_EVALUATION_H_ #define _NNEF_EVALUATION_H_ #include "../common/dictionary.h" #include "../common/error.h" #include "../common/value.h" #include "../common/parser.h" #include "expression.h" #include "fragment.h" #include #include #include namespace nnef { class Evaluation { typedef Dictionary Fragments; typedef Parser::Callback Callback; public: Evaluation( const std::vector& assignments, const Fragments& fragments, const std::set& lowered ) : _fragments(fragments), _lowered(lowered) { for ( auto& assignment : assignments ) { addReservedIdentifiers(assignment.lhs()); } } public: static Value evaluateLvalue( const Expr& expr, const Dictionary& values, bool fallbackToIds ) { switch ( expr.kind() ) { case Expr::Identifier: { auto& identifier = static_cast(expr); auto it = values.find(identifier.name()); return it != values.end() ? it->second : (fallbackToIds ? Value::identifier(identifier.name()) : Value::identifier("")); } case Expr::Array: { auto& array = static_cast(expr); Value::items_t items(array.size()); for ( size_t i = 0; i < array.size(); ++i ) { items[i] = evaluateLvalue(array.item(i), values, fallbackToIds); } return Value::array(items); } case Expr::Tuple: { auto& tuple = static_cast(expr); Value::items_t items(tuple.size()); for ( size_t i = 0; i < tuple.size(); ++i ) { items[i] = evaluateLvalue(tuple.item(i), values, fallbackToIds); } return Value::tuple(items); } default: { assert(false); return Value::none(); } } } static Value evaluateRvalue( const Expr& expr ) { switch ( expr.kind() ) { case Expr::Literal: { return evaluateLiteral(expr); } case Expr::Array: case Expr::Tuple: { auto& sequence = static_cast(expr); Value::items_t items(sequence.size()); for ( size_t i = 0; i < sequence.size(); ++i ) { items[i] = evaluateRvalue(sequence.item(i)); } return expr.kind() == Expr::Array ? 
Value::array(items) : Value::tuple(items); } case Expr::Unary: { auto& unary = static_cast(expr); if ( unary.op() == '-' ) { auto arg = evaluateRvalue(unary.right()); if ( arg.kind() == Value::Integer ) { return Value::integer(-arg.integer()); } else if ( arg.kind() == Value::Scalar ) { return Value::scalar(-arg.scalar()); } } } default: { assert(false); return Value::none(); } } } void evaluateAssign( const Expr& lhs, const Expr& rhs, Dictionary& values, Dictionary& dtypes, Callback& callback, const PrimitiveType* dtype, const Value& context ) { auto value = evaluate(rhs, values, dtypes, callback, dtype, context); assign(lhs, value, values, dtypes, callback); } private: Value evaluate( const Expr& expr, const Dictionary& values, Dictionary& dtypes, Callback& callback, const PrimitiveType* dtype, const Value& context = Value::none() ) { switch ( expr.kind() ) { case Expr::Literal: { return evaluateLiteral(expr); } case Expr::Identifier: { return evaluate(static_cast(expr), values); } case Expr::Array: { return evaluate(static_cast(expr), values, dtypes, callback, dtype, context); } case Expr::Tuple: { return evaluate(static_cast(expr), values, dtypes, callback, dtype, context); } case Expr::Subscript: { return evaluate(static_cast(expr), values, dtypes, callback, dtype); } case Expr::Unary: { return evaluate(static_cast(expr), values, dtypes, callback, dtype); } case Expr::Binary: { return evaluate(static_cast(expr), values, dtypes, callback, dtype); } case Expr::Select: { return evaluate(static_cast(expr), values, dtypes, callback, dtype, context); } case Expr::Comprehension: { return evaluate(static_cast(expr), values, dtypes, callback, dtype, context); } case Expr::Builtin: { return evaluate(static_cast(expr), values, dtypes, callback, dtype); } case Expr::Invocation: { return evaluate(static_cast(expr), values, dtypes, callback, dtype, context); } default: { assert(false); return Value::none(); } } } static Value evaluateLiteral( const Expr& expr ) { auto type 
= static_cast(*expr.type()); switch ( type.name() ) { case Typename::Integer: { return evaluate(static_cast(expr)); } case Typename::Scalar: { return evaluate(static_cast(expr)); } case Typename::Logical: { return evaluate(static_cast(expr)); } case Typename::String: { return evaluate(static_cast(expr)); } default: { assert(false); return Value::none(); } } } static Value evaluate( const ScalarExpr& scalar ) { return Value::scalar(scalar.value()); } static Value evaluate( const IntegerExpr& integer ) { return Value::integer(integer.value()); } static Value evaluate( const LogicalExpr& logical ) { return Value::logical(logical.value()); } static Value evaluate( const StringExpr& string ) { return Value::string(string.value()); } static Value evaluate( const IdentifierExpr& identifier, const Dictionary& values ) { if ( !values.count(identifier.name()) ) { throw Error(identifier.position(), "undefined identifier '%s'", identifier.name().c_str()); } return values.at(identifier.name()); } Value evaluate( const ArrayExpr& array, const Dictionary& values, Dictionary& dtypes, Callback& callback, const PrimitiveType* dtype, const Value& context ) { Value::items_t items(array.size()); for ( size_t i = 0; i < array.size(); ++i ) { auto ctx = context.kind() == Value::Array ? context[i] : Value::none(); items[i] = evaluate(array.item(i), values, dtypes, callback, dtype, ctx); } return Value::array(items); } Value evaluate( const TupleExpr& tuple, const Dictionary& values, Dictionary& dtypes, Callback& callback, const PrimitiveType* dtype, const Value& context ) { Value::items_t items(tuple.size()); for ( size_t i = 0; i < tuple.size(); ++i ) { auto ctx = context.kind() == Value::Tuple ? 
context[i] : Value::none(); items[i] = evaluate(tuple.item(i), values, dtypes, callback, dtype, ctx); } return Value::tuple(items); } Value evaluate( const SubscriptExpr& subscript, const Dictionary& values, Dictionary& dtypes, Callback& callback, const PrimitiveType* dtype ) { Value sequence = evaluate(subscript.sequence(), values, dtypes, callback, dtype); if ( subscript.isRange() ) { Value::integer_t i = subscript.begin() ? evaluate(*subscript.begin(), values, dtypes, callback, dtype).integer() : (Value::integer_t)0; if ( i < 0 ) { i += (Value::integer_t)sequence.size(); } if ( i < 0 || i > (Value::integer_t)sequence.size() ) { throw Error(subscript.position(), "range begin (%d) out of bounds (size = %d)", (int)i, (int)sequence.size()); } Value::integer_t j = subscript.end() ? evaluate(*subscript.end(), values, dtypes, callback, dtype).integer() : (Value::integer_t)sequence.size(); if ( j < 0 ) { j += (Value::integer_t)sequence.size(); } if ( j < 0 || j > (Value::integer_t)sequence.size() ) { throw Error(subscript.position(), "range end (%d) out of bounds (size = %d)", (int)j, (int)sequence.size()); } if ( j < i ) { throw Error(subscript.position(), "invalid range: %d:%d", (int)i, (int)j); } if ( sequence.kind() == Value::String ) { return Value::string(sequence.string().substr(i,j-i)); } else { auto it = sequence.items().begin(); Value::items_t items(it + i, it + j); return Value::array(items); } } else { Value::integer_t index = evaluate(*subscript.begin(), values, dtypes, callback, dtype).integer(); if ( index < 0 ) { index += (Value::integer_t)sequence.size(); } if ( index < 0 || index >= (Value::integer_t)sequence.size() ) { throw Error(subscript.position(), "index (%d) out of bounds (size = %d)", (int)index, (int)sequence.size()); } if ( sequence.kind() == Value::String ) { return Value::string(sequence.string().substr(index,1)); } else { return sequence[index]; } } } Value evaluate( const UnaryExpr& unary, const Dictionary& values, Dictionary& dtypes, 
Callback& callback, const PrimitiveType* dtype ) { Value right = evaluate(unary.right(), values, dtypes, callback, dtype); if ( unary.op() == '!' ) { if ( right.kind() == Value::Logical ) { return Value::logical(!right.logical()); } } else if ( unary.op() == '-' ) { if ( right.kind() == Value::Integer ) { return Value::integer(-right.integer()); } else if ( right.kind() == Value::Scalar ) { return Value::scalar(-right.scalar()); } } else if ( unary.op() == '+' ) { return right; } assert(false); return Value::none(); } Value evaluate( const BinaryExpr& binary, const Dictionary& values, Dictionary& dtypes, Callback& callback, const PrimitiveType* dtype ) { bool lazy = binary.op() == Lexer::And || binary.op() == Lexer::Or; Value left = evaluate(binary.left(), values, dtypes, callback, dtype); Value right = lazy ? Value::none() : evaluate(binary.right(), values, dtypes, callback, dtype); switch ( binary.op() ) { case '+': { if ( left.kind() == Value::String && right.kind() == Value::String ) { return Value::string(left.string() + right.string()); } else if ( left.kind() == Value::Array && right.kind() == Value::Array ) { Value::items_t items = left.array(); items.insert(items.end(), right.array().begin(), right.array().end()); return Value::array(items); } else { return evaluateBinary(left, right); } } case '*': { if ( left.kind() == Value::String && right.kind() == Value::Integer ) { Value::string_t str; for ( size_t i = 0; i < (size_t)right.integer(); ++i ) { str += left.string(); } return Value::string(str); } else if ( left.kind() == Value::Array && right.kind() == Value::Integer ) { Value::items_t items; for ( size_t i = 0; i < (size_t)right.integer(); ++i ) { items.insert(items.end(), left.array().begin(), left.array().end()); } return Value::array(items); } else { return evaluateBinary(left, right); } } case '-': { return evaluateBinary(left, right); } case '/': { return evaluateBinary(left, right); } case '^': { return evaluateBinary(left, right); } case '<': { 
return evaluateBinary(left, right); } case '>': { return evaluateBinary(left, right); } case Lexer::Le: { return evaluateBinary(left, right); } case Lexer::Ge: { return evaluateBinary(left, right); } case Lexer::Eq: { return evaluateBinary(left, right); } case Lexer::Ne: { return evaluateBinary(left, right); } case Lexer::And: { return !left.logical() ? left : evaluate(binary.right(), values, dtypes, callback, dtype); } case Lexer::Or: { return left.logical() ? left : evaluate(binary.right(), values, dtypes, callback, dtype); } case Lexer::In: { auto& items = right.array(); bool contains = std::find(items.begin(), items.end(), left) != items.end(); return Value::logical(contains); } default: { break; } } assert(false); return Value::none(); } Value evaluate( const SelectExpr& select, const Dictionary& values, Dictionary& dtypes, Callback& callback, const PrimitiveType* dtype, const Value& context ) { Value condition = evaluate(select.condition(), values, dtypes, callback, dtype); return condition.logical() ? 
evaluate(select.trueValue(), values, dtypes, callback, dtype, context) : evaluate(select.falseValue(), values, dtypes, callback, dtype, context); } Value evaluate( const ComprehensionExpr& comprehension, const Dictionary& values, Dictionary& dtypes, Callback& callback, const PrimitiveType* dtype, const Value& context ) { std::vector iterables; for ( size_t i = 0; i < comprehension.iteratorCount(); ++i ) { auto iterable = evaluate(comprehension.iterable(i), values, dtypes, callback, dtype); iterables.push_back(iterable); } const size_t length = iterables.front().size(); for ( size_t i = 1; i < iterables.size(); ++i ) { if ( iterables[i].size() != length ) { throw Error(comprehension.position(), "iterables must have the same length in array comprehension"); } } Value::items_t items; Dictionary ids = values; for ( size_t i = 0; i < length; ++i ) { for ( size_t k = 0; k < iterables.size(); ++k ) { assign(comprehension.iterator(k), iterables[k][i], ids, dtypes, callback); } bool accept = comprehension.condition() ? evaluate(*comprehension.condition(), ids, dtypes, callback, dtype).logical() : true; if ( accept ) { auto ctx = context.kind() == Value::Array && items.size() < context.size() ? context[items.size()] : Value::none(); auto item = evaluate(comprehension.item(), ids, dtypes, callback, dtype, ctx); items.push_back(item); } for ( size_t k = 0; k < iterables.size(); ++k ) { unassign(comprehension.iterator(k), ids); } } return Value::array(items); } Value evaluate( const InvocationExpr& invocation, const Dictionary& values, Dictionary& dtypes, Callback& callback, const PrimitiveType* dtype, const Value& context ) { auto& fragment = _fragments.at(invocation.target()); auto& proto = fragment.prototype(); Dictionary ids; for ( size_t i = 0; i < proto.paramCount(); ++i ) { auto& param = proto.param(i); auto arg = invocation.arg(param.name()); ids[param.name()] = arg ? 
evaluate(*arg, values, dtypes, callback, dtype) : param.defaultValue(); } const PrimitiveType* dataType = invocation.dataType() == primitiveType(Typename::Generic) ? dtype : invocation.dataType(); if ( dataType ) { ids["?"] = Value::string(dataType->toString()); } if ( !invocation.type()->isAttribute() ) { if ( context ) { checkStructure(context, invocation.type(), invocation.position()); } auto& resultType = static_cast(*invocation.type()); if ( proto.resultCount() == 1 ) { ids[proto.result(0).name()] = getResultValue(context, resultType, proto.name()); } else { assert(context.kind() == Value::Tuple); for ( size_t i = 0; i < proto.resultCount(); ++i ) { ids[proto.result(i).name()] = getResultValue(context[i], *resultType.itemType(i), proto.name()); } } } bool lower = fragment.assignmentCount() && _lowered.count(proto.name()); if ( lower ) { for ( size_t i = 0; i < fragment.assignmentCount(); ++i ) { auto& assignment = fragment.assignment(i); const Value ctx = evaluateLvalue(assignment.lhs(), ids, false); try { evaluateAssign(assignment.lhs(), assignment.rhs(), ids, dtypes, callback, dataType, ctx); } catch ( const Error& e ) { throw Error(chain(e.position(), invocation.position()), e.what()); } } } Value value; if ( proto.resultCount() == 1 ) { value = ids[proto.result(0).name()]; } else { Value::items_t items(proto.resultCount()); for ( size_t i = 0; i < proto.resultCount(); ++i ) { items[i] = ids[proto.result(i).name()]; } value = Value::tuple(items); } if ( hasNone(value) ) { throw Error(invocation.position(), "could not evaluate invocation (possibly unknown result array length)"); } if ( !lower ) { declare(value, invocation.type(), dtypes, dtype); callback.operation(proto, ids, dtypes); } return value; } Value evaluate( const BuiltinExpr& builtin, const Dictionary& values, Dictionary& dtypes, Callback& callback, const PrimitiveType* dtype ) { Value arg = evaluate(builtin.arg(), values, dtypes, callback, dtype); switch ( builtin.op() ) { case Lexer::LengthOf: { 
auto length = arg.kind() == Value::String ? arg.string().length() : arg.array().size(); return Value::integer((Value::integer_t)length); } case Lexer::RangeOf: { auto length = arg.kind() == Value::String ? arg.string().length() : arg.array().size(); Value::items_t items(length); for ( size_t i = 0; i < length; ++i ) { items[i] = Value::integer((Value::integer_t)i); } return Value::array(items); } case Lexer::ShapeOf: { throw Error(builtin.position(), "the use of operator 'shape_of' is deprecated and is not supported"); } case Lexer::Integer: { if ( arg.kind() == Value::Integer ) { return arg; } else if ( arg.kind() == Value::Scalar ) { return Value::integer((Value::integer_t)arg.scalar()); } else if ( arg.kind() == Value::Logical ) { return Value::integer((Value::integer_t)arg.logical()); } else if ( arg.kind() == Value::String ) { char* end; const char* str = arg.string().c_str(); auto value = (Value::integer_t)std::strtol(str, &end, 10); if ( end == str ) { throw Error(builtin.position(), "cannot convert string '%s' to integer", str); } return Value::integer(value); } break; } case Lexer::Scalar: { if ( arg.kind() == Value::Scalar ) { return arg; } else if ( arg.kind() == Value::Integer ) { return Value::scalar((Value::scalar_t)arg.integer()); } else if ( arg.kind() == Value::Logical ) { return Value::scalar((Value::scalar_t)arg.logical()); } else if ( arg.kind() == Value::String ) { char* end; const char* str = arg.string().c_str(); auto value = (Value::scalar_t)std::strtof(str, &end); if ( end == str ) { throw Error(builtin.position(), "cannot convert string '%s' to scalar", str); } return Value::scalar(value); } break; } case Lexer::Logical: { if ( arg.kind() == Value::Logical ) { return arg; } else if ( arg.kind() == Value::Integer ) { return Value::logical(arg.integer() != 0); } else if ( arg.kind() == Value::Scalar ) { return Value::logical(arg.scalar() != 0); } else if ( arg.kind() == Value::String ) { return Value::logical(!arg.string().empty()); } break; 
} case Lexer::String: { if ( arg.kind() == Value::Logical ) { return Value::string(std::to_string(arg.logical())); } else if ( arg.kind() == Value::Integer ) { return Value::string(std::to_string(arg.integer())); } else if ( arg.kind() == Value::Scalar ) { return Value::string(std::to_string(arg.scalar())); } else if ( arg.kind() == Value::String ) { return arg; } break; } default: { break; } } assert(false); return Value::none(); } private: template class Op> static Value evaluateBinary( const Value& left, const Value& right ) { if ( left.kind() == Value::Integer && right.kind() == Value::Integer ) { Op op; return Value::make(op(left.integer(), right.integer())); } else if ( left.kind() == Value::Scalar && right.kind() == Value::Scalar ) { Op op; return Value::make(op(left.scalar(), right.scalar())); } assert(false); return Value::none(); } static Typename dtypeOf( const Value& value, const Dictionary& dtypes ) { switch ( value.kind() ) { case Value::Scalar: return Typename::Scalar; case Value::Integer: return Typename::Integer; case Value::Logical: return Typename::Logical; case Value::String: return Typename::String; case Value::Identifier: return dtypes.at(value.identifier()); default: assert(false); return Typename::Generic; } } void insertCopy( const Value& lvalue, const Value& rvalue, Dictionary& dtypes, Callback& callback ) { const Typename dtype = dtypeOf(rvalue, dtypes); const Value dvalue = Value::string(toString(dtype)); const Prototype& proto = _fragments.at("copy").prototype(); const Dictionary args = { std::make_pair("x", rvalue), std::make_pair("y", lvalue), std::make_pair("?", dvalue) }; dtypes[lvalue.identifier()] = dtype; callback.operation(proto, args, dtypes); } void assign( const Expr& lhs, const Value& rvalue, Dictionary& ids, Dictionary& dtypes, Callback& callback ) { switch ( lhs.kind() ) { case Expr::Array: { auto& array = static_cast(lhs); if ( array.size() != rvalue.size() ) { throw Error(lhs.position(), "cannot assign array of length %d 
to array of length %d", (int)rvalue.size(), (int)array.size()); } for ( size_t i = 0; i < array.size(); ++i ) { assign(array.item(i), rvalue[i], ids, dtypes, callback); } break; } case Expr::Tuple: { auto& tuple = static_cast(lhs); assert(tuple.size() == rvalue.size()); for ( size_t i = 0; i < tuple.size(); ++i ) { assign(tuple.item(i), rvalue[i], ids, dtypes, callback); } break; } case Expr::Identifier: { auto& identifier = static_cast(lhs); auto& lvalue = ids[identifier.name()]; if ( lvalue ) { if ( lvalue != rvalue ) { if ( lvalue.kind() == Value::Array || lvalue.kind() == Value::Tuple ) { if ( lvalue.kind() == Value::Array && lvalue.size() != rvalue.size() ) { throw Error(lhs.position(), "cannot assign array of length %d to array of length %d", (int)rvalue.size(), (int)lvalue.size()); } for ( size_t i = 0; i < lvalue.size(); ++i ) { insertCopy(lvalue[i], rvalue[i], dtypes, callback); } } else { assert(lvalue.kind() == Value::Identifier); insertCopy(lvalue, rvalue, dtypes, callback); } } } else { lvalue = rvalue; } break; } default: { assert(false); break; } } } void unassign( const Expr& lhs, Dictionary& ids ) { switch ( lhs.kind() ) { case Expr::Array: case Expr::Tuple: { auto& items = static_cast(lhs); for ( size_t i = 0; i < items.size(); ++i ) { unassign(items.item(i), ids); } break; } case Expr::Identifier: { auto& identifier = static_cast(lhs); ids.erase(identifier.name()); break; } default: { assert(false); break; } } } static void declare( const Value& arg, const Type* type, Dictionary& dtypes, const PrimitiveType* dtype ) { switch ( arg.kind() ) { case Value::Identifier: { assert(type->kind() == Type::Tensor); const std::string& id = arg.identifier(); auto tensorType = static_cast(type); assert(tensorType->dataType()->kind() == Type::Primitive); auto dataType = static_cast(tensorType->dataType()); auto name = dataType->name() == Typename::Generic ? 
dtype->name() : dataType->name(); assert(!dtypes.count(id) || dtypes.at(id) == name); dtypes.emplace(id, name); break; } case Value::Array: { assert(type->kind() == Type::Array); auto arrayType = static_cast(type); for ( size_t i = 0; i < arg.size(); ++i ) { declare(arg[i], arrayType->itemType(), dtypes, dtype); } break; } case Value::Tuple: { assert(type->kind() == Type::Tuple); auto tupleType = static_cast(type); for ( size_t i = 0; i < arg.size(); ++i ) { declare(arg[i], tupleType->itemType(i), dtypes, dtype); } break; } default: { break; } } } static void checkStructure( const Value& value, const Type* type, const Error::Position& position ) { switch ( type->kind() ) { case Type::Primitive: case Type::Tensor: { if ( value.kind() != Value::Identifier ) { throw Error(position, "invocation context mismatch: expected identifier on left hand side to match type '%s'", type->toString().c_str()); } break; } case Type::Array: { if ( value.kind() == Value::Identifier || value.kind() == Value::None ) { break; } if ( value.kind() != Value::Array ) { throw Error(position, "invocation context mismatch: expected array on left hand side to match type '%s'", type->toString().c_str()); } auto& array = static_cast(*type); for ( size_t i = 0; i < value.size(); ++i ) { checkStructure(value[i], array.itemType(), position); } break; } case Type::Tuple: { if ( value.kind() != Value::Tuple ) { throw Error(position, "invocation context mismatch: expected tuple on left hand side to match type '%s'", type->toString().c_str()); } auto& tuple = static_cast(*type); for ( size_t i = 0; i < value.size(); ++i ) { checkStructure(value[i], tuple.itemType(i), position); } break; } } } private: typedef Error::Position Position; Position chain( const Position& position, const Position& origin ) { const Position chained = { position.line, position.column, position.filename, &origin }; return chained; } private: std::string nextTensorId( const std::string& op ) { return op + 
std::to_string(++_tensorCounts[op]); } std::string makeTensorId( const std::string& op ) { std::string id; do { id = nextTensorId(op); } while ( isReservedId(id) ); _reservedIds.insert(id); return id; } Value makeResultValue( const std::string& op, size_t idx ) { auto id = makeTensorId(op); return Value::identifier(idx ? indexedId(id, idx) : id); } Value getResultValue( const Value& context, const Type& type, const std::string op, size_t idx = 0 ) { if ( !context ) { if ( type.kind() == Type::Array ) { return Value::none(); } return makeResultValue(op, idx); } else if ( context.kind() == Value::Identifier ) { if ( type.kind() == Type::Array ) { return Value::none(); } return context.identifier() != "" ? context : makeResultValue(op, idx); } else if ( context.kind() == Value::Array ) { std::vector results(context.size()); auto& arrayType = static_cast(type); for ( size_t i = 0; i < context.size(); ++i ) { results[i] = getResultValue(context[i], *arrayType.itemType(), op, i + 1); } return Value::array(results); } else if ( context.kind() == Value::Tuple ) { std::vector results(context.size()); auto& tupleType = static_cast(type); for ( size_t i = 0; i < context.size(); ++i ) { results[i] = getResultValue(context[i], *tupleType.itemType(i), op); } return Value::array(results); } else { assert(false); return Value(); } } bool hasNone( const Value& value ) { switch ( value.kind() ) { case Value::None: { return true; } case Value::Tuple: case Value::Array: { for ( size_t i = 0; i < value.size(); ++i ) { if ( hasNone(value[i]) ) { return true; } } return false; } default: { return false; } } } void addReservedIdentifiers( const Expr& expr ) { switch ( expr.kind() ) { case Expr::Identifier: { auto& identifier = static_cast(expr); _reservedIds.insert(identifier.name()); break; } case Expr::Array: case Expr::Tuple: { auto& items = static_cast(expr); for ( size_t i = 0; i < items.size(); ++i ) { addReservedIdentifiers(items.item(i)); } break; } default: { assert(false); 
break; } } } bool isReservedId( const std::string& id ) { return _reservedIds.find(id) != _reservedIds.end(); } bool isReservedId( const std::string& id, const size_t size ) { for ( size_t i = 0; i < size; ++i ) { if ( isReservedId(indexedId(id,i+1)) ) { return true; } } return false; } std::string indexedId( const std::string& id, const size_t idx ) { return id + "_" + std::to_string(idx); } private: template struct power { T operator()( const T& left, const T& right ) { return (T)std::pow(left, right); } }; private: const Fragments& _fragments; const std::set& _lowered; Dictionary _tensorCounts; std::set _reservedIds; }; } // namespace nnef #endif ================================================ FILE: nnef-pyproject/nnef/cpp/include/nnef/comp/expression.h ================================================ /* * Copyright (c) 2017 The Khronos Group Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #ifndef _NNEF_EXPRESSION_H_ #define _NNEF_EXPRESSION_H_ #include "../common/dictionary.h" #include "../common/typespec.h" #include "../common/lexer.h" #include #include #include #include #include #include namespace nnef { template using Shared = std::shared_ptr; class Expr { public: typedef Error::Position Position; enum Kind { Literal, Identifier, Array, Tuple, Subscript, Comprehension, Unary, Binary, Select, Invocation, Builtin }; public: Expr( const Position& position ) : _position(position) { } const Position& position() const { return _position; } virtual ~Expr() {} virtual Kind kind() const = 0; virtual const Type* type() const = 0; virtual void print( std::ostream& os ) const = 0; private: Position _position; }; inline std::ostream& operator<<( std::ostream& os, const Expr& expr ) { expr.print(os); return os; } template class LiteralExpr : public Expr { public: typedef V value_type; public: LiteralExpr( const Position& position, const value_type& value, const Type* type ) : Expr(position), _value(value), _type(type) { } const value_type& value() const { return _value; } virtual Kind kind() const { return Literal; } virtual const Type* type() const { return _type; } virtual void print( std::ostream& os ) const { print(os, _value); } private: template static void print( std::ostream& os, const S& value ) { os << std::boolalpha << value; } static void print( std::ostream& os, const std::string& value ) { os << '\'' << value << '\''; } protected: value_type _value; const Type* _type; }; typedef LiteralExpr ScalarExpr; typedef LiteralExpr IntegerExpr; typedef LiteralExpr LogicalExpr; typedef LiteralExpr StringExpr; class IdentifierExpr : public Expr { public: IdentifierExpr( const Position& position, const std::string& name, const Type* type ) : Expr(position), _name(name), _type(type) { } const std::string& name() const { return _name; } virtual Kind kind() const { return Identifier; } virtual const Type* type() const { return _type; } virtual void print( 
std::ostream& os ) const { os << _name; } private: std::string _name; const Type* _type; }; class SubscriptExpr : public Expr { public: SubscriptExpr( const Position& position, const Shared& sequence, const Shared& begin, const Shared& end, const Type* type ) : Expr(position), _sequence(sequence), _begin(begin), _end(end), _type(type) { } virtual bool isRange() const { return _begin != _end || !_begin; } const Expr& sequence() const { return *_sequence; } const Expr* begin() const { return _begin.get(); } const Expr* end() const { return _end.get(); } virtual Kind kind() const { return Subscript; } virtual const Type* type() const { return _type; } virtual void print( std::ostream& os ) const { _sequence->print(os); os << '['; if ( _begin ) { _begin->print(os); } if ( isRange() ) { os << ':'; } if ( _end ) { _begin->print(os); } os << ']'; } private: const Shared _sequence; const Shared _begin; const Shared _end; const Type* _type; }; class ItemExpr : public Expr { public: ItemExpr( const Position& position, const Type* type ) : Expr(position), _type(type) { } ItemExpr( const Position& position, std::vector>& items, const Type* type ) : Expr(position), _items(std::move(items)), _type(type) { } size_t size() const { return _items.size(); } const Expr& item( const size_t i ) const { return *_items[i]; } virtual const Type* type() const { return _type; } protected: std::vector> _items; const Type* _type; }; class ArrayExpr : public ItemExpr { public: ArrayExpr( const Position& position, const Type* type ) : ItemExpr(position, type) { } ArrayExpr( const Position& position, std::vector>& items, const Type* type ) : ItemExpr(position, items, type) { } virtual Kind kind() const { return Array; } virtual void print( std::ostream& os ) const { os << '['; for ( size_t i = 0; i < _items.size(); ++i ) { if ( i ) { os << ','; } _items[i]->print(os); } os << ']'; } }; class TupleExpr : public ItemExpr { public: TupleExpr( const Position& position, const Type* type ) : 
ItemExpr(position, type) { } TupleExpr( const Position& position, std::vector>& items, const Type* type ) : ItemExpr(position, items, type) { } virtual Kind kind() const { return Tuple; } virtual void print( std::ostream& os ) const { os << '('; for ( size_t i = 0; i < _items.size(); ++i ) { if ( i ) { os << ','; } _items[i]->print(os); } os << ')'; } }; class ComprehensionExpr : public Expr { public: ComprehensionExpr( const Position& position, std::vector>& iterators, std::vector>& iterables, const Shared& condition, const Shared& item, const Type* type ) : Expr(position), _iterators(std::move(iterators)), _iterables(std::move(iterables)), _condition(condition), _item(item), _type(type) { } size_t iteratorCount() const { return _iterators.size(); } const Expr& iterator( const size_t i ) const { return *_iterators[i]; } const Expr& iterable( const size_t i ) const { return *_iterables[i]; } const Expr* condition() const { return _condition.get(); } const Expr& item() const { return *_item; } virtual Kind kind() const { return Comprehension; } virtual const Type* type() const { return _type; } virtual void print( std::ostream& os ) const { os << '['; os << "for "; for ( size_t i = 0; i < _iterators.size(); ++i ) { if ( i ) { os << ", "; } _iterators[i]->print(os); os << " in "; _iterables[i]->print(os); } if ( _condition ) { os << " if "; _condition->print(os); } os << " yield "; _item->print(os); os << ']'; } private: const std::vector> _iterators; const std::vector> _iterables; const Shared _condition; const Shared _item; const Type* _type; }; class UnaryExpr : public Expr { public: UnaryExpr( const Position& position, const Shared& right, int op, const Type* type ) : Expr(position), _right(right), _type(type), _op(op) { } const Expr& right() const { return *_right; } int op() const { return _op; } virtual Kind kind() const { return Unary; } virtual const Type* type() const { return _type; } virtual void print( std::ostream& os ) const { const std::string str = 
Lexer::tokenString(_op); os << str; if ( str.length() > 1 ) { os << '('; } _right->print(os); if ( str.length() > 1 ) { os << ')'; } } private: const Shared _right; const Type* _type; int _op; }; class BinaryExpr : public Expr { public: BinaryExpr( const Position& position, const Shared& left, const Shared& right, int op, const Type* type ) : Expr(position), _left(left), _right(right), _type(type), _op(op) { } const Expr& left() const { return *_left; } const Expr& right() const { return *_right; } int op() const { return _op; } virtual Kind kind() const { return Binary; } virtual const Type* type() const { return _type; } virtual void print( std::ostream& os ) const { if ( _left->kind() == Binary ) { os << '('; } _left->print(os); if ( _left->kind() == Binary ) { os << ')'; } os << ' ' << Lexer::tokenString(_op) << ' '; if ( _right->kind() == Binary ) { os << '('; } _right->print(os); if ( _right->kind() == Binary ) { os << ')'; } } private: const Shared _left; const Shared _right; const Type* _type; int _op; }; class BuiltinExpr : public Expr { public: BuiltinExpr( const Position& position, const Shared& arg, int op, const Type* type ) : Expr(position), _arg(arg), _type(type), _op(op) { } const Expr& arg() const { return *_arg; } int op() const { return _op; } virtual Kind kind() const { return Builtin; } virtual const Type* type() const { return _type; } virtual void print( std::ostream& os ) const { os << Lexer::tokenString(_op) << '('; _arg->print(os); os << ')'; } private: const Shared _arg; const Type* _type; int _op; }; class SelectExpr : public Expr { public: SelectExpr( const Position& position, const Shared& condition, const Shared& trueValue, const Shared& falseValue, const Type* type ) : Expr(position), _cond(condition), _true(trueValue), _false(falseValue), _type(type) { } const Expr& condition() const { return *_cond; } const Expr& trueValue() const { return *_true; } const Expr& falseValue() const { return *_false; } virtual Kind kind() const { 
return Select; } virtual const Type* type() const { return _type; } virtual void print( std::ostream& os ) const { _true->print(os); os << " if "; _cond->print(os); os << " else "; _false->print(os); } private: const Shared _cond; const Shared _true; const Shared _false; const Type* _type; }; class InvocationExpr : public Expr { public: typedef Dictionary>::const_iterator const_iterator; public: InvocationExpr( const Position& position, const std::string& target, Dictionary>& args, const Type* type, const PrimitiveType* dataType = nullptr ) : Expr(position), _target(target), _dataType(dataType), _args(std::move(args)), _type(type) { } const std::string& target() const { return _target; } const PrimitiveType* dataType() const { return _dataType; } const Expr* arg( const std::string& name ) const { auto it = _args.find(name); return it != _args.end() ? it->second.get() : nullptr; } const_iterator begin() const { return _args.begin(); } const_iterator end() const { return _args.end(); } virtual Kind kind() const { return Kind::Invocation; } virtual const Type* type() const { return _type; } virtual void print( std::ostream& os ) const { os << _target; if ( _dataType ) { os << '<' << _dataType->toString() << '>'; } os << '('; for ( auto it = _args.begin(); it != _args.end(); ++it ) { if ( it != _args.begin() ) { os << ", "; } os << it->first << " = " << *it->second; } os << ')'; } private: std::string _target; const PrimitiveType* _dataType; Dictionary> _args; const Type* _type; }; } // namespace nnef #endif ================================================ FILE: nnef-pyproject/nnef/cpp/include/nnef/comp/fragment.h ================================================ /* * Copyright (c) 2017 The Khronos Group Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef _NNEF_FRAGMENT_H_ #define _NNEF_FRAGMENT_H_ #include "../common/prototype.h" #include "expression.h" #include namespace nnef { class Assignment { public: Assignment( const Shared& lhs, const Shared& rhs ) : _lhs(lhs), _rhs(rhs) { } const Expr& lhs() const { return *_lhs; } const Expr& rhs() const { return *_rhs; } private: const Shared _lhs; const Shared _rhs; }; class Fragment { public: Fragment( const Prototype& prototype ) : _prototype(prototype) { } Fragment( const Prototype& prototype, std::vector& assignments ) : _prototype(prototype), _assignments(std::move(assignments)) { } const Prototype& prototype() const { return _prototype; } size_t assignmentCount() const { return _assignments.size(); } const Assignment& assignment( const size_t i ) const { return _assignments[i]; } private: const Prototype& _prototype; const std::vector _assignments; }; inline std::ostream& operator<<( std::ostream& os, const Assignment& assignment ) { os << assignment.lhs() << " = " << assignment.rhs(); return os; } inline std::ostream& operator<<( std::ostream& os, const Fragment& fragment ) { os << fragment.prototype() << std::endl; if ( fragment.assignmentCount() ) { os << '{' << std::endl; for ( size_t i = 0; i < fragment.assignmentCount(); ++i ) { os << '\t' << fragment.assignment(i) << std::endl; } os << '}' << std::endl; } return os; } } // namespace nnef #endif ================================================ FILE: nnef-pyproject/nnef/cpp/include/nnef/comp/stdlib_source.h ================================================ /* * Copyright (c) 2017 The Khronos Group 
Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef _NNEF_STDLIB_SOURCE_H_ #define _NNEF_STDLIB_SOURCE_H_ namespace nnef { template struct _stdlib_source { static const char* text; }; template const char* _stdlib_source::text = R"STDLIB( # tensor declaration operations fragment external( shape: integer[] ) -> ( output: tensor ); fragment variable( shape: integer[], label: string ) -> ( output: tensor ); fragment constant( shape: integer[], value: ?[] ) -> ( output: tensor ); fragment update( variable: tensor, value: tensor ) -> ( result: tensor ); # tensor shape operations fragment reshape( input: tensor, shape: integer[], axis_start: integer = 0, axis_count: integer = -1 ) -> ( output: tensor ); fragment transpose( input: tensor, axes: integer[] ) -> ( output: tensor ); fragment concat( values: tensor[], axis: integer ) -> ( value: tensor ); fragment split( value: tensor, axis: integer, ratios: integer[] ) -> ( values: tensor[] ); fragment slice( input: tensor, axes: integer[], begin: integer[], end: integer[], stride: integer[] = [] ) -> ( output: tensor ); fragment squeeze( input: tensor, axes: integer[] ) -> ( output: tensor ); fragment unsqueeze( input: tensor, axes: integer[] ) -> ( output: tensor ); fragment stack( values: tensor[], axis: integer ) -> ( value: tensor ); fragment unstack( value: tensor, axis: integer ) -> ( values: tensor[] ); fragment tile( input: tensor, repeats: integer[] ) -> ( output: tensor ); fragment pad( input: tensor, padding: (integer, 
integer)[], border: string = 'constant', value: scalar = 0.0 ) -> ( output: tensor ); fragment gather( input: tensor, indices: tensor, axis: integer = 0 ) -> ( output: tensor ); fragment cast( input: tensor<> ) -> ( output: tensor ); # element-wise arithmetic operations fragment add( x: tensor, y: tensor ) -> ( z: tensor ); fragment sub( x: tensor, y: tensor ) -> ( z: tensor ); fragment mul( x: tensor, y: tensor ) -> ( z: tensor ); fragment div( x: tensor, y: tensor ) -> ( z: tensor ); fragment pow( x: tensor, y: tensor ) -> ( z: tensor ); fragment exp( x: tensor ) -> ( y: tensor ); fragment log( x: tensor ) -> ( y: tensor ); fragment sin( x: tensor ) -> ( y: tensor ); fragment cos( x: tensor ) -> ( y: tensor ); fragment tan( x: tensor ) -> ( y: tensor ); fragment sinh( x: tensor ) -> ( y: tensor ); fragment cosh( x: tensor ) -> ( y: tensor ); fragment tanh( x: tensor ) -> ( y: tensor ); fragment asin( x: tensor ) -> ( y: tensor ); fragment acos( x: tensor ) -> ( y: tensor ); fragment atan( x: tensor ) -> ( y: tensor ); fragment asinh( x: tensor ) -> ( y: tensor ); fragment acosh( x: tensor ) -> ( y: tensor ); fragment atanh( x: tensor ) -> ( y: tensor ); fragment abs( x: tensor ) -> ( y: tensor ); fragment sign( x: tensor ) -> ( y: tensor ); fragment rcp( x: tensor ) -> ( y: tensor ); fragment neg( x: tensor ) -> ( y: tensor ); fragment copy( x: tensor ) -> ( y: tensor ); # element-wise comparison operations fragment lt( x: tensor, y: tensor ) -> ( z: tensor ); fragment gt( x: tensor, y: tensor ) -> ( z: tensor ); fragment le( x: tensor, y: tensor ) -> ( z: tensor ); fragment ge( x: tensor, y: tensor ) -> ( z: tensor ); fragment eq( x: tensor, y: tensor ) -> ( z: tensor ); fragment ne( x: tensor, y: tensor ) -> ( z: tensor ); # element-wise logical operations fragment and( x: tensor, y: tensor ) -> ( z: tensor ); fragment or( x: tensor, y: tensor ) -> ( z: tensor ); fragment not( x: tensor ) -> ( y: tensor ); # element-wise rounding operations fragment floor( x: 
tensor ) -> ( y: tensor ); fragment ceil( x: tensor ) -> ( y: tensor ); fragment round( x: tensor ) -> ( y: tensor ); # element-wise select operation fragment select( condition: tensor, true_value: tensor, false_value: tensor ) -> ( output: tensor ); # simplifier operations fragment sqr( x: tensor ) -> ( y: tensor ) { y = x ^ 2.0; } fragment sqrt( x: tensor ) -> ( y: tensor ) { y = x ^ 0.5; } fragment rsqr( x: tensor ) -> ( y: tensor ) { y = x ^ -2.0; } fragment rsqrt( x: tensor ) -> ( y: tensor ) { y = x ^ -0.5; } fragment log2( x: tensor ) -> ( y: tensor ) { y = log(x) / log(2.0); } fragment min( x: tensor, y: tensor ) -> ( z: tensor ) { z = select(x < y, x, y); } fragment max( x: tensor, y: tensor ) -> ( z: tensor ) { z = select(x > y, x, y); } fragment clamp( x: tensor, a: tensor, b: tensor ) -> ( y: tensor ) { y = max(min(x, b), a); } # matrix multiplication fragment matmul( A: tensor, B: tensor, transposeA: logical = false, transposeB: logical = false ) -> ( C: tensor ); )STDLIB" /* break the raw literal because of max length limit */ R"STDLIB( # sliding-window operations fragment conv( input: tensor, filter: tensor, bias: tensor = 0.0, border: string = 'constant', padding: (integer,integer)[] = [], stride: integer[] = [], dilation: integer[] = [], groups: integer = 1 ) -> ( output: tensor ); fragment deconv( input: tensor, filter: tensor, bias: tensor = 0.0, border: string = 'constant', padding: (integer,integer)[] = [], stride: integer[] = [], dilation: integer[] = [], output_shape: integer[] = [], groups: integer = 1 ) -> ( output: tensor ); fragment box( input: tensor, size: integer[], border: string = 'constant', padding: (integer,integer)[] = [], stride: integer[] = [], dilation: integer[] = [], normalize: logical = false ) -> ( output: tensor ); fragment debox( input: tensor, size: integer[], border: string = 'constant', padding: (integer,integer)[] = [], stride: integer[] = [], dilation: integer[] = [], output_shape: integer[] = [], normalize: logical 
= false ) -> ( output: tensor ); fragment argmax_pool( input: tensor, size: integer[], border: string = 'constant', padding: (integer,integer)[] = [], stride: integer[] = [], dilation: integer[] = [] ) -> ( index: tensor ); fragment sample( input: tensor, index: tensor, size: integer[], border: string = 'constant', padding: (integer,integer)[] = [], stride: integer[] = [], dilation: integer[] = [] ) -> ( output: tensor ); fragment desample( input: tensor, index: tensor, size: integer[], border: string = 'constant', padding: (integer,integer)[] = [], stride: integer[] = [], dilation: integer[] = [], output_shape: integer[] = [] ) -> ( output: tensor ); # up/down-sampling operations fragment nearest_downsample( input: tensor, factor: integer[] ) -> ( output: tensor ) { dims = 2 + length_of(factor); output = box(input, size = [1] * dims, stride = [1,1] + factor, padding = [(0,0)] * dims); } fragment area_downsample( input: tensor, factor: integer[] ) -> ( output: tensor ) { dims = 2 + length_of(factor); output = box(input, size = [1,1] + factor, stride = [1,1] + factor, padding = [(0,0)] * dims, normalize = true); } fragment nearest_upsample( input: tensor, factor: integer[] ) -> ( output: tensor ) { dims = 2 + length_of(factor); output = debox(input, size = [1,1] + factor, stride = [1,1] + factor, padding = [(0,0)] * dims); } fragment multilinear_upsample( input: tensor, factor: integer[], method: string = 'symmetric', border: string = 'replicate' ) -> ( output: tensor ); # reduce operations fragment sum_reduce( input: tensor, axes: integer[], normalize: logical = false ) -> ( output: tensor ); fragment max_reduce( input: tensor, axes: integer[] ) -> ( output: tensor ); fragment min_reduce( input: tensor, axes: integer[] ) -> ( output: tensor ); fragment argmax_reduce( input: tensor, axes: integer[] ) -> ( output: tensor ); fragment argmin_reduce( input: tensor, axes: integer[] ) -> ( output: tensor ); fragment any_reduce( input: tensor, axes: integer[] ) -> ( 
output: tensor ); fragment all_reduce( input: tensor, axes: integer[] ) -> ( output: tensor ); fragment mean_reduce( input: tensor, axes: integer[] ) -> ( output: tensor ) { output = sum_reduce(input, axes = axes, normalize = true); } fragment moments( input: tensor, axes: integer[] ) -> ( mean: tensor, variance: tensor ) { mean = mean_reduce(input, axes = axes); variance = mean_reduce(sqr(input - mean), axes = axes); } # activation functions fragment relu( x: tensor ) -> ( y: tensor ) { y = max(x, 0.0); } fragment sigmoid( x: tensor ) -> ( y: tensor ) { y = 1.0 / (1.0 + exp(-x)); } fragment softabs( x: tensor, epsilon: scalar ) -> ( y: tensor ) { y = sqrt(sqr(x) + epsilon); } fragment softmax( x: tensor, axes: integer[] = [1] ) -> ( y: tensor ) { m = max_reduce(x, axes = axes); e = exp(x - m); y = e / sum_reduce(e, axes = axes); } fragment softplus( x: tensor ) -> ( y: tensor ) { y = log(exp(x) + 1.0); } fragment elu( x: tensor, alpha: scalar = 1.0 ) -> ( y: tensor ) { y = select(x < 0.0, alpha * (exp(x) - 1.0), x); } fragment selu( x: tensor, alpha: scalar = 1.67326319, lambda: scalar = 1.05070102 ) -> ( y: tensor ) { y = lambda * select(x < 0.0, alpha * (exp(x) - 1.0), x); } fragment gelu( x: tensor ) -> ( y: tensor ) { # the exact definition of gelu is x * Phi(x) where Phi(x) is the # CDF of the standard normal distribution, which can be approximated # for example by sigmoid(1.702 * x) y = x * sigmoid(1.702 * x); } fragment silu( x: tensor ) -> ( y: tensor ) { y = x * sigmoid(x); } fragment prelu( x: tensor, alpha: tensor ) -> ( y: tensor ) { y = select(x < 0.0, alpha * x, x); } fragment leaky_relu( x: tensor, alpha: scalar ) -> ( y: tensor ) { y = prelu(x, alpha = alpha); } )STDLIB" /* break the raw literal because of max length limit */ R"STDLIB( # pooling operations fragment max_pool_with_index( input: tensor, size: integer[], border: string = 'constant', padding: (integer,integer)[] = [], stride: integer[] = [], dilation: integer[] = [] ) -> ( output: 
tensor, index: tensor ) { index = argmax_pool(input, size = size, border = border, padding = padding, stride = stride, dilation = dilation); output = sample(input, index, size = size, border = border, padding = padding, stride = stride, dilation = dilation); } fragment max_pool( input: tensor, size: integer[], border: string = 'constant', padding: (integer,integer)[] = [], stride: integer[] = [], dilation: integer[] = [] ) -> ( output: tensor ) { output, index = max_pool_with_index(input, size = size, border = border, padding = padding, stride = stride, dilation = dilation); } fragment avg_pool( input: tensor, size: integer[], border: string = 'constant', padding: (integer,integer)[] = [], stride: integer[] = [], dilation: integer[] = [] ) -> ( output: tensor ) { output = box(input, size = size, border = border, padding = padding, stride = stride, dilation = dilation, normalize = true); } fragment rms_pool( input: tensor, size: integer[], border: string = 'constant', padding: (integer,integer)[] = [], stride: integer[] = [], dilation: integer[] = [] ) -> ( output: tensor ) { output = sqrt(avg_pool(sqr(input), size = size, border = border, padding = padding, stride = stride, dilation = dilation)); } # linear operations fragment linear( input: tensor, filter: tensor, bias: tensor = 0.0 ) -> ( output: tensor ) { output = matmul(input, filter, transposeB = true) + bias; } fragment separable_conv( input: tensor, plane_filter: tensor, point_filter: tensor, bias: tensor = 0.0, border: string = 'constant', padding: (integer,integer)[] = [], stride: integer[] = [], dilation: integer[] = [], groups: integer = 1 ) -> ( output: tensor ) { filtered = conv(input, plane_filter, border = border, padding = padding, stride = stride, dilation = dilation, groups = 0); output = conv(filtered, point_filter, bias, groups = groups); } fragment separable_deconv( input: tensor, plane_filter: tensor, point_filter: tensor, bias: tensor = 0.0, border: string = 'constant', padding: 
(integer,integer)[] = [], stride: integer[] = [], dilation: integer[] = [], output_shape: integer[] = [], groups: integer = 1 ) -> ( output: tensor ) { filtered = deconv(input, point_filter, groups = groups); output = deconv(filtered, plane_filter, bias, border = border, padding = padding, stride = stride, dilation = dilation, output_shape = output_shape, groups = 0); } # normalization operations fragment local_response_normalization( input: tensor, size: integer[], alpha: scalar = 1.0, beta: scalar = 0.5, bias: scalar = 1.0 ) -> ( output: tensor ) { sigma = bias + alpha * box(sqr(input), size = size, normalize = true); output = input / (sigma ^ beta); } fragment local_mean_normalization( input: tensor, size: integer[] ) -> ( output: tensor ) { mean = box(input, size = size, normalize = true); output = sub(input, mean); } fragment local_variance_normalization( input: tensor, size: integer[], bias: scalar = 0.0, epsilon: scalar = 0.0 ) -> ( output: tensor ) { sigma = sqrt(box(sqr(input), size = size, normalize = true)); output = input / max(sigma + bias, epsilon); } fragment local_contrast_normalization( input: tensor, size: integer[], bias: scalar = 0.0, epsilon: scalar = 0.0 ) -> ( output: tensor ) { centered = local_mean_normalization(input, size = size); output = local_variance_normalization(centered, size = size, bias = bias, epsilon = epsilon); } fragment l1_normalization( input: tensor, axes: integer[], bias: scalar = 0.0, epsilon: scalar = 0.0 ) -> ( output: tensor ) { sigma = sum_reduce(abs(input), axes = axes); output = input / max(sigma + bias, epsilon); } fragment l2_normalization( input: tensor, axes: integer[], bias: scalar = 0.0, epsilon: scalar = 0.0 ) -> ( output: tensor ) { sigma = sqrt(sum_reduce(sqr(input), axes = axes)); output = input / max(sigma + bias, epsilon); } fragment batch_normalization( input: tensor, mean: tensor, variance: tensor, offset: tensor, scale: tensor, epsilon: scalar ) -> ( output: tensor ) { output = offset + scale * 
(input - mean) / sqrt(variance + epsilon); } )STDLIB" /* break the raw literal because of max length limit */ R"STDLIB( # roi operations fragment avg_roi_pool( input: tensor, rois: tensor, batch_index: tensor, output_size: integer[] ) -> ( output: tensor ); fragment max_roi_pool( input: tensor, rois: tensor, batch_index: tensor, output_size: integer[] ) -> ( output: tensor ); fragment roi_resample( input: tensor, rois: tensor, batch_index: tensor, output_size: integer[], method: string = 'symmetric' ) -> ( output: tensor ); fragment avg_roi_align( input: tensor, rois: tensor, batch_index: tensor, output_size: integer[], sampling_rate: integer[], resize_method: string = 'symmetric' ) -> ( output: tensor ) { size = [for i in range_of(output_size) yield output_size[i] * sampling_rate[i]]; resized = roi_resample(input, rois, batch_index, output_size = size, method = resize_method); output = avg_pool(resized, size = sampling_rate, stride = sampling_rate); } fragment max_roi_align( input: tensor, rois: tensor, batch_index: tensor, output_size: integer[], sampling_rate: integer[], resize_method: string = 'symmetric' ) -> ( output: tensor ) { size = [for i in range_of(output_size) yield output_size[i] * sampling_rate[i]]; resized = roi_resample(input, rois, batch_index, output_size = size, method = resize_method); output = max_pool(resized, size = sampling_rate, stride = sampling_rate); } # quantization operations fragment min_max_linear_quantize( x: tensor, min: tensor, max: tensor, bits: integer, signed: logical, symmetric: logical ) -> ( y: tensor ) { r = scalar(2 ^ bits - 1 - integer(signed && symmetric)); z = clamp(x, min, max); p = scalar(2 ^ (bits - 1) - integer(symmetric) if signed else 0); q = round((z - min) / (max - min) * r) - p; y = (q + p) / r * (max - min) + min; } fragment zero_point_linear_quantize( x: tensor, zero_point: tensor, scale: tensor, bits: integer, signed: logical, symmetric: logical ) -> ( y: tensor ) { z = cast(zero_point); s = round(x / 
scale) + z; r = scalar(2 ^ (bits - 1) - 1 if signed else 2 ^ bits - 1); q = clamp(s, 0.0 if !signed else -r if symmetric else -r - 1.0, r); y = (q - z) * scale; } fragment linear_quantize( x: tensor, min: tensor, max: tensor, bits: integer ) -> ( y: tensor ) { y = min_max_linear_quantize(x, min = min, max = max, bits = bits, signed = false, symmetric = false); } fragment logarithmic_quantize( x: tensor, max: tensor, bits: integer ) -> ( y: tensor ) { m = ceil(log2(max)); r = scalar(2 ^ bits - 1); q = round(clamp(log2(abs(x)), m - r, m)); y = sign(x) * 2.0 ^ q; } # misc operations fragment copy_n( x: tensor, times: integer ) -> ( y: tensor[] ) { y = [x] * times; } fragment add_n( x: tensor[] ) -> ( y: tensor ) { y = x[0] + add_n(x[1:]) if length_of(x) > 0 else constant(shape = [1], value = [0.0]); } )STDLIB"; inline const char* stdlib_source() { return _stdlib_source::text; } } // namespace nnef #endif ================================================ FILE: nnef-pyproject/nnef/cpp/include/nnef/flat/flat_parser.h ================================================ /* * Copyright (c) 2017 The Khronos Group Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #ifndef _NNEF_FLAT_PARSER_H_ #define _NNEF_FLAT_PARSER_H_ #include "../common/prototype.h" #include "../common/dictionary.h" #include "../common/typeutils.h" #include "../common/parser.h" #include "../common/value.h" #include "../common/lexer.h" #include "../common/error.h" #include "stdlib_protos.h" #include namespace nnef { class FlatParser : public Parser { public: typedef Error::Position Position; public: virtual void parse( std::istream& is, const char* filename, Callback& callback ) { Lexer lexer(is, filename); lexer.next(); auto version = readVersion(lexer); callback.beginDocument(filename, version); auto extensions = readExtensions(lexer, [&]( const std::string& ext ) { return callback.handleExtension(ext); }); static Dictionary prototypes = buildPrototypes(); parseGraph(lexer, prototypes, callback); callback.endDocument(filename); } private: void parseGraph( Lexer& lexer, const Dictionary& prototypes, Callback& callback ) { lexer.readToken(Lexer::Graph); const std::string name = lexer.string(); lexer.readToken(Lexer::Identifier); auto params = parseIdentifiers(lexer); lexer.readToken(Lexer::Arrow); auto results = parseIdentifiers(lexer); const Prototype graph(name, params, results); callback.beginGraph(graph, prototypes); lexer.readToken('{'); Dictionary dtypes; while ( lexer.token() != '}' ) { parseAssignment(lexer, graph, prototypes, dtypes, callback); } checkGraphParamsAssigned(graph, dtypes, lexer.position()); lexer.readToken('}'); callback.endGraph(graph, dtypes); lexer.readToken(Lexer::Eof); } template static std::vector parseIdentifiers( Lexer& lexer ) { std::vector identifiers; lexer.readToken('('); do { const std::string id = lexer.string(); lexer.readToken(Lexer::Identifier); identifiers.emplace_back(id, tensorType(Typename::Scalar)); } while ( lexer.readIfToken(',') ); lexer.readToken(')'); return identifiers; } static void checkGraphParam( const Value& arg, const Prototype& graph, const std::string& target, const Position& position ) { 
switch ( arg.kind() ) { case Value::Identifier: { if ( target == "external" ) { if ( !graph.param(arg.identifier()) ) { throw Error(position, "identifier '%s' assigned by operation 'external' must be a graph parameter", arg.identifier().c_str()); } } else { if ( graph.param(arg.identifier()) ) { throw Error(position, "graph parameter '%s' can only be assigned by operation 'external'", arg.identifier().c_str()); } } break; } case Value::Array: case Value::Tuple: { for ( size_t i = 0; i < arg.size(); ++i ) { checkGraphParam(arg[i], graph, target, position); } break; } default: { assert(false); } } } static void checkGraphParamsAssigned( const Prototype& graph, const Dictionary& declared, const Position& position ) { for ( size_t i = 0; i < graph.paramCount(); ++i ) { auto& param = graph.param(i); if ( !declared.count(param.name()) ) { throw Error(position, "graph parameter '%s' not assigned", param.name().c_str()); } } for ( size_t i = 0; i < graph.resultCount(); ++i ) { auto& result = graph.result(i); if ( !declared.count(result.name()) ) { throw Error(position, "graph result '%s' not assigned", result.name().c_str()); } } } private: void parseAssignment( Lexer& lexer, const Prototype& graph, const Dictionary& prototypes, Dictionary& dtypes, Callback& callback ) { auto position = lexer.position(); const Value results = parseTuple(lexer, nullptr, false, true); lexer.readToken('='); const std::string target = lexer.string(); lexer.readToken(Lexer::Identifier); auto it = prototypes.find(target); if ( it == prototypes.end() ) { throw Error(lexer.position(), "undefined operation '%s'", target.c_str()); } auto& proto = it->second; checkGraphParam(results, graph, proto.name(), position); const PrimitiveType* dataType = proto.genericParamDefault(); if ( lexer.readIfToken('<') ) { if ( lexer.token() == '?' 
) { throw Error(lexer.position(), "expected type name"); } dataType = primitiveType(getTypename(lexer)); lexer.next(); lexer.readToken('>'); } lexer.readToken('('); Dictionary args = parseArguments(proto, lexer, &dtypes, dataType, true, false, false); lexer.readToken(')'); lexer.readToken(';'); if ( results.size() != proto.resultCount() ) { throw Error(position, "left-hand-side item count must match result count of operation (%d)", (int)proto.resultCount()); } if ( proto.isGeneric() && !dataType && !deduceDataType(proto, args, dtypes, dataType, position) ) { throw Error(position, "could not deduce generic data-type"); } if ( dataType ) { args["?"] = Value::string(dataType->toString()); } for ( size_t i = 0; i < proto.resultCount(); ++i ) { auto& result = proto.result(i); auto type = dataType ? bindDataType(result.type(), dataType) : result.type(); declare(results[i], type, dtypes, position); args.emplace(result.name(), std::move(results[i])); } callback.operation(proto, args, dtypes); } protected: static Dictionary parseArguments( const Prototype& proto, Lexer& lexer, const Dictionary* decls, const PrimitiveType* dataType, const bool allowIdentifier, const bool allowArrayToTensor, bool expectNamed, const Param* exclusion = nullptr ) { Dictionary args; do { auto position = lexer.position(); if ( args.size() >= proto.paramCount() ) { throw Error(position, "too many arguments; definition of '%s' has only %d parameters", proto.name().c_str(), (int)proto.paramCount()); } const Param* param = nullptr; Value arg = Value::none(); bool named = false; if ( lexer.token() == Lexer::Identifier ) { auto string = lexer.string(); lexer.next(); if ( lexer.token() == '=' ) { lexer.next(); param = proto.param(string); if ( !param ) { throw Error(position, "operation '%s' has no parameter called '%s'", proto.name().c_str(), string.c_str()); } arg = parseValue(lexer, decls, true, allowIdentifier); named = true; } else if ( allowIdentifier ) { param = &proto.param(args.size()); arg = 
makeIdentifier(string, position, decls); } else { throw Error(position, "token 'identifier' not allowed in this context"); } } else { param = &proto.param(args.size()); arg = parseValue(lexer, decls, true, allowIdentifier); } auto paramType = dataType ? bindDataType(param->type(), dataType) : param->type(); auto argType = typeOf(arg, *decls); if ( !isCastable(argType, paramType, true, allowArrayToTensor) ) { throw Error(position, "argument of type '%s' cannot be cast to type '%s' for parameter '%s'", argType->toString().c_str(), paramType->toString().c_str(), param->name().c_str()); } expectNamed |= named || paramType->isAttribute(); if ( expectNamed && !named ) { throw Error(position, "expected named argument"); } if ( args.count(param->name()) ) { throw Error(position, "duplicate arguments: parameter '%s' already assigned", param->name().c_str()); } if ( param == exclusion ) { throw Error(lexer.position(), "argument '%s' of operation '%s' must not be provided in this context", param->name().c_str(), proto.name().c_str()); } if ( param->type()->kind() == Type::Tensor && isJaggedArray(arg) ) { throw Error(lexer.position(), "tensor literal argument for argument '%s' must not be jagged nested array", param->name().c_str()); } args.emplace(param->name(), std::move(arg)); } while ( lexer.readIfToken(',') ); for ( size_t i = 0; i < proto.paramCount(); ++i ) { auto& param = proto.param(i); if ( ¶m != exclusion && !args.count(param.name()) ) { if ( param.defaultValue() ) { if ( param.type()->isGeneric() ) { auto valueType = typeOf(param.defaultValue(), *decls); auto paramType = dataType ? 
bindDataType(param.type(), dataType) : param.type(); if ( !isCastable(valueType, paramType, true, allowArrayToTensor) ) { throw Error(lexer.position(), "default value type '%s' cannot be cast to type '%s' for parameter '%s'", valueType->toString().c_str(), paramType->toString().c_str(), param.name().c_str()); } } args[param.name()] = param.defaultValue(); } else { throw Error(lexer.position(), "missing argument for operation '%s'; parameter '%s' not assigned", proto.name().c_str(), param.name().c_str()); } } } return args; } private: static bool checkNestedArrayShape( const Value& value, const int* shape, const size_t rank ) { if ( rank == 0 ) { return value.kind() != Value::Array; } else if ( value.kind() != Value::Array || value.size() != (size_t)*shape ) { return false; } for ( size_t i = 0; i < value.size(); ++i ) { if ( !checkNestedArrayShape(value[i], shape + 1, rank - 1) ) { return false; } } return true; } static bool isJaggedArray( const Value& value ) { auto shape = nestedArrayShape(value); return !checkNestedArrayShape(value, shape.data(), shape.size()); } private: static void declare( const Value& arg, const Type* type, Dictionary& dtypes, const Position& position ) { switch ( arg.kind() ) { case Value::Identifier: { if ( type->kind() != Type::Tensor ) { throw Error(position, "cannot assign result of type '%s' to tensor identifier", type->toString().c_str()); } const std::string& id = arg.identifier(); if ( dtypes.count(id) ) { throw Error(position, "identifier '%s' already declared", id.c_str()); } auto dataType = static_cast(type)->dataType(); assert(dataType->kind() == Type::Primitive); dtypes.emplace(id, static_cast(dataType)->name()); break; } case Value::Array: { if ( type->kind() != Type::Array ) { throw Error(position, "cannot assign result of type '%s' to array", type->toString().c_str()); } auto arrayType = static_cast(type); for ( size_t i = 0; i < arg.size(); ++i ) { declare(arg[i], arrayType->itemType(), dtypes, position); } break; } case 
Value::Tuple: { if ( type->kind() != Type::Tuple ) { throw Error(position, "cannot assign result of type '%s' to tuple", type->toString().c_str()); } auto tupleType = static_cast(type); for ( size_t i = 0; i < arg.size(); ++i ) { declare(arg[i], tupleType->itemType(i), dtypes, position); } break; } default: { throw Error(position, "literal expression not allowed in this context"); } } } private: static Value parseValue( Lexer& lexer, const Dictionary* decls, bool allowLiteral, bool allowIdentifier ) { switch ( lexer.token() ) { case Lexer::True: case Lexer::False: { if ( allowLiteral ) { return parseLogical(lexer); } break; } case '-': case Lexer::Decimal: case Lexer::Fractional: { if ( allowLiteral ) { return parseNumber(lexer); } break; } case Lexer::Characters: { if ( allowLiteral ) { return parseString(lexer); } break; } case '[': { return parseArray(lexer, decls, allowLiteral, allowIdentifier); } case '(': { return parseTuple(lexer, decls, allowLiteral, allowIdentifier); } case Lexer::Identifier: { if ( allowIdentifier ) { return parseIdentifier(lexer, decls); } break; } default: { throw Error(lexer.position(), "unexpected token '%s'", Lexer::tokenString(lexer.token()).c_str()); } } throw Error(lexer.position(), "token '%s' not allowed in this context", Lexer::tokenString(lexer.token()).c_str()); } static Value parseNumber( Lexer& lexer ) { bool negative = lexer.token() == '-'; if ( negative ) { lexer.next(); } if ( lexer.token() == Lexer::Decimal ) { return parseInteger(lexer, negative); } else if ( lexer.token() == Lexer::Fractional ) { return parseScalar(lexer, negative); } else { throw Error(lexer.position(), "expected number"); } } static Value parseInteger( Lexer& lexer, bool negative ) { auto value = getIntegerValue(lexer); lexer.next(); return Value::integer(negative ? -value : value); } static Value parseScalar( Lexer& lexer, bool negative ) { auto value = getScalarValue(lexer); lexer.next(); return Value::scalar(negative ? 
-value : value); } static Value parseLogical( Lexer& lexer ) { auto value = lexer.token() == Lexer::True; lexer.next(); return Value::logical(value); } static Value parseString( Lexer& lexer ) { auto value = lexer.string(); lexer.next(); return Value::string(value); } static Value parseIdentifier( Lexer& lexer, const Dictionary* decls ) { auto value = makeIdentifier(lexer.string(), lexer.position(), decls); lexer.next(); return value; } static Value makeIdentifier( const std::string& name, const Position& position, const Dictionary* decls ) { if ( decls && !decls->count(name) ) { throw Error(position, "undeclared identifier '%s'", name.c_str()); } return Value::identifier(name); } static Value parseArray( Lexer& lexer, const Dictionary* decls, bool allowLiteral, bool allowIdentifier ) { lexer.readToken('['); std::vector items; if ( lexer.token() != ']' ) { do { auto item = parseValue(lexer, decls, allowLiteral, allowIdentifier); items.push_back(std::move(item)); } while ( lexer.readIfToken(',') ); } lexer.readToken(']'); return Value::array(std::move(items)); } static Value parseTuple( Lexer& lexer, const Dictionary* decls, bool allowLiteral, bool allowIdentifier ) { std::vector items; bool parenthesized = lexer.token() == '('; if ( parenthesized ) { lexer.next(); auto first = parseValue(lexer, decls, allowLiteral, allowIdentifier); lexer.readToken(','); items.push_back(first); } do { auto item = parseValue(lexer, decls, allowLiteral, allowIdentifier); items.push_back(std::move(item)); } while ( lexer.readIfToken(',') ); if ( parenthesized ) { lexer.readToken(')'); } return Value::tuple(std::move(items)); } private: static const Type* typeOf( const Value& value, const Dictionary& declared ) { switch ( value.kind() ) { case Value::Integer: { return primitiveType(Typename::Integer); } case Value::Scalar: { return primitiveType(Typename::Scalar); } case Value::Logical: { return primitiveType(Typename::Logical); } case Value::String: { return 
primitiveType(Typename::String); } case Value::Identifier: { return tensorType(declared.at(value.identifier())); } case Value::Array: { auto itemType = value.size() ? typeOf(value[0], declared) : nullptr; return arrayType(itemType); } case Value::Tuple: { std::vector itemTypes(value.size()); for ( size_t i = 0; i < value.size(); ++i ) { itemTypes[i] = typeOf(value[i], declared); } return tupleType(itemTypes); } case Value::None: { return nullptr; } } assert(false); return nullptr; } static bool deduceDataType( const Prototype& proto, const Dictionary& args, const Dictionary& declared, const PrimitiveType*& dataType, const Position& position ) { Dictionary types; for ( auto& arg : args ) { types[arg.first] = typeOf(arg.second, declared); } for ( size_t i = 0; i < proto.paramCount(); ++i ) { auto& param = proto.param(i); if ( !types.count(param.name()) ) { assert(param.defaultValue()); types[param.name()] = typeOf(param.defaultValue(), declared); } } try { return nnef::deduceDataType(proto, types, dataType); } catch ( std::pair e ) { throw Error(position, "could not deduce data-type: ambiguous candidates '%s' vs '%s'", toString(e.first), toString(e.second)); } } static Dictionary buildPrototypes() { static auto stdlibPrototypes = nnef::stdlibPrototypes(); Dictionary prototypes; for ( auto& proto : stdlibPrototypes ) { prototypes.emplace(proto.name(), std::move(proto)); } return prototypes; } }; } // namespace nnef #endif ================================================ FILE: nnef-pyproject/nnef/cpp/include/nnef/flat/quant_parser.h ================================================ /* * Copyright (c) 2017 The Khronos Group Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef _NNEF_QUANTIZATION_H_ #define _NNEF_QUANTIZATION_H_ #include "../common/lexer.h" #include "../common/error.h" #include "../common/prototype.h" #include "../common/dictionary.h" #include "flat_parser.h" #include #include namespace nnef { class QuantParser : public FlatParser { public: static Dictionary> parse( std::istream& is, const char* filename, const Dictionary& prototypes ) { Lexer lexer(is, filename); lexer.next(); Dictionary> quantization; for ( unsigned line = 0; lexer.token() != Lexer::Eof; ++line ) { const std::string tensor = lexer.string(); if ( quantization.count(tensor) ) { throw Error(lexer.position(), "duplicate quantization entries for tensor '%s'", tensor.c_str()); } lexer.readToken(Lexer::Characters); lexer.readToken(':'); auto args = parseInvocation(lexer, prototypes); quantization.emplace(tensor, std::move(args)); } return quantization; } private: static Dictionary parseInvocation( Lexer& lexer, const Dictionary& prototypes ) { Position position = lexer.position(); const std::string op = lexer.string(); lexer.readToken(Lexer::Identifier); auto it = prototypes.find(op); if ( it == prototypes.end() ) { throw Error(position, "undefined quantization operation '%s'", op.c_str()); } auto& proto = it->second; if ( !proto.paramCount() ) { throw Error(position, "quantization operation must have at least one parameter"); } if ( proto.param(0).type()->kind() != Type::Tensor ) { throw Error(position, "first parameter of quantization operation must be of type tensor"); } lexer.readToken('('); Dictionary args = parseArguments(proto, lexer, nullptr, 
nullptr, false, true, true, &proto.param(0)); lexer.readToken(')'); lexer.readToken(';'); args["op-name"] = Value::string(op); return args; } }; } // namespace nnef #endif ================================================ FILE: nnef-pyproject/nnef/cpp/include/nnef/flat/stdlib_protos.h ================================================ /* * Copyright (c) 2017 The Khronos Group Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef _NNEF_STDLIB_PROTOS_H_ #define _NNEF_STDLIB_PROTOS_H_ #include "../common/value.h" #include "../common/typespec.h" #include "../common/prototype.h" #include "../common/dictionary.h" namespace nnef { static std::vector stdlibPrototypes() { static const PrimitiveType* Scalar = primitiveType(Typename::Scalar); static const PrimitiveType* Integer = primitiveType(Typename::Integer); static const PrimitiveType* Logical = primitiveType(Typename::Logical); static const PrimitiveType* String = primitiveType(Typename::String); static const PrimitiveType* Generic = primitiveType(Typename::Generic); static const Type* ScalarTensor = tensorType(Typename::Scalar); static const Type* IntegerTensor = tensorType(Typename::Integer); static const Type* LogicalTensor = tensorType(Typename::Logical); static const Type* GenericTensor = tensorType(Typename::Generic); static const Type* TypelessTensor = tensorType(); static const Type* Integers = arrayType(Integer); static const Type* Generics = arrayType(Generic); static const Type* Tensors = arrayType(ScalarTensor); 
static const Type* GenericTensors = arrayType(GenericTensor); static const Type* IntegerPair = tupleType({ Integer, Integer }); static const Type* IntegerPairs = arrayType(IntegerPair); static const Value ScalarZero = Value::scalar(0.0); static const Value ScalarOne = Value::scalar(1.0); static const Value ScalarHalf = Value::scalar(0.5); static const Value IntegerMinusOne = Value::integer(-1); static const Value IntegerZero = Value::integer(0); static const Value IntegerOne = Value::integer(1); static const Value LogicalFalse = Value::logical(false); static const Value LogicalTrue = Value::logical(true); static const Value StringConstant = Value::string("constant"); static const Value StringSymmetric = Value::string("symmetric"); static const Value StringReplicate = Value::string("replicate"); static const Value EmptyArray = Value::array({}); static const Value IntegersOne = Value::array({ IntegerOne }); static const std::vector prototypes = { Prototype("external", { Param("shape", Integers), }, { Result("output", GenericTensor) }, Scalar), Prototype("constant", { Param("shape", Integers), Param("value", Generics), }, { Result("output", GenericTensor) }, Scalar), Prototype("variable", { Param("shape", Integers), Param("label", String), }, { Result("output", GenericTensor) }, Scalar), Prototype("update", { Param("variable", GenericTensor), Param("value", GenericTensor), }, { Result("result", GenericTensor) }), Prototype("reshape", { Param("input", GenericTensor), Param("shape", Integers), Param("axis_start", Integer, IntegerZero), Param("axis_count", Integer, IntegerMinusOne), }, { Result("output", GenericTensor) }), Prototype("transpose", { Param("input", GenericTensor), Param("axes", Integers), }, { Result("output", GenericTensor) }), Prototype("concat", { Param("values", GenericTensors), Param("axis", Integer), }, { Result("value", GenericTensor) }), Prototype("split", { Param("value", GenericTensor), Param("axis", Integer), Param("ratios", Integers), }, { 
Result("values", GenericTensors) }), Prototype("slice", { Param("input", GenericTensor), Param("axes", Integers), Param("begin", Integers), Param("end", Integers), Param("stride", Integers, EmptyArray), }, { Result("output", GenericTensor) }), Prototype("stack", { Param("values", GenericTensors), Param("axis", Integer), }, { Result("value", GenericTensor) }), Prototype("unstack", { Param("value", GenericTensor), Param("axis", Integer), }, { Result("values", GenericTensors) }), Prototype("squeeze", { Param("input", GenericTensor), Param("axes", Integers), }, { Result("output", GenericTensor) }), Prototype("unsqueeze", { Param("input", GenericTensor), Param("axes", Integers), }, { Result("output", GenericTensor) }), Prototype("pad", { Param("input", ScalarTensor), Param("padding", IntegerPairs), Param("border", String, StringConstant), Param("value", Scalar, ScalarZero), }, { Result("output", ScalarTensor) }), Prototype("tile", { Param("input", GenericTensor), Param("repeats", Integers), }, { Result("output", GenericTensor) }), Prototype("gather", { Param("input", GenericTensor), Param("indices", IntegerTensor), Param("axis", Integer, IntegerZero), }, { Result("output", GenericTensor) }), Prototype("cast", { Param("input", TypelessTensor), }, { Result("output", GenericTensor) }), Prototype("add", { Param("x", ScalarTensor), Param("y", ScalarTensor) }, { Result("z", ScalarTensor) }), Prototype("sub", { Param("x", ScalarTensor), Param("y", ScalarTensor) }, { Result("z", ScalarTensor) }), Prototype("mul", { Param("x", ScalarTensor), Param("y", ScalarTensor) }, { Result("z", ScalarTensor) }), Prototype("div", { Param("x", ScalarTensor), Param("y", ScalarTensor) }, { Result("z", ScalarTensor) }), Prototype("pow", { Param("x", ScalarTensor), Param("y", ScalarTensor) }, { Result("z", ScalarTensor) }), Prototype("min", { Param("x", ScalarTensor), Param("y", ScalarTensor) }, { Result("z", ScalarTensor) }), Prototype("max", { Param("x", ScalarTensor), Param("y", ScalarTensor) 
}, { Result("z", ScalarTensor) }), Prototype("lt", { Param("x", ScalarTensor), Param("y", ScalarTensor) }, { Result("z", LogicalTensor) }), Prototype("le", { Param("x", ScalarTensor), Param("y", ScalarTensor) }, { Result("z", LogicalTensor) }), Prototype("gt", { Param("x", ScalarTensor), Param("y", ScalarTensor) }, { Result("z", LogicalTensor) }), Prototype("ge", { Param("x", ScalarTensor), Param("y", ScalarTensor) }, { Result("z", LogicalTensor) }), Prototype("eq", { Param("x", ScalarTensor), Param("y", ScalarTensor) }, { Result("z", LogicalTensor) }), Prototype("ne", { Param("x", ScalarTensor), Param("y", ScalarTensor) }, { Result("z", LogicalTensor) }), Prototype("and", { Param("x", LogicalTensor), Param("y", LogicalTensor) }, { Result("z", LogicalTensor) }), Prototype("or", { Param("x", LogicalTensor), Param("y", LogicalTensor) }, { Result("z", LogicalTensor) }), Prototype("select", { Param("condition", LogicalTensor), Param("true_value", GenericTensor), Param("false_value", GenericTensor), }, { Result("output", GenericTensor) }), Prototype("clamp", { Param("x", ScalarTensor), Param("a", ScalarTensor), Param("b", ScalarTensor), }, { Result("y", ScalarTensor) }), Prototype("copy", { Param("x", GenericTensor), }, { Result("y", GenericTensor) }), Prototype("neg", { Param("x", ScalarTensor), }, { Result("y", ScalarTensor) }), Prototype("rcp", { Param("x", ScalarTensor), }, { Result("y", ScalarTensor) }), Prototype("exp", { Param("x", ScalarTensor), }, { Result("y", ScalarTensor) }), Prototype("log", { Param("x", ScalarTensor), }, { Result("y", ScalarTensor) }), Prototype("sin", { Param("x", ScalarTensor), }, { Result("y", ScalarTensor) }), Prototype("cos", { Param("x", ScalarTensor), }, { Result("y", ScalarTensor) }), Prototype("tan", { Param("x", ScalarTensor), }, { Result("y", ScalarTensor) }), Prototype("asin", { Param("x", ScalarTensor), }, { Result("y", ScalarTensor) }), Prototype("acos", { Param("x", ScalarTensor), }, { Result("y", ScalarTensor) }), 
Prototype("atan", { Param("x", ScalarTensor), }, { Result("y", ScalarTensor) }), Prototype("sinh", { Param("x", ScalarTensor), }, { Result("y", ScalarTensor) }), Prototype("cosh", { Param("x", ScalarTensor), }, { Result("y", ScalarTensor) }), Prototype("tanh", { Param("x", ScalarTensor), }, { Result("y", ScalarTensor) }), Prototype("asinh", { Param("x", ScalarTensor), }, { Result("y", ScalarTensor) }), Prototype("acosh", { Param("x", ScalarTensor), }, { Result("y", ScalarTensor) }), Prototype("atanh", { Param("x", ScalarTensor), }, { Result("y", ScalarTensor) }), Prototype("abs", { Param("x", ScalarTensor), }, { Result("y", ScalarTensor) }), Prototype("sign", { Param("x", ScalarTensor), }, { Result("y", ScalarTensor) }), Prototype("floor", { Param("x", ScalarTensor), }, { Result("y", ScalarTensor) }), Prototype("ceil", { Param("x", ScalarTensor), }, { Result("y", ScalarTensor) }), Prototype("round", { Param("x", ScalarTensor), }, { Result("y", ScalarTensor) }), Prototype("sqr", { Param("x", ScalarTensor), }, { Result("y", ScalarTensor) }), Prototype("sqrt", { Param("x", ScalarTensor), }, { Result("y", ScalarTensor) }), Prototype("rsqr", { Param("x", ScalarTensor), }, { Result("y", ScalarTensor) }), Prototype("rsqrt", { Param("x", ScalarTensor), }, { Result("y", ScalarTensor) }), Prototype("log2", { Param("x", ScalarTensor), }, { Result("y", ScalarTensor) }), Prototype("not", { Param("x", LogicalTensor), }, { Result("y", LogicalTensor) }), Prototype("relu", { Param("x", ScalarTensor), }, { Result("y", ScalarTensor) }), Prototype("sigmoid", { Param("x", ScalarTensor), }, { Result("y", ScalarTensor) }), Prototype("elu", { Param("x", ScalarTensor), Param("alpha", ScalarTensor, ScalarOne), }, { Result("y", ScalarTensor) }), Prototype("selu", { Param("x", ScalarTensor), Param("alpha", ScalarTensor, Value::scalar(1.67326319)), Param("lambda", ScalarTensor, Value::scalar(1.05070102)), }, { Result("y", ScalarTensor) }), Prototype("gelu", { Param("x", ScalarTensor), }, { 
Result("y", ScalarTensor) }), Prototype("silu", { Param("x", ScalarTensor), }, { Result("y", ScalarTensor) }), Prototype("prelu", { Param("x", ScalarTensor), Param("alpha", ScalarTensor), }, { Result("y", ScalarTensor) }), Prototype("leaky_relu", { Param("x", ScalarTensor), Param("alpha", Scalar), }, { Result("y", ScalarTensor) }), Prototype("softabs", { Param("x", ScalarTensor), Param("epsilon", Scalar), }, { Result("y", ScalarTensor) }), Prototype("softplus", { Param("x", ScalarTensor), }, { Result("y", ScalarTensor) }), Prototype("softmax", { Param("x", ScalarTensor), Param("axes", Integers, IntegersOne), }, { Result("y", ScalarTensor) }), Prototype("conv", { Param("input", ScalarTensor), Param("filter", ScalarTensor), Param("bias", ScalarTensor, ScalarZero), Param("border", String, StringConstant), Param("padding", IntegerPairs, EmptyArray), Param("stride", Integers, EmptyArray), Param("dilation", Integers, EmptyArray), Param("groups", Integer, IntegerOne), }, { Result("output", ScalarTensor) }), Prototype("deconv", { Param("input", ScalarTensor), Param("filter", ScalarTensor), Param("bias", ScalarTensor, ScalarZero), Param("border", String, StringConstant), Param("padding", IntegerPairs, EmptyArray), Param("stride", Integers, EmptyArray), Param("dilation", Integers, EmptyArray), Param("output_shape", Integers, EmptyArray), Param("groups", Integer, IntegerOne), }, { Result("output", ScalarTensor) }), Prototype("box", { Param("input", ScalarTensor), Param("size", Integers), Param("border", String, StringConstant), Param("padding", IntegerPairs, EmptyArray), Param("stride", Integers, EmptyArray), Param("dilation", Integers, EmptyArray), Param("normalize", Logical, LogicalFalse), }, { Result("output", ScalarTensor) }), Prototype("debox", { Param("input", ScalarTensor), Param("size", Integers), Param("border", String, StringConstant), Param("padding", IntegerPairs, EmptyArray), Param("stride", Integers, EmptyArray), Param("dilation", Integers, EmptyArray), 
Param("output_shape", Integers, EmptyArray), Param("normalize", Logical, LogicalFalse), }, { Result("output", ScalarTensor) }), Prototype("sample", { Param("input", ScalarTensor), Param("index", IntegerTensor), Param("size", Integers), Param("border", String, StringConstant), Param("padding", IntegerPairs, EmptyArray), Param("stride", Integers, EmptyArray), Param("dilation", Integers, EmptyArray), }, { Result("output", ScalarTensor) }), Prototype("desample", { Param("input", ScalarTensor), Param("index", IntegerTensor), Param("size", Integers), Param("border", String, StringConstant), Param("padding", IntegerPairs, EmptyArray), Param("stride", Integers, EmptyArray), Param("dilation", Integers, EmptyArray), Param("output_shape", Integers, EmptyArray), }, { Result("output", ScalarTensor) }), Prototype("max_pool", { Param("input", ScalarTensor), Param("size", Integers), Param("border", String, StringConstant), Param("padding", IntegerPairs, EmptyArray), Param("stride", Integers, EmptyArray), Param("dilation", Integers, EmptyArray), }, { Result("output", ScalarTensor) }), Prototype("argmax_pool", { Param("input", ScalarTensor), Param("size", Integers), Param("border", String, StringConstant), Param("padding", IntegerPairs, EmptyArray), Param("stride", Integers, EmptyArray), Param("dilation", Integers, EmptyArray), }, { Result("index", IntegerTensor) }), Prototype("max_pool_with_index", { Param("input", ScalarTensor), Param("size", Integers), Param("border", String, StringConstant), Param("padding", IntegerPairs, EmptyArray), Param("stride", Integers, EmptyArray), Param("dilation", Integers, EmptyArray), }, { Result("output", ScalarTensor), Result("index", IntegerTensor) }), Prototype("avg_pool", { Param("input", ScalarTensor), Param("size", Integers), Param("border", String, StringConstant), Param("padding", IntegerPairs, EmptyArray), Param("stride", Integers, EmptyArray), Param("dilation", Integers, EmptyArray), }, { Result("output", ScalarTensor) }), 
Prototype("rms_pool", { Param("input", ScalarTensor), Param("size", Integers), Param("border", String, StringConstant), Param("padding", IntegerPairs, EmptyArray), Param("stride", Integers, EmptyArray), Param("dilation", Integers, EmptyArray), }, { Result("output", ScalarTensor) }), Prototype("separable_conv", { Param("input", ScalarTensor), Param("plane_filter", ScalarTensor), Param("point_filter", ScalarTensor), Param("bias", ScalarTensor, ScalarZero), Param("border", String, StringConstant), Param("padding", IntegerPairs, EmptyArray), Param("stride", Integers, EmptyArray), Param("dilation", Integers, EmptyArray), Param("groups", Integer, IntegerOne), }, { Result("output", ScalarTensor) }), Prototype("separable_deconv", { Param("input", ScalarTensor), Param("plane_filter", ScalarTensor), Param("point_filter", ScalarTensor), Param("bias", ScalarTensor, ScalarZero), Param("border", String, StringConstant), Param("padding", IntegerPairs, EmptyArray), Param("stride", Integers, EmptyArray), Param("dilation", Integers, EmptyArray), Param("output_shape", Integers, EmptyArray), Param("groups", Integer, IntegerOne), }, { Result("output", ScalarTensor) }), Prototype("nearest_downsample", { Param("input", ScalarTensor), Param("factor", Integers), }, { Result("output", ScalarTensor) }), Prototype("nearest_upsample", { Param("input", ScalarTensor), Param("factor", Integers), }, { Result("output", ScalarTensor) }), Prototype("area_downsample", { Param("input", ScalarTensor), Param("factor", Integers), }, { Result("output", ScalarTensor) }), Prototype("multilinear_upsample", { Param("input", ScalarTensor), Param("factor", Integers), Param("method", String, StringSymmetric), Param("border", String, StringReplicate), }, { Result("output", ScalarTensor) }), Prototype("local_response_normalization", { Param("input", ScalarTensor), Param("size", Integers), Param("alpha", Scalar, ScalarOne), Param("beta", Scalar, ScalarHalf), Param("bias", Scalar, ScalarOne), }, { Result("output", 
ScalarTensor) }), Prototype("local_mean_normalization", { Param("input", ScalarTensor), Param("size", Integers), }, { Result("output", ScalarTensor) }), Prototype("local_variance_normalization", { Param("input", ScalarTensor), Param("size", Integers), Param("bias", Scalar, ScalarZero), Param("epsilon", Scalar, ScalarZero), }, { Result("output", ScalarTensor) }), Prototype("local_contrast_normalization", { Param("input", ScalarTensor), Param("size", Integers), Param("bias", Scalar, ScalarZero), Param("epsilon", Scalar, ScalarZero), }, { Result("output", ScalarTensor) }), Prototype("l1_normalization", { Param("input", ScalarTensor), Param("axes", Integers), Param("bias", Scalar, ScalarZero), Param("epsilon", Scalar, ScalarZero), }, { Result("output", ScalarTensor) }), Prototype("l2_normalization", { Param("input", ScalarTensor), Param("axes", Integers), Param("bias", Scalar, ScalarZero), Param("epsilon", Scalar, ScalarZero), }, { Result("output", ScalarTensor) }), Prototype("batch_normalization", { Param("input", ScalarTensor), Param("mean", ScalarTensor), Param("variance", ScalarTensor), Param("offset", ScalarTensor, ScalarZero), Param("scale", ScalarTensor, ScalarOne), Param("epsilon", Scalar, ScalarZero), }, { Result("output", ScalarTensor) }), Prototype("sum_reduce", { Param("input", ScalarTensor), Param("axes", Integers), Param("normalize", Logical, LogicalFalse), }, { Result("output", ScalarTensor) }), Prototype("min_reduce", { Param("input", ScalarTensor), Param("axes", Integers), }, { Result("output", ScalarTensor) }), Prototype("max_reduce", { Param("input", ScalarTensor), Param("axes", Integers), }, { Result("output", ScalarTensor) }), Prototype("mean_reduce", { Param("input", ScalarTensor), Param("axes", Integers), }, { Result("output", ScalarTensor) }), Prototype("argmax_reduce", { Param("input", ScalarTensor), Param("axes", Integers), }, { Result("output", IntegerTensor) }), Prototype("argmin_reduce", { Param("input", ScalarTensor), Param("axes", 
Integers), }, { Result("output", IntegerTensor) }), Prototype("any_reduce", { Param("input", LogicalTensor), Param("axes", Integers), }, { Result("output", LogicalTensor) }), Prototype("all_reduce", { Param("input", LogicalTensor), Param("axes", Integers), }, { Result("output", LogicalTensor) }), Prototype("moments", { Param("input", ScalarTensor), Param("axes", Integers), }, { Result("mean", ScalarTensor), Result("variance", ScalarTensor) }), Prototype("max_roi_pool", { Param("input", ScalarTensor), Param("rois", ScalarTensor), Param("batch_index", IntegerTensor), Param("output_size", Integers), }, { Result("output", ScalarTensor) }), Prototype("avg_roi_pool", { Param("input", ScalarTensor), Param("rois", ScalarTensor), Param("batch_index", IntegerTensor), Param("output_size", Integers), }, { Result("output", ScalarTensor) }), Prototype("roi_resample", { Param("input", ScalarTensor), Param("rois", ScalarTensor), Param("batch_index", IntegerTensor), Param("output_size", Integers), Param("method", String, StringSymmetric), }, { Result("output", ScalarTensor) }), Prototype("max_roi_align", { Param("input", ScalarTensor), Param("rois", ScalarTensor), Param("batch_index", IntegerTensor), Param("output_size", Integers), Param("sampling_rate", Integers), Param("resize_method", String, StringSymmetric), }, { Result("output", ScalarTensor) }), Prototype("avg_roi_align", { Param("input", ScalarTensor), Param("rois", ScalarTensor), Param("batch_index", IntegerTensor), Param("output_size", Integers), Param("sampling_rate", Integers), Param("resize_method", String, StringSymmetric), }, { Result("output", ScalarTensor) }), Prototype("matmul", { Param("A", ScalarTensor), Param("B", ScalarTensor), Param("transposeA", Logical, LogicalFalse), Param("transposeB", Logical, LogicalFalse), }, { Result("C", ScalarTensor) }), Prototype("linear", { Param("input", ScalarTensor), Param("filter", ScalarTensor), Param("bias", ScalarTensor, ScalarZero), }, { Result("output", ScalarTensor) }), 
Prototype("add_n", { Param("x", Tensors), }, { Result("y", ScalarTensor) }), Prototype("copy_n", { Param("x", GenericTensor), Param("times", Integer), }, { Result("y", GenericTensors) }), Prototype("min_max_linear_quantize", { Param("x", ScalarTensor), Param("min", ScalarTensor), Param("max", ScalarTensor), Param("bits", Integer), Param("signed", Logical, LogicalTrue), Param("symmetric", Logical, LogicalFalse), }, { Result("y", ScalarTensor) }), Prototype("zero_point_linear_quantize", { Param("x", ScalarTensor), Param("zero_point", IntegerTensor), Param("scale", ScalarTensor), Param("bits", Integer), Param("signed", Logical), Param("symmetric", Logical), }, { Result("y", ScalarTensor) }), Prototype("linear_quantize", { Param("x", ScalarTensor), Param("min", ScalarTensor), Param("max", ScalarTensor), Param("bits", Integer), }, { Result("y", ScalarTensor) }), Prototype("logarithmic_quantize", { Param("x", ScalarTensor), Param("max", ScalarTensor), Param("bits", Integer), }, { Result("y", ScalarTensor) }), }; return prototypes; } } // namespace nnef #endif ================================================ FILE: nnef-pyproject/nnef/cpp/include/nnef/runtime/execution.h ================================================ /* * Copyright (c) 2017 The Khronos Group Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #ifndef _NNEF_RUNTIME_EXECUTION_H_ #define _NNEF_RUNTIME_EXECUTION_H_ #include "nnef.h" #include "operations.h" #include #define DISPATCH_BY_DTYPE(name) \ inline void execute_##name( const Operation& op, TensorDict& tensors ) \ { \ if ( op.dtype == "scalar" ) _execute_##name(op, tensors); \ else if ( op.dtype == "integer" ) _execute_##name(op, tensors); \ else if ( op.dtype == "logical" ) _execute_##name(op, tensors); \ else throw std::runtime_error("operation not implemented: " + std::string(#name) + ""); \ } \ namespace nnef { namespace rt { inline Tensor _make_tensor( const size_t rank, const int shape[], const size_t item_bytes ) { Tensor tensor; tensor.shape.assign(shape, shape + rank); tensor.data.resize(volume_of(tensor.shape) * item_bytes); return tensor; } typedef std::map TensorDict; typedef std::function Executor; template tensor_view _tensor_view( const Tensor& tensor ) { return tensor_view{ tensor.shape.size(), volume_of(tensor.shape), tensor.shape.data(), (T*)tensor.data.data() }; } template tensor_view _tensor_view( const T& value ) { return tensor_view{ 0, 1, nullptr, (T*)&value }; } template tensor_view _tensor_view( const Value& value, const TensorDict& tensors ) { return value.kind() == Value::Identifier ? 
_tensor_view(tensors.at(value.identifier())) : _tensor_view(value.get()); } const std::string& _literal_dtype( const Value& value ) { static const std::string dtypes[] = { "", "integer", "scalar", "logical", "string" }; return dtypes[(size_t)value.kind()]; } inline void check_supported_rank( const std::string& op, const size_t rank, const size_t max ) { if ( rank > max ) { throw std::runtime_error("operation not implemented: " + op + " with rank = " + std::to_string(rank)); } } inline void execute_external( const Operation& op, TensorDict& tensors ) { } inline void execute_variable( const Operation& op, TensorDict& tensors ) { } template inline void _execute_constant( const Operation& op, TensorDict& tensors ) { auto& output = op.outputs.get("output"); auto& value = op.attribs.get("value"); auto& tensor = tensors.at(output.identifier()); const size_t n = volume_of(tensor.shape); auto data = (T*)tensor.data.data(); if ( value.kind() == Value::Array ) { if ( value.size() == n ) { for ( size_t i = 0; i < n; ++i ) { data[i] = value[i].get(); } } else { std::fill_n(data, n, value[0].scalar()); } } else { std::fill_n(data, n, value.scalar()); } } DISPATCH_BY_DTYPE(constant) template Executor make_unary_executor( const F func ) { return [=]( const Operation& op, TensorDict& tensors ) { auto& x = op.inputs.get("x"); auto& y = op.outputs.get("y"); unary(_tensor_view(x, tensors), _tensor_view(y, tensors), func); }; } template Executor make_unary_executor_ext( const F func, const S ...attrib ) { return [=]( const Operation& op, TensorDict& tensors ) { auto& x = op.inputs.get("x"); auto& y = op.outputs.get("y"); unary(_tensor_view(x, tensors), _tensor_view(y, tensors), [&]( const T x ) { return func(x, op.attribs.get(attrib).scalar()...); }); }; } template Executor make_binary_executor( const F func ) { return [=]( const Operation& op, TensorDict& tensors ) { auto& x = op.inputs.get("x"); auto& y = op.inputs.get("y"); auto& z = op.outputs.get("z"); binary(_tensor_view(x, 
tensors), _tensor_view(y, tensors), _tensor_view(z, tensors), func); }; } template Executor make_reduce_executor( const F func, const T init ) { return [=]( const Operation& op, TensorDict& tensors ) { auto& input = op.inputs.get("input"); auto& output = op.outputs.get("output"); auto input_view = _tensor_view(input, tensors); auto output_view = _tensor_view(output, tensors); reduce(input_view, output_view, func, init); if ( op.name == "mean_reduce" || (op.name == "sum_reduce" && op.attribs.get("normalize").logical()) ) { const T volume = (T)(input_view.volume / output_view.volume); binary((tensor_view)output_view, _tensor_view(volume), output_view, std::divides()); } }; } template void _execute_select( const Operation& op, TensorDict& tensors ) { auto& c = op.inputs.get("condition"); auto& x = op.inputs.get("true_value"); auto& y = op.inputs.get("false_value"); auto& z = op.outputs.get("output"); select(_tensor_view(c, tensors), _tensor_view(x, tensors), _tensor_view(y, tensors), _tensor_view(z, tensors)); } DISPATCH_BY_DTYPE(select) inline Shape _extract_items( const Value& value ) { Shape items(value.size()); for ( size_t i = 0; i < value.size(); ++i ) { auto& v = value[i]; items[i] = v.kind() == Value::Tuple ? 
v[0].integer() : v.integer(); } return items; } inline Shape _make_padding( const size_t rank, const int input[], const int output[], const int filter[], const int stride[], const int dilation[] ) { Shape padding(rank); for ( size_t i = 0; i < rank; ++i ) { padding[i] = std::max((output[i] - 1) * stride[i] + (filter[i] - 1) * dilation[i] + 1 - input[i], 0) / 2; } return padding; } template void execute_conv( const Operation& op, TensorDict& tensors ) { auto& input = op.inputs.get("input"); auto& filter = op.inputs.get("filter"); auto& bias = op.inputs.get("bias"); auto& output = op.outputs.get("output"); auto& padding = op.attribs.get("padding"); auto& stride = op.attribs.get("stride"); auto& dilation = op.attribs.get("dilation"); auto& groups = op.attribs.get("groups").integer(); auto& border = op.attribs.get("border").string(); if ( border != "constant" ) { throw std::runtime_error("operation not implemented: " + op.name + " with border = '" + border + "'"); } auto input_view = _tensor_view(Transposed ? output : input, tensors); auto output_view = _tensor_view(Transposed ? input : output, tensors); auto filter_view = _tensor_view(filter, tensors); auto bias_view = _tensor_view(bias, tensors); const size_t d = input_view.rank - 2; check_supported_rank(op.name, d, 3); const Shape strideShape = stride.size() ? _extract_items(stride) : Shape(d, 1); const Shape dilationShape = dilation.size() ? _extract_items(dilation) : Shape(d, 1); const Shape paddingShape = padding.size() ? 
_extract_items(padding) : _make_padding(d, input_view.shape + 2, output_view.shape + 2, filter_view.shape + 2, strideShape.data(), dilationShape.data()); if ( groups == 1 ) { conv(filter_view, bias_view, input_view, output_view, paddingShape.data(), strideShape.data(), dilationShape.data()); } else if ( groups == 0 || groups == input_view.shape[1] ) { depthwise_conv(filter_view, bias_view, input_view, output_view, paddingShape.data(), strideShape.data(), dilationShape.data()); } else { grouped_conv(filter_view, bias_view, input_view, output_view, paddingShape.data(), strideShape.data(), dilationShape.data(), groups); } } template void _execute_pool( const Operation& op, TensorDict& tensors, const F func, const T init ) { auto& input = op.inputs.get("input"); auto& output = op.outputs.get("output"); auto& size = op.attribs.get("size"); auto& padding = op.attribs.get("padding"); auto& stride = op.attribs.get("stride"); auto& dilation = op.attribs.get("dilation"); auto& border = op.attribs.get("border").string(); if ( border != "constant" && border != "ignore" ) { throw std::runtime_error("operation not implemented: " + op.name + " with border = '" + border + "'"); } auto input_view = _tensor_view(Transposed ? output : input, tensors); auto output_view = _tensor_view(Transposed ? input : output, tensors); const size_t d = input_view.rank; check_supported_rank(op.name, d, 5); const Shape sizeShape = _extract_items(size); const Shape strideShape = stride.size() ? _extract_items(stride) : Shape(d, 1); const Shape dilationShape = dilation.size() ? _extract_items(dilation) : Shape(d, 1); const Shape paddingShape = padding.size() ? 
_extract_items(padding) : _make_padding(d, input_view.shape, output_view.shape, sizeShape.data(), strideShape.data(), dilationShape.data()); pool(input_view, output_view, sizeShape.data(), paddingShape.data(), strideShape.data(), dilationShape.data(), func, init, border != "ignore"); if ( op.name == "avg_pool" || op.name == "avg_unpool" || ((op.name == "box" || op.name == "debox") && op.attribs.get("normalize").logical()) ) { if ( border == "constant" ) { const T volume = (T)volume_of(sizeShape); binary((tensor_view)output_view, _tensor_view(volume), output_view, std::divides()); } else if ( border == "ignore" ) { Tensor tensor = _make_tensor(d, output_view.shape, sizeof(T)); pool_area(_tensor_view(tensor), input_view.shape, output_view.shape, sizeShape.data(), paddingShape.data(), strideShape.data(), dilationShape.data()); binary((tensor_view)output_view, _tensor_view(tensor), output_view, std::divides()); } } } template Executor make_pool_executor( const F func, const T init ) { return [=]( const Operation& op, TensorDict& tensors ) { _execute_pool(op, tensors, func, init); }; } template void _execute_reshape( const Operation& op, TensorDict& tensors ) { auto& input = op.inputs.get("input"); auto& output = op.outputs.get("output"); auto input_view = _tensor_view(input, tensors); auto output_view = _tensor_view(output, tensors); std::copy_n(input_view.data, input_view.volume, output_view.data); } DISPATCH_BY_DTYPE(reshape) template void _execute_transpose( const Operation& op, TensorDict& tensors ) { auto& input = op.inputs.get("input"); auto& output = op.outputs.get("output"); auto& axes = op.attribs.get("axes"); const size_t rank = tensors.at(input.identifier()).shape.size(); check_supported_rank(op.name, rank, 5); std::vector perm(rank); for ( size_t i = 0; i < axes.size(); ++i ) { perm[i] = axes[i].integer(); } std::iota(perm.begin() + axes.size(), perm.end(), axes.size()); transpose(_tensor_view(input, tensors), _tensor_view(output, tensors), perm.data()); } 
DISPATCH_BY_DTYPE(transpose) template void _execute_concat( const Operation& op, TensorDict& tensors ) { auto& values = op.inputs.get("values"); auto& value = op.outputs.get("value"); auto& axis = op.attribs.get("axis").integer(); std::vector> v; for ( size_t i = 0; i < values.size(); ++i ) { v.emplace_back(_tensor_view(values[i], tensors)); } if ( op.name == "stack" ) { concat(v.size(), v.data(), _tensor_view(value, tensors), axis); } else { concat(v.size(), v.data(), _tensor_view(value, tensors), axis); } } DISPATCH_BY_DTYPE(concat) template void _execute_split( const Operation& op, TensorDict& tensors ) { auto& value = op.inputs.get("value"); auto& values = op.outputs.get("values"); auto& axis = op.attribs.get("axis").integer(); std::vector> v; for ( size_t i = 0; i < values.size(); ++i ) { v.emplace_back(_tensor_view(values[i], tensors)); } if ( op.name == "unstack" ) { split(v.size(), _tensor_view(value, tensors), v.data(), axis); } else { split(v.size(), _tensor_view(value, tensors), v.data(), axis); } } DISPATCH_BY_DTYPE(split) template void execute_pad( const Operation& op, TensorDict& tensors ) { auto& input = op.inputs.get("input"); auto& output = op.outputs.get("output"); auto& padding = op.attribs.get("padding"); auto& border = op.attribs.get("border").string(); auto& value = op.attribs.get("value"); auto input_view = _tensor_view(input, tensors); auto output_view = _tensor_view(output, tensors); auto paddingShape = _extract_items(padding); const size_t d = input_view.rank; check_supported_rank(op.name, d, 5); if ( border == "constant" ) { pad_constant(input_view, output_view, paddingShape.data(), value.get()); } else if ( border == "replicate" ) { pad_replicate(input_view, output_view, paddingShape.data()); } else if ( border == "reflect" ) { pad_reflect(input_view, output_view, paddingShape.data()); } else if ( border == "reflect-even" ) { pad_reflect_even(input_view, output_view, paddingShape.data()); } else { throw std::runtime_error("operation not 
implemented: pad with border == '" + border + "'"); } } template void _execute_tile( const Operation& op, TensorDict& tensors ) { auto& input = op.inputs.get("input"); auto& output = op.outputs.get("output"); auto input_view = _tensor_view(input, tensors); auto output_view = _tensor_view(output, tensors); const size_t d = input_view.rank; check_supported_rank(op.name, d, 5); tile(input_view, output_view); } DISPATCH_BY_DTYPE(tile) template void _execute_slice( const Operation& op, TensorDict& tensors ) { auto& input = op.inputs.get("input"); auto& output = op.outputs.get("output"); auto& axes = op.attribs.get("axes"); auto& begin = op.attribs.get("begin"); auto& stride = op.attribs.get("stride"); auto input_view = _tensor_view(input, tensors); auto output_view = _tensor_view(output, tensors); const size_t d = input_view.rank; check_supported_rank(op.name, d, 5); std::vector offset(d, 0); std::vector step(d, 1); for ( size_t i = 0; i < axes.size(); ++i ) { auto axis = axes[i].integer(); auto offs = begin[i].integer(); if ( offs < 0 ) { offs += input_view.shape[axis]; } if ( offs < 0 ) { offs = -1; } if ( offs > input_view.shape[axis] ) { offs = input_view.shape[axis]; } offset[axis] = offs; step[axis] = stride.size() ? 
stride[i].integer() : 1; } slice(input_view, output_view, offset.data(), step.data()); } DISPATCH_BY_DTYPE(slice) template void _execute_gather( const Operation& op, TensorDict& tensors ) { auto& input = op.inputs.get("input"); auto& indices = op.inputs.get("indices"); auto& output = op.outputs.get("output"); auto& axis = op.attribs.get("axis").integer(); auto input_view = _tensor_view(input, tensors); auto indices_view = _tensor_view(indices, tensors); auto output_view = _tensor_view(output, tensors); gather(input_view, indices_view, output_view, axis); } DISPATCH_BY_DTYPE(gather) template void _execute_cast( const Operation& op, TensorDict& tensors ) { auto& input = op.inputs.get("input"); auto& output = op.outputs.get("output"); auto& input_dtype = input.kind() == Value::Identifier ? tensors.at(input.identifier()).dtype : _literal_dtype(input); auto output_view = _tensor_view(output, tensors); if ( input_dtype == "scalar" ) { auto input_view = _tensor_view(input, tensors); std::copy_n(input_view.data, input_view.volume, output_view.data); } else if ( input_dtype == "integer" ) { auto input_view = _tensor_view(input, tensors); std::copy_n(input_view.data, input_view.volume, output_view.data); } else if ( input_dtype == "logical" ) { auto input_view = _tensor_view(input, tensors); std::copy_n(input_view.data, input_view.volume, output_view.data); } else { throw std::runtime_error("operation 'cast' from dtype 'string' is not implemented"); } } DISPATCH_BY_DTYPE(cast) template void execute_matmul( const Operation& op, TensorDict& tensors ) { auto& A = op.inputs.get("A"); auto& B = op.inputs.get("B"); auto& C = op.outputs.get("C"); bool trA = op.attribs.get("transposeA").logical(); bool trB = op.attribs.get("transposeB").logical(); matmul(trA, trB, _tensor_view(A, tensors), _tensor_view(B, tensors), _tensor_view(C, tensors)); } template void execute_linear( const Operation& op, TensorDict& tensors ) { auto& input = op.inputs.get("input"); auto& filter = 
op.inputs.get("filter"); auto& bias = op.inputs.get("bias"); auto& output = op.outputs.get("output"); linear(_tensor_view(filter, tensors), _tensor_view(bias, tensors), _tensor_view(input, tensors), _tensor_view(output, tensors)); } template void execute_softmax( const Operation& op, TensorDict& tensors ) { auto& input = op.inputs.get("x"); auto& output = op.outputs.get("y"); auto& axes = op.attribs.get("axes"); auto input_view = _tensor_view(input, tensors); auto output_view = _tensor_view(output, tensors); if ( axes.size() != 1 ) { throw std::runtime_error("operation not implemented: softmax with multiple axes"); } softmax(input_view, output_view, axes[0].integer()); } template Executor make_arg_reduce_executor( const F func ) { return [=]( const Operation& op, TensorDict& tensors ) { auto& input = op.inputs.get("input"); auto& output = op.outputs.get("output"); auto& axes = op.attribs.get("axes"); auto input_view = _tensor_view(input, tensors); auto output_view = _tensor_view(output, tensors); if ( axes.size() != 1 ) { throw std::runtime_error("operation not implemented: argmax_reduce with multiple axes"); } arg_reduce(input_view, output_view, axes[0].integer(), func); }; } template void execute_multilinear_upsample( const Operation& op, TensorDict& tensors ) { auto& input = op.inputs.get("input"); auto& output = op.outputs.get("output"); auto& factor = op.attribs.get("factor"); auto& border = op.attribs.get("border").string(); auto& method = op.attribs.get("method").string(); auto input_view = _tensor_view(input, tensors); auto output_view = _tensor_view(output, tensors); const size_t d = input_view.rank - 2; check_supported_rank(op.name, d, 2); for ( size_t i = 0; i < factor.size(); ++i ) { if ( factor[i].integer() != 2 ) { throw std::runtime_error("operation not implemented: multilinear_upsample with factor != 2"); } } if ( method == "aligned" ) { throw std::runtime_error("operation not implemented: multilinear_upsample with method == 'aligned'"); } if ( 
border == "constant" ) { if ( method == "symmetric" ) { multilinear_upsample2x_symmetric(input_view, output_view); } else if ( method == "asymmetric" ) { multilinear_upsample2x_asymmetric(input_view, output_view); } } else if ( border == "replicate" ) { Shape input_padding(input_view.rank, 0); for ( size_t i = 2; i < input_view.rank; ++i ) { input_padding[i] = 1; } Shape output_padding(output_view.rank, 0); for ( size_t i = 2; i < output_view.rank; ++i ) { output_padding[i] = factor[i-2].integer(); } Shape padded_input_shape(input_view.shape, input_view.shape + input_view.rank); for ( size_t i = 2; i < padded_input_shape.size(); ++i ) { padded_input_shape[i] += 1 + 1; } Shape padded_output_shape(output_view.shape, output_view.shape + output_view.rank); for ( size_t i = 2; i < padded_output_shape.size(); ++i ) { padded_output_shape[i] += 2 * factor[i-2].integer(); } Tensor padded_input = _make_tensor(padded_input_shape.size(), padded_input_shape.data(), sizeof(T)); Tensor padded_output = _make_tensor(padded_output_shape.size(), padded_output_shape.data(), sizeof(T)); pad_replicate((tensor_view)input_view, _tensor_view(padded_input), input_padding.data()); if ( method == "symmetric" ) { multilinear_upsample2x_symmetric(_tensor_view(padded_input), _tensor_view(padded_output)); } else if ( method == "asymmetric" ) { multilinear_upsample2x_asymmetric(_tensor_view(padded_input), _tensor_view(padded_output)); } const Shape stride(input_view.rank, 1); slice(_tensor_view(padded_output), output_view, output_padding.data(), stride.data()); } else { throw std::runtime_error("operation not implemented: multilinear_upsample with border == '" + border + "'"); } } template void _execute_update( const Operation& op, TensorDict& tensors ) { auto& value = op.inputs.get("value"); auto& result = op.outputs.get("result"); auto input_view = _tensor_view(value, tensors); auto output_view = _tensor_view(result, tensors); std::copy_n(input_view.data, input_view.volume, output_view.data); } 
DISPATCH_BY_DTYPE(update) static const std::map Executors = { { "external", execute_external }, { "constant", execute_constant }, { "variable", execute_variable }, { "neg", make_unary_executor(std::negate()) }, { "not", make_unary_executor(std::logical_not()) }, { "abs", make_unary_executor([]( float x ){ return std::abs(x); }) }, { "sign", make_unary_executor([]( float x ){ return x > 0.f ? 1.f : x < 0.f ? -1.f : 0.f; }) }, { "exp", make_unary_executor([]( float x ){ return std::exp(x); }) }, { "log", make_unary_executor([]( float x ){ return std::log(x); }) }, { "log2", make_unary_executor([]( float x ){ return std::log(x) / std::log(2.f); }) }, { "sin", make_unary_executor([]( float x ){ return std::sin(x); }) }, { "cos", make_unary_executor([]( float x ){ return std::cos(x); }) }, { "tan", make_unary_executor([]( float x ){ return std::tan(x); }) }, { "asin", make_unary_executor([]( float x ){ return std::asin(x); }) }, { "acos", make_unary_executor([]( float x ){ return std::acos(x); }) }, { "atan", make_unary_executor([]( float x ){ return std::atan(x); }) }, { "sinh", make_unary_executor([]( float x ){ return std::sinh(x); }) }, { "cosh", make_unary_executor([]( float x ){ return std::cosh(x); }) }, { "tanh", make_unary_executor([]( float x ){ return std::tanh(x); }) }, { "asinh", make_unary_executor([]( float x ){ return std::asinh(x); }) }, { "acosh", make_unary_executor([]( float x ){ return std::acosh(x); }) }, { "atanh", make_unary_executor([]( float x ){ return std::atanh(x); }) }, { "round", make_unary_executor([]( float x ){ return std::round(x); }) }, { "floor", make_unary_executor([]( float x ){ return std::floor(x); }) }, { "ceil", make_unary_executor([]( float x ){ return std::ceil(x); }) }, { "sqrt", make_unary_executor([]( float x ){ return std::sqrt(x); }) }, { "sqr", make_unary_executor([]( float x ){ return x * x; }) }, { "rsqrt", make_unary_executor([]( float x ){ return 1.f / std::sqrt(x); }) }, { "rsqr", make_unary_executor([]( float x ){ 
return 1.f / (x * x); }) }, { "rcp", make_unary_executor([]( float x ){ return 1.f / x; }) }, { "copy", make_unary_executor([]( float x ){ return x; }) }, { "sigmoid", make_unary_executor([]( float x ){ return 1.f / (1.f + std::exp(-x)); }) }, { "tanh", make_unary_executor([]( float x ){ return std::tanh(x); }) }, { "relu", make_unary_executor([]( float x ){ return std::max(x, 0.f); }) }, { "leaky_relu", make_unary_executor_ext([]( float x, float alpha ) { return x < 0.f ? alpha * x : x; }, "alpha") }, { "elu", make_unary_executor_ext([]( float x, float alpha ) { return x < 0.f ? alpha * (std::exp(x) - 1.f) : x; }, "alpha") }, { "selu", make_unary_executor_ext([]( float x, float alpha, float lambda ) { return lambda * (x < 0.f ? alpha * (std::exp(x) - 1.f) : x); }, "alpha", "lambda") }, { "gelu", make_unary_executor([]( float x ){ return x / (1.f + std::exp(-1.702f * x)); }) }, { "silu", make_unary_executor([]( float x ){ return x / (1.f + std::exp(-x)); }) }, { "softplus", make_unary_executor([]( float x ){ return std::log(std::exp(x) + 1.f); }) }, { "add", make_binary_executor(std::plus()) }, { "sub", make_binary_executor(std::minus()) }, { "mul", make_binary_executor(std::multiplies()) }, { "div", make_binary_executor(std::divides()) }, { "pow", make_binary_executor([]( float x, float y ){ return std::pow(x,y); }) }, { "min", make_binary_executor([]( float x, float y ){ return std::min(x,y); }) }, { "max", make_binary_executor([]( float x, float y ){ return std::max(x,y); }) }, { "and", make_binary_executor(std::logical_and()) }, { "or", make_binary_executor(std::logical_or()) }, { "lt", make_binary_executor(std::less()) }, { "gt", make_binary_executor(std::greater()) }, { "le", make_binary_executor(std::less_equal()) }, { "ge", make_binary_executor(std::greater_equal()) }, { "eq", make_binary_executor(std::equal_to()) }, { "ne", make_binary_executor(std::not_equal_to()) }, { "select", execute_select }, { "sum_reduce", make_reduce_executor(std::plus(), 0.f) }, { 
"mean_reduce", make_reduce_executor(std::plus(), 0.f) }, { "min_reduce", make_reduce_executor([]( float x, float y ){ return std::min(x,y); }, std::numeric_limits::infinity()) }, { "max_reduce", make_reduce_executor([]( float x, float y ){ return std::max(x,y); }, -std::numeric_limits::infinity()) }, { "any_reduce", make_reduce_executor(std::logical_or(), false) }, { "all_reduce", make_reduce_executor(std::logical_and(), true) }, { "conv", execute_conv }, { "deconv", execute_conv }, { "box", make_pool_executor(std::plus(), 0.f) }, { "debox", make_pool_executor(std::plus(), 0.f) }, { "sum_pool", make_pool_executor(std::plus(), 0.f) }, { "sum_unpool", make_pool_executor(std::plus(), 0.f) }, { "avg_pool", make_pool_executor(std::plus(), 0.f) }, { "avg_unpool", make_pool_executor(std::plus(), 0.f) }, { "min_pool", make_pool_executor([]( float x, float y ){ return std::min(x,y); }, std::numeric_limits::infinity()) }, { "max_pool", make_pool_executor([]( float x, float y ){ return std::max(x,y); }, -std::numeric_limits::infinity()) }, { "reshape", execute_reshape }, { "squeeze", execute_reshape }, { "unsqueeze", execute_reshape }, { "transpose", execute_transpose }, { "concat", execute_concat }, { "split", execute_split }, { "stack", execute_concat }, { "unstack", execute_split }, { "pad", execute_pad }, { "tile", execute_tile }, { "slice", execute_slice }, { "gather", execute_gather }, { "cast", execute_cast }, { "matmul", execute_matmul }, { "linear", execute_linear }, { "softmax", execute_softmax }, { "argmin_reduce", make_arg_reduce_executor(std::less()) }, { "argmax_reduce", make_arg_reduce_executor(std::greater()) }, { "multilinear_upsample", execute_multilinear_upsample }, { "update", execute_update }, }; }} // namespace nnef::rt #endif ================================================ FILE: nnef-pyproject/nnef/cpp/include/nnef/runtime/ndrange.h ================================================ /* * Copyright (c) 2017 The Khronos Group Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef _NNEF_RUNTIME_NDRANGE_H_ #define _NNEF_RUNTIME_NDRANGE_H_ namespace nnef { namespace rt { template struct _nd_loop { static inline void call( const S shape[], I index[], const Op& op ) { for ( index[N-K] = 0; index[N-K] < shape[N-K]; ++index[N-K] ) { _nd_loop::call(shape, index, op); } } }; template struct _nd_loop { static inline void call( const S shape[], I index[], const Op& op ) { for ( index[N-1] = 0; index[N-1] < shape[N-1]; ++index[N-1] ) { op(index); } } }; template struct _nd_loop<0,0,I,S,Op> { static inline void call( const S shape[], I index[], const Op& op ) { op(index); } }; template inline void nd_loop( const S shape[], const Op& op ) { I index[N]; _nd_loop::call(shape, index, op); }; template struct _nd_offset { static inline size_t call( const S shape[], const I index[] ) { return _nd_offset::call(shape, index) * shape[N-1] + index[N-1]; } }; template struct _nd_offset<1,I,S> { static inline size_t call( const S shape[], const I index[] ) { return index[0]; } }; template struct _nd_offset<0,I,S> { static inline size_t call( const S shape[], const I index[] ) { return 0; } }; template inline size_t nd_offset( const S shape[], const I index[] ) { return _nd_offset::call(shape, index); } template struct _nd_volume { static inline size_t call( const S shape[] ) { return _nd_volume::call(shape) * shape[N-1]; } }; template struct _nd_volume<1,S> { static inline size_t call( const S shape[] ) { return shape[0]; } 
}; template struct _nd_volume<0,S> { static inline size_t call( const S shape[] ) { return 1; } }; template inline size_t nd_volume( const S shape[] ) { return _nd_volume::call(shape); } template inline size_t nd_volume( const size_t rank, const S shape[] ) { return std::accumulate(shape, shape + rank, (S)1, std::multiplies()); } template struct _for_n { static inline void call( const Op& op ) { _for_n::call(op); op(N-1); }; }; template struct _for_n<1,Op> { static inline void call( const Op& op ) { op(0); }; }; template struct _for_n<0,Op> { static inline void call( const Op& op ) { }; }; template inline void for_n( const Op& op ) { _for_n::call(op); } template struct _all_n { static inline bool call( const Op& op ) { return _all_n::call(op) && op(N-1); }; }; template struct _all_n<1,Op> { static inline bool call( const Op& op ) { return op(0); }; }; template struct _all_n<0,Op> { static inline bool call( const Op& op ) { return true; }; }; template inline bool all_n( const Op& op ) { return _all_n::call(op); }; template struct tensor_view { const size_t rank; const size_t volume; const int* shape; T* data; tensor_view operator[]( const size_t idx ) const { const size_t size = volume / *shape; return tensor_view{ rank - 1, size, shape + 1, data + size * idx }; } operator tensor_view() const { return tensor_view{ rank, volume, shape, data }; } }; template T& at( tensor_view& view, const int idx[] ) { return view.data[nd_offset(view.shape, idx)]; } }} // namespace nnef::rt #endif ================================================ FILE: nnef-pyproject/nnef/cpp/include/nnef/runtime/operations.h ================================================ /* * Copyright (c) 2017 The Khronos Group Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef _NNEF_RUNTIME_OPERATIONS_H_ #define _NNEF_RUNTIME_OPERATIONS_H_ #include #include #include "ndrange.h" namespace nnef { namespace rt { template void _unary( const size_t n, const T* x, const size_t dx, T* y, const size_t dy, const Op& op ) { for ( size_t i = 0; i < n; ++ i, x += dx, y += dy ) { *y = op(*x); } } template void unary( tensor_view x, tensor_view y, const Op& op ) { _unary(y.volume, x.data, 1, y.data, 1, op); } template inline void _binary( const size_t n, const T* x, const size_t dx, const T* y, const size_t dy, R* z, const size_t dz, const Op& op ) { for ( size_t i = 0; i < n; ++i, x += dx, y += dy, z += dz ) { *z = op(*x, *y); } } template inline void binary( tensor_view x, tensor_view y, tensor_view z, const Op& op ) { if ( (x.volume == z.volume || x.volume == 1) && (y.volume == z.volume || y.volume == 1) ) { _binary(z.volume, x.data, x.volume == z.volume, y.data, y.volume == z.volume, z.data, 1, op); } else { const size_t dx = *x.shape != 1; const size_t dy = *y.shape != 1; for ( size_t xi = 0, yi = 0, zi = 0; zi < *z.shape; ++zi, xi += dx, yi += dy ) { binary(x[xi], y[yi], z[zi], op); } } } template void _select( const size_t n, const bool* c, const size_t dc, const T* x, const size_t dx, const T* y, const size_t dy, T* z, const size_t dz ) { for ( size_t i = 0; i < n; ++i, c += dc, x += dx, y += dy, z += dz ) { *z = *c ? 
*x : *y; } } template void select( tensor_view c, tensor_view x, tensor_view y, tensor_view z ) { if ( (c.volume == z.volume || c.volume == 1) && (x.volume == z.volume || x.volume == 1) && (y.volume == z.volume || y.volume == 1) ) { _select(z.volume, c.data, c.volume == z.volume, x.data, x.volume == z.volume, y.data, y.volume == z.volume, z.data, 1); } else { const size_t dc = *c.shape != 1; const size_t dx = *x.shape != 1; const size_t dy = *y.shape != 1; for ( size_t ci = 0, xi = 0, yi = 0, zi = 0; zi < *z.shape; ++zi, ci += dc, xi += dx, yi += dy ) { select(c[ci], x[xi], y[yi], z[zi]); } } } template void _reduce( const size_t n, const T* x, const size_t dx, T* y, const size_t dy, const Op& op ) { for ( size_t i = 0; i < n; ++i, x += dx, y += dy ) { *y = op(*x, *y); } } template void _reduce( tensor_view x, tensor_view y, const Op& op ) { if ( y.volume == x.volume || y.volume == 1 ) { _reduce(x.volume, x.data, 1, y.data, y.volume == x.volume, op); } else { const size_t dy = *y.shape != 1; for ( size_t xi = 0, yi = 0; xi < *x.shape; ++xi, yi += dy ) { _reduce(x[xi], y[yi], op); } } } template void reduce( tensor_view x, tensor_view y, const Op& op, const T init ) { std::fill_n(y.data, y.volume, init); _reduce(x, y, op); } template void _bias( tensor_view bias, tensor_view tensor ) { if ( bias.volume == 1 ) { std::fill_n(tensor.data, tensor.volume, *bias.data); } else { T* data = tensor.data; const size_t size = nd_volume(tensor.rank - 2, tensor.shape + 2); for ( size_t b = 0; b < tensor.shape[0]; ++b ) { for ( size_t c = 0; c < tensor.shape[1]; ++c, data += size ) { std::fill_n(data, size, bias.data[c]); } } } } template static void _conv_core( tensor_view filter, tensor_view input, tensor_view output, const int padding[], const int stride[], const int dilation[] ) { int input_index[D]; nd_loop(output.shape, [&]( const int output_index[] ) { nd_loop(filter.shape, [&]( const int filter_index[] ) { for_n([&]( const size_t k ) { input_index[k] = output_index[k] * 
stride[k] + filter_index[k] * dilation[k] - padding[k]; }); if ( all_n([&]( const size_t k ){ return input_index[k] >= 0 && input_index[k] < input.shape[k]; }) ) { if ( Transposed ) { at(input, input_index) += at(output, output_index) * at(filter, filter_index); } else { at(output, output_index) += at(input, input_index) * at(filter, filter_index); } } }); }); } template void _conv( tensor_view filter, tensor_view bias, tensor_view input, tensor_view output, const int padding[], const int stride[], const int dilation[] ) { _bias(bias, Transposed ? input : output); for ( size_t b = 0; b < output.shape[0]; ++b ) { for ( size_t z = 0; z < output.shape[1]; ++z ) { for ( size_t c = 0; c < input.shape[1]; ++c ) { _conv_core(filter[z][c], input[b][c], output[b][z], padding, stride, dilation); } } } } template void conv( tensor_view filter, tensor_view bias, tensor_view input, tensor_view output, const int padding[], const int stride[], const int dilation[] ) { static decltype(&_conv) funcs[] = { _conv, _conv, _conv, }; funcs[input.rank - 3](filter, bias, input, output, padding, stride, dilation); } template void _depthwise_conv( tensor_view filter, tensor_view bias, tensor_view input, tensor_view output, const int padding[], const int stride[], const int dilation[] ) { const size_t multiplier = output.shape[1] / input.shape[1]; const bool broadcast = filter.shape[0] == 1; _bias(bias, Transposed ? input : output); for ( size_t b = 0; b < input.shape[0]; ++b ) { for ( size_t c = 0; c < input.shape[1]; ++c ) { for ( size_t m = 0; m < multiplier; ++m ) { const size_t z = multiplier * c + m; _conv_core(filter[broadcast ? 
0 : z][0], input[b][c], output[b][z], padding, stride, dilation); } } } } template void depthwise_conv( tensor_view filter, tensor_view bias, tensor_view input, tensor_view output, const int padding[], const int stride[], const int dilation[] ) { static decltype(&_depthwise_conv) funcs[] = { _depthwise_conv, _depthwise_conv, _depthwise_conv, }; funcs[input.rank - 3](filter, bias, input, output, padding, stride, dilation); } template void _grouped_conv( tensor_view filter, tensor_view bias, tensor_view input, tensor_view output, const int padding[], const int stride[], const int dilation[], const size_t groups ) { _bias(bias, Transposed ? input : output); const size_t input_block = input.shape[1] / groups; const size_t output_block = output.shape[1] / groups; for ( size_t b = 0; b < input.shape[0]; ++b ) { for ( size_t g = 0; g < groups; ++g ) { for ( size_t z = 0; z < output_block; ++z ) { for ( size_t c = 0; c < input_block; ++c ) { _conv_core(filter[g * output_block + z][c], input[b][g * input_block + c], output[b][g * output_block + z], padding, stride, dilation); } } } } } template void grouped_conv( tensor_view filter, tensor_view bias, tensor_view input, tensor_view output, const int padding[], const int stride[], const int dilation[], const size_t groups ) { static decltype(&_grouped_conv) funcs[] = { _grouped_conv, _grouped_conv, _grouped_conv, }; funcs[input.rank - 3](filter, bias, input, output, padding, stride, dilation, groups); } template static void _pool_core( tensor_view input, tensor_view output, const int size[], const int padding[], const int stride[], const int dilation[], const Op& op, const bool include_border ) { int input_index[D]; nd_loop(output.shape, [&]( const int output_index[] ) { nd_loop(size, [&]( const int kernel_index[] ) { for_n([&]( const size_t k ) { input_index[k] = output_index[k] * stride[k] + kernel_index[k] * dilation[k] - padding[k]; }); const bool valid = all_n([&]( const size_t k ){ return input_index[k] >= 0 && 
input_index[k] < input.shape[k]; }); T& value = Transposed ? at(input, input_index) : at(output, output_index); if ( valid ) { value = op(value, Transposed ? at(output, output_index) : at(input, input_index)); } else if ( include_border && !Transposed ) { value = op(value, (T)0); } }); }); } template void _pool( tensor_view input, tensor_view output, const int size[], const int padding[], const int stride[], const int dilation[], const Op& op, const T init, const bool include_border ) { std::fill_n(Transposed ? input.data : output.data, Transposed ? input.volume : output.volume, init); _pool_core(input, output, size, padding, stride, dilation, op, include_border); } template void pool( tensor_view input, tensor_view output, const int size[], const int padding[], const int stride[], const int dilation[], const Op& op, const T init, const bool include_border ) { static decltype(&_pool) funcs[] = { _pool, _pool, _pool, _pool, _pool, }; funcs[input.rank - 1](input, output, size, padding, stride, dilation, op, init, include_border); } template static void _pool_area( tensor_view tensor, const int input_shape[], const int output_shape[], const int size[], const int padding[], const int stride[], const int dilation[] ) { std::fill_n(tensor.data, nd_volume(tensor.shape), (T)0); int input_index[D]; nd_loop(output_shape, [&]( const int output_index[] ) { nd_loop(size, [&]( const int kernel_index[] ) { for_n([&]( const size_t k ) { input_index[k] = output_index[k] * stride[k] + kernel_index[k] * dilation[k] - padding[k]; }); if ( all_n([&]( const size_t k ){ return input_index[k] >= 0 && input_index[k] < input_shape[k]; }) ) { ++at(tensor, Transposed ? 
input_index : output_index); } }); }); } template static void pool_area( tensor_view tensor, const int input_shape[], const int output_shape[], const int size[], const int padding[], const int stride[], const int dilation[] ) { static decltype(&_pool_area) funcs[] = { _pool_area, _pool_area, _pool_area, _pool_area, _pool_area, }; funcs[tensor.rank - 1](tensor, input_shape, output_shape, size, padding, stride, dilation); } template void _matmul( const size_t m, const size_t n, const size_t k, const T* A, const T* B, T* C ) { for ( size_t i = 0; i < m; ++i ) { for ( size_t j = 0; j < n; ++j, ++C ) { for ( size_t l = 0; l < k; ++l ) { *C += A[trA ? l * m + i : i * k + l] * B[trB ? j * k + l : l * n + j]; } } } } template void matmul( const bool trA, const bool trB, tensor_view A, tensor_view B, tensor_view C ) { std::fill_n(C.data, nd_volume(C.rank, C.shape), 0.f); const size_t offset = C.rank - 2; const size_t dA = nd_volume<2>(A.shape + offset); const size_t dB = nd_volume<2>(B.shape + offset); const size_t dC = nd_volume<2>(C.shape + offset); const size_t m = C.shape[offset]; const size_t n = C.shape[offset + 1]; const size_t k = trA ? 
A.shape[offset] : A.shape[offset + 1]; const size_t b = nd_volume(offset, C.shape); for ( size_t i = 0; i < b; ++i, A.data += dA, B.data += dB, C.data += dC ) { if ( trA && trB ) { _matmul(m, n, k, A.data, B.data, C.data); } else if ( trA ) { _matmul(m, n, k, A.data, B.data, C.data); } else if ( trB ) { _matmul(m, n, k, A.data, B.data, C.data); } else { _matmul(m, n, k, A.data, B.data, C.data); } } } template void linear( tensor_view filter, tensor_view bias, tensor_view input, tensor_view output ) { const size_t m = output.shape[0]; const size_t n = output.shape[1]; const size_t k = input.shape[1]; if ( bias.volume == 1 ) { std::fill_n(output.data, m * n, *bias.data); } else { T* data = output.data; for ( size_t i = 0; i < m; ++i, data += n ) { std::copy_n(bias.data, n, data); } } _matmul(m, n, k, input.data, filter.data, output.data); } template void _linear_upsample2x_symmetric( tensor_view input, tensor_view output ) { const T zero = 0; const T weights[] = { 0.25, 0.75, 0.75, 0.25 }; const int shape[] = { 1, 1, 4 }; tensor_view filter = { 3, 4, shape, weights }; tensor_view bias = { 0, 1, nullptr, &zero }; const int padding[] = { 1 }; const int stride[] = { 2 }; const int dilation[] = { 1 }; return _depthwise_conv(filter, bias, output, input, padding, stride, dilation); } template void _linear_upsample2x_asymmetric( tensor_view input, tensor_view output ) { const T zero = 0; const T weights[] = { 0.5, 1.0, 0.5, }; const int shape[] = { 1, 1, 3 }; tensor_view filter = { 3, 3, shape, weights }; tensor_view bias = { 0, 1, nullptr, &zero }; const int padding[] = { 1 }; const int stride[] = { 2 }; const int dilation[] = { 1 }; return _depthwise_conv(filter, bias, output, input, padding, stride, dilation); } template void _bilinear_upsample2x_symmetric( tensor_view input, tensor_view output ) { const T zero = 0; const T weights[] = { 0.0625, 0.1875, 0.1875, 0.0625, 0.1875, 0.5625, 0.5625, 0.1875, 0.1875, 0.5625, 0.5625, 0.1875, 0.0625, 0.1875, 0.1875, 0.0625, }; 
const int shape[] = { 1, 1, 4, 4 }; tensor_view filter = { 4, 16, shape, weights }; tensor_view bias = { 0, 1, nullptr, &zero }; const int padding[] = { 1, 1 }; const int stride[] = { 2, 2 }; const int dilation[] = { 1, 1 }; return _depthwise_conv(filter, bias, output, input, padding, stride, dilation); } template void _bilinear_upsample2x_asymmetric( tensor_view input, tensor_view output ) { const T zero = 0; const T weights[] = { 0.25, 0.5, 0.25, 0.50, 1.0, 0.50, 0.25, 0.5, 0.25, }; const int shape[] = { 1, 1, 3, 3 }; tensor_view filter = { 4, 9, shape, weights }; tensor_view bias = { 0, 1, nullptr, &zero }; const int padding[] = { 1, 1 }; const int stride[] = { 2, 2 }; const int dilation[] = { 1, 1 }; return _depthwise_conv(filter, bias, output, input, padding, stride, dilation); } template void multilinear_upsample2x_symmetric( tensor_view input, tensor_view output ) { static decltype(&_linear_upsample2x_symmetric) funcs[] = { _linear_upsample2x_symmetric, _bilinear_upsample2x_symmetric, }; return funcs[input.rank - 3](input, output); } template void multilinear_upsample2x_asymmetric( tensor_view input, tensor_view output ) { static decltype(&_linear_upsample2x_asymmetric) funcs[] = { _linear_upsample2x_asymmetric, _bilinear_upsample2x_asymmetric, }; return funcs[input.rank - 3](input, output); } template T _reduce( const size_t n, const T* x, const size_t dx, const Op& op ) { T r = *x; x += dx; for ( size_t i = 1; i < n; ++i, x += dx ) { r = op(r, *x); } return r; } template void _softmax( const size_t n, const size_t m, const T* x, T* y ) { const T max = _reduce(n, x, m, []( const T x, const T y ){ return std::max(x,y); }); _unary(n, x, m, y, m, [&]( const T x ){ return std::exp(x - max); }); const T sum = _reduce(n, y, m, std::plus()); _binary(n, y, m, &sum, 0, y, m, std::divides()); } template void softmax( tensor_view input, tensor_view output, const size_t axis ) { const size_t batch = nd_volume(axis, input.shape); const size_t channels = 
input.shape[axis]; const size_t size = nd_volume(input.rank - axis - 1, input.shape + axis + 1); const size_t volume = channels * size; for ( size_t i = 0; i < batch; ++i, input.data += volume, output.data += volume ) { for ( size_t j = 0; j < size; ++j ) { _softmax(channels, size, input.data + j, output.data + j); } } } template I _arg_reduce( const size_t n, const T* x, const size_t dx, const Op& op ) { I idx = 0; T val = *x; x += dx; for ( size_t i = 1; i < n; ++i, x += dx ) { if ( op(*x, val) ) { val = *x; idx = (I)i; } } return idx; } template void arg_reduce( tensor_view input, tensor_view output, const size_t axis, const Op& op ) { const size_t batch = nd_volume(axis, input.shape); const size_t channels = input.shape[axis]; const size_t size = nd_volume(input.rank - axis - 1, input.shape + axis + 1); const size_t volume = channels * size; for ( size_t i = 0; i < batch; ++i, input.data += volume, output.data += size ) { for ( size_t j = 0; j < size; ++j ) { output.data[j] = _arg_reduce(channels, input.data + j, size, op); } } } template void _transpose( tensor_view x, tensor_view y, const size_t perm[] ) { int yi[D]; nd_loop(x.shape, [&]( const int xi[] ) { for_n([&]( const size_t k ) { yi[k] = xi[perm[k]]; }); at(y,yi) = at(x,xi); }); } template void transpose( tensor_view x, tensor_view y, const size_t perm[] ) { static decltype(&_transpose<1,T>) funcs[] = { _transpose<1,T>, _transpose<2,T>, _transpose<3,T>, _transpose<4,T>, _transpose<5,T>, }; funcs[x.rank - 1](x, y, perm); } template void concat( const size_t n, tensor_view x[], tensor_view y, const size_t axis ) { const size_t b = nd_volume(axis, y.shape); const size_t m = nd_volume(y.rank - axis - 1, y.shape + axis + 1); for ( size_t i = 0; i < b; ++i ) { for ( size_t j = 0; j < n; ++j ) { const size_t size = Singular ? 
m : x[j].shape[axis] * m; std::copy_n(x[j].data, size, y.data); x[j].data += size; y.data += size; } } } template void split( const size_t n, tensor_view x, tensor_view y[], const size_t axis ) { const size_t b = nd_volume(axis, x.shape); const size_t m = nd_volume(x.rank - axis - 1, x.shape + axis + 1); for ( size_t i = 0; i < b; ++i ) { for ( size_t j = 0; j < n; ++j ) { const size_t size = Singular ? m : y[j].shape[axis] * m; std::copy_n(x.data, size, y[j].data); x.data += size; y[j].data += size; } } } template void _tile( tensor_view input, tensor_view output ) { int input_index[D]; nd_loop(output.shape, [&]( const int output_index[] ) { for_n([&]( const size_t k ) { input_index[k] = output_index[k] % input.shape[k]; }); at(output, output_index) = at(input, input_index); }); } template void tile( tensor_view input, tensor_view output ) { static decltype(&_tile<1,T>) funcs[] = { _tile<1,T>, _tile<2,T>, _tile<3,T>, _tile<4,T>, _tile<5,T>, }; return funcs[input.rank - 1](input, output); } template void _pad_constant( tensor_view input, tensor_view output, const int padding[], const T value ) { int input_index[D]; nd_loop(output.shape, [&]( const int output_index[] ) { for_n([&]( const size_t k ) { input_index[k] = output_index[k] - padding[k]; }); const bool valid = all_n([&]( const size_t k ){ return input_index[k] >= 0 && input_index[k] < input.shape[k]; }); at(output, output_index) = valid ? 
at(input, input_index) : (T)value; }); } template void _pad_replicate( tensor_view input, tensor_view output, const int padding[] ) { int input_index[D]; nd_loop(output.shape, [&]( const int output_index[] ) { for_n([&]( const size_t k ) { input_index[k] = std::min(std::max(output_index[k] - padding[k], 0), input.shape[k] - 1); }); at(output, output_index) = at(input, input_index); }); } template void _pad_reflect( tensor_view input, tensor_view output, const int padding[] ) { int input_index[D]; nd_loop(output.shape, [&]( const int output_index[] ) { for_n([&]( const size_t k ) { auto index = output_index[k] - padding[k]; if ( index < 0 ) { input_index[k] = -index; } else if ( index >= input.shape[k] ) { input_index[k] = 2 * (input.shape[k] - 1) - index; } else { input_index[k] = index; } }); at(output, output_index) = at(input, input_index); }); } template void _pad_reflect_even( tensor_view input, tensor_view output, const int padding[] ) { int input_index[D]; nd_loop(output.shape, [&]( const int output_index[] ) { for_n([&]( const size_t k ) { auto index = output_index[k] - padding[k]; if ( index < 0 ) { input_index[k] = -index - 1; } else if ( index >= input.shape[k] ) { input_index[k] = 2 * (input.shape[k] - 1) - index + 1; } else { input_index[k] = index; } }); at(output, output_index) = at(input, input_index); }); } template void pad_constant( tensor_view input, tensor_view output, const int padding[], const T value ) { static decltype(&_pad_constant<1,T>) funcs[] = { _pad_constant<1,T>, _pad_constant<2,T>, _pad_constant<3,T>, _pad_constant<4,T>, _pad_constant<5,T>, }; return funcs[input.rank - 1](input, output, padding, value); } template void pad_replicate( tensor_view input, tensor_view output, const int padding[] ) { static decltype(&_pad_replicate<1,T>) funcs[] = { _pad_replicate<1,T>, _pad_replicate<2,T>, _pad_replicate<3,T>, _pad_replicate<4,T>, _pad_replicate<5,T>, }; return funcs[input.rank - 1](input, output, padding); } template void pad_reflect( 
tensor_view input, tensor_view output, const int padding[] ) { static decltype(&_pad_reflect<1,T>) funcs[] = { _pad_reflect<1,T>, _pad_reflect<2,T>, _pad_reflect<3,T>, _pad_reflect<4,T>, _pad_reflect<5,T>, }; return funcs[input.rank - 1](input, output, padding); } template void pad_reflect_even( tensor_view input, tensor_view output, const int padding[] ) { static decltype(&_pad_reflect_even<1,T>) funcs[] = { _pad_reflect_even<1,T>, _pad_reflect_even<2,T>, _pad_reflect_even<3,T>, _pad_reflect_even<4,T>, _pad_reflect_even<5,T>, }; return funcs[input.rank - 1](input, output, padding); } template void _slice( tensor_view input, tensor_view output, const int offset[], const int stride[] ) { int input_index[D]; nd_loop(output.shape, [&]( const int output_index[] ) { for_n([&]( const size_t k ) { input_index[k] = offset[k] + stride[k] * output_index[k]; }); at(output, output_index) = at(input, input_index); }); } template void slice( tensor_view input, tensor_view output, const int offset[], const int stride[] ) { static decltype(&_slice<1,T>) funcs[] = { _slice<1,T>, _slice<2,T>, _slice<3,T>, _slice<4,T>, _slice<5,T>, }; return funcs[input.rank - 1](input, output, offset, stride); } template void _gather( const T* input, const I* indices, T* output, const size_t b, const size_t d, const size_t n, const size_t m ) { for ( size_t k = 0; k < b; ++k, input += d * m ) { for ( size_t i = 0; i < n; ++i, output += m ) { std::copy_n(input + indices[i] * m, m, output); } } } template void gather( tensor_view input, tensor_view indices, tensor_view output, const size_t axis ) { const size_t b = nd_volume(axis, input.shape); const size_t d = input.shape[axis]; const size_t n = nd_volume(indices.rank, indices.shape); const size_t m = nd_volume(input.rank - axis - 1, input.shape + axis + 1); _gather(input.data, indices.data, output.data, b, d, n, m); } }} // namespace nnef::rt #endif ================================================ FILE: nnef-pyproject/nnef/cpp/include/nnef.h 
================================================ /* * Copyright (c) 2017 The Khronos Group Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef _NNEF_H_ #define _NNEF_H_ #include #include #include #include #include #include #include "nnef/common/value.h" namespace nnef { /* * Ordered key-value pairs of arbitrary typed parameter values used for operation attributes */ struct ValueDict : public std::vector> { typedef std::pair item_type; bool contains( const std::string& key ) const { return std::find_if(this->begin(), this->end(), [&]( const item_type& item ){ return item.first == key; }) != this->end(); } const Value& get( const std::string& key, const Value& defult = Value::none() ) const { auto it = std::find_if(this->begin(), this->end(), [&]( const item_type& item ){ return item.first == key; }); return it != this->end() ? 
it->second : defult; } const Value& get( const size_t idx ) const { return std::vector::at(idx).second; } }; /* * Tensor data-structure used both for activation and variable tensors */ struct Tensor { std::string name; // name of the tensor in the graph std::string dtype; // data-type of the tensor (such as "scalar", "integer", "logical") std::vector shape; // shape of the tensor, filled if shape propagation is in effect std::vector data; // byte array of the data of the tensor, filled in if tensor is a variable ValueDict quantization; // quantization algorithm info for both activation and variable tensors // used keys: "op-name" (string), attribute names depending on op-name }; /* * Operation data-structure to represent a single operation in the graph */ struct Operation { std::string name; // name (kind) of the operation std::string dtype; // data-type in case the operation is generic (such as "scalar", "integer", "logical") ValueDict attribs; // ordered dictionary of non-tensor attributes of the operation (declaration order) ValueDict inputs; // ordered dictionary of tensor inputs of the operation (may also contain constants) ValueDict outputs; // ordered dictionary tensor outputs of the operation }; /* * Graph data-structure, list of tensors and operations */ struct Graph { std::string name; // name of the graph std::map tensors; // list of tensors in the graph std::vector operations; // list of operations in the graph, in topograpic order std::vector inputs; // list of input tensor ids std::vector outputs; // list of output tensor ids }; /* * Parse the NNEF graph from file * * @param graph_fn: name of the graph file * @param quant_fn: name of the quantization file * @param graph: the graph data structure to fill in * @param error: the string to store the error message if any * @param stdlib: the implementation of standard operations to use * @param lowered: a list of operations to be lowered * * @return true if there were no parsing errors, false otherwise */ 
bool parse_file( const std::string& graph_fn, const std::string& quant_fn, Graph& graph, std::string& error, const std::string& stdlib = "", const std::set& lowered = {} ) noexcept; /* * Parse the NNEF graph from string * * @param graph_str: the graph string * @param quant_str: the quantization string * @param graph: the graph data structure to fill in * @param error: the string to store the error message if any * @param stdlib: the implementation of standard operations to use * @param lowered: a list of operations to be lowered * * @return true if there were no parsing errors, false otherwise */ bool parse_string( const std::string& graph_str, const std::string& quant_str, Graph& graph, std::string& error, const std::string& stdlib = "", const std::set& lowered = {} ) noexcept; /* * Read/write a single tensor from/to binary stream * * @param is/os: the stream to read from/write to * @param tensor: the tensor object to fill into/from * @param error: the string to store the error message if any * * @return true if there were no errors, false otherwise */ bool read_tensor( std::istream& is, Tensor& tensor, std::string& error ) noexcept; bool write_tensor( std::ostream& os, const Tensor& tensor, std::string& error ) noexcept; /* * Read/write a single tensor from/to a binary file * * @param filename: the name of the file to read from/write to * @param tensor: the tensor object to fill into/from * @param error: the string to store the error message if any * * @return true if there were no errors, false otherwise */ bool read_tensor( const std::string& filename, Tensor& tensor, std::string& error ) noexcept; bool write_tensor( const std::string& filename, const Tensor& tensor, std::string& error ) noexcept; /* * Load variables/whole model from set of files in a folder * * @param path: the path to the top level NNEF model folder * @param graph: the graph object to load tensors into * @param error: the string to store the error message if any * @param stdlib: the 
implementation of standard operations to use * @param lowered: a list of operations to be lowered * * @return true if there were no errors, false otherwise */ bool load_variables( const std::string& path, Graph& graph, std::string& error ) noexcept; bool load_graph( const std::string& path, Graph& graph, std::string& error, const std::string& stdlib = "", const std::set& lowered = {} ) noexcept; /* * Shape propagation function type */ typedef std::function ShapeFunc; /* * Perform shape inference on the graph * * @param graph: the graph object * @param error: the string to store the error message if any * @param custom_shapes: shape inference functions for custom operations * * @return true if there were no errors, false otherwise */ bool infer_shapes( Graph& graph, std::string& error, const std::map>& input_shapes = {}, const std::map& custom_shapes = {} ) noexcept; /* * Allocate tensor buffers in the graph * * @param graph: the graph object * @param error: the string to store the error message if any * * @return true if there were no errors, false otherwise */ bool allocate_buffers( Graph& graph, std::string& error ) noexcept; /* * Execute a graph * * @param graph: the graph object * @param error: the string to store the error message if any * * @return true if there were no errors, false otherwise */ bool execute( Graph& graph, std::string& error ) noexcept; } // namespace nnef #endif ================================================ FILE: nnef-pyproject/nnef/cpp/infer.cpp ================================================ /* * Copyright (c) 2017 The Khronos Group Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "nnef.h" #include #include #include #include #include #include #ifdef _WIN32 #include #else #include #endif const std::set lowered = { "separable_conv", "separable_deconv", "rms_pool", "local_response_normalization", "local_mean_normalization", "local_variance_normalization", "local_contrast_normalization", "l1_normalization", "l2_normalization", "batch_normalization", "area_downsample", "nearest_downsample", "nearest_upsample", "linear_quantize", "logarithmic_quantize", "leaky_relu", "prelu", "clamp", }; std::string read_file( const char* fn ) { std::ifstream is(fn); if ( !is ) { throw std::runtime_error("file not found: " + std::string(fn)); } return std::string((std::istreambuf_iterator(is)), std::istreambuf_iterator()); } bool read_inputs_from_cin( nnef::Graph& graph, std::string& error ) { for ( auto& input : graph.inputs ) { auto& tensor = graph.tensors.at(input); if ( !nnef::read_tensor(std::cin, tensor, error) ) { return false; } } return true; } bool read_inputs_from_file( nnef::Graph& graph, const std::vector& inputs, std::string& error ) { size_t idx = 0; for ( auto& input : graph.inputs ) { auto& tensor = graph.tensors.at(input); if ( !nnef::read_tensor(inputs[idx++], tensor, error) ) { return false; } } return true; } bool write_output_to_cout( const nnef::Graph& graph, std::string& error ) { for ( auto& output : graph.outputs ) { auto& tensor = graph.tensors.at(output); if ( !nnef::write_tensor(std::cout, tensor, error) ) { return false; } } return true; } bool write_output_to_file( const nnef::Graph& graph, const std::vector& outputs, 
std::string& error ) { size_t idx = 0; for ( auto& output : graph.outputs ) { auto& tensor = graph.tensors.at(output); if ( !nnef::write_tensor(outputs[idx++], tensor, error) ) { return false; } } return true; } template T sqr( const T x ) { return x * x; } template T relative_difference( const size_t n, const T* ref, const T* dat ) { T diff = 0; T range = 0; for ( size_t i = 0; i < n; ++i ) { diff += sqr(ref[i] - dat[i]); range += sqr(ref[i]); } return std::sqrt(diff / range); } std::ostream& operator<<( std::ostream& os, const std::vector& v ) { os << '['; for ( size_t i = 0; i < v.size(); ++i ) { if ( i ) { os << ','; } os << v[i]; } os << ']'; return os; } int volume( const std::vector& v ) { return std::accumulate(v.begin(), v.end(), 1, std::multiplies()); } int main( int argc, const char * argv[] ) { if ( argc < 2 ) { std::cerr << "Input file name must be provided" << std::endl; return -1; } const std::string path = argv[1]; std::string stdlib; std::vector inputs; std::vector outputs; bool compare = false; for ( size_t i = 2; i < argc; ++i ) { const std::string arg = argv[i]; if ( arg == "--stdlib" ) { if ( ++i == argc ) { std::cerr << "Stdlib file name must be provided after --stdlib; ignoring option" << std::endl; } try { stdlib = read_file(argv[i]); } catch ( std::runtime_error e ) { std::cerr << e.what() << std::endl; } } else if ( arg == "--input" ) { if ( i + 1 == argc ) { std::cerr << "Input file name(s) must be provided after --input; ignoring option" << std::endl; } while ( i + 1 < argc && *argv[i+1] != '-' ) { inputs.push_back(argv[++i]); } } else if ( arg == "--output" ) { if ( i + 1 == argc ) { std::cerr << "Output file name(s) must be provided after --output; ignoring option" << std::endl; } while ( i + 1 < argc && *argv[i+1] != '-' ) { outputs.push_back(argv[++i]); } } else if ( arg == "--compare" ) { compare = true; } else { std::cerr << "Unrecognized option: " << argv[i] << "; ignoring" << std::endl; } } nnef::Graph graph; std::string error; 
if ( !nnef::load_graph(path, graph, error, stdlib, lowered) ) { std::cerr << error << std::endl; return -1; } std::map> input_shapes; if ( !inputs.empty() || !isatty(STDIN_FILENO) ) { bool read = !inputs.empty() ? read_inputs_from_file(graph, inputs, error) : read_inputs_from_cin(graph, error); if ( !read ) { std::cerr << error << std::endl; return -1; } for ( auto& input : graph.inputs ) { input_shapes.emplace(input, graph.tensors.at(input).shape); } } if ( !nnef::infer_shapes(graph, error, input_shapes) ) { std::cerr << error << std::endl; return -1; } if ( !nnef::allocate_buffers(graph, error) ) { std::cerr << error << std::endl; return -1; } std::cerr << "Executing model: " << path << std::endl; if ( !nnef::execute(graph, error) ) { std::cerr << error << std::endl; return -1; } if ( compare && !outputs.empty() ) { for ( size_t i = 0; i < graph.outputs.size(); ++i ) { const nnef::Tensor& output = graph.tensors.at(graph.outputs[i]); nnef::Tensor tensor; if ( !nnef::read_tensor(outputs[i], tensor, error) ) { std::cerr << error << std::endl; return -1; } if ( output.dtype != tensor.dtype ) { std::cout << "data-type " << output.dtype << " of '" << graph.outputs[i] << "' does not match reference data-type " << tensor.dtype << std::endl; } else if ( output.shape != tensor.shape ) { std::cout << "shape " << output.shape << " of '" << graph.outputs[i] << "' does not match reference shape " << tensor.shape << std::endl; } else { if ( tensor.dtype == "scalar" ) { auto diff = relative_difference(volume(tensor.shape), (const float*)tensor.data.data(), (const float*)output.data.data()); std::cout << "'" << graph.outputs[i] << "' diff = " << diff << std::endl; } else { auto matches = output.data == tensor.data; std::cout << "'" << graph.outputs[i] << "' " << (matches ? "matches" : "does not match") << std::endl; } } } } else if ( !outputs.empty() || !isatty(STDOUT_FILENO) ) { bool write = !outputs.empty() ? 
write_output_to_file(graph, outputs, error) : write_output_to_cout(graph, error); if ( !write ) { std::cerr << error << std::endl; return -1; } } return 0; } ================================================ FILE: nnef-pyproject/nnef/cpp/sample.cpp ================================================ /* * Copyright (c) 2017 The Khronos Group Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "nnef.h" #include #include int main( int argc, const char * argv[] ) { if ( argc < 2 ) { std::cerr << "Input file name must be provided" << std::endl; return -1; } const std::string path = argv[1]; nnef::Graph graph; std::string error; if ( !nnef::load_graph(path, graph, error, "") ) { std::cerr << error << std::endl; return -1; } if ( !nnef::infer_shapes(graph, error) ) { std::cerr << error << std::endl; return -1; } std::cerr << "Successfully parsed file: " << path << std::endl; return 0; } ================================================ FILE: nnef-pyproject/nnef/cpp/src/cnnef.cpp ================================================ /* * Copyright (c) 2017 The Khronos Group Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "cnnef.h" #include "nnef.h" #include using namespace nnef; nnef_graph_t nnef_graph_load( const char* path, char *perror ) { std::string error; Graph *nnef_graph = new Graph(); bool success = load_graph(path, *nnef_graph, error); if ( !success ) { if ( perror != NULL ) { strncpy(perror, error.c_str(), error.length() + 1); } return NULL; } return nnef_graph; } nnef_graph_t nnef_graph_copy( nnef_graph_t graph ) { const Graph *nnef_graph = (const Graph*)graph; return new Graph(*nnef_graph); } void nnef_graph_release( nnef_graph_t graph ) { Graph *nnef_graph = (Graph *)graph; if ( nnef_graph ) { delete nnef_graph; } } int nnef_graph_infer_shapes( nnef_graph_t graph, char *perror ) { std::string error; Graph* nnef_graph = (Graph*)graph; if ( !infer_shapes(*nnef_graph, error) ) { if ( perror != NULL ) { strncpy(perror, error.c_str(), error.length() + 1); } return 0; } return 1; } int nnef_graph_allocate_buffers( nnef_graph_t graph, char *perror ) { std::string error; Graph *nnef_graph = (Graph *)graph; if ( nnef_graph == NULL ) { return 0; } if ( !nnef::allocate_buffers(*nnef_graph, error) ) { if ( perror != NULL ) { strncpy(perror, error.c_str(), error.length() + 1); } return 0; } return 1; } int nnef_graph_execute( nnef_graph_t graph, char *perror ) { Graph *nnef_graph = (Graph *)graph; if ( nnef_graph == NULL ) { return 0; } std::string error; if ( !nnef::execute(*nnef_graph, error) ) { if ( perror != NULL ) { strncpy(perror, error.c_str(), error.length() + 1); } return 0; } return 1; } size_t nnef_graph_input_names( nnef_graph_t graph, const char** inputs ) 
{ const Graph* nnef_graph = (const Graph*)graph; if ( inputs != NULL ) { for ( size_t i = 0; i < nnef_graph->inputs.size(); ++i ) { inputs[i] = nnef_graph->inputs[i].c_str(); } } return nnef_graph->inputs.size(); } size_t nnef_graph_output_names( nnef_graph_t graph, const char** outputs ) { const Graph* nnef_graph = (const Graph*)graph; if ( outputs != NULL ) { for ( size_t i = 0; i < nnef_graph->outputs.size(); ++i ) { outputs[i] = nnef_graph->outputs[i].c_str(); } } return nnef_graph->outputs.size(); } nnef_tensor_t nnef_graph_find_tensor( nnef_graph_t graph, const char* tensor_name ) { const Graph *nnef_graph = (const Graph*)graph; if ( nnef_graph == NULL ) { return NULL; } std::map::const_iterator it = nnef_graph->tensors.find(tensor_name); return it != nnef_graph->tensors.end() ? (nnef_tensor_t)&it->second : NULL; } const char* nnef_graph_name( nnef_graph_t graph ) { const Graph *nnef_graph = (const Graph*)graph; return nnef_graph->name.c_str(); } nnef_tensor_t nnef_tensor_create(void) { return new nnef::Tensor(); } void nnef_tensor_release( nnef_tensor_t tensor ) { Tensor* nnef_tensor = (Tensor*)tensor; if ( nnef_tensor != NULL ) { delete nnef_tensor; } } const char* nnef_tensor_name( nnef_tensor_t tensor ) { const Tensor *nnef_tensor = (const Tensor*)tensor; return nnef_tensor->name.c_str(); } const char* nnef_tensor_dtype( nnef_tensor_t tensor ) { const Tensor *nnef_tensor = (const Tensor*)tensor; return nnef_tensor->dtype.c_str(); } size_t nnef_tensor_rank( nnef_tensor_t tensor ) { const Tensor *nnef_tensor = (const Tensor*)tensor; return nnef_tensor->shape.size(); } const int* nnef_tensor_dims( nnef_tensor_t tensor ) { const Tensor *nnef_tensor = (const Tensor*)tensor; return nnef_tensor->shape.data(); } void* nnef_tensor_data( nnef_tensor_t tensor ) { const Tensor* nnef_tensor = (const Tensor*)tensor; return (void*)nnef_tensor->data.data(); } int nnef_tensor_read( const char* path, nnef_tensor_t tensor, char *perror ) { Tensor *nnef_tensor = (Tensor 
*)tensor; if ( nnef_tensor == NULL ) { return 0; } std::string error; if ( !read_tensor(path, *nnef_tensor, error) ) { if ( perror != NULL ) { strncpy(perror, error.c_str(), error.length() + 1); } return 0; } return 1; } int nnef_tensor_write( const char* path, nnef_tensor_t tensor, char *perror ) { const Tensor *nnef_tensor = (const Tensor *)tensor; if ( nnef_tensor == NULL ) { return 0; } std::string error; if ( !write_tensor(path, *nnef_tensor, error) ) { if ( perror != NULL ) { strncpy(perror, error.c_str(), error.length() + 1); } return 0; } return 1; } ================================================ FILE: nnef-pyproject/nnef/cpp/src/nnef.cpp ================================================ /* * Copyright (c) 2017 The Khronos Group Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include #include "nnef.h" #include "nnef/comp/comp_parser.h" #include "nnef/flat/quant_parser.h" #include "nnef/common/binary.h" #include "nnef/common/shapes.h" #include "nnef/runtime/execution.h" namespace nnef { struct ParseCallback : public Parser::Callback { Graph& graph; std::istream& qis; const std::string& qfn; Dictionary> quantizations; ParseCallback( Graph& graph, std::istream& qis, const std::string& qfn ) : graph(graph), qis(qis), qfn(qfn) { } virtual void beginGraph( const Prototype& proto, const Dictionary& fragments ) { graph.name = proto.name(); graph.operations.clear(); graph.tensors.clear(); graph.inputs.resize(proto.paramCount()); for ( size_t i = 0; i < proto.paramCount(); ++i ) { graph.inputs[i] = proto.param(i).name(); } graph.outputs.resize(proto.resultCount()); for ( size_t i = 0; i < proto.resultCount(); ++i ) { graph.outputs[i] = proto.result(i).name(); } if ( qis ) { quantizations = nnef::QuantParser::parse(qis, qfn.c_str(), fragments); } } virtual void endGraph( const Prototype& proto, const Dictionary& dtypes ) { for ( auto& it : dtypes ) { Tensor tensor; tensor.name = it.first; tensor.dtype = toString(it.second); if ( quantizations.count(it.first) ) { for ( auto& item : quantizations.at(it.first) ) { tensor.quantization.push_back(item); } } graph.tensors.emplace(it.first, std::move(tensor)); } } virtual void operation( const Prototype& proto, const Dictionary& args, const Dictionary& dtypes ) { Operation operation; operation.name = proto.name(); operation.dtype = args.count("?") ? 
args.at("?").string() : std::string(); for ( size_t i = 0; i < proto.paramCount(); ++i ) { auto& param = proto.param(i); auto& value = args.at(param.name()); if ( param.type()->isAttribute() ) { operation.attribs.emplace_back(param.name(), value); } else { operation.inputs.emplace_back(param.name(), value); } } for ( size_t i = 0; i < proto.resultCount(); ++i ) { auto& result = proto.result(i); auto& value = args.at(result.name()); operation.outputs.emplace_back(result.name(), value); } graph.operations.push_back(std::move(operation)); } }; std::string format_error_position( const Error::Position& pos ) { return "'" + std::string(pos.filename) + "' [" + std::to_string(pos.line) + ":" + std::to_string(pos.column) + "]"; } bool parse( std::istream& graph_is, const std::string& graph_fn, std::istream& quant_is, const std::string& quant_fn, Graph& graph, std::string& error, const std::string& stdlib, const std::set& lowered ) noexcept { ParseCallback callback(graph, quant_is, quant_fn); CompParser parser(stdlib, lowered); try { parser.parse(graph_is, graph_fn.c_str(), callback); return true; } catch ( const nnef::Error& e ) { error = "Parse error in file " + format_error_position(e.position()) + " " + e.what(); auto origin = e.position().origin; while ( origin ) { error += "\n... 
evaluated from file " + format_error_position(e.position()); origin = origin->origin; } return false; } } bool parse_file( const std::string& graph_fn, const std::string& quant_fn, Graph& graph, std::string& error, const std::string& stdlib, const std::set& lowered ) noexcept { std::ifstream graph_is(graph_fn); if ( !graph_is ) { error = "Could not open graph file: " + std::string(graph_fn); return false; } std::ifstream quant_is; if ( !quant_fn.empty() ) { quant_is.open(quant_fn); if ( !quant_is ) { error = "Could not open quantization file: " + std::string(quant_fn); return false; } } return parse(graph_is, graph_fn, quant_is, quant_fn, graph, error, stdlib, lowered); } bool parse_string( const std::string& graph_str, const std::string& quant_str, Graph& graph, std::string& error, const std::string& stdlib, const std::set& lowered ) noexcept { std::stringstream graph_is(graph_str); std::stringstream quant_is; if ( !quant_str.empty() ) { quant_is.str(quant_str); } return parse(graph_is, "input", quant_is, "quantization", graph, error, stdlib, lowered); } size_t item_bytes( const std::string& dtype ) { return dtype == "scalar" ? sizeof(float) : dtype == "integer" ? sizeof(int) : dtype == "logical" ? sizeof(bool) : 0; } size_t item_bits( const std::string& dtype ) { return dtype == "scalar" ? 32 : dtype == "integer" ? sizeof(int) * 8 : dtype == "logical" ? 
1 : 0; } bool read_tensor( std::istream& is, Tensor& tensor, std::string& error ) noexcept { TensorHeader header; is.read((char*)&header, sizeof(header)); if ( header.item_type == TensorHeader::Uint && header.reserved[0] != 0 ) { header.item_type = TensorHeader::Int; } try { validate_tensor_header(header); tensor.shape.assign(header.extents, header.extents + header.rank); } catch ( const nnef::Error& e ) { error = "Invalid tensor header: " + std::string(e.what()); return false; } std::vector bytes(header.data_length); is.read(bytes.data(), bytes.size()); if ( !is ) { error = "Failed to read tensor data"; return false; } const size_t count = volume_of(tensor.shape); if ( header.item_type == TensorHeader::Float ) { tensor.dtype = "scalar"; tensor.data.resize(count * sizeof(float)); from_bytes(bytes.data(), count, header.bits_per_item, (float*)tensor.data.data()); } else if ( header.item_type == TensorHeader::Bool ) { tensor.dtype = "logical"; tensor.data.resize(count * sizeof(bool)); from_bytes(bytes.data(), count, header.bits_per_item, (bool*)tensor.data.data()); } else if ( header.item_type == TensorHeader::Int || header.item_type == TensorHeader::Uint ) { tensor.dtype = "integer"; tensor.data.resize(count * sizeof(int)); from_bytes(bytes.data(), count, header.bits_per_item, (int*)tensor.data.data(), header.item_type == TensorHeader::Int); } else if ( header.item_type == TensorHeader::Qint || header.item_type == TensorHeader::Quint ) { tensor.dtype = "scalar"; tensor.data.resize(header.data_length); tensor.data = bytes; tensor.quantization.emplace_back("signed", Value::logical(header.item_type == TensorHeader::Qint)); } else { error = "Unsupported tensor item-type '" + std::to_string(header.item_type) + "' and bits per item '" + std::to_string(header.bits_per_item) + "'"; return false; } return (bool)is; } bool write_tensor( std::ostream& os, const Tensor& tensor, std::string& error ) noexcept { if ( tensor.shape.size() > TensorHeader::MaxRank ) { error = "Tensor 
rank " + std::to_string(tensor.shape.size()) + " exceeds maximum allowed rank (" + std::to_string(TensorHeader::MaxRank) + ")"; return false; } const bool quantized = !tensor.quantization.empty(); const bool is_signed = tensor.quantization.get("signed", Value::logical(true)).logical(); const TensorHeader::ItemType item_type = quantized ? (is_signed ? TensorHeader::Qint : TensorHeader::Quint) : tensor.dtype == "scalar" ? TensorHeader::Float : tensor.dtype == "integer" ? TensorHeader::Int : TensorHeader::Bool; TensorHeader header; const size_t version[] = { 1, 0 }; const size_t count = volume_of(tensor.shape); const size_t bits_per_item = quantized ? tensor.data.size() * 8 / count : item_bits(tensor.dtype); fill_tensor_header(header, version, tensor.shape.size(), tensor.shape.data(), bits_per_item, item_type); std::vector bytes(header.data_length); if ( tensor.dtype == "scalar" ) { if ( quantized ) { bytes = tensor.data; } else { to_bytes((const float*)tensor.data.data(), count, bytes.data()); } } else if ( tensor.dtype == "integer" ) { to_bytes((const int*)tensor.data.data(), count, bytes.data(), true); } else if ( tensor.dtype == "logical" ) { to_bytes((const bool*)tensor.data.data(), count, bytes.data()); } else { error = "Invalid tensor data-type: '" + tensor.dtype + "'"; return false; } os.write((char*)&header, sizeof(header)); os.write(bytes.data(), bytes.size()); if ( !os ) { error = "Failed to write tensor data"; return false; } return true; } bool read_tensor( const std::string& filename, Tensor& tensor, std::string& error ) noexcept { std::ifstream is(filename, std::ios::binary); if ( !is ) { error = "Could not open tensor file: " + filename; return false; } return read_tensor(is, tensor, error); } bool write_tensor( const std::string& filename, const Tensor& tensor, std::string& error ) noexcept { std::ofstream os(filename, std::ios::binary); if ( !os ) { error = "Could not open tensor file: " + filename; return false; } return write_tensor(os, tensor, 
error); } bool load_variables( const std::string& path, Graph& graph, std::string& error ) noexcept { const std::string sep = path.back() == '/' || path.back() == '\\' ? "" : "/"; for ( auto& op : graph.operations ) { if ( op.name == "variable" ) { auto& label = op.attribs.get("label").string(); auto& shape = op.attribs.get("shape"); auto& id = op.outputs.begin()->second.identifier(); auto& tensor = graph.tensors.at(id); const std::string filename = path + sep + label + ".dat"; if ( !read_tensor(filename, tensor, error) ) { return false; } if ( tensor.dtype != op.dtype ) { error = "item-type '" + tensor.dtype + "' in variable file '" + filename + "' does not match data-type '" + op.dtype + "' defined in network structure"; return false; } Value::items_t items(tensor.shape.size()); for ( size_t i = 0; i < items.size(); ++i ) { items[i] = Value::integer(tensor.shape[i]); } Value tensorShape = Value::array(items); if ( tensorShape != shape ) { error = "shape " + tensorShape.toString() + " in variable file '" + filename + "' does not match shape " + shape.toString() + " defined in network structure"; return false; } } } return true; } bool file_exists( const std::string& path ) { std::ifstream is(path); return is.is_open(); } bool load_graph( const std::string& path, Graph& graph, std::string& error, const std::string& stdlib, const std::set& lowered ) noexcept { const std::string sep = path.back() == '/' || path.back() == '\\' ? "" : "/"; const std::string graph_fn = path + sep + "graph.nnef"; const std::string quant_fn = path + sep + "graph.quant"; if ( !file_exists(graph_fn) ) { return parse_file(path, "", graph, error, stdlib, lowered); } if ( !parse_file(graph_fn, file_exists(quant_fn) ? 
quant_fn : "", graph, error, stdlib, lowered) ) { return false; } if ( !load_variables(path, graph, error) ) { return false; } return true; } namespace impl { template struct index_sequence {}; template struct index_sequence_maker : public index_sequence_maker {}; template struct index_sequence_maker<0U, Next ... > { using type = index_sequence; }; template using make_index_sequence = typename index_sequence_maker::type; template struct front_count_of { enum { value = 0 }; }; template struct front_count_of { enum { value = front_count_of::value + 1 }; }; const Shape shape_of( const Graph& graph, const Value& value ) { return value.kind() == Value::Identifier ? graph.tensors.at(value.identifier()).shape : nestedArrayShape(value); } Shape& shape_ref( Graph& graph, const Value& value ) { return graph.tensors[value.identifier()].shape; } template ShapeFunc make_shape_func( Shape(*func)(const Args&...), index_sequence, index_sequence ) { return [=]( const Operation& op, Graph& graph ) { const Shape shape = func(shape_of(graph, op.inputs[Idxs1].second)..., op.attribs[Idxs2].second...); for ( size_t i = 0; i < op.outputs.size(); ++i ) { shape_ref(graph, op.outputs[i].second) = shape; } }; } template ShapeFunc make_shape_func( std::vector(*func)(const Shape&,const Args&...), index_sequence ) { return [=]( const Operation& op, Graph& graph ) { const std::vector shapes = func(shape_of(graph, op.inputs.front().second), op.attribs[Idxs].second...); const auto& outputs = op.outputs.front().second; check(shapes.size() == outputs.size(), "number of shapes (%d) does not match number of outputs (%d)", (int)shapes.size(), (int)outputs.size()); for ( size_t i = 0; i < outputs.size(); ++i ) { shape_ref(graph, outputs[i]) = shapes[i]; } }; } template ShapeFunc make_shape_func( Shape(*func)(const std::vector&,const Args&...), index_sequence ) { return [=]( const Operation& op, Graph& graph ) { const auto& inputs = op.inputs.front().second; std::vector shapes(inputs.size()); for ( size_t 
i = 0; i < shapes.size(); ++i ) { shapes[i] = shape_of(graph, inputs[i]); } const Shape shape = func(shapes, op.attribs[Idxs].second...); for ( size_t i = 0; i < op.outputs.size(); ++i ) { shape_ref(graph, op.outputs[i].second) = shape; } }; } } // namespace impl template ShapeFunc make_shape_func( Shape(*func)(const Value&,const Args&...) ) { return impl::make_shape_func(func, impl::make_index_sequence<0>(), impl::make_index_sequence()); } template::value> ShapeFunc make_shape_func( Shape(*func)(const Shape&,const Args&...) ) { return impl::make_shape_func(func, impl::make_index_sequence(), impl::make_index_sequence()); } template ShapeFunc make_shape_func( Shape(*func)(const std::vector&,const Args&...) ) { return impl::make_shape_func(func, impl::make_index_sequence()); } template ShapeFunc make_shape_func( std::vector(*func)(const Shape&,const Args&...) ) { return impl::make_shape_func(func, impl::make_index_sequence()); } static const std::map StandardShapeFuncs = { { "external", make_shape_func(nullary_shape) }, { "constant", make_shape_func(constant_shape) }, { "variable", make_shape_func(nullary_shape) }, { "copy", make_shape_func(unary_shape) }, { "neg", make_shape_func(unary_shape) }, { "not", make_shape_func(unary_shape) }, { "rcp", make_shape_func(unary_shape) }, { "exp", make_shape_func(unary_shape) }, { "log", make_shape_func(unary_shape) }, { "sin", make_shape_func(unary_shape) }, { "cos", make_shape_func(unary_shape) }, { "tan", make_shape_func(unary_shape) }, { "asin", make_shape_func(unary_shape) }, { "acos", make_shape_func(unary_shape) }, { "atan", make_shape_func(unary_shape) }, { "sinh", make_shape_func(unary_shape) }, { "cosh", make_shape_func(unary_shape) }, { "tanh", make_shape_func(unary_shape) }, { "asinh", make_shape_func(unary_shape) }, { "acosh", make_shape_func(unary_shape) }, { "atanh", make_shape_func(unary_shape) }, { "abs", make_shape_func(unary_shape) }, { "sign", make_shape_func(unary_shape) }, { "floor", 
make_shape_func(unary_shape) }, { "ceil", make_shape_func(unary_shape) }, { "round", make_shape_func(unary_shape) }, { "sqr", make_shape_func(unary_shape) }, { "sqrt", make_shape_func(unary_shape) }, { "rsqr", make_shape_func(unary_shape) }, { "rsqrt", make_shape_func(unary_shape) }, { "log2", make_shape_func(unary_shape) }, { "relu", make_shape_func(unary_shape) }, { "sigmoid", make_shape_func(unary_shape) }, { "elu", make_shape_func(unary_shape) }, { "selu", make_shape_func(unary_shape) }, { "gelu", make_shape_func(unary_shape) }, { "silu", make_shape_func(unary_shape) }, { "softabs", make_shape_func(unary_shape) }, { "softplus", make_shape_func(unary_shape) }, { "leaky_relu", make_shape_func(unary_shape) }, { "prelu", make_shape_func(asymmetric_binary_shape) }, { "linear_quantize", make_shape_func(linear_quantize_shape) }, { "logarithmic_quantize", make_shape_func(logarithmic_quantize_shape) }, { "min_max_linear_quantize", make_shape_func(linear_quantize_shape) }, { "zero_point_linear_quantize", make_shape_func(zero_point_linear_quantize_shape) }, { "add", make_shape_func(binary_shape) }, { "sub", make_shape_func(binary_shape) }, { "mul", make_shape_func(binary_shape) }, { "div", make_shape_func(binary_shape) }, { "min", make_shape_func(binary_shape) }, { "max", make_shape_func(binary_shape) }, { "pow", make_shape_func(binary_shape) }, { "lt", make_shape_func(binary_shape) }, { "le", make_shape_func(binary_shape) }, { "gt", make_shape_func(binary_shape) }, { "ge", make_shape_func(binary_shape) }, { "eq", make_shape_func(binary_shape) }, { "ne", make_shape_func(binary_shape) }, { "and", make_shape_func(binary_shape) }, { "or", make_shape_func(binary_shape) }, { "conv", make_shape_func(conv_shape) }, { "deconv", make_shape_func(deconv_shape) }, { "separable_conv", make_shape_func(separable_conv_shape) }, { "separable_deconv", make_shape_func(separable_deconv_shape) }, { "box", make_shape_func(pool_shape) }, { "max_pool", make_shape_func(pool_shape) }, { 
"argmax_pool", make_shape_func(pool_shape) }, { "max_pool_with_index", make_shape_func(pool_shape) }, { "avg_pool", make_shape_func(pool_shape) }, { "rms_pool", make_shape_func(pool_shape) }, { "debox", make_shape_func(unpool_shape) }, { "sample", make_shape_func(sample_shape) }, { "desample", make_shape_func(desample_shape) }, { "sum_reduce", make_shape_func(reduce_shape) }, { "min_reduce", make_shape_func(reduce_shape) }, { "max_reduce", make_shape_func(reduce_shape) }, { "mean_reduce", make_shape_func(reduce_shape) }, { "argmax_reduce", make_shape_func(reduce_shape) }, { "argmin_reduce", make_shape_func(reduce_shape) }, { "any_reduce", make_shape_func(reduce_shape) }, { "all_reduce", make_shape_func(reduce_shape) }, { "moments", make_shape_func(reduce_shape) }, { "nearest_downsample", make_shape_func(downsample_shape) }, { "area_downsample", make_shape_func(downsample_shape) }, { "nearest_upsample", make_shape_func(upsample_shape) }, { "multilinear_upsample", make_shape_func(upsample_shape) }, { "local_response_normalization", make_shape_func(normalize_shape_size) }, { "local_mean_normalization", make_shape_func(normalize_shape_size) }, { "local_variance_normalization", make_shape_func(normalize_shape_size) }, { "local_contrast_normalization", make_shape_func(normalize_shape_size) }, { "l1_normalization", make_shape_func(normalize_shape_axes) }, { "l2_normalization", make_shape_func(normalize_shape_axes) }, { "batch_normalization", make_shape_func(batchnorm_shape) }, { "avg_roi_pool", make_shape_func(roi_shape) }, { "max_roi_pool", make_shape_func(roi_shape) }, { "avg_roi_align", make_shape_func(roi_shape) }, { "max_roi_align", make_shape_func(roi_shape) }, { "roi_resample", make_shape_func(roi_shape_resample) }, { "reshape", make_shape_func(reshape_shape) }, { "transpose", make_shape_func(transpose_shape) }, { "split", make_shape_func(split_shape) }, { "concat", make_shape_func(concat_shape) }, { "slice", make_shape_func(slice_shape) }, { "stack", 
make_shape_func(stack_shape) }, { "unstack", make_shape_func(unstack_shape) }, { "squeeze", make_shape_func(squeeze_shape) }, { "unsqueeze", make_shape_func(unsqueeze_shape) }, { "tile", make_shape_func(tile_shape) }, { "pad", make_shape_func(pad_shape) }, { "cast", make_shape_func(unary_shape) }, { "gather", make_shape_func(gather_shape) }, { "matmul", make_shape_func(matmul_shape) }, { "linear", make_shape_func(linear_shape) }, { "update", make_shape_func(update_shape) }, { "softmax", make_shape_func(softmax_shape) }, { "copy_n", make_shape_func(copy_n_shape) }, { "add_n", make_shape_func(add_n_shape) }, { "select", make_shape_func(ternary_shape) }, { "clamp", make_shape_func(ternary_shape) }, }; bool infer_shapes( Graph& graph, std::string& error, const std::map& input_shapes, const std::map& custom_shapes ) noexcept { for ( auto& op : graph.operations ) { auto it = StandardShapeFuncs.find(op.name); if ( it == StandardShapeFuncs.end() ) { it = custom_shapes.find(op.name); if ( it == custom_shapes.end() ) { error = "Shape function for operation '" + op.name + "' is not provided"; return false; } } auto func = it->second; if ( op.name == "external" ) { auto& id = op.outputs.get("output").identifier(); auto it = input_shapes.find(id); if ( it != input_shapes.end() ) { auto& original = op.attribs.get("shape"); if ( it->second.size() != original.size() ) { error = "Overridden external shape rank (" + std::to_string(it->second.size()) + ") does not match original rank (" + std::to_string(original.size()) + ")"; return false; } graph.tensors.at(id).shape = it->second; continue; } } try { func(op, graph); } catch ( const std::exception& e ) { auto& output = op.outputs.front().second; auto& id = output.kind() == Value::Identifier ? 
output.identifier() : output[0].identifier(); error = "Shape error while inferring shape of tensor '" + id + "' (operation '" + op.name + "'): " + e.what(); return false; } } return true; } bool allocate_buffers( Graph& graph, std::string& error ) noexcept { for ( auto& item : graph.tensors ) { auto& tensor = item.second; tensor.data.resize(volume_of(tensor.shape) * item_bytes(tensor.dtype)); } return true; } bool execute( Graph& graph, std::string& error ) noexcept { try { for ( auto& op : graph.operations ) { auto it = rt::Executors.find(op.name); if ( it == rt::Executors.end() ) { throw std::runtime_error("operation not implemented: " + op.name); } auto& func = it->second; func(op, graph.tensors); } return true; } catch ( const std::runtime_error& e ) { error = "Runtime error: " + std::string(e.what()); return false; } } } // namespace nnef ================================================ FILE: nnef-pyproject/nnef/nnef.cpp ================================================ /* * Copyright (c) 2017 The Khronos Group Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "Python.h" #include "numpy/arrayobject.h" #include "nnef/flat/flat_parser.h" #include "nnef/comp/comp_parser.h" #include "nnef/flat/quant_parser.h" #include "nnef.h" #include #include #include #include #include #include #include static PyObject* NNEF_Error; #if PY_MAJOR_VERSION >= 3 #define PY_STRING_OBJECT PyUnicodeObject #define PY_STRING_TYPE PyUnicode_Type #define PY_STRING_CHECK PyUnicode_Check #define PY_STRING_FROM_CSTR PyUnicode_FromString #define PY_STRING_AS_CSTR PyUnicode_AsUTF8 #define PY_INTEGER_CHECK PyLong_Check #define PY_INTEGER_AS_LONG PyLong_AsLong #else #define PY_STRING_OBJECT PyStringObject #define PY_STRING_TYPE PyString_Type #define PY_STRING_CHECK PyString_Check #define PY_STRING_FROM_CSTR PyString_FromString #define PY_STRING_AS_CSTR PyString_AsString #define PY_INTEGER_CHECK PyInt_Check #define PY_INTEGER_AS_LONG PyInt_AsLong #endif struct NNEF_Identifier { PY_STRING_OBJECT str; }; static PyTypeObject NNEF_Identifier_Type = { PyVarObject_HEAD_INIT(NULL, 0) "_nnef.Identifier", /* tp_name */ sizeof(NNEF_Identifier), /* tp_basicsize */ 0, /* tp_itemsize */ 0, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_reserved */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ 0, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */ }; static PyObject* OrderedDict; static PyObject* NamedTuple; static PyObject* Tensor; static PyObject* Operation; static PyObject* Graph; // make tuple by STEALING references to args template static PyObject* makePyTuple( Args&& ...args ) { PyObject* tuple = PyTuple_Pack(sizeof...(args), args...); for ( auto& arg : { args... 
} ) { Py_DECREF(arg); } return tuple; } // make object by STEALING references to args template static PyObject* makePyObject( PyObject* type, Args&& ...args ) { PyObject* argsTuple = makePyTuple(std::forward(args)...); PyObject* obj = PyObject_CallObject(type, argsTuple); Py_DECREF(argsTuple); return obj; } static PyObject* makeNamedTuple( const char* name, std::initializer_list fields ) { PyObject* pyName = PY_STRING_FROM_CSTR(name); PyObject* pyFields = PyList_New(fields.size()); size_t i = 0; for ( auto& field : fields ) { PyList_SetItem(pyFields, i++, PY_STRING_FROM_CSTR(field)); } return makePyObject(NamedTuple, pyName, pyFields); } static PyObject* buildPyBoolean( bool value ) { if ( value ) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } static PyObject* buildPyNone() { Py_RETURN_NONE; } static PyObject* buildPyObjectFromValue( const nnef::Value& value ) { switch ( value.kind() ) { case nnef::Value::None: { return buildPyNone(); } case nnef::Value::Integer: { return Py_BuildValue("i", value.integer()); } case nnef::Value::Scalar: { return Py_BuildValue("f", value.scalar()); } case nnef::Value::Logical: { return buildPyBoolean(value.logical()); } case nnef::Value::String: { return PY_STRING_FROM_CSTR(value.string().c_str()); } case nnef::Value::Identifier: { PyObject* arg = PY_STRING_FROM_CSTR(value.identifier().c_str()); return makePyObject((PyObject*)&NNEF_Identifier_Type, arg); } case nnef::Value::Array: { PyObject* list = PyList_New(value.size()); for ( size_t i = 0; i < value.size(); ++i ) { PyList_SetItem(list, i, buildPyObjectFromValue(value[i])); } return list; } case nnef::Value::Tuple: { PyObject* tuple = PyTuple_New(value.size()); for ( size_t i = 0; i < value.size(); ++i ) { PyTuple_SetItem(tuple, i, buildPyObjectFromValue(value[i])); } return tuple; } } return nullptr; } static int numpy_type_num( const nnef::Typename& dtype ) { switch ( dtype ) { case nnef::Typename::Scalar: return NPY_FLOAT32; case nnef::Typename::Integer: return NPY_INT32; 
case nnef::Typename::Logical: return NPY_BOOL; default: return NPY_VOID; } } static PyArray_Descr* numpy_dtype( const nnef::Typename& dtype ) { switch ( dtype ) { case nnef::Typename::Scalar: return PyArray_DescrFromType(NPY_FLOAT32); case nnef::Typename::Integer: return PyArray_DescrFromType(NPY_INT32); case nnef::Typename::Logical: return PyArray_DescrFromType(NPY_BOOL); default: return NULL; } } static std::string buildErrorString( nnef::Error e ) { std::string str = "Parse error in '" + std::string(e.position().filename) + "' [" + std::to_string(e.position().line) + ":" + std::to_string(e.position().column) + "] " + e.what(); auto origin = e.position().origin; while ( origin ) { str += "\n... evaluated from '" + std::string(e.position().filename) + "' [" + std::to_string(e.position().line) + ":" + std::to_string(e.position().column) + "]"; origin = origin->origin; } return str; } struct GraphCallback : public nnef::Parser::Callback { GraphCallback( std::istream& qis, const char* qfn ) : qis(qis), qfn(qfn), tensors(NULL), operations(NULL), graph(NULL), version(NULL), extensions(NULL) { } ~GraphCallback() { if ( tensors ) Py_DECREF(tensors); if ( operations ) Py_DECREF(operations); if ( graph ) Py_DECREF(graph); if ( version ) Py_DECREF(version); if ( extensions ) Py_DECREF(extensions); } virtual void beginDocument( const std::string& filename, const nnef::Parser::version_t& version ) { this->version = makePyTuple(Py_BuildValue("i", version.first), Py_BuildValue("i", version.second)); this->extensions = PyList_New(0); } virtual bool handleExtension( const std::string& ext ) { PyObject* pyStr = PY_STRING_FROM_CSTR(ext.c_str()); PyList_Append(this->extensions, pyStr); Py_DECREF(pyStr); return false; } virtual void beginGraph( const nnef::Prototype& proto, const nnef::Dictionary& fragments ) { PyObject* name = PY_STRING_FROM_CSTR(proto.name().c_str()); this->protos = &fragments; this->tensors = PyDict_New(); this->operations = PyList_New(0); PyObject* inputs = 
PyList_New(proto.paramCount()); for ( size_t i = 0; i < proto.paramCount(); ++i ) { PyList_SetItem(inputs, i, PY_STRING_FROM_CSTR(proto.param(i).name().c_str())); } PyObject* outputs = PyList_New(proto.resultCount()); for ( size_t i = 0; i < proto.resultCount(); ++i ) { PyList_SetItem(outputs, i, PY_STRING_FROM_CSTR(proto.result(i).name().c_str())); } Py_INCREF(this->tensors); Py_INCREF(this->operations); this->graph = makePyObject(Graph, name, tensors, operations, inputs, outputs); if ( qis ) { quant = nnef::QuantParser::parse(qis, qfn, fragments); } } virtual void endGraph( const nnef::Prototype& proto, const nnef::Dictionary& dtypes ) { for ( auto& it : dtypes ) { PyObject* name = PY_STRING_FROM_CSTR(it.first.c_str()); PyObject* shape = buildPyNone(); PyObject* dtype = PY_STRING_FROM_CSTR(nnef::toString(it.second)); PyObject* data = buildPyNone(); PyObject* quantization = PyDict_New(); if ( quant.count(it.first) ) { auto& attribs = quant.at(it.first); auto& op_name = attribs.at("op-name").string(); auto& op_proto = protos->at(op_name); for ( auto& qit : attribs ) { auto obj = buildPyObjectFromValue(qit.second); auto param = op_proto.param(qit.first.c_str()); if ( param && param->type()->kind() == nnef::Type::Tensor ) { auto tensor_type = (const nnef::TensorType*)param->type(); auto data_type = (const nnef::PrimitiveType*)tensor_type->dataType(); PyArray_Descr* array_dtype = numpy_dtype(data_type->name()); PyObject* array = PyArray_FromAny(obj, array_dtype, 0, 0, 0, NULL); // steals reference to dtype Py_DECREF(obj); obj = array; } PyDict_SetItemString(quantization, qit.first.c_str(), obj); Py_DECREF(obj); } } PyObject* tensor = makePyObject(Tensor, name, dtype, shape, data, quantization); PyDict_SetItemString(tensors, it.first.c_str(), tensor); Py_DECREF(tensor); } } virtual void operation( const nnef::Prototype& proto, const nnef::Dictionary& args, const nnef::Dictionary& dtypes ) { PyObject* attribs = PyList_New(0); PyObject* inputs = PyList_New(0); PyObject* 
outputs = PyList_New(0); PyObject* dtype = args.count("?") ? PY_STRING_FROM_CSTR(args.at("?").string().c_str()) : buildPyNone(); for ( size_t i = 0; i < proto.paramCount(); ++i ) { auto& param = proto.param(i); auto& value = args.at(param.name()); PyObject* item = makePyTuple(PY_STRING_FROM_CSTR(param.name().c_str()), buildPyObjectFromValue(value)); PyList_Append(param.type()->isAttribute() ? attribs : inputs, item); Py_DECREF(item); } for ( size_t i = 0; i < proto.resultCount(); ++i ) { auto& result = proto.result(i); auto& value = args.at(result.name()); PyObject* item = makePyTuple(PY_STRING_FROM_CSTR(result.name().c_str()), buildPyObjectFromValue(value)); PyList_Append(outputs, item); Py_DECREF(item); } PyObject* name = PY_STRING_FROM_CSTR(proto.name().c_str()); attribs = makePyObject(OrderedDict, attribs); inputs = makePyObject(OrderedDict, inputs); outputs = makePyObject(OrderedDict, outputs); PyObject* operation = makePyObject(Operation, name, attribs, inputs, outputs, dtype); PyList_Append(operations, operation); Py_DECREF(operation); } std::istream& qis; const char* qfn; nnef::Dictionary> quant; const nnef::Dictionary* protos; PyObject* tensors; PyObject* operations; PyObject* graph; PyObject* version; PyObject* extensions; }; static PyObject* parse( PyObject* self, PyObject* args, PyObject* kwargs, bool isFile ) { const char* input = nullptr; const char* quant = nullptr; const char* stdlib = nullptr; PyObject* lower = nullptr; static const char* kwlist[] = { "", "quantization", "stdlib", "lowered", NULL }; if ( !PyArg_ParseTupleAndKeywords(args, kwargs, "s|zzO!", (char**)kwlist, &input, &quant, &stdlib, &PyList_Type, &lower) ) { return NULL; } if ( !stdlib ) { stdlib = ""; } std::ifstream gfs, qfs; std::stringstream gss, qss; if ( isFile ) { gfs.open(input); if ( !gfs ) { const std::string message = "Could not open file: " + std::string(input); PyErr_SetString(NNEF_Error, message.c_str()); return NULL; } if ( quant ) { qfs.open(quant); if ( !qfs ) { const 
std::string message = "Could not open file: " + std::string(quant); PyErr_SetString(NNEF_Error, message.c_str()); return NULL; } } } else { gss.str(input); if ( quant ) { qss.str(quant); } } std::istream& gis = isFile ? (std::istream&)gfs : (std::istream&)gss; std::istream& qis = isFile ? (std::istream&)qfs : (std::istream&)qss; std::set lowered; if ( lower ) { for ( Py_ssize_t i = 0; i < PyList_Size(lower); ++i ) { PyObject* item = PyList_GetItem(lower, i); if ( !PY_STRING_CHECK(item) ) { const std::string message = "Paremeter 'lowered' must be a list of strings"; PyErr_SetString(NNEF_Error, message.c_str()); return NULL; } lowered.insert(PY_STRING_AS_CSTR(item)); } } nnef::CompParser parser(stdlib, lowered); GraphCallback callback(qis, isFile ? quant : "quantization"); try { parser.parse(gis, isFile ? input : "input", callback); Py_INCREF(callback.graph); return callback.graph; } catch ( const nnef::Error& e ) { PyErr_SetString(NNEF_Error, buildErrorString(e).c_str()); return NULL; } catch ( const std::invalid_argument& e ) { PyErr_SetString(PyExc_ValueError, e.what()); return NULL; } catch ( const std::exception& e ) { PyErr_SetString(PyExc_Exception, e.what()); return NULL; } } static PyObject* parseFile( PyObject* self, PyObject* args, PyObject* kwargs ) { return parse(self, args, kwargs, true); } static PyObject* parseString( PyObject* self, PyObject* args, PyObject* kwargs ) { return parse(self, args, kwargs, false); } static PyObject* createSession( PyObject* self, PyObject* args, PyObject* kwargs ) { static const char* kwlist[] = { "", "stdlib", "lowered", NULL }; const char* path = nullptr; const char* stdlib = nullptr; PyObject* lower = nullptr; if ( !PyArg_ParseTupleAndKeywords(args, kwargs, "s|zO!", (char**)kwlist, &path, &stdlib, &PyList_Type, &lower) ) { return NULL; } if ( !stdlib ) { stdlib = ""; } std::set lowered; if ( lower ) { for ( Py_ssize_t i = 0; i < PyList_Size(lower); ++i ) { PyObject* item = PyList_GetItem(lower, i); if ( 
!PY_STRING_CHECK(item) ) { const std::string message = "Paremeter 'lowered' must be a list of strings"; PyErr_SetString(NNEF_Error, message.c_str()); return NULL; } lowered.insert(PY_STRING_AS_CSTR(item)); } } std::unique_ptr graph(new nnef::Graph()); std::string error; if ( !nnef::load_graph(path, *graph, error, stdlib, lowered) ) { PyErr_SetString(PyExc_ValueError, error.c_str()); return NULL; } if ( !nnef::infer_shapes(*graph, error) ) { PyErr_SetString(PyExc_ValueError, error.c_str()); return NULL; } if ( !nnef::allocate_buffers(*graph, error) ) { PyErr_SetString(PyExc_ValueError, error.c_str()); return NULL; } const size_t handle = reinterpret_cast(graph.release()); return PyLong_FromSize_t(handle); } static PyObject* cleanupSession( PyObject* self, PyObject* args, PyObject* kwargs ) { PyObject* handle; static const char* kwlist[] = { "", NULL }; if ( !PyArg_ParseTupleAndKeywords(args, kwargs, "O", (char**)kwlist, &handle) ) { return NULL; } nnef::Graph* graph = reinterpret_cast(PyLong_AsSize_t(handle)); delete graph; Py_RETURN_NONE; } static PyObject* executeSession( PyObject* self, PyObject* args, PyObject* kwargs ) { PyObject* handle; PyObject* inputs; static const char* kwlist[] = { "", "", NULL }; if ( !PyArg_ParseTupleAndKeywords(args, kwargs, "OO!", (char**)kwlist, &handle, &PyTuple_Type, &inputs) ) { return NULL; } nnef::Graph* graph = reinterpret_cast(PyLong_AsSize_t(handle)); if ( PyTuple_Size(inputs) != graph->inputs.size() ) { PyErr_Format(PyExc_ValueError, "number of inputs (%d) does not match number of graph inputs (%d)", (int)PyTuple_Size(inputs), (int)graph->inputs.size()); return NULL; } for ( size_t i = 0; i < PyTuple_Size(inputs); ++i ) { PyObject* input = PyTuple_GetItem(inputs, i); if ( !PyArray_Check(input) ) { PyErr_SetString(PyExc_ValueError, "inputs must be numpy arrays"); return NULL; } PyArrayObject* array = (PyArrayObject*)input; nnef::Tensor& tensor = graph->tensors.at(graph->inputs[i]); nnef::Typename dtype = 
nnef::fromString(tensor.dtype); if ( PyArray_TYPE(array) != numpy_type_num(dtype) ) { PyErr_Format(PyExc_ValueError, "dtype of input %d does not match input dtype in graph", (int)i+1); return NULL; } if ( PyArray_NDIM(array) != tensor.shape.size() || !std::equal(tensor.shape.begin(), tensor.shape.end(), PyArray_SHAPE(array)) ) { PyErr_Format(PyExc_ValueError, "shape of input %d does not match input shape in graph", (int)i+1); return NULL; } std::copy_n(PyArray_BYTES(array), tensor.data.size(), tensor.data.data()); } std::string error; if ( !nnef::execute(*graph, error) ) { PyErr_SetString(PyExc_ValueError, error.c_str()); return NULL; } PyObject* outputs = PyTuple_New(graph->outputs.size()); for ( size_t i = 0; i < graph->outputs.size(); ++i ) { nnef::Tensor& tensor = graph->tensors.at(graph->outputs[i]); std::vector shape(tensor.shape.begin(), tensor.shape.end()); nnef::Typename dtype = nnef::fromString(tensor.dtype); PyObject* output = PyArray_SimpleNew(shape.size(), shape.data(), numpy_type_num(dtype)); std::copy_n(tensor.data.data(), tensor.data.size(), PyArray_BYTES((PyArrayObject*)output)); PyTuple_SetItem(outputs, i, output); } return outputs; } static PyMethodDef NNEF_Methods[] = { { "parse_file", (PyCFunction)parseFile, METH_VARARGS | METH_KEYWORDS, "Parse the contents of a file" }, { "parse_string", (PyCFunction)parseString, METH_VARARGS | METH_KEYWORDS, "Parse the contents of a string" }, { "create_session", (PyCFunction)createSession, METH_VARARGS | METH_KEYWORDS, "Create session for executing a graph" }, { "cleanup_session", (PyCFunction)cleanupSession, METH_VARARGS | METH_KEYWORDS, "Cleanup session" }, { "execute_session", (PyCFunction)executeSession, METH_VARARGS | METH_KEYWORDS, "Execute graph in a session" }, { NULL, NULL, 0, NULL } }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef nnef_module = { PyModuleDef_HEAD_INIT, "_nnef", "_nnef module", -1, NNEF_Methods, }; #endif #if PY_MAJOR_VERSION >= 3 #define INIT_FUNC_NAME PyInit__nnef #define 
RETURN_ERROR return NULL #else #define INIT_FUNC_NAME init_nnef #define RETURN_ERROR return #endif PyMODINIT_FUNC INIT_FUNC_NAME(void) { NNEF_Identifier_Type.tp_base = &PY_STRING_TYPE; if ( PyType_Ready(&NNEF_Identifier_Type) < 0 ) { RETURN_ERROR; } #if PY_MAJOR_VERSION >= 3 PyObject* module = PyModule_Create(&nnef_module); #else PyObject* module = Py_InitModule("_nnef", NNEF_Methods); #endif if ( module == NULL ) { RETURN_ERROR; } NNEF_Error = PyErr_NewException((char*)"_nnef.Error", NULL, NULL); PyModule_AddObject(module, "Error", NNEF_Error); PyModule_AddObject(module, "Identifier", (PyObject*)&NNEF_Identifier_Type); PyObject* collections = PyImport_ImportModule("collections"); PyObject* dict = PyModule_GetDict(collections); OrderedDict = PyDict_GetItemString(dict, "OrderedDict"); NamedTuple = PyDict_GetItemString(dict, "namedtuple"); Py_DECREF(collections); Tensor = makeNamedTuple("Tensor", { "name", "dtype", "shape", "data", "quantization" }); PyModule_AddObject(module, "Tensor", Tensor); Operation = makeNamedTuple("Operation", { "name", "attribs", "inputs", "outputs", "dtype" }); PyModule_AddObject(module, "Operation", Operation); Graph = makeNamedTuple("Graph", { "name", "tensors", "operations", "inputs", "outputs" }); PyModule_AddObject(module, "Graph", Graph); import_array(); #if PY_MAJOR_VERSION >= 3 return module; #endif } ================================================ FILE: nnef-pyproject/nnef/parser.py ================================================ # Copyright (c) 2017 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. import _nnef def parse_file(graph_fn, quant_fn=None, stdlib=None, lowered=None): return _nnef.parse_file(graph_fn, quantization=quant_fn, stdlib=stdlib, lowered=lowered or []) def parse_string(graph_str, quant_str=None, stdlib=None, lowered=None): return _nnef.parse_string(graph_str, quantization=quant_str, stdlib=stdlib, lowered=lowered or []) ================================================ FILE: nnef-pyproject/nnef/printer.py ================================================ # Copyright (c) 2017 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import _nnef def format_version(version): major, minor = version return 'version {}.{};'.format(major, minor) def format_extensions(extensions): string = str() for i, ext in enumerate(extensions): if i != 0: string += '\n' string += 'extension {};'.format(ext) return string def format_argument(value): if isinstance(value, _nnef.Identifier): return value elif isinstance(value, str): return "'" + value + "'" elif isinstance(value, bool): return 'true' if value else 'false' elif isinstance(value, (int, float)): return str(value) elif isinstance(value, list): return '[' + ', '.join(format_argument(item) for item in value) + ']' elif isinstance(value, tuple): return '(' + ', '.join(format_argument(item) for item in value) + ')' else: raise TypeError('arguments must be of type int, float, str, nnef.Identifier or list/tuple of such, found: ' + str(type(value))) def format_result(value): if isinstance(value, list): return '[' + ', '.join(format_result(item) for item in value) + ']' elif isinstance(value, tuple): return '(' + ', '.join(format_result(item) for item in value) + ')' elif isinstance(value, _nnef.Identifier): return value else: raise TypeError('results must be of type nnef.Identifier or list/tuple of such, found: ' + str(type(value))) def format_shapes(result, tensors): if isinstance(result, list): return '[' + ', '.join(format_shapes(item, tensors) for item in result) + ']' elif isinstance(result, tuple): return '(' + ', '.join(format_shapes(item, tensors) for item in result) + ')' elif isinstance(result, _nnef.Identifier): return str(tensors[result].shape) else: raise TypeError('results must be of type nnef.Identifier or list/tuple of such, found: ' + str(type(result))) def format_invocation(name, attribs, inputs, outputs=None, dtype=None): string = str() if outputs is not None: string += ', '.join([format_result(output) for output in outputs]) string += ' = ' string += name if dtype is not None: string += '<' + dtype + '>' string += '(' string += ', 
'.join([format_argument(input) for input in inputs]) if len(inputs) and len(attribs): string += ', ' string += ', '.join(key + ' = ' + format_argument(value) for (key, value) in attribs.items()) string += ')' return string def format_graph(name, inputs, outputs, operations, tensors, annotate_shapes=False): string = 'graph ' + name + '( ' + ', '.join(inputs) + ' ) -> ( ' + ', '.join(outputs) + ' )\n' string += '{\n' for operation in operations: inputs = operation.inputs.values() outputs = operation.outputs.values() invocation = format_invocation(operation.name, operation.attribs, inputs, outputs, operation.dtype) string += '\t' + invocation + ';' if annotate_shapes: string += '\t# ' + ', '.join(format_shapes(output, tensors) for output in outputs) string += '\n' string += '}\n' return string ================================================ FILE: nnef-pyproject/nnef/shapes.py ================================================ # Copyright (c) 2017 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import nnef import numpy as np def _ceil_div(x, y): return (x + y - 1) // y if y > 0 else (x + y + 1) // y def _clamp(x, a, b): return max(a, min(b, x)) def _ensure_rank(array, rank, value=1): return array if len(array) == rank else array + [value] * rank def _volume(shape): volume = 1 for s in shape: volume *= s return volume def _broadcast_compatible(x,y): return all(xi == yi or xi == 1 or yi == 1 for xi, yi in zip(x, y)) def _broadcastable(x,y): return all(xi == yi or xi == 1 for xi, yi in zip(x, y)) def _broadcast_shape(x, y): assert _broadcast_compatible(x, y), "arguments are not broadcast compatible ({} vs {})".format(x, y) rank = max(len(x), len(y)) return [max(xi,yi) for (xi, yi) in zip(_ensure_rank(x, rank), _ensure_rank(y, rank))] def _downsize_shape(input, kernel, padding, stride, dilation): return [(i + p + q - (k - 1) * d - 1) // s + 1 for i, k, (p, q), s, d in zip(input, kernel, padding, stride, dilation)] \ if padding else [(i + s - 1) // s for i, s in zip(input, stride)] def _upsize_shape(input, kernel, padding, stride, dilation): return [(i - 1) * s + (k - 1) * d + 1 - p - q for i, k, (p, q), s, d in zip(input, kernel, padding, stride, dilation)] \ if padding else [i * s for i, s in zip(input, stride)] def nullary_shape(shape, **kwargs): return shape def unary_shape(arg, **kwargs): return arg def binary_shape(left, right, **kwargs): return _broadcast_shape(left, right) def asymmetric_binary_shape(left, right, **kwargs): assert _broadcastable(right, left), \ "second argument shape ({}) cannot be broadcast to first argument shape ({})".format(right, left) return left def ternary_shape(cond, left, right, **kwargs): value = _broadcast_shape(left, right) return _broadcast_shape(cond, value) def pool_shape(input, size, border=None, padding=[], stride=[], dilation=[], output_shape=None, transposed=False, **kwargs): rank = len(input) assert len(size) == rank, "expected kernel shape of rank {}, found {}".format(rank, size) assert not padding or len(padding) 
== rank, "expected 'padding' of length {}, found {}".format(rank, padding) assert not stride or len(stride) == rank, "expected 'stride' of length {}, found {}".format(rank, stride) assert not dilation or len(dilation) == rank, "expected 'dilation' of length {}, found {}".format(rank, dilation) assert all(s > 0 for s in stride), "'stride' must be positive, found {}".format(stride) assert all(d > 0 for d in dilation), "'dilation' must be positive, found {}".format(dilation) stride = _ensure_rank(stride, rank) dilation = _ensure_rank(dilation, rank) if output_shape: assert len(output_shape) == rank, "expected 'output_shape' of length {}, found {}".format(rank, output_shape) assert all(s > 0 for s in output_shape), "'output_shape' must be positive, found {}".format(output_shape) expected_shape = _downsize_shape(output_shape, size, padding, stride, dilation) assert input == expected_shape, \ "expected input shape {} derived from 'output_shape' is incompatible with actual input shape {}".\ format(expected_shape, input) return output_shape if transposed: return _upsize_shape(input, size, padding, stride, dilation) else: return _downsize_shape(input, size, padding, stride, dilation) def pool_with_index_shape(input, size, border=None, padding=[], stride=[], dilation=[]): shape = pool_shape(input, size, border, padding, stride, dilation) return (shape, shape) def unpool_shape(input, size, border=None, padding=[], stride=[], dilation=[], output_shape=None, **kwargs): return pool_shape(input, size, border, padding, stride, dilation, output_shape, transposed=True, **kwargs) def sample_shape(input, index, size, border=None, padding=[], stride=[], dilation=[], output_shape=None, transposed=False): assert index == input, "'index' shape {} does not match 'input' shape {}".format(input, index) return pool_shape(input, size, border, padding, stride, dilation, output_shape, transposed) def desample_shape(input, index, size, border=None, padding=[], stride=[], dilation=[], 
output_shape=None): return sample_shape(input, index, size, border, padding, stride, dilation, output_shape, transposed=True) def conv_shape(input, filter, bias=[], border=None, padding=[], stride=[], dilation=[], groups=1, output_shape=None, transposed=False): rank = len(input) assert len(filter) == rank, "expected filter shape of rank {}, found {}".format(rank, filter) assert not padding or len(padding) == rank - 2, "expected 'padding' of length {}, found {}".format(rank - 2, padding) assert not stride or len(stride) == rank - 2, "expected 'stride' of length {}, found {}".format(rank - 2, stride) assert not dilation or len(dilation) == rank - 2, "expected 'dilation' of rank {}, found {}".format(rank - 2, dilation) assert all(s > 0 for s in stride), "'stride' must be positive, found {}".format(stride) assert all(d > 0 for d in dilation), "'dilation' must be positive, found {}".format(dilation) assert groups >= 0, "'groups' must be non-negative, found {}".format(groups) if groups == 0: groups = output_shape[1] if transposed and output_shape else input[1] if transposed: assert filter[0] == input[1], "filter batch ({}) does not match input channels ({})".format(filter[0], input[1]) else: assert filter[1] * groups == input[1], \ "filter channels ({}) times groups ({}) does not match input channels ({})".format(filter[1], groups, input[1]) assert filter[0] % groups == 0, "'groups' ({}) does not divide filter batch ({})".format(groups, filter[0]) assert len(bias) <= 2, "expected bias shape of rank at most 2, found {}".format(bias) if len(bias) == 2: assert bias[0] == 1, "'bias' batch dimension must be singular" if len(bias): channels = filter[1] * groups if transposed else filter[0] assert bias[-1] == channels, "'bias' channels ({}) does not match filter batch ({})".format(bias[-1], channels) stride = _ensure_rank(stride, rank - 2) dilation = _ensure_rank(dilation, rank - 2) if output_shape: assert len(output_shape) == rank, "expected 'output_shape' of length {}, found 
{}".format(rank, output_shape) assert all(s > 0 for s in output_shape), "'output_shape' must be positive, found {}".format(output_shape) assert output_shape[0] == input[0], \ "output batch ({}) does not match input batch ({})".format(output_shape[0], input[0]) assert output_shape[1] == filter[1] * groups, \ "output channels ({}) does not match input channels ({}) times groups ({})".format(output_shape[1], input[1], groups) expected_shape = [input[0], filter[0]] + _downsize_shape(output_shape[2:], filter[2:], padding, stride, dilation) assert input == expected_shape, \ "expected input shape {} derived from 'output_shape' is incompatible with actual input shape {}". \ format(expected_shape, input) return output_shape if transposed: return [input[0], filter[1] * groups] + _upsize_shape(input[2:], filter[2:], padding, stride, dilation) else: return [input[0], filter[0]] + _downsize_shape(input[2:], filter[2:], padding, stride, dilation) def separable_conv_shape(input, plane_filter, point_filter, bias=[], border=None, padding=[], stride=[], dilation=[], groups=1, output_shape=None, transposed=False): assert all(x == 1 for x in point_filter[2:]), \ "point-wise filter must be singular in spatial dimensions, found {}".format(point_filter) assert point_filter[1] == plane_filter[0], \ "channel dimension of point-wise filter ({}) does not equal batch dimension of depth-wise filter ({})".\ format(point_filter[1], plane_filter[0]) assert plane_filter[1] == 1, "channel dimension of plane-wise filter must be singular, found {}".format(plane_filter) channels = point_filter[1] if transposed else input[1] filter = [point_filter[0], channels] + plane_filter[2:] return conv_shape(input, filter, bias, border, padding, stride, dilation, groups, output_shape, transposed) def separable_deconv_shape(input, plane_filter, point_filter, bias=[], border=None, padding=[], stride=[], dilation=[], groups=1, output_shape=None): return separable_conv_shape(input, plane_filter, point_filter, bias, 
border, padding, stride, dilation, groups, output_shape, transposed=True) def deconv_shape(input, filter, bias=[], border=None, padding=[], stride=[], dilation=[], groups=1, output_shape=None): return conv_shape(input, filter, bias, border, padding, stride, dilation, groups, output_shape, transposed=True) def reduce_shape(input, axes, **kwargs): rank = len(input) assert all(0 <= axis < rank for axis in axes), "axes must be in range [0,{}), found {}".format(rank, axes) return [1 if i in axes else input[i] for i in range(rank)] def normalize_shape(input, **kwargs): rank = len(input) axes = kwargs.get('axes') size = kwargs.get('size') if axes: assert all(0 <= axis < rank for axis in axes), "axes must be in range [0,{}), found {}".format(rank, axes) if size: assert len(size) == rank, "expected 'size' of length {}, found {}".format(rank, size) assert all(s >= 1 for s in size), "'size' must be positive, found {}".format(size) return input def moments_shape(input, axes): shape = reduce_shape(input, axes=axes) return shape, list(shape) def downsample_shape(input, factor, **kwargs): rank = len(input) assert len(factor) == rank - 2, "expected 'factor' of length {}, found {}".format(rank, factor) assert all(i % f == 0 for i, f in zip(input[2:], factor)), \ "'factor' {} does not divide spatial input shape {}".format(factor, input[2:]) return input[:2] + [i // f for i, f in zip(input[2:], factor)] def upsample_shape(input, factor, **kwargs): rank = len(input) assert len(factor) == rank - 2, "expected 'factor' of length {}, found {}".format(rank, factor) return input[:2] + [i * f for i, f in zip(input[2:], factor)] def reshape_shape(input, shape, axis_start=0, axis_count=-1): rank = len(input) assert all(s >= -1 for s in shape), "items in 'shape' must be >= -1, found {}".format(shape) assert sum(1 for s in shape if s == -1) <= 1, "at most one item may be -1 in 'shape', found {}".format(shape) assert 0 <= axis_start <= rank, "'axis_start' must be in range [0,{}], found 
{}".format(rank, axis_start) assert axis_count >= -1, "'axis_count' must be non-negative or -1, found {}".format(axis_count) if axis_count == -1: axis_count = rank - axis_start axis_end = axis_start + axis_count assert axis_end <= rank, "'axis_start' + 'axis_count' ({}) must be in range [0,{}]".format(axis_end, rank) shape = list(shape) # don't modify original list for i in range(len(shape)): if shape[i] == 0: shape[i] = input[i + axis_start] input_range = input[axis_start:axis_end] if -1 in shape: idx = shape.index(-1) assert _volume(input_range) % _volume(shape) == 0, \ "volume of 'shape' ({}) does not divide volume of 'input[{}:{}]' ({})".format(shape, axis_start, axis_end, input_range) shape[idx] = _volume(input_range) // -_volume(shape) else: assert _volume(shape) == _volume(input_range), \ "volume of 'shape' ({}) does not equal volume of 'input[{}:{}]' ({})".format(shape, axis_start, axis_end, input_range) return input[:axis_start] + shape + input[axis_end:] def transpose_shape(input, axes): rank = len(axes) assert sorted(axes) == list(range(rank)), "axes must be a permutation of [0..{}], found {}".format(rank-1, axes) return [input[axis] for axis in axes] + input[rank:] def squeeze_shape(input, axes): rank = len(input) assert all(0 <= axis < rank for axis in axes), "axes must be in range [0,{}), found {}".format(rank, axes) return [input[i] for i in range(rank) if not i in axes] def unsqueeze_shape(input, axes): rank = len(input) + len(axes) assert all(0 <= axis < rank for axis in axes), "axes must be in range [0,{}), found {}".format(rank, axes) output = list(input) for axis in axes: output = output[:axis] + [1] + output[axis:] return output def concat_shape(values, axis): assert len(values) != 0, "'values' must be non-empty" shape = list(values[0]) rank = len(shape) assert 0 <= axis < rank, "'axis' must be in range [0,{}), found {}".format(rank, axis) for value in values: assert len(value) == len(shape), "'values' must have the same rank, found 
{}".format(values) assert all(value[i] == shape[i] for i in range(rank) if i != axis), \ "shapes of 'values' must be identical for all dimensions other than 'axis' ({}), found {}".format(axis, values) shape[axis] = sum(value[axis] for value in values) return shape def split_shape(value, axis, ratios): rank = len(value) assert 0 <= axis < rank, "axis must be in range [0,{}), found {}".format(rank, axis) assert all(r > 0 for r in ratios), "'ratios' must be positive, found {}".format(ratios) total = sum(ratios) assert value[axis] % total == 0, \ "sum of 'ratios' ({}) does not divide input shape along dimension 'axis' ({})".format(total, value[axis]) unit = value[axis] // total return [[unit * r if i == axis else value[i] for i in range(rank)] for r in ratios] def stack_shape(values, axis): assert len(values) != 0, "'values' must be non-empty" shape = values[0] rank = len(shape) + 1 assert 0 <= axis < rank, "'axis' must be in range [0,{}), found {}".format(rank, axis) assert all(value == shape for value in values), "shapes of 'values' must be identical, found {}".format(values) return shape[:axis] + [len(values)] + shape[axis:] def unstack_shape(value, axis): rank = len(value) assert 0 <= axis < rank, "'axis' must be in range [0,{}), found {}".format(rank, axis) return [value[:axis] + value[axis+1:]] * value[axis] def slice_shape(input, axes, begin, end, stride=[]): rank = len(input) if len(stride) == 0: stride = [1] * len(axes) if all(s == 1 for s in stride): end = [input[axis] if offs == 0 else offs for axis, offs in zip(axes, end)] assert len(begin) == len(axes), \ "length of 'begin' ({}) does not equal length of 'axes' ({})".format(len(begin), len(axes)) assert len(end) == len(axes), \ "length of 'end' ({}) does not equal length of 'axes' ({})".format(len(end), len(axes)) assert len(stride) == len(axes), \ "length of 'stride' ({}) does not equal length of 'axes' ({})".format(len(begin), len(axes)) assert all(0 <= axis < rank for axis in axes), "'axes' must be in 
range [0,{}), found {}".format(rank, axes) begin = [_clamp(offs + input[axis] if offs < 0 else offs, -1, input[axis]) for axis, offs in zip(axes, begin)] end = [_clamp(offs + input[axis] if offs < 0 else offs, -1, input[axis]) for axis, offs in zip(axes, end)] assert all(s != 0 for s in stride), "'stride' must be non-zero" assert all(0 <= first <= last if str > 0 else last <= first < input[axis] for axis, first, last, str in zip(axes, begin, end, stride)), \ "slice range ({}:{}:{}) is invalid".format(begin, end, stride) output = list(input) for axis, first, last, str in zip(axes, begin, end, stride): output[axis] = _ceil_div(last - first, str) return output def tile_shape(input, repeats): rank = len(input) assert len(repeats) == rank, "expected 'repeats' of length {}, found {}".format(rank, repeats) return [i * r for i, r in zip(input, repeats)] def pad_shape(input, padding, **kwargs): rank = len(input) assert len(padding) == rank, "expected 'padding' of length {}, found {}".format(rank, padding) return [p + i + q for i, (p, q) in zip(input, padding)] def gather_shape(input, indices, axis=0): rank = len(input) assert 0 <= axis < rank, "'axis' must be in range [0,{}), found {}".format(rank, axis) return input[:axis] + indices + input[axis+1:] def matmul_shape(A, B, transposeA=False, transposeB=False): assert len(A) == len(B), "argument rank mismatch ({} vs {})".format(len(A), len(B)) assert len(A) >= 2, "rank of arguments must be at least 2, found {}".format(len(A)) m = A[-1] if transposeA else A[-2] n = B[-2] if transposeB else B[-1] kA = A[-2] if transposeA else A[-1] kB = B[-1] if transposeB else B[-2] assert kA == kB, "inner dimensions must agree ({} vs {})".format(kA, kB) return _broadcast_shape(A[:-2], B[:-2]) + [m,n] def linear_shape(input, filter, bias=[]): assert len(input) == 2, "rank of input must be 2, found {}".format(len(input)) assert len(filter) == 2, "rank of filter must be 2, found {}".format(len(filter)) assert len(bias) <= 2, "rank of bias must 
be at most 2, found {}".format(len(bias)) assert input[1] == filter[1], "input channels ({}) does not match filter channels ({})".format(input[1], filter[1]) if len(bias) == 2: assert bias[0] == 1, "'bias' batch dimension must be singular" if len(bias): c = len(bias) - 1 assert bias[c] == filter[0], "'bias' channels ({}) does not match filter batch ({})".format(bias[c], filter[0]) return [input[0], filter[0]] def softmax_shape(input, axes=[1]): rank = len(input) assert all(0 <= axis < rank for axis in axes), "axes must be in range [0,{}), found {}".format(rank, axes) return input def batchnorm_shape(input, mean, variance, offset, scale, epsilon=0): assert epsilon >= 0, "'epsilon' must be non-negative, found {}".format(epsilon) assert _broadcastable(mean, input), \ "'mean' shape {} cannot be broadcast to 'input' shape {}".format(mean, input) assert _broadcastable(variance, input), \ "'variance' shape {} cannot be broadcast to 'input' shape {}".format(variance, input) assert _broadcastable(offset, input), \ "'offset' shape {} cannot be broadcast to 'input' shape {}".format(offset, input) assert _broadcastable(scale, input), \ "'scale' shape {} cannot be broadcast to 'input' shape {}".format(scale, input) return input def roi_shape(input, rois, batch_index, output_size, **kwargs): rank = len(input) assert len(output_size) == rank - 2, "expected 'output_size' of length {}, found {}".format(rank - 2, output_size) assert all(s > 0 for s in output_size), "'output_size' must be positive, found {}".format(output_size) assert len(rois) == 2, "'rois' must be of rank 2, found {}".format(rois) assert rois[1] == 4, "'rois' must be of extent 4 along dimension 1, found {}".format(rois) assert len(batch_index) == 1, "'batch_index' must be of rank 1, found {}".format(batch_index) assert batch_index[0] == rois[0], \ "'batch_index' must be of same length as dimension 0 of rois; found {} vs {}".format(batch_index, rois) rate = kwargs.get('sampling_rate') if rate: assert len(rate) == 
rank - 2, "expected 'sampling_rate' of length {}, found {}".format(rank - 2, rate) assert all(r > 0 for r in rate), "'rate' must be positive, found {}".format(rate) return [rois[0], input[1]] + output_size def quantize_shape(input, *args, **kwargs): for arg in args: assert _broadcastable(arg, input), \ "'min/max' shape {} cannot be broadcast to 'input' shape {}".format(arg, input) bits = kwargs.get('bits') if bits is not None: assert bits > 0, "'bits' must be positive, found {}".format(bits) return input def update_shape(variable, value): assert value == variable, "shape of update value {} does not match shape of variable {}".format(value, variable) return variable def copy_n_shape(value, times): assert times > 0, "'times' must be positive, found {}".format(times) return [value] * times def add_n_shape(values): assert len(values) != 0, "values must be non-empty" shape = values[0] assert all(value == shape for value in values), "shapes of values must be identical, found {}".format(values) return shape def _get_shape(graph, value): if isinstance(value, nnef.Identifier): return graph.tensors[value].shape elif isinstance(value, np.ndarray): return list(value.shape) elif isinstance(value, list): return [_get_shape(graph, v) for v in value] else: return [] def _set_shape(graph, value, shape): if isinstance(value, nnef.Identifier): tensor = graph.tensors[value] graph.tensors[value] = nnef.Tensor(tensor.name, tensor.dtype, shape, tensor.data, tensor.quantization) elif isinstance(value, list): for v, s in zip(value, shape): _set_shape(graph, v, s) def infer_shapes(graph, external_shapes={}, custom_shapes={}): # type: (nnef.Graph, dict)->None for op in graph.operations: func = _StandardShapeFuncs.get(op.name) if func is None: func = custom_shapes.get(op.name) if func is None: raise nnef.Error("shape inference function is not defined for operation '{}'".format(op.name)) if op.name == 'external': id = op.outputs['output'] override = external_shapes.get(id) if override is not 
None: override = list(override) original = op.attribs['shape'] assert len(override) == len(original), \ "overridden external shape rank ({}) does not match original rank ({})".format(len(override), len(original)) _set_shape(graph, id, override) continue input_shapes = [_get_shape(graph, input) for input in op.inputs.values()] try: output_shapes = func(*input_shapes, **op.attribs) if not isinstance(output_shapes, tuple): output_shapes = (output_shapes,) outputs = op.outputs.values() assert len(outputs) == len(output_shapes), \ "number of shapes ({}) does not match number of outputs ({})".format(len(outputs), len(output_shapes)) for output, shape in zip(outputs, output_shapes): if isinstance(output, list): assert isinstance(shape, list), "expected list of shapes" assert len(output) == len(shape), \ "number of shapes ({}) does not match number of outputs ({})".format(len(output), len(shape)) _set_shape(graph, output, shape) except AssertionError as e: raise nnef.Error("while inferring shape of tensor(s) '{}' (operation '{}'): {}". 
format(', '.join(op.outputs.values()), op.name, e)) for tensor in graph.tensors.values(): if tensor.quantization: for key, value in tensor.quantization.items(): if isinstance(value, np.ndarray): assert _broadcastable(value.shape, tensor.shape) def _infer_op_shapes(op_name, attribs, input_shapes, output_counts, custom_shapes={}): func = _StandardShapeFuncs.get(op_name) if func is None: func = custom_shapes.get(op_name) if func is None: raise nnef.Error("shape inference function is not defined for operation '{}'".format(op_name)) try: output_shapes = func(*input_shapes, **attribs) if not isinstance(output_shapes, tuple): output_shapes = (output_shapes,) assert len(output_counts) == len(output_shapes), \ "number of shapes ({}) does not match number of outputs ({})".format(len(output_counts), len(output_shapes)) for count, shape in zip(output_counts, output_shapes): if isinstance(count, list): assert isinstance(shape, list), "expected list of shapes" assert count == len(shape), \ "number of shapes ({}) does not match number of outputs ({})".format(count, len(shape)) return output_shapes except AssertionError as e: raise nnef.Error("while inferring output shape of operation '{}': {}".format(op_name, e)) _StandardShapeFuncs = { 'external': nullary_shape, 'variable': nullary_shape, 'constant': nullary_shape, 'copy': unary_shape, 'neg': unary_shape, 'not': unary_shape, 'rcp': unary_shape, 'exp': unary_shape, 'log': unary_shape, 'sin': unary_shape, 'cos': unary_shape, 'tan': unary_shape, 'asin': unary_shape, 'acos': unary_shape, 'atan': unary_shape, 'sinh': unary_shape, 'cosh': unary_shape, 'tanh': unary_shape, 'asinh': unary_shape, 'acosh': unary_shape, 'atanh': unary_shape, 'abs': unary_shape, 'sign': unary_shape, 'floor': unary_shape, 'ceil': unary_shape, 'round': unary_shape, 'sqr': unary_shape, 'sqrt': unary_shape, 'rsqr': unary_shape, 'rsqrt': unary_shape, 'log2': unary_shape, 'sigmoid': unary_shape, 'relu': unary_shape, 'elu': unary_shape, 'selu': unary_shape, 
'gelu': unary_shape, 'silu': unary_shape, 'softabs': unary_shape, 'softplus': unary_shape, 'leaky_relu': unary_shape, 'prelu': asymmetric_binary_shape, 'add': binary_shape, 'sub': binary_shape, 'mul': binary_shape, 'div': binary_shape, 'pow': binary_shape, 'min': binary_shape, 'max': binary_shape, 'lt': binary_shape, 'le': binary_shape, 'gt': binary_shape, 'ge': binary_shape, 'eq': binary_shape, 'ne': binary_shape, 'and': binary_shape, 'or': binary_shape, 'select': ternary_shape, 'clamp': ternary_shape, 'conv': conv_shape, 'deconv': deconv_shape, 'separable_conv': separable_conv_shape, 'separable_deconv': separable_deconv_shape, 'box': pool_shape, 'debox': unpool_shape, 'sample': sample_shape, 'desample': desample_shape, 'avg_pool': pool_shape, 'max_pool': pool_shape, 'argmax_pool': pool_shape, 'rms_pool': pool_shape, 'max_pool_with_index': pool_with_index_shape, 'max_unpool': unpool_shape, 'avg_unpool': unpool_shape, 'sum_reduce': reduce_shape, 'min_reduce': reduce_shape, 'max_reduce': reduce_shape, 'mean_reduce': reduce_shape, 'argmin_reduce': reduce_shape, 'argmax_reduce': reduce_shape, 'any_reduce': reduce_shape, 'all_reduce': reduce_shape, 'local_response_normalization': normalize_shape, 'local_mean_normalization': normalize_shape, 'local_variance_normalization': normalize_shape, 'local_contrast_normalization': normalize_shape, 'l1_normalization': normalize_shape, 'l2_normalization': normalize_shape, 'moments': moments_shape, 'batch_normalization': batchnorm_shape, 'nearest_downsample': downsample_shape, 'area_downsample': downsample_shape, 'nearest_upsample': upsample_shape, 'multilinear_upsample': upsample_shape, 'reshape': reshape_shape, 'transpose': transpose_shape, 'squeeze': squeeze_shape, 'unsqueeze': unsqueeze_shape, 'stack': stack_shape, 'unstack': unstack_shape, 'split': split_shape, 'concat': concat_shape, 'slice': slice_shape, 'tile': tile_shape, 'pad': pad_shape, 'cast': unary_shape, 'gather': gather_shape, 'matmul': matmul_shape, 'linear': 
linear_shape, 'softmax': softmax_shape, 'linear_quantize': quantize_shape, 'logarithmic_quantize': quantize_shape, 'min_max_linear_quantize': quantize_shape, 'zero_point_linear_quantize': quantize_shape, 'avg_roi_pool': roi_shape, 'max_roi_pool': roi_shape, 'avg_roi_align': roi_shape, 'max_roi_align': roi_shape, 'roi_resample': roi_shape, 'update': update_shape, 'copy_n': copy_n_shape, 'add_n': add_n_shape, } ================================================ FILE: nnef-pyproject/nnef/validate.py ================================================ # Copyright (c) 2017 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import nnef import argparse if __name__ == '__main__': ap = argparse.ArgumentParser() ap.add_argument('path', type=str, help='path to the model to validate') ap.add_argument('--stdlib', type=str, help='file name of alternate standard operation definitions ' '(defaults to all-primitive definitions)', default='') ap.add_argument('--lower', type=str, help='comma separated list of operations to lower (if defined as compound)', default='') ap.add_argument('--shapes', action="store_true", help='perform shape validation as well') ap.add_argument('--input-shape', type=str, help='override input shapes contained in the model; ' 'must be a Python list (applied to all inputs) ' 'or dict expression (applied by input name)', default=None) args = ap.parse_args() stdlib = '' if args.stdlib: try: with open(args.stdlib) as file: stdlib = file.read() except FileNotFoundError as e: print('Could not open file: ' + args.stdlib) exit(-1) try: graph = nnef.load_graph(args.path, stdlib=stdlib, lowered=args.lower.split(',')) except nnef.Error as err: print(err) exit(-1) if args.input_shape: input_shape = eval(args.input_shape) if not isinstance(input_shape, (list, dict)): print("input-shape must be Python list or dict expression") exit(-1) for op in graph.operations: if op.name == 'external': if isinstance(input_shape, dict): name = op.outputs['output'] if name in input_shape: op.attribs['shape'] = input_shape[name] else: op.attribs['shape'] = input_shape if args.shapes: try: nnef.infer_shapes(graph) except nnef.Error as err: print('Shape error: ' + str(err)) exit(-1) print(nnef.format_graph(graph.name, graph.inputs, graph.outputs, graph.operations, graph.tensors, annotate_shapes=args.shapes)) print('Validation succeeded') ================================================ FILE: nnef-pyproject/package_info.md ================================================ NNEF Parser Project =================== This package contains a sample NNEF parser, using a C++ backend. 
Using the module ----------------------- In the python interpreter, type import nnef graph = nnef.load_graph('example.nnef') If the path (`example.nnef`) points to a folder (with a graph.nnef in it), the whole model with weights is loaded. If it points to a file, it is interpreted as the graph description only, and it is loaded without weights. Alternatively, the methods graph = nnef.parse_file("graph.nnef", quantization = "graph.quant") and graph = nnef.parse_string("version 1.0; graph ...", quantization = "...") can be used to parse a graph and optional quantization info from files or strings. After invocation, `graph` is a data structure (named tuple) containing the name, tensors, operations, inputs and outputs of the graph. If shape information is also required, it can be obtained by calling `nnef.infer_shapes(graph)`, which updates the shape information on the graph structure in place. ================================================ FILE: nnef-pyproject/pyproject.toml ================================================ [project] name = "nnef" version = "1.0.10" description = "A package for parsing NNEF files" requires-python = ">=3.7" classifiers = [ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3', ] dynamic = ["readme"] keywords = ["nnef"] authors = [ { name = "Khronos Group", email = "nnef@lists.khronos.org" }, ] maintainers = [{ name = "Viktor Gyenes", email = "viktor.gyenes@aimotive.com" }] dependencies = ["numpy"] [build-system] requires = ["setuptools", "wheel", "numpy", "Cython"] build-backend = "setuptools.build_meta" [project.urls] "Homepage" = "https://www.khronos.org/nnef" "Repository" = "https://github.com/KhronosGroup/NNEF-Tools" [tool.setuptools.dynamic] readme = { file = ["package_info.md"], content-type = "text/markdown" } [tool.setuptools.package-data] "nnef.cpp" = ["**/*"] [tool.cibuildwheel] # 
Skip PyPy wheels skip = "pp*" test-command = "python {package}/tests/test.py" ================================================ FILE: nnef-pyproject/setup.py ================================================ # Copyright (c) 2017 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from setuptools import Extension, setup import numpy from os import name as os_name setup( ext_modules=[ Extension( "_nnef", sources=["nnef/nnef.cpp", "nnef/cpp/src/nnef.cpp"], include_dirs=["nnef/cpp/include", numpy.get_include()], language="c++", extra_compile_args=["-std=c++11"] if os_name != "nt" else [], ) ], ) ================================================ FILE: nnef-pyproject/stdlib.nnef ================================================ # Copyright (c) 2017 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# tensor declaration operations fragment external( shape: integer[] ) -> ( output: tensor ); fragment variable( shape: integer[], label: string ) -> ( output: tensor ); fragment constant( shape: integer[], value: ?[] ) -> ( output: tensor ); fragment update( variable: tensor, value: tensor ) -> ( result: tensor ); # tensor shape operations fragment reshape( input: tensor, shape: integer[], axis_start: integer = 0, axis_count: integer = -1 ) -> ( output: tensor ); fragment transpose( input: tensor, axes: integer[] ) -> ( output: tensor ); fragment concat( values: tensor[], axis: integer ) -> ( value: tensor ); fragment split( value: tensor, axis: integer, ratios: integer[] ) -> ( values: tensor[] ); fragment slice( input: tensor, axes: integer[], begin: integer[], end: integer[], stride: integer[] = [] ) -> ( output: tensor ); fragment squeeze( input: tensor, axes: integer[] ) -> ( output: tensor ); fragment unsqueeze( input: tensor, axes: integer[] ) -> ( output: tensor ); fragment stack( values: tensor[], axis: integer ) -> ( value: tensor ); fragment unstack( value: tensor, axis: integer ) -> ( values: tensor[] ); fragment tile( input: tensor, repeats: integer[] ) -> ( output: tensor ); fragment pad( input: tensor, padding: (integer, integer)[], border: string = 'constant', value: scalar = 0.0 ) -> ( output: tensor ); fragment gather( input: tensor, indices: tensor, axis: integer = 0 ) -> ( output: tensor ); fragment cast( input: tensor<> ) -> ( output: tensor ); # element-wise arithmetic operations fragment add( x: tensor, y: tensor ) -> ( z: tensor ); fragment sub( x: tensor, y: tensor ) -> ( z: tensor ); fragment mul( x: tensor, y: tensor ) -> ( z: tensor ); fragment div( x: tensor, y: tensor ) -> ( z: tensor ); fragment pow( x: tensor, y: tensor ) -> ( z: tensor ); fragment exp( x: tensor ) -> ( y: tensor ); fragment log( x: tensor ) -> ( y: tensor ); fragment sin( x: tensor ) -> ( y: tensor ); fragment cos( x: tensor ) -> ( y: tensor ); fragment tan( x: 
tensor ) -> ( y: tensor ); fragment sinh( x: tensor ) -> ( y: tensor ); fragment cosh( x: tensor ) -> ( y: tensor ); fragment tanh( x: tensor ) -> ( y: tensor ); fragment asin( x: tensor ) -> ( y: tensor ); fragment acos( x: tensor ) -> ( y: tensor ); fragment atan( x: tensor ) -> ( y: tensor ); fragment asinh( x: tensor ) -> ( y: tensor ); fragment acosh( x: tensor ) -> ( y: tensor ); fragment atanh( x: tensor ) -> ( y: tensor ); fragment abs( x: tensor ) -> ( y: tensor ); fragment sign( x: tensor ) -> ( y: tensor ); fragment rcp( x: tensor ) -> ( y: tensor ); fragment neg( x: tensor ) -> ( y: tensor ); fragment copy( x: tensor ) -> ( y: tensor ); # element-wise comparison operations fragment lt( x: tensor, y: tensor ) -> ( z: tensor ); fragment gt( x: tensor, y: tensor ) -> ( z: tensor ); fragment le( x: tensor, y: tensor ) -> ( z: tensor ); fragment ge( x: tensor, y: tensor ) -> ( z: tensor ); fragment eq( x: tensor, y: tensor ) -> ( z: tensor ); fragment ne( x: tensor, y: tensor ) -> ( z: tensor ); # element-wise logical operations fragment and( x: tensor, y: tensor ) -> ( z: tensor ); fragment or( x: tensor, y: tensor ) -> ( z: tensor ); fragment not( x: tensor ) -> ( y: tensor ); # element-wise rounding operations fragment floor( x: tensor ) -> ( y: tensor ); fragment ceil( x: tensor ) -> ( y: tensor ); fragment round( x: tensor ) -> ( y: tensor ); # element-wise select operation fragment select( condition: tensor, true_value: tensor, false_value: tensor ) -> ( output: tensor ); # simplifier operations fragment sqr( x: tensor ) -> ( y: tensor ) { y = x ^ 2.0; } fragment sqrt( x: tensor ) -> ( y: tensor ) { y = x ^ 0.5; } fragment rsqr( x: tensor ) -> ( y: tensor ) { y = x ^ -2.0; } fragment rsqrt( x: tensor ) -> ( y: tensor ) { y = x ^ -0.5; } fragment log2( x: tensor ) -> ( y: tensor ) { y = log(x) / log(2.0); } fragment min( x: tensor, y: tensor ) -> ( z: tensor ) { z = select(x < y, x, y); } fragment max( x: tensor, y: tensor ) -> ( z: tensor ) { z = 
select(x > y, x, y); } fragment clamp( x: tensor, a: tensor, b: tensor ) -> ( y: tensor ) { y = max(min(x, b), a); } # matrix multiplication fragment matmul( A: tensor, B: tensor, transposeA: logical = false, transposeB: logical = false ) -> ( C: tensor ); # sliding-window operations fragment conv( input: tensor, filter: tensor, bias: tensor = 0.0, border: string = 'constant', padding: (integer,integer)[] = [], stride: integer[] = [], dilation: integer[] = [], groups: integer = 1 ) -> ( output: tensor ); fragment deconv( input: tensor, filter: tensor, bias: tensor = 0.0, border: string = 'constant', padding: (integer,integer)[] = [], stride: integer[] = [], dilation: integer[] = [], output_shape: integer[] = [], groups: integer = 1 ) -> ( output: tensor ); fragment box( input: tensor, size: integer[], border: string = 'constant', padding: (integer,integer)[] = [], stride: integer[] = [], dilation: integer[] = [], normalize: logical = false ) -> ( output: tensor ); fragment debox( input: tensor, size: integer[], border: string = 'constant', padding: (integer,integer)[] = [], stride: integer[] = [], dilation: integer[] = [], output_shape: integer[] = [], normalize: logical = false ) -> ( output: tensor ); fragment argmax_pool( input: tensor, size: integer[], border: string = 'constant', padding: (integer,integer)[] = [], stride: integer[] = [], dilation: integer[] = [] ) -> ( index: tensor ); fragment sample( input: tensor, index: tensor, size: integer[], border: string = 'constant', padding: (integer,integer)[] = [], stride: integer[] = [], dilation: integer[] = [] ) -> ( output: tensor ); fragment desample( input: tensor, index: tensor, size: integer[], border: string = 'constant', padding: (integer,integer)[] = [], stride: integer[] = [], dilation: integer[] = [], output_shape: integer[] = [] ) -> ( output: tensor ); # up/down-sampling operations fragment nearest_downsample( input: tensor, factor: integer[] ) -> ( output: tensor ) { dims = 2 + length_of(factor); 
output = box(input, size = [1] * dims, stride = [1,1] + factor, padding = [(0,0)] * dims); } fragment area_downsample( input: tensor, factor: integer[] ) -> ( output: tensor ) { dims = 2 + length_of(factor); output = box(input, size = [1,1] + factor, stride = [1,1] + factor, padding = [(0,0)] * dims, normalize = true); } fragment nearest_upsample( input: tensor, factor: integer[] ) -> ( output: tensor ) { dims = 2 + length_of(factor); output = debox(input, size = [1,1] + factor, stride = [1,1] + factor, padding = [(0,0)] * dims); } fragment multilinear_upsample( input: tensor, factor: integer[], method: string = 'symmetric', border: string = 'replicate' ) -> ( output: tensor ); # reduce operations fragment sum_reduce( input: tensor, axes: integer[], normalize: logical = false ) -> ( output: tensor ); fragment max_reduce( input: tensor, axes: integer[] ) -> ( output: tensor ); fragment min_reduce( input: tensor, axes: integer[] ) -> ( output: tensor ); fragment argmax_reduce( input: tensor, axes: integer[] ) -> ( output: tensor ); fragment argmin_reduce( input: tensor, axes: integer[] ) -> ( output: tensor ); fragment any_reduce( input: tensor, axes: integer[] ) -> ( output: tensor ); fragment all_reduce( input: tensor, axes: integer[] ) -> ( output: tensor ); fragment mean_reduce( input: tensor, axes: integer[] ) -> ( output: tensor ) { output = sum_reduce(input, axes = axes, normalize = true); } fragment moments( input: tensor, axes: integer[] ) -> ( mean: tensor, variance: tensor ) { mean = mean_reduce(input, axes = axes); variance = mean_reduce(sqr(input - mean), axes = axes); } # activation functions fragment relu( x: tensor ) -> ( y: tensor ) { y = max(x, 0.0); } fragment sigmoid( x: tensor ) -> ( y: tensor ) { y = 1.0 / (1.0 + exp(-x)); } fragment softabs( x: tensor, epsilon: scalar ) -> ( y: tensor ) { y = sqrt(sqr(x) + epsilon); } fragment softmax( x: tensor, axes: integer[] = [1] ) -> ( y: tensor ) { m = max_reduce(x, axes = axes); e = exp(x - m); y = e / 
sum_reduce(e, axes = axes); } fragment softplus( x: tensor ) -> ( y: tensor ) { y = log(exp(x) + 1.0); } fragment elu( x: tensor, alpha: scalar = 1.0 ) -> ( y: tensor ) { y = select(x < 0.0, alpha * (exp(x) - 1.0), x); } fragment selu( x: tensor, alpha: scalar = 1.67326319, lambda: scalar = 1.05070102 ) -> ( y: tensor ) { y = lambda * select(x < 0.0, alpha * (exp(x) - 1.0), x); } fragment gelu( x: tensor ) -> ( y: tensor ) { # the exact definition of gelu is x * Phi(x) where Phi(x) is the # CDF of the standard normal distribution, which can be approximated # for example by sigmoid(1.702 * x) y = x * sigmoid(1.702 * x); } fragment silu( x: tensor ) -> ( y: tensor ) { y = x * sigmoid(x); } fragment prelu( x: tensor, alpha: tensor ) -> ( y: tensor ) { y = select(x < 0.0, alpha * x, x); } fragment leaky_relu( x: tensor, alpha: scalar ) -> ( y: tensor ) { y = prelu(x, alpha = alpha); } # pooling operations fragment max_pool_with_index( input: tensor, size: integer[], border: string = 'constant', padding: (integer,integer)[] = [], stride: integer[] = [], dilation: integer[] = [] ) -> ( output: tensor, index: tensor ) { index = argmax_pool(input, size = size, border = border, padding = padding, stride = stride, dilation = dilation); output = sample(input, index, size = size, border = border, padding = padding, stride = stride, dilation = dilation); } fragment max_pool( input: tensor, size: integer[], border: string = 'constant', padding: (integer,integer)[] = [], stride: integer[] = [], dilation: integer[] = [] ) -> ( output: tensor ) { output, index = max_pool_with_index(input, size = size, border = border, padding = padding, stride = stride, dilation = dilation); } fragment avg_pool( input: tensor, size: integer[], border: string = 'constant', padding: (integer,integer)[] = [], stride: integer[] = [], dilation: integer[] = [] ) -> ( output: tensor ) { output = box(input, size = size, border = border, padding = padding, stride = stride, dilation = dilation, normalize = 
true); } fragment rms_pool( input: tensor, size: integer[], border: string = 'constant', padding: (integer,integer)[] = [], stride: integer[] = [], dilation: integer[] = [] ) -> ( output: tensor ) { output = sqrt(avg_pool(sqr(input), size = size, border = border, padding = padding, stride = stride, dilation = dilation)); } # linear operations fragment linear( input: tensor, filter: tensor, bias: tensor = 0.0 ) -> ( output: tensor ) { output = matmul(input, filter, transposeB = true) + bias; } fragment separable_conv( input: tensor, plane_filter: tensor, point_filter: tensor, bias: tensor = 0.0, border: string = 'constant', padding: (integer,integer)[] = [], stride: integer[] = [], dilation: integer[] = [], groups: integer = 1 ) -> ( output: tensor ) { filtered = conv(input, plane_filter, border = border, padding = padding, stride = stride, dilation = dilation, groups = 0); output = conv(filtered, point_filter, bias, groups = groups); } fragment separable_deconv( input: tensor, plane_filter: tensor, point_filter: tensor, bias: tensor = 0.0, border: string = 'constant', padding: (integer,integer)[] = [], stride: integer[] = [], dilation: integer[] = [], output_shape: integer[] = [], groups: integer = 1 ) -> ( output: tensor ) { filtered = deconv(input, point_filter, groups = groups); output = deconv(filtered, plane_filter, bias, border = border, padding = padding, stride = stride, dilation = dilation, output_shape = output_shape, groups = 0); } # normalization operations fragment local_response_normalization( input: tensor, size: integer[], alpha: scalar = 1.0, beta: scalar = 0.5, bias: scalar = 1.0 ) -> ( output: tensor ) { sigma = bias + alpha * box(sqr(input), size = size, normalize = true); output = input / (sigma ^ beta); } fragment local_mean_normalization( input: tensor, size: integer[] ) -> ( output: tensor ) { mean = box(input, size = size, normalize = true); output = sub(input, mean); } fragment local_variance_normalization( input: tensor, size: integer[], 
bias: scalar = 0.0, epsilon: scalar = 0.0 ) -> ( output: tensor ) { sigma = sqrt(box(sqr(input), size = size, normalize = true)); output = input / max(sigma + bias, epsilon); } fragment local_contrast_normalization( input: tensor, size: integer[], bias: scalar = 0.0, epsilon: scalar = 0.0 ) -> ( output: tensor ) { centered = local_mean_normalization(input, size = size); output = local_variance_normalization(centered, size = size, bias = bias, epsilon = epsilon); } fragment l1_normalization( input: tensor, axes: integer[], bias: scalar = 0.0, epsilon: scalar = 0.0 ) -> ( output: tensor ) { sigma = sum_reduce(abs(input), axes = axes); output = input / max(sigma + bias, epsilon); } fragment l2_normalization( input: tensor, axes: integer[], bias: scalar = 0.0, epsilon: scalar = 0.0 ) -> ( output: tensor ) { sigma = sqrt(sum_reduce(sqr(input), axes = axes)); output = input / max(sigma + bias, epsilon); } fragment batch_normalization( input: tensor, mean: tensor, variance: tensor, offset: tensor, scale: tensor, epsilon: scalar ) -> ( output: tensor ) { output = offset + scale * (input - mean) / sqrt(variance + epsilon); } # roi operations fragment avg_roi_pool( input: tensor, rois: tensor, batch_index: tensor, output_size: integer[] ) -> ( output: tensor ); fragment max_roi_pool( input: tensor, rois: tensor, batch_index: tensor, output_size: integer[] ) -> ( output: tensor ); fragment roi_resample( input: tensor, rois: tensor, batch_index: tensor, output_size: integer[], method: string = 'symmetric' ) -> ( output: tensor ); fragment avg_roi_align( input: tensor, rois: tensor, batch_index: tensor, output_size: integer[], sampling_rate: integer[], resize_method: string = 'symmetric' ) -> ( output: tensor ) { size = [for i in range_of(output_size) yield output_size[i] * sampling_rate[i]]; resized = roi_resample(input, rois, batch_index, output_size = size, method = resize_method); output = avg_pool(resized, size = sampling_rate, stride = sampling_rate); } fragment 
max_roi_align( input: tensor, rois: tensor, batch_index: tensor, output_size: integer[], sampling_rate: integer[], resize_method: string = 'symmetric' ) -> ( output: tensor ) { size = [for i in range_of(output_size) yield output_size[i] * sampling_rate[i]]; resized = roi_resample(input, rois, batch_index, output_size = size, method = resize_method); output = max_pool(resized, size = sampling_rate, stride = sampling_rate); } # quantization operations fragment min_max_linear_quantize( x: tensor, min: tensor, max: tensor, bits: integer, signed: logical, symmetric: logical ) -> ( y: tensor ) { r = scalar(2 ^ bits - 1 - integer(signed && symmetric)); z = clamp(x, min, max); p = scalar(2 ^ (bits - 1) - integer(symmetric) if signed else 0); q = round((z - min) / (max - min) * r) - p; y = (q + p) / r * (max - min) + min; } fragment zero_point_linear_quantize( x: tensor, zero_point: tensor, scale: tensor, bits: integer, signed: logical, symmetric: logical ) -> ( y: tensor ) { z = cast(zero_point); s = round(x / scale) + z; r = scalar(2 ^ (bits - 1) - 1 if signed else 2 ^ bits - 1); q = clamp(s, 0.0 if !signed else -r if symmetric else -r - 1.0, r); y = (q - z) * scale; } fragment linear_quantize( x: tensor, min: tensor, max: tensor, bits: integer ) -> ( y: tensor ) { y = min_max_linear_quantize(x, min = min, max = max, bits = bits, signed = false, symmetric = false); } fragment logarithmic_quantize( x: tensor, max: tensor, bits: integer ) -> ( y: tensor ) { m = ceil(log2(max)); r = scalar(2 ^ bits - 1); q = round(clamp(log2(abs(x)), m - r, m)); y = sign(x) * 2.0 ^ q; } # misc operations fragment copy_n( x: tensor, times: integer ) -> ( y: tensor[] ) { y = [x] * times; } fragment add_n( x: tensor[] ) -> ( y: tensor ) { y = x[0] + add_n(x[1:]) if length_of(x) > 0 else constant(shape = [1], value = [0.0]); } ================================================ FILE: nnef-pyproject/tests/test.py ================================================ # Copyright (c) 2017 The Khronos Group 
Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import division, print_function, absolute_import import unittest import nnef class ParserTest(unittest.TestCase): def test_empty_document(self): with self.assertRaises(nnef.Error): nnef.parse_string("") def test_empty_body(self): with self.assertRaises(nnef.Error): nnef.parse_string("version 1.0; graph G( input ) -> ( output ) {}") def test_minimal(self): nnef.parse_string(""" version 1.0; graph G( input ) -> ( output ) { input = external(shape = []); output = copy(input); } """) def test_empty_input_not_declared(self): with self.assertRaises(nnef.Error): nnef.parse_string(""" version 1.0; graph G( input ) -> ( output ) { output = copy(input); } """) def test_input_not_external(self): with self.assertRaises(nnef.Error): nnef.parse_string(""" version 1.0; graph G( input ) -> ( output ) { input = constant(shape = [], value = [1.0]); output = copy(input); } """) def test_external_not_input(self): with self.assertRaises(nnef.Error): nnef.parse_string(""" version 1.0; graph G( input ) -> ( output ) { input = external(shape = []); other = external(shape = []); output = add(input, other); } """) def test_empty_output_not_declared(self): with self.assertRaises(nnef.Error): nnef.parse_string(""" version 1.0; graph G( input ) -> ( output ) { input = external(shape = []); } """) def test_variable_update(self): nnef.parse_string(""" version 1.0; graph G( input ) -> ( output ) { input = external(shape = []); var = 
variable(shape = [], label = 'var'); output = update(var, input); } """) def test_non_variable_update(self): with self.assertRaises(nnef.Error): nnef.parse_string(""" version 1.0; graph G( input ) -> ( output ) { input = external(shape = []); output = update(input, input); } """) def test_custom_fragment(self): nnef.parse_string(""" version 1.0; extension KHR_enable_fragment_definitions, KHR_enable_operator_expressions; fragment op( input: tensor ) -> ( output: tensor ) { output = input; } graph G( input ) -> ( output ) { input = external(shape = []); output = op(input); } """) def test_reshape(self): graph = nnef.parse_string(""" version 1.0; graph G( input ) -> ( output ) { input = external(shape = [1,2,3,4]); output = reshape(input, axis_start = 1, axis_count = 2, shape = [6]); } """) nnef.infer_shapes(graph) # def test_alexnet(self): # nnef.parse_file("../examples/alexnet.txt") # # def test_googlenet(self): # nnef.parse_file("../examples/googlenet.txt") # # def test_resnet(self): # nnef.parse_file("../examples/resnet.txt") # # def test_vggnet(self): # nnef.parse_file("../examples/vgg.txt") if __name__ == '__main__': unittest.main() ================================================ FILE: nnef_tools-pyproject/LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: nnef_tools-pyproject/README.md ================================================ # NNEF Tools This package contains a set of tools for converting and transforming machine learning models. ## Dependencies The python package supports extras for different functionalities: | Functionality | Extra | Additional packages | |--------------------------------|---------------------|----------------------------------------------| | TensorFlow Protobuf conversion | tensorflow-protobuf | tensorflow | | TensorFlow Lite conversion | tensorflow-lite | tensorflow, flatbuffers | | ONNX conversion | onnx | protobuf, onnx, onnx-simplifier, onnxruntime | | Caffe and Caffe2 conversion | caffe | protobuf, torch | | Visualization of NNEF models | visualization | graphviz | | Full install | full | _all packages listed above_ | Installing ONNX and Caffe dependencies (for reference): ``` pip install nnet_tools[onnx, caffe] ``` ## Usage [Python package usage](package_info.md) ================================================ FILE: nnef_tools-pyproject/custom/composite_export_example.py ================================================ import numpy as np import src.nnef_tools.io.tf.graphdef as graphdef try: import tensorflow.compat.v1 as tf tf.disable_v2_behavior() except ImportError: import tensorflow as tf # define composite operators as decorated python functions 
@graphdef.composite_function def lp_norm(x, p=2, axis=None, keepdims=False, name=None): return tf.pow(tf.reduce_sum(tf.pow(tf.abs(x), p), axis=axis, keepdims=keepdims), 1 / p) @graphdef.composite_function def sum_pool2d(input, ksize, strides, padding, data_format='NHWC', name=None): pooled = tf.nn.avg_pool2d(input, ksize=ksize, strides=strides, padding=padding, data_format=data_format) return pooled * float(np.prod(ksize)) # reset tractking of composite functions graphdef.reset_composites() # define the TF graph x = tf.placeholder(shape=(None, 32, 32, 3), dtype=tf.float32, name='input') w = tf.get_variable('w', shape=(5, 5, 3, 16), dtype=tf.float32, initializer=tf.zeros_initializer) x = tf.nn.conv2d(x, w, strides=1, padding='SAME') x = sum_pool2d(x, ksize=(1, 3, 3, 1), strides=1, padding='SAME') x = lp_norm(x, axis=3, keepdims=True) # export the graph to protobuf with tf.Session() as sess: sess.run(tf.global_variables_initializer()) graphdef.save_default_graph('test.pb', session=sess, outputs={x: 'output'}, input_shapes={'input': (1, 32, 32, 3)}) ================================================ FILE: nnef_tools-pyproject/custom/custom_operators_example.py ================================================ import torch # define how the PyTorch interpreter should execute the op def shuffle(input, groups): shape = list(input.shape) reshaped = input.reshape([shape[0], groups, shape[1] / groups] + shape[2:]) transposed = reshaped.permute(0, 2, 1, *list(range(3, len(shape) + 1))) return transposed.reshape(shape) # mapping from op names to executor functions CUSTOM_OPERATORS = { 'shuffle': shuffle, } ================================================ FILE: nnef_tools-pyproject/custom/custom_optimizers_example.py ================================================ # Define how a sequence of ops is replaced by a new sequence. # First test if the sequence of ops matched should be really replaced; return False if not. 
# If yes, create new Tensors and Operations in the graph with the Tensor() and Operation() constructors.
# DO NOT perform modifications to the graph before all checks passed!


def replace_shuffle(reshape1, transpose, reshape2):
    # Match a reshape -> transpose -> reshape chain that implements a channel
    # shuffle and replace it with a single custom 'shuffle' op.
    # The final reshape must restore the shape of the original input.
    if reshape2.output.shape != reshape1.input.shape:
        return False
    # The first reshape must split only the channel dimension (axis 1) into
    # two axes, keeping batch and spatial dimensions untouched.
    if len(reshape1.output.shape) != len(reshape1.input.shape) + 1 or \
        reshape1.output.shape[0] != reshape1.input.shape[0] or \
        reshape1.output.shape[3:] != reshape1.input.shape[2:]:
        return False
    # The transpose must swap the two split channel axes (1 and 2) and leave
    # all remaining axes in place.
    axes = transpose.attribs['axes']
    if axes[:3] != [0, 2, 1] or axes[3:] != list(range(3, len(axes))):
        return False
    # All checks passed: emit the replacement op. The group count is the size
    # of the first split axis produced by reshape1.
    # NOTE(review): Operation is presumably injected into scope by the
    # framework that loads this example -- it is not imported here; confirm.
    groups = reshape1.output.shape[1]
    Operation(reshape1.graph, type='shuffle', attribs={'groups': groups},
              inputs=reshape1.input, outputs=reshape2.output, custom=True)


# List sequences of op types that should be matched and replaced if the replacer function does not return False
# An item in the list may be a set as well, in which case any of its items can count as a match in the sequence
# Use a tuple for the key sequence, because list is not hashable
CUSTOM_OPTIMIZERS = {
    ('reshape', 'transpose', 'reshape'): replace_shuffle,
}


================================================
FILE: nnef_tools-pyproject/custom/custom_transforms_example.py
================================================
from src.nnef_tools import Transform


# define mapping from custom op names to converter transforms that maps them in this case to NNEF ops
CUSTOM_TRANSFORMS = {
    # sum pooling maps to NNEF 'box' filtering with normalize=False
    'sum_pool2d': Transform(
        type='box',
        inputs=(
            '!transpose_input(I[0], data_format)',
        ),
        outputs=(
            '!transpose_output(O[0], data_format)',
        ),
        attribs={
            'size': '!nxc_to_ncx(ensure_list(ksize), cond=is_nxc(data_format))',
            'stride': '!nxc_to_ncx(ensure_list(strides), cond=is_nxc(data_format))',
            'padding': '!convert_padding(padding, I[0].rank)',
            'normalize': False,
        }
    ),
    # Lp norm maps to l1/l2_normalization; only p == 1 or p == 2 convert
    'lp_norm': Transform(
        type='!"l1_normalization" if p == 1 else "l2_normalization"',
        cond='!p == 1 or p == 2',
        inputs='!I[0]',
        outputs='!transpose_like(O[0], ref=I[0])',
        attribs={
            'axes': '!ensure_list(transpose_axis_like(axis, ref=I[0]))',
        }
    ),
}


================================================
FILE: nnef_tools-pyproject/custom/onnx_custom_export_example.py
================================================
import torch
import torch.nn.functional as F
from torch.onnx import register_custom_op_symbolic


def aim_affine_grid(g, trans, shape, align_corners):
    # symbolic function: emit affine_grid as a custom op in the 'com.example' domain
    return g.op("com.example::affine_grid", trans, shape, align_corners)


def aim_grid_sample(g, input, grid, mode, padding, align_corners):
    # symbolic function: emit grid_sample as a custom op in the 'com.example' domain
    return g.op("com.example::grid_sample", input, grid, mode, padding, align_corners)


# route the built-in ATen ops to the custom symbolic functions above
register_custom_op_symbolic('::affine_grid_generator', aim_affine_grid, 1)
register_custom_op_symbolic('::grid_sampler', aim_grid_sample, 1)


class AffineTransform(torch.nn.Module):
    """Applies an affine transformation (parameterized by theta) to its input
    by generating a sampling grid of the configured width/height."""

    def __init__(self, width, height):
        super(AffineTransform, self).__init__()
        self.width = width
        self.height = height

    def forward(self, input, theta):
        batch = int(input.shape[0])  # int() forces static shape instead of dynamic Shape() op
        channel = int(input.shape[1])
        grid = F.affine_grid(size=[batch, channel, self.height, self.width], theta=theta)
        return F.grid_sample(input, grid)


class Model(torch.nn.Module):
    """Small conv network with an affine-transform stage in the middle,
    used to demonstrate exporting the custom ops to ONNX."""

    def __init__(self, grid_size):
        super(Model, self).__init__()
        self.conv1 = torch.nn.Conv2d(in_channels=3, out_channels=16, kernel_size=(4, 4), stride=(2, 2))
        self.conv2 = torch.nn.Conv2d(in_channels=16, out_channels=8, kernel_size=(3, 3), stride=(1, 1))
        self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2)
        self.affine = AffineTransform(width=grid_size[0], height=grid_size[1])

    def forward(self, x, t):
        x = self.conv1(x)
        x = self.pool(x)
        x = self.affine(x, t)
        x = self.conv2(x)
        return x


# build the model and export it with example inputs
model = Model(grid_size=(100, 100))
model.eval()

x = torch.randn(1, 3, 224, 224, requires_grad=False)
y = torch.zeros(size=(1, 2, 3))

torch.onnx.export(model, (x, y), "test.onnx", opset_version=10)


================================================
FILE: nnef_tools-pyproject/custom/onnx_custom_transforms_example.py
================================================
from src.nnef_tools import Transform


def affine_grid_shape(theta, shape):
    """Shape function: the output grid shape equals the 'shape' input."""
    return shape


# shape inference functions for the custom ops registered at export time
CUSTOM_SHAPES = {
    'grid_sample': lambda input, grid: input,
    'affine_grid': affine_grid_shape,
}

# converter transforms for the custom ONNX ops; 'using' evaluates constant
# inputs into named values, 'cond' maps conditions to error messages shown
# when a conversion constraint is violated
CUSTOM_TRANSFORMS = {
    'affine_grid': Transform(
        type='affine_grid',
        using={
            'size': '!as_const(I[1])',
            'align': '!as_const(I[2])',
        },
        cond={
            '!align == 0': 'align_corners must be 0 (false)',
        },
        inputs=(
            '!I[0]',
        ),
        outputs=(
            '!O[0]',
        ),
        attribs={
            'shape': '!size',
        }
    ),
    'grid_sample': Transform(
        type='grid_sample',
        using={
            'mode': '!as_const(I[2])',
            'padding': '!as_const(I[3])',
            'align': '!as_const(I[4])',
        },
        cond={
            '!mode == 0': 'mode must be 0 (bilinear)',
            '!padding == 0': 'padding_mode must be 0 (zeros)',
            '!align == 0': 'align_corners must be 0 (false)',
        },
        inputs=(
            '!I[0]',
            '!I[1]',
        ),
        outputs=(
            '!O[0]',
        ),
    ),
}


================================================
FILE: nnef_tools-pyproject/nnef_tools/__init__.py
================================================
# Copyright (c) 2020 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


================================================
FILE: nnef_tools-pyproject/nnef_tools/conversion/__init__.py
================================================
# Copyright (c) 2020 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .converter import Transform, Converter, ConversionError ================================================ FILE: nnef_tools-pyproject/nnef_tools/conversion/converter.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import division, print_function, absolute_import from ..model import * from ..utils import types import numpy as np import functools import inspect import math import copy import six import re class Transform: def __init__(self, type, name=None, inputs=None, outputs=None, attribs=None, defaults=None, using=None, cond=None, custom=False): self.type = type self.name = name or '!_name_' self.inputs = inputs or () self.outputs = outputs or () self.attribs = attribs or {} self.defaults = defaults self.using = using or {} self.cond = cond self.custom = custom def with_type(self, type): return Transform(type=type, name=self.name, inputs=self.inputs, outputs=self.outputs, attribs=self.attribs, defaults=self.defaults, using=self.using, cond=self.cond, custom=self.custom) class ConversionError(Exception): def __init__(self, message, details=None): Exception.__init__(self, message) self.details = details class Converter: @staticmethod def find_public_methods(obj): methods = inspect.getmembers(obj, predicate=inspect.ismethod) return {name: func for name, func in methods if not name.startswith('_')} @staticmethod def find_public_functions(obj): methods = inspect.getmembers(obj, predicate=inspect.isfunction) return {name: func for name, func in methods if not name.startswith('_')} @staticmethod def decomposed_operations(): return [] # return list of decomposed NNEF ops in subclass if converting from NNEF @staticmethod def defined_operations(): return {} # return dictionary of NNEF operator (fragment) definitions in subclass if converting to NNEF @staticmethod def defined_operation_dependencies(): return {} # return dictionary of NNEF operator (fragment) dependencies in subclass if converting to NNEF @staticmethod def defined_shapes(): return {} # return dictionary of shape functions for NNEF fragments defined by the converter @staticmethod def unpack_transforms(transforms): unpacked = {} for key, value in six.iteritems(transforms): assert isinstance(value, 
Transform) if isinstance(key, tuple): if isinstance(value, Transform) and isinstance(value.type, tuple): for key_item, type_item in zip(key, value.type): value_item = copy.deepcopy(value) value_item.type = type_item unpacked[key_item] = value_item else: for item in key: unpacked[item] = value else: unpacked[key] = value return unpacked @staticmethod def merge_transforms(default_transforms, custom_transforms): if custom_transforms is None: return default_transforms transforms = dict(default_transforms) transforms.update(custom_transforms) return transforms def __init__(self, transforms, functions=None, mirror_unsupported=False, infer_shapes=False, custom_shapes=None): self._graph = None self._transforms = transforms self._callables = self.find_public_methods(self) if functions: self._callables.update({name: functools.partial(func, self) for name, func in six.iteritems(functions)}) self._mirror_unsupported = mirror_unsupported self._infer_shapes = infer_shapes self._custom_shapes = custom_shapes or {} def __call__(self, graph): if not self._infer_shapes: unknown_tensors = [tensor for tensor in graph.tensors if (tensor.shape is None or any(s is None for s in tensor.shape)) and len(tensor.consumers)] if len(unknown_tensors): names = ["'{}'".format(tensor.name) for tensor in unknown_tensors if tensor.name] raise ConversionError(("Input graph contains tensors with dynamic shape: " + ", ".join(names) if len(names) else "(no names)") + "\nTry the --fold-constants option to evaluate constant sub-graphs " "or the --static-only option to convert only the static part of the graph") self._graph = Graph(name=graph.name) self._tensor_map = {tensor: self._copy_tensor_(tensor) for tensor in graph.tensors} self._tensor_map.update({val: key for key, val in six.iteritems(self._tensor_map)}) self._transposes = {} self._prepare(self._graph) if not self._infer_shapes: errors = [] for op in graph.operations: transform = self._transforms.get(op.type) if transform is None and 
self._mirror_unsupported: continue if isinstance(transform, Transform) and transform.type is None: continue error = self._error_message(op, transform) if error is not None: errors.append(error) if len(errors): raise ConversionError("Found {} operator(s) that cannot be converted\n{}".format(len(errors), "\n".join(errors))) for op in graph.operations: transform = self._transforms.get(op.type) if isinstance(transform, Transform) and transform.type is None: continue if self._infer_shapes: error = self._error_message(op, transform) if error is not None: raise ConversionError(error) count = len(self._graph.operations) if transform is not None: self._convert(op, transform) elif self._mirror_unsupported: self._mirror(op) if self._infer_shapes: from nnef.shapes import _infer_op_shapes for op in self._graph.operations[count:]: input_shapes = [list(tensor.shape) for tensor in op.inputs] if not isinstance(op.inputs, tuple): input_shapes = (input_shapes,) output_counts = [len(op.outputs)] if not isinstance(op.outputs, tuple) else [None] * len(op.outputs) output_shapes = _infer_op_shapes(op.type, op.attribs, input_shapes, output_counts, custom_shapes=self._custom_shapes) if not isinstance(op.outputs, tuple): output_shapes = output_shapes[0] for output, shape in zip(op.outputs, output_shapes): output.shape = tuple(shape) for tensor, shape in self._transposes.items(): tensor.shape = shape self._graph.remove_tensors([tensor for tensor in self._graph.tensors if len(tensor.producers) == 0 and len(tensor.consumers) == 0]) self._graph.inputs = tuple(self._tensor_map[tensor] for tensor in graph.inputs if self._tensor_map[tensor].graph) self._graph.outputs = tuple(self._tensor_map[tensor] for tensor in graph.outputs if self._tensor_map[tensor].graph) return self._graph def tensor_mapping(self): return {key.name: value.name for key, value in six.iteritems(self._tensor_map) if value.graph == self._graph and key.name is not None and value.name is not None} def _global_attribs(self): return 
{} def _prepare(self, graph): pass def _check_conditions(self, op, transform): op_inputs = list(self._tensor_map[tensor] for tensor in op.inputs) op_outputs = list(self._tensor_map[tensor] for tensor in op.outputs) op_attribs = self._add_default_attribs(op.attribs, transform.defaults, op_inputs, op_outputs, op.type, op.name) using = {'_type_': op.type, '_name_': op.name, **self._global_attribs()} for key, item in six.iteritems(transform.using): value = self._evaluate(op_attribs, op_inputs, op_outputs, item, using) self._check_value(value, 'using', key, op.type, op.name) using[key] = value error = None if transform.cond is not None: for condition, message in transform.cond.items(): if not self._evaluate(op_attribs, op_inputs, op_outputs, condition, using): error = message if error is None else error + ', ' + message return error def _error_message(self, op, transform): if transform is None: return "Conversion of operator '{}' is not implemented".format(op.type) message = self._check_conditions(op, transform) if message is not None: attribs = {key: value for key, value in six.iteritems(op.attribs) if not key.startswith('_')} input_shapes = ", ".join(str(tensor.shape) for tensor in op.inputs) output_shapes = ", ".join(str(tensor.shape) for tensor in op.outputs) return "Conversion of operator '{}' is not possible: {}"\ "\n attributes: {}\n input-shapes: {}\n output-shapes: {}"\ .format(op.type, message, attribs, input_shapes, output_shapes) return None def _convert(self, op, transform): op_inputs = list(self._tensor_map[tensor] for tensor in op.inputs) op_outputs = list(self._tensor_map[tensor] for tensor in op.outputs) op_attribs = self._add_default_attribs(op.attribs, transform.defaults, op_inputs, op_outputs, op.type, op.name) using = {'_type_': op.type, '_name_': op.name, **self._global_attribs()} for key, item in six.iteritems(transform.using): value = self._evaluate(op_attribs, op_inputs, op_outputs, item, using) self._check_value(value, 'using', key, op.type, 
op.name) using[key] = value type = self._evaluate(op_attribs, op_inputs, op_outputs, transform.type, using) self._check_value(type, 'field', 'type', op.type, op.name) name = self._evaluate(op_attribs, op_inputs, op_outputs, transform.name, using) self._check_value(name, 'field', 'name', op.type, op.name) attribs = {} for key, item in six.iteritems(transform.attribs): value = self._evaluate(op_attribs, op_inputs, op_outputs, item, using) if value is not None: attribs[key] = value for key, value in six.iteritems(attribs): self._check_value(value, 'attribute', key, op.type, op.name) if isinstance(transform.inputs, list): inputs = self._evaluate_tensor_list(op_attribs, op_inputs, op_outputs, transform.inputs, using) elif isinstance(transform.inputs, tuple): inputs = tuple(self._filter_none(self._evaluate(op_attribs, op_inputs, op_outputs, item, using) for item in transform.inputs)) else: inputs = (self._evaluate(op_attribs, op_inputs, op_outputs, transform.inputs, using),) for idx, item in enumerate(inputs): self._check_value(item, 'input', idx, op.type, op.name, tensor=True) offset = len(self._graph.operations) if isinstance(transform.outputs, list): outputs = self._evaluate_tensor_list(op_attribs, op_inputs, op_outputs, transform.outputs, using) elif isinstance(transform.outputs, tuple): outputs = tuple(self._filter_none(self._evaluate(op_attribs, op_inputs, op_outputs, item, using) for item in transform.outputs)) else: outputs = (self._evaluate(op_attribs, op_inputs, op_outputs, transform.outputs, using),) for idx, item in enumerate(outputs): self._check_value(item, 'output', idx, op.type, op.name, tensor=True) op = Operation(self._graph, type=type, name=name, attribs=attribs, inputs=inputs, outputs=outputs, custom=transform.custom) self._graph.reverse(offset) return op def _mirror(self, op): inputs_type = tuple if isinstance(op.inputs, tuple) else list outputs_type = tuple if isinstance(op.outputs, tuple) else list op_inputs = inputs_type(self._tensor_map[tensor] 
for tensor in op.inputs) op_outputs = outputs_type(self._tensor_map[tensor] for tensor in op.outputs) return Operation(self._graph, type=op.type, name=op.name, attribs=op.attribs, inputs=op_inputs, outputs=op_outputs, custom=True) def _add_default_attribs(self, attribs, defaults, inputs, outputs, op_type, op_name): if defaults is None: return attribs attribs = dict(attribs) for key, value in six.iteritems(defaults): if key not in attribs: value = self._evaluate({}, inputs, outputs, value) self._check_value(value, 'default', key, op_type, op_name) attribs[key] = value return attribs def _evaluate(self, attribs, inputs, outputs, arg, using={}): if isinstance(arg, str) and arg[0] == '!': try: return eval(arg[1:], {'I': inputs, 'O': outputs, **attribs, **using, **self._callables, 'np': np, 'math': math}) except Exception as e: return e else: return arg def _evaluate_tensor_list(self, attribs, inputs, outputs, arg, using): values = [] for item in arg: value = self._evaluate(attribs, inputs, outputs, item, using) if isinstance(value, Tensor) or isinstance(value, Exception): values.append(value) else: assert isinstance(value, (list, tuple)) values += list(value) return values def _filter_none(self, items): return (item for item in items if item is not None) def _check_value(self, value, kind, key, op_type, op_name, tensor=False): if isinstance(value, Exception): raise ConversionError("Could not evaluate {kind} '{key}' while converting operator '{type}'; {err}: {cause}" .format(kind=kind, key=key, type=op_type, name=op_name, err=type(value).__name__, cause=str(value) or repr(value))) if tensor and not isinstance(value, Tensor): raise ConversionError("While converting operator '{op_type}', {kind} '{key}' must result in a tensor, " "but found {value_type}" .format(kind=kind, key=key, op_type=op_type, value_type=type(value))) def _copy_tensor_(self, tensor): return Tensor(self._graph, name=tensor.name, dtype=tensor.dtype, shape=tensor.shape, data=tensor.data, 
quant=copy.deepcopy(tensor.quant)) def _read_constant(self, tensor, type): raise NotImplementedError() def _make_constant(self, graph, dtype, value, inline): raise NotImplementedError() def _const_operation(self, output, value): raise NotImplementedError() def _transpose_operation(self, input, output, perm): raise NotImplementedError() def _reshape_operation(self, input, output, shape): raise NotImplementedError() def _squeeze_operation(self, input, output, axes): raise NotImplementedError() def _unsqueeze_operation(self, input, output, axes): raise NotImplementedError() def _scale_operation(self, input, output, scalar): raise NotImplementedError() @staticmethod def _permute(items, perm): permuted = list(items) for i in range(len(perm)): permuted[i] = items[perm[i]] return type(items)(permuted) @staticmethod def _inverse_permute(items, perm): permuted = list(items) for i in range(len(perm)): permuted[perm[i]] = items[i] return type(items)(permuted) def _working_shape(self, tensor): return self._transposes.get(tensor) or tensor.shape def _pre_transpose(self, input, perm): shape = self._permute(self._working_shape(input), perm) output = Tensor(input.graph, dtype=input.dtype, shape=shape, quant=copy.deepcopy(input.quant)) self._transpose_operation(input, output, perm) return output def _post_transpose(self, output, perm): shape = self._inverse_permute(self._working_shape(output), perm) input = Tensor(output.graph, dtype=output.dtype, shape=shape, quant=copy.deepcopy(output.quant)) self._transpose_operation(input, output, perm) return input def _pre_squeeze(self, input, axes): shape = self.squeeze_shape(self._working_shape(input), axes) output = Tensor(input.graph, dtype=input.dtype, shape=shape, quant=copy.deepcopy(input.quant)) self._squeeze_operation(input, output, axes) return output def _pre_unsqueeze(self, input, axes): shape = self.unsqueeze_shape(self._working_shape(input), axes) output = Tensor(input.graph, dtype=input.dtype, shape=shape, 
quant=copy.deepcopy(input.quant)) self._unsqueeze_operation(input, output, axes) return output def _post_squeeze(self, output, axes): shape = self.unsqueeze_shape(self._working_shape(output), axes) input = Tensor(output.graph, dtype=output.dtype, shape=shape, quant=copy.deepcopy(output.quant)) self._squeeze_operation(input, output, axes) return input def _post_unsqueeze(self, output, axes): shape = self.squeeze_shape(self._working_shape(output), axes) input = Tensor(output.graph, dtype=output.dtype, shape=shape, quant=copy.deepcopy(output.quant)) self._unsqueeze_operation(input, output, axes) return input def _reshape(self, input, shape): output = Tensor(input.graph, dtype=input.dtype, shape=shape, quant=copy.deepcopy(input.quant)) self._reshape_operation(input, output, shape) return output def _shape_of(self, value): if isinstance(value, (list, tuple)): length = len(value) return (length,) + self._shape_of(value[0]) if length > 0 else (0,) elif isinstance(value, np.ndarray): return value.shape else: return () def squeeze_shape(self, shape, axes): return type(shape)(shape[i] for i in range(len(shape)) if i not in axes) def unsqueeze_shape(self, shape, axes): for axis in axes: shape = shape[:axis] + (1,) + shape[axis:] return shape def transposing(self, tensor): return tensor in self._transposes def nxc_to_ncx(self, items, cond=True): return items[0:1] + items[-1:] + items[1:-1] if cond else items def ncx_to_nxc(self, items, cond=True): return items[0:1] + items[2:] + items[1:2] if cond else items def xcn_to_ncx(self, items, cond=True): return items[-1:] + items[-2:-1] + items[:-2] if cond else items def ncx_to_xcn(self, items, cond=True): return items[2:] + items[1:2] + items[0:1] if cond else items def cxn_to_ncx(self, items, cond=True): return items[-1:] + items[:-1] if cond else items def ncx_to_cxn(self, items, cond=True): return items[1:] + items[:1] if cond else items def nxc_to_ncx_perm(self, rank): return self.nxc_to_ncx(list(range(rank))) def 
ncx_to_nxc_perm(self, rank): return self.ncx_to_nxc(list(range(rank))) def xcn_to_ncx_perm(self, rank): return self.xcn_to_ncx(list(range(rank))) def ncx_to_xcn_perm(self, rank): return self.ncx_to_xcn(list(range(rank))) def cxn_to_ncx_perm(self, rank): return self.cxn_to_ncx(list(range(rank))) def ncx_to_cxn_perm(self, rank): return self.ncx_to_cxn(list(range(rank))) def axis_nxc_to_ncx(self, value, rank): if isinstance(value, (list, tuple)): return type(value)(self.axis_nxc_to_ncx(v, rank) for v in value) else: if value < 0: value += rank return 0 if value == 0 else 1 if value == rank - 1 else value + 1 def axis_ncx_to_nxc(self, value, rank): if isinstance(value, (list, tuple)): return type(value)(self.axis_ncx_to_nxc(v, rank) for v in value) else: if value < 0: value += rank return 0 if value == 0 else rank - 1 if value == 1 else value - 1 def ensure_positive(self, axis, rank): if isinstance(axis, (list, tuple)): return type(axis)(self.ensure_positive(item, rank) for item in axis) else: return axis + rank if axis < 0 else axis def as_const(self, tensor, type=None): return self._read_constant(self._tensor_map[tensor], type=type) def is_const(self, tensor, type=None): tensor = self._tensor_map[tensor] if tensor.data is not None: return True try: self._read_constant(tensor, type=type) return True except ConversionError: return False def is_zero(self, tensor): return self.is_const(tensor) and len(tensor.shape) == 0 and self.as_const(tensor) == 0 def as_tensor(self, arg, dtype, inline=None): return self._make_constant(self._graph, dtype=dtype, value=arg, inline=inline) def new_tensor(self, shape, dtype): return Tensor(self._graph, dtype=dtype, shape=shape) def is_integer_upsample(self, input_shape, output_shape): return all(output % input == 0 for input, output in zip(input_shape, output_shape)) def is_integer_downsample(self, input_shape, output_shape): return all(input % output == 0 for input, output in zip(input_shape, output_shape)) def upsample_factor(self, 
input_shape, output_shape): return [output // input for input, output in zip(input_shape, output_shape)] def downsample_factor(self, input_shape, output_shape): return [input // output for input, output in zip(input_shape, output_shape)] def from_numpy(self, array, type=None): return types.from_numpy(array, type) def to_numpy(self, value, dtype=None): return types.to_numpy(value, dtype) def flexible_batch(self, output_shape, batch): return [0] + output_shape[1:] if output_shape[0] == batch else output_shape def fixed_batch(self, output_shape, batch): return [batch] + output_shape[1:] if output_shape[0] == 0 else output_shape class ConverterToNNEF(Converter): _DtypeFromNumpy = { np.float16: 'scalar', np.float32: 'scalar', np.float64: 'scalar', np.int8: 'integer', np.uint8: 'integer', np.int16: 'integer', np.uint16: 'integer', np.int32: 'integer', np.uint32: 'integer', np.int64: 'integer', np.uint64: 'integer', np.bool_: 'logical', } def __init__(self, transforms, functions=None, mirror_unsupported=False, infer_shapes=False, custom_shapes=None): Converter.__init__(self, transforms, functions, mirror_unsupported, infer_shapes, custom_shapes) def _insert_externals_and_constants(self, graph): for tensor in graph.tensors: mapped = self._tensor_map[tensor] if mapped.producer is None and len(mapped.consumers) > 0: if mapped.data is None: Operation(graph, type='external', inputs=(), outputs=tensor, attribs={'shape': list(tensor.shape), 'dtype': tensor.dtype}) else: Operation(graph, type='constant', inputs=(), outputs=tensor, attribs={'shape': list(tensor.shape), 'dtype': tensor.dtype, 'value': mapped.data}) def _ensure_valid_ids(self, graph): if graph.name is not None: graph.name = self.ensure_valid_id(graph.name) for tensor in graph.tensors: if tensor.name is not None: tensor.name = self.ensure_valid_id(tensor.name) def _make_constant(self, graph, dtype, value, inline): if isinstance(value, tuple): value = list(value) shape = value.shape if isinstance(value, np.ndarray) 
else (len(value),) if isinstance(value, list) else () isarray = isinstance(value, np.ndarray) or isinstance(value, list) tensor = Tensor(graph, dtype=dtype, shape=shape) if inline: tensor.data = types.to_numpy(value, dtype) else: self._const_operation(tensor, value=value if isarray else [value]) return tensor def _const_operation(self, output, value): Operation(output.graph, type='constant', inputs=(), outputs=output, attribs={'value': value, 'dtype': output.dtype, 'shape': list(output.shape)}) def _transpose_operation(self, input, output, perm): Operation(input.graph, type='transpose', inputs=input, outputs=output, attribs={'axes': perm}) def _reshape_operation(self, input, output, shape): Operation(input.graph, type='reshape', inputs=input, outputs=output, attribs={'shape': list(shape)}) def _squeeze_operation(self, input, output, axes): Operation(input.graph, type='squeeze', inputs=input, outputs=output, attribs={'axes': axes}) def _unsqueeze_operation(self, input, output, axes): Operation(input.graph, type='unsqueeze', inputs=input, outputs=output, attribs={'axes': axes}) def _scale_operation(self, input, output, scalar): if not isinstance(scalar, Tensor): scalar = self.as_tensor(scalar, np.float32) Operation(input.graph, type='mul', inputs=(input, scalar), outputs=output) def _bias_operation(self, input, output, bias): if not isinstance(bias, Tensor): bias = self.as_tensor(bias, np.float32) Operation(input.graph, type='add', inputs=(input, bias), outputs=output) def _transform_constant(self, tensor, func): data = func(tensor.producer.attribs['value']) tensor.shape = data.shape tensor.producer.attribs['value'] = data tensor.producer.attribs['shape'] = list(data.shape) @staticmethod def remove_unused_constants(graph): ops = [op for op in graph.operations if op.type == 'constant' and not op.output.has_consumer] tensors = [op.output for op in ops] graph.outputs = [tensor for tensor in graph.outputs if tensor not in tensors] graph.remove_operations(ops, 
unlink=True) graph.remove_tensors(tensors) @staticmethod def inline_scalar_constants(graph): for op in graph.operations: if op.type == 'constant': value = op.attribs['value'] if not isinstance(value, np.ndarray): value = np.array(value, op.output.dtype).reshape(op.output.shape) if len(value.shape) == 0: op.output.data = value graph.remove_operation(op, unlink=True) @staticmethod def convert_constants_to_variables(graph): variables = 0 for op in graph.operations: if op.type == 'constant': value = op.attribs['value'] if isinstance(value, np.ndarray): variables += 1 op.type = 'variable' op.attribs['label'] = op.name if op.name else 'variable' + str(variables) op.output.data = value del op.attribs['value'] @staticmethod def ensure_valid_id(name): return re.sub('[^_0-9a-zA-Z]+', '_', name) def nnef_dtype(self, dtype): return ConverterToNNEF._DtypeFromNumpy[dtype] class ConverterFromNNEF(Converter): @staticmethod def decomposed_operations(): return ['separable_conv', 'separable_deconv', 'rms_pool', 'local_mean_normalization', 'local_variance_normalization', 'local_contrast_normalization', 'l1_normalization', 'moments'] def __init__(self, transforms, functions=None, mirror_unsupported=False): Converter.__init__(self, transforms, functions, mirror_unsupported) @staticmethod def convert_variables_to_constants(graph): for op in graph.operations: if op.type == 'variable': op.type = 'constant' op.attribs['value'] = op.output.data del op.attribs['label'] @staticmethod def fill_data_in_constants(graph): for op in graph.operations: if op.type == 'constant': op.output.data = op.attribs['value'] def _is_constant(self, tensor): if tensor.producer: return tensor.producer.type == 'constant' else: return tensor.data is not None def _read_constant(self, tensor, type): if tensor.data is not None: value = tensor.data elif tensor.producer and tensor.producer.type == 'constant': value = tensor.producer.attribs['value'] else: raise ConversionError('trying to evaluate non-constant tensor') 
return types.from_numpy(value, type=type) if isinstance(value, np.ndarray) else \ types.cast(value, type=type) if type is not None else value ================================================ FILE: nnef_tools-pyproject/nnef_tools/conversion/nnef_to_onnx.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import division, print_function, absolute_import from .converter import ConverterFromNNEF as _Converter, Transform from ..model import Tensor, Operation from ..utils import types import numpy as np from nnef.shapes import pool_shape, reduce_shape, deconv_shape class Converter(_Converter): @staticmethod def defined_shapes(): return { 'lp_pool': pool_shape, 'lp_reduce': reduce_shape, 'mean_variance_normalization': lambda input, scale, offset, **kwargs: input, 'lstm_step': lambda x, h, c, W, R, B: (h, c), 'lstm_loop': lambda X, W, R, B, h, c, **kwargs: (h, c), 'erf': lambda x: x, 'mish': lambda x: x, 'depth_to_space': lambda x, block_size, **kwargs: [x[0], x[1] // block_size ** 2, x[2] * block_size, x[3] * block_size], 'space_to_depth': lambda x, block_size, **kwargs: [x[0], x[1] * block_size ** 2, x[2] // block_size, x[3] // block_size], } @staticmethod def decomposed_operations(): return ['lstm_step', 'lstm_loop'] def __init__(self, custom_transforms=None, custom_functions=None, mirror_unsupported=False): _Converter.__init__(self, transforms=self.merge_transforms(_Transforms, 
custom_transforms), functions=custom_functions, mirror_unsupported=mirror_unsupported) def __call__(self, graph): self.fill_data_in_constants(graph) self.convert_variables_to_constants(graph) graph = _Converter.__call__(self, graph) self._fix_inline_constants(graph) return graph def _fix_inline_constants(self, graph): constants = 0 for tensor in graph.tensors: if tensor.name is None: constants += 1 tensor.name = '$' + str(constants) def _make_constant(self, graph, dtype, value, inline): return Tensor(graph, dtype=dtype, shape=self._shape_of(value), data=types.to_numpy(value, dtype=dtype)) def _const_operation(self, output, value): Operation(output.graph, type='Constant', inputs=(), outputs=output, attribs={'value': types.to_numpy(value, dtype=output.dtype)}) def _transform_constant(self, tensor, func): if tensor.producer: data = func(tensor.producer.attribs['value'] if tensor.producer else tensor.data) tensor.shape = data.shape tensor.producer.attribs['value'] = data else: tensor.data = func(tensor.data) tensor.shape = tensor.data.shape def _squeeze_operation(self, input, output, axes): Operation(input.graph, type='Squeeze', inputs=input, outputs=output, attribs={'axes': axes}) def _unsqueeze_operation(self, input, output, axes): Operation(input.graph, type='Unsqueeze', inputs=input, outputs=output, attribs={'axes': axes}) def _interleave(self, items): return [item[0] for item in items] + [item[1] for item in items] def squeeze_input(self, tensor, axes): return self._pre_squeeze(tensor, axes=axes) if len(axes) else tensor def squeeze_output(self, tensor, axes): return self._post_squeeze(tensor, axes=axes) if len(axes) else tensor def unsqueeze_input(self, tensor, axes): return self._pre_unsqueeze(tensor, axes=axes) if len(axes) else tensor def unsqueeze_output(self, tensor, axes): return self._post_unsqueeze(tensor, axes=axes) if len(axes) else tensor def squeeze_vector(self, tensor): if self._is_constant(tensor) and len(self._tensor_map[tensor].consumers) == 1: 
self._transform_constant(tensor, lambda data: np.squeeze(data, 0)) return tensor else: return self.squeeze_input(tensor, axes=[0]) def convert_pads(self, padding, truncate=False): return self._interleave(padding[2:] if truncate else padding) if padding != [] else None def convert_auto_pad(self, padding): return "SAME_UPPER" if padding == [] else "NOTSET" def convert_output_padding(self, input_shape, filter_shape, output_shape, padding, stride, dilation, groups): calculated_shape = deconv_shape(input_shape, filter_shape, padding=padding, stride=stride, dilation=dilation, groups=groups) output_padding = [o - c for c, o in zip(calculated_shape[2:], output_shape[2:])] return output_padding def is_const(self, tensor, value=None): return self._is_constant(self._tensor_map[tensor]) and value is None or self.as_const(tensor) == value def broadcast(self, tensor, rank): return self.unsqueeze_input(tensor, axes=list(range(tensor.rank, rank))) _Transforms = Converter.unpack_transforms({ ('external', 'constant'): Transform(type=None), ('conv', 'deconv'): Transform( type=('Conv', 'ConvTranspose'), defaults={ 'output_shape': None, }, using={ 'transposed': '!_type_ == "deconv"', 'group': '!groups if groups != 0 else O[0].shape[1] if transposed else I[0].shape[1]', }, cond={ '!I[2].rank != 0 or (is_const(I[2]) and as_const(I[2]) == 0)': 'bias must be constant 0 or of rank 1', }, inputs=( '!I[0]', '!I[1]', '!squeeze_vector(I[2]) if I[2].rank != 0 else None', ), outputs='!O[0]', attribs={ 'auto_pad': '!convert_auto_pad(padding)', 'pads': '!convert_pads(padding)', 'strides': '!stride', 'dilations': '!dilation', 'group': '!group', 'output_shape': '!output_shape if _type_ == "deconv" and output_shape != [] and padding == [] else None', 'output_padding': '!convert_output_padding(I[0].shape, I[1].shape, output_shape, padding=padding, ' 'stride=stride, dilation=dilation, groups=group) ' 'if _type_ == "deconv" and output_shape != [] and padding != [] else None', 'kernel_shape': 
'!I[1].shape[2:]', } ), ('max_pool', 'avg_pool', 'lp_pool'): Transform( type=('MaxPool', 'AveragePool', 'LpPool'), cond={ '!size[:2] == [1,1]': 'size must be 1 in batch and channel dimensions', '!stride[:2] == [1,1]': 'stride must be 1 in batch and channel dimensions', '!dilation[:2] == [1,1]': 'dilation must be 1 in batch and channel dimensions', }, inputs='!I[0]', outputs='!O[0]', attribs={ 'kernel_shape': '!size[2:]', 'auto_pad': '!convert_auto_pad(padding)', 'pads': '!convert_pads(padding, truncate=True)', 'strides': '!stride[2:]', 'dilations': '!dilation[2:] if _type_ == "max_pool" and not all(d == 1 for d in dilation[2:]) else None', 'count_include_pad': '!(1 if border == "constant" else 0) if _type_ == "avg_pool" else None', } ), ('min_reduce', 'max_reduce', 'mean_reduce', 'sum_reduce'): Transform( type=('ReduceMin', 'ReduceMax', 'ReduceMean', 'ReduceSum'), inputs='!I[0]', outputs='!O[0]', attribs={ 'axes': '!axes', 'keepdims': 1, } ), 'lp_reduce': Transform( type='!"ReduceL1" if p == 1 else "ReduceL2"', cond={ '!p == 1 or p == 2': 'p must be 1 or 2', }, inputs='!I[0]', outputs='!O[0]', attribs={ 'axes': '!axes', 'keepdims': 1, } ), ('argmin_reduce', 'argmax_reduce'): Transform( type=('ArgMin', 'ArgMax'), cond={ '!len(axes) == 1': 'axes must be of length 1', }, inputs='!I[0]', outputs='!O[0]', attribs={ 'axis': '!axes[0]', 'keepdims': 1, } ), 'batch_normalization': Transform( type='BatchNormalization', inputs=( '!I[0]', '!squeeze_vector(I[4])', '!squeeze_vector(I[3])', '!squeeze_vector(I[1])', '!squeeze_vector(I[2])', ), outputs='!O[0]', attribs={ 'epsilon': '!epsilon', 'spatial': '!0 if I[1].rank == I[0].rank else None', } ), ('relu', 'sigmoid', 'tanh', 'softplus', 'selu', 'not', 'copy', 'elu', 'erf', 'mish', 'abs', 'sign', 'sin', 'cos', 'tan', 'asin', 'acos', 'atan', 'sinh', 'cosh', 'tanh', 'asinh', 'acosh', 'atanh', 'exp', 'log', 'neg', 'sqrt', 'ceil', 'floor', 'round'): Transform( type=('Relu', 'Sigmoid', 'Tanh', 'Softplus', 'Selu', 'Not', 'Identity', 
'Elu', 'Erf', 'Mish', 'Abs', 'Sign', 'Sin', 'Cos', 'Tan', 'Asin', 'Acos', 'Atan', 'Sinh', 'Cosh', 'Tanh', 'Asinh', 'Acosh', 'Atanh', 'Exp', 'Log', 'Neg', 'Sqrt', 'Ceil', 'Floor', 'Round'), inputs='!I[0]', outputs='!O[0]', ), ('add', 'sub', 'mul', 'div', 'pow', 'min', 'max', 'and', 'or', 'eq', 'lt', 'gt', 'le', 'ge'): Transform( type=('Add', 'Sub', 'Mul', 'Div', 'Pow', 'Min', 'Max', 'And', 'Or', 'Equal', 'Less', 'Greater', 'LessOrEqual', 'GreaterOrEqual'), inputs=( '!broadcast(I[0], O[0].rank)', '!broadcast(I[1], O[0].rank)', ), outputs='!O[0]', ), 'sqr': Transform( type='Mul', inputs=('!I[0]', '!I[0]'), outputs='!O[0]', ), 'leaky_relu': Transform( type='LeakyRelu', inputs='!I[0]', outputs='!O[0]', attribs={ 'alpha': '!alpha', } ), 'prelu': Transform( type='PRelu', inputs=( '!I[0]', '!broadcast(I[1], I[0].rank)', ), outputs='!O[0]', ), 'transpose': Transform( type='Transpose', inputs='!I[0]', outputs='!O[0]', attribs={ 'perm': '!axes', } ), 'reshape': Transform( type='Reshape', inputs=( '!I[0]', '!as_tensor(shape, np.int64)', ), outputs='!O[0]', ), 'squeeze': Transform( type='Squeeze', inputs='!I[0]', outputs='!O[0]', attribs={ 'axes': '!axes', } ), 'unsqueeze': Transform( type='Unsqueeze', inputs='!I[0]', outputs='!O[0]', attribs={ 'axes': '!axes', } ), 'matmul': Transform( using={ 'transposed': '!transposeA or transposeB', }, type="!'Gemm' if transposed else 'MatMul'", inputs=('!I[0]', '!I[1]'), outputs='!O[0]', attribs={ 'transA': '!int(transposeA) if transposed else None', 'transB': '!int(transposeB) if transposed else None', } ), 'linear': Transform( type='Gemm', inputs=( '!I[0]', '!I[1]', '!squeeze_vector(I[2])', ), outputs='!O[0]', attribs={ 'transA': 0, 'transB': 1, } ), 'local_response_normalization': Transform( type='LRN', cond={ '!size[0] == 1 and all(s == 1 for s in size[2:])': 'size must be 1 in all non-channel dimensions', }, inputs='!I[0]', outputs='!O[0]', attribs={ 'alpha': '!alpha', 'beta': '!beta', 'bias': '!bias', 'size': '!size[1]', } ), 
'concat': Transform( type='Concat', inputs=['!I[:]'], outputs='!O[0]', attribs={ 'axis': '!axis', } ), 'split': Transform( type='Split', using={ 'factor': '!I[0].shape[axis] // sum(ratios)', }, inputs='!I[0]', outputs=['!O[:]'], attribs={ 'axis': '!axis', 'split': '![r * factor for r in ratios]', } ), 'softmax': Transform( type='Softmax', cond={ '!len(axes) == 1': 'axes must be of length 1', }, inputs='!I[0]', outputs='!O[0]', attribs={ 'axis': '!axes[0]', } ), 'add_n': Transform( type='Sum', inputs=['!I[:]'], outputs='!O[0]', ), 'select': Transform( type='Where', inputs=( '!broadcast(I[0], O[0].rank)', '!broadcast(I[1], O[0].rank)', '!broadcast(I[2], O[0].rank)', ), outputs='!O[0]', ), 'clamp': Transform( type='Clip', cond={ '!I[1].rank == 0': 'input a must be of rank 0', '!I[2].rank == 0': 'input b must be of rank 0', }, inputs=( '!I[0]', '!I[1]', '!I[2]', ), outputs='!O[0]', ), 'pad': Transform( type='Pad', inputs=( '!I[0]', '!as_tensor(convert_pads(padding), np.int64)', '!as_tensor(value, np.float32)', ), outputs='!O[0]', attribs={ 'mode': '!"edge" if border == "replicate" else border', } ), 'tile': Transform( type='Tile', inputs=( '!I[0]', '!as_tensor(repeats, np.int64)', ), outputs='!O[0]', ), 'slice': Transform( type='Slice', inputs=( '!I[0]', '!as_tensor(begin, np.int64)', '!as_tensor(end, np.int64)', '!as_tensor(axes, np.int64)', '!as_tensor(stride, np.int64)', ), outputs='!O[0]', ), ('l1_normalization', 'l2_normalization'): Transform( type='LpNormalization', cond={ '!len(axes) == 1': 'axes must be of length 1', }, inputs='!I[0]', outputs='!O[0]', attribs={ 'axis': '!axes[0]', 'p': '!1 if _type_ == "l1_normalization" else 2', } ), 'mean_variance_normalization': Transform( type='!"InstanceNormalization" if instance else "MeanVarianceNormalization"', using={ 'instance': '!axes == list(range(2, I[0].rank))' ' and I[1].rank == 2 and I[1].shape[0] == 1' ' and I[2].rank == 2 and I[2].shape[0] == 1', }, cond={ '!is_const(scale, 1.0) if not instance else True': 
'scale must be 1 if operation does not denote instance normalization', '!is_const(offset, 0.0) if not instance else True': 'offset must be 0 if operation does not denote instance normalization', }, inputs=( '!I[0]', '!squeeze_vector(I[1]) if instance else None', '!squeeze_vector(I[2]) if instance else None', ), outputs='!O[0]', attribs={ 'axes': '!axes if not instance else None', 'epsilon': '!epsilon if instance else None', } ), ('nearest_upsample', 'multilinear_upsample'): Transform( type='Resize', using={ 'linear': '!_type_ == "multilinear_upsample"', }, inputs=( '!I[0]', '!as_tensor([], np.float32)', '!as_tensor([1.0, 1.0] + [float(f) for f in factor], np.float32)', ), outputs='!O[0]', attribs={ 'mode': '!"linear" if linear else "nearest"', 'coordinate_transformation_mode': '!("half_pixel" if method == "symmetric" else' ' "asymmetric" if method == "asymmetric" else' ' "align_corners") if linear else None', } ), 'nearest_downsample': Transform( type='Resize', inputs=( '!I[0]', '!as_tensor([], np.float32)', '!as_tensor([1.0, 1.0] + [1.0 / f for f in factor], np.float32)', ), outputs='!O[0]', attribs={ 'mode': 'nearest', } ), 'gather': Transform( type='Gather', inputs=('!I[0]', '!I[1]'), outputs='!O[0]', attribs={ 'axis': '!axis', }, ), 'cast': Transform( type='Cast', inputs='!I[0]', outputs='!O[0]', attribs={ 'to': '!O[0].dtype', } ), 'depth_to_space': Transform( type="DepthToSpace", inputs='!I[0]', outputs='!O[0]', attribs={ 'blocksize': '!block_size', 'mode': '!"DCR" if blocks_first else "CRD"', }, ), 'space_to_depth': Transform( type="SpaceToDepth", inputs='!I[0]', outputs='!O[0]', attribs={ 'blocksize': '!block_size', }, ), }) ================================================ FILE: nnef_tools-pyproject/nnef_tools/conversion/nnef_to_tf.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import division, print_function, absolute_import from .converter import ConverterFromNNEF as _Converter, Transform from ..model import Tensor, Operation from ..model.utils import generate_op_names_from_op_type from ..utils import types import numpy as np import copy class Converter(_Converter): @staticmethod def defined_shapes(): return { 'relu6': lambda shape: shape, } @staticmethod def decomposed_operations(): return _Converter.decomposed_operations() + ['linear'] def __init__(self, data_format='NXC', io_transpose=False, custom_transforms=None, custom_functions=None, mirror_unsupported=False): _Converter.__init__(self, transforms=self.merge_transforms(_Transforms, custom_transforms), functions=custom_functions, mirror_unsupported=mirror_unsupported) self._data_format = data_format self._io_transpose = io_transpose def __call__(self, graph): self.convert_variables_to_constants(graph) graph = _Converter.__call__(self, graph) self._fix_output_transposes(graph) self._remove_unused_constants(graph) generate_op_names_from_op_type(graph) return graph def _global_attribs(self): return {'_lite_': False} def _prepare(self, graph): self._fix_inline_constants(graph) def _fix_inline_constants(self, graph): for tensor in graph.tensors: mapped = self._tensor_map[tensor] if not mapped.producer and mapped.data is not None: self._const_operation(tensor, tensor.data) def _remove_unused_constants(self, graph): ops = [op for op in graph.operations if op.type == 'Const' and not op.output.has_consumer] tensors = [op.output for op in ops] graph.outputs = [tensor for 
tensor in graph.outputs if tensor not in tensors] graph.remove_operations(ops, unlink=True) graph.remove_tensors(tensors) def _fix_output_transposes(self, graph): graph.outputs = [self.transpose_input(tensor) if self.needs_io_transpose(tensor) else self.undo_transpose(tensor) for tensor in graph.outputs] def _const_operation(self, output, value): Operation(output.graph, type='Const', inputs=(), outputs=output, attribs={'value': types.to_numpy(value, dtype=output.dtype), 'dtype': output.dtype}) def _transpose_operation(self, input, output, perm): Operation(input.graph, type='Transpose', inputs=(input, self.as_tensor(perm, np.int32)), outputs=output, attribs={'T': input.dtype}) def _reshape_operation(self, input, output, shape): Operation(input.graph, type='Reshape', inputs=(input, self.as_tensor(shape, np.int32)), outputs=output, attribs={'T': input.dtype}) def _squeeze_operation(self, input, output, axes): Operation(input.graph, type='Squeeze', inputs=input, outputs=output, attribs={'squeeze_dims': axes, 'T': input.dtype}) def _unsqueeze_operation(self, input, output, axes): if len(axes) == 1: Operation(input.graph, type='ExpandDims', inputs=(input, self.as_tensor(axes[0], np.int32)), outputs=output, attribs={'T': input.dtype}) else: Operation(input.graph, type='Reshape', inputs=(input, self.as_tensor(output.shape, np.int32)), outputs=output, attribs={'T': input.dtype}) def _scale_operation(self, input, output, scalar): if not isinstance(scalar, Tensor): scalar = self.as_tensor(scalar, np.float32) Operation(input.graph, type='Mul', inputs=(input, scalar), outputs=output, attribs={'T': input.dtype}) def _bias_operation(self, input, output, bias): if not isinstance(bias, Tensor): bias = self.as_tensor(bias, np.float32) if bias.rank == 1: Operation(output.graph, type='BiasAdd', inputs=(input, bias), outputs=output, attribs={'T': output.dtype}) else: Operation(output.graph, type='Add', inputs=(input, bias), outputs=output, attribs={'T': output.dtype}) def 
_make_constant(self, graph, dtype, value, inline): tensor = Tensor(graph, dtype=dtype, shape=self._shape_of(value)) self._const_operation(tensor, value) return tensor def _transform_constant(self, tensor, func): data = func(tensor.producer.attribs['value']) tensor.shape = data.shape tensor.producer.attribs['value'] = data def _is_conv_filter(self, tensor, groups): tensor = self._tensor_map.get(tensor) return tensor and len(tensor.consumers) > 0 and \ all(op.type == 'conv' and op.inputs[1] is tensor and op.attribs['groups'] == groups for op in tensor.consumers) def _ensure_constant_producer(self, tensor): if tensor.is_constant and tensor.producer is None: Operation(tensor.graph, type='Const', inputs=(), outputs=tensor, attribs={'value': tensor.data, 'dtype': tensor.data.dtype.type}) def _is_nxc(self, format): return format[0] == 'N' and format[-1] == 'C' and len(format) > 2 def _is_xcn(self, format): return format[-2] == 'C' and format[-1] == 'N' and len(format) > 2 def _is_cxn(self, format): return format[0] == 'C' and format[-1] == 'N' and len(format) > 2 def needs_io_transpose(self, tensor): if tensor.rank <= 2: return False if isinstance(self._io_transpose, bool): return self._io_transpose else: return tensor.name in self._io_transpose def is_nxc(self): return self._is_nxc(self._data_format) def data_format(self, rank): X = 'W' if rank == 1 else 'HW' if rank == 2 else 'DHW' if rank == 3 else None return self._data_format.replace('X', X) if X else self._data_format def convert_padding(self, value): return 'SAME' if value == [] else 'VALID' if all(item == (0, 0) for item in value) else 'EXPLICIT' def convert_explicit_paddings(self, value): if value == [] or all(item == (0, 0) for item in value): return None else: paddings = [item for pair in value for item in pair] return [0, 0] + paddings + [0, 0] if self.is_nxc() else [0, 0, 0, 0] + paddings def convert_size(self, value): if isinstance(value, tuple): return (1,) + value + (1,) if self.is_nxc() else (1, 1) + 
value[2:] else: return [1] + value + [1] if self.is_nxc() else [1, 1] + value[2:] def transpose_input(self, tensor): if self.is_nxc(): return self._pre_transpose(tensor, self.ncx_to_nxc_perm(tensor.rank)) \ if not self.transposing(tensor) and tensor.rank > 2 else tensor else: assert not self.transposing(tensor) return tensor def transpose_output(self, tensor): if self.is_nxc(): self._transposes[tensor] = self.ncx_to_nxc(tensor.shape) return tensor def transpose_filter(self, tensor, format='XCN'): if self._is_xcn(format): perm = self.ncx_to_xcn_perm(tensor.rank) elif self._is_nxc(format): perm = self.ncx_to_nxc_perm(tensor.rank) elif self._is_cxn(format): perm = self.ncx_to_cxn_perm(tensor.rank) else: assert False return self._pre_transpose(tensor, perm) def transpose_depthwise_filter(self, tensor, channels, format='XCN'): if self._is_xcn(format): perm = self.ncx_to_xcn_perm(tensor.rank) elif self._is_nxc(format): perm = self.ncx_to_nxc_perm(tensor.rank) elif self._is_cxn(format): perm = self.ncx_to_cxn_perm(tensor.rank) else: assert False shape = tensor.shape[2:] + (channels, tensor.shape[0] // channels) return self._reshape(self._pre_transpose(tensor, perm), shape) def transpose_like(self, tensor, reference): if self.transposing(reference): self.transpose_output(tensor) return tensor def transpose_list_like(self, items, ref): return self.ncx_to_nxc(items) if self.transposing(ref) else items def transpose_axis_like(self, axis, ref, rank=None): return self.axis_ncx_to_nxc(axis, rank or ref.rank) if self.transposing(ref) else axis def undo_transpose(self, tensor): perm = self.nxc_to_ncx_perm(tensor.rank) if perm == list(range(tensor.rank)): return tensor return self._pre_transpose(tensor, perm) if self.transposing(tensor) else tensor def squeeze_input(self, tensor, axes): return self._pre_squeeze(tensor, axes=axes) def squeeze_output(self, tensor, axes): return self._post_squeeze(tensor, axes=axes) def unsqueeze_input(self, tensor, axes): return 
self._pre_unsqueeze(tensor, axes=axes) def unsqueeze_output(self, tensor, axes): return self._post_unsqueeze(tensor, axes=axes) def squeeze_vector(self, tensor): if self._is_constant(tensor) and len(self._tensor_map[tensor].consumers) == 1: self._transform_constant(tensor, lambda data: np.squeeze(data, 0)) return tensor else: return self.squeeze_input(tensor, axes=[0]) def scale_output(self, output, scalar): input = Tensor(output.graph, dtype=output.dtype, shape=self._working_shape(output), quant=copy.deepcopy(output.quant)) self._scale_operation(input, output, scalar) return input def bias_add(self, output, bias): if bias.rank == 0 and np.all(bias.data == 0): return output input = Tensor(output.graph, dtype=output.dtype, shape=self._working_shape(output), quant=copy.deepcopy(output.quant)) self._bias_operation(input, output, bias) return input def split_sizes(self, ratios, size): p = size / sum(ratios) return [p * r for r in ratios] def convert_binarg(self, tensor, other): self._ensure_constant_producer(tensor) if tensor.rank == 0: return tensor needs_transpose = self.transposing(other) and not self.transposing(tensor) if other.rank > tensor.rank: if tensor.rank == 2 and tensor.shape[0] == 1 and needs_transpose: return self.squeeze_vector(tensor) tensor = self._pre_unsqueeze(tensor, axes=list(range(tensor.rank, other.rank))) return self.transpose_input(tensor) if needs_transpose else tensor def as_numpy(self, value, dtype=None): return types.to_numpy(value, dtype) def as_bits(self, items): bits = 0 for idx, val in enumerate(items): if val: bits |= (1 << idx) return bits def out_of_range(self, x, limit): return x >= limit or x <= -limit _Transforms = Converter.unpack_transforms({ 'external': Transform( type='Placeholder', using={'needs_transpose': '!needs_io_transpose(O[0])'}, outputs='!transpose_output(O[0]) if needs_transpose else O[0]', attribs={ 'shape': '!tuple(ncx_to_nxc(shape) if needs_transpose else shape)', 'dtype': '!dtype', } ), 'constant': Transform( 
type='Const', outputs='!O[0]', attribs={ 'dtype': '!O[0].dtype', 'value': '!value if isinstance(value, np.ndarray) else as_numpy(value[0] if shape == [] else value)', } ), 'conv': Transform( type='!"Conv{n}D".format(n=I[0].rank - 2) if groups != 0 else "DepthwiseConv2dNative"', cond={ '!I[0].rank == 4 or I[0].rank == 5': 'rank must be 4 or 5', }, using={ 'channels': '!I[0].shape[1]', }, inputs=( '!transpose_input(I[0])', '!transpose_filter(I[1]) if groups != 0 else transpose_depthwise_filter(I[1], channels)', ), outputs=( '!bias_add(transpose_output(O[0]), squeeze_vector(I[2]) if I[2].rank == 2 else I[2])', ), attribs={ 'padding': '!convert_padding(padding)', 'explicit_paddings': '!convert_explicit_paddings(padding)', 'strides': '!convert_size(stride)', 'dilations': '!convert_size(dilation)', 'data_format': '!data_format(I[0].rank - 2)', 'T': '!I[0].dtype', } ), 'deconv': Transform( type='!"Conv{n}DBackpropInput".format(n=I[0].rank - 2) if groups != 0 else "DepthwiseConv2dNativeBackpropInput"', cond={ '!I[0].rank == 4 or I[0].rank == 5': 'rank must be 4 or 5', }, using={ 'channels': '!O[0].shape[1]', }, inputs=( '!as_tensor(ncx_to_nxc(output_shape) if is_nxc() else output_shape, np.int32)', '!transpose_filter(I[1]) if groups != 0 else transpose_depthwise_filter(I[1], channels)', '!transpose_input(I[0])', ), outputs=( '!bias_add(transpose_output(O[0]), squeeze_vector(I[2]) if I[2].rank == 2 else I[2])', ), attribs={ 'padding': '!convert_padding(padding)', 'explicit_paddings': '!convert_explicit_paddings(padding)', 'strides': '!convert_size(stride)', 'dilations': '!convert_size(dilation)', 'data_format': '!data_format(I[0].rank - 2)', 'T': '!I[0].dtype', } ), ('max_pool', 'avg_pool', 'max_pool_with_index'): Transform( type=('MaxPool', 'AvgPool', 'MaxPoolWithArgmax'), cond={ '!border == "ignore"': 'border must be "ignore"', }, inputs=( '!transpose_input(I[0])', ), outputs=( '!transpose_output(O[0])', '!transpose_output(O[1]) if len(O) > 1 else None', ), attribs={ 
'ksize': '!ncx_to_nxc(size) if is_nxc() else size', 'strides': '!ncx_to_nxc(stride) if is_nxc() else stride', 'padding': '!convert_padding(padding)', 'explicit_paddings': '!convert_explicit_paddings(padding)', 'data_format': '!data_format(I[0].rank - 2) if _type_ != "max_pool_with_index" else None', 'T': '!I[0].dtype', } ), 'box': Transform( type='AvgPool', using={'volume': '!int(np.prod(size))'}, inputs=( '!transpose_input(I[0])', ), outputs=( '!scale_output(transpose_output(O[0]), volume) if not normalize else transpose_output(O[0])', ), attribs={ 'ksize': '!ncx_to_nxc(size) if is_nxc() else size', 'strides': '!ncx_to_nxc(stride) if is_nxc() else stride', 'padding': '!convert_padding(padding)', 'explicit_paddings': '!convert_explicit_paddings(padding)', 'data_format': '!data_format(I[0].rank - 2)', 'T': '!I[0].dtype', } ), 'reshape': Transform( type='Reshape', inputs=( '!undo_transpose(I[0])', '!as_tensor(fixed_batch(shape, I[0].shape[0]), np.int32)', ), outputs='!O[0]', attribs={ 'T': '!dtype if not _lite_ else None', } ), 'transpose': Transform( type='Transpose', inputs=( '!I[0]', '!as_tensor(transpose_axis_like(axes, I[0]), np.int32)', ), outputs='!O[0]', attribs={ 'T': '!dtype if not _lite_ else None', } ), 'squeeze': Transform( type='Squeeze', inputs='!undo_transpose(I[0])', outputs='!O[0]', attribs={ 'squeeze_dims': '!axes', 'T': '!dtype if not _lite_ else None', } ), 'unsqueeze': Transform( type='!"ExpandDims" if len(axes) == 1 else "Reshape"', using={ 'new_shape': '!unsqueeze_shape(I[0].shape, axes)', }, inputs=( '!undo_transpose(I[0])', '!as_tensor(axes if len(axes) == 1 else new_shape, np.int32)', ), outputs='!O[0]', attribs={ 'T': '!dtype if not _lite_ else None', 'new_shape': '!new_shape if _lite_ else None', } ), 'stack': Transform( type='Pack', inputs=['![undo_transpose(t) for t in I]'], outputs='!O[0]', attribs={ 'axis': '!axis', 'N': '!len(I) if not _lite_ else None', 'values_count': '!len(I) if _lite_ else None', 'T': '!dtype if not _lite_ else 
None', } ), 'unstack': Transform( type='Unpack', inputs='!undo_transpose(I[0])', outputs=['!O[:]'], attribs={ 'axis': '!axis', 'num': '!len(O)', 'T': '!dtype if not _lite_ else None', } ), ('min_reduce', 'max_reduce', 'mean_reduce', 'sum_reduce', 'any_reduce', 'all_reduce'): Transform( type=('Min', 'Max', 'Mean', 'Sum', 'Any', 'All'), using={'dims': '!transpose_axis_like(axes, I[0])'}, inputs=( '!I[0]', '!as_tensor(dims, np.int32)', ), outputs='!transpose_like(O[0], I[0])', attribs={ 'keep_dims': True, 'T': '!I[0].dtype if I[0].dtype != bool and not _lite_ else None', } ), 'concat': Transform( type='Concat', using={ 'dim': '!transpose_axis_like(axis, I[0])' }, inputs=['!as_tensor(dim, np.int32)', '!I[:]'], outputs='!transpose_like(O[0], I[0])', attribs={ 'N': '!len(I)', 'T': '!O[0].dtype if not _lite_ else None', } ), 'split': Transform( type='SplitV', using={ 'dim': '!transpose_axis_like(axis, I[0])' }, inputs=( '!I[0]', '!as_tensor(split_sizes(ratios, I[0].shape[axis]), np.int64)', '!as_tensor(dim, np.int32)', ), outputs=['![transpose_like(O[i], I[0]) for i in range(len(O))]'], attribs={ 'num_split': '!len(ratios) if not _lite_ else None', 'num_splits': '!len(ratios) if _lite_ else None', 'T': '!I[0].dtype if not _lite_ else None', } ), ('add', 'sub', 'mul', 'div', 'pow', 'lt', 'gt', 'le', 'ge', 'eq', 'ne', 'min', 'max', 'and', 'or'): Transform( type=('Add', 'Sub', 'Mul', 'RealDiv', 'Pow', 'Less', 'Greater', 'LessEqual', 'GreaterEqual', 'Equal', 'NotEqual', 'Minimum', 'Maximum', 'LogicalAnd', 'LogicalOr'), inputs=( '!convert_binarg(I[0], I[1])', '!convert_binarg(I[1], I[0])', ), outputs='!transpose_output(O[0]) if transposing(I[0]) or transposing(I[1]) else O[0]', attribs={ 'T': '!I[0].dtype if I[0].dtype != bool and not _lite_ else None', } ), ('copy', 'relu', 'relu6', 'elu', 'selu', 'gelu', 'silu', 'sigmoid', 'softplus', 'exp', 'log', 'sin', 'cos', 'tan', 'asin', 'acos', 'atan', 'sinh', 'cosh', 'tanh', 'asinh', 'acosh', 'atanh', 'neg', 'rcp', 'sign', 'abs', 
'floor', 'ceil', 'round', 'sqr', 'sqrt', 'rsqrt', 'not'): Transform( type=('Identity', 'Relu', 'Relu6', 'Elu', 'Selu', 'Gelu', 'Silu', 'Sigmoid', 'Softplus', 'Exp', 'Log', 'Sin', 'Cos', 'Tan', 'Asin', 'Acos', 'Atan', 'Sinh', 'Cosh', 'Tanh', 'Asinh', 'Acosh', 'Atanh', 'Neg', 'Reciprocal', 'Sign', 'Abs', 'Floor', 'Ceil', 'Round', 'Square', 'Sqrt', 'Rsqrt', 'LogicalNot'), inputs='!I[0]', outputs='!transpose_like(O[0], I[0])', attribs={ 'T': '!I[0].dtype if I[0].dtype != bool and not _lite_ else None', } ), 'leaky_relu': Transform( type='LeakyRelu', inputs='!I[0]', outputs='!transpose_like(O[0], I[0])', attribs={ 'alpha': '!alpha', 'T': '!I[0].dtype if not _lite_ else None', } ), 'batch_normalization': Transform( type='FusedBatchNorm', using={ 'channels': '!O[0].shape[1]' }, inputs=( '!transpose_input(I[0])', '!squeeze_vector(I[4])', '!squeeze_vector(I[3])', '!squeeze_vector(I[1])', '!squeeze_vector(I[2])', ), outputs=( '!transpose_output(O[0])', '!new_tensor(shape=(channels,), dtype=O[0].dtype)', '!new_tensor(shape=(channels,), dtype=O[0].dtype)', '!new_tensor(shape=(channels,), dtype=O[0].dtype)', '!new_tensor(shape=(channels,), dtype=O[0].dtype)', ), attribs={ 'epsilon': '!epsilon', 'data_format': '!data_format(I[0].rank - 2)', 'T': '!I[0].dtype if not _lite_ else None', 'is_training': False, } ), 'softmax': Transform( type='Softmax', cond={ '!axes == [1])': 'axes must equal channel dimension (1)', }, inputs='!transpose_input(I[0])', outputs='!transpose_output(O[0])', attribs={ 'T': '!I[0].dtype if not _lite_ else None', 'beta': '!1.0 if _lite_ else None', } ), 'matmul': Transform( type='MatMul', inputs=('!I[0]', '!I[1]'), outputs='!O[0]', attribs={ 'transpose_a': '!transposeA', 'transpose_b': '!transposeB', 'T': '!I[0].dtype if not _lite_ else None', }, ), 'clamp': Transform( type='ClipByValue', inputs=('!I[0]', '!I[1]', '!I[2]'), outputs='!transpose_like(O[0], I[0])', attribs={'T': '!I[0].dtype if not _lite_ else None'}, ), 'pad': Transform( type='!"Pad" if border 
== "constant" else "MirrorPad"', cond={ '!border in ["constant", "reflect", "reflect-even"]': 'border must be one of "constant", "reflect", "reflect-even"', }, using={'paddings': '![list(item) for item in padding]'}, inputs=( '!I[0]', '!as_tensor(ncx_to_nxc(paddings, cond=transposing(I[0])), np.int32)', ), outputs='!transpose_like(O[0], I[0])', attribs={ 'T': '!I[0].dtype if not _lite_ else None', 'mode': '!"REFLECT" if border == "reflect" else "SYMMETRIC" if border == "reflect-even" else None', }, ), 'tile': Transform( type='Tile', inputs=( '!I[0]', '!as_tensor(transpose_list_like(repeats, I[0]), np.int32)', ), outputs='!transpose_like(O[0], I[0])', attribs={ 'T': '!I[0].dtype if not _lite_ else None', }, ), 'slice': Transform( type='StridedSlice', using={ 'dims': '!ncx_to_nxc(list(range(I[0].rank)), cond=transposing(I[0]))', 'axis': '!ncx_to_nxc(axes, cond=transposing(I[0]))', 'begs': '!ncx_to_nxc(begin, cond=transposing(I[0]))', 'ends': '!ncx_to_nxc(end, cond=transposing(I[0]))', 'strs': '!ncx_to_nxc(stride, cond=transposing(I[0]))', }, inputs=( '!I[0]', '!as_tensor([begs[axis.index(i)] if i in axis else 0 for i in dims], np.int32)', '!as_tensor([ends[axis.index(i)] if i in axis else 0 for i in dims], np.int32)', '!as_tensor([strs[axis.index(i)] if i in axis else 1 for i in dims], np.int32)', ), outputs='!transpose_like(O[0], I[0])', attribs={ 'T': '!I[0].dtype if not _lite_ else None', 'Index': '!np.int32', 'begin_mask': '!as_bits([1 if i not in axis or out_of_range(begs[axis.index(i)], I[0].shape[i]) else 0' ' for i in dims])', 'end_mask': '!as_bits([1 if i not in axis or (ends[axis.index(i)] == 0 and strs[axis.index(i)] == 1)' ' or out_of_range(ends[axis.index(i)], I[0].shape[i]) else 0 for i in dims])', 'ellipsis_mask': 0, 'new_axis_mask': 0, 'shrink_axis_mask': 0, }, ), ('argmin_reduce', 'argmax_reduce'): Transform( type=('ArgMin', 'ArgMax'), cond={ '!len(axes) == 1': 'axes must be of length 1', }, using={'axis': '!transpose_axis_like(axes[0], ref=I[0])'}, 
inputs=( '!I[0]', '!as_tensor(axis, np.int32)', ), outputs='!transpose_like(unsqueeze_output(O[0], axes) if not _lite_ else O[0], I[0])', attribs={ 'T': '!I[0].dtype if not _lite_ else None', 'output_type': '!O[0].dtype', } ), 'select': Transform( type='Select', inputs=( '!I[0]', '!convert_binarg(I[1], I[0])', '!convert_binarg(I[2], I[0])', ), outputs='!transpose_like(O[0], I[0])', attribs={ 'T': '!I[1].dtype if not _lite_ else None', } ), 'nearest_upsample': Transform( type='ResizeNearestNeighbor', using={ 'size': '!I[0].shape[2:]' }, inputs=( '!transpose_input(I[0])', '!as_tensor([s * f for s, f in zip(size, factor)], np.int32)', ), outputs='!transpose_output(O[0])', attribs={ 'T': '!I[0].dtype if not _lite_ else None', 'align_corners': False, 'half_pixel_centers': False, } ), 'multilinear_upsample': Transform( type='ResizeBilinear', using={ 'size': '!I[0].shape[2:]' }, inputs=( '!transpose_input(I[0])', '!as_tensor([s * f for s, f in zip(size, factor)], np.int32)', ), outputs='!transpose_output(O[0])', attribs={ 'T': '!I[0].dtype if not _lite_ else None', 'align_corners': '!method == "aligned"', 'half_pixel_centers': '!method == "symmetric"', } ), ('nearest_downsample', 'area_downsample'): Transform( type=('ResizeNearestNeighbor', 'ResizeArea'), using={'size': '!I[0].shape[2:]'}, inputs=( '!transpose_input(I[0])', '!as_tensor([s // f for s, f in zip(size, factor)], np.int32)', ), outputs='!transpose_output(O[0])', attribs={ 'T': '!I[0].dtype if not _lite_ else None', 'align_corners': False, 'half_pixel_centers': '!False if _type_ == "nearest_downsample" else None', } ), 'local_response_normalization': Transform( type='LRN', cond={ '!all(s == 1 or i == 1 for i, s in enumerate(size))': 'size must be singular for all non-channel dimensions', }, inputs='!I[0]', outputs='!transpose_like(O[0], I[0])', attribs={ 'depth_radius': '!size[1] // 2 if not _lite_ else None', 'radius': '!size[1] // 2 if _lite_ else None', 'alpha': '!alpha / size[1]', 'beta': '!beta', 'bias': 
'!bias', } ), 'add_n': Transform( type='AddN', inputs=['!I[:]'], outputs='!transpose_like(O[0], I[0])', attribs={ 'T': '!O[0].dtype', 'N': '!len(I)', }, ), 'cast': Transform( type='Cast', inputs='!I[0]', outputs='!transpose_like(O[0], I[0])', attribs={ 'SrcT': '!I[0].dtype', 'DstT': '!O[0].dtype', }, ), 'gather': Transform( type='GatherV2', inputs=( '!I[0]', '!I[1]', '!as_tensor(transpose_axis_like(axis, I[0]), np.int32)', ), outputs='!transpose_like(O[0], I[0])', attribs={ 'Tparams': '!I[0].dtype', 'Tindices': '!I[1].dtype', 'Taxis': np.int32, }, ), }) ================================================ FILE: nnef_tools-pyproject/nnef_tools/conversion/nnef_to_tflite.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import division, print_function, absolute_import from .converter import ConverterFromNNEF as _Converter, Transform, ConversionError from .nnef_to_tf import Converter as _TFConverter, _Transforms as _TFTransforms from ..model import Tensor, Operation from ..model.utils import generate_tensor_names_from_op_type from ..io.tf.lite import CustomOptionsKey import numpy as np import copy def tflite_detection_postprocess_shape(input, scores, anchors, **kwargs): return [], [], [], [] class Converter(_TFConverter): @staticmethod def defined_shapes(): return { 'relu6': lambda shape: shape, 'TFLite_Detection_PostProcess': tflite_detection_postprocess_shape, } @staticmethod def decomposed_operations(): return _Converter.decomposed_operations() def __init__(self, io_transpose=False, custom_transforms=None, custom_functions=None, mirror_unsupported=False): _Converter.__init__(self, transforms=self.merge_transforms(_Transforms, custom_transforms), functions=custom_functions, mirror_unsupported=mirror_unsupported) self._data_format = 'NXC' self._io_transpose = io_transpose def __call__(self, graph): graph = _TFConverter.__call__(self, graph) self._generate_tensor_names(graph) self._fix_custom_options(graph) return graph def _global_attribs(self): return {'_lite_': True} def _prepare(self, graph): self._fix_quantized_dtypes(graph) self._fix_quantization_attribs(graph) self._transpose_externals(graph) def _transpose_externals(self, graph): for tensor in graph.tensors: mapped = self._tensor_map[tensor] if mapped.producer and mapped.producer.type == 'external' and self.needs_io_transpose(tensor): self._transposes[tensor] = self.ncx_to_nxc(tensor.shape) def _generate_tensor_names(self, graph): generate_tensor_names_from_op_type(graph) placeholders = 0 constants = 0 for tensor in graph.tensors: if tensor.name is None: if tensor.data is None: placeholders += 1 tensor.name = 'PLACEHOLDER' + str(placeholders) else: constants += 1 tensor.name = 'CONSTANT' + str(constants) def 
_fix_quantized_dtypes(self, graph): for tensor in graph.tensors: if tensor.quant and tensor.dtype == np.float32: bits = tensor.quant['bits'] signed = tensor.quant['signed'] assert bits == 8 or bits == 32 tensor.dtype = (np.int8 if signed else np.uint8) if bits == 8 else (np.int32 if signed else np.uint32) def _fix_quantization_attribs(self, graph): for tensor in graph.tensors: if tensor.quant: opname = tensor.quant['op-name'] if opname != 'zero_point_linear_quantize': raise ConversionError("Quantization operation '{}' cannot be converted to TFLite") del tensor.quant['op-name'] del tensor.quant['bits'] if 'signed' in tensor.quant: del tensor.quant['signed'] if 'symmetric' in tensor.quant: del tensor.quant['symmetric'] def _fix_custom_options(self, graph): for op in graph.operations: if op.custom: options = op.attribs.get(CustomOptionsKey) if options is not None: op.attribs[CustomOptionsKey] = bytes.fromhex(options) def _make_constant(self, graph, dtype, value, inline): return Tensor(graph, dtype=dtype, shape=self._shape_of(value), data=value) def _ensure_constant_producer(self, tensor): pass def _transform_constant(self, tensor, func): data = func(tensor.data) tensor.shape = data.shape tensor.data = data def _squeeze_operation(self, input, output, axes): Operation(input.graph, type='SQUEEZE', inputs=input, outputs=output, attribs={'squeeze_dims': axes}) def _unsqueeze_operation(self, input, output, axes): if len(axes) == 1: Operation(input.graph, type='EXPAND_DIMS', inputs=(input, self.as_tensor(axes[0], np.int32)), outputs=output) else: Operation(input.graph, type='RESHAPE', inputs=(input, self.as_tensor(output.shape, np.int32)), outputs=output, attribs={'new_shape': output.shape}) def _transpose_operation(self, input, output, perm): Operation(input.graph, type='TRANSPOSE', inputs=(input, self.as_tensor(perm, np.int32)), outputs=output) def _reshape_operation(self, input, output, shape): Operation(input.graph, type='RESHAPE', inputs=(input, self.as_tensor(shape, 
np.int32)), outputs=output, attribs={'new_shape': shape}) def _bias_operation(self, input, output, bias): if not isinstance(bias, Tensor): bias = self.as_tensor(bias, np.float32) Operation(input.graph, type='ADD', inputs=(input, bias), outputs=output) def _scale_operation(self, input, output, scalar): if not isinstance(scalar, Tensor): scalar = self.as_tensor(scalar, np.float32) Operation(input.graph, type='MUL', inputs=(input, scalar), outputs=output) def _pad_operation(self, input, output, paddings): if not isinstance(paddings, Tensor): paddings = self.as_tensor(paddings, np.int64) Operation(input.graph, type='PAD', inputs=(input, paddings), outputs=output, attribs={}) def is_same_padding(self, input_size, output_size, stride): return all(o == i // s for i, o, s in zip(input_size, output_size, stride)) def is_valid_padding(self, padding): return len(padding) != 0 and all(p == (0, 0) for p in padding) def pad_input(self, input, paddings): if all(item == (0, 0) for item in paddings): return input shape = tuple(p + x + q for x, (p, q) in zip(self._working_shape(input), paddings)) output = Tensor(input.graph, dtype=input.dtype, shape=shape, quant=copy.deepcopy(input.quant)) self._pad_operation(input, output, paddings) return output _Transforms = Converter.unpack_transforms({ ('external', 'constant'): Transform(type=None), 'conv': Transform( type='!"CONV_2D" if not depthwise else "DEPTHWISE_CONV_2D"', cond={ '!I[0].rank == 4': 'rank must be 4', }, using={ 'depthwise': '!groups == 0', 'channels': '!I[0].shape[1]', 'valid_pad': '!is_valid_padding(padding)', 'same_pad': '!is_same_padding(I[0].shape[2:], O[0].shape[2:], stride)', 'pads': '![(0, 0)] + padding + [(0, 0)]', }, inputs=( '!transpose_input(I[0]) if same_pad or valid_pad else pad_input(transpose_input(I[0]), pads)', '!transpose_filter(I[1], format="NXC" if not depthwise else "CXN")', '!squeeze_vector(I[2])', ), outputs='!transpose_output(O[0])', attribs={ 'stride_h': '!stride[0]', 'stride_w': '!stride[1]', 
'dilation_h_factor': '!dilation[0]', 'dilation_w_factor': '!dilation[1]', 'padding': '!"VALID" if valid_pad else "SAME"', 'depth_multiplier': '!O[0].shape[1] // channels if depthwise else None', } ), 'deconv': Transform( type='TRANSPOSE_CONV', cond={ '!I[0].rank == 4': 'rank must be 4', '!groups == 1': 'groups must be 1', }, using={ 'depthwise': '!groups == 0', 'channels': '!O[0].shape[1]', 'valid_pad': '!is_valid_padding(padding)', 'same_pad': '!is_same_padding(I[0].shape[2:], O[0].shape[2:], stride)', 'pads': '![(0, 0)] + padding + [(0, 0)]', }, inputs=( '!as_tensor(ncx_to_nxc(output_shape), np.int32)', '!transpose_filter(I[1], format="CXN" if not depthwise else "NXC")', '!transpose_input(I[0]) if same_pad or valid_pad else pad_input(transpose_input(I[0]), pads)', ), outputs='!bias_add(transpose_output(O[0]), squeeze_vector(I[2]) if I[2].rank == 2 else I[2])', attribs={ 'stride_h': '!stride[0]', 'stride_w': '!stride[1]', 'padding': '!"VALID" if valid_pad else "SAME"', 'depth_multiplier': '!I[1].shape[0] // channels if depthwise else None', } ), ('max_pool', 'avg_pool'): Transform( cond={ '!size[0] == 1 and size[1] == 1 and ': 'size must be 1 in batch and channel dimensions', '!stride[0] == 1 and stride[1] == 1': 'stride must be 1 in batch and channel dimensions', '!border == "ignore"': 'border must be "ignore"', }, type=('MAX_POOL_2D', 'AVERAGE_POOL_2D'), using={ 'valid_pad': '!is_valid_padding(padding)', 'same_pad': '!is_same_padding(I[0].shape[2:], O[0].shape[2:], stride[2:])', }, inputs=( '!transpose_input(I[0]) if same_pad or valid_pad else pad_input(transpose_input(I[0]), padding)', ), outputs=( '!transpose_output(O[0])', ), attribs={ 'filter_height': '!size[2]', 'filter_width': '!size[3]', 'stride_h': '!stride[2]', 'stride_w': '!stride[3]', 'padding': '!"VALID" if valid_pad else "SAME"', } ), 'reshape': Transform( type='RESHAPE', using={ 'new_shape': '!fixed_batch(shape, I[0].shape[0])', }, inputs=( '!undo_transpose(I[0])', '!as_tensor(new_shape, 
np.int32)', ), outputs='!O[0]', attribs={ 'new_shape': '!new_shape', } ), 'concat': Transform( type='CONCATENATION', inputs=['!I[:]'], outputs='!transpose_like(O[0], I[0])', attribs={ 'axis': '!transpose_axis_like(axis, I[0])', } ), 'copy': Transform( type='RESHAPE', using={ 'shape': '!transpose_list_like(I[0].shape, I[0])', }, inputs=( '!I[0]', '!as_tensor(shape, np.int32)', ), outputs='!transpose_like(O[0], I[0])', attribs={ 'new_shape': '!shape', } ), 'linear': Transform( type='FULLY_CONNECTED', inputs=( '!I[0]', '!I[1]', '!squeeze_vector(I[2]) if not is_zero(I[2]) else None', ), outputs='!O[0]', attribs={ 'fused_activation_function': "NONE", 'weights_format': "DEFAULT", 'keep_num_dims': True, 'asymmetric_quantize_inputs': False, } ), 'matmul': Transform( type='BATCH_MATMUL', inputs=( '!I[0]', '!I[1]', ), outputs='!O[0]', attribs={ 'adj_x': '!transposeA', 'adj_y': '!transposeB', 'asymmetric_quantize_inputs': False, } ), 'batch_normalization': Transform( type='MUL', cond={ '!I[1].data is not None and I[2].data is not None and' ' (len(I) == 3 or I[3].data is not None) and (len(I) == 4 or I[4].data is not None)': 'all parameters must be constants', '!not any(t.quant for t in I)': 'quantized inputs or parameters are not supported', }, using={ 'mean': '!np.squeeze(I[1].data, axis=0) if I[1].data is not None else None', 'std': '!np.squeeze(np.sqrt(I[2].data + epsilon), axis=0) if I[2].data is not None else None', 'offset': '!np.squeeze(I[3].data, axis=0) if I[3].data is not None else None if len(I) > 3 else 0', 'scale': '!np.squeeze(I[4].data, axis=0) if I[4].data is not None else None if len(I) > 4 else 1', }, inputs=( '!transpose_input(I[0])', '!as_tensor(scale / std, np.float32)', ), outputs='!bias_add(transpose_like(O[0], I[0]), as_tensor(offset - scale * mean / std, np.float32))', ), 'l2_normalization': Transform( type='L2_NORMALIZATION', cond={ '!axes == list(range(I[0].rank))': 'axes must denote all dimensions', }, inputs='!I[0]', outputs='!transpose_like(O[0], 
I[0])', ), 'prelu': Transform( type='PRELU', inputs=('!I[0]', '!I[1]'), outputs='!transpose_like(O[0], I[0])', ), 'pad': Transform( type='!"PAD" if border == "constant" else "MIRROR_PAD"', cond={ '!border in ["constant", "reflect", "reflect-even"]': 'border must be one of "constant", "reflect", "reflect-even"', }, using={'paddings': '![list(item) for item in padding]'}, inputs=( '!I[0]', '!as_tensor(ncx_to_nxc(paddings, cond=transposing(I[0])), np.int32)', ), outputs='!transpose_like(O[0], I[0])', attribs={ 'mode': '!0 if border == "reflect" else 1 if border == "reflect-even" else None', }, ), 'gather': Transform( type='GATHER', inputs=('!I[0]', '!I[1]'), outputs='!transpose_like(O[0], I[0])', attribs={ 'axis': '!transpose_axis_like(axis, I[0])', }, ), 'cast': Transform( type='CAST', inputs='!I[0]', outputs='!transpose_like(O[0], I[0])', attribs={ 'in_data_type': '!I[0].dtype', 'out_data_type': '!O[0].dtype', }, ), # 'copy': _TFTransforms['copy'].with_type('IDENTITY'), # only works in TF 2.3 'transpose': _TFTransforms['transpose'].with_type('TRANSPOSE'), 'split': _TFTransforms['split'].with_type('SPLIT_V'), 'squeeze': _TFTransforms['squeeze'].with_type('SQUEEZE'), 'unsqueeze': _TFTransforms['unsqueeze'].with_type('!"EXPAND_DIMS" if len(axes) == 1 else "RESHAPE"'), 'relu': _TFTransforms['relu'].with_type('RELU'), 'relu6': _TFTransforms['relu6'].with_type('RELU6'), 'elu': _TFTransforms['elu'].with_type('ELU'), 'leaky_relu': _TFTransforms['leaky_relu'].with_type('LEAKY_RELU'), 'sigmoid': _TFTransforms['sigmoid'].with_type('LOGISTIC'), 'sin': _TFTransforms['sin'].with_type('SIN'), 'cos': _TFTransforms['cos'].with_type('COS'), 'tan': _TFTransforms['tan'].with_type('TAN'), 'asin': _TFTransforms['asin'].with_type('ASIN'), 'acos': _TFTransforms['acos'].with_type('ACOS'), 'atan': _TFTransforms['atan'].with_type('ATAN'), 'sinh': _TFTransforms['sinh'].with_type('SINH'), 'cosh': _TFTransforms['cosh'].with_type('COSH'), 'tanh': _TFTransforms['tanh'].with_type('TANH'), 'asinh': 
_TFTransforms['asinh'].with_type('ASINH'), 'acosh': _TFTransforms['acosh'].with_type('ACOSH'), 'atanh': _TFTransforms['atanh'].with_type('ATANH'), 'exp': _TFTransforms['exp'].with_type('EXP'), 'log': _TFTransforms['log'].with_type('LOG'), 'abs': _TFTransforms['abs'].with_type('ABS'), 'neg': _TFTransforms['neg'].with_type('NEG'), 'not': _TFTransforms['not'].with_type('LOGICAL_NOT'), 'floor': _TFTransforms['floor'].with_type('FLOOR'), 'ceil': _TFTransforms['ceil'].with_type('CEIL'), 'round': _TFTransforms['round'].with_type('ROUND'), 'sqr': _TFTransforms['sqr'].with_type('SQUARE'), 'sqrt': _TFTransforms['sqrt'].with_type('SQRT'), 'rsqrt': _TFTransforms['rsqrt'].with_type('RSQRT'), 'add': _TFTransforms['add'].with_type('ADD'), 'sub': _TFTransforms['sub'].with_type('SUB'), 'mul': _TFTransforms['mul'].with_type('MUL'), 'div': _TFTransforms['div'].with_type('DIV'), 'pow': _TFTransforms['pow'].with_type('POW'), 'min': _TFTransforms['min'].with_type('MINIMUM'), 'max': _TFTransforms['max'].with_type('MAXIMUM'), 'and': _TFTransforms['and'].with_type('LOGICAL_AND'), 'or': _TFTransforms['or'].with_type('LOGICAL_OR'), 'lt': _TFTransforms['lt'].with_type('LESS'), 'le': _TFTransforms['le'].with_type('LESS_EQUAL'), 'gt': _TFTransforms['gt'].with_type('GREATER'), 'ge': _TFTransforms['ge'].with_type('GREATER_EQUAL'), 'eq': _TFTransforms['eq'].with_type('EQUAL'), 'ne': _TFTransforms['ne'].with_type('NOT_EQUAL'), 'select': _TFTransforms['select'].with_type('SELECT'), 'min_reduce': _TFTransforms['min_reduce'].with_type('REDUCE_MIN'), 'max_reduce': _TFTransforms['max_reduce'].with_type('REDUCE_MAX'), 'mean_reduce': _TFTransforms['mean_reduce'].with_type('MEAN'), 'sum_reduce': _TFTransforms['sum_reduce'].with_type('SUM'), 'any_reduce': _TFTransforms['any_reduce'].with_type('REDUCE_ANY'), 'all_reduce': _TFTransforms['all_reduce'].with_type('REDUCE_ALL'), 'argmin_reduce': _TFTransforms['argmin_reduce'].with_type('ARG_MIN'), 'argmax_reduce': 
_TFTransforms['argmax_reduce'].with_type('ARG_MAX'), 'stack': _TFTransforms['stack'].with_type('PACK'), 'unstack': _TFTransforms['unstack'].with_type('UNPACK'), 'tile': _TFTransforms['tile'].with_type('TILE'), 'slice': _TFTransforms['slice'].with_type('STRIDED_SLICE'), 'softmax': _TFTransforms['softmax'].with_type('SOFTMAX'), 'local_response_normalization': _TFTransforms['local_response_normalization'].with_type('LOCAL_RESPONSE_NORMALIZATION'), 'nearest_upsample': _TFTransforms['nearest_upsample'].with_type('RESIZE_NEAREST_NEIGHBOR'), 'nearest_downsample': _TFTransforms['nearest_downsample'].with_type('RESIZE_NEAREST_NEIGHBOR'), 'multilinear_upsample': _TFTransforms['multilinear_upsample'].with_type('RESIZE_BILINEAR'), 'add_n': _TFTransforms['add_n'].with_type('ADD_N'), }) ================================================ FILE: nnef_tools-pyproject/nnef_tools/conversion/onnx_to_nnef.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# ======================================================================
# ONNX -> NNEF conversion.
#
# Defines a Converter (built on ConverterToNNEF) plus a declarative
# _Transforms table that maps ONNX operator types to NNEF operations.
# Inside the table, strings beginning with '!' are expressions evaluated
# by the transform machinery imported from .converter, with I[k]/O[k]
# denoting the k-th input/output tensor of the matched ONNX op
# (NOTE(review): the evaluation itself lives outside this file).
# ======================================================================
from __future__ import division, print_function, absolute_import
from .converter import ConverterToNNEF as _Converter, Transform, ConversionError
from ..model.utils import generate_tensor_names_from_op_type
from ..model import Tensor
from ..utils import types
from collections import OrderedDict
import numpy as np
import copy
from nnef.shapes import pool_shape, reduce_shape


# NNEF fragment sources for ONNX operations that have no single NNEF
# counterpart; they get registered via Converter.defined_operations() below.
_LP_POOL_FRAGMENT = """ fragment lp_pool( input: tensor, size: integer[], border: string = 'constant', padding: (integer, integer)[] = [], stride: integer[] = [], dilation: integer[] = [], p: scalar = 2.0 ) -> ( output: tensor ) { powered = pow(abs(input), p); summed = box(powered, size = size, border = border, padding = padding, stride = stride, dilation = dilation); output = pow(summed, 1.0 / p); } """

_LP_REDUCE_FRAGMENT = """ fragment lp_reduce( input: tensor, axes: integer[], p: scalar = 2.0 ) -> ( output: tensor ) { powered = pow(abs(input), p); summed = sum_reduce(powered, axes = axes); output = pow(summed, 1.0 / p); } """

_MEAN_VARIANCE_NORMALIZATION_FRAGMENT = """ fragment mean_variance_normalization( input: tensor, scale: tensor, offset: tensor, axes: integer[], epsilon: scalar = 1e-5 ) -> ( output: tensor ) { mean, variance = moments(input, axes = axes); output = scale * (input - mean) / sqrt(variance + epsilon) + offset; } """

# Single LSTM cell update (gate order i, f, g, o after the split).
_LSTM_STEP_FRAGMENT = """ fragment lstm_step( x: tensor, h: tensor, c: tensor, W: tensor, R: tensor, B: tensor ) -> ( h_out: tensor, c_out: tensor ) { [Wb, Rb] = split(B, axis = 1, ratios = [1, 1]); z = linear(x, W, Wb) + linear(h, R, Rb); [i, f, g, o] = split(z, axis = 1, ratios=[1, 1, 1, 1]); c_out = sigmoid(f) * c + sigmoid(i) * tanh(g); h_out = sigmoid(o) * tanh(c_out); } """

# Recursively unrolls lstm_step over the sequence axis; recursion is
# expressed with a conditional self-invocation terminated at 'steps'.
_LSTM_LOOP_FRAGMENT = """ fragment lstm_loop( X: tensor, W: tensor, R: tensor, B: tensor, h0: tensor, c0: tensor, steps: integer, index: integer = 0, axis: integer = 0 ) -> ( hn: tensor, cn: tensor ) { x0 = squeeze(slice(X, axes = [axis], begin = [index], end = [index + 1]), axes = [axis]); h1, c1 = lstm_step(x0, h0, c0, W, R, B); hn, cn = lstm_loop(X, W, R, B, h1, c1, index = index + 1, steps=steps) if index + 1 < steps else (h1, c1); } """

# Polynomial approximation of the error function (Abramowitz & Stegun
# style rational approximation -- note the hard-coded coefficients).
_ERF_FRAGMENT = """ fragment erf( x: tensor ) -> ( y: tensor ) { t = 1.0 / (1.0 + 0.3275911 * abs(x)); z = 1.0 - (((((1.061405429 * t + -1.453152027) * t) + 1.421413741) * t + -0.284496736) * t + 0.254829592) * t * exp(-x * x); y = sign(x) * z; } """

# mish(x) = x * tanh(softplus(x)), written out explicitly.
_MISH_FRAGMENT = """ fragment mish( x: tensor ) -> ( y: tensor ) { y = x * tanh(log(1.0 + exp(x))); } """

# blocks_first selects between ONNX DCR and CRD element orderings.
_DEPTH_TO_SPACE_FRAGMENT = """ fragment depth_to_space( x: tensor, block_size: integer, blocks_first: logical ) -> ( y: tensor ) { r = reshape(x, axis_start=1, axis_count=1, shape=[block_size, block_size, -1] if blocks_first else [-1, block_size, block_size]); t = transpose(r, axes=[0, 3, 4, 1, 5, 2] if blocks_first else [0, 1, 4, 2, 5, 3]); q = reshape(t, axis_start=4, axis_count=2, shape=[-1]); y = reshape(q, axis_start=2, axis_count=2, shape=[-1]); } """

_SPACE_TO_DEPTH_FRAGMENT = """ fragment space_to_depth( x: tensor, block_size: integer, blocks_first: logical ) -> ( y: tensor ) { p = reshape(x, axis_start=3, axis_count=1, shape=[-1, block_size]); r = reshape(p, axis_start=2, axis_count=1, shape=[-1, block_size]); t = transpose(r, axes=[0, 3, 5, 1, 2, 4] if blocks_first else [0, 1, 3, 5, 2, 4]); y = reshape(t, axis_start=1, axis_count=1, shape=[-1]); } """

# Largest signed 32-bit integer; used to clamp ONNX Slice begin/end indices.
_INT_MAX = 2 ** 31 - 1


class Converter(_Converter):
    """Converts an ONNX graph (internal model representation) to NNEF."""

    @staticmethod
    def defined_operations():
        """Return the NNEF fragment sources for the custom operations this converter may emit."""
        return {
            'lp_pool': _LP_POOL_FRAGMENT,
            'lp_reduce': _LP_REDUCE_FRAGMENT,
            'mean_variance_normalization': _MEAN_VARIANCE_NORMALIZATION_FRAGMENT,
            'lstm_step': _LSTM_STEP_FRAGMENT,
            'lstm_loop': _LSTM_LOOP_FRAGMENT,
            'erf': _ERF_FRAGMENT,
            'mish': _MISH_FRAGMENT,
            'depth_to_space': _DEPTH_TO_SPACE_FRAGMENT,
            'space_to_depth': _SPACE_TO_DEPTH_FRAGMENT,
        }

    @staticmethod
    def defined_operation_dependencies():
        # lstm_loop's fragment body invokes lstm_step, so the latter must
        # be emitted whenever the former is used.
        return {
            'lstm_loop': ['lstm_step'],
        }

    @staticmethod
    def defined_shapes():
        """Shape propagation functions for the custom fragments, keyed by fragment name."""
        return {
            'lp_pool': pool_shape,
            'lp_reduce': reduce_shape,
            'mean_variance_normalization': lambda input, scale, offset, **kwargs: input,
            'lstm_step': lambda x, h, c, W, R, B: (h, c),
            'lstm_loop': lambda X, W, R, B, h, c, **kwargs: (h, c),
            'erf': lambda x: x,
            'mish': lambda x: x,
            # NCHW layout assumed: channels shrink, spatial dims grow (and vice versa).
            'depth_to_space': lambda x, block_size, **kwargs: [x[0], x[1] // block_size ** 2, x[2] * block_size, x[3] * block_size],
            'space_to_depth': lambda x, block_size, **kwargs: [x[0], x[1] * block_size ** 2, x[2] // block_size, x[3] // block_size],
        }

    def __init__(self, custom_transforms=None, custom_functions=None, mirror_unsupported=False,
                 keep_io_names=False, infer_shapes=False, custom_shapes=None, io_transpose=False):
        """Args:
            custom_transforms: extra Transform rules merged over _Transforms.
            custom_functions: extra helper functions for transform expressions.
            mirror_unsupported: forwarded to the base converter.
            keep_io_names: preserve original graph input/output tensor names.
            infer_shapes: forwarded to the base converter.
            custom_shapes: extra shape functions merged over defined_shapes().
            io_transpose: False, True, or a collection of tensor names --
                controls NCHW<->NHWC transposition at graph I/O
                (see _needs_io_transpose).
        """
        _Converter.__init__(self, transforms=self.merge_transforms(_Transforms, custom_transforms),
                            functions=custom_functions, mirror_unsupported=mirror_unsupported,
                            infer_shapes=infer_shapes,
                            custom_shapes=dict(**self.defined_shapes(), **custom_shapes or {}))
        self._keep_io_names = keep_io_names
        self._io_transpose = io_transpose

    def __call__(self, graph):
        """Run the base conversion, then post-process: drop unused constants,
        inline scalars, turn constants into variables, optionally transpose
        graph I/O, and regenerate tensor names."""
        graph = _Converter.__call__(self, graph)
        self.remove_unused_constants(graph)
        self.inline_scalar_constants(graph)
        self.convert_constants_to_variables(graph)
        self._ensure_valid_ids(graph)
        if self._io_transpose is not False:
            self._transpose_inputs(graph)
            self._transpose_outputs(graph)
            graph.sort()
        generate_tensor_names_from_op_type(graph, keep_io_names=self._keep_io_names)
        return graph

    def _prepare(self, graph):
        # Hook called by the base converter before transformation.
        self._insert_externals_and_constants(graph)

    def _is_constant(self, tensor):
        # A tensor is constant if produced by a 'Constant' op, or if it has
        # no producer but carries inline data.
        if tensor.producer:
            return tensor.producer.type == 'Constant'
        else:
            return tensor.data is not None

    def _read_constant(self, tensor, type=None):
        """Evaluate a constant tensor to Python/numpy values, optionally cast to `type`.

        Raises ConversionError if the tensor is produced by a non-Constant op.
        """
        if tensor.producer and tensor.producer.type == 'Constant':
            value = tensor.producer.attribs['value']
        elif not tensor.producer:
            value = tensor.data
        else:
            raise ConversionError('trying to evaluate non-constant tensor')
        return types.from_numpy(value, type=type) if isinstance(value, np.ndarray) else types.cast(value, type=type)

    def _needs_io_transpose(self, tensor):
        # Rank <= 2 tensors have no spatial dims to transpose; otherwise the
        # io_transpose setting is either a global bool or a set of names.
        if tensor.rank <= 2:
            return False
        if isinstance(self._io_transpose, bool):
            return self._io_transpose
        else:
            return tensor.name in self._io_transpose

    def _transpose_inputs(self, graph):
        # Insert transposes after selected graph inputs; keep original names if requested.
        inputs = [self._transpose_input(tensor) if self._needs_io_transpose(tensor) else tensor
                  for tensor in graph.inputs]
        if self._keep_io_names:
            for i in range(len(inputs)):
                if inputs[i] is not graph.inputs[i]:
                    inputs[i].name = graph.inputs[i].name
        graph.inputs = inputs

    def _transpose_outputs(self, graph):
        # Insert transposes before selected graph outputs; keep original names if requested.
        outputs = [self._transpose_output(tensor) if self._needs_io_transpose(tensor) else tensor
                   for tensor in graph.outputs]
        if self._keep_io_names:
            for i in range(len(outputs)):
                if outputs[i] is not graph.outputs[i]:
                    outputs[i].name = graph.outputs[i].name
        graph.outputs = outputs

    def _transpose_input(self, tensor):
        # Rewires the producing op (presumably the inserted 'external' --
        # it carries a 'shape' attrib) to emit an N(C)X->NXC transposed view.
        external = tensor.producer
        external.outputs = self._post_transpose(tensor, self.ncx_to_nxc_perm(tensor.rank))
        external.attribs['shape'] = list(self.nxc_to_ncx(tensor.shape))
        return external.output

    def _transpose_output(self, tensor):
        return self._pre_transpose(tensor, self.nxc_to_ncx_perm(tensor.rank))

    @staticmethod
    def _interleave(items):
        # [(b0, e0), (b1, e1), ...] -> [b0, b1, ..., e0, e1, ...] (ONNX 'pads' layout).
        return [item[0] for item in items] + [item[1] for item in items]

    @staticmethod
    def _uninterleave(items):
        # Inverse of _interleave: ONNX 'pads' layout -> list of (begin, end) pairs.
        count = len(items) // 2
        return list(zip(items[:count], items[count:]))

    def convert_padding(self, pads, auto_pad, output_padding, rank, ceil_stride=None):
        """Convert ONNX pads/auto_pad into NNEF (begin, end) padding pairs.

        SAME_UPPER maps to [] (NNEF auto-padding); explicit/SAME_LOWER pads
        are un-interleaved and left-padded with (0, 0) up to `rank` entries;
        `output_padding` (deconv) is subtracted from the end pads, and
        `ceil_stride` widens end pads for ceil-mode pooling.
        """
        if auto_pad == "NOTSET" or auto_pad == "SAME_LOWER":
            padding = self._uninterleave(pads)
            if output_padding is not None:
                for i in range(len(padding)):
                    padding[i] = (padding[i][0], padding[i][1] - output_padding[i])
            padding = [(0, 0)] * (rank - len(padding)) + padding
            return self.ceil_pads(padding, ceil_stride) if ceil_stride else padding
        elif auto_pad == "VALID":
            padding = [(0, 0,)] * rank
            if output_padding is not None:
                offs = rank - len(output_padding)
                for i in range(len(output_padding)):
                    padding[i + offs] = (padding[i + offs][0], padding[i + offs][1] - output_padding[i])
            return self.ceil_pads(padding, ceil_stride) if ceil_stride else padding
        elif auto_pad == "SAME_UPPER":
            return []
        else:
            assert False

    def convert_pads(self, pads):
        return self._uninterleave(pads)

    def squeeze_input(self, tensor, axes, keep_dims=False):
        return self._pre_squeeze(tensor, axes=axes) if not keep_dims and len(axes) else tensor

    def squeeze_output(self, tensor, axes, keep_dims=False):
        return self._post_squeeze(tensor, axes=axes) if not keep_dims and len(axes) else tensor

    def unsqueeze_input(self, tensor, axes, keep_dims=False):
        return self._pre_unsqueeze(tensor, axes=axes) if not keep_dims and len(axes) else tensor

    def unsqueeze_output(self, tensor, axes, keep_dims=False):
        return self._post_unsqueeze(tensor, axes=axes) if not keep_dims and len(axes) else tensor

    def unsqueeze_vector(self, tensor):
        # For a constant with a single consumer, reshape its data in place
        # instead of inserting an unsqueeze op into the graph.
        original = self._tensor_map[tensor]
        if self._is_constant(original) and len(original.consumers) == 1:
            self._transform_constant(tensor, lambda data: np.expand_dims(data, 0))
            return tensor
        else:
            return self.unsqueeze_input(tensor, axes=[0])

    def bias_add(self, output, bias):
        """Insert the converter's bias operation in front of `output`, returning
        the new pre-bias tensor; a zero scalar bias is a no-op."""
        if bias.rank == 0 and bias.data == 0:
            return output
        input = Tensor(output.graph, dtype=output.dtype, shape=output.shape, quant=copy.deepcopy(output.quant))
        self._bias_operation(input, output, bias)
        return input

    def lower_pads(self, input_size, filter_size, output_size, stride, dilation):
        # Compute total SAME-style padding per spatial dim and split it with
        # the larger half at the end; returned in ONNX interleaved layout.
        rank = len(input_size)
        total = [None] * rank
        for i in range(rank):
            dilated_size = (filter_size[i] - 1) * dilation[i] + 1
            total[i] = max((input_size[i] // stride[i] - 1) * stride[i] + dilated_size - output_size[i], 0)
        pads = [(t // 2, t - t // 2) for t in total]
        return self._interleave(pads)

    def ceil_pads(self, pads, stride):
        # Widen each end pad by (stride - 1) to emulate ceil-mode pooling.
        return [(p, q + s - 1) for (p, q), s in zip(pads, stride)]

    def broadcast(self, tensor, rank):
        # Prepend singleton axes up to `rank`; scalars (rank 0) are left alone.
        return self.unsqueeze_input(tensor, axes=list(range(rank - tensor.rank))) if tensor.rank > 0 else tensor

    def ensure_list(self, arg):
        return [arg] if not isinstance(arg, list) else arg

    def ensure_scalar(self, arg):
        return arg[0] if isinstance(arg, list) and len(arg) == 1 else arg

    def limit_range(self, x):
        # Clamp to the signed 32-bit range (symmetric: lower bound is -_INT_MAX).
        return _INT_MAX if x > _INT_MAX else -_INT_MAX if x < -_INT_MAX else x

    def is_unused(self, tensor):
        # True for unnamed (optional/omitted) tensors or tensors without
        # consumers in the source graph.
        if len(tensor.name) == 0:
            return True
        original = self._tensor_map[tensor]
        return len(original.consumers) == 0


# Declarative mapping from ONNX operator types to NNEF operations.
# Keys are ONNX op type names (tuples map several ops to parallel NNEF
# types); '!'-prefixed strings are expressions evaluated per matched op.
_Transforms = Converter.unpack_transforms({
    ('Conv', 'ConvTranspose'): Transform(
        type=('conv', 'deconv'),
        defaults={
            'strides': '![1] * (I[0].rank - 2)',
            'dilations': '![1] * (I[0].rank - 2)',
            'pads': '![0, 0] * (I[0].rank - 2)',
            'auto_pad': "NOTSET",
            'group': 1,
            'output_shape': None,
            'output_padding': None,
        },
        using={
            '_pads': '!lower_pads(I[0].shape[2:], I[1].shape[2:], O[0].shape[2:], strides, dilations)'
                     ' if auto_pad == "SAME_LOWER" else pads',
        },
        inputs=(
            '!I[0]',
            '!I[1]',
            '!unsqueeze_vector(I[2]) if len(I) > 2 else None',
        ),
        outputs='!O[0]',
        attribs={
            'stride': '!strides',
            'dilation': '!dilations',
            'padding': '!convert_padding(_pads, auto_pad, output_padding, I[0].rank - 2)',
            'groups': '!group',
            'output_shape': '!output_shape',
        }
    ),
    ('MaxPool', 'AveragePool', 'LpPool'): Transform(
        type=('max_pool', 'avg_pool', 'lp_pool'),
        defaults={
            'strides': '![1] * (I[0].rank - 2)',
            'dilations': '![1] * (I[0].rank - 2)',
            'pads': '![0, 0] * (I[0].rank - 2)',
            'auto_pad': "NOTSET",
            'ceil_mode': 0,
            'storage_order': 0,
            'count_include_pad': 0,
        },
        cond={
            '!storage_order == 0': 'storage_order must be 0',
        },
        using={
            '_pads': '!lower_pads(I[0].shape[2:], kernel_shape, O[0].shape[2:], strides, dilations)'
                     ' if auto_pad == "SAME_LOWER" else pads',
        },
        inputs='!I[0]',
        outputs='!O[0]',
        attribs={
            # NNEF pooling covers all dims, hence the [1, 1] batch/channel prefix.
            'size': '![1, 1] + kernel_shape',
            'stride': '![1, 1] + strides',
            'dilation': '![1, 1] + dilations',
            'padding': '!convert_padding(_pads, auto_pad, None, I[0].rank, [1, 1] + strides if ceil_mode == 1 else None)',
            'border': '!"constant" if count_include_pad else "ignore"',
        }
    ),
    # Global pooling becomes reduction over all spatial axes.
    ('GlobalMaxPool', 'GlobalAveragePool', 'GlobalLpPool'): Transform(
        type=('max_reduce', 'mean_reduce', 'lp_reduce'),
        defaults={
            'p': 2,
        },
        inputs='!I[0]',
        outputs='!O[0]',
        attribs={
            'axes': '!list(range(2, I[0].rank))',
            'p': '!float(p) if _type_ == "GlobalLpPool" else None',
        }
    ),
    ('ReduceMin', 'ReduceMax', 'ReduceMean', 'ReduceSum', 'ReduceL1', 'ReduceL2'): Transform(
        type=('min_reduce', 'max_reduce', 'mean_reduce', 'sum_reduce', 'lp_reduce', 'lp_reduce'),
        defaults={
            'keepdims': 1,
        },
        inputs='!I[0]',
        outputs='!squeeze_output(O[0], axes, keepdims)',
        attribs={
            'axes': '!ensure_positive(axes, I[0].rank)',
            'p': '!1.0 if _type_ == "ReduceL1" else 2.0 if _type_ == "ReduceL2" else None',
        }
    ),
    ('ArgMin', 'ArgMax'): Transform(
        type=('argmin_reduce', 'argmax_reduce'),
        defaults={
            'axis': 0,
            'keepdims': 1,
            'select_last_index': 0,
        },
        using={
            'axes': '![ensure_positive(axis, I[0].rank)]',
        },
        cond={
            '!select_last_index == 0': 'select_last_index must be 0',
        },
        inputs='!I[0]',
        outputs='!squeeze_output(O[0], axes, keepdims)',
        attribs={
            'axes': '!axes',
        }
    ),
    'BatchNormalization': Transform(
        type='batch_normalization',
        defaults={
            'epsilon': 1e-5,
            'spatial': 1,
        },
        # ONNX input order: X, scale, B, mean, var -- reordered for NNEF
        # (mean, variance, offset, scale) and lifted to rank-2 vectors.
        inputs=(
            '!I[0]',
            '!unsqueeze_vector(I[3])',
            '!unsqueeze_vector(I[4])',
            '!unsqueeze_vector(I[2])',
            '!unsqueeze_vector(I[1])',
        ),
        outputs='!O[0]',
        attribs={
            'epsilon': '!epsilon',
        }
    ),
    # Simple one-to-one unary ops.
    # NOTE(review): 'Tanh' appears twice in this tuple (harmless but redundant).
    ('Relu', 'Sigmoid', 'Tanh', 'Softplus', 'Selu', 'Not', 'Identity', 'Elu', 'Erf', 'Mish',
     'Abs', 'Sign', 'Sin', 'Cos', 'Tan', 'Asin', 'Acos', 'Atan', 'Sinh', 'Cosh', 'Tanh',
     'Asinh', 'Acosh', 'Atanh', 'Exp', 'Log', 'Neg', 'Sqrt', 'Ceil', 'Floor', 'Round'): Transform(
        type=('relu', 'sigmoid', 'tanh', 'softplus', 'selu', 'not', 'copy', 'elu', 'erf', 'mish',
              'abs', 'sign', 'sin', 'cos', 'tan', 'asin', 'acos', 'atan', 'sinh', 'cosh', 'tanh',
              'asinh', 'acosh', 'atanh', 'exp', 'log', 'neg', 'sqrt', 'ceil', 'floor', 'round'),
        inputs='!I[0]',
        outputs='!O[0]',
    ),
    # Binary ops with rank-alignment broadcasting on both operands.
    ('Add', 'Sub', 'Mul', 'Div', 'Pow', 'Min', 'Max', 'And', 'Or', 'Equal', 'Less', 'Greater',
     'LessOrEqual', 'GreaterOrEqual'): Transform(
        type=('add', 'sub', 'mul', 'div', 'pow', 'min', 'max', 'and', 'or', 'eq', 'lt', 'gt', 'le', 'ge'),
        inputs=(
            '!broadcast(I[0], O[0].rank)',
            '!broadcast(I[1], O[0].rank)',
        ),
        outputs='!O[0]',
    ),
    'LeakyRelu': Transform(
        type='leaky_relu',
        defaults={
            'alpha': 0.01,
        },
        inputs='!I[0]',
        outputs='!O[0]',
        attribs={
            'alpha': '!alpha',
        }
    ),
    'PRelu': Transform(
        type='prelu',
        inputs=(
            '!I[0]',
            '!broadcast(I[1], I[0].rank)',
        ),
        outputs='!O[0]',
    ),
    'Transpose': Transform(
        type='transpose',
        defaults={
            # ONNX default: reverse all axes.
            'perm': '!list(reversed(range(I[0].rank)))',
        },
        inputs='!I[0]',
        outputs='!O[0]',
        attribs={
            'axes': '!ensure_positive(perm, I[0].rank)',
        }
    ),
    'Reshape': Transform(
        type='reshape',
        defaults={
            'shape': '!as_const(I[1])',
        },
        inputs='!I[0]',
        outputs='!O[0]',
        attribs={
            'shape': '!flexible_batch(shape, I[0].shape[0])',
        }
    ),
    'Flatten': Transform(
        type='reshape',
        defaults={
            'axis': 1,
        },
        inputs='!I[0]',
        outputs='!O[0]',
        attribs={
            # 0 keeps a dim as-is in NNEF reshape; -1 collapses the rest.
            'shape': '![0] * axis + [-1]',
        }
    ),
    'Squeeze': Transform(
        type='squeeze',
        defaults={
            'axes': '!as_const(I[1])',
        },
        inputs='!I[0]',
        outputs='!O[0]',
        attribs={
            'axes': '!ensure_positive(axes, I[0].rank)',
        }
    ),
    'Unsqueeze': Transform(
        type='unsqueeze',
        defaults={
            'axes': '!as_const(I[1])',
        },
        inputs='!I[0]',
        outputs='!O[0]',
        attribs={
            'axes': '!ensure_positive(axes, O[0].rank)',
        }
    ),
    'MatMul': Transform(
        type='matmul',
        inputs=(
            '!broadcast(I[0], O[0].rank)',
            '!broadcast(I[1], O[0].rank)',
        ),
        outputs=(
            '!O[0]',
        ),
    ),
    'Gemm': Transform(
        # Maps to NNEF 'linear' when the bias is a plain vector and B is
        # transposed; otherwise to 'matmul' with a separate bias-add.
        type='!"linear" if is_linear else "matmul"',
        defaults={
            'alpha': 1.0,
            'beta': 1.0,
            'transA': 0,
            'transB': 0,
        },
        cond={
            '!alpha == 1.0': 'alpha must be 1',
            '!beta == 1.0 or len(I) == 2': 'beta must be 1',
        },
        using={
            'is_linear': '!len(I) > 2 and I[2].rank == 1 and transB',
            'bias': '!broadcast(I[2], O[0].rank) if len(I) > 2 and not is_linear else None',
        },
        inputs=(
            '!I[0]',
            '!I[1]',
            '!unsqueeze_vector(I[2]) if is_linear else None',
        ),
        outputs='!O[0] if is_linear or bias is None else bias_add(O[0], bias)',
        attribs={
            'transposeA': '!bool(transA) if not is_linear else None',
            'transposeB': '!bool(transB) if not is_linear else None',
        }
    ),
    'LRN': Transform(
        type='local_response_normalization',
        defaults={
            'alpha': 0.0001,
            'beta': 0.75,
            'bias': 1.0,
        },
        inputs='!I[0]',
        outputs='!O[0]',
        attribs={
            'alpha': '!alpha',
            'beta': '!beta',
            'bias': '!bias',
            # Normalization window spans the channel axis only.
            'size': '![1, size] + [1] * (I[0].rank - 2)',
        }
    ),
    'Concat': Transform(
        type='concat',
        defaults={
            'axis': 1,
        },
        inputs=['!I[:]'],
        outputs='!O[0]',
        attribs={
            'axis': '!ensure_positive(axis, O[0].rank)',
        }
    ),
    'Split': Transform(
        type='split',
        defaults={
            'axis': 0,
            'split': '!as_const(I[1])',
        },
        inputs='!I[0]',
        outputs=['!O[:]'],
        attribs={
            'axis': '!ensure_positive(axis, I[0].rank)',
            'ratios': '!split',
        }
    ),
    # Inference-time Dropout is an identity.
    'Dropout': Transform(
        type='copy',
        inputs='!I[0]',
        outputs='!O[0]',
    ),
    'Softmax': Transform(
        type='softmax',
        defaults={
            'axis': 1,
        },
        inputs='!I[0]',
        outputs='!O[0]',
        attribs={
            'axes': '![ensure_positive(axis, I[0].rank)]',
        }
    ),
    'Sum': Transform(
        type='add_n',
        inputs=['!I[:]'],
        outputs='!O[0]',
    ),
    'Where': Transform(
        type='select',
        inputs=(
            '!broadcast(I[0], O[0].rank)',
            '!broadcast(I[1], O[0].rank)',
            '!broadcast(I[2], O[0].rank)',
        ),
        outputs='!O[0]',
    ),
    'Clip': Transform(
        # Missing min/max bounds (empty tensor name) degrade clamp to max/min.
        type='!"max" if I[2].name == "" else "min" if I[1].name == "" else "clamp"',
        inputs=(
            '!I[0]',
            '!I[1] if I[1].name != "" else None',
            '!I[2] if I[2].name != "" else None',
        ),
        outputs='!O[0]',
    ),
    'Pad': Transform(
        type='pad',
        defaults={
            'mode': "constant",
            'value': 0.0,
        },
        using={
            'constant_value': '!ensure_scalar(as_const(I[2])) if len(I) > 2 else value',
        },
        inputs='!I[0]',
        outputs='!O[0]',
        attribs={
            'padding': '!convert_pads(as_const(I[1]) if len(I) > 1 else pads)',
            'value': '!constant_value',
            'border': '!"replicate" if mode == "edge" else mode',
        }
    ),
    'Tile': Transform(
        type='tile',
        inputs='!I[0]',
        outputs='!O[0]',
        attribs={
            'repeats': '!as_const(I[1])',
        }
    ),
    'Expand': Transform(
        # Expressed as tile with per-axis repeat counts derived from shapes.
        type='tile',
        inputs='!I[0]',
        outputs='!O[0]',
        attribs={
            'repeats': '![O[0].shape[i] // I[0].shape[i] for i in range(I[0].rank)]',
        }
    ),
    'Slice': Transform(
        type='slice',
        using={
            'axes': '!as_const(I[3]) if len(I) > 3 else list(range(I[0].rank))',
            # Indices clamped to the int32 range (ONNX allows INT64_MAX sentinels).
            'starts': '![limit_range(x) for x in as_const(I[1])]',
            'ends': '![limit_range(x) for x in as_const(I[2])]',
            'steps': '!as_const(I[4]) if len(I) > 4 else None',
        },
        inputs='!I[0]',
        outputs='!O[0]',
        attribs={
            'axes': '!ensure_positive(axes, I[0].rank)',
            'begin': '!starts',
            'end': '!ends',
            'stride': '!steps',
        }
    ),
    'LpNormalization': Transform(
        type='!"l1_normalization" if p == 1 else "l2_normalization"',
        defaults={
            'axis': -1,
            'p': 2,
        },
        cond={
            '!p == 1 or p == 2': 'p must be 1 or 2',
        },
        inputs='!I[0]',
        outputs='!O[0]',
        attribs={
            'axes': '![ensure_positive(axis, I[0].rank)]',
        }
    ),
    'MeanVarianceNormalization': Transform(
        type='mean_variance_normalization',
        defaults={
            'axes': [0, 2, 3],
        },
        # Unit scale / zero offset inlined as constants.
        inputs=(
            '!I[0]',
            '!as_tensor(1.0, np.float32, inline=True)',
            '!as_tensor(0.0, np.float32, inline=True)',
        ),
        outputs='!O[0]',
        attribs={
            'axes': '!axes',
            'epsilon': 0.0,
        }
    ),
    'InstanceNormalization': Transform(
        type='mean_variance_normalization',
        defaults={
            'epsilon': 1e-5,
        },
        inputs=(
            '!I[0]',
            '!unsqueeze_vector(I[1])',
            '!unsqueeze_vector(I[2])',
        ),
        outputs='!O[0]',
        attribs={
            'axes': '!list(range(2, I[0].rank))',
            'epsilon': '!epsilon',
        }
    ),
    'Upsample': Transform(
        type='!"nearest_upsample" if mode == "nearest" else "multilinear_upsample"',
        defaults={
            'mode': "nearest",
            'scales': '!as_const(I[1])',
        },
        cond={
            '!scales[0] == 1 and scales[1] == 1': 'scales must be 1 in batch and channel dimensions',
            '!all(int(s) == s for s in scales[2:])': 'scales must be integers in all dimensions',
        },
        inputs='!I[0]',
        outputs='!O[0]',
        attribs={
            'factor': '![int(s) for s in scales[2:]]',
            'method': '!"asymmetric" if mode == "linear" else None',
        }
    ),
    'Resize': Transform(
        type='!("nearest_downsample" if downsample else "nearest_upsample") if mode == "nearest" else'
             ' "multilinear_upsample"',
        defaults={
            'mode': "nearest",
            'coordinate_transformation_mode': "half_pixel",
        },
        # OrderedDict: later entries ('sizes', 'upsample', ...) depend on
        # earlier ones, so evaluation order matters here.
        using=OrderedDict([
            ('scales', '!as_const(I[1 if len(I) == 2 else 2])'),
            ('sizes', '!as_const(I[3]) if len(I) > 3 else'
                      ' [int(I[0].shape[i] * scales[i]) for i in range(I[0].rank)]'),
            ('upsample', '!is_integer_upsample(I[0].shape, sizes)'),
            ('downsample', '!is_integer_downsample(I[0].shape, sizes)'),
        ]),
        cond={
            '!mode == "nearest" or mode == "linear"': 'mode must be one of "nearest", "linear"',
            '!upsample or downsample if mode == "nearest" else True':
                "nearest resize must be integer up-sample or down-sample",
            '!upsample if mode == "linear" else True': 'linear resize must be integer up-sample',
            '!sizes[0] == I[0].shape[0] and sizes[1] == I[0].shape[1]':
                'batch and channel dimensions must be preserved',
            '!coordinate_transformation_mode == "half_pixel" or'
            ' coordinate_transformation_mode == "pytorch_half_pixel" or'
            ' coordinate_transformation_mode == "asymmetric" or'
            ' coordinate_transformation_mode == "align_corners"':
                'coordinate_transformation_mode must be one of'
                ' "half_pixel", "pytorch_half_pixel", "asymmetric", "align_corners"',
        },
        inputs='!I[0]',
        outputs='!O[0]',
        attribs={
            'factor': '!upsample_factor(I[0].shape[2:], sizes[2:]) if upsample else'
                      ' downsample_factor(I[0].shape[2:], sizes[2:])',
            'method': '!("aligned" if coordinate_transformation_mode == "align_corners" else'
                      ' "symmetric" if coordinate_transformation_mode == "half_pixel" or '
                      ' coordinate_transformation_mode == "pytorch_half_pixel" else'
                      ' "asymmetric") if mode == "linear" else None',
        }
    ),
    'Constant': Transform(
        type='constant',
        outputs='!O[0]',
        attribs={
            # Small values (rank <= 1, at most 10 elements) become inline lists.
            'value': '!ensure_list(from_numpy(value)) if len(value.shape) <= 1 and int(np.prod(value.shape)) <= 10 '
                     'else value',
            'shape': '!list(value.shape)',
            'dtype': '!value.dtype',
        }
    ),
    'Gather': Transform(
        type='gather',
        inputs=('!I[0]', '!I[1]'),
        outputs='!O[0]',
        defaults={
            'axis': 0,
        },
        attribs={
            'axis': '!ensure_positive(axis, I[0].rank)',
        },
    ),
    'Cast': Transform(
        using={
            'same_type': '!nnef_dtype(O[0].dtype) == nnef_dtype(I[0].dtype)',
        },
        type='!"copy" if same_type else "cast"',
        inputs='!I[0]',
        outputs='!O[0]',
        attribs={
            'dtype': '!O[0].dtype if not same_type else None',
        },
    ),
    'LSTM': Transform(
        # Only single-direction forward LSTM without sequence_lens and with
        # an unused full-sequence output is supported (see lstm_loop fragment).
        cond={
            '!direction == "forward"': 'direction must be "forward"',
            '!is_unused(O[0])': 'first output must not have consumer operations',
            '!len(I[4].name) == 0': 'sequence_lens must not be defined',
        },
        defaults={
            'layout': 0,
        },
        using={
            'seq_axis': '!0 if layout == 0 else 1',
            'dir_axis': '!0 if layout == 0 else 2',
        },
        type='lstm_loop',
        inputs=(
            '!I[0]',  # X
            '!squeeze_input(I[1], axes=[0])',  # W
            '!squeeze_input(I[2], axes=[0])',  # R
            '!I[3]',  # B
            '!squeeze_input(I[5], axes=[dir_axis])',  # h_0
            '!squeeze_input(I[6], axes=[dir_axis])',  # c_0
        ),
        outputs=(
            '!unsqueeze_output(O[1], axes=[dir_axis])',  # h_n
            '!unsqueeze_output(O[2], axes=[dir_axis])',  # c_n
        ),
        attribs={
            'steps': '!I[0].shape[seq_axis]',
            'axis': '!seq_axis',
        },
    ),
    'DepthToSpace': Transform(
        type="depth_to_space",
        defaults={
            'mode': "DCR",
        },
        inputs='!I[0]',
        outputs='!O[0]',
        attribs={
            'block_size': '!blocksize',
            'blocks_first': '!mode == "DCR"',
        },
    ),
    'SpaceToDepth': Transform(
        type="space_to_depth",
        defaults={
            'mode': "DCR",
        },
        inputs='!I[0]',
        outputs='!O[0]',
        attribs={
            'block_size': '!blocksize',
            'blocks_first': '!mode == "DCR"',
        },
    ),
})



================================================
FILE: nnef_tools-pyproject/nnef_tools/conversion/tf_to_nnef.py
================================================
# Copyright (c) 2020 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function, absolute_import from .converter import ConverterToNNEF as _Converter, Transform, ConversionError from ..model.utils import generate_tensor_names_from_op_type from ..utils import types from collections import OrderedDict import numpy as np _RELU6_FRAGMENT = """ fragment relu6( input: tensor ) -> ( output: tensor ) { output = clamp(input, 0.0, 6.0); } """ _INT_MAX = 2 ** 31 - 1 class Converter(_Converter): _ConvOpTypes = ['Conv1D', 'Conv2D', 'Conv3D', 'Conv2DBackpropInput', 'Conv1DBackpropInput', 'Conv3DBackpropInput'] _DepthwiseConvOpTypes = ['DepthwiseConv2dNative', 'DepthwiseConv2dNativeBackpropInput'] @staticmethod def defined_operations(): return { 'relu6': _RELU6_FRAGMENT, } def __init__(self, io_transpose=False, custom_transforms=None, custom_functions=None, mirror_unsupported=False, keep_io_names=False): _Converter.__init__(self, transforms=self.merge_transforms(_Transforms, custom_transforms), functions=custom_functions, mirror_unsupported=mirror_unsupported) self._io_transpose = io_transpose self._keep_io_names = keep_io_names def __call__(self, graph): graph = _Converter.__call__(self, graph) self.remove_unused_constants(graph) self.inline_scalar_constants(graph) self.convert_constants_to_variables(graph) self._fix_output_transposes(graph) self._ensure_valid_ids(graph) generate_tensor_names_from_op_type(graph, keep_io_names=self._keep_io_names) return graph def _global_attribs(self): return {'_lite_': False} def _fix_output_transposes(self, graph): outputs = [self.transpose_input(tensor) if self.needs_io_transpose(tensor) else self.undo_transpose(tensor) for tensor in graph.outputs] if self._keep_io_names: for i in range(len(outputs)): if outputs[i] is not graph.outputs[i]: outputs[i].name = graph.outputs[i].name graph.outputs = outputs def _is_conv_filter(self, tensor): tensor = self._tensor_map.get(tensor) return tensor and len(tensor.consumers) > 0 and \ all(op.type in Converter._ConvOpTypes and 
op.inputs[1] is tensor for op in tensor.consumers) def _is_depthwise_conv_filter(self, tensor): tensor = self._tensor_map.get(tensor) return tensor and len(tensor.consumers) > 0 and \ all(op.type in Converter._DepthwiseConvOpTypes and op.inputs[1] is tensor for op in tensor.consumers) def _is_constant(self, tensor): if tensor.producer: return tensor.producer.type == 'Const' else: return tensor.data is not None def _read_constant(self, tensor, type=None): if tensor.producer is None: return types.from_numpy(tensor.data, type=type) elif tensor.producer.type == 'Const': value = tensor.producer.attribs['value'] return types.from_numpy(value, type=type) if isinstance(value, np.ndarray) else types.cast(value, type=type) else: raise ConversionError('trying to evaluate non-constant tensor') def needs_io_transpose(self, tensor): if tensor.rank <= 2: return False if isinstance(self._io_transpose, bool): return self._io_transpose else: return tensor.name in self._io_transpose def is_nxc(self, format): return format[0] == 'N' and format[-1] == 'C' and len(format) > 2 def is_cxn(self, format): return format[0] == 'C' and format[-1] == 'N' and len(format) > 2 def is_xcn(self, format): return format[-2] == 'C' and format[-1] == 'N' and len(format) > 2 def transpose_input(self, tensor, format='NXC'): if self.is_nxc(format): return self._pre_transpose(tensor, self.nxc_to_ncx_perm(tensor.rank)) \ if not self.transposing(tensor) and tensor.rank > 2 else tensor else: assert not self.transposing(tensor) return tensor def transpose_output(self, tensor, format='NXC'): if self.is_nxc(format): self._transposes[tensor] = self.nxc_to_ncx(tensor.shape) return tensor def transpose_filter(self, tensor, format='XCN'): if self.is_xcn(format): perm = self.xcn_to_ncx_perm(tensor.rank) elif self.is_nxc(format): perm = self.nxc_to_ncx_perm(tensor.rank) elif self.is_cxn(format): perm = self.cxn_to_ncx_perm(tensor.rank) else: assert False return self._pre_transpose(tensor, perm) def 
transpose_depthwise_filter(self, tensor, format='XCN'): if self.is_xcn(format): perm = self.xcn_to_ncx_perm(tensor.rank) elif self.is_nxc(format): perm = self.nxc_to_ncx_perm(tensor.rank) elif self.is_cxn(format): perm = self.cxn_to_ncx_perm(tensor.rank) else: assert False shape = tensor.shape[:-2] + (1, -1) return self._pre_transpose(self._reshape(tensor, shape), perm) def transpose_like(self, tensor, ref): if ref is not None and self.transposing(ref): self.transpose_output(tensor) return tensor def undo_transpose(self, tensor): perm = self.ncx_to_nxc_perm(tensor.rank) if perm == list(range(tensor.rank)): return tensor return self._pre_transpose(tensor, perm) if self.transposing(tensor) else tensor def convert_size(self, value, format): return value[1:-1] if self.is_nxc(format) else value[2:] def convert_padding(self, padding, rank, explicit_paddings=None, format=None): padding = padding.upper() if padding == 'SAME': return [] elif padding == 'VALID': return [(0, 0)] * rank elif padding == 'EXPLICIT': assert explicit_paddings is not None and format is not None explicit_paddings = list(zip(explicit_paddings[0::2], explicit_paddings[1::2])) return explicit_paddings[1:-1] if self.is_nxc(format) else explicit_paddings[2:] else: assert False, "unknown padding type '{}'".format(padding) def transpose_list_like(self, items, ref): return self.nxc_to_ncx(items) if ref is not None and self.transposing(ref) else items def transpose_axis_like(self, axis, ref, rank=None): return self.axis_nxc_to_ncx(axis, rank or ref.rank) if ref is not None and self.transposing(ref) else \ self.ensure_positive(axis, rank or ref.rank) def squeeze_input(self, tensor, axes, keep_dims=False): return self._pre_squeeze(tensor, axes=axes) if not keep_dims else tensor def squeeze_output(self, tensor, axes, keep_dims=False): return self._post_squeeze(tensor, axes=axes) if not keep_dims else tensor def unsqueeze_input(self, tensor, axes, keep_dims=False): return self._pre_unsqueeze(tensor, axes=axes) 
if not keep_dims else tensor def unsqueeze_output(self, tensor, axes, keep_dims=False): return self._post_unsqueeze(tensor, axes=axes) if not keep_dims else tensor def unsqueeze_vector(self, tensor): original = self._tensor_map[tensor] if self._is_constant(original) and len(original.consumers) == 1: self._transform_constant(tensor, lambda data: np.expand_dims(data, 0)) return tensor else: return self.unsqueeze_input(tensor, axes=[0]) def convert_binarg(self, tensor, other): if tensor.rank == 0: return tensor needs_transpose = self.transposing(other) and not self.transposing(tensor) if other.rank > tensor.rank: if tensor.rank == 1 and needs_transpose: return self.unsqueeze_vector(tensor) tensor = self._pre_unsqueeze(tensor, axes=list(range(other.rank - tensor.rank))) return self.transpose_input(tensor) if needs_transpose else tensor def ensure_list(self, value): return value if isinstance(value, list) else list(value) if isinstance(value, tuple) else [value] def is_bit_set(self, mask, idx): return mask & (1 << idx) != 0 def bit_count(self, mask): count = 0 for i in range(mask.bit_length()): if self.is_bit_set(mask, i): count += 1 return count def replace_item_with(self, items, index, count, value): return items[:index] + [value] * count + items[index+1:] def replace_bit_with(self, mask, index, count, value): value_bits = (((1 << count) - 1) << index) if value else 0 low_bits = mask & ((1 << index) - 1) high_bits = (mask & ~((1 << (index + 1)) - 1)) << (count - 1) return low_bits | value_bits | high_bits def beg_index(self, stride): return _INT_MAX if stride < 0 else 0 def end_index(self, stride): return _INT_MAX if stride > 0 else -_INT_MAX _Transforms = Converter.unpack_transforms({ 'Placeholder': Transform( type='external', using={'needs_transpose': '!needs_io_transpose(O[0])'}, outputs='!transpose_output(O[0]) if needs_transpose else O[0]', attribs={ 'shape': '!list(nxc_to_ncx(shape) if needs_transpose else shape)', 'dtype': '!dtype', } ), 'Const': Transform( 
type='constant', outputs='!O[0]', attribs={ 'shape': '!list(value.shape)', 'dtype': '!dtype', 'value': '!value', } ), ('Conv2D', 'Conv3D', 'DepthwiseConv2dNative'): Transform( type='conv', using={ 'depthwise': '!_type_ == "DepthwiseConv2dNative"', }, defaults={ 'explicit_paddings': [], }, inputs=( '!transpose_input(I[0], data_format)', '!transpose_filter(I[1]) if not depthwise else transpose_depthwise_filter(I[1])', ), outputs=( '!transpose_output(O[0], data_format)', ), attribs={ 'stride': '!convert_size(strides, data_format)', 'dilation': '!convert_size(dilations, data_format)', 'padding': '!convert_padding(padding, I[0].rank - 2, explicit_paddings, data_format)', 'groups': '!1 if not depthwise else 0', } ), ('Conv2DBackpropInput', 'Conv3DBackpropInput', 'DepthwiseConv2dNativeBackpropInput'): Transform( type='deconv', using={ 'depthwise': '!_type_ == "DepthwiseConv2dNativeBackpropInput"', }, defaults={ 'explicit_paddings': [], }, inputs=( '!transpose_input(I[2], data_format)', '!transpose_filter(I[1]) if not depthwise else transpose_depthwise_filter(I[1])', ), outputs=( '!transpose_output(O[0], data_format)', ), attribs={ 'stride': '!convert_size(strides, data_format)', 'dilation': '!convert_size(dilations, data_format)', 'padding': '!convert_padding(padding, I[2].rank - 2, explicit_paddings, data_format)', 'output_shape': '!nxc_to_ncx(as_const(I[0])) if is_nxc(data_format) else as_const(I[0])', 'groups': '!1 if not depthwise else 0', } ), ('MaxPool', 'AvgPool', 'MaxPoolWithArgmax'): Transform( type=('max_pool', 'avg_pool', 'max_pool_with_index'), defaults={ 'explicit_paddings': [], 'data_format': 'NHWC', }, inputs=( '!transpose_input(I[0], data_format)', ), outputs=( '!transpose_output(O[0], data_format)', '!transpose_output(O[1], data_format) if len(O) > 1 else None', ), attribs={ 'size': '!nxc_to_ncx(ksize) if is_nxc(data_format) else ksize', 'stride': '!nxc_to_ncx(strides) if is_nxc(data_format) else strides', 'padding': '!convert_padding(padding, I[0].rank, 
explicit_paddings, data_format)', 'border': '!"ignore"', } ), 'Concat': Transform( type='concat', inputs=['!I[1:]'], outputs='!transpose_like(O[0], I[1])', attribs={ 'axis': '!transpose_axis_like(as_const(I[0]), I[1], O[0].rank)', } ), 'ConcatV2': Transform( type='concat', inputs=['!I[:-1]'], outputs='!transpose_like(O[0], I[0])', attribs={ 'axis': '!transpose_axis_like(as_const(I[-1]), I[0], O[0].rank)', } ), 'Split': Transform( type='split', inputs='!I[1]', outputs=['![transpose_like(O[i], I[1]) for i in range(len(O))]'], attribs={ 'axis': '!transpose_axis_like(as_const(I[0]), I[1])', 'ratios': '![1] * (num_split if not _lite_ else num_splits)', } ), 'SplitV': Transform( type='split', inputs='!I[0]', outputs=['![transpose_like(O[i], I[0]) for i in range(len(O))]'], attribs={ 'axis': '!transpose_axis_like(as_const(I[2]), I[0])', 'ratios': '!as_const(I[1])', } ), 'Reshape': Transform( type='reshape', inputs='!undo_transpose(I[0])', outputs='!O[0]', attribs={ 'shape': '!flexible_batch(as_const(I[1]), I[0].shape[0])', 'dtype': '!I[0].dtype', } ), 'Transpose': Transform( type='transpose', inputs='!I[0]', outputs='!O[0]', attribs={ 'axes': '!transpose_axis_like(as_const(I[1]), I[0])', 'dtype': '!I[0].dtype', } ), 'Squeeze': Transform( type='squeeze', inputs='!undo_transpose(I[0])', outputs='!O[0]', attribs={ 'axes': '!ensure_list(ensure_positive(squeeze_dims, I[0].rank)) if len(squeeze_dims) != 0 else' ' [i for i, x in enumerate(I[0].shape) if x == 1]', 'dtype': '!I[0].dtype', } ), 'ExpandDims': Transform( type='unsqueeze', inputs='!undo_transpose(I[0])', outputs='!O[0]', attribs={ 'axes': '!ensure_list(ensure_positive(as_const(I[1]), O[0].rank))', 'dtype': '!I[0].dtype', } ), 'Pack': Transform( type='stack', inputs=['![undo_transpose(t) for t in I]'], outputs='!O[0]', attribs={ 'axis': '!ensure_positive(axis, O[0].rank)', } ), 'Unpack': Transform( type='unstack', inputs='!undo_transpose(I[0])', outputs=['!O[:]'], attribs={ 'axis': '!ensure_positive(axis, I[0].rank)', 
} ), ('Min', 'Max', 'Mean', 'Sum', 'Any', 'All'): Transform( type=('min_reduce', 'max_reduce', 'mean_reduce', 'sum_reduce', 'any_reduce', 'all_reduce'), using={ 'axes': '!ensure_list(transpose_axis_like(as_const(I[1]), I[0]))' }, inputs='!I[0]', outputs=( '!transpose_like(O[0], I[0]) if keep_dims else squeeze_output(O[0], axes)', ), attribs={ 'axes': '!axes', } ), ('Add', 'AddV2', 'Sub', 'Mul', 'RealDiv', 'Pow', 'LogicalAnd', 'LogicalOr', 'Less', 'Greater', 'LessEqual', 'GreaterEqual', 'Equal', 'NotEqual', 'Minimum', 'Maximum'): Transform( type=('add', 'add', 'sub', 'mul', 'div', 'pow', 'and', 'or', 'lt', 'gt', 'le', 'ge', 'eq', 'ne', 'min', 'max'), inputs=( '!convert_binarg(I[0], I[1])', '!convert_binarg(I[1], I[0])', ), outputs='!transpose_output(O[0]) if transposing(I[0]) or transposing(I[1]) else O[0]', ), ('Identity', 'Relu', 'Relu6', 'Elu', 'Selu', 'Gelu', 'Silu', 'Swish', 'Sigmoid', 'Softplus', 'Exp', 'Log', 'Sin', 'Cos', 'Tan', 'Asin', 'Acos', 'Atan', 'Sinh', 'Cosh', 'Tanh', 'Asinh', 'Acosh', 'Atanh', 'Neg', 'Reciprocal', 'Sign', 'Abs', 'Floor', 'Ceil', 'Round', 'Square', 'Sqrt', 'Rsqrt', 'LogicalNot'): Transform( type=('copy', 'relu', 'relu6', 'elu', 'selu', 'gelu', 'silu', 'silu', 'sigmoid', 'softplus', 'exp', 'log', 'sin', 'cos', 'tan', 'asin', 'acos', 'atan', 'sinh', 'cosh', 'tanh', 'asinh', 'acosh', 'atanh', 'neg', 'rcp', 'sign', 'abs', 'floor', 'ceil', 'round', 'sqr', 'sqrt', 'rsqrt', 'not'), inputs='!I[0]', outputs='!transpose_like(O[0], I[0])', ), 'LeakyRelu': Transform( type='leaky_relu', inputs='!I[0]', outputs='!transpose_like(O[0], I[0])', attribs={ 'alpha': '!alpha', } ), ('FusedBatchNorm', 'FusedBatchNormV3'): Transform( type='batch_normalization', inputs=( '!transpose_input(I[0], data_format)', '!unsqueeze_vector(I[3])', '!unsqueeze_vector(I[4])', '!unsqueeze_vector(I[2])', '!unsqueeze_vector(I[1])', ), outputs=( '!transpose_output(O[0], data_format)', ), attribs={ 'epsilon': '!epsilon', } ), 'BiasAdd': Transform( type='add', inputs=( 
'!transpose_input(I[0], data_format)', '!unsqueeze_vector(I[1])', ), outputs='!transpose_output(O[0], data_format)', ), 'Softmax': Transform( type='softmax', cond={ '!beta == 1 if _lite_ else True': 'beta must be 1', }, inputs='!transpose_input(I[0])', outputs='!transpose_output(O[0])', attribs={ 'axes': [1], } ), 'MatMul': Transform( type='matmul', inputs=('!I[0]', '!I[1]'), outputs='!O[0]', attribs={ 'transposeA': '!transpose_a', 'transposeB': '!transpose_b', }, ), 'ClipByValue': Transform( type='clamp', inputs=('!I[0]', '!I[1]', '!I[2]'), outputs='!transpose_like(O[0], I[0])', ), ('Pad', 'MirrorPad'): Transform( type='pad', cond={ '!mode in ["CONSTANT", "REFLECT", "SYMMETRIC"]': 'mode must be one of "CONSTANT", "REFLECT" or SYMMETRIC', }, defaults={ 'mode': 'CONSTANT', }, using={ 'paddings': '!transpose_list_like(as_const(I[1]), ref=I[0])', }, inputs='!I[0]', outputs='!transpose_like(O[0], I[0])', attribs={ 'padding': '![tuple(item) for item in paddings]', 'border': '!"reflect" if mode == "REFLECT" else "reflect-even" if mode == "SYMMETRIC" else "constant"', } ), 'Slice': Transform( type='slice', using={ 'beg': '!as_const(I[1])', 'end': '![0 if s == -1 else b + s for b, s in zip(as_const(I[1]), as_const(I[2]))]', }, inputs='!I[0]', outputs='!transpose_like(O[0], I[0])', attribs={ 'axes': '!list(range(I[0].rank))', 'begin': '!transpose_list_like(beg, ref=I[0])', 'end': '!transpose_list_like(end, ref=I[0])', } ), 'StridedSlice': Transform( type='slice', using=OrderedDict([ ('ref', '!I[0] if new_axis_mask == 0 else None'), ('rank', '!I[0].rank + bit_count(new_axis_mask)'), ('stride', '!as_const(I[3])'), ('ellipsis_index', '!int(math.log2(ellipsis_mask)) if ellipsis_mask != 0 else None'), ('ellipsis_count', '!rank - (len(stride) - 1) if ellipsis_mask != 0 else None'), ('beg', '!replace_item_with(as_const(I[1]), ellipsis_index, ellipsis_count, 0) ' 'if ellipsis_index is not None else as_const(I[1])'), ('end', '!replace_item_with(as_const(I[2]), ellipsis_index, 
ellipsis_count, 0) ' 'if ellipsis_index is not None else as_const(I[2])'), ('begin_mask', '!replace_bit_with(begin_mask, ellipsis_index, ellipsis_count, 1) ' 'if ellipsis_index is not None else begin_mask'), ('end_mask', '!replace_bit_with(end_mask, ellipsis_index, ellipsis_count, 1) ' 'if ellipsis_index is not None else end_mask'), ('new_axis_mask', '!replace_bit_with(new_axis_mask, ellipsis_index, ellipsis_count, 0) ' 'if ellipsis_index is not None else new_axis_mask'), ('shrink_axis_mask', '!replace_bit_with(shrink_axis_mask, ellipsis_index, ellipsis_count, 0) ' 'if ellipsis_index is not None else shrink_axis_mask'), ('masked_beg', '![beg_index(s) if is_bit_set(begin_mask,i) else b ' 'for i, (b, s) in enumerate(zip(beg,stride))]'), ('masked_end', '![end_index(s) if is_bit_set(end_mask,i) else b + 1 ' 'if is_bit_set(shrink_axis_mask,i) else e ' 'for i, (b, e, s) in enumerate(zip(beg,end,stride))]'), ('axes', '!transpose_axis_like([i for i in range(rank) ' 'if not (is_bit_set(begin_mask,i) and is_bit_set(end_mask,i)) ' 'and not is_bit_set(new_axis_mask,i)], ref, rank)'), ('new_axes', '!transpose_axis_like([i for i in range(rank) ' 'if is_bit_set(new_axis_mask,i)], ref, rank)'), ('del_axes', '!transpose_axis_like([i for i in range(rank) ' 'if is_bit_set(shrink_axis_mask,i)], ref, rank)'), ]), inputs='!unsqueeze_input(undo_transpose(I[0]), new_axes) if len(new_axes) else I[0]', outputs='!transpose_like(squeeze_output(O[0], del_axes) if len(del_axes) else O[0], ref)', attribs={ 'axes': '![i for i in range(rank) if i in axes]', 'begin': '![b for i, b in enumerate(transpose_list_like(masked_beg, ref)) if i in axes]', 'end': '![e for i, e in enumerate(transpose_list_like(masked_end, ref)) if i in axes]', 'stride': '![s for i, s in enumerate(transpose_list_like(stride, ref)) if i in axes]', } ), ('ArgMin', 'ArgMax'): Transform( type=('argmin_reduce', 'argmax_reduce'), using={ 'axis': '!transpose_axis_like(as_const(I[1]), ref=I[0])' }, inputs='!I[0]', 
outputs='!transpose_like(O[0], ref=I[0]) if _lite_ else squeeze_output(O[0], [axis])', attribs={ 'axes': '!ensure_list(axis)', } ), 'Select': Transform( type='select', inputs=( '!I[0]', '!convert_binarg(I[1], I[0])', '!convert_binarg(I[2], I[0])', ), outputs='!transpose_like(O[0], ref=I[0])', ), 'Tile': Transform( type='tile', inputs='!I[0]', outputs='!transpose_like(O[0], I[0])', attribs={ 'repeats': '!transpose_list_like(as_const(I[1]), I[0])', } ), 'ResizeNearestNeighbor': Transform( type='!"nearest_upsample" if upsample else "nearest_downsample"', using=OrderedDict([ ('old_size', '!I[0].shape[1:-1]'), ('new_size', '!as_const(I[1])'), ('upsample', '!is_integer_upsample(old_size, new_size)'), ('downsample', '!is_integer_downsample(old_size, new_size)'), ]), cond={ '!upsample or downsample': 'nearest resize must be integer up-sample or down-sample', '!not align_corners': 'align_corners is not supported', }, inputs='!transpose_input(I[0])', outputs='!transpose_output(O[0])', attribs={ 'factor': '!upsample_factor(old_size, new_size) if upsample else downsample_factor(old_size, new_size)', } ), 'ResizeArea': Transform( type='area_downsample', cond={ '!is_integer_downsample(I[0].shape[1:-1], O[0].shape[1:-1])': 'area resize must be integer down-sample', '!not align_corners': 'align_corners is not supported', }, using={ 'size': '!I[0].shape[1:-1]' }, inputs='!transpose_input(I[0])', outputs='!transpose_output(O[0])', attribs={ 'factor': '!downsample_factor(size, as_const(I[1]))', } ), 'ResizeBilinear': Transform( type='multilinear_upsample', cond={ '!is_integer_upsample(I[0].shape[1:-1], O[0].shape[1:-1])': 'bilinear resize must be integer up-sample', }, using={ 'size': '!I[0].shape[1:-1]' }, inputs='!transpose_input(I[0])', outputs='!transpose_output(O[0])', attribs={ 'factor': '!upsample_factor(size, as_const(I[1]))', 'method': '!"aligned" if align_corners else "symmetric" if half_pixel_centers else "asymmetric"', } ), 'LRN': Transform( 
type='local_response_normalization', using={ 'size': '!(radius if _lite_ else depth_radius) * 2 + 1' }, inputs='!I[0]', outputs='!transpose_like(O[0], I[0])', attribs={ 'size': '![1, size] + [1] * (I[0].rank - 2)', 'alpha': '!alpha * size', 'beta': '!beta', 'bias': '!bias', } ), 'Cast': Transform( using={ 'same_type': '!nnef_dtype(O[0].dtype) == nnef_dtype(I[0].dtype)', }, type='!"copy" if same_type else "cast"', inputs='!I[0]', outputs='!transpose_like(O[0], I[0])', attribs={ 'dtype': '!O[0].dtype if not same_type else None', }, ), ('Gather', 'GatherV2'): Transform( type='gather', using={ 'axis': '!transpose_axis_like(as_const(I[2]), ref=I[0])' }, inputs=('!I[0]', '!I[1]'), outputs='!transpose_like(O[0], I[0])', attribs={ 'axis': '!axis', }, ), 'AddN': Transform( type='add_n', inputs=['!I[:]'], outputs='!transpose_like(O[0], I[0])' ), }) ================================================ FILE: nnef_tools-pyproject/nnef_tools/conversion/tflite_to_nnef.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import division, print_function, absolute_import from .converter import Converter as _Converter, Transform, ConversionError from .tf_to_nnef import Converter as _TFConverter, _Transforms as _TFTransforms, _RELU6_FRAGMENT from ..model import Tensor, Operation from ..utils import types from ..io.tf.lite import CustomOptionsKey import numpy as np import copy _DETECTION_POSTPROCESS_FRAGMENT = """ fragment TFLite_Detection_PostProcess( boxes: tensor, scores: tensor, anchors: tensor, detections_per_class: integer, max_classes_per_detection: integer, max_detections: integer, nms_iou_threshold: scalar, nms_score_threshold: scalar, num_classes: integer, use_regular_nms: logical, h_scale: scalar, w_scale: scalar, x_scale: scalar, y_scale: scalar ) -> ( detection_boxes: tensor, detection_classes: tensor, detection_scores: tensor, num_detections: tensor ); """ class Converter(_TFConverter): _ConvOpTypes = ['CONV_1D', 'CONV_2D', 'CONV_3D', 'TRANSPOSE_CONV', 'DEPTHWISE_CONV_2D'] _ActivationOpTypes = { 'ELU': 'elu', 'RELU': 'relu', 'RELU6': 'relu6', 'LOGISTIC': 'sigmoid', 'TANH': 'tanh', } @staticmethod def defined_operations(): return { 'relu6': _RELU6_FRAGMENT, 'TFLite_Detection_PostProcess': _DETECTION_POSTPROCESS_FRAGMENT, } def __init__(self, io_transpose=False, custom_transforms=None, custom_functions=None, mirror_unsupported=False, keep_io_names=False): _Converter.__init__(self, transforms=self.merge_transforms(_Transforms, custom_transforms), functions=custom_functions, mirror_unsupported=mirror_unsupported) self._io_transpose = io_transpose self._keep_io_names = keep_io_names def __call__(self, graph): graph = _TFConverter.__call__(self, graph) self._fix_custom_options(graph) return graph def _global_attribs(self): return {'_lite_': True} def _prepare(self, graph): self._fix_quantization_attribs(graph) self._fix_quantized_dtypes(graph) self._insert_externals_and_constants(graph) self._transpose_externals(graph) def _is_constant(self, tensor): return 
tensor.producer is None and tensor.data is not None def _read_constant(self, tensor, type=None): if tensor.producer is None: return types.from_numpy(tensor.data, type=type) else: raise ConversionError('trying to evaluate non-constant tensor') def _transpose_externals(self, graph): for op in graph.operations: if op.type == 'external': if self.needs_io_transpose(op.output): shape = self.nxc_to_ncx(op.output.shape) op.attribs['shape'] = list(shape) self._transposes[op.output] = shape @staticmethod def _is_zero(value): return np.all(value == 0) if isinstance(value, np.ndarray) else value == 0 def _fix_quantized_dtypes(self, graph): for tensor in graph.tensors: if tensor.quant: scale = tensor.quant.get('scale') if scale is not None and not self._is_zero(scale): tensor.dtype = np.float32 else: tensor.quant = None def _fix_quantization_attribs(self, graph): dtype_bits = { np.int8: 8, np.uint8: 8, np.int16: 16, np.uint16: 16, np.int32: 32, np.uint32: 32, np.int64: 64, np.uint64: 64, } for tensor in graph.tensors: if tensor.quant: scale = tensor.quant.get('scale') zero_point = tensor.quant.get('zero_point') if scale is not None and not self._is_zero(scale): if 'min' in tensor.quant: del tensor.quant['min'] if 'max' in tensor.quant: del tensor.quant['max'] assert tensor.dtype == np.uint8 or tensor.dtype == np.int8 or \ tensor.dtype == np.uint16 or tensor.dtype == np.int16 or \ tensor.dtype == np.uint32 or tensor.dtype == np.int32, \ "unknown quantized dtype '{}'".format(tensor.dtype) tensor.quant['op-name'] = 'zero_point_linear_quantize' tensor.quant['bits'] = 32 if self._is_conv_bias(tensor) else dtype_bits[tensor.dtype] tensor.quant['signed'] = tensor.dtype == np.int8 or tensor.dtype == np.int16 or tensor.dtype == np.int32 tensor.quant['symmetric'] = self._is_conv_filter(tensor) if tensor.data is None: if isinstance(zero_point, np.ndarray) and len(zero_point.shape) == 1: tensor.quant['zero_point'] = np.expand_dims(zero_point, axis=0) if isinstance(scale, np.ndarray) and 
len(scale.shape) == 1: tensor.quant['scale'] = np.expand_dims(scale, axis=0) def _fix_custom_options(self, graph): for op in graph.operations: if op.custom: options = op.attribs.get(CustomOptionsKey) if options is not None: op.attribs[CustomOptionsKey] = options.hex() def _is_conv_filter(self, tensor): tensor = self._tensor_map.get(tensor) return tensor and len(tensor.consumers) > 0 and \ all(op.type in Converter._ConvOpTypes and op.inputs[1] is tensor for op in tensor.consumers) def _is_conv_bias(self, tensor): tensor = self._tensor_map.get(tensor) return tensor and len(tensor.consumers) > 0 and \ all(op.type in Converter._ConvOpTypes and op.inputs[2] is tensor for op in tensor.consumers) def activation(self, output, func): if func is None or func == 'NONE': return output if func not in self._ActivationOpTypes: raise ConversionError("Unsupported fused activation function '{}'".format(func)) input = Tensor(output.graph, dtype=output.dtype, shape=self._working_shape(output), quant=copy.deepcopy(output.quant)) Operation(output.graph, type=self._ActivationOpTypes[func], inputs=input, outputs=output) return input def flat_list(self, array): return [item for items in array for item in items] if len(array) and isinstance(array[0], (list, tuple)) else array def flatten(self, input): shape = (input.shape[0], int(np.prod(input.shape[1:]))) output = Tensor(input.graph, dtype=input.dtype, shape=shape, quant=copy.deepcopy(input.quant)) self._reshape_operation(input, output, shape) return output def same_shape(self, input, output): return self._tensor_map[input].shape == self._tensor_map[output].shape _Transforms = Converter.unpack_transforms({ ('CONV_1D', 'CONV_2D', 'CONV_3D', 'DEPTHWISE_CONV_2D'): Transform( type='conv', using={ 'depthwise': '!_type_ == "DEPTHWISE_CONV_2D"', }, inputs=( '!transpose_input(I[0])', '!transpose_filter(I[1], format="NXC" if not depthwise else "CXN")', '!unsqueeze_vector(I[2])', ), outputs='!activation(transpose_output(O[0]), 
fused_activation_function)', attribs={ 'stride': '![stride_h, stride_w]', 'dilation': '![dilation_h_factor, dilation_w_factor]', 'padding': '!convert_padding(padding, I[0].rank - 2)', 'groups': '!1 if not depthwise else 0', } ), 'TRANSPOSE_CONV': Transform( type='deconv', using={ 'depthwise': False, }, inputs=( '!transpose_input(I[2])', '!transpose_filter(I[1], format="CXN" if not depthwise else "NXC")', ), outputs='!transpose_output(O[0])', attribs={ 'stride': '![stride_h, stride_w]', 'padding': '!convert_padding(padding, I[0].rank - 2)', 'output_shape': '!nxc_to_ncx(as_const(I[0]))', 'groups': '!1 if not depthwise else 0', } ), ('MAX_POOL_2D', 'AVERAGE_POOL_2D'): Transform( type=('max_pool', 'avg_pool'), inputs=( '!transpose_input(I[0])', ), outputs=( '!transpose_output(O[0])', ), attribs={ 'size': '![1, 1, filter_height, filter_width]', 'stride': '![1, 1, stride_h, stride_w]', 'padding': '!convert_padding(padding, I[0].rank)', 'border': '!"ignore"', } ), 'RESHAPE': Transform( type='reshape', inputs='!undo_transpose(I[0])', outputs='!O[0]', attribs={ 'shape': '!flexible_batch(flat_list(as_const(I[1])) if len(I) > 1 else new_shape, I[0].shape[0])', 'dtype': '!I[0].dtype', } ), 'CONCATENATION': Transform( type='concat', inputs=['!I[:]'], outputs='!activation(transpose_like(O[0], I[0]), fused_activation_function)', attribs={ 'axis': '!transpose_axis_like(axis, I[0], O[0].rank)', } ), 'FULLY_CONNECTED': Transform( type='linear', cond={ '!weights_format == "DEFAULT"': 'wights_format must be "DEFAULT"', }, inputs=( '!I[0] if keep_num_dims else flatten(I[0])', '!I[1]', '!unsqueeze_vector(I[2]) if len(I) > 2 else None', ), outputs='!activation(O[0], fused_activation_function)', ), 'BATCH_MATMUL': Transform( type='matmul', cond={ '!asymmetric_quantize_inputs == False': 'asymmetric_quantize_inputs must be False', }, inputs=('!I[0]', '!I[1]'), outputs='!O[0]', attribs={ 'transposeA': '!adj_x', 'transposeB': '!adj_y', }, ), 'L2_NORMALIZATION': Transform( 
type='l2_normalization', inputs='!I[0]', outputs='!transpose_like(O[0], I[0])', attribs={ 'axes': '!list(range(I[0].rank))', } ), 'PRELU': Transform( type='prelu', inputs=('!I[0]', '!I[1]'), outputs='!transpose_like(O[0], I[0])', ), ('PAD', 'MIRROR_PAD'): Transform( type='pad', cond={ '!mode < 2': 'mode must be 0 or 1', }, defaults={ 'mode': None, }, using={ 'paddings': '!transpose_list_like(as_const(I[1]), ref=I[0])', }, inputs='!I[0]', outputs='!transpose_like(O[0], I[0])', attribs={ 'padding': '![tuple(item) for item in paddings]', 'border': '!"reflect" if mode == 0 else "reflect-even" if mode == 1 else "constant"', } ), 'GATHER': Transform( type='gather', inputs=('!I[0]', '!I[1]'), outputs='!transpose_like(O[0], I[0])', attribs={ 'axis': '!transpose_axis_like(axis, I[0])', }, ), 'IDENTITY': _TFTransforms['Identity'], 'QUANTIZE': _TFTransforms['Identity'], 'TRANSPOSE': _TFTransforms['Transpose'], 'SPLIT': _TFTransforms['Split'], 'SPLIT_V': _TFTransforms['SplitV'], 'PACK': _TFTransforms['Pack'], 'UNPACK': _TFTransforms['Unpack'], 'TILE': _TFTransforms['Tile'], 'SQUEEZE': _TFTransforms['Squeeze'], 'EXPAND_DIMS': _TFTransforms['ExpandDims'], 'SLICE': _TFTransforms['Slice'], 'STRIDED_SLICE': _TFTransforms['StridedSlice'], 'RELU': _TFTransforms['Relu'], 'RELU6': _TFTransforms['Relu6'], 'ELU': _TFTransforms['Elu'], 'LEAKY_RELU': _TFTransforms['LeakyRelu'], 'LOGISTIC': _TFTransforms['Sigmoid'], 'SIN': _TFTransforms['Sin'], 'COS': _TFTransforms['Cos'], 'TAN': _TFTransforms['Tan'], 'ASIN': _TFTransforms['Asin'], 'ACOS': _TFTransforms['Acos'], 'ATAN': _TFTransforms['Atan'], 'SINH': _TFTransforms['Sinh'], 'COSH': _TFTransforms['Cosh'], 'TANH': _TFTransforms['Tanh'], 'ASINH': _TFTransforms['Asinh'], 'ACOSH': _TFTransforms['Acosh'], 'ATANH': _TFTransforms['Atanh'], 'EXP': _TFTransforms['Exp'], 'LOG': _TFTransforms['Log'], 'ABS': _TFTransforms['Abs'], 'NEG': _TFTransforms['Neg'], 'LOGICAL_NOT': _TFTransforms['LogicalNot'], 'FLOOR': _TFTransforms['Floor'], 'CEIL': 
_TFTransforms['Ceil'], 'ROUND': _TFTransforms['Round'], 'SQUARE': _TFTransforms['Square'], 'SQRT': _TFTransforms['Sqrt'], 'RSQRT': _TFTransforms['Rsqrt'], 'ADD': _TFTransforms['Add'], 'SUB': _TFTransforms['Sub'], 'MUL': _TFTransforms['Mul'], 'DIV': _TFTransforms['RealDiv'], 'POW': _TFTransforms['Pow'], 'MINIMUM': _TFTransforms['Minimum'], 'MAXIMUM': _TFTransforms['Maximum'], 'LOGICAL_AND': _TFTransforms['LogicalAnd'], 'LOGICAL_OR': _TFTransforms['LogicalOr'], 'LESS': _TFTransforms['Less'], 'LESS_EQUAL': _TFTransforms['LessEqual'], 'GREATER': _TFTransforms['Greater'], 'GREATER_EQUAL': _TFTransforms['GreaterEqual'], 'EQUAL': _TFTransforms['Equal'], 'NOT_EQUAL': _TFTransforms['NotEqual'], 'SELECT': _TFTransforms['Select'], 'REDUCE_MIN': _TFTransforms['Min'], 'REDUCE_MAX': _TFTransforms['Max'], 'MEAN': _TFTransforms['Mean'], 'SUM': _TFTransforms['Sum'], 'REDUCE_ANY': _TFTransforms['Any'], 'REDUCE_ALL': _TFTransforms['All'], 'ARG_MIN': _TFTransforms['ArgMin'], 'ARG_MAX': _TFTransforms['ArgMax'], 'SOFTMAX': _TFTransforms['Softmax'], 'LOCAL_RESPONSE_NORMALIZATION': _TFTransforms['LRN'], 'RESIZE_NEAREST_NEIGHBOR': _TFTransforms['ResizeNearestNeighbor'], 'RESIZE_BILINEAR': _TFTransforms['ResizeBilinear'], 'ADD_N': _TFTransforms['AddN'], 'CAST': _TFTransforms['Cast'], }) ================================================ FILE: nnef_tools-pyproject/nnef_tools/convert.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. from .conversion import * from .model import utils import numpy as np import importlib import argparse import json import six def get_reader(input_format, decomposed, fold_constants, custom_shapes): if input_format == 'tf': from .io.tf.graphdef import Reader return Reader(fold_constants=fold_constants) elif input_format == 'tflite': from .io.tf.lite import Reader return Reader() elif input_format == 'nnef': from .io.nnef import Reader return Reader(decomposed=decomposed, custom_shapes=custom_shapes) elif input_format == 'onnx': from .io.onnx import Reader return Reader(simplify=fold_constants) elif input_format == 'caffe2': from .io.caffe2 import Reader return Reader() elif input_format == 'caffe': from .io.caffe2 import Reader return Reader(legacy=True) else: return None def get_writer(output_format, fragments, fragment_dependencies, generate_fragments, annotate_shapes, compression): if output_format == 'tf': from .io.tf.graphdef import Writer return Writer() elif output_format == 'tflite': from .io.tf.lite import Writer return Writer() elif output_format == 'nnef': from .io.nnef import Writer return Writer(fragments=fragments, fragment_dependencies=fragment_dependencies, generate_custom_fragments=generate_fragments, annotate_shapes=annotate_shapes, compression=compression) elif output_format == 'onnx': from .io.onnx import Writer return Writer() elif output_format == 'caffe2': from .io.caffe2 import Writer return Writer() else: return None def get_converter(input_format, output_format, io_transpose, custom_transforms, custom_functions, custom_shapes, mirror_unsupported, keep_io_names): if input_format == 'tf' and output_format == 'nnef': from .conversion.tf_to_nnef import Converter return Converter(io_transpose=io_transpose, custom_transforms=custom_transforms, custom_functions=custom_functions, mirror_unsupported=mirror_unsupported, keep_io_names=keep_io_names) 
elif input_format == 'nnef' and output_format == 'tf': from .conversion.nnef_to_tf import Converter return Converter(io_transpose=io_transpose, custom_transforms=custom_transforms, custom_functions=custom_functions, mirror_unsupported=mirror_unsupported) elif input_format == 'tflite' and output_format == 'nnef': from .conversion.tflite_to_nnef import Converter return Converter(io_transpose=io_transpose, custom_transforms=custom_transforms, custom_functions=custom_functions, mirror_unsupported=mirror_unsupported, keep_io_names=keep_io_names) elif input_format == 'nnef' and output_format == 'tflite': from .conversion.nnef_to_tflite import Converter return Converter(io_transpose=io_transpose, custom_transforms=custom_transforms, custom_functions=custom_functions, mirror_unsupported=mirror_unsupported) elif (input_format == 'onnx' or input_format == 'caffe2' or input_format == 'caffe') and output_format == 'nnef': from .conversion.onnx_to_nnef import Converter return Converter(custom_transforms=custom_transforms, custom_functions=custom_functions, custom_shapes=custom_shapes, infer_shapes=bool(custom_shapes), mirror_unsupported=mirror_unsupported, keep_io_names=keep_io_names, io_transpose=io_transpose) elif input_format == 'nnef' and (output_format == 'onnx' or output_format == 'caffe2'): from .conversion.nnef_to_onnx import Converter return Converter(custom_transforms=custom_transforms, custom_functions=custom_functions, mirror_unsupported=mirror_unsupported) else: return None def get_optimizer(format, custom_optimizers=None, dequantize=False): if format == 'nnef': from .optimization.nnef_optimizer import Optimizer return Optimizer(custom_optimizers=custom_optimizers, dequantize=dequantize) elif format == 'tf': from .optimization.tf_optimizer import Optimizer return Optimizer(custom_optimizers=custom_optimizers) elif format == 'tflite': from .optimization.tflite_optimizer import Optimizer return Optimizer(custom_optimizers=custom_optimizers) elif format == 'onnx': 
from .optimization.onnx_optimizer import Optimizer return Optimizer(custom_optimizers=custom_optimizers) else: return None def get_custom_converters(module_names): CUSTOM_TRANSFORMS = "CUSTOM_TRANSFORMS" transforms = {} functions = {} for module_name in module_names: module = importlib.import_module(module_name) if hasattr(module, CUSTOM_TRANSFORMS): transforms.update(getattr(module, CUSTOM_TRANSFORMS)) functions.update(Converter.find_public_functions(module)) return transforms, functions def get_custom_shapes(module_names): CUSTOM_SHAPES = "CUSTOM_SHAPES" shapes = {} for module_name in module_names: module = importlib.import_module(module_name) if hasattr(module, CUSTOM_SHAPES): shapes.update(getattr(module, CUSTOM_SHAPES)) return shapes def get_custom_fragments(module_names): CUSTOM_FRAGMENTS = "CUSTOM_FRAGMENTS" fragments = {} for module_name in module_names: module = importlib.import_module(module_name) if hasattr(module, CUSTOM_FRAGMENTS): fragments.update(getattr(module, CUSTOM_FRAGMENTS)) return fragments def get_custom_optimizers(module_names): CUSTOM_OPTIMIZERS = "CUSTOM_OPTIMIZERS" optimizers = {} for module_name in module_names: module = importlib.import_module(module_name) if hasattr(module, CUSTOM_OPTIMIZERS): optimizers.update(getattr(module, CUSTOM_OPTIMIZERS)) return optimizers def needs_conversion(input_format, output_format): if input_format == 'caffe2' and output_format == 'onnx': return False elif input_format == 'onnx' and output_format == 'caffe2': return False elif input_format == 'caffe' and (output_format == 'onnx' or output_format == 'caffe2'): return False else: return input_format != output_format def check_nan_or_inf(graph, which): valid = True for tensor in graph.tensors: if tensor.data is not None: if np.any(np.isnan(tensor.data)): print("{} graph contains nan in tensor '{}'".format(which, tensor.name)) valid = False if np.any(np.isinf(tensor.data)): print("{} graph contains inf in tensor '{}'".format(which, tensor.name)) valid = 
False for op in graph.operations: for key, value in six.iteritems(op.attribs): if isinstance(value, np.ndarray) and np.issubdtype(value.dtype.type, np.floating): if np.any(np.isnan(value)): print("{} graph contains nan in attribute '{}' of operator '{}'".format(which, key, op.type) + " named '{}'".format(op.name) if op.name is not None else "") valid = False if np.any(np.isinf(value)): print("{} graph contains inf in attribute '{}' of operator '{}'".format(which, key, op.type) + " named '{}'".format(op.name) if op.name is not None else "") valid = False return valid def main(args): io_transpose = False if args.io_transpose is None else True if len(args.io_transpose) == 0 else args.io_transpose custom_transforms, custom_functions = get_custom_converters(args.custom_converters) \ if args.custom_converters is not None else (None, None) custom_shapes = get_custom_shapes(args.custom_shapes) or {} if args.custom_shapes is not None else {} converter = None if needs_conversion(args.input_format, args.output_format): converter = get_converter(args.input_format, args.output_format, io_transpose, custom_transforms, custom_functions, custom_shapes, args.mirror_unsupported, args.keep_io_names) if converter is None: print("Unsupported conversion: {} to {}".format(args.input_format, args.output_format)) return -1 decomposed = converter.decomposed_operations() if converter else [] fragments = converter.defined_operations() if converter else {} dependencies = converter.defined_operation_dependencies() if converter else {} if args.decompose is not None: decomposed += args.decompose if converter is not None: custom_shapes.update(converter.defined_shapes()) if args.custom_fragments is not None: fragments.update(get_custom_fragments(args.custom_fragments)) reader = get_reader(args.input_format, decomposed=decomposed, fold_constants=args.fold_constants, custom_shapes=custom_shapes) if reader is None: print("Unsupported input-format: {}".format(args.input_format)) return -1 writer = 
get_writer(args.output_format, fragments=fragments, fragment_dependencies=dependencies, generate_fragments=args.generate_custom_fragments, annotate_shapes=args.annotate_shapes, compression=args.compress) if writer is None: print("Unsupported output-format: {}".format(args.output_format)) return -1 default_output_model = args.input_model + '.' + (args.output_format if args.output_format != 'tf' else 'pb') reader_kwargs = {} if args.input_shapes is not None: input_shapes = eval(args.input_shapes) if not isinstance(input_shapes, dict) or not all(isinstance(name, str) and isinstance(shape, tuple) for name, shape in six.iteritems(input_shapes)): print("'Input-shape' must be a dict of strings to tuples") return -1 reader_kwargs['input_shapes'] = input_shapes try: graph = reader(args.input_model, **reader_kwargs) if not check_nan_or_inf(graph, 'Input'): return -1 if args.input_names is not None or args.output_names is not None: not_found_names = [] if args.input_names is not None: input_names = set(args.input_names) inputs = [tensor for tensor in graph.tensors if tensor.name in input_names] if len(inputs) != len(input_names): found_names = [tensor.name for tensor in inputs] not_found_names.append([input_name for input_name in input_names if input_name not in found_names]) else: graph.inputs = inputs if args.output_names is not None: output_names = set(args.output_names) outputs = [tensor for tensor in graph.tensors if tensor.name in output_names] if len(outputs) != len(output_names): found_names = [tensor.name for tensor in outputs] not_found_names.append([output_name for output_name in output_names if output_name not in found_names]) else: graph.outputs = outputs if len(not_found_names) > 0: print("Could not find tensor(s) in graph: {}".format(not_found_names)) return -1 utils.remove_unreachable(graph) optimizer = get_optimizer(args.input_format) if optimizer: graph = optimizer(graph, only_required=True) if not check_nan_or_inf(graph, 'Optimized input'): return -1 if 
args.static_only: if not utils.remove_dynamic(graph): print("Conversion is called with --static-only but model contains dynamic inputs, " "which would result in an empty model") return -1 utils.remove_unreachable(graph) if converter: graph.sort() graph = converter(graph) if not check_nan_or_inf(graph, 'Converted'): return -1 tensor_mapping = converter.tensor_mapping() if args.tensor_mapping is not None and converter else None if args.optimize: custom_optimizers = get_custom_optimizers(args.custom_optimizers) if args.custom_optimizers is not None else None optimizer = get_optimizer(args.output_format, custom_optimizers=custom_optimizers, dequantize=args.dequantize) if optimizer: tensor_lookup = {tensor.name: tensor for tensor in graph.tensors if tensor.name is not None} \ if args.tensor_mapping is not None else None graph = optimizer(graph) if not check_nan_or_inf(graph, 'Optimized output'): return -1 if args.tensor_mapping is not None: if converter: tensor_mapping = {src: tensor_lookup[dst].name for src, dst in six.iteritems(tensor_mapping) if tensor_lookup[dst].graph is graph} else: tensor_mapping = {name: tensor.name for name, tensor in six.iteritems(tensor_lookup) if tensor.graph is graph} writer(graph, args.output_model or default_output_model) print("Written '{}'".format(args.output_model or default_output_model)) if args.tensor_mapping is not None: with open(args.tensor_mapping, 'w') as file: json.dump(tensor_mapping, file, indent=4) print("Written '{}'".format(args.tensor_mapping)) return 0 except IOError as e: print(e) return -1 except ConversionError as e: print(e) if e.details: for detail in e.details: print(detail) return -1 if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--input-model', type=str, required=True, help='The input model') parser.add_argument('--output-model', type=str, default=None, help='The output model') parser.add_argument('--input-format', type=str, required=True, choices=['tf', 'tflite', 'onnx', 
'nnef', 'caffe2', 'caffe'], help='The format of the input model') parser.add_argument('--output-format', type=str, required=True, choices=['tf', 'tflite', 'onnx', 'nnef', 'caffe2'], help='The format of the output model') parser.add_argument('--input-shapes', type=str, default=None, help='The (dict of) shape(s) to use for input(s).') parser.add_argument('--io-transpose', type=str, nargs='*', default=None, help='The inputs/outputs to transpose') parser.add_argument('--fold-constants', action='store_true', help='Enable folding of constant ops') parser.add_argument('--optimize', action='store_true', help='Turn on optimization of resulting NNEF model') parser.add_argument('--dequantize', action='store_true', help='Dequantize the weights of a quantized network and omit quantization parameters') parser.add_argument('--custom-converters', type=str, nargs='+', help='Module(s) containing custom converter code') parser.add_argument('--custom-shapes', type=str, nargs='+', help='Module(s) containing custom shape inference code (when converting to/from NNEF)') parser.add_argument('--custom-fragments', type=str, nargs='+', help='Module(s) containing custom fragment code (when converting to NNEF)') parser.add_argument('--custom-optimizers', type=str, nargs='+', help='Module(s) containing custom optimizer code (when converting to NNEF)') parser.add_argument('--mirror-unsupported', action='store_true', help='Enable mirror-conversion of unsupported operations') parser.add_argument('--generate-custom-fragments', action='store_true', help='Enable automatic generation of fragments for custom operations') parser.add_argument('--keep-io-names', action='store_true', help='Keep the names of model inputs/outputs if possible') parser.add_argument('--decompose', type=str, nargs='*', default=None, help='Names of operators to be decomposed by NNEF parser') parser.add_argument('--input-names', type=str, nargs='+', help='Names of input tensor where the graph is cut before conversion') 
parser.add_argument('--output-names', type=str, nargs='+', help='Names of output tensor where the graph is cut before conversion') parser.add_argument('--static-only', action='store_true', help='Only convert static part of the graph, for which tensor shapes are known') parser.add_argument('--tensor-mapping', type=str, nargs='?', default=None, const='tensor_mapping.json', help='Export mapping of tensor names from input to output model') parser.add_argument('--annotate-shapes', action='store_true', help='Add tensor shapes as comments to NNEF output model') parser.add_argument('--compress', type=int, nargs='?', default=None, const=1, help='Compress output NNEF folder at the given compression level') exit(main(parser.parse_args())) ================================================ FILE: nnef_tools-pyproject/nnef_tools/execute.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from .utils import stdio from .interpreter import Statistics from collections import namedtuple import importlib import argparse import numpy as np import json import nnef import six import sys import os _onnx_dtype_to_numpy = { "tensor(float)": np.float32, "tensor(double)": np.float64, "tensor(int8)": np.int8, "tensor(int16)": np.int16, "tensor(int32)": np.int32, "tensor(int64)": np.int64, "tensor(uint8)": np.uint8, "tensor(uint16)": np.uint16, "tensor(uint32)": np.uint32, "tensor(uint64)": np.uint64, "tensor(bool)": np.bool_, } _nnef_dtype_to_numpy = { 'scalar': np.float32, 'integer': np.int32, 'logical': np.bool_, } _numpy_dtype_remap = { np.short: np.int64, np.longlong: np.int64, np.ushort: np.uint64, np.uint: np.uint64, np.ulonglong: np.uint64, np.double: np.float64, np.longdouble: np.float64, } def _is_lambda(v): LAMBDA = lambda: 0 return isinstance(v, type(LAMBDA)) and v.__name__ == LAMBDA.__name__ def uniform(min=0, max=1): return lambda shape: np.random.uniform(min, max, shape) def normal(mean=0, std=1): return lambda shape: np.random.normal(mean, std, shape) def needs_transpose(io_transpose, name): return io_transpose is not None and (len(io_transpose) == 0 or name in io_transpose) def transpose_channels_last_to_first(x): rank = len(x.shape) return np.transpose(x, axes=[0, rank - 1] + list(range(1, rank - 1))) def transpose_channels_first_to_last(x): rank = len(x.shape) return np.transpose(x, axes=[0] + list(range(2, rank)) + [1]) def read_input(file, name, shape, dtype, transpose): data = nnef.read_tensor(file) any_batch = shape[0] == 0 offset = int(any_batch) if tuple(data.shape[offset:]) != tuple(shape[offset:]): raise ValueError("Mismatch between declared and read shape for input '{}'; {} vs {}" .format(name, data.shape, shape)) if data.dtype != dtype: raise ValueError("Mismatch between declared and read dtype for input '{}'; {} vs {}" .format(name, data.dtype, dtype)) return transpose_channels_first_to_last(data) if transpose else data def 
compute_statistics(array): if array.size == 0: return Statistics(num=0, min=0.0, max=0.0, sum=0.0, ssum=0.0) return Statistics( num=array.size, min=float(np.min(array)), max=float(np.max(array)), sum=float(np.sum(array)), ssum=float(np.sum(array * array)), ) class RandomInputSource: def __init__(self, distribution): self._distribution = distribution def __call__(self, name, shape, dtype): return self._distribution(shape).astype(dtype) class StreamInputSource: def __init__(self, stream, io_transpose): self._stream = stream self._io_transpose = io_transpose def __call__(self, name, shape, dtype): return read_input(self._stream, name, shape, dtype, transpose=needs_transpose(self._io_transpose, name)) class FileInputSource: def __init__(self, folder, io_transpose): self._folder = folder self._io_transpose = io_transpose def __call__(self, name, shape, dtype): with open(os.path.join(self._folder, name + '.dat')) as file: return read_input(file, name, shape, dtype, transpose=needs_transpose(self._io_transpose, name)) TensorInfo = namedtuple('TensorInfo', ['name', 'shape', 'dtype']) class Executor: def input_info(self): raise NotImplementedError() def output_info(self): raise NotImplementedError() def tensor_info(self): raise NotImplementedError() def __call__(self, inputs, output_names=None, collect_statistics=False): raise NotImplementedError() class TFExecutor(Executor): def __init__(self, model_path): try: import tensorflow.compat.v1 as tf except ImportError: import tensorflow as tf from .io.tf.graphdef.protobuf import GraphDef self.Session = tf.Session graph_def = GraphDef() with open(model_path, 'rb') as file: graph_def.ParseFromString(file.read()) self.graph = tf.Graph() with self.graph.as_default(): tf.import_graph_def(graph_def, name='') ops = self.graph.get_operations() consumed = {tensor for op in ops for tensor in op.inputs} self.inputs = [op.outputs[0] for op in ops if op.type == 'Placeholder'] self.outputs = [tensor for op in ops if len(op.inputs) for tensor 
in op.outputs if tensor not in consumed and tensor.name.endswith(':0')] def input_info(self): return [TensorInfo(tensor.name, tuple(tensor.shape.as_list()), tensor.dtype.as_numpy_dtype) for tensor in self.inputs] def output_info(self): return [TensorInfo(tensor.name, tuple(tensor.shape.as_list()), tensor.dtype.as_numpy_dtype) for tensor in self.outputs] def tensor_info(self): tensors = [tensor for op in self.graph.get_operations() for tensor in op.outputs] return [TensorInfo(tensor.name, tuple(tensor.shape.as_list()), tensor.dtype.as_numpy_dtype) for tensor in tensors] def __call__(self, inputs, output_names=None, collect_statistics=False): ops = self.graph.get_operations() if output_names is not None: tensor_names = {tensor.name for op in ops for tensor in op.outputs} invalid = {name for name in output_names if name not in tensor_names} if len(invalid): raise ValueError('Invalid tensor name(s): {}'.format(invalid)) outputs = {tensor.name: tensor for op in ops for tensor in op.outputs if tensor.name in output_names} else: outputs = {tensor.name: tensor for tensor in self.outputs} if collect_statistics: tensors = {tensor.name: tensor for op in ops for tensor in op.outputs if tensor.name.endswith(':0')} with self.Session(graph=self.graph) as sess: values = sess.run(tensors, feed_dict=inputs) outputs = {name: values[name] for name in outputs} stats = {} for name, array in six.iteritems(values): stats[name] = compute_statistics(array) return outputs, stats else: with self.Session(graph=self.graph) as sess: outputs = sess.run(outputs, feed_dict=inputs) return outputs, None class TFLiteExecutor(Executor): def __init__(self, model_path): try: import tensorflow.compat.v1 as tf except ImportError: import tensorflow as tf self.interpreter = tf.lite.Interpreter(model_path=model_path) self.interpreter.allocate_tensors() def input_info(self): return [TensorInfo(tensor['name'], tensor['shape'], tensor['dtype']) for tensor in self.interpreter.get_input_details()] def 
output_info(self): return [TensorInfo(tensor['name'], tensor['shape'], tensor['dtype']) for tensor in self.interpreter.get_output_details()] def tensor_info(self): return [TensorInfo(tensor['name'], tensor['shape'], tensor['dtype']) for tensor in self.interpreter.get_tensor_details()] def __call__(self, inputs, output_names=None, collect_statistics=False): for tensor in self.interpreter.get_input_details(): self.interpreter.set_tensor(tensor['index'], inputs[tensor['name']]) self.interpreter.invoke() if output_names is not None: tensor_names = {tensor['name'] for tensor in self.interpreter.get_tensor_details()} invalid = {name for name in output_names if name not in tensor_names} if len(invalid): raise ValueError('Invalid tensor name(s): {}'.format(invalid)) outputs = {tensor['name']: self.interpreter.get_tensor(tensor['index']) for tensor in self.interpreter.get_tensor_details() if tensor['name'] in output_names} else: outputs = {tensor['name']: self.interpreter.get_tensor(tensor['index']) for tensor in self.interpreter.get_output_details()} stats = {tensor['name']: compute_statistics(self.interpreter.get_tensor(tensor['index'])) for tensor in self.interpreter.get_tensor_details()} if collect_statistics else None return outputs, stats class ONNXExecutor(Executor): def __init__(self, model_path, require_intermediates=False): import onnxruntime options = onnxruntime.SessionOptions() options.inter_op_num_threads = 1 options.intra_op_num_threads = 1 options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_DISABLE_ALL self.session = onnxruntime.InferenceSession(model_path, sess_options=options, providers=['CPUExecutionProvider']) self.inputs = [TensorInfo(tensor.name, tensor.shape, _onnx_dtype_to_numpy[tensor.type]) for tensor in self.session.get_inputs()] self.outputs = [TensorInfo(tensor.name, tensor.shape, _onnx_dtype_to_numpy[tensor.type]) for tensor in self.session.get_outputs()] if require_intermediates: import onnx from onnx.shape_inference 
import infer_shapes model = onnx.load_model(model_path) model = infer_shapes(model) for info in model.graph.value_info: output_info = model.graph.output.add() output_info.ParseFromString(info.SerializeToString()) self.session = onnxruntime.InferenceSession(model.SerializeToString(), sess_options=options, providers=['CPUExecutionProvider']) def input_info(self): return self.inputs def output_info(self): return self.outputs def tensor_info(self): return None def __call__(self, inputs, output_names=None, collect_statistics=False): if output_names is not None: inputs_as_outputs = {name: inputs[name] for name in output_names if name in inputs} output_names = [name for name in output_names if name not in inputs] else: output_names = [output.name for output in self.outputs] inputs_as_outputs = {} if collect_statistics: original_outputs = [output.name for output in self.outputs] fetch_names = [tensor.name for tensor in self.session.get_outputs() if tensor.name not in original_outputs] + original_outputs values = self.session.run(fetch_names, inputs) outputs = {name: value for name, value in zip(fetch_names, values) if name in set(output_names)} stats = {} for name, value in zip(fetch_names, values): stats[name] = compute_statistics(value) else: values = self.session.run(output_names, inputs) outputs = {name: value for name, value in zip(output_names, values)} stats = None outputs.update(inputs_as_outputs) return outputs, stats class NNEFExecutor(Executor): def __init__(self, model_path, custom_operators, decomposed): from .interpreter.pytorch import Interpreter self.interpreter = Interpreter(model_path, custom_operators=custom_operators, decomposed=decomposed) def input_info(self): return [TensorInfo(tensor.name, tensor.shape, _nnef_dtype_to_numpy[tensor.dtype]) for tensor in self.interpreter.input_details()] def output_info(self): return [TensorInfo(tensor.name, tensor.shape, _nnef_dtype_to_numpy[tensor.dtype]) for tensor in self.interpreter.output_details()] def 
tensor_info(self): return [TensorInfo(tensor.name, tensor.shape, _nnef_dtype_to_numpy[tensor.dtype]) for tensor in self.interpreter.tensor_details()] def __call__(self, inputs, output_names=None, collect_statistics=False): inputs = [inputs[tensor.name] for tensor in self.interpreter.input_details()] if collect_statistics: return self.interpreter(inputs, output_names, collect_statistics) else: return self.interpreter(inputs, output_names, collect_statistics), None def get_executor(format, model_path, require_intermediates, custom_operators, decomposed): if format == 'tf': return TFExecutor(model_path) elif format == 'tflite': return TFLiteExecutor(model_path) elif format == 'onnx': return ONNXExecutor(model_path, require_intermediates) elif format == 'nnef': return NNEFExecutor(model_path, custom_operators, decomposed) else: return None def write_nnef_tensor(filename, value): with open(filename, 'wb') as file: dtype = _numpy_dtype_remap.get(value.dtype.type) if dtype is not None: value = value.astype(dtype) nnef.write_tensor(file, value) def write_statistics(filename, statistics): statistics = {name: {'min': stats.min, 'max': stats.max, 'mean': stats.mean(), 'std': stats.std()} for name, stats in six.iteritems(statistics)} with open(filename, 'w') as file: json.dump(statistics, file, indent=4) def get_custom_operators(module_names): CUSTOM_OPERATORS = "CUSTOM_OPERATORS" operators = {} for module_name in module_names: module = importlib.import_module(module_name) if hasattr(module, CUSTOM_OPERATORS): operators.update(getattr(module, CUSTOM_OPERATORS)) return operators def batched_info(tensor_info, batch_size): for info in tensor_info: if info.shape[0] != batch_size and info.shape[0] != 1 and not isinstance(info.shape[0], str): raise ValueError('invalid input shape {} for batch size {}'.format(info.shape, batch_size)) return [TensorInfo(name=info.name, shape=(batch_size, *info.shape[1:]), dtype=info.dtype) for info in tensor_info] def 
accumulate_statistics(global_stats, local_stats): if global_stats is None: return local_stats for name, stats in six.iteritems(local_stats): global_stats[name] += stats return global_stats def main(args): if args.input_path is not None: source = FileInputSource(args.input_path, args.io_transpose) elif args.random is not None: if args.batch_size == 0: print('batch-size must not be 0 when inputs are random generated', file=sys.stderr) return -1 try: distribution = eval(args.random) if not _is_lambda(distribution): distribution = distribution() source = RandomInputSource(distribution) except Exception as e: print("Could not evaluate distribution: " + str(e), file=sys.stderr) return -1 else: if not stdio.is_stdin_piped(): print('Input must be piped', file=sys.stderr) return -1 stdio.set_stdin_to_binary() source = StreamInputSource(sys.stdin, args.io_transpose) output_names = eval(args.output_names) if args.output_names is not None and args.output_names != "*" else args.output_names custom_operators = get_custom_operators(args.custom_operators) if args.custom_operators is not None else None if args.random is not None and args.seed is not None: np.random.seed(args.seed) collect_statistics = args.statistics is not None try: executor = get_executor(args.format, args.model, collect_statistics, custom_operators, args.decompose) if isinstance(output_names, dict): fetch_names = output_names.keys() elif output_names == "*": tensors = executor.tensor_info() fetch_names = [info.name for info in tensors] if tensors is not None else None else: fetch_names = output_names input_info = executor.input_info() if args.batch_size is not None: input_info = batched_info(input_info, args.batch_size) output_info = executor.output_info() inputs = {info.name: source(info.name, info.shape, info.dtype) for info in input_info} batch_size = args.batch_size if batch_size == 0: batch_size = next(iter(six.itervalues(inputs))).shape[0] if not all(input.shape[0] == batch_size for input in 
six.itervalues(inputs)): print('All inputs must have the same batch-size', file=sys.stderr) return -1 if batch_size is not None and batch_size != 1: slices = {name: [] for name in fetch_names} if fetch_names is not None else \ {info.name: [] for info in output_info} stats = None for k in range(batch_size): slice_inputs = {name: np.expand_dims(data[k], axis=0) for name, data in six.iteritems(inputs)} slice_outputs, slice_stats = executor(slice_inputs, fetch_names, collect_statistics) for name, data in six.iteritems(slice_outputs): slices[name].append(data) if collect_statistics: stats = accumulate_statistics(stats, slice_stats) outputs = {name: np.concatenate(items, axis=0) for name, items in six.iteritems(slices)} else: outputs, stats = executor(inputs, fetch_names, collect_statistics) except ValueError as e: print(e, file=sys.stderr) return -1 for name, value in six.iteritems(outputs): if needs_transpose(args.io_transpose, name): outputs[name] = transpose_channels_last_to_first(value) if isinstance(output_names, dict): outputs = {output_names[name]: value for name, value in six.iteritems(outputs)} if args.tensor_mapping is not None: with open(args.tensor_mapping) as file: tensor_mapping = json.load(file) if stats is not None: stats = {tensor_mapping.get(key, key): value for key, value in six.iteritems(stats)} if stats is not None: write_statistics(args.statistics, stats) print('Written {}'.format(args.statistics)) if args.output_path is not None: for name, value in six.iteritems(outputs): filename = os.path.join(args.output_path, name + ".dat") write_nnef_tensor(filename, value) print('Written {}'.format(filename)) else: if not stdio.is_stdout_piped(): if collect_statistics: return 0 print('Output must be piped', file=sys.stderr) return -1 stdio.set_stdout_to_binary() for name, value in six.iteritems(outputs): nnef.write_tensor(sys.stdout, value) return 0 if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('model', type=str, help='The 
model to execute') parser.add_argument('--format', type=str, required=True, choices=['tf', 'tflite', 'onnx', 'nnef'], help='The format of the model') parser.add_argument('--random', type=str, default=None, help='Random distribution for input generation') parser.add_argument('--seed', type=int, default=None, help='Random seed for input generation') parser.add_argument('--input-path', type=str, default=None, help='Folder to read inputs from') parser.add_argument('--output-path', type=str, default=None, help='Folder to save outputs into') parser.add_argument('--output-names', type=str, default=None, help='The set (dict) of tensor names (to file names) considered as outputs to be saved. ' 'Use * to save all tensors') parser.add_argument('--io-transpose', type=str, nargs='*', default=None, help='The inputs/outputs to transpose from channels last to channels first dimension order') parser.add_argument('--decompose', type=str, nargs='*', default=None, help='Names of operators to be decomposed by NNEF parser') parser.add_argument('--statistics', type=str, nargs='?', default=None, const='stats.json', help='Calculate activations statistics and save to output path in json format') parser.add_argument('--custom-operators', type=str, nargs='+', default=None, help='Module(s) containing custom operator code') parser.add_argument('--batch-size', type=int, default=None, help='Specify batch-size for single-batch models') parser.add_argument('--tensor-mapping', type=str, default=None, help='Use mapping of tensor names for statistics') exit(main(parser.parse_args())) ================================================ FILE: nnef_tools-pyproject/nnef_tools/execution/__init__.py ================================================ # Copyright (c) 2017-2025 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: nnef_tools-pyproject/nnef_tools/execution/tvm/__init__.py ================================================ # Copyright (c) 2017-2025 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: nnef_tools-pyproject/nnef_tools/execution/tvm/nnef_frontend/__init__.py ================================================ # Copyright (c) 2017-2025 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
================================================ FILE: nnef_tools-pyproject/nnef_tools/execution/tvm/nnef_frontend/relax/__init__.py ================================================ # Copyright (c) 2017-2025 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ NNEF frontend for converting graphs into Relax IRModels. """ import tvm from packaging import version ver = version.parse(tvm.__version__) if ver.minor < 20: raise ImportError(f"TVM version 0.20 or higher is required, but found {tvm.__version__}") from .nnef_frontend import from_nnef ================================================ FILE: nnef_tools-pyproject/nnef_tools/execution/tvm/nnef_frontend/relax/nnef_frontend.py ================================================ # Copyright (c) 2017-2025 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""NNEF: Neural Network Exchange Format frontend for TVM relay""" import os import typing import nnef import numpy as np import tvm from tvm import relax from tvm.ir import IRModule from tvm.relax import expr as tvm_expr from .nnef_ops import _get_converter_map def get_type(elem_type: str): """ Gives numpy style type for nnef primitive types, uses x32 versions. :param elem_type: string, (scalar, integer, logical, string) :return: returns numpy dtype equivalent (float32, int32, bool, string) """ if elem_type == "scalar": return "float32" if elem_type == "integer": return "int32" if elem_type == "logical": return "bool" if elem_type == "string": return "string" raise TypeError(f'Type "{elem_type}" is not implemented') # Converter class class NNEFConverter: """ Helper class for class level attributes, for conversion of NNEF model. Public method to use is from_nnef. Parameters ---------- keep_params_in_input : bool, optional If this parameter is true, the nnef variables will be converted to constants, and be embedded into the relay model, allowing optimizations at compile time. If False the params will have to be added as inputs, the model can't load them automatically """ def __init__(self, keep_params_in_input=False): self._nodes = {} self._consts = {} self._inputs = {} self._num_inputs = 0 self._params = {} self._num_params = 0 self._keep_params_in_input = keep_params_in_input self._bb = relax.BlockBuilder() def from_nnef(self, graph: nnef.Graph) -> tvm.IRModule: """ Convert an NNEF model into an equivalent TVM Relay IRModule. Parameters ---------- graph : nnef.Graph An NNEF Graph object that was imported with nnef.load_graph. Shapes should be inferred by nnef.infer_shapes on graph beforehand. 
Returns ------- mod : tvm.IRModule The relay module for compilation params : dict of str to tvm.nd.NDArray The parameter dictionary to be used """ with self._bb.function("main"): with self._bb.dataflow(): self._parse_inputs(graph) self._construct_nodes(graph) outputs = [self._nodes[n] for n in graph.outputs] outputs = outputs[0] if len(outputs) == 1 else tvm_expr.Tuple(outputs) output_var = self._bb.emit_output(outputs) func_attrs = {"num_input": self._num_inputs} input_list = [value for value in self._inputs.values() if isinstance(value, relax.Var)] if self._keep_params_in_input and self._params: param_var_list, param_value_list = map(list, zip(*self._params.values())) input_list.append(param_var_list) func_attrs["params"] = param_value_list self._bb.emit_func_output(output_var, input_list) relax_mod = self._bb.get() relax_mod["main"] = relax_mod["main"].with_attrs(func_attrs) return relax_mod def _parse_inputs(self, graph): """Save inputs into class from inputs attrib of graph""" for inp in graph.inputs: self._num_inputs += 1 tensor = graph.tensors[inp] self._nodes[inp] = self._new_var(inp, shape=tensor.shape, dtype=get_type(tensor.dtype)) self._inputs[inp] = self._nodes[inp] def _construct_nodes(self, graph): """Construct TVM relay calls from every operation of the nnef graph""" for op in graph.operations: if op.name == "external": # externals are handled as input, not needed, # but nnef treats them as operations as well continue if op.name == "variable": self._set_variable(graph.tensors[op.outputs["output"]]) elif op.name == "constant": self._set_const(op) else: # every other operator can be grouped more easily, # as it does not need self for conversion self._set_operator(op) def _set_operator(self, node): self._set_literal_inputs(node) inputs = [] for ink, inv in node.inputs.items(): if isinstance(inv, list): for i, linv in enumerate(inv): if linv in self._nodes.keys(): inputs.append(self._nodes[linv]) else: # handle literal inputs name = 
f"{node.name}_{ink}_{i}" assert name in self._nodes, f"{name} has not been properly handled" inputs.append(self._nodes[name]) else: if inv in self._nodes.keys(): inputs.append(self._nodes[inv]) else: # handle literal inputs name = f"{node.name}_{ink}" assert name in self._nodes, f"{name} has not been properly handled" inputs.append(self._nodes[name]) converted = self._get_relay_op_call(node.name, inputs, node.attribs) converted = self._bb.normalize(converted) if not isinstance(converted.struct_info, relax.TupleStructInfo): outputs_num = 1 else: outputs_num = len(converted.struct_info.fields) if outputs_num == 1: # check if the singular ret val is a list of only one element ret_val = list(node.outputs.values())[0] if isinstance(ret_val, list): self._nodes[ret_val[0]] = converted else: self._nodes[ret_val] = converted else: for i, out in zip(range(outputs_num), node.outputs["values"]): self._nodes[out] = converted[i] def _set_const(self, node): """Create a tvm.relay.Constant from a nnef constant tensor""" name = node.outputs["output"] data = node.attribs["value"] shape = node.attribs["shape"] if len(data) == 1: data = np.full(shape, data, dtype=get_type(node.dtype)) else: data = np.array(data, dtype=get_type(node.dtype)) self._consts[name] = tvm_expr.const(data) self._nodes[name] = self._consts[name] def _set_variable(self, tensor): """Create a tvm.relay.Var (or Constant) from a nnef variable tensor""" tens_data = tensor.data if not self._keep_params_in_input: self._consts[tensor.name] = tvm_expr.const(tens_data) self._nodes[tensor.name] = self._consts[tensor.name] else: var = self._new_var(tensor.name, shape=tensor.shape, dtype=get_type(tensor.dtype)) self._nodes[tensor.name] = var self._params[tensor.name] = (var, tvm.nd.array(tens_data)) def _set_literal_inputs(self, node): """Checks if node has literal inputs and saves them into a tvm.relay.Constant. 
naming as {node.name}_{input field name}""" for field_name, value in node.inputs.items(): if isinstance(value, list): for v in value: if v not in self._nodes.keys(): self._nodes[f"{node.name}_{v}"] = tvm_expr.const(v) else: if value not in self._nodes.keys(): self._nodes[f"{node.name}_{field_name}"] = tvm_expr.const(value) def _get_relay_op_call(self, name, inputs, attrs): """Returns the tvm.Call equivalent to the nnef operator""" conv_map = _get_converter_map() if name in conv_map: call = conv_map[name](self._bb, *inputs, **attrs) else: # This error is reached if NNEF is expanded with additional ops raise NotImplementedError( f"Operator {name} is not implemented, as {name} has been added after 1.0.5." ) return call def _infer_type(self, val): if isinstance(val, bool): return "bool", True if isinstance(val, float): return "float32", True if isinstance(val, int): return "int32", True if isinstance(val, str): # the string vals can be names of nodes in some of the cases if isinstance(val, nnef.Identifier): if val in self._nodes.keys(): node = self._nodes[val] if isinstance(node, tvm_expr.Var): return node.type_annotation.dtype, False if isinstance(node, tvm_expr.Constant): return node.data.dtype, False if isinstance(node, tvm_expr.Call): return node.checked_type.dtype, False raise Exception( f"{val} has not been loaded into the model " "but it should have been, as a var or call." ) return "string", True raise TypeError(f'Value "{val}" is not a recognized type') def _new_var(self, name, shape, dtype="float32"): return relax.Var( name_hint=name, struct_info=relax.TensorStructInfo(shape=shape, dtype=dtype), ) def from_nnef( model: typing.Union[str, os.PathLike, nnef.Graph], keep_params_in_input: bool = False, ) -> IRModule: """ Convert an NNEF model into an equivalent TVM Relay IRModule. 
Parameters ---------- model : os.PathLike or str or nnef.Graph Path to an NNEF model directory, containing the graph.nnef (and weight files) keep_params_in_input : bool, optional If this parameter is true, the nnef variables will be converted to constants, and be embedded into the relax model, allowing optimizations at compile time. If False the params will have to be added as inputs, the model can't load them automatically Returns ------- mod : tvm.IRModule The relay module for compilation params : dict of str to tvm.nd.NDArray The parameter dictionary to be used """ conv_clss = NNEFConverter(keep_params_in_input) if not isinstance(model, nnef.Graph): model = nnef.load_graph(model) # fills in the nnef graph's shape information nnef.infer_shapes(model) return conv_clss.from_nnef(graph=model) ================================================ FILE: nnef_tools-pyproject/nnef_tools/execution/tvm/nnef_frontend/relax/nnef_ops.py ================================================ # Copyright (c) 2017-2025 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """NNEF frontend converter helper funcs and ops""" import math import itertools from functools import reduce import numpy as np import tvm from tvm import relax from tvm.relax import expr as tvm_expr from tvm.relax import op as tvm_op from tvm import topi # Base methods def dimension_picker(prefix, kernel_shape, suffix=""): """ Returns the correct name for nth dimensional operator. 
Uses the "kernel_shape" attribute.\n E.g.call: dimension_picker(op_name)(attr) :param prefix: the name of the operator (e.g. conv) :param kernel_shape: shape of the tensor to fit the operation :param suffix: optional suffix for ops :return: "prefix`n`d" where n is the correct dimension for the kernel """ rank = len(kernel_shape[2:]) if rank == 1: return prefix + "1d" + suffix if rank == 2: return prefix + "2d" + suffix if rank == 3: return prefix + "3d" + suffix op_name = prefix + "1d/2d/3d" msg = f"Only 1D, 2D, and 3D kernels are supported for operator {op_name}." raise tvm.error.OpAttributeInvalid(msg) def _size_conv(size, rank): # window of size (DH)W is only possible when it is checked outside, # which is needed for alternative solution if rank == 3: if len(size) == 1: return size if len(size) == 3: assert ( size[0] == 1 and size[1] == 1 ), "Incorrect window dimensions, first two dimensions must be 1" return size[2] if rank == 4: if len(size) == 2: return size if len(size) == 4: assert ( size[0] == 1 and size[1] == 1 ), "Incorrect window dimensions, first two dimensions must be 1" return size[2:] if rank == 5: if len(size) == 3: return size if len(size) == 5: assert ( size[0] == 1 and size[1] == 1 ), "Incorrect window dimensions, first two dimensions must be 1" return size[2:] raise ValueError(f"Unexpected window size, got {len(size)}") def _stride_conv(stride, rank): if rank == 3: # {conv style} :: [s] -> [s] if len(stride) == 1: return stride # {pool style} :: [N, C, s] -> asrt N,C == 1; [s] if len(stride) == 3: assert ( stride[0] == 1 and stride[1] == 1 ), "Not supported stride dimensions, first two dimensions must be 1" return stride[2:] if rank == 4: # {conv style} :: [sh, sw] -> [sh, sw] if len(stride) == 2: return stride # {pool style} :: [N, C, sh, sw] -> asrt N,C == 1; [sh, sw] if len(stride) == 4: assert ( stride[0] == 1 and stride[1] == 1 ), "Not supported stride dimensions, first two dimensions must be 1" return stride[2:] if rank == 5: # {conv 
style} :: [sd, sh, sw] -> [sd, sh, sw] if len(stride) == 3: return stride # {pool style} :: [N, C, sd, sh, sw] -> asrt N,C == 1; [sd, sh, sw] if len(stride) == 5: assert ( stride[0] == 1 and stride[1] == 1 ), "Not supported stride dimensions, first two dimensions must be 1" return stride[2:] raise ValueError(f"Unexpected stride in {rank - 2}D, got {len(stride)}: {stride}") def _padding_conv(padding, rank, keepdims=False): if isinstance(padding[0], (tuple, list)): # 1D if rank == 3: # {conv style} :: [(l,r)] -> (l,r) if len(padding) == 1: return padding[0] if len(padding) == 3: # {pool style} :: [(batch),(channel),(l,r)] -> asrt N,C == 0, (l,r) if not keepdims: assert padding[0] == (0, 0) and padding[1] == (0, 0), ( "Incorrect padding. " "Padding on C,I dimensions not supported" ) return padding[2] # {sliding window style} :: [(batch),(channel),(l,r)] -> [(batch),(channel),(l,r)] else: return padding # 2D if rank == 4: # {conv style} :: [(u,d),(l,r)] -> (u, l, d, r) if len(padding) == 2: # change UDLR to ULDR padding, LC is faster here return [x[i] for i in [0, 1] for x in padding] if len(padding) == 4: # {pool style} :: [(batch size),(channel),(u,d),(l,r)] -> # -> asrt N,C == 0, (u, l, d, r) if not keepdims: assert padding[0] == (0, 0) and padding[1] == (0, 0), ( "Incorrect padding. " "Padding on C,I dimensions not supported" ) # itertools is faster than LC (slicing) return list(itertools.chain.from_iterable(zip(padding[2], padding[3]))) # {sliding window style} :: [(batch),(channel),(u,d),(l,r)] -> # -> [(batch),(channel),(u,d),(l,r)] else: return padding # 3D if rank == 5: # {conv style} :: [(f,b),(u,d),(l,r)] -> (f, u, l, b, d, r) if len(padding) == 3: # LC is faster return [x[i] for i in [0, 1] for x in padding] if len(padding) == 5: # {pool style} :: [(batch size),(channel),(f,b)(u,p),(l,r)] -> # -> asrt N,C == 0, (f, u, l, b, d, r) if not keepdims: assert padding[0] == (0, 0) and padding[1] == (0, 0), ( "Incorrect padding. 
" "Padding on C,I dimensions not supported" ) # itertools faster barely return list( itertools.chain.from_iterable(zip(padding[2], padding[3], padding[4])) ) # {s-w style} :: [(batch),(channel),(f,b),(u,d),(l,r)] -> # -> [(batch),(channel),(f,b),(u,d),(l,r)] else: return padding raise ValueError( f"Incorrect padding style for {rank - 2}D operand. Only length of {rank - 2}, {rank} " f"supported, got {len(padding)}: {padding}" ) raise ValueError("nnef should not have singular padding") def _calculate_nnef_padding(active_shape, strides, kernel_shape, dilation): """Ordering of nnef autopad and tvm autopad are sometimes different, this method calculates nnef like padding from dimensions Parameters ---------- active_shape the data dimensions strides the strides over the active dimensions kernel_shape the shape of the window, must have the same rank as active shape dilation the dilations over the active dimensions """ output = [(ui + (s - 1)) // s for ui, s in zip(active_shape, strides)] dilated = [(f - 1) * d + 1 for f, d in zip(kernel_shape, dilation)] total = [ max(0, (di - 1) * s + df - ui) for di, s, df, ui in zip(output, strides, dilated, active_shape) ] padding = [(pad // 2, (pad + 1) // 2) for pad in total] return padding def _calculate_nnef_padding_deconv(data_sh, strides, kernel_active_sh, dilation, output_shape): out_sh = output_shape[2:] if output_shape else [ui * s for ui, s in zip(data_sh, strides)] dilated = [(f - 1) * d + 1 for f, d in zip(kernel_active_sh[2:], dilation)] total = [ max(0, (di - 1) * s + df - ui) for di, s, df, ui in zip(data_sh, strides, dilated, out_sh) ] return total, out_sh def __unexpected_attrs(op, kwargs): raise NotImplementedError( f"{op} received unexpected attributes(s), possibly mismatched versions. 
" "Attributes(s) ignored: " + ", ".join(f"{k} := {v}" for k, v in kwargs.items()) ) # Conversion map, operator functions def _get_converter_map(): return { # Unary "copy": copy_converter, # arithmetic "neg": neg_converter, "rcp": rcp_converter, "exp": exp_converter, "log": log_converter, "sin": sin_converter, "cos": cos_converter, "tan": tan_converter, "sinh": sinh_converter, "cosh": cosh_converter, "tanh": tanh_converter, "asin": asin_converter, "acos": acos_converter, "atan": atan_converter, "asinh": asinh_converter, "acosh": acosh_converter, "atanh": atanh_converter, "abs": abs_converter, "sign": sign_converter, "not": not_converter, # logical "floor": floor_converter, # rounding "ceil": ceil_converter, "round": round_converter, # Binary "add": add_converter, # arithmetic "sub": sub_converter, "mul": mul_converter, "div": div_converter, "pow": pow_converter, "lt": lt_converter, # comparison "gt": gt_converter, "le": le_converter, "ge": ge_converter, "eq": eq_converter, "ne": ne_converter, "and": and_converter, # logical "or": or_converter, # select "select": select_converter, # simplifier "sqr": sqr_converter, "sqrt": sqrt_converter, "rsqr": rsqr_converter, "rsqrt": rsqrt_converter, "log2": log2_converter, "min": min_converter, "max": max_converter, "clamp": clamp_converter, # sliding-window "conv": conv_converter, "deconv": deconv_converter, "box": box_converter, "debox": debox_converter, "argmax_pool": ndop, "sample": ndop, "desample": ndop, "nearest_downsample": nearest_downsample_converter, "area_downsample": area_downsample_converter, "nearest_upsample": nearest_upsample_converter, "multilinear_upsample": multilinear_upsample_converter, # reduce "sum_reduce": sum_reduce_converter, "max_reduce": max_reduce_converter, "min_reduce": min_reduce_converter, "argmax_reduce": argmax_reduce_converter, "argmin_reduce": argmin_reduce_converter, "all_reduce": all_reduce_converter, "any_reduce": any_reduce_converter, "mean_reduce": mean_reduce_converter, # tensor shape 
"reshape": reshape_converter, "squeeze": squeeze_converter, "unsqueeze": unsqueeze_converter, "transpose": transpose_converter, "split": split_converter, "concat": concat_converter, "stack": stack_converter, "unstack": unstack_converter, "slice": slice_converter, "pad": pad_converter, "tile": tile_converter, # region-of-interest - not needed - not supported "avg_roi_pool": ndop, "max_roi_pool": ndop, "roi_resample": ndop, "avg_roi_align": ndop, "max_roi_align": ndop, # matrix multiplication "matmul": matmul_converter, # variables "update": ndop, # --- not used # Compound "sigmoid": sigmoid_converter, # activation "relu": relu_converter, "prelu": prelu_converter, "leaky_relu": leaky_relu_converter, "elu": elu_converter, "selu": selu_converter, "gelu": gelu_converter, "silu": silu_converter, "softmax": softmax_converter, "softplus": softplus_converter, "linear": linear_converter, # linear "separable_conv": separable_conv_converter, "separable_deconv": separable_deconv_converter, "max_pool_with_index": ndop, # pooling "max_pool": max_pool_converter, "avg_pool": avg_pool_converter, "rms_pool": rms_pool_converter, "local_response_normalization": local_response_normalization_converter, # normalization "local_mean_normalization": local_mean_normalization_converter, "local_variance_normalization": local_variance_normalization_converter, "local_contrast_normalization": local_contrast_normalization_converter, "l1_normalization": l1_normalization_converter, "l2_normalization": l2_normalization_converter, "batch_normalization": batch_normalization_converter, "min_max_linear_quantize": ndop, # quantization "zero_point_linear_quantize": ndop, "linear_quantize": ndop, "logarithmic_quantize": ndop, # MISC "copy_n": ndop, "add_n": ndop, "moments": ndop, } # pylint: disable=unused-argument # not implemented ops def ndop(*args, **kwargs): # print(args, kwargs) raise Exception("Not supported operator was called, please check for compatibility") # # Unary ops def 
copy_converter(bbuilder, data, **kwargs): """Copy converter""" if kwargs: __unexpected_attrs("copy", kwargs) return bbuilder.emit_te(topi.identity, data) def neg_converter(bbuilder, data, **kwargs): """Neg converter""" if kwargs: __unexpected_attrs("neg", kwargs) return relax.op.unary.negative(data) def rcp_converter(bbuilder, data, **kwargs): """Rcp converter""" if kwargs: __unexpected_attrs("rcp", kwargs) if isinstance(data, relax.Call): d_type = data.checked_type.dtype else: d_type = data.struct_info.dtype return div_converter(bbuilder, tvm_expr.const(1, dtype=d_type), data) def exp_converter(bbuilder, data, **kwargs): """Exp converter""" if kwargs: __unexpected_attrs("exp", kwargs) return relax.op.unary.exp(data) def log_converter(bbuilder, data, **kwargs): """Log converter""" if kwargs: __unexpected_attrs("log", kwargs) return relax.op.unary.log(data) def sin_converter(bbuilder, data, **kwargs): """Sin converter""" if kwargs: __unexpected_attrs("sin", kwargs) return relax.op.unary.sin(data) def cos_converter(bbuilder, data, **kwargs): """Cos converter""" if kwargs: __unexpected_attrs("cos", kwargs) return relax.op.unary.cos(data) def tan_converter(bbuilder, data, **kwargs): """Tan converter""" if kwargs: __unexpected_attrs("tan", kwargs) return relax.op.unary.tan(data) def sinh_converter(bbuilder, data, **kwargs): """Sinh converter""" if kwargs: __unexpected_attrs("sinh", kwargs) return relax.op.unary.sinh(data) def cosh_converter(bbuilder, data, **kwargs): """Cosh converter""" if kwargs: __unexpected_attrs("cosh", kwargs) return relax.op.unary.cosh(data) def tanh_converter(bbuilder, data, **kwargs): """Tanh converter""" if kwargs: __unexpected_attrs("tanh", kwargs) return relax.op.unary.tanh(data) def asin_converter(bbuilder, data, **kwargs): """Asin converter""" if kwargs: __unexpected_attrs("asin", kwargs) return relax.op.unary.asin(data) def acos_converter(bbuilder, data, **kwargs): """Acos converter""" if kwargs: __unexpected_attrs("acos", kwargs) return 
relax.op.unary.acos(data) def atan_converter(bbuilder, data, **kwargs): """Atan converter""" if kwargs: __unexpected_attrs("atan", kwargs) return relax.op.unary.atan(data) def asinh_converter(bbuilder, data, **kwargs): """Asinh converter""" if kwargs: __unexpected_attrs("asinh", kwargs) return relax.op.unary.asinh(data) def acosh_converter(bbuilder, data, **kwargs): """Acosh converter""" if kwargs: __unexpected_attrs("acosh", kwargs) return relax.op.unary.acosh(data) def atanh_converter(bbuilder, data, **kwargs): """Atanh converter""" if kwargs: __unexpected_attrs("atanh", kwargs) return relax.op.unary.atanh(data) def abs_converter(bbuilder, data, **kwargs): """Abs converter""" if kwargs: __unexpected_attrs("abs", kwargs) return relax.op.unary.abs(data) def sign_converter(bbuilder, data, **kwargs): """Sign converter""" if kwargs: __unexpected_attrs("sign", kwargs) return relax.op.unary.sign(data) def not_converter(bbuilder, data, **kwargs): """Not converter""" if kwargs: __unexpected_attrs("not", kwargs) return relax.op.unary.logical_not(data) def floor_converter(bbuilder, data, **kwargs): """Floor converter""" if kwargs: __unexpected_attrs("floor", kwargs) return relax.op.unary.floor(data) def ceil_converter(bbuilder, data, **kwargs): """Ceil converter""" if kwargs: __unexpected_attrs("ceil", kwargs) return relax.op.unary.ceil(data) def round_converter(bbuilder, data, **kwargs): """Round converter""" if kwargs: __unexpected_attrs("round", kwargs) return relax.op.unary.round(data) # # Binary ops def add_converter(bbuilder, lhs, rhs, **kwargs): """Add converter""" if kwargs: __unexpected_attrs("add", kwargs) return relax.op.binary.add(lhs, rhs) def sub_converter(bbuilder, lhs, rhs, **kwargs): """Sub converter""" if kwargs: __unexpected_attrs("sub", kwargs) return relax.op.binary.subtract(lhs, rhs) def mul_converter(bbuilder, lhs, rhs, **kwargs): """Mul converter""" if kwargs: __unexpected_attrs("mul", kwargs) lhs = bbuilder.normalize(lhs) rhs = 
bbuilder.normalize(rhs) l_ndim = len(lhs.struct_info.shape) r_ndim = len(rhs.struct_info.shape) if l_ndim > r_ndim > 0: rhs = relax.op.expand_dims(rhs, [d + 2 for d in range(l_ndim - r_ndim)]) if r_ndim > l_ndim > 0: lhs = relax.op.expand_dims(lhs, [d + 2 for d in range(r_ndim - l_ndim)]) return relax.op.binary.multiply(lhs, rhs) def div_converter(bbuilder, lhs, rhs, **kwargs): """Div converter""" if kwargs: __unexpected_attrs("div", kwargs) return relax.op.binary.divide(lhs, rhs) def pow_converter(bbuilder, lhs, rhs, **kwargs): """Pow converter""" if kwargs: __unexpected_attrs("pow", kwargs) return relax.op.binary.power(lhs, rhs) def lt_converter(bbuilder, lhs, rhs, **kwargs): """Lt converter""" if kwargs: __unexpected_attrs("lt", kwargs) return relax.op.binary.less(lhs, rhs) def gt_converter(bbuilder, lhs, rhs, **kwargs): """Gt converter""" if kwargs: __unexpected_attrs("gt", kwargs) return relax.op.binary.greater(lhs, rhs) def le_converter(bbuilder, lhs, rhs, **kwargs): """Le converter""" if kwargs: __unexpected_attrs("le", kwargs) return relax.op.binary.less_equal(lhs, rhs) def ge_converter(bbuilder, lhs, rhs, **kwargs): """Ge converter""" if kwargs: __unexpected_attrs("ge", kwargs) return relax.op.binary.greater_equal(lhs, rhs) def eq_converter(bbuilder, lhs, rhs, **kwargs): """Eq converter""" if kwargs: __unexpected_attrs("eq", kwargs) return relax.op.binary.equal(lhs, rhs) def ne_converter(bbuilder, lhs, rhs, **kwargs): """Ne converter""" if kwargs: __unexpected_attrs("ne", kwargs) return relax.op.binary.not_equal(lhs, rhs) def and_converter(bbuilder, lhs, rhs, **kwargs): """And converter""" if kwargs: __unexpected_attrs("and", kwargs) return relax.op.binary.logical_and(lhs, rhs) def or_converter(bbuilder, lhs, rhs, **kwargs): """Or converter""" if kwargs: __unexpected_attrs("or", kwargs) return relax.op.binary.logical_or(lhs, rhs) # # Select op def select_converter(bbuilder, condition, t_val, f_val, **kwargs): """Select converter""" if kwargs: 
__unexpected_attrs("select", kwargs) return relax.op.where(condition, t_val, f_val) # # Simplifier ops def sqr_converter(bbuilder, data, **kwargs): """sqr converter""" if kwargs: __unexpected_attrs("sqr", kwargs) d_type = data.struct_info.dtype return pow_converter(bbuilder, data, tvm_expr.const(2.0, dtype=d_type)) def sqrt_converter(bbuilder, data, **kwargs): """sqrt converter""" if kwargs: __unexpected_attrs("sqrt", kwargs) return relax.op.unary.sqrt(data) def rsqr_converter(bbuilder, data, **kwargs): """rsqr converter""" if kwargs: __unexpected_attrs("rsqr", kwargs) if isinstance(data, relax.Call): d_type = data.checked_type.dtype else: d_type = data.struct_info.dtype return pow_converter(bbuilder, data, tvm_expr.const(-2.0, dtype=d_type)) def rsqrt_converter(bbuilder, data, **kwargs): """rsqrt converter""" if kwargs: __unexpected_attrs("rsqrt", kwargs) return relax.op.unary.rsqrt(data) def log2_converter(bbuilder, data, **kwargs): """log2 converter""" if kwargs: __unexpected_attrs("log2", kwargs) # no equivalent in Relax, using TOpI return bbuilder.emit_te(topi.log2, data) def min_converter(bbuilder, lhs, rhs, **kwargs): """Min converter""" if kwargs: __unexpected_attrs("min", kwargs) return relax.op.binary.minimum(lhs, rhs) def max_converter(bbuilder, lhs, rhs, **kwargs): """Max converter""" if kwargs: __unexpected_attrs("max", kwargs) return relax.op.binary.maximum(lhs, rhs) def clamp_converter(bbuilder, x, a, b, **kwargs): """Clamp converter""" if kwargs: __unexpected_attrs("clamp", kwargs) # only works if b and a are Constant floats, not tensors if isinstance(a, tvm_expr.Constant) and isinstance(b, tvm_expr.Constant): return relax.op.clip( x, tvm_expr.PrimValue(a.data.numpy().item()), tvm_expr.PrimValue(b.data.numpy().item()) ) return max_converter(bbuilder, min_converter(bbuilder, x, b), a) # # Sliding-window ops def conv_converter( bbuilder, data, kernel, bias, border, stride, padding, dilation, groups, **kwargs ): """Convolution converter, skips bias if 
it's 0.0 (no bias)""" if kwargs: __unexpected_attrs("conv", kwargs) if border != "constant": print(f"Currently {border} border is not supported, used `constant` border") kernel_shape = [v.value for v in kernel.struct_info.shape.values] dshape = [v.value for v in data.struct_info.shape.values] if hasattr(data.struct_info, "ndim"): ndim = data.struct_info.ndim else: ndim = len(data.struct_info.shape) strides = _stride_conv(stride, ndim) if stride else (1,) * (ndim - 2) dilation = _stride_conv(dilation, ndim) if dilation else (1,) * (ndim - 2) if not padding: padding = _calculate_nnef_padding(dshape[2:], strides, kernel_shape[2:], dilation) pad = _padding_conv(padding, ndim) channels = kernel_shape[0] if groups == 0: groups = channels if ndim == 3: op = relax.op.nn.conv1d elif ndim == 4: op = relax.op.nn.conv2d elif ndim == 5: op = relax.op.nn.conv3d else: raise NotImplementedError("Ndim > 5 not supported for convolution.") conv_out = op( data=data, weight=kernel, strides=strides, padding=pad, dilation=dilation, groups=groups, ) res = None if isinstance(bias, tvm_expr.Constant): # nnef has bias of 0 if it is not needed if (bias.data.numpy() == 0).all(): res = conv_out if not res: bias = relax.op.reshape( bias, [1, -1] + [ 1, ] * (ndim - 2), ) res = relax.op.add(conv_out, bias) return res def deconv_converter( bbuilder, data, kernel, bias, border, stride, padding, dilation, output_shape, groups, **kwargs ): """Deconvolution converter, using convxd_transpose skips bias if it's 0.0 (no bias)""" if kwargs: __unexpected_attrs("deconv", kwargs) if border != "constant": print(f"Currently {border} border is not supported, used `constant` border") kernel_shape = [v.value for v in kernel.struct_info.shape.values] rank = len(kernel_shape) strides = _stride_conv(stride, rank) if stride else (1,) * (rank - 2) dilation = _stride_conv(dilation, rank) if dilation else (1,) * (rank - 2) total, out_sh = _calculate_nnef_padding_deconv( [v.value for v in data.struct_info.shape.values], 
strides, kernel_shape, dilation, output_shape, ) if padding: pad = _padding_conv(padding, rank) else: pad = _padding_conv([(pad // 2, (pad + 1) // 2) for pad in total], rank) if groups == 0: groups = kernel_shape[0] # limit output padding to modulo stride because of tvm checks out_pad = ( [(x - (y - t)) % s for x, y, t, s in zip(output_shape[2:], out_sh, total, stride)] if output_shape else (0, 0) ) if rank == 3: op = relax.op.nn.conv1d_transpose elif rank == 4: op = relax.op.nn.conv2d_transpose else: raise NotImplementedError("Ndim > 4 not supported for deconvolution. 3D WIP.") deconv_out = op( data=data, weight=kernel, strides=strides, padding=pad, dilation=dilation, groups=groups, output_padding=out_pad, ) res = None if isinstance(bias, tvm_expr.Constant): if (bias.data.numpy() == 0).all(): res = deconv_out if not res: bias = relax.op.reshape( bias, [1, -1] + [ 1, ] * (rank - 2), ) res = relax.op.add(deconv_out, bias) return res def box_converter(bbuilder, data, size, border, padding, stride, dilation, normalize, **kwargs): """Box operator converter, summation over sliding window, equal to conv with constant filter""" if kwargs: __unexpected_attrs("box", kwargs) dshape = [v.value for v in data.struct_info.shape.values] d_type = data.struct_info.dtype if size[:2] == [1, 1]: size[0] = dshape[1] if normalize: kernel = relax.op.full(size, relax.const(1 / math.prod(size[2:]), d_type), d_type) else: kernel = relax.op.ones(size, d_type) kernel = bbuilder.normalize(kernel) out = conv_converter( bbuilder, data, kernel, tvm_expr.const(0, dtype=d_type), border, stride, padding, dilation, dshape[1], ) else: # if boxing on channel or batch dims avg pool can solve with permute # we need permute indexes with inactive shape + active shape format, so active at the back def _apply_permutation(items, perm): return [items[ind] for ind in perm] inactive = [i for i, s in enumerate(size) if s == 1] active = [i for i, s in enumerate(size) if s != 1] permuted_ins = inactive + active 
inverse = [0] * len(permuted_ins) for i, p in enumerate(permuted_ins): inverse[p] = i data = relax.op.permute_dims(data, permuted_ins) size = _apply_permutation(size, permuted_ins) data = bbuilder.normalize(data) out = avg_pool_converter( bbuilder, data, size[2:], border, padding, stride[2:], dilation[2:] ) out = relax.op.permute_dims(out, inverse) if not normalize: out = bbuilder.normalize(out) out = mul_converter( bbuilder, out, tvm_expr.const(math.prod(size), dtype=out.struct_info.dtype) ) return out def debox_converter( bbuilder, data, size, border, padding, stride, dilation, normalize, output_shape, **kwargs ): """Debox operator converter, inverse of box, equal to deconv with constant filter""" if kwargs: __unexpected_attrs("debox", kwargs) dshape = [v.value for v in data.struct_info.shape.values] if isinstance(data, relax.Call): d_type = data.checked_type.dtype else: d_type = data.struct_info.dtype size[0] = dshape[1] if normalize: kernel = relax.op.full(relax.const(1 / math.prod(size[2:]), d_type), size, d_type) else: kernel = relax.op.ones(size, d_type) kernel = bbuilder.normalize(kernel) out = deconv_converter( bbuilder, data, kernel, tvm_expr.const(0, dtype=d_type), border, stride, padding, dilation, output_shape, groups=dshape[1], ) return out def nearest_downsample_converter(bbuilder, data, factor, **kwargs): """Nearest neighbour downsample converter""" if kwargs: __unexpected_attrs("nearest_downsample", kwargs) dims = 2 + len(factor) return box_converter( bbuilder, data, size=[1] * dims, border="constant", padding=[(0, 0)] * dims, stride=[1, 1] + factor, dilation=(1,) * (dims - 2), normalize=False, ) def area_downsample_converter(bbuilder, data, factor, **kwargs): """Area downsample converter""" if kwargs: __unexpected_attrs("area_downsample", kwargs) dims = 2 + len(factor) return box_converter( bbuilder, data, size=[1, 1] + factor, border="constant", padding=[(0, 0)] * dims, stride=[1, 1] + factor, dilation=(1,) * (dims - 2), normalize=True, ) def 
nearest_upsample_converter(bbuilder, data, factor, **kwargs):
    """Nearest neighbour upsample converter"""
    if kwargs:
        __unexpected_attrs("nearest_upsample", kwargs)

    dshape = [v.value for v in data.struct_info.shape.values]
    # output spatial size: each spatial dim scaled by its factor
    new_size = [d * f for d, f in zip(dshape[2:], factor)]

    ndims = len(dshape)
    # NOTE(review): `op` stays unbound for ndims outside 3..5 — confirm other
    # ranks cannot reach this point, otherwise emit_te raises NameError.
    if ndims == 3:
        op = topi.image.resize1d
    if ndims == 4:
        op = topi.image.resize2d
    if ndims == 5:
        op = topi.image.resize3d
    return bbuilder.emit_te(
        op,
        data,
        [
            0,
        ]
        * ndims,  # dummy value so typecheck goes through, roi is not used
        new_size,
        method="nearest_neighbor",
        rounding_method="round",
    )


def multilinear_upsample_converter(bbuilder, data, factor, method, border, **kwargs):
    """Multilinear upsampling converter.

    'aligned' and 'symmetric'+'replicate' map directly onto topi resize; every
    other method/border combination is emulated with a grouped transposed
    convolution using a triangle filter.
    """
    if kwargs:
        __unexpected_attrs("linear_upsample", kwargs)

    # for aligned and symmetric replicate resize can be used
    dshape = [v.value for v in data.struct_info.shape.values]
    ndims = len(dshape)
    if ndims == 3:
        op = topi.image.resize1d
    if ndims == 4:
        op = topi.image.resize2d
    if ndims == 5:
        op = topi.image.resize3d

    new_size = [d * f for d, f in zip(dshape[2:], factor)]
    if method == "aligned":
        # conversion from nn.upsampling to image.resizexd, re: discuss:11650
        return bbuilder.emit_te(
            op,
            data,
            [
                0,
            ]
            * ndims,  # dummy value so typecheck goes through, roi is not used
            new_size,
            method="linear",
            coordinate_transformation_mode="align_corners",
        )
    if method == "symmetric" and border == "replicate":
        return bbuilder.emit_te(
            op,
            data,
            [
                0,
            ]
            * ndims,  # dummy value so typecheck goes through, roi is not used
            new_size,
            method="linear",
            coordinate_transformation_mode="half_pixel",
        )

    # other combinations need to be calculated with convolution

    def _upsample_weights_1d(fact, symm):
        # 1D triangle (linear interpolation) filter; the symmetric variant is
        # mirrored around a half-pixel offset
        if symm:
            _weights = [1 - (i + 0.5) / fact for i in range(fact)]
            _weights = list(reversed(_weights)) + _weights
        else:
            _weights = [1 - abs(i) / float(fact) for i in range(-fact + 1, fact)]
        return np.array(_weights)

    def _upsample_weights_nd(fact, symm):
        # outer product of the per-dimension 1D filters
        _weights = [_upsample_weights_1d(f, symm) for f in fact]
        return reduce(np.multiply, np.ix_(*_weights))

    n, c = dshape[:2]

    symmetric = method == "symmetric"
    weights = _upsample_weights_nd(factor, symmetric)
    weights = np.reshape(weights, newshape=(1, 1) + weights.shape)
    # replicate the filter per channel: grouped deconv with groups == C
    kernel = tile_converter(bbuilder, tvm_expr.const(weights), (c, 1) + (1,) * len(factor))
    kernel = bbuilder.normalize(kernel)

    output_shape = [n, c] + [f * s for f, s in zip(factor, dshape[2:])]

    if symmetric:
        return deconv_converter(
            bbuilder,
            data,
            kernel,
            tvm_expr.const(0.0),
            border="constant",
            stride=factor,
            padding=[(f - 1, f - 1) for f in factor],
            dilation=[],
            groups=c,
            output_shape=output_shape,
        )
    else:
        replicate = border == "replicate"
        if replicate:
            # replicate the leading edge once so the asymmetric filter sees valid data
            data = pad_converter(
                bbuilder,
                data,
                [(0, 0), (0, 0)] + [(1, 0)] * len(factor),
                border,
                tvm_expr.const(0.0),
            )
            data = bbuilder.normalize(data)

            padding = factor
        else:
            padding = [f // 2 for f in factor]

        return deconv_converter(
            bbuilder,
            data,
            kernel,
            tvm_expr.const(0.0),
            border="constant",
            stride=factor,
            padding=[(p, p - 1) for p in padding],
            dilation=[],
            groups=c,
            output_shape=output_shape,
        )


#
# Reduce ops


def sum_reduce_converter(bbuilder, data, axes, normalize, keepdims=True, **kwargs):
    """Sum reduce converter"""
    if kwargs:
        __unexpected_attrs("sum_reduce", kwargs)

    out = relax.op.sum(data, axes, keepdims=keepdims)
    if normalize:
        # NOTE(review): l2_normalization_converter's signature is
        # (bbuilder, data, axes, bias, epsilon); here 0 lands on `axes` and the
        # shifted axis list on `bias` — verify the intended argument order.
        return l2_normalization_converter(bbuilder, out, 0, [x - 2 for x in axes], 0.0)
    return out


def max_reduce_converter(bbuilder, data, axes, keepdims=True, **kwargs):
    """Max reduce converter"""
    if kwargs:
        __unexpected_attrs("max_reduce", kwargs)

    return relax.op.max(data, axes, keepdims=keepdims)


def min_reduce_converter(bbuilder, data, axes, keepdims=True, **kwargs):
    """Min reduce converter"""
    if kwargs:
        __unexpected_attrs("min_reduce", kwargs)

    return relax.op.min(data, axes, keepdims=keepdims)


def argmax_reduce_converter(bbuilder, data, axes, keepdims=True, **kwargs):
    """Argmax reduce converter"""
    if kwargs:
        __unexpected_attrs("argmax_reduce", kwargs)

    # relax.op.argmax only supports
singular axis, using TOpI return bbuilder.emit_te(topi.argmax, data, axes, keepdims=keepdims) def argmin_reduce_converter(bbuilder, data, axes, keepdims=True, **kwargs): """Argmin reduce converter""" if kwargs: __unexpected_attrs("argmin_reduce", kwargs) # relax.op.argmin only supports singular axis, using TOpI return bbuilder.emit_te(topi.argmin, data, axes, keepdims=keepdims) def all_reduce_converter(bbuilder, data, axes, keepdims=True, **kwargs): """All reduce converter""" if kwargs: __unexpected_attrs("all_reduce", kwargs) # no equivalent in Relax, using TOpI return bbuilder.emit_te(topi.all, data, axes, keepdims) def any_reduce_converter(bbuilder, data, axes, keepdims=True, **kwargs): """Any reduce converter""" if kwargs: __unexpected_attrs("any_reduce", kwargs) # no equivalent in Relax, using TOpI return bbuilder.emit_te(topi.any, data, axes, keepdims) def mean_reduce_converter(bbuilder, data, axes, keepdims=True, **kwargs): """Mean reduce converter""" if kwargs: __unexpected_attrs("mean_reduce", kwargs) return relax.op.mean(data, axes, keepdims=keepdims) # # Tensor shape ops def reshape_converter(bbuilder, data, shape, axis_start, axis_count, **kwargs): """Reshape converter""" if kwargs: __unexpected_attrs("reshape", kwargs) dshape = [v.value for v in data.struct_info.shape.values] if axis_count == -1: newshape = dshape[:axis_start] + shape else: newshape = dshape newshape[axis_start : axis_start + axis_count] = shape return relax.op.reshape(data, newshape) def squeeze_converter(bbuilder, data, axes, **kwargs): """Squeeze converter""" if kwargs: __unexpected_attrs("squeeze", kwargs) return relax.op.squeeze(data, axes) def unsqueeze_converter(bbuilder, data, axes, **kwargs): """Unsqueeze converter""" if kwargs: __unexpected_attrs("unsqueeze", kwargs) axes = sorted(axes) for axis in axes: if axis < 0 and isinstance(data, tvm_expr.Var): axis = len(data.type_annotation.concrete_shape) + len(axes) + axis data = tvm_op.expand_dims(data, axis=axis) return data def 
transpose_converter(bbuilder, data, axes, **kwargs): """Transpose converter""" if kwargs: __unexpected_attrs("transpose", kwargs) return relax.op.permute_dims(data, axes) def split_converter(bbuilder, data, axis, ratios, **kwargs): """Split converter""" if kwargs: __unexpected_attrs("split", kwargs) axis_len = [v.value for v in data.struct_info.shape.values][axis] rat_mul = axis_len / sum(ratios) ratio_list = [(r * rat_mul) for r in ratios] s = 0 indices = [] for rat in ratio_list[:-1]: s += rat # Strictly needs int indices.append(int(s)) return relax.op.split(data, indices, axis) def concat_converter(bbuilder, *data, axis, **kwargs): """Concat converter""" if kwargs: __unexpected_attrs("concat", kwargs) return relax.op.concat(data, axis) def stack_converter(bbuilder, *data, axis, **kwargs): """Stack converter""" if kwargs: __unexpected_attrs("stack", kwargs) data = [relax.op.expand_dims(d, axis) for d in data] return relax.op.concat(data, axis) def unstack_converter(bbuilder, data, axis, **kwargs): """Unstack converter""" if kwargs: __unexpected_attrs("unstack", kwargs) split = split_converter( bbuilder, data, axis, [1] * [v.value for v in data.struct_info.shape.values][axis] ) split = bbuilder.normalize(split) res = [] for i in range(len(split.struct_info.fields)): res.append(squeeze_converter(bbuilder, split[i], axis)) return tvm.relax.Tuple(relax.Tuple(res)) def slice_converter(bbuilder, data, axes, begin, end, stride, **kwargs): """Slice converter""" if kwargs: __unexpected_attrs("slice", kwargs) if not stride: stride = [1] * len(axes) return relax.op.strided_slice(data, begin=begin, end=end, strides=stride, axes=axes) def pad_converter(bbuilder, data, padding, border, value, **kwargs): """Pad converter""" if kwargs: __unexpected_attrs("pad", kwargs) if border not in ["constant", "replicate", "reflect"]: print(f"{border} border type is not supported in padding. 
Assumed constant") border = "constant" if border == "replicate": border = "edge" # padding can only be tuple even though docs say tuple> pad = sum(padding, ()) pad_before, pad_after = zip(*padding) # reflect can only work with TOPI mirror_pad if border == "reflect": return bbuilder.emit_te(tvm.topi.nn.mirror_pad, data, pad_before, pad_after, "REFLECT") if border == "edge": raise tvm.error.OpNotImplemented( "Replicate - Edge mode is currently not supperted in TVM relax" ) # constant works with normal relax.nn.pad return relax.op.nn.pad(data, pad, border, value) def tile_converter(bbuilder, data, repeats, **kwargs): """Tile converter""" if kwargs: __unexpected_attrs("tile", kwargs) return relax.op.tile(data, repeats) # # Region-of-interest ops # # Matrix multiplication def matmul_converter(bbuilder, a, b, **kwargs): """Matmul converter real signature: matmul_converter(a, b, transposeA, transposeB)""" transpose_a = kwargs.pop("transposeA") transpose_b = kwargs.pop("transposeB") if kwargs: __unexpected_attrs("matmul", kwargs) if transpose_a: ndim = len(a.struct_info.shape.values) axes = list(range(ndim - 2)) axes.append(ndim - 1) axes.append(ndim - 2) a = relax.op.permute_dims(a, axes) if transpose_b: ndim = len(a.struct_info.shape.values) axes = list(range(ndim - 2)) axes.append(ndim - 1) axes.append(ndim - 2) b = relax.op.permute_dims(b, axes) a = bbuilder.normalize(a) b = bbuilder.normalize(b) return relax.op.matmul(a, b) # # Variable updates # # Compound ops def sigmoid_converter(bbuilder, data, **kwargs): """Sigmoid converter""" if kwargs: __unexpected_attrs("sigmoid", kwargs) return relax.op.unary.sigmoid(data) def relu_converter(bbuilder, data, **kwargs): """RELU converter""" if kwargs: __unexpected_attrs("relu", kwargs) return relax.op.nn.relu(data) def prelu_converter(bbuilder, data, alpha, **kwargs): """PRELU converter""" if kwargs: __unexpected_attrs("prelu", kwargs) # prelu can't handle float vals but NNEF supports direct parameter, this is just in case if 
isinstance(alpha, tvm_expr.Constant): if alpha.data.numpy().size == 1: return relax.op.nn.leakyrelu(data, alpha.data.numpy().item()) # alpha needs to be a tensor whose rank is the same as of data, # and the only non 1 dim is the channel dims axes = [ 0, ] + [a + 2 for a in range(data.struct_info.ndim - 2)] alpha = relax.op.expand_dims(alpha, axes) # using select for prelu return select_converter( bbuilder, data < tvm_expr.const(0.0), mul_converter(bbuilder, alpha, data), data ) def leaky_relu_converter(bbuilder, data, alpha, **kwargs): """Leaky RELU converter""" if kwargs: __unexpected_attrs("leaky_relu", kwargs) return relax.op.nn.leakyrelu(data, alpha) def elu_converter(bbuilder, data, alpha, **kwargs): """ELU converter""" if kwargs: __unexpected_attrs("elu", kwargs) return select_converter( bbuilder, lt_converter(bbuilder, data, tvm_expr.const(0.0)), mul_converter( bbuilder, tvm_expr.const(alpha), sub_converter(bbuilder, exp_converter(bbuilder, data), tvm_expr.const(1.0)), ), data, ) def selu_converter(bbuilder, data, alpha, **kwargs): """SELU converter True signature is selu_converter(data, alpha, lambda)""" lambda_var = kwargs.pop("lambda") if kwargs: __unexpected_attrs("selu", kwargs) return mul_converter( bbuilder, tvm_expr.const(lambda_var), select_converter( bbuilder, data < tvm_expr.const(0.0), mul_converter( bbuilder, tvm_expr.const(alpha), sub_converter(bbuilder, exp_converter(bbuilder, data), tvm_expr.const(1.0)), ), data, ), ) def gelu_converter(bbuilder, data, **kwargs): """GELU converter NNEF definition for GELU: the exact definition of GELU is x * Phi(x) where Phi(x) is the CDF of the standard normal distribution, which can be approximated for example by sigmoid(1.702 * x) `mul_converter(data, sigmoid_converter(mul_converter(tvm_expr.const(1.702), data)))` But in this case we will use the erf to calculate normcdf (same as to pytorch GELU impl) """ if kwargs: __unexpected_attrs("gelu", kwargs) return relax.op.nn.gelu(data) def 
silu_converter(bbuilder, data, **kwargs):
    """SiLU converter"""
    if kwargs:
        __unexpected_attrs("silu", kwargs)

    # silu(x) = x * sigmoid(x)
    return mul_converter(bbuilder, data, sigmoid_converter(bbuilder, data))


def softmax_converter(bbuilder, data, axes, **kwargs):
    """Softmax converter"""
    if kwargs:
        __unexpected_attrs("softmax", kwargs)

    if len(axes) > 1:
        print("Multiple axes not supported, operation has been done along the first axis in axes.")
    axis = axes[0]

    return relax.op.nn.softmax(data, axis)


def softplus_converter(bbuilder, data, **kwargs):
    """Softplus converter"""
    if kwargs:
        __unexpected_attrs("softplus", kwargs)

    # softplus(x) = log(exp(x) + 1)
    return log_converter(
        bbuilder, add_converter(bbuilder, exp_converter(bbuilder, data), tvm_expr.const(1.0))
    )


#
# linear ops


def linear_converter(bbuilder, data, _filter, bias, **kwargs):
    """Linear converter: data @ filter^T (+ bias)."""
    if kwargs:
        __unexpected_attrs("linear", kwargs)

    out = matmul_converter(bbuilder, data, _filter, transposeA=False, transposeB=True)
    out = bbuilder.normalize(out)
    res = None
    # skip the bias add when the bias is a constant all-zero tensor
    if isinstance(bias, tvm_expr.Constant):
        if (bias.data.numpy() == 0).all():
            res = out

    if hasattr(data.struct_info, "ndim"):
        ndim = data.struct_info.ndim
    else:
        ndim = len(data.struct_info.shape)

    if not res:
        # reshape bias to [1, C, 1, ...] so it broadcasts
        bias = relax.op.reshape(
            bias,
            [1, -1]
            + [
                1,
            ]
            * (ndim - 2),
        )
        res = relax.op.add(out, bias)
    return res


def separable_conv_converter(
    bbuilder,
    data,
    plane_filter,
    point_filter,
    bias,
    border,
    padding,
    stride,
    dilation,
    groups,
    **kwargs,
):
    """Separable convolution converter: depthwise (plane) conv followed by a
    pointwise conv which also applies the bias."""
    if kwargs:
        __unexpected_attrs("separable_conv", kwargs)

    if isinstance(data, relax.Call):
        d_type = data.checked_type.dtype
    else:
        d_type = data.struct_info.dtype

    # groups == 0 presumably selects the depthwise path in conv_converter — TODO confirm
    filtered = conv_converter(
        bbuilder,
        data,
        plane_filter,
        tvm_expr.const(0, dtype=d_type),
        border,
        stride,
        padding,
        dilation,
        0,
    )
    filtered = bbuilder.normalize(filtered)

    return conv_converter(bbuilder, filtered, point_filter, bias, "constant", [], [], [], groups)


def separable_deconv_converter(
    bbuilder,
    data,
    plane_filter,
    point_filter,
    bias,
    border,
    padding,
    stride,
    dilation,
    output_shape,
    groups,
    **kwargs,
):
    """Separable deconvolution converter: pointwise deconv first, then the
    plane (depthwise) deconv with the bias — the reverse order of separable_conv."""
    if kwargs:
        __unexpected_attrs("separable_deconv", kwargs)

    if isinstance(data, relax.Call):
        d_type = data.checked_type.dtype
    else:
        d_type = data.struct_info.dtype

    filtered = deconv_converter(
        bbuilder,
        data,
        point_filter,
        tvm_expr.const(0, dtype=d_type),
        "constant",
        [],
        [],
        [],
        [],
        groups,
    )
    filtered = bbuilder.normalize(filtered)

    return deconv_converter(
        bbuilder, filtered, plane_filter, bias, border, stride, padding, dilation, output_shape, 0
    )


def max_pool_converter(bbuilder, data, size, border, padding, stride, dilation, **kwargs):
    """Max pool converter"""
    if kwargs:
        __unexpected_attrs("max_pool", kwargs)

    if border != "constant":
        print(f"Currently {border} border is not supported, used `constant` border")

    dshape = [v.value for v in data.struct_info.shape.values]
    rank = len(dshape)

    pool_size = _size_conv(size, rank)
    strides = _stride_conv(stride, rank) if stride else (1,) * (rank - 2)
    dilation = _stride_conv(dilation, rank) if dilation else (1,) * (rank - 2)

    if not padding:
        # padding is truncated to `conv style` (only active layers are present)
        padding = _calculate_nnef_padding(dshape[2:], strides, pool_size, dilation)

    pad = _padding_conv(padding, rank)

    if border == "constant":
        # pre-pad explicitly so the pool op itself runs with zero padding
        # NOTE(review): this assumes `padding` is in conv style (spatial pairs
        # only) when prepending the N/C pairs — confirm for caller-supplied
        # pool-style padding.
        padding = [(0, 0), (0, 0)] + padding
        data = pad_converter(bbuilder, data, padding, border, 0.0)
        data = bbuilder.normalize(data)
        pad = (0, 0)

    if rank == 3:
        op = relax.op.nn.max_pool1d
    elif rank == 4:
        op = relax.op.nn.max_pool2d
    elif rank == 5:
        op = relax.op.nn.max_pool3d
    else:
        raise NotImplementedError("Ndim > 5 not supported for max pool.")

    return op(
        data,
        pool_size=pool_size,
        strides=strides,
        dilation=dilation,
        padding=pad,
    )


def avg_pool_converter(bbuilder, data, size, border, padding, stride, dilation, **kwargs):
    """Avg pool converter"""
    if kwargs:
        __unexpected_attrs("avg_pool", kwargs)

    if border not in ["constant", "ignore"]:
        print(f"Currently {border} border is not supported, used `constant` border")
    dshape = [v.value for v in data.struct_info.shape.values]
    rank = len(dshape)

    pool_size = _size_conv(size, rank)
    strides = _stride_conv(stride, rank) if stride else (1,) * (rank - 2)
    dilation = _stride_conv(dilation, rank) if dilation else (1,) * (rank - 2)

    # padding is truncated to `conv style` (only active layers are present)
    active_shape = dshape[2:]
    if not padding:
        padding = _calculate_nnef_padding(active_shape, strides, pool_size, dilation)

    pad = _padding_conv(padding, rank)

    if rank == 3:
        op = relax.op.nn.avg_pool1d
    elif rank == 4:
        op = relax.op.nn.avg_pool2d
    elif rank == 5:
        op = relax.op.nn.avg_pool3d
    else:
        raise NotImplementedError("Ndim > 5 not supported for avg pool.")

    return op(
        data,
        pool_size=pool_size,
        strides=strides,
        dilation=dilation,
        padding=pad,
        # NNEF 'ignore' border excludes padded elements from the mean
        count_include_pad=border != "ignore",
    )


def rms_pool_converter(bbuilder, data, size, border, padding, stride, dilation, **kwargs):
    """Rms pool converter: sqrt(avg_pool(x^2))."""
    if kwargs:
        __unexpected_attrs("rms_pool", kwargs)

    return sqrt_converter(
        bbuilder,
        avg_pool_converter(
            bbuilder,
            bbuilder.normalize(sqr_converter(bbuilder, data)),
            size=size,
            border=border,
            padding=padding,
            stride=stride,
            dilation=dilation,
        ),
    )


#
# Normalization


def local_response_normalization_converter(bbuilder, data, size, alpha, beta, bias, **kwargs):
    """LRN converter"""
    if kwargs:
        __unexpected_attrs("local_response_normalization", kwargs)

    axis = [i for i in range(len(size)) if size[i] > 1]
    # NOTE(review): if no dimension of `size` is > 1, `axis` is empty and
    # axis[0] raises IndexError — confirm callers always pass a window > 1.
    if len(axis) == 1:
        axis = axis[0]
    else:
        print("Multi axis LRN is not implemented properly, using first axis where size != 1")
        axis = axis[0]
    size = size[axis]
    return bbuilder.emit_te(topi.nn.lrn, data, size, axis, alpha, beta, bias)


def local_mean_normalization_converter(bbuilder, data, size, **kwargs):
    """LMN converter: x - box_mean(x)."""
    if kwargs:
        __unexpected_attrs("local_mean_normalization", kwargs)

    mean = box_converter(bbuilder, data, size, "constant", [], [], [], normalize=True)
    mean = bbuilder.normalize(mean)
    return sub_converter(bbuilder, data, mean)


def local_variance_normalization_converter(bbuilder, data, size, bias, epsilon, **kwargs):
    """LVN converter: x / max(sqrt(box_mean(x^2)) + bias, epsilon)."""
    if kwargs:
        __unexpected_attrs("local_variance_normalization", kwargs)

    sigma = box_converter(
        bbuilder,
        bbuilder.normalize(sqr_converter(bbuilder, data)),
        size,
        "constant",
        [],
        [],
        [],
        normalize=True,
    )
    sigma = bbuilder.normalize(sigma)
    return div_converter(
        bbuilder,
        data,
        max_converter(
            bbuilder,
            add_converter(bbuilder, sqrt_converter(bbuilder, sigma), tvm_expr.const(bias)),
            tvm_expr.const(epsilon),
        ),
    )


def local_contrast_normalization_converter(bbuilder, data, size, bias, epsilon, **kwargs):
    """LCN converter: mean-center, then variance-normalize over the same window."""
    if kwargs:
        __unexpected_attrs("local_contrast_normalization", kwargs)

    centered = local_mean_normalization_converter(bbuilder, data, size)
    centered = bbuilder.normalize(centered)

    return local_variance_normalization_converter(bbuilder, centered, size, bias, epsilon)


def l1_normalization_converter(bbuilder, data, axes, bias, epsilon, **kwargs):
    """L1 norm converter"""
    if kwargs:
        __unexpected_attrs("l1_normalization", kwargs)

    sigma = sum_reduce_converter(bbuilder, abs_converter(bbuilder, data), axes, False)
    return div_converter(
        bbuilder,
        data,
        max_converter(
            bbuilder, add_converter(bbuilder, sigma, tvm_expr.const(bias)), tvm_expr.const(epsilon)
        ),
    )


def l2_normalization_converter(bbuilder, data, axes, bias, epsilon, **kwargs):
    """L2 norm converter"""
    if kwargs:
        __unexpected_attrs("l2_normalization", kwargs)

    # relay style l2_norm not supported, used equation from NNEF
    sigma = sum_reduce_converter(
        bbuilder, sqr_converter(bbuilder, data), axes=axes, normalize=False
    )
    res = div_converter(
        bbuilder,
        data,
        max_converter(
            bbuilder,
            add_converter(bbuilder, sqrt_converter(bbuilder, sigma), tvm_expr.const(bias)),
            tvm_expr.const(epsilon),
        ),
    )
    return res


def batch_normalization_converter(bbuilder, data, mean, variance, offset, scale, epsilon, **kwargs):
    """Batch norm converter"""
    if kwargs:
        __unexpected_attrs("batch_normalization", kwargs)

    # NNEF stats arrive as [1, C]; squeeze the leading axis for topi.nn.batch_norm
    mean =
squeeze_converter(bbuilder, mean, 0)
    variance = squeeze_converter(bbuilder, variance, 0)
    offset = squeeze_converter(bbuilder, offset, 0)
    scale = squeeze_converter(bbuilder, scale, 0)

    mean = bbuilder.normalize(mean)
    variance = bbuilder.normalize(variance)
    offset = bbuilder.normalize(offset)
    scale = bbuilder.normalize(scale)

    # `1` is presumably the channel axis for topi.nn.batch_norm — TODO confirm
    res = bbuilder.emit_te(topi.nn.batch_norm, data, scale, offset, mean, variance, 1, epsilon)
    # batch_norm returns a tuple; only the normalized tensor is used
    return res[0]


#
# Misc ops


================================================
FILE: nnef_tools-pyproject/nnef_tools/execution/tvm/nnef_frontend/relay/__init__.py
================================================
# Copyright (c) 2017-2025 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import warnings

import tvm
from packaging import version

ver = version.parse(tvm.__version__)
# NOTE(review): only `minor` is compared — a hypothetical TVM 1.x release would
# pass this gate; consider comparing the full (major, minor) pair.
if ver.minor > 19:
    raise ImportError(f"TVM version 0.19 or lower is required, but found {tvm.__version__}")
if ver.minor != 19:
    # after the raise above this can only mean an older-than-0.19 TVM
    warnings.warn(f"TVM version 0.19 is recommended, but found {tvm.__version__}. Some features may not work as expected.")

from .from_nnef import from_nnef


================================================
FILE: nnef_tools-pyproject/nnef_tools/execution/tvm/nnef_frontend/relay/from_nnef.py
================================================
# Copyright (c) 2017-2025 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NNEF: Neural Network Exchange Format frontend for TVM relay"""
import os
import typing

import nnef
import numpy as np

import tvm
from tvm import relay
from tvm.ir import IRModule
from tvm.relay import expr as tvm_expr
from tvm.relay import analysis, function
from tvm.relay.frontend.common import new_var, fold_constant, set_span, infer_type

from .nnef_ops import _get_converter_map


def get_type(elem_type: str):
    """
    Gives numpy style type for nnef primitive types, uses x32 versions.

    :param elem_type: string, (scalar, integer, logical, string)
    :return: returns numpy dtype equivalent (float32, int32, bool, string)
    """
    if elem_type == "scalar":
        return "float32"
    if elem_type == "integer":
        return "int32"
    if elem_type == "logical":
        return "bool"
    if elem_type == "string":
        return "string"
    raise TypeError(f'Type "{elem_type}" is not implemented')


def make_parameter_span(source_name_list, name_sep="."):
    # join node/parameter names into a single span identifier for debugging
    return name_sep.join(source_name_list)


# Converter class
class NNEFConverter:
    """
    Helper class for class level attributes, for conversion of NNEF model.
    Public method to use is from_nnef.

    Parameters
    ----------
    freeze_vars : bool, optional
        If this parameter is true, the nnef variables will be converted to
        constants, and be embedded into the relay model, allowing
        optimizations at compile time.
    """

    def __init__(self, freeze_vars=False):
        # tensor name -> relay expression produced so far
        self._nodes = {}
        # tensor name -> relay Constant
        self._consts = {}
        # graph input name -> relay Var
        self._inputs = {}
        self._num_inputs = 0
        # variable name -> raw weight data (when variables are not frozen)
        self._params = {}
        self._num_params = 0
        self._freeze_vars = freeze_vars

    def from_nnef(self, graph: nnef.Graph) -> typing.Tuple[tvm.IRModule, dict]:
        """
        Convert an NNEF model into an equivalent TVM Relay IRModule.

        Parameters
        ----------
        graph : nnef.Graph
            An NNEF Graph object that was imported with nnef.load_graph.
            Shapes should be inferred by nnef.infer_shapes on graph beforehand.

        Returns
        -------
        mod : tvm.IRModule
            The relay module for compilation

        params : dict of str to tvm.nd.NDArray
            The parameter dictionary to be used
        """
        self._parse_inputs(graph)
        self._construct_nodes(graph)

        outputs = [self._nodes[n] for n in graph.outputs]
        outputs = outputs[0] if len(outputs) == 1 else tvm_expr.Tuple(outputs)

        # invert the node map so free relay Vars can be mapped back to names
        nodes = {v: k for k, v in self._nodes.items()}
        free_vars = analysis.free_vars(outputs)
        free_vars = [nodes[var] for var in free_vars]

        # promote unfrozen variables that are actually used into function inputs
        for i_name in self._params.keys():
            if i_name in free_vars and i_name not in self._inputs:
                self._inputs[i_name] = self._nodes[i_name]

        func = function.Function(list(self._inputs.values()), outputs)
        return IRModule.from_expr(func), self._params

    def _parse_inputs(self, graph):
        """Save inputs into class from inputs attrib of graph"""
        for inp in graph.inputs:
            self._num_inputs += 1
            tensor = graph.tensors[inp]
            self._nodes[inp] = new_var(inp, shape=tensor.shape, dtype=get_type(tensor.dtype))
            self._inputs[inp] = self._nodes[inp]

    def _construct_nodes(self, graph):
        """Construct TVM relay calls from every operation of the nnef graph"""
        for op in graph.operations:
            if op.name == "external":
                # externals are handled as input, not needed,
                # but nnef treats them as operations as well
                continue

            if op.name == "variable":
                self._set_variable(graph.tensors[op.outputs["output"]])

            elif op.name == "constant":
                self._set_const(op)

            else:
                # every other operator can be grouped more easily,
                # as it does not need self for conversion
                self._set_operator(op)

    def _set_operator(self, node):
        """Convert one generic nnef operation into a relay call and register
        its outputs in self._nodes."""
        self._set_literal_inputs(node)
        self._set_parameter_span(node, node.name)
        inputs = []
        for ink, inv in node.inputs.items():
            if isinstance(inv, list):
                for i, linv in enumerate(inv):
                    if linv in self._nodes.keys():
                        inputs.append(self._nodes[linv])
                    else:  # handle literal inputs
                        name = f"{node.name}_{ink}_{i}"
                        assert name in self._nodes, f"{name} has not been properly handled"
                        inputs.append(self._nodes[name])

            else:
                if inv in self._nodes.keys():
                    inputs.append(self._nodes[inv])
                else:  # handle literal inputs
                    name = f"{node.name}_{ink}"
                    assert name in self._nodes, f"{name} has not been properly handled"
                    inputs.append(self._nodes[name])

        converted = self._get_relay_op_call(node.name, inputs, node.attribs)

        if not isinstance(converted, tvm_expr.TupleWrapper):
            outputs_num = 1
        else:
            outputs_num = len(converted)

        if outputs_num == 1:
            # fold constants if the whole subexpression is constant
            if not isinstance(converted, tvm_expr.TupleWrapper):
                converted = fold_constant(converted)
            else:
                converted = fold_constant(converted.astuple())
        else:
            converted = tvm_expr.TupleWrapper(fold_constant(converted.astuple()), len(converted))

        converted = set_span(converted, node.name)

        if outputs_num == 1:
            # check if the singular ret val is a list of only one element
            ret_val = list(node.outputs.values())[0]
            if isinstance(ret_val, list):
                self._nodes[ret_val[0]] = converted
            else:
                self._nodes[ret_val] = converted
        else:
            # NOTE(review): assumes multi-output ops list their outputs under the
            # "values" key — verify against the nnef operator definitions
            for i, out in zip(range(outputs_num), node.outputs["values"]):
                self._nodes[out] = converted[i]

    def _set_const(self, node):
        """Create a tvm.relay.Constant from a nnef constant tensor"""
        name = node.outputs["output"]
        data = node.attribs["value"]
        shape = node.attribs["shape"]
        if len(data) == 1:
            # single value: broadcast it to the declared shape
            data = np.full(shape, data, dtype=get_type(node.dtype))
        else:
            data = np.array(data, dtype=get_type(node.dtype))
        self._consts[name] = tvm_expr.const(data)
        self._nodes[name] = self._consts[name]

    def _set_variable(self, tensor):
        """Create a tvm.relay.Var (or Constant if freeze_vars) from a nnef variable tensor"""
        tens_data = tensor.data
        if
self._freeze_vars: self._consts[tensor.name] = tvm_expr.const(tens_data) self._nodes[tensor.name] = self._consts[tensor.name] else: self._nodes[tensor.name] = new_var( tensor.name, shape=tensor.shape, dtype=get_type(tensor.dtype) ) self._params[tensor.name] = tens_data def _set_literal_inputs(self, node): """Checks if node has literal inputs and saves them into a tvm.relay.Constant. naming as {node.name}_{input field name}""" for field_name, value in node.inputs.items(): if isinstance(value, list): for v in value: if v not in self._nodes.keys(): self._nodes[f"{node.name}_{v}"] = tvm_expr.const(v) else: if value not in self._nodes.keys(): self._nodes[f"{node.name}_{field_name}"] = tvm_expr.const(value) def _set_parameter_span(self, node, node_source_name): for field_name, name in node.inputs.items(): if isinstance(name, list): for n in name: self._set_par_span_helper(node, node_source_name, n, field_name) else: self._set_par_span_helper(node, node_source_name, name, field_name) def _set_par_span_helper(self, node, node_source_name, name, field_name): if name not in self._nodes.keys(): name = f"{node.name}_{field_name}" expr = self._nodes.get(name) if expr: expr_with_span = set_span(expr, make_parameter_span([node_source_name, name])) self._nodes[name] = expr_with_span if name in self._inputs: self._inputs[name] = expr_with_span if isinstance(expr, relay.Constant): self._consts[name] = expr_with_span def _get_relay_op_call(self, name, inputs, attrs): """Returns the tvm.Call equivalent to the nnef operator""" conv_map = _get_converter_map() if name in conv_map: call = conv_map[name](*inputs, **attrs) else: # This error is reached if NNEF is expanded with additional ops raise NotImplementedError( f"Operator {name} is not implemented, as {name} has been added after 1.0.5." 
)
        return call

    def _infer_type(self, val):
        """Map a literal (or tensor-name) value to a dtype string.

        Returns (dtype, is_literal): is_literal is False when `val` names a
        node already loaded into the model.
        """
        if isinstance(val, bool):
            return "bool", True
        if isinstance(val, float):
            return "float32", True
        if isinstance(val, int):
            return "int32", True
        if isinstance(val, str):
            # the string vals can be names of nodes in some of the cases
            if isinstance(val, nnef.Identifier):
                if val in self._nodes.keys():
                    node = self._nodes[val]
                    if isinstance(node, tvm_expr.Var):
                        return node.type_annotation.dtype, False
                    if isinstance(node, tvm_expr.Constant):
                        return node.data.dtype, False
                    if isinstance(node, tvm_expr.Call):
                        return infer_type(node).checked_type.dtype, False
                raise Exception(
                    f"{val} has not been loaded into the model "
                    "but it should have been, as a var or call."
                )
            return "string", True
        raise TypeError(f'Value "{val}" is not a recognized type')


def from_nnef(
    model: typing.Union[str, os.PathLike, nnef.Graph],
    freeze_vars: bool = False,
) -> typing.Tuple[IRModule, dict]:
    """
    Convert an NNEF model into an equivalent TVM Relay IRModule.

    Parameters
    ----------
    model : os.PathLike or str or nnef.Graph
        Path to an NNEF model directory, containing the graph.nnef (and weight files)

    freeze_vars : bool, optional
        If this parameter is true, the nnef variables will be converted to
        constants, and be embedded into the relay model, allowing
        optimizations at compile time.

    Returns
    -------
    mod : tvm.IRModule
        The relay module for compilation

    params : dict of str to tvm.nd.NDArray
        The parameter dictionary to be used
    """
    conv_clss = NNEFConverter(freeze_vars)

    if not isinstance(model, nnef.Graph):
        model = nnef.load_graph(model)

    # fills in the nnef graph's shape information
    nnef.infer_shapes(model)

    return conv_clss.from_nnef(graph=model)


================================================
FILE: nnef_tools-pyproject/nnef_tools/execution/tvm/nnef_frontend/relay/nnef_ops.py
================================================
# Copyright (c) 2017-2025 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NNEF frontend converter helper funcs and ops"""

import math
import itertools
from functools import reduce

import numpy as np

import tvm
from tvm import relay
from tvm.relay import expr as tvm_expr
from tvm.relay import op as tvm_op
from tvm.relay.frontend.common import get_relay_op, infer_shape, infer_type


# Base methods


def dimension_picker(prefix, kernel_shape, suffix=""):
    """
    Returns the correct name for nth dimensional operator. Uses the "kernel_shape" attribute.\n
    E.g.call: dimension_picker(op_name)(attr)

    :param prefix: the name of the operator (e.g. conv)
    :param kernel_shape: shape of the tensor to fit the operation
    :param suffix: optional suffix for ops
    :return: "prefix`n`d" where n is the correct dimension for the kernel
    """
    # rank counts only the spatial dims (shape is [O, I, *spatial])
    rank = len(kernel_shape[2:])
    if rank == 1:
        return prefix + "1d" + suffix
    if rank == 2:
        return prefix + "2d" + suffix
    if rank == 3:
        return prefix + "3d" + suffix
    op_name = prefix + "1d/2d/3d"
    msg = f"Only 1D, 2D, and 3D kernels are supported for operator {op_name}."
    raise tvm.error.OpAttributeInvalid(msg)


def _size_conv(size, rank):
    """Normalize an NNEF window size to the spatial-only form expected by pools.

    Accepts either a conv-style size (spatial dims only) or a pool-style size
    (N and C included, both required to be 1).
    """
    # window of size (DH)W is only possible when it is checked outside,
    # which is needed for alternative solution
    if rank == 3:
        if len(size) == 1:
            return size
        if len(size) == 3:
            assert (
                size[0] == 1 and size[1] == 1
            ), "Incorrect window dimensions, first two dimensions must be 1"
            # NOTE(review): returns the scalar size[2] here, while the rank-4/5
            # branches return a list (size[2:]) — confirm downstream ops accept
            # both forms.
            return size[2]
    if rank == 4:
        if len(size) == 2:
            return size
        if len(size) == 4:
            assert (
                size[0] == 1 and size[1] == 1
            ), "Incorrect window dimensions, first two dimensions must be 1"
            return size[2:]
    if rank == 5:
        if len(size) == 3:
            return size
        if len(size) == 5:
            assert (
                size[0] == 1 and size[1] == 1
            ), "Incorrect window dimensions, first two dimensions must be 1"
            return size[2:]

    raise ValueError(f"Unexpected window size, got {len(size)}")


def _stride_conv(stride, rank):
    """Normalize an NNEF stride to spatial-only form (conv or pool style input)."""
    if rank == 3:
        # {conv style} :: [s] -> [s]
        if len(stride) == 1:
            return stride
        # {pool style} :: [N, C, s] -> asrt N,C == 1; [s]
        if len(stride) == 3:
            assert (
                stride[0] == 1 and stride[1] == 1
            ), "Not supported stride dimensions, first two dimensions must be 1"
            return stride[2:]
    if rank == 4:
        # {conv style} :: [sh, sw] -> [sh, sw]
        if len(stride) == 2:
            return stride
        # {pool style} :: [N, C, sh, sw] -> asrt N,C == 1; [sh, sw]
        if len(stride) == 4:
            assert (
                stride[0] == 1 and stride[1] == 1
            ), "Not supported stride dimensions, first two dimensions must be 1"
            return stride[2:]
    if rank == 5:
        # {conv style} :: [sd, sh, sw] -> [sd, sh, sw]
        if len(stride) == 3:
            return stride
        # {pool style} :: [N, C, sd, sh, sw] -> asrt N,C == 1; [sd, sh, sw]
        if len(stride) == 5:
            assert (
                stride[0] == 1 and stride[1] == 1
            ), "Not supported stride dimensions, first two dimensions must be 1"
            return stride[2:]

    raise ValueError(f"Unexpected stride in {rank - 2}D, got {len(stride)}: {stride}")


def _padding_conv(padding, rank, keepdims=False):
    """Normalize NNEF per-dim (before, after) padding pairs to the flattened
    layout the relay/relax conv and pool ops expect."""
    if isinstance(padding[0], (tuple, list)):
        # 1D
        if rank == 3:
            # {conv style} :: [(l,r)] -> (l,r)
            if len(padding) == 1:
                return padding[0]
            if len(padding) == 3:
                # {pool style} :: [(batch),(channel),(l,r)] -> asrt N,C == 0, (l,r)
                if not keepdims:
                    assert padding[0] == (0, 0) and padding[1] == (0, 0), (
                        "Incorrect padding. " "Padding on C,I dimensions not supported"
                    )
                    return padding[2]
                # {sliding window style} :: [(batch),(channel),(l,r)] -> [(batch),(channel),(l,r)]
                else:
                    return padding
        # 2D
        if rank == 4:
            # {conv style} :: [(u,d),(l,r)] -> (u, l, d, r)
            if len(padding) == 2:
                # change UDLR to ULDR padding, LC is faster here
                return [x[i] for i in [0, 1] for x in padding]
            if len(padding) == 4:
                # {pool style} :: [(batch size),(channel),(u,d),(l,r)] ->
                # -> asrt N,C == 0, (u, l, d, r)
                if not keepdims:
                    assert padding[0] == (0, 0) and padding[1] == (0, 0), (
                        "Incorrect padding. " "Padding on C,I dimensions not supported"
                    )
                    # itertools is faster than LC (slicing)
                    return list(itertools.chain.from_iterable(zip(padding[2], padding[3])))
                # {sliding window style} :: [(batch),(channel),(u,d),(l,r)] ->
                # -> [(batch),(channel),(u,d),(l,r)]
                else:
                    return padding
        # 3D
        if rank == 5:
            # {conv style} :: [(f,b),(u,d),(l,r)] -> (f, u, l, b, d, r)
            if len(padding) == 3:
                # LC is faster
                return [x[i] for i in [0, 1] for x in padding]
            if len(padding) == 5:
                # {pool style} :: [(batch size),(channel),(f,b)(u,p),(l,r)] ->
                # -> asrt N,C == 0, (f, u, l, b, d, r)
                if not keepdims:
                    assert padding[0] == (0, 0) and padding[1] == (0, 0), (
                        "Incorrect padding. " "Padding on C,I dimensions not supported"
                    )
                    # itertools faster barely
                    return list(
                        itertools.chain.from_iterable(zip(padding[2], padding[3], padding[4]))
                    )
                # {s-w style} :: [(batch),(channel),(f,b),(u,d),(l,r)] ->
                # -> [(batch),(channel),(f,b),(u,d),(l,r)]
                else:
                    return padding

    raise ValueError(
        f"Incorrect padding style for {rank - 2}D operand.
Only length of {rank - 2}, {rank} " f"supported, got {len(padding)}: {padding}" ) raise ValueError("nnef should not have singular padding") def _calculate_nnef_padding(active_shape, strides, kernel_shape, dilation): """Ordering of nnef autopad and tvm autopad are sometimes different, this method calculates nnef like padding from dimensions Parameters ---------- active_shape the data dimensions strides the strides over the active dimensions kernel_shape the shape of the window, must have the same rank as active shape dilation the dilations over the active dimensions """ output = [(ui + (s - 1)) // s for ui, s in zip(active_shape, strides)] dilated = [(f - 1) * d + 1 for f, d in zip(kernel_shape, dilation)] total = [ max(0, (di - 1) * s + df - ui) for di, s, df, ui in zip(output, strides, dilated, active_shape) ] padding = [(pad // 2, (pad + 1) // 2) for pad in total] return padding def _calculate_nnef_padding_deconv(data_sh, strides, kernel_active_sh, dilation, output_shape): out_sh = output_shape[2:] if output_shape else [ui * s for ui, s in zip(data_sh, strides)] dilated = [(f - 1) * d + 1 for f, d in zip(kernel_active_sh[2:], dilation)] total = [ max(0, (di - 1) * s + df - ui) for di, s, df, ui in zip(data_sh, strides, dilated, out_sh) ] return total, out_sh def __unexpected_attrs(op, kwargs): raise NotImplementedError( f"{op} received unexpected attributes(s), possibly mismatched versions. 
" "Attributes(s) ignored: " + ", ".join(f"{k} := {v}" for k, v in kwargs.items()) ) # Conversion map, operator functions def _get_converter_map(): return { # Unary "copy": copy_converter, # arithmetic "neg": neg_converter, "rcp": rcp_converter, "exp": exp_converter, "log": log_converter, "sin": sin_converter, "cos": cos_converter, "tan": tan_converter, "sinh": sinh_converter, "cosh": cosh_converter, "tanh": tanh_converter, "asin": asin_converter, "acos": acos_converter, "atan": atan_converter, "asinh": asinh_converter, "acosh": acosh_converter, "atanh": atanh_converter, "abs": abs_converter, "sign": sign_converter, "not": not_converter, # logical "floor": floor_converter, # rounding "ceil": ceil_converter, "round": round_converter, # Binary "add": add_converter, # arithmetic "sub": sub_converter, "mul": mul_converter, "div": div_converter, "pow": pow_converter, "lt": lt_converter, # comparison "gt": gt_converter, "le": le_converter, "ge": ge_converter, "eq": eq_converter, "ne": ne_converter, "and": and_converter, # logical "or": or_converter, # select "select": select_converter, # simplifier "sqr": sqr_converter, "sqrt": sqrt_converter, "rsqr": rsqr_converter, "rsqrt": rsqrt_converter, "log2": log2_converter, "min": min_converter, "max": max_converter, "clamp": clamp_converter, # sliding-window "conv": conv_converter, "deconv": deconv_converter, "box": box_converter, "debox": debox_converter, "argmax_pool": ndop, "sample": ndop, "desample": ndop, "nearest_downsample": nearest_downsample_converter, "area_downsample": area_downsample_converter, "nearest_upsample": nearest_upsample_converter, "multilinear_upsample": multilinear_upsample_converter, # reduce "sum_reduce": sum_reduce_converter, "max_reduce": max_reduce_converter, "min_reduce": min_reduce_converter, "argmax_reduce": argmax_reduce_converter, "argmin_reduce": argmin_reduce_converter, "all_reduce": all_reduce_converter, "any_reduce": any_reduce_converter, "mean_reduce": mean_reduce_converter, # tensor shape 
"reshape": reshape_converter, "squeeze": squeeze_converter, "unsqueeze": unsqueeze_converter, "transpose": transpose_converter, "split": split_converter, "concat": concat_converter, "stack": stack_converter, "unstack": unstack_converter, "slice": slice_converter, "pad": pad_converter, "tile": tile_converter, # region-of-interest - not needed - not supported "avg_roi_pool": ndop, "max_roi_pool": ndop, "roi_resample": ndop, "avg_roi_align": ndop, "max_roi_align": ndop, # matrix multiplication "matmul": matmul_converter, # variables "update": ndop, # --- not used # Compound "sigmoid": sigmoid_converter, # activation "relu": relu_converter, "prelu": prelu_converter, "leaky_relu": leaky_relu_converter, "elu": elu_converter, "selu": selu_converter, "gelu": gelu_converter, "silu": silu_converter, "softmax": softmax_converter, "softplus": softplus_converter, "linear": linear_converter, # linear "separable_conv": separable_conv_converter, "separable_deconv": separable_deconv_converter, "max_pool_with_index": ndop, # pooling "max_pool": max_pool_converter, "avg_pool": avg_pool_converter, "rms_pool": rms_pool_converter, "local_response_normalization": local_response_normalization_converter, # normalization "local_mean_normalization": local_mean_normalization_converter, "local_variance_normalization": local_variance_normalization_converter, "local_contrast_normalization": local_contrast_normalization_converter, "l1_normalization": l1_normalization_converter, "l2_normalization": l2_normalization_converter, "batch_normalization": batch_normalization_converter, "min_max_linear_quantize": ndop, # quantization "zero_point_linear_quantize": ndop, "linear_quantize": ndop, "logarithmic_quantize": ndop, # MISC "copy_n": ndop, "add_n": ndop, "moments": ndop, } # not implemented ops def ndop(*args, **kwargs): raise Exception("Not supported operator was called, please check for compatibility") # # Unary ops def copy_converter(data, **kwargs): """Copy converter""" if kwargs: 
__unexpected_attrs("copy", kwargs) return get_relay_op("copy")(data) def neg_converter(data, **kwargs): """Neg converter""" if kwargs: __unexpected_attrs("neg", kwargs) return get_relay_op("negative")(data) def rcp_converter(data, **kwargs): """Rcp converter""" if kwargs: __unexpected_attrs("rcp", kwargs) if isinstance(data, relay.Call): d_type = infer_type(data).checked_type.dtype else: d_type = data.type_annotation.dtype return div_converter(tvm_expr.const(1, dtype=d_type), data) def exp_converter(data, **kwargs): """Exp converter""" if kwargs: __unexpected_attrs("exp", kwargs) return get_relay_op("exp")(data) def log_converter(data, **kwargs): """Log converter""" if kwargs: __unexpected_attrs("log", kwargs) return get_relay_op("log")(data) def sin_converter(data, **kwargs): """Sin converter""" if kwargs: __unexpected_attrs("sin", kwargs) return get_relay_op("sin")(data) def cos_converter(data, **kwargs): """Cos converter""" if kwargs: __unexpected_attrs("cos", kwargs) return get_relay_op("cos")(data) def tan_converter(data, **kwargs): """Tan converter""" if kwargs: __unexpected_attrs("tan", kwargs) return get_relay_op("tan")(data) def sinh_converter(data, **kwargs): """Sinh converter""" if kwargs: __unexpected_attrs("sinh", kwargs) return get_relay_op("sinh")(data) def cosh_converter(data, **kwargs): """Cosh converter""" if kwargs: __unexpected_attrs("cosh", kwargs) return get_relay_op("cosh")(data) def tanh_converter(data, **kwargs): """Tanh converter""" if kwargs: __unexpected_attrs("tanh", kwargs) return get_relay_op("tanh")(data) def asin_converter(data, **kwargs): """Asin converter""" if kwargs: __unexpected_attrs("asin", kwargs) return get_relay_op("asin")(data) def acos_converter(data, **kwargs): """Acos converter""" if kwargs: __unexpected_attrs("acos", kwargs) return get_relay_op("acos")(data) def atan_converter(data, **kwargs): """Atan converter""" if kwargs: __unexpected_attrs("atan", kwargs) return get_relay_op("atan")(data) def asinh_converter(data, 
def acosh_converter(data, **kwargs):
    """Map NNEF `acosh` onto Relay `acosh`."""
    if kwargs:
        __unexpected_attrs("acosh", kwargs)
    arc_hyp_cosine = get_relay_op("acosh")
    return arc_hyp_cosine(data)


def atanh_converter(data, **kwargs):
    """Map NNEF `atanh` onto Relay `atanh`."""
    if kwargs:
        __unexpected_attrs("atanh", kwargs)
    arc_hyp_tangent = get_relay_op("atanh")
    return arc_hyp_tangent(data)


def abs_converter(data, **kwargs):
    """Map NNEF `abs` onto Relay `abs`."""
    if kwargs:
        __unexpected_attrs("abs", kwargs)
    absolute = get_relay_op("abs")
    return absolute(data)


def sign_converter(data, **kwargs):
    """Map NNEF `sign` onto Relay `sign`."""
    if kwargs:
        __unexpected_attrs("sign", kwargs)
    signum = get_relay_op("sign")
    return signum(data)


def not_converter(data, **kwargs):
    """Map NNEF `not` onto Relay `logical_not`."""
    if kwargs:
        __unexpected_attrs("not", kwargs)
    negation = get_relay_op("logical_not")
    return negation(data)


def floor_converter(data, **kwargs):
    """Map NNEF `floor` onto Relay `floor`."""
    if kwargs:
        __unexpected_attrs("floor", kwargs)
    round_down = get_relay_op("floor")
    return round_down(data)


def ceil_converter(data, **kwargs):
    """Map NNEF `ceil` onto Relay `ceil`."""
    if kwargs:
        __unexpected_attrs("ceil", kwargs)
    round_up = get_relay_op("ceil")
    return round_up(data)


def round_converter(data, **kwargs):
    """Map NNEF `round` onto Relay `round`."""
    if kwargs:
        __unexpected_attrs("round", kwargs)
    round_half = get_relay_op("round")
    return round_half(data)


# # Binary ops


def add_converter(lhs, rhs, **kwargs):
    """Map NNEF `add` onto Relay `add`."""
    if kwargs:
        __unexpected_attrs("add", kwargs)
    addition = get_relay_op("add")
    return addition(lhs, rhs)


def sub_converter(lhs, rhs, **kwargs):
    """Map NNEF `sub` onto Relay `subtract`."""
    if kwargs:
        __unexpected_attrs("sub", kwargs)
    subtraction = get_relay_op("subtract")
    return subtraction(lhs, rhs)


def mul_converter(lhs, rhs, **kwargs):
    """Map NNEF `mul` onto Relay `multiply`."""
    if kwargs:
        __unexpected_attrs("mul", kwargs)
    multiplication = get_relay_op("multiply")
    return multiplication(lhs, rhs)


def div_converter(lhs, rhs, **kwargs):
    """Map NNEF `div` onto Relay `divide`."""
    if kwargs:
        __unexpected_attrs("div", kwargs)
    division = get_relay_op("divide")
    return division(lhs, rhs)


def pow_converter(lhs, rhs, **kwargs):
    """Map NNEF `pow` onto Relay `power`."""
    if kwargs:
        __unexpected_attrs("pow", kwargs)
    exponentiation = get_relay_op("power")
    return exponentiation(lhs, rhs)
"""Lt converter""" if kwargs: __unexpected_attrs("lt", kwargs) return get_relay_op("less")(lhs, rhs) def gt_converter(lhs, rhs, **kwargs): """Gt converter""" if kwargs: __unexpected_attrs("gt", kwargs) return get_relay_op("greater")(lhs, rhs) def le_converter(lhs, rhs, **kwargs): """Le converter""" if kwargs: __unexpected_attrs("le", kwargs) return get_relay_op("less_equal")(lhs, rhs) def ge_converter(lhs, rhs, **kwargs): """Ge converter""" if kwargs: __unexpected_attrs("ge", kwargs) return get_relay_op("greater_equal")(lhs, rhs) def eq_converter(lhs, rhs, **kwargs): """Eq converter""" if kwargs: __unexpected_attrs("eq", kwargs) return get_relay_op("equal")(lhs, rhs) def ne_converter(lhs, rhs, **kwargs): """Ne converter""" if kwargs: __unexpected_attrs("ne", kwargs) return get_relay_op("not_equal")(lhs, rhs) def and_converter(lhs, rhs, **kwargs): """And converter""" if kwargs: __unexpected_attrs("and", kwargs) return get_relay_op("logical_and")(lhs, rhs) def or_converter(lhs, rhs, **kwargs): """Or converter""" if kwargs: __unexpected_attrs("or", kwargs) return get_relay_op("logical_or")(lhs, rhs) # # Select op def select_converter(condition, t_val, f_val, **kwargs): """Select converter""" if kwargs: __unexpected_attrs("select", kwargs) return get_relay_op("where")(condition, t_val, f_val) # # Simplifier ops def sqr_converter(data, **kwargs): """sqr converter""" if kwargs: __unexpected_attrs("sqr", kwargs) if isinstance(data, relay.Call): d_type = infer_type(data).checked_type.dtype else: d_type = data.type_annotation.dtype return get_relay_op("power")(data, tvm_expr.const(2.0, dtype=d_type)) def sqrt_converter(data, **kwargs): """sqrt converter""" if kwargs: __unexpected_attrs("sqrt", kwargs) return get_relay_op("sqrt")(data) def rsqr_converter(data, **kwargs): """rsqr converter""" if kwargs: __unexpected_attrs("rsqr", kwargs) if isinstance(data, relay.Call): d_type = infer_type(data).checked_type.dtype else: d_type = data.type_annotation.dtype return 
get_relay_op("power")(data, tvm_expr.const(-2.0, dtype=d_type)) def rsqrt_converter(data, **kwargs): """rsqrt converter""" if kwargs: __unexpected_attrs("rsqrt", kwargs) return get_relay_op("rsqrt")(data) def log2_converter(data, **kwargs): """log2 converter""" if kwargs: __unexpected_attrs("log2", kwargs) return get_relay_op("log2")(data) def min_converter(lhs, rhs, **kwargs): """Min converter""" if kwargs: __unexpected_attrs("min", kwargs) return get_relay_op("minimum")(lhs, rhs) def max_converter(lhs, rhs, **kwargs): """Max converter""" if kwargs: __unexpected_attrs("max", kwargs) return get_relay_op("maximum")(lhs, rhs) def clamp_converter(x, a, b, **kwargs): """Clamp converter""" if kwargs: __unexpected_attrs("clamp", kwargs) # only works if b and a are Constant floats, not tensors if isinstance(a, tvm_expr.Constant) and isinstance(b, tvm_expr.Constant): return get_relay_op("clip")(x, float(a.data.numpy()), float(b.data.numpy())) return max_converter(min_converter(x, b), a) # # Sliding-window ops def conv_converter(data, kernel, bias, border, stride, padding, dilation, groups, **kwargs): """Convolution converter, skips bias if it's 0.0 (no bias)""" if kwargs: __unexpected_attrs("conv", kwargs) if border != "constant": print(f"Currently {border} border is not supported, used `constant` border") kernel_shape = infer_shape(kernel) dshape = infer_shape(data) strides = _stride_conv(stride, len(kernel_shape)) if stride else (1,) * (len(kernel_shape) - 2) dilation = dilation if dilation else ((1,) * (len(kernel_shape) - 2)) if not padding: padding = _calculate_nnef_padding(dshape[2:], strides, kernel_shape[2:], dilation) pad = _padding_conv(padding, len(kernel_shape)) channels = kernel_shape[0] if groups == 0: groups = channels op = get_relay_op(dimension_picker("conv", kernel_shape)) conv_out = op( data=data, weight=kernel, strides=strides, padding=pad, dilation=dilation, groups=groups, channels=channels, kernel_size=kernel_shape[2:], ) res = None if 
def deconv_converter(
    data, kernel, bias, border, stride, padding, dilation, output_shape, groups, **kwargs
):
    """Deconvolution converter, using convxd_transpose; skips bias if it is
    all zeros (NNEF encodes "no bias" as a zero constant).

    :param data: input tensor expression
    :param kernel: weight tensor expression (NNEF deconv layout)
    :param bias: bias expression; a zero constant means "no bias"
    :param border: only "constant" is supported; others fall back with a warning
    :param stride/padding/dilation: NNEF attributes; empty means defaults
    :param output_shape: optional full output shape (incl. N, C)
    :param groups: 0 means depthwise-style grouping by kernel_shape[0]
    """
    if kwargs:
        __unexpected_attrs("deconv", kwargs)

    if border != "constant":
        print(f"Currently {border} border is not supported, used `constant` border")

    kernel_shape = infer_shape(kernel)
    rank = len(kernel_shape)

    strides = _stride_conv(stride, rank) if stride else (1,) * (rank - 2)
    dilation = dilation if dilation else ((1,) * (rank - 2))

    total, out_sh = _calculate_nnef_padding_deconv(
        infer_shape(data), strides, kernel_shape, dilation, output_shape
    )

    if padding:
        pad = _padding_conv(padding, rank)
    else:
        pad = _padding_conv([(p // 2, (p + 1) // 2) for p in total], rank)

    if groups == 0:
        groups = kernel_shape[0]
    channels = kernel_shape[1] * groups

    # limit output padding to modulo stride because of tvm checks.
    # BUG FIX: zip over the normalized `strides`, not the raw `stride`
    # attribute — `stride` may be an empty list (NNEF default), which made
    # out_pad empty whenever output_shape was given without explicit strides.
    out_pad = (
        [(x - (y - t)) % s for x, y, t, s in zip(output_shape[2:], out_sh, total, strides)]
        if output_shape
        else (0, 0)
    )

    op = get_relay_op(dimension_picker("conv", kernel_shape, suffix="_transpose"))
    deconv_out = op(
        data=data,
        weight=kernel,
        strides=strides,
        padding=pad,
        dilation=dilation,
        groups=groups,
        channels=channels,
        kernel_size=kernel_shape[2:],
        output_padding=out_pad,
    )

    res = None
    if isinstance(bias, tvm_expr.Constant):
        # BUG FIX: reduce the elementwise comparison with .all(), consistent
        # with conv_converter. The previous `== np.array([0.0])` produced an
        # array whose truth value is ambiguous for multi-channel biases.
        if (bias.data.numpy() == 0).all():
            res = deconv_out
    if res is None:
        # squeeze needed bc nnef has bias of shape [1, channel]
        res = tvm_op.nn.bias_add(deconv_out, relay.squeeze(bias, axis=0))
    return res
filter""" if kwargs: __unexpected_attrs("box", kwargs) dshape = infer_shape(data) if isinstance(data, relay.Call): d_type = infer_type(data).checked_type.dtype else: d_type = data.type_annotation.dtype size[0] = dshape[1] if normalize: kernel = relay.full(tvm_op.const(1 / math.prod(size[2:]), d_type), size, d_type) else: kernel = relay.ones(size, d_type) out = conv_converter( data, kernel, tvm_expr.const(0, dtype=d_type), border, stride, padding, dilation, dshape[1] ) return out def debox_converter( data, size, border, padding, stride, dilation, normalize, output_shape, **kwargs ): """Debox operator converter, inverse of box, equal to deconv with constant filter""" if kwargs: __unexpected_attrs("debox", kwargs) dshape = infer_shape(data) if isinstance(data, relay.Call): d_type = infer_type(data).checked_type.dtype else: d_type = data.type_annotation.dtype size[0] = dshape[1] if normalize: kernel = relay.full(tvm_op.const(1 / math.prod(size[2:]), d_type), size, d_type) else: kernel = relay.ones(size, d_type) out = deconv_converter( data, kernel, tvm_expr.const(0, dtype=d_type), border, stride, padding, dilation, output_shape, groups=dshape[1], ) return out def nearest_downsample_converter(data, factor, **kwargs): """Nearest neighbour downsample converter""" if kwargs: __unexpected_attrs("nearest_downsample", kwargs) dims = 2 + len(factor) return box_converter( data, size=[1] * dims, border="constant", padding=[(0, 0)] * dims, stride=[1, 1] + factor, dilation=(1,) * (dims - 2), normalize=False, ) def area_downsample_converter(data, factor, **kwargs): """Area downsample converter""" if kwargs: __unexpected_attrs("area_downsample", kwargs) dims = 2 + len(factor) return box_converter( data, size=[1, 1] + factor, border="constant", padding=[(0, 0)] * dims, stride=[1, 1] + factor, dilation=(1,) * (dims - 2), normalize=True, ) def nearest_upsample_converter(data, factor, **kwargs): """Nearest neighbour upsample converter""" if kwargs: __unexpected_attrs("nearest_upsample", 
def multilinear_upsample_converter(data, factor, method, border, **kwargs):
    """Multilinear upsampling converter.

    :param data: input tensor in N, C, spatial... layout
    :param factor: per-spatial-dimension integer upsampling factors
    :param method: NNEF method, e.g. "aligned" or "symmetric"
    :param border: NNEF border mode, e.g. "replicate"

    "aligned" and "symmetric"+"replicate" map directly onto image.resize;
    every other combination is emulated with a transposed convolution whose
    kernel holds precomputed linear-interpolation weights.
    """
    if kwargs:
        __unexpected_attrs("linear_upsample", kwargs)

    # for aligned and symmetric replicate resize can be used
    dshape = infer_shape(data)
    new_size = [d * f for d, f in zip(dshape[2:], factor)]
    if method == "aligned":
        # conversion from nn.upsampling to image.resizexd, re: discuss:11650
        return get_relay_op(dimension_picker("resize", dshape))(
            data,
            new_size,
            method="linear",
            coordinate_transformation_mode="align_corners",
        )
    if method == "symmetric" and border == "replicate":
        return get_relay_op(dimension_picker("resize", dshape))(
            data,
            new_size,
            method="linear",
            coordinate_transformation_mode="half_pixel",
        )

    # other combinations need to be calculated with convolution
    def _upsample_weights_1d(fact, symm):
        # 1D linear interpolation weights; the symmetric variant mirrors the
        # half-offset ramp, the asymmetric one is a full triangular kernel
        if symm:
            _weights = [1 - (i + 0.5) / fact for i in range(fact)]
            _weights = list(reversed(_weights)) + _weights
        else:
            _weights = [1 - abs(i) / float(fact) for i in range(-fact + 1, fact)]
        return np.array(_weights)

    def _upsample_weights_nd(fact, symm):
        # outer product of the per-dimension 1D weight vectors
        _weights = [_upsample_weights_1d(f, symm) for f in fact]
        return reduce(np.multiply, np.ix_(*_weights))

    n, c = dshape[:2]
    symmetric = method == "symmetric"

    weights = _upsample_weights_nd(factor, symmetric)
    weights = np.reshape(weights, newshape=(1, 1) + weights.shape)
    # replicate the (1, 1, ...) weight kernel across the C input channels
    kernel = tile_converter(tvm_expr.const(weights), (c, 1) + (1,) * len(factor))

    output_shape = [n, c] + [f * s for f, s in zip(factor, dshape[2:])]

    if symmetric:
        # grouped deconv (one group per channel) applies the interpolation
        # kernel independently to each channel
        return deconv_converter(
            data,
            kernel,
            tvm_expr.const(0.0),
            border="constant",
            stride=factor,
            padding=[(f - 1, f - 1) for f in factor],
            dilation=[],
            groups=c,
            output_shape=output_shape,
        )
    else:
        replicate = border == "replicate"
        if replicate:
            # pre-pad one replicated element at the leading edge of each
            # spatial dimension so the asymmetric kernel sees valid data
            data = pad_converter(
                data, [(0, 0), (0, 0)] + [(1, 0)] * len(factor), border, tvm_expr.const(0.0)
            )
            padding = factor
        else:
            padding = [f // 2 for f in factor]
        return deconv_converter(
            data,
            kernel,
            tvm_expr.const(0.0),
            border="constant",
            stride=factor,
            padding=[(p, p - 1) for p in padding],
            dilation=[],
            groups=c,
            output_shape=output_shape,
        )
def reshape_converter(data, shape, axis_start, axis_count, **kwargs):
    """Reshape converter: splice `shape` into the inferred shape over the
    [axis_start, axis_start + axis_count) window; -1 count means "to the end"."""
    if kwargs:
        __unexpected_attrs("reshape", kwargs)
    dshape = list(infer_shape(data))
    if axis_count == -1:
        new_shape = dshape[:axis_start] + shape
    else:
        new_shape = dshape[:axis_start] + shape + dshape[axis_start + axis_count :]
    return get_relay_op("reshape")(data, new_shape)


def squeeze_converter(data, axes, **kwargs):
    """Squeeze converter: drop the given singleton axes."""
    if kwargs:
        __unexpected_attrs("squeeze", kwargs)
    return relay.squeeze(data, axes)


def unsqueeze_converter(data, axes, **kwargs):
    """Unsqueeze converter: insert singleton axes one at a time."""
    if kwargs:
        __unexpected_attrs("unsqueeze", kwargs)
    for ax in sorted(axes):
        # negative axes are only adjusted while `data` is still a Var; after
        # the first expand_dims it becomes a Call and is used as-is
        if ax < 0 and isinstance(data, tvm_expr.Var):
            ax = len(data.type_annotation.concrete_shape) + len(axes) + ax
        data = tvm_op.expand_dims(data, axis=ax, num_newaxis=1)
    return data


def transpose_converter(data, axes, **kwargs):
    """Transpose converter."""
    if kwargs:
        __unexpected_attrs("transpose", kwargs)
    permute = get_relay_op("transpose")
    return permute(data, axes)


def split_converter(data, axis, ratios, **kwargs):
    """Split converter: turn NNEF ratios into absolute split indices."""
    if kwargs:
        __unexpected_attrs("split", kwargs)
    axis_len = infer_shape(data)[axis]
    unit = axis_len / sum(ratios)
    indices = []
    running = 0
    for ratio in ratios[:-1]:
        running += ratio * unit
        # Strictly needs int
        indices.append(int(running))
    return get_relay_op("split")(data, indices, axis)


def concat_converter(*data, axis, **kwargs):
    """Concat converter."""
    if kwargs:
        __unexpected_attrs("concat", kwargs)
    concatenate = get_relay_op("concatenate")
    return concatenate(data, axis)


def stack_converter(*data, axis, **kwargs):
    """Stack converter."""
    if kwargs:
        __unexpected_attrs("stack", kwargs)
    stack = get_relay_op("stack")
    return stack(data, axis)
"""Unstack converter""" if kwargs: __unexpected_attrs("unstack", kwargs) split = split_converter(data, axis, [1] * infer_shape(data)[axis]) res = [] for i in range(len(split)): res.append(squeeze_converter(split[i], axis)) return tvm_expr.TupleWrapper(relay.Tuple(res), len(res)) def slice_converter(data, axes, begin, end, stride, **kwargs): """Slice converter""" if kwargs: __unexpected_attrs("slice", kwargs) if not stride: stride = [1] * len(axes) return get_relay_op("strided_slice")(data, begin, end, strides=stride, axes=axes) def pad_converter(data, padding, border, value, **kwargs): """Pad converter""" if kwargs: __unexpected_attrs("pad", kwargs) if border not in ["constant", "replicate", "reflect"]: print(f"{border} border type is not supported in padding. Assumed constant") border = "constant" if border == "replicate": border = "edge" return get_relay_op("pad")(data, padding, value, border) def tile_converter(data, repeats, **kwargs): """Tile converter""" if kwargs: __unexpected_attrs("tile", kwargs) return get_relay_op("tile")(data, repeats) # # Region-of-interest ops # # Matrix multiplication def matmul_converter(a, b, **kwargs): """Matmul converter real signature: matmul_converter(a, b, transposeA, transposeB)""" transpose_a = kwargs.pop("transposeA") transpose_b = kwargs.pop("transposeB") if kwargs: __unexpected_attrs("matmul", kwargs) a_shape = infer_shape(a) b_shape = infer_shape(b) a_rank = len(a_shape) b_rank = len(b_shape) if a_rank == 2 and b_rank == 2: out = get_relay_op("matmul")(a, b, transpose_a=transpose_a, transpose_b=transpose_b) else: batch_shape = [1] * (max(a_rank, b_rank) - 2) for i, j in enumerate(reversed(a_shape[:-2])): batch_shape[i] = j for i, j in enumerate(reversed(b_shape[:-2])): # Need to check if axis can be broadcasted if batch_shape[i] == 1 or j == 1 or batch_shape[i] == j: batch_shape[i] = max(batch_shape[i], j) else: msg = "Batch dimensions are not broadcastable." 
raise AssertionError(msg) batch_shape = batch_shape[::-1] a = tvm_op.broadcast_to(a, batch_shape + list(a_shape[-2:])) b = tvm_op.broadcast_to(b, batch_shape + list(b_shape[-2:])) out = get_relay_op("batch_matmul")( tvm_op.reshape(a, [-1, *a_shape[-2:]]), tvm_op.reshape(b, [-1, *b_shape[-2:]]), transpose_b=transpose_b, transpose_a=transpose_a, ) out_shape = batch_shape + [a_shape[-2]] + [b_shape[-1]] out = tvm_op.reshape(out, out_shape) return out # # Variable updates # # Compound ops def sigmoid_converter(data, **kwargs): """Sigmoid converter""" if kwargs: __unexpected_attrs("sigmoid", kwargs) return get_relay_op("sigmoid")(data) def relu_converter(data, **kwargs): """RELU converter""" if kwargs: __unexpected_attrs("relu", kwargs) return get_relay_op("relu")(data) def prelu_converter(data, alpha, **kwargs): """PRELU converter""" if kwargs: __unexpected_attrs("prelu", kwargs) # prelu can"t handle float vals but NNEF supports direct parameter, this is just in case if isinstance(alpha, tvm_expr.Constant): if alpha.data.numpy().size == 1: return get_relay_op("leaky_relu")(data, alpha.data.numpy().item()) return get_relay_op("prelu")(data, alpha) def leaky_relu_converter(data, alpha, **kwargs): """Leaky RELU converter""" if kwargs: __unexpected_attrs("leaky_relu", kwargs) return get_relay_op("leaky_relu")(data, alpha) def elu_converter(data, alpha, **kwargs): """ELU converter""" if kwargs: __unexpected_attrs("elu", kwargs) return select_converter( lt_converter(data, tvm_expr.const(0.0)), mul_converter( tvm_expr.const(alpha), sub_converter(exp_converter(data), tvm_expr.const(1.0)) ), data, ) def selu_converter(data, alpha, **kwargs): """SELU converter True signature is selu_converter(data, alpha, lambda)""" lambda_var = kwargs.pop("lambda") if kwargs: __unexpected_attrs("selu", kwargs) return mul_converter( tvm_expr.const(lambda_var), select_converter( data < tvm_expr.const(0.0), mul_converter( tvm_expr.const(alpha), sub_converter(exp_converter(data), 
def gelu_converter(data, **kwargs):
    """GELU converter.

    NNEF defines GELU as x * Phi(x), where Phi is the CDF of the standard
    normal distribution; NNEF suggests the sigmoid(1.702 * x) approximation,
    but this implementation uses the exact erf-based normcdf (matching the
    PyTorch GELU implementation).
    """
    if kwargs:
        __unexpected_attrs("gelu", kwargs)
    half = tvm_expr.const(0.5)
    inv_sqrt2 = tvm_expr.const(0.5**0.5)
    return data * (half + tvm_op.erf(data * inv_sqrt2) * half)


def silu_converter(data, **kwargs):
    """SiLU converter: x * sigmoid(x)."""
    if kwargs:
        __unexpected_attrs("silu", kwargs)
    gate = sigmoid_converter(data)
    return mul_converter(data, gate)


def softmax_converter(data, axes, **kwargs):
    """Softmax converter; only a single axis is supported."""
    if kwargs:
        __unexpected_attrs("softmax", kwargs)
    if len(axes) > 1:
        print("Multiple axes not supported, operation has been done along the first axis in axes.")
    return get_relay_op("softmax")(data, axes[0])


def softplus_converter(data, **kwargs):
    """Softplus converter: log(exp(x) + 1)."""
    if kwargs:
        __unexpected_attrs("softplus", kwargs)
    shifted = add_converter(exp_converter(data), tvm_expr.const(1.0))
    return log_converter(shifted)


# # linear ops


def linear_converter(data, _filter, bias, **kwargs):
    """Linear converter: data @ filter^T, with the bias skipped when it is
    the all-zero constant NNEF uses for "no bias"."""
    if kwargs:
        __unexpected_attrs("linear", kwargs)
    out = get_relay_op("matmul")(data, _filter, transpose_b=True)
    if isinstance(bias, tvm_expr.Constant) and (bias.data.numpy() == 0).all():
        return out
    # squeeze needed because nnef has bias of shape [1, channel]
    return tvm_op.nn.bias_add(out, relay.squeeze(bias, axis=0))
data.type_annotation.dtype filtered = conv_converter( data, plane_filter, tvm_expr.const(0, dtype=d_type), border, stride, padding, dilation, 0 ) return conv_converter(filtered, point_filter, bias, "constant", [], [], [], groups) def separable_deconv_converter( data, plane_filter, point_filter, bias, border, padding, stride, dilation, output_shape, groups, **kwargs, ): """Separable deconvolution converter""" if kwargs: __unexpected_attrs("separable_deconv", kwargs) if isinstance(data, relay.Call): d_type = infer_type(data).checked_type.dtype else: d_type = data.type_annotation.dtype filtered = deconv_converter( data, point_filter, tvm_expr.const(0, dtype=d_type), "constant", [], [], [], [], groups ) return deconv_converter( filtered, plane_filter, bias, border, stride, padding, dilation, output_shape, 0 ) def max_pool_converter(data, size, border, padding, stride, dilation, **kwargs): """Max pool converter""" if kwargs: __unexpected_attrs("max_pool", kwargs) if border != "constant": print(f"Currently {border} border is not supported, used `constant` border") dshape = infer_shape(data) rank = len(dshape) pool_size = _size_conv(size, rank) strides = _stride_conv(stride, rank) if stride else (1,) * (rank - 2) dilation = dilation if dilation else ((1,) * (rank - 2)) if not padding: # padding is truncated to `conv style` (only active layers are present) padding = _calculate_nnef_padding(dshape[2:], strides, pool_size, dilation) pad = _padding_conv(padding, rank) if border == "constant": padding = [(0, 0), (0, 0)] + padding data = pad_converter(data, padding, border, tvm_expr.const(0.0)) pad = (0, 0) op = get_relay_op(dimension_picker("max_pool", dshape)) return op( data, pool_size=pool_size, strides=strides, dilation=dilation, padding=pad, ) def avg_pool_converter(data, size, border, padding, stride, dilation, **kwargs): """Avg pool converter""" if kwargs: __unexpected_attrs("avg_pool", kwargs) if border not in ["constant", "ignore"]: print(f"Currently {border} border 
is not supported, used `constant` border") dshape = infer_shape(data) rank = len(dshape) pool_size = _size_conv(size, rank) strides = _stride_conv(stride, rank) if stride else (1,) * (rank - 2) dilation = dilation if dilation else ((1,) * (rank - 2)) # padding is truncated to `conv style` (only active layers are present) active_shape = dshape[2:] if not padding: padding = _calculate_nnef_padding(active_shape, strides, pool_size, dilation) pad = _padding_conv(padding, rank) op = get_relay_op(dimension_picker("avg_pool", dshape)) return op( data, pool_size=pool_size, strides=strides, dilation=dilation, padding=pad, count_include_pad=border != "ignore", ) def rms_pool_converter(data, size, border, padding, stride, dilation, **kwargs): """Rms pool converter""" if kwargs: __unexpected_attrs("rms_pool", kwargs) return sqrt_converter( avg_pool_converter( sqr_converter(data), size=size, border=border, padding=padding, stride=stride, dilation=dilation, ) ) # # Normalization def local_response_normalization_converter(data, size, alpha, beta, bias): """LRN converter""" axis = [i for i in range(len(size)) if size[i] > 1] if len(axis) == 1: axis = axis[0] else: print("Multi axis LRN is not implemented properly, using first axis where size != 1") axis = axis[0] size = size[axis] return get_relay_op("lrn")(data, size, axis, bias, alpha, beta) def local_mean_normalization_converter(data, size, **kwargs): """LMN converter""" if kwargs: __unexpected_attrs("local_mean_normalization", kwargs) mean = box_converter(data, size, "constant", [], [], [], normalize=True) return sub_converter(data, mean) def local_variance_normalization_converter(data, size, bias, epsilon, **kwargs): """LVN converter""" if kwargs: __unexpected_attrs("local_variance_normalization", kwargs) sigma = box_converter(sqr_converter(data), size, "constant", [], [], [], normalize=True) return div_converter( data, max_converter( add_converter(sqrt_converter(sigma), tvm_expr.const(bias)), tvm_expr.const(epsilon) ), ) def 
local_contrast_normalization_converter(data, size, bias, epsilon, **kwargs): """LCN converter""" if kwargs: __unexpected_attrs("local_contrast_normalization", kwargs) centered = local_mean_normalization_converter(data, size) return local_variance_normalization_converter(centered, size, bias, epsilon) def l1_normalization_converter(data, axes, bias, epsilon, **kwargs): """L1 norm converter""" if kwargs: __unexpected_attrs("l1_normalization", kwargs) sigma = sum_reduce_converter(abs_converter(data), axes, False) return div_converter( data, max_converter(add_converter(sigma, tvm_expr.const(bias)), tvm_expr.const(epsilon)) ) def l2_normalization_converter(data, axes, bias, epsilon, **kwargs): """L2 norm converter""" if kwargs: __unexpected_attrs("l2_normalization", kwargs) epsilon = epsilon**2 if bias != 0.0: print("Bias is not supported, assumed 0.0.") # data = add_converter(data, tvm_expr.const(bias)) return get_relay_op("l2_normalize")(data, epsilon, axes) # ok ish def batch_normalization_converter(data, mean, variance, offset, scale, epsilon, **kwargs): """Batch norm converter""" if kwargs: __unexpected_attrs("batch_normalization", kwargs) mean = squeeze_converter(mean, 0) variance = squeeze_converter(variance, 0) offset = squeeze_converter(offset, 0) scale = squeeze_converter(scale, 0) return get_relay_op("batch_norm")(data, scale, offset, mean, variance, epsilon=epsilon)[0] # # Misc ops ================================================ FILE: nnef_tools-pyproject/nnef_tools/generate.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import numpy as np import nnef import sys import os def _is_lambda(value): LAMBDA = lambda: 0 return isinstance(value, type(LAMBDA)) and value.__name__ == LAMBDA.__name__ def _ensure_lambda(value): return value() if not _is_lambda(value) else value def uniform(min=0.0, max=1.0): return lambda shape: np.random.uniform(min, max, shape).astype(np.float32) def normal(mean=0.0, std=1.0): return lambda shape: np.random.normal(mean, std, shape).astype(np.float32) def bernoulli(prob=0.5): return lambda shape: np.random.uniform(0.0, 1.0, shape) > prob def integers(min=0, max=100): return lambda shape: np.random.randint(min, max, shape).astype(np.int32) def main(args): if args.seed is not None: np.random.seed(args.seed) distributions = { 'scalar': uniform(0.0, 1.0), 'integer': integers(0, 100), 'logical': bernoulli(0.5), } try: random = eval(args.random) if isinstance(random, dict): distributions.update({key: _ensure_lambda(value) for key, value in random.items()}) else: random = _ensure_lambda(random) if args.random.startswith('integers'): distributions['integer'] = random elif args.random.startswith('bernoulli'): distributions['logical'] = random else: distributions['scalar'] = random except Exception as e: print("Could not evaluate distribution: " + str(e), file=sys.stderr) return -1 graph = nnef.parse_file(os.path.join(args.model, 'graph.nnef')) for op in graph.operations: if args.weights and op.name == 'variable': label = op.attribs['label'] shape = op.attribs['shape'] data = distributions[op.dtype](shape) filename = os.path.join(args.model, label + 
'.dat') os.makedirs(os.path.split(filename)[0], exist_ok=True) with open(filename, 'wb') as file: nnef.write_tensor(file, data) if args.verbose: print("Generated weight '{}'".format(filename)) if args.inputs and op.name == 'external': name = op.outputs['output'] shape = op.attribs['shape'] data = distributions[op.dtype](shape) filename = os.path.join(args.model, args.inputs, name + '.dat') os.makedirs(os.path.split(filename)[0], exist_ok=True) with open(filename, 'wb') as file: nnef.write_tensor(file, data) if args.verbose: print("Generated input '{}'".format(filename)) return 0 if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('model', type=str, help='The model to generate') parser.add_argument('--random', type=str, required=True, help='Random distribution for input generation, possibly per dtype') parser.add_argument('--seed', type=int, default=None, help='Random seed for input generation') parser.add_argument('--weights', action='store_true', help='Generate weights') parser.add_argument('--inputs', type=str, nargs='?', default=None, const='.', help='Generate inputs') parser.add_argument('--verbose', action='store_true', help='Weather to print generated file names') exit(main(parser.parse_args())) ================================================ FILE: nnef_tools-pyproject/nnef_tools/gmac.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import argparse from functools import reduce from .io.nnef import Reader def _volume(shape): return reduce(lambda x, y: x * y, shape, 1) def _count_macs(op, include_pooling, include_upsampling, include_normalization, include_reduction): if len(op.inputs) == 0 or len(op.outputs) == 0: return 0 input_volume = _volume(op.inputs[0].shape) output_volume = _volume(op.outputs[0].shape) if op.type in ['conv', 'deconv']: volume = input_volume if op.type == 'deconv' else output_volume filter_shape = op.inputs[1].shape return volume * _volume(filter_shape[1:]) elif op.type in ['separable_conv', 'separable_deconv']: volume = input_volume if op.type == 'separable_deconv' else output_volume filter_shape = op.inputs[1].shape inter_channels = filter_shape[0] inter_volume = output_volume / op.outputs[0].shape[1] * inter_channels if op.type == 'separable_deconv' else \ input_volume / op.inputs[0].shape[1] * inter_channels return inter_volume * _volume(filter_shape[2:]) + volume * inter_channels elif op.type == 'linear': filter_shape = op.inputs[1].shape return output_volume * filter_shape[-1] elif op.type == 'matmul': filter_shape = op.inputs[1].shape return output_volume * (filter_shape[-1] if op.attribs['transposeB'] else filter_shape[-2]) elif op.type in ['max_pool', 'avg_pool', 'rms_pool', 'max_pool_with_index', 'box', 'debox'] and include_pooling: volume = input_volume if op.type == 'debox' else output_volume kernel_size = op.attribs['size'] return volume * _volume(kernel_size) elif op.type == 'multilinear_upsample' and include_upsampling: factor = op.attribs['factor'] method = op.attribs['method'] if method == 'symmetric': kernel_size = [2 * f for f in factor] elif method == 'asymmetric': kernel_size = [2 * f - 1 for f in factor] else: kernel_size = factor return input_volume * _volume(kernel_size) elif op.type in ['local_response_normalization', 'local_mean_normalization', 'local_variance_normalization', 'local_contrast_normalization'] and include_normalization: kernel_size = 
op.attribs['size'] return output_volume * _volume(kernel_size) elif op.type in ['l1_normalization', 'l2_normalization', 'batch_normalization'] and include_normalization: return output_volume elif op.type in ['sum_reduce', 'max_reduce', 'min_reduce', 'mean_reduce', 'all_reduce', 'any_reduce'] and include_reduction: return input_volume else: return 0 def get_custom_shapes(module_names): import importlib CUSTOM_SHAPES = "CUSTOM_SHAPES" shapes = {} for module_name in module_names: module = importlib.import_module(module_name) if hasattr(module, CUSTOM_SHAPES): shapes.update(getattr(module, CUSTOM_SHAPES)) return shapes def main(args): custom_shapes = get_custom_shapes(args.custom_shapes) if args.custom_shapes is not None else None reader = Reader(infer_shapes=True, custom_shapes=custom_shapes) graph = reader(args.model) macs = 0 for op in graph.operations: macs += _count_macs(op, args.include_pooling, args.include_upsampling, args.include_normalization, args.include_reduction) volume = 0 for tensor in graph.tensors: volume += _volume(tensor.shape) gmacs = macs / 1000 / 1000 / 1000 mbytes = volume * 4 / 1000 / 1000 print('GMACs = {}'.format(gmacs)) print('Total memory in Mbytes (supposing float32) = {}'.format(mbytes)) return 0 if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('model', type=str, help='The model to visualize') parser.add_argument('--include-pooling', action='store_true', help='Whether to include pooling operations in the calculation') parser.add_argument('--include-upsampling', action='store_true', help='Whether to include (linear) upsampling operations in the calculation') parser.add_argument('--include-normalization', action='store_true', help='Whether to include normalization operations in the calculation') parser.add_argument('--include-reduction', action='store_true', help='Whether to include reduction operations in the calculation') parser.add_argument('--custom-shapes', type=str, nargs='+', help='Module(s) 
containing custom shape inference code (when converting to NNEF)') exit(main(parser.parse_args())) ================================================ FILE: nnef_tools-pyproject/nnef_tools/image_tensor.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .utils import stdio import numpy as np import argparse import nnef import sys import skimage import skimage.io import skimage.color import skimage.transform import glob import os def transform_image(img, color, range, mean, std, size, dtype, data_format): img = img.astype(np.float32) / 255.0 if color.upper() == 'RGB': img = img[..., (0, 1, 2)] # remove alpha channel if present else: img = img[..., (2, 1, 0)] if range is not None: min = np.array(range[0], dtype=np.float32) max = np.array(range[1], dtype=np.float32) img *= max - min img += min if mean is not None: mean = np.array(mean, dtype=np.float32) img -= mean if std is not None: std = np.array(std, dtype=np.float32) img /= std if size is not None: img = skimage.transform.resize(img, size, preserve_range=True, anti_aliasing=True, mode='reflect') if dtype is not None: img = img.astype(dtype) if data_format.upper() == 'NCHW': img = img.transpose((2, 0, 1)) return img def main(args): if args.output is None: if not stdio.is_stdout_piped(): print("Output must be piped", file=sys.stderr) return -1 stdio.set_stdout_to_binary() images = [] for pattern in args.images: filenames = 
sorted(glob.glob(os.path.expanduser(pattern))) assert filenames, "No files found for path: {}".format(pattern) for filename in filenames: img = skimage.img_as_ubyte(skimage.io.imread(filename)) if len(img.shape) == 2: img = skimage.color.gray2rgb(img) img = transform_image(img, args.color, args.range, args.mean, args.std, args.size, np.dtype(args.dtype), args.format) images.append(img) if not all(img.shape == images[0].shape for img in images): print("The size of all images must be the same, or --size must be specified", file=sys.stderr) return -1 tensor = np.stack(images) if args.output is not None: with open(args.output, 'wb') as file: nnef.write_tensor(file, tensor) else: nnef.write_tensor(sys.stdout, tensor) return 0 if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('images', type=str, nargs='+', help='The path(s) of images to turn into a tensor; may include wildcard expressions') parser.add_argument('--size', type=int, nargs=2, default=None, help='The spatial size of the resulting tensor') parser.add_argument('--dtype', type=str, default='float32', help='The data-type of the resulting tensor') parser.add_argument("--color", type=str.upper, choices=['RGB', 'BGR'], default='RGB', help="The resulting color-format") parser.add_argument("--format", type=str.upper, choices=['NCHW', 'NHWC'], default='NCHW', help="The resulting data-format") parser.add_argument("--range", type=float, nargs=2, default=[0, 1], help="Resulting range for representing the image") parser.add_argument("--mean", type=float, nargs='+', default=None, help="Mean to subtract from the image; may be per-channel") parser.add_argument("--std", type=float, nargs='+', default=None, help="Standard deviation to divide the image with; may be per-channel") parser.add_argument('--output', type=str, default=None, help='File name to save the result into') exit(main(parser.parse_args())) ================================================ FILE: 
nnef_tools-pyproject/nnef_tools/interpreter/__init__.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math class Statistics: def __init__(self, num, min, max, sum, ssum): self.num = num self.min = min self.max = max self.sum = sum self.ssum = ssum def __add__(self, other): self.num += other.num self.min = min(self.min, other.min) self.max = max(self.max, other.max) self.sum += other.sum self.ssum += other.ssum return self def mean(self): return self.sum / self.num if self.num != 0 else 0.0 def variance(self, unbiased=True): if self.num <= 1: return 0.0 count = self.num - 1 if unbiased else self.num return self.ssum / count - self.sum * self.sum / (self.num * count) def std(self, unbiased=True): return math.sqrt(max(self.variance(unbiased), 0)) ================================================ FILE: nnef_tools-pyproject/nnef_tools/interpreter/pytorch/__init__.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import division, print_function, absolute_import import torch import nnef import os from .nnef_module import NNEFModule from .. import Statistics class Interpreter: def __init__(self, model, device=None, decomposed=None, custom_operators=None): if isinstance(model, nnef.Graph): self._nnef_graph = model else: self._nnef_graph = nnef.parse_file(os.path.join(model, 'graph.nnef'), lowered=decomposed) self._init_input_shapes(self._nnef_graph) self._nnef_module = NNEFModule(model=model, custom_operators=custom_operators, decomposed=decomposed) if device is None: device = 'cuda' if torch.cuda.is_available() else 'cpu' self._nnef_module.to(device) self._nnef_module.eval() self._device = device def __call__(self, inputs, output_names=None, collect_statistics=False): outputs = {} statistics = {} if collect_statistics else None def callback(name, tensor): if output_names is not None and name in output_names: outputs[name] = tensor.detach().cpu().numpy() if collect_statistics: statistics[name] = self._compute_statistics(tensor) if output_names is not None: assert all(name in self._nnef_graph.tensors for name in output_names), \ "could not find tensor(s) named {}".format({name for name in output_names if name not in self._nnef_graph.tensors}) if output_names is not None or collect_statistics: self._nnef_module.activation_callback = callback torch_inputs = [torch.tensor(input).to(self._device) for input in inputs] with torch.no_grad(): # Without this, gradients are calculated even in eval mode torch_outputs = self._nnef_module.forward(*torch_inputs) 
self._nnef_module.activation_callback = None if output_names is None: outputs = {name: torch_tensor.detach().cpu().numpy() for name, torch_tensor in zip(self._nnef_graph.outputs, torch_outputs)} return (outputs, statistics) if collect_statistics else outputs def input_details(self): return [self._nnef_graph.tensors[name] for name in self._nnef_graph.inputs] def output_details(self): return [self._nnef_graph.tensors[name] for name in self._nnef_graph.outputs] def tensor_details(self): return self._nnef_graph.tensors.values() @staticmethod def _compute_statistics(torch_tensor): num = torch_tensor.numel() if num == 0: return Statistics(num=0, min=0.0, max=0.0, sum=0.0, ssum=0.0) else: return Statistics( num=num, min=float(torch.min(torch_tensor)), max=float(torch.max(torch_tensor)), sum=float(torch.sum(torch_tensor)), ssum=float(torch.sum(torch_tensor * torch_tensor)), ) @staticmethod def _init_input_shapes(graph): from nnef.shapes import _set_shape for op in graph.operations: if op.name == 'external': _set_shape(graph, op.outputs['output'], op.attribs['shape']) ================================================ FILE: nnef_tools-pyproject/nnef_tools/interpreter/pytorch/nnef_module.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import division, print_function, absolute_import import nnef import torch import keyword from . 
import nnef_operators from ...io import nnef as nnef_io from ...io.nnef.reader import _build_graph from ...model.graph import * class NNEFModule(torch.nn.Module): """ A torch.nn.Module that interprets the given NNEF model """ def __init__(self, model, # type: str decomposed=None, # type: typing.Optional[typing.List[str]] custom_operators=None, # type: typing.Optional[typing.Dict[str, typing.Callable]] activation_callback=None, # type: typing.Optional[typing.Callable[[str, torch.Tensor], None]] training_attributes=None, # type: typing.Optional[typing.Dict[str, typing.Dict[str, typing.Any]]] ): # type: (...)->None """ nnef_graph might be modified by this class if training and write_nnef is used """ super(NNEFModule, self).__init__() if isinstance(model, nnef.Graph): self._nnef_graph = _build_graph(model) else: reader = nnef_io.Reader(decomposed=decomposed, infer_shapes=False) self._nnef_graph = reader(model) self._name_inline_constants(self._nnef_graph) for nnef_tensor in self._nnef_graph.tensors: if self._is_variable(nnef_tensor): name = self._registered_name(nnef_tensor.name) data = self._dequantize(nnef_tensor.data, nnef_tensor.quant, channel_axis=0) \ if nnef_tensor.quant else nnef_tensor.data data = self.normalize_dtype(data) self.register_parameter(name, torch.nn.Parameter(torch.tensor(data), requires_grad=data.dtype == np.float32)) elif self._is_constant(nnef_tensor): name = self._registered_name(nnef_tensor.name) data = nnef_tensor.data if not nnef_tensor.producer else \ self._as_numpy(nnef_tensor.producer.attribs['value'], nnef_tensor.producer.attribs['shape'], nnef_tensor.producer.attribs['dtype']) data = self.normalize_dtype(data) self.register_buffer(name, torch.tensor(data)) self._operators = {} self._operators.update(nnef_operators.Operators) if custom_operators: self._operators.update(custom_operators) self._activation_callback = activation_callback self._training_attributes = training_attributes or {} def forward(self, *inputs): assert len(inputs) == 
len(self._nnef_graph.inputs) activations = {nnef_tensor.name: torch_tensor for torch_tensor, nnef_tensor in zip(inputs, self._nnef_graph.inputs)} def get_tensor(name): if hasattr(self, self._registered_name(name)): return getattr(self, self._registered_name(name)) else: return activations[name] def has_tensor(name): return hasattr(self, self._registered_name(name)) or name in activations for op in self._nnef_graph.operations: if op.type == 'external' or op.type == 'variable' or op.type == 'constant': output = get_tensor(op.output.name) if self._activation_callback: self._activation_callback(op.output.name, output) else: assert op.type in self._operators, "Unsupported operation: {}".format(op.type) func = self._operators[op.type] assert all(has_tensor(tensor.name) for tensor in op.inputs),\ "could not fetch input tensor(s) {} for operation {}"\ .format({tensor.name for tensor in op.inputs}, op.type) training_attribs = self._training_attributes.get(op.type, {}) attribs = {**op.attribs, **training_attribs} attribs = {self._escape_keyword(name): value for name, value in six.iteritems(attribs)} if 'dtype' in attribs and op.type != 'constant' and op.type != 'cast': del attribs['dtype'] inputs = [get_tensor(tensor.name) if tensor.name else torch.tensor(tensor.data) for tensor in op.inputs] outputs = func(*inputs, **attribs) if isinstance(op.inputs, tuple) else func(inputs, **attribs) if not isinstance(outputs, (list, tuple)): outputs = (outputs,) for nnef_tensor, output in zip(op.outputs, outputs): if nnef_tensor.quant and not self._is_variable(nnef_tensor): output = self._fake_quantize(output, nnef_tensor.quant, channel_axis=0) activations[nnef_tensor.name] = output if self._activation_callback: self._activation_callback(nnef_tensor.name, output) for nnef_tensor in op.inputs: if nnef_tensor.name in activations and op is nnef_tensor.consumers[-1] and \ nnef_tensor not in self._nnef_graph.outputs: del activations[nnef_tensor.name] return tuple(get_tensor(nnef_tensor.name) 
for nnef_tensor in self._nnef_graph.outputs) def save_nnef(self, path): for nnef_tensor in self._nnef_graph.tensors: if self._is_variable(nnef_tensor.name): torch_tensor = getattr(self, self._registered_name(nnef_tensor.name)) nnef_tensor.data = torch_tensor.detach().cpu().numpy().astype(nnef_tensor.dtype) writer = nnef_io.Writer() writer(self._nnef_graph, path) @property def activation_callback(self): return self._activation_callback @activation_callback.setter def activation_callback(self, callback): self._activation_callback = callback @staticmethod def _is_variable(tensor): return tensor.producer and tensor.producer.type == 'variable' @staticmethod def _is_constant(tensor): return not tensor.producer or tensor.producer.type == 'constant' @staticmethod def _as_numpy(value, shape, dtype): if isinstance(value, list): if len(value) == 1 and int(np.prod(shape)) != 1: return np.full(shape, value[0], dtype=dtype) else: return np.array(value, dtype=dtype).reshape(shape) else: return np.full(shape, value, dtype=dtype) @staticmethod def _escape_keyword(name): return name if not keyword.iskeyword(name) else '_' + name + '_' @staticmethod def _name_inline_constants(graph): constants = 0 for tensor in graph.tensors: if not tensor.name: assert not tensor.producer tensor.name = '$' + str(constants) constants += 1 @staticmethod def _registered_name(name): return '_nnef_' + name @staticmethod def normalize_dtype(data): dtype = NNEFModule._dtypeRemap.get(data.dtype.type) return data.astype(dtype) if dtype is not None else data @staticmethod def _dequantize(data, quant, channel_axis): op_name = quant['op-name'] rank = len(data.shape) if op_name == 'zero_point_linear_quantize': return NNEFModule._dequantize_zero_point(data, NNEFModule._ensure_rank(quant['zero_point'], rank, channel_axis), NNEFModule._ensure_rank(quant['scale'], rank, channel_axis)) elif op_name == 'min_max_linear_quantize' or op_name == 'linear_quantize': return NNEFModule._dequantize_min_max(data, 
NNEFModule._ensure_rank(quant['min'], rank, channel_axis), NNEFModule._ensure_rank(quant['max'], rank, channel_axis), quant['signed'], quant['symmetric'], quant['bits']) else: raise ValueError("Quantization operation '{}' not implemented".format(op_name)) @staticmethod def _dequantize_zero_point(data, zero_point, scale): return (data - zero_point) * scale @staticmethod def _dequantize_min_max(data, min, max, signed, symmetric, bits): if signed: data += 2 ** (bits - 1) - int(symmetric) r = 2 ** bits - 1 - int(signed and symmetric) return data * ((max - min) / r) + min def _fake_quantize(self, tensor, quant, channel_axis): op_type = quant['op-name'] rank = len(tensor.shape) attribs = {key: NNEFModule._ensure_rank(value, rank, channel_axis) if isinstance(value, np.ndarray) else value for key, value in six.iteritems(quant) if key != 'op-name'} assert op_type in self._operators, "Unsupported quantization operation: {}".format(op_type) func = self._operators[op_type] return func(tensor, **attribs) @staticmethod def _ensure_rank(value, rank, offset=0): array = np.array(value) return np.reshape(array, newshape=(1,) * offset + array.shape + (1,) * (rank - offset - len(array.shape))) _dtypeRemap = { np.float16: np.float32, np.float64: np.float32, np.int8: np.int64, np.uint8: np.int64, np.int16: np.int64, np.uint16: np.int64, np.int32: np.int64, np.uint32: np.int64, np.uint64: np.int64, } ================================================ FILE: nnef_tools-pyproject/nnef_tools/interpreter/pytorch/nnef_operators.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import division, print_function, absolute_import from typing import Optional, List, Tuple, Callable, Any from functools import reduce import numpy as np import functools import torch import torch.nn.functional as F import nnef import math # Helpers _numpy_dtype_to_torch = { np.int8: torch.int8, np.int16: torch.int16, np.int32: torch.int32, np.int64: torch.int64, np.uint8: torch.uint8, np.double: torch.double, np.float16: torch.float16, np.float32: torch.float32, np.float64: torch.float64, np.short: torch.short, np.longlong: torch.long, int: torch.int, bool: torch.bool, float: torch.float, } def _clamp(x, a, b): return max(a, min(b, x)) def _expand_to_rank(input, rank): # type: (torch.Tensor, int)->torch.Tensor rank_diff = rank - len(input.shape) return input.reshape(tuple(input.shape) + rank_diff * (1,)) def _expand_binary(input1, input2): # type: (torch.Tensor, torch.Tensor)->Tuple[torch.Tensor, torch.Tensor] rank = max(len(input1.shape), len(input2.shape)) return _expand_to_rank(input1, rank), _expand_to_rank(input2, rank) def _binary(f): def g(x, y): x, y = _expand_binary(x, y) return f(x, y) return g def _prod(items): return functools.reduce(lambda x, y: x * y, items, 1) def _same_padding(input, filter, stride, dilation): assert len(input) == len(filter) == len(stride) == len(dilation) output = [(ui + (s - 1)) // s for ui, s in zip(input, stride)] dilated = [(f - 1) * d + 1 for f, d in zip(filter, dilation)] total = [max(0, (di - 1) * s + df - ui) for di, s, df, ui in zip(output, stride, dilated, input)] return [(pad // 2, (pad + 1) // 2) for pad 
            in total]


def _inverse_permutation(perm):
    """Return the inverse of the permutation `perm` (perm[i] -> i)."""
    inverse = [0] * len(perm)
    for i, p in enumerate(perm):
        inverse[p] = i
    return inverse


def _apply_permutation(items, perm):
    """Reorder `items` according to the permutation `perm`."""
    return [items[ind] for ind in perm]


# Operations

def _positive_pad(input, padding, border='constant', value=0.0):
    # type: (torch.Tensor, List[Tuple[int, int]], str, float)->torch.Tensor
    """Apply non-negative (before, after) padding via F.pad.

    F.pad only pads trailing (spatial) dimensions, so padding must be zero in
    the first two dims (NC layout) or in the first and last dims; the latter
    case is handled by temporarily permuting the last dim into position 1.
    """
    assert all(p >= 0 and q >= 0 for p, q in padding), "Negative padding is not supported "
    assert padding
    assert len(input.shape) in (3, 4, 5)
    assert padding[:2] == [(0, 0), (0, 0)] or (padding[0] == (0, 0) and padding[-1] == (0, 0))
    assert border in ("constant", "reflect", "replicate")

    rank = len(input.shape)
    # If padding is given for the dims 1..rank-2 (channel-last style), move the
    # last dim to position 1 so that only trailing dims carry padding.
    needs_transpose = padding[0] == (0, 0) and padding[1] != (0, 0) and padding[-1] == (0, 0)

    if needs_transpose:
        padding = padding[1:-1]
        input = input.permute([0, rank - 1] + list(range(1, rank - 1)))
    else:
        padding = padding[2:]

    # F.pad expects a flat list starting with the LAST dimension's (p, q).
    pad = []
    for p, q in reversed(padding):
        pad += [p, q]

    # Skip the call entirely when all padding is zero.
    padded = F.pad(input=input, pad=pad, mode=border, value=value) if not all(p == 0 for p in pad) else input

    if needs_transpose:
        padded = padded.permute([0] + list(range(2, rank)) + [1])

    return padded


def nnef_pad(input, padding, border='constant', value=0.0):
    # type: (torch.Tensor, List[Tuple[int, int]], str, float)->torch.Tensor
    """NNEF pad: supports negative padding by padding positively then slicing."""
    assert padding, \
        "nnef.pad does not support empty list as padding"
    assert len(input.shape) in (3, 4, 5), \
        "nnef.pad is only implemented for 3D, 4D, 5D tensors; got: {}D.".format(len(input.shape))
    assert padding[:2] == [(0, 0), (0, 0)] or (padding[0] == (0, 0) and padding[-1] == (0, 0)), \
        "nnef.pad is not implemented in N, C dimensions; got: {}.".format(padding)

    # With at most one element of padding, 'reflect-even' coincides with 'replicate'.
    if all(p <= 1 and q <= 1 for p, q in padding) and border == "reflect-even":
        border = "replicate"

    assert border in ("constant", "reflect", "replicate"), \
        "nnef.pad is only implemented with 'constant', 'reflect' and 'replicate' border; got: {}.".format(border)

    # First apply only the non-negative part of the padding...
    input = _positive_pad(input,
                          padding=[(p if p > 0 else 0, q if q > 0 else 0) for p, q in padding],
                          border=border, value=value)
    # ...then crop away the negative part with a slice.
    return nnef_slice(input,
                      axes=list(range(len(input.shape))),
                      begin=[-p if p < 0 else 0 for p, _q in padding],
                      end=[q if q < 0 else 0 for _p, q in padding])


# Broadcasting elementwise addition.
nnef_add = _binary(lambda x, y: x + y)


def nnef_add_n(values):
    """Sum a list of tensors (recursive pairwise nnef_add)."""
    return nnef_add(values[0], nnef_add_n(values[1:])) if len(values) > 1 else values[0]


def nnef_conv(input,  # type: torch.Tensor
              filter,  # type: torch.Tensor
              bias,  # type: torch.Tensor
              border='constant',  # type: str
              padding=None,  # type: Optional[List[Tuple[int, int]]]
              stride=None,  # type: Optional[List[int]]
              dilation=None,  # type: Optional[List[int]]
              groups=1,  # type: int
              ):
    # type: (...)->torch.Tensor
    """NNEF conv: explicit pad (any supported border) + torch conv with padding=0.

    groups == 0 means planewise (depthwise) convolution; empty padding/stride/
    dilation select SAME padding / unit stride / unit dilation.
    """
    assert len(input.shape) in (3, 4, 5), \
        "nnef.conv is only implemented for 3D, 4D, 5D tensors, given: {}D.".format(len(input.shape))

    # A scalar bias is broadcast to one value per output channel.
    bias = bias.reshape(1, 1).expand((1, filter.shape[0])) if _prod(bias.size()) == 1 else bias

    spatial_dims = len(input.shape[2:])
    groups = input.shape[1] if groups == 0 else groups
    stride = [1] * spatial_dims if not stride else stride
    dilation = [1] * spatial_dims if not dilation else dilation
    if not padding:
        padding = _same_padding(input=input.shape[2:], filter=filter.shape[2:], stride=stride, dilation=dilation)

    # Padding is done up-front so non-constant borders are supported.
    pad = nnef_pad(input=input, padding=[(0, 0)] * 2 + padding, border=border)
    conv = {1: F.conv1d, 2: F.conv2d, 3: F.conv3d}[spatial_dims](input=pad,
                                                                 weight=filter,
                                                                 bias=bias.squeeze(dim=0).contiguous(),
                                                                 stride=tuple(stride),
                                                                 padding=0,
                                                                 dilation=tuple(dilation),
                                                                 groups=groups)
    return conv


def nnef_deconv(input,  # type: torch.Tensor
                filter,  # type: torch.Tensor
                bias,  # type: torch.Tensor
                border='constant',  # type: str
                padding=None,  # type: Optional[List[Tuple[int, int]]]
                stride=None,  # type: Optional[List[int]]
                dilation=None,  # type: Optional[List[int]]
                output_shape=None,  # type: Optional[List[int]]
                groups=1,  # type: int
                ):
    # type: (...)->torch.Tensor
    """NNEF deconv: torch transposed conv with padding=0, then crop via nnef_pad.

    The NNEF padding (which in deconv semantics removes output rows/cols) is
    realized by computing the uncropped output shape and slicing with negative
    padding afterwards.
    """
    assert border == 'constant' or border == 'replicate', "nnef.deconv: '{}' border unsupported.".format(border)

    # The batch dim of a caller-supplied output_shape is overridden by the input's.
    if output_shape and output_shape[0] != input.shape[0]:
        output_shape = list(output_shape)
        output_shape[0] = input.shape[0]

    rank = len(input.shape)
    assert rank in (3, 4, 5), \
        "nnef.deconv is only implemented for 3D, 4D, 5D tensors, given: {}D.".format(len(input.shape))

    spatial_dims = len(input.shape[2:])
    stride = [1] * spatial_dims if not stride else stride
    dilation = [1] * spatial_dims if not dilation else dilation

    if groups == 0:
        if output_shape:
            groups = output_shape[1]
        else:
            # Planewise deconvolution without output_size, assuming that
            # #(input channels) = #(output channels)
            groups = filter.shape[0]

    output_channels = filter.shape[1] * groups
    if output_shape:
        assert output_shape[1] == output_channels
    else:
        # Derive the output shape from NNEF's shape inference.
        output_shape = nnef.shapes.deconv_shape(input=list(input.shape),
                                                filter=filter.shape,
                                                bias=bias.shape,
                                                border=border,
                                                padding=padding,
                                                stride=stride,
                                                dilation=dilation,
                                                groups=groups,
                                                output_shape=None)

    if not padding:
        # SAME padding is computed against the (upsampled) OUTPUT extent.
        padding = _same_padding(input=output_shape[2:],
                                filter=filter.shape[2:],
                                stride=stride,
                                dilation=dilation)

    if border == 'replicate':
        # Emulate replicate border by pre-padding the input one pixel on each
        # side and enlarging the crop by one stride per side.
        input = F.pad(input=input, pad=(1,) * 2 * spatial_dims, mode='replicate')
        padding = [(p + s, q + s) for (p, q), s in zip(padding, stride)]

    uncropped_output_shape = nnef.shapes.deconv_shape(input=list(input.shape),
                                                      filter=filter.shape,
                                                      bias=bias.shape,
                                                      border=border,
                                                      padding=[(0, 0)] * (rank - 2),
                                                      stride=stride,
                                                      dilation=dilation,
                                                      groups=groups,
                                                      output_shape=None)

    crop_before = [p for p, _q in padding]
    crop_after = [uncropped - out - before
                  for uncropped, out, before
                  in zip(uncropped_output_shape[2:], output_shape[2:], crop_before)]

    # A scalar bias is broadcast to one value per output channel.
    bias = bias.reshape(1, 1).expand((1, output_channels)) if _prod(bias.size()) == 1 else bias

    deconv = {1: F.conv_transpose1d,
              2: F.conv_transpose2d,
              3: F.conv_transpose3d}[spatial_dims](input=input,
                                                   weight=filter,
                                                   bias=bias.squeeze(dim=0).contiguous(),
                                                   stride=tuple(stride),
                                                   padding=0,
                                                   output_padding=0,
                                                   groups=groups,
                                                   dilation=tuple(dilation))

    # Negative padding in nnef_pad performs the crop.
    return nnef_pad(deconv, padding=[(0, 0), (0, 0)] + [(-cb, -ca) for cb, ca in
                                                        zip(crop_before, crop_after)])


def _evaluate_max_pool_or_box_params(input_shape, size, padding, stride, dilation):
    """Fill in default stride/dilation (1) and SAME padding when not given."""
    rank = len(input_shape)
    stride = [1] * rank if not stride else stride
    dilation = [1] * rank if not dilation else dilation
    padding = _same_padding(input=input_shape, filter=size, stride=stride, dilation=dilation) if not padding else padding
    return padding, stride, dilation


def _max_pool_impl(input,  # type: torch.Tensor
                   size,  # type: List[int]
                   border='constant',  # type: str
                   padding=None,  # type: Optional[List[Tuple[int, int]]]
                   stride=None,  # type: Optional[List[int]]
                   dilation=None,  # type: Optional[List[int]]
                   with_index=False,  # type: bool
                   ):
    # type: (...)->torch.Tensor
    """Max pooling over the spatial dims of an NC... tensor.

    'ignore' border is realized by constant-padding with -inf so padded
    positions never win the max. With with_index=True the underlying torch op
    also returns the argmax indices.
    """
    spatial_dims = len(input.shape) - 2

    value = float('-inf') if border == 'ignore' else 0.0
    border = 'constant' if border == 'ignore' else border

    pad = nnef_pad(input=input, padding=padding, border=border, value=value)

    result = {1: F.max_pool1d, 2: F.max_pool2d, 3: F.max_pool3d}[spatial_dims](input=pad,
                                                                               kernel_size=size[2:],
                                                                               stride=stride[2:],
                                                                               padding=0,
                                                                               dilation=dilation[2:],
                                                                               return_indices=with_index)
    return result


def _box_impl(input,  # type: torch.Tensor
              size,  # type: List[int]
              border,  # type: str
              padding,  # type: List[Tuple[int, int]]
              stride,  # type: List[int]
              dilation,  # type: List[int]
              normalize,  # type: bool
              ):
    # type: (...)->torch.Tensor
    """Box filtering (sum or average pooling) via avg_pool over spatial dims."""
    assert 3 <= len(input.shape) <= 5
    assert len(input.shape) == len(size) == len(padding) == len(stride) == len(dilation)
    assert padding[:2] == [(0, 0), (0, 0)]
    assert size[:2] == stride[:2] == dilation[:2]
    assert not dilation or all(d == 1 for d in dilation), \
        "nnef.box (avg or sum pooling) is only implemented for dilation = 1."
    spatial_dims = len(input.shape) - 2

    pad = nnef_pad(input=input, padding=padding, border='constant' if border == 'ignore' else border)

    avg_pool = {1: F.avg_pool1d, 2: F.avg_pool2d, 3: F.avg_pool3d}[spatial_dims](input=pad,
                                                                                 kernel_size=size[2:],
                                                                                 stride=stride[2:],
                                                                                 padding=0)

    if border == 'ignore' and normalize:
        # Renormalize by the count of non-padded elements in each window: pool
        # a tensor of ones the same way and divide by it.
        ones = torch.ones_like(input)
        padded_ones = nnef_pad(input=ones, padding=padding, border='constant')
        avg_pool_ones = {1: F.avg_pool1d, 2: F.avg_pool2d, 3: F.avg_pool3d}[spatial_dims](input=padded_ones,
                                                                                          kernel_size=size[2:],
                                                                                          stride=stride[2:],
                                                                                          padding=0)
        # If padding is big, zero averages can happen on the border, don't divide by zero
        avg_pool_ones = nnef_select(avg_pool_ones > 0, avg_pool_ones, torch.ones_like(avg_pool_ones))
        avg_pool /= avg_pool_ones

    if normalize:
        return avg_pool
    else:
        # Sum pooling = average pooling scaled by the window volume.
        return avg_pool * _prod(size)


def _get_transform_for_box_or_max_pool(input_shape, active):
    # type: (List[int], List[bool])->Any
    """Plan a permute/reshape so 'active' (pooled) dims become trailing spatial dims.

    Returns (perm, perm_inv, inactive_shape, active_shape), or four Nones when
    torch pooling supports the layout directly (NC + spatial, batch/channel
    inactive).
    """
    assert len(input_shape) >= 3
    assert len(input_shape) == len(active)
    assert sum(active) <= 3, \
        "Sliding window operations are not supported if they have more than 3 'active' dimensions; got {}".format(sum(active))

    if 3 <= len(input_shape) <= 5 and not active[0] and not active[1]:
        # Direct support
        return None, None, None, None
    else:
        inactive_dims = [i for i, a in enumerate(active) if not a]
        active_dims = [i for i, a in enumerate(active) if a]
        inactive_shape = [s for i, s in enumerate(input_shape) if i not in active_dims]
        active_shape = [s for i, s in enumerate(input_shape) if i in active_dims]
        perm = inactive_dims + active_dims
        perm_inv = _inverse_permutation(perm)
        return perm, perm_inv, inactive_shape, active_shape


def _box_or_max_pool(input,  # type: torch.Tensor
                     size,  # type: List[int]
                     border='constant',  # type: str
                     padding=None,  # type: Optional[List[Tuple[int, int]]]
                     stride=None,  # type: Optional[List[int]]
                     dilation=None,  # type: Optional[List[int]]
                     normalize=False,  # type: bool
                     is_max_pool=False,  # type: bool
                     ):
    # Shared driver for nnef_box and nnef_max_pool; normalize only applies to box.
    assert not (normalize and
                is_max_pool)

    rank = len(input.shape)

    padding, stride, dilation = _evaluate_max_pool_or_box_params(input_shape=list(input.shape),
                                                                 size=size,
                                                                 padding=padding,
                                                                 stride=stride,
                                                                 dilation=dilation)

    # A dim is 'active' when pooling actually does something along it.
    active = [size_ != 1 or padding_ != (0, 0) or stride_ != 1 or dilation_ != 1
              for size_, padding_, stride_, dilation_ in zip(size, padding, stride, dilation)]

    if sum(active) == 0:
        return input

    if rank < 3:
        perm, perm_inv, inactive_shape, active_shape = None, None, None, None
    else:
        perm, perm_inv, inactive_shape, active_shape = _get_transform_for_box_or_max_pool(list(input.shape), active)

    if rank < 3:
        # Too few dims for torch pooling: fake N and C dims of extent 1.
        input = input.unsqueeze(0).unsqueeze(0)
        size = [1, 1] + size
        padding = [(0, 0), (0, 0)] + padding
        stride = [1, 1] + stride
        dilation = [1, 1] + dilation
    elif perm is not None:
        # Move active dims to the back, collapse inactive dims into a fake
        # batch dim, and pool in the canonical NC + spatial layout.
        input = input.permute(*perm)
        size = _apply_permutation(size, perm)
        padding = _apply_permutation(padding, perm)
        stride = _apply_permutation(stride, perm)
        dilation = _apply_permutation(dilation, perm)

        active_rank = len(active_shape)

        input = input.reshape(*[_prod(inactive_shape), 1] + active_shape)
        size = [1, 1] + size[-active_rank:]
        padding = [(0, 0), (0, 0)] + padding[-active_rank:]
        stride = [1, 1] + stride[-active_rank:]
        dilation = [1, 1] + dilation[-active_rank:]

    if is_max_pool:
        output = _max_pool_impl(input=input,
                                size=size,
                                border=border,
                                padding=padding,
                                stride=stride,
                                dilation=dilation,
                                with_index=False)
    else:
        output = _box_impl(input=input,
                           size=size,
                           border=border,
                           padding=padding,
                           stride=stride,
                           dilation=dilation,
                           normalize=normalize)

    # Undo the layout transformation.
    if rank < 3:
        output = output.squeeze(0).squeeze(0)
    elif perm is not None:
        active_rank = len(active_shape)
        output = output.reshape(inactive_shape + list(output.shape)[-active_rank:])
        output = output.permute(*perm_inv)

    return output


def nnef_max_pool(input,  # type: torch.Tensor
                  size,  # type: List[int]
                  border='constant',  # type: str
                  padding=None,  # type: Optional[List[Tuple[int, int]]]
                  stride=None,  # type: Optional[List[int]]
                  dilation=None,  # type: Optional[List[int]]
                  ):
    # type: (...)->torch.Tensor
    """NNEF max_pool over arbitrary dims, via the shared pooling driver."""
    return _box_or_max_pool(input,
                            size=size,
                            border=border,
                            padding=padding,
                            stride=stride,
                            dilation=dilation,
                            is_max_pool=True)


def nnef_max_pool_with_index(input,  # type: torch.Tensor
                             size,  # type: List[int]
                             border='constant',  # type: str
                             padding=None,  # type: Optional[List[Tuple[int, int]]]
                             stride=None,  # type: Optional[List[int]]
                             dilation=None,  # type: Optional[List[int]]
                             ):
    # type: (...)->torch.Tensor
    """NNEF max_pool_with_index: returns (values, indices); spatial dims only."""
    input_shape = list(input.shape)

    padding, stride, dilation = _evaluate_max_pool_or_box_params(input_shape=input_shape,
                                                                 size=size,
                                                                 padding=padding,
                                                                 stride=stride,
                                                                 dilation=dilation)

    assert len(input_shape) in (3, 4, 5), \
        "nnef.max_pool_with_index is only implemented for 3D, 4D, 5D tensors, given: {}D".format(len(input_shape))
    assert size[:2] == [1, 1], \
        "nnef.max_pool_with_index is only implemented for size = 1 in N and C dimensions"
    assert padding[:2] == [(0, 0), (0, 0)],\
        "nnef.max_pool_with_index is only implemented for padding = (0, 0) in N and C dimensions."
    assert stride[:2] == [1, 1], \
        "nnef.max_pool_with_index is only implemented for stride = 1 in N and C dimensions"
    assert dilation[:2] == [1, 1], \
        "nnef.max_pool_with_index is only implemented for dilation = 1 in N and C dimensions"

    return _max_pool_impl(input,
                          size=size,
                          border=border,
                          padding=padding,
                          stride=stride,
                          dilation=dilation,
                          with_index=True)


def nnef_argmax_pool(input,  # type: torch.Tensor
                     size,  # type: List[int]
                     border='constant',  # type: str
                     padding=None,  # type: Optional[List[Tuple[int, int]]]
                     stride=None,  # type: Optional[List[int]]
                     dilation=None,  # type: Optional[List[int]]
                     ):
    # type: (...)->torch.Tensor
    """NNEF argmax_pool: the index part of max_pool_with_index."""
    _, index = nnef_max_pool_with_index(input,
                                        size=size,
                                        border=border,
                                        padding=padding,
                                        stride=stride,
                                        dilation=dilation)
    return index


def nnef_box(input,  # type: torch.Tensor
             size,  # type: List[int]
             border='constant',  # type: str
             padding=None,  # type: Optional[List[Tuple[int, int]]]
             stride=None,  # type: Optional[List[int]]
             dilation=None,  # type: Optional[List[int]]
             normalize=False,  # type: bool
             ):
    # type: (...)->torch.Tensor
    """NNEF box filter: sum pooling, or average pooling when normalize=True."""
    return _box_or_max_pool(input,
                            size=size,
                            border=border,
                            padding=padding,
                            stride=stride,
                            dilation=dilation,
                            normalize=normalize)


def nnef_debox(input,  # type: torch.Tensor
               size,  # type: List[int]
               border='constant',  # type: str
               padding=None,  # type: Optional[List[Tuple[int, int]]]
               stride=None,  # type: Optional[List[int]]
               dilation=None,  # type: Optional[List[int]]
               output_shape=None,  # type: Optional[List[int]]
               normalize=False,  # type: bool
               ):
    """NNEF debox: inverse of box, realized as a planewise deconv with a constant filter."""
    assert border in ('constant', 'ignore'), \
        "nnef.debox: '{}' border unsupported".format(border)
    assert len(size) in (3, 4, 5), \
        "nnef.debox is only implemented for 3D, 4D, 5D tensors, given: {}D".format(len(size))
    assert size[:2] == [1, 1], \
        "nnef.debox is only implemented for size = 1 in N and C dimensions"
    assert not padding or padding[:2] == [(0, 0), (0, 0)], \
        "nnef.debox is only implemented for padding = (0, 0) in N and C dimensions"
    assert not stride or stride[:2] == [1, 1], \
        "nnef.debox is only implemented for stride = 1 in N and C dimensions"
    assert not dilation or dilation[:2] == [1, 1], \
        "nnef.debox is only implemented for dilation = 1 in N and C dimensions"

    # One constant filter plane per channel; value 1/volume reproduces the
    # average (normalize=True), value 1 reproduces the sum.
    filter = torch.full(size=[input.shape[1], 1] + list(size)[2:],
                        fill_value=(1.0 / _prod(size) if normalize else 1.0),
                        device=input.device,
                        dtype=input.dtype)
    bias = torch.zeros(size=tuple(), device=input.device, dtype=input.dtype)
    return nnef_deconv(input=input,
                       filter=filter,
                       bias=bias,
                       border='constant',
                       padding=padding[2:] if padding else padding,
                       stride=stride[2:] if stride else stride,
                       dilation=dilation[2:] if dilation else dilation,
                       output_shape=output_shape,
                       groups=input.shape[1])


def nnef_avg_pool(input,  # type: torch.Tensor
                  size,  # type: List[int]
                  border='constant',  # type: str
                  padding=None,  # type: Optional[List[Tuple[int, int]]]
                  stride=None,  # type: Optional[List[int]]
                  dilation=None,  # type: Optional[List[int]]
                  ):
    # type: (...)->torch.Tensor
    """NNEF avg_pool: a normalized box filter."""
    return nnef_box(input,
                    size=size,
                    border=border,
                    padding=padding,
                    stride=stride,
                    dilation=dilation,
                    normalize=True)


def nnef_rms_pool(input,  # type: torch.Tensor
                  size,  # type: List[int]
                  border='constant',  # type: str
                  padding=None,  # type: Optional[List[Tuple[int, int]]]
                  stride=None,  # type: Optional[List[int]]
                  dilation=None,  # type: Optional[List[int]]
                  ):
    # type: (...)->torch.Tensor
    """NNEF rms_pool: sqrt(avg_pool(x^2))."""
    return torch.sqrt(nnef_avg_pool(torch.pow(input, 2.0),
                                    size=size,
                                    border=border,
                                    padding=padding,
                                    stride=stride,
                                    dilation=dilation))


def nnef_desample(input,  # type: torch.Tensor
                  index,  # type: torch.Tensor
                  size,  # type: List[int]
                  border='constant',  # type: str
                  padding=None,  # type: Optional[List[Tuple[int, int]]]
                  stride=None,  # type: Optional[List[int]]
                  dilation=None,  # type: Optional[List[int]]
                  output_shape=None,  # type: Optional[List[int]]
                  ):
    # type: (...)->torch.Tensor
    """NNEF desample: inverse of max_pool_with_index via torch max_unpool.

    The unpooled tensor is produced at the padded extent and the padding is
    then sliced away.
    """
    # The batch dim of a caller-supplied output_shape is overridden by the input's.
    if output_shape and output_shape[0] != input.shape[0]:
        output_shape = list(output_shape)
        output_shape[0] = input.shape[0]

    input_shape = list(input.shape)
    index_shape = list(index.shape)
    rank = len(input_shape)
    spatial_dims = len(input_shape[2:])

    assert len(input_shape) in (3, 4, 5), \
        "nnef.desample is only implemented for 3D, 4D, 5D tensors, given: {}D".format(len(input_shape))
    assert not size or size[:2] == [1, 1], \
        "nnef.desample is only implemented for size = 1 in N and C dimensions"
    assert not padding or padding[:2] == [(0, 0), (0, 0)], \
        "nnef.desample is only implemented for padding = (0, 0) in N and C dimensions"
    assert not stride or stride[:2] == [1, 1], \
        "nnef.desample is only implemented for stride = 1 in N and C dimensions"
    assert not dilation or all(d == 1 for d in dilation), \
        "nnef.desample is only implemented for dilation = 1"

    stride = [1] * rank if not stride else stride
    dilation = [1] * rank if not dilation else dilation

    if not padding:
        calculated_output_shape = [i * s for i, s in zip(input_shape, stride)]
        padding = _same_padding(input=calculated_output_shape,
                                filter=size,
                                stride=stride,
                                dilation=dilation)
    else:
        calculated_output_shape = nnef.shapes.desample_shape(input_shape,
                                                             index_shape,
                                                             size=size,
                                                             border=border,
                                                             padding=padding,
                                                             stride=stride,
                                                             dilation=dilation,
                                                             output_shape=None)

    output_shape = output_shape if output_shape else calculated_output_shape
    padded_output_shape = [s + p + q for s, (p, q) in zip(output_shape, padding)]

    unpooled = {1: F.max_unpool1d, 2: F.max_unpool2d, 3: F.max_unpool3d}[spatial_dims](input=input,
                                                                                       indices=index,
                                                                                       kernel_size=size[2:],
                                                                                       stride=stride[2:],
                                                                                       padding=0,
                                                                                       output_size=padded_output_shape)

    # Remove the padding region from the unpooled result.
    return nnef_slice(unpooled,
                      axes=list(range(rank)),
                      begin=[p for p, _q in padding],
                      end=[p + s for (p, _q), s in zip(padding, output_shape)])


def nnef_batch_normalization(input,  # type: torch.Tensor
                             mean,  # type: torch.Tensor
                             variance,  # type: torch.Tensor
                             offset,  # type: torch.Tensor
                             scale,  # type: torch.Tensor
                             epsilon,  # type: float
                             is_training=False,  # type: bool
                             momentum=0.1,  # type: float
                             ):
    # type: (...)->torch.Tensor
    """NNEF batch_normalization mapped onto F.batch_norm.

    The NNEF stats/params carry a leading singleton batch dim that torch does
    not expect, hence the squeeze on axis 0.
    """
    # Running stats must not collect gradients when passed to F.batch_norm.
    if isinstance(mean, torch.nn.Parameter):
        mean.requires_grad = False
    if isinstance(variance, torch.nn.Parameter):
        variance.requires_grad = False

    return F.batch_norm(input=input,
                        running_mean=nnef_squeeze(mean, axes=[0]),
                        running_var=nnef_squeeze(variance, axes=[0]),
                        weight=nnef_squeeze(scale, axes=[0]),
                        bias=nnef_squeeze(offset, axes=[0]),
                        training=is_training,
                        momentum=momentum,
                        eps=epsilon)


def _upsample_weights_1d(factor, symmetric):
    """1D triangular interpolation weights for a given integer upsample factor."""
    if symmetric:
        weights = [1 - (i + 0.5) / factor for i in range(factor)]
        weights = list(reversed(weights)) + weights
    else:
        weights = [1 - abs(i) / float(factor) for i in range(-factor + 1, factor)]
    return np.array(weights)


def _upsample_weights_2d(factor, symmetric):
    """Separable 2D weights: outer product of the per-axis 1D weights."""
    w0 = _upsample_weights_1d(factor[0], symmetric)
    w1 = _upsample_weights_1d(factor[1], symmetric)
    return np.outer(w0, w1)


def _upsample_weights_nd(factor, symmetric):
    """Separable N-D weights via broadcasting the per-axis 1D weights."""
    ws = [_upsample_weights_1d(f, symmetric) for f in factor]
    return reduce(np.multiply, np.ix_(*ws))


def nnef_multilinear_upsample(input, factor, method='symmetric', border='replicate'):
    # type: (torch.Tensor, List[int], str, str)->torch.Tensor
    """NNEF multilinear_upsample.

    The common cases map directly to F.interpolate; other method/border
    combinations are emulated by a depthwise deconvolution with triangular
    interpolation weights.
    """
    rank = len(factor)
    assert len(input.shape) == rank + 2

    mode = 'linear' if rank == 1 else 'bilinear'
    if method == 'aligned':
        return F.interpolate(input=input, scale_factor=tuple(factor), mode=mode, align_corners=True)
    elif method == 'symmetric' and border == 'replicate':
        return F.interpolate(input=input, scale_factor=tuple(factor), mode=mode, align_corners=False)

    n, c, = input.shape[:2]
    symmetric = method == 'symmetric'
    replicate = border == 'replicate'
    weights = _upsample_weights_nd(factor, symmetric)
    # One copy of the weights per channel (depthwise filter layout).
    weights = np.tile(np.reshape(weights, newshape=(1, 1) + weights.shape), reps=(c, 1) + (1,) * rank)
    filter = torch.from_numpy(weights).to(device=input.device, dtype=input.dtype)
    bias = torch.zeros(size=tuple(), device=input.device, dtype=input.dtype)
    output_shape = [n, c] + [f * s for f, s in zip(factor, input.shape[2:])]
    if symmetric:
        return nnef_deconv(input, filter, bias, stride=factor, padding=[(f - 1, f - 1) for f in factor],
border='constant', groups=c, output_shape=output_shape) else: if replicate: input = nnef_pad(input, padding=[(0, 0), (0, 0)] + [(1, 0)] * rank, border=border) padding = factor if replicate else [f // 2 for f in factor] return nnef_deconv(input, filter, bias, stride=factor, padding=[(p, p) for p in padding], border='constant', groups=c, output_shape=output_shape) def nnef_nearest_upsample(input, factor): # type: (torch.Tensor, List[int])->torch.Tensor assert len(input.shape) in (3, 4, 5), \ "nnef.nearest_upsample is only implemented for 3D, 4D, 5D tensors, given: {}D.".format(len(input.shape)) return F.interpolate(input=input, scale_factor=tuple(factor), mode='nearest') def nnef_softmax(x, axes=None): # type: (torch.Tensor, Optional[List[int]])->torch.Tensor axes = [1] if axes is None else axes if len(axes) == 0: return x elif len(axes) == 1: return F.softmax(x, dim=axes[0]) else: m = nnef_max_reduce(x, axes=axes) e = torch.exp(x - m) return e / nnef_sum_reduce(x, axes=axes) def nnef_local_response_normalization(input, size, alpha=1.0, beta=0.5, bias=1.0): # type: (torch.Tensor, List[int], float, float, float)->torch.Tensor sigma = bias + alpha * nnef_box(torch.pow(input, 2.0), size=size, normalize=True) return input / torch.pow(sigma, beta) def nnef_local_mean_normalization(input, size): # type: (torch.Tensor, List[int])->torch.Tensor mean = nnef_box(input, size=size, normalize=True) return input - mean def nnef_local_variance_normalization(input, size, bias=0.0, epsilon=0.0): # type: (torch.Tensor, List[int], float, float)->torch.Tensor sigma = torch.sqrt(nnef_box(torch.pow(input, 2.0), size=size, normalize=True)) return input / torch.max(sigma + bias, torch.full(size=[], fill_value=epsilon, device=input.device, dtype=input.dtype)) def nnef_local_contrast_normalization(input, size, bias=0.0, epsilon=0.0): # type: (torch.Tensor, List[int], float, float)->torch.Tensor centered = nnef_local_mean_normalization(input, size=size) return 
nnef_local_variance_normalization(centered, size=size, bias=bias, epsilon=epsilon) def nnef_l1_normalization(input, axes, bias=0.0, epsilon=0.0): # type: (torch.Tensor, List[int], float, float)->torch.Tensor sigma = nnef_sum_reduce(torch.abs(input), axes=axes) return input / torch.max(sigma + bias, torch.full(size=[], fill_value=epsilon, device=input.device, dtype=input.dtype)) def nnef_l2_normalization(input, axes, bias=0.0, epsilon=0.0): # type: (torch.Tensor, List[int], float, float)->torch.Tensor sigma = torch.sqrt(nnef_sum_reduce(torch.pow(input, 2.0), axes=axes)) return input / torch.max(sigma + bias, torch.full(size=[], fill_value=epsilon, device=input.device, dtype=input.dtype)) def nnef_matmul(A, B, transposeA=False, transposeB=False): # type:(torch.Tensor, torch.Tensor, bool, bool)->torch.Tensor return torch.matmul(torch.transpose(A, len(A.shape) - 2, len(A.shape) - 1) if transposeA else A, torch.transpose(B, len(B.shape) - 2, len(B.shape) - 1) if transposeB else B) def nnef_split(value, axis, ratios): # type:(torch.Tensor, int, List[int])->torch.Tensor assert value.shape[axis] % sum(ratios) == 0 multiplier = value.shape[axis] // sum(ratios) sections = [ratio * multiplier for ratio in ratios] return torch.split(value, split_size_or_sections=sections, dim=axis) def nnef_slice(input, axes, begin, end, stride=None): # type:(torch.Tensor, List[int], List[int], List[int], List[int])->torch.Tensor if stride is None: stride = [1] * len(axes) shape = list(input.shape) slices = [slice(None)] * len(shape) for axis, b, e, s in zip(axes, begin, end, stride): if b < 0: b += shape[axis] if e < 0: e += shape[axis] elif e == 0 and s == 1: e = shape[axis] b = _clamp(b, -1, shape[axis]) e = _clamp(e, -1, shape[axis]) if s > 0: slices[axis] = slice(b, e, s) else: offs = (b - e - 1) % (-s) + 1 if b != e else 1 slices[axis] = slice(e+offs, b+1, -s) input = input[slices] flip_axes = [axis for axis, s in zip(axes, stride) if s < 0] if len(flip_axes) != 0: input = 
torch.flip(input, dims=flip_axes) return input def nnef_select(condition, true_value, false_value): # type:(torch.Tensor, torch.Tensor, torch.Tensor)->torch.Tensor rank = max(len(condition.shape), len(true_value.shape), len(false_value.shape)) return torch.where(_expand_to_rank(condition, rank), _expand_to_rank(true_value, rank), _expand_to_rank(false_value, rank)) def _nnef_generic_reduce(input, axes, f): # type:(torch.Tensor, List[int], Callable)->torch.Tensor if not axes: return input for axis in reversed(sorted(axes)): input = f(input=input, dim=axis, keepdim=True) return input def nnef_sum_reduce(input, axes, normalize=False): # type:(torch.Tensor, List[int], bool)->torch.Tensor return _nnef_generic_reduce(input=input, axes=axes, f=torch.mean if normalize else torch.sum) def nnef_max_reduce(input, axes): # type:(torch.Tensor, List[int])->torch.Tensor return _nnef_generic_reduce(input=input, axes=axes, f=lambda input, dim, keepdim: torch.max(input, dim=dim, keepdim=keepdim)[0]) def nnef_min_reduce(input, axes): # type:(torch.Tensor, List[int])->torch.Tensor return _nnef_generic_reduce(input=input, axes=axes, f=lambda input, dim, keepdim: torch.min(input, dim=dim, keepdim=keepdim)[0]) def nnef_mean_reduce(input, axes): # type:(torch.Tensor, List[int])->torch.Tensor return _nnef_generic_reduce(input=input, axes=axes, f=torch.mean) def _nnef_argminmax_reduce(input, axes, argmin=False): # type:(torch.Tensor, List[int], bool)->torch.Tensor if len(axes) == 1: return _nnef_generic_reduce(input=input, axes=axes, f=torch.argmin if argmin else torch.argmax) else: axes = sorted(axes) consecutive_axes = list(range(axes[0], axes[0] + len(axes))) assert axes == consecutive_axes, \ "{} is only implemented for consecutive axes.".format("argmin_reduce" if argmin else "argmax_reduce") reshaped = nnef_reshape(input, shape=(list(input.shape)[:axes[0]] + [-1] + list(input.shape[axes[0] + len(axes):]))) reduced = _nnef_generic_reduce(input=reshaped, axes=[axes[0]], f=torch.argmin if 
                                       argmin else torch.argmax)
        # Restore the reduced axes as singleton dims.
        reshaped = nnef_reshape(reduced,
                                shape=list(dim if axis not in axes else 1 for axis, dim in enumerate(input.shape)))
        return reshaped


def nnef_argmax_reduce(input, axes):
    # type:(torch.Tensor, List[int])->torch.Tensor
    """NNEF argmax_reduce, keeping reduced dims."""
    return _nnef_argminmax_reduce(input, axes, argmin=False)


def nnef_argmin_reduce(input, axes):
    # type:(torch.Tensor, List[int])->torch.Tensor
    """NNEF argmin_reduce, keeping reduced dims."""
    return _nnef_argminmax_reduce(input, axes, argmin=True)


def nnef_clamp(x, a, b):
    # type:(torch.Tensor, torch.Tensor, torch.Tensor)->torch.Tensor
    """NNEF clamp: elementwise min(max(x, a), b) with rank broadcasting."""
    rank = max(len(x.shape), len(a.shape), len(b.shape))
    x = _expand_to_rank(x, rank)
    a = _expand_to_rank(a, rank)
    b = _expand_to_rank(b, rank)
    return torch.max(torch.min(x, b), a)


def nnef_nearest_downsample(input, factor):
    # type: (torch.Tensor, List[int])->torch.Tensor
    """NNEF nearest_downsample: strided sampling via a 1x1 box with spatial stride."""
    dims = len(input.shape)
    return nnef_box(input, size=[1] * dims, stride=[1, 1] + factor, padding=[(0, 0)] * dims)


def nnef_area_downsample(input, factor):
    # type: (torch.Tensor, List[int])->torch.Tensor
    """NNEF area_downsample: average pooling with size == stride == factor."""
    dims = len(input.shape)
    return nnef_box(input, size=[1, 1] + factor, stride=[1, 1] + factor, padding=[(0, 0)] * dims, normalize=True)


def nnef_moments(input, axes):
    # type: (torch.Tensor, List[int])->Tuple[torch.Tensor, torch.Tensor]
    """NNEF moments: (mean, biased variance) over `axes`, reduced dims kept."""
    mean = nnef_mean_reduce(input, axes=axes)
    variance = nnef_mean_reduce(torch.pow(input - mean, 2.0), axes=axes)
    return mean, variance


def nnef_linear(input, filter, bias):
    # type: (torch.Tensor, torch.Tensor, torch.Tensor)->torch.Tensor
    """NNEF linear: input @ filter^T + bias (bias rank-broadcast)."""
    matmul = nnef_matmul(A=input, B=filter, transposeB=True)
    matmul, bias = _expand_binary(matmul, bias)
    return matmul + bias


def nnef_separable_conv(input,  # type: torch.Tensor
                        plane_filter,  # type: torch.Tensor
                        point_filter,  # type: torch.Tensor
                        bias,  # type: torch.Tensor
                        border='constant',  # type: str
                        padding=None,  # type: Optional[List[Tuple[int, int]]]
                        stride=None,  # type: Optional[List[int]]
                        dilation=None,  # type: Optional[List[int]]
                        groups=1,  # type: int
                        ):
    # type: (...)->torch.Tensor
    """NNEF separable_conv: depthwise (planewise) conv then pointwise conv."""
    # First pass: planewise conv (groups=0 means depthwise) without bias.
    filtered = nnef_conv(input,
                         plane_filter,
                         bias=torch.zeros(size=tuple(), device=input.device, dtype=input.dtype),
                         border=border,
                         padding=padding,
                         stride=stride,
                         dilation=dilation,
                         groups=0)
    # Second pass: 1x1 pointwise conv applies the bias and channel mixing.
    return nnef_conv(filtered, point_filter, bias, groups=groups)


def nnef_separable_deconv(input,  # type: torch.Tensor
                          plane_filter,  # type: torch.Tensor
                          point_filter,  # type: torch.Tensor
                          bias,  # type: torch.Tensor
                          border='constant',  # type: str
                          padding=None,  # type: Optional[List[Tuple[int, int]]]
                          stride=None,  # type: Optional[List[int]]
                          dilation=None,  # type: Optional[List[int]]
                          output_shape=None,  # type: Optional[List[int]]
                          groups=1,  # type: int
                          ):
    # type: (...)->torch.Tensor
    """NNEF separable_deconv: pointwise deconv then planewise deconv (reverse
    order of separable_conv)."""
    filtered = nnef_deconv(input,
                           point_filter,
                           torch.zeros(size=tuple(), device=input.device, dtype=input.dtype),
                           groups=groups)
    return nnef_deconv(filtered,
                       plane_filter,
                       bias,
                       border=border,
                       padding=padding,
                       stride=stride,
                       dilation=dilation,
                       output_shape=output_shape,
                       groups=0)


def nnef_copy_n(x, times):
    # type: (torch.Tensor, int)->List[torch.Tensor]
    """NNEF copy_n: return `times` independent clones of x."""
    return [x.clone() for _ in range(times)]


def nnef_zero_point_linear_quantize(x, zero_point, scale, bits, signed, symmetric):
    # type: (torch.Tensor, torch.Tensor, torch.Tensor, int, bool, bool)->torch.Tensor
    """NNEF zero_point_linear_quantize: simulated (fake) quantization — quantize
    to the integer grid, then dequantize back to the input dtype."""
    z = torch.round(x / scale) + zero_point
    r = 2 ** (bits - 1) - 1 if signed else 2 ** bits - 1
    q = torch.clamp(z, 0 if not signed else -r if symmetric else -r - 1, r)
    y = (q - zero_point) * scale
    return y.type(x.dtype)


def nnef_min_max_linear_quantize(x, min, max, bits, signed, symmetric):
    # type: (torch.Tensor, torch.Tensor, torch.Tensor, int, bool, bool)->torch.Tensor
    """NNEF min_max_linear_quantize: fake-quantize x onto a uniform grid of
    2^bits levels (one fewer when signed and symmetric) spanning [min, max]."""
    r = float(2 ** bits - 1 - int(signed and symmetric))
    z = torch.clamp(x, min, max)
    q = torch.round((z - min) / (max - min) * r)
    return q * ((max - min) / r) + min


def nnef_logarithmic_quantize(x, max, bits):
    # type: (torch.Tensor, torch.Tensor, int)->torch.Tensor
    """NNEF logarithmic_quantize: fake-quantize magnitudes to powers of two up
    to ceil(log2(max)), preserving the sign."""
    r = float(2 ** bits - 1)
    m = math.ceil(math.log2(max))
    q = torch.round(torch.clamp(torch.log2(torch.abs(x)), m - r, m))
    return torch.sign(x) * torch.pow(2.0, q)
def nnef_reshape(input, shape, axis_start=0, axis_count=-1):
    # type: (torch.Tensor, List[int], int, int)->torch.Tensor
    """Reshape per NNEF semantics; the target shape (with -1/0 handling and the
    axis_start/axis_count window) is resolved by nnef.shapes.reshape_shape."""
    return input.reshape(nnef.shapes.reshape_shape(input=list(input.shape), shape=shape, axis_start=axis_start, axis_count=axis_count))


def nnef_update(variable, value):
    # type: (torch.Tensor, torch.Tensor)->torch.Tensor
    """NNEF 'update': the result of updating a variable is simply the new value."""
    return value


def nnef_transpose(input, axes):
    # Extend the (possibly partial) permutation with the identity on trailing dims,
    # since torch.permute requires a full permutation of all axes.
    return input.permute(*(axes + list(range(len(axes), len(input.shape)))))


def nnef_squeeze(input, axes):
    # Target shape computed by the NNEF shape-inference helper.
    return input.reshape(nnef.shapes.squeeze_shape(input.shape, axes))


def nnef_unsqueeze(input, axes):
    # Target shape computed by the NNEF shape-inference helper.
    return input.reshape(nnef.shapes.unsqueeze_shape(input.shape, axes))


def nnef_cast(input, dtype):
    # `dtype` is a numpy dtype; _nnef_dtype mapping to torch is defined elsewhere in this module.
    return input.to(_numpy_dtype_to_torch[dtype])


def nnef_gather(input, indices, axis):
    """Gather along `axis` with arbitrary-rank integer indices.

    index_select only accepts 1-D indices, so higher-rank indices are flattened
    for the lookup and the result is reshaped back afterwards.
    """
    shape = tuple(indices.shape)
    if len(shape) != 1:
        indices = torch.flatten(indices)
    # index_select requires int64 indices.
    result = input.index_select(dim=axis, index=indices.to(torch.int64))
    if len(shape) != 1:
        # Splice the original index shape in place of the gathered axis.
        result = torch.reshape(result, shape=input.shape[:axis] + shape + input.shape[axis + 1:])
    return result


"""
The supported operators
"""

# Maps NNEF operation names to their PyTorch implementations. Entries that are
# commented out are NNEF operations with no implementation in this interpreter.
Operators = {
    # -- tensor shape / layout manipulation --
    'update': nnef_update,
    'reshape': nnef_reshape,
    'transpose': nnef_transpose,
    'concat': lambda values, axis: torch.cat(values, axis),
    'split': nnef_split,
    'slice': nnef_slice,
    'squeeze': nnef_squeeze,
    'unsqueeze': nnef_unsqueeze,
    'stack': lambda values, axis: torch.stack(values, axis),
    'unstack': lambda value, axis: torch.unbind(value, axis),
    # -- element-wise arithmetic (binary ops broadcast via _binary) --
    'add': nnef_add,
    'add_n': nnef_add_n,
    'sub': _binary(lambda x, y: x - y),
    'mul': _binary(lambda x, y: x * y),
    'div': _binary(lambda x, y: x / y),
    'pow': _binary(torch.pow),
    'exp': torch.exp,
    'log': torch.log,
    'abs': torch.abs,
    'sign': torch.sign,
    'rcp': torch.reciprocal,
    'neg': torch.neg,
    'copy': torch.clone,
    # -- comparison and logical ops --
    'lt': _binary(lambda x, y: x < y),
    'gt': _binary(lambda x, y: x > y),
    'le': _binary(lambda x, y: x <= y),
    'ge': _binary(lambda x, y: x >= y),
    'eq': _binary(torch.eq),
    'ne': _binary(torch.ne),
    'and': _binary(lambda x, y: x & y),
    'or': _binary(lambda x, y: x | y),
    'not': lambda x: ~x,
    # -- rounding, selection and simple math --
    'floor': torch.floor,
    'ceil': torch.ceil,
    'round': torch.round,
    'select': nnef_select,
    'sqr': lambda x: torch.pow(x, 2.0),
    'sqrt': torch.sqrt,
    'rsqr': lambda x: torch.pow(x, -2.0),
    'rsqrt': torch.rsqrt,
    'log2': torch.log2,
    'min': _binary(torch.min),
    'max': _binary(torch.max),
    'clamp': nnef_clamp,
    # -- linear algebra, convolution and pooling primitives --
    'matmul': nnef_matmul,
    'conv': nnef_conv,
    'deconv': nnef_deconv,
    'box': nnef_box,
    'debox': nnef_debox,
    'argmax_pool': nnef_argmax_pool,
    # 'sample': unsupported,
    'desample': nnef_desample,
    # -- up/down-sampling --
    'nearest_downsample': nnef_nearest_downsample,
    'area_downsample': nnef_area_downsample,
    'nearest_upsample': nnef_nearest_upsample,
    'multilinear_upsample': nnef_multilinear_upsample,
    # -- reductions --
    'sum_reduce': nnef_sum_reduce,
    'max_reduce': nnef_max_reduce,
    'min_reduce': nnef_min_reduce,
    'argmax_reduce': nnef_argmax_reduce,
    'argmin_reduce': nnef_argmin_reduce,
    'mean_reduce': nnef_mean_reduce,
    'moments': nnef_moments,
    # -- activation functions --
    'relu': F.relu,
    'sigmoid': torch.sigmoid,
    'softabs': lambda x, epsilon: torch.sqrt(torch.pow(x, 2.0) + epsilon),
    'softmax': nnef_softmax,
    'softplus': lambda x: torch.log(torch.exp(x) + 1.0),
    'elu': F.elu,
    # NOTE(review): 'selu' ignores the NNEF alpha/lambda attributes and uses
    # PyTorch's fixed SELU constants -- confirm this matches the NNEF defaults.
    'selu': lambda x, alpha, _lambda_: F.selu(x),
    'gelu': F.gelu,
    'silu': lambda x: x * torch.sigmoid(x),
    'prelu': lambda x, alpha: F.prelu(x, alpha),
    'leaky_relu': lambda x, alpha: F.leaky_relu(x, alpha),
    # -- pooling and fully-connected layers --
    'max_pool_with_index': nnef_max_pool_with_index,
    'max_pool': nnef_max_pool,
    'avg_pool': nnef_avg_pool,
    'rms_pool': nnef_rms_pool,
    'linear': nnef_linear,
    'separable_conv': nnef_separable_conv,
    'separable_deconv': nnef_separable_deconv,
    # -- normalization --
    'local_response_normalization': nnef_local_response_normalization,
    'local_mean_normalization': nnef_local_mean_normalization,
    'local_variance_normalization': nnef_local_variance_normalization,
    'local_contrast_normalization': nnef_local_contrast_normalization,
    'l1_normalization': nnef_l1_normalization,
    'l2_normalization': nnef_l2_normalization,
    'batch_normalization': nnef_batch_normalization,
    # -- region-of-interest ops (not implemented) --
    # 'avg_roi_pool': unsupported,
    # 'max_roi_pool': unsupported,
    # 'roi_resample': unsupported,
    # 'avg_roi_align': unsupported,
    # 'max_roi_align': unsupported,
    # -- quantization (simulated in float) --
    # 'linear_quantize' is the legacy alias of 'min_max_linear_quantize'.
    'linear_quantize': nnef_min_max_linear_quantize,
    'min_max_linear_quantize': nnef_min_max_linear_quantize,
    'zero_point_linear_quantize': nnef_zero_point_linear_quantize,
    'logarithmic_quantize': nnef_logarithmic_quantize,
    'copy_n': nnef_copy_n,
    # -- trigonometric / hyperbolic functions --
    'sin': lambda x: torch.sin(x),
    'cos': lambda x: torch.cos(x),
    'tan': lambda x: torch.tan(x),
    'asin': lambda x: torch.asin(x),
    'acos': lambda x: torch.acos(x),
    'atan': lambda x: torch.atan(x),
    'sinh': lambda x: torch.sinh(x),
    'cosh': lambda x: torch.cosh(x),
    'tanh': lambda x: torch.tanh(x),
    'asinh': lambda x: torch.asinh(x),
    'acosh': lambda x: torch.acosh(x),
    'atanh': lambda x: torch.atanh(x),
    # -- miscellaneous --
    'tile': lambda input, repeats: input.repeat(*repeats),
    'pad': nnef_pad,
    'cast': nnef_cast,
    'gather': nnef_gather,
    'any_reduce': lambda input, axes: _nnef_generic_reduce(input, axes=axes, f=torch.any),
    'all_reduce': lambda input, axes: _nnef_generic_reduce(input, axes=axes, f=torch.all),
}



================================================
FILE: nnef_tools-pyproject/nnef_tools/io/__init__.py
================================================
# Copyright (c) 2020 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.



================================================
FILE: nnef_tools-pyproject/nnef_tools/io/caffe2/__init__.py
================================================
# Copyright (c) 2020 The Khronos Group Inc.
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .reader import Reader from .writer import Writer ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/caffe2/caffe/__init__.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/caffe2/caffe/proto/__init__.py ================================================ ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/caffe2/caffe/proto/caffe.proto ================================================ syntax = "proto2"; package caffe; // Specifies the shape (dimensions) of a Blob. 
message BlobShape { repeated int64 dim = 1 [packed = true]; } message BlobProto { optional BlobShape shape = 7; repeated float data = 5 [packed = true]; repeated float diff = 6 [packed = true]; repeated double double_data = 8 [packed = true]; repeated double double_diff = 9 [packed = true]; // 4D dimensions -- deprecated. Use "shape" instead. optional int32 num = 1 [default = 0]; optional int32 channels = 2 [default = 0]; optional int32 height = 3 [default = 0]; optional int32 width = 4 [default = 0]; } // The BlobProtoVector is simply a way to pass multiple blobproto instances // around. message BlobProtoVector { repeated BlobProto blobs = 1; } message Datum { optional int32 channels = 1; optional int32 height = 2; optional int32 width = 3; // the actual image data, in bytes optional bytes data = 4; optional int32 label = 5; // Optionally, the datum could also hold float data. repeated float float_data = 6; // If true data contains an encoded image that need to be decoded optional bool encoded = 7 [default = false]; } message FillerParameter { // The filler type. optional string type = 1 [default = 'constant']; optional float value = 2 [default = 0]; // the value in constant filler optional float min = 3 [default = 0]; // the min value in uniform filler optional float max = 4 [default = 1]; // the max value in uniform filler optional float mean = 5 [default = 0]; // the mean value in Gaussian filler optional float std = 6 [default = 1]; // the std value in Gaussian filler // The expected number of non-zero output weights for a given input in // Gaussian filler -- the default -1 means don't perform sparsification. optional int32 sparse = 7 [default = -1]; // Normalize the filler variance by fan_in, fan_out, or their average. // Applies to 'xavier' and 'msra' fillers. 
enum VarianceNorm { FAN_IN = 0; FAN_OUT = 1; AVERAGE = 2; } optional VarianceNorm variance_norm = 8 [default = FAN_IN]; } message NetParameter { optional string name = 1; // consider giving the network a name // DEPRECATED. See InputParameter. The input blobs to the network. repeated string input = 3; // DEPRECATED. See InputParameter. The shape of the input blobs. repeated BlobShape input_shape = 8; // 4D input dimensions -- deprecated. Use "input_shape" instead. // If specified, for each input blob there should be four // values specifying the num, channels, height and width of the input blob. // Thus, there should be a total of (4 * #input) numbers. repeated int32 input_dim = 4; // Whether the network will force every layer to carry out backward operation. // If set False, then whether to carry out backward is determined // automatically according to the net structure and learning rates. optional bool force_backward = 5 [default = false]; // The current "state" of the network, including the phase, level, and stage. // Some layers may be included/excluded depending on this state and the states // specified in the layers' include and exclude fields. optional NetState state = 6; // Print debugging information about results while running Net::Forward, // Net::Backward, and Net::Update. optional bool debug_info = 7 [default = false]; // The layers that make up the net. Each of their configurations, including // connectivity and behavior, is specified as a LayerParameter. repeated LayerParameter layer = 100; // ID 100 so layers are printed last. // DEPRECATED: use 'layer' instead. repeated V1LayerParameter layers = 2; } // NOTE // Update the next available ID when you add a new SolverParameter field. 
// // SolverParameter next available ID: 43 (last added: weights) message SolverParameter { ////////////////////////////////////////////////////////////////////////////// // Specifying the train and test networks // // Exactly one train net must be specified using one of the following fields: // train_net_param, train_net, net_param, net // One or more test nets may be specified using any of the following fields: // test_net_param, test_net, net_param, net // If more than one test net field is specified (e.g., both net and // test_net are specified), they will be evaluated in the field order given // above: (1) test_net_param, (2) test_net, (3) net_param/net. // A test_iter must be specified for each test_net. // A test_level and/or a test_stage may also be specified for each test_net. ////////////////////////////////////////////////////////////////////////////// // Proto filename for the train net, possibly combined with one or more // test nets. optional string net = 24; // Inline train net param, possibly combined with one or more test nets. optional NetParameter net_param = 25; optional string train_net = 1; // Proto filename for the train net. repeated string test_net = 2; // Proto filenames for the test nets. optional NetParameter train_net_param = 21; // Inline train net params. repeated NetParameter test_net_param = 22; // Inline test net params. // The states for the train/test nets. Must be unspecified or // specified once per net. // // By default, train_state will have phase = TRAIN, // and all test_state's will have phase = TEST. // Other defaults are set according to the NetState defaults. optional NetState train_state = 26; repeated NetState test_state = 27; // The number of iterations for each test net. repeated int32 test_iter = 3; // The number of iterations between two testing phases. 
optional int32 test_interval = 4 [default = 0]; optional bool test_compute_loss = 19 [default = false]; // If true, run an initial test pass before the first iteration, // ensuring memory availability and printing the starting value of the loss. optional bool test_initialization = 32 [default = true]; optional float base_lr = 5; // The base learning rate // the number of iterations between displaying info. If display = 0, no info // will be displayed. optional int32 display = 6; // Display the loss averaged over the last average_loss iterations optional int32 average_loss = 33 [default = 1]; optional int32 max_iter = 7; // the maximum number of iterations // accumulate gradients over `iter_size` x `batch_size` instances optional int32 iter_size = 36 [default = 1]; // The learning rate decay policy. The currently implemented learning rate // policies are as follows: // - fixed: always return base_lr. // - step: return base_lr * gamma ^ (floor(iter / step)) // - exp: return base_lr * gamma ^ iter // - inv: return base_lr * (1 + gamma * iter) ^ (- power) // - multistep: similar to step but it allows non uniform steps defined by // stepvalue // - poly: the effective learning rate follows a polynomial decay, to be // zero by the max_iter. return base_lr (1 - iter/max_iter) ^ (power) // - sigmoid: the effective learning rate follows a sigmod decay // return base_lr ( 1/(1 + exp(-gamma * (iter - stepsize)))) // // where base_lr, max_iter, gamma, step, stepvalue and power are defined // in the solver parameter protocol buffer, and iter is the current iteration. optional string lr_policy = 8; optional float gamma = 9; // The parameter to compute the learning rate. optional float power = 10; // The parameter to compute the learning rate. optional float momentum = 11; // The momentum value. optional float weight_decay = 12; // The weight decay. 
// regularization types supported: L1 and L2 // controlled by weight_decay optional string regularization_type = 29 [default = "L2"]; // the stepsize for learning rate policy "step" optional int32 stepsize = 13; // the stepsize for learning rate policy "multistep" repeated int32 stepvalue = 34; // Set clip_gradients to >= 0 to clip parameter gradients to that L2 norm, // whenever their actual L2 norm is larger. optional float clip_gradients = 35 [default = -1]; optional int32 snapshot = 14 [default = 0]; // The snapshot interval // The prefix for the snapshot. // If not set then is replaced by prototxt file path without extension. // If is set to directory then is augmented by prototxt file name // without extention. optional string snapshot_prefix = 15; // whether to snapshot diff in the results or not. Snapshotting diff will help // debugging but the final protocol buffer size will be much larger. optional bool snapshot_diff = 16 [default = false]; enum SnapshotFormat { HDF5 = 0; BINARYPROTO = 1; } optional SnapshotFormat snapshot_format = 37 [default = BINARYPROTO]; // the mode solver will use: 0 for CPU and 1 for GPU. Use GPU in default. enum SolverMode { CPU = 0; GPU = 1; } optional SolverMode solver_mode = 17 [default = GPU]; // the device_id will that be used in GPU mode. Use device_id = 0 in default. optional int32 device_id = 18 [default = 0]; // If non-negative, the seed with which the Solver will initialize the Caffe // random number generator -- useful for reproducible results. Otherwise, // (and by default) initialize using a seed derived from the system clock. 
optional int64 random_seed = 20 [default = -1]; // type of the solver optional string type = 40 [default = "SGD"]; // numerical stability for RMSProp, AdaGrad and AdaDelta and Adam optional float delta = 31 [default = 1e-8]; // parameters for the Adam solver optional float momentum2 = 39 [default = 0.999]; // RMSProp decay value // MeanSquare(t) = rms_decay*MeanSquare(t-1) + (1-rms_decay)*SquareGradient(t) optional float rms_decay = 38 [default = 0.99]; // If true, print information about the state of the net that may help with // debugging learning problems. optional bool debug_info = 23 [default = false]; // If false, don't save a snapshot after training finishes. optional bool snapshot_after_train = 28 [default = true]; // DEPRECATED: old solver enum types, use string instead enum SolverType { SGD = 0; NESTEROV = 1; ADAGRAD = 2; RMSPROP = 3; ADADELTA = 4; ADAM = 5; } // DEPRECATED: use type instead of solver_type optional SolverType solver_type = 30 [default = SGD]; // Overlap compute and communication for data parallel training optional bool layer_wise_reduce = 41 [default = true]; // Path to caffemodel file(s) with pretrained weights to initialize finetuning. // Tha same as command line --weights parameter for caffe train command. // If command line --weights parameter is specified, it has higher priority // and overwrites this one(s). // If --snapshot command line parameter is specified, this one(s) are ignored. // If several model files are expected, they can be listed in a one // weights parameter separated by ',' (like in a command string) or // in repeated weights parameters separately. repeated string weights = 42; } // A message that stores the solver snapshots message SolverState { optional int32 iter = 1; // The current iteration optional string learned_net = 2; // The file that stores the learned net. 
repeated BlobProto history = 3; // The history for sgd solvers optional int32 current_step = 4 [default = 0]; // The current step for learning rate } enum Phase { TRAIN = 0; TEST = 1; } message NetState { optional Phase phase = 1 [default = TEST]; optional int32 level = 2 [default = 0]; repeated string stage = 3; } message NetStateRule { // Set phase to require the NetState have a particular phase (TRAIN or TEST) // to meet this rule. optional Phase phase = 1; // Set the minimum and/or maximum levels in which the layer should be used. // Leave undefined to meet the rule regardless of level. optional int32 min_level = 2; optional int32 max_level = 3; // Customizable sets of stages to include or exclude. // The net must have ALL of the specified stages and NONE of the specified // "not_stage"s to meet the rule. // (Use multiple NetStateRules to specify conjunctions of stages.) repeated string stage = 4; repeated string not_stage = 5; } // Specifies training parameters (multipliers on global learning constants, // and the name and other settings used for weight sharing). message ParamSpec { // The names of the parameter blobs -- useful for sharing parameters among // layers, but never required otherwise. To share a parameter between two // layers, give it a (non-empty) name. optional string name = 1; // Whether to require shared weights to have the same shape, or just the same // count -- defaults to STRICT if unspecified. optional DimCheckMode share_mode = 2; enum DimCheckMode { // STRICT (default) requires that num, channels, height, width each match. STRICT = 0; // PERMISSIVE requires only the count (num*channels*height*width) to match. PERMISSIVE = 1; } // The multiplier on the global learning rate for this parameter. optional float lr_mult = 3 [default = 1.0]; // The multiplier on the global weight decay for this parameter. optional float decay_mult = 4 [default = 1.0]; } // NOTE // Update the next available ID when you add a new LayerParameter field. 
// // LayerParameter next available layer-specific ID: 149 (last added: clip_param) message LayerParameter { optional string name = 1; // the layer name optional string type = 2; // the layer type repeated string bottom = 3; // the name of each bottom blob repeated string top = 4; // the name of each top blob // The train / test phase for computation. optional Phase phase = 10; // The amount of weight to assign each top blob in the objective. // Each layer assigns a default value, usually of either 0 or 1, // to each top blob. repeated float loss_weight = 5; // Specifies training parameters (multipliers on global learning constants, // and the name and other settings used for weight sharing). repeated ParamSpec param = 6; // The blobs containing the numeric parameters of the layer. repeated BlobProto blobs = 7; // Specifies whether to backpropagate to each bottom. If unspecified, // Caffe will automatically infer whether each input needs backpropagation // to compute parameter gradients. If set to true for some inputs, // backpropagation to those inputs is forced; if set false for some inputs, // backpropagation to those inputs is skipped. // // The size must be either 0 or equal to the number of bottoms. repeated bool propagate_down = 11; // Rules controlling whether and when a layer is included in the network, // based on the current NetState. You may specify a non-zero number of rules // to include OR exclude, but not both. If no include or exclude rules are // specified, the layer is always included. If the current NetState meets // ANY (i.e., one or more) of the specified rules, the layer is // included/excluded. repeated NetStateRule include = 8; repeated NetStateRule exclude = 9; // Parameters for data pre-processing. optional TransformationParameter transform_param = 100; // Parameters shared by loss layers. optional LossParameter loss_param = 101; // Layer type-specific parameters. 
// // Note: certain layers may have more than one computational engine // for their implementation. These layers include an Engine type and // engine parameter for selecting the implementation. // The default for the engine is set by the ENGINE switch at compile-time. optional AccuracyParameter accuracy_param = 102; optional ArgMaxParameter argmax_param = 103; optional BatchNormParameter batch_norm_param = 139; optional BiasParameter bias_param = 141; optional ClipParameter clip_param = 148; optional ConcatParameter concat_param = 104; optional ContrastiveLossParameter contrastive_loss_param = 105; optional ConvolutionParameter convolution_param = 106; optional CropParameter crop_param = 144; optional DataParameter data_param = 107; optional DropoutParameter dropout_param = 108; optional DummyDataParameter dummy_data_param = 109; optional EltwiseParameter eltwise_param = 110; optional ELUParameter elu_param = 140; optional EmbedParameter embed_param = 137; optional ExpParameter exp_param = 111; optional FlattenParameter flatten_param = 135; optional HDF5DataParameter hdf5_data_param = 112; optional HDF5OutputParameter hdf5_output_param = 113; optional HingeLossParameter hinge_loss_param = 114; optional ImageDataParameter image_data_param = 115; optional InfogainLossParameter infogain_loss_param = 116; optional InnerProductParameter inner_product_param = 117; optional InputParameter input_param = 143; optional LogParameter log_param = 134; optional LRNParameter lrn_param = 118; optional MemoryDataParameter memory_data_param = 119; optional MVNParameter mvn_param = 120; optional ParameterParameter parameter_param = 145; optional PoolingParameter pooling_param = 121; optional PowerParameter power_param = 122; optional PReLUParameter prelu_param = 131; optional PythonParameter python_param = 130; optional RecurrentParameter recurrent_param = 146; optional ReductionParameter reduction_param = 136; optional ReLUParameter relu_param = 123; optional ReshapeParameter 
reshape_param = 133; optional ScaleParameter scale_param = 142; optional SigmoidParameter sigmoid_param = 124; optional SoftmaxParameter softmax_param = 125; optional SPPParameter spp_param = 132; optional SliceParameter slice_param = 126; optional SwishParameter swish_param = 147; optional TanHParameter tanh_param = 127; optional ThresholdParameter threshold_param = 128; optional TileParameter tile_param = 138; optional WindowDataParameter window_data_param = 129; } // Message that stores parameters used to apply transformation // to the data layer's data message TransformationParameter { // For data pre-processing, we can do simple scaling and subtracting the // data mean, if provided. Note that the mean subtraction is always carried // out before scaling. optional float scale = 1 [default = 1]; // Specify if we want to randomly mirror data. optional bool mirror = 2 [default = false]; // Specify if we would like to randomly crop an image. optional uint32 crop_size = 3 [default = 0]; // mean_file and mean_value cannot be specified at the same time optional string mean_file = 4; // if specified can be repeated once (would subtract it from all the channels) // or can be repeated the same number of times as channels // (would subtract them from the corresponding channel) repeated float mean_value = 5; // Force the decoded image to have 3 color channels. optional bool force_color = 6 [default = false]; // Force the decoded image to have 1 color channels. optional bool force_gray = 7 [default = false]; } // Message that stores parameters shared by loss layers message LossParameter { // If specified, ignore instances with the given label. optional int32 ignore_label = 1; // How to normalize the loss for loss layers that aggregate across batches, // spatial dimensions, or other dimensions. Currently only implemented in // SoftmaxWithLoss and SigmoidCrossEntropyLoss layers. enum NormalizationMode { // Divide by the number of examples in the batch times spatial dimensions. 
// Outputs that receive the ignore label will NOT be ignored in computing // the normalization factor. FULL = 0; // Divide by the total number of output locations that do not take the // ignore_label. If ignore_label is not set, this behaves like FULL. VALID = 1; // Divide by the batch size. BATCH_SIZE = 2; // Do not normalize the loss. NONE = 3; } // For historical reasons, the default normalization for // SigmoidCrossEntropyLoss is BATCH_SIZE and *not* VALID. optional NormalizationMode normalization = 3 [default = VALID]; // Deprecated. Ignored if normalization is specified. If normalization // is not specified, then setting this to false will be equivalent to // normalization = BATCH_SIZE to be consistent with previous behavior. optional bool normalize = 2; } // Messages that store parameters used by individual layer types follow, in // alphabetical order. message AccuracyParameter { // When computing accuracy, count as correct by comparing the true label to // the top k scoring classes. By default, only compare to the top scoring // class (i.e. argmax). optional uint32 top_k = 1 [default = 1]; // The "label" axis of the prediction blob, whose argmax corresponds to the // predicted label -- may be negative to index from the end (e.g., -1 for the // last axis). For example, if axis == 1 and the predictions are // (N x C x H x W), the label blob is expected to contain N*H*W ground truth // labels with integer values in {0, 1, ..., C-1}. optional int32 axis = 2 [default = 1]; // If specified, ignore instances with the given label. optional int32 ignore_label = 3; } message ArgMaxParameter { // If true produce pairs (argmax, maxval) optional bool out_max_val = 1 [default = false]; optional uint32 top_k = 2 [default = 1]; // The axis along which to maximise -- may be negative to index from the // end (e.g., -1 for the last axis). // By default ArgMaxLayer maximizes over the flattened trailing dimensions // for each index of the first / num dimension. 
optional int32 axis = 3; } // Message that stores parameters used by ClipLayer message ClipParameter { required float min = 1; required float max = 2; } message ConcatParameter { // The axis along which to concatenate -- may be negative to index from the // end (e.g., -1 for the last axis). Other axes must have the // same dimension for all the bottom blobs. // By default, ConcatLayer concatenates blobs along the "channels" axis (1). optional int32 axis = 2 [default = 1]; // DEPRECATED: alias for "axis" -- does not support negative indexing. optional uint32 concat_dim = 1 [default = 1]; } message BatchNormParameter { // If false, normalization is performed over the current mini-batch // and global statistics are accumulated (but not yet used) by a moving // average. // If true, those accumulated mean and variance values are used for the // normalization. // By default, it is set to false when the network is in the training // phase and true when the network is in the testing phase. optional bool use_global_stats = 1; // What fraction of the moving average remains each iteration? // Smaller values make the moving average decay faster, giving more // weight to the recent values. // Each iteration updates the moving average @f$S_{t-1}@f$ with the // current mean @f$ Y_t @f$ by // @f$ S_t = (1-\beta)Y_t + \beta \cdot S_{t-1} @f$, where @f$ \beta @f$ // is the moving_average_fraction parameter. optional float moving_average_fraction = 2 [default = .999]; // Small value to add to the variance estimate so that we don't divide by // zero. optional float eps = 3 [default = 1e-5]; } message BiasParameter { // The first axis of bottom[0] (the first input Blob) along which to apply // bottom[1] (the second input Blob). May be negative to index from the end // (e.g., -1 for the last axis). 
// // For example, if bottom[0] is 4D with shape 100x3x40x60, the output // top[0] will have the same shape, and bottom[1] may have any of the // following shapes (for the given value of axis): // (axis == 0 == -4) 100; 100x3; 100x3x40; 100x3x40x60 // (axis == 1 == -3) 3; 3x40; 3x40x60 // (axis == 2 == -2) 40; 40x60 // (axis == 3 == -1) 60 // Furthermore, bottom[1] may have the empty shape (regardless of the value of // "axis") -- a scalar bias. optional int32 axis = 1 [default = 1]; // (num_axes is ignored unless just one bottom is given and the bias is // a learned parameter of the layer. Otherwise, num_axes is determined by the // number of axes by the second bottom.) // The number of axes of the input (bottom[0]) covered by the bias // parameter, or -1 to cover all axes of bottom[0] starting from `axis`. // Set num_axes := 0, to add a zero-axis Blob: a scalar. optional int32 num_axes = 2 [default = 1]; // (filler is ignored unless just one bottom is given and the bias is // a learned parameter of the layer.) // The initialization for the learned bias parameter. // Default is the zero (0) initialization, resulting in the BiasLayer // initially performing the identity operation. optional FillerParameter filler = 3; } message ContrastiveLossParameter { // margin for dissimilar pair optional float margin = 1 [default = 1.0]; // The first implementation of this cost did not exactly match the cost of // Hadsell et al 2006 -- using (margin - d^2) instead of (margin - d)^2. // legacy_version = false (the default) uses (margin - d)^2 as proposed in the // Hadsell paper. New models should probably use this version. // legacy_version = true uses (margin - d^2). 
This is kept to support / // reproduce existing models and results optional bool legacy_version = 2 [default = false]; } message ConvolutionParameter { optional uint32 num_output = 1; // The number of outputs for the layer optional bool bias_term = 2 [default = true]; // whether to have bias terms // Pad, kernel size, and stride are all given as a single value for equal // dimensions in all spatial dimensions, or once per spatial dimension. repeated uint32 pad = 3; // The padding size; defaults to 0 repeated uint32 kernel_size = 4; // The kernel size repeated uint32 stride = 6; // The stride; defaults to 1 // Factor used to dilate the kernel, (implicitly) zero-filling the resulting // holes. (Kernel dilation is sometimes referred to by its use in the // algorithme à trous from Holschneider et al. 1987.) repeated uint32 dilation = 18; // The dilation; defaults to 1 // For 2D convolution only, the *_h and *_w versions may also be used to // specify both spatial dimensions. optional uint32 pad_h = 9 [default = 0]; // The padding height (2D only) optional uint32 pad_w = 10 [default = 0]; // The padding width (2D only) optional uint32 kernel_h = 11; // The kernel height (2D only) optional uint32 kernel_w = 12; // The kernel width (2D only) optional uint32 stride_h = 13; // The stride height (2D only) optional uint32 stride_w = 14; // The stride width (2D only) optional uint32 group = 5 [default = 1]; // The group size for group conv optional FillerParameter weight_filler = 7; // The filler for the weight optional FillerParameter bias_filler = 8; // The filler for the bias enum Engine { DEFAULT = 0; CAFFE = 1; CUDNN = 2; } optional Engine engine = 15 [default = DEFAULT]; // The axis to interpret as "channels" when performing convolution. // Preceding dimensions are treated as independent inputs; // succeeding dimensions are treated as "spatial". 
// With (N, C, H, W) inputs, and axis == 1 (the default), we perform // N independent 2D convolutions, sliding C-channel (or (C/g)-channels, for // groups g>1) filters across the spatial axes (H, W) of the input. // With (N, C, D, H, W) inputs, and axis == 1, we perform // N independent 3D convolutions, sliding (C/g)-channels // filters across the spatial axes (D, H, W) of the input. optional int32 axis = 16 [default = 1]; // Whether to force use of the general ND convolution, even if a specific // implementation for blobs of the appropriate number of spatial dimensions // is available. (Currently, there is only a 2D-specific convolution // implementation; for input blobs with num_axes != 2, this option is // ignored and the ND implementation will be used.) optional bool force_nd_im2col = 17 [default = false]; } message CropParameter { // To crop, elements of the first bottom are selected to fit the dimensions // of the second, reference bottom. The crop is configured by // - the crop `axis` to pick the dimensions for cropping // - the crop `offset` to set the shift for all/each dimension // to align the cropped bottom with the reference bottom. // All dimensions up to but excluding `axis` are preserved, while // the dimensions including and trailing `axis` are cropped. // If only one `offset` is set, then all dimensions are offset by this amount. // Otherwise, the number of offsets must equal the number of cropped axes to // shift the crop in each dimension accordingly. // Note: standard dimensions are N,C,H,W so the default is a spatial crop, // and `axis` may be negative to index from the end (e.g., -1 for the last // axis). optional int32 axis = 1 [default = 2]; repeated uint32 offset = 2; } message DataParameter { enum DB { LEVELDB = 0; LMDB = 1; } // Specify the data source. optional string source = 1; // Specify the batch size. 
optional uint32 batch_size = 4; // The rand_skip variable is for the data layer to skip a few data points // to avoid all asynchronous sgd clients to start at the same point. The skip // point would be set as rand_skip * rand(0,1). Note that rand_skip should not // be larger than the number of keys in the database. // DEPRECATED. Each solver accesses a different subset of the database. optional uint32 rand_skip = 7 [default = 0]; optional DB backend = 8 [default = LEVELDB]; // DEPRECATED. See TransformationParameter. For data pre-processing, we can do // simple scaling and subtracting the data mean, if provided. Note that the // mean subtraction is always carried out before scaling. optional float scale = 2 [default = 1]; optional string mean_file = 3; // DEPRECATED. See TransformationParameter. Specify if we would like to randomly // crop an image. optional uint32 crop_size = 5 [default = 0]; // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror // data. optional bool mirror = 6 [default = false]; // Force the encoded image to have 3 color channels optional bool force_encoded_color = 9 [default = false]; // Prefetch queue (Increase if data feeding bandwidth varies, within the // limit of device memory for GPU training) optional uint32 prefetch = 10 [default = 4]; } message DropoutParameter { optional float dropout_ratio = 1 [default = 0.5]; // dropout ratio } // DummyDataLayer fills any number of arbitrarily shaped blobs with random // (or constant) data generated by "Fillers" (see "message FillerParameter"). message DummyDataParameter { // This layer produces N >= 1 top blobs. DummyDataParameter must specify 1 or N // shape fields, and 0, 1 or N data_fillers. // // If 0 data_fillers are specified, ConstantFiller with a value of 0 is used. // If 1 data_filler is specified, it is applied to all top blobs. If N are // specified, the ith is applied to the ith top blob. 
repeated FillerParameter data_filler = 1; repeated BlobShape shape = 6; // 4D dimensions -- deprecated. Use "shape" instead. repeated uint32 num = 2; repeated uint32 channels = 3; repeated uint32 height = 4; repeated uint32 width = 5; } message EltwiseParameter { enum EltwiseOp { PROD = 0; SUM = 1; MAX = 2; } optional EltwiseOp operation = 1 [default = SUM]; // element-wise operation repeated float coeff = 2; // blob-wise coefficient for SUM operation // Whether to use an asymptotically slower (for >2 inputs) but stabler method // of computing the gradient for the PROD operation. (No effect for SUM op.) optional bool stable_prod_grad = 3 [default = true]; } // Message that stores parameters used by ELULayer message ELUParameter { // Described in: // Clevert, D.-A., Unterthiner, T., & Hochreiter, S. (2015). Fast and Accurate // Deep Network Learning by Exponential Linear Units (ELUs). arXiv optional float alpha = 1 [default = 1]; } // Message that stores parameters used by EmbedLayer message EmbedParameter { optional uint32 num_output = 1; // The number of outputs for the layer // The input is given as integers to be interpreted as one-hot // vector indices with dimension num_input. Hence num_input should be // 1 greater than the maximum possible input value. optional uint32 input_dim = 2; optional bool bias_term = 3 [default = true]; // Whether to use a bias term optional FillerParameter weight_filler = 4; // The filler for the weight optional FillerParameter bias_filler = 5; // The filler for the bias } // Message that stores parameters used by ExpLayer message ExpParameter { // ExpLayer computes outputs y = base ^ (shift + scale * x), for base > 0. // Or if base is set to the default (-1), base is set to e, // so y = exp(shift + scale * x). 
optional float base = 1 [default = -1.0]; optional float scale = 2 [default = 1.0]; optional float shift = 3 [default = 0.0]; } /// Message that stores parameters used by FlattenLayer message FlattenParameter { // The first axis to flatten: all preceding axes are retained in the output. // May be negative to index from the end (e.g., -1 for the last axis). optional int32 axis = 1 [default = 1]; // The last axis to flatten: all following axes are retained in the output. // May be negative to index from the end (e.g., the default -1 for the last // axis). optional int32 end_axis = 2 [default = -1]; } // Message that stores parameters used by HDF5DataLayer message HDF5DataParameter { // Specify the data source. optional string source = 1; // Specify the batch size. optional uint32 batch_size = 2; // Specify whether to shuffle the data. // If shuffle == true, the ordering of the HDF5 files is shuffled, // and the ordering of data within any given HDF5 file is shuffled, // but data between different files are not interleaved; all of a file's // data are output (in a random order) before moving onto another file. optional bool shuffle = 3 [default = false]; } message HDF5OutputParameter { optional string file_name = 1; } message HingeLossParameter { enum Norm { L1 = 1; L2 = 2; } // Specify the Norm to use L1 or L2 optional Norm norm = 1 [default = L1]; } message ImageDataParameter { // Specify the data source. optional string source = 1; // Specify the batch size. optional uint32 batch_size = 4 [default = 1]; // The rand_skip variable is for the data layer to skip a few data points // to avoid all asynchronous sgd clients to start at the same point. The skip // point would be set as rand_skip * rand(0,1). Note that rand_skip should not // be larger than the number of keys in the database. optional uint32 rand_skip = 7 [default = 0]; // Whether or not ImageLayer should shuffle the list of files at every epoch. 
optional bool shuffle = 8 [default = false]; // It will also resize images if new_height or new_width are not zero. optional uint32 new_height = 9 [default = 0]; optional uint32 new_width = 10 [default = 0]; // Specify if the images are color or gray optional bool is_color = 11 [default = true]; // DEPRECATED. See TransformationParameter. For data pre-processing, we can do // simple scaling and subtracting the data mean, if provided. Note that the // mean subtraction is always carried out before scaling. optional float scale = 2 [default = 1]; optional string mean_file = 3; // DEPRECATED. See TransformationParameter. Specify if we would like to randomly // crop an image. optional uint32 crop_size = 5 [default = 0]; // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror // data. optional bool mirror = 6 [default = false]; optional string root_folder = 12 [default = ""]; } message InfogainLossParameter { // Specify the infogain matrix source. optional string source = 1; optional int32 axis = 2 [default = 1]; // axis of prob } message InnerProductParameter { optional uint32 num_output = 1; // The number of outputs for the layer optional bool bias_term = 2 [default = true]; // whether to have bias terms optional FillerParameter weight_filler = 3; // The filler for the weight optional FillerParameter bias_filler = 4; // The filler for the bias // The first axis to be lumped into a single inner product computation; // all preceding axes are retained in the output. // May be negative to index from the end (e.g., -1 for the last axis). optional int32 axis = 5 [default = 1]; // Specify whether to transpose the weight matrix or not. // If transpose == true, any operations will be performed on the transpose // of the weight matrix. The weight matrix itself is not going to be transposed // but rather the transfer flag of operations will be toggled accordingly. 
optional bool transpose = 6 [default = false]; } message InputParameter { // This layer produces N >= 1 top blob(s) to be assigned manually. // Define N shapes to set a shape for each top. // Define 1 shape to set the same shape for every top. // Define no shape to defer to reshaping manually. repeated BlobShape shape = 1; } // Message that stores parameters used by LogLayer message LogParameter { // LogLayer computes outputs y = log_base(shift + scale * x), for base > 0. // Or if base is set to the default (-1), base is set to e, // so y = ln(shift + scale * x) = log_e(shift + scale * x) optional float base = 1 [default = -1.0]; optional float scale = 2 [default = 1.0]; optional float shift = 3 [default = 0.0]; } // Message that stores parameters used by LRNLayer message LRNParameter { optional uint32 local_size = 1 [default = 5]; optional float alpha = 2 [default = 1.]; optional float beta = 3 [default = 0.75]; enum NormRegion { ACROSS_CHANNELS = 0; WITHIN_CHANNEL = 1; } optional NormRegion norm_region = 4 [default = ACROSS_CHANNELS]; optional float k = 5 [default = 1.]; enum Engine { DEFAULT = 0; CAFFE = 1; CUDNN = 2; } optional Engine engine = 6 [default = DEFAULT]; } message MemoryDataParameter { optional uint32 batch_size = 1; optional uint32 channels = 2; optional uint32 height = 3; optional uint32 width = 4; } message MVNParameter { // This parameter can be set to false to normalize mean only optional bool normalize_variance = 1 [default = true]; // This parameter can be set to true to perform DNN-like MVN optional bool across_channels = 2 [default = false]; // Epsilon for not dividing by zero while normalizing variance optional float eps = 3 [default = 1e-9]; } message ParameterParameter { optional BlobShape shape = 1; } message PoolingParameter { enum PoolMethod { MAX = 0; AVE = 1; STOCHASTIC = 2; } optional PoolMethod pool = 1 [default = MAX]; // The pooling method // Pad, kernel size, and stride are all given as a single value for equal // dimensions in 
height and width or as Y, X pairs. optional uint32 pad = 4 [default = 0]; // The padding size (equal in Y, X) optional uint32 pad_h = 9 [default = 0]; // The padding height optional uint32 pad_w = 10 [default = 0]; // The padding width optional uint32 kernel_size = 2; // The kernel size (square) optional uint32 kernel_h = 5; // The kernel height optional uint32 kernel_w = 6; // The kernel width optional uint32 stride = 3 [default = 1]; // The stride (equal in Y, X) optional uint32 stride_h = 7; // The stride height optional uint32 stride_w = 8; // The stride width enum Engine { DEFAULT = 0; CAFFE = 1; CUDNN = 2; } optional Engine engine = 11 [default = DEFAULT]; // If global_pooling then it will pool over the size of the bottom by doing // kernel_h = bottom->height and kernel_w = bottom->width optional bool global_pooling = 12 [default = false]; // How to calculate the output size - using ceil (default) or floor rounding. enum RoundMode { CEIL = 0; FLOOR = 1; } optional RoundMode round_mode = 13 [default = CEIL]; } message PowerParameter { // PowerLayer computes outputs y = (shift + scale * x) ^ power. optional float power = 1 [default = 1.0]; optional float scale = 2 [default = 1.0]; optional float shift = 3 [default = 0.0]; } message PythonParameter { optional string module = 1; optional string layer = 2; // This value is set to the attribute `param_str` of the `PythonLayer` object // in Python before calling the `setup()` method. This could be a number, // string, dictionary in Python dict format, JSON, etc. You may parse this // string in `setup` method and use it in `forward` and `backward`. optional string param_str = 3 [default = '']; // DEPRECATED optional bool share_in_parallel = 4 [default = false]; } // Message that stores parameters used by RecurrentLayer message RecurrentParameter { // The dimension of the output (and usually hidden state) representation -- // must be explicitly set to non-zero. 
optional uint32 num_output = 1 [default = 0]; optional FillerParameter weight_filler = 2; // The filler for the weight optional FillerParameter bias_filler = 3; // The filler for the bias // Whether to enable displaying debug_info in the unrolled recurrent net. optional bool debug_info = 4 [default = false]; // Whether to add as additional inputs (bottoms) the initial hidden state // blobs, and add as additional outputs (tops) the final timestep hidden state // blobs. The number of additional bottom/top blobs required depends on the // recurrent architecture -- e.g., 1 for RNNs, 2 for LSTMs. optional bool expose_hidden = 5 [default = false]; } // Message that stores parameters used by ReductionLayer message ReductionParameter { enum ReductionOp { SUM = 1; ASUM = 2; SUMSQ = 3; MEAN = 4; } optional ReductionOp operation = 1 [default = SUM]; // reduction operation // The first axis to reduce to a scalar -- may be negative to index from the // end (e.g., -1 for the last axis). // (Currently, only reduction along ALL "tail" axes is supported; reduction // of axis M through N, where N < num_axes - 1, is unsupported.) // Suppose we have an n-axis bottom Blob with shape: // (d0, d1, d2, ..., d(m-1), dm, d(m+1), ..., d(n-1)). // If axis == m, the output Blob will have shape // (d0, d1, d2, ..., d(m-1)), // and the ReductionOp operation is performed (d0 * d1 * d2 * ... * d(m-1)) // times, each including (dm * d(m+1) * ... * d(n-1)) individual data. // If axis == 0 (the default), the output Blob always has the empty shape // (count 1), performing reduction across the entire input -- // often useful for creating new loss functions. optional int32 axis = 2 [default = 0]; optional float coeff = 3 [default = 1.0]; // coefficient for output } // Message that stores parameters used by ReLULayer message ReLUParameter { // Allow non-zero slope for negative inputs to speed up optimization // Described in: // Maas, A. L., Hannun, A. Y., & Ng, A. Y. (2013). 
Rectifier nonlinearities // improve neural network acoustic models. In ICML Workshop on Deep Learning // for Audio, Speech, and Language Processing. optional float negative_slope = 1 [default = 0]; enum Engine { DEFAULT = 0; CAFFE = 1; CUDNN = 2; } optional Engine engine = 2 [default = DEFAULT]; } message ReshapeParameter { // Specify the output dimensions. If some of the dimensions are set to 0, // the corresponding dimension from the bottom layer is used (unchanged). // Exactly one dimension may be set to -1, in which case its value is // inferred from the count of the bottom blob and the remaining dimensions. // For example, suppose we want to reshape a 2D blob "input" with shape 2 x 8: // // layer { // type: "Reshape" bottom: "input" top: "output" // reshape_param { ... } // } // // If "input" is 2D with shape 2 x 8, then the following reshape_param // specifications are all equivalent, producing a 3D blob "output" with shape // 2 x 2 x 4: // // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } // reshape_param { shape { dim: 0 dim: 2 dim: 4 } } // reshape_param { shape { dim: 0 dim: 2 dim: -1 } } // reshape_param { shape { dim: 0 dim:-1 dim: 4 } } // optional BlobShape shape = 1; // axis and num_axes control the portion of the bottom blob's shape that are // replaced by (included in) the reshape. By default (axis == 0 and // num_axes == -1), the entire bottom blob shape is included in the reshape, // and hence the shape field must specify the entire output shape. // // axis may be non-zero to retain some portion of the beginning of the input // shape (and may be negative to index from the end; e.g., -1 to begin the // reshape after the last axis, including nothing in the reshape, // -2 to include only the last axis, etc.). // // For example, suppose "input" is a 2D blob with shape 2 x 8. 
// Then the following ReshapeLayer specifications are all equivalent, // producing a blob "output" with shape 2 x 2 x 4: // // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } // reshape_param { shape { dim: 2 dim: 4 } axis: 1 } // reshape_param { shape { dim: 2 dim: 4 } axis: -3 } // // num_axes specifies the extent of the reshape. // If num_axes >= 0 (and axis >= 0), the reshape will be performed only on // input axes in the range [axis, axis+num_axes]. // num_axes may also be -1, the default, to include all remaining axes // (starting from axis). // // For example, suppose "input" is a 2D blob with shape 2 x 8. // Then the following ReshapeLayer specifications are equivalent, // producing a blob "output" with shape 1 x 2 x 8. // // reshape_param { shape { dim: 1 dim: 2 dim: 8 } } // reshape_param { shape { dim: 1 dim: 2 } num_axes: 1 } // reshape_param { shape { dim: 1 } num_axes: 0 } // // On the other hand, these would produce output blob shape 2 x 1 x 8: // // reshape_param { shape { dim: 2 dim: 1 dim: 8 } } // reshape_param { shape { dim: 1 } axis: 1 num_axes: 0 } // optional int32 axis = 2 [default = 0]; optional int32 num_axes = 3 [default = -1]; } message ScaleParameter { // The first axis of bottom[0] (the first input Blob) along which to apply // bottom[1] (the second input Blob). May be negative to index from the end // (e.g., -1 for the last axis). // // For example, if bottom[0] is 4D with shape 100x3x40x60, the output // top[0] will have the same shape, and bottom[1] may have any of the // following shapes (for the given value of axis): // (axis == 0 == -4) 100; 100x3; 100x3x40; 100x3x40x60 // (axis == 1 == -3) 3; 3x40; 3x40x60 // (axis == 2 == -2) 40; 40x60 // (axis == 3 == -1) 60 // Furthermore, bottom[1] may have the empty shape (regardless of the value of // "axis") -- a scalar multiplier. optional int32 axis = 1 [default = 1]; // (num_axes is ignored unless just one bottom is given and the scale is // a learned parameter of the layer. 
Otherwise, num_axes is determined by the // number of axes by the second bottom.) // The number of axes of the input (bottom[0]) covered by the scale // parameter, or -1 to cover all axes of bottom[0] starting from `axis`. // Set num_axes := 0, to multiply with a zero-axis Blob: a scalar. optional int32 num_axes = 2 [default = 1]; // (filler is ignored unless just one bottom is given and the scale is // a learned parameter of the layer.) // The initialization for the learned scale parameter. // Default is the unit (1) initialization, resulting in the ScaleLayer // initially performing the identity operation. optional FillerParameter filler = 3; // Whether to also learn a bias (equivalent to a ScaleLayer+BiasLayer, but // may be more efficient). Initialized with bias_filler (defaults to 0). optional bool bias_term = 4 [default = false]; optional FillerParameter bias_filler = 5; } message SigmoidParameter { enum Engine { DEFAULT = 0; CAFFE = 1; CUDNN = 2; } optional Engine engine = 1 [default = DEFAULT]; } message SliceParameter { // The axis along which to slice -- may be negative to index from the end // (e.g., -1 for the last axis). // By default, SliceLayer concatenates blobs along the "channels" axis (1). optional int32 axis = 3 [default = 1]; repeated uint32 slice_point = 2; // DEPRECATED: alias for "axis" -- does not support negative indexing. optional uint32 slice_dim = 1 [default = 1]; } // Message that stores parameters used by SoftmaxLayer, SoftmaxWithLossLayer message SoftmaxParameter { enum Engine { DEFAULT = 0; CAFFE = 1; CUDNN = 2; } optional Engine engine = 1 [default = DEFAULT]; // The axis along which to perform the softmax -- may be negative to index // from the end (e.g., -1 for the last axis). // Any other axes will be evaluated as independent softmaxes. 
optional int32 axis = 2 [default = 1]; } // Message that stores parameters used by SwishLayer message SwishParameter { // Beta parameter for the Swish activation function // Described in: // Prajit Ramachandran, Barret Zoph, Quoc V. Le. (2017). Searching for // Activation Functions. https://arxiv.org/abs/1710.05941v2 optional float beta = 1 [default = 1]; } message TanHParameter { enum Engine { DEFAULT = 0; CAFFE = 1; CUDNN = 2; } optional Engine engine = 1 [default = DEFAULT]; } // Message that stores parameters used by TileLayer message TileParameter { // The index of the axis to tile. optional int32 axis = 1 [default = 1]; // The number of copies (tiles) of the blob to output. optional int32 tiles = 2; } // Message that stores parameters used by ThresholdLayer message ThresholdParameter { optional float threshold = 1 [default = 0]; // Strictly positive values } message WindowDataParameter { // Specify the data source. optional string source = 1; // For data pre-processing, we can do simple scaling and subtracting the // data mean, if provided. Note that the mean subtraction is always carried // out before scaling. optional float scale = 2 [default = 1]; optional string mean_file = 3; // Specify the batch size. optional uint32 batch_size = 4; // Specify if we would like to randomly crop an image. optional uint32 crop_size = 5 [default = 0]; // Specify if we want to randomly mirror data. 
optional bool mirror = 6 [default = false]; // Foreground (object) overlap threshold optional float fg_threshold = 7 [default = 0.5]; // Background (non-object) overlap threshold optional float bg_threshold = 8 [default = 0.5]; // Fraction of batch that should be foreground objects optional float fg_fraction = 9 [default = 0.25]; // Amount of contextual padding to add around a window // (used only by the window_data_layer) optional uint32 context_pad = 10 [default = 0]; // Mode for cropping out a detection window // warp: cropped window is warped to a fixed size and aspect ratio // square: the tightest square around the window is cropped optional string crop_mode = 11 [default = "warp"]; // cache_images: will load all images in memory for faster access optional bool cache_images = 12 [default = false]; // append root_folder to locate images optional string root_folder = 13 [default = ""]; } message SPPParameter { enum PoolMethod { MAX = 0; AVE = 1; STOCHASTIC = 2; } optional uint32 pyramid_height = 1; optional PoolMethod pool = 2 [default = MAX]; // The pooling method enum Engine { DEFAULT = 0; CAFFE = 1; CUDNN = 2; } optional Engine engine = 6 [default = DEFAULT]; } // DEPRECATED: use LayerParameter. 
message V1LayerParameter { repeated string bottom = 2; repeated string top = 3; optional string name = 4; repeated NetStateRule include = 32; repeated NetStateRule exclude = 33; enum LayerType { NONE = 0; ABSVAL = 35; ACCURACY = 1; ARGMAX = 30; BNLL = 2; CONCAT = 3; CONTRASTIVE_LOSS = 37; CONVOLUTION = 4; DATA = 5; DECONVOLUTION = 39; DROPOUT = 6; DUMMY_DATA = 32; EUCLIDEAN_LOSS = 7; ELTWISE = 25; EXP = 38; FLATTEN = 8; HDF5_DATA = 9; HDF5_OUTPUT = 10; HINGE_LOSS = 28; IM2COL = 11; IMAGE_DATA = 12; INFOGAIN_LOSS = 13; INNER_PRODUCT = 14; LRN = 15; MEMORY_DATA = 29; MULTINOMIAL_LOGISTIC_LOSS = 16; MVN = 34; POOLING = 17; POWER = 26; RELU = 18; SIGMOID = 19; SIGMOID_CROSS_ENTROPY_LOSS = 27; SILENCE = 36; SOFTMAX = 20; SOFTMAX_LOSS = 21; SPLIT = 22; SLICE = 33; TANH = 23; WINDOW_DATA = 24; THRESHOLD = 31; } optional LayerType type = 5; repeated BlobProto blobs = 6; repeated string param = 1001; repeated DimCheckMode blob_share_mode = 1002; enum DimCheckMode { STRICT = 0; PERMISSIVE = 1; } repeated float blobs_lr = 7; repeated float weight_decay = 8; repeated float loss_weight = 35; optional AccuracyParameter accuracy_param = 27; optional ArgMaxParameter argmax_param = 23; optional ConcatParameter concat_param = 9; optional ContrastiveLossParameter contrastive_loss_param = 40; optional ConvolutionParameter convolution_param = 10; optional DataParameter data_param = 11; optional DropoutParameter dropout_param = 12; optional DummyDataParameter dummy_data_param = 26; optional EltwiseParameter eltwise_param = 24; optional ExpParameter exp_param = 41; optional HDF5DataParameter hdf5_data_param = 13; optional HDF5OutputParameter hdf5_output_param = 14; optional HingeLossParameter hinge_loss_param = 29; optional ImageDataParameter image_data_param = 15; optional InfogainLossParameter infogain_loss_param = 16; optional InnerProductParameter inner_product_param = 17; optional LRNParameter lrn_param = 18; optional MemoryDataParameter memory_data_param = 22; optional MVNParameter 
mvn_param = 34; optional PoolingParameter pooling_param = 19; optional PowerParameter power_param = 21; optional ReLUParameter relu_param = 30; optional SigmoidParameter sigmoid_param = 38; optional SoftmaxParameter softmax_param = 39; optional SliceParameter slice_param = 31; optional TanHParameter tanh_param = 37; optional ThresholdParameter threshold_param = 25; optional WindowDataParameter window_data_param = 20; optional TransformationParameter transform_param = 36; optional LossParameter loss_param = 42; optional V0LayerParameter layer = 1; } // DEPRECATED: V0LayerParameter is the old way of specifying layer parameters // in Caffe. We keep this message type around for legacy support. message V0LayerParameter { optional string name = 1; // the layer name optional string type = 2; // the string to specify the layer type // Parameters to specify layers with inner products. optional uint32 num_output = 3; // The number of outputs for the layer optional bool biasterm = 4 [default = true]; // whether to have bias terms optional FillerParameter weight_filler = 5; // The filler for the weight optional FillerParameter bias_filler = 6; // The filler for the bias optional uint32 pad = 7 [default = 0]; // The padding size optional uint32 kernelsize = 8; // The kernel size optional uint32 group = 9 [default = 1]; // The group size for group conv optional uint32 stride = 10 [default = 1]; // The stride enum PoolMethod { MAX = 0; AVE = 1; STOCHASTIC = 2; } optional PoolMethod pool = 11 [default = MAX]; // The pooling method optional float dropout_ratio = 12 [default = 0.5]; // dropout ratio optional uint32 local_size = 13 [default = 5]; // for local response norm optional float alpha = 14 [default = 1.]; // for local response norm optional float beta = 15 [default = 0.75]; // for local response norm optional float k = 22 [default = 1.]; // For data layers, specify the data source optional string source = 16; // For data pre-processing, we can do simple scaling and 
subtracting the // data mean, if provided. Note that the mean subtraction is always carried // out before scaling. optional float scale = 17 [default = 1]; optional string meanfile = 18; // For data layers, specify the batch size. optional uint32 batchsize = 19; // For data layers, specify if we would like to randomly crop an image. optional uint32 cropsize = 20 [default = 0]; // For data layers, specify if we want to randomly mirror data. optional bool mirror = 21 [default = false]; // The blobs containing the numeric parameters of the layer repeated BlobProto blobs = 50; // The ratio that is multiplied on the global learning rate. If you want to // set the learning ratio for one blob, you need to set it for all blobs. repeated float blobs_lr = 51; // The weight decay that is multiplied on the global weight decay. repeated float weight_decay = 52; // The rand_skip variable is for the data layer to skip a few data points // to avoid all asynchronous sgd clients to start at the same point. The skip // point would be set as rand_skip * rand(0,1). Note that rand_skip should not // be larger than the number of keys in the database. 
optional uint32 rand_skip = 53 [default = 0]; // Fields related to detection (det_*) // foreground (object) overlap threshold optional float det_fg_threshold = 54 [default = 0.5]; // background (non-object) overlap threshold optional float det_bg_threshold = 55 [default = 0.5]; // Fraction of batch that should be foreground objects optional float det_fg_fraction = 56 [default = 0.25]; // optional bool OBSOLETE_can_clobber = 57 [default = true]; // Amount of contextual padding to add around a window // (used only by the window_data_layer) optional uint32 det_context_pad = 58 [default = 0]; // Mode for cropping out a detection window // warp: cropped window is warped to a fixed size and aspect ratio // square: the tightest square around the window is cropped optional string det_crop_mode = 59 [default = "warp"]; // For ReshapeLayer, one needs to specify the new dimensions. optional int32 new_num = 60 [default = 0]; optional int32 new_channels = 61 [default = 0]; optional int32 new_height = 62 [default = 0]; optional int32 new_width = 63 [default = 0]; // Whether or not ImageLayer should shuffle the list of files at every epoch. // It will also resize images if new_height or new_width are not zero. optional bool shuffle_images = 64 [default = false]; // For ConcatLayer, one needs to specify the dimension for concatenation, and // the other dimensions must be the same for all the bottom blobs. // By default it will concatenate blobs along the channels dimension. optional uint32 concat_dim = 65 [default = 1]; optional HDF5OutputParameter hdf5_output_param = 1001; } message PReLUParameter { // Parametric ReLU described in K. He et al, Delving Deep into Rectifiers: // Surpassing Human-Level Performance on ImageNet Classification, 2015. // Initial value of a_i. Default is a_i=0.25 for all i. optional FillerParameter filler = 1; // Whether or not slope parameters are shared across channels. 
optional bool channel_shared = 2 [default = false]; } ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/caffe2/caffe/proto/caffe_pb2.py ================================================ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: caffe.proto from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='caffe.proto', package='caffe', syntax='proto2', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n\x0b\x63\x61\x66\x66\x65.proto\x12\x05\x63\x61\x66\x66\x65\"\x1c\n\tBlobShape\x12\x0f\n\x03\x64im\x18\x01 \x03(\x03\x42\x02\x10\x01\"\xcc\x01\n\tBlobProto\x12\x1f\n\x05shape\x18\x07 \x01(\x0b\x32\x10.caffe.BlobShape\x12\x10\n\x04\x64\x61ta\x18\x05 \x03(\x02\x42\x02\x10\x01\x12\x10\n\x04\x64iff\x18\x06 \x03(\x02\x42\x02\x10\x01\x12\x17\n\x0b\x64ouble_data\x18\x08 \x03(\x01\x42\x02\x10\x01\x12\x17\n\x0b\x64ouble_diff\x18\t \x03(\x01\x42\x02\x10\x01\x12\x0e\n\x03num\x18\x01 \x01(\x05:\x01\x30\x12\x13\n\x08\x63hannels\x18\x02 \x01(\x05:\x01\x30\x12\x11\n\x06height\x18\x03 \x01(\x05:\x01\x30\x12\x10\n\x05width\x18\x04 \x01(\x05:\x01\x30\"2\n\x0f\x42lobProtoVector\x12\x1f\n\x05\x62lobs\x18\x01 \x03(\x0b\x32\x10.caffe.BlobProto\"\x81\x01\n\x05\x44\x61tum\x12\x10\n\x08\x63hannels\x18\x01 \x01(\x05\x12\x0e\n\x06height\x18\x02 \x01(\x05\x12\r\n\x05width\x18\x03 \x01(\x05\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\x12\r\n\x05label\x18\x05 \x01(\x05\x12\x12\n\nfloat_data\x18\x06 \x03(\x02\x12\x16\n\x07\x65ncoded\x18\x07 \x01(\x08:\x05\x66\x61lse\"\x8a\x02\n\x0f\x46illerParameter\x12\x16\n\x04type\x18\x01 
\x01(\t:\x08\x63onstant\x12\x10\n\x05value\x18\x02 \x01(\x02:\x01\x30\x12\x0e\n\x03min\x18\x03 \x01(\x02:\x01\x30\x12\x0e\n\x03max\x18\x04 \x01(\x02:\x01\x31\x12\x0f\n\x04mean\x18\x05 \x01(\x02:\x01\x30\x12\x0e\n\x03std\x18\x06 \x01(\x02:\x01\x31\x12\x12\n\x06sparse\x18\x07 \x01(\x05:\x02-1\x12\x42\n\rvariance_norm\x18\x08 \x01(\x0e\x32#.caffe.FillerParameter.VarianceNorm:\x06\x46\x41N_IN\"4\n\x0cVarianceNorm\x12\n\n\x06\x46\x41N_IN\x10\x00\x12\x0b\n\x07\x46\x41N_OUT\x10\x01\x12\x0b\n\x07\x41VERAGE\x10\x02\"\x8e\x02\n\x0cNetParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05input\x18\x03 \x03(\t\x12%\n\x0binput_shape\x18\x08 \x03(\x0b\x32\x10.caffe.BlobShape\x12\x11\n\tinput_dim\x18\x04 \x03(\x05\x12\x1d\n\x0e\x66orce_backward\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x1e\n\x05state\x18\x06 \x01(\x0b\x32\x0f.caffe.NetState\x12\x19\n\ndebug_info\x18\x07 \x01(\x08:\x05\x66\x61lse\x12$\n\x05layer\x18\x64 \x03(\x0b\x32\x15.caffe.LayerParameter\x12\'\n\x06layers\x18\x02 \x03(\x0b\x32\x17.caffe.V1LayerParameter\"\xd4\n\n\x0fSolverParameter\x12\x0b\n\x03net\x18\x18 \x01(\t\x12&\n\tnet_param\x18\x19 \x01(\x0b\x32\x13.caffe.NetParameter\x12\x11\n\ttrain_net\x18\x01 \x01(\t\x12\x10\n\x08test_net\x18\x02 \x03(\t\x12,\n\x0ftrain_net_param\x18\x15 \x01(\x0b\x32\x13.caffe.NetParameter\x12+\n\x0etest_net_param\x18\x16 \x03(\x0b\x32\x13.caffe.NetParameter\x12$\n\x0btrain_state\x18\x1a \x01(\x0b\x32\x0f.caffe.NetState\x12#\n\ntest_state\x18\x1b \x03(\x0b\x32\x0f.caffe.NetState\x12\x11\n\ttest_iter\x18\x03 \x03(\x05\x12\x18\n\rtest_interval\x18\x04 \x01(\x05:\x01\x30\x12 \n\x11test_compute_loss\x18\x13 \x01(\x08:\x05\x66\x61lse\x12!\n\x13test_initialization\x18 \x01(\x08:\x04true\x12\x0f\n\x07\x62\x61se_lr\x18\x05 \x01(\x02\x12\x0f\n\x07\x64isplay\x18\x06 \x01(\x05\x12\x17\n\x0c\x61verage_loss\x18! 
\x01(\x05:\x01\x31\x12\x10\n\x08max_iter\x18\x07 \x01(\x05\x12\x14\n\titer_size\x18$ \x01(\x05:\x01\x31\x12\x11\n\tlr_policy\x18\x08 \x01(\t\x12\r\n\x05gamma\x18\t \x01(\x02\x12\r\n\x05power\x18\n \x01(\x02\x12\x10\n\x08momentum\x18\x0b \x01(\x02\x12\x14\n\x0cweight_decay\x18\x0c \x01(\x02\x12\x1f\n\x13regularization_type\x18\x1d \x01(\t:\x02L2\x12\x10\n\x08stepsize\x18\r \x01(\x05\x12\x11\n\tstepvalue\x18\" \x03(\x05\x12\x1a\n\x0e\x63lip_gradients\x18# \x01(\x02:\x02-1\x12\x13\n\x08snapshot\x18\x0e \x01(\x05:\x01\x30\x12\x17\n\x0fsnapshot_prefix\x18\x0f \x01(\t\x12\x1c\n\rsnapshot_diff\x18\x10 \x01(\x08:\x05\x66\x61lse\x12K\n\x0fsnapshot_format\x18% \x01(\x0e\x32%.caffe.SolverParameter.SnapshotFormat:\x0b\x42INARYPROTO\x12;\n\x0bsolver_mode\x18\x11 \x01(\x0e\x32!.caffe.SolverParameter.SolverMode:\x03GPU\x12\x14\n\tdevice_id\x18\x12 \x01(\x05:\x01\x30\x12\x17\n\x0brandom_seed\x18\x14 \x01(\x03:\x02-1\x12\x11\n\x04type\x18( \x01(\t:\x03SGD\x12\x14\n\x05\x64\x65lta\x18\x1f \x01(\x02:\x05\x31\x65-08\x12\x18\n\tmomentum2\x18\' \x01(\x02:\x05\x30.999\x12\x17\n\trms_decay\x18& \x01(\x02:\x04\x30.99\x12\x19\n\ndebug_info\x18\x17 \x01(\x08:\x05\x66\x61lse\x12\"\n\x14snapshot_after_train\x18\x1c \x01(\x08:\x04true\x12;\n\x0bsolver_type\x18\x1e \x01(\x0e\x32!.caffe.SolverParameter.SolverType:\x03SGD\x12\x1f\n\x11layer_wise_reduce\x18) \x01(\x08:\x04true\x12\x0f\n\x07weights\x18* \x03(\t\"+\n\x0eSnapshotFormat\x12\x08\n\x04HDF5\x10\x00\x12\x0f\n\x0b\x42INARYPROTO\x10\x01\"\x1e\n\nSolverMode\x12\x07\n\x03\x43PU\x10\x00\x12\x07\n\x03GPU\x10\x01\"U\n\nSolverType\x12\x07\n\x03SGD\x10\x00\x12\x0c\n\x08NESTEROV\x10\x01\x12\x0b\n\x07\x41\x44\x41GRAD\x10\x02\x12\x0b\n\x07RMSPROP\x10\x03\x12\x0c\n\x08\x41\x44\x41\x44\x45LTA\x10\x04\x12\x08\n\x04\x41\x44\x41M\x10\x05\"l\n\x0bSolverState\x12\x0c\n\x04iter\x18\x01 \x01(\x05\x12\x13\n\x0blearned_net\x18\x02 \x01(\t\x12!\n\x07history\x18\x03 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x17\n\x0c\x63urrent_step\x18\x04 
\x01(\x05:\x01\x30\"N\n\x08NetState\x12!\n\x05phase\x18\x01 \x01(\x0e\x32\x0c.caffe.Phase:\x04TEST\x12\x10\n\x05level\x18\x02 \x01(\x05:\x01\x30\x12\r\n\x05stage\x18\x03 \x03(\t\"s\n\x0cNetStateRule\x12\x1b\n\x05phase\x18\x01 \x01(\x0e\x32\x0c.caffe.Phase\x12\x11\n\tmin_level\x18\x02 \x01(\x05\x12\x11\n\tmax_level\x18\x03 \x01(\x05\x12\r\n\x05stage\x18\x04 \x03(\t\x12\x11\n\tnot_stage\x18\x05 \x03(\t\"\xa3\x01\n\tParamSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x31\n\nshare_mode\x18\x02 \x01(\x0e\x32\x1d.caffe.ParamSpec.DimCheckMode\x12\x12\n\x07lr_mult\x18\x03 \x01(\x02:\x01\x31\x12\x15\n\ndecay_mult\x18\x04 \x01(\x02:\x01\x31\"*\n\x0c\x44imCheckMode\x12\n\n\x06STRICT\x10\x00\x12\x0e\n\nPERMISSIVE\x10\x01\"\xda\x14\n\x0eLayerParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x0e\n\x06\x62ottom\x18\x03 \x03(\t\x12\x0b\n\x03top\x18\x04 \x03(\t\x12\x1b\n\x05phase\x18\n \x01(\x0e\x32\x0c.caffe.Phase\x12\x13\n\x0bloss_weight\x18\x05 \x03(\x02\x12\x1f\n\x05param\x18\x06 \x03(\x0b\x32\x10.caffe.ParamSpec\x12\x1f\n\x05\x62lobs\x18\x07 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x16\n\x0epropagate_down\x18\x0b \x03(\x08\x12$\n\x07include\x18\x08 \x03(\x0b\x32\x13.caffe.NetStateRule\x12$\n\x07\x65xclude\x18\t \x03(\x0b\x32\x13.caffe.NetStateRule\x12\x37\n\x0ftransform_param\x18\x64 \x01(\x0b\x32\x1e.caffe.TransformationParameter\x12(\n\nloss_param\x18\x65 \x01(\x0b\x32\x14.caffe.LossParameter\x12\x30\n\x0e\x61\x63\x63uracy_param\x18\x66 \x01(\x0b\x32\x18.caffe.AccuracyParameter\x12,\n\x0c\x61rgmax_param\x18g \x01(\x0b\x32\x16.caffe.ArgMaxParameter\x12\x34\n\x10\x62\x61tch_norm_param\x18\x8b\x01 \x01(\x0b\x32\x19.caffe.BatchNormParameter\x12)\n\nbias_param\x18\x8d\x01 \x01(\x0b\x32\x14.caffe.BiasParameter\x12)\n\nclip_param\x18\x94\x01 \x01(\x0b\x32\x14.caffe.ClipParameter\x12,\n\x0c\x63oncat_param\x18h \x01(\x0b\x32\x16.caffe.ConcatParameter\x12?\n\x16\x63ontrastive_loss_param\x18i 
\x01(\x0b\x32\x1f.caffe.ContrastiveLossParameter\x12\x36\n\x11\x63onvolution_param\x18j \x01(\x0b\x32\x1b.caffe.ConvolutionParameter\x12)\n\ncrop_param\x18\x90\x01 \x01(\x0b\x32\x14.caffe.CropParameter\x12(\n\ndata_param\x18k \x01(\x0b\x32\x14.caffe.DataParameter\x12.\n\rdropout_param\x18l \x01(\x0b\x32\x17.caffe.DropoutParameter\x12\x33\n\x10\x64ummy_data_param\x18m \x01(\x0b\x32\x19.caffe.DummyDataParameter\x12.\n\reltwise_param\x18n \x01(\x0b\x32\x17.caffe.EltwiseParameter\x12\'\n\telu_param\x18\x8c\x01 \x01(\x0b\x32\x13.caffe.ELUParameter\x12+\n\x0b\x65mbed_param\x18\x89\x01 \x01(\x0b\x32\x15.caffe.EmbedParameter\x12&\n\texp_param\x18o \x01(\x0b\x32\x13.caffe.ExpParameter\x12/\n\rflatten_param\x18\x87\x01 \x01(\x0b\x32\x17.caffe.FlattenParameter\x12\x31\n\x0fhdf5_data_param\x18p \x01(\x0b\x32\x18.caffe.HDF5DataParameter\x12\x35\n\x11hdf5_output_param\x18q \x01(\x0b\x32\x1a.caffe.HDF5OutputParameter\x12\x33\n\x10hinge_loss_param\x18r \x01(\x0b\x32\x19.caffe.HingeLossParameter\x12\x33\n\x10image_data_param\x18s \x01(\x0b\x32\x19.caffe.ImageDataParameter\x12\x39\n\x13infogain_loss_param\x18t \x01(\x0b\x32\x1c.caffe.InfogainLossParameter\x12\x39\n\x13inner_product_param\x18u \x01(\x0b\x32\x1c.caffe.InnerProductParameter\x12+\n\x0binput_param\x18\x8f\x01 \x01(\x0b\x32\x15.caffe.InputParameter\x12\'\n\tlog_param\x18\x86\x01 \x01(\x0b\x32\x13.caffe.LogParameter\x12&\n\tlrn_param\x18v \x01(\x0b\x32\x13.caffe.LRNParameter\x12\x35\n\x11memory_data_param\x18w \x01(\x0b\x32\x1a.caffe.MemoryDataParameter\x12&\n\tmvn_param\x18x \x01(\x0b\x32\x13.caffe.MVNParameter\x12\x33\n\x0fparameter_param\x18\x91\x01 \x01(\x0b\x32\x19.caffe.ParameterParameter\x12.\n\rpooling_param\x18y \x01(\x0b\x32\x17.caffe.PoolingParameter\x12*\n\x0bpower_param\x18z \x01(\x0b\x32\x15.caffe.PowerParameter\x12+\n\x0bprelu_param\x18\x83\x01 \x01(\x0b\x32\x15.caffe.PReLUParameter\x12-\n\x0cpython_param\x18\x82\x01 \x01(\x0b\x32\x16.caffe.PythonParameter\x12\x33\n\x0frecurrent_param\x18\x92\x01 
\x01(\x0b\x32\x19.caffe.RecurrentParameter\x12\x33\n\x0freduction_param\x18\x88\x01 \x01(\x0b\x32\x19.caffe.ReductionParameter\x12(\n\nrelu_param\x18{ \x01(\x0b\x32\x14.caffe.ReLUParameter\x12/\n\rreshape_param\x18\x85\x01 \x01(\x0b\x32\x17.caffe.ReshapeParameter\x12+\n\x0bscale_param\x18\x8e\x01 \x01(\x0b\x32\x15.caffe.ScaleParameter\x12.\n\rsigmoid_param\x18| \x01(\x0b\x32\x17.caffe.SigmoidParameter\x12.\n\rsoftmax_param\x18} \x01(\x0b\x32\x17.caffe.SoftmaxParameter\x12\'\n\tspp_param\x18\x84\x01 \x01(\x0b\x32\x13.caffe.SPPParameter\x12*\n\x0bslice_param\x18~ \x01(\x0b\x32\x15.caffe.SliceParameter\x12+\n\x0bswish_param\x18\x93\x01 \x01(\x0b\x32\x15.caffe.SwishParameter\x12(\n\ntanh_param\x18\x7f \x01(\x0b\x32\x14.caffe.TanHParameter\x12\x33\n\x0fthreshold_param\x18\x80\x01 \x01(\x0b\x32\x19.caffe.ThresholdParameter\x12)\n\ntile_param\x18\x8a\x01 \x01(\x0b\x32\x14.caffe.TileParameter\x12\x36\n\x11window_data_param\x18\x81\x01 \x01(\x0b\x32\x1a.caffe.WindowDataParameter\"\xb6\x01\n\x17TransformationParameter\x12\x10\n\x05scale\x18\x01 \x01(\x02:\x01\x31\x12\x15\n\x06mirror\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x14\n\tcrop_size\x18\x03 \x01(\r:\x01\x30\x12\x11\n\tmean_file\x18\x04 \x01(\t\x12\x12\n\nmean_value\x18\x05 \x03(\x02\x12\x1a\n\x0b\x66orce_color\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x19\n\nforce_gray\x18\x07 \x01(\x08:\x05\x66\x61lse\"\xc2\x01\n\rLossParameter\x12\x14\n\x0cignore_label\x18\x01 \x01(\x05\x12\x44\n\rnormalization\x18\x03 \x01(\x0e\x32&.caffe.LossParameter.NormalizationMode:\x05VALID\x12\x11\n\tnormalize\x18\x02 \x01(\x08\"B\n\x11NormalizationMode\x12\x08\n\x04\x46ULL\x10\x00\x12\t\n\x05VALID\x10\x01\x12\x0e\n\nBATCH_SIZE\x10\x02\x12\x08\n\x04NONE\x10\x03\"L\n\x11\x41\x63\x63uracyParameter\x12\x10\n\x05top_k\x18\x01 \x01(\r:\x01\x31\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\x12\x14\n\x0cignore_label\x18\x03 \x01(\x05\"M\n\x0f\x41rgMaxParameter\x12\x1a\n\x0bout_max_val\x18\x01 \x01(\x08:\x05\x66\x61lse\x12\x10\n\x05top_k\x18\x02 
\x01(\r:\x01\x31\x12\x0c\n\x04\x61xis\x18\x03 \x01(\x05\")\n\rClipParameter\x12\x0b\n\x03min\x18\x01 \x02(\x02\x12\x0b\n\x03max\x18\x02 \x02(\x02\"9\n\x0f\x43oncatParameter\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\x12\x15\n\nconcat_dim\x18\x01 \x01(\r:\x01\x31\"j\n\x12\x42\x61tchNormParameter\x12\x18\n\x10use_global_stats\x18\x01 \x01(\x08\x12&\n\x17moving_average_fraction\x18\x02 \x01(\x02:\x05\x30.999\x12\x12\n\x03\x65ps\x18\x03 \x01(\x02:\x05\x31\x65-05\"]\n\rBiasParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x13\n\x08num_axes\x18\x02 \x01(\x05:\x01\x31\x12&\n\x06\x66iller\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\"L\n\x18\x43ontrastiveLossParameter\x12\x11\n\x06margin\x18\x01 \x01(\x02:\x01\x31\x12\x1d\n\x0elegacy_version\x18\x02 \x01(\x08:\x05\x66\x61lse\"\xfc\x03\n\x14\x43onvolutionParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x17\n\tbias_term\x18\x02 \x01(\x08:\x04true\x12\x0b\n\x03pad\x18\x03 \x03(\r\x12\x13\n\x0bkernel_size\x18\x04 \x03(\r\x12\x0e\n\x06stride\x18\x06 \x03(\r\x12\x10\n\x08\x64ilation\x18\x12 \x03(\r\x12\x10\n\x05pad_h\x18\t \x01(\r:\x01\x30\x12\x10\n\x05pad_w\x18\n \x01(\r:\x01\x30\x12\x10\n\x08kernel_h\x18\x0b \x01(\r\x12\x10\n\x08kernel_w\x18\x0c \x01(\r\x12\x10\n\x08stride_h\x18\r \x01(\r\x12\x10\n\x08stride_w\x18\x0e \x01(\r\x12\x10\n\x05group\x18\x05 \x01(\r:\x01\x31\x12-\n\rweight_filler\x18\x07 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x08 \x01(\x0b\x32\x16.caffe.FillerParameter\x12;\n\x06\x65ngine\x18\x0f \x01(\x0e\x32\".caffe.ConvolutionParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x0f\n\x04\x61xis\x18\x10 \x01(\x05:\x01\x31\x12\x1e\n\x0f\x66orce_nd_im2col\x18\x11 \x01(\x08:\x05\x66\x61lse\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"0\n\rCropParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x32\x12\x0e\n\x06offset\x18\x02 \x03(\r\"\xa4\x02\n\rDataParameter\x12\x0e\n\x06source\x18\x01 
\x01(\t\x12\x12\n\nbatch_size\x18\x04 \x01(\r\x12\x14\n\trand_skip\x18\x07 \x01(\r:\x01\x30\x12\x31\n\x07\x62\x61\x63kend\x18\x08 \x01(\x0e\x32\x17.caffe.DataParameter.DB:\x07LEVELDB\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\"\n\x13\x66orce_encoded_color\x18\t \x01(\x08:\x05\x66\x61lse\x12\x13\n\x08prefetch\x18\n \x01(\r:\x01\x34\"\x1b\n\x02\x44\x42\x12\x0b\n\x07LEVELDB\x10\x00\x12\x08\n\x04LMDB\x10\x01\".\n\x10\x44ropoutParameter\x12\x1a\n\rdropout_ratio\x18\x01 \x01(\x02:\x03\x30.5\"\xa0\x01\n\x12\x44ummyDataParameter\x12+\n\x0b\x64\x61ta_filler\x18\x01 \x03(\x0b\x32\x16.caffe.FillerParameter\x12\x1f\n\x05shape\x18\x06 \x03(\x0b\x32\x10.caffe.BlobShape\x12\x0b\n\x03num\x18\x02 \x03(\r\x12\x10\n\x08\x63hannels\x18\x03 \x03(\r\x12\x0e\n\x06height\x18\x04 \x03(\r\x12\r\n\x05width\x18\x05 \x03(\r\"\xa5\x01\n\x10\x45ltwiseParameter\x12\x39\n\toperation\x18\x01 \x01(\x0e\x32!.caffe.EltwiseParameter.EltwiseOp:\x03SUM\x12\r\n\x05\x63oeff\x18\x02 \x03(\x02\x12\x1e\n\x10stable_prod_grad\x18\x03 \x01(\x08:\x04true\"\'\n\tEltwiseOp\x12\x08\n\x04PROD\x10\x00\x12\x07\n\x03SUM\x10\x01\x12\x07\n\x03MAX\x10\x02\" \n\x0c\x45LUParameter\x12\x10\n\x05\x61lpha\x18\x01 \x01(\x02:\x01\x31\"\xac\x01\n\x0e\x45mbedParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x11\n\tinput_dim\x18\x02 \x01(\r\x12\x17\n\tbias_term\x18\x03 \x01(\x08:\x04true\x12-\n\rweight_filler\x18\x04 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x05 \x01(\x0b\x32\x16.caffe.FillerParameter\"D\n\x0c\x45xpParameter\x12\x10\n\x04\x62\x61se\x18\x01 \x01(\x02:\x02-1\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"9\n\x10\x46lattenParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x14\n\x08\x65nd_axis\x18\x02 \x01(\x05:\x02-1\"O\n\x11HDF5DataParameter\x12\x0e\n\x06source\x18\x01 
\x01(\t\x12\x12\n\nbatch_size\x18\x02 \x01(\r\x12\x16\n\x07shuffle\x18\x03 \x01(\x08:\x05\x66\x61lse\"(\n\x13HDF5OutputParameter\x12\x11\n\tfile_name\x18\x01 \x01(\t\"^\n\x12HingeLossParameter\x12\x30\n\x04norm\x18\x01 \x01(\x0e\x32\x1e.caffe.HingeLossParameter.Norm:\x02L1\"\x16\n\x04Norm\x12\x06\n\x02L1\x10\x01\x12\x06\n\x02L2\x10\x02\"\x97\x02\n\x12ImageDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x15\n\nbatch_size\x18\x04 \x01(\r:\x01\x31\x12\x14\n\trand_skip\x18\x07 \x01(\r:\x01\x30\x12\x16\n\x07shuffle\x18\x08 \x01(\x08:\x05\x66\x61lse\x12\x15\n\nnew_height\x18\t \x01(\r:\x01\x30\x12\x14\n\tnew_width\x18\n \x01(\r:\x01\x30\x12\x16\n\x08is_color\x18\x0b \x01(\x08:\x04true\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x15\n\x0broot_folder\x18\x0c \x01(\t:\x00\"8\n\x15InfogainLossParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\"\xcb\x01\n\x15InnerProductParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x17\n\tbias_term\x18\x02 \x01(\x08:\x04true\x12-\n\rweight_filler\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x04 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x0f\n\x04\x61xis\x18\x05 \x01(\x05:\x01\x31\x12\x18\n\ttranspose\x18\x06 \x01(\x08:\x05\x66\x61lse\"1\n\x0eInputParameter\x12\x1f\n\x05shape\x18\x01 \x03(\x0b\x32\x10.caffe.BlobShape\"D\n\x0cLogParameter\x12\x10\n\x04\x62\x61se\x18\x01 \x01(\x02:\x02-1\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"\xb8\x02\n\x0cLRNParameter\x12\x15\n\nlocal_size\x18\x01 \x01(\r:\x01\x35\x12\x10\n\x05\x61lpha\x18\x02 \x01(\x02:\x01\x31\x12\x12\n\x04\x62\x65ta\x18\x03 \x01(\x02:\x04\x30.75\x12\x44\n\x0bnorm_region\x18\x04 \x01(\x0e\x32\x1e.caffe.LRNParameter.NormRegion:\x0f\x41\x43ROSS_CHANNELS\x12\x0c\n\x01k\x18\x05 
\x01(\x02:\x01\x31\x12\x33\n\x06\x65ngine\x18\x06 \x01(\x0e\x32\x1a.caffe.LRNParameter.Engine:\x07\x44\x45\x46\x41ULT\"5\n\nNormRegion\x12\x13\n\x0f\x41\x43ROSS_CHANNELS\x10\x00\x12\x12\n\x0eWITHIN_CHANNEL\x10\x01\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"Z\n\x13MemoryDataParameter\x12\x12\n\nbatch_size\x18\x01 \x01(\r\x12\x10\n\x08\x63hannels\x18\x02 \x01(\r\x12\x0e\n\x06height\x18\x03 \x01(\r\x12\r\n\x05width\x18\x04 \x01(\r\"d\n\x0cMVNParameter\x12 \n\x12normalize_variance\x18\x01 \x01(\x08:\x04true\x12\x1e\n\x0f\x61\x63ross_channels\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x12\n\x03\x65ps\x18\x03 \x01(\x02:\x05\x31\x65-09\"5\n\x12ParameterParameter\x12\x1f\n\x05shape\x18\x01 \x01(\x0b\x32\x10.caffe.BlobShape\"\x81\x04\n\x10PoolingParameter\x12\x35\n\x04pool\x18\x01 \x01(\x0e\x32\".caffe.PoolingParameter.PoolMethod:\x03MAX\x12\x0e\n\x03pad\x18\x04 \x01(\r:\x01\x30\x12\x10\n\x05pad_h\x18\t \x01(\r:\x01\x30\x12\x10\n\x05pad_w\x18\n \x01(\r:\x01\x30\x12\x13\n\x0bkernel_size\x18\x02 \x01(\r\x12\x10\n\x08kernel_h\x18\x05 \x01(\r\x12\x10\n\x08kernel_w\x18\x06 \x01(\r\x12\x11\n\x06stride\x18\x03 \x01(\r:\x01\x31\x12\x10\n\x08stride_h\x18\x07 \x01(\r\x12\x10\n\x08stride_w\x18\x08 \x01(\r\x12\x37\n\x06\x65ngine\x18\x0b \x01(\x0e\x32\x1e.caffe.PoolingParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x1d\n\x0eglobal_pooling\x18\x0c \x01(\x08:\x05\x66\x61lse\x12;\n\nround_mode\x18\r \x01(\x0e\x32!.caffe.PoolingParameter.RoundMode:\x04\x43\x45IL\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\" \n\tRoundMode\x12\x08\n\x04\x43\x45IL\x10\x00\x12\t\n\x05\x46LOOR\x10\x01\"F\n\x0ePowerParameter\x12\x10\n\x05power\x18\x01 \x01(\x02:\x01\x31\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 
\x01(\x02:\x01\x30\"g\n\x0fPythonParameter\x12\x0e\n\x06module\x18\x01 \x01(\t\x12\r\n\x05layer\x18\x02 \x01(\t\x12\x13\n\tparam_str\x18\x03 \x01(\t:\x00\x12 \n\x11share_in_parallel\x18\x04 \x01(\x08:\x05\x66\x61lse\"\xc0\x01\n\x12RecurrentParameter\x12\x15\n\nnum_output\x18\x01 \x01(\r:\x01\x30\x12-\n\rweight_filler\x18\x02 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x19\n\ndebug_info\x18\x04 \x01(\x08:\x05\x66\x61lse\x12\x1c\n\rexpose_hidden\x18\x05 \x01(\x08:\x05\x66\x61lse\"\xad\x01\n\x12ReductionParameter\x12=\n\toperation\x18\x01 \x01(\x0e\x32%.caffe.ReductionParameter.ReductionOp:\x03SUM\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x30\x12\x10\n\x05\x63oeff\x18\x03 \x01(\x02:\x01\x31\"5\n\x0bReductionOp\x12\x07\n\x03SUM\x10\x01\x12\x08\n\x04\x41SUM\x10\x02\x12\t\n\x05SUMSQ\x10\x03\x12\x08\n\x04MEAN\x10\x04\"\x8d\x01\n\rReLUParameter\x12\x19\n\x0enegative_slope\x18\x01 \x01(\x02:\x01\x30\x12\x34\n\x06\x65ngine\x18\x02 \x01(\x0e\x32\x1b.caffe.ReLUParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"Z\n\x10ReshapeParameter\x12\x1f\n\x05shape\x18\x01 \x01(\x0b\x32\x10.caffe.BlobShape\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x30\x12\x14\n\x08num_axes\x18\x03 \x01(\x05:\x02-1\"\xa5\x01\n\x0eScaleParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x13\n\x08num_axes\x18\x02 \x01(\x05:\x01\x31\x12&\n\x06\x66iller\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x18\n\tbias_term\x18\x04 \x01(\x08:\x05\x66\x61lse\x12+\n\x0b\x62ias_filler\x18\x05 \x01(\x0b\x32\x16.caffe.FillerParameter\"x\n\x10SigmoidParameter\x12\x37\n\x06\x65ngine\x18\x01 
\x01(\x0e\x32\x1e.caffe.SigmoidParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"L\n\x0eSliceParameter\x12\x0f\n\x04\x61xis\x18\x03 \x01(\x05:\x01\x31\x12\x13\n\x0bslice_point\x18\x02 \x03(\r\x12\x14\n\tslice_dim\x18\x01 \x01(\r:\x01\x31\"\x89\x01\n\x10SoftmaxParameter\x12\x37\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1e.caffe.SoftmaxParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"!\n\x0eSwishParameter\x12\x0f\n\x04\x62\x65ta\x18\x01 \x01(\x02:\x01\x31\"r\n\rTanHParameter\x12\x34\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1b.caffe.TanHParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"/\n\rTileParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\r\n\x05tiles\x18\x02 \x01(\x05\"*\n\x12ThresholdParameter\x12\x14\n\tthreshold\x18\x01 \x01(\x02:\x01\x30\"\xc1\x02\n\x13WindowDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x12\n\nbatch_size\x18\x04 \x01(\r\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x19\n\x0c\x66g_threshold\x18\x07 \x01(\x02:\x03\x30.5\x12\x19\n\x0c\x62g_threshold\x18\x08 \x01(\x02:\x03\x30.5\x12\x19\n\x0b\x66g_fraction\x18\t \x01(\x02:\x04\x30.25\x12\x16\n\x0b\x63ontext_pad\x18\n \x01(\r:\x01\x30\x12\x17\n\tcrop_mode\x18\x0b \x01(\t:\x04warp\x12\x1b\n\x0c\x63\x61\x63he_images\x18\x0c \x01(\x08:\x05\x66\x61lse\x12\x15\n\x0broot_folder\x18\r \x01(\t:\x00\"\xeb\x01\n\x0cSPPParameter\x12\x16\n\x0epyramid_height\x18\x01 \x01(\r\x12\x31\n\x04pool\x18\x02 
\x01(\x0e\x32\x1e.caffe.SPPParameter.PoolMethod:\x03MAX\x12\x33\n\x06\x65ngine\x18\x06 \x01(\x0e\x32\x1a.caffe.SPPParameter.Engine:\x07\x44\x45\x46\x41ULT\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"\xe0\x13\n\x10V1LayerParameter\x12\x0e\n\x06\x62ottom\x18\x02 \x03(\t\x12\x0b\n\x03top\x18\x03 \x03(\t\x12\x0c\n\x04name\x18\x04 \x01(\t\x12$\n\x07include\x18 \x03(\x0b\x32\x13.caffe.NetStateRule\x12$\n\x07\x65xclude\x18! \x03(\x0b\x32\x13.caffe.NetStateRule\x12/\n\x04type\x18\x05 \x01(\x0e\x32!.caffe.V1LayerParameter.LayerType\x12\x1f\n\x05\x62lobs\x18\x06 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x0e\n\x05param\x18\xe9\x07 \x03(\t\x12>\n\x0f\x62lob_share_mode\x18\xea\x07 \x03(\x0e\x32$.caffe.V1LayerParameter.DimCheckMode\x12\x10\n\x08\x62lobs_lr\x18\x07 \x03(\x02\x12\x14\n\x0cweight_decay\x18\x08 \x03(\x02\x12\x13\n\x0bloss_weight\x18# \x03(\x02\x12\x30\n\x0e\x61\x63\x63uracy_param\x18\x1b \x01(\x0b\x32\x18.caffe.AccuracyParameter\x12,\n\x0c\x61rgmax_param\x18\x17 \x01(\x0b\x32\x16.caffe.ArgMaxParameter\x12,\n\x0c\x63oncat_param\x18\t \x01(\x0b\x32\x16.caffe.ConcatParameter\x12?\n\x16\x63ontrastive_loss_param\x18( \x01(\x0b\x32\x1f.caffe.ContrastiveLossParameter\x12\x36\n\x11\x63onvolution_param\x18\n \x01(\x0b\x32\x1b.caffe.ConvolutionParameter\x12(\n\ndata_param\x18\x0b \x01(\x0b\x32\x14.caffe.DataParameter\x12.\n\rdropout_param\x18\x0c \x01(\x0b\x32\x17.caffe.DropoutParameter\x12\x33\n\x10\x64ummy_data_param\x18\x1a \x01(\x0b\x32\x19.caffe.DummyDataParameter\x12.\n\reltwise_param\x18\x18 \x01(\x0b\x32\x17.caffe.EltwiseParameter\x12&\n\texp_param\x18) \x01(\x0b\x32\x13.caffe.ExpParameter\x12\x31\n\x0fhdf5_data_param\x18\r \x01(\x0b\x32\x18.caffe.HDF5DataParameter\x12\x35\n\x11hdf5_output_param\x18\x0e \x01(\x0b\x32\x1a.caffe.HDF5OutputParameter\x12\x33\n\x10hinge_loss_param\x18\x1d 
\x01(\x0b\x32\x19.caffe.HingeLossParameter\x12\x33\n\x10image_data_param\x18\x0f \x01(\x0b\x32\x19.caffe.ImageDataParameter\x12\x39\n\x13infogain_loss_param\x18\x10 \x01(\x0b\x32\x1c.caffe.InfogainLossParameter\x12\x39\n\x13inner_product_param\x18\x11 \x01(\x0b\x32\x1c.caffe.InnerProductParameter\x12&\n\tlrn_param\x18\x12 \x01(\x0b\x32\x13.caffe.LRNParameter\x12\x35\n\x11memory_data_param\x18\x16 \x01(\x0b\x32\x1a.caffe.MemoryDataParameter\x12&\n\tmvn_param\x18\" \x01(\x0b\x32\x13.caffe.MVNParameter\x12.\n\rpooling_param\x18\x13 \x01(\x0b\x32\x17.caffe.PoolingParameter\x12*\n\x0bpower_param\x18\x15 \x01(\x0b\x32\x15.caffe.PowerParameter\x12(\n\nrelu_param\x18\x1e \x01(\x0b\x32\x14.caffe.ReLUParameter\x12.\n\rsigmoid_param\x18& \x01(\x0b\x32\x17.caffe.SigmoidParameter\x12.\n\rsoftmax_param\x18\' \x01(\x0b\x32\x17.caffe.SoftmaxParameter\x12*\n\x0bslice_param\x18\x1f \x01(\x0b\x32\x15.caffe.SliceParameter\x12(\n\ntanh_param\x18% \x01(\x0b\x32\x14.caffe.TanHParameter\x12\x32\n\x0fthreshold_param\x18\x19 \x01(\x0b\x32\x19.caffe.ThresholdParameter\x12\x35\n\x11window_data_param\x18\x14 \x01(\x0b\x32\x1a.caffe.WindowDataParameter\x12\x37\n\x0ftransform_param\x18$ \x01(\x0b\x32\x1e.caffe.TransformationParameter\x12(\n\nloss_param\x18* \x01(\x0b\x32\x14.caffe.LossParameter\x12&\n\x05layer\x18\x01 \x01(\x0b\x32\x17.caffe.V0LayerParameter\"\xd8\x04\n\tLayerType\x12\x08\n\x04NONE\x10\x00\x12\n\n\x06\x41\x42SVAL\x10#\x12\x0c\n\x08\x41\x43\x43URACY\x10\x01\x12\n\n\x06\x41RGMAX\x10\x1e\x12\x08\n\x04\x42NLL\x10\x02\x12\n\n\x06\x43ONCAT\x10\x03\x12\x14\n\x10\x43ONTRASTIVE_LOSS\x10%\x12\x0f\n\x0b\x43ONVOLUTION\x10\x04\x12\x08\n\x04\x44\x41TA\x10\x05\x12\x11\n\rDECONVOLUTION\x10\'\x12\x0b\n\x07\x44ROPOUT\x10\x06\x12\x0e\n\nDUMMY_DATA\x10 
\x12\x12\n\x0e\x45UCLIDEAN_LOSS\x10\x07\x12\x0b\n\x07\x45LTWISE\x10\x19\x12\x07\n\x03\x45XP\x10&\x12\x0b\n\x07\x46LATTEN\x10\x08\x12\r\n\tHDF5_DATA\x10\t\x12\x0f\n\x0bHDF5_OUTPUT\x10\n\x12\x0e\n\nHINGE_LOSS\x10\x1c\x12\n\n\x06IM2COL\x10\x0b\x12\x0e\n\nIMAGE_DATA\x10\x0c\x12\x11\n\rINFOGAIN_LOSS\x10\r\x12\x11\n\rINNER_PRODUCT\x10\x0e\x12\x07\n\x03LRN\x10\x0f\x12\x0f\n\x0bMEMORY_DATA\x10\x1d\x12\x1d\n\x19MULTINOMIAL_LOGISTIC_LOSS\x10\x10\x12\x07\n\x03MVN\x10\"\x12\x0b\n\x07POOLING\x10\x11\x12\t\n\x05POWER\x10\x1a\x12\x08\n\x04RELU\x10\x12\x12\x0b\n\x07SIGMOID\x10\x13\x12\x1e\n\x1aSIGMOID_CROSS_ENTROPY_LOSS\x10\x1b\x12\x0b\n\x07SILENCE\x10$\x12\x0b\n\x07SOFTMAX\x10\x14\x12\x10\n\x0cSOFTMAX_LOSS\x10\x15\x12\t\n\x05SPLIT\x10\x16\x12\t\n\x05SLICE\x10!\x12\x08\n\x04TANH\x10\x17\x12\x0f\n\x0bWINDOW_DATA\x10\x18\x12\r\n\tTHRESHOLD\x10\x1f\"*\n\x0c\x44imCheckMode\x12\n\n\x06STRICT\x10\x00\x12\x0e\n\nPERMISSIVE\x10\x01\"\xfd\x07\n\x10V0LayerParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x12\n\nnum_output\x18\x03 \x01(\r\x12\x16\n\x08\x62iasterm\x18\x04 \x01(\x08:\x04true\x12-\n\rweight_filler\x18\x05 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x06 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x0e\n\x03pad\x18\x07 \x01(\r:\x01\x30\x12\x12\n\nkernelsize\x18\x08 \x01(\r\x12\x10\n\x05group\x18\t \x01(\r:\x01\x31\x12\x11\n\x06stride\x18\n \x01(\r:\x01\x31\x12\x35\n\x04pool\x18\x0b \x01(\x0e\x32\".caffe.V0LayerParameter.PoolMethod:\x03MAX\x12\x1a\n\rdropout_ratio\x18\x0c \x01(\x02:\x03\x30.5\x12\x15\n\nlocal_size\x18\r \x01(\r:\x01\x35\x12\x10\n\x05\x61lpha\x18\x0e \x01(\x02:\x01\x31\x12\x12\n\x04\x62\x65ta\x18\x0f \x01(\x02:\x04\x30.75\x12\x0c\n\x01k\x18\x16 \x01(\x02:\x01\x31\x12\x0e\n\x06source\x18\x10 \x01(\t\x12\x10\n\x05scale\x18\x11 \x01(\x02:\x01\x31\x12\x10\n\x08meanfile\x18\x12 \x01(\t\x12\x11\n\tbatchsize\x18\x13 \x01(\r\x12\x13\n\x08\x63ropsize\x18\x14 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x15 
\x01(\x08:\x05\x66\x61lse\x12\x1f\n\x05\x62lobs\x18\x32 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x10\n\x08\x62lobs_lr\x18\x33 \x03(\x02\x12\x14\n\x0cweight_decay\x18\x34 \x03(\x02\x12\x14\n\trand_skip\x18\x35 \x01(\r:\x01\x30\x12\x1d\n\x10\x64\x65t_fg_threshold\x18\x36 \x01(\x02:\x03\x30.5\x12\x1d\n\x10\x64\x65t_bg_threshold\x18\x37 \x01(\x02:\x03\x30.5\x12\x1d\n\x0f\x64\x65t_fg_fraction\x18\x38 \x01(\x02:\x04\x30.25\x12\x1a\n\x0f\x64\x65t_context_pad\x18: \x01(\r:\x01\x30\x12\x1b\n\rdet_crop_mode\x18; \x01(\t:\x04warp\x12\x12\n\x07new_num\x18< \x01(\x05:\x01\x30\x12\x17\n\x0cnew_channels\x18= \x01(\x05:\x01\x30\x12\x15\n\nnew_height\x18> \x01(\x05:\x01\x30\x12\x14\n\tnew_width\x18? \x01(\x05:\x01\x30\x12\x1d\n\x0eshuffle_images\x18@ \x01(\x08:\x05\x66\x61lse\x12\x15\n\nconcat_dim\x18\x41 \x01(\r:\x01\x31\x12\x36\n\x11hdf5_output_param\x18\xe9\x07 \x01(\x0b\x32\x1a.caffe.HDF5OutputParameter\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"W\n\x0ePReLUParameter\x12&\n\x06\x66iller\x18\x01 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x1d\n\x0e\x63hannel_shared\x18\x02 \x01(\x08:\x05\x66\x61lse*\x1c\n\x05Phase\x12\t\n\x05TRAIN\x10\x00\x12\x08\n\x04TEST\x10\x01' ) _PHASE = _descriptor.EnumDescriptor( name='Phase', full_name='caffe.Phase', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='TRAIN', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='TEST', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=15681, serialized_end=15709, ) _sym_db.RegisterEnumDescriptor(_PHASE) Phase = enum_type_wrapper.EnumTypeWrapper(_PHASE) TRAIN = 0 TEST = 1 _FILLERPARAMETER_VARIANCENORM = _descriptor.EnumDescriptor( name='VarianceNorm', 
full_name='caffe.FillerParameter.VarianceNorm', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='FAN_IN', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='FAN_OUT', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='AVERAGE', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=658, serialized_end=710, ) _sym_db.RegisterEnumDescriptor(_FILLERPARAMETER_VARIANCENORM) _SOLVERPARAMETER_SNAPSHOTFORMAT = _descriptor.EnumDescriptor( name='SnapshotFormat', full_name='caffe.SolverParameter.SnapshotFormat', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='HDF5', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='BINARYPROTO', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=2188, serialized_end=2231, ) _sym_db.RegisterEnumDescriptor(_SOLVERPARAMETER_SNAPSHOTFORMAT) _SOLVERPARAMETER_SOLVERMODE = _descriptor.EnumDescriptor( name='SolverMode', full_name='caffe.SolverParameter.SolverMode', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='CPU', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='GPU', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=2233, 
serialized_end=2263, ) _sym_db.RegisterEnumDescriptor(_SOLVERPARAMETER_SOLVERMODE) _SOLVERPARAMETER_SOLVERTYPE = _descriptor.EnumDescriptor( name='SolverType', full_name='caffe.SolverParameter.SolverType', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='SGD', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='NESTEROV', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='ADAGRAD', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='RMSPROP', index=3, number=3, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='ADADELTA', index=4, number=4, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='ADAM', index=5, number=5, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=2265, serialized_end=2350, ) _sym_db.RegisterEnumDescriptor(_SOLVERPARAMETER_SOLVERTYPE) _PARAMSPEC_DIMCHECKMODE = _descriptor.EnumDescriptor( name='DimCheckMode', full_name='caffe.ParamSpec.DimCheckMode', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='STRICT', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='PERMISSIVE', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=2781, serialized_end=2823, ) _sym_db.RegisterEnumDescriptor(_PARAMSPEC_DIMCHECKMODE) 
_LOSSPARAMETER_NORMALIZATIONMODE = _descriptor.EnumDescriptor( name='NormalizationMode', full_name='caffe.LossParameter.NormalizationMode', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='FULL', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='VALID', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='BATCH_SIZE', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='NONE', index=3, number=3, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=5792, serialized_end=5858, ) _sym_db.RegisterEnumDescriptor(_LOSSPARAMETER_NORMALIZATIONMODE) _CONVOLUTIONPARAMETER_ENGINE = _descriptor.EnumDescriptor( name='Engine', full_name='caffe.ConvolutionParameter.Engine', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='DEFAULT', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='CAFFE', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='CUDNN', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=6866, serialized_end=6909, ) _sym_db.RegisterEnumDescriptor(_CONVOLUTIONPARAMETER_ENGINE) _DATAPARAMETER_DB = _descriptor.EnumDescriptor( name='DB', full_name='caffe.DataParameter.DB', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ 
_descriptor.EnumValueDescriptor( name='LEVELDB', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='LMDB', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=7227, serialized_end=7254, ) _sym_db.RegisterEnumDescriptor(_DATAPARAMETER_DB) _ELTWISEPARAMETER_ELTWISEOP = _descriptor.EnumDescriptor( name='EltwiseOp', full_name='caffe.EltwiseParameter.EltwiseOp', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='PROD', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='SUM', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='MAX', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=7594, serialized_end=7633, ) _sym_db.RegisterEnumDescriptor(_ELTWISEPARAMETER_ELTWISEOP) _HINGELOSSPARAMETER_NORM = _descriptor.EnumDescriptor( name='Norm', full_name='caffe.HingeLossParameter.Norm', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='L1', index=0, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='L2', index=1, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=8168, serialized_end=8190, ) _sym_db.RegisterEnumDescriptor(_HINGELOSSPARAMETER_NORM) _LRNPARAMETER_NORMREGION = _descriptor.EnumDescriptor( name='NormRegion', 
full_name='caffe.LRNParameter.NormRegion', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='ACROSS_CHANNELS', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='WITHIN_CHANNEL', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=9074, serialized_end=9127, ) _sym_db.RegisterEnumDescriptor(_LRNPARAMETER_NORMREGION) _LRNPARAMETER_ENGINE = _descriptor.EnumDescriptor( name='Engine', full_name='caffe.LRNParameter.Engine', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='DEFAULT', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='CAFFE', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='CUDNN', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=6866, serialized_end=6909, ) _sym_db.RegisterEnumDescriptor(_LRNPARAMETER_ENGINE) _POOLINGPARAMETER_POOLMETHOD = _descriptor.EnumDescriptor( name='PoolMethod', full_name='caffe.PoolingParameter.PoolMethod', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='MAX', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='AVE', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='STOCHASTIC', index=2, number=2, serialized_options=None, type=None, 
create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=9812, serialized_end=9858, ) _sym_db.RegisterEnumDescriptor(_POOLINGPARAMETER_POOLMETHOD) _POOLINGPARAMETER_ENGINE = _descriptor.EnumDescriptor( name='Engine', full_name='caffe.PoolingParameter.Engine', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='DEFAULT', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='CAFFE', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='CUDNN', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=6866, serialized_end=6909, ) _sym_db.RegisterEnumDescriptor(_POOLINGPARAMETER_ENGINE) _POOLINGPARAMETER_ROUNDMODE = _descriptor.EnumDescriptor( name='RoundMode', full_name='caffe.PoolingParameter.RoundMode', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='CEIL', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='FLOOR', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=9905, serialized_end=9937, ) _sym_db.RegisterEnumDescriptor(_POOLINGPARAMETER_ROUNDMODE) _REDUCTIONPARAMETER_REDUCTIONOP = _descriptor.EnumDescriptor( name='ReductionOp', full_name='caffe.ReductionParameter.ReductionOp', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='SUM', index=0, number=1, serialized_options=None, type=None, 
create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='ASUM', index=1, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='SUMSQ', index=2, number=3, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='MEAN', index=3, number=4, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=10432, serialized_end=10485, ) _sym_db.RegisterEnumDescriptor(_REDUCTIONPARAMETER_REDUCTIONOP) _RELUPARAMETER_ENGINE = _descriptor.EnumDescriptor( name='Engine', full_name='caffe.ReLUParameter.Engine', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='DEFAULT', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='CAFFE', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='CUDNN', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=6866, serialized_end=6909, ) _sym_db.RegisterEnumDescriptor(_RELUPARAMETER_ENGINE) _SIGMOIDPARAMETER_ENGINE = _descriptor.EnumDescriptor( name='Engine', full_name='caffe.SigmoidParameter.Engine', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='DEFAULT', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='CAFFE', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='CUDNN', index=2, 
number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=6866, serialized_end=6909, ) _sym_db.RegisterEnumDescriptor(_SIGMOIDPARAMETER_ENGINE) _SOFTMAXPARAMETER_ENGINE = _descriptor.EnumDescriptor( name='Engine', full_name='caffe.SoftmaxParameter.Engine', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='DEFAULT', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='CAFFE', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='CUDNN', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=6866, serialized_end=6909, ) _sym_db.RegisterEnumDescriptor(_SOFTMAXPARAMETER_ENGINE) _TANHPARAMETER_ENGINE = _descriptor.EnumDescriptor( name='Engine', full_name='caffe.TanHParameter.Engine', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='DEFAULT', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='CAFFE', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='CUDNN', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=6866, serialized_end=6909, ) _sym_db.RegisterEnumDescriptor(_TANHPARAMETER_ENGINE) _SPPPARAMETER_POOLMETHOD = _descriptor.EnumDescriptor( name='PoolMethod', full_name='caffe.SPPParameter.PoolMethod', filename=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='MAX', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='AVE', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='STOCHASTIC', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=9812, serialized_end=9858, ) _sym_db.RegisterEnumDescriptor(_SPPPARAMETER_POOLMETHOD) _SPPPARAMETER_ENGINE = _descriptor.EnumDescriptor( name='Engine', full_name='caffe.SPPParameter.Engine', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='DEFAULT', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='CAFFE', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='CUDNN', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=6866, serialized_end=6909, ) _sym_db.RegisterEnumDescriptor(_SPPPARAMETER_ENGINE) _V1LAYERPARAMETER_LAYERTYPE = _descriptor.EnumDescriptor( name='LayerType', full_name='caffe.V1LayerParameter.LayerType', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='NONE', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='ABSVAL', index=1, number=35, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='ACCURACY', 
index=2, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='ARGMAX', index=3, number=30, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='BNLL', index=4, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='CONCAT', index=5, number=3, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='CONTRASTIVE_LOSS', index=6, number=37, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='CONVOLUTION', index=7, number=4, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='DATA', index=8, number=5, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='DECONVOLUTION', index=9, number=39, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='DROPOUT', index=10, number=6, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='DUMMY_DATA', index=11, number=32, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='EUCLIDEAN_LOSS', index=12, number=7, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='ELTWISE', index=13, number=25, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='EXP', index=14, number=38, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='FLATTEN', index=15, number=8, 
serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='HDF5_DATA', index=16, number=9, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='HDF5_OUTPUT', index=17, number=10, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='HINGE_LOSS', index=18, number=28, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='IM2COL', index=19, number=11, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='IMAGE_DATA', index=20, number=12, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='INFOGAIN_LOSS', index=21, number=13, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='INNER_PRODUCT', index=22, number=14, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='LRN', index=23, number=15, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='MEMORY_DATA', index=24, number=29, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='MULTINOMIAL_LOGISTIC_LOSS', index=25, number=16, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='MVN', index=26, number=34, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='POOLING', index=27, number=17, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='POWER', index=28, 
number=26, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='RELU', index=29, number=18, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='SIGMOID', index=30, number=19, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='SIGMOID_CROSS_ENTROPY_LOSS', index=31, number=27, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='SILENCE', index=32, number=36, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='SOFTMAX', index=33, number=20, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='SOFTMAX_LOSS', index=34, number=21, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='SPLIT', index=35, number=22, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='SLICE', index=36, number=33, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='TANH', index=37, number=23, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='WINDOW_DATA', index=38, number=24, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='THRESHOLD', index=39, number=31, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=13922, serialized_end=14522, ) _sym_db.RegisterEnumDescriptor(_V1LAYERPARAMETER_LAYERTYPE) _V1LAYERPARAMETER_DIMCHECKMODE = 
_descriptor.EnumDescriptor( name='DimCheckMode', full_name='caffe.V1LayerParameter.DimCheckMode', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='STRICT', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='PERMISSIVE', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=2781, serialized_end=2823, ) _sym_db.RegisterEnumDescriptor(_V1LAYERPARAMETER_DIMCHECKMODE) _V0LAYERPARAMETER_POOLMETHOD = _descriptor.EnumDescriptor( name='PoolMethod', full_name='caffe.V0LayerParameter.PoolMethod', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='MAX', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='AVE', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='STOCHASTIC', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=9812, serialized_end=9858, ) _sym_db.RegisterEnumDescriptor(_V0LAYERPARAMETER_POOLMETHOD) _BLOBSHAPE = _descriptor.Descriptor( name='BlobShape', full_name='caffe.BlobShape', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='dim', full_name='caffe.BlobShape.dim', index=0, number=1, type=3, cpp_type=2, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\020\001', file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=22, serialized_end=50, ) _BLOBPROTO = _descriptor.Descriptor( name='BlobProto', full_name='caffe.BlobProto', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='shape', full_name='caffe.BlobProto.shape', index=0, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='data', full_name='caffe.BlobProto.data', index=1, number=5, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='diff', full_name='caffe.BlobProto.diff', index=2, number=6, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='double_data', full_name='caffe.BlobProto.double_data', index=3, number=8, type=1, cpp_type=5, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='double_diff', full_name='caffe.BlobProto.double_diff', index=4, number=9, 
type=1, cpp_type=5, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='num', full_name='caffe.BlobProto.num', index=5, number=1, type=5, cpp_type=1, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='channels', full_name='caffe.BlobProto.channels', index=6, number=2, type=5, cpp_type=1, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='height', full_name='caffe.BlobProto.height', index=7, number=3, type=5, cpp_type=1, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='width', full_name='caffe.BlobProto.width', index=8, number=4, type=5, cpp_type=1, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=53, serialized_end=257, ) _BLOBPROTOVECTOR = _descriptor.Descriptor( name='BlobProtoVector', full_name='caffe.BlobProtoVector', filename=None, 
file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='blobs', full_name='caffe.BlobProtoVector.blobs', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=259, serialized_end=309, ) _DATUM = _descriptor.Descriptor( name='Datum', full_name='caffe.Datum', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='channels', full_name='caffe.Datum.channels', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='height', full_name='caffe.Datum.height', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='width', full_name='caffe.Datum.width', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='data', full_name='caffe.Datum.data', index=3, number=4, type=12, 
cpp_type=9, label=1, has_default_value=False, default_value=b"", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='label', full_name='caffe.Datum.label', index=4, number=5, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='float_data', full_name='caffe.Datum.float_data', index=5, number=6, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='encoded', full_name='caffe.Datum.encoded', index=6, number=7, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=312, serialized_end=441, ) _FILLERPARAMETER = _descriptor.Descriptor( name='FillerParameter', full_name='caffe.FillerParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='type', full_name='caffe.FillerParameter.type', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b"constant".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='value', full_name='caffe.FillerParameter.value', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='min', full_name='caffe.FillerParameter.min', index=2, number=3, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max', full_name='caffe.FillerParameter.max', index=3, number=4, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mean', full_name='caffe.FillerParameter.mean', index=4, number=5, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='std', full_name='caffe.FillerParameter.std', index=5, number=6, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='sparse', 
full_name='caffe.FillerParameter.sparse', index=6, number=7, type=5, cpp_type=1, label=1, has_default_value=True, default_value=-1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='variance_norm', full_name='caffe.FillerParameter.variance_norm', index=7, number=8, type=14, cpp_type=8, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _FILLERPARAMETER_VARIANCENORM, ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=444, serialized_end=710, ) _NETPARAMETER = _descriptor.Descriptor( name='NetParameter', full_name='caffe.NetParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='name', full_name='caffe.NetParameter.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='input', full_name='caffe.NetParameter.input', index=1, number=3, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='input_shape', full_name='caffe.NetParameter.input_shape', index=2, number=8, type=11, cpp_type=10, label=3, 
has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='input_dim', full_name='caffe.NetParameter.input_dim', index=3, number=4, type=5, cpp_type=1, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='force_backward', full_name='caffe.NetParameter.force_backward', index=4, number=5, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='state', full_name='caffe.NetParameter.state', index=5, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='debug_info', full_name='caffe.NetParameter.debug_info', index=6, number=7, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='layer', full_name='caffe.NetParameter.layer', index=7, number=100, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='layers', full_name='caffe.NetParameter.layers', index=8, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=713, serialized_end=983, ) _SOLVERPARAMETER = _descriptor.Descriptor( name='SolverParameter', full_name='caffe.SolverParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='net', full_name='caffe.SolverParameter.net', index=0, number=24, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='net_param', full_name='caffe.SolverParameter.net_param', index=1, number=25, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='train_net', full_name='caffe.SolverParameter.train_net', index=2, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 
_descriptor.FieldDescriptor( name='test_net', full_name='caffe.SolverParameter.test_net', index=3, number=2, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='train_net_param', full_name='caffe.SolverParameter.train_net_param', index=4, number=21, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='test_net_param', full_name='caffe.SolverParameter.test_net_param', index=5, number=22, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='train_state', full_name='caffe.SolverParameter.train_state', index=6, number=26, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='test_state', full_name='caffe.SolverParameter.test_state', index=7, number=27, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='test_iter', full_name='caffe.SolverParameter.test_iter', index=8, 
number=3, type=5, cpp_type=1, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='test_interval', full_name='caffe.SolverParameter.test_interval', index=9, number=4, type=5, cpp_type=1, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='test_compute_loss', full_name='caffe.SolverParameter.test_compute_loss', index=10, number=19, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='test_initialization', full_name='caffe.SolverParameter.test_initialization', index=11, number=32, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='base_lr', full_name='caffe.SolverParameter.base_lr', index=12, number=5, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='display', full_name='caffe.SolverParameter.display', index=13, number=6, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, 
enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='average_loss', full_name='caffe.SolverParameter.average_loss', index=14, number=33, type=5, cpp_type=1, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_iter', full_name='caffe.SolverParameter.max_iter', index=15, number=7, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='iter_size', full_name='caffe.SolverParameter.iter_size', index=16, number=36, type=5, cpp_type=1, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='lr_policy', full_name='caffe.SolverParameter.lr_policy', index=17, number=8, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='gamma', full_name='caffe.SolverParameter.gamma', index=18, number=9, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='power', full_name='caffe.SolverParameter.power', index=19, number=10, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='momentum', full_name='caffe.SolverParameter.momentum', index=20, number=11, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='weight_decay', full_name='caffe.SolverParameter.weight_decay', index=21, number=12, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='regularization_type', full_name='caffe.SolverParameter.regularization_type', index=22, number=29, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b"L2".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='stepsize', full_name='caffe.SolverParameter.stepsize', index=23, number=13, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( 
name='stepvalue', full_name='caffe.SolverParameter.stepvalue', index=24, number=34, type=5, cpp_type=1, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='clip_gradients', full_name='caffe.SolverParameter.clip_gradients', index=25, number=35, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(-1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='snapshot', full_name='caffe.SolverParameter.snapshot', index=26, number=14, type=5, cpp_type=1, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='snapshot_prefix', full_name='caffe.SolverParameter.snapshot_prefix', index=27, number=15, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='snapshot_diff', full_name='caffe.SolverParameter.snapshot_diff', index=28, number=16, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='snapshot_format', full_name='caffe.SolverParameter.snapshot_format', index=29, 
number=37, type=14, cpp_type=8, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='solver_mode', full_name='caffe.SolverParameter.solver_mode', index=30, number=17, type=14, cpp_type=8, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='device_id', full_name='caffe.SolverParameter.device_id', index=31, number=18, type=5, cpp_type=1, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='random_seed', full_name='caffe.SolverParameter.random_seed', index=32, number=20, type=3, cpp_type=2, label=1, has_default_value=True, default_value=-1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='type', full_name='caffe.SolverParameter.type', index=33, number=40, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b"SGD".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='delta', full_name='caffe.SolverParameter.delta', index=34, number=31, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1e-08), message_type=None, enum_type=None, 
containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='momentum2', full_name='caffe.SolverParameter.momentum2', index=35, number=39, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.999), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='rms_decay', full_name='caffe.SolverParameter.rms_decay', index=36, number=38, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.99), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='debug_info', full_name='caffe.SolverParameter.debug_info', index=37, number=23, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='snapshot_after_train', full_name='caffe.SolverParameter.snapshot_after_train', index=38, number=28, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='solver_type', full_name='caffe.SolverParameter.solver_type', index=39, number=30, type=14, cpp_type=8, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, 
file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='layer_wise_reduce', full_name='caffe.SolverParameter.layer_wise_reduce', index=40, number=41, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='weights', full_name='caffe.SolverParameter.weights', index=41, number=42, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _SOLVERPARAMETER_SNAPSHOTFORMAT, _SOLVERPARAMETER_SOLVERMODE, _SOLVERPARAMETER_SOLVERTYPE, ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=986, serialized_end=2350, ) _SOLVERSTATE = _descriptor.Descriptor( name='SolverState', full_name='caffe.SolverState', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='iter', full_name='caffe.SolverState.iter', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='learned_net', full_name='caffe.SolverState.learned_net', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='history', full_name='caffe.SolverState.history', index=2, number=3, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='current_step', full_name='caffe.SolverState.current_step', index=3, number=4, type=5, cpp_type=1, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=2352, serialized_end=2460, ) _NETSTATE = _descriptor.Descriptor( name='NetState', full_name='caffe.NetState', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='phase', full_name='caffe.NetState.phase', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='level', full_name='caffe.NetState.level', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='stage', full_name='caffe.NetState.stage', index=2, number=3, type=9, cpp_type=9, label=3, 
has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=2462, serialized_end=2540, ) _NETSTATERULE = _descriptor.Descriptor( name='NetStateRule', full_name='caffe.NetStateRule', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='phase', full_name='caffe.NetStateRule.phase', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='min_level', full_name='caffe.NetStateRule.min_level', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max_level', full_name='caffe.NetStateRule.max_level', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='stage', full_name='caffe.NetStateRule.stage', index=3, number=4, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='not_stage', full_name='caffe.NetStateRule.not_stage', index=4, number=5, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=2542, serialized_end=2657, ) _PARAMSPEC = _descriptor.Descriptor( name='ParamSpec', full_name='caffe.ParamSpec', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='name', full_name='caffe.ParamSpec.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='share_mode', full_name='caffe.ParamSpec.share_mode', index=1, number=2, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='lr_mult', full_name='caffe.ParamSpec.lr_mult', index=2, number=3, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='decay_mult', 
full_name='caffe.ParamSpec.decay_mult', index=3, number=4, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _PARAMSPEC_DIMCHECKMODE, ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=2660, serialized_end=2823, ) _LAYERPARAMETER = _descriptor.Descriptor( name='LayerParameter', full_name='caffe.LayerParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='name', full_name='caffe.LayerParameter.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='type', full_name='caffe.LayerParameter.type', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='bottom', full_name='caffe.LayerParameter.bottom', index=2, number=3, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='top', full_name='caffe.LayerParameter.top', index=3, number=4, type=9, cpp_type=9, label=3, 
has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='phase', full_name='caffe.LayerParameter.phase', index=4, number=10, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='loss_weight', full_name='caffe.LayerParameter.loss_weight', index=5, number=5, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='param', full_name='caffe.LayerParameter.param', index=6, number=6, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='blobs', full_name='caffe.LayerParameter.blobs', index=7, number=7, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='propagate_down', full_name='caffe.LayerParameter.propagate_down', index=8, number=11, type=8, cpp_type=7, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='include', full_name='caffe.LayerParameter.include', index=9, number=8, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='exclude', full_name='caffe.LayerParameter.exclude', index=10, number=9, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='transform_param', full_name='caffe.LayerParameter.transform_param', index=11, number=100, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='loss_param', full_name='caffe.LayerParameter.loss_param', index=12, number=101, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='accuracy_param', full_name='caffe.LayerParameter.accuracy_param', index=13, number=102, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 
_descriptor.FieldDescriptor( name='argmax_param', full_name='caffe.LayerParameter.argmax_param', index=14, number=103, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='batch_norm_param', full_name='caffe.LayerParameter.batch_norm_param', index=15, number=139, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='bias_param', full_name='caffe.LayerParameter.bias_param', index=16, number=141, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='clip_param', full_name='caffe.LayerParameter.clip_param', index=17, number=148, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='concat_param', full_name='caffe.LayerParameter.concat_param', index=18, number=104, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='contrastive_loss_param', 
full_name='caffe.LayerParameter.contrastive_loss_param', index=19, number=105, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='convolution_param', full_name='caffe.LayerParameter.convolution_param', index=20, number=106, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='crop_param', full_name='caffe.LayerParameter.crop_param', index=21, number=144, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='data_param', full_name='caffe.LayerParameter.data_param', index=22, number=107, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='dropout_param', full_name='caffe.LayerParameter.dropout_param', index=23, number=108, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='dummy_data_param', full_name='caffe.LayerParameter.dummy_data_param', index=24, 
number=109, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='eltwise_param', full_name='caffe.LayerParameter.eltwise_param', index=25, number=110, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='elu_param', full_name='caffe.LayerParameter.elu_param', index=26, number=140, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='embed_param', full_name='caffe.LayerParameter.embed_param', index=27, number=137, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='exp_param', full_name='caffe.LayerParameter.exp_param', index=28, number=111, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='flatten_param', full_name='caffe.LayerParameter.flatten_param', index=29, number=135, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, 
message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='hdf5_data_param', full_name='caffe.LayerParameter.hdf5_data_param', index=30, number=112, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='hdf5_output_param', full_name='caffe.LayerParameter.hdf5_output_param', index=31, number=113, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='hinge_loss_param', full_name='caffe.LayerParameter.hinge_loss_param', index=32, number=114, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='image_data_param', full_name='caffe.LayerParameter.image_data_param', index=33, number=115, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='infogain_loss_param', full_name='caffe.LayerParameter.infogain_loss_param', index=34, number=116, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, 
containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='inner_product_param', full_name='caffe.LayerParameter.inner_product_param', index=35, number=117, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='input_param', full_name='caffe.LayerParameter.input_param', index=36, number=143, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='log_param', full_name='caffe.LayerParameter.log_param', index=37, number=134, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='lrn_param', full_name='caffe.LayerParameter.lrn_param', index=38, number=118, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='memory_data_param', full_name='caffe.LayerParameter.memory_data_param', index=39, number=119, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mvn_param', full_name='caffe.LayerParameter.mvn_param', index=40, number=120, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='parameter_param', full_name='caffe.LayerParameter.parameter_param', index=41, number=145, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='pooling_param', full_name='caffe.LayerParameter.pooling_param', index=42, number=121, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='power_param', full_name='caffe.LayerParameter.power_param', index=43, number=122, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='prelu_param', full_name='caffe.LayerParameter.prelu_param', index=44, number=131, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 
_descriptor.FieldDescriptor( name='python_param', full_name='caffe.LayerParameter.python_param', index=45, number=130, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='recurrent_param', full_name='caffe.LayerParameter.recurrent_param', index=46, number=146, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='reduction_param', full_name='caffe.LayerParameter.reduction_param', index=47, number=136, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='relu_param', full_name='caffe.LayerParameter.relu_param', index=48, number=123, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='reshape_param', full_name='caffe.LayerParameter.reshape_param', index=49, number=133, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='scale_param', 
full_name='caffe.LayerParameter.scale_param', index=50, number=142, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='sigmoid_param', full_name='caffe.LayerParameter.sigmoid_param', index=51, number=124, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='softmax_param', full_name='caffe.LayerParameter.softmax_param', index=52, number=125, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='spp_param', full_name='caffe.LayerParameter.spp_param', index=53, number=132, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='slice_param', full_name='caffe.LayerParameter.slice_param', index=54, number=126, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='swish_param', full_name='caffe.LayerParameter.swish_param', index=55, number=147, type=11, cpp_type=10, 
label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='tanh_param', full_name='caffe.LayerParameter.tanh_param', index=56, number=127, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='threshold_param', full_name='caffe.LayerParameter.threshold_param', index=57, number=128, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='tile_param', full_name='caffe.LayerParameter.tile_param', index=58, number=138, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='window_data_param', full_name='caffe.LayerParameter.window_data_param', index=59, number=129, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=2826, serialized_end=5476, ) _TRANSFORMATIONPARAMETER = 
_descriptor.Descriptor( name='TransformationParameter', full_name='caffe.TransformationParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='scale', full_name='caffe.TransformationParameter.scale', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mirror', full_name='caffe.TransformationParameter.mirror', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='crop_size', full_name='caffe.TransformationParameter.crop_size', index=2, number=3, type=13, cpp_type=3, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mean_file', full_name='caffe.TransformationParameter.mean_file', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mean_value', full_name='caffe.TransformationParameter.mean_value', index=4, number=5, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='force_color', full_name='caffe.TransformationParameter.force_color', index=5, number=6, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='force_gray', full_name='caffe.TransformationParameter.force_gray', index=6, number=7, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=5479, serialized_end=5661, ) _LOSSPARAMETER = _descriptor.Descriptor( name='LossParameter', full_name='caffe.LossParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='ignore_label', full_name='caffe.LossParameter.ignore_label', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='normalization', full_name='caffe.LossParameter.normalization', index=1, number=3, type=14, cpp_type=8, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='normalize', full_name='caffe.LossParameter.normalize', index=2, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _LOSSPARAMETER_NORMALIZATIONMODE, ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=5664, serialized_end=5858, ) _ACCURACYPARAMETER = _descriptor.Descriptor( name='AccuracyParameter', full_name='caffe.AccuracyParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='top_k', full_name='caffe.AccuracyParameter.top_k', index=0, number=1, type=13, cpp_type=3, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='axis', full_name='caffe.AccuracyParameter.axis', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='ignore_label', full_name='caffe.AccuracyParameter.ignore_label', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], 
enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=5860, serialized_end=5936, ) _ARGMAXPARAMETER = _descriptor.Descriptor( name='ArgMaxParameter', full_name='caffe.ArgMaxParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='out_max_val', full_name='caffe.ArgMaxParameter.out_max_val', index=0, number=1, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='top_k', full_name='caffe.ArgMaxParameter.top_k', index=1, number=2, type=13, cpp_type=3, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='axis', full_name='caffe.ArgMaxParameter.axis', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=5938, serialized_end=6015, ) _CLIPPARAMETER = _descriptor.Descriptor( name='ClipParameter', full_name='caffe.ClipParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='min', full_name='caffe.ClipParameter.min', index=0, number=1, type=2, cpp_type=6, label=2, 
has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='max', full_name='caffe.ClipParameter.max', index=1, number=2, type=2, cpp_type=6, label=2, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=6017, serialized_end=6058, ) _CONCATPARAMETER = _descriptor.Descriptor( name='ConcatParameter', full_name='caffe.ConcatParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='axis', full_name='caffe.ConcatParameter.axis', index=0, number=2, type=5, cpp_type=1, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='concat_dim', full_name='caffe.ConcatParameter.concat_dim', index=1, number=1, type=13, cpp_type=3, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=6060, serialized_end=6117, ) _BATCHNORMPARAMETER = _descriptor.Descriptor( name='BatchNormParameter', 
full_name='caffe.BatchNormParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='use_global_stats', full_name='caffe.BatchNormParameter.use_global_stats', index=0, number=1, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='moving_average_fraction', full_name='caffe.BatchNormParameter.moving_average_fraction', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.999), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='eps', full_name='caffe.BatchNormParameter.eps', index=2, number=3, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1e-05), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=6119, serialized_end=6225, ) _BIASPARAMETER = _descriptor.Descriptor( name='BiasParameter', full_name='caffe.BiasParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='axis', full_name='caffe.BiasParameter.axis', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='num_axes', full_name='caffe.BiasParameter.num_axes', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='filler', full_name='caffe.BiasParameter.filler', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=6227, serialized_end=6320, ) _CONTRASTIVELOSSPARAMETER = _descriptor.Descriptor( name='ContrastiveLossParameter', full_name='caffe.ContrastiveLossParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='margin', full_name='caffe.ContrastiveLossParameter.margin', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='legacy_version', full_name='caffe.ContrastiveLossParameter.legacy_version', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, 
# NOTE(review): machine-generated protobuf descriptor definitions — this appears
# to be protoc output for caffe.proto using the legacy google.protobuf
# `_descriptor.Descriptor(...)` API (`create_key=_descriptor._internal_create_key`).
# Do NOT edit by hand: regenerate from caffe.proto instead. The field numbers and
# `serialized_start`/`serialized_end` offsets must stay in sync with the serialized
# file descriptor defined earlier in this module.
create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=6322, serialized_end=6398, ) _CONVOLUTIONPARAMETER = _descriptor.Descriptor( name='ConvolutionParameter', full_name='caffe.ConvolutionParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='num_output', full_name='caffe.ConvolutionParameter.num_output', index=0, number=1, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='bias_term', full_name='caffe.ConvolutionParameter.bias_term', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='pad', full_name='caffe.ConvolutionParameter.pad', index=2, number=3, type=13, cpp_type=3, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='kernel_size', full_name='caffe.ConvolutionParameter.kernel_size', index=3, number=4, type=13, cpp_type=3, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( 
name='stride', full_name='caffe.ConvolutionParameter.stride', index=4, number=6, type=13, cpp_type=3, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='dilation', full_name='caffe.ConvolutionParameter.dilation', index=5, number=18, type=13, cpp_type=3, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='pad_h', full_name='caffe.ConvolutionParameter.pad_h', index=6, number=9, type=13, cpp_type=3, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='pad_w', full_name='caffe.ConvolutionParameter.pad_w', index=7, number=10, type=13, cpp_type=3, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='kernel_h', full_name='caffe.ConvolutionParameter.kernel_h', index=8, number=11, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='kernel_w', full_name='caffe.ConvolutionParameter.kernel_w', index=9, number=12, type=13, cpp_type=3, label=1, has_default_value=False, 
default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='stride_h', full_name='caffe.ConvolutionParameter.stride_h', index=10, number=13, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='stride_w', full_name='caffe.ConvolutionParameter.stride_w', index=11, number=14, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='group', full_name='caffe.ConvolutionParameter.group', index=12, number=5, type=13, cpp_type=3, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='weight_filler', full_name='caffe.ConvolutionParameter.weight_filler', index=13, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='bias_filler', full_name='caffe.ConvolutionParameter.bias_filler', index=14, number=8, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='engine', full_name='caffe.ConvolutionParameter.engine', index=15, number=15, type=14, cpp_type=8, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='axis', full_name='caffe.ConvolutionParameter.axis', index=16, number=16, type=5, cpp_type=1, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='force_nd_im2col', full_name='caffe.ConvolutionParameter.force_nd_im2col', index=17, number=17, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _CONVOLUTIONPARAMETER_ENGINE, ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=6401, serialized_end=6909, ) _CROPPARAMETER = _descriptor.Descriptor( name='CropParameter', full_name='caffe.CropParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='axis', full_name='caffe.CropParameter.axis', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=True, default_value=2, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='offset', full_name='caffe.CropParameter.offset', index=1, number=2, type=13, cpp_type=3, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=6911, serialized_end=6959, ) _DATAPARAMETER = _descriptor.Descriptor( name='DataParameter', full_name='caffe.DataParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='source', full_name='caffe.DataParameter.source', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='batch_size', full_name='caffe.DataParameter.batch_size', index=1, number=4, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='rand_skip', full_name='caffe.DataParameter.rand_skip', index=2, number=7, type=13, cpp_type=3, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='backend', 
full_name='caffe.DataParameter.backend', index=3, number=8, type=14, cpp_type=8, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='scale', full_name='caffe.DataParameter.scale', index=4, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mean_file', full_name='caffe.DataParameter.mean_file', index=5, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='crop_size', full_name='caffe.DataParameter.crop_size', index=6, number=5, type=13, cpp_type=3, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mirror', full_name='caffe.DataParameter.mirror', index=7, number=6, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='force_encoded_color', full_name='caffe.DataParameter.force_encoded_color', index=8, number=9, type=8, cpp_type=7, label=1, has_default_value=True, 
default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='prefetch', full_name='caffe.DataParameter.prefetch', index=9, number=10, type=13, cpp_type=3, label=1, has_default_value=True, default_value=4, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _DATAPARAMETER_DB, ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=6962, serialized_end=7254, ) _DROPOUTPARAMETER = _descriptor.Descriptor( name='DropoutParameter', full_name='caffe.DropoutParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='dropout_ratio', full_name='caffe.DropoutParameter.dropout_ratio', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.5), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=7256, serialized_end=7302, ) _DUMMYDATAPARAMETER = _descriptor.Descriptor( name='DummyDataParameter', full_name='caffe.DummyDataParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='data_filler', full_name='caffe.DummyDataParameter.data_filler', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], 
message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='shape', full_name='caffe.DummyDataParameter.shape', index=1, number=6, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='num', full_name='caffe.DummyDataParameter.num', index=2, number=2, type=13, cpp_type=3, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='channels', full_name='caffe.DummyDataParameter.channels', index=3, number=3, type=13, cpp_type=3, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='height', full_name='caffe.DummyDataParameter.height', index=4, number=4, type=13, cpp_type=3, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='width', full_name='caffe.DummyDataParameter.width', index=5, number=5, type=13, cpp_type=3, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=7305, serialized_end=7465, ) _ELTWISEPARAMETER = _descriptor.Descriptor( name='EltwiseParameter', full_name='caffe.EltwiseParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='operation', full_name='caffe.EltwiseParameter.operation', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='coeff', full_name='caffe.EltwiseParameter.coeff', index=1, number=2, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='stable_prod_grad', full_name='caffe.EltwiseParameter.stable_prod_grad', index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _ELTWISEPARAMETER_ELTWISEOP, ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=7468, serialized_end=7633, ) _ELUPARAMETER = _descriptor.Descriptor( name='ELUParameter', full_name='caffe.ELUParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ 
_descriptor.FieldDescriptor( name='alpha', full_name='caffe.ELUParameter.alpha', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=7635, serialized_end=7667, ) _EMBEDPARAMETER = _descriptor.Descriptor( name='EmbedParameter', full_name='caffe.EmbedParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='num_output', full_name='caffe.EmbedParameter.num_output', index=0, number=1, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='input_dim', full_name='caffe.EmbedParameter.input_dim', index=1, number=2, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='bias_term', full_name='caffe.EmbedParameter.bias_term', index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='weight_filler', full_name='caffe.EmbedParameter.weight_filler', index=3, number=4, 
type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='bias_filler', full_name='caffe.EmbedParameter.bias_filler', index=4, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=7670, serialized_end=7842, ) _EXPPARAMETER = _descriptor.Descriptor( name='ExpParameter', full_name='caffe.ExpParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='base', full_name='caffe.ExpParameter.base', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(-1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='scale', full_name='caffe.ExpParameter.scale', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='shift', full_name='caffe.ExpParameter.shift', index=2, number=3, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, 
is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=7844, serialized_end=7912, ) _FLATTENPARAMETER = _descriptor.Descriptor( name='FlattenParameter', full_name='caffe.FlattenParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='axis', full_name='caffe.FlattenParameter.axis', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='end_axis', full_name='caffe.FlattenParameter.end_axis', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=True, default_value=-1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=7914, serialized_end=7971, ) _HDF5DATAPARAMETER = _descriptor.Descriptor( name='HDF5DataParameter', full_name='caffe.HDF5DataParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='source', full_name='caffe.HDF5DataParameter.source', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, 
file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='batch_size', full_name='caffe.HDF5DataParameter.batch_size', index=1, number=2, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='shuffle', full_name='caffe.HDF5DataParameter.shuffle', index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=7973, serialized_end=8052, ) _HDF5OUTPUTPARAMETER = _descriptor.Descriptor( name='HDF5OutputParameter', full_name='caffe.HDF5OutputParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='file_name', full_name='caffe.HDF5OutputParameter.file_name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=8054, serialized_end=8094, ) _HINGELOSSPARAMETER = _descriptor.Descriptor( name='HingeLossParameter', full_name='caffe.HingeLossParameter', filename=None, file=DESCRIPTOR, containing_type=None, 
create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='norm', full_name='caffe.HingeLossParameter.norm', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _HINGELOSSPARAMETER_NORM, ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=8096, serialized_end=8190, ) _IMAGEDATAPARAMETER = _descriptor.Descriptor( name='ImageDataParameter', full_name='caffe.ImageDataParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='source', full_name='caffe.ImageDataParameter.source', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='batch_size', full_name='caffe.ImageDataParameter.batch_size', index=1, number=4, type=13, cpp_type=3, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='rand_skip', full_name='caffe.ImageDataParameter.rand_skip', index=2, number=7, type=13, cpp_type=3, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 
_descriptor.FieldDescriptor( name='shuffle', full_name='caffe.ImageDataParameter.shuffle', index=3, number=8, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='new_height', full_name='caffe.ImageDataParameter.new_height', index=4, number=9, type=13, cpp_type=3, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='new_width', full_name='caffe.ImageDataParameter.new_width', index=5, number=10, type=13, cpp_type=3, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='is_color', full_name='caffe.ImageDataParameter.is_color', index=6, number=11, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='scale', full_name='caffe.ImageDataParameter.scale', index=7, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mean_file', full_name='caffe.ImageDataParameter.mean_file', index=8, number=3, type=9, cpp_type=9, 
label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='crop_size', full_name='caffe.ImageDataParameter.crop_size', index=9, number=5, type=13, cpp_type=3, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mirror', full_name='caffe.ImageDataParameter.mirror', index=10, number=6, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='root_folder', full_name='caffe.ImageDataParameter.root_folder', index=11, number=12, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=8193, serialized_end=8472, ) _INFOGAINLOSSPARAMETER = _descriptor.Descriptor( name='InfogainLossParameter', full_name='caffe.InfogainLossParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='source', full_name='caffe.InfogainLossParameter.source', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, 
default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='axis', full_name='caffe.InfogainLossParameter.axis', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=8474, serialized_end=8530, ) _INNERPRODUCTPARAMETER = _descriptor.Descriptor( name='InnerProductParameter', full_name='caffe.InnerProductParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='num_output', full_name='caffe.InnerProductParameter.num_output', index=0, number=1, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='bias_term', full_name='caffe.InnerProductParameter.bias_term', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='weight_filler', full_name='caffe.InnerProductParameter.weight_filler', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, 
containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='bias_filler', full_name='caffe.InnerProductParameter.bias_filler', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='axis', full_name='caffe.InnerProductParameter.axis', index=4, number=5, type=5, cpp_type=1, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='transpose', full_name='caffe.InnerProductParameter.transpose', index=5, number=6, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=8533, serialized_end=8736, ) _INPUTPARAMETER = _descriptor.Descriptor( name='InputParameter', full_name='caffe.InputParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='shape', full_name='caffe.InputParameter.shape', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=8738, serialized_end=8787, ) _LOGPARAMETER = _descriptor.Descriptor( name='LogParameter', full_name='caffe.LogParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='base', full_name='caffe.LogParameter.base', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(-1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='scale', full_name='caffe.LogParameter.scale', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='shift', full_name='caffe.LogParameter.shift', index=2, number=3, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=8789, serialized_end=8857, ) _LRNPARAMETER = _descriptor.Descriptor( name='LRNParameter', full_name='caffe.LRNParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='local_size', 
full_name='caffe.LRNParameter.local_size', index=0, number=1, type=13, cpp_type=3, label=1, has_default_value=True, default_value=5, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='alpha', full_name='caffe.LRNParameter.alpha', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='beta', full_name='caffe.LRNParameter.beta', index=2, number=3, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.75), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='norm_region', full_name='caffe.LRNParameter.norm_region', index=3, number=4, type=14, cpp_type=8, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='k', full_name='caffe.LRNParameter.k', index=4, number=5, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='engine', full_name='caffe.LRNParameter.engine', index=5, number=6, type=14, cpp_type=8, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, 
containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _LRNPARAMETER_NORMREGION, _LRNPARAMETER_ENGINE, ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=8860, serialized_end=9172, ) _MEMORYDATAPARAMETER = _descriptor.Descriptor( name='MemoryDataParameter', full_name='caffe.MemoryDataParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='batch_size', full_name='caffe.MemoryDataParameter.batch_size', index=0, number=1, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='channels', full_name='caffe.MemoryDataParameter.channels', index=1, number=2, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='height', full_name='caffe.MemoryDataParameter.height', index=2, number=3, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='width', full_name='caffe.MemoryDataParameter.width', index=3, number=4, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=9174, serialized_end=9264, ) _MVNPARAMETER = _descriptor.Descriptor( name='MVNParameter', full_name='caffe.MVNParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='normalize_variance', full_name='caffe.MVNParameter.normalize_variance', index=0, number=1, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='across_channels', full_name='caffe.MVNParameter.across_channels', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='eps', full_name='caffe.MVNParameter.eps', index=2, number=3, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1e-09), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=9266, serialized_end=9366, ) _PARAMETERPARAMETER = _descriptor.Descriptor( name='ParameterParameter', full_name='caffe.ParameterParameter', filename=None, file=DESCRIPTOR, containing_type=None, 
create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='shape', full_name='caffe.ParameterParameter.shape', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=9368, serialized_end=9421, ) _POOLINGPARAMETER = _descriptor.Descriptor( name='PoolingParameter', full_name='caffe.PoolingParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='pool', full_name='caffe.PoolingParameter.pool', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='pad', full_name='caffe.PoolingParameter.pad', index=1, number=4, type=13, cpp_type=3, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='pad_h', full_name='caffe.PoolingParameter.pad_h', index=2, number=9, type=13, cpp_type=3, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='pad_w', full_name='caffe.PoolingParameter.pad_w', 
index=3, number=10, type=13, cpp_type=3, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='kernel_size', full_name='caffe.PoolingParameter.kernel_size', index=4, number=2, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='kernel_h', full_name='caffe.PoolingParameter.kernel_h', index=5, number=5, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='kernel_w', full_name='caffe.PoolingParameter.kernel_w', index=6, number=6, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='stride', full_name='caffe.PoolingParameter.stride', index=7, number=3, type=13, cpp_type=3, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='stride_h', full_name='caffe.PoolingParameter.stride_h', index=8, number=7, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, 
is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='stride_w', full_name='caffe.PoolingParameter.stride_w', index=9, number=8, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='engine', full_name='caffe.PoolingParameter.engine', index=10, number=11, type=14, cpp_type=8, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='global_pooling', full_name='caffe.PoolingParameter.global_pooling', index=11, number=12, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='round_mode', full_name='caffe.PoolingParameter.round_mode', index=12, number=13, type=14, cpp_type=8, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _POOLINGPARAMETER_POOLMETHOD, _POOLINGPARAMETER_ENGINE, _POOLINGPARAMETER_ROUNDMODE, ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=9424, serialized_end=9937, ) _POWERPARAMETER = _descriptor.Descriptor( name='PowerParameter', 
full_name='caffe.PowerParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='power', full_name='caffe.PowerParameter.power', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='scale', full_name='caffe.PowerParameter.scale', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='shift', full_name='caffe.PowerParameter.shift', index=2, number=3, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=9939, serialized_end=10009, ) _PYTHONPARAMETER = _descriptor.Descriptor( name='PythonParameter', full_name='caffe.PythonParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='module', full_name='caffe.PythonParameter.module', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='layer', full_name='caffe.PythonParameter.layer', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='param_str', full_name='caffe.PythonParameter.param_str', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='share_in_parallel', full_name='caffe.PythonParameter.share_in_parallel', index=3, number=4, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=10011, serialized_end=10114, ) _RECURRENTPARAMETER = _descriptor.Descriptor( name='RecurrentParameter', full_name='caffe.RecurrentParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='num_output', full_name='caffe.RecurrentParameter.num_output', index=0, number=1, type=13, cpp_type=3, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 
_descriptor.FieldDescriptor( name='weight_filler', full_name='caffe.RecurrentParameter.weight_filler', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='bias_filler', full_name='caffe.RecurrentParameter.bias_filler', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='debug_info', full_name='caffe.RecurrentParameter.debug_info', index=3, number=4, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='expose_hidden', full_name='caffe.RecurrentParameter.expose_hidden', index=4, number=5, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=10117, serialized_end=10309, ) _REDUCTIONPARAMETER = _descriptor.Descriptor( name='ReductionParameter', full_name='caffe.ReductionParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='operation', 
full_name='caffe.ReductionParameter.operation', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='axis', full_name='caffe.ReductionParameter.axis', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='coeff', full_name='caffe.ReductionParameter.coeff', index=2, number=3, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _REDUCTIONPARAMETER_REDUCTIONOP, ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=10312, serialized_end=10485, ) _RELUPARAMETER = _descriptor.Descriptor( name='ReLUParameter', full_name='caffe.ReLUParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='negative_slope', full_name='caffe.ReLUParameter.negative_slope', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='engine', full_name='caffe.ReLUParameter.engine', index=1, number=2, type=14, cpp_type=8, 
label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _RELUPARAMETER_ENGINE, ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=10488, serialized_end=10629, ) _RESHAPEPARAMETER = _descriptor.Descriptor( name='ReshapeParameter', full_name='caffe.ReshapeParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='shape', full_name='caffe.ReshapeParameter.shape', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='axis', full_name='caffe.ReshapeParameter.axis', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='num_axes', full_name='caffe.ReshapeParameter.num_axes', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=True, default_value=-1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=10631, serialized_end=10721, ) _SCALEPARAMETER = 
_descriptor.Descriptor( name='ScaleParameter', full_name='caffe.ScaleParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='axis', full_name='caffe.ScaleParameter.axis', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='num_axes', full_name='caffe.ScaleParameter.num_axes', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='filler', full_name='caffe.ScaleParameter.filler', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='bias_term', full_name='caffe.ScaleParameter.bias_term', index=3, number=4, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='bias_filler', full_name='caffe.ScaleParameter.bias_filler', index=4, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=10724, serialized_end=10889, ) _SIGMOIDPARAMETER = _descriptor.Descriptor( name='SigmoidParameter', full_name='caffe.SigmoidParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='engine', full_name='caffe.SigmoidParameter.engine', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _SIGMOIDPARAMETER_ENGINE, ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=10891, serialized_end=11011, ) _SLICEPARAMETER = _descriptor.Descriptor( name='SliceParameter', full_name='caffe.SliceParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='axis', full_name='caffe.SliceParameter.axis', index=0, number=3, type=5, cpp_type=1, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='slice_point', full_name='caffe.SliceParameter.slice_point', index=1, number=2, type=13, cpp_type=3, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 
_descriptor.FieldDescriptor( name='slice_dim', full_name='caffe.SliceParameter.slice_dim', index=2, number=1, type=13, cpp_type=3, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=11013, serialized_end=11089, ) _SOFTMAXPARAMETER = _descriptor.Descriptor( name='SoftmaxParameter', full_name='caffe.SoftmaxParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='engine', full_name='caffe.SoftmaxParameter.engine', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='axis', full_name='caffe.SoftmaxParameter.axis', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _SOFTMAXPARAMETER_ENGINE, ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=11092, serialized_end=11229, ) _SWISHPARAMETER = _descriptor.Descriptor( name='SwishParameter', full_name='caffe.SwishParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='beta', 
full_name='caffe.SwishParameter.beta', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=11231, serialized_end=11264, ) _TANHPARAMETER = _descriptor.Descriptor( name='TanHParameter', full_name='caffe.TanHParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='engine', full_name='caffe.TanHParameter.engine', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _TANHPARAMETER_ENGINE, ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=11266, serialized_end=11380, ) _TILEPARAMETER = _descriptor.Descriptor( name='TileParameter', full_name='caffe.TileParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='axis', full_name='caffe.TileParameter.axis', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='tiles', full_name='caffe.TileParameter.tiles', index=1, number=2, type=5, cpp_type=1, label=1, 
has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=11382, serialized_end=11429, ) _THRESHOLDPARAMETER = _descriptor.Descriptor( name='ThresholdParameter', full_name='caffe.ThresholdParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='threshold', full_name='caffe.ThresholdParameter.threshold', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=11431, serialized_end=11473, ) _WINDOWDATAPARAMETER = _descriptor.Descriptor( name='WindowDataParameter', full_name='caffe.WindowDataParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='source', full_name='caffe.WindowDataParameter.source', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='scale', full_name='caffe.WindowDataParameter.scale', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=True, 
default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mean_file', full_name='caffe.WindowDataParameter.mean_file', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='batch_size', full_name='caffe.WindowDataParameter.batch_size', index=3, number=4, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='crop_size', full_name='caffe.WindowDataParameter.crop_size', index=4, number=5, type=13, cpp_type=3, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mirror', full_name='caffe.WindowDataParameter.mirror', index=5, number=6, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='fg_threshold', full_name='caffe.WindowDataParameter.fg_threshold', index=6, number=7, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.5), message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='bg_threshold', full_name='caffe.WindowDataParameter.bg_threshold', index=7, number=8, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.5), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='fg_fraction', full_name='caffe.WindowDataParameter.fg_fraction', index=8, number=9, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.25), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='context_pad', full_name='caffe.WindowDataParameter.context_pad', index=9, number=10, type=13, cpp_type=3, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='crop_mode', full_name='caffe.WindowDataParameter.crop_mode', index=10, number=11, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b"warp".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='cache_images', full_name='caffe.WindowDataParameter.cache_images', index=11, number=12, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='root_folder', full_name='caffe.WindowDataParameter.root_folder', index=12, number=13, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=11476, serialized_end=11797, ) _SPPPARAMETER = _descriptor.Descriptor( name='SPPParameter', full_name='caffe.SPPParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='pyramid_height', full_name='caffe.SPPParameter.pyramid_height', index=0, number=1, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='pool', full_name='caffe.SPPParameter.pool', index=1, number=2, type=14, cpp_type=8, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='engine', full_name='caffe.SPPParameter.engine', index=2, number=6, type=14, cpp_type=8, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ 
_SPPPARAMETER_POOLMETHOD, _SPPPARAMETER_ENGINE, ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=11800, serialized_end=12035, ) _V1LAYERPARAMETER = _descriptor.Descriptor( name='V1LayerParameter', full_name='caffe.V1LayerParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='bottom', full_name='caffe.V1LayerParameter.bottom', index=0, number=2, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='top', full_name='caffe.V1LayerParameter.top', index=1, number=3, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='name', full_name='caffe.V1LayerParameter.name', index=2, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='include', full_name='caffe.V1LayerParameter.include', index=3, number=32, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='exclude', full_name='caffe.V1LayerParameter.exclude', index=4, number=33, type=11, 
cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='type', full_name='caffe.V1LayerParameter.type', index=5, number=5, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='blobs', full_name='caffe.V1LayerParameter.blobs', index=6, number=6, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='param', full_name='caffe.V1LayerParameter.param', index=7, number=1001, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='blob_share_mode', full_name='caffe.V1LayerParameter.blob_share_mode', index=8, number=1002, type=14, cpp_type=8, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='blobs_lr', full_name='caffe.V1LayerParameter.blobs_lr', index=9, number=7, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='weight_decay', full_name='caffe.V1LayerParameter.weight_decay', index=10, number=8, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='loss_weight', full_name='caffe.V1LayerParameter.loss_weight', index=11, number=35, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='accuracy_param', full_name='caffe.V1LayerParameter.accuracy_param', index=12, number=27, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='argmax_param', full_name='caffe.V1LayerParameter.argmax_param', index=13, number=23, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='concat_param', full_name='caffe.V1LayerParameter.concat_param', index=14, number=9, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='contrastive_loss_param', full_name='caffe.V1LayerParameter.contrastive_loss_param', index=15, number=40, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='convolution_param', full_name='caffe.V1LayerParameter.convolution_param', index=16, number=10, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='data_param', full_name='caffe.V1LayerParameter.data_param', index=17, number=11, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='dropout_param', full_name='caffe.V1LayerParameter.dropout_param', index=18, number=12, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='dummy_data_param', full_name='caffe.V1LayerParameter.dummy_data_param', index=19, number=26, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 
_descriptor.FieldDescriptor( name='eltwise_param', full_name='caffe.V1LayerParameter.eltwise_param', index=20, number=24, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='exp_param', full_name='caffe.V1LayerParameter.exp_param', index=21, number=41, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='hdf5_data_param', full_name='caffe.V1LayerParameter.hdf5_data_param', index=22, number=13, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='hdf5_output_param', full_name='caffe.V1LayerParameter.hdf5_output_param', index=23, number=14, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='hinge_loss_param', full_name='caffe.V1LayerParameter.hinge_loss_param', index=24, number=29, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='image_data_param', 
full_name='caffe.V1LayerParameter.image_data_param', index=25, number=15, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='infogain_loss_param', full_name='caffe.V1LayerParameter.infogain_loss_param', index=26, number=16, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='inner_product_param', full_name='caffe.V1LayerParameter.inner_product_param', index=27, number=17, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='lrn_param', full_name='caffe.V1LayerParameter.lrn_param', index=28, number=18, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='memory_data_param', full_name='caffe.V1LayerParameter.memory_data_param', index=29, number=22, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mvn_param', full_name='caffe.V1LayerParameter.mvn_param', 
index=30, number=34, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='pooling_param', full_name='caffe.V1LayerParameter.pooling_param', index=31, number=19, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='power_param', full_name='caffe.V1LayerParameter.power_param', index=32, number=21, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='relu_param', full_name='caffe.V1LayerParameter.relu_param', index=33, number=30, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='sigmoid_param', full_name='caffe.V1LayerParameter.sigmoid_param', index=34, number=38, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='softmax_param', full_name='caffe.V1LayerParameter.softmax_param', index=35, number=39, type=11, cpp_type=10, label=1, has_default_value=False, 
default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='slice_param', full_name='caffe.V1LayerParameter.slice_param', index=36, number=31, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='tanh_param', full_name='caffe.V1LayerParameter.tanh_param', index=37, number=37, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='threshold_param', full_name='caffe.V1LayerParameter.threshold_param', index=38, number=25, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='window_data_param', full_name='caffe.V1LayerParameter.window_data_param', index=39, number=20, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='transform_param', full_name='caffe.V1LayerParameter.transform_param', index=40, number=36, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, 
containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='loss_param', full_name='caffe.V1LayerParameter.loss_param', index=41, number=42, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='layer', full_name='caffe.V1LayerParameter.layer', index=42, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _V1LAYERPARAMETER_LAYERTYPE, _V1LAYERPARAMETER_DIMCHECKMODE, ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=12038, serialized_end=14566, ) _V0LAYERPARAMETER = _descriptor.Descriptor( name='V0LayerParameter', full_name='caffe.V0LayerParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='name', full_name='caffe.V0LayerParameter.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='type', full_name='caffe.V0LayerParameter.type', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, 
is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='num_output', full_name='caffe.V0LayerParameter.num_output', index=2, number=3, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='biasterm', full_name='caffe.V0LayerParameter.biasterm', index=3, number=4, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='weight_filler', full_name='caffe.V0LayerParameter.weight_filler', index=4, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='bias_filler', full_name='caffe.V0LayerParameter.bias_filler', index=5, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='pad', full_name='caffe.V0LayerParameter.pad', index=6, number=7, type=13, cpp_type=3, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 
_descriptor.FieldDescriptor( name='kernelsize', full_name='caffe.V0LayerParameter.kernelsize', index=7, number=8, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='group', full_name='caffe.V0LayerParameter.group', index=8, number=9, type=13, cpp_type=3, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='stride', full_name='caffe.V0LayerParameter.stride', index=9, number=10, type=13, cpp_type=3, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='pool', full_name='caffe.V0LayerParameter.pool', index=10, number=11, type=14, cpp_type=8, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='dropout_ratio', full_name='caffe.V0LayerParameter.dropout_ratio', index=11, number=12, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.5), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='local_size', full_name='caffe.V0LayerParameter.local_size', index=12, number=13, type=13, cpp_type=3, label=1, 
has_default_value=True, default_value=5, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='alpha', full_name='caffe.V0LayerParameter.alpha', index=13, number=14, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='beta', full_name='caffe.V0LayerParameter.beta', index=14, number=15, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.75), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='k', full_name='caffe.V0LayerParameter.k', index=15, number=22, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='source', full_name='caffe.V0LayerParameter.source', index=16, number=16, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='scale', full_name='caffe.V0LayerParameter.scale', index=17, number=17, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='meanfile', full_name='caffe.V0LayerParameter.meanfile', index=18, number=18, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='batchsize', full_name='caffe.V0LayerParameter.batchsize', index=19, number=19, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='cropsize', full_name='caffe.V0LayerParameter.cropsize', index=20, number=20, type=13, cpp_type=3, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='mirror', full_name='caffe.V0LayerParameter.mirror', index=21, number=21, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='blobs', full_name='caffe.V0LayerParameter.blobs', index=22, number=50, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( 
name='blobs_lr', full_name='caffe.V0LayerParameter.blobs_lr', index=23, number=51, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='weight_decay', full_name='caffe.V0LayerParameter.weight_decay', index=24, number=52, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='rand_skip', full_name='caffe.V0LayerParameter.rand_skip', index=25, number=53, type=13, cpp_type=3, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='det_fg_threshold', full_name='caffe.V0LayerParameter.det_fg_threshold', index=26, number=54, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.5), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='det_bg_threshold', full_name='caffe.V0LayerParameter.det_bg_threshold', index=27, number=55, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.5), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='det_fg_fraction', full_name='caffe.V0LayerParameter.det_fg_fraction', index=28, 
number=56, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.25), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='det_context_pad', full_name='caffe.V0LayerParameter.det_context_pad', index=29, number=58, type=13, cpp_type=3, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='det_crop_mode', full_name='caffe.V0LayerParameter.det_crop_mode', index=30, number=59, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b"warp".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='new_num', full_name='caffe.V0LayerParameter.new_num', index=31, number=60, type=5, cpp_type=1, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='new_channels', full_name='caffe.V0LayerParameter.new_channels', index=32, number=61, type=5, cpp_type=1, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='new_height', full_name='caffe.V0LayerParameter.new_height', index=33, number=62, type=5, cpp_type=1, label=1, has_default_value=True, default_value=0, 
message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='new_width', full_name='caffe.V0LayerParameter.new_width', index=34, number=63, type=5, cpp_type=1, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='shuffle_images', full_name='caffe.V0LayerParameter.shuffle_images', index=35, number=64, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='concat_dim', full_name='caffe.V0LayerParameter.concat_dim', index=36, number=65, type=13, cpp_type=3, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='hdf5_output_param', full_name='caffe.V0LayerParameter.hdf5_output_param', index=37, number=1001, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _V0LAYERPARAMETER_POOLMETHOD, ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=14569, serialized_end=15590, ) _PRELUPARAMETER = _descriptor.Descriptor( 
name='PReLUParameter', full_name='caffe.PReLUParameter', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='filler', full_name='caffe.PReLUParameter.filler', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='channel_shared', full_name='caffe.PReLUParameter.channel_shared', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=15592, serialized_end=15679, ) _BLOBPROTO.fields_by_name['shape'].message_type = _BLOBSHAPE _BLOBPROTOVECTOR.fields_by_name['blobs'].message_type = _BLOBPROTO _FILLERPARAMETER.fields_by_name['variance_norm'].enum_type = _FILLERPARAMETER_VARIANCENORM _FILLERPARAMETER_VARIANCENORM.containing_type = _FILLERPARAMETER _NETPARAMETER.fields_by_name['input_shape'].message_type = _BLOBSHAPE _NETPARAMETER.fields_by_name['state'].message_type = _NETSTATE _NETPARAMETER.fields_by_name['layer'].message_type = _LAYERPARAMETER _NETPARAMETER.fields_by_name['layers'].message_type = _V1LAYERPARAMETER _SOLVERPARAMETER.fields_by_name['net_param'].message_type = _NETPARAMETER _SOLVERPARAMETER.fields_by_name['train_net_param'].message_type = _NETPARAMETER _SOLVERPARAMETER.fields_by_name['test_net_param'].message_type = _NETPARAMETER _SOLVERPARAMETER.fields_by_name['train_state'].message_type = _NETSTATE 
# --- Descriptor cross-linking (generated by the protocol buffer compiler) ---
# After all Descriptor objects are constructed above, the compiler emits this
# block to resolve the references between them: each message-typed field gets
# its `message_type` set, each enum-typed field gets its `enum_type` set, and
# every nested enum gets a `containing_type` back-reference to its parent
# message.  Do not edit by hand; regenerate from caffe.proto instead.

# SolverParameter: nested-message fields and its three nested enums.
_SOLVERPARAMETER.fields_by_name['test_state'].message_type = _NETSTATE
_SOLVERPARAMETER.fields_by_name['snapshot_format'].enum_type = _SOLVERPARAMETER_SNAPSHOTFORMAT
_SOLVERPARAMETER.fields_by_name['solver_mode'].enum_type = _SOLVERPARAMETER_SOLVERMODE
_SOLVERPARAMETER.fields_by_name['solver_type'].enum_type = _SOLVERPARAMETER_SOLVERTYPE
_SOLVERPARAMETER_SNAPSHOTFORMAT.containing_type = _SOLVERPARAMETER
_SOLVERPARAMETER_SOLVERMODE.containing_type = _SOLVERPARAMETER
_SOLVERPARAMETER_SOLVERTYPE.containing_type = _SOLVERPARAMETER
_SOLVERSTATE.fields_by_name['history'].message_type = _BLOBPROTO
_NETSTATE.fields_by_name['phase'].enum_type = _PHASE
_NETSTATERULE.fields_by_name['phase'].enum_type = _PHASE
_PARAMSPEC.fields_by_name['share_mode'].enum_type = _PARAMSPEC_DIMCHECKMODE
_PARAMSPEC_DIMCHECKMODE.containing_type = _PARAMSPEC

# LayerParameter: one optional sub-message per layer type's parameters.
_LAYERPARAMETER.fields_by_name['phase'].enum_type = _PHASE
_LAYERPARAMETER.fields_by_name['param'].message_type = _PARAMSPEC
_LAYERPARAMETER.fields_by_name['blobs'].message_type = _BLOBPROTO
_LAYERPARAMETER.fields_by_name['include'].message_type = _NETSTATERULE
_LAYERPARAMETER.fields_by_name['exclude'].message_type = _NETSTATERULE
_LAYERPARAMETER.fields_by_name['transform_param'].message_type = _TRANSFORMATIONPARAMETER
_LAYERPARAMETER.fields_by_name['loss_param'].message_type = _LOSSPARAMETER
_LAYERPARAMETER.fields_by_name['accuracy_param'].message_type = _ACCURACYPARAMETER
_LAYERPARAMETER.fields_by_name['argmax_param'].message_type = _ARGMAXPARAMETER
_LAYERPARAMETER.fields_by_name['batch_norm_param'].message_type = _BATCHNORMPARAMETER
_LAYERPARAMETER.fields_by_name['bias_param'].message_type = _BIASPARAMETER
_LAYERPARAMETER.fields_by_name['clip_param'].message_type = _CLIPPARAMETER
_LAYERPARAMETER.fields_by_name['concat_param'].message_type = _CONCATPARAMETER
_LAYERPARAMETER.fields_by_name['contrastive_loss_param'].message_type = _CONTRASTIVELOSSPARAMETER
_LAYERPARAMETER.fields_by_name['convolution_param'].message_type = _CONVOLUTIONPARAMETER
_LAYERPARAMETER.fields_by_name['crop_param'].message_type = _CROPPARAMETER
_LAYERPARAMETER.fields_by_name['data_param'].message_type = _DATAPARAMETER
_LAYERPARAMETER.fields_by_name['dropout_param'].message_type = _DROPOUTPARAMETER
_LAYERPARAMETER.fields_by_name['dummy_data_param'].message_type = _DUMMYDATAPARAMETER
_LAYERPARAMETER.fields_by_name['eltwise_param'].message_type = _ELTWISEPARAMETER
_LAYERPARAMETER.fields_by_name['elu_param'].message_type = _ELUPARAMETER
_LAYERPARAMETER.fields_by_name['embed_param'].message_type = _EMBEDPARAMETER
_LAYERPARAMETER.fields_by_name['exp_param'].message_type = _EXPPARAMETER
_LAYERPARAMETER.fields_by_name['flatten_param'].message_type = _FLATTENPARAMETER
_LAYERPARAMETER.fields_by_name['hdf5_data_param'].message_type = _HDF5DATAPARAMETER
_LAYERPARAMETER.fields_by_name['hdf5_output_param'].message_type = _HDF5OUTPUTPARAMETER
_LAYERPARAMETER.fields_by_name['hinge_loss_param'].message_type = _HINGELOSSPARAMETER
_LAYERPARAMETER.fields_by_name['image_data_param'].message_type = _IMAGEDATAPARAMETER
_LAYERPARAMETER.fields_by_name['infogain_loss_param'].message_type = _INFOGAINLOSSPARAMETER
_LAYERPARAMETER.fields_by_name['inner_product_param'].message_type = _INNERPRODUCTPARAMETER
_LAYERPARAMETER.fields_by_name['input_param'].message_type = _INPUTPARAMETER
_LAYERPARAMETER.fields_by_name['log_param'].message_type = _LOGPARAMETER
_LAYERPARAMETER.fields_by_name['lrn_param'].message_type = _LRNPARAMETER
_LAYERPARAMETER.fields_by_name['memory_data_param'].message_type = _MEMORYDATAPARAMETER
_LAYERPARAMETER.fields_by_name['mvn_param'].message_type = _MVNPARAMETER
_LAYERPARAMETER.fields_by_name['parameter_param'].message_type = _PARAMETERPARAMETER
_LAYERPARAMETER.fields_by_name['pooling_param'].message_type = _POOLINGPARAMETER
_LAYERPARAMETER.fields_by_name['power_param'].message_type = _POWERPARAMETER
_LAYERPARAMETER.fields_by_name['prelu_param'].message_type = _PRELUPARAMETER
_LAYERPARAMETER.fields_by_name['python_param'].message_type = _PYTHONPARAMETER
_LAYERPARAMETER.fields_by_name['recurrent_param'].message_type = _RECURRENTPARAMETER
_LAYERPARAMETER.fields_by_name['reduction_param'].message_type = _REDUCTIONPARAMETER
_LAYERPARAMETER.fields_by_name['relu_param'].message_type = _RELUPARAMETER
_LAYERPARAMETER.fields_by_name['reshape_param'].message_type = _RESHAPEPARAMETER
_LAYERPARAMETER.fields_by_name['scale_param'].message_type = _SCALEPARAMETER
_LAYERPARAMETER.fields_by_name['sigmoid_param'].message_type = _SIGMOIDPARAMETER
_LAYERPARAMETER.fields_by_name['softmax_param'].message_type = _SOFTMAXPARAMETER
_LAYERPARAMETER.fields_by_name['spp_param'].message_type = _SPPPARAMETER
_LAYERPARAMETER.fields_by_name['slice_param'].message_type = _SLICEPARAMETER
_LAYERPARAMETER.fields_by_name['swish_param'].message_type = _SWISHPARAMETER
_LAYERPARAMETER.fields_by_name['tanh_param'].message_type = _TANHPARAMETER
_LAYERPARAMETER.fields_by_name['threshold_param'].message_type = _THRESHOLDPARAMETER
_LAYERPARAMETER.fields_by_name['tile_param'].message_type = _TILEPARAMETER
_LAYERPARAMETER.fields_by_name['window_data_param'].message_type = _WINDOWDATAPARAMETER

# Per-layer parameter messages: filler sub-messages and nested enums.
_LOSSPARAMETER.fields_by_name['normalization'].enum_type = _LOSSPARAMETER_NORMALIZATIONMODE
_LOSSPARAMETER_NORMALIZATIONMODE.containing_type = _LOSSPARAMETER
_BIASPARAMETER.fields_by_name['filler'].message_type = _FILLERPARAMETER
_CONVOLUTIONPARAMETER.fields_by_name['weight_filler'].message_type = _FILLERPARAMETER
_CONVOLUTIONPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER
_CONVOLUTIONPARAMETER.fields_by_name['engine'].enum_type = _CONVOLUTIONPARAMETER_ENGINE
_CONVOLUTIONPARAMETER_ENGINE.containing_type = _CONVOLUTIONPARAMETER
_DATAPARAMETER.fields_by_name['backend'].enum_type = _DATAPARAMETER_DB
_DATAPARAMETER_DB.containing_type = _DATAPARAMETER
_DUMMYDATAPARAMETER.fields_by_name['data_filler'].message_type = _FILLERPARAMETER
_DUMMYDATAPARAMETER.fields_by_name['shape'].message_type = _BLOBSHAPE
_ELTWISEPARAMETER.fields_by_name['operation'].enum_type = _ELTWISEPARAMETER_ELTWISEOP
_ELTWISEPARAMETER_ELTWISEOP.containing_type = _ELTWISEPARAMETER
_EMBEDPARAMETER.fields_by_name['weight_filler'].message_type = _FILLERPARAMETER
_EMBEDPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER
_HINGELOSSPARAMETER.fields_by_name['norm'].enum_type = _HINGELOSSPARAMETER_NORM
_HINGELOSSPARAMETER_NORM.containing_type = _HINGELOSSPARAMETER
_INNERPRODUCTPARAMETER.fields_by_name['weight_filler'].message_type = _FILLERPARAMETER
_INNERPRODUCTPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER
_INPUTPARAMETER.fields_by_name['shape'].message_type = _BLOBSHAPE
_LRNPARAMETER.fields_by_name['norm_region'].enum_type = _LRNPARAMETER_NORMREGION
_LRNPARAMETER.fields_by_name['engine'].enum_type = _LRNPARAMETER_ENGINE
_LRNPARAMETER_NORMREGION.containing_type = _LRNPARAMETER
_LRNPARAMETER_ENGINE.containing_type = _LRNPARAMETER
_PARAMETERPARAMETER.fields_by_name['shape'].message_type = _BLOBSHAPE
_POOLINGPARAMETER.fields_by_name['pool'].enum_type = _POOLINGPARAMETER_POOLMETHOD
_POOLINGPARAMETER.fields_by_name['engine'].enum_type = _POOLINGPARAMETER_ENGINE
_POOLINGPARAMETER.fields_by_name['round_mode'].enum_type = _POOLINGPARAMETER_ROUNDMODE
_POOLINGPARAMETER_POOLMETHOD.containing_type = _POOLINGPARAMETER
_POOLINGPARAMETER_ENGINE.containing_type = _POOLINGPARAMETER
_POOLINGPARAMETER_ROUNDMODE.containing_type = _POOLINGPARAMETER
_RECURRENTPARAMETER.fields_by_name['weight_filler'].message_type = _FILLERPARAMETER
_RECURRENTPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER
_REDUCTIONPARAMETER.fields_by_name['operation'].enum_type = _REDUCTIONPARAMETER_REDUCTIONOP
_REDUCTIONPARAMETER_REDUCTIONOP.containing_type = _REDUCTIONPARAMETER
_RELUPARAMETER.fields_by_name['engine'].enum_type = _RELUPARAMETER_ENGINE
_RELUPARAMETER_ENGINE.containing_type = _RELUPARAMETER
_RESHAPEPARAMETER.fields_by_name['shape'].message_type = _BLOBSHAPE
_SCALEPARAMETER.fields_by_name['filler'].message_type = _FILLERPARAMETER
_SCALEPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER
_SIGMOIDPARAMETER.fields_by_name['engine'].enum_type = _SIGMOIDPARAMETER_ENGINE
_SIGMOIDPARAMETER_ENGINE.containing_type = _SIGMOIDPARAMETER
_SOFTMAXPARAMETER.fields_by_name['engine'].enum_type = _SOFTMAXPARAMETER_ENGINE
_SOFTMAXPARAMETER_ENGINE.containing_type = _SOFTMAXPARAMETER
_TANHPARAMETER.fields_by_name['engine'].enum_type = _TANHPARAMETER_ENGINE
_TANHPARAMETER_ENGINE.containing_type = _TANHPARAMETER
_SPPPARAMETER.fields_by_name['pool'].enum_type = _SPPPARAMETER_POOLMETHOD
_SPPPARAMETER.fields_by_name['engine'].enum_type = _SPPPARAMETER_ENGINE
_SPPPARAMETER_POOLMETHOD.containing_type = _SPPPARAMETER
_SPPPARAMETER_ENGINE.containing_type = _SPPPARAMETER

# V1LayerParameter: the deprecated layer format, kept for reading old models.
_V1LAYERPARAMETER.fields_by_name['include'].message_type = _NETSTATERULE
_V1LAYERPARAMETER.fields_by_name['exclude'].message_type = _NETSTATERULE
_V1LAYERPARAMETER.fields_by_name['type'].enum_type = _V1LAYERPARAMETER_LAYERTYPE
_V1LAYERPARAMETER.fields_by_name['blobs'].message_type = _BLOBPROTO
_V1LAYERPARAMETER.fields_by_name['blob_share_mode'].enum_type = _V1LAYERPARAMETER_DIMCHECKMODE
_V1LAYERPARAMETER.fields_by_name['accuracy_param'].message_type = _ACCURACYPARAMETER
_V1LAYERPARAMETER.fields_by_name['argmax_param'].message_type = _ARGMAXPARAMETER
_V1LAYERPARAMETER.fields_by_name['concat_param'].message_type = _CONCATPARAMETER
_V1LAYERPARAMETER.fields_by_name['contrastive_loss_param'].message_type = _CONTRASTIVELOSSPARAMETER
_V1LAYERPARAMETER.fields_by_name['convolution_param'].message_type = _CONVOLUTIONPARAMETER
_V1LAYERPARAMETER.fields_by_name['data_param'].message_type = _DATAPARAMETER
_V1LAYERPARAMETER.fields_by_name['dropout_param'].message_type = _DROPOUTPARAMETER
_V1LAYERPARAMETER.fields_by_name['dummy_data_param'].message_type = _DUMMYDATAPARAMETER
_V1LAYERPARAMETER.fields_by_name['eltwise_param'].message_type = _ELTWISEPARAMETER
_V1LAYERPARAMETER.fields_by_name['exp_param'].message_type = _EXPPARAMETER
_V1LAYERPARAMETER.fields_by_name['hdf5_data_param'].message_type = _HDF5DATAPARAMETER
_V1LAYERPARAMETER.fields_by_name['hdf5_output_param'].message_type = _HDF5OUTPUTPARAMETER
_V1LAYERPARAMETER.fields_by_name['hinge_loss_param'].message_type = _HINGELOSSPARAMETER
_V1LAYERPARAMETER.fields_by_name['image_data_param'].message_type = _IMAGEDATAPARAMETER
_V1LAYERPARAMETER.fields_by_name['infogain_loss_param'].message_type = _INFOGAINLOSSPARAMETER
_V1LAYERPARAMETER.fields_by_name['inner_product_param'].message_type = _INNERPRODUCTPARAMETER
_V1LAYERPARAMETER.fields_by_name['lrn_param'].message_type = _LRNPARAMETER
_V1LAYERPARAMETER.fields_by_name['memory_data_param'].message_type = _MEMORYDATAPARAMETER
_V1LAYERPARAMETER.fields_by_name['mvn_param'].message_type = _MVNPARAMETER
_V1LAYERPARAMETER.fields_by_name['pooling_param'].message_type = _POOLINGPARAMETER
_V1LAYERPARAMETER.fields_by_name['power_param'].message_type = _POWERPARAMETER
_V1LAYERPARAMETER.fields_by_name['relu_param'].message_type = _RELUPARAMETER
_V1LAYERPARAMETER.fields_by_name['sigmoid_param'].message_type = _SIGMOIDPARAMETER
_V1LAYERPARAMETER.fields_by_name['softmax_param'].message_type = _SOFTMAXPARAMETER
_V1LAYERPARAMETER.fields_by_name['slice_param'].message_type = _SLICEPARAMETER
_V1LAYERPARAMETER.fields_by_name['tanh_param'].message_type = _TANHPARAMETER
_V1LAYERPARAMETER.fields_by_name['threshold_param'].message_type = _THRESHOLDPARAMETER
_V1LAYERPARAMETER.fields_by_name['window_data_param'].message_type = _WINDOWDATAPARAMETER
_V1LAYERPARAMETER.fields_by_name['transform_param'].message_type = _TRANSFORMATIONPARAMETER
_V1LAYERPARAMETER.fields_by_name['loss_param'].message_type = _LOSSPARAMETER
_V1LAYERPARAMETER.fields_by_name['layer'].message_type = _V0LAYERPARAMETER
_V1LAYERPARAMETER_LAYERTYPE.containing_type = _V1LAYERPARAMETER
_V1LAYERPARAMETER_DIMCHECKMODE.containing_type = _V1LAYERPARAMETER

# V0LayerParameter: the oldest layer format (V1 embeds it via its 'layer' field).
_V0LAYERPARAMETER.fields_by_name['weight_filler'].message_type = _FILLERPARAMETER
_V0LAYERPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER
_V0LAYERPARAMETER.fields_by_name['pool'].enum_type = _V0LAYERPARAMETER_POOLMETHOD
_V0LAYERPARAMETER.fields_by_name['blobs'].message_type = _BLOBPROTO
_V0LAYERPARAMETER.fields_by_name['hdf5_output_param'].message_type = _HDF5OUTPUTPARAMETER
_V0LAYERPARAMETER_POOLMETHOD.containing_type = _V0LAYERPARAMETER
_PRELUPARAMETER.fields_by_name['filler'].message_type = _FILLERPARAMETER

# Register every top-level message descriptor on the file descriptor so it can
# be found by name (e.g. DESCRIPTOR.message_types_by_name['NetParameter']).
DESCRIPTOR.message_types_by_name['BlobShape'] = _BLOBSHAPE
DESCRIPTOR.message_types_by_name['BlobProto'] = _BLOBPROTO
DESCRIPTOR.message_types_by_name['BlobProtoVector'] = _BLOBPROTOVECTOR
DESCRIPTOR.message_types_by_name['Datum'] = _DATUM
DESCRIPTOR.message_types_by_name['FillerParameter'] = _FILLERPARAMETER
DESCRIPTOR.message_types_by_name['NetParameter'] = _NETPARAMETER
DESCRIPTOR.message_types_by_name['SolverParameter'] = _SOLVERPARAMETER
DESCRIPTOR.message_types_by_name['SolverState'] = _SOLVERSTATE
DESCRIPTOR.message_types_by_name['NetState'] = _NETSTATE
DESCRIPTOR.message_types_by_name['NetStateRule'] = _NETSTATERULE
DESCRIPTOR.message_types_by_name['ParamSpec'] = _PARAMSPEC
DESCRIPTOR.message_types_by_name['LayerParameter'] = _LAYERPARAMETER
DESCRIPTOR.message_types_by_name['TransformationParameter'] = _TRANSFORMATIONPARAMETER
DESCRIPTOR.message_types_by_name['LossParameter'] = _LOSSPARAMETER
DESCRIPTOR.message_types_by_name['AccuracyParameter'] = _ACCURACYPARAMETER
DESCRIPTOR.message_types_by_name['ArgMaxParameter'] = _ARGMAXPARAMETER
DESCRIPTOR.message_types_by_name['ClipParameter'] = _CLIPPARAMETER
DESCRIPTOR.message_types_by_name['ConcatParameter'] = _CONCATPARAMETER
DESCRIPTOR.message_types_by_name['BatchNormParameter'] = _BATCHNORMPARAMETER
DESCRIPTOR.message_types_by_name['BiasParameter'] = _BIASPARAMETER
DESCRIPTOR.message_types_by_name['ContrastiveLossParameter'] = _CONTRASTIVELOSSPARAMETER
DESCRIPTOR.message_types_by_name['ConvolutionParameter'] = _CONVOLUTIONPARAMETER
DESCRIPTOR.message_types_by_name['CropParameter'] = _CROPPARAMETER
DESCRIPTOR.message_types_by_name['DataParameter'] = _DATAPARAMETER
DESCRIPTOR.message_types_by_name['DropoutParameter'] = _DROPOUTPARAMETER
DESCRIPTOR.message_types_by_name['DummyDataParameter'] = _DUMMYDATAPARAMETER
DESCRIPTOR.message_types_by_name['EltwiseParameter'] = _ELTWISEPARAMETER
DESCRIPTOR.message_types_by_name['ELUParameter'] = _ELUPARAMETER
DESCRIPTOR.message_types_by_name['EmbedParameter'] = _EMBEDPARAMETER
DESCRIPTOR.message_types_by_name['ExpParameter'] = _EXPPARAMETER
DESCRIPTOR.message_types_by_name['FlattenParameter'] = _FLATTENPARAMETER
DESCRIPTOR.message_types_by_name['HDF5DataParameter'] = _HDF5DATAPARAMETER
DESCRIPTOR.message_types_by_name['HDF5OutputParameter'] = _HDF5OUTPUTPARAMETER
DESCRIPTOR.message_types_by_name['HingeLossParameter'] = _HINGELOSSPARAMETER
DESCRIPTOR.message_types_by_name['ImageDataParameter'] = _IMAGEDATAPARAMETER
DESCRIPTOR.message_types_by_name['InfogainLossParameter'] = _INFOGAINLOSSPARAMETER
DESCRIPTOR.message_types_by_name['InnerProductParameter'] = _INNERPRODUCTPARAMETER
DESCRIPTOR.message_types_by_name['InputParameter'] = _INPUTPARAMETER
DESCRIPTOR.message_types_by_name['LogParameter'] = _LOGPARAMETER
DESCRIPTOR.message_types_by_name['LRNParameter'] = _LRNPARAMETER
DESCRIPTOR.message_types_by_name['MemoryDataParameter'] = _MEMORYDATAPARAMETER
DESCRIPTOR.message_types_by_name['MVNParameter'] = _MVNPARAMETER
DESCRIPTOR.message_types_by_name['ParameterParameter'] = _PARAMETERPARAMETER
DESCRIPTOR.message_types_by_name['PoolingParameter'] = _POOLINGPARAMETER
DESCRIPTOR.message_types_by_name['PowerParameter'] = _POWERPARAMETER
DESCRIPTOR.message_types_by_name['PythonParameter'] = _PYTHONPARAMETER
DESCRIPTOR.message_types_by_name['RecurrentParameter'] = _RECURRENTPARAMETER
DESCRIPTOR.message_types_by_name['ReductionParameter'] = _REDUCTIONPARAMETER
# Remaining top-level message registrations, the file-level Phase enum, and
# registration of the whole file descriptor with the default symbol database.
DESCRIPTOR.message_types_by_name['ReLUParameter'] = _RELUPARAMETER
DESCRIPTOR.message_types_by_name['ReshapeParameter'] = _RESHAPEPARAMETER
DESCRIPTOR.message_types_by_name['ScaleParameter'] = _SCALEPARAMETER
DESCRIPTOR.message_types_by_name['SigmoidParameter'] = _SIGMOIDPARAMETER
DESCRIPTOR.message_types_by_name['SliceParameter'] = _SLICEPARAMETER
DESCRIPTOR.message_types_by_name['SoftmaxParameter'] = _SOFTMAXPARAMETER
DESCRIPTOR.message_types_by_name['SwishParameter'] = _SWISHPARAMETER
DESCRIPTOR.message_types_by_name['TanHParameter'] = _TANHPARAMETER
DESCRIPTOR.message_types_by_name['TileParameter'] = _TILEPARAMETER
DESCRIPTOR.message_types_by_name['ThresholdParameter'] = _THRESHOLDPARAMETER
DESCRIPTOR.message_types_by_name['WindowDataParameter'] = _WINDOWDATAPARAMETER
DESCRIPTOR.message_types_by_name['SPPParameter'] = _SPPPARAMETER
DESCRIPTOR.message_types_by_name['V1LayerParameter'] = _V1LAYERPARAMETER
DESCRIPTOR.message_types_by_name['V0LayerParameter'] = _V0LAYERPARAMETER
DESCRIPTOR.message_types_by_name['PReLUParameter'] = _PRELUPARAMETER
DESCRIPTOR.enum_types_by_name['Phase'] = _PHASE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Build the concrete Python message classes from the descriptors above.
# GeneratedProtocolMessageType is a metaclass call: each statement creates a
# Message subclass bound to its Descriptor, and RegisterMessage makes it
# resolvable through the symbol database.  The @@protoc_insertion_point
# comments are markers used by protoc plugins; keep them intact.
BlobShape = _reflection.GeneratedProtocolMessageType('BlobShape', (_message.Message,), {'DESCRIPTOR' : _BLOBSHAPE, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.BlobShape)
_sym_db.RegisterMessage(BlobShape)
BlobProto = _reflection.GeneratedProtocolMessageType('BlobProto', (_message.Message,), {'DESCRIPTOR' : _BLOBPROTO, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.BlobProto)
_sym_db.RegisterMessage(BlobProto)
BlobProtoVector = _reflection.GeneratedProtocolMessageType('BlobProtoVector', (_message.Message,), {'DESCRIPTOR' : _BLOBPROTOVECTOR, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.BlobProtoVector)
_sym_db.RegisterMessage(BlobProtoVector)
Datum = _reflection.GeneratedProtocolMessageType('Datum', (_message.Message,), {'DESCRIPTOR' : _DATUM, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.Datum)
_sym_db.RegisterMessage(Datum)
FillerParameter = _reflection.GeneratedProtocolMessageType('FillerParameter', (_message.Message,), {'DESCRIPTOR' : _FILLERPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.FillerParameter)
_sym_db.RegisterMessage(FillerParameter)
NetParameter = _reflection.GeneratedProtocolMessageType('NetParameter', (_message.Message,), {'DESCRIPTOR' : _NETPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.NetParameter)
_sym_db.RegisterMessage(NetParameter)
SolverParameter = _reflection.GeneratedProtocolMessageType('SolverParameter', (_message.Message,), {'DESCRIPTOR' : _SOLVERPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.SolverParameter)
_sym_db.RegisterMessage(SolverParameter)
SolverState = _reflection.GeneratedProtocolMessageType('SolverState', (_message.Message,), {'DESCRIPTOR' : _SOLVERSTATE, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.SolverState)
_sym_db.RegisterMessage(SolverState)
NetState = _reflection.GeneratedProtocolMessageType('NetState', (_message.Message,), {'DESCRIPTOR' : _NETSTATE, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.NetState)
_sym_db.RegisterMessage(NetState)
NetStateRule = _reflection.GeneratedProtocolMessageType('NetStateRule', (_message.Message,), {'DESCRIPTOR' : _NETSTATERULE, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.NetStateRule)
_sym_db.RegisterMessage(NetStateRule)
ParamSpec = _reflection.GeneratedProtocolMessageType('ParamSpec', (_message.Message,), {'DESCRIPTOR' : _PARAMSPEC, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.ParamSpec)
_sym_db.RegisterMessage(ParamSpec)
LayerParameter = _reflection.GeneratedProtocolMessageType('LayerParameter', (_message.Message,), {'DESCRIPTOR' : _LAYERPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.LayerParameter)
_sym_db.RegisterMessage(LayerParameter)
TransformationParameter = _reflection.GeneratedProtocolMessageType('TransformationParameter', (_message.Message,), {'DESCRIPTOR' : _TRANSFORMATIONPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.TransformationParameter)
_sym_db.RegisterMessage(TransformationParameter)
LossParameter = _reflection.GeneratedProtocolMessageType('LossParameter', (_message.Message,), {'DESCRIPTOR' : _LOSSPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.LossParameter)
_sym_db.RegisterMessage(LossParameter)
AccuracyParameter = _reflection.GeneratedProtocolMessageType('AccuracyParameter', (_message.Message,), {'DESCRIPTOR' : _ACCURACYPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.AccuracyParameter)
_sym_db.RegisterMessage(AccuracyParameter)
ArgMaxParameter = _reflection.GeneratedProtocolMessageType('ArgMaxParameter', (_message.Message,), {'DESCRIPTOR' : _ARGMAXPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.ArgMaxParameter)
_sym_db.RegisterMessage(ArgMaxParameter)
ClipParameter = _reflection.GeneratedProtocolMessageType('ClipParameter', (_message.Message,), {'DESCRIPTOR' : _CLIPPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.ClipParameter)
_sym_db.RegisterMessage(ClipParameter)
ConcatParameter = _reflection.GeneratedProtocolMessageType('ConcatParameter', (_message.Message,), {'DESCRIPTOR' : _CONCATPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.ConcatParameter)
_sym_db.RegisterMessage(ConcatParameter)
BatchNormParameter = _reflection.GeneratedProtocolMessageType('BatchNormParameter', (_message.Message,), {'DESCRIPTOR' : _BATCHNORMPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.BatchNormParameter)
_sym_db.RegisterMessage(BatchNormParameter)
BiasParameter = _reflection.GeneratedProtocolMessageType('BiasParameter', (_message.Message,), {'DESCRIPTOR' : _BIASPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.BiasParameter)
_sym_db.RegisterMessage(BiasParameter)
ContrastiveLossParameter = _reflection.GeneratedProtocolMessageType('ContrastiveLossParameter', (_message.Message,), {'DESCRIPTOR' : _CONTRASTIVELOSSPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.ContrastiveLossParameter)
_sym_db.RegisterMessage(ContrastiveLossParameter)
ConvolutionParameter = _reflection.GeneratedProtocolMessageType('ConvolutionParameter', (_message.Message,), {'DESCRIPTOR' : _CONVOLUTIONPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.ConvolutionParameter)
_sym_db.RegisterMessage(ConvolutionParameter)
CropParameter = _reflection.GeneratedProtocolMessageType('CropParameter', (_message.Message,), {'DESCRIPTOR' : _CROPPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.CropParameter)
_sym_db.RegisterMessage(CropParameter)
DataParameter = _reflection.GeneratedProtocolMessageType('DataParameter', (_message.Message,), {'DESCRIPTOR' : _DATAPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.DataParameter)
_sym_db.RegisterMessage(DataParameter)
DropoutParameter = _reflection.GeneratedProtocolMessageType('DropoutParameter', (_message.Message,), {'DESCRIPTOR' : _DROPOUTPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.DropoutParameter)
_sym_db.RegisterMessage(DropoutParameter)
DummyDataParameter = _reflection.GeneratedProtocolMessageType('DummyDataParameter', (_message.Message,), {'DESCRIPTOR' : _DUMMYDATAPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.DummyDataParameter)
_sym_db.RegisterMessage(DummyDataParameter)
EltwiseParameter = _reflection.GeneratedProtocolMessageType('EltwiseParameter', (_message.Message,), {'DESCRIPTOR' : _ELTWISEPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.EltwiseParameter)
_sym_db.RegisterMessage(EltwiseParameter)
ELUParameter = _reflection.GeneratedProtocolMessageType('ELUParameter', (_message.Message,), {'DESCRIPTOR' : _ELUPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.ELUParameter)
_sym_db.RegisterMessage(ELUParameter)
EmbedParameter = _reflection.GeneratedProtocolMessageType('EmbedParameter', (_message.Message,), {'DESCRIPTOR' : _EMBEDPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.EmbedParameter)
_sym_db.RegisterMessage(EmbedParameter)
ExpParameter = _reflection.GeneratedProtocolMessageType('ExpParameter', (_message.Message,), {'DESCRIPTOR' : _EXPPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.ExpParameter)
_sym_db.RegisterMessage(ExpParameter)
FlattenParameter = _reflection.GeneratedProtocolMessageType('FlattenParameter', (_message.Message,), {'DESCRIPTOR' : _FLATTENPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.FlattenParameter)
_sym_db.RegisterMessage(FlattenParameter)
HDF5DataParameter = _reflection.GeneratedProtocolMessageType('HDF5DataParameter', (_message.Message,), {'DESCRIPTOR' : _HDF5DATAPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.HDF5DataParameter)
_sym_db.RegisterMessage(HDF5DataParameter)
HDF5OutputParameter = _reflection.GeneratedProtocolMessageType('HDF5OutputParameter', (_message.Message,), {'DESCRIPTOR' : _HDF5OUTPUTPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.HDF5OutputParameter)
_sym_db.RegisterMessage(HDF5OutputParameter)
HingeLossParameter = _reflection.GeneratedProtocolMessageType('HingeLossParameter', (_message.Message,), {'DESCRIPTOR' : _HINGELOSSPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.HingeLossParameter)
_sym_db.RegisterMessage(HingeLossParameter)
ImageDataParameter = _reflection.GeneratedProtocolMessageType('ImageDataParameter', (_message.Message,), {'DESCRIPTOR' : _IMAGEDATAPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.ImageDataParameter)
_sym_db.RegisterMessage(ImageDataParameter)
InfogainLossParameter = _reflection.GeneratedProtocolMessageType('InfogainLossParameter', (_message.Message,), {'DESCRIPTOR' : _INFOGAINLOSSPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.InfogainLossParameter)
_sym_db.RegisterMessage(InfogainLossParameter)
InnerProductParameter = _reflection.GeneratedProtocolMessageType('InnerProductParameter', (_message.Message,), {'DESCRIPTOR' : _INNERPRODUCTPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.InnerProductParameter)
_sym_db.RegisterMessage(InnerProductParameter)
InputParameter = _reflection.GeneratedProtocolMessageType('InputParameter', (_message.Message,), {'DESCRIPTOR' : _INPUTPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.InputParameter)
_sym_db.RegisterMessage(InputParameter)
LogParameter = _reflection.GeneratedProtocolMessageType('LogParameter', (_message.Message,), {'DESCRIPTOR' : _LOGPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.LogParameter)
_sym_db.RegisterMessage(LogParameter)
LRNParameter = _reflection.GeneratedProtocolMessageType('LRNParameter', (_message.Message,), {'DESCRIPTOR' : _LRNPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.LRNParameter)
_sym_db.RegisterMessage(LRNParameter)
MemoryDataParameter = _reflection.GeneratedProtocolMessageType('MemoryDataParameter', (_message.Message,), {'DESCRIPTOR' : _MEMORYDATAPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.MemoryDataParameter)
_sym_db.RegisterMessage(MemoryDataParameter)
MVNParameter = _reflection.GeneratedProtocolMessageType('MVNParameter', (_message.Message,), {'DESCRIPTOR' : _MVNPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.MVNParameter)
_sym_db.RegisterMessage(MVNParameter)
ParameterParameter = _reflection.GeneratedProtocolMessageType('ParameterParameter', (_message.Message,), {'DESCRIPTOR' : _PARAMETERPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.ParameterParameter)
_sym_db.RegisterMessage(ParameterParameter)
PoolingParameter = _reflection.GeneratedProtocolMessageType('PoolingParameter', (_message.Message,), {'DESCRIPTOR' : _POOLINGPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.PoolingParameter)
_sym_db.RegisterMessage(PoolingParameter)
PowerParameter = _reflection.GeneratedProtocolMessageType('PowerParameter', (_message.Message,), {'DESCRIPTOR' : _POWERPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.PowerParameter)
_sym_db.RegisterMessage(PowerParameter)
PythonParameter = _reflection.GeneratedProtocolMessageType('PythonParameter', (_message.Message,), {'DESCRIPTOR' : _PYTHONPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.PythonParameter)
_sym_db.RegisterMessage(PythonParameter)
RecurrentParameter = _reflection.GeneratedProtocolMessageType('RecurrentParameter', (_message.Message,), {'DESCRIPTOR' : _RECURRENTPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.RecurrentParameter)
_sym_db.RegisterMessage(RecurrentParameter)
# NOTE: RegisterMessage(ReductionParameter) follows immediately after this block.
ReductionParameter = _reflection.GeneratedProtocolMessageType('ReductionParameter', (_message.Message,), {'DESCRIPTOR' : _REDUCTIONPARAMETER, '__module__' : 'caffe_pb2'})  # @@protoc_insertion_point(class_scope:caffe.ReductionParameter)
_sym_db.RegisterMessage(ReductionParameter) ReLUParameter = _reflection.GeneratedProtocolMessageType('ReLUParameter', (_message.Message,), { 'DESCRIPTOR' : _RELUPARAMETER, '__module__' : 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.ReLUParameter) }) _sym_db.RegisterMessage(ReLUParameter) ReshapeParameter = _reflection.GeneratedProtocolMessageType('ReshapeParameter', (_message.Message,), { 'DESCRIPTOR' : _RESHAPEPARAMETER, '__module__' : 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.ReshapeParameter) }) _sym_db.RegisterMessage(ReshapeParameter) ScaleParameter = _reflection.GeneratedProtocolMessageType('ScaleParameter', (_message.Message,), { 'DESCRIPTOR' : _SCALEPARAMETER, '__module__' : 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.ScaleParameter) }) _sym_db.RegisterMessage(ScaleParameter) SigmoidParameter = _reflection.GeneratedProtocolMessageType('SigmoidParameter', (_message.Message,), { 'DESCRIPTOR' : _SIGMOIDPARAMETER, '__module__' : 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.SigmoidParameter) }) _sym_db.RegisterMessage(SigmoidParameter) SliceParameter = _reflection.GeneratedProtocolMessageType('SliceParameter', (_message.Message,), { 'DESCRIPTOR' : _SLICEPARAMETER, '__module__' : 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.SliceParameter) }) _sym_db.RegisterMessage(SliceParameter) SoftmaxParameter = _reflection.GeneratedProtocolMessageType('SoftmaxParameter', (_message.Message,), { 'DESCRIPTOR' : _SOFTMAXPARAMETER, '__module__' : 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.SoftmaxParameter) }) _sym_db.RegisterMessage(SoftmaxParameter) SwishParameter = _reflection.GeneratedProtocolMessageType('SwishParameter', (_message.Message,), { 'DESCRIPTOR' : _SWISHPARAMETER, '__module__' : 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.SwishParameter) }) _sym_db.RegisterMessage(SwishParameter) TanHParameter = _reflection.GeneratedProtocolMessageType('TanHParameter', (_message.Message,), { 
'DESCRIPTOR' : _TANHPARAMETER, '__module__' : 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.TanHParameter) }) _sym_db.RegisterMessage(TanHParameter) TileParameter = _reflection.GeneratedProtocolMessageType('TileParameter', (_message.Message,), { 'DESCRIPTOR' : _TILEPARAMETER, '__module__' : 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.TileParameter) }) _sym_db.RegisterMessage(TileParameter) ThresholdParameter = _reflection.GeneratedProtocolMessageType('ThresholdParameter', (_message.Message,), { 'DESCRIPTOR' : _THRESHOLDPARAMETER, '__module__' : 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.ThresholdParameter) }) _sym_db.RegisterMessage(ThresholdParameter) WindowDataParameter = _reflection.GeneratedProtocolMessageType('WindowDataParameter', (_message.Message,), { 'DESCRIPTOR' : _WINDOWDATAPARAMETER, '__module__' : 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.WindowDataParameter) }) _sym_db.RegisterMessage(WindowDataParameter) SPPParameter = _reflection.GeneratedProtocolMessageType('SPPParameter', (_message.Message,), { 'DESCRIPTOR' : _SPPPARAMETER, '__module__' : 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.SPPParameter) }) _sym_db.RegisterMessage(SPPParameter) V1LayerParameter = _reflection.GeneratedProtocolMessageType('V1LayerParameter', (_message.Message,), { 'DESCRIPTOR' : _V1LAYERPARAMETER, '__module__' : 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.V1LayerParameter) }) _sym_db.RegisterMessage(V1LayerParameter) V0LayerParameter = _reflection.GeneratedProtocolMessageType('V0LayerParameter', (_message.Message,), { 'DESCRIPTOR' : _V0LAYERPARAMETER, '__module__' : 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.V0LayerParameter) }) _sym_db.RegisterMessage(V0LayerParameter) PReLUParameter = _reflection.GeneratedProtocolMessageType('PReLUParameter', (_message.Message,), { 'DESCRIPTOR' : _PRELUPARAMETER, '__module__' : 'caffe_pb2' # 
@@protoc_insertion_point(class_scope:caffe.PReLUParameter) }) _sym_db.RegisterMessage(PReLUParameter) _BLOBSHAPE.fields_by_name['dim']._options = None _BLOBPROTO.fields_by_name['data']._options = None _BLOBPROTO.fields_by_name['diff']._options = None _BLOBPROTO.fields_by_name['double_data']._options = None _BLOBPROTO.fields_by_name['double_diff']._options = None # @@protoc_insertion_point(module_scope) ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/caffe2/reader.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import onnx from onnx.shape_inference import infer_shapes import caffe2.python.onnx.frontend from caffe2.proto import caffe2_pb2 from ..onnx.reader import onnx_model_to_graph from ...utils.types import as_str from google.protobuf import text_format from collections.abc import Sequence from caffe2.python.workspace import GlobalInit import json import sys import os try: import caffe except ImportError: from . 
import caffe
# NOTE(review): in the original file this import and the sys.modules line are
# the body of the `except ImportError:` clause started above (`from . import caffe`),
# registering the bundled fallback `caffe` package under the top-level name.
sys.modules['caffe'] = caffe

from caffe2.python.caffe_translator import TranslateModel, TranslatorRegistry, ConvertTensorProtosToInitNet, \
    AddArgument, BaseTranslate


@TranslatorRegistry.Register("ArgMax")
def TranslateArgmax(layer, pretrained_blobs, is_test, **kwargs):
    """Translate a Caffe ArgMax layer to Caffe2 operator(s).

    Only `top_k == 1` and `out_max_val == False` are supported; other values
    raise ValueError. When no explicit axis is given, a Flatten + ArgMax(axis=1)
    pair is emitted instead of a single ArgMax.
    """
    param = layer.argmax_param
    if param.top_k != 1:
        raise ValueError("Unsupported attribute value 'top_k = {}' in ArgMax layer".format(param.top_k))
    if param.out_max_val:
        raise ValueError("Conversion of ArgMax layer with 'out_max_val = True' is not supported")

    if param.HasField("axis"):
        caffe_op = BaseTranslate(layer, "ArgMax")
        AddArgument(caffe_op, "keepdims", True)
        AddArgument(caffe_op, "axis", param.axis)
        return caffe_op, []

    # No explicit axis: flatten first, then take the arg-max over axis 1.
    flatten_op = caffe2_pb2.OperatorDef()
    flatten_op.type = "Flatten"
    flatten_op.input.extend(layer.bottom)
    flatten_op.output.append(layer.bottom[0] + "_flattened")
    argmax_op = caffe2_pb2.OperatorDef()
    argmax_op.type = "ArgMax"
    argmax_op.input.append(layer.bottom[0] + "_flattened")
    argmax_op.output.extend(layer.top)
    AddArgument(argmax_op, "keepdims", True)
    AddArgument(argmax_op, "axis", 1)
    return [flatten_op, argmax_op], []


@TranslatorRegistry.Register("ReLU")
def TranslateRelu(layer, pretrained_blobs, is_test, **kwargs):
    """Translate a Caffe ReLU layer; a non-zero negative_slope maps to LeakyRelu."""
    param = layer.relu_param
    if param.HasField("negative_slope") and param.negative_slope:
        relu_op = BaseTranslate(layer, "LeakyRelu")
        AddArgument(relu_op, "alpha", param.negative_slope)
        return relu_op, []
    return BaseTranslate(layer, "Relu"), []


def _HookedTranslateLayer(layer, pretrained_blobs, is_test, **kwargs):
    """Wrap TranslatorRegistry.TranslateLayer with the pre/post fix-ups below."""
    _pre_translate(layer, pretrained_blobs)
    ops, params = _TranslateLayer(layer, pretrained_blobs, is_test, **kwargs)
    _post_translate(layer, pretrained_blobs, ops, params)
    return ops, params


# Install the hook, keeping a reference to the original entry point.
_TranslateLayer = TranslatorRegistry.TranslateLayer
TranslatorRegistry.TranslateLayer = _HookedTranslateLayer

GlobalInit(['caffe2', '--caffe2_log_level=2'])

# Caffe2 attribute names that the ONNX exporter does not recognize;
# stripped by _remove_unrecognized_attributes before export.
_UnrecognizedAttribs = {'ws_nbytes_limit'}


def _caffe_to_caffe2(prototxt, caffemodel):
    """Convert a (prototxt, caffemodel) NetParameter pair into Caffe2 nets.

    Returns (predict_net, init_net, value_info), where value_info maps each
    input name to (onnx.TensorProto.FLOAT, shape).
    """
    if prototxt.layer[0].type == 'Input':
        input_names = list(prototxt.layer[0].top)
        input_shapes = [item.dim for item in prototxt.layer[0].input_param.shape]
    else:
        input_names = list(prototxt.input)
        input_shapes = [item.dim for item in prototxt.input_shape]
        if len(input_shapes) == 0:
            # Legacy prototxt: shapes are a flat input_dim list, 4 entries per input.
            input_dims = prototxt.input_dim
            assert len(input_dims) == 4 * len(input_names)
            input_shapes = [input_dims[4 * i: 4 * (i + 1)] for i in range(len(input_names))]

    predict_net, params = TranslateModel(prototxt, caffemodel, is_test=True, remove_legacy_pad=False, input_dims=[])

    # Assume there is one input and one output
    external_input = predict_net.op[0].input[0]
    external_output = predict_net.op[-1].output[0]

    predict_net.external_input.extend([external_input])
    predict_net.external_input.extend([param.name for param in params.protos])
    predict_net.external_output.extend([external_output])

    init_net = ConvertTensorProtosToInitNet(params, external_input)

    value_info = {name: (onnx.TensorProto.FLOAT, shape) for name, shape in zip(input_names, input_shapes)}
    return predict_net, init_net, value_info


# Deconvolution group counts stashed by _pre_translate (keyed by layer name),
# so that _post_translate can restore them after translation.
_DeconvGroups = {}


def _pre_translate(layer, blobs):
    """Normalize layer parameters in place before the stock translation runs."""
    if layer.type == 'Convolution':
        _fix_conv_pool_param(layer.convolution_param)
    elif layer.type == 'Deconvolution':
        _fix_conv_pool_param(layer.convolution_param)
        if layer.convolution_param.group != 1:
            _DeconvGroups[layer.name] = layer.convolution_param.group
            layer.convolution_param.group = 0  # to trick the conversion check
    elif layer.type == 'Pooling':
        _fix_conv_pool_param(layer.pooling_param)
    elif layer.type == 'Eltwise':
        _fix_eltwise_param(layer.eltwise_param)
    elif layer.type == 'BatchNorm':
        _fix_batch_norm_param(layer.batch_norm_param, blobs)


def _post_translate(layer, blobs, ops, params):
    """Re-attach the group attribute stashed in _pre_translate to Deconvolution ops."""
    if layer.type == 'Deconvolution' and layer.convolution_param.group == 0:
        AddArgument(ops[0], 'group', _DeconvGroups[layer.name])


def _fix_conv_pool_param(param):
    """Fold 2-element repeated kernel/stride/pad fields into their _h/_w scalars."""
    if isinstance(param.kernel_size, Sequence) and len(param.kernel_size) == 2:
        param.kernel_h = param.kernel_size[0]
        param.kernel_w = param.kernel_size[1]
        del param.kernel_size[1]
        del param.kernel_size[0]
    if isinstance(param.stride, Sequence) and len(param.stride) == 2:
        param.stride_h = param.stride[0]
        param.stride_w = param.stride[1]
        del param.stride[1]
        del param.stride[0]
    if isinstance(param.pad, Sequence) and len(param.pad) == 2:
        param.pad_h = param.pad[0]
        param.pad_w = param.pad[1]
        del param.pad[1]
        del param.pad[0]


def _fix_eltwise_param(param):
    """Drop an all-ones coefficient list (equivalent to having no coefficients)."""
    if len(param.coeff) > 0 and all(c == 1 for c in param.coeff):
        for i in reversed(range(len(param.coeff))):
            del param.coeff[i]


def _fix_batch_norm_param(param, blobs):
    """Replace a zero scale-factor blob entry with 1.

    NOTE(review): presumably the translator divides the mean/variance blobs by
    this factor, so a zero would be degenerate — confirm against caffe_translator.
    """
    if len(blobs) > 2:
        if blobs[2].data[0] == 0:
            blobs[2].data[0] = 1


def _caffe2_net_to_onnx_model(predict_net, init_net, value_info):
    """Wrap Caffe2 predict/init nets into a validated ONNX ModelProto (opset 11)."""
    graph = caffe2.python.onnx.frontend.caffe2_net_to_onnx_graph(predict_net, init_net, value_info)
    if not graph.name:
        graph.name = 'Graph'
    opset_id = onnx.OperatorSetIdProto()
    opset_id.domain = ''
    opset_id.version = 11
    model = onnx.helper.make_model(graph, opset_imports=[opset_id])
    onnx.checker.check_model(model)
    return model


def _remove_unrecognized_attributes(net_def):
    """Strip attributes listed in _UnrecognizedAttribs from every op, in place."""
    for op_def in net_def.op:
        for idx in reversed(range(len(op_def.arg))):
            name = as_str(op_def.arg[idx].name)
            if name in _UnrecognizedAttribs:
                del op_def.arg[idx]


def load_caffe_model(path):
    """Load a Caffe model given its '<name>.prototxt' path.

    The '<name>.caffemodel' weight file is expected next to it.
    Returns (prototxt, caffemodel) NetParameter messages.
    """
    base, ext = os.path.splitext(path)
    assert ext == '.prototxt'  # NOTE(review): assert is stripped under -O; a ValueError would be sturdier
    with open(path) as file:
        prototxt = caffe.proto.caffe_pb2.NetParameter()
        text_format.Merge(file.read(), prototxt)
    with open(base + '.caffemodel', 'rb') as file:
        caffemodel = caffe.proto.caffe_pb2.NetParameter()
        caffemodel.ParseFromString(file.read())
    return prototxt, caffemodel


def load_caffe_model_as_onnx(path):
    """Load a Caffe model and convert it to an ONNX ModelProto."""
    prototxt, caffemodel = load_caffe_model(path)
    predict_net, init_net, value_info = _caffe_to_caffe2(prototxt, caffemodel)
    _remove_unrecognized_attributes(predict_net)
    return _caffe2_net_to_onnx_model(predict_net, init_net, value_info)


def load_caffe2_model(folder):
    """Load a Caffe2 model folder (predict_net.pb, init_net.pb, value_info.json)."""
    predict_net = caffe2_pb2.NetDef()
    with open(os.path.join(folder, 'predict_net.pb'), 'rb') as file:
        predict_net.ParseFromString(file.read())
    init_net = caffe2_pb2.NetDef()
    with open(os.path.join(folder, 'init_net.pb'), 'rb') as file:
        init_net.ParseFromString(file.read())
    with open(os.path.join(folder, 'value_info.json')) as file:
        value_info = json.load(file)
    return predict_net, init_net, value_info


def load_caffe2_model_as_onnx(folder):
    """Load a Caffe2 model folder and convert it to an ONNX ModelProto."""
    predict_net, init_net, value_info = load_caffe2_model(folder)
    _remove_unrecognized_attributes(predict_net)
    return _caffe2_net_to_onnx_model(predict_net, init_net, value_info)


class Reader:
    """Reads a Caffe (legacy=True) or Caffe2 model into the internal Graph
    representation, going through ONNX and its shape inference."""

    def __init__(self, legacy=False):
        self._legacy = legacy

    def __call__(self, path):
        onnx_model = load_caffe_model_as_onnx(path) if self._legacy else load_caffe2_model_as_onnx(path)
        onnx.checker.check_model(onnx_model)
        onnx_model = infer_shapes(onnx_model)
        return onnx_model_to_graph(onnx_model)


# ================================================
# FILE: nnef_tools-pyproject/nnef_tools/io/caffe2/writer.py
# ================================================

# Copyright (c) 2020 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from caffe2.python.onnx.backend import Caffe2Backend from ..onnx.writer import build_model, build_dtype from ..onnx.reader import _get_value_info from ...utils.types import as_str import json import os def save_caffe2_model(folder, init_net, predict_net, value_info): with open(os.path.join(folder, 'init_net.pb'), 'wb') as file: file.write(init_net.SerializeToString()) with open(os.path.join(folder, 'predict_net.pb'), 'wb') as file: file.write(predict_net.SerializeToString()) with open(os.path.join(folder, 'value_info.json'), 'w') as file: json.dump(value_info, file) def get_value_info(onnx_model): initializer_names = {as_str(info.name) for info in onnx_model.graph.initializer} value_info = {} for info in onnx_model.graph.input: name, shape, dtype = _get_value_info(info) if name not in initializer_names: value_info[name] = (build_dtype(dtype), shape) return value_info class Writer: def __init__(self): pass def __call__(self, graph, folder): onnx_model = build_model(graph, ir_version=6, opset_version=9) if not onnx_model.graph.name: onnx_model.graph.name = 'Graph' init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model) value_info = get_value_info(onnx_model) if not os.path.exists(folder): os.mkdir(folder) save_caffe2_model(folder, init_net, predict_net, value_info) ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/nnef/__init__.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. from .reader import Reader from .writer import Writer ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/nnef/helpers.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tarfile def tgz_compress(dir_path, file_path, compression_level=0): target_directory = os.path.dirname(file_path) if target_directory and not os.path.exists(target_directory): os.makedirs(target_directory) with tarfile.open(file_path, 'w:gz', compresslevel=compression_level) as tar: for file_ in os.listdir(dir_path): tar.add(dir_path + '/' + file_, file_) def tgz_extract(file_path, dir_path): if dir_path and not os.path.exists(dir_path): os.makedirs(dir_path) with tarfile.open(file_path, 'r:gz') as tar: tar.extractall(dir_path) ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/nnef/reader.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import division, print_function, absolute_import import os import shutil import tempfile from collections import OrderedDict import nnef import numpy as np import six from ...model import * from ...utils import types from .helpers import tgz_extract _DtypeToNumpy = { 'scalar': np.float32, 'integer': np.int64, 'logical': np.bool_, } def _recursive_itemize(arg): if type(arg) is tuple or type(arg) is list: for item in arg: yield from _recursive_itemize(item) elif type(arg) is dict or type(arg) is OrderedDict: for item in six.itervalues(arg): yield from _recursive_itemize(item) else: yield arg def _make_constant_tensor(graph, value): value = types.to_numpy(value) return Tensor(graph=graph, shape=(), dtype=value.dtype.type, data=value) def _make_tensor(graph, nnef_tensor): dtype = nnef_tensor.data.dtype.type if isinstance(nnef_tensor.data, np.ndarray) else _DtypeToNumpy[nnef_tensor.dtype] return Tensor(graph=graph, name=nnef_tensor.name, shape=tuple(nnef_tensor.shape) if nnef_tensor.shape is not None else None, dtype=dtype, data=nnef_tensor.data, quant=nnef_tensor.quantization) def _build_graph(nnef_graph): graph = Graph(name=nnef_graph.name) tensor_by_name = {} for nnef_op in nnef_graph.operations: inputs = (tensor_by_name[item] if isinstance(item, nnef.Identifier) else _make_constant_tensor(graph, item) for item in _recursive_itemize(nnef_op.inputs)) inputs = list(inputs) if any(isinstance(item, list) for item in six.itervalues(nnef_op.inputs)) else tuple(inputs) outputs = (_make_tensor(graph, nnef_graph.tensors[str(item)]) for item in 
_recursive_itemize(nnef_op.outputs)) outputs = list(outputs) if any(isinstance(item, list) for item in six.itervalues(nnef_op.outputs)) else tuple(outputs) for tensor in outputs: tensor_by_name[str(tensor.name)] = tensor attribs = dict(nnef_op.attribs) if nnef_op.dtype is not None: attribs['dtype'] = outputs[0].dtype if nnef_op.name == 'constant' or nnef_op.name == 'variable' else \ _DtypeToNumpy[nnef_op.dtype] _substitute_empty_array(nnef_op.name, 'stride', attribs, inputs) _substitute_empty_array(nnef_op.name, 'dilation', attribs, inputs) custom = nnef_op.name not in nnef.StandardOperations Operation(graph=graph, type=nnef_op.name, attribs=attribs, inputs=inputs, outputs=outputs, custom=custom) graph.inputs = [tensor_by_name[str(item)] for item in nnef_graph.inputs] graph.outputs = [tensor_by_name[str(item)] for item in nnef_graph.outputs] return graph def _substitute_empty_array(op, key, attribs, inputs): value = attribs.get(key) if value is not None and len(value) == 0: rank = None if op == 'slice': rank = len(attribs['axes']) elif len(inputs) > 0 and inputs[0].rank is not None: rank = inputs[0].rank - 2 if op.endswith('conv') else inputs[0].rank if rank is not None: attribs[key] = [1] * rank class Reader(object): def __init__(self, stdlib=None, decomposed=None, custom_shapes=None, infer_shapes=True, load_variables=True): self._stdlib = stdlib self._decomposed = decomposed self._custom_shapes = custom_shapes self._infer_shapes = infer_shapes self._load_variables = load_variables def __call__(self, path, input_shapes=None): filename, extension = os.path.splitext(path) compressed = extension in ['.tgz', '.gz'] and not os.path.isdir(path) folder = None try: if compressed: folder = tempfile.mkdtemp(prefix="nnef_") tgz_extract(path, folder) path = folder if not os.path.isdir(path): raise IOError("NNEF model must be a (compressed) folder, but an uncompressed file was provided") nnef_graph = nnef.load_graph(path, stdlib=self._stdlib, lowered=self._decomposed, 
load_variables=self._load_variables) if self._infer_shapes: nnef.infer_shapes(nnef_graph, external_shapes=input_shapes or {}, custom_shapes=self._custom_shapes or {}) return _build_graph(nnef_graph) finally: if folder is not None: shutil.rmtree(folder) ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/nnef/writer.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import division, print_function, absolute_import import nnef import numpy as np import tempfile import shutil import six import os from .helpers import tgz_compress from ...model import Tensor from ...utils.types import as_str, from_numpy _DtypeFromNumpy = { np.float16: 'scalar', np.float32: 'scalar', np.float64: 'scalar', np.int8: 'integer', np.uint8: 'integer', np.int16: 'integer', np.uint16: 'integer', np.int32: 'integer', np.uint32: 'integer', np.int64: 'integer', np.uint64: 'integer', np.bool_: 'logical', } _DtypeFromPyType = { str: 'string', float: 'scalar', int: 'integer', bool: 'logical', None: 'dtype', } def _nnef_dtype(dtype): return _DtypeFromNumpy[dtype.type if isinstance(dtype, np.dtype) else dtype] if dtype is not None else None def _print(graph, file, extensions, fragments, version_custom_ops, annotate_shapes): assert graph.is_sorted(), "graph must be topologically sorted" assert all(tensor.name is not None or (tensor.producer is None and tensor.data is not 
None) for tensor in graph.tensors), \ "all tensors must have names" assert all(all(s is not None for s in op.attribs['shape']) for op in graph.operations if op.type == 'external'), \ "external ops must not contain undefined shapes" print(nnef.format_version((1, 0)), file=file) if len(extensions): print(file=file) print(nnef.format_extensions(extensions), file=file) if fragments: print(file=file) print(fragments, file=file) print(file=file) graph_name = as_str(graph.name) if graph.name is not None else "G" graph_inputs = [as_str(item.name) for item in graph.inputs] graph_outputs = [as_str(item.name) for item in graph.outputs] print("graph {}({}) -> ({})".format(graph_name, ', '.join(graph_inputs), ', '.join(graph_outputs)), file=file) print("{", file=file) versions = {} for op in graph.operations: assert all(isinstance(item, Tensor) for item in op.outputs) inputs = ((from_numpy(item.data) if item.producer is None else nnef.Identifier(as_str(item.name))) if isinstance(item, Tensor) else item for item in op.inputs) inputs = tuple(inputs) if isinstance(op.inputs, tuple) else (list(inputs),) outputs = (nnef.Identifier(as_str(item.name)) for item in op.outputs) outputs = tuple(outputs) if isinstance(op.outputs, tuple) else (list(outputs),) attribs = {as_str(key): value for key, value in six.iteritems(op.attribs)} name = _next_version(op.type, versions) if op.type not in nnef.StandardOperations and version_custom_ops else op.type dtype = attribs.get('dtype') if dtype is not None: dtype = _nnef_dtype(dtype) del attribs['dtype'] for key, value in six.iteritems(attribs): if isinstance(value, (type, np.dtype)): attribs[key] = _nnef_dtype(value) invocation = nnef.format_invocation(name=name, dtype=dtype, attribs=attribs, inputs=inputs, outputs=outputs) annotation = " # " + ", ".join(_nnef_dtype(output.dtype) + str(output.shape) for output in op.outputs) \ if annotate_shapes else '' print(" {};{}".format(invocation, annotation), file=file) print("}", file=file) def 
_write_tensor(array, filename, quantized): directory = os.path.dirname(filename) if directory and not os.path.exists(directory): os.makedirs(directory) with open(filename, "wb") as file: nnef.write_tensor(file=file, tensor=array, quantized=quantized) def _write_quantization(graph, file): for tensor in graph.tensors: if tensor.quant: op_name = tensor.quant['op-name'] attribs = ', '.join("{} = {}".format(k, _printable_value(v)) for k, v in six.iteritems(tensor.quant) if k != 'op-name' and v is not None) if attribs: print('"{}": {}({});'.format(tensor.name, op_name, attribs), file=file) def _printable_value(v): if type(v) == bool: return 'true' if v else 'false' elif type(v) == np.ndarray: return v.tolist() else: return v def _next_version(name, versions): version = versions.get(name, 0) + 1 versions[name] = version return '{}_v{}'.format(name, version) def _generate_custom_fragments(graph, fragments, version): versions = {} if version else None return '\n'.join(_generate_fragment(op, versions) for op in graph.operations if op.type not in nnef.StandardOperations and op.type not in fragments) def _generate_fragment(op, versions): attribs = {key: _make_attrib_type(value) for key, value in op.attribs.items()} inputs = [_make_tensor_type(value) for value in op.inputs] outputs = [_make_tensor_type(value) for value in op.outputs] dtype = _nnef_dtype(op.attribs.get('dtype')) name = _next_version(op.type, versions) if versions is not None else op.type return 'fragment ' + _fragment_signature(name, dtype, attribs, inputs, outputs) + ';' def _fragment_signature(name, dtype, attribs, inputs, outputs): str = name if dtype is not None: str += '<' + dtype + '>' str += '( ' str += _types_str(['_I{}'.format(i + 1) for i in range(len(inputs))], inputs, True) if len(inputs) and len(attribs): str += ', ' str += _types_str(attribs.keys(), attribs.values(), False) str += ' ) -> ( ' str += _types_str(['_O{}'.format(i + 1) for i in range(len(outputs))], outputs, True) str += ' )' return str 
def _make_attrib_type(value): repeated = False if isinstance(value, list): if len(value) == 0: return None, False tp = type(value[0]) if not all(type(v) == tp for v in value): return None, False repeated = True value = value[0] if not isinstance(value, (float, int, bool, str)): return None, False return _DtypeFromPyType[type(value)], repeated def _make_tensor_type(value): repeated = False if isinstance(value, list): if len(value) == 0: return None, False dtype = value[0].dtype if not all(v.dtype == dtype for v in value): return None, False repeated = True value = value[0] return _nnef_dtype(value.dtype), repeated def _types_str(names, items, tensor): return ', '.join(name + ': ' + ('tensor<{}>'.format(type) if tensor else type) + ('[]' if repeated else '') for name, (type, repeated) in zip(names, items)) class Writer(object): def __init__(self, compression=None, extensions=None, fragments=None, fragment_dependencies=None, generate_custom_fragments=False, version_custom_fragments=True, annotate_shapes=False): self._compression = compression self._extensions = extensions or [] self._fragments = fragments or {} self._fragment_dependencies = fragment_dependencies or {} self._generate_custom_fragments = generate_custom_fragments self._version_custom_fragments = version_custom_fragments self._annotate_shapes = annotate_shapes def __call__(self, graph, path): folder = None try: if self._compression is not None: folder = tempfile.mkdtemp(prefix="nnef_") else: folder = path if not os.path.exists(folder): os.makedirs(folder) used_operators = self._used_operators(graph, self._fragment_dependencies) fragments = "".join(text for name, text in six.iteritems(self._fragments) if name in used_operators) if self._generate_custom_fragments: customs = _generate_custom_fragments(graph, fragments=self._fragments, version=self._version_custom_fragments) if fragments and customs: fragments += "\n" fragments += customs if len(fragments): if "KHR_enable_fragment_definitions" not in 
self._extensions: self._extensions.append("KHR_enable_fragment_definitions") if "KHR_enable_operator_expressions" not in self._extensions: self._extensions.append("KHR_enable_operator_expressions") graph_filename = os.path.join(folder, 'graph.nnef') with open(graph_filename, 'w') as file: _print(graph, file, extensions=self._extensions, fragments=fragments, version_custom_ops=self._generate_custom_fragments and self._version_custom_fragments, annotate_shapes=self._annotate_shapes) for op in graph.operations: if op.type == 'variable': filename = op.attribs['label'] + ".dat" if filename.startswith('/'): filename = filename[1:] _write_tensor(np.asarray(op.output.data, order='C'), os.path.join(folder, filename), quantized=True if op.output.quant else False) if any(tensor.quant for tensor in graph.tensors): quant_filename = os.path.join(folder, 'graph.quant') with open(quant_filename, 'w') as file: _write_quantization(graph, file) finally: if self._compression is not None and folder: tgz_compress(folder, path + '.tgz', compression_level=self._compression) shutil.rmtree(folder) @staticmethod def _used_operators(graph, dependencies): used = {op.type for op in graph.operations} count = len(used) changed = True while changed: for key, deps in six.iteritems(dependencies): if key in used: used.update(deps) changed = len(used) > count count = len(used) return used ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/onnx/__init__.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .reader import Reader from .writer import Writer ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/onnx/reader.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import division, print_function, absolute_import

from ...model import *
from ...utils.types import as_str
from onnx.shape_inference import infer_shapes
import numpy as np
import onnx
import sys


# ONNX raw_data is specified as little-endian; on big-endian hosts the bytes
# read with np.frombuffer must be swapped (see _get_tensor).
_is_little_endian_system = (sys.byteorder == 'little')

# Maps onnx.TensorProto.DataType enum names to numpy scalar types
# (None means the dtype has no numpy equivalent).
_DtypeToNumpy = {
    'UNDEFINED': None,
    'FLOAT': np.float32,
    'UINT8': np.uint8,
    'INT8': np.int8,
    'UINT16': np.uint16,
    'INT16': np.int16,
    'INT32': np.int32,
    'INT64': np.int64,
    'STRING': np.str_,
    'BOOL': np.bool_,
    'FLOAT16': np.float16,
    'DOUBLE': np.float64,
    'UINT32': np.uint32,
    'UINT64': np.uint64,
    'COMPLEX64': np.complex64,
    'COMPLEX128': np.complex128,
}


def _get_shape(tensor_shape_proto):
    """Convert a TensorShapeProto to a list of ints; symbolic dims become None."""
    return ([int(dim.dim_value) if dim.HasField('dim_value') else None for dim in tensor_shape_proto.dim]
            if tensor_shape_proto is not None else None)


def _get_dtype(dtype_int):
    """Map an ONNX DataType enum value to a numpy scalar type (or None)."""
    return _DtypeToNumpy[onnx.TensorProto.DataType.Name(dtype_int)]


def _get_field(proto, name, default=None):
    """Return proto.<name> if the optional field is present, else default."""
    return getattr(proto, name) if proto.HasField(name) else default


def _get_value_info(value_info_proto):
    """Extract (name, shape, dtype) from a ValueInfoProto."""
    name = as_str(value_info_proto.name)
    shape = _get_shape(_get_field(value_info_proto.type.tensor_type, 'shape'))
    dtype = _get_dtype(value_info_proto.type.tensor_type.elem_type)
    return name, shape, dtype


def _get_tensor(tensor_proto):
    """Extract (name, shape, dtype, data) from a TensorProto initializer.

    Handles both the packed 'raw_data' representation and the per-dtype
    typed repeated fields defined by the ONNX spec.
    """
    assert not tensor_proto.HasField('segment'), 'TensorProto.segment is not supported'

    name = as_str(tensor_proto.name)
    shape = [int(dim) for dim in tensor_proto.dims]
    dtype = _get_dtype(tensor_proto.data_type)
    assert dtype is not None

    if tensor_proto.HasField('raw_data'):
        assert dtype != np.str_
        data = np.frombuffer(tensor_proto.raw_data, dtype)
        # raw_data is little-endian by spec; swap on big-endian hosts
        if not _is_little_endian_system:
            data = data.byteswap()
    else:
        if dtype == np.float32:
            data = np.array(tensor_proto.float_data, dtype)
        elif dtype == np.float64:
            data = np.array(tensor_proto.double_data, dtype)
        elif dtype == np.int64:
            data = np.array(tensor_proto.int64_data, dtype)
        elif dtype == np.str_:
            data = np.array(as_str(tensor_proto.string_data))
        elif dtype == np.float16:
            # float16 payload is bit-packed into int32_data per the ONNX spec
            data = np.array(tensor_proto.int32_data, np.uint16).view(np.float16)
        elif dtype == np.complex64:
            # stored as interleaved (real, imag) float pairs
            data = np.array(tensor_proto.float_data, np.float32)
            data = data[0::2] + data[1::2] * 1j
        elif dtype == np.complex128:
            data = np.array(tensor_proto.double_data, np.float64)
            data = data[0::2] + data[1::2] * 1j
        elif dtype in [np.int8, np.uint8, np.int16, np.uint16, np.int32, np.bool_]:
            # narrow integer/bool types are widened into int32_data
            data = np.array(tensor_proto.int32_data, dtype)
        elif dtype in [np.uint32, np.uint64]:
            data = np.array(tensor_proto.uint64_data, dtype)
        else:
            assert False

    data = data.reshape(shape)
    return name, shape, dtype, data


def _get_tensor_data(tensor_proto):
    """Return only the numpy data of a TensorProto."""
    name, shape, dtype, data = _get_tensor(tensor_proto)
    return data


def _get_tensors(graph_proto, graph, tensors_by_name):
    """Create a Tensor for every name occurring in graph_proto, recursing into subgraphs.

    Later sources overwrite earlier ones in tensors_by_name (inputs, outputs,
    value_info, then initializers), so initializer data wins when present.
    """
    for value_info in graph_proto.input:
        name, shape, dtype = _get_value_info(value_info)
        tensors_by_name[name] = Tensor(graph=graph, name=name, shape=shape, dtype=dtype)
    for value_info in graph_proto.output:
        name, shape, dtype = _get_value_info(value_info)
        tensors_by_name[name] = Tensor(graph=graph, name=name, shape=shape, dtype=dtype)
    for value_info in graph_proto.value_info:
        name, shape, dtype = _get_value_info(value_info)
        tensors_by_name[name] = Tensor(graph=graph, name=name, shape=shape, dtype=dtype)
    for tensor_proto in graph_proto.initializer:
        name, shape, dtype, data = _get_tensor(tensor_proto)
        tensors_by_name[name] = Tensor(graph=graph, name=name, shape=shape, dtype=dtype, data=data)
    for node in graph_proto.node:
        for tensor_name in node.output:
            tensor_name = as_str(tensor_name)
            if tensor_name not in tensors_by_name:
                if len(tensor_name) == 0:
                    # the empty name denotes an unused optional output; give it
                    # a dummy scalar so downstream code need not special-case it
                    tensors_by_name[tensor_name] = Tensor(graph, name='', shape=(), dtype=np.float32,
                                                          data=np.zeros(shape=(), dtype=np.float32))
                else:
                    tensors_by_name[tensor_name] = Tensor(graph, name=tensor_name)
    # recurse into subgraph attributes (If/Loop/Scan bodies)
    for node in graph_proto.node:
        for attribute in node.attribute:
            if attribute.HasField('g'):
                _get_tensors(attribute.g, graph, tensors_by_name)
            if attribute.graphs:
                for g in attribute.graphs:
                    _get_tensors(g, graph, tensors_by_name)


def _get_node(node_proto, graph, tensors_by_name):
    """Extract (inputs, outputs, name, domain, op_type, attributes) from a NodeProto."""
    inputs = [as_str(input) for input in node_proto.input]
    outputs = [as_str(output) for output in node_proto.output]
    name = as_str(_get_field(node_proto, 'name'))
    domain = as_str(_get_field(node_proto, 'domain'))
    op_type = as_str(node_proto.op_type)
    attributes = {}
    for attribute in node_proto.attribute:
        key, value = _get_attribute(attribute, graph, tensors_by_name)
        attributes[key] = value
    return inputs, outputs, name, domain, op_type, attributes


def _get_attribute(attribute_proto, graph, tensors_by_name):
    """Convert an AttributeProto to a (name, python-value) pair.

    Singular fields are checked via HasField; repeated fields fall through to
    truthiness tests. An attribute with no populated field becomes [].
    """
    assert not attribute_proto.HasField('ref_attr_name')
    name = as_str(attribute_proto.name)
    if attribute_proto.HasField('f'):
        value = float(attribute_proto.f)
    elif attribute_proto.HasField('i'):
        value = int(attribute_proto.i)
    elif attribute_proto.HasField('s'):
        value = as_str(attribute_proto.s)
    elif attribute_proto.HasField('t'):
        value = _get_tensor_data(attribute_proto.t)
    elif attribute_proto.HasField('g'):
        g = attribute_proto.g
        value = _get_block(g, Graph(name=as_str(_get_field(g, 'name'))), tensors_by_name)
    elif attribute_proto.floats:
        value = [float(f) for f in attribute_proto.floats]
    elif attribute_proto.ints:
        value = [int(i) for i in attribute_proto.ints]
    elif attribute_proto.strings:
        value = [as_str(s) for s in attribute_proto.strings]
    elif attribute_proto.tensors:
        value = [_get_tensor_data(t) for t in attribute_proto.tensors]
    elif attribute_proto.graphs:
        value = [_get_block(g, Graph(name=as_str(_get_field(g, 'name'))), tensors_by_name)
                 for g in attribute_proto.graphs]
    else:
        value = []
    return name, value


def _get_block(graph_proto, graph, tensors_by_name):
    """Populate 'graph' (inputs/outputs, tensor metadata, quantization, ops) from graph_proto.

    Assumes tensors_by_name has already been filled by _get_tensors.
    Returns the populated graph.
    """
    # initializers listed among inputs are constants, not true graph inputs
    initializer_names = {as_str(value_info.name) for value_info in graph_proto.initializer}
    input_names = [as_str(value_info.name) for value_info in graph_proto.input
                   if as_str(value_info.name) not in initializer_names]
    output_names = [as_str(value_info.name) for value_info in graph_proto.output]

    graph.inputs = [tensors_by_name[name] for name in input_names]
    graph.outputs = [tensors_by_name[name] for name in output_names]

    for value_info in graph_proto.input:
        name, shape, dtype = _get_value_info(value_info)
        tensor = tensors_by_name[name]
        tensor.shape, tensor.dtype = shape, dtype
    for value_info in graph_proto.output:
        name, shape, dtype = _get_value_info(value_info)
        tensor = tensors_by_name[name]
        tensor.shape, tensor.dtype = shape, dtype
    for value_info in graph_proto.value_info:
        name, shape, dtype = _get_value_info(value_info)
        tensor = tensors_by_name[name]
        tensor.shape, tensor.dtype = shape, dtype
    for tensor_proto in graph_proto.initializer:
        name, shape, dtype, data = _get_tensor(tensor_proto)
        tensor = tensors_by_name[name]
        tensor.shape, tensor.dtype, tensor.data = shape, dtype, data

    # quantization parameters are stored as references to other tensors' data
    for annotation in graph_proto.quantization_annotation:
        tensor = tensors_by_name[annotation.tensor_name]
        tensor.quant = {item.key: tensors_by_name[item.value].data
                        for item in annotation.quant_parameter_tensor_names}

    for node in graph_proto.node:
        inputs, outputs, name, domain, op_type, attributes = _get_node(node, graph, tensors_by_name)
        Operation(
            graph=graph,
            type=op_type,
            name=name,
            inputs=tuple(tensors_by_name[input] for input in inputs),
            outputs=tuple(tensors_by_name[output] for output in outputs),
            attribs=attributes)

    return graph


def _set_input_shapes(graph_proto, input_shapes):
    """Overwrite unknown input dims in graph_proto with the shapes from input_shapes.

    Each provided shape must match the declared rank and agree with every
    already-known dimension.
    """
    for value_info in graph_proto.input:
        name, shape, dtype = _get_value_info(value_info)
        input_shape = input_shapes.get(name)
        if input_shape is not None:
            assert len(input_shape) == len(shape) and all(s is None or z == s for s, z in zip(shape, input_shape))
            for i, s in enumerate(input_shape):
                value_info.type.tensor_type.shape.dim[i].dim_value = s


# This is for working around a bug in ONNX IR, see https://github.com/onnx/onnx/issues/2903
def _add_value_info_for_constants(model: onnx.ModelProto):
    """
    Currently onnx.shape_inference doesn't use the shape of initializers, so add
    that info explicitly as ValueInfoProtos.
    Mutates the model.
    Args:
        model: The ModelProto to update.
    """
    # All (top-level) constants will have ValueInfos before IRv4 as they are all inputs
    if model.ir_version < 4:
        return

    def add_const_value_infos_to_graph(graph: onnx.GraphProto):
        inputs = {i.name for i in graph.input}
        existing_info = {vi.name: vi for vi in graph.value_info}
        for init in graph.initializer:
            # Check it really is a constant, not an input
            if init.name in inputs:
                continue

            # The details we want to add
            elem_type = init.data_type
            shape = init.dims

            # Get existing or create new value info for this constant
            vi = existing_info.get(init.name)
            if vi is None:
                vi = graph.value_info.add()
                vi.name = init.name

            # Even though it would be weird, we will not overwrite info even if it doesn't match
            tt = vi.type.tensor_type
            if tt.elem_type == onnx.TensorProto.UNDEFINED:
                tt.elem_type = elem_type
            if not tt.HasField("shape"):
                # Ensure we set an empty list if the const is scalar (zero dims)
                tt.shape.dim.extend([])
                for dim in shape:
                    tt.shape.dim.add().dim_value = dim

        # Handle subgraphs
        for node in graph.node:
            for attr in node.attribute:
                # Ref attrs refer to other attrs, so we don't need to do anything
                if attr.ref_attr_name != "":
                    continue

                if attr.type == onnx.AttributeProto.GRAPH:
                    add_const_value_infos_to_graph(attr.g)
                if attr.type == onnx.AttributeProto.GRAPHS:
                    for g in attr.graphs:
                        add_const_value_infos_to_graph(g)

    return add_const_value_infos_to_graph(model.graph)


def onnx_model_to_graph(onnx_model):
    """Convert an onnx.ModelProto into the project's internal Graph representation."""
    graph = Graph(name=as_str(_get_field(onnx_model.graph, 'name')))
    # pre-seed the empty name with a dummy scalar (used for optional inputs/outputs)
    tensors_by_name = {'': Tensor(graph, name='', shape=(), dtype=np.float32,
                                  data=np.zeros(shape=(), dtype=np.float32))}
    _get_tensors(onnx_model.graph, graph, tensors_by_name)
    _get_block(onnx_model.graph, graph, tensors_by_name)
    return graph


def read_tensor(filename):
    """Load a serialized TensorProto file and return its data as a numpy array."""
    with open(filename, 'rb') as file:
        return _get_tensor_data(onnx.load_tensor(file))


class Reader(object):
    """Reads an ONNX model file into the internal Graph representation.

    Optionally runs onnx-simplifier and overrides input shapes before
    shape inference.
    """

    def __init__(self, simplify=False, optimize=None):
        self._simplify = simplify
        # optimization defaults to following the simplify flag
        self._optimize = optimize or simplify

    def __call__(self, filename, input_shapes=None):
        model_proto = onnx.load_model(filename)
        _add_value_info_for_constants(model_proto)
        if self._simplify:
            # imported lazily so onnxsim is only required when simplify=True
            from onnxsim import simplify
            model_proto, _ = simplify(model_proto, overwrite_input_shapes=input_shapes,
                                      perform_optimization=self._optimize)
        if input_shapes:
            _set_input_shapes(model_proto.graph, input_shapes)
        model_proto = infer_shapes(model_proto)
        return onnx_model_to_graph(model_proto)



================================================
FILE: nnef_tools-pyproject/nnef_tools/io/onnx/writer.py
================================================
# Copyright (c) 2020 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...model import * import numpy as np import six import onnx _DtypeFromNumpy = { None: 'UNDEFINED', np.float32: 'FLOAT', np.uint8: 'UINT8', np.int8: 'INT8', np.uint16: 'UINT16', np.int16: 'INT16', np.int32: 'INT32', np.int64: 'INT64', np.str_: 'STRING', np.bool_: 'BOOL', np.float16: 'FLOAT16', np.float64: 'DOUBLE', np.uint32: 'UINT32', np.uint64: 'UINT64', np.complex64: 'COMPLEX64', np.complex128: 'COMPLEX128', } def build_model(graph, ir_version, opset_version): # type: (Graph)->onnx.ModelProto model_proto = onnx.ModelProto() build_graph(graph, model_proto.graph) model_proto.ir_version = ir_version model_proto.opset_import.add() model_proto.opset_import[0].version = opset_version return model_proto def build_graph(graph, graph_proto): # type: (Graph, onnx.GraphProto)->None for idx, op in enumerate(graph.operations): node_proto = graph_proto.node.add() build_node(op, node_proto, idx) if graph.name is not None: graph_proto.name = graph.name for input in list(graph.inputs) + list(t for t in graph.tensors if t.is_constant and t.name != ''): value_info_proto = graph_proto.input.add() build_value_info(input, value_info_proto) for output in graph.outputs: value_info_proto = graph_proto.output.add() build_value_info(output, value_info_proto) for tensor in graph.tensors: if tensor.is_constant and tensor.name != '': tensor_proto = graph_proto.initializer.add() build_tensor_proto(tensor, tensor_proto) if tensor.quant: build_quantization(tensor, graph_proto) def build_value_info(tensor, value_info_proto): # type: (Tensor, onnx.ValueInfoProto)->None value_info_proto.name = tensor.name value_info_proto.type.tensor_type.elem_type = build_dtype(tensor.dtype) if tensor.shape: for s in tensor.shape: dim = value_info_proto.type.tensor_type.shape.dim.add() if s is not None: dim.dim_value = s else: value_info_proto.type.tensor_type.shape.SetInParent() def build_dtype(dtype): dtype = dtype.type if isinstance(dtype, np.dtype) else dtype return 
onnx.TensorProto.DataType.Value(_DtypeFromNumpy[dtype]) def build_attribute_type(name): return onnx.AttributeProto.AttributeType.Value(name) def build_tensor_data(data, tensor_proto): # type: (np.ndarray, onnx.TensorProto)->None for s in data.shape: tensor_proto.dims.append(s) tensor_proto.data_type = build_dtype(data.dtype) if data.dtype == np.str_: tensor_proto.string_data = str(data) else: data = data.flatten().astype(data.dtype) if data.dtype in [np.complex64, np.complex128]: data = np.column_stack((np.real(data), np.imag(data))).flatten() if data.dtype.str[0] != "<": data = data.byteswap() tensor_proto.raw_data = data.tobytes() def build_tensor_proto(tensor, tensor_proto): # type: (Tensor, onnx.TensorProto)->None if isinstance(tensor.data, np.ndarray): data = tensor.data elif isinstance(tensor.data, (list, tuple)): data = np.array(tensor.data, dtype=tensor.dtype).reshape(tensor.shape) else: data = np.full(shape=tensor.shape, fill_value=tensor.data, dtype=tensor.dtype) build_tensor_data(data, tensor_proto) tensor_proto.name = tensor.name def build_quantization(tensor, graph_proto): tensor_annotation = graph_proto.quantization_annotation.add() tensor_annotation.tensor_name = tensor.name for key, value in tensor.quant: value_tensor_name = tensor.name + '/' + key if not isinstance(value, np.ndarray): value = np.array(value) tensor_proto = graph_proto.initializer.add() build_tensor_data(value, tensor_proto) tensor_proto.name = value_tensor_name item = tensor_annotation.quant_parameter_tensor_names.add() item.key = key item.value = value_tensor_name def build_node(op, node_proto, idx): # type: (Operation, onnx.NodeProto)->None inputs = op.inputs attribs = op.attribs for input in inputs: node_proto.input.append(input.name) for output in op.outputs: node_proto.output.append(output.name) node_proto.op_type = op.type node_proto.name = op.name or (op.type + str(idx)) for k, v in six.iteritems(attribs): attribute_proto = node_proto.attribute.add() build_attribute(k, v, 
attribute_proto) def build_attribute(key, value, attribute_proto): # type: (str, typing.Any, onnx.AttributeProto)->None attribute_proto.name = key if isinstance(value, np.ndarray): attribute_proto.type = build_attribute_type('TENSOR') build_tensor_data(value, attribute_proto.t) elif isinstance(value, int): attribute_proto.type = build_attribute_type('INT') attribute_proto.i = value elif isinstance(value, float): attribute_proto.type = build_attribute_type('FLOAT') attribute_proto.f = value elif isinstance(value, str): attribute_proto.type = build_attribute_type('STRING') attribute_proto.s = value.encode('utf-8') elif isinstance(value, (type, np.dtype)): attribute_proto.type = build_attribute_type('INT') attribute_proto.i = build_dtype(value) elif isinstance(value, Graph): attribute_proto.type = build_attribute_type('GRAPH') build_graph(value, attribute_proto.g) elif isinstance(value, (list, tuple)): if len(value) == 0: attribute_proto.type = build_attribute_type('INTS') # TODO better else: if isinstance(value[0], int): attribute_proto.type = build_attribute_type('INTS') for v in value: attribute_proto.ints.append(v) elif isinstance(value[0], float): attribute_proto.type = build_attribute_type('FLOATS') for v in value: attribute_proto.floats.append(v) elif isinstance(value[0], str): attribute_proto.type = build_attribute_type('STRINGS') for v in value: attribute_proto.strings.append(v.encode('utf-8')) elif isinstance(value[0], Graph): attribute_proto.type = build_attribute_type('GRAPHS') for v in value: g = attribute_proto.graphs.add() build_graph(v, g) else: assert False, \ "Unsupported attribute: {}: {} of type: List[{}]".format(key, value, type(value[0]).__name__) else: assert False, "Unsupported attribute: {}: {} of type: {}".format(key, value, type(value).__name__) class Writer(object): def __init__(self, ir_version=6, opset_version=11): self._ir_version = ir_version self._opset_version = opset_version def __call__(self, graph, filename): model_proto = 
build_model(graph, self._ir_version, self._opset_version) onnx.checker.check_model(model_proto) with open(filename, 'wb') as file: file.write(model_proto.SerializeToString()) ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/__init__.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/graphdef/__init__.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from .reader import Reader
from .writer import Writer
from .composite import replace_composites_with_py_functions, reset_composites
from .utils import set_input_shapes, fold_constant_tensors, retain_reachables_from_outputs, insert_rename_identities
from .utils import import_graph_def, export_graph_def, check_finite, check_variables

try:
    import tensorflow.compat.v1 as tf
except ImportError:
    import tensorflow as tf

# 'composite' is bound in this package namespace as a side effect of the
# 'from .composite import ...' above (importing a submodule binds it on the package)
composite_function = composite.function


def save_default_graph(filename, session, outputs, input_shapes=None, fold_constants=True, collapse_composites=True):
    """Freeze the default TF graph (variables -> constants) and save it as a GraphDef file.

    outputs may be a list of tensors (auto-named) or a {tensor: name} dict;
    optionally overrides input shapes, folds constants and collapses
    registered composite functions into PyFunc nodes.
    """
    check_variables(session)
    if not isinstance(outputs, dict):
        # derive output names: strip a trailing ':0', otherwise make ':' safe
        outputs = {tensor: tensor.name[:-2] if tensor.name.endswith(':0') else tensor.name.replace(':', '_')
                   for tensor in outputs}
    output_names = list(outputs.values())

    graph_def = export_graph_def(tf.get_default_graph())
    graph_def = insert_rename_identities(graph_def, outputs)
    graph_def = tf.graph_util.convert_variables_to_constants(session, graph_def, output_names)
    graph_def = retain_reachables_from_outputs(graph_def, output_names)
    check_finite(graph_def)
    if input_shapes:
        graph_def = set_input_shapes(graph_def, input_shapes)
    if fold_constants:
        graph_def = fold_constant_tensors(graph_def)
    if collapse_composites:
        graph_def = replace_composites_with_py_functions(graph_def)
        # re-check after graph surgery
        check_finite(graph_def)
    with open(filename, 'wb') as file:
        file.write(graph_def.SerializeToString())


def load_default_graph(filename):
    """Load a serialized GraphDef file into the default TF graph."""
    from .protobuf import GraphDef
    with tf.io.gfile.GFile(filename, "rb") as file:
        graph_def = GraphDef()
        graph_def.ParseFromString(file.read())
    tf.import_graph_def(graph_def, name='')



================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/graphdef/composite.py
================================================
# Copyright (c) 2020 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .protobuf import GraphDef, NodeDef
from .writer import _build_attribute
from .utils import import_graph_def

try:
    import tensorflow.compat.v1 as tf
except ImportError:
    import tensorflow as tf

from collections.abc import Sequence
import inspect


class _Composite:
    """Record of one invocation of a composite-decorated function.

    Captures the function, its non-tensor arguments (attribs), its tensor
    inputs and outputs, so the sub-graph it built can later be replaced by a
    single PyFunc node.
    """

    # global registry of all recorded invocations
    instances = []

    def __init__(self, id, func, attribs, inputs, outputs):
        self.id = id            # user-supplied name or running index
        self.func = func
        self.attribs = attribs  # non-tensor, non-None call arguments
        self.inputs = inputs    # tf.Tensor arguments
        self.outputs = outputs  # tf.Tensor results

    @staticmethod
    def function(func):
        """Decorator: run func and record the invocation in _Composite.instances."""
        def wrapper(*args, **kwargs):
            results = func(*args, **kwargs)
            name = kwargs.get('name')
            if name is not None:
                del kwargs['name']
            id = name or len(_Composite.instances)
            signature = inspect.signature(func)
            bound = signature.bind(*args, **kwargs)
            bound.apply_defaults()
            # non-tensor arguments become node attributes; tensors become inputs
            attribs = {name: value for name, value in bound.arguments.items()
                       if not isinstance(value, tf.Tensor) and value is not None}
            inputs = [value for value in bound.arguments.values() if isinstance(value, tf.Tensor)]
            outputs = (results,) if not isinstance(results, (list, tuple)) else results
            assert all(isinstance(value, tf.Tensor) for value in outputs), \
                "Results of composite function must be tensors"
            assert not any(tensor in inputs for tensor in outputs), \
                "Results of composite function cannot be input arguments at the same time"
            _Composite.instances.append(_Composite(id, func, attribs, inputs, outputs))
            return results
        return wrapper

    @property
    def name(self):
        # anonymous composites are named by their registration index
        return self.id if isinstance(self.id, str) else 'Composite' + str(self.id)


function = _Composite.function


def _is_tensor(value):
    """True for a tf.Tensor or a (nested) sequence of tensors."""
    return isinstance(value, tf.Tensor) or (isinstance(value, Sequence) and all(_is_tensor(item) for item in value))


def _node_name_from_tensor(name):
    """Strip control-dependency marker '^' and a numeric ':idx' suffix from a tensor name."""
    if name[0] == '^':
        name = name[1:]
    pos = name.find(':')
    if pos != -1 and name[pos+1:].isdigit():
        name = name[:pos]
    return name


def _input_name_from_tensor(name):
    """Drop the redundant ':0' suffix (output 0 is referenced by bare node name)."""
    return name[:-2] if name.endswith(':0') else name


def _build_node_def(composite):
    """Build a PyFunc NodeDef standing in for the composite's sub-graph.

    The composite's attributes are stashed in '_$name$_' attr keys so the
    reader can recover them (see _unpack_custom_ops in reader.py).
    """
    node_def = NodeDef()
    node_def.op = 'PyFunc'
    node_def.name = composite.name
    node_def.input.extend([_input_name_from_tensor(arg.name) for arg in composite.inputs])

    input_dtypes = [tensor.dtype.as_numpy_dtype for tensor in composite.inputs]
    output_dtypes = [tensor.dtype.as_numpy_dtype for tensor in composite.outputs]
    output_shapes = [tuple(tensor.shape.as_list()) for tensor in composite.outputs]

    _build_attribute(node_def.attr['Tin'], input_dtypes)
    _build_attribute(node_def.attr['Tout'], output_dtypes)
    _build_attribute(node_def.attr['token'], composite.func.__name__)
    _build_attribute(node_def.attr['_output_shapes'], output_shapes)

    for name, value in composite.attribs.items():
        _build_attribute(node_def.attr['_$' + name + '$_'], value)

    return node_def


def _remap_tensors(tensors, graph):
    """Re-resolve tensors by name in another (re-imported) graph."""
    return type(tensors)(graph.get_tensor_by_name(tensor.name) for tensor in tensors)


def _tensor_producers_and_consumers(graph):
    """Map each tensor to the ops touching it, producer first, then consumers."""
    producers_and_consumers = {tensor: [tensor.op] for op in graph.get_operations() for tensor in op.outputs}
    for op in graph.get_operations():
        for tensor in op.inputs:
            ops = producers_and_consumers[tensor]
            if op not in ops:
                ops.append(op)
    return producers_and_consumers


def _find_subgraph(composite, producers_and_consumers):
    """Return the names of all ops inside the composite's sub-graph.

    BFS from the output ops, traversing tensors other than the composite's
    declared inputs/outputs (those form the boundary).
    """
    queue = [tensor.op for tensor in composite.outputs]
    subgraph = {item.name for item in queue}
    idx = 0
    while idx < len(queue):
        op = queue[idx]
        idx += 1
        # boundary tensors (composite inputs/outputs) are not traversed
        tensors = [tensor for tensor in op.inputs if tensor not in composite.inputs] + \
                  [tensor for tensor in op.outputs if tensor not in composite.outputs]
        for tensor in tensors:
            for op in producers_and_consumers[tensor]:
                if op.name not in subgraph:
                    subgraph.add(op.name)
                    queue.append(op)
    return subgraph


def replace_composites_with_py_functions(graph_def):
    """Rewrite graph_def, replacing each recorded composite's sub-graph with one PyFunc node."""
    graph = import_graph_def(graph_def)

    # re-bind recorded tensors to the freshly imported graph
    for composite in _Composite.instances:
        composite.inputs = _remap_tensors(composite.inputs, graph)
        composite.outputs = _remap_tensors(composite.outputs, graph)

    producers_and_consumers = _tensor_producers_and_consumers(graph)

    tensor_remap = {}
    subgraph_ops = set()
    for composite in _Composite.instances:
        subgraph_ops.update(_find_subgraph(composite, producers_and_consumers))
        for idx, tensor in enumerate(composite.outputs):
            # composite outputs now come from the PyFunc node's outputs
            tensor_remap[_input_name_from_tensor(tensor.name)] = \
                composite.name + ':' + str(idx) if idx > 0 else composite.name

    new_graph_def = GraphDef()
    # keep everything outside the composites' sub-graphs
    for node in graph_def.node:
        if node.name not in subgraph_ops:
            new_graph_def.node.append(node)
    for composite in _Composite.instances:
        new_graph_def.node.append(_build_node_def(composite))

    # retarget inputs that referenced replaced sub-graph tensors
    for node in new_graph_def.node:
        for i in range(len(node.input)):
            remapped = tensor_remap.get(node.input[i])
            if remapped is not None:
                node.input[i] = remapped

    return new_graph_def


def reset_composites():
    """Clear the global registry of recorded composite invocations."""
    _Composite.instances = []



================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/graphdef/protobuf.py
================================================
# Copyright (c) 2020 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Single point of import for the TF protobuf message classes used by this package.
from tensorflow.core.framework.graph_pb2 import GraphDef
from tensorflow.core.framework.node_def_pb2 import NodeDef
from tensorflow.core.framework.attr_value_pb2 import AttrValue
from tensorflow.core.framework.types_pb2 import DataType
from tensorflow.core.framework.tensor_pb2 import TensorProto
from tensorflow.core.framework.tensor_shape_pb2 import TensorShapeProto



================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/graphdef/reader.py
================================================
# Copyright (c) 2020 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function, absolute_import

from collections import namedtuple
from ....model import *
from ....utils.types import as_str
from .protobuf import *
import numpy as np
import six


# func-valued node attributes (NameAttrList in the proto)
Function = namedtuple('Function', ['name', 'attrs'])

# Maps TF DataType enum names to numpy types; DT_RESOURCE gets a structured
# placeholder dtype since it has no numeric equivalent.
_DtypeToNumpy = {
    'DT_INVALID': None,
    'DT_RESOURCE': np.dtype([('resource', np.int32)]),
    'DT_HALF': np.float16,
    'DT_FLOAT': np.float32,
    'DT_DOUBLE': np.float64,
    'DT_INT8': np.int8,
    'DT_INT16': np.int16,
    'DT_INT32': np.int32,
    'DT_INT64': np.int64,
    'DT_UINT8': np.uint8,
    'DT_UINT16': np.uint16,
    'DT_UINT32': np.uint32,
    'DT_UINT64': np.uint64,
    'DT_BOOL': np.bool_,
    'DT_STRING': np.str_,
    'DT_COMPLEX64': np.complex64,
    'DT_COMPLEX128': np.complex128,
}


def _get_shape(shape_proto):
    """Convert a TensorShapeProto to a tuple; negative dims become None, unknown rank becomes None."""
    return tuple(int(dim.size) if dim.size >= 0 else None for dim in shape_proto.dim) \
        if not shape_proto.unknown_rank else None


def _get_dtype(dtype_enum):
    """Map a DataType enum value to a numpy type; asserts the dtype is representable."""
    dtype = _DtypeToNumpy[DataType.Name(dtype_enum)]
    assert dtype is not None, "non-numeric dtype '{}' in attribute".format(DataType.Name(dtype_enum))
    return dtype


def _get_nonempty_items(message, fields):
    """Return (field, items) for the first non-empty repeated field, or (None, None)."""
    for field in fields:
        items = getattr(message, field)
        if len(items):
            return field, items
    return None, None


def _get_tensor(tensor_proto):
    """Convert a TF TensorProto to a numpy array.

    Data may be packed in tensor_content, spread over a typed *_val field,
    or a single value to be broadcast to the full shape.
    """
    shape = _get_shape(tensor_proto.tensor_shape)
    dtype = _get_dtype(tensor_proto.dtype)
    if len(tensor_proto.tensor_content):
        data = np.frombuffer(tensor_proto.tensor_content, dtype=dtype).reshape(shape)
    else:
        field, items = _get_nonempty_items(tensor_proto, fields=['half_val', 'float_val', 'double_val', 'int_val',
                                                                'int64_val', 'bool_val', 'string_val',
                                                                'uint32_val', 'uint64_val', 'resource_handle_val',
                                                                'scomplex_val', 'dcomplex_val'])
        # a zero-sized tensor legitimately has no items
        if items is None and any(s == 0 for s in shape):
            items = []
        assert items is not None, "tensor items are empty, dtype = {}, shape = {}".format(dtype, shape)
        items = [item for item in items]
        if len(items) == int(np.prod(shape)):
            data = np.array(items, dtype=dtype).reshape(shape)
        else:
            # a single stored value fills the whole tensor
            assert len(items) == 1
            data = np.full(shape=shape, dtype=dtype, fill_value=items[0])
    return data


def _get_func(name_attrlist_proto):
    """Convert a NameAttrList proto to a Function namedtuple."""
    return Function(name_attrlist_proto.name, _get_attributes(name_attrlist_proto.attr))


def _get_attribute(field, value):
    """Convert one AttrValue oneof-field payload to a python value."""
    if field == 'i' or field == 'f' or field == 'b' or field == 'placeholder':
        return value
    elif field == 's':
        return as_str(value.decode())
    elif field == 'shape':
        return _get_shape(value)
    elif field == 'type':
        return _get_dtype(value)
    elif field == 'tensor':
        return _get_tensor(value)
    elif field == 'func':
        return _get_func(value)
    elif field == 'list':
        field, items = _get_nonempty_items(value, fields=['i', 'f', 'b', 's', 'shape', 'type', 'tensor', 'func'])
        return [_get_attribute(field, item) for item in items] if items is not None else []
    assert False


def _get_attributes(attr_map_proto):
    """Convert a node's attr map to a {name: python-value} dict (None when unset)."""
    attributes = {}
    for name, value in attr_map_proto.items():
        field = value.WhichOneof('value')
        if field is not None:
            value = getattr(value, field)
            attributes[as_str(name)] = _get_attribute(field, value)
        else:
            attributes[as_str(name)] = None
    return attributes


def _get_output_name(node_name, idx):
    """TF convention: output 0 is the bare node name, others are 'name:idx'."""
    return node_name + ':' + str(idx) if idx > 0 else node_name


def _has_output_shapes(graph_def):
    """True when every node already carries a populated '_output_shapes' attr."""
    return all('_output_shapes' in node.attr and node.attr['_output_shapes'].WhichOneof('value') is not None
               for node in graph_def.node)


def _add_output_shapes(graph_def):
    """Round-trip the GraphDef through TF to annotate it with output shapes."""
    try:
        import tensorflow.compat.v1 as tf
    except ImportError:
        import tensorflow as tf

    graph = tf.Graph()
    with graph.as_default():
        tf.import_graph_def(graph_def, name='')
    return graph.as_graph_def(add_shapes=True)


def _get_dtypes(graph_def):
    """Import the GraphDef and collect each output tensor's numpy dtype by name."""
    try:
        import tensorflow.compat.v1 as tf
    except ImportError:
        import tensorflow as tf

    dtypes = {}
    graph = tf.Graph()
    with graph.as_default():
        tf.import_graph_def(graph_def, name='')
    for op in graph.get_operations():
        for tensor in op.outputs:
            name = tensor.name[:-2] if tensor.name.endswith(':0') else tensor.name
            dtypes[name] = tensor.dtype.as_numpy_dtype if tensor.dtype != tf.resource else \
                _DtypeToNumpy['DT_RESOURCE'].type
    return dtypes


def _get_output_shapes(attr_map_proto):
    """Extract the '_output_shapes' attr as a list of shapes, or None when unset."""
    value = attr_map_proto['_output_shapes']
    field = value.WhichOneof('value')
    if field is None:
        return None
    value = getattr(value, field)
    return _get_attribute(field, value)


def build_graph(graph_def):
    """Convert a shape-annotated GraphDef into the internal Graph representation.

    Placeholders become graph inputs; tensors with no consumers become outputs.
    Control-dependency inputs ('^name') are dropped.
    """
    graph = Graph()
    dtypes = _get_dtypes(graph_def)

    # create tensors
    node_outputs = {}
    for node in graph_def.node:
        output_shapes = _get_output_shapes(node.attr)
        if output_shapes is not None:
            name = as_str(node.name)
            node_outputs[name] = [Tensor(graph, _get_output_name(name, idx), shape=shape, dtype=dtypes.get(name))
                                  for idx, shape in enumerate(output_shapes)]
    tensors = {tensor.name: tensor for outputs in six.itervalues(node_outputs) for tensor in outputs}

    # create ops
    for node in graph_def.node:
        attributes = _get_attributes(node.attr)
        inputs = [tensors[name] for name in node.input if not name.startswith('^')]
        outputs = node_outputs[node.name] if node.name in node_outputs else []
        Operation(graph, type=as_str(node.op), name=as_str(node.name),
                  inputs=inputs, outputs=outputs, attribs=attributes)

    graph.inputs = [node_outputs[node.name][0] for node in graph_def.node if node.op == 'Placeholder']
    graph.outputs = [output for op in graph.operations
                     if all(len(output.consumers) == 0 for output in op.outputs)
                     for output in op.outputs]
    return graph


def _unpack_custom_ops(graph):
    """Turn PyFunc nodes back into custom ops, restoring attribs from '_$name$_' keys.

    Inverse of the PyFunc encoding done by composite._build_node_def.
    """
    for op in graph.operations:
        if op.type == 'PyFunc':
            op.custom = True
            op.type = op.attribs['token']
            op.attribs = {key[2:-2]: value for key, value in six.iteritems(op.attribs)
                          if key.startswith('_$') and key.endswith('$_')}


def read_graphdef(filename, input_shapes, fold_constants):
    """Read a GraphDef file, optionally fix input shapes and fold constants, and build a Graph."""
    graph_def = GraphDef()
    with open(filename, 'rb') as file:
        graph_def.ParseFromString(file.read())

    if not _has_output_shapes(graph_def):
        graph_def = _add_output_shapes(graph_def)
    if input_shapes is not None:
        from .utils import set_input_shapes
        graph_def = set_input_shapes(graph_def, input_shapes)
    if fold_constants:
        from .utils import fold_constant_tensors
        graph_def = fold_constant_tensors(graph_def)

    graph = build_graph(graph_def)
    _unpack_custom_ops(graph)
    return graph


class Reader(object):
    """Reads a TF GraphDef file into the internal Graph representation."""

    def __init__(self, fold_constants=False):
        self._fold_constants = fold_constants

    def __call__(self, filename, input_shapes=None):
        return read_graphdef(filename, input_shapes, self._fold_constants)



================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/graphdef/utils.py
================================================
# Copyright (c) 2020 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .protobuf import * from .writer import _build_attribute from .reader import _get_attributes import numpy as np import six try: import tensorflow.compat.v1 as tf except ImportError: import tensorflow as tf def import_graph_def(graph_def): graph = tf.Graph() with graph.as_default(): tf.import_graph_def(graph_def, name='') return graph def export_graph_def(graph): return graph.as_graph_def(add_shapes=True) def reinfer_shapes(graph_def): return export_graph_def(import_graph_def(graph_def)) def _try_eval(tensor, session): try: if tensor.dtype == tf.resource or tensor.dtype == tf.string: return None value = tensor.eval(session=session) print("Evaluated constant tensor '{}'".format(tensor.name)) return value except: return None def _build_node(type, name, attribs, inputs): node_def = NodeDef() node_def.op = type node_def.name = name if len(inputs): node_def.input.extend(inputs) for name, value in attribs.items(): _build_attribute(node_def.attr[name], value) return node_def def _make_const_node(value, name): return _build_node('Const', name, {'dtype': value.dtype.type, 'value': value, '_output_shapes': [value.shape]}, []) def _make_identity_node(input, name, dtype, shape): return _build_node('Identity', name, {'T': dtype, '_output_shapes': [shape]}, [input]) def _freeze_shape_tensors(graph_def): graph = import_graph_def(graph_def) evaluated = {} for op in graph.get_operations(): if op.type == 'Shape': shape = op.inputs[0].shape if shape.dims is not None and all(item is not None for item in shape.as_list()): evaluated[op.name] = np.array(shape, dtype=np.int32) print("Evaluated Shape op '{}' to {}".format(op.name, str(shape))) changed = False new_graph_def = GraphDef() for node in graph_def.node: value = evaluated.get(node.name) if value is not None: new_graph_def.node.append(_make_const_node(value, node.name)) changed = True else: new_graph_def.node.append(node) return new_graph_def, changed def _remove_const_control_dependencies(graph_def): for node in graph_def.node: 
if node.op == 'Const': for idx in reversed(range(len(node.input))): name = node.input[idx] if name[0] == '^': del node.input[idx] return graph_def def _remove_zero_index(name): return name[:-2] if name.endswith(':0') else name def _remove_const_identities(graph_def): graph = import_graph_def(graph_def) removables = {op.name: _remove_zero_index(op.inputs[0].name) for op in graph.get_operations() if op.type == 'Identity' and op.inputs[0].op.type == 'Const'} for node in graph_def.node: for i in range(len(node.input)): replacement = removables.get(_op_name_from_tensor(node.input[i])) if replacement: node.input[i] = replacement new_graph_def = GraphDef() for node in graph_def.node: if node.name not in removables: new_graph_def.node.append(node) return new_graph_def def _eval_candidates(graph): evaluables = set() changed = True while changed: changed = False for op in graph.get_operations(): if op not in evaluables and all(tensor.op in evaluables for tensor in op.inputs) and op.type != 'Placeholder': evaluables.add(op) changed = True candidates = set() for op in graph.get_operations(): if op not in evaluables: for tensor in op.inputs: if tensor.op in evaluables and tensor.op.type != 'Const': candidates.add(tensor) return candidates def _fold_constant_tensors(graph_def): graph = import_graph_def(graph_def) evaluated = {} with tf.Session(graph=graph) as session: for tensor in _eval_candidates(graph): evaluated[tensor.name] = _try_eval(tensor, session) results = {} for op in graph.get_operations(): results[op.name] = [evaluated.get(tensor.name) for tensor in op.outputs] remap = {} changed = False new_graph_def = GraphDef() for node in graph_def.node: values = results[node.name] all_evaluated = all(value is not None for value in values) and len(values) > 0 for idx, value in enumerate(values): if value is not None: arg_name = node.name if idx == 0 else node.name + ':{}'.format(idx) const_name = node.name if idx == 0 and all_evaluated else node.name + '//{}'.format(idx) 
remap[arg_name] = const_name new_graph_def.node.append(_make_const_node(value, const_name)) changed = True if not all_evaluated: new_graph_def.node.append(node) for node in new_graph_def.node: for i in range(len(node.input)): remapped = remap.get(node.input[i]) if remapped is not None: node.input[i] = remapped return new_graph_def, changed def _find_reachables_forward(graph, reachables): changed = True while changed: changed = False for op in graph.get_operations(): if op.name not in reachables and any(tensor.op.name in reachables for tensor in op.inputs): reachables.add(op.name) changed = True return reachables def _find_reachables_backward(graph, reachables): changed = True while changed: changed = False for op in reversed(graph.get_operations()): if op.name in reachables: for tensor in op.inputs: if tensor.op.name not in reachables: reachables.add(tensor.op.name) changed = True return reachables def _retain_nodes(graph_def, node_names): new_graph_def = GraphDef() for node in graph_def.node: if node.name in node_names: new_graph_def.node.append(node) for node in new_graph_def.node: for idx in reversed(range(len(node.input))): name = node.input[idx] if name[0] == '^' and name[1:] not in node_names: del node.input[idx] return new_graph_def def _retain_reachables_from_placeholders(graph_def): graph = import_graph_def(graph_def) reachables = {op.name for op in graph.get_operations() if op.type == 'Placeholder'} if len(reachables) == 0: return graph_def reachables = _find_reachables_forward(graph, reachables) reachables = _find_reachables_backward(graph, reachables) return _retain_nodes(graph_def, reachables) def _op_name_from_tensor(name): if name[0] == '^': name = name[1:] pos = name.find(':') if pos != -1 and name[pos+1:].isdigit(): name = name[:pos] return name def fold_constant_tensors(graph_def): graph_def = _remove_const_control_dependencies(graph_def) graph_def = _remove_const_identities(graph_def) graph_def, changed = _freeze_shape_tensors(graph_def) 
graph_def, changed = _fold_constant_tensors(graph_def) while changed: graph_def, changed = _freeze_shape_tensors(graph_def) if changed: graph_def, changed = _fold_constant_tensors(graph_def) graph_def = _retain_reachables_from_placeholders(graph_def) return reinfer_shapes(graph_def) def set_input_shapes(graph_def, input_shapes): graph = import_graph_def(graph_def) placeholders = {op.name: (op.outputs[0].shape, op.outputs[0].dtype) for op in graph.get_operations() if op.type == 'Placeholder'} graph = tf.Graph() with graph.as_default(): input_map = {} for name, shape in six.iteritems(input_shapes): if name not in placeholders: raise IOError("Model has no input named '{}'".format(name)) orig_shape, dtype = placeholders[name] if orig_shape.rank is not None and len(shape) != orig_shape.rank: raise IOError("Shape rank for input '{}' does not match that of the model ({} vs {})" .format(name, len(shape), orig_shape.rank)) input_map[name] = tf.placeholder(shape=shape, dtype=dtype, name=name) for name, (shape, dtype) in placeholders.items(): if name not in input_map: input_map[name] = tf.placeholder(shape=shape, dtype=dtype, name=name) tf.import_graph_def(graph_def, name='', input_map=input_map) used = {tensor.op.name for op in graph.get_operations() for tensor in op.inputs} graph_def = GraphDef() for op in graph.get_operations(): if op.type != 'Placeholder' or op.name in used: graph_def.node.append(op.node_def) return reinfer_shapes(graph_def) def retain_reachables_from_outputs(graph_def, output_names): graph = import_graph_def(graph_def) reachables = _find_reachables_backward(graph, set(output_names)) return _retain_nodes(graph_def, reachables) def insert_rename_identities(graph_def, tensor_rename): for tensor, name in six.iteritems(tensor_rename): tensor_name = _remove_zero_index(tensor.name) if name != tensor_name: graph_def.node.append(_make_identity_node(tensor_name, name, tensor.dtype.as_numpy_dtype, tuple(tensor.shape.as_list()))) return graph_def def 
check_finite(graph_def): for node in graph_def.node: attribs = _get_attributes(node.attr) for key, value in six.iteritems(attribs): if isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.number) and not np.all(np.isfinite(value)): raise ValueError("Attribute '{}' of op '{}' named '{}' contains nan or inf". format(key, node.op, node.name)) def check_variables(session): variables = tf.global_variables() for variable in variables: value = session.run(variable) if np.issubdtype(value.dtype, np.number) and not np.all(np.isfinite(value)): raise ValueError("Variable '{}' contains nan or inf".format(variable.name)) ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/graphdef/writer.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import division, print_function, absolute_import from .protobuf import * import numpy as np import six _DtypeFromNumpy = { None: 'DT_INVALID', np.float16: 'DT_HALF', np.float32: 'DT_FLOAT', np.float64: 'DT_DOUBLE', np.int8: 'DT_INT8', np.int16: 'DT_INT16', np.int32: 'DT_INT32', np.int64: 'DT_INT64', np.uint8: 'DT_UINT8', np.uint16: 'DT_UINT16', np.uint32: 'DT_UINT32', np.uint64: 'DT_UINT64', np.bool_: 'DT_BOOL', np.str_: 'DT_STRING', np.complex64: 'DT_COMPLEX64', np.complex128: 'DT_COMPLEX128', np.dtype([('resource', np.int32)]): 'DT_RESOURCE', } _NumpyDtypes = { np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64, np.float32, np.float64, np.complex64, np.complex128, np.bool_, np.str_, np.dtype([('resource', np.int32)]), } def _build_shape(shape_proto, shape): shape_proto.unknown_rank = (shape is None) if shape is not None: for item in shape: dim = shape_proto.dim.add() dim.size = item if item is not None else -1 def _build_dtype(dtype): return DataType.Value(_DtypeFromNumpy[dtype]) def _build_tensor(tensor_proto, data): if data.dtype is not None: tensor_proto.dtype = _build_dtype(data.dtype.type) if data.shape is not None: _build_shape(tensor_proto.tensor_shape, data.shape) tensor_proto.tensor_content = data.reshape([-1]).view(np.uint8).tobytes() return tensor_proto def _build_attribute(attr_proto, value): if value is None: return attr_proto if type(value) in _NumpyDtypes: value = np.array(value) if isinstance(value, bool): # must be before int attr_proto.b = value elif isinstance(value, int): attr_proto.i = value elif isinstance(value, float): attr_proto.f = value elif isinstance(value, str): attr_proto.s = value.encode() elif isinstance(value, (type, np.dtype)): attr_proto.type = _build_dtype(value) elif isinstance(value, tuple): _build_shape(attr_proto.shape, value) elif isinstance(value, np.ndarray): _build_tensor(attr_proto.tensor, value) elif isinstance(value, list): if len(value) == 0: 
attr_proto.list.i.extend([]) # to signal that the 'list' is the active in the oneof field else: first = value[0] if isinstance(first, int): attr_proto.list.i.extend(value) elif isinstance(first, float): attr_proto.list.f.extend(value) elif isinstance(first, bool): attr_proto.list.b.extend(value) elif isinstance(first, str): attr_proto.list.s.extend([item.encode() for item in value]) elif isinstance(first, (type, np.dtype)): attr_proto.list.type.extend([_build_dtype(item) for item in value]) elif isinstance(first, tuple): for item in value: _build_shape(attr_proto.list.shape.add(), item) elif isinstance(first, np.ndarray): for item in value: _build_tensor(attr_proto.list.tensor.add(), item) else: raise TypeError('unable to build attribute proto message from type: ' + str(type(first))) else: raise TypeError('unable to build attribute proto message from type: ' + str(type(value))) return attr_proto def _build_output_shapes(attr_proto, output_shapes): for item in output_shapes: _build_shape(attr_proto.list.shape.add(), item) def _tensor_name(tensor): name = tensor.producer.name idx = tensor.producer.outputs.index(tensor) return name + ':' + str(idx) if idx > 0 else name def _custom_attribs(operation): attribs = {'_$' + key + '$_': value for key, value in six.iteritems(operation.attribs)} attribs['token'] = operation.type attribs['Tin'] = [tensor.dtype for tensor in operation.inputs] attribs['Tout'] = [tensor.dtype for tensor in operation.outputs] def _build_node(node_def, operation): node_def.op = operation.type if not operation.custom else 'PyFunc' node_def.name = operation.name node_def.input.extend([_tensor_name(tensor) for tensor in operation.inputs]) attribs = operation.attribs if not operation.custom else _custom_attribs(operation) output_shapes = attribs.get('_output_shapes') if output_shapes is not None: _build_output_shapes(node_def.attr['_output_shapes'], output_shapes) del attribs['_output_shapes'] else: _build_output_shapes(node_def.attr['_output_shapes'], 
[tensor.shape for tensor in operation.outputs]) for name, value in attribs.items(): _build_attribute(node_def.attr[name], value) return node_def def build_graphdef(graph): graph_def = GraphDef() for operation in graph.operations: node_def = graph_def.node.add() _build_node(node_def, operation) return graph_def def write_graphdef(graph, filename): graph_def = build_graphdef(graph) with open(filename, 'wb') as file: file.write(graph_def.SerializeToString()) class Writer(object): def __call__(self, graph, filename): return write_graphdef(graph, filename) ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/__init__.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# public API of the TFLite IO package
from .reader import Reader
from .writer import Writer
from .helpers import CustomOptionsKey


================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/AbsOptions.py ================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class AbsOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsAbsOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = AbsOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def AbsOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # 'TFL3' file identifier
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # AbsOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

def AbsOptionsStart(builder): builder.StartObject(0)
def AbsOptionsEnd(builder): return builder.EndObject()


================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/ActivationFunctionType.py ================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

class ActivationFunctionType(object):
    NONE = 0
    RELU = 1
    RELU_N1_TO_1 = 2
    RELU6 = 3
    TANH = 4
    SIGN_BIT = 5


================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/AddNOptions.py ================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class AddNOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsAddNOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = AddNOptions()
        x.Init(buf, n + offset)
        return x
@classmethod def AddNOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # AddNOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) def AddNOptionsStart(builder): builder.StartObject(0) def AddNOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/AddOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class AddOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsAddOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = AddOptions() x.Init(buf, n + offset) return x @classmethod def AddOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # AddOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # AddOptions def FusedActivationFunction(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) return 0 def AddOptionsStart(builder): builder.StartObject(1) def AddOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0) def AddOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/ArgMaxOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat 
import import_numpy np = import_numpy() class ArgMaxOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsArgMaxOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = ArgMaxOptions() x.Init(buf, n + offset) return x @classmethod def ArgMaxOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # ArgMaxOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # ArgMaxOptions def OutputType(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) return 0 def ArgMaxOptionsStart(builder): builder.StartObject(1) def ArgMaxOptionsAddOutputType(builder, outputType): builder.PrependInt8Slot(0, outputType, 0) def ArgMaxOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/ArgMinOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class ArgMinOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsArgMinOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = ArgMinOptions() x.Init(buf, n + offset) return x @classmethod def ArgMinOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # ArgMinOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # ArgMinOptions def OutputType(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return 
self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) return 0 def ArgMinOptionsStart(builder): builder.StartObject(1) def ArgMinOptionsAddOutputType(builder, outputType): builder.PrependInt8Slot(0, outputType, 0) def ArgMinOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/BatchMatMulOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class BatchMatMulOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsBatchMatMulOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = BatchMatMulOptions() x.Init(buf, n + offset) return x @classmethod def BatchMatMulOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # BatchMatMulOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # BatchMatMulOptions def AdjX(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) return False # BatchMatMulOptions def AdjY(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) if o != 0: return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) return False def BatchMatMulOptionsStart(builder): builder.StartObject(2) def BatchMatMulOptionsAddAdjX(builder, adjX): builder.PrependBoolSlot(0, adjX, 0) def BatchMatMulOptionsAddAdjY(builder, adjY): builder.PrependBoolSlot(1, adjY, 0) def BatchMatMulOptionsEnd(builder): return builder.EndObject() ================================================ FILE: 
nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/BatchToSpaceNDOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class BatchToSpaceNDOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsBatchToSpaceNDOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = BatchToSpaceNDOptions() x.Init(buf, n + offset) return x @classmethod def BatchToSpaceNDOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # BatchToSpaceNDOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) def BatchToSpaceNDOptionsStart(builder): builder.StartObject(0) def BatchToSpaceNDOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/BidirectionalSequenceLSTMOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class BidirectionalSequenceLSTMOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsBidirectionalSequenceLSTMOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = BidirectionalSequenceLSTMOptions() x.Init(buf, n + offset) return x @classmethod def BidirectionalSequenceLSTMOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # BidirectionalSequenceLSTMOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # BidirectionalSequenceLSTMOptions 
def FusedActivationFunction(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) return 0 # BidirectionalSequenceLSTMOptions def CellClip(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) if o != 0: return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) return 0.0 # BidirectionalSequenceLSTMOptions def ProjClip(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) if o != 0: return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) return 0.0 # BidirectionalSequenceLSTMOptions def MergeOutputs(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) if o != 0: return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) return False # BidirectionalSequenceLSTMOptions def TimeMajor(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) if o != 0: return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) return True # BidirectionalSequenceLSTMOptions def AsymmetricQuantizeInputs(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) if o != 0: return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) return False def BidirectionalSequenceLSTMOptionsStart(builder): builder.StartObject(6) def BidirectionalSequenceLSTMOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0) def BidirectionalSequenceLSTMOptionsAddCellClip(builder, cellClip): builder.PrependFloat32Slot(1, cellClip, 0.0) def BidirectionalSequenceLSTMOptionsAddProjClip(builder, projClip): builder.PrependFloat32Slot(2, projClip, 0.0) def BidirectionalSequenceLSTMOptionsAddMergeOutputs(builder, mergeOutputs): builder.PrependBoolSlot(3, mergeOutputs, 0) def 
BidirectionalSequenceLSTMOptionsAddTimeMajor(builder, timeMajor): builder.PrependBoolSlot(4, timeMajor, 1) def BidirectionalSequenceLSTMOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): builder.PrependBoolSlot(5, asymmetricQuantizeInputs, 0) def BidirectionalSequenceLSTMOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/BidirectionalSequenceRNNOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class BidirectionalSequenceRNNOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsBidirectionalSequenceRNNOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = BidirectionalSequenceRNNOptions() x.Init(buf, n + offset) return x @classmethod def BidirectionalSequenceRNNOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # BidirectionalSequenceRNNOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # BidirectionalSequenceRNNOptions def TimeMajor(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) return False # BidirectionalSequenceRNNOptions def FusedActivationFunction(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) return 0 # BidirectionalSequenceRNNOptions def MergeOutputs(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) if o != 0: return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + 
self._tab.Pos)) return False # BidirectionalSequenceRNNOptions def AsymmetricQuantizeInputs(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) if o != 0: return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) return False def BidirectionalSequenceRNNOptionsStart(builder): builder.StartObject(4) def BidirectionalSequenceRNNOptionsAddTimeMajor(builder, timeMajor): builder.PrependBoolSlot(0, timeMajor, 0) def BidirectionalSequenceRNNOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(1, fusedActivationFunction, 0) def BidirectionalSequenceRNNOptionsAddMergeOutputs(builder, mergeOutputs): builder.PrependBoolSlot(2, mergeOutputs, 0) def BidirectionalSequenceRNNOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): builder.PrependBoolSlot(3, asymmetricQuantizeInputs, 0) def BidirectionalSequenceRNNOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/Buffer.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class Buffer(object): __slots__ = ['_tab'] @classmethod def GetRootAsBuffer(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = Buffer() x.Init(buf, n + offset) return x @classmethod def BufferBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # Buffer def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # Buffer def Data(self, j): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: a = self._tab.Vector(o) return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + 
flatbuffers.number_types.UOffsetTFlags.py_type(j * 1)) return 0 # Buffer def DataAsNumpy(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o) return 0 # Buffer def DataLength(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.VectorLen(o) return 0 # Buffer def DataIsNone(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) return o == 0 def BufferStart(builder): builder.StartObject(1) def BufferAddData(builder, data): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(data), 0) def BufferStartDataVector(builder, numElems): return builder.StartVector(1, numElems, 1) def BufferEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/BuiltinOperator.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite class BuiltinOperator(object): ADD = 0 AVERAGE_POOL_2D = 1 CONCATENATION = 2 CONV_2D = 3 DEPTHWISE_CONV_2D = 4 DEPTH_TO_SPACE = 5 DEQUANTIZE = 6 EMBEDDING_LOOKUP = 7 FLOOR = 8 FULLY_CONNECTED = 9 HASHTABLE_LOOKUP = 10 L2_NORMALIZATION = 11 L2_POOL_2D = 12 LOCAL_RESPONSE_NORMALIZATION = 13 LOGISTIC = 14 LSH_PROJECTION = 15 LSTM = 16 MAX_POOL_2D = 17 MUL = 18 RELU = 19 RELU_N1_TO_1 = 20 RELU6 = 21 RESHAPE = 22 RESIZE_BILINEAR = 23 RNN = 24 SOFTMAX = 25 SPACE_TO_DEPTH = 26 SVDF = 27 TANH = 28 CONCAT_EMBEDDINGS = 29 SKIP_GRAM = 30 CALL = 31 CUSTOM = 32 EMBEDDING_LOOKUP_SPARSE = 33 PAD = 34 UNIDIRECTIONAL_SEQUENCE_RNN = 35 GATHER = 36 BATCH_TO_SPACE_ND = 37 SPACE_TO_BATCH_ND = 38 TRANSPOSE = 39 MEAN = 40 SUB = 41 DIV = 42 SQUEEZE = 43 UNIDIRECTIONAL_SEQUENCE_LSTM = 44 STRIDED_SLICE = 45 BIDIRECTIONAL_SEQUENCE_RNN = 46 EXP = 47 TOPK_V2 = 48 SPLIT = 49 LOG_SOFTMAX = 50 
DELEGATE = 51 BIDIRECTIONAL_SEQUENCE_LSTM = 52 CAST = 53 PRELU = 54 MAXIMUM = 55 ARG_MAX = 56 MINIMUM = 57 LESS = 58 NEG = 59 PADV2 = 60 GREATER = 61 GREATER_EQUAL = 62 LESS_EQUAL = 63 SELECT = 64 SLICE = 65 SIN = 66 TRANSPOSE_CONV = 67 SPARSE_TO_DENSE = 68 TILE = 69 EXPAND_DIMS = 70 EQUAL = 71 NOT_EQUAL = 72 LOG = 73 SUM = 74 SQRT = 75 RSQRT = 76 SHAPE = 77 POW = 78 ARG_MIN = 79 FAKE_QUANT = 80 REDUCE_PROD = 81 REDUCE_MAX = 82 PACK = 83 LOGICAL_OR = 84 ONE_HOT = 85 LOGICAL_AND = 86 LOGICAL_NOT = 87 UNPACK = 88 REDUCE_MIN = 89 FLOOR_DIV = 90 REDUCE_ANY = 91 SQUARE = 92 ZEROS_LIKE = 93 FILL = 94 FLOOR_MOD = 95 RANGE = 96 RESIZE_NEAREST_NEIGHBOR = 97 LEAKY_RELU = 98 SQUARED_DIFFERENCE = 99 MIRROR_PAD = 100 ABS = 101 SPLIT_V = 102 UNIQUE = 103 CEIL = 104 REVERSE_V2 = 105 ADD_N = 106 GATHER_ND = 107 COS = 108 WHERE = 109 RANK = 110 ELU = 111 REVERSE_SEQUENCE = 112 MATRIX_DIAG = 113 QUANTIZE = 114 MATRIX_SET_DIAG = 115 ROUND = 116 HARD_SWISH = 117 IF = 118 WHILE = 119 NON_MAX_SUPPRESSION_V4 = 120 NON_MAX_SUPPRESSION_V5 = 121 SCATTER_ND = 122 SELECT_V2 = 123 DENSIFY = 124 SEGMENT_SUM = 125 BATCH_MATMUL = 126 ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/BuiltinOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite class BuiltinOptions(object): NONE = 0 Conv2DOptions = 1 DepthwiseConv2DOptions = 2 ConcatEmbeddingsOptions = 3 LSHProjectionOptions = 4 Pool2DOptions = 5 SVDFOptions = 6 RNNOptions = 7 FullyConnectedOptions = 8 SoftmaxOptions = 9 ConcatenationOptions = 10 AddOptions = 11 L2NormOptions = 12 LocalResponseNormalizationOptions = 13 LSTMOptions = 14 ResizeBilinearOptions = 15 CallOptions = 16 ReshapeOptions = 17 SkipGramOptions = 18 SpaceToDepthOptions = 19 EmbeddingLookupSparseOptions = 20 MulOptions = 21 PadOptions = 22 GatherOptions = 23 BatchToSpaceNDOptions = 24 SpaceToBatchNDOptions = 25 TransposeOptions 
= 26 ReducerOptions = 27 SubOptions = 28 DivOptions = 29 SqueezeOptions = 30 SequenceRNNOptions = 31 StridedSliceOptions = 32 ExpOptions = 33 TopKV2Options = 34 SplitOptions = 35 LogSoftmaxOptions = 36 CastOptions = 37 DequantizeOptions = 38 MaximumMinimumOptions = 39 ArgMaxOptions = 40 LessOptions = 41 NegOptions = 42 PadV2Options = 43 GreaterOptions = 44 GreaterEqualOptions = 45 LessEqualOptions = 46 SelectOptions = 47 SliceOptions = 48 TransposeConvOptions = 49 SparseToDenseOptions = 50 TileOptions = 51 ExpandDimsOptions = 52 EqualOptions = 53 NotEqualOptions = 54 ShapeOptions = 55 PowOptions = 56 ArgMinOptions = 57 FakeQuantOptions = 58 PackOptions = 59 LogicalOrOptions = 60 OneHotOptions = 61 LogicalAndOptions = 62 LogicalNotOptions = 63 UnpackOptions = 64 FloorDivOptions = 65 SquareOptions = 66 ZerosLikeOptions = 67 FillOptions = 68 BidirectionalSequenceLSTMOptions = 69 BidirectionalSequenceRNNOptions = 70 UnidirectionalSequenceLSTMOptions = 71 FloorModOptions = 72 RangeOptions = 73 ResizeNearestNeighborOptions = 74 LeakyReluOptions = 75 SquaredDifferenceOptions = 76 MirrorPadOptions = 77 AbsOptions = 78 SplitVOptions = 79 UniqueOptions = 80 ReverseV2Options = 81 AddNOptions = 82 GatherNdOptions = 83 CosOptions = 84 WhereOptions = 85 RankOptions = 86 ReverseSequenceOptions = 87 MatrixDiagOptions = 88 QuantizeOptions = 89 MatrixSetDiagOptions = 90 HardSwishOptions = 91 IfOptions = 92 WhileOptions = 93 DepthToSpaceOptions = 94 NonMaxSuppressionV4Options = 95 NonMaxSuppressionV5Options = 96 ScatterNdOptions = 97 SelectV2Options = 98 DensifyOptions = 99 SegmentSumOptions = 100 BatchMatMulOptions = 101 ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/CallOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class 
CallOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsCallOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = CallOptions() x.Init(buf, n + offset) return x @classmethod def CallOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # CallOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # CallOptions def Subgraph(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos) return 0 def CallOptionsStart(builder): builder.StartObject(1) def CallOptionsAddSubgraph(builder, subgraph): builder.PrependUint32Slot(0, subgraph, 0) def CallOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/CastOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class CastOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsCastOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = CastOptions() x.Init(buf, n + offset) return x @classmethod def CastOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # CastOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # CastOptions def InDataType(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) return 0 # CastOptions def OutDataType(self): o = 
flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) return 0 def CastOptionsStart(builder): builder.StartObject(2) def CastOptionsAddInDataType(builder, inDataType): builder.PrependInt8Slot(0, inDataType, 0) def CastOptionsAddOutDataType(builder, outDataType): builder.PrependInt8Slot(1, outDataType, 0) def CastOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/CombinerType.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite class CombinerType(object): SUM = 0 MEAN = 1 SQRTN = 2 ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/ConcatEmbeddingsOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class ConcatEmbeddingsOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsConcatEmbeddingsOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = ConcatEmbeddingsOptions() x.Init(buf, n + offset) return x @classmethod def ConcatEmbeddingsOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # ConcatEmbeddingsOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # ConcatEmbeddingsOptions def NumChannels(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) return 0 # ConcatEmbeddingsOptions def NumColumnsPerChannel(self, j): o = 
flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) if o != 0: a = self._tab.Vector(o) return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) return 0 # ConcatEmbeddingsOptions def NumColumnsPerChannelAsNumpy(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) if o != 0: return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) return 0 # ConcatEmbeddingsOptions def NumColumnsPerChannelLength(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) if o != 0: return self._tab.VectorLen(o) return 0 # ConcatEmbeddingsOptions def NumColumnsPerChannelIsNone(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) return o == 0 # ConcatEmbeddingsOptions def EmbeddingDimPerChannel(self, j): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) if o != 0: a = self._tab.Vector(o) return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) return 0 # ConcatEmbeddingsOptions def EmbeddingDimPerChannelAsNumpy(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) if o != 0: return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) return 0 # ConcatEmbeddingsOptions def EmbeddingDimPerChannelLength(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) if o != 0: return self._tab.VectorLen(o) return 0 # ConcatEmbeddingsOptions def EmbeddingDimPerChannelIsNone(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) return o == 0 def ConcatEmbeddingsOptionsStart(builder): builder.StartObject(3) def ConcatEmbeddingsOptionsAddNumChannels(builder, numChannels): builder.PrependInt32Slot(0, numChannels, 0) def ConcatEmbeddingsOptionsAddNumColumnsPerChannel(builder, numColumnsPerChannel): builder.PrependUOffsetTRelativeSlot(1, 
flatbuffers.number_types.UOffsetTFlags.py_type(numColumnsPerChannel), 0) def ConcatEmbeddingsOptionsStartNumColumnsPerChannelVector(builder, numElems): return builder.StartVector(4, numElems, 4) def ConcatEmbeddingsOptionsAddEmbeddingDimPerChannel(builder, embeddingDimPerChannel): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(embeddingDimPerChannel), 0) def ConcatEmbeddingsOptionsStartEmbeddingDimPerChannelVector(builder, numElems): return builder.StartVector(4, numElems, 4) def ConcatEmbeddingsOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/ConcatenationOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class ConcatenationOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsConcatenationOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = ConcatenationOptions() x.Init(buf, n + offset) return x @classmethod def ConcatenationOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # ConcatenationOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # ConcatenationOptions def Axis(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) return 0 # ConcatenationOptions def FusedActivationFunction(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) return 0 def ConcatenationOptionsStart(builder): builder.StartObject(2) def 
ConcatenationOptionsAddAxis(builder, axis): builder.PrependInt32Slot(0, axis, 0) def ConcatenationOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(1, fusedActivationFunction, 0) def ConcatenationOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/Conv2DOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class Conv2DOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsConv2DOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = Conv2DOptions() x.Init(buf, n + offset) return x @classmethod def Conv2DOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # Conv2DOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # Conv2DOptions def Padding(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) return 0 # Conv2DOptions def StrideW(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) return 0 # Conv2DOptions def StrideH(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) return 0 # Conv2DOptions def FusedActivationFunction(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) return 0 # Conv2DOptions 
def DilationWFactor(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) return 1 # Conv2DOptions def DilationHFactor(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) return 1 def Conv2DOptionsStart(builder): builder.StartObject(6) def Conv2DOptionsAddPadding(builder, padding): builder.PrependInt8Slot(0, padding, 0) def Conv2DOptionsAddStrideW(builder, strideW): builder.PrependInt32Slot(1, strideW, 0) def Conv2DOptionsAddStrideH(builder, strideH): builder.PrependInt32Slot(2, strideH, 0) def Conv2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(3, fusedActivationFunction, 0) def Conv2DOptionsAddDilationWFactor(builder, dilationWFactor): builder.PrependInt32Slot(4, dilationWFactor, 1) def Conv2DOptionsAddDilationHFactor(builder, dilationHFactor): builder.PrependInt32Slot(5, dilationHFactor, 1) def Conv2DOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/CosOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class CosOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsCosOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = CosOptions() x.Init(buf, n + offset) return x @classmethod def CosOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # CosOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) def 
CosOptionsStart(builder): builder.StartObject(0) def CosOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/CustomOptionsFormat.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite class CustomOptionsFormat(object): FLEXBUFFERS = 0 ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/CustomQuantization.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class CustomQuantization(object): __slots__ = ['_tab'] @classmethod def GetRootAsCustomQuantization(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = CustomQuantization() x.Init(buf, n + offset) return x @classmethod def CustomQuantizationBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # CustomQuantization def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # CustomQuantization def Custom(self, j): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: a = self._tab.Vector(o) return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1)) return 0 # CustomQuantization def CustomAsNumpy(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o) return 0 # CustomQuantization def CustomLength(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.VectorLen(o) return 0 # CustomQuantization def 
CustomIsNone(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) return o == 0 def CustomQuantizationStart(builder): builder.StartObject(1) def CustomQuantizationAddCustom(builder, custom): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(custom), 0) def CustomQuantizationStartCustomVector(builder, numElems): return builder.StartVector(1, numElems, 1) def CustomQuantizationEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/DensifyOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class DensifyOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsDensifyOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = DensifyOptions() x.Init(buf, n + offset) return x @classmethod def DensifyOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # DensifyOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) def DensifyOptionsStart(builder): builder.StartObject(0) def DensifyOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/DepthToSpaceOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class DepthToSpaceOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsDepthToSpaceOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, 
buf, offset) x = DepthToSpaceOptions() x.Init(buf, n + offset) return x @classmethod def DepthToSpaceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # DepthToSpaceOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # DepthToSpaceOptions def BlockSize(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) return 0 def DepthToSpaceOptionsStart(builder): builder.StartObject(1) def DepthToSpaceOptionsAddBlockSize(builder, blockSize): builder.PrependInt32Slot(0, blockSize, 0) def DepthToSpaceOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/DepthwiseConv2DOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class DepthwiseConv2DOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsDepthwiseConv2DOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = DepthwiseConv2DOptions() x.Init(buf, n + offset) return x @classmethod def DepthwiseConv2DOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # DepthwiseConv2DOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # DepthwiseConv2DOptions def Padding(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) return 0 # DepthwiseConv2DOptions def StrideW(self): o = 
flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) return 0 # DepthwiseConv2DOptions def StrideH(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) return 0 # DepthwiseConv2DOptions def DepthMultiplier(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) return 0 # DepthwiseConv2DOptions def FusedActivationFunction(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) return 0 # DepthwiseConv2DOptions def DilationWFactor(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) return 1 # DepthwiseConv2DOptions def DilationHFactor(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) return 1 def DepthwiseConv2DOptionsStart(builder): builder.StartObject(7) def DepthwiseConv2DOptionsAddPadding(builder, padding): builder.PrependInt8Slot(0, padding, 0) def DepthwiseConv2DOptionsAddStrideW(builder, strideW): builder.PrependInt32Slot(1, strideW, 0) def DepthwiseConv2DOptionsAddStrideH(builder, strideH): builder.PrependInt32Slot(2, strideH, 0) def DepthwiseConv2DOptionsAddDepthMultiplier(builder, depthMultiplier): builder.PrependInt32Slot(3, depthMultiplier, 0) def DepthwiseConv2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(4, fusedActivationFunction, 0) def DepthwiseConv2DOptionsAddDilationWFactor(builder, dilationWFactor): builder.PrependInt32Slot(5, 
dilationWFactor, 1) def DepthwiseConv2DOptionsAddDilationHFactor(builder, dilationHFactor): builder.PrependInt32Slot(6, dilationHFactor, 1) def DepthwiseConv2DOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/DequantizeOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class DequantizeOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsDequantizeOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = DequantizeOptions() x.Init(buf, n + offset) return x @classmethod def DequantizeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # DequantizeOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) def DequantizeOptionsStart(builder): builder.StartObject(0) def DequantizeOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/DimensionMetadata.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class DimensionMetadata(object): __slots__ = ['_tab'] @classmethod def GetRootAsDimensionMetadata(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = DimensionMetadata() x.Init(buf, n + offset) return x @classmethod def DimensionMetadataBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", 
size_prefixed=size_prefixed) # DimensionMetadata def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # DimensionMetadata def Format(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) return 0 # DimensionMetadata def DenseSize(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) return 0 # DimensionMetadata def ArraySegmentsType(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) if o != 0: return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos) return 0 # DimensionMetadata def ArraySegments(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) if o != 0: from flatbuffers.table import Table obj = Table(bytearray(), 0) self._tab.Union(obj, o) return obj return None # DimensionMetadata def ArrayIndicesType(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) if o != 0: return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos) return 0 # DimensionMetadata def ArrayIndices(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) if o != 0: from flatbuffers.table import Table obj = Table(bytearray(), 0) self._tab.Union(obj, o) return obj return None def DimensionMetadataStart(builder): builder.StartObject(6) def DimensionMetadataAddFormat(builder, format): builder.PrependInt8Slot(0, format, 0) def DimensionMetadataAddDenseSize(builder, denseSize): builder.PrependInt32Slot(1, denseSize, 0) def DimensionMetadataAddArraySegmentsType(builder, arraySegmentsType): builder.PrependUint8Slot(2, arraySegmentsType, 0) def DimensionMetadataAddArraySegments(builder, arraySegments): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(arraySegments), 0) def 
DimensionMetadataAddArrayIndicesType(builder, arrayIndicesType): builder.PrependUint8Slot(4, arrayIndicesType, 0) def DimensionMetadataAddArrayIndices(builder, arrayIndices): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(arrayIndices), 0) def DimensionMetadataEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/DimensionType.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite class DimensionType(object): DENSE = 0 SPARSE_CSR = 1 ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/DivOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class DivOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsDivOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = DivOptions() x.Init(buf, n + offset) return x @classmethod def DivOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # DivOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # DivOptions def FusedActivationFunction(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) return 0 def DivOptionsStart(builder): builder.StartObject(1) def DivOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0) def DivOptionsEnd(builder): return builder.EndObject() 
================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/EmbeddingLookupSparseOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class EmbeddingLookupSparseOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsEmbeddingLookupSparseOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = EmbeddingLookupSparseOptions() x.Init(buf, n + offset) return x @classmethod def EmbeddingLookupSparseOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # EmbeddingLookupSparseOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # EmbeddingLookupSparseOptions def Combiner(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) return 0 def EmbeddingLookupSparseOptionsStart(builder): builder.StartObject(1) def EmbeddingLookupSparseOptionsAddCombiner(builder, combiner): builder.PrependInt8Slot(0, combiner, 0) def EmbeddingLookupSparseOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/EqualOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class EqualOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsEqualOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = EqualOptions() x.Init(buf, n + offset) return x 
@classmethod def EqualOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # EqualOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) def EqualOptionsStart(builder): builder.StartObject(0) def EqualOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/ExpOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class ExpOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsExpOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = ExpOptions() x.Init(buf, n + offset) return x @classmethod def ExpOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # ExpOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) def ExpOptionsStart(builder): builder.StartObject(0) def ExpOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/ExpandDimsOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class ExpandDimsOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsExpandDimsOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = ExpandDimsOptions() x.Init(buf, n + offset) return x @classmethod def 
ExpandDimsOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # ExpandDimsOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) def ExpandDimsOptionsStart(builder): builder.StartObject(0) def ExpandDimsOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/FakeQuantOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class FakeQuantOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsFakeQuantOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = FakeQuantOptions() x.Init(buf, n + offset) return x @classmethod def FakeQuantOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # FakeQuantOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # FakeQuantOptions def Min(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) return 0.0 # FakeQuantOptions def Max(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) if o != 0: return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) return 0.0 # FakeQuantOptions def NumBits(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) return 0 # FakeQuantOptions def NarrowRange(self): o = 
flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) if o != 0: return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) return False def FakeQuantOptionsStart(builder): builder.StartObject(4) def FakeQuantOptionsAddMin(builder, min): builder.PrependFloat32Slot(0, min, 0.0) def FakeQuantOptionsAddMax(builder, max): builder.PrependFloat32Slot(1, max, 0.0) def FakeQuantOptionsAddNumBits(builder, numBits): builder.PrependInt32Slot(2, numBits, 0) def FakeQuantOptionsAddNarrowRange(builder, narrowRange): builder.PrependBoolSlot(3, narrowRange, 0) def FakeQuantOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/FillOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class FillOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsFillOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = FillOptions() x.Init(buf, n + offset) return x @classmethod def FillOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # FillOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) def FillOptionsStart(builder): builder.StartObject(0) def FillOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/FloorDivOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class FloorDivOptions(object): 
__slots__ = ['_tab'] @classmethod def GetRootAsFloorDivOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = FloorDivOptions() x.Init(buf, n + offset) return x @classmethod def FloorDivOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # FloorDivOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) def FloorDivOptionsStart(builder): builder.StartObject(0) def FloorDivOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/FloorModOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class FloorModOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsFloorModOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = FloorModOptions() x.Init(buf, n + offset) return x @classmethod def FloorModOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # FloorModOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) def FloorModOptionsStart(builder): builder.StartObject(0) def FloorModOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/FullyConnectedOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class FullyConnectedOptions(object): 
__slots__ = ['_tab'] @classmethod def GetRootAsFullyConnectedOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = FullyConnectedOptions() x.Init(buf, n + offset) return x @classmethod def FullyConnectedOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # FullyConnectedOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # FullyConnectedOptions def FusedActivationFunction(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) return 0 # FullyConnectedOptions def WeightsFormat(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) return 0 # FullyConnectedOptions def KeepNumDims(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) if o != 0: return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) return False # FullyConnectedOptions def AsymmetricQuantizeInputs(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) if o != 0: return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) return False def FullyConnectedOptionsStart(builder): builder.StartObject(4) def FullyConnectedOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0) def FullyConnectedOptionsAddWeightsFormat(builder, weightsFormat): builder.PrependInt8Slot(1, weightsFormat, 0) def FullyConnectedOptionsAddKeepNumDims(builder, keepNumDims): builder.PrependBoolSlot(2, keepNumDims, 0) def FullyConnectedOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): builder.PrependBoolSlot(3, asymmetricQuantizeInputs, 0) def 
FullyConnectedOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/FullyConnectedOptionsWeightsFormat.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite class FullyConnectedOptionsWeightsFormat(object): DEFAULT = 0 SHUFFLED4x16INT8 = 1 ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/GatherNdOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class GatherNdOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsGatherNdOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = GatherNdOptions() x.Init(buf, n + offset) return x @classmethod def GatherNdOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # GatherNdOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) def GatherNdOptionsStart(builder): builder.StartObject(0) def GatherNdOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/GatherOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class GatherOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsGatherOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = GatherOptions() x.Init(buf, n + offset) return 
x @classmethod def GatherOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # GatherOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # GatherOptions def Axis(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) return 0 def GatherOptionsStart(builder): builder.StartObject(1) def GatherOptionsAddAxis(builder, axis): builder.PrependInt32Slot(0, axis, 0) def GatherOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/GreaterEqualOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class GreaterEqualOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsGreaterEqualOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = GreaterEqualOptions() x.Init(buf, n + offset) return x @classmethod def GreaterEqualOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # GreaterEqualOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) def GreaterEqualOptionsStart(builder): builder.StartObject(0) def GreaterEqualOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/GreaterOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from 
flatbuffers.compat import import_numpy np = import_numpy() class GreaterOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsGreaterOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = GreaterOptions() x.Init(buf, n + offset) return x @classmethod def GreaterOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # GreaterOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) def GreaterOptionsStart(builder): builder.StartObject(0) def GreaterOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/HardSwishOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class HardSwishOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsHardSwishOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = HardSwishOptions() x.Init(buf, n + offset) return x @classmethod def HardSwishOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # HardSwishOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) def HardSwishOptionsStart(builder): builder.StartObject(0) def HardSwishOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/IfOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat 
import import_numpy np = import_numpy() class IfOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsIfOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = IfOptions() x.Init(buf, n + offset) return x @classmethod def IfOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # IfOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # IfOptions def ThenSubgraphIndex(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) return 0 # IfOptions def ElseSubgraphIndex(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) return 0 def IfOptionsStart(builder): builder.StartObject(2) def IfOptionsAddThenSubgraphIndex(builder, thenSubgraphIndex): builder.PrependInt32Slot(0, thenSubgraphIndex, 0) def IfOptionsAddElseSubgraphIndex(builder, elseSubgraphIndex): builder.PrependInt32Slot(1, elseSubgraphIndex, 0) def IfOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/Int32Vector.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class Int32Vector(object): __slots__ = ['_tab'] @classmethod def GetRootAsInt32Vector(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = Int32Vector() x.Init(buf, n + offset) return x @classmethod def Int32VectorBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, 
offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # Int32Vector def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # Int32Vector def Values(self, j): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: a = self._tab.Vector(o) return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) return 0 # Int32Vector def ValuesAsNumpy(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) return 0 # Int32Vector def ValuesLength(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.VectorLen(o) return 0 # Int32Vector def ValuesIsNone(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) return o == 0 def Int32VectorStart(builder): builder.StartObject(1) def Int32VectorAddValues(builder, values): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(values), 0) def Int32VectorStartValuesVector(builder, numElems): return builder.StartVector(4, numElems, 4) def Int32VectorEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/L2NormOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class L2NormOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsL2NormOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = L2NormOptions() x.Init(buf, n + offset) return x @classmethod def L2NormOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", 
size_prefixed=size_prefixed) # L2NormOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # L2NormOptions def FusedActivationFunction(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) return 0 def L2NormOptionsStart(builder): builder.StartObject(1) def L2NormOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0) def L2NormOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/LSHProjectionOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class LSHProjectionOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsLSHProjectionOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = LSHProjectionOptions() x.Init(buf, n + offset) return x @classmethod def LSHProjectionOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # LSHProjectionOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # LSHProjectionOptions def Type(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) return 0 def LSHProjectionOptionsStart(builder): builder.StartObject(1) def LSHProjectionOptionsAddType(builder, type): builder.PrependInt8Slot(0, type, 0) def LSHProjectionOptionsEnd(builder): return builder.EndObject() ================================================ FILE: 
nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/LSHProjectionType.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite class LSHProjectionType(object): UNKNOWN = 0 SPARSE = 1 DENSE = 2 ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/LSTMKernelType.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite class LSTMKernelType(object): FULL = 0 BASIC = 1 ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/LSTMOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class LSTMOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsLSTMOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = LSTMOptions() x.Init(buf, n + offset) return x @classmethod def LSTMOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # LSTMOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # LSTMOptions def FusedActivationFunction(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) return 0 # LSTMOptions def CellClip(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) if o != 0: return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) return 0.0 # LSTMOptions def ProjClip(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) if o != 0: return 
self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) return 0.0 # LSTMOptions def KernelType(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) return 0 # LSTMOptions def AsymmetricQuantizeInputs(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) if o != 0: return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) return False def LSTMOptionsStart(builder): builder.StartObject(5) def LSTMOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0) def LSTMOptionsAddCellClip(builder, cellClip): builder.PrependFloat32Slot(1, cellClip, 0.0) def LSTMOptionsAddProjClip(builder, projClip): builder.PrependFloat32Slot(2, projClip, 0.0) def LSTMOptionsAddKernelType(builder, kernelType): builder.PrependInt8Slot(3, kernelType, 0) def LSTMOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): builder.PrependBoolSlot(4, asymmetricQuantizeInputs, 0) def LSTMOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/LeakyReluOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class LeakyReluOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsLeakyReluOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = LeakyReluOptions() x.Init(buf, n + offset) return x @classmethod def LeakyReluOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # LeakyReluOptions def Init(self, buf, 
pos): self._tab = flatbuffers.table.Table(buf, pos) # LeakyReluOptions def Alpha(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) return 0.0 def LeakyReluOptionsStart(builder): builder.StartObject(1) def LeakyReluOptionsAddAlpha(builder, alpha): builder.PrependFloat32Slot(0, alpha, 0.0) def LeakyReluOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/LessEqualOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class LessEqualOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsLessEqualOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = LessEqualOptions() x.Init(buf, n + offset) return x @classmethod def LessEqualOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # LessEqualOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) def LessEqualOptionsStart(builder): builder.StartObject(0) def LessEqualOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/LessOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class LessOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsLessOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = 
LessOptions() x.Init(buf, n + offset) return x @classmethod def LessOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # LessOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) def LessOptionsStart(builder): builder.StartObject(0) def LessOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/LocalResponseNormalizationOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class LocalResponseNormalizationOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsLocalResponseNormalizationOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = LocalResponseNormalizationOptions() x.Init(buf, n + offset) return x @classmethod def LocalResponseNormalizationOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # LocalResponseNormalizationOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # LocalResponseNormalizationOptions def Radius(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) return 0 # LocalResponseNormalizationOptions def Bias(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) if o != 0: return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) return 0.0 # LocalResponseNormalizationOptions def Alpha(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) if o 
!= 0: return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) return 0.0 # LocalResponseNormalizationOptions def Beta(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) if o != 0: return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) return 0.0 def LocalResponseNormalizationOptionsStart(builder): builder.StartObject(4) def LocalResponseNormalizationOptionsAddRadius(builder, radius): builder.PrependInt32Slot(0, radius, 0) def LocalResponseNormalizationOptionsAddBias(builder, bias): builder.PrependFloat32Slot(1, bias, 0.0) def LocalResponseNormalizationOptionsAddAlpha(builder, alpha): builder.PrependFloat32Slot(2, alpha, 0.0) def LocalResponseNormalizationOptionsAddBeta(builder, beta): builder.PrependFloat32Slot(3, beta, 0.0) def LocalResponseNormalizationOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/LogSoftmaxOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class LogSoftmaxOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsLogSoftmaxOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = LogSoftmaxOptions() x.Init(buf, n + offset) return x @classmethod def LogSoftmaxOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # LogSoftmaxOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) def LogSoftmaxOptionsStart(builder): builder.StartObject(0) def LogSoftmaxOptionsEnd(builder): return builder.EndObject() ================================================ FILE: 
nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/LogicalAndOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class LogicalAndOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsLogicalAndOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = LogicalAndOptions() x.Init(buf, n + offset) return x @classmethod def LogicalAndOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # LogicalAndOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) def LogicalAndOptionsStart(builder): builder.StartObject(0) def LogicalAndOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/LogicalNotOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class LogicalNotOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsLogicalNotOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = LogicalNotOptions() x.Init(buf, n + offset) return x @classmethod def LogicalNotOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # LogicalNotOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) def LogicalNotOptionsStart(builder): builder.StartObject(0) def LogicalNotOptionsEnd(builder): return builder.EndObject() 
================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/LogicalOrOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

# NOTE(review): flatc-generated accessor for the field-less TFLite LogicalOrOptions
# table; regenerate from the schema instead of hand-editing.
class LogicalOrOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsLogicalOrOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = LogicalOrOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def LogicalOrOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # LogicalOrOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

def LogicalOrOptionsStart(builder): builder.StartObject(0)
def LogicalOrOptionsEnd(builder): return builder.EndObject()

================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/MatrixDiagOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

# NOTE(review): flatc-generated accessor for the field-less TFLite MatrixDiagOptions
# table; regenerate from the schema instead of hand-editing.
class MatrixDiagOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsMatrixDiagOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = MatrixDiagOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def MatrixDiagOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # MatrixDiagOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

def MatrixDiagOptionsStart(builder): builder.StartObject(0)
def MatrixDiagOptionsEnd(builder): return builder.EndObject()

================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/MatrixSetDiagOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

# NOTE(review): flatc-generated accessor for the field-less TFLite MatrixSetDiagOptions
# table; regenerate from the schema instead of hand-editing.
class MatrixSetDiagOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsMatrixSetDiagOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = MatrixSetDiagOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def MatrixSetDiagOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # MatrixSetDiagOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

def MatrixSetDiagOptionsStart(builder): builder.StartObject(0)
def MatrixSetDiagOptionsEnd(builder): return builder.EndObject()

================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/MaximumMinimumOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

# NOTE(review): flatc-generated accessor for the field-less TFLite MaximumMinimumOptions
# table; regenerate from the schema instead of hand-editing.
class MaximumMinimumOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsMaximumMinimumOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = MaximumMinimumOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def MaximumMinimumOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # MaximumMinimumOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

def MaximumMinimumOptionsStart(builder): builder.StartObject(0)
def MaximumMinimumOptionsEnd(builder): return builder.EndObject()

================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/Metadata.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

# NOTE(review): flatc-generated accessor for the TFLite Metadata table
# (name string + buffer index); regenerate from the schema instead of hand-editing.
class Metadata(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsMetadata(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = Metadata()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def MetadataBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # Metadata
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # Metadata
    # String at vtable slot 4; None when absent.
    def Name(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.String(o + self._tab.Pos)
        return None

    # Metadata
    # uint32 at vtable slot 6, default 0.
    def Buffer(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
        return 0

def MetadataStart(builder): builder.StartObject(2)
def MetadataAddName(builder, name): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
def MetadataAddBuffer(builder, buffer): builder.PrependUint32Slot(1, buffer, 0)
def MetadataEnd(builder): return builder.EndObject()

================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/MirrorPadMode.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

# NOTE(review): flatc-generated enum constants for the TFLite MirrorPadMode;
# regenerate from the schema instead of hand-editing.
class MirrorPadMode(object):
    REFLECT = 0
    SYMMETRIC = 1
================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/MirrorPadOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

# NOTE(review): flatc-generated accessor for the TFLite MirrorPadOptions table;
# regenerate from the schema instead of hand-editing.
class MirrorPadOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsMirrorPadOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = MirrorPadOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def MirrorPadOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # MirrorPadOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # MirrorPadOptions
    # int8 at vtable slot 4, default 0; values correspond to MirrorPadMode.
    def Mode(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0

def MirrorPadOptionsStart(builder): builder.StartObject(1)
def MirrorPadOptionsAddMode(builder, mode): builder.PrependInt8Slot(0, mode, 0)
def MirrorPadOptionsEnd(builder): return builder.EndObject()

================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/Model.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

# NOTE(review): flatc-generated accessor for the TFLite Model root table
# (version, operator codes, subgraphs, description, buffers, metadata);
# regenerate from the schema instead of hand-editing.
class Model(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsModel(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = Model()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def ModelBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # b"\x54\x46\x4C\x33" is the ASCII file identifier "TFL3".
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # Model
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # Model
    # uint32 at vtable slot 4, default 0.
    def Version(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
        return 0

    # Model
    # Vector of OperatorCode tables at vtable slot 6; returns element j or None.
    def OperatorCodes(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            x = self._tab.Vector(o)
            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
            x = self._tab.Indirect(x)
            from .OperatorCode import OperatorCode
            obj = OperatorCode()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    # Model
    def OperatorCodesLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # Model
    def OperatorCodesIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        return o == 0

    # Model
    # Vector of SubGraph tables at vtable slot 8; returns element j or None.
    def Subgraphs(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            x = self._tab.Vector(o)
            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
            x = self._tab.Indirect(x)
            from .SubGraph import SubGraph
            obj = SubGraph()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    # Model
    def SubgraphsLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # Model
    def SubgraphsIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        return o == 0

    # Model
    # String at vtable slot 10; None when absent.
    def Description(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.String(o + self._tab.Pos)
        return None

    # Model
    # Vector of Buffer tables at vtable slot 12; returns element j or None.
    def Buffers(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            x = self._tab.Vector(o)
            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
            x = self._tab.Indirect(x)
            from .Buffer import Buffer
            obj = Buffer()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    # Model
    def BuffersLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # Model
    def BuffersIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        return o == 0

    # Model
    # int32 vector at vtable slot 14.
    def MetadataBuffer(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0

    # Model
    def MetadataBufferAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
        return 0

    # Model
    def MetadataBufferLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # Model
    def MetadataBufferIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        return o == 0

    # Model
    # Vector of Metadata tables at vtable slot 16; returns element j or None.
    def Metadata(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
        if o != 0:
            x = self._tab.Vector(o)
            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
            x = self._tab.Indirect(x)
            from .Metadata import Metadata
            obj = Metadata()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    # Model
    def MetadataLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # Model
    def MetadataIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
        return o == 0

# Builder helpers for serializing a Model table (7 slots).
def ModelStart(builder): builder.StartObject(7)
def ModelAddVersion(builder, version): builder.PrependUint32Slot(0, version, 0)
def ModelAddOperatorCodes(builder, operatorCodes): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(operatorCodes), 0)
def ModelStartOperatorCodesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def ModelAddSubgraphs(builder, subgraphs): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(subgraphs), 0)
def ModelStartSubgraphsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def ModelAddDescription(builder, description): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(description), 0)
def ModelAddBuffers(builder, buffers): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(buffers), 0)
def ModelStartBuffersVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def ModelAddMetadataBuffer(builder, metadataBuffer): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(metadataBuffer), 0)
def ModelStartMetadataBufferVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def ModelAddMetadata(builder, metadata): builder.PrependUOffsetTRelativeSlot(6, flatbuffers.number_types.UOffsetTFlags.py_type(metadata), 0)
def ModelStartMetadataVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def ModelEnd(builder): return builder.EndObject()

================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/MulOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

# NOTE(review): flatc-generated accessor for the TFLite MulOptions table;
# regenerate from the schema instead of hand-editing.
class MulOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsMulOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = MulOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def MulOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # MulOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # MulOptions
    # int8 at vtable slot 4, default 0; values correspond to ActivationFunctionType.
    def FusedActivationFunction(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0

def MulOptionsStart(builder): builder.StartObject(1)
def MulOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0)
def MulOptionsEnd(builder): return builder.EndObject()

================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/NegOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

# NOTE(review): flatc-generated accessor for the field-less TFLite NegOptions
# table; regenerate from the schema instead of hand-editing.
class NegOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsNegOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = NegOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def NegOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # NegOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

def NegOptionsStart(builder): builder.StartObject(0)
def NegOptionsEnd(builder): return builder.EndObject()

================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/NonMaxSuppressionV4Options.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

# NOTE(review): flatc-generated accessor for the field-less TFLite
# NonMaxSuppressionV4Options table; regenerate from the schema instead of hand-editing.
class NonMaxSuppressionV4Options(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsNonMaxSuppressionV4Options(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = NonMaxSuppressionV4Options()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def NonMaxSuppressionV4OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # NonMaxSuppressionV4Options
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

def NonMaxSuppressionV4OptionsStart(builder): builder.StartObject(0)
def NonMaxSuppressionV4OptionsEnd(builder): return builder.EndObject()

================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/NonMaxSuppressionV5Options.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

# NOTE(review): flatc-generated accessor for the field-less TFLite
# NonMaxSuppressionV5Options table; regenerate from the schema instead of hand-editing.
class NonMaxSuppressionV5Options(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsNonMaxSuppressionV5Options(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = NonMaxSuppressionV5Options()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def NonMaxSuppressionV5OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # NonMaxSuppressionV5Options
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

def NonMaxSuppressionV5OptionsStart(builder): builder.StartObject(0)
def NonMaxSuppressionV5OptionsEnd(builder): return builder.EndObject()

================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/NotEqualOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

# NOTE(review): flatc-generated accessor for the field-less TFLite NotEqualOptions
# table; regenerate from the schema instead of hand-editing.
class NotEqualOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsNotEqualOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = NotEqualOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def NotEqualOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # NotEqualOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

def NotEqualOptionsStart(builder): builder.StartObject(0)
def NotEqualOptionsEnd(builder): return builder.EndObject()

================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/OneHotOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

# NOTE(review): flatc-generated accessor for the TFLite OneHotOptions table;
# regenerate from the schema instead of hand-editing.
class OneHotOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsOneHotOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = OneHotOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def OneHotOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # OneHotOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # OneHotOptions
    # int32 at vtable slot 4, default 0.
    def Axis(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

def OneHotOptionsStart(builder): builder.StartObject(1)
def OneHotOptionsAddAxis(builder, axis): builder.PrependInt32Slot(0, axis, 0)
def OneHotOptionsEnd(builder): return builder.EndObject()

================================================ FILE:
nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/Operator.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class Operator(object): __slots__ = ['_tab'] @classmethod def GetRootAsOperator(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = Operator() x.Init(buf, n + offset) return x @classmethod def OperatorBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # Operator def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # Operator def OpcodeIndex(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos) return 0 # Operator def Inputs(self, j): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) if o != 0: a = self._tab.Vector(o) return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) return 0 # Operator def InputsAsNumpy(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) if o != 0: return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) return 0 # Operator def InputsLength(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) if o != 0: return self._tab.VectorLen(o) return 0 # Operator def InputsIsNone(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) return o == 0 # Operator def Outputs(self, j): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) if o != 0: a = self._tab.Vector(o) return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) return 0 # 
Operator def OutputsAsNumpy(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) if o != 0: return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) return 0 # Operator def OutputsLength(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) if o != 0: return self._tab.VectorLen(o) return 0 # Operator def OutputsIsNone(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) return o == 0 # Operator def BuiltinOptionsType(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) if o != 0: return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos) return 0 # Operator def BuiltinOptions(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) if o != 0: from flatbuffers.table import Table obj = Table(bytearray(), 0) self._tab.Union(obj, o) return obj return None # Operator def CustomOptions(self, j): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) if o != 0: a = self._tab.Vector(o) return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1)) return 0 # Operator def CustomOptionsAsNumpy(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) if o != 0: return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o) return 0 # Operator def CustomOptionsLength(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) if o != 0: return self._tab.VectorLen(o) return 0 # Operator def CustomOptionsIsNone(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) return o == 0 # Operator def CustomOptionsFormat(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) return 0 # Operator def MutatingVariableInputs(self, j): o = 
flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) if o != 0: a = self._tab.Vector(o) return self._tab.Get(flatbuffers.number_types.BoolFlags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1)) return 0 # Operator def MutatingVariableInputsAsNumpy(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) if o != 0: return self._tab.GetVectorAsNumpy(flatbuffers.number_types.BoolFlags, o) return 0 # Operator def MutatingVariableInputsLength(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) if o != 0: return self._tab.VectorLen(o) return 0 # Operator def MutatingVariableInputsIsNone(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) return o == 0 # Operator def Intermediates(self, j): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20)) if o != 0: a = self._tab.Vector(o) return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) return 0 # Operator def IntermediatesAsNumpy(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20)) if o != 0: return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) return 0 # Operator def IntermediatesLength(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20)) if o != 0: return self._tab.VectorLen(o) return 0 # Operator def IntermediatesIsNone(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20)) return o == 0 def OperatorStart(builder): builder.StartObject(9) def OperatorAddOpcodeIndex(builder, opcodeIndex): builder.PrependUint32Slot(0, opcodeIndex, 0) def OperatorAddInputs(builder, inputs): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(inputs), 0) def OperatorStartInputsVector(builder, numElems): return builder.StartVector(4, numElems, 4) def OperatorAddOutputs(builder, outputs): builder.PrependUOffsetTRelativeSlot(2, 
flatbuffers.number_types.UOffsetTFlags.py_type(outputs), 0) def OperatorStartOutputsVector(builder, numElems): return builder.StartVector(4, numElems, 4) def OperatorAddBuiltinOptionsType(builder, builtinOptionsType): builder.PrependUint8Slot(3, builtinOptionsType, 0) def OperatorAddBuiltinOptions(builder, builtinOptions): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(builtinOptions), 0) def OperatorAddCustomOptions(builder, customOptions): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(customOptions), 0) def OperatorStartCustomOptionsVector(builder, numElems): return builder.StartVector(1, numElems, 1) def OperatorAddCustomOptionsFormat(builder, customOptionsFormat): builder.PrependInt8Slot(6, customOptionsFormat, 0) def OperatorAddMutatingVariableInputs(builder, mutatingVariableInputs): builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(mutatingVariableInputs), 0) def OperatorStartMutatingVariableInputsVector(builder, numElems): return builder.StartVector(1, numElems, 1) def OperatorAddIntermediates(builder, intermediates): builder.PrependUOffsetTRelativeSlot(8, flatbuffers.number_types.UOffsetTFlags.py_type(intermediates), 0) def OperatorStartIntermediatesVector(builder, numElems): return builder.StartVector(4, numElems, 4) def OperatorEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/OperatorCode.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class OperatorCode(object): __slots__ = ['_tab'] @classmethod def GetRootAsOperatorCode(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = OperatorCode() x.Init(buf, n + offset) return x 
# NOTE(review): mangled dump continues — canonical flatc formatting restored,
# tokens unchanged.  This span carries the tail of OperatorCode.py (its class
# header is on the previous dump line) and the complete PackOptions, PadOptions,
# PadV2Options, Padding and Pool2DOptions modules.  Generated code — do not
# hand-edit.

================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/PackOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class PackOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsPackOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = PackOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def PackOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # PackOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # PackOptions
    def ValuesCount(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # PackOptions
    def Axis(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

def PackOptionsStart(builder): builder.StartObject(2)
def PackOptionsAddValuesCount(builder, valuesCount): builder.PrependInt32Slot(0, valuesCount, 0)
def PackOptionsAddAxis(builder, axis): builder.PrependInt32Slot(1, axis, 0)
def PackOptionsEnd(builder): return builder.EndObject()


================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/PadOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

# Empty options table: PAD carries no attributes beyond its tensors.
class PadOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsPadOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = PadOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def PadOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # PadOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

def PadOptionsStart(builder): builder.StartObject(0)
def PadOptionsEnd(builder): return builder.EndObject()


================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/PadV2Options.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class PadV2Options(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsPadV2Options(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = PadV2Options()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def PadV2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # PadV2Options
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

def PadV2OptionsStart(builder): builder.StartObject(0)
def PadV2OptionsEnd(builder): return builder.EndObject()


================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/Padding.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

# Enum for the TFLite padding schemes.
class Padding(object):
    SAME = 0
    VALID = 1


================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/Pool2DOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class Pool2DOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsPool2DOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = Pool2DOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def Pool2DOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # Pool2DOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # Pool2DOptions
    # int8 enum value; see Padding.py for the meaning of 0/1.
    def Padding(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0

    # Pool2DOptions
    def StrideW(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # Pool2DOptions
    def StrideH(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # Pool2DOptions
    def FilterWidth(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # Pool2DOptions
    def FilterHeight(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # Pool2DOptions
    def FusedActivationFunction(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0

def Pool2DOptionsStart(builder): builder.StartObject(6)
def Pool2DOptionsAddPadding(builder, padding): builder.PrependInt8Slot(0, padding, 0)
def Pool2DOptionsAddStrideW(builder, strideW): builder.PrependInt32Slot(1, strideW, 0)
def Pool2DOptionsAddStrideH(builder, strideH): builder.PrependInt32Slot(2, strideH, 0)
def Pool2DOptionsAddFilterWidth(builder, filterWidth): builder.PrependInt32Slot(3, filterWidth, 0)
def Pool2DOptionsAddFilterHeight(builder, filterHeight): builder.PrependInt32Slot(4, filterHeight, 0)
def Pool2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(5, fusedActivationFunction, 0)
def Pool2DOptionsEnd(builder): return builder.EndObject()
# NOTE(review): mangled dump continues — canonical flatc formatting restored,
# tokens unchanged.  Complete generated modules PowOptions through
# SegmentSumOptions.  Generated code — do not hand-edit.

================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/PowOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class PowOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsPowOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = PowOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def PowOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # PowOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

def PowOptionsStart(builder): builder.StartObject(0)
def PowOptionsEnd(builder): return builder.EndObject()


================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/QuantizationDetails.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

# Union-type discriminator for QuantizationParameters.Details.
class QuantizationDetails(object):
    NONE = 0
    CustomQuantization = 1


================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/QuantizationParameters.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class QuantizationParameters(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsQuantizationParameters(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = QuantizationParameters()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def QuantizationParametersBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # QuantizationParameters
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # QuantizationParameters
    # min/max/scale are float32 vectors; zero_point is an int64 vector.
    def Min(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0

    # QuantizationParameters
    def MinAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
        return 0

    # QuantizationParameters
    def MinLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # QuantizationParameters
    def MinIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        return o == 0

    # QuantizationParameters
    def Max(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0

    # QuantizationParameters
    def MaxAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
        return 0

    # QuantizationParameters
    def MaxLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # QuantizationParameters
    def MaxIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        return o == 0

    # QuantizationParameters
    def Scale(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0

    # QuantizationParameters
    def ScaleAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
        return 0

    # QuantizationParameters
    def ScaleLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # QuantizationParameters
    def ScaleIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        return o == 0

    # QuantizationParameters
    def ZeroPoint(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
        return 0

    # QuantizationParameters
    def ZeroPointAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
        return 0

    # QuantizationParameters
    def ZeroPointLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # QuantizationParameters
    def ZeroPointIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        return o == 0

    # QuantizationParameters
    # Union discriminator for Details(); values defined in QuantizationDetails.py.
    def DetailsType(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
        return 0

    # QuantizationParameters
    # Returns an untyped Table positioned at the union payload; the caller must
    # interpret it according to DetailsType().
    def Details(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            from flatbuffers.table import Table
            obj = Table(bytearray(), 0)
            self._tab.Union(obj, o)
            return obj
        return None

    # QuantizationParameters
    def QuantizedDimension(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

def QuantizationParametersStart(builder): builder.StartObject(7)
def QuantizationParametersAddMin(builder, min): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(min), 0)
def QuantizationParametersStartMinVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def QuantizationParametersAddMax(builder, max): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(max), 0)
def QuantizationParametersStartMaxVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def QuantizationParametersAddScale(builder, scale): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(scale), 0)
def QuantizationParametersStartScaleVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def QuantizationParametersAddZeroPoint(builder, zeroPoint): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(zeroPoint), 0)
def QuantizationParametersStartZeroPointVector(builder, numElems): return builder.StartVector(8, numElems, 8)
def QuantizationParametersAddDetailsType(builder, detailsType): builder.PrependUint8Slot(4, detailsType, 0)
def QuantizationParametersAddDetails(builder, details): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(details), 0)
def QuantizationParametersAddQuantizedDimension(builder, quantizedDimension): builder.PrependInt32Slot(6, quantizedDimension, 0)
def QuantizationParametersEnd(builder): return builder.EndObject()


================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/QuantizeOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class QuantizeOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsQuantizeOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = QuantizeOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def QuantizeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # QuantizeOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

def QuantizeOptionsStart(builder): builder.StartObject(0)
def QuantizeOptionsEnd(builder): return builder.EndObject()


================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/RNNOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class RNNOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsRNNOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = RNNOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def RNNOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # RNNOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # RNNOptions
    def FusedActivationFunction(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0

    # RNNOptions
    def AsymmetricQuantizeInputs(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

def RNNOptionsStart(builder): builder.StartObject(2)
def RNNOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0)
def RNNOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): builder.PrependBoolSlot(1, asymmetricQuantizeInputs, 0)
def RNNOptionsEnd(builder): return builder.EndObject()


================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/RangeOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class RangeOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsRangeOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = RangeOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def RangeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # RangeOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

def RangeOptionsStart(builder): builder.StartObject(0)
def RangeOptionsEnd(builder): return builder.EndObject()


================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/RankOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class RankOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsRankOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = RankOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def RankOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # RankOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

def RankOptionsStart(builder): builder.StartObject(0)
def RankOptionsEnd(builder): return builder.EndObject()


================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/ReducerOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class ReducerOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsReducerOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = ReducerOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def ReducerOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # ReducerOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # ReducerOptions
    def KeepDims(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

def ReducerOptionsStart(builder): builder.StartObject(1)
def ReducerOptionsAddKeepDims(builder, keepDims): builder.PrependBoolSlot(0, keepDims, 0)
def ReducerOptionsEnd(builder): return builder.EndObject()


================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/ReshapeOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class ReshapeOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsReshapeOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = ReshapeOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def ReshapeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # ReshapeOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # ReshapeOptions
    def NewShape(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0

    # ReshapeOptions
    def NewShapeAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
        return 0

    # ReshapeOptions
    def NewShapeLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # ReshapeOptions
    def NewShapeIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        return o == 0

def ReshapeOptionsStart(builder): builder.StartObject(1)
def ReshapeOptionsAddNewShape(builder, newShape): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(newShape), 0)
def ReshapeOptionsStartNewShapeVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def ReshapeOptionsEnd(builder): return builder.EndObject()


================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/ResizeBilinearOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class ResizeBilinearOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsResizeBilinearOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = ResizeBilinearOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def ResizeBilinearOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # ResizeBilinearOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # ResizeBilinearOptions
    # NOTE(review): accessors start at vtable offset 8 and builder slots at 2;
    # slots 0-1 are skipped — presumably deprecated schema fields. Confirm
    # against the TFLite schema.fbs before relying on slot numbering.
    def AlignCorners(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

    # ResizeBilinearOptions
    def HalfPixelCenters(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

def ResizeBilinearOptionsStart(builder): builder.StartObject(4)
def ResizeBilinearOptionsAddAlignCorners(builder, alignCorners): builder.PrependBoolSlot(2, alignCorners, 0)
def ResizeBilinearOptionsAddHalfPixelCenters(builder, halfPixelCenters): builder.PrependBoolSlot(3, halfPixelCenters, 0)
def ResizeBilinearOptionsEnd(builder): return builder.EndObject()


================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/ResizeNearestNeighborOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class ResizeNearestNeighborOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsResizeNearestNeighborOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = ResizeNearestNeighborOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def ResizeNearestNeighborOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # ResizeNearestNeighborOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # ResizeNearestNeighborOptions
    def AlignCorners(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

    # ResizeNearestNeighborOptions
    def HalfPixelCenters(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

def ResizeNearestNeighborOptionsStart(builder): builder.StartObject(2)
def ResizeNearestNeighborOptionsAddAlignCorners(builder, alignCorners): builder.PrependBoolSlot(0, alignCorners, 0)
def ResizeNearestNeighborOptionsAddHalfPixelCenters(builder, halfPixelCenters): builder.PrependBoolSlot(1, halfPixelCenters, 0)
def ResizeNearestNeighborOptionsEnd(builder): return builder.EndObject()


================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/ReverseSequenceOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class ReverseSequenceOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsReverseSequenceOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = ReverseSequenceOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def ReverseSequenceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # ReverseSequenceOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # ReverseSequenceOptions
    def SeqDim(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # ReverseSequenceOptions
    def BatchDim(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

def ReverseSequenceOptionsStart(builder): builder.StartObject(2)
def ReverseSequenceOptionsAddSeqDim(builder, seqDim): builder.PrependInt32Slot(0, seqDim, 0)
def ReverseSequenceOptionsAddBatchDim(builder, batchDim): builder.PrependInt32Slot(1, batchDim, 0)
def ReverseSequenceOptionsEnd(builder): return builder.EndObject()


================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/ReverseV2Options.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class ReverseV2Options(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsReverseV2Options(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = ReverseV2Options()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def ReverseV2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # ReverseV2Options
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

def ReverseV2OptionsStart(builder): builder.StartObject(0)
def ReverseV2OptionsEnd(builder): return builder.EndObject()


================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/SVDFOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class SVDFOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsSVDFOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = SVDFOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def SVDFOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # SVDFOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # SVDFOptions
    def Rank(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # SVDFOptions
    def FusedActivationFunction(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0

    # SVDFOptions
    def AsymmetricQuantizeInputs(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

def SVDFOptionsStart(builder): builder.StartObject(3)
def SVDFOptionsAddRank(builder, rank): builder.PrependInt32Slot(0, rank, 0)
def SVDFOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(1, fusedActivationFunction, 0)
def SVDFOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): builder.PrependBoolSlot(2, asymmetricQuantizeInputs, 0)
def SVDFOptionsEnd(builder): return builder.EndObject()


================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/ScatterNdOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class ScatterNdOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsScatterNdOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = ScatterNdOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def ScatterNdOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # ScatterNdOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

def ScatterNdOptionsStart(builder): builder.StartObject(0)
def ScatterNdOptionsEnd(builder): return builder.EndObject()


================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/SegmentSumOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class SegmentSumOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsSegmentSumOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = SegmentSumOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def SegmentSumOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # SegmentSumOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

def SegmentSumOptionsStart(builder): builder.StartObject(0)
def SegmentSumOptionsEnd(builder): return builder.EndObject()
================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/SelectOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class SelectOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsSelectOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = SelectOptions() x.Init(buf, n + offset) return x @classmethod def SelectOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # SelectOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) def SelectOptionsStart(builder): builder.StartObject(0) def SelectOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/SelectV2Options.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class SelectV2Options(object): __slots__ = ['_tab'] @classmethod def GetRootAsSelectV2Options(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = SelectV2Options() x.Init(buf, n + offset) return x @classmethod def SelectV2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # SelectV2Options def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) def SelectV2OptionsStart(builder): builder.StartObject(0) def SelectV2OptionsEnd(builder): return builder.EndObject() 
================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/SequenceRNNOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify
# NOTE(review): flatc-generated bindings for TFLite option tables (SequenceRNN,
# Shape, SkipGram, Slice, Softmax, SpaceToBatchND, SpaceToDepth) — regenerate
# with flatc from the schema instead of editing by hand.

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class SequenceRNNOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsSequenceRNNOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = SequenceRNNOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def SequenceRNNOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # b"\x54\x46\x4C\x33" is ASCII "TFL3", the TFLite file identifier.
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # SequenceRNNOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # SequenceRNNOptions
    def TimeMajor(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

    # SequenceRNNOptions
    def FusedActivationFunction(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0

    # SequenceRNNOptions
    def AsymmetricQuantizeInputs(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

def SequenceRNNOptionsStart(builder): builder.StartObject(3)
def SequenceRNNOptionsAddTimeMajor(builder, timeMajor): builder.PrependBoolSlot(0, timeMajor, 0)
def SequenceRNNOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(1, fusedActivationFunction, 0)
def SequenceRNNOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): builder.PrependBoolSlot(2, asymmetricQuantizeInputs, 0)
def SequenceRNNOptionsEnd(builder): return builder.EndObject()
================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/ShapeOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class ShapeOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsShapeOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = ShapeOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def ShapeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # ShapeOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # ShapeOptions
    def OutType(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0

def ShapeOptionsStart(builder): builder.StartObject(1)
def ShapeOptionsAddOutType(builder, outType): builder.PrependInt8Slot(0, outType, 0)
def ShapeOptionsEnd(builder): return builder.EndObject()
================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/SkipGramOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class SkipGramOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsSkipGramOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = SkipGramOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def SkipGramOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # SkipGramOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # SkipGramOptions
    def NgramSize(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # SkipGramOptions
    def MaxSkipSize(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # SkipGramOptions
    def IncludeAllNgrams(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

def SkipGramOptionsStart(builder): builder.StartObject(3)
def SkipGramOptionsAddNgramSize(builder, ngramSize): builder.PrependInt32Slot(0, ngramSize, 0)
def SkipGramOptionsAddMaxSkipSize(builder, maxSkipSize): builder.PrependInt32Slot(1, maxSkipSize, 0)
def SkipGramOptionsAddIncludeAllNgrams(builder, includeAllNgrams): builder.PrependBoolSlot(2, includeAllNgrams, 0)
def SkipGramOptionsEnd(builder): return builder.EndObject()
================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/SliceOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class SliceOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsSliceOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = SliceOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def SliceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # SliceOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

def SliceOptionsStart(builder): builder.StartObject(0)
def SliceOptionsEnd(builder): return builder.EndObject()
================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/SoftmaxOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class SoftmaxOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsSoftmaxOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = SoftmaxOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def SoftmaxOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # SoftmaxOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # SoftmaxOptions
    def Beta(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
        return 0.0

def SoftmaxOptionsStart(builder): builder.StartObject(1)
def SoftmaxOptionsAddBeta(builder, beta): builder.PrependFloat32Slot(0, beta, 0.0)
def SoftmaxOptionsEnd(builder): return builder.EndObject()
================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/SpaceToBatchNDOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class SpaceToBatchNDOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsSpaceToBatchNDOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = SpaceToBatchNDOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def SpaceToBatchNDOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # SpaceToBatchNDOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

def SpaceToBatchNDOptionsStart(builder): builder.StartObject(0)
def SpaceToBatchNDOptionsEnd(builder): return builder.EndObject()
================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/SpaceToDepthOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class SpaceToDepthOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsSpaceToDepthOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = SpaceToDepthOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def SpaceToDepthOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # SpaceToDepthOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # SpaceToDepthOptions
    def BlockSize(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

def SpaceToDepthOptionsStart(builder): builder.StartObject(1)
def SpaceToDepthOptionsAddBlockSize(builder, blockSize): builder.PrependInt32Slot(0, blockSize, 0)
def SpaceToDepthOptionsEnd(builder): return builder.EndObject()
================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/SparseIndexVector.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify
# NOTE(review): flatc-generated bindings (SparseIndexVector union enum,
# SparseToDense, SparsityParameters, Split, SplitV, Square, SquaredDifference,
# Squeeze, StridedSlice, SubGraph, SubOptions) — regenerate with flatc from the
# schema instead of editing by hand.

# namespace: tflite

class SparseIndexVector(object):
    # Union discriminator values for the sparse-index-vector variants.
    NONE = 0
    Int32Vector = 1
    Uint16Vector = 2
    Uint8Vector = 3
================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/SparseToDenseOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class SparseToDenseOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsSparseToDenseOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = SparseToDenseOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def SparseToDenseOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # b"\x54\x46\x4C\x33" is ASCII "TFL3", the TFLite file identifier.
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # SparseToDenseOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # SparseToDenseOptions
    def ValidateIndices(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

def SparseToDenseOptionsStart(builder): builder.StartObject(1)
def SparseToDenseOptionsAddValidateIndices(builder, validateIndices): builder.PrependBoolSlot(0, validateIndices, 0)
def SparseToDenseOptionsEnd(builder): return builder.EndObject()
================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/SparsityParameters.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class SparsityParameters(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsSparsityParameters(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = SparsityParameters()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def SparsityParametersBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # SparsityParameters
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # SparsityParameters
    def TraversalOrder(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0

    # SparsityParameters
    def TraversalOrderAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
        return 0

    # SparsityParameters
    def TraversalOrderLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # SparsityParameters
    def TraversalOrderIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        return o == 0

    # SparsityParameters
    def BlockMap(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0

    # SparsityParameters
    def BlockMapAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
        return 0

    # SparsityParameters
    def BlockMapLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # SparsityParameters
    def BlockMapIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        return o == 0

    # SparsityParameters
    def DimMetadata(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            x = self._tab.Vector(o)
            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
            x = self._tab.Indirect(x)
            from .DimensionMetadata import DimensionMetadata
            obj = DimensionMetadata()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    # SparsityParameters
    def DimMetadataLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # SparsityParameters
    def DimMetadataIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        return o == 0

def SparsityParametersStart(builder): builder.StartObject(3)
def SparsityParametersAddTraversalOrder(builder, traversalOrder): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(traversalOrder), 0)
def SparsityParametersStartTraversalOrderVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def SparsityParametersAddBlockMap(builder, blockMap): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(blockMap), 0)
def SparsityParametersStartBlockMapVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def SparsityParametersAddDimMetadata(builder, dimMetadata): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(dimMetadata), 0)
def SparsityParametersStartDimMetadataVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def SparsityParametersEnd(builder): return builder.EndObject()
================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/SplitOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class SplitOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsSplitOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = SplitOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def SplitOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # SplitOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # SplitOptions
    def NumSplits(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

def SplitOptionsStart(builder): builder.StartObject(1)
def SplitOptionsAddNumSplits(builder, numSplits): builder.PrependInt32Slot(0, numSplits, 0)
def SplitOptionsEnd(builder): return builder.EndObject()
================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/SplitVOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class SplitVOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsSplitVOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = SplitVOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def SplitVOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # SplitVOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # SplitVOptions
    def NumSplits(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

def SplitVOptionsStart(builder): builder.StartObject(1)
def SplitVOptionsAddNumSplits(builder, numSplits): builder.PrependInt32Slot(0, numSplits, 0)
def SplitVOptionsEnd(builder): return builder.EndObject()
================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/SquareOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class SquareOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsSquareOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = SquareOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def SquareOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # SquareOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

def SquareOptionsStart(builder): builder.StartObject(0)
def SquareOptionsEnd(builder): return builder.EndObject()
================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/SquaredDifferenceOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class SquaredDifferenceOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsSquaredDifferenceOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = SquaredDifferenceOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def SquaredDifferenceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # SquaredDifferenceOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

def SquaredDifferenceOptionsStart(builder): builder.StartObject(0)
def SquaredDifferenceOptionsEnd(builder): return builder.EndObject()
================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/SqueezeOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class SqueezeOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsSqueezeOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = SqueezeOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def SqueezeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # SqueezeOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # SqueezeOptions
    def SqueezeDims(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0

    # SqueezeOptions
    def SqueezeDimsAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
        return 0

    # SqueezeOptions
    def SqueezeDimsLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # SqueezeOptions
    def SqueezeDimsIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        return o == 0

def SqueezeOptionsStart(builder): builder.StartObject(1)
def SqueezeOptionsAddSqueezeDims(builder, squeezeDims): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(squeezeDims), 0)
def SqueezeOptionsStartSqueezeDimsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def SqueezeOptionsEnd(builder): return builder.EndObject()
================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/StridedSliceOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class StridedSliceOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsStridedSliceOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = StridedSliceOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def StridedSliceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # StridedSliceOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # StridedSliceOptions
    def BeginMask(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # StridedSliceOptions
    def EndMask(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # StridedSliceOptions
    def EllipsisMask(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # StridedSliceOptions
    def NewAxisMask(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # StridedSliceOptions
    def ShrinkAxisMask(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

def StridedSliceOptionsStart(builder): builder.StartObject(5)
def StridedSliceOptionsAddBeginMask(builder, beginMask): builder.PrependInt32Slot(0, beginMask, 0)
def StridedSliceOptionsAddEndMask(builder, endMask): builder.PrependInt32Slot(1, endMask, 0)
def StridedSliceOptionsAddEllipsisMask(builder, ellipsisMask): builder.PrependInt32Slot(2, ellipsisMask, 0)
def StridedSliceOptionsAddNewAxisMask(builder, newAxisMask): builder.PrependInt32Slot(3, newAxisMask, 0)
def StridedSliceOptionsAddShrinkAxisMask(builder, shrinkAxisMask): builder.PrependInt32Slot(4, shrinkAxisMask, 0)
def StridedSliceOptionsEnd(builder): return builder.EndObject()
================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/SubGraph.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class SubGraph(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsSubGraph(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = SubGraph()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def SubGraphBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # SubGraph
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # SubGraph
    def Tensors(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            x = self._tab.Vector(o)
            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
            x = self._tab.Indirect(x)
            from .Tensor import Tensor
            obj = Tensor()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    # SubGraph
    def TensorsLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # SubGraph
    def TensorsIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        return o == 0

    # SubGraph
    def Inputs(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0

    # SubGraph
    def InputsAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
        return 0

    # SubGraph
    def InputsLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # SubGraph
    def InputsIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        return o == 0

    # SubGraph
    def Outputs(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0

    # SubGraph
    def OutputsAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
        return 0

    # SubGraph
    def OutputsLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # SubGraph
    def OutputsIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        return o == 0

    # SubGraph
    def Operators(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            x = self._tab.Vector(o)
            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
            x = self._tab.Indirect(x)
            from .Operator import Operator
            obj = Operator()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    # SubGraph
    def OperatorsLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # SubGraph
    def OperatorsIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        return o == 0

    # SubGraph
    def Name(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return self._tab.String(o + self._tab.Pos)
        return None

def SubGraphStart(builder): builder.StartObject(5)
def SubGraphAddTensors(builder, tensors): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(tensors), 0)
def SubGraphStartTensorsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def SubGraphAddInputs(builder, inputs): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(inputs), 0)
def SubGraphStartInputsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def SubGraphAddOutputs(builder, outputs): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(outputs), 0)
def SubGraphStartOutputsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def SubGraphAddOperators(builder, operators): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(operators), 0)
def SubGraphStartOperatorsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def SubGraphAddName(builder, name): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
def SubGraphEnd(builder): return builder.EndObject()
================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/SubOptions.py
================================================
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: tflite

import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class SubOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsSubOptions(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = SubOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def SubOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # SubOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # SubOptions
    def FusedActivationFunction(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0

def SubOptionsStart(builder): builder.StartObject(1)
def SubOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0)
def SubOptionsEnd(builder): return builder.EndObject()
================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/Tensor.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class Tensor(object): __slots__ = ['_tab'] @classmethod def GetRootAsTensor(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = Tensor() x.Init(buf, n + offset) return x @classmethod def
TensorBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # Tensor def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # Tensor def Shape(self, j): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: a = self._tab.Vector(o) return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) return 0 # Tensor def ShapeAsNumpy(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) return 0 # Tensor def ShapeLength(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.VectorLen(o) return 0 # Tensor def ShapeIsNone(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) return o == 0 # Tensor def Type(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) return 0 # Tensor def Buffer(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) if o != 0: return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos) return 0 # Tensor def Name(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) if o != 0: return self._tab.String(o + self._tab.Pos) return None # Tensor def Quantization(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) if o != 0: x = self._tab.Indirect(o + self._tab.Pos) from .QuantizationParameters import QuantizationParameters obj = QuantizationParameters() obj.Init(self._tab.Bytes, x) return obj return None # Tensor def IsVariable(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) if o != 0: return 
bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) return False # Tensor def Sparsity(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) if o != 0: x = self._tab.Indirect(o + self._tab.Pos) from .SparsityParameters import SparsityParameters obj = SparsityParameters() obj.Init(self._tab.Bytes, x) return obj return None # Tensor def ShapeSignature(self, j): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) if o != 0: a = self._tab.Vector(o) return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) return 0 # Tensor def ShapeSignatureAsNumpy(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) if o != 0: return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) return 0 # Tensor def ShapeSignatureLength(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) if o != 0: return self._tab.VectorLen(o) return 0 # Tensor def ShapeSignatureIsNone(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) return o == 0 def TensorStart(builder): builder.StartObject(8) def TensorAddShape(builder, shape): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(shape), 0) def TensorStartShapeVector(builder, numElems): return builder.StartVector(4, numElems, 4) def TensorAddType(builder, type): builder.PrependInt8Slot(1, type, 0) def TensorAddBuffer(builder, buffer): builder.PrependUint32Slot(2, buffer, 0) def TensorAddName(builder, name): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0) def TensorAddQuantization(builder, quantization): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(quantization), 0) def TensorAddIsVariable(builder, isVariable): builder.PrependBoolSlot(5, isVariable, 0) def TensorAddSparsity(builder, sparsity): 
builder.PrependUOffsetTRelativeSlot(6, flatbuffers.number_types.UOffsetTFlags.py_type(sparsity), 0) def TensorAddShapeSignature(builder, shapeSignature): builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(shapeSignature), 0) def TensorStartShapeSignatureVector(builder, numElems): return builder.StartVector(4, numElems, 4) def TensorEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/TensorType.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite class TensorType(object): FLOAT32 = 0 FLOAT16 = 1 INT32 = 2 UINT8 = 3 INT64 = 4 STRING = 5 BOOL = 6 INT16 = 7 COMPLEX64 = 8 INT8 = 9 FLOAT64 = 10 ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/TileOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class TileOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsTileOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = TileOptions() x.Init(buf, n + offset) return x @classmethod def TileOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # TileOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) def TileOptionsStart(builder): builder.StartObject(0) def TileOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/TopKV2Options.py ================================================ # automatically generated by the FlatBuffers 
compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class TopKV2Options(object): __slots__ = ['_tab'] @classmethod def GetRootAsTopKV2Options(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = TopKV2Options() x.Init(buf, n + offset) return x @classmethod def TopKV2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # TopKV2Options def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) def TopKV2OptionsStart(builder): builder.StartObject(0) def TopKV2OptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/TransposeConvOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class TransposeConvOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsTransposeConvOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = TransposeConvOptions() x.Init(buf, n + offset) return x @classmethod def TransposeConvOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # TransposeConvOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # TransposeConvOptions def Padding(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) return 0 # TransposeConvOptions def StrideW(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) if o != 0: 
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) return 0 # TransposeConvOptions def StrideH(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) return 0 def TransposeConvOptionsStart(builder): builder.StartObject(3) def TransposeConvOptionsAddPadding(builder, padding): builder.PrependInt8Slot(0, padding, 0) def TransposeConvOptionsAddStrideW(builder, strideW): builder.PrependInt32Slot(1, strideW, 0) def TransposeConvOptionsAddStrideH(builder, strideH): builder.PrependInt32Slot(2, strideH, 0) def TransposeConvOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/TransposeOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class TransposeOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsTransposeOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = TransposeOptions() x.Init(buf, n + offset) return x @classmethod def TransposeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # TransposeOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) def TransposeOptionsStart(builder): builder.StartObject(0) def TransposeOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/Uint16Vector.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from 
flatbuffers.compat import import_numpy np = import_numpy() class Uint16Vector(object): __slots__ = ['_tab'] @classmethod def GetRootAsUint16Vector(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = Uint16Vector() x.Init(buf, n + offset) return x @classmethod def Uint16VectorBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # Uint16Vector def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # Uint16Vector def Values(self, j): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: a = self._tab.Vector(o) return self._tab.Get(flatbuffers.number_types.Uint16Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 2)) return 0 # Uint16Vector def ValuesAsNumpy(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint16Flags, o) return 0 # Uint16Vector def ValuesLength(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.VectorLen(o) return 0 # Uint16Vector def ValuesIsNone(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) return o == 0 def Uint16VectorStart(builder): builder.StartObject(1) def Uint16VectorAddValues(builder, values): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(values), 0) def Uint16VectorStartValuesVector(builder, numElems): return builder.StartVector(2, numElems, 2) def Uint16VectorEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/Uint8Vector.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import 
import_numpy np = import_numpy() class Uint8Vector(object): __slots__ = ['_tab'] @classmethod def GetRootAsUint8Vector(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = Uint8Vector() x.Init(buf, n + offset) return x @classmethod def Uint8VectorBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # Uint8Vector def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # Uint8Vector def Values(self, j): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: a = self._tab.Vector(o) return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1)) return 0 # Uint8Vector def ValuesAsNumpy(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o) return 0 # Uint8Vector def ValuesLength(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.VectorLen(o) return 0 # Uint8Vector def ValuesIsNone(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) return o == 0 def Uint8VectorStart(builder): builder.StartObject(1) def Uint8VectorAddValues(builder, values): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(values), 0) def Uint8VectorStartValuesVector(builder, numElems): return builder.StartVector(1, numElems, 1) def Uint8VectorEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/UnidirectionalSequenceLSTMOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = 
import_numpy() class UnidirectionalSequenceLSTMOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsUnidirectionalSequenceLSTMOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = UnidirectionalSequenceLSTMOptions() x.Init(buf, n + offset) return x @classmethod def UnidirectionalSequenceLSTMOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # UnidirectionalSequenceLSTMOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # UnidirectionalSequenceLSTMOptions def FusedActivationFunction(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) return 0 # UnidirectionalSequenceLSTMOptions def CellClip(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) if o != 0: return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) return 0.0 # UnidirectionalSequenceLSTMOptions def ProjClip(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) if o != 0: return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) return 0.0 # UnidirectionalSequenceLSTMOptions def TimeMajor(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) if o != 0: return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) return False # UnidirectionalSequenceLSTMOptions def AsymmetricQuantizeInputs(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) if o != 0: return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) return False def UnidirectionalSequenceLSTMOptionsStart(builder): builder.StartObject(5) def UnidirectionalSequenceLSTMOptionsAddFusedActivationFunction(builder, fusedActivationFunction): 
builder.PrependInt8Slot(0, fusedActivationFunction, 0) def UnidirectionalSequenceLSTMOptionsAddCellClip(builder, cellClip): builder.PrependFloat32Slot(1, cellClip, 0.0) def UnidirectionalSequenceLSTMOptionsAddProjClip(builder, projClip): builder.PrependFloat32Slot(2, projClip, 0.0) def UnidirectionalSequenceLSTMOptionsAddTimeMajor(builder, timeMajor): builder.PrependBoolSlot(3, timeMajor, 0) def UnidirectionalSequenceLSTMOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): builder.PrependBoolSlot(4, asymmetricQuantizeInputs, 0) def UnidirectionalSequenceLSTMOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/UniqueOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class UniqueOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsUniqueOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = UniqueOptions() x.Init(buf, n + offset) return x @classmethod def UniqueOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # UniqueOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # UniqueOptions def IdxOutType(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) return 2 def UniqueOptionsStart(builder): builder.StartObject(1) def UniqueOptionsAddIdxOutType(builder, idxOutType): builder.PrependInt8Slot(0, idxOutType, 2) def UniqueOptionsEnd(builder): return builder.EndObject() ================================================ FILE: 
nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/UnpackOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class UnpackOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsUnpackOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = UnpackOptions() x.Init(buf, n + offset) return x @classmethod def UnpackOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # UnpackOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # UnpackOptions def Num(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) return 0 # UnpackOptions def Axis(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) return 0 def UnpackOptionsStart(builder): builder.StartObject(2) def UnpackOptionsAddNum(builder, num): builder.PrependInt32Slot(0, num, 0) def UnpackOptionsAddAxis(builder, axis): builder.PrependInt32Slot(1, axis, 0) def UnpackOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/WhereOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class WhereOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsWhereOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x 
= WhereOptions() x.Init(buf, n + offset) return x @classmethod def WhereOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # WhereOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) def WhereOptionsStart(builder): builder.StartObject(0) def WhereOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/WhileOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class WhileOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsWhileOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = WhileOptions() x.Init(buf, n + offset) return x @classmethod def WhileOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # WhileOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # WhileOptions def CondSubgraphIndex(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) return 0 # WhileOptions def BodySubgraphIndex(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) return 0 def WhileOptionsStart(builder): builder.StartObject(2) def WhileOptionsAddCondSubgraphIndex(builder, condSubgraphIndex): builder.PrependInt32Slot(0, condSubgraphIndex, 0) def WhileOptionsAddBodySubgraphIndex(builder, bodySubgraphIndex): builder.PrependInt32Slot(1, 
bodySubgraphIndex, 0) def WhileOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/ZerosLikeOptions.py ================================================ # automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class ZerosLikeOptions(object): __slots__ = ['_tab'] @classmethod def GetRootAsZerosLikeOptions(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = ZerosLikeOptions() x.Init(buf, n + offset) return x @classmethod def ZerosLikeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # ZerosLikeOptions def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) def ZerosLikeOptionsStart(builder): builder.StartObject(0) def ZerosLikeOptionsEnd(builder): return builder.EndObject() ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/__init__.py ================================================ # Copyright (c) 2017 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from .AbsOptions import * from .ActivationFunctionType import * from .AddNOptions import * from .AddOptions import * from .ArgMaxOptions import * from .ArgMinOptions import * from .BatchToSpaceNDOptions import * from .BidirectionalSequenceLSTMOptions import * from .BidirectionalSequenceRNNOptions import * from .Buffer import * from .BuiltinOperator import * from .BuiltinOptions import * from .CallOptions import * from .CastOptions import * from .CombinerType import * from .ConcatEmbeddingsOptions import * from .ConcatenationOptions import * from .Conv2DOptions import * from .CosOptions import * from .CustomOptionsFormat import * from .CustomQuantization import * from .DepthToSpaceOptions import * from .DepthwiseConv2DOptions import * from .DequantizeOptions import * from .DivOptions import * from .EmbeddingLookupSparseOptions import * from .EqualOptions import * from .ExpandDimsOptions import * from .ExpOptions import * from .FakeQuantOptions import * from .FillOptions import * from .FloorDivOptions import * from .FloorModOptions import * from .FullyConnectedOptions import * from .FullyConnectedOptionsWeightsFormat import * from .GatherNdOptions import * from .GatherOptions import * from .GreaterEqualOptions import * from .GreaterOptions import * from .HardSwishOptions import * from .IfOptions import * from .L2NormOptions import * from .LeakyReluOptions import * from .LessEqualOptions import * from .LessOptions import * from .LocalResponseNormalizationOptions import * from .LogicalAndOptions import * from .LogicalNotOptions import * from .LogicalOrOptions import * from .LogSoftmaxOptions import * from .LSHProjectionOptions import * from .LSHProjectionType import * from .LSTMKernelType import * from .LSTMOptions import * from .MatrixDiagOptions import * from .MatrixSetDiagOptions import * from .MaximumMinimumOptions import * from .Metadata import * from .MirrorPadMode import * from .MirrorPadOptions import * from .Model import * from .MulOptions import * from 
.NegOptions import * from .NonMaxSuppressionV4Options import * from .NonMaxSuppressionV5Options import * from .NotEqualOptions import * from .OneHotOptions import * from .OperatorCode import * from .Operator import * from .PackOptions import * from .Padding import * from .PadOptions import * from .PadV2Options import * from .Pool2DOptions import * from .PowOptions import * from .QuantizationDetails import * from .QuantizationParameters import * from .QuantizeOptions import * from .RangeOptions import * from .RankOptions import * from .ReducerOptions import * from .ReshapeOptions import * from .ResizeBilinearOptions import * from .ResizeNearestNeighborOptions import * from .ReverseSequenceOptions import * from .ReverseV2Options import * from .RNNOptions import * from .ScatterNdOptions import * from .SelectOptions import * from .SequenceRNNOptions import * from .ShapeOptions import * from .SkipGramOptions import * from .SliceOptions import * from .SoftmaxOptions import * from .SpaceToBatchNDOptions import * from .SpaceToDepthOptions import * from .SparseToDenseOptions import * from .SplitOptions import * from .SplitVOptions import * from .SquaredDifferenceOptions import * from .SquareOptions import * from .SqueezeOptions import * from .StridedSliceOptions import * from .SubGraph import * from .SubOptions import * from .SVDFOptions import * from .Tensor import * from .TensorType import * from .TileOptions import * from .TopKV2Options import * from .TransposeConvOptions import * from .TransposeOptions import * from .UnidirectionalSequenceLSTMOptions import * from .UniqueOptions import * from .UnpackOptions import * from .WhereOptions import * from .WhileOptions import * from .ZerosLikeOptions import * from .BatchMatMulOptions import * from .DensifyOptions import * from .SegmentSumOptions import * from .SelectV2Options import * ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/flatbuffers/schema.fbs 
================================================ // Copyright 2017 The TensorFlow Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Revision History // Version 0: Initial version. // Version 1: Add subgraphs to schema. // Version 2: Rename operators to conform to NN API. // Version 3: Move buffer data from Model.Subgraph.Tensors to Model.Buffers. namespace tflite; // This corresponds to the version. file_identifier "TFL3"; // File extension of any written files. file_extension "tflite"; // IMPORTANT: All new members of tables, enums and unions must be added at the // end to ensure backwards compatibility. // The type of data stored in a tensor. enum TensorType : byte { FLOAT32 = 0, FLOAT16 = 1, INT32 = 2, UINT8 = 3, INT64 = 4, STRING = 5, BOOL = 6, INT16 = 7, COMPLEX64 = 8, INT8 = 9, FLOAT64 = 10, } // Custom quantization parameters for experimenting with new quantization // techniques. table CustomQuantization { custom:[ubyte] (force_align: 16); } // Represents a specific quantization technique's parameters. union QuantizationDetails { CustomQuantization, } // Parameters for converting a quantized tensor back to float. table QuantizationParameters { // These four parameters are the asymmetric linear quantization parameters. // Given a quantized value q, the corresponding float value f should be: // f = scale * (q - zero_point) // For other quantization types, the QuantizationDetails below is used. 
min:[float];  // For importing back into tensorflow. max:[float];  // For importing back into tensorflow. scale:[float];  // For dequantizing the tensor's values. zero_point:[long]; // If this is not none, the other quantization parameters (i.e. min, max, // scale, zero_point fields above) are ignored and the value of the // QuantizationDetails union should be used. details:QuantizationDetails; // Specifies the dimension of the Tensor's shape that the scales and // zero_points correspond to. For example, a tensor t, with dims=[4, 3, 2, 1] // with quantization params: // scale=[1.0, 2.0, 3.0], zero_point=[1, 2, 3], quantization_dimension=1 // will be quantized across the second dimension of t. // t[:, 0, :, :] will have scale[0]=1.0, zero_point[0]=1 // t[:, 1, :, :] will have scale[1]=2.0, zero_point[1]=2 // t[:, 2, :, :] will have scale[2]=3.0, zero_point[2]=3 quantized_dimension:int; } 
// (http://scipy-lectures.org/advanced/scipy_sparse/csr_matrix.html) // The storage type for a dimension. Currently we support: // 1. DENSE: each coordinate in this dimension is stored implicitly. // 2. SPARSE_CSR: only the coordinates with non-zero elements are stored. The // compression technique is the same what CSR uses. // More types like a sparse dimension with a different compression technique // could be added to the list in the future. enum DimensionType : byte { DENSE = 0, SPARSE_CSR = 1, } table Int32Vector { values:[int]; } table Uint16Vector { values:[ushort] (force_align: 4); } table Uint8Vector { values:[ubyte] (force_align: 4); } // Variable-typed buffer to store the index metadata for a sparse dimension. // The widest type is Int32 instead of UInt32 because tensor's shape is a int32 // vector. We don't want the per-dimensional index to overflow that range. union SparseIndexVector { Int32Vector, Uint16Vector, Uint8Vector } table DimensionMetadata { // Whether a dimension is dense or sparse. format:DimensionType; // Index metadata used for a dimension. // - If format is DimensionType.DENSE then we use the dense_size field to // store the size of that dimension. Each index in that dimension is // stored implicitly. // - If format is DimensionType.SPARSE_CSR then we use array_segments and // array_indices to encode that dimension. array_segments represents how // to segment the indices array, each segment corresponds to one element // in the previous dimension. array_indices represents the index of the // non-zero elements within this dimension (as those in the CSR matrix // format, where the first array is row pointers and the second array is // column indices). dense_size:int; array_segments:SparseIndexVector; array_indices:SparseIndexVector; } // Parameters to encode a sparse TfLite tensor. table SparsityParameters { // The traversal order of the dimensions defined in the `shape` field of the // conceptual dense tensor. 
For a n-dimensional tensors with dims (d0, d1, // ..., dn-1), // - if not block sparse, the traversal_order is just a permutation of (d0, // ..., dn-1). For example, a 2-D matrix stored in row-major order would // have traversal_order = (d0, d1). // - if block sparse with a k-dimensional block (0 <= k <= n), the // traversal_order has n + k elements. The first n elements are still a // permutation of (d0, ..., dn-1). The lask k elements are a permutation // of (dn, ..., dn+k-1), defining how to traverse a block internally. For // example, a 2-D matrix with 2-D blocks, both stored in row-major order // would have traversal_order = (d0, d1, d2, d3). traversal_order:[int]; // For an n-dimensional tensor with a k-dimensional block (0 <= k <= n), // stores how a block dimension in (dn, ..., dn+k-1) maps to the original // tensor dimension in (d0, ..., dn). // It's stored in the order of (dn, ..., dn+k-1). // If not block-sparse, this field is NULL. block_map:[int]; // In the traversal order defined above, the metadata needed for // each dimension to locate the non-zero values in the original dense tensor. // The size of the dim_metadata array = the size of the traversal_order array // = n + k. dim_metadata:[DimensionMetadata]; } table Tensor { // The tensor shape. The meaning of each entry is operator-specific but // builtin ops use: [batch size, height, width, number of channels] (That's // Tensorflow's NHWC). shape:[int]; type:TensorType; // An index that refers to the buffers table at the root of the model. Or, // if there is no data buffer associated (i.e. intermediate results), then // this is 0 (which refers to an always existent empty buffer). // // The data_buffer itself is an opaque container, with the assumption that the // target device is little-endian. In addition, all builtin operators assume // the memory is ordered such that if `shape` is [4, 3, 2], then index // [i, j, k] maps to data_buffer[i*3*2 + j*2 + k]. 
buffer:uint; name:string; // For debugging and importing back into tensorflow. quantization:QuantizationParameters; // Optional. is_variable:bool = false; // Parameters to encode a sparse tensor. See the example in // tensorflow/lite/testdata/sparse_tensor.json. sparsity:SparsityParameters; // Optional. // Encodes `shape` with unknown dimensions. Unknown dimensions are // represented with -1. shape_signature:[int]; // Optional. } // A list of builtin operators. Builtin operators are slightly faster than custom // ones, but not by much. Moreover, while custom operators accept an opaque // object containing configuration parameters, builtins have a predetermined // set of acceptable options. enum BuiltinOperator : byte { ADD = 0, AVERAGE_POOL_2D = 1, CONCATENATION = 2, CONV_2D = 3, DEPTHWISE_CONV_2D = 4, DEPTH_TO_SPACE = 5, DEQUANTIZE = 6, EMBEDDING_LOOKUP = 7, FLOOR = 8, FULLY_CONNECTED = 9, HASHTABLE_LOOKUP = 10, L2_NORMALIZATION = 11, L2_POOL_2D = 12, LOCAL_RESPONSE_NORMALIZATION = 13, LOGISTIC = 14, LSH_PROJECTION = 15, LSTM = 16, MAX_POOL_2D = 17, MUL = 18, RELU = 19, // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed // since different model developers use RELU1 in different ways. Never // create another op called RELU1. RELU_N1_TO_1 = 20, RELU6 = 21, RESHAPE = 22, RESIZE_BILINEAR = 23, RNN = 24, SOFTMAX = 25, SPACE_TO_DEPTH = 26, SVDF = 27, TANH = 28, // TODO(aselle): Consider rename to CONCATENATE_EMBEDDINGS CONCAT_EMBEDDINGS = 29, SKIP_GRAM = 30, CALL = 31, CUSTOM = 32, EMBEDDING_LOOKUP_SPARSE = 33, PAD = 34, UNIDIRECTIONAL_SEQUENCE_RNN = 35, GATHER = 36, BATCH_TO_SPACE_ND = 37, SPACE_TO_BATCH_ND = 38, TRANSPOSE = 39, MEAN = 40, SUB = 41, DIV = 42, SQUEEZE = 43, UNIDIRECTIONAL_SEQUENCE_LSTM = 44, STRIDED_SLICE = 45, BIDIRECTIONAL_SEQUENCE_RNN = 46, EXP = 47, TOPK_V2 = 48, SPLIT = 49, LOG_SOFTMAX = 50, // DELEGATE is a special op type for the operations which are delegated to // other backends. 
// WARNING: Experimental interface, subject to change DELEGATE = 51, BIDIRECTIONAL_SEQUENCE_LSTM = 52, CAST = 53, PRELU = 54, MAXIMUM = 55, ARG_MAX = 56, MINIMUM = 57, LESS = 58, NEG = 59, PADV2 = 60, GREATER = 61, GREATER_EQUAL = 62, LESS_EQUAL = 63, SELECT = 64, SLICE = 65, SIN = 66, TRANSPOSE_CONV = 67, SPARSE_TO_DENSE = 68, TILE = 69, EXPAND_DIMS = 70, EQUAL = 71, NOT_EQUAL = 72, LOG = 73, SUM = 74, SQRT = 75, RSQRT = 76, SHAPE = 77, POW = 78, ARG_MIN = 79, FAKE_QUANT = 80, REDUCE_PROD = 81, REDUCE_MAX = 82, PACK = 83, LOGICAL_OR = 84, ONE_HOT = 85, LOGICAL_AND = 86, LOGICAL_NOT = 87, UNPACK = 88, REDUCE_MIN = 89, FLOOR_DIV = 90, REDUCE_ANY = 91, SQUARE = 92, ZEROS_LIKE = 93, FILL = 94, FLOOR_MOD = 95, RANGE = 96, RESIZE_NEAREST_NEIGHBOR = 97, LEAKY_RELU = 98, SQUARED_DIFFERENCE = 99, MIRROR_PAD = 100, ABS = 101, SPLIT_V = 102, UNIQUE = 103, CEIL = 104, REVERSE_V2 = 105, ADD_N = 106, GATHER_ND = 107, COS = 108, WHERE = 109, RANK = 110, ELU = 111, REVERSE_SEQUENCE = 112, MATRIX_DIAG = 113, QUANTIZE = 114, MATRIX_SET_DIAG = 115, ROUND = 116, HARD_SWISH = 117, IF = 118, WHILE = 119, NON_MAX_SUPPRESSION_V4 = 120, NON_MAX_SUPPRESSION_V5 = 121, SCATTER_ND = 122, SELECT_V2 = 123, DENSIFY = 124, SEGMENT_SUM = 125, BATCH_MATMUL = 126 } // Options for the builtin operators. 
union BuiltinOptions { Conv2DOptions, DepthwiseConv2DOptions, ConcatEmbeddingsOptions, LSHProjectionOptions, Pool2DOptions, SVDFOptions, RNNOptions, FullyConnectedOptions, SoftmaxOptions, ConcatenationOptions, AddOptions, L2NormOptions, LocalResponseNormalizationOptions, LSTMOptions, ResizeBilinearOptions, CallOptions, ReshapeOptions, SkipGramOptions, SpaceToDepthOptions, EmbeddingLookupSparseOptions, MulOptions, PadOptions, GatherOptions, BatchToSpaceNDOptions, SpaceToBatchNDOptions, TransposeOptions, ReducerOptions, SubOptions, DivOptions, SqueezeOptions, SequenceRNNOptions, StridedSliceOptions, ExpOptions, TopKV2Options, SplitOptions, LogSoftmaxOptions, CastOptions, DequantizeOptions, MaximumMinimumOptions, ArgMaxOptions, LessOptions, NegOptions, PadV2Options, GreaterOptions, GreaterEqualOptions, LessEqualOptions, SelectOptions, SliceOptions, TransposeConvOptions, SparseToDenseOptions, TileOptions, ExpandDimsOptions, EqualOptions, NotEqualOptions, ShapeOptions, PowOptions, ArgMinOptions, FakeQuantOptions, PackOptions, LogicalOrOptions, OneHotOptions, LogicalAndOptions, LogicalNotOptions, UnpackOptions, FloorDivOptions, SquareOptions, ZerosLikeOptions, FillOptions, BidirectionalSequenceLSTMOptions, BidirectionalSequenceRNNOptions, UnidirectionalSequenceLSTMOptions, FloorModOptions, RangeOptions, ResizeNearestNeighborOptions, LeakyReluOptions, SquaredDifferenceOptions, MirrorPadOptions, AbsOptions, SplitVOptions, UniqueOptions, ReverseV2Options, AddNOptions, GatherNdOptions, CosOptions, WhereOptions, RankOptions, ReverseSequenceOptions, MatrixDiagOptions, QuantizeOptions, MatrixSetDiagOptions, HardSwishOptions, IfOptions, WhileOptions, DepthToSpaceOptions, NonMaxSuppressionV4Options, NonMaxSuppressionV5Options, ScatterNdOptions, SelectV2Options, DensifyOptions, SegmentSumOptions, BatchMatMulOptions } enum Padding : byte { SAME, VALID } enum ActivationFunctionType : byte { NONE = 0, RELU = 1, RELU_N1_TO_1 = 2, RELU6 = 3, TANH = 4, SIGN_BIT = 5, } table 
Conv2DOptions { padding:Padding; stride_w:int; stride_h:int; fused_activation_function:ActivationFunctionType; dilation_w_factor:int = 1; dilation_h_factor:int = 1; } table Pool2DOptions { padding:Padding; stride_w:int; stride_h:int; filter_width:int; filter_height:int; fused_activation_function:ActivationFunctionType; } table DepthwiseConv2DOptions { // Parameters for DepthwiseConv version 1 or above. padding:Padding; stride_w:int; stride_h:int; // `depth_multiplier` is redundant. It's used by CPU kernels in // TensorFlow 2.0 or below, but ignored in versions above. // See comments in lite/c/builtin_op_data.h for more details. depth_multiplier:int; fused_activation_function:ActivationFunctionType; // Parameters for DepthwiseConv version 2 or above. dilation_w_factor:int = 1; dilation_h_factor:int = 1; } table ConcatEmbeddingsOptions { num_channels:int; num_columns_per_channel:[int]; embedding_dim_per_channel:[int]; // This could be inferred from parameters. } enum LSHProjectionType: byte { UNKNOWN = 0, SPARSE = 1, DENSE = 2, } table LSHProjectionOptions { type: LSHProjectionType; } table SVDFOptions { rank:int; fused_activation_function:ActivationFunctionType; // For weights-only quantization, use asymmetric quantization for non // constant inputs at evaluation time. asymmetric_quantize_inputs:bool; } // An implementation of TensorFlow RNNCell. table RNNOptions { fused_activation_function:ActivationFunctionType; asymmetric_quantize_inputs:bool; } // An implementation of TensorFlow dynamic_rnn with RNNCell. table SequenceRNNOptions { time_major:bool; fused_activation_function:ActivationFunctionType; asymmetric_quantize_inputs:bool; } // An implementation of TensorFlow bidrectional_dynamic_rnn with RNNCell. 
table BidirectionalSequenceRNNOptions { time_major:bool; fused_activation_function:ActivationFunctionType; merge_outputs: bool; asymmetric_quantize_inputs:bool; } enum FullyConnectedOptionsWeightsFormat: byte { DEFAULT = 0, SHUFFLED4x16INT8 = 1, } // An implementation of TensorFlow fully_connected (a.k.a Dense) layer. table FullyConnectedOptions { // Parameters for FullyConnected version 1 or above. fused_activation_function:ActivationFunctionType; // Parameters for FullyConnected version 2 or above. weights_format:FullyConnectedOptionsWeightsFormat = DEFAULT; // Parameters for FullyConnected version 5 or above. // If set to true, then the number of dimension is preserved. Furthermore, // all but the last dimension of the input and output shapes will be equal. keep_num_dims: bool; // Parameters for FullyConnected version 7 or above. // If set to true, then weights-only op will use asymmetric quantization for // inputs. asymmetric_quantize_inputs: bool; } table SoftmaxOptions { beta: float; } // An implementation of TensorFlow concat. table ConcatenationOptions { axis:int; fused_activation_function:ActivationFunctionType; } table AddOptions { fused_activation_function:ActivationFunctionType; } table MulOptions { fused_activation_function:ActivationFunctionType; } table L2NormOptions { fused_activation_function:ActivationFunctionType; } table LocalResponseNormalizationOptions { radius:int; bias:float; alpha:float; beta:float; } enum LSTMKernelType : byte { // Full LSTM kernel which supports peephole and projection. FULL = 0, // Basic LSTM kernels. Equivalent to TensorFlow BasicLSTMCell. BASIC = 1, } // An implementation of TensorFlow LSTMCell and CoupledInputForgetGateLSTMCell table LSTMOptions { // Parameters for LSTM version 1 or above. fused_activation_function:ActivationFunctionType; cell_clip: float; // Optional, 0.0 means no clipping proj_clip: float; // Optional, 0.0 means no clipping // Parameters for LSTM version 2 or above. 
// Basic kernel is only supported in version 2 or above. kernel_type: LSTMKernelType = FULL; // Parameters for LSTM version 4 or above. asymmetric_quantize_inputs: bool; } // An implementation of TensorFlow dynamic_rnn with LSTMCell. table UnidirectionalSequenceLSTMOptions { fused_activation_function:ActivationFunctionType; cell_clip: float; // Optional, 0.0 means no clipping proj_clip: float; // Optional, 0.0 means no clipping // If true then first dimension is sequence, otherwise batch. time_major:bool; // Parameter for Unidirectional Sequence LSTM version 4. asymmetric_quantize_inputs:bool; } table BidirectionalSequenceLSTMOptions { // Parameters supported by version 1: fused_activation_function:ActivationFunctionType; cell_clip: float; // Optional, 0.0 means no clipping proj_clip: float; // Optional, 0.0 means no clipping // If true, store the outputs of both directions into the first output. merge_outputs: bool; // Parameters supported by version 2: // If true then first dimension is sequence, otherwise batch. // Version 1 implementations assumed time_major to be true, so this default // value should never change. time_major: bool = true; // Parameters for version 3 or above. asymmetric_quantize_inputs:bool; } table ResizeBilinearOptions { new_height: int (deprecated); new_width: int (deprecated); align_corners: bool; half_pixel_centers: bool; } table ResizeNearestNeighborOptions { align_corners: bool; half_pixel_centers: bool; } // A call operation options table CallOptions { // The subgraph index that needs to be called. 
subgraph:uint; } table PadOptions { } table PadV2Options { } table ReshapeOptions { new_shape:[int]; } table SpaceToBatchNDOptions { } table BatchToSpaceNDOptions { } table SkipGramOptions { ngram_size: int; max_skip_size: int; include_all_ngrams: bool; } table SpaceToDepthOptions { block_size: int; } table DepthToSpaceOptions { block_size: int; } table SubOptions { fused_activation_function:ActivationFunctionType; } table DivOptions { fused_activation_function:ActivationFunctionType; } table TopKV2Options { } enum CombinerType : byte { SUM = 0, MEAN = 1, SQRTN = 2, } table EmbeddingLookupSparseOptions { combiner:CombinerType; } table GatherOptions { axis: int; } table TransposeOptions { } table ExpOptions { } table CosOptions { } table ReducerOptions { keep_dims: bool; } table SqueezeOptions { squeeze_dims:[int]; } table SplitOptions { num_splits: int; } table SplitVOptions { num_splits: int; } table StridedSliceOptions { begin_mask: int; end_mask: int; ellipsis_mask: int; new_axis_mask: int; shrink_axis_mask: int; } table LogSoftmaxOptions { } table CastOptions { in_data_type: TensorType; out_data_type: TensorType; } table DequantizeOptions { } table MaximumMinimumOptions { } table TileOptions { } table ArgMaxOptions { output_type : TensorType; } table ArgMinOptions { output_type : TensorType; } table GreaterOptions { } table GreaterEqualOptions { } table LessOptions { } table LessEqualOptions { } table NegOptions { } table SelectOptions { } table SliceOptions { } table TransposeConvOptions { padding:Padding; stride_w:int; stride_h:int; } table ExpandDimsOptions { } table SparseToDenseOptions { validate_indices:bool; } table EqualOptions { } table NotEqualOptions { } table ShapeOptions { // Optional output type of the operation (int32 or int64). Defaults to int32. 
out_type : TensorType; } table RankOptions { } table PowOptions { } table FakeQuantOptions { // Parameters supported by version 1: min:float; max:float; num_bits:int; // Parameters supported by version 2: narrow_range:bool; } table PackOptions { values_count:int; axis:int; } table LogicalOrOptions { } table OneHotOptions { axis:int; } table AbsOptions { } table HardSwishOptions { } table LogicalAndOptions { } table LogicalNotOptions { } table UnpackOptions { num:int; axis:int; } table FloorDivOptions { } table SquareOptions { } table ZerosLikeOptions { } table FillOptions { } table FloorModOptions { } table RangeOptions { } table LeakyReluOptions { alpha:float; } table SquaredDifferenceOptions { } enum MirrorPadMode : byte { // Doesn't include borders. REFLECT = 0, // Includes borders. SYMMETRIC = 1, } table MirrorPadOptions { mode:MirrorPadMode; } table UniqueOptions { idx_out_type:TensorType = INT32; } table ReverseV2Options { } table AddNOptions { } table GatherNdOptions { } table WhereOptions { } table ReverseSequenceOptions { seq_dim:int; batch_dim:int = 0; } table MatrixDiagOptions { } table QuantizeOptions { } table MatrixSetDiagOptions { } table IfOptions { then_subgraph_index:int; else_subgraph_index:int; } table WhileOptions { cond_subgraph_index:int; body_subgraph_index:int; } table NonMaxSuppressionV4Options { } table NonMaxSuppressionV5Options { } table ScatterNdOptions { } table SelectV2Options { } table DensifyOptions { } table SegmentSumOptions { } table BatchMatMulOptions { adj_x:bool; adj_y:bool; } // An OperatorCode can be an enum value (BuiltinOperator) if the operator is a // builtin, or a string if the operator is custom. table OperatorCode { builtin_code:BuiltinOperator; custom_code:string; // The version of the operator. The version need to be bumped whenever new // parameters are introduced into an op. version:int = 1; } enum CustomOptionsFormat : byte { FLEXBUFFERS = 0, } // An operator takes tensors as inputs and outputs. 
The type of operation being // performed is determined by an index into the list of valid OperatorCodes, // while the specifics of each operations is configured using builtin_options // or custom_options. table Operator { // Index into the operator_codes array. Using an integer here avoids // complicate map lookups. opcode_index:uint; // Optional input are indicated by -1. inputs:[int]; outputs:[int]; builtin_options:BuiltinOptions; custom_options:[ubyte]; custom_options_format:CustomOptionsFormat; // A list of booleans indicating the input tensors which are being mutated by // this operator.(e.g. used by RNN and LSTM). // For example, if the "inputs" array refers to 5 tensors and the second and // fifth are mutable variables, then this list will contain // [false, true, false, false, true]. // // If the list is empty, no variable is mutated in this operator. // The list either has the same length as `inputs`, or is empty. mutating_variable_inputs:[bool]; // A list of indices to the subgraph's "tensors" that are internal to an Op. // Internal tensors are those that do not flow in or out of the operation, // but instead are part of internal computation. As such, the operation's // implementation may manage its memory more efficiently. They are needed // however (i.e. not just an implementation detail) since they are part of the // computation, which may require relevant metadata such as quantization // parameters. intermediates:[int]; } // The root type, defining a subgraph, which typically represents an entire // model. table SubGraph { // A list of all tensors used in this subgraph. tensors:[Tensor]; // Indices of the tensors that are inputs into this subgraph. Note this is // the list of non-static tensors that feed into the subgraph for inference. inputs:[int]; // Indices of the tensors that are outputs out of this subgraph. Note this is // the list of output tensors that are considered the product of the // subgraph's inference. 
outputs:[int]; // All operators, in execution order. operators:[Operator]; // Name of this subgraph (used for debugging). name:string; } // Table of raw data buffers (used for constant tensors). Referenced by tensors // by index. The generous alignment accommodates mmap-friendly data structures. table Buffer { data:[ubyte] (force_align: 16); } table Metadata { // A human readable string to uniquely identify a Metadata. name:string; // An index to the buffers table. buffer:uint; } table Model { // Version of the schema. version:uint; // A list of all operator codes used in this model. This is // kept in order because operators carry an index into this // vector. operator_codes:[OperatorCode]; // All the subgraphs of the model. The 0th is assumed to be the main // model. subgraphs:[SubGraph]; // A description of the model. description:string; // Buffers of the model. // Note the 0th entry of this array must be an empty buffer (sentinel). // This is a convention so that tensors without a buffer can provide 0 as // their buffer. buffers:[Buffer]; // Metadata about the model. Indirects into the existings buffers list. // Deprecated, prefer to use metadata field. metadata_buffer:[int]; // Metadata about the model. metadata:[Metadata]; } root_type Model; ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/helpers.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and
# limitations under the License.

from . import flatbuffers as fb
import numpy as np
import sys
import re

# See: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/schema/schema.fbs

# File identifier and schema version emitted when writing .tflite files;
# these must match the `file_identifier` ("TFL3") and revision of the
# schema referenced above.
OUTPUT_FILE_IDENTIFIER = "TFL3"
OUTPUT_SCHEMA_VERSION = 3

# Generated flatbuffers option classes, listed in the order of the
# BuiltinOptions union of the schema, so that the list index equals the
# union's type value. Index 0 corresponds to BuiltinOptions.NONE and has
# no class.
BuiltinOptionsClasses = [
    None,
    fb.Conv2DOptions,
    fb.DepthwiseConv2DOptions,
    fb.ConcatEmbeddingsOptions,
    fb.LSHProjectionOptions,
    fb.Pool2DOptions,
    fb.SVDFOptions,
    fb.RNNOptions,
    fb.FullyConnectedOptions,
    fb.SoftmaxOptions,
    fb.ConcatenationOptions,
    fb.AddOptions,
    fb.L2NormOptions,
    fb.LocalResponseNormalizationOptions,
    fb.LSTMOptions,
    fb.ResizeBilinearOptions,
    fb.CallOptions,
    fb.ReshapeOptions,
    fb.SkipGramOptions,
    fb.SpaceToDepthOptions,
    fb.EmbeddingLookupSparseOptions,
    fb.MulOptions,
    fb.PadOptions,
    fb.GatherOptions,
    fb.BatchToSpaceNDOptions,
    fb.SpaceToBatchNDOptions,
    fb.TransposeOptions,
    fb.ReducerOptions,
    fb.SubOptions,
    fb.DivOptions,
    fb.SqueezeOptions,
    fb.SequenceRNNOptions,
    fb.StridedSliceOptions,
    fb.ExpOptions,
    fb.TopKV2Options,
    fb.SplitOptions,
    fb.LogSoftmaxOptions,
    fb.CastOptions,
    fb.DequantizeOptions,
    fb.MaximumMinimumOptions,
    fb.ArgMaxOptions,
    fb.LessOptions,
    fb.NegOptions,
    fb.PadV2Options,
    fb.GreaterOptions,
    fb.GreaterEqualOptions,
    fb.LessEqualOptions,
    fb.SelectOptions,
    fb.SliceOptions,
    fb.TransposeConvOptions,
    fb.SparseToDenseOptions,
    fb.TileOptions,
    fb.ExpandDimsOptions,
    fb.EqualOptions,
    fb.NotEqualOptions,
    fb.ShapeOptions,
    fb.PowOptions,
    fb.ArgMinOptions,
    fb.FakeQuantOptions,
    fb.PackOptions,
    fb.LogicalOrOptions,
    fb.OneHotOptions,
    fb.LogicalAndOptions,
    fb.LogicalNotOptions,
    fb.UnpackOptions,
    fb.FloorDivOptions,
    fb.SquareOptions,
    fb.ZerosLikeOptions,
    fb.FillOptions,
    fb.BidirectionalSequenceLSTMOptions,
    fb.BidirectionalSequenceRNNOptions,
    fb.UnidirectionalSequenceLSTMOptions,
    fb.FloorModOptions,
    fb.RangeOptions,
    fb.ResizeNearestNeighborOptions,
    fb.LeakyReluOptions,
    fb.SquaredDifferenceOptions,
    fb.MirrorPadOptions,
    fb.AbsOptions,
    fb.SplitVOptions,
    fb.UniqueOptions,
    fb.ReverseV2Options,
    fb.AddNOptions,
    fb.GatherNdOptions,
    fb.CosOptions,
    fb.WhereOptions,
    fb.RankOptions,
    fb.ReverseSequenceOptions,
    fb.MatrixDiagOptions,
    fb.QuantizeOptions,
    fb.MatrixSetDiagOptions,
    fb.HardSwishOptions,
    fb.IfOptions,
    fb.WhileOptions,
    fb.DepthToSpaceOptions,
    fb.NonMaxSuppressionV4Options,
    fb.NonMaxSuppressionV5Options,
    fb.ScatterNdOptions,
    fb.SelectV2Options,
    fb.DensifyOptions,
    fb.SegmentSumOptions,
    fb.BatchMatMulOptions,
]

# Maps each BuiltinOperator enum value to the BuiltinOptions union value
# carrying its parameters (fb.BuiltinOptions.NONE for parameter-less ops).
BuiltinOptionsByOperator = {
    fb.BuiltinOperator.ADD: fb.BuiltinOptions.AddOptions,
    fb.BuiltinOperator.AVERAGE_POOL_2D: fb.BuiltinOptions.Pool2DOptions,
    fb.BuiltinOperator.CONCATENATION: fb.BuiltinOptions.ConcatenationOptions,
    fb.BuiltinOperator.CONV_2D: fb.BuiltinOptions.Conv2DOptions,
    fb.BuiltinOperator.DEPTHWISE_CONV_2D: fb.BuiltinOptions.DepthwiseConv2DOptions,
    fb.BuiltinOperator.DEQUANTIZE: fb.BuiltinOptions.DequantizeOptions,
    fb.BuiltinOperator.EMBEDDING_LOOKUP: fb.BuiltinOptions.NONE,
    fb.BuiltinOperator.FLOOR: fb.BuiltinOptions.NONE,
    fb.BuiltinOperator.FULLY_CONNECTED: fb.BuiltinOptions.FullyConnectedOptions,
    fb.BuiltinOperator.HASHTABLE_LOOKUP: fb.BuiltinOptions.NONE,
    fb.BuiltinOperator.L2_NORMALIZATION: fb.BuiltinOptions.L2NormOptions,
    fb.BuiltinOperator.L2_POOL_2D: fb.BuiltinOptions.Pool2DOptions,
    fb.BuiltinOperator.LOCAL_RESPONSE_NORMALIZATION: fb.BuiltinOptions.LocalResponseNormalizationOptions,
    fb.BuiltinOperator.LOGISTIC: fb.BuiltinOptions.NONE,
    fb.BuiltinOperator.LSH_PROJECTION: fb.BuiltinOptions.NONE,
    fb.BuiltinOperator.LSTM: fb.BuiltinOptions.LSTMOptions,
    fb.BuiltinOperator.MAX_POOL_2D: fb.BuiltinOptions.Pool2DOptions,
    fb.BuiltinOperator.MUL: fb.BuiltinOptions.MulOptions,
    fb.BuiltinOperator.RELU: fb.BuiltinOptions.NONE,
    fb.BuiltinOperator.RELU_N1_TO_1: fb.BuiltinOptions.NONE,
    fb.BuiltinOperator.RELU6: fb.BuiltinOptions.NONE,
    fb.BuiltinOperator.RESHAPE: fb.BuiltinOptions.ReshapeOptions,
    fb.BuiltinOperator.RESIZE_BILINEAR: fb.BuiltinOptions.ResizeBilinearOptions,
fb.BuiltinOperator.RNN: fb.BuiltinOptions.RNNOptions, fb.BuiltinOperator.SOFTMAX: fb.BuiltinOptions.SoftmaxOptions, fb.BuiltinOperator.SPACE_TO_DEPTH: fb.BuiltinOptions.SpaceToDepthOptions, fb.BuiltinOperator.SVDF: fb.BuiltinOptions.SVDFOptions, fb.BuiltinOperator.TANH: fb.BuiltinOptions.NONE, fb.BuiltinOperator.CONCAT_EMBEDDINGS: fb.BuiltinOptions.ConcatEmbeddingsOptions, fb.BuiltinOperator.SKIP_GRAM: fb.BuiltinOptions.SkipGramOptions, fb.BuiltinOperator.CALL: fb.BuiltinOptions.CallOptions, fb.BuiltinOperator.CUSTOM: fb.BuiltinOptions.NONE, fb.BuiltinOperator.EMBEDDING_LOOKUP_SPARSE: fb.BuiltinOptions.EmbeddingLookupSparseOptions, fb.BuiltinOperator.PAD: fb.BuiltinOptions.PadOptions, fb.BuiltinOperator.UNIDIRECTIONAL_SEQUENCE_RNN: fb.BuiltinOptions.NONE, fb.BuiltinOperator.GATHER: fb.BuiltinOptions.GatherOptions, fb.BuiltinOperator.BATCH_TO_SPACE_ND: fb.BuiltinOptions.BatchToSpaceNDOptions, fb.BuiltinOperator.SPACE_TO_BATCH_ND: fb.BuiltinOptions.SpaceToBatchNDOptions, fb.BuiltinOperator.TRANSPOSE: fb.BuiltinOptions.TransposeOptions, fb.BuiltinOperator.MEAN: fb.BuiltinOptions.ReducerOptions, fb.BuiltinOperator.SUB: fb.BuiltinOptions.SubOptions, fb.BuiltinOperator.DIV: fb.BuiltinOptions.DivOptions, fb.BuiltinOperator.SQUEEZE: fb.BuiltinOptions.SqueezeOptions, fb.BuiltinOperator.UNIDIRECTIONAL_SEQUENCE_LSTM: fb.BuiltinOptions.UnidirectionalSequenceLSTMOptions, fb.BuiltinOperator.STRIDED_SLICE: fb.BuiltinOptions.StridedSliceOptions, fb.BuiltinOperator.BIDIRECTIONAL_SEQUENCE_RNN: fb.BuiltinOptions.BidirectionalSequenceRNNOptions, fb.BuiltinOperator.EXP: fb.BuiltinOptions.ExpOptions, fb.BuiltinOperator.TOPK_V2: fb.BuiltinOptions.TopKV2Options, fb.BuiltinOperator.SPLIT: fb.BuiltinOptions.SplitOptions, fb.BuiltinOperator.LOG_SOFTMAX: fb.BuiltinOptions.LogSoftmaxOptions, fb.BuiltinOperator.DELEGATE: fb.BuiltinOptions.NONE, fb.BuiltinOperator.BIDIRECTIONAL_SEQUENCE_LSTM: fb.BuiltinOptions.BidirectionalSequenceLSTMOptions, fb.BuiltinOperator.CAST: 
fb.BuiltinOptions.CastOptions, fb.BuiltinOperator.PRELU: fb.BuiltinOptions.NONE, fb.BuiltinOperator.MAXIMUM: fb.BuiltinOptions.MaximumMinimumOptions, fb.BuiltinOperator.ARG_MAX: fb.BuiltinOptions.ArgMaxOptions, fb.BuiltinOperator.MINIMUM: fb.BuiltinOptions.MaximumMinimumOptions, fb.BuiltinOperator.LESS: fb.BuiltinOptions.LessOptions, fb.BuiltinOperator.NEG: fb.BuiltinOptions.NegOptions, fb.BuiltinOperator.PADV2: fb.BuiltinOptions.PadV2Options, fb.BuiltinOperator.GREATER: fb.BuiltinOptions.GreaterOptions, fb.BuiltinOperator.GREATER_EQUAL: fb.BuiltinOptions.GreaterEqualOptions, fb.BuiltinOperator.LESS_EQUAL: fb.BuiltinOptions.LessEqualOptions, fb.BuiltinOperator.SELECT: fb.BuiltinOptions.SelectOptions, fb.BuiltinOperator.SLICE: fb.BuiltinOptions.SliceOptions, fb.BuiltinOperator.SIN: fb.BuiltinOptions.NONE, fb.BuiltinOperator.TRANSPOSE_CONV: fb.BuiltinOptions.TransposeConvOptions, fb.BuiltinOperator.SPARSE_TO_DENSE: fb.BuiltinOptions.SparseToDenseOptions, fb.BuiltinOperator.TILE: fb.BuiltinOptions.TileOptions, fb.BuiltinOperator.EXPAND_DIMS: fb.BuiltinOptions.ExpandDimsOptions, fb.BuiltinOperator.EQUAL: fb.BuiltinOptions.EqualOptions, fb.BuiltinOperator.NOT_EQUAL: fb.BuiltinOptions.NotEqualOptions, fb.BuiltinOperator.LOG: fb.BuiltinOptions.NONE, fb.BuiltinOperator.SUM: fb.BuiltinOptions.ReducerOptions, fb.BuiltinOperator.SQRT: fb.BuiltinOptions.NONE, fb.BuiltinOperator.RSQRT: fb.BuiltinOptions.NONE, fb.BuiltinOperator.SHAPE: fb.BuiltinOptions.ShapeOptions, fb.BuiltinOperator.POW: fb.BuiltinOptions.PowOptions, fb.BuiltinOperator.ARG_MIN: fb.BuiltinOptions.ArgMinOptions, fb.BuiltinOperator.FAKE_QUANT: fb.BuiltinOptions.FakeQuantOptions, fb.BuiltinOperator.REDUCE_PROD: fb.BuiltinOptions.ReducerOptions, fb.BuiltinOperator.REDUCE_MAX: fb.BuiltinOptions.ReducerOptions, fb.BuiltinOperator.PACK: fb.BuiltinOptions.PackOptions, fb.BuiltinOperator.LOGICAL_OR: fb.BuiltinOptions.LogicalOrOptions, fb.BuiltinOperator.ONE_HOT: fb.BuiltinOptions.OneHotOptions, 
fb.BuiltinOperator.LOGICAL_AND: fb.BuiltinOptions.LogicalAndOptions, fb.BuiltinOperator.LOGICAL_NOT: fb.BuiltinOptions.LogicalNotOptions, fb.BuiltinOperator.UNPACK: fb.BuiltinOptions.UnpackOptions, fb.BuiltinOperator.REDUCE_MIN: fb.BuiltinOptions.ReducerOptions, fb.BuiltinOperator.FLOOR_DIV: fb.BuiltinOptions.FloorDivOptions, fb.BuiltinOperator.REDUCE_ANY: fb.BuiltinOptions.ReducerOptions, fb.BuiltinOperator.SQUARE: fb.BuiltinOptions.SquareOptions, fb.BuiltinOperator.ZEROS_LIKE: fb.BuiltinOptions.ZerosLikeOptions, fb.BuiltinOperator.FILL: fb.BuiltinOptions.FillOptions, fb.BuiltinOperator.FLOOR_MOD: fb.BuiltinOptions.FloorModOptions, fb.BuiltinOperator.RANGE: fb.BuiltinOptions.RangeOptions, fb.BuiltinOperator.RESIZE_NEAREST_NEIGHBOR: fb.BuiltinOptions.ResizeNearestNeighborOptions, fb.BuiltinOperator.LEAKY_RELU: fb.BuiltinOptions.LeakyReluOptions, fb.BuiltinOperator.SQUARED_DIFFERENCE: fb.BuiltinOptions.SquaredDifferenceOptions, fb.BuiltinOperator.MIRROR_PAD: fb.BuiltinOptions.MirrorPadOptions, fb.BuiltinOperator.ABS: fb.BuiltinOptions.AbsOptions, fb.BuiltinOperator.SPLIT_V: fb.BuiltinOptions.SplitVOptions, fb.BuiltinOperator.UNIQUE: fb.BuiltinOptions.UniqueOptions, fb.BuiltinOperator.CEIL: fb.BuiltinOptions.NONE, fb.BuiltinOperator.REVERSE_V2: fb.BuiltinOptions.ReverseV2Options, fb.BuiltinOperator.ADD_N: fb.BuiltinOptions.AddNOptions, fb.BuiltinOperator.GATHER_ND: fb.BuiltinOptions.GatherNdOptions, fb.BuiltinOperator.COS: fb.BuiltinOptions.CosOptions, fb.BuiltinOperator.WHERE: fb.BuiltinOptions.WhereOptions, fb.BuiltinOperator.RANK: fb.BuiltinOptions.RankOptions, fb.BuiltinOperator.ELU: fb.BuiltinOptions.NONE, fb.BuiltinOperator.REVERSE_SEQUENCE: fb.BuiltinOptions.ReverseSequenceOptions, fb.BuiltinOperator.MATRIX_DIAG: fb.BuiltinOptions.MatrixDiagOptions, fb.BuiltinOperator.QUANTIZE: fb.BuiltinOptions.QuantizeOptions, fb.BuiltinOperator.MATRIX_SET_DIAG: fb.BuiltinOptions.MatrixSetDiagOptions, fb.BuiltinOperator.HARD_SWISH: fb.HardSwishOptions, fb.BuiltinOperator.IF: 
fb.IfOptions, fb.BuiltinOperator.WHILE: fb.WhileOptions, fb.BuiltinOperator.DEPTH_TO_SPACE: fb.DepthToSpaceOptions, fb.BuiltinOperator.NON_MAX_SUPPRESSION_V4: fb.NonMaxSuppressionV4Options, fb.BuiltinOperator.NON_MAX_SUPPRESSION_V5: fb.NonMaxSuppressionV5Options, fb.BuiltinOperator.SCATTER_ND: fb.ScatterNdOptions, fb.BuiltinOperator.ROUND: fb.BuiltinOptions.NONE, fb.BuiltinOperator.SELECT_V2: fb.BuiltinOptions.SelectV2Options, fb.BuiltinOperator.DENSIFY: fb.BuiltinOptions.DensifyOptions, fb.BuiltinOperator.SEGMENT_SUM: fb.BuiltinOptions.SegmentSumOptions, fb.BuiltinOperator.BATCH_MATMUL: fb.BuiltinOptions.BatchMatMulOptions, } CustomOptionsKey = 'custom_options' DtypeToNumpy = { fb.TensorType.FLOAT16: np.float16, fb.TensorType.FLOAT32: np.float32, fb.TensorType.INT8: np.int8, fb.TensorType.INT16: np.int16, fb.TensorType.INT32: np.int32, fb.TensorType.INT64: np.int64, fb.TensorType.UINT8: np.uint8, fb.TensorType.STRING: np.str_, fb.TensorType.BOOL: np.bool_, fb.TensorType.COMPLEX64: np.complex64, } DtypeFromNumpy = { np.float16: fb.TensorType.FLOAT16, np.float32: fb.TensorType.FLOAT32, np.int8: fb.TensorType.INT8, np.int16: fb.TensorType.INT16, np.int32: fb.TensorType.INT32, np.int64: fb.TensorType.INT64, np.uint8: fb.TensorType.UINT8, np.str_: fb.TensorType.STRING, np.bool_: fb.TensorType.BOOL, np.complex64: fb.TensorType.COMPLEX64, } _regex1 = re.compile('(.)([A-Z][a-z]+)') _regex2 = re.compile('([a-z0-9])([A-Z])') def camel_to_snake(s): subbed = _regex1.sub(r'\1_\2', s) return _regex2.sub(r'\1_\2', subbed).lower() def snake_to_camel(s): return ''.join(c for c in s.title() if c != '_') def substitute_enum_value_with_name(key, value, optionsClass): cls, map = _OptionEnumNameByValueMaps.get(key, (None, None)) return map[value] if map is not None and (cls is None or cls == optionsClass) else value def substitute_enum_name_with_value(key, name, optionsClass): cls, map = _OptionEnumValueByNameMaps.get(key, (None, None)) return map[name] if map is not None and (cls is 
None or cls == optionsClass) else name def _generate_enum_value_by_name(enumClass): return {name: value for name, value in enumClass.__dict__.items() if not name.startswith('_')} def _generate_enum_name_by_value(enumClass): return {value: name for name, value in enumClass.__dict__.items() if not name.startswith('_')} def enumerate_options_getters(optionsClass): return {camel_to_snake(name): func for name, func in optionsClass.__dict__.items() if not name.startswith('_') and name != 'Init' and not name.startswith('GetRootAs') and not name.endswith('AsNumpy') and not name.endswith('Length') and not isinstance(func, classmethod)} def enumerate_options_length_getters(optionsClass): return {camel_to_snake(name[:-6]): func for name, func in optionsClass.__dict__.items() if not name.startswith('_') and not name.startswith('GetRootAs') and name.endswith('Length')} def enumerate_options_adders(optionsClass): className = optionsClass.__name__ prefix = className + 'Add' optionsModule = sys.modules[optionsClass.__module__] return {camel_to_snake(name[len(prefix):]): func for name, func in optionsModule.__dict__.items() if name.startswith(prefix)} def enumerate_options_vector_starters(optionsClass): className = optionsClass.__name__ prefix, suffix = className + 'Start', 'Vector' optionsModule = sys.modules[optionsClass.__module__] return {camel_to_snake(name[len(prefix):-len(suffix)]): func for name, func in optionsModule.__dict__.items() if name.startswith(prefix) and name.endswith(suffix)} def get_options_starter_ender(optionsClass): className = optionsClass.__name__ optionsModule = sys.modules[optionsClass.__module__] moduleDict = optionsModule.__dict__ return moduleDict[className + 'Start'], moduleDict[className + 'End'] _OptionEnumNameByValueMaps = { 'padding': (None, _generate_enum_name_by_value(fb.Padding)), 'fused_activation_function': (None, _generate_enum_name_by_value(fb.ActivationFunctionType)), 'weights_format': ( fb.FullyConnectedOptions, 
_generate_enum_name_by_value(fb.FullyConnectedOptionsWeightsFormat)), 'type': (fb.LSHProjectionOptions, _generate_enum_name_by_value(fb.LSHProjectionType)), 'kernel_type': (fb.LSTMOptions, _generate_enum_name_by_value(fb.LSTMKernelType)), 'combiner': (fb.EmbeddingLookupSparseOptions, _generate_enum_name_by_value(fb.CombinerType)), } _OptionEnumValueByNameMaps = { 'padding': (None, _generate_enum_value_by_name(fb.Padding)), 'fused_activation_function': (None, _generate_enum_value_by_name(fb.ActivationFunctionType)), 'weights_format': ( fb.FullyConnectedOptions, _generate_enum_value_by_name(fb.FullyConnectedOptionsWeightsFormat)), 'type': (fb.LSHProjectionOptions, _generate_enum_value_by_name(fb.LSHProjectionType)), 'kernel_type': (fb.LSTMOptions, _generate_enum_value_by_name(fb.LSTMKernelType)), 'combiner': (fb.EmbeddingLookupSparseOptions, _generate_enum_value_by_name(fb.CombinerType)), } BuiltinOperatorTypeByValue = _generate_enum_name_by_value(fb.BuiltinOperator) BuiltinOperatorValueByType = _generate_enum_value_by_name(fb.BuiltinOperator) ================================================ FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/reader.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import division, print_function, absolute_import

from .helpers import *
from ....model import *

# flexbuffers (used to decode custom-op options) is only shipped with newer
# flatbuffers releases; when absent, custom options are kept as raw bytes.
try:
    from flatbuffers import flexbuffers
    has_flexbuffers = True
except ImportError:
    has_flexbuffers = False

import sys
import six


def _get_quantization(tensor):
    # Extract the quantization parameters of a flatbuffers Tensor into a plain
    # dict. Each field is collapsed by length: 0 -> None, 1 -> Python scalar,
    # otherwise the raw numpy array. Returns None when there is no
    # Quantization table or when all four fields are empty.
    quant = tensor.Quantization()
    if quant is None:
        return None
    if quant.MinLength() == 0:
        min = None
    elif quant.MinLength() == 1:
        min = float(quant.Min(0))
    else:
        min = quant.MinAsNumpy()
    if quant.MaxLength() == 0:
        max = None
    elif quant.MaxLength() == 1:
        max = float(quant.Max(0))
    else:
        max = quant.MaxAsNumpy()
    if quant.ScaleLength() == 0:
        scale = None
    elif quant.ScaleLength() == 1:
        scale = float(quant.Scale(0))
    else:
        scale = quant.ScaleAsNumpy()
    if quant.ZeroPointLength() == 0:
        zero_point = None
    elif quant.ZeroPointLength() == 1:
        zero_point = int(quant.ZeroPoint(0))
    else:
        zero_point = quant.ZeroPointAsNumpy()
    if all(x is None for x in [min, max, scale, zero_point]):
        return None
    else:
        return dict(min=min, max=max, zero_point=zero_point, scale=scale)


def _get_data_as_ndarray(buffer, dtype, shape):
    # Reinterpret a flatbuffers Buffer's raw bytes as an ndarray of the given
    # dtype/shape; an empty buffer means the tensor has no constant data.
    return buffer.DataAsNumpy().view(dtype).reshape(shape) if buffer.DataLength() != 0 else None


def _get_options_starter_ender(optionsClass):
    # Look up the generated module-level '<Class>Start' / '<Class>End'
    # builder functions for an options class.
    # NOTE(review): duplicates get_options_starter_ender from helpers and
    # appears unused in this module.
    className = optionsClass.__name__
    optionsModule = sys.modules[optionsClass.__module__]
    moduleDict = optionsModule.__dict__
    return moduleDict[className + 'Start'], moduleDict[className + 'End']


def _enumerate_attributes(optionsClass, optionsObject):
    # Read every scalar/vector field of an options object into a dict keyed by
    # snake_case attribute name; vector fields (those with a '...Length'
    # getter) are expanded element-wise, and enum values are replaced by their
    # symbolic names where a mapping is registered in helpers.
    getters = enumerate_options_getters(optionsClass)
    length_getters = enumerate_options_length_getters(optionsClass)
    attribs = {}
    for name, getter in getters.items():
        length_getter = length_getters.get(name)
        value = getter(optionsObject) if length_getter is None else \
            [getter(optionsObject, i) for i in range(length_getter(optionsObject))]
        attribs[name] = substitute_enum_value_with_name(name, value, optionsClass)
    return attribs


def _decode_custom_options(bytes):
    # Decode flexbuffers-encoded custom-op options into a dict, dropping
    # underscore-prefixed (internal) keys. Requires has_flexbuffers.
    root = flexbuffers.GetRoot(bytes)
    assert root.IsMap
    return {key: value for key, value in six.iteritems(root.AsMap.Value) if not key.startswith('_')}


def read_flatbuffers(filename):
    # Read a TFLite flatbuffers model file and convert its single subgraph
    # into the internal Graph/Tensor/Operation model. Raises
    # NotImplementedError for models with more than one subgraph.
    # NOTE(review): locals 'bytes', 'min', 'max' shadow builtins here and in
    # the helpers above.
    with open(filename, 'rb') as file:
        bytes = bytearray(file.read())
        model = fb.Model.GetRootAsModel(bytes, 0)
        if model.SubgraphsLength() != 1:
            raise NotImplementedError('graphs with multiple sub-graphs are not supported')
        subgraph = model.Subgraphs(0)
        name = subgraph.Name()
        graph = Graph(name.decode() if name is not None else None)

        # First pass: materialize all tensors so operators can refer to them
        # by index.
        tensors = []
        for i in range(subgraph.TensorsLength()):
            tensor = subgraph.Tensors(i)
            name = tensor.Name().decode()
            shape = tuple(tensor.Shape(i) for i in range(tensor.ShapeLength()))
            dtype = DtypeToNumpy[tensor.Type()]
            buffer = model.Buffers(tensor.Buffer())
            data = _get_data_as_ndarray(buffer, dtype, shape)
            quant = _get_quantization(tensor)
            tensors.append(Tensor(graph, name, shape, dtype, data, quant))

        # Second pass: operators. Builtin ops take their type name from the
        # opcode enum; custom ops use the custom-code string instead.
        for i in range(subgraph.OperatorsLength()):
            operator = subgraph.Operators(i)
            operatorCode = model.OperatorCodes(operator.OpcodeIndex())
            builtinCode = operatorCode.BuiltinCode()
            opType = BuiltinOperatorTypeByValue[builtinCode] if builtinCode != fb.BuiltinOperator.CUSTOM else \
                operatorCode.CustomCode().decode('ascii')
            custom = builtinCode == fb.BuiltinOperator.CUSTOM
            options = operator.BuiltinOptions()
            optionsClass = BuiltinOptionsClasses[operator.BuiltinOptionsType()]
            # Index -1 marks an optional, absent input/output tensor.
            inputs = [tensors[operator.Inputs(i)] for i in range(operator.InputsLength())
                      if operator.Inputs(i) != -1]
            outputs = [tensors[operator.Outputs(i)] for i in range(operator.OutputsLength())
                       if operator.Outputs(i) != -1]
            if options is not None and optionsClass is not None:
                optionsObject = optionsClass()
                optionsObject.Init(options.Bytes, options.Pos)
                attribs = _enumerate_attributes(optionsClass, optionsObject)
            elif custom:
                # Without flexbuffers the options stay opaque under
                # CustomOptionsKey.
                bytes = operator.CustomOptionsAsNumpy().tobytes()
                attribs = _decode_custom_options(bytes) if has_flexbuffers else {CustomOptionsKey: bytes}
            else:
                attribs = {}
            Operation(graph, type=opType, custom=custom, attribs=attribs, inputs=tuple(inputs),
                      outputs=tuple(outputs))

        # Wire up graph-level inputs/outputs from the subgraph index lists.
        inputs = []
        for i in range(subgraph.InputsLength()):
            tensor_index = subgraph.Inputs(i)
            inputs.append(tensors[tensor_index])
        outputs = []
        for i in range(subgraph.OutputsLength()):
            tensor_index = subgraph.Outputs(i)
            outputs.append(tensors[tensor_index])
        graph.inputs = inputs
        graph.outputs = outputs
        return graph


class Reader(object):
    # Callable wrapper so the reader can be used interchangeably with other
    # format readers.

    def __call__(self, filename):
        return read_flatbuffers(filename)


================================================
FILE: nnef_tools-pyproject/nnef_tools/io/tf/lite/writer.py
================================================
# Copyright (c) 2020 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function, absolute_import from .helpers import * import flatbuffers try: from flatbuffers import flexbuffers has_flexbuffers = True except ImportError: has_flexbuffers = False import numpy as np def _CreateNumpyVector(builder, x): if not isinstance(x, np.ndarray): raise TypeError("non-numpy-ndarray passed to CreateNumpyVector") if x.dtype.kind not in ['b', 'i', 'u', 'f']: raise TypeError("numpy-ndarray holds elements of unsupported datatype") if x.ndim > 1: raise TypeError("multidimensional-ndarray passed to CreateNumpyVector") builder.StartVector(x.itemsize, x.size, x.dtype.alignment) # Ensure little endian byte ordering if x.dtype.str[0] != "<": x = x.byteswap() length = x.itemsize * x.size builder.head -= length builder.Bytes[builder.head: builder.head + length] = x.tobytes() return builder.EndVector(x.size) def _build_buffer(builder, bytes): data = _CreateNumpyVector(builder, bytes) fb.BufferStart(builder) fb.BufferAddData(builder, data) return fb.BufferEnd(builder) def _build_tensor(builder, tensor, buffer_index): name = builder.CreateString(tensor.name) type = DtypeFromNumpy[tensor.data.dtype.type if isinstance(tensor.data, np.ndarray) else tensor.dtype] fb.TensorStartShapeVector(builder, len(tensor.shape)) for s in reversed(tensor.shape): builder.PrependInt32(s) shape = builder.EndVector(len(tensor.shape)) buffer = buffer_index if tensor.data is not None else 0 quant = _build_quantization(builder, tensor.quant, type) fb.TensorStart(builder) fb.TensorAddName(builder, name) fb.TensorAddShape(builder, shape) fb.TensorAddType(builder, type) fb.TensorAddBuffer(builder, buffer) if quant is not None: fb.TensorAddQuantization(builder, quant) return fb.TensorEnd(builder) def _ensure_numpy_array(x, dtype): if isinstance(x, np.ndarray): assert x.dtype == dtype return x else: return np.array(x, dtype=dtype) def _build_quantization(builder, quant, dtype): if quant is None: return None min = quant.get('min') max = quant.get('max') 
zero_point = quant.get('zero_point') scale = quant.get('scale') if all(item is None or item == 0 for item in [min, max, zero_point, scale]): return None min = _CreateNumpyVector(builder, _ensure_numpy_array(min, dtype=np.float32)) if min is not None else None max = _CreateNumpyVector(builder, _ensure_numpy_array(max, dtype=np.float32)) if max is not None else None scale = _CreateNumpyVector(builder, _ensure_numpy_array(scale, dtype=np.float32)) if scale is not None else None zero_point = _CreateNumpyVector(builder, _ensure_numpy_array(zero_point, dtype=np.int64)) if zero_point is not None else None fb.QuantizationParametersStart(builder) if dtype != fb.TensorType.INT32: if min is not None: fb.QuantizationParametersAddMin(builder, min) if max is not None: fb.QuantizationParametersAddMax(builder, max) if scale is not None: fb.QuantizationParametersAddScale(builder, scale) if zero_point is not None: fb.QuantizationParametersAddZeroPoint(builder, zero_point) return fb.QuantizationParametersEnd(builder) def _build_operator_code(builder, operation): builtinCode = BuiltinOperatorValueByType[operation.type] if not operation.custom else fb.BuiltinOperator.CUSTOM customCode = builder.CreateString(operation.type) if operation.custom else None fb.OperatorCodeStart(builder) fb.OperatorCodeAddBuiltinCode(builder, builtinCode) if customCode: fb.OperatorCodeAddCustomCode(builder, customCode) return fb.OperatorCodeEnd(builder) def _build_operator_options(builder, attribs, optionsClass): starter, ender = get_options_starter_ender(optionsClass) adders = enumerate_options_adders(optionsClass) vector_starters = enumerate_options_vector_starters(optionsClass) vector_values = {} for name, vector_starter in vector_starters.items(): value = attribs[name] assert isinstance(value, (list, tuple)) and (len(value) == 0 or isinstance(value[0], int)) vector_starter(builder, len(value)) for i in reversed(value): builder.PrependInt32(i) vector_values[name] = builder.EndVector(len(value)) 
starter(builder) for name, adder in adders.items(): if name == 'fused_activation_function' and name not in attribs: value = 'NONE' else: value = attribs[name] if isinstance(value, type): value = DtypeFromNumpy[value] value = vector_values.get(name, value) value = substitute_enum_name_with_value(name, value, optionsClass) adder(builder, value) return ender(builder) def _encode_custom_options(attribs): builder = flexbuffers.Builder() builder.MapFromElements(attribs) return builder.Finish() def _build_operator_custom_options(builder, attribs): value = _encode_custom_options(attribs) if has_flexbuffers else attribs[CustomOptionsKey] fb.OperatorStartCustomOptionsVector(builder, len(value)) for b in reversed(value): builder.PrependUint8(b) return builder.EndVector(len(value)) def _build_operator(builder, operation, op_code_index, tensor_index): inputs = [tensor_index[tensor] for tensor in operation.inputs] fb.OperatorStartInputsVector(builder, len(inputs)) for input in reversed(inputs): builder.PrependInt32(input) inputs = builder.EndVector(len(inputs)) outputs = [tensor_index[tensor] for tensor in operation.outputs] fb.OperatorStartOutputsVector(builder, len(outputs)) for output in reversed(outputs): builder.PrependInt32(output) outputs = builder.EndVector(len(outputs)) attribs = {name: value for name, value in operation.attribs.items()} optionsType = BuiltinOptionsByOperator[BuiltinOperatorValueByType.get(operation.type, fb.BuiltinOperator.CUSTOM)] optionsClass = BuiltinOptionsClasses[optionsType] options = _build_operator_options(builder, attribs, optionsClass) if optionsClass is not None else None custom_options = _build_operator_custom_options(builder, attribs) if operation.custom else None fb.OperatorStart(builder) fb.OperatorAddOpcodeIndex(builder, op_code_index[operation.type]) fb.OperatorAddInputs(builder, inputs) fb.OperatorAddOutputs(builder, outputs) fb.OperatorAddBuiltinOptionsType(builder, optionsType) if options: fb.OperatorAddBuiltinOptions(builder, 
options) if custom_options: fb.OperatorAddCustomOptions(builder, custom_options) return fb.OperatorEnd(builder) # https://github.com/google/flatbuffers/issues/4814 def FinishWithFileIdentifier(builder, rootTable, fid): from flatbuffers import number_types as N from flatbuffers import encode if fid is None or len(fid) != 4: raise Exception('fid must be 4 chars') flags = N.Uint8Flags prepSize = 4 builder.Prep(builder.minalign, prepSize + len(fid)) for i in range(3, -1, -1): builder.head = builder.head - flags.bytewidth encode.Write(flags.packer_type, builder.Bytes, builder.Head(), ord(fid[i])) return builder.Finish(rootTable) def write_flatbuffers(graph, filename): graph.sort() builder = flatbuffers.Builder(0) fb.BufferStartDataVector(builder, 0) data = builder.EndVector(0) fb.BufferStart(builder) fb.BufferAddData(builder, data) buffer = fb.BufferEnd(builder) buffers = [buffer] for tensor in graph.tensors: if tensor.data is not None: tensor_data = tensor.data if not isinstance(tensor_data, np.ndarray): tensor_data = np.array(tensor_data, dtype=tensor.dtype) bytes = tensor_data.reshape([-1]).view(np.uint8) buffers.append(_build_buffer(builder, bytes)) fb.ModelStartBuffersVector(builder, len(buffers)) for buffer in reversed(buffers): builder.PrependUOffsetTRelative(buffer) buffers = builder.EndVector(len(buffers)) buffer_index = 1 tensors = [] tensor_index = {} for tensor in graph.tensors: tensor_index[tensor] = len(tensors) tensors.append(_build_tensor(builder, tensor, buffer_index)) if tensor.data is not None: buffer_index += 1 fb.SubGraphStartTensorsVector(builder, len(tensors)) for tensor in reversed(tensors): builder.PrependUOffsetTRelative(tensor) tensors = builder.EndVector(len(tensors)) op_codes = [] op_code_index = {} for operation in graph.operations: if operation.type not in op_code_index: op_code_index[operation.type] = len(op_codes) op_codes.append(_build_operator_code(builder, operation)) fb.ModelStartOperatorCodesVector(builder, len(op_codes)) for 
op_code in reversed(op_codes): builder.PrependUOffsetTRelative(op_code) op_codes = builder.EndVector(len(op_codes)) operators = [] for operation in graph.operations: operators.append(_build_operator(builder, operation, op_code_index, tensor_index)) fb.SubGraphStartOperatorsVector(builder, len(operators)) for operator in reversed(operators): builder.PrependUOffsetTRelative(operator) operators = builder.EndVector(len(operators)) name = builder.CreateString(graph.name) if graph.name is not None else None inputs = graph.inputs fb.SubGraphStartInputsVector(builder, len(inputs)) for input in reversed(inputs): builder.PrependInt32(tensor_index[input]) inputs = builder.EndVector(len(inputs)) outputs = graph.outputs fb.SubGraphStartInputsVector(builder, len(outputs)) for output in reversed(outputs): builder.PrependInt32(tensor_index[output]) outputs = builder.EndVector(len(outputs)) fb.SubGraphStart(builder) if name is not None: fb.SubGraphAddName(builder, name) fb.SubGraphAddTensors(builder, tensors) fb.SubGraphAddOperators(builder, operators) fb.SubGraphAddInputs(builder, inputs) fb.SubGraphAddOutputs(builder, outputs) subgraph = fb.SubGraphEnd(builder) fb.ModelStartSubgraphsVector(builder, 1) builder.PrependUOffsetTRelative(subgraph) subgraphs = builder.EndVector(1) fb.ModelStart(builder) fb.ModelAddVersion(builder, OUTPUT_SCHEMA_VERSION) fb.ModelAddBuffers(builder, buffers) fb.ModelAddOperatorCodes(builder, op_codes) fb.ModelAddSubgraphs(builder, subgraphs) model = fb.ModelEnd(builder) FinishWithFileIdentifier(builder, model, OUTPUT_FILE_IDENTIFIER) with open(filename, 'wb') as file: file.write(builder.Output()) class Writer(object): def __call__(self, graph, filename): write_flatbuffers(graph, filename) ================================================ FILE: nnef_tools-pyproject/nnef_tools/model/__init__.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .graph import Tensor, Operation, Graph ================================================ FILE: nnef_tools-pyproject/nnef_tools/model/graph.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import division, print_function, absolute_import from collections.abc import Sequence from functools import reduce import six import typing import numpy as np # noinspection PyProtectedMember class Tensor: def __init__(self, graph, # type: Graph, name=None, # type: typing.Optional[str] shape=None, # type: typing.Optional[typing.Tuple[int, ...]] dtype=None, # type: typing.Optional[type] data=None, # type: typing.Union[None, np.ndarray, typing.Any] quant=None # type: typing.Optional[typing.Dict[str, typing.Any]] ): # type: (...)->None self._graph = graph self._producers = [] self._consumers = [] self.name = name # type: typing.Optional[str] self.shape = shape # type: typing.Optional[typing.Tuple[int, ...]] self.dtype = dtype # type: typing.Optional[type] self.data = data # type: typing.Union[None, np.ndarray, typing.Any] self.quant = quant or {} # type: typing.Optional[typing.Dict[str, typing.Any]] assert isinstance(graph, Graph) graph._tensors.append(self) def copy_with(self, graph=None, name=None, dtype=None, shape=None, data=None, quant=None): return Tensor(graph=graph if graph is not None else self.graph, name=name if name is not None else self.name, dtype=dtype if dtype is not None else self.dtype, shape=shape if shape is not None else self.shape, data=data if data is not None else self.data, quant=quant if quant is not None else self.quant) @property def graph(self): # type: ()->typing.Optional[Graph] return self._graph @property def has_producer(self): return len(self._producers) != 0 @property def producers(self): # type: ()->typing.List[Operation] return self._producers @property def producer(self): # type: ()->typing.Optional[Operation] assert len(self._producers) <= 1 return self._producers[0] if len(self._producers) == 1 else None @property def has_consumer(self): return len(self._consumers) != 0 @property def consumers(self): # type: ()->typing.List[Operation] return self._consumers @property def consumer(self): # type: 
()->typing.Optional[Operation] return self._consumers[0] if len(self._consumers) == 1 else None @property def name(self): return self._name @name.setter def name(self, name): assert name is None or isinstance(name, str) self._name = name @property def shape(self): return self._shape @shape.setter def shape(self, shape): assert shape is None or isinstance(shape, (list, tuple)) assert shape is None or all(s is None or isinstance(s, int) for s in shape) self._shape = tuple(shape) if shape is not None else None @property def dtype(self): return self._dtype @dtype.setter def dtype(self, dtype): assert dtype is None or isinstance(dtype, type) self._dtype = dtype @property def rank(self): # type: ()->typing.Optional[int] return len(self.shape) if self.shape is not None else None @property def volume(self): # type: ()->typing.Optional[int] return reduce((lambda x, y: x * y), self.shape) if self.shape is not None and \ all(s is not None for s in self.shape) else None @property def is_constant(self): # type: ()->bool return self.data is not None def __repr__(self): return self.name if self.name is not None else _hex_id(self) def __str__(self): return '{name}: {dtype}[{shape}]'.format( name=self.name if self.name is not None else _hex_id(self), dtype=self.dtype.__name__, shape=', '.join(str(s) for s in self.shape) if self.shape else '...') _TensorListOrTupleT = typing.Union[typing.List[Tensor], typing.Tuple[Tensor, ...]] # noinspection PyProtectedMember class Operation: def __init__(self, graph, # type: Graph type=None, # type: typing.Optional[str] name=None, # type: typing.Optional[str] attribs=None, # type: typing.Dict[str, typing.Any] inputs=None, # type: typing.Union[None, Tensor, _TensorListOrTuple] outputs=None, # type: typing.Union[None, Tensor, _TensorListOrTuple] custom=False, # type: bool ): # type:(...)->None self._graph = graph self._inputs = tuple() self._outputs = tuple() assert name is None or isinstance(name, str) if attribs is not None: assert 
isinstance(attribs, dict) assert all(isinstance(key, str) for key in six.iterkeys(attribs)) assert all(not isinstance(value, Tensor) for value in six.itervalues(attribs)) self.type = type # type: typing.Optional[str] self.name = name # type: typing.Optional[str] self.attribs = attribs or {} # type: typing.Dict[str, typing.Any] self.custom = custom # type: bool assert isinstance(graph, Graph) graph._operations.append(self) if inputs is not None: self.inputs = inputs if outputs is not None: self.outputs = outputs def copy_with(self, graph=None, type=None, name=None, attribs=None, inputs=None, outputs=None, custom=None): return Operation(graph=graph if graph is not None else self.graph, type=type if type is not None else self.type, name=name if name is not None else self.name, attribs=attribs if attribs is not None else self.attribs, inputs=inputs if inputs is not None else self.inputs, outputs=outputs if outputs is not None else self.outputs, custom=custom if custom is not None else self.custom) @property def graph(self): # type: ()->typing.Optional[Graph] return self._graph @property def inputs(self): # type: ()->_TensorListOrTupleT return self._inputs @property def input(self): # type: ()->Tensor assert len(self._inputs) == 1 return self._inputs[0] @inputs.setter def inputs(self, tensors): # type: (typing.Union[Tensor, _TensorListOrTupleT])->None if isinstance(tensors, Tensor): tensors = (tensors,) for tensor in self._inputs: assert self in tensor._consumers for tensor in self._inputs: if self in tensor._consumers: tensor._consumers.remove(self) self._inputs = _ListView(tensors) if isinstance(tensors, list) else tensors for tensor in tensors: assert isinstance(tensor, Tensor), "got {}".format(type(tensor)) if self not in tensor._consumers: tensor._consumers.append(self) @property def outputs(self): # type: ()->_TensorListOrTupleT return self._outputs @property def output(self): # type: ()->Tensor assert len(self._outputs) == 1 return self._outputs[0] 
@outputs.setter def outputs(self, tensors): # type: (typing.Union[Tensor, _TensorListOrTupleT])->None if isinstance(tensors, Tensor): tensors = (tensors,) for tensor in self._outputs: assert self in tensor._producers tensor._producers.remove(self) self._outputs = _ListView(tensors) if isinstance(tensors, list) else tensors for tensor in tensors: assert isinstance(tensor, Tensor), "got {}".format(type(tensor)) assert self not in tensor._producers tensor._producers.append(self) @property def type(self): return self._type @type.setter def type(self, type): assert type is None or isinstance(type, str), "got '{}'".format(type) self._type = type @property def name(self): return self._name @name.setter def name(self, name): assert name is None or isinstance(name, str), "got '{}'".format(name) self._name = name def __repr__(self): return self.type if self.type is not None else _hex_id(self) def __str__(self): return '{outputs} = {op}{{{attribs}}}({inputs})'.format( op=self.type if self.type is not None else _hex_id(self), inputs=', '.join(repr(tensor) for tensor in self._inputs), outputs=', '.join(str(tensor) for tensor in self._outputs), attribs=', '.join('{}={}'.format(key, value) for key, value in self.attribs.items())) # noinspection PyProtectedMember class Graph: def __init__(self, name=None): # type:(typing.Optional[str])->None self._operations = [] self._tensors = [] self._inputs = [] self._outputs = [] self._name = name @property def name(self): return self._name @name.setter def name(self, name): assert name is None or isinstance(name, str) self._name = name @property def operations(self): # type: ()->typing.Sequence[Operation] return _ListView(self._operations) @property def tensors(self): # type: ()->typing.Sequence[Tensor] return _ListView(self._tensors) @property def inputs(self): # type: ()->typing.Sequence[Tensor] return _ListView(self._inputs) @inputs.setter def inputs(self, tensors): # type: (_TensorListOrTupleT)->None assert isinstance(tensors, (list, 
tuple)) self._inputs = tensors for tensor in self._inputs: assert isinstance(tensor, Tensor) @property def outputs(self): # type: ()->typing.Sequence[Tensor] return _ListView(self._outputs) @outputs.setter def outputs(self, tensors): # type: (_TensorListOrTupleT)->None assert isinstance(tensors, (list, tuple)) self._outputs = tensors for tensor in self._outputs: assert isinstance(tensor, Tensor) def remove_tensor(self, tensor): # type: (Tensor)->None assert len(tensor.producers) == 0 assert len(tensor.consumers) == 0 assert tensor not in self._inputs assert tensor not in self._outputs self._tensors.remove(tensor) tensor._graph = None def remove_tensors(self, tensors): # type: (typing.Iterable[Tensor])->None for tensor in tensors: assert len(tensor.producers) == 0 assert len(tensor.consumers) == 0 assert tensor not in self._inputs assert tensor not in self._outputs self._tensors = [tensor for tensor in self._tensors if tensor not in tensors] for tensor in tensors: tensor._graph = None def remove_operation(self, operation, unlink=False): # type: (Operation, bool)->None if unlink: operation.inputs = [] operation.outputs = [] else: assert len(operation.inputs) == 0 assert len(operation.outputs) == 0 self._operations.remove(operation) operation._graph = None def remove_operations(self, operations, unlink=False): # type: (typing.Iterable[Operation], bool)->None operations = operations if isinstance(operations, set) else set(operations) for operation in operations: if unlink: operation.inputs = [] operation.outputs = [] else: assert len(operation.inputs) == 0 assert len(operation.outputs) == 0 self._operations = [op for op in self._operations if op not in operations] for operation in operations: operation._graph = None def is_unique(self): return all(len(t.producers) <= 1 for t in self.tensors) def is_sorted(self): seen = set() for op in self.operations: for tensor in op.inputs: for producer in tensor.producers: if producer not in seen: return False seen.add(op) return 
True def sort(self, offset=0): count = len(self._operations) sorted = {op: False for op in self._operations[offset:]} for idx in range(offset, count): i = idx while i < count and not all(sorted.get(tensor.producer, True) for tensor in self._operations[i].inputs): i += 1 if i == count: # the graph contains a loop return False while i > idx: self._operations[i-1], self._operations[i] = self._operations[i], self._operations[i-1] i -= 1 sorted[self._operations[i]] = True return True def move_operation(self, at_idx, to_idx): self._operations.insert(to_idx, self._operations.pop(at_idx)) def reverse(self, offset=0): self._operations[offset:] = reversed(self._operations[offset:]) def __repr__(self): return self.name if self.name is not None else _hex_id(self) def __str__(self): return "graph {name}({inputs}) -> ({outputs})".format( name=repr(self), inputs=', '.join(repr(input) for input in self.inputs), outputs=', '.join(repr(input) for input in self.outputs), ) def print(self, file=None): print(f'graph {repr(self)} {{', file=file) print(f'\tinputs {{', file=file) for tensor in self.inputs: print('\t\t' + str(tensor) + ',', file=file) print(f'\t}}', file=file) print(f'\toutputs {{', file=file) for tensor in self.outputs: print('\t\t' + str(tensor) + ',', file=file) print(f'\t}}', file=file) print(f'\tparams {{', file=file) for tensor in self.tensors: if tensor.producer is None and tensor.data is not None: print('\t\t' + str(tensor) + ',', file=file) print(f'\t}}', file=file) print(f'\toperators {{', file=file) for operation in self._operations: print('\t\t' + str(operation) + ',', file=file) print(f'\t}}', file=file) print(f'}}') def assert_consistent(self): assert len(self.tensors) == len(set(self.tensors)) assert len(self.operations) == len(set(self.operations)) for t in self.tensors: assert t._graph == self assert all(t in consumer.inputs for consumer in t.consumers) assert all(t in producer.outputs for producer in t.producers) assert all(consumer in self.operations for 
consumer in t.consumers) assert all(producer in self.operations for producer in t.producers) for op in self.operations: assert op._graph == self assert all(op in t.consumers for t in op.inputs) assert all(op in t.producers for t in op.outputs) for t in self.inputs: assert t in self.tensors for t in self.outputs: assert t in self.tensors class _ListView(Sequence): def __init__(self, lst): self._list = lst def __len__(self): return self._list.__len__() def __getitem__(self, item): return self._list.__getitem__(item) def __iter__(self): return self._list.__iter__() def __repr__(self): return self._list.__repr__() def __str__(self): return self._list.__str__() def __contains__(self, item): return self._list.__contains__(item) def __reversed__(self): return reversed(self._list) def _hex_id(obj): return '@' + hex(id(obj))[2:] ================================================ FILE: nnef_tools-pyproject/nnef_tools/model/utils.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from collections.abc import Iterable def _split_counter_from_name(str): if len(str) > 0 and not str[-1].isdigit(): return str, None i = len(str) while i > 0: if not str[i-1].isdigit(): return str[:i], int(str[i:]) i -= 1 return None, int(str) def generate_tensor_names_from_op_type(graph, keep_io_names=False): used_names = set() if keep_io_names: used_names.update(tensor.name for tensor in graph.inputs if tensor.name is not None) used_names.update(tensor.name for tensor in graph.outputs if tensor.name is not None) op_counts = {} for op in graph.operations: for tensor in op.outputs: if keep_io_names and tensor.name is not None and (tensor in graph.inputs or tensor in graph.outputs): continue idx = op_counts.get(op.type, 0) + 1 while op.type + str(idx) in used_names: idx += 1 op_counts[op.type] = idx tensor.name = op.type + str(idx) for tensor in graph.tensors: if tensor.producer is None: tensor.name = None def generate_missing_tensor_names_from_op_type(graph): counters = {} for tensor in graph.tensors: if tensor.name is not None: name, count = _split_counter_from_name(tensor.name) if name is not None and count is not None: counters[name] = max(counters.get(name, 0), count) for tensor in graph.tensors: if tensor.name is None and tensor.producer is not None: op = tensor.producer idx = counters.get(op.type, 0) + 1 counters[op.type] = idx tensor.name = op.type + str(idx) def generate_op_names_from_op_type(graph): op_counts = {} for op in graph.operations: idx = op_counts.get(op.type, 0) + 1 op_counts[op.type] = idx op.name = op.type + str(idx) def replace_tensor_in_graph_inputs(graph, old_tensor, new_tensor): graph.inputs = [new_tensor if t is old_tensor else t for t in graph.inputs] def replace_tensor_in_graph_outputs(graph, old_tensor, new_tensor): graph.outputs = [new_tensor if t is old_tensor else t for t in graph.outputs] def replace_tensor_in_consumers(graph, old_tensor, new_tensor): for consumer in list(old_tensor.consumers): # copy list to avoid changes during 
iteration sequence = tuple if isinstance(consumer.inputs, tuple) else list consumer.inputs = sequence(new_tensor if t is old_tensor else t for t in consumer.inputs) replace_tensor_in_graph_outputs(graph, old_tensor, new_tensor) def replace_tensor_in_producers(graph, old_tensor, new_tensor): for producer in list(old_tensor.producers): # copy list to avoid changes during iteration sequence = tuple if isinstance(producer.outputs, tuple) else list producer.outputs = sequence(new_tensor if t is old_tensor else t for t in producer.outputs) replace_tensor_in_graph_inputs(graph, old_tensor, new_tensor) def bypass_and_remove(graph, op, remove_input_not_output=False): assert len(op.outputs) == 1 and len(op.inputs) == 1 op_input = op.input op_output = op.output graph.remove_operation(op, unlink=True) if remove_input_not_output: replace_tensor_in_consumers(graph, op_input, op_output) replace_tensor_in_producers(graph, op_input, op_output) graph.remove_tensor(op_input) else: replace_tensor_in_consumers(graph, op_output, op_input) replace_tensor_in_producers(graph, op_output, op_input) graph.remove_tensor(op_output) def replace_chain(graph, types, func, allow_forks=False): def _match_type(type, template): return type == template if isinstance(template, str) else\ type in template if isinstance(template, Iterable) else False def _match_link(op, template, is_last): return _match_type(op.type, template) and (len(op.outputs) == 1 or is_last) def _match_chain(op, types, allow_forks): if not _match_link(op, types[0], is_last=len(types) == 1): return None chain = [op] tensor = op.output for idx, type in enumerate(types[1:]): is_last = idx + 1 == len(types) - 1 if not allow_forks and len(tensor.consumers) > 1: return None op = next((consumer for consumer in tensor.consumers if _match_link(consumer, type, is_last)), None) if op is None: return None chain.append(op) if not is_last: tensor = op.output return chain changed = False i = 0 while i < len(graph.operations): count = 
len(graph.operations) chain = _match_chain(graph.operations[i], types, allow_forks) if chain is not None and func(*chain) is not False: k = i while graph.operations[k] is not chain[-1]: k += 1 for j in range(count, len(graph.operations)): graph.move_operation(j, k) k += 1 offs = len(chain) - 1 while offs > 0 and len(chain[offs - 1].output.consumers) == 1: offs -= 1 interns = [op.output for op in chain[offs:-1]] graph.remove_operations(chain[offs:], unlink=True) graph.remove_tensors(interns) changed = True else: i += 1 return changed def remove_unreachable(graph): visited = {tensor.producer for tensor in graph.outputs} queue = list(visited) k = 0 while k < len(queue): op = queue[k] k += 1 for tensor in op.inputs: if tensor.producer is not None and tensor.producer not in visited and \ (tensor not in graph.inputs or len(tensor.producer.inputs) == 0): visited.add(tensor.producer) queue.append(tensor.producer) graph.remove_operations({op for op in graph.operations if op not in visited}, unlink=True) graph.remove_tensors({tensor for tensor in graph.tensors if len(tensor.producers) == 0 and len(tensor.consumers) == 0 and tensor not in graph.inputs and tensor not in graph.outputs}) def remove_dynamic(graph): for tensor in graph.inputs: if tensor.shape is None or any(s is None for s in tensor.shape): return False dynamic_tensors = {tensor for tensor in graph.tensors if tensor.shape is None or any(s is None for s in tensor.shape)} dynamic_ops = {tensor.producer for tensor in dynamic_tensors} queue = list(dynamic_ops) k = 0 while k < len(queue): op = queue[k] k += 1 for tensor in op.outputs: dynamic_tensors.add(tensor) for op in tensor.consumers: if op not in dynamic_ops: dynamic_ops.add(op) queue.append(op) kept_outputs = [tensor for tensor in graph.outputs if tensor not in dynamic_tensors] new_outputs = kept_outputs + [tensor for tensor in graph.tensors if all(op in dynamic_ops for op in tensor.consumers) and tensor not in dynamic_tensors] graph.outputs = kept_outputs 
graph.remove_operations(dynamic_ops, unlink=True) graph.remove_tensors(dynamic_tensors) graph.outputs = new_outputs return True ================================================ FILE: nnef_tools-pyproject/nnef_tools/operation_mapping.md ================================================ # TensorFlow The following table lists the correspondence between operations in TensorFlow and NNEF. | TensorFlow | NNEF | --- | --- | tf.Variable | variable | tf.get_variable | variable | tf.placeholder | external | tf.constant | constant | tf.zeros | constant | tf.ones | constant | tf.zeros_like | constant | tf.ones_like | constant | tf.concat | concat | tf.split | split | tf.stack | stack | tf.unstack | unstack | tf.reshape | reshape | tf.squeeze | squeeze | tf.expand_dims | unsqueeze | tf.transpose | transpose | tf.add | add | tf.subtract | sub | tf.multiply | mul | tf.divide | div | tf.pow | pow | tf.logical_and | and | tf.logical_or | or | tf.logical_not | not | tf.negative | neg | tf.identity | copy | tf.abs | abs | tf.sign | sign | tf.exp | exp | tf.log | log | tf.sqrt | sqrt | tf.rsqrt | rsqrt | tf.square | sqr | tf.floor | floor | tf.ceil | ceil | tf.round | round | tf.where | select | tf.greater | gt | tf.greater_equal | ge | tf.less | lt | tf.less_equal | le | tf.equal | eq | tf.not_equal | ne | tf.minimum | min | tf.maximum | max | tf.assign | update | tf.reduce_sum | sum_reduce | tf.reduce_mean | mean_reduce | tf.reduce_max | max_reduce | tf.argmax | argmax_reduce | tf.matmul | matmul | tf.add_n | add_n | tf.sigmoid | sigmoid | tf.nn.sigmoid | sigmoid | tf.tanh | tanh | tf.nn.tanh | tanh | tf.nn.elu | elu | tf.nn.relu | relu | tf.nn.softsign | softsign | tf.nn.softplus | softplus | tf.nn.conv1d | conv | tf.nn.conv2d | conv | tf.nn.conv3d | conv | tf.nn.convolution | conv | tf.nn.conv2d_transpose | deconv | tf.nn.conv3d_transpose | deconv | tf.nn.depthwise_conv2d | conv | tf.nn.depthwise_conv2d_native | conv | tf.nn.separable_conv2d | separable_conv | tf.nn.max_pool | 
max_pool | tf.nn.max_pool_with_argmax | max_pool_with_index | tf.nn.avg_pool | avg_pool | tf.nn.bias_add | add | tf.nn.lrn | local_response_normalization | tf.nn.local_response_normalization | local_response_normalization | tf.nn.batch_normalization | batch_normalization | tf.nn.fused_batch_norm | batch_normalization | tf.nn.l2_normalize | l2_normalization | tf.nn.softmax | softmax | tf.nn.moments | moments | tf.image.resize_images | multilinear_upsample | | nearest_upsample | | nearest_downsample | | area_downsample | tf.image.resize_bilinear | multilinear_upsample | tf.image.resize_nearest_neighbor | nearest_upsample | | nearest_downsample | tf.image.resize_area | area_downsample | tf.sin | sin | tf.cos | cos | tf.pad | pad | tf.tile | tile | tf.reduce_any | any_reduce | tf.reduce_all | all_reduce # Caffe The following table lists the correspondence between operations in Caffe and NNEF. | Caffe | NNEF | Notes | --- | --- | --- | Input | external | Convolution | conv | Pooling | max_pool | if pool == MAX and not global_pooling | | avg_pool | if pool == AVE and not global_pooling | | max_reduce | if pool == MAX and global_pooling | | mean_reduce | if pool == AVE and global_pooling | Crop | slice | Deconvolution | multilinear_upsample | if weight_filler.type == 'bilinear' and depth-wise | | deconv | otherwise | InnerProduct | linear | if bias_term and not transpose and axis == 1 | | add(matmul) | if bias_term and (transpose or axis != 1) | | matmul | if not bias_term | | | + reshape if axis != input-rank - 1
+ unsqueeze if axis == 0 | Dropout | | skipped in inference | LRN | local_response_normalization | MVN | local_contrast_normalization | if normalize_variance | | local_mean_normalization | if not normalize_variance | BatchNorm | batch_normalization | scale factor merged into mean and variance if not 1
merged with following scale layer if any | ReLU | relu | if negative_slope == 0 | | leaky_relu | if negative_slope != 0 | PReLU | prelu | ELU | elu | if alpha == 1 | | select(x > 0.0, x, alpha * (exp(x) - 1.0)) | if alpha != 1 | Sigmoid | sigmoid | TanH | tanh | AbsVal | abs | Power(a, b, n) | pow(a * x + b, n) | '*' or '+' omitted if the corresponding parameter is 1 or 0 | Exp(base, a, b) | exp(a * x + b) | if base == -1 | | pow(base, a * x + b) | if base != -1 | | | '*' or '+' omitted if the corresponding parameter is 1 or 0 | Log(base, a, b) | log(a * x + b) | if base == -1 | | log2(a * x + b) | if base == 2 | | log(a * x + b) / log(base) | otherwise | | | '*' or '+' omitted if the corresponding parameter is 1 or 0 | BNLL | softplus | Threshold(x, t) | select(x > t, 1.0, 0.0) | Bias(x) | add(x, weight) | + unsqueeze if axis > 0 | Scale(x) | mul(x + bias, weight) | '+' omitted if no bias_term
+ unsqueeze if axis > 0 | Flatten | reshape | Reshape | reshape | Split | copy_n | Concat | concat | Slice | split | Eltwise | x_1 * ... * x_n | if operation == PROD | | x_1 + ... + x_n | if operation == SUM and coeff == [] | | coeff_1 * x_1 + ... + coeff_n * x_n | if operation == SUM and coeff != [] | | max(x_1, ... , x_n) | if operation == MAX | Reduction | squeeze(sum_reduce) * coeff | if operation == SUM | | squeeze(sum_reduce(abs)) * coeff | if operation == ASUM | | squeeze(sum_reduce(sqr)) * coeff | if operation == SUMSQ | | squeeze(mean_reduce) * coeff | if operation == MEAN | | | '*' omitted if coeff == 1 | Silence | | skipped in inference | ArgMax | argmax_reduce | if top_k == 1 and out_max_val == false | Softmax | softmax # Caffe2 The following tables show the correspondence between operations in Caffe2 and NNEF. All operations without outputs (e.g. Assert) are stripped from the graph. Only the NCHW version of the operations are supported (as opposed to NHWC). **Normal operations:** | Caffe2 | NNEF | Notes | --- | --- | --- | Abs | abs | Add | add | + unsqueeze if axis != 0 | And | and | + unsqueeze if axis != 0 | ArgMax | argmax_reduce | + squeeze if not keepdims | ArgMin | argmin_reduce | + squeeze if not keepdims | AveragePool
AveragePool1D
AveragePool2D
AveragePool3D | avg_pool | if not global_pooling | | mean_reduce | if global_pooling | BatchMatMul | matmul | + reshape if input ranks are not equal or less than 2 | Cast | select | logical to scalar or integer | | ne | scalar to logical | | copy | cast to same type, may be optimized away | Ceil | ceil | ChannelShuffle | reshape(transpose(reshape)) | Clip | clamp | if both min and max are given | | min | if only max is given | | max | if only min is given | | copy | if neither min nor max is given, may be optimized away | Concat
DepthConcat
Append | concat | Conditional | select | Conv
Conv1D
Conv2D
Conv3D | conv | ConvTranspose | deconv | Copy
CopyFromCPUInput
CopyOnDeviceLike
EnsureCPUOutput
StopGradient | copy | may be optimized away | Cos | cos | Div | div | + unsqueeze if axis != 0 | DotProduct | mul | + sum_reduce + squeeze if input-rank > 1 | Dropout | copy | may be optimized away | ElementwiseLinear(x, w, b) | x * w + b | + reshape if X.rank != 2 or axis != 1 | EQ | eq | + unsqueeze if axis != 0 | FC | linear | + reshape if X.rank != 2 or W.rank != 2 or axis != 1 or axis_w != 1 | FCTransposed | add(matmul) | + reshape if X.rank != 2 or W.rank != 2 or axis != 1 or axis_w != 1 | Flatten | reshape | FlattenToVec | reshape | Floor | floor | GE | ge | + unsqueeze if axis != 0 | GT | gt | + unsqueeze if axis != 0 | L1Distance(a, b) | abs(a-b) | + sum_reduce + squeeze if input-rank > 1 | LE | le | + unsqueeze if axis != 0 | LT | lt | + unsqueeze if axis != 0 | LayerNorm(x) | mean_, std_ = moments(x);
y = (x - mean_) / sqrt(std_ + epsilon);
mean=squeeze(mean_);
std=squeeze(sqrt(std_ + epsilon)) | LeakyRelu | leaky_relu | Log | log | Logit(x) | x_ = clamp(x, eps, 1.0-eps);
y = log(x_ / (1 - x_)) | LpNorm | sum_reduce(abs) | if p = 1 and not average | | mean_reduce(abs) | if p = 1 and average | | sum_reduce(sqr) | if p = 2 and not average | | mean_reduce(sqr) | if p = 2 and average | | | + reshape if input-rank != 1 | LpPool(x) | pow(box(pow(abs(x), p)), 1.0/p) | MatMul | matmul | + reshape if A.rank != 2 or B.rank != 2 or axis_a != 1 or axis_b != 1 | Max | max, \[max, ...\] | if input-count >= 2 | | copy | if input-count == 1, may be optimized away | MaxPool
MaxPool1D
MaxPool2D
MaxPool3D | max_pool | if not global_pooling | | max_reduce | if global_pooling | MaxPoolWithIndex | max_pool_with_index | Caffe2 supports it only on GPU | Mean | div(add_n) | if input-count >= 3 | | div(add) | if input-count == 2 | | copy | if input-count == 1, may be optimized away | MergeDim | reshape | Min | min, \[min, ...\] | if input-count >= 2 | | copy | if input-count == 1, may be optimized away | Mul | mul | + unsqueeze if axis != 0 | NE | ne | + unsqueeze if axis != 0 | Negative | neg | Normalize | l2_normalization | NormalizeL1 | l1_normalization | Not | not | Or | or | + unsqueeze if axis != 0 | PadImage | pad | PRelu | prelu | Pow | pow | + unsqueeze if axis != 0 | PrependDim | reshape | ReduceMin | min_reduce | + squeeze if not keepdims | ReduceMax
ReduceFrontMax
ReduceBackMax
ColwiseMax
RowwiseMax | max_reduce | + squeeze if not keepdims | ReduceSum
ReduceFrontSum
ReduceBackSum
ReduceTailSum
SumElements | sum_reduce | + squeeze if not keepdims | ReduceMean
ReduceFrontMean
ReduceBackMean | mean_reduce | + squeeze if not keepdims | ReduceL1 | sum_reduce(abs) | + squeeze if not keepdims | ReduceL2 | sqrt(sum_reduce(sqr)) | + squeeze if not keepdims | Relu | relu | Reshape | reshape | if shape parameter is constant or the result of Shape or the 2nd result of Reshape (old_shape) | ResizeLike | reshape | ResizeNearest | nearest_upsample | if upsample (or same size) in both dimensions | | nearest_downsample | if downsample (or same size) in both dimensions | | nearest_upsample(nearest_downsample) | if downsample in one dimension and upsample in the other | | copy | if output-shape = input-shape, may be optimized away | Scale | mul | RowMul(x, w) | mul | + reshape if w.rank != 1 | Selu(x, alpha, scale) | select(x > 0, x, exp(x) * alpha - alpha) * scale | Sigmoid | sigmoid | Sign | sign | Sin | sin | Slice | slice | Softsign(x) | x / (abs(x) + 1) | Split | split | if split parameter is constant or the 2nd result of Concat (split_info) | Sqr | sqr | Sqrt | sqrt | SquaredL2Distance | (x - y) ^ 2 / 2 | + sum_reduce + squeeze if input-rank > 1 | Squeeze | squeeze | StumpFunc(x, threshold, low_value, high_value) | select(x > threshold, high_value, low_value) | Sub | sub | + unsqueeze if axis != 0 | Sum | add_n | if input-count >= 3 | | add | if input-count == 2 | | copy | if input-count == 1, may be optimized away | SumSqrElements | squeeze(sum_reduce(sqr)) | if not average | | squeeze(mean_reduce(sqr)) | if average | SumReduceLike | sum_reduce and/or squeeze | if output-shape != input-shape | | copy | if output-shape = input-shape, may be optimized away | Summarize(x) | min_ = min_reduce(x);
max_ = max_reduce(x);
mean_, std_ = moments(x);
min = reshape(min_);
max = reshape(max_);
mean = reshape(mean_);
std = reshape(std_);
y = concat(\[min, max, mean, sqrt(std * N / (N - 1))\]) | where N = count of x | Swish(x) | x / (1 + exp(-x)) | Tanh | tanh | ThresholdedRelu | select(x > alpha, x, 0.0) | Tile | tile | Transpose
NCHW2NHWC
NHWC2NCHW | transpose | Where | select | Xor(x, y) | or(and(x, not(y)), and(y, not(x))) | + unsqueeze if axis != 0 **Constants:** These operations/tensors are converted to constants. | Caffe2 | NNEF | Notes | --- | --- | --- | Shape | constant\ | Can be used as Reshape's 2nd input (shape) | Size | constant\ | Reshape's 2nd output (old_shape) | constant\ | Can be used as Reshape's 2nd input (shape) | Concat's 2nd output (split_info) | constant\ | Can be used as Split's 2nd input (split) | Range | constant\ **Variables:** These operations (in the param initializer network) are converted to variable tensors. | Caffe2 | NNEF | Representation | --- | --- | --- | GivenTensorFill | variable\ | float32 | GivenTensorDoubleFill | variable\ | float64 | GivenTensorInt16Fill | variable\ | int16 | GivenTensorIntFill | variable\ | int32 | GivenTensorInt64Fill | variable\ | int64 | GivenTensorBoolFill | variable\ | bool # ONNX The following table lists the correspondence between operations in ONNX and NNEF. 
| ONNX | NNEF | Notes | --- | --- | --- | Abs | abs | Acos | acos | Acosh | acosh | Add | add | And | and | ArgMax | argmax_reduce | ArgMin | argmin_reduce | Asin | asin | Asinh | asinh | Atan | atan | Atanh | atanh | AveragePool | avg_pool | BatchNormalization | batch_normalization | Cast | select | logical to scalar or integer | | ne | scalar to logical | | copy | to same type | Ceil | ceil | Clip | clamp | Compress | - | Concat | concat | Constant | constant | ConstantOfShape | constant | Conv | conv | ConvTranspose | deconv | Cos | cos | Cosh | cosh | DepthToSpace | reshape(transpose(reshape)) | Div | div | Dropout | copy | Elu | elu | + arithmetic when alpha != 1.0 | Equal | eq | Erf | erf | Exp | exp | Expand | tile | EyeLike | - | Flatten | reshape | Floor | floor | GRU | - | Gather | gather | Gemm | matmul | GlobalAveragePool | mean_reduce | GlobalLpPool | sum_reduce(abs) | if p == 1 | | sqrt(sum_reduce(sqr)) | if p == 2 | GlobalMaxPool | max_reduce | Greater | gt | HardSigmoid | clamp(add(mul)) | HardMax | - | Identity | copy | If | - | InstanceNormalization | div(moments) | + further arithmetic | IsNan | - | LRN | local_response_normalization | LSTM | - | LeakyRelu | leaky_relu | Less | lt | Log | log | LogSoftmax | log(softmax) | Loop | - | LpNormalization | l1_normalization | if p == 1 | | l2_normalization | if p == 2 | LpPool | avg_pool(abs) | if p == 1 | | sqrt(avg_pool(sqr)) | if p == 2 | MatMul | matmul | Max | max | MaxPool | max_pool | MaxRoiPool | max_roi_pool | MaxUnpool | desample | Mean | div(add) | Min | min | Mul | mul | Multinomial | - | Neg | neg | Not | not | OneHot | - | Or | or | PRelu | prelu | Pad | pad | Pow | pow | RNN | - | RandomNormal | - | RandomNormalLike | - | RandomUniform | - | RandomUniformLike | - | Reciprocal | rcp | ReduceL1 | sum_reduce(abs) | ReduceL2 | sqrt(sum_reduce(sqr)) | ReduceLogSum | log(sum_reduce) | ReduceLogSumExp | log(sum_reduce(exp)) | ReduceMax | max_reduce | ReduceMean | mean_reduce | ReduceMin | 
min_reduce | ReduceProd | - | ReduceSum | sum_reduce | ReduceSumSquare | sum_reduce(sqr) | Relu | relu | Reshape | reshape | Scan | - | Scatter | - | Selu | selu | Shape | constant | if can be evaluated | Shrink | - | Sigmoid | sigmoid | Sign | sign | Sin | sin | Sinh | sinh | Size | constant | if can be evaluated | Slice | slice | Softmax | softmax | Softplus | softplus | Softsign | div(x,1+abs(x)) | SpaceToDepth | - | Split | split | Sqrt | sqrt | Squeeze | squeeze | Sub | sub | Sum | add | Tan | tan | Tanh | tanh | Tile | tile | TopK | - | Transpose | transpose | Unsqueeze | unsqueeze | Upsample | multilinear_upsample | | nearest_upsample | Where | select | Xor | or(and(x,not(y)),and(y,not(x))) ================================================ FILE: nnef_tools-pyproject/nnef_tools/optimization/__init__.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: nnef_tools-pyproject/nnef_tools/optimization/nnef_optimizer.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..model.utils import bypass_and_remove, replace_chain from ..model.utils import generate_tensor_names_from_op_type, generate_missing_tensor_names_from_op_type from ..model.graph import * class Optimizer: def __init__(self, keep_tensor_names=True, custom_optimizers=None, dequantize=False): self._keep_tensor_names = keep_tensor_names self._custom_optimizers = custom_optimizers or {} self._dequantize = dequantize def __call__(self, graph, only_required=False): self._fix_inputs_as_output(graph) self._fix_inputs_without_producer(graph) if not only_required: changed = True while changed: changed = False changed |= self._remove_identity_ops(graph, 'copy', lambda op: True) changed |= self._remove_identity_ops(graph, 'transpose', lambda op: self._is_sorted(op.attribs['axes'])) changed |= self._remove_identity_ops(graph, 'reshape', lambda op: op.output.shape == op.input.shape) changed |= self._remove_identity_ops(graph, 'squeeze', lambda op: op.attribs['axes'] == []) changed |= self._remove_identity_ops(graph, 'unsqueeze', lambda op: op.attribs['axes'] == []) changed |= self._remove_identity_ops(graph, 'mul', lambda op: self._is_constant(op.inputs[0], 1.0) or self._is_constant(op.inputs[1], 1.0)) changed |= self._remove_identity_ops(graph, 'add', lambda op: self._is_constant(op.inputs[0], 0.0) or self._is_constant(op.inputs[1], 0.0)) changed |= self._remove_identity_ops(graph, ('box', 'debox', 'avg_pool', 'max_pool'), lambda op: self._is_uniform(op.attribs['size'], 1) and self._is_uniform(op.attribs['stride'], 1) and self._is_uniform(op.attribs['dilation'], 1) and 
self._is_uniform(op.attribs['padding'], 0)) changed |= self._remove_identity_ops(graph, ('nearest_downsample', 'area_downsample', 'nearest_upsample', 'multilinear_upsample'), lambda op: self._is_uniform(op.attribs['factor'], 1)) changed |= self._remove_inverse_ops(graph, 'squeeze', 'unsqueeze', lambda op1, op2: op1.attribs['axes'] == op2.attribs['axes']) changed |= self._remove_inverse_ops(graph, 'unsqueeze', 'squeeze', lambda op1, op2: op1.attribs['axes'] == op2.attribs['axes']) changed |= self._remove_inverse_ops(graph, 'transpose', 'transpose', lambda op1, op2: self._is_sorted(Optimizer._permute(op1.attribs['axes'], op2.attribs['axes']))) changed |= self._merge_op_into_variables_and_constants(graph, 'transpose', lambda data, attribs: data.transpose(attribs['axes'])) changed |= self._merge_op_into_variables_and_constants(graph, 'reshape', lambda data, attribs: data.reshape(self._get_reshape_shape(data, attribs))) changed |= self._merge_op_into_variables_and_constants(graph, 'squeeze', lambda data, attribs: data.squeeze(tuple(attribs['axes']))) changed |= self._merge_op_into_variables_and_constants(graph, 'unsqueeze', lambda data, attribs: data.reshape(self._unsqueeze_shape(data.shape, attribs['axes']))) changed |= self._merge_reshape_sequence(graph) changed |= replace_chain(graph, ['pad', {'conv', 'deconv', 'max_pool', 'avg_pool'}], self._merge_pad_with_sliding) changed |= replace_chain(graph, [{'mul', 'div'}, {'conv', 'deconv', 'linear'}], self._merge_mul_linear, allow_forks=True) changed |= replace_chain(graph, [{'conv', 'deconv', 'linear'}, {'add', 'sub'}], self._merge_linear_add) changed |= replace_chain(graph, [{'conv', 'deconv', 'linear'}, {'mul', 'div'}], self._merge_linear_mul) changed |= replace_chain(graph, ['matmul', {'add', 'sub'}], self._merge_matmul_bias) changed |= replace_chain(graph, [{'conv', 'deconv'}, 'batch_normalization'], self._merge_conv_batch_norm) changed |= replace_chain(graph, ['batch_normalization'], self._merge_batch_norm) changed |= 
replace_chain(graph, ['transpose', 'squeeze'], self._merge_transpose_squeeze) changed |= replace_chain(graph, ['reshape'], self._substitute_squeeze) for chain, replacer in six.iteritems(self._custom_optimizers): changed |= replace_chain(graph, chain, replacer) changed |= self._remove_unused_variables_and_constants(graph) if self._keep_tensor_names: generate_missing_tensor_names_from_op_type(graph) else: generate_tensor_names_from_op_type(graph) if self._dequantize: Optimizer._dequantize_variables(graph) Optimizer._remove_quantization_attribs(graph) return graph @staticmethod def _fix_inputs_without_producer(graph): idx = 0 for tensor in graph.inputs: if tensor.producer is None: cnt = len(graph.operations) Operation(tensor.graph, type='external', outputs=tensor, attribs={'shape': list(tensor.shape), 'dtype': tensor.dtype}) graph.move_operation(cnt, idx) idx += 1 return idx > 0 @staticmethod def _fix_inputs_as_output(graph): graph.outputs = [Optimizer._insert_copy(tensor) if tensor in graph.inputs else tensor for tensor in graph.outputs] @staticmethod def _insert_copy(tensor, copy=None): if copy is None: copy = Tensor(tensor.graph, name=tensor.name + '_copy', dtype=tensor.dtype, shape=tensor.shape, data=tensor.data, quant=tensor.quant) Operation(tensor.graph, type='copy', inputs=tensor, outputs=copy) return copy @staticmethod def _match_op_type(type, types): return type in types if isinstance(types, tuple) else type == types def _remove_identity_ops(self, graph, type, cond): changed = False for op in graph.operations: if self._match_op_type(op.type, type) and cond(op) and op.input.quant == op.output.quant: changed |= self._bypass_and_remove(graph, op) return changed def _merge_op_into_variables_and_constants(self, graph, type, func): changed = False for op in graph.operations: if (op.type == 'variable' or op.type == 'constant') and len(op.output.consumers) > 0: if self._all_consumers_same(op.output, type): data = op.output.data if op.output.data is not None else 
np.zeros(op.output.shape) attribs = op.output.consumers[0].attribs data = func(data, attribs) if op.output.data is not None: op.output.data = data op.output.shape = data.shape op.attribs['shape'] = list(data.shape) for consumer in list(op.output.consumers): # copy the list before removals! changed |= self._bypass_and_remove(graph, consumer) return changed def _remove_inverse_ops(self, graph, type1, type2, cond): changed = False for op in graph.operations: if op.type == type1 and len(op.output.consumers) == 1: consumer = op.output.consumer if consumer.type == type2 and cond(op, consumer): changed |= self._bypass_and_remove(graph, op) changed |= self._bypass_and_remove(graph, consumer) return changed def _merge_reshape_sequence(self, graph): changed = False for op in graph.operations: if op.type == 'reshape' and len(op.output.consumers) == 1: consumer = op.output.consumer if consumer.type == 'reshape': new_shape = self._get_reshape_shape(consumer.input, consumer.attribs) if any(s == 0 for s in new_shape): old_shape = self._get_reshape_shape(op.input, op.attribs) new_shape = [old_shape[i] if s == 0 else s for i, s in enumerate(new_shape)] consumer.attribs['shape'] = new_shape del consumer.attribs['axis_start'] del consumer.attribs['axis_count'] changed |= self._bypass_and_remove(graph, op) return changed def _get_reshape_shape(self, input, attribs): start = attribs.get('axis_start', 0) count = attribs.get('axis_count', len(input.shape) - start) shape = attribs['shape'] return input.shape[:start] + tuple(shape) + input.shape[start + count:] def _bypass_and_remove(self, graph, op): if op.output in graph.outputs and (op.input in graph.inputs or op.input in graph.outputs): self._insert_copy(op.input, op.output) graph.remove_operation(op, unlink=True) return False else: bypass_and_remove(graph, op, remove_input_not_output=op.output in graph.outputs) return True @staticmethod def _is_channelwise_shape(shape): return len(shape) <= 1 or all(s == 1 or i == 1 for i, s in 
enumerate(shape)) @staticmethod def _merge_linear_add(linear, add, type=None): bias = add.inputs[1] if add.inputs[0] == linear.output else add.inputs[0] if bias.data is None or not Optimizer._is_channelwise_shape(bias.shape): return False if len(linear.inputs) > 2 and linear.inputs[2].data is None: return None if len(bias.shape) == 0: bias.data = np.expand_dims(bias.data, axis=0) elif len(bias.shape) >= 2: bias.data = Optimizer._squeeze_batch_and_spatial_dims(bias.data) bias.shape = bias.data.shape if add.type == 'sub': bias.data = -bias.data if len(linear.inputs) == 2: new_shape = (1, 1) if len(bias.shape) == 0 else (1, *bias.shape) if len(bias.shape) == 1 else None if new_shape is not None: bias.data = np.reshape(bias.data, newshape=new_shape) bias.shape = new_shape else: bias.data = linear.inputs[2].data + bias.data bias.shape = bias.data.shape Optimizer._ensure_variable_producer(bias, label=linear.output.name + '_bias') linear.copy_with(type=type or linear.type, attribs=linear.attribs if type != 'linear' else {}, inputs=(linear.inputs[0], linear.inputs[1], bias), outputs=add.output) @staticmethod def _merge_matmul_bias(matmul, add): bias = add.inputs[1] if add.inputs[0] == matmul.output else add.inputs[0] if not Optimizer._is_channelwise_shape(bias.shape): return False transposeA = matmul.attribs.get('transposeA') or False transposeB = matmul.attribs.get('transposeB') or False if transposeA: return False if not transposeB: producer = matmul.inputs[1].producer data = matmul.inputs[1].data if data is None or producer.type != 'variable': return False rank = len(data.shape) data = np.transpose(data, axes=list(range(rank - 2)) + [rank - 1, rank - 2]) matmul.inputs[1].data = data producer.attribs['shape'] = list(data.shape) matmul.attribs['transposeB'] = True return Optimizer._merge_linear_add(matmul, add, type='linear') @staticmethod def _is_sorted(array): return all(array[i] <= array[i + 1] for i in range(len(array) - 1)) @staticmethod def 
_all_consumers_same(tensor, type): attribs = tensor.consumers[0].attribs return all(consumer.type == type and consumer.attribs == attribs for consumer in tensor.consumers) @staticmethod def _unsqueeze_shape(shape, axes): for axis in axes: shape = shape[:axis] + (1,) + shape[axis:] return shape @staticmethod def _permute(items, perm): permuted = list(items) for i in range(len(perm)): permuted[i] = items[perm[i]] return type(items)(permuted) @staticmethod def _add_variable(graph, data, name, label=None): output = Tensor(graph, name=name, shape=data.shape, dtype=data.dtype.type, data=data) Operation(graph, type='variable', outputs=output, attribs={'shape': list(data.shape), 'label': label or name}) return output @staticmethod def _ensure_variable_producer(tensor, label): if tensor.producer is None and len(tensor.shape) != 0: Operation(tensor.graph, type='variable', outputs=tensor, attribs={'shape': list(tensor.shape), 'label': label}) elif tensor.producer is not None: tensor.producer.attribs['shape'] = list(tensor.shape) @staticmethod def _merged_conv_batch_norm_params(weights, bias, mean, variance, offset, scale, epsilon, axis): std = np.sqrt(variance + epsilon) factor = scale / std new_weights = weights * np.reshape(factor, newshape=(1,) * axis + factor.shape + (1,) * (len(weights.shape) - axis - 1)) new_bias = (bias - mean) * factor + offset return new_weights, new_bias @staticmethod def _merge_conv_batch_norm(conv, bn): if any(tensor.quant for tensor in conv.inputs) or any(tensor.quant for tensor in bn.inputs): return False if conv.inputs[1].data is None: return False weights, bias = Optimizer._merged_conv_batch_norm_params(conv.inputs[1].data, np.squeeze(conv.inputs[2].data if len(conv.inputs) > 2 else 0, axis=0), np.squeeze(bn.inputs[1].data, axis=0), np.squeeze(bn.inputs[2].data, axis=0), np.squeeze(bn.inputs[3].data if len(bn.inputs) > 3 else 0, axis=0), np.squeeze(bn.inputs[4].data if len(bn.inputs) > 4 else 1, axis=0), bn.attribs['epsilon'], axis=1 if 
conv.type == 'deconv' else 0) bias = np.expand_dims(bias, axis=0) conv.inputs[1].data = weights if len(conv.inputs) > 2: conv.inputs[2].data = bias conv.inputs[2].shape = bias.shape Optimizer._ensure_variable_producer(conv.inputs[2], label=conv.output.name + '_bias') conv.copy_with(outputs=bn.output) else: bias = Optimizer._add_variable(conv.graph, data=bias, name=conv.output.name + '_bias') conv.copy_with(inputs=(*conv.inputs[:2], bias), outputs=bn.output) @staticmethod def _merged_batch_norm_params(mean, variance, offset, scale, epsilon): std = np.sqrt(variance + epsilon) factor = scale / std return factor, offset - factor * mean @staticmethod def _merge_batch_norm(bn): if any(tensor.quant for tensor in bn.inputs): return False scale, offset = Optimizer._merged_batch_norm_params( bn.inputs[1].data, bn.inputs[2].data, bn.inputs[3].data if len(bn.inputs) > 3 else 0, bn.inputs[4].data if len(bn.inputs) > 4 else 1, bn.attribs['epsilon']) scale = Optimizer._add_variable(bn.graph, data=scale, name=bn.output.name + '_scale') offset = Optimizer._add_variable(bn.graph, data=offset, name=bn.output.name + '_offset') scaled = Tensor(graph=bn.graph, name=bn.output.name + '_scaled', shape=bn.output.shape, dtype=bn.output.dtype) Operation(graph=bn.graph, type='mul', inputs=(bn.inputs[0], scale), outputs=scaled) Operation(graph=bn.graph, type='add', inputs=(scaled, offset), outputs=bn.output) @staticmethod def _merge_mul_linear(mul, linear): which = 0 if mul.inputs[0].data is not None else 1 other = 1 - which variable = mul.inputs[which] if variable.data is None or not Optimizer._is_channelwise_shape(variable.shape): return False if len(variable.shape) == 0: scale = np.expand_dims(variable.data, axis=0) elif len(variable.shape) >= 2: scale = Optimizer._squeeze_batch_and_spatial_dims(variable.data) weights = linear.inputs[1] if weights.data is None: return False rank = len(weights.shape) shape = scale.shape + (1,) * (rank - 1) if linear.type == 'deconv' else (1,) + scale.shape + 
(1,) * (rank - 2) scale = np.reshape(scale, newshape=shape) weights.data = weights.data * scale if mul.type != 'div' else weights.data / scale linear.copy_with(inputs=(mul.inputs[other], weights, *linear.inputs[2:]), outputs=linear.output) @staticmethod def _merge_linear_mul(linear, mul): variable = mul.inputs[1] if mul.inputs[0] == linear.output else mul.inputs[0] if variable.data is None or not Optimizer._is_channelwise_shape(variable.shape): return False if len(variable.shape) == 0: scale = np.expand_dims(variable.data, axis=0) elif len(variable.shape) >= 2: scale = Optimizer._squeeze_batch_and_spatial_dims(variable.data) negate = mul.type == 'div' weights = linear.inputs[1] if weights.data is None: return False if len(linear.inputs) > 2: bias = linear.inputs[2] if bias.data is None: return False bias.data = bias.data * scale if not negate else bias.data / scale bias.shape = bias.data.shape Optimizer._ensure_variable_producer(bias, label=linear.output.name + '_bias') rank = len(weights.shape) shape = (1,) + scale.shape + (1,) * (rank - 2) if linear.type == 'deconv' else scale.shape + (1,) * (rank - 1) scale = np.reshape(scale, newshape=shape) weights.data = weights.data * scale if not negate else weights.data / scale linear.copy_with(inputs=(linear.inputs[0], weights, *linear.inputs[2:]), outputs=mul.output) @staticmethod def _remove_unused_variables_and_constants(graph): ops = {op for op in graph.operations if (op.type == 'variable' or op.type == 'constant') and len(op.output.consumers) == 0} tensors = {op.output for op in ops} graph.remove_operations(ops, unlink=True) graph.remove_tensors(tensors) return len(ops) != 0 @staticmethod def _merge_pad_with_sliding(pad, sliding): offset = 2 if sliding.type == 'conv' or sliding.type == 'deconv' else 0 padding = pad.attribs['padding'] if not all(p == 0 and q == 0 for p, q in sliding.attribs['padding']) or \ len(padding) < offset or not all(p == 0 and q == 0 for p, q in padding[:offset]): return False attribs = 
dict(sliding.attribs) attribs['padding'] = pad.attribs['padding'][offset:] attribs['border'] = pad.attribs['border'] sliding.copy_with(inputs=(pad.input, *sliding.inputs[1:]), attribs=attribs) @staticmethod def _squeeze_batch_and_spatial_dims(data): return np.squeeze(data, axis=(0,) + tuple(i for i in range(2, len(data.shape)))) @staticmethod def _is_constant(tensor, value): if tensor.producer is not None and tensor.producer.name == 'constant': data = tensor.attribs['value'] else: data = tensor.data return (not isinstance(tensor.data, np.ndarray) or data.shape == ()) and data == value @staticmethod def _is_uniform(array, value): return all(item == value for item in array) @staticmethod def _merge_transpose_squeeze(transpose, squeeze): transpose_axes = transpose.attribs['axes'] squeeze_axes = squeeze.attribs['axes'] squeezed = [x for i, x in enumerate(transpose_axes) if i not in squeeze_axes] is_identity = squeezed == list(range(len(squeezed))) if not is_identity: return False attribs = dict(squeeze.attribs) attribs['axes'] = [transpose_axes[x] for x in squeeze_axes] squeeze.copy_with(inputs=transpose.input, attribs=attribs) @staticmethod def _substitute_squeeze(reshape): input_shape = reshape.input.shape output_shape = reshape.output.shape if not len(output_shape) < len(input_shape): return False k = 0 axes = [] for i in range(len(input_shape)): if k < len(output_shape) and input_shape[i] == output_shape[k]: k += 1 elif input_shape[i] == 1: axes.append(i) else: return False attribs = {'axes': axes} dtype = reshape.attribs.get('dtype') if dtype is not None: attribs['dtype'] = dtype Operation(reshape.graph, type='squeeze', name=reshape.name, inputs=reshape.input, outputs=reshape.output, attribs=attribs) @staticmethod def _dequantize_variables(graph): for tensor in graph.tensors: if tensor.quant and tensor.data is not None: rank = len(tensor.data.shape) scale = Optimizer._ensure_quant_param_rank(tensor.quant.get('scale'), rank) zero_point = 
Optimizer._ensure_quant_param_rank(tensor.quant.get('zero_point'), rank) if isinstance(zero_point, np.ndarray): assert Optimizer._broadcastable(zero_point.shape, tensor.shape), \ f"zero-point shape {zero_point.shape} cannot be broadcast to tensor shape {tensor.shape} " \ f"for tensor '{tensor.name}'" if isinstance(scale, np.ndarray): assert Optimizer._broadcastable(scale.shape, tensor.shape), \ f"scale shape {scale.shape} cannot be broadcast to tensor shape {tensor.shape} " \ f"for tensor '{tensor.name}'" if scale is not None and not Optimizer._is_zero(scale): dequantized = (tensor.data - zero_point) * scale tensor.data = dequantized.astype(np.float32) tensor.quant = None @staticmethod def _remove_quantization_attribs(graph): for tensor in graph.tensors: tensor.quant = None @staticmethod def _ensure_quant_param_rank(param, rank, offset=0): return np.reshape(param, newshape=(1,) * offset + param.shape + (1,) * (rank - 1 - offset)) \ if isinstance(param, np.ndarray) and len(param.shape) == 1 else param @staticmethod def _broadcastable(x, y): return all(xi == yi or xi == 1 for xi, yi in zip(x, y)) @staticmethod def _is_zero(value): return np.all(value == 0) if isinstance(value, np.ndarray) else value == 0 ================================================ FILE: nnef_tools-pyproject/nnef_tools/optimization/onnx_optimizer.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from ..model.utils import replace_chain import six class Optimizer: def __init__(self, custom_optimizers=None): self._custom_optimizers = custom_optimizers or {} def __call__(self, graph, only_required=False): self._fix_batchnorm_spatial(graph) for chain, replacer in six.iteritems(self._custom_optimizers): replace_chain(graph, chain, replacer) return graph @staticmethod def _fix_batchnorm_spatial(graph): for op in graph.operations: if op.type == 'BatchNormalization': spatial = op.attribs.get('spatial') if spatial == 0 and op.inputs[1].rank == 1: del op.attribs['spatial'] ================================================ FILE: nnef_tools-pyproject/nnef_tools/optimization/tf_optimizer.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from ..model.utils import replace_chain from ..model.graph import * from ..utils.types import from_numpy class Optimizer: def __init__(self, custom_optimizers=None): self._custom_optimizers = custom_optimizers or {} def __call__(self, graph, only_required=False): self._fix_inputs_without_producer(graph) replace_chain(graph, ['SpaceToBatchND', {'Conv2D', 'DepthwiseConv2dNative'}, 'BatchToSpaceND'], self._replace_dilated_conv) replace_chain(graph, ['Cast'], self._replace_bool_cast) for chain, replacer in six.iteritems(self._custom_optimizers): replace_chain(graph, chain, replacer) if not only_required: self._remove_unused_constants(graph) return graph @staticmethod def _fix_inputs_without_producer(graph): idx = 0 for tensor in graph.inputs: if tensor.producer is None: cnt = len(graph.operations) Operation(tensor.graph, type='Placeholder', name=Optimizer._op_name(tensor.name), outputs=tensor, attribs={'shape': tensor.shape, 'dtype': tensor.dtype}) graph.move_operation(cnt, idx) idx += 1 return idx > 0 @staticmethod def _op_name(tensor_name): idx = tensor_name.find(':') return tensor_name[:idx] if idx != -1 else tensor_name @staticmethod def _remove_unused_constants(graph): ops = [op for op in graph.operations if op.type == 'Const' and len(op.output.consumers) == 0] tensors = [op.output for op in ops] graph.remove_operations(ops, unlink=True) graph.remove_tensors(tensors) @staticmethod def _replace_dilated_conv(space_to_batch, conv, batch_to_space): if not Optimizer._is_constant(space_to_batch.inputs[1]) or not Optimizer._is_constant(batch_to_space.inputs[1]): return False block_shape1 = Optimizer._read_constant(space_to_batch.inputs[1]) block_shape2 = Optimizer._read_constant(batch_to_space.inputs[1]) if not np.all(block_shape1 == block_shape2): return False if conv.attribs['padding'] != 'VALID': return False dilations = from_numpy(block_shape1) input = space_to_batch.inputs[0] filter = conv.inputs[1] output = batch_to_space.outputs[0] is_nxc = 
Optimizer._is_nxc(conv.attribs['data_format']) same_padding = Optimizer._is_same_padded(input.shape, output.shape, conv.attribs['strides'], is_nxc) if not same_padding: return False op = conv.copy_with(inputs=(input, filter, *conv.inputs[2:]), outputs=output, attribs=dict(conv.attribs)) op.attribs['dilations'] = [1] + dilations + [1] if is_nxc else [1, 1] + dilations op.attribs['padding'] = 'SAME' if '_output_shapes' in op.attribs: op.attribs['_output_shapes'] = batch_to_space.attribs['_output_shapes'] @staticmethod def _replace_bool_cast(cast): if cast.input.dtype == bool and cast.output.dtype != bool: ones = Tensor(cast.graph, name=cast.name + '/ones', dtype=cast.output.dtype, shape=cast.output.shape, data=np.full(fill_value=1, dtype=cast.output.dtype, shape=cast.output.shape)) zeros = Tensor(cast.graph, name=cast.name + '/zeros', dtype=cast.output.dtype, shape=cast.output.shape, data=np.full(fill_value=0, dtype=cast.output.dtype, shape=cast.output.shape)) Optimizer._make_constant_producer(ones) Optimizer._make_constant_producer(zeros) Operation(cast.graph, type='Select', name=cast.name, inputs=(cast.input, ones, zeros), outputs=cast.output, attribs={'T': cast.output.dtype}) elif cast.input.dtype != bool and cast.output.dtype == bool: zeros = Tensor(cast.graph, name=cast.name + '/zeros', dtype=cast.input.dtype, shape=(), data=np.array(0, dtype=cast.input.dtype)) Optimizer._make_constant_producer(zeros) Operation(cast.graph, type='NotEqual', name=cast.name, inputs=(cast.input, zeros), outputs=cast.output, attribs={'T': cast.output.dtype}) else: return False @staticmethod def _is_constant(tensor): return tensor.producer.type == 'Const' if tensor.producer else tensor.data is not None @staticmethod def _read_constant(tensor): return tensor.producer.attribs['value'] if tensor.producer else tensor.data @staticmethod def _is_nxc(format): return format[0] == 'N' and format[-1] == 'C' and len(format) > 2 @staticmethod def _is_same_padded(input, output, stride, is_nxc): 
rank = len(input) return all(output[i] == (input[i] + stride[i] - 1) // stride[i] for i in (range(1, rank - 1) if is_nxc else range(2, rank))) @staticmethod def _make_constant_producer(tensor): Operation(tensor.graph, type='Const', name=tensor.name, outputs=tensor, attribs={'dtype': tensor.dtype, 'value': tensor.data}) ================================================ FILE: nnef_tools-pyproject/nnef_tools/optimization/tflite_optimizer.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from ..model.utils import replace_chain, bypass_and_remove from ..model.graph import * from ..utils.types import from_numpy class Optimizer: def __init__(self, custom_optimizers=None): self._custom_optimizers = custom_optimizers or {} def __call__(self, graph, only_required=False): Optimizer._eliminate_variable_dequantize_ops(graph) replace_chain(graph, ['SPACE_TO_BATCH_ND', {'CONV_2D', 'DEPTHWISE_CONV_2D'}, 'BATCH_TO_SPACE_ND'], self._replace_dilated_conv) replace_chain(graph, ['RESHAPE', 'RESHAPE', 'PACK', 'PACK', 'RESHAPE'], self._replace_resize_nearest) replace_chain(graph, ['SHAPE'], self._replace_const_shape) for chain, replacer in six.iteritems(self._custom_optimizers): replace_chain(graph, chain, replacer) return graph @staticmethod def _replace_resize_nearest(reshape1, reshape2, pack1, pack2, reshape3): def _all_inputs_same(op): return len(op.inputs) > 0 and all(tensor is op.inputs[0] for tensor in op.inputs) if not (reshape2.output.shape == reshape1.inputs[0].shape and _all_inputs_same(pack1) and _all_inputs_same(pack2) and len(pack1.inputs) == len(pack2.inputs)): return False input = reshape1.inputs[0] output = reshape3.output size = Tensor(input.graph, shape=(len(output.shape) - 2,), dtype=np.int32, data=np.array(output.shape[1:-1]), name=output.name + '/size') Operation(input.graph, type='RESIZE_NEAREST_NEIGHBOR', inputs=(input, size), outputs=output, attribs={ 'align_corners': False, 'half_pixel_centers': False, }) @staticmethod def _replace_dilated_conv(space_to_batch, conv, batch_to_space): if not Optimizer._is_constant(space_to_batch.inputs[1]) or not Optimizer._is_constant(batch_to_space.inputs[1]): return False block_shape1 = Optimizer._read_constant(space_to_batch.inputs[1]) block_shape2 = Optimizer._read_constant(batch_to_space.inputs[1]) if not np.all(block_shape1 == block_shape2): return False if conv.attribs['padding'] != 'VALID': return False strides = [1, conv.attribs['stride_h'], conv.attribs['stride_w'], 1] dilations = 
from_numpy(block_shape1) input = space_to_batch.inputs[0] filter = conv.inputs[1] output = batch_to_space.outputs[0] same_padding = Optimizer._is_same_padded(input.shape, output.shape, strides) if not same_padding: return False op = conv.copy_with(inputs=(input, filter, *conv.inputs[2:]), outputs=output, attribs=dict(conv.attribs)) op.attribs['dilation_h_factor'] = dilations[0] op.attribs['dilation_w_factor'] = dilations[1] op.attribs['padding'] = 'SAME' @staticmethod def _is_constant(tensor): return tensor.producer is None and tensor.data is not None @staticmethod def _read_constant(tensor): return tensor.data @staticmethod def _is_same_padded(input, output, stride, is_nxc=True): rank = len(input) return all(output[i] == (input[i] + stride[i] - 1) // stride[i] for i in (range(1, rank - 1) if is_nxc else range(2, rank))) @staticmethod def _eliminate_variable_dequantize_ops(graph): for op in list(graph.operations): if op.type == 'DEQUANTIZE' and Optimizer._is_constant(op.input): variable = op.input if 'zero_point' in variable.quant and 'scale' in variable.quant: zero_point = variable.quant['zero_point'] scale = variable.quant['scale'] variable.data = (variable.data - zero_point) * scale variable.data = variable.data.astype(np.float32) variable.dtype = np.float32 variable.quant = None bypass_and_remove(graph, op) @staticmethod def _replace_const_shape(shape): if shape.input.shape is not None: shape.output.data = np.array(shape.input.shape) ================================================ FILE: nnef_tools-pyproject/nnef_tools/quantize.py ================================================ from nnef_tools.io.nnef.reader import Reader from nnef_tools.io.nnef.writer import Writer import numpy as np import argparse import json import os _CONV_OPS = ['conv', 'deconv', 'separable_conv', 'separable_deconv'] def make_quantization(min, max, signed, symmetric): if min > 0: min = 0 if max < 0: max = 0 if signed and symmetric: if -max < min: min = -max if -min > max: max = -min 
scale = 255 / (max - min) zero_point = int((0 - min) * scale) if signed: zero_point -= 127 if symmetric else 128 return {'op-name': 'zero_point_linear_quantize', 'zero_point': zero_point, 'scale': scale, 'signed': signed, 'symmetric': symmetric, 'bits': 8} def quantize_params(params, zero_point, scale, signed, symmetric): min = (((-127 if symmetric else -128) if signed else 0) - zero_point) * scale max = ((127 if signed else 255) - zero_point) * scale params = np.clip(params, min, max) return np.floor((params - min) / scale).astype(np.int8 if signed else np.uint8) def quantize_bias(params, scale): return np.floor(params / scale).astype(np.int32) def is_conv_param(tensor): return all(op.type in _CONV_OPS for op in tensor.consumers) def is_conv_bias(tensor): assert len(tensor.consumers) == 1 return len(tensor.consumers) == 1 and tensor.consumer.type in _CONV_OPS and len(tensor.consumer.inputs) > 2 and \ tensor is tensor.consumer.inputs[2] def main(args): reader = Reader(infer_shapes=False) model = reader(args.model) stats_path = args.statistics or os.path.join(args.model, 'graph.stats') if not os.path.exists(stats_path): print("Could not find statistics file '{}'".format(stats_path)) return -1 with open(stats_path, 'r') as file: stats = json.load(file) for tensor in model.tensors: stat = stats.get(tensor.name) if stat is not None: if args.percentile is not None: lo = max(stat['mean'] - args.percentile * stat['std'], stat['min']) hi = min(stat['mean'] + args.percentile * stat['std'], stat['max']) else: lo = stat['min'] hi = stat['max'] tensor.quant = make_quantization(lo, hi, args.signed, args.symmetric) if tensor.data is not None: tensor.data = quantize_params(tensor.data, tensor.quant['zero_point'], tensor.quant['scale'], args.signed, args.symmetric) if args.wide_bias: for tensor in model.tensors: if len(tensor.quant) > 0 and tensor.data is not None and is_conv_bias(tensor): conv = tensor.consumer tensor.quant['bits'] = 32 tensor.quant['zero_point'] = 0 
tensor.quant['scale'] = conv.inputs[0].quant['scale'] * conv.inputs[1].quant['scale'] tensor.data = quantize_bias(tensor.data, tensor.quant['scale']) writer = Writer() writer(model, args.output) return 0 if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('model', type=str, help='The model to quantize') parser.add_argument('--statistics', type=str, default=None, help='The tensor statistics to use for quantization') parser.add_argument('--output', type=str, required=True, help='The path of the output model') parser.add_argument('--signed', action='store_true', help='Whether to generate signed int8 quantized values instead of uint8') parser.add_argument('--symmetric', action='store_true', help='Whether to quantize symmetrically and force zero-point to 0') parser.add_argument('--wide-bias', action='store_true', help='Whether to quantize biases into int32 values') parser.add_argument('--percentile', type=float, default=None, help='Define ranges with approximate normal distribution percentiles;' 'provide number of standard deviations from mean to be used') exit(main(parser.parse_args())) ================================================ FILE: nnef_tools-pyproject/nnef_tools/random_tensor.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from .utils import stdio import numpy as np import argparse import nnef import sys def _is_lambda(v): LAMBDA = lambda: 0 return isinstance(v, type(LAMBDA)) and v.__name__ == LAMBDA.__name__ def uniform(min=0, max=1): return lambda shape: np.random.uniform(min, max, shape) def normal(mean=0, std=1): return lambda shape: np.random.normal(mean, std, shape) def bernoulli(prob=0.5): return lambda shape: np.random.uniform(0, 1, shape) < prob def main(args): if args.output is None: if not stdio.is_stdout_piped(): print("Output must be piped", file=sys.stderr) return -1 stdio.set_stdout_to_binary() try: distribution = eval(args.distribution) if not _is_lambda(distribution): distribution = distribution() except Exception as e: print("Could not evaluate distribution: " + str(e), file=sys.stderr) return -1 tensor = distribution(args.shape).astype(np.dtype(args.dtype)) if args.output is not None: with open(args.output, 'wb') as file: nnef.write_tensor(file, tensor) else: nnef.write_tensor(sys.stdout, tensor) return 0 if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('distribution', type=str, help='The distribution to generate values from') parser.add_argument('--shape', type=int, nargs='+', required=True, help='The dimensions of the tensor to generate') parser.add_argument('--dtype', type=str, default='float32', help='The data-type of the resulting tensor') parser.add_argument('--output', type=str, default=None, help='File name to save the result into') exit(main(parser.parse_args())) ================================================ FILE: nnef_tools-pyproject/nnef_tools/utils/__init__.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: nnef_tools-pyproject/nnef_tools/utils/stdio.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys def set_stdin_to_binary(): if sys.version_info >= (3, 0): sys.stdin = sys.stdin.buffer elif sys.platform == 'win32': import os, msvcrt msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY) def set_stdout_to_binary(): if sys.version_info >= (3, 0): sys.stdout = sys.stdout.buffer elif sys.platform == 'win32': import os, msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) def is_stdin_piped(): return not sys.stdin.isatty() def is_stdout_piped(): return not sys.stdout.isatty() ================================================ FILE: nnef_tools-pyproject/nnef_tools/utils/types.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import numpy as np from collections.abc import Sequence # noinspection PyUnresolvedReferences def as_str(s): if sys.version_info[0] >= 3: return s.decode('utf-8') if isinstance(s, bytes) else s else: return s.encode('utf-8') if isinstance(s, unicode) else s PyTypeFromNumpyDtype = { np.float16: float, np.float32: float, np.float64: float, np.int8: int, np.int16: int, np.int32: int, np.int64: int, np.uint8: int, np.uint16: int, np.uint32: int, np.uint64: int, np.bool_: bool, np.str_: str, } PyTypeToNumpyDtype = { int: np.int32, float: np.float32, bool: np.bool_, str: np.str_, } _builtin_type = type def cast(value, type): return _builtin_type(value)(cast(item, type) for item in value) if isinstance(value, Sequence) else type(value) def from_numpy(array, type=None): if type is None: type = PyTypeFromNumpyDtype[array.dtype.type] return cast(array.tolist(), type) def to_numpy(value, dtype=None): def _item(value): return _item(value[0]) if isinstance(value, Sequence) else value if dtype is None: dtype = PyTypeToNumpyDtype.get(type(_item(value))) return np.array(value, dtype=dtype) ================================================ FILE: nnef_tools-pyproject/nnef_tools/visualize.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
from .io.nnef import Reader
from .io.nnef.writer import _DtypeFromNumpy
from graphviz import Digraph
import numpy as np
import os


# NOTE(review): several string literals below look damaged by text extraction —
# HTML-like markup (font/line-break tags used for graphviz HTML labels) appears
# to have been stripped, leaving raw newlines inside single-quoted strings that
# do not parse. Confirm these literals against the upstream sources before use.


def _text_with_size(text, size):
    # NOTE(review): '{}' has one placeholder but two arguments, so `text` is
    # silently dropped as written — presumably this wrapped `text` in a
    # font-size tag whose markup was stripped; confirm upstream.
    return '{}'.format(size, text)


def _format_tensor_label(tensor):
    # Label shown on graph edges/nodes: tensor name plus dtype and shape.
    return '< {}
{}>'.format(tensor.name, _text_with_size(_dtype_str(tensor.dtype) + _shape_str(tensor.shape), size=10))


def _dtype_str(dtype):
    # Accepts either a numpy dtype object or a raw numpy scalar type.
    return _DtypeFromNumpy[dtype.type if isinstance(dtype, np.dtype) else dtype]


def _shape_str(shape):
    # E.g. [1,3,?,?]; unknown dims render as '?', unknown shape as ''.
    return '[' + ','.join(str(s) if s is not None else '?' for s in shape) + ']' if shape is not None else ''


def _truncate(text, max_length=32):
    # Ellipsize long attribute values for display.
    return text[:max_length - 3] + "..." if len(text) > max_length else text


def _attribs_str(attribs, separator):
    # One "key<separator>value" string per attribute, sorted by key.
    s = []
    for k, v in sorted(attribs.items(), key=lambda e: e[0]):
        if k == "label":
            # Only show the last path component of variable labels.
            v = ".../" + v.split('/')[-1]
        elif k == "dtype":
            v = _dtype_str(v)
        s.append("{}{}{}".format(k, separator, _truncate(str(v))))
    return s


def _format_op_details(op):
    # Tooltip text: inputs, outputs and attributes of the operation.
    s = [
        "inputs: " + ", ".join(str(tensor.name if tensor.producer is not None else tensor.data)
                               for tensor in op.inputs),
        "outputs: " + ", ".join(str(tensor.name) for tensor in op.outputs),
    ]
    s.extend(_attribs_str(op.attribs, separator=': '))
    # NOTE(review): joined with a plain space as shown; a line-break entity may
    # have been lost in extraction — confirm upstream.
    return " ".join(s)


def _format_op_label(op):
    attrs = (_text_with_size(s, size=10) for s in _attribs_str(op.attribs, separator='='))
    # NOTE(review): markup stripped here as well (see module note above).
    return '<{}
{}>'.format(op.type, '
'.join(attrs)) if len(op.attribs) else op.type


def _generate_digraph(graph, show_variables, verbose):
    # Build a graphviz Digraph: one box node per op (variables optional,
    # externals never shown), edges labelled with the connecting tensors,
    # ellipse nodes for graph inputs/outputs.
    digraph = Digraph()
    for op in graph.operations:
        if (show_variables or op.type != "variable") and op.type != "external":
            digraph.node(str(id(op)), _format_op_label(op) if verbose else op.type, shape="box",
                         tooltip=_format_op_details(op))
    for tensor in graph.tensors:
        if tensor.producer is not None and (show_variables or tensor.producer.type != "variable") and tensor.producer.type != "external":
            for consumer in tensor.consumers:
                digraph.edge(str(id(tensor.producer)), str(id(consumer)),
                             label=_format_tensor_label(tensor) if verbose else " " + tensor.name,
                             labeltooltip="{}{}".format(_dtype_str(tensor.dtype), _shape_str(tensor.shape)))
    for tensor in graph.inputs:
        digraph.node(str(id(tensor)), _format_tensor_label(tensor) if verbose else tensor.name, shape="ellipse",
                     tooltip="{}{}".format(_dtype_str(tensor.dtype), _shape_str(tensor.shape)))
        for consumer in tensor.consumers:
            digraph.edge(str(id(tensor)), str(id(consumer)), label=None)
    for tensor in graph.outputs:
        digraph.node(str(id(tensor)), _format_tensor_label(tensor) if verbose else tensor.name, shape="ellipse",
                     tooltip="{}{}".format(_dtype_str(tensor.dtype), _shape_str(tensor.shape)))
        digraph.edge(str(id(tensor.producer)), str(id(tensor)), label=None)
    return digraph


def main(args):
    # Read the NNEF model, render it with graphviz, and rename the output so
    # the final file is '<model>.<format>' rather than '<model>.gv.<format>'.
    reader = Reader(decomposed=args.decompose, infer_shapes=args.infer_shapes)
    graph = reader(args.model)
    digraph = _generate_digraph(graph, args.show_variables, args.verbose)
    digraph.render(args.model + '.gv', format=args.format, cleanup=True)
    os.rename(args.model + '.gv.' + args.format, args.model + '.' + args.format)
    return 0


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('model', type=str, help='The model to visualize')
    parser.add_argument('--decompose', type=str, nargs='*', default=None,
                        help='Names of operators to be decomposed by NNEF parser')
    parser.add_argument('--verbose', action='store_true',
                        help='Add more info to the nodes and edges')
    parser.add_argument('--show-variables', action='store_true',
                        help='Show variables explicitly')
    parser.add_argument('--infer-shapes', action='store_true',
                        help='Perform shape inference and show in visualized graph')
    parser.add_argument('--format', type=str, choices=['svg', 'pdf', 'png', 'dot'], default='svg',
                        help='The format of the output')
    exit(main(parser.parse_args()))


================================================
FILE: nnef_tools-pyproject/package_info.md
================================================
# NNEF Tools

This package contains a set of tools for converting and transforming machine learning models.

## Usage

For basic usage, you have to supply an input format, an output format and an input model.
The output model name defaults to the input model name suffixed with the output format, but it can also be supplied explicitly.

```
python -m nnef_tools.convert --input-format tf --output-format nnef --input-model my_model.pb --output-model my_model.nnef
```

### Setting input shapes

If the model has (partially) undefined shapes, the concrete shapes can be supplied with the `--input-shapes` argument.
The input shapes must be a Python dict expression, with string keys of input tensor names and tuple values as shapes.
It is enough to supply shapes for those inputs that we want to freeze. For example:

```
--input-shapes "{'input': (1, 224, 224, 3)}"
```

### Transposing inputs and outputs

When converting between TF and NNEF, the (default) dimension ordering differs, and the model may be transposed (for example in case of 2D convolutional models).
However, the inputs and outputs are not automatically transposed, as the converter cannot reliably decide which input and outputs represent images. Transposing inputs and outputs can be turned on by the `--io-transpose` option. There are two ways to use it: either to transpose all inputs and outputs, or to select the ones to be transposed. All inputs and outputs can be transposed by using `--io-transpose` without any further arguments, while selecting inputs and outputs can be done by providing a list of names: ``` --io-transpose "input1" "input2" "output1" ``` ### Retaining input/output names During conversion, the converter may generate suitable names for tensors. However, it is possible to force to keep the names of input and output tensors using the `--keep-io-names` option. ### Folding constants The original model may contain operations that are performed on constant tensors, mainly resulting from shapes that are known in conversion time, or that became known by setting with the `--input-shape` option. In this case, it can be useful to fold constant operations, because the resulting graph is simplified. Furthermore, without constant folding, the graph may not even be convertible due to the presence of non-convertible operations, but constant folding may eliminate them and make the model convertible. To use it, simply turn on the `--fold-constants` option. ### Optimizing the output model The resulting model may contain operations or sequences of operations that can be merged or even eliminated as they result in a no-op. To do so, turn on the `--optimize` flag. This works for NNEF output. The converter can also be run with the same input and output format. In this case, the tool only reads and writes the model, with an optional optimization phase in between if the `--optimize` flag is set and an optimizer is available for the given format. ### Handling unsupported operations When running into an unsupported operation, the converter stops the conversion process. 
It is possible to override this behavior by enabling mirror-conversion (one-to-one copying the operation to the destination format) using the `--mirror-unsupported` flag. This may not result in a valid output model, but may be helpful for debugging. ## Further options The following further options can be used when the output format is NNEF: * The `--compress` option generates a compressed `tgz` file. It can also take a further compression level argument. * The `--annotate-shapes` flag generates the graph description with the shapes of tensors annotated in comments. * The `--output-names` option takes a list of tensor names, and considers those as outputs, and only converts the sub-graph required to compute those outputs. * The `--tensor-mapping` option allows to save the mapping of tensor names (mapping from the input model to the output model) into a separate json file. ## Conversion from TF Python code When starting from Python code, the first step is to export the graph into a graph-def protobuf (.pb) file, which can then be further converted to a different format. To do so, the package contains some utility functions to freeze the graph and save it. Simply import these utilities and call them in your Python code: ``` import nnef_tools.io.tf.graphdef as graphdef # define your TF model here with tf.Session() as sess: ... # initialize variables and train graph graphdef.save_default_graph('path/to/save.pb', session=sess, outputs=...) ``` If your model contains dynamic shapes, you can save the graph with concrete shapes by providing the input shapes to the save function. Furthermore, constant operations can also be folded while saving the model: ``` graphdef.save_default_graph('path/to/save.pb', session=..., outputs=..., input_shapes={'input': (1, 224, 224, 3)}, fold_constants=True) ``` Outputs can be specified as a list of tensors, or alternatively, they can be renamed by mapping tensors to strings as new names. 
### Saving composite functions as a single operation Often, when exporting a graph, it is desirable to convert a subgraph (compound operation) into a single operation. This can be done by defining the subgraph in a Python function and annotating it with `@composite_function` of the `graphdef` module: ``` @graphdef.composite_function def my_compound_op( x, a, b ): return a * x + b ``` Then `graphdef.save_default_graph` will magically take care of the rest, by converting composite functions into `PyFunc` ops in the graph-def. Note however, that if you are exporting such graphs repeatedly, you have to call `graphdef.reset_composites()` before the definition of the graph. How exactly the signature of the function is converted depends on the invocation of the function: tensor arguments are converted to inputs, while non-tensor arguments are converted to attributes. It does not matter whether positional or keyword arguments are used. Outputs must be tensors: ``` graphdef.reset_composites() # define the graph x = tf.placeholder(shape=(2,3), dtype=tf.float32, name='input') y = my_compound_op(x, a=4, b=5) # x is treated as tensor, a and b as attributes with tf.Session() as sess: graphdef.save_default_graph('path/to/save.pb', session=sess, outputs={y: 'output'}) ``` When exporting models containing composite functions, if the model has dynamic shapes it is preferable to export it with concrete shapes and folding constants during export. This is because before converting composite functions to a single op, TF can still perform shape inference and constant folding automatically, but after the conversion, it cannot infer shapes and perform the computation of the `PyFunc` operations resulting from the composite functions. If there are no composite functions in the model, then concrete shapes can be provided later as well (during conversion), accompanied by constant folding. 
Collapsing composites to a single op when saving the graph can be turned off by `collapse_composites=False`. See `custom/composite_export_example.py` for more examples. #### **Important note** Composite functions **must not** get tensor inputs from other sources than the function arguments (such as global or class member variables). In that case, the code must be reorganized to make the actual composite function be called with explicitly marked tensor arguments. The same practice is also useful for attributes. In general, composite functions should be stateless. ## Custom converter plugins The coverage of the converter can be extended to custom operations. This is required for example, when one wants to convert a composite function. Such a function is exported to the protobuf model as a `PyFunc` operation, that records the name, attributes, inputs and outputs of the original composite function. However, a converter must be provided for that name. In the actual conversion process, the `PyFunc` node is replaced with an operator of the original name of the composite function, so that it can be referenced. The conversion of operations is governed by `nnef_tools.conversion.Transform` instances mapped to operator types. To add a new operator to be converted, one needs to provide a map entry for the operator. This is done by providing a Python module to the converter that contains the mapping for custom operators in a dict with the standard name `CUSTOM_TRANSFORMS`. The module is injected to the converter with the `--custom-converters` option: ``` --custom-converters my.custom.plugin.module ``` where `my/custom/plugin/module.py` is a Python module accessible to the Python interpreter (either by providing an absolute path or by setting `PYTHON_PATH`). Its contents may look like the following: ``` from nnef_tools.conversion import Transform def my_conversion_helper_func(converter, ...): ... 
CUSTOM_TRANSFORMS = { 'op_type_to_convert_from': Transform( type='op_type_to_convert_into', name='optional_name_of_resulting op', inputs=( # one entry for each input ), outputs=( # one entry for each output ), attribs={ # one entry for each attribute } ), } ``` Entries are for the resulting operator, and may be constant Python values or expressions to be evaluated by the Python interpreter. Such expressions are written as Python strings that start with the `!` character, for example `'!a+2'` evaluates the expression `a+2`. The expressions are evaluated in the context of the source operator (the one converted from) and the converter context (that is defined by the input and output formats). It consists of the following: * The type of the source operator is accessed via the identifier `_type_`. * The name of the source operator is accessed via the identifier `_name_`. * Inputs of the source operator are accessed via the identifier `I`, which is a Python `list`. For example the expression `'!I[0]'` results in the first input. * Outputs of the source operator are accessed via the identifier `O`, which is a Python `list`. For example, the expression `'len(O)'` results in the number of outptus. * Attributes of the source operator are accessed via identifiers that match the names of the attributes. For example if the source operator has attribute `a` then the expression `'!a'` takes its value. * Furthermore, the following can be used in building complex expressions: * All built-in Python operators and functions. * All public member functions (not starting with `_`) defined by the converter in effect. * All public functions (not starting with `_`) defined in the custom module. Such functions must take a converter as their first argument, but otherwise can take arbitrary arguments. The public methods of the converter can be used in their definition. 
The `Transform` can further contain a `using={'id': '!expr', ...}` field, which may define intermediate expressions that are evaluated first and can be used in other expressions for attributes/inputs/outputs. If the dictionary is ordered, the entries may depend on each other. Furthermore, by adding an optional `cond='!expr'` field to the `Transform`, it is possible to achieve conditional conversion, only when the given expression evaluates to `True`. Otherwise, the converter treats it as if there was no converter provided for the given operator. This is to allow conversion of operations with only certain attribute values. See `custom/custom_transforms_example.py` for more details. Similarly to the above mechanism, custom shape inference functions and custom operator definitions (fragments) can be plugged in to converters that convert from NNEF using the `--custom-shapes` and `--custom-fragments` option. This may be required for custom NNEF operators defined as fragments in the input when such fragments are not decomposed. The fragments and shape inference functions must be defined in python module(s) supplied after the `--custom-shape` or `--custom-fragments` option. The module may look like this: ``` def my_custom_shape_function(intput1_shape, ..., attrib1, ...) ... # assert validity of input shapes / attribs ... # return calculated output shape(s) CUSTOM_SHAPES = { 'my_custom_op': my_custom_shape_function, } ``` or ``` op_fragment = """ # NNEF fragment declaration/definition goes here """ CUSTOM_FRAGMENTS = { 'op-name': op_fragment, } ``` Furthermore, the `--decompose` option can be used to let the NNEF parser decompose the (composite) operators listed after the option (as separate args). Additionally, with a similar mechanism, custom optimization passes can also be injected to the converter. The optimizer can match sequential sub-graphs (chains), and replace them with another sequence of operations. 
To provide custom optimizer passes, the chains of operations to be replaced must be mapped onto functions that generate the replacement sequence after checking the chain to be replaced for validity:

```
def replace_my_chain(a, b, c):
    # a, b, c will contain the matched chain of ops in order when this is called
    ...  # check attributes of the chain a, b, c to see if it should really be replaced;
         # if not, return False (do not modify the graph before all checks)
    ...  # create new tensors and operations in the graph that will replace the chain
    ...  # either return nothing (None), or any non-False value

CUSTOM_OPTIMIZERS = {
    ('a', 'b', 'c'): replace_my_chain,   # use a tuple as key, since list is not hashable
}
```

See `custom/custom_optimizers_example.py` for more info.

## Executing a model and saving activations

A separate tool (`execute.py`) is available for executing a model. It requires a model and a format to be specified.
The inputs may be read from the (binary) input stream and outputs may be written to the (binary) output stream.
Tensor data files can be piped as inputs and outputs:

```
python -m nnef_tools.execute < input.dat my_model.pb --format tf > output.dat
```

Alternatively, inputs can be randomly generated, and selected activations may be written to a folder, allowing to specify a different name:

```
python -m nnef_tools.execute my_model.pb --format tf --random "uniform(0,1)" --seed 0 --output-path . --output-names "{'tensor-name1': 'save-name1', ...}"
```

Further options to the model executor:

* The `--batch-size` option can be used to perform batched execution if a model specifies batch size of 1 in its inputs, supplying the desired batch size. If the supplied batch size is 0, it means that the (common) batch size of the actual inputs is used. Furthermore, when the supplied batch size equals the one defined by the model, execution will be done one-by-one instead of a single batch, which may be useful for reducing the memory footprint.
* The `--statistics` flag (followed by an optional output file path) can be used to generate activation statistics and save it in json format. * The `--tensor-mapping` option can be used to provide a tensor name mapping obtained from the conversion step to the executor, used in remapping tensor names when generating statistics. This may be useful for comparing executions of the same model in different formats. * Inputs and outputs (or activations) may need transposing before feeding into execution or after execution upon saving. This can be achieved with the `--io-transpose` flag. If no further arguments are listed, all tensors are transposed, but the transposed tensors can be controlled by enumerating a list of tensor names (as separate args). Inputs read from the input stream are transposed from channels first to last, while the outputs that are written to the output stream or saved are transposed from channels last to first if the format dictates so (TF/Lite). * The `--decompose` option can be used to let the NNEF parser decompose the (composite) operators listed after the option (as separate args). * The `--custom-operators` option can be used to inject custom operators to the executor by supplying a python module after the option. The contents of the module may look like this: ``` def my_custom_op(input1, ..., attrib1, ...): ... # calculate output using inputs / attribs CUSTOM_OPERATORS = { 'my_custom_op': my_custom_op, } ``` See `custom/custom_operators_example.py` for more info. Further tools are available for generating random tensors (`random_tensor.py`) and converting images to tensors (`image_tensor.py`). These tools write their results to the output stream and can be directed into a file or piped to `execute.py`. ## Visualizing a model NNEF models can be visualized with the `visualize.py` tool. 
The tool generates an svg/pdf/png rendering of the NNEF graph:

```
python -m nnef_tools.visualize my_model.nnef --format svg
```

By default, the render only contains the names of operations and tensors. In case of an svg output, _tooltips_ contain more details about nodes (op attributes, tensor dtypes and shapes). The shapes are only calculated if the `--infer-shapes` flag is turned on. To include those details in the render itself, use the `--verbose` flag.

## GMAC calculation

The script `gmac.py` can be used to calculate the GMACs required to execute a model. By default, it only calculates linear operations (convolutions, matrix multiplies), but it is possible to add other groups of operations (pooling, normalization, reduction, up-sampling) into the calculation:

```
python -m nnef_tools.gmac my_model.nnef --include-pooling
```

The calculation requires shape inference, so in case of custom operators, the `--custom-shapes` option should be used (same as for `convert.py`).

## Troubleshooting

Several things can go wrong during various stages of conversion, and sometimes it's hard to find where exactly it happened. Here are a few tips on how to get started:

* If the export process starts from Python code in a framework such as TensorFlow or PyTorch, the first step is saving the model into a framework specific format, such as TensorFlow protobuf or ONNX in case of PyTorch.
* Check the resulting model to see if it accurately reflects the framework code. TensorBoard or Netron viewer can be used for this purpose.
* If there is an error in this step, try to turn off certain flags during saving. For example in `nnef_tools.io.tf.graphdef.save_default_graph`, try turning off the `fold_constants` and `collapse_composites` flags. The first merges operations on constant tensors, the second one merges composite operators into a single piece. By turning them off, errors in these transformation steps can be excluded.
* If the conversion from any model format to NNEF fails, typical reasons are as follows: * Conversion of some operator is not implemented. In this case, adding a custom converter using the `--custom-converters` option can solve the problem. * There is a bug in the converter; for example it does not support some parameter/version of an operator. In this case file a bug for `nnef_tools`. * After the conversion to NNEF succeeds, check the converted model by executing it (`nnef_tools.execute`) on some (maybe random) inputs. * Execution may itself fail if there are custom operators in the model, in which case custom executors can be injected with the `--custom-operators` option. * If executed on non-random inputs, the outputs can be compared to results obtained from executing the same model in the original framework, or after saving it and executing the saved model (`nnef_tools.execute`). By comparing the results of those three stages, it is possible to tell in which stage something goes wrong. However, make sure to feed the same inputs to all stages, and beware that NNEF dimension order (channels first) is different from TensorFlow dimension order (channels last). * If the failing stage is the saving step, see above for turning off certain options too see if those are the culprits. * If the failing stage is the conversion step, first make sure to isolate optimizations by not using the `--optimize` option. The same goes for the `--fold-constants` option to see if that causes problems. * If conversion fails even without optimization and constant folding, it is usually due to the conversion of one of the operations, which must be found. Ideally, one would compare the intermediate tensors after each operation in a sequence, but exact comparison is hard to do automatically due to non 1-1 mappings during the conversion. 
However, generating statistics (`nnef_tools.execute --statistics`) for the same input for both models allows comparison of how execution proceeds in the two models and finding where the first difference occurs. * When in doubt about some of the tools and this documentation does not provide enough information, check the help of the command-line tool itself (`-h` or `--help`) option. ================================================ FILE: nnef_tools-pyproject/pyproject.toml ================================================ [project] name = "nnef_tools" version = "1.0.10" description = "A package for managing NNEF files" requires-python = ">=3.7" classifiers = [ 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 3', ] dynamic = ["readme"] license = { file = "LICENSE" } keywords = ["nnef"] authors = [ { name = "Viktor Gyenes", email = "viktor.gyenes@aimotive.com" }, { name = "Tamas Danyluk", email = "9149812+tdanyluk@users.noreply.github.com" }, ] maintainers = [{ name = "Viktor Gyenes", email = "viktor.gyenes@aimotive.com" }] dependencies = ["future", "numpy", "six", "nnef"] [build-system] requires = ["setuptools"] build-backend = "setuptools.build_meta" [project.optional-dependencies] caffe = ["protobuf", "torch"] onnx = ["protobuf", "onnx", "onnx-simplifier", "onnxruntime"] tensorflow-lite = ["nnef_tools[tensorflow-protobuf]", "flatbuffers"] tensorflow-protobuf = ["tensorflow"] visualization = ["graphviz"] full = ["nnef_tools[tensorflow-lite,onnx,caffe,visualization]"] [project.urls] "Homepage" = "https://www.khronos.org/nnef" "Repository" = "https://github.com/KhronosGroup/NNEF-Tools" [tool.setuptools.dynamic] readme = { file = ["package_info.md"], content-type = "text/markdown" } [tool.setuptools] package-dir = {"nnef_tools" = "nnef_tools"} ================================================ FILE: nnef_tools-pyproject/tests/conversion/graphdef_test.py 
================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from nnef_tools.io.tf.graphdef.protobuf import GraphDef import nnef_tools.io.nnef as nnef_io import nnef_tools.io.tf.graphdef as graphdef import nnef_tools.conversion.tf_to_nnef as tf_to_nnef import nnef_tools.conversion.nnef_to_tf as nnef_to_tf import nnef_tools.optimization.nnef_optimizer as nnef_opt import nnef_tools.optimization.tf_optimizer as tf_opt import unittest import tempfile import os try: import tensorflow.compat.v1 as tf tf.disable_v2_behavior() except ImportError: import tensorflow as tf UNITTEST_FOLDER = os.environ.get('UNITTEST_FOLDER') class TestEnv(unittest.TestCase): _network_folder = os.path.join(UNITTEST_FOLDER, 'tf/nets/') if UNITTEST_FOLDER else None _output_folder = os.path.join(UNITTEST_FOLDER, 'tf/ops/') if UNITTEST_FOLDER else None _io_transpose = True _optimize = True def setUp(self) -> None: self._tf_reader = graphdef.Reader(fold_constants=True) self._tf_writer = graphdef.Writer() self._tf_optimizer = tf_opt.Optimizer() self._nnef_optimizer = nnef_opt.Optimizer() self._tf_to_nnef_converter = tf_to_nnef.Converter(io_transpose=self._io_transpose) self._nnef_to_tf_converter = nnef_to_tf.Converter(io_transpose=self._io_transpose) self._nnef_reader = nnef_io.Reader(custom_shapes=self._nnef_to_tf_converter.defined_shapes(), decomposed=self._nnef_to_tf_converter.decomposed_operations()) 
self._nnef_writer = nnef_io.Writer(fragments=self._tf_to_nnef_converter.defined_operations()) def tearDown(self) -> None: tf.reset_default_graph() @staticmethod def _save_graph_def(graph_def, filename): with open(filename, 'wb') as file: file.write(graph_def.SerializeToString()) @staticmethod def _load_graph_def(filename): graph_def = GraphDef() with open(filename, 'rb') as file: graph_def.ParseFromString(file.read()) return graph_def @staticmethod def _numpy_dtype(tensor): return np.bool_ if tensor.dtype.is_bool else tensor.dtype.as_numpy_dtype() @staticmethod def _exec_graph_def(graph_def, only_first_output=False): np.random.seed(0) tf.reset_default_graph() tf.import_graph_def(graph_def, name='') ops = tf.get_default_graph().get_operations() consumed = {tensor for op in ops for tensor in op.inputs} inputs = [op.outputs[0] for op in ops if op.type == 'Placeholder'] if only_first_output: outputs = [op.outputs[0] for op in ops if len(op.inputs) and op.outputs[0] not in consumed] else: outputs = [tensor for op in ops if len(op.inputs) for tensor in op.outputs if tensor not in consumed] feed_dict = {tensor: TestEnv._random_data(TestEnv._numpy_dtype(tensor), tensor.shape.as_list()) for tensor in inputs} with tf.Session() as sess: return sess.run(outputs, feed_dict=feed_dict) @staticmethod def _random_data(dtype, shape): if dtype == bool: return np.random.random(shape) > 0.5 else: return np.random.random(shape).astype(dtype) def _convert_to_nnef(self, filename, input_shapes=None): tf_graph = self._tf_reader(filename, input_shapes=input_shapes) tf_graph = self._tf_optimizer(tf_graph) nnef_graph = self._tf_to_nnef_converter(tf_graph) if self._optimize: nnef_graph = self._nnef_optimizer(nnef_graph) self._nnef_writer(nnef_graph, filename + '.nnef') def _convert_from_nnef(self, filename): nnef_graph = self._nnef_reader(filename) tf_graph = self._nnef_to_tf_converter(nnef_graph) self._tf_writer(tf_graph, filename + '.pb') def _test_conversion(self, name, 
only_first_output=True, epsilon=1e-5): filename = tempfile.mktemp() if self._output_folder is None else TestEnv._output_folder + name + '.pb' graph_def = tf.get_default_graph().as_graph_def(add_shapes=True) self._save_graph_def(graph_def, filename) self._test_conversion_from_file(filename, only_first_output=only_first_output, epsilon=epsilon) def _test_conversion_from_file(self, filename, only_first_output=True, input_shapes=None, epsilon=1e-5): self._convert_to_nnef(filename, input_shapes) self._convert_from_nnef(filename + '.nnef') original_graph_def = self._load_graph_def(filename) converted_graph_def = self._load_graph_def(filename + '.nnef.pb') if input_shapes is not None: original_graph_def = graphdef.set_input_shapes(original_graph_def, input_shapes) original_outputs = self._exec_graph_def(original_graph_def, only_first_output) converted_outputs = self._exec_graph_def(converted_graph_def, only_first_output) self.assertEqual(len(original_outputs), len(converted_outputs)) for original, converted in zip(original_outputs, converted_outputs): if original.dtype == bool: self.assertTrue(np.all(original == converted)) else: diff = np.max(np.abs(original - converted)) self.assertLess(diff, epsilon) class TestCases(TestEnv): def test_conv1d(self): input = tf.placeholder(shape=(4, 32, 3), dtype=tf.float32) filter = tf.constant(np.random.random(size=(5, 3, 16)), dtype=tf.float32) output = tf.nn.conv1d(input, filter, stride=1, padding='SAME') self._test_conversion('conv1d') def test_conv2d(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) filter = tf.constant(np.random.random(size=(5, 5, 3, 16)), dtype=tf.float32) output = tf.nn.conv2d(input, filter, strides=1, padding='SAME') self._test_conversion('conv2d') def test_conv3d(self): input = tf.placeholder(shape=(4, 32, 32, 32, 3), dtype=tf.float32) filter = tf.constant(np.random.random(size=(5, 5, 5, 3, 16)), dtype=tf.float32) output = tf.nn.conv3d(input, filter, strides=[1, 1, 1, 1, 1], padding='SAME') 
self._test_conversion('conv3d') def test_conv2d_explicit_padding(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) filter = tf.constant(np.random.random(size=(5, 5, 3, 16)), dtype=tf.float32) output = tf.nn.conv2d(input, filter, strides=1, padding=[(0, 0), (2, 2), (2, 2), (0, 0)]) self._test_conversion('conv2d-explicit-padding') def test_conv2d_transpose(self): input = tf.placeholder(shape=(4, 32, 32, 16), dtype=tf.float32) filter = tf.constant(np.random.random(size=(5, 5, 3, 16)), dtype=tf.float32) output = tf.nn.conv2d_transpose(input, filter, strides=1, padding='SAME', output_shape=(4, 32, 32, 3)) self._test_conversion('conv2d_transpose') def test_depthwise_conv2d(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) filter = tf.constant(np.random.random(size=(5, 5, 3, 2)), dtype=tf.float32) output = tf.nn.depthwise_conv2d(input, filter, strides=[1, 1, 1, 1], padding='SAME') self._test_conversion('depthwise_conv2d') def test_depthwise_conv2d_transpose(self): input = tf.placeholder(shape=(4, 32, 32, 6), dtype=tf.float32) filter = tf.constant(np.random.random(size=(5, 5, 3, 2)), dtype=tf.float32) output = tf.nn.depthwise_conv2d_backprop_input([4, 32, 32, 3], filter, input, strides=[1, 1, 1, 1], padding='SAME') self._test_conversion('depthwise_conv2d_transpose') def test_conv2d_dilated(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) filter = tf.constant(np.random.random(size=(5, 5, 3, 16)), dtype=tf.float32) output = tf.nn.conv2d(input, filter, strides=1, dilations=2, padding='SAME') self._test_conversion('conv2d_dilated') def test_max_pool2d(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.nn.max_pool2d(input, ksize=3, strides=1, padding='SAME') self._test_conversion('max_pool2d') def test_max_pool2d_with_index(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) maximum, index = tf.nn.max_pool_with_argmax(input, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], 
padding='SAME') self._test_conversion('max_pool2d_with_index') def test_avg_pool2d(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.nn.avg_pool2d(input, ksize=3, strides=1, padding='SAME') self._test_conversion('avg_pool2d') def test_min_reduce(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.reduce_min(input, axis=3, keepdims=True) self._test_conversion('min_reduce') def test_max_reduce(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.reduce_max(input, axis=3, keepdims=True) self._test_conversion('max_reduce') def test_mean_reduce(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.reduce_mean(input, axis=3, keepdims=True) self._test_conversion('mean_reduce') def test_sum_reduce(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.reduce_sum(input, axis=3, keepdims=True) self._test_conversion('sum_reduce') def test_any_reduce(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.bool) output = tf.reduce_any(input, axis=3, keepdims=True) self._test_conversion('any_reduce') def test_all_reduce(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.bool) output = tf.reduce_all(input, axis=3, keepdims=True) self._test_conversion('all_reduce') def test_argmin_reduce(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.argmin(input, axis=-1) output = tf.expand_dims(output, axis=-1) self._test_conversion('axgmin_reduce') def test_argmax_reduce(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.argmax(input, axis=-1) output = tf.expand_dims(output, axis=-1) self._test_conversion('axgmax_reduce') def test_concat(self): input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) input2 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) otuput = tf.concat([input1, input2], axis=3) self._test_conversion('concat') def 
test_split_sizes(self): input = tf.placeholder(shape=(4, 32, 32, 6), dtype=tf.float32) [output1, output2] = tf.split(input, axis=3, num_or_size_splits=[3, 3]) self._test_conversion('split-sizes') def test_split_num(self): input = tf.placeholder(shape=(4, 32, 32, 6), dtype=tf.float32) [output1, output2] = tf.split(input, axis=3, num_or_size_splits=2) self._test_conversion('split-num') def test_reshape(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.reshape(input, shape=(4, 32 * 32 * 3)) self._test_conversion('reshape') def test_flatten(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.reshape(input, shape=(4, -1)) self._test_conversion('flatten') def test_squeeze(self): input = tf.placeholder(shape=(4, 32, 32, 1), dtype=tf.float32) squeezed = tf.squeeze(input, axis=[3]) output = tf.expand_dims(squeezed, axis=[3]) self._test_conversion('squeeze') def test_squeeze_all(self): input = tf.placeholder(shape=(4, 32, 32, 1), dtype=tf.float32) squeezed = tf.squeeze(input) output = tf.expand_dims(squeezed, axis=[3]) self._test_conversion('squeeze_all') def test_transpose(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) trans = tf.transpose(input, perm=(0, 3, 1, 2)) output = tf.transpose(trans, perm=(0, 2, 3, 1)) self._test_conversion('transpose') def test_stack(self): input1 = tf.placeholder(shape=(4, 32, 32, 1), dtype=tf.float32) input2 = tf.placeholder(shape=(4, 32, 32, 1), dtype=tf.float32) input1 = tf.squeeze(input1, axis=3) input2 = tf.squeeze(input2, axis=3) output = tf.stack([input1, input2], axis=3) self._test_conversion('stack') def test_unstack(self): input = tf.placeholder(shape=(4, 32, 32, 2), dtype=tf.float32) [output1, output2] = tf.unstack(input, axis=3) output1 = tf.expand_dims(output1, axis=3) output2 = tf.expand_dims(output2, axis=3) self._test_conversion('unstack') def test_add(self): input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) input2 = 
tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.add(input1, input2) self._test_conversion('add') def test_add_broadcast(self): input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) input2 = tf.placeholder(shape=(3,), dtype=tf.float32) output = tf.add(input1, input2) self._test_conversion('add-broadcast') def test_sub(self): input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) input2 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.subtract(input1, input2) self._test_conversion('sub') def test_sub_broadcast(self): input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) input2 = tf.placeholder(shape=(3,), dtype=tf.float32) output = tf.subtract(input1, input2) self._test_conversion('sub-broadcast') def test_mul(self): input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) input2 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.multiply(input1, input2) self._test_conversion('mul') def test_mul_broadcast(self): input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) input2 = tf.placeholder(shape=(3,), dtype=tf.float32) output = tf.multiply(input1, input2) self._test_conversion('mul-broadcast') def test_div(self): input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) input2 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.divide(input1, input2) self._test_conversion('div') def test_div_boradcast(self): input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) input2 = tf.placeholder(shape=(3,), dtype=tf.float32) output = tf.divide(input1, input2) self._test_conversion('div-broadcast') def test_pow(self): input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) input2 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.pow(input1, input2) self._test_conversion('pow') def test_min(self): input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) input2 = tf.placeholder(shape=(4, 32, 32, 3), 
dtype=tf.float32) output = tf.minimum(input1, input2) self._test_conversion('min') def test_max(self): input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) input2 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.maximum(input1, input2) self._test_conversion('max') def test_and(self): input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.bool) input2 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.bool) output = tf.logical_and(input1, input2) self._test_conversion('and') def test_or(self): input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.bool) input2 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.bool) output = tf.logical_or(input1, input2) self._test_conversion('or') def test_lt(self): input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) input2 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.less(input1, input2) self._test_conversion('lt') def test_gt(self): input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) input2 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.greater(input1, input2) self._test_conversion('gt') def test_le(self): input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) input2 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.less_equal(input1, input2) self._test_conversion('le') def test_ge(self): input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) input2 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.greater_equal(input1, input2) self._test_conversion('ge') def test_eq(self): input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) input2 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.equal(input1, input2) self._test_conversion('eq') def test_ne(self): input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) input2 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.not_equal(input1, input2) self._test_conversion('ne') def 
test_identity(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.identity(input) self._test_conversion('identity') def test_relu(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.nn.relu(input) self._test_conversion('relu') def test_elu(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.nn.elu(input) self._test_conversion('elu') def test_selu(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.nn.selu(input) self._test_conversion('selu') def test_relu6(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.nn.relu6(input) self._test_conversion('relu6') def test_leaky_relu(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.nn.leaky_relu(input, alpha=0.1) self._test_conversion('leaky_relu') def test_sigmoid(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.nn.sigmoid(input) self._test_conversion('sigmoid') def test_softplus(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.nn.softplus(input) self._test_conversion('softplus') def test_exp(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.math.exp(input) self._test_conversion('exp') def test_log(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.math.log(input) self._test_conversion('log') def test_sin(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.math.sin(input) self._test_conversion('sin') def test_cos(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.math.cos(input) self._test_conversion('cos') def test_tan(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.math.tan(input) self._test_conversion('tan') def test_sinh(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.math.sinh(input) 
self._test_conversion('sinh') def test_cosh(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.math.cosh(input) self._test_conversion('cosh') def test_tanh(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.math.tanh(input) self._test_conversion('tanh') def test_sign(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.math.sign(input) self._test_conversion('sign') def test_abs(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.math.abs(input) self._test_conversion('abs') def test_neg(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.math.negative(input) self._test_conversion('neg') def test_rcp(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.math.reciprocal(input) self._test_conversion('rcp') def test_floor(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.math.floor(input) self._test_conversion('floor') def test_ceil(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.math.ceil(input) self._test_conversion('ceil') def test_round(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.math.round(input) self._test_conversion('round') def test_sqr(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.math.square(input) self._test_conversion('sqr') def test_sqrt(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.math.sqrt(input) self._test_conversion('sqrt') def test_rsqrt(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.math.rsqrt(input) self._test_conversion('rsqrt') def test_not(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.bool) output = tf.math.logical_not(input) self._test_conversion('not') def test_cast(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.int32) output = 
tf.cast(input, dtype=tf.float32) self._test_conversion('cast') def test_cast_ints(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.int32) cast = tf.cast(input, dtype=tf.int8) output = tf.cast(cast, dtype=tf.int32) self._test_conversion('cast_ints') def test_cast_float_bool(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.cast(input, dtype=tf.bool) self._test_conversion('cast_float_bool') def test_select(self): cond = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.bool) left = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) right = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.where(cond, left, right) self._test_conversion('select') def test_clamp(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.clip_by_value(input, 0.2, 0.8) self._test_conversion('clamp') def test_batch_norm(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) mean = tf.constant(np.random.random((3,)), dtype=tf.float32) variance = tf.constant(np.random.random((3,)), dtype=tf.float32) scale = tf.constant(np.random.random((3,)), dtype=tf.float32) offset = tf.constant(np.random.random((3,)), dtype=tf.float32) outputs = tf.nn.batch_normalization(input, scale=scale, offset=offset, mean=mean, variance=variance, variance_epsilon=1e-5) self._test_conversion('batch_norm') def test_fused_batch_norm(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) mean = tf.constant(np.random.random((3,)), dtype=tf.float32) variance = tf.constant(np.random.random((3,)), dtype=tf.float32) scale = tf.constant(np.random.random((3,)), dtype=tf.float32) offset = tf.constant(np.random.random((3,)), dtype=tf.float32) outputs = tf.nn.fused_batch_norm(input, scale=scale, offset=offset, mean=mean, variance=variance, is_training=False) self._test_conversion('fused_batch_norm') def test_bias_add(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) bias = 
tf.constant(np.random.random((3,)), dtype=tf.float32) output = tf.nn.bias_add(input, bias) self._test_conversion('bias_add') def test_softmax(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.nn.softmax(input) self._test_conversion('softmax') def test_conv_bias_relu_pool(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) filter = tf.constant(np.random.random(size=(5, 5, 3, 16)), dtype=tf.float32) bias = tf.constant(np.random.random(size=16,), dtype=tf.float32) mean = tf.constant(np.random.random(size=16, ), dtype=tf.float32) variance = tf.constant(np.random.random(size=16, ), dtype=tf.float32) scale = tf.constant(np.random.random(size=16, ), dtype=tf.float32) offset = tf.constant(np.random.random(size=16, ), dtype=tf.float32) filtered = tf.nn.conv2d(input, filter, strides=1, padding='SAME') biased = tf.nn.bias_add(filtered, bias) normed, _mean, _variance = tf.nn.fused_batch_norm(biased, scale, offset, mean, variance, is_training=False) relu = tf.nn.relu(normed) pooled = tf.nn.max_pool2d(relu, ksize=2, strides=2, padding='SAME') self._test_conversion('conv_bias_relu_pool', epsilon=1e-4) def test_conv_mul_add(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) filter = tf.constant(np.random.random(size=(5, 5, 3, 16)), dtype=tf.float32) bias = tf.constant(np.random.random(size=16, ), dtype=tf.float32) scale = tf.constant(np.random.random(size=16, ), dtype=tf.float32) offset = tf.constant(np.random.random(size=16, ), dtype=tf.float32) filtered = tf.nn.conv2d(input, filter, strides=1, padding='SAME') biased = tf.nn.bias_add(filtered, bias) scaled = biased * scale output = scaled + offset self._test_conversion('conv_mul_add', epsilon=1e-4) def test_mul_conv(self): input = tf.placeholder(shape=(4, 32, 32, 8), dtype=tf.float32) filter1 = tf.constant(np.random.random(size=(5, 5, 8, 16)), dtype=tf.float32) filter2 = tf.constant(np.random.random(size=(5, 5, 8, 16)), dtype=tf.float32) bias1 = 
tf.constant(np.random.random(size=16, ), dtype=tf.float32) bias2 = tf.constant(np.random.random(size=16, ), dtype=tf.float32) scale = tf.constant(np.random.random(size=8, ), dtype=tf.float32) scaled = input * scale filtered1 = tf.nn.conv2d(scaled, filter1, strides=1, padding='SAME') filtered2 = tf.nn.conv2d(scaled, filter2, strides=1, padding='SAME') biased1 = tf.nn.bias_add(filtered1, bias1) biased2 = tf.nn.bias_add(filtered2, bias2) self._test_conversion('mul_conv', epsilon=1e-4) def test_matmul(self): input1 = tf.placeholder(shape=(10, 100), dtype=tf.float32) input2 = tf.placeholder(shape=(100, 20), dtype=tf.float32) output = tf.matmul(input1, input2) self._test_conversion('matmul') def test_matmul_trans(self): input1 = tf.placeholder(shape=(10, 100), dtype=tf.float32) input2 = tf.placeholder(shape=(20, 100), dtype=tf.float32) output = tf.matmul(input1, input2, transpose_b=True) self._test_conversion('matmul-trans') def test_pad(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.pad(input, paddings=[[0, 0], [1, 2], [1, 2], [0, 0]]) self._test_conversion('pad') def test_pad_mirror(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.pad(input, paddings=[[0, 0], [1, 2], [1, 2], [0, 0]], mode='REFLECT') self._test_conversion('pad_reflect') def test_pad_symmetric(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.pad(input, paddings=[[0, 0], [1, 2], [1, 2], [0, 0]], mode='SYMMETRIC') self._test_conversion('pad_symmetric') def test_slice(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.slice(input, begin=[0, 1, 1, 0], size=[4, 30, 30, 3]) self._test_conversion('slice') def test_strided_slice(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = input[:, 1:-1, 1:-1, :] self._test_conversion('strided_slice') def test_strided_slice_shrink_axis(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = 
input[:, 1:-1, 1:-1, 1] output = tf.expand_dims(output, axis=3) self._test_conversion('strided_slice-shrink_axis') def test_strided_slice_new_axis(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = input[:, 1:-1, 1:-1, tf.newaxis, :] output = tf.squeeze(output, axis=3) self._test_conversion('strided_slice-new_axis') def test_strided_slice_flip(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = input[:, -2:0:-1, -2:0:-1, :] self._test_conversion('strided_slice-flip') def test_tile(self): input = tf.placeholder(shape=(4, 1, 1, 3), dtype=tf.float32) output = tf.tile(input, multiples=(1, 32, 32, 1)) self._test_conversion('tile') def test_gather(self): input = tf.placeholder(shape=(4, 32, 32, 16), dtype=tf.float32) indices = tf.constant(np.random.random_integers(size=(24,), low=0, high=15), dtype=tf.int32) output = tf.gather(input, indices, axis=3) self._test_conversion('gather') def test_upsample_nearest(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.image.resize_nearest_neighbor(input, size=(64, 64)) self._test_conversion('upsample-nearest') def test_downsample_nearest(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.image.resize_nearest_neighbor(input, size=(16, 16)) self._test_conversion('downsample-nearest') def test_downsample_area(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.image.resize_area(input, size=(16, 16)) self._test_conversion('downsample-area') def test_upsample_linear(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.image.resize_bilinear(input, size=(64, 64)) self._test_conversion('upsample-linear') def test_lrn(self): input = tf.placeholder(shape=(4, 32, 32, 8), dtype=tf.float32) output = tf.nn.local_response_normalization(input, depth_radius=2) self._test_conversion('lrn') def test_l2_normalize(self): input = tf.placeholder(shape=(4, 32, 32, 3), 
dtype=tf.float32) output = tf.math.l2_normalize(input, axis=-1) self._test_conversion('l2_normalize') def test_add_n(self): input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32) output = tf.add_n([input, input, input]) self._test_conversion('add_n') @unittest.skipIf(TestEnv._network_folder is None or not os.path.isdir(TestEnv._network_folder), "no network test folder provided") class NetworkTestCases(TestEnv): def test_mobilenet_v1(self): self._test_conversion_from_file(self._network_folder + 'mobilenet_v1.pb', input_shapes={'input': (1, 224, 224, 3)}) def test_mobilenet_v2(self): self._test_conversion_from_file(self._network_folder + 'mobilenet_v2.pb', input_shapes={'input': (1, 224, 224, 3)}) def test_inception_v3(self): self._test_conversion_from_file(self._network_folder + 'inception_v3.pb', input_shapes={'input': (1, 299, 299, 3)}) def test_inception_v4(self): self._test_conversion_from_file(self._network_folder + 'inception_v4.pb', input_shapes={'input': (1, 299, 299, 3)}) def test_inception_resnet_v2(self): self._test_conversion_from_file(self._network_folder + 'inception_resnet_v2.pb', input_shapes={'input': (1, 299, 299, 3)}) def test_squeezenet(self): self._test_conversion_from_file(self._network_folder + 'squeezenet.pb', input_shapes={'Placeholder': (1, 224, 224, 3)}) def test_nasnet(self): self._test_conversion_from_file(self._network_folder + 'nasnet_mobile.pb', input_shapes={'input': (1, 224, 224, 3)}) if __name__ == '__main__': unittest.main() ================================================ FILE: nnef_tools-pyproject/tests/conversion/onnx_test.py ================================================ # Copyright (c) 2020 The Khronos Group Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Round-trip conversion tests for ONNX <-> NNEF: build a small ONNX model,
# convert onnx -> NNEF -> onnx, then execute both with onnxruntime on
# identical random inputs and compare outputs.

import nnef_tools.io.nnef as nnef_io
import nnef_tools.io.onnx as onnx_io
import nnef_tools.conversion.onnx_to_nnef as onnx_to_nnef
import nnef_tools.conversion.nnef_to_onnx as nnef_to_onnx
import nnef_tools.optimization.nnef_optimizer as nnef_opt
import nnef_tools.optimization.onnx_optimizer as onnx_opt
import numpy as np
import unittest
import tempfile
import onnx
import sys
import os
from onnx import helper, TensorProto

# When set, intermediate artifacts (.onnx/.nnef) are kept under this folder
# instead of temp files, and network-level tests are enabled.
UNITTEST_FOLDER = os.environ.get('UNITTEST_FOLDER')


class TestEnv(unittest.TestCase):
    """Shared fixture: readers/writers/converters and the round-trip helpers."""

    DEFAULT_OPSET_VERSION = 11
    DEFAULT_IR_VERSION = 6

    # map of onnxruntime input type strings to numpy dtypes for random feeds
    _type_to_numpy = {
        "tensor(float)": np.float32,
        "tensor(double)": np.float64,
        "tensor(int8)": np.int8,
        "tensor(int16)": np.int16,
        "tensor(int32)": np.int32,
        "tensor(int64)": np.int64,
        "tensor(uint8)": np.uint8,
        "tensor(uint16)": np.uint16,
        "tensor(uint32)": np.uint32,
        "tensor(uint64)": np.uint64,
        "tensor(bool)": np.bool_,
    }

    _network_folder = os.path.join(UNITTEST_FOLDER, 'onnx/nets/') if UNITTEST_FOLDER else None
    _output_folder = os.path.join(UNITTEST_FOLDER, 'onnx/ops/') if UNITTEST_FOLDER else None
    _infer_shapes = False
    _optimize = True

    def setUp(self) -> None:
        self._onnx_reader = onnx_io.Reader(simplify=True)
        self._onnx_writer = onnx_io.Writer()
        self._nnef_optimizer = nnef_opt.Optimizer()
        self._onnx_optimizer = onnx_opt.Optimizer()
        self._onnx_to_nnef_converter = onnx_to_nnef.Converter(infer_shapes=self._infer_shapes)
        self._nnef_to_onnx_converter = nnef_to_onnx.Converter()
        # the NNEF reader must know the shapes/decompositions of the fragments
        # that the NNEF->ONNX converter understands
        self._nnef_reader = nnef_io.Reader(custom_shapes=self._nnef_to_onnx_converter.defined_shapes(),
                                           decomposed=self._nnef_to_onnx_converter.decomposed_operations())
        self._nnef_writer = nnef_io.Writer(fragments=self._onnx_to_nnef_converter.defined_operations(),
                                           fragment_dependencies=self._onnx_to_nnef_converter.defined_operation_dependencies())

    def tearDown(self) -> None:
        pass

    def _convert_to_nnef(self, filename):
        # onnx -> (optional ONNX optimizer) -> NNEF -> (optional NNEF optimizer) -> filename.nnef
        onnx_graph = self._onnx_reader(filename)
        if self._optimize:
            onnx_graph = self._onnx_optimizer(onnx_graph)
        nnef_graph = self._onnx_to_nnef_converter(onnx_graph)
        if self._optimize:
            nnef_graph = self._nnef_optimizer(nnef_graph)
        self._nnef_writer(nnef_graph, filename + '.nnef')

    def _convert_from_nnef(self, filename):
        # filename(.nnef) -> ONNX graph -> filename.onnx
        nnef_graph = self._nnef_reader(filename)
        onnx_graph = self._nnef_to_onnx_converter(nnef_graph)
        self._onnx_writer(onnx_graph, filename + '.onnx')

    @staticmethod
    def _random_data(dtype, shape):
        # Uniform [0,1) data of the given shape; thresholded at 0.5 for bools.
        if dtype == bool:
            return np.random.random(shape) > 0.5
        else:
            return np.random.random(shape).astype(dtype)

    @staticmethod
    def _exec_model(filename):
        """Run the model with onnxruntime on seeded random inputs; return outputs."""
        import onnxruntime
        np.random.seed(0)  # deterministic feeds so original/converted runs match
        options = onnxruntime.SessionOptions()
        options.inter_op_num_threads = 1
        options.intra_op_num_threads = 1
        # disable runtime graph rewrites so the model executes as converted
        options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_DISABLE_ALL
        session = onnxruntime.InferenceSession(filename, sess_options=options, providers=['CPUExecutionProvider'])
        inputs = {input.name: TestEnv._random_data(TestEnv._type_to_numpy[input.type], input.shape)
                  for input in session.get_inputs()}
        outputs = session.run([output.name for output in session.get_outputs()], inputs)
        return outputs

    @staticmethod
    def _create_tensor(value_info, data):
        # Build an initializer TensorProto for a value-info; random data if none given.
        name, shape, dtype = onnx_io.reader._get_value_info(value_info)
        if data is None:
            data = TestEnv._random_data(dtype, shape)
        elif not isinstance(data, np.ndarray):
            data = np.array(data)
        return helper.make_tensor(name, helper.np_dtype_to_tensor_dtype(np.dtype(dtype)), shape, vals=data.flat)

    @staticmethod
    def _create_model(name, nodes, inputs, outputs, constants, values, opset_version, ir_version):
        # Assemble and validate a ModelProto; constants become initializers.
        tensors = [TestEnv._create_tensor(item, values.get(item.name)) for item in constants]
        graph_def = helper.make_graph(nodes, name, inputs, outputs, value_info=constants, initializer=tensors)
        model_def = helper.make_model(graph_def, producer_name='nnef-to-onnx-test')
        model_def.opset_import[0].version = opset_version
        model_def.ir_version = ir_version
        onnx.checker.check_model(model_def, full_check=True)
        return model_def

    @staticmethod
    def _save_model(model_def, filename):
        # Serialize a ModelProto to a binary .onnx file.
        with open(filename, 'wb') as file:
            file.write(model_def.SerializeToString())

    def _test_conversion(self, name, nodes, inputs, outputs, constants=None, values=None,
                         opset_version=DEFAULT_OPSET_VERSION, ir_version=DEFAULT_IR_VERSION, epsilon=1e-5):
        """Build a model from the given nodes, save it, and round-trip test it."""
        filename = tempfile.mktemp() if self._output_folder is None else TestEnv._output_folder + name + '.onnx'
        model_def = self._create_model('G', nodes, inputs, outputs, constants or [], values or {},
                                       opset_version, ir_version)
        self._save_model(model_def, filename)
        self._test_conversion_from_file(filename, epsilon=epsilon)

    def _test_conversion_from_file(self, filename, epsilon=1e-5):
        """Convert onnx -> NNEF -> onnx, execute both, and compare the outputs.

        Bool outputs must match exactly; numeric outputs must agree within epsilon.
        """
        self._convert_to_nnef(filename)
        self._convert_from_nnef(filename + '.nnef')
        original_outputs = self._exec_model(filename)
        converted_outputs = self._exec_model(filename + '.nnef.onnx')
        self.assertEqual(len(original_outputs), len(converted_outputs))
        for original, converted in zip(original_outputs, converted_outputs):
            if original.dtype == bool:
                self.assertTrue(np.all(original == converted))
            else:
                diff = np.max(np.abs(original - converted))
                self.assertLess(diff, epsilon)

    def _test_unary(self, op_type, dtype=TensorProto.FLOAT,
                    opset_version=DEFAULT_OPSET_VERSION, ir_version=DEFAULT_IR_VERSION):
        # Helper for elementwise single-input ops on a fixed [1, 3, 32, 32] shape.
        input = helper.make_tensor_value_info('input', dtype, [1, 3, 32, 32])
        output = helper.make_tensor_value_info('output', dtype, [1, 3, 32, 32])
        node = helper.make_node(
            op_type=op_type,
            inputs=['input'],
            outputs=['output'],
        )
        self._test_conversion(op_type.lower(), [node], [input], [output],
                              opset_version=opset_version, ir_version=ir_version)

    def _test_binary(self, op_type, input_dtype=TensorProto.FLOAT, output_dtype=TensorProto.FLOAT,
                     opset_version=DEFAULT_OPSET_VERSION, ir_version=DEFAULT_IR_VERSION):
        # Helper for elementwise two-input ops (output dtype may differ, e.g. comparisons).
        input1 = helper.make_tensor_value_info('input1', input_dtype, [1, 3, 32, 32])
        input2 = helper.make_tensor_value_info('input2', input_dtype, [1, 3, 32, 32])
        output = helper.make_tensor_value_info('output', output_dtype, [1, 3, 32, 32])
        node = helper.make_node(
            op_type=op_type,
            inputs=['input1', 'input2'],
            outputs=['output'],
        )
        self._test_conversion(op_type.lower(), [node], [input1, input2], [output],
                              opset_version=opset_version, ir_version=ir_version)

    def _test_reduce(self, op_type, keepdims, dtype=TensorProto.FLOAT, p=None,
                     opset_version=DEFAULT_OPSET_VERSION, ir_version=DEFAULT_IR_VERSION):
        # Helper for reductions over axis 1; 'p' appears unused here — NOTE(review): verify.
        input = helper.make_tensor_value_info('input', dtype, [1, 16, 32, 32])
        output = helper.make_tensor_value_info('output', dtype, [1, 1, 32, 32] if keepdims else [1, 32, 32])
        node = helper.make_node(
            op_type=op_type,
            inputs=['input'],
            outputs=['output'],
            axes=[1],
            keepdims=keepdims,
        )
        self._test_conversion(op_type.lower(), [node], [input], [output],
                              opset_version=opset_version, ir_version=ir_version)


class TestCases(TestEnv):
    """Per-operator round-trip tests; each builds a tiny ONNX model and converts it."""

    def test_conv1d(self):
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 32])
        filter = helper.make_tensor_value_info('filter', TensorProto.FLOAT, [16, 3, 5])
        bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [16])
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 16, 32])
        node = helper.make_node(
            op_type='Conv',
            inputs=['input', 'filter', 'bias'],
            outputs=['output'],
            auto_pad='SAME_UPPER',
        )
        self._test_conversion('conv1d', [node], [input], [output], constants=[filter, bias])

    def test_conv2d(self):
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 32, 32])
        filter = helper.make_tensor_value_info('filter', TensorProto.FLOAT, [16, 3, 5, 5])
        bias = 
    # --- Conv / ConvTranspose variants ---

    def test_conv2d_nobias(self):
        # Conv with the optional bias input omitted.
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 32, 32])
        filter = helper.make_tensor_value_info('filter', TensorProto.FLOAT, [16, 3, 5, 5])
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 16, 32, 32])
        node = helper.make_node(
            op_type='Conv',
            inputs=['input', 'filter'],
            outputs=['output'],
            auto_pad='SAME_UPPER',
        )
        self._test_conversion('conv2d-nobias', [node], [input], [output], constants=[filter])

    def test_conv2d_valid(self):
        # VALID padding: 5x5 kernel shrinks 32x32 to 28x28.
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 32, 32])
        filter = helper.make_tensor_value_info('filter', TensorProto.FLOAT, [16, 3, 5, 5])
        bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [16])
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 16, 28, 28])
        node = helper.make_node(
            op_type='Conv',
            inputs=['input', 'filter', 'bias'],
            outputs=['output'],
            auto_pad='VALID',
        )
        self._test_conversion('conv2d-valid', [node], [input], [output], constants=[filter, bias])

    def test_conv2d_pads(self):
        # Explicit symmetric padding of 1 on each spatial edge.
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 32, 32])
        filter = helper.make_tensor_value_info('filter', TensorProto.FLOAT, [16, 3, 5, 5])
        bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [16])
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 16, 30, 30])
        node = helper.make_node(
            op_type='Conv',
            inputs=['input', 'filter', 'bias'],
            outputs=['output'],
            pads=[1, 1, 1, 1],
        )
        self._test_conversion('conv2d-pads', [node], [input], [output], constants=[filter, bias])

    def test_conv2d_same_lower(self):
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 32, 32])
        filter = helper.make_tensor_value_info('filter', TensorProto.FLOAT, [16, 3, 5, 5])
        bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [16])
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 16, 32, 32])
        node = helper.make_node(
            op_type='Conv',
            inputs=['input', 'filter', 'bias'],
            outputs=['output'],
            auto_pad="SAME_LOWER",
        )
        self._test_conversion('conv2d-same-lower', [node], [input], [output], constants=[filter, bias])

    def test_conv2d_transpose(self):
        # ConvTranspose: filter layout is [in_channels, out_channels, kH, kW].
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 16, 32, 32])
        filter = helper.make_tensor_value_info('filter', TensorProto.FLOAT, [16, 3, 5, 5])
        bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [3])
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3, 32, 32])
        node = helper.make_node(
            op_type='ConvTranspose',
            inputs=['input', 'filter', 'bias'],
            outputs=['output'],
            auto_pad='SAME_UPPER',
        )
        self._test_conversion('conv2d_transpose', [node], [input], [output], constants=[filter, bias])

    def test_conv2d_transpose_output_shape(self):
        # Same as above but with the output shape given explicitly.
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 16, 32, 32])
        filter = helper.make_tensor_value_info('filter', TensorProto.FLOAT, [16, 3, 5, 5])
        bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [3])
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3, 32, 32])
        node = helper.make_node(
            op_type='ConvTranspose',
            inputs=['input', 'filter', 'bias'],
            outputs=['output'],
            auto_pad='SAME_UPPER',
            output_shape=[1, 3, 32, 32],
        )
        self._test_conversion('conv2d_transpose-output_shape', [node], [input], [output],
                              constants=[filter, bias])

    def test_conv2d_transpose_output_padding_strided(self):
        # Strided transpose conv with output_padding to reach exactly 64x64.
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 16, 32, 32])
        filter = helper.make_tensor_value_info('filter', TensorProto.FLOAT, [16, 3, 3, 3])
        bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [3])
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3, 64, 64])
        node = helper.make_node(
            op_type='ConvTranspose',
            inputs=['input', 'filter', 'bias'],
            outputs=['output'],
            pads=(1, 1, 1, 1),
            output_padding=(1, 1),
            strides=(2, 2),
        )
        self._test_conversion('conv2d_transpose-output_padding-strided', [node], [input], [output],
                              constants=[filter, bias])

    def test_conv3d(self):
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 32, 32, 32])
        filter = helper.make_tensor_value_info('filter', TensorProto.FLOAT, [16, 3, 5, 5, 5])
        bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [16])
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 16, 32, 32, 32])
        node = helper.make_node(
            op_type='Conv',
            inputs=['input', 'filter', 'bias'],
            outputs=['output'],
            auto_pad='SAME_UPPER',
        )
        self._test_conversion('conv3d', [node], [input], [output], constants=[filter, bias])

    # --- pooling variants ---

    def test_max_pool1d(self):
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 32])
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3, 32])
        node = helper.make_node(
            op_type='MaxPool',
            inputs=['input'],
            outputs=['output'],
            kernel_shape=[3],
            auto_pad='SAME_UPPER',
        )
        self._test_conversion('max_pool1d', [node], [input], [output])

    def test_max_pool2d(self):
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 32, 32])
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3, 32, 32])
        node = helper.make_node(
            op_type='MaxPool',
            inputs=['input'],
            outputs=['output'],
            kernel_shape=[3, 3],
            auto_pad='SAME_UPPER',
        )
        self._test_conversion('max_pool2d', [node], [input], [output])

    def test_max_pool2d_valid(self):
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 32, 32])
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3, 30, 30])
        node = helper.make_node(
            op_type='MaxPool',
            inputs=['input'],
            outputs=['output'],
            kernel_shape=[3, 3],
            auto_pad='VALID',
        )
        self._test_conversion('max_pool2d-valid', [node], [input], [output])

    def test_max_pool2d_pads(self):
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 32, 32])
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3, 32, 32])
        node = helper.make_node(
            op_type='MaxPool',
            inputs=['input'],
            outputs=['output'],
            kernel_shape=[3, 3],
            pads=[1, 1, 1, 1],
        )
        self._test_conversion('max_pool2d-pads', [node], [input], [output])

    def test_max_pool3d(self):
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 32, 32, 32])
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3, 32, 32, 32])
        node = helper.make_node(
            op_type='MaxPool',
            inputs=['input'],
            outputs=['output'],
            kernel_shape=[3, 3, 3],
            auto_pad='SAME_UPPER',
        )
        self._test_conversion('max_pool3d', [node], [input], [output])

    def test_avg_pool2d(self):
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 32, 32])
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3, 32, 32])
        node = helper.make_node(
            op_type='AveragePool',
            inputs=['input'],
            outputs=['output'],
            kernel_shape=[3, 3],
            auto_pad='SAME_UPPER',
        )
        self._test_conversion('avg_pool2d', [node], [input], [output])

    def test_global_avg_pool2d(self):
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 32, 32])
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3, 1, 1])
        node = helper.make_node(
            op_type='GlobalAveragePool',
            inputs=['input'],
            outputs=['output'],
        )
        self._test_conversion('global_avg_pool2d', [node], [input], [output])

    def test_global_max_pool2d(self):
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 32, 32])
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3, 1, 1])
        node = helper.make_node(
            op_type='GlobalMaxPool',
            inputs=['input'],
            outputs=['output'],
        )
        self._test_conversion('global_max_pool2d', [node], [input], [output])
self._test_conversion('global_max_pool2d', [node], [input], [output]) def test_lp_pool2d(self): input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 32, 32]) output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3, 32, 32]) node = helper.make_node( op_type='LpPool', inputs=['input'], outputs=['output'], kernel_shape=[3, 3], auto_pad='SAME_UPPER', p=2, ) self._test_conversion('lp_pool2d', [node], [input], [output]) def test_global_lp_pool2d(self): input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 32, 32]) output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3, 1, 1]) node = helper.make_node( op_type='GlobalLpPool', inputs=['input'], outputs=['output'], p=2, ) self._test_conversion('global_lp_pool2d', [node], [input], [output]) def test_batch_norm(self): input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 32, 32]) mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [3]) variance = helper.make_tensor_value_info('variance', TensorProto.FLOAT, [3]) offset = helper.make_tensor_value_info('offset', TensorProto.FLOAT, [3]) scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [3]) output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3, 32, 32]) node = helper.make_node( op_type='BatchNormalization', inputs=['input', 'scale', 'offset', 'mean', 'variance'], outputs=['output'], ) self._test_conversion('batch_norm', [node], [input], [output], [mean, variance, offset, scale]) def test_transpose(self): input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 32, 32]) output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 32, 32, 3]) node = helper.make_node( op_type='Transpose', inputs=['input'], outputs=['output'], perm=[0, 2, 3, 1], ) self._test_conversion('transpose', [node], [input], [output]) def test_reshape(self): input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 32, 32]) 
output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3 * 32 * 32]) shape = helper.make_tensor_value_info('shape', TensorProto.INT64, [2]) node = helper.make_node( op_type='Reshape', inputs=['input', 'shape'], outputs=['output'], ) self._test_conversion('reshape', [node], [input, shape], [output], constants=[shape], values={'shape': [1, 3 * 32 * 32]}) def test_flatten(self): input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 32, 32]) output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3 * 32 * 32]) node = helper.make_node( op_type='Flatten', inputs=['input'], outputs=['output'], axis=1, ) self._test_conversion('flatten', [node], [input], [output]) def test_squeeze(self): input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 1, 32, 32]) output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 32, 32]) node = helper.make_node( op_type='Squeeze', inputs=['input'], outputs=['output'], axes=[1], ) self._test_conversion('squeeze', [node], [input], [output]) def test_unsqueeze(self): input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 32, 32]) output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 1, 32, 32]) node = helper.make_node( op_type='Unsqueeze', inputs=['input'], outputs=['output'], axes=[1], ) self._test_conversion('unsqueeze', [node], [input], [output]) def test_matmul(self): input1 = helper.make_tensor_value_info('input1', TensorProto.FLOAT, [10, 20]) input2 = helper.make_tensor_value_info('input2', TensorProto.FLOAT, [20, 30]) output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [10, 30]) node = helper.make_node( op_type='MatMul', inputs=['input1', 'input2'], outputs=['output'], ) self._test_conversion('matmul', [node], [input1, input2], [output]) def test_gemm(self): input1 = helper.make_tensor_value_info('input1', TensorProto.FLOAT, [10, 20]) input2 = helper.make_tensor_value_info('input2', TensorProto.FLOAT, [20, 30]) 
output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [10, 30]) node = helper.make_node( op_type='Gemm', inputs=['input1', 'input2'], outputs=['output'], ) self._test_conversion('gemm', [node], [input1, input2], [output]) def test_linear(self): input1 = helper.make_tensor_value_info('input1', TensorProto.FLOAT, [10, 20]) input2 = helper.make_tensor_value_info('input2', TensorProto.FLOAT, [30, 20]) input3 = helper.make_tensor_value_info('input3', TensorProto.FLOAT, [30]) output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [10, 30]) node = helper.make_node( op_type='Gemm', inputs=['input1', 'input2', 'input3'], outputs=['output'], transB=1, ) self._test_conversion('linear', [node], [input1], [output], constants=[input2, input3]) def test_lrn(self): input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 16, 32, 32]) output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 16, 32, 32]) node = helper.make_node( op_type='LRN', inputs=['input'], outputs=['output'], size=5, ) self._test_conversion('lrn', [node], [input], [output]) def test_concat(self): input1 = helper.make_tensor_value_info('input1', TensorProto.FLOAT, [1, 3, 32, 32]) input2 = helper.make_tensor_value_info('input2', TensorProto.FLOAT, [1, 3, 32, 32]) output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 6, 32, 32]) node = helper.make_node( op_type='Concat', inputs=['input1', 'input2'], outputs=['output'], axis=1, ) self._test_conversion('concat', [node], [input1, input2], [output]) def test_split(self): input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 6, 32, 32]) output1 = helper.make_tensor_value_info('output1', TensorProto.FLOAT, [1, 3, 32, 32]) output2 = helper.make_tensor_value_info('output2', TensorProto.FLOAT, [1, 3, 32, 32]) node = helper.make_node( op_type='Split', inputs=['input'], outputs=['output1', 'output2'], axis=1, split=[3, 3], ) self._test_conversion('split', [node], [input], [output1, 
output2]) def test_split_dynamic(self): input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 6, 32, 32]) split = helper.make_tensor_value_info('split', TensorProto.INT64, [2]) output1 = helper.make_tensor_value_info('output1', TensorProto.FLOAT, [1, 3, 32, 32]) output2 = helper.make_tensor_value_info('output2', TensorProto.FLOAT, [1, 3, 32, 32]) node = helper.make_node( op_type='Split', inputs=['input', 'split'], outputs=['output1', 'output2'], axis=1, ) self._test_conversion('split', [node], [input, split], [output1, output2], constants=[split], values={'split': [3, 3]}, opset_version=13) def test_sum(self): input1 = helper.make_tensor_value_info('input1', TensorProto.FLOAT, [1, 3, 32, 32]) input2 = helper.make_tensor_value_info('input2', TensorProto.FLOAT, [1, 3, 32, 32]) output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3, 32, 32]) node = helper.make_node( op_type='Sum', inputs=['input1', 'input2'], outputs=['output'], ) self._test_conversion('sum', [node], [input1, input2], [output]) def test_softmax(self): input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 16, 32, 32]) output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 16, 32, 32]) node = helper.make_node( op_type='Softmax', inputs=['input'], outputs=['output'], axis=1, ) self._test_conversion('softmax', [node], [input], [output]) def test_leaky_relu(self): input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 16, 32, 32]) output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 16, 32, 32]) node = helper.make_node( op_type='LeakyRelu', inputs=['input'], outputs=['output'], ) self._test_conversion('leaky_relu', [node], [input], [output]) def test_prelu(self): input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 16, 32, 32]) alpha = helper.make_tensor_value_info('alpha', TensorProto.FLOAT, [16, 1, 1]) output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 16, 32, 32]) 
    # --- selection, clipping, arg-reduce, padding, tiling, slicing, normalization ---

    def test_where(self):
        # Broadcasting select: condition and second operand broadcast over input1.
        cond = helper.make_tensor_value_info('cond', TensorProto.BOOL, [1, 1, 32, 32])
        input1 = helper.make_tensor_value_info('input1', TensorProto.FLOAT, [1, 3, 32, 32])
        input2 = helper.make_tensor_value_info('input2', TensorProto.FLOAT, [1, 3, 1, 1])
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3, 32, 32])
        node = helper.make_node(
            op_type='Where',
            inputs=['cond', 'input1', 'input2'],
            outputs=['output'],
        )
        self._test_conversion('where', [node], [cond, input1, input2], [output])

    def test_clip(self):
        # Clip with scalar min/max supplied as graph inputs (opset >= 11 form).
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 32, 32])
        min = helper.make_tensor_value_info('min', TensorProto.FLOAT, [])
        max = helper.make_tensor_value_info('max', TensorProto.FLOAT, [])
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3, 32, 32])
        node = helper.make_node(
            op_type='Clip',
            inputs=['input', 'min', 'max'],
            outputs=['output'],
        )
        self._test_conversion('clip', [node], [input, min, max], [output])

    def test_argmin(self):
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 16, 32, 32])
        output = helper.make_tensor_value_info('output', TensorProto.INT64, [1, 1, 32, 32])
        node = helper.make_node(
            op_type='ArgMin',
            inputs=['input'],
            outputs=['output'],
            axis=1,
            keepdims=True,
        )
        self._test_conversion('argmin_reduce', [node], [input], [output])

    def test_argmax(self):
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 16, 32, 32])
        output = helper.make_tensor_value_info('output', TensorProto.INT64, [1, 32, 32])
        node = helper.make_node(
            op_type='ArgMax',
            inputs=['input'],
            outputs=['output'],
            axis=1,
            keepdims=False,
        )
        self._test_conversion('argmax_reduce', [node], [input], [output])

    def test_pad(self):
        # Pads in ONNX order: [begin_0..begin_n, end_0..end_n].
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 32, 32])
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3, 34, 34])
        pads = helper.make_tensor_value_info('pads', TensorProto.INT64, [8])
        node = helper.make_node(
            op_type='Pad',
            inputs=['input', 'pads'],
            outputs=['output'],
        )
        self._test_conversion('pad', [node], [input], [output], constants=[pads],
                              values={'pads': [0, 0, 1, 1, 0, 0, 1, 1]})

    def test_tile(self):
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 32, 32])
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3, 64, 64])
        repeats = helper.make_tensor_value_info('repeats', TensorProto.INT64, [4])
        node = helper.make_node(
            op_type='Tile',
            inputs=['input', 'repeats'],
            outputs=['output'],
        )
        self._test_conversion('tile', [node], [input], [output], constants=[repeats],
                              values={'repeats': [1, 1, 2, 2]})

    def test_expand(self):
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [4, 3, 1, 1])
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [4, 3, 32, 32])
        # NOTE: local is named 'repeats' but the tensor itself is 'shape'.
        repeats = helper.make_tensor_value_info('shape', TensorProto.INT64, [4])
        node = helper.make_node(
            op_type='Expand',
            inputs=['input', 'shape'],
            outputs=['output'],
        )
        self._test_conversion('expand', [node], [input], [output], constants=[repeats],
                              values={'shape': [4, 3, 32, 32]})

    def test_slice(self):
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 32, 32])
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3, 30, 30])
        starts = helper.make_tensor_value_info('starts', TensorProto.INT64, [2])
        ends = helper.make_tensor_value_info('ends', TensorProto.INT64, [2])
        axes = helper.make_tensor_value_info('axes', TensorProto.INT64, [2])
        node = helper.make_node(
            op_type='Slice',
            inputs=['input', 'starts', 'ends', 'axes'],
            outputs=['output'],
        )
        self._test_conversion('slice', [node], [input], [output], constants=[starts, ends, axes],
                              values={'starts': [1, 1], 'ends': [-1, -1], 'axes': [2, 3]})

    def test_strided_slice(self):
        # Negative step over the full spatial extent (reverses both axes).
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 32, 32])
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3, 32, 32])
        starts = helper.make_tensor_value_info('starts', TensorProto.INT64, [2])
        ends = helper.make_tensor_value_info('ends', TensorProto.INT64, [2])
        axes = helper.make_tensor_value_info('axes', TensorProto.INT64, [2])
        steps = helper.make_tensor_value_info('steps', TensorProto.INT64, [2])
        node = helper.make_node(
            op_type='Slice',
            inputs=['input', 'starts', 'ends', 'axes', 'steps'],
            outputs=['output'],
        )
        self._test_conversion('strided_slice', [node], [input], [output],
                              constants=[starts, ends, axes, steps],
                              values={'starts': [-1, -1], 'ends': [-sys.maxsize, -sys.maxsize],
                                      'axes': [2, 3], 'steps': [-1, -1]})

    def test_flip(self):
        # Reversed slice over an interior range.
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 32, 32])
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3, 30, 30])
        starts = helper.make_tensor_value_info('starts', TensorProto.INT64, [2])
        ends = helper.make_tensor_value_info('ends', TensorProto.INT64, [2])
        axes = helper.make_tensor_value_info('axes', TensorProto.INT64, [2])
        steps = helper.make_tensor_value_info('steps', TensorProto.INT64, [2])
        node = helper.make_node(
            op_type='Slice',
            inputs=['input', 'starts', 'ends', 'axes', 'steps'],
            outputs=['output'],
        )
        self._test_conversion('flip', [node], [input], [output],
                              constants=[starts, ends, axes, steps],
                              values={'starts': [-2, -2], 'ends': [0, 0],
                                      'axes': [2, 3], 'steps': [-1, -1]})

    def test_l1_normalization(self):
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 16, 32, 32])
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 16, 32, 32])
        node = helper.make_node(
            op_type='LpNormalization',
            inputs=['input'],
            outputs=['output'],
            axis=1,
            p=1,
        )
        self._test_conversion('l1_normalization', [node], [input], [output])

    def test_l2_normalization(self):
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 16, 32, 32])
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 16, 32, 32])
        node = helper.make_node(
            op_type='LpNormalization',
            inputs=['input'],
            outputs=['output'],
            axis=1,
            p=2,
        )
        self._test_conversion('l2_normalization', [node], [input], [output])

    def test_mean_variance_normalization(self):
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 16, 32, 32])
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 16, 32, 32])
        node = helper.make_node(
            op_type='MeanVarianceNormalization',
            inputs=['input'],
            outputs=['output'],
            axes=[0, 2, 3],
        )
        self._test_conversion('mean_variance_normalization', [node], [input], [output])

    def test_instance_normalization(self):
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 16, 32, 32])
        scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [16])
        offset = helper.make_tensor_value_info('offset', TensorProto.FLOAT, [16])
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 16, 32, 32])
        node = helper.make_node(
            op_type='InstanceNormalization',
            inputs=['input', 'scale', 'offset'],
            outputs=['output'],
        )
        self._test_conversion('instance_normalization', [node], [input], [output],
                              constants=[scale, offset])

    def test_lp_reduce(self):
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 16, 32, 32])
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 1, 32, 32])
        node = helper.make_node(
            op_type='ReduceL1',
            inputs=['input'],
            outputs=['output'],
            axes=[1],
            keepdims=True,
        )
        self._test_conversion('lp_reduce', [node], [input], [output])

    def test_nearest_upsample(self):
        # Deprecated Upsample op, only valid up to opset 9 -> pin opset 8.
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 32, 32])
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3, 64, 64])
        node = helper.make_node(
            op_type='Upsample',
            inputs=['input'],
            outputs=['output'],
            scales=[1.0, 1.0, 2.0, 2.0],
            mode='nearest',
        )
        self._test_conversion('nearest_upsample', [node], [input], [output], opset_version=8)
[node], [input], [output], opset_version=8) def test_linear_upsample(self): input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 32, 32]) output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3, 64, 64]) node = helper.make_node( op_type='Upsample', inputs=['input'], outputs=['output'], scales=[1.0, 1.0, 2.0, 2.0], mode='linear', ) self._test_conversion('linear_upsample', [node], [input], [output], opset_version=8) def test_resize_nearest_upsample(self): input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 32, 32]) scales = helper.make_tensor_value_info('scales', TensorProto.FLOAT, [4]) roi = helper.make_tensor_value_info('roi', TensorProto.FLOAT, [0]) output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3, 64, 64]) node = helper.make_node( op_type='Resize', inputs=['input', 'roi', 'scales'], outputs=['output'], mode='nearest', ) self._test_conversion('resize_nearest_upsample', [node], [input], [output], constants=[scales, roi], values={'scales': [1.0, 1.0, 2.0, 2.0], 'roi': []}) def test_resize_linear_upsample(self): input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 32, 32]) scales = helper.make_tensor_value_info('scales', TensorProto.FLOAT, [4]) roi = helper.make_tensor_value_info('roi', TensorProto.FLOAT, [0]) output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3, 64, 64]) node = helper.make_node( op_type='Resize', inputs=['input', 'roi', 'scales'], outputs=['output'], mode='linear', ) self._test_conversion('resize_liner_upsample', [node], [input], [output], constants=[scales, roi], values={'scales': [1.0, 1.0, 2.0, 2.0], 'roi': []}) def test_resize_nearest_downsample(self): input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 32, 32]) scales = helper.make_tensor_value_info('scales', TensorProto.FLOAT, [4]) roi = helper.make_tensor_value_info('roi', TensorProto.FLOAT, [0]) output = helper.make_tensor_value_info('output', 
TensorProto.FLOAT, [1, 3, 16, 16]) node = helper.make_node( op_type='Resize', inputs=['input', 'roi', 'scales'], outputs=['output'], mode='nearest', ) self._test_conversion('resize_nearest_downsample', [node], [input], [output], constants=[scales, roi], values={'scales': [1.0, 1.0, 0.5, 0.5], 'roi': []}) def test_cast(self): input = helper.make_tensor_value_info('input', TensorProto.INT32, [1, 4, 32, 32]) output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 4, 32, 32]) node = helper.make_node( op_type='Cast', inputs=['input'], outputs=['output'], to=TensorProto.FLOAT, ) self._test_conversion('cast', [node], [input], [output]) def test_gather(self): input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 16, 32, 32]) indices = helper.make_tensor_value_info('indices', TensorProto.INT32, [24]) output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 24, 32, 32]) node = helper.make_node( op_type='Gather', inputs=['input', 'indices'], outputs=['output'], axis=1, ) self._test_conversion('gather', [node], [input, indices], [output]) def test_lstm(self): X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [5, 4, 32]) W = helper.make_tensor_value_info('W', TensorProto.FLOAT, [1, 256, 32]) R = helper.make_tensor_value_info('R', TensorProto.FLOAT, [1, 256, 64]) B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [1, 512]) h0 = helper.make_tensor_value_info('h0', TensorProto.FLOAT, [1, 4, 64]) c0 = helper.make_tensor_value_info('c0', TensorProto.FLOAT, [1, 4, 64]) hn = helper.make_tensor_value_info('hn', TensorProto.FLOAT, [1, 4, 64]) cn = helper.make_tensor_value_info('cn', TensorProto.FLOAT, [1, 4, 64]) node = helper.make_node( op_type='LSTM', inputs=['X', 'W', 'R', 'B', '', 'h0', 'c0'], outputs=['', 'hn', 'cn'], hidden_size=64, direction="forward", ) self._test_conversion('lstm', [node], [X, h0, c0], [hn, cn], constants=[W, R, B]) def test_depth_to_space(self): input = helper.make_tensor_value_info('input', 
TensorProto.FLOAT, [4, 64, 32, 32]) output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [4, 4, 128, 128]) node = helper.make_node( op_type='DepthToSpace', inputs=['input'], outputs=['output'], blocksize=4, ) self._test_conversion('depth_to_space', [node], [input], [output]) def test_depth_to_space_CRD(self): input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [4, 64, 32, 32]) output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [4, 4, 128, 128]) node = helper.make_node( op_type='DepthToSpace', inputs=['input'], outputs=['output'], blocksize=4, mode="CRD" ) self._test_conversion('depth_to_space_crd', [node], [input], [output]) def test_space_to_depth(self): input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [4, 4, 128, 128]) output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [4, 64, 32, 32]) node = helper.make_node( op_type='SpaceToDepth', inputs=['input'], outputs=['output'], blocksize=4, ) self._test_conversion('space_to_depth', [node], [input], [output]) def test_min_recude(self): self._test_reduce('ReduceMin', keepdims=False) def test_max_recude(self): self._test_reduce('ReduceMax', keepdims=False) def test_mean_recude(self): self._test_reduce('ReduceMean', keepdims=False) def test_sum_recude(self): self._test_reduce('ReduceSum', keepdims=False) def test_max_recude_keepdims(self): self._test_reduce('ReduceMax', keepdims=True) def test_relu(self): self._test_unary('Relu') def test_sigmoid(self): self._test_unary('Sigmoid') def test_tanh(self): self._test_unary('Tanh') def test_softplus(self): self._test_unary('Softplus') def test_selu(self): self._test_unary('Selu') def test_not(self): self._test_unary('Not', dtype=TensorProto.BOOL) def test_elu(self): self._test_unary('Elu') def test_erf(self): self._test_unary('Erf') def test_abs(self): self._test_unary('Abs') def test_sign(self): self._test_unary('Sign') def test_sin(self): self._test_unary('Sin') def test_cos(self): 
        self._test_unary('Cos')

    # Element-wise unary ONNX operators, each round-tripped through NNEF.
    def test_tan(self):
        self._test_unary('Tan')

    def test_asin(self):
        self._test_unary('Asin')

    def test_acos(self):
        self._test_unary('Acos')

    def test_atan(self):
        self._test_unary('Atan')

    def test_sinh(self):
        self._test_unary('Sinh')

    def test_cosh(self):
        self._test_unary('Cosh')

    def test_tanh(self):
        self._test_unary('Tanh')

    def test_exp(self):
        self._test_unary('Exp')

    def test_log(self):
        self._test_unary('Log')

    def test_neg(self):
        self._test_unary('Neg')

    def test_sqrt(self):
        self._test_unary('Sqrt')

    def test_ceil(self):
        self._test_unary('Ceil')

    def test_floor(self):
        self._test_unary('Floor')

    def test_round(self):
        self._test_unary('Round')

    # Element-wise binary ONNX operators; logical/comparison ops declare
    # BOOL input and/or output dtypes explicitly.
    def test_add(self):
        self._test_binary('Add')

    def test_sub(self):
        self._test_binary('Sub')

    def test_mul(self):
        self._test_binary('Mul')

    def test_div(self):
        self._test_binary('Div')

    def test_pow(self):
        self._test_binary('Pow')

    def test_min(self):
        self._test_binary('Min')

    def test_max(self):
        self._test_binary('Max')

    def test_and(self):
        self._test_binary('And', input_dtype=TensorProto.BOOL, output_dtype=TensorProto.BOOL)

    def test_or(self):
        self._test_binary('Or', input_dtype=TensorProto.BOOL, output_dtype=TensorProto.BOOL)

    def test_equal(self):
        self._test_binary('Equal', output_dtype=TensorProto.BOOL)

    def test_less(self):
        self._test_binary('Less', output_dtype=TensorProto.BOOL)

    def test_greater(self):
        self._test_binary('Greater', output_dtype=TensorProto.BOOL)


# Whole-network round-trip tests; skipped unless a folder of .onnx models
# was provided via the environment (see TestEnv._network_folder).
@unittest.skipIf(TestEnv._network_folder is None or not os.path.isdir(TestEnv._network_folder), "no network test folder provided")
class NetworkTestCases(TestEnv):
    def test_alexnet(self):
        self._test_conversion_from_file(self._network_folder + 'alexnet.onnx')

    def test_googlenet(self):
        self._test_conversion_from_file(self._network_folder + 'googlenet.onnx')

    def test_inception_v1(self):
        self._test_conversion_from_file(self._network_folder + 'inception_v1.onnx')

    def test_inception_v2(self):
        self._test_conversion_from_file(self._network_folder + 'inception_v2.onnx')

    # Mobile/shuffle nets use a looser epsilon, presumably because depthwise
    # convolutions accumulate more floating-point error -- TODO confirm.
    def test_mobilenet_v1(self):
        self._test_conversion_from_file(self._network_folder + 'mobilenet_v1.onnx', epsilon=1e-4)

    def test_mobilenet_v2(self):
        self._test_conversion_from_file(self._network_folder + 'mobilenet_v2.onnx', epsilon=1e-4)

    def test_resnet50_v1(self):
        self._test_conversion_from_file(self._network_folder + 'resnet50_v1.onnx')

    def test_resnet50_v2(self):
        self._test_conversion_from_file(self._network_folder + 'resnet50_v2.onnx')

    def test_squeezenet_v1(self):
        self._test_conversion_from_file(self._network_folder + 'squeezenet_v1.onnx')

    def test_shufflenet_v1(self):
        self._test_conversion_from_file(self._network_folder + 'shufflenet_v1.onnx')

    def test_shufflenet_v2(self):
        self._test_conversion_from_file(self._network_folder + 'shufflenet_v2.onnx', epsilon=1e-4)


if __name__ == '__main__':
    unittest.main()


================================================
FILE: nnef_tools-pyproject/tests/conversion/tflite_test.py
================================================
# Copyright (c) 2020 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import nnef_tools.io.nnef as nnef_io
import nnef_tools.io.tf.lite as lite_io
import nnef_tools.conversion.tflite_to_nnef as tflite_to_nnef
import nnef_tools.conversion.nnef_to_tflite as nnef_to_tflite
import nnef_tools.optimization.nnef_optimizer as nnef_opt
import nnef_tools.optimization.tflite_optimizer as tflite_opt
import unittest
import tempfile
import os

# Prefer the TF1 compatibility API under TF2; fall back to plain TF1.
try:
    import tensorflow.compat.v1 as tf
    tf.disable_v2_behavior()
except ImportError:
    import tensorflow as tf


UNITTEST_FOLDER = os.environ.get('UNITTEST_FOLDER')


class TestEnv(unittest.TestCase):
    """Round-trip conversion harness: TFLite -> NNEF -> TFLite.

    Each test builds (or loads) a TFLite model, converts it to NNEF and back,
    executes both models with the same seeded random inputs, and compares
    the outputs element-wise within an epsilon tolerance.
    """

    # Optional folders for whole-network models and per-op outputs;
    # both are None when UNITTEST_FOLDER is not set.
    _network_folder = os.path.join(UNITTEST_FOLDER, 'tflite/nets/') if UNITTEST_FOLDER else None
    _output_folder = os.path.join(UNITTEST_FOLDER, 'tflite/ops/') if UNITTEST_FOLDER else None
    _mirror_unsupported = False
    _io_transpose = True
    _optimize = True

    def setUp(self) -> None:
        """Create fresh reader/writer/converter/optimizer instances per test."""
        self._tflite_reader = lite_io.Reader()
        self._tflite_writer = lite_io.Writer()
        self._tflite_to_nnef_converter = tflite_to_nnef.Converter(io_transpose=self._io_transpose,
                                                                  mirror_unsupported=self._mirror_unsupported)
        self._nnef_to_tflite_converter = nnef_to_tflite.Converter(io_transpose=self._io_transpose,
                                                                  mirror_unsupported=self._mirror_unsupported)
        self._nnef_reader = nnef_io.Reader(custom_shapes=self._nnef_to_tflite_converter.defined_shapes(),
                                           decomposed=self._nnef_to_tflite_converter.decomposed_operations())
        self._nnef_writer = nnef_io.Writer(fragments=self._tflite_to_nnef_converter.defined_operations())
        self._nnef_optimizer = nnef_opt.Optimizer()
        self._tflite_optimizer = tflite_opt.Optimizer()

    def tearDown(self) -> None:
        # Tests build graphs into the default TF1 graph; reset between tests.
        tf.reset_default_graph()

    def _convert_to_nnef(self, filename):
        """Read a .tflite model, convert to NNEF, write '<filename>.nnef'."""
        tflite_graph = self._tflite_reader(filename)
        if self._optimize:
            tflite_graph = self._tflite_optimizer(tflite_graph)
        nnef_graph = self._tflite_to_nnef_converter(tflite_graph)
        if self._optimize:
            nnef_graph = self._nnef_optimizer(nnef_graph)
        self._nnef_writer(nnef_graph, filename + '.nnef')

    def _convert_from_nnef(self, filename):
        """Read an NNEF model, convert to TFLite, write '<filename>.tflite'."""
        nnef_graph = self._nnef_reader(filename)
        tflite_graph = self._nnef_to_tflite_converter(nnef_graph)
        self._tflite_writer(tflite_graph, filename + '.tflite')

    def _save_default_graph(self, inputs, outputs, filename):
        """Freeze the default TF1 graph into a .tflite flatbuffer file."""
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            converter = tf.lite.TFLiteConverter.from_session(sess, inputs, outputs)
            tflite_model = converter.convert()
            with open(filename, "wb") as file:
                file.write(tflite_model)

    @staticmethod
    def _exec_model(model_path):
        """Run a .tflite model on seeded random inputs; return dequantized outputs."""
        np.random.seed(0)  # same seed for original and converted model runs
        interpreter = tf.lite.Interpreter(model_path=model_path,
                                          experimental_op_resolver_type=tf.lite.experimental.OpResolverType.BUILTIN_WITHOUT_DEFAULT_DELEGATES)
        interpreter.allocate_tensors()
        for input in interpreter.get_input_details():
            shape = input['shape']
            dtype = input['dtype']
            data = TestEnv._random_data(dtype, shape)
            interpreter.set_tensor(input['index'], data)
        interpreter.invoke()
        return [TestEnv._dequantize(interpreter.get_tensor(output['index']), *output['quantization'])
                for output in interpreter.get_output_details()]

    @staticmethod
    def _dequantize(data, scale, zero_point):
        """Map quantized values back to real values; scale == 0 means unquantized."""
        return scale * (data - zero_point) if scale else data

    @staticmethod
    def _random_data(dtype, shape):
        """Generate random test data of the given dtype and shape.

        Integer dtypes get values in [0, 255] (the 8-bit quantized range).
        """
        if dtype == bool or dtype == np.bool_:
            return np.random.random(shape) > 0.5
        elif np.issubdtype(dtype, np.integer):
            # BUG FIX: this used np.maximum(..., 255), which forced every
            # element to the constant 255 (floor(rand * 256) is already in
            # 0..255). np.minimum clamps to the intended upper bound instead.
            return np.minimum(np.floor(np.random.random(shape) * 256).astype(dtype), 255)
        else:
            return np.random.random(shape).astype(dtype)

    def _test_conversion(self, name, inputs, outputs, epsilon=1e-4):
        """Save the default graph as TFLite and round-trip-test the file."""
        # NOTE(review): tempfile.mktemp is deprecated (race-prone); the file is
        # created immediately below, so it is benign here, but mkstemp would be safer.
        filename = tempfile.mktemp() if self._output_folder is None else self._output_folder + name + '.tflite'
        self._save_default_graph(inputs, outputs, filename)
        self._test_conversion_from_file(filename, epsilon=epsilon)

    def _test_conversion_from_file(self, filename, epsilon=1e-4):
        """Convert TFLite -> NNEF -> TFLite and assert outputs match within epsilon."""
        self._convert_to_nnef(filename)
        self._convert_from_nnef(filename + '.nnef')
        original_outputs = self._exec_model(filename)
        converted_outputs = self._exec_model(filename + '.nnef.tflite')
        self.assertEqual(len(original_outputs), len(converted_outputs))
        for original, converted in zip(original_outputs, converted_outputs):
            if original.dtype == bool:
                self.assertTrue(np.all(original == converted))
            else:
                diff = np.max(np.abs(original - converted))
                self.assertLess(diff, epsilon)


class TestCases(TestEnv):
    """Per-operator round-trip tests; each builds a tiny one-op TF graph."""

    # Convolutions and pooling.
    def test_conv1d(self):
        input = tf.placeholder(shape=(4, 32, 3), dtype=tf.float32)
        filter = tf.constant(np.random.random(size=(5, 3, 16)), dtype=tf.float32)
        output = tf.nn.conv1d(input, filter, stride=1, padding='SAME')
        self._test_conversion('conv1d', [input], [output])

    def test_conv2d(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        filter = tf.constant(np.random.random(size=(5, 5, 3, 16)), dtype=tf.float32)
        output = tf.nn.conv2d(input, filter, strides=1, padding='SAME')
        self._test_conversion('conv2d', [input], [output])

    def test_conv2d_transpose(self):
        input = tf.placeholder(shape=(4, 32, 32, 16), dtype=tf.float32)
        filter = tf.constant(np.random.random(size=(5, 5, 3, 16)), dtype=tf.float32)
        output = tf.nn.conv2d_transpose(input, filter, strides=1, padding='SAME', output_shape=(4, 32, 32, 3))
        self._test_conversion('conv2d_transpose', [input], [output])

    def test_depthwise_conv2d(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        filter = tf.constant(np.random.random(size=(5, 5, 3, 2)), dtype=tf.float32)
        output = tf.nn.depthwise_conv2d(input, filter, strides=[1, 1, 1, 1], padding='SAME')
        self._test_conversion('depthwise_conv2d', [input], [output])

    def test_max_pool2d(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.nn.max_pool2d(input, ksize=3, strides=1, padding='SAME')
        self._test_conversion('max_pool2d', [input], [output])

    def test_avg_pool2d(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.nn.avg_pool2d(input, ksize=3, strides=1, padding='SAME')
        self._test_conversion('avg_pool2d', [input], [output])

    # Shape manipulation.
    def test_reshape(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.reshape(input, shape=(4, 32 * 32 * 3))
        self._test_conversion('reshape', [input], [output])

    def test_flatten(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.reshape(input, shape=(4, -1))
        self._test_conversion('flatten', [input], [output])

    def test_squeeze(self):
        input = tf.placeholder(shape=(4, 32, 32, 1), dtype=tf.float32)
        output = tf.squeeze(input, axis=[3])
        self._test_conversion('squeeze', [input], [output])

    def test_unsqueeze(self):
        input = tf.placeholder(shape=(4, 32, 32), dtype=tf.float32)
        output = tf.expand_dims(input, axis=[3])
        self._test_conversion('unsqueeze', [input], [output])

    def test_transpose(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.transpose(input, perm=(0, 3, 1, 2))
        self._test_conversion('transpose', [input], [output])

    def test_concat(self):
        input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        input2 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.concat([input1, input2], axis=3)
        self._test_conversion('concat', [input1, input2], [output])

    def test_split_sizes(self):
        input = tf.placeholder(shape=(4, 32, 32, 6), dtype=tf.float32)
        [output1, output2] = tf.split(input, axis=3, num_or_size_splits=[3, 3])
        self._test_conversion('split-sizes', [input], [output1, output2])

    def test_split_num(self):
        input = tf.placeholder(shape=(4, 32, 32, 6), dtype=tf.float32)
        [output1, output2] = tf.split(input, axis=3, num_or_size_splits=2)
        self._test_conversion('split-num', [input], [output1, output2])

    def test_pad(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.pad(input, paddings=[[0, 0], [1, 2], [1, 2], [0, 0]])
        self._test_conversion('pad', [input], [output])

    def test_pad_reflect(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.pad(input, paddings=[[0, 0], [1, 2], [1, 2], [0, 0]], mode='REFLECT')
        self._test_conversion('pad_reflect', [input], [output])

    def test_pad_symmetric(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.pad(input, paddings=[[0, 0], [1, 2], [1, 2], [0, 0]], mode='SYMMETRIC')
        self._test_conversion('pad_symmetric', [input], [output])

    def test_tile(self):
        input = tf.placeholder(shape=(4, 1, 1, 3), dtype=tf.float32)
        output = tf.tile(input, multiples=(1, 32, 32, 1))
        self._test_conversion('tile', [input], [output])

    def test_slice(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.slice(input, begin=[0, 1, 1, 0], size=[4, 30, 30, 3])
        self._test_conversion('slice', [input], [output])

    def test_strided_slice(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = input[:, 1:-1, 1:-1, :]
        self._test_conversion('strided_slice', [input], [output])

    def test_strided_slice_flip(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = input[:, -2:0:-1, -2:0:-1, :]
        self._test_conversion('strided_slice_flip', [input], [output])

    def test_gather(self):
        input = tf.placeholder(shape=(4, 32, 32, 16), dtype=tf.float32)
        # FIX: np.random.random_integers is deprecated and removed from modern
        # NumPy; np.random.randint has an exclusive upper bound, so 16 yields
        # indices in [0, 15], matching the size-16 gather axis.
        indices = tf.constant(np.random.randint(low=0, high=16, size=(24,)), dtype=tf.int32)
        output = tf.gather(input, indices, axis=3)
        self._test_conversion('gather', [input], [output])

    # Activations and element-wise unary ops.
    def test_relu(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.nn.relu(input)
        self._test_conversion('relu', [input], [output])

    def test_relu6(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.nn.relu6(input)
        self._test_conversion('relu6', [input], [output])

    def test_elu(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.nn.elu(input)
        self._test_conversion('elu', [input], [output])

    def test_sigmoid(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.nn.sigmoid(input)
        self._test_conversion('sigmoid', [input], [output])

    def test_tanh(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.nn.tanh(input)
        self._test_conversion('tanh', [input], [output])

    def test_sin(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.math.sin(input)
        self._test_conversion('sin', [input], [output])

    def test_cos(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.math.cos(input)
        self._test_conversion('cos', [input], [output])

    def test_log(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.math.log(input)
        self._test_conversion('log', [input], [output])

    def test_exp(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.math.exp(input)
        self._test_conversion('exp', [input], [output])

    def test_neg(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.math.negative(input)
        self._test_conversion('neg', [input], [output])

    def test_floor(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.math.floor(input)
        self._test_conversion('floor', [input], [output])

    def test_ceil(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.math.ceil(input)
        self._test_conversion('ceil', [input], [output])

    def test_round(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.math.round(input)
        self._test_conversion('round', [input], [output])

    def test_sqr(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.math.square(input)
        self._test_conversion('sqr', [input], [output])

    def test_sqrt(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.math.sqrt(input)
        self._test_conversion('sqrt', [input], [output])

    def test_rsqrt(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.math.rsqrt(input)
        self._test_conversion('rsqrt', [input], [output])

    def test_not(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.bool)
        output = tf.math.logical_not(input)
        self._test_conversion('not', [input], [output])

    def test_abs(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.math.abs(input)
        self._test_conversion('abs', [input], [output])

    def test_leaky_relu(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.nn.leaky_relu(input, alpha=0.1)
        self._test_conversion('leaky_relu', [input], [output])

    # Element-wise binary arithmetic, logical, and comparison ops.
    def test_add(self):
        input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        input2 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.add(input1, input2)
        self._test_conversion('add', [input1, input2], [output])

    def test_sub(self):
        input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        input2 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.subtract(input1, input2)
        self._test_conversion('sub', [input1, input2], [output])

    def test_mul(self):
        input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        input2 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.multiply(input1, input2)
        self._test_conversion('mul', [input1, input2], [output])

    def test_div(self):
        input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        input2 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.divide(input1, input2)
        self._test_conversion('div', [input1, input2], [output])

    def test_pow(self):
        input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        input2 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.pow(input1, input2)
        self._test_conversion('pow', [input1, input2], [output])

    def test_min(self):
        input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        input2 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.minimum(input1, input2)
        self._test_conversion('min', [input1, input2], [output])

    def test_max(self):
        input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        input2 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.maximum(input1, input2)
        self._test_conversion('max', [input1, input2], [output])

    def test_and(self):
        input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.bool)
        input2 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.bool)
        output = tf.logical_and(input1, input2)
        self._test_conversion('and', [input1, input2], [output])

    def test_or(self):
        input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.bool)
        input2 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.bool)
        output = tf.logical_or(input1, input2)
        self._test_conversion('or', [input1, input2], [output])

    def test_lt(self):
        input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        input2 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.less(input1, input2)
        self._test_conversion('lt', [input1, input2], [output])

    def test_le(self):
        input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        input2 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.less_equal(input1, input2)
        self._test_conversion('le', [input1, input2], [output])

    def test_gt(self):
        input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        input2 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.greater(input1, input2)
        self._test_conversion('gt', [input1, input2], [output])

    def test_ge(self):
        input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        input2 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.greater_equal(input1, input2)
        self._test_conversion('ge', [input1, input2], [output])

    def test_eq(self):
        input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        input2 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.equal(input1, input2)
        self._test_conversion('eq', [input1, input2], [output])

    def test_ne(self):
        input1 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        input2 = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.not_equal(input1, input2)
        self._test_conversion('ne', [input1, input2], [output])

    # Reductions.
    def test_min_reduce(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.reduce_min(input, axis=3, keepdims=True)
        self._test_conversion('min_reduce', [input], [output])

    def test_max_reduce(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.reduce_max(input, axis=3, keepdims=True)
        self._test_conversion('max_reduce', [input], [output])

    def test_mean_reduce(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.reduce_mean(input, axis=3, keepdims=True)
        self._test_conversion('mean_reduce', [input], [output])

    def test_sum_reduce(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.reduce_sum(input, axis=3, keepdims=True)
        self._test_conversion('sum_reduce', [input], [output])

    def test_any_reduce(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.bool)
        output = tf.reduce_any(input, axis=3, keepdims=True)
        self._test_conversion('any_reduce', [input], [output])

    def test_argmin_reduce(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.argmin(input, axis=-1)
        # note: 'axgmin_reduce' typo kept as-is -- it is only an output file name
        self._test_conversion('axgmin_reduce', [input], [output])

    def test_argmax_reduce(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.argmax(input, axis=-1)
        # note: 'axgmax_reduce' typo kept as-is -- it is only an output file name
        self._test_conversion('axgmax_reduce', [input], [output])

    # Stacking / unstacking.
    def test_stack(self):
        input1 = tf.placeholder(shape=(4, 32, 32, 1), dtype=tf.float32)
        input2 = tf.placeholder(shape=(4, 32, 32, 1), dtype=tf.float32)
        input1 = tf.squeeze(input1, axis=3)
        input2 = tf.squeeze(input2, axis=3)
        output = tf.stack([input1, input2], axis=3)
        self._test_conversion('stack', [input1, input2], [output])

    def test_unstack(self):
        input = tf.placeholder(shape=(4, 32, 32, 2), dtype=tf.float32)
        [output1, output2] = tf.unstack(input, axis=3)
        output1 = tf.expand_dims(output1, axis=3)
        output2 = tf.expand_dims(output2, axis=3)
        self._test_conversion('unstack', [input], [output1, output2])

    # Composite / fused patterns and remaining ops.
    def test_conv_bias_relu_pool(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        filter = tf.constant(np.random.random(size=(5, 5, 3, 16)), dtype=tf.float32)
        bias = tf.constant(np.random.random(size=16,), dtype=tf.float32)
        mean = tf.constant(np.random.random(size=16, ), dtype=tf.float32)
        variance = tf.constant(np.random.random(size=16, ), dtype=tf.float32)
        scale = tf.constant(np.random.random(size=16, ), dtype=tf.float32)
        offset = tf.constant(np.random.random(size=16, ), dtype=tf.float32)
        filtered = tf.nn.conv2d(input, filter, strides=1, padding='SAME')
        biased = tf.nn.bias_add(filtered, bias)
        normed, _mean, _variance = tf.nn.fused_batch_norm(biased, scale, offset, mean, variance, is_training=False)
        relu = tf.nn.relu(normed)
        pooled = tf.nn.max_pool2d(relu, ksize=2, strides=2, padding='SAME')
        self._test_conversion('conv_bias_relu_pool', [input], [pooled])

    def test_softmax(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.nn.softmax(input)
        self._test_conversion('softmax', [input], [output])

    def test_matmul(self):
        input1 = tf.placeholder(shape=(10, 100), dtype=tf.float32)
        input2 = tf.placeholder(shape=(100, 20), dtype=tf.float32)
        output = tf.matmul(input1, input2)
        self._test_conversion('matmul', [input1, input2], [output])

    def test_matmul_trans(self):
        input1 = tf.placeholder(shape=(10, 100), dtype=tf.float32)
        input2 = tf.placeholder(shape=(20, 100), dtype=tf.float32)
        output = tf.matmul(input1, input2, transpose_b=True)
        self._test_conversion('matmul-trans', [input1, input2], [output])

    def test_l2_normalize(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.math.l2_normalize(input, axis=-1)
        self._test_conversion('l2_normalize', [input], [output])

    def test_lrn(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.nn.local_response_normalization(input, depth_radius=3)
        self._test_conversion('lrn', [input], [output])

    def test_upsample_nearest(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.image.resize_nearest_neighbor(input, size=(64, 64))
        self._test_conversion('upsample-nearest', [input], [output])

    def test_downsample_nearest(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.image.resize_nearest_neighbor(input, size=(16, 16))
        self._test_conversion('downsample-nearest', [input], [output])

    def test_upsample_linear(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.image.resize_bilinear(input, size=(64, 64))
        self._test_conversion('upsample-linear', [input], [output])

    def test_select(self):
        cond = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.bool)
        left = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        right = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.where(cond, left, right)
        self._test_conversion('select', [cond, left, right], [output])

    def test_batch_norm(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        mean = tf.constant(np.random.random((3,)), dtype=tf.float32)
        variance = tf.constant(np.random.random((3,)), dtype=tf.float32)
        scale = tf.constant(np.random.random((3,)), dtype=tf.float32)
        offset = tf.constant(np.random.random((3,)), dtype=tf.float32)
        output = tf.nn.batch_normalization(input, scale=scale, offset=offset, mean=mean, variance=variance,
                                           variance_epsilon=1e-5)
        self._test_conversion('batch_norm', [input], [output])

    def test_add_n(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.float32)
        output = tf.add_n([input, input, input])
        self._test_conversion('add_n', [input], [output])

    def test_cast(self):
        input = tf.placeholder(shape=(4, 32, 32, 3), dtype=tf.int32)
        output = tf.cast(input, tf.float32)
        self._test_conversion('cast', [input], [output])


# Whole-network round-trip tests; skipped unless a folder of .tflite models
# was provided via the UNITTEST_FOLDER environment variable.
@unittest.skipIf(TestEnv._network_folder is None or not os.path.isdir(TestEnv._network_folder), "no network test folder provided")
class NetworkTestCases(TestEnv):
    def test_inception_v1(self):
        self._test_conversion_from_file(self._network_folder + 'inception_v1.tflite')

    def test_inception_v2(self):
        self._test_conversion_from_file(self._network_folder + 'inception_v2.tflite')

    def test_inception_v3(self):
        self._test_conversion_from_file(self._network_folder + 'inception_v3.tflite')

    def test_inception_v4(self):
        self._test_conversion_from_file(self._network_folder + 'inception_v4.tflite')

    def test_mobilenet_v1(self):
        self._test_conversion_from_file(self._network_folder + 'mobilenet_v1.tflite')

    def test_mobilenet_v2(self):
        self._test_conversion_from_file(self._network_folder + 'mobilenet_v2.tflite')