Repository: KhronosGroup/NNEF-Tools
Branch: main
Commit: 6ca6fd2a0e3c
Files: 269
Total size: 1.9 MB
Directory structure:
gitextract_brhyl024/
├── .github/
│ └── workflows/
│ ├── build_nnef.yml
│ └── build_nnef_tools.yml
├── .gitignore
├── CODE_OF_CONDUCT.md
├── README.md
├── _config.yml
├── fix_nnef_binary_size.py
├── models/
│ └── README.md
├── nnef-pyproject/
│ ├── README.md
│ ├── cpp_api.md
│ ├── examples/
│ │ ├── alexnet.txt
│ │ ├── googlenet.txt
│ │ ├── resnet.txt
│ │ ├── samples/
│ │ │ ├── sample.py
│ │ │ ├── sample_ext.py
│ │ │ └── sample_gen.py
│ │ └── vgg.txt
│ ├── nnef/
│ │ ├── __init__.py
│ │ ├── binary.py
│ │ ├── cpp/
│ │ │ ├── CMakeLists.txt
│ │ │ ├── include/
│ │ │ │ ├── cnnef.h
│ │ │ │ ├── nnef/
│ │ │ │ │ ├── common/
│ │ │ │ │ │ ├── binary.h
│ │ │ │ │ │ ├── dictionary.h
│ │ │ │ │ │ ├── error.h
│ │ │ │ │ │ ├── lexer.h
│ │ │ │ │ │ ├── parser.h
│ │ │ │ │ │ ├── prototype.h
│ │ │ │ │ │ ├── shapes.h
│ │ │ │ │ │ ├── typespec.h
│ │ │ │ │ │ ├── typeutils.h
│ │ │ │ │ │ └── value.h
│ │ │ │ │ ├── comp/
│ │ │ │ │ │ ├── comp_parser.h
│ │ │ │ │ │ ├── evaluation.h
│ │ │ │ │ │ ├── expression.h
│ │ │ │ │ │ ├── fragment.h
│ │ │ │ │ │ └── stdlib_source.h
│ │ │ │ │ ├── flat/
│ │ │ │ │ │ ├── flat_parser.h
│ │ │ │ │ │ ├── quant_parser.h
│ │ │ │ │ │ └── stdlib_protos.h
│ │ │ │ │ └── runtime/
│ │ │ │ │ ├── execution.h
│ │ │ │ │ ├── ndrange.h
│ │ │ │ │ └── operations.h
│ │ │ │ └── nnef.h
│ │ │ ├── infer.cpp
│ │ │ ├── sample.cpp
│ │ │ └── src/
│ │ │ ├── cnnef.cpp
│ │ │ └── nnef.cpp
│ │ ├── nnef.cpp
│ │ ├── parser.py
│ │ ├── printer.py
│ │ ├── shapes.py
│ │ └── validate.py
│ ├── package_info.md
│ ├── pyproject.toml
│ ├── setup.py
│ ├── stdlib.nnef
│ └── tests/
│ └── test.py
└── nnef_tools-pyproject/
├── LICENSE
├── README.md
├── custom/
│ ├── composite_export_example.py
│ ├── custom_operators_example.py
│ ├── custom_optimizers_example.py
│ ├── custom_transforms_example.py
│ ├── onnx_custom_export_example.py
│ └── onnx_custom_transforms_example.py
├── nnef_tools/
│ ├── __init__.py
│ ├── conversion/
│ │ ├── __init__.py
│ │ ├── converter.py
│ │ ├── nnef_to_onnx.py
│ │ ├── nnef_to_tf.py
│ │ ├── nnef_to_tflite.py
│ │ ├── onnx_to_nnef.py
│ │ ├── tf_to_nnef.py
│ │ └── tflite_to_nnef.py
│ ├── convert.py
│ ├── execute.py
│ ├── execution/
│ │ ├── __init__.py
│ │ └── tvm/
│ │ ├── __init__.py
│ │ └── nnef_frontend/
│ │ ├── __init__.py
│ │ ├── relax/
│ │ │ ├── __init__.py
│ │ │ ├── nnef_frontend.py
│ │ │ └── nnef_ops.py
│ │ └── relay/
│ │ ├── __init__.py
│ │ ├── from_nnef.py
│ │ └── nnef_ops.py
│ ├── generate.py
│ ├── gmac.py
│ ├── image_tensor.py
│ ├── interpreter/
│ │ ├── __init__.py
│ │ └── pytorch/
│ │ ├── __init__.py
│ │ ├── nnef_module.py
│ │ └── nnef_operators.py
│ ├── io/
│ │ ├── __init__.py
│ │ ├── caffe2/
│ │ │ ├── __init__.py
│ │ │ ├── caffe/
│ │ │ │ ├── __init__.py
│ │ │ │ └── proto/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── caffe.proto
│ │ │ │ └── caffe_pb2.py
│ │ │ ├── reader.py
│ │ │ └── writer.py
│ │ ├── nnef/
│ │ │ ├── __init__.py
│ │ │ ├── helpers.py
│ │ │ ├── reader.py
│ │ │ └── writer.py
│ │ ├── onnx/
│ │ │ ├── __init__.py
│ │ │ ├── reader.py
│ │ │ └── writer.py
│ │ └── tf/
│ │ ├── __init__.py
│ │ ├── graphdef/
│ │ │ ├── __init__.py
│ │ │ ├── composite.py
│ │ │ ├── protobuf.py
│ │ │ ├── reader.py
│ │ │ ├── utils.py
│ │ │ └── writer.py
│ │ └── lite/
│ │ ├── __init__.py
│ │ ├── flatbuffers/
│ │ │ ├── AbsOptions.py
│ │ │ ├── ActivationFunctionType.py
│ │ │ ├── AddNOptions.py
│ │ │ ├── AddOptions.py
│ │ │ ├── ArgMaxOptions.py
│ │ │ ├── ArgMinOptions.py
│ │ │ ├── BatchMatMulOptions.py
│ │ │ ├── BatchToSpaceNDOptions.py
│ │ │ ├── BidirectionalSequenceLSTMOptions.py
│ │ │ ├── BidirectionalSequenceRNNOptions.py
│ │ │ ├── Buffer.py
│ │ │ ├── BuiltinOperator.py
│ │ │ ├── BuiltinOptions.py
│ │ │ ├── CallOptions.py
│ │ │ ├── CastOptions.py
│ │ │ ├── CombinerType.py
│ │ │ ├── ConcatEmbeddingsOptions.py
│ │ │ ├── ConcatenationOptions.py
│ │ │ ├── Conv2DOptions.py
│ │ │ ├── CosOptions.py
│ │ │ ├── CustomOptionsFormat.py
│ │ │ ├── CustomQuantization.py
│ │ │ ├── DensifyOptions.py
│ │ │ ├── DepthToSpaceOptions.py
│ │ │ ├── DepthwiseConv2DOptions.py
│ │ │ ├── DequantizeOptions.py
│ │ │ ├── DimensionMetadata.py
│ │ │ ├── DimensionType.py
│ │ │ ├── DivOptions.py
│ │ │ ├── EmbeddingLookupSparseOptions.py
│ │ │ ├── EqualOptions.py
│ │ │ ├── ExpOptions.py
│ │ │ ├── ExpandDimsOptions.py
│ │ │ ├── FakeQuantOptions.py
│ │ │ ├── FillOptions.py
│ │ │ ├── FloorDivOptions.py
│ │ │ ├── FloorModOptions.py
│ │ │ ├── FullyConnectedOptions.py
│ │ │ ├── FullyConnectedOptionsWeightsFormat.py
│ │ │ ├── GatherNdOptions.py
│ │ │ ├── GatherOptions.py
│ │ │ ├── GreaterEqualOptions.py
│ │ │ ├── GreaterOptions.py
│ │ │ ├── HardSwishOptions.py
│ │ │ ├── IfOptions.py
│ │ │ ├── Int32Vector.py
│ │ │ ├── L2NormOptions.py
│ │ │ ├── LSHProjectionOptions.py
│ │ │ ├── LSHProjectionType.py
│ │ │ ├── LSTMKernelType.py
│ │ │ ├── LSTMOptions.py
│ │ │ ├── LeakyReluOptions.py
│ │ │ ├── LessEqualOptions.py
│ │ │ ├── LessOptions.py
│ │ │ ├── LocalResponseNormalizationOptions.py
│ │ │ ├── LogSoftmaxOptions.py
│ │ │ ├── LogicalAndOptions.py
│ │ │ ├── LogicalNotOptions.py
│ │ │ ├── LogicalOrOptions.py
│ │ │ ├── MatrixDiagOptions.py
│ │ │ ├── MatrixSetDiagOptions.py
│ │ │ ├── MaximumMinimumOptions.py
│ │ │ ├── Metadata.py
│ │ │ ├── MirrorPadMode.py
│ │ │ ├── MirrorPadOptions.py
│ │ │ ├── Model.py
│ │ │ ├── MulOptions.py
│ │ │ ├── NegOptions.py
│ │ │ ├── NonMaxSuppressionV4Options.py
│ │ │ ├── NonMaxSuppressionV5Options.py
│ │ │ ├── NotEqualOptions.py
│ │ │ ├── OneHotOptions.py
│ │ │ ├── Operator.py
│ │ │ ├── OperatorCode.py
│ │ │ ├── PackOptions.py
│ │ │ ├── PadOptions.py
│ │ │ ├── PadV2Options.py
│ │ │ ├── Padding.py
│ │ │ ├── Pool2DOptions.py
│ │ │ ├── PowOptions.py
│ │ │ ├── QuantizationDetails.py
│ │ │ ├── QuantizationParameters.py
│ │ │ ├── QuantizeOptions.py
│ │ │ ├── RNNOptions.py
│ │ │ ├── RangeOptions.py
│ │ │ ├── RankOptions.py
│ │ │ ├── ReducerOptions.py
│ │ │ ├── ReshapeOptions.py
│ │ │ ├── ResizeBilinearOptions.py
│ │ │ ├── ResizeNearestNeighborOptions.py
│ │ │ ├── ReverseSequenceOptions.py
│ │ │ ├── ReverseV2Options.py
│ │ │ ├── SVDFOptions.py
│ │ │ ├── ScatterNdOptions.py
│ │ │ ├── SegmentSumOptions.py
│ │ │ ├── SelectOptions.py
│ │ │ ├── SelectV2Options.py
│ │ │ ├── SequenceRNNOptions.py
│ │ │ ├── ShapeOptions.py
│ │ │ ├── SkipGramOptions.py
│ │ │ ├── SliceOptions.py
│ │ │ ├── SoftmaxOptions.py
│ │ │ ├── SpaceToBatchNDOptions.py
│ │ │ ├── SpaceToDepthOptions.py
│ │ │ ├── SparseIndexVector.py
│ │ │ ├── SparseToDenseOptions.py
│ │ │ ├── SparsityParameters.py
│ │ │ ├── SplitOptions.py
│ │ │ ├── SplitVOptions.py
│ │ │ ├── SquareOptions.py
│ │ │ ├── SquaredDifferenceOptions.py
│ │ │ ├── SqueezeOptions.py
│ │ │ ├── StridedSliceOptions.py
│ │ │ ├── SubGraph.py
│ │ │ ├── SubOptions.py
│ │ │ ├── Tensor.py
│ │ │ ├── TensorType.py
│ │ │ ├── TileOptions.py
│ │ │ ├── TopKV2Options.py
│ │ │ ├── TransposeConvOptions.py
│ │ │ ├── TransposeOptions.py
│ │ │ ├── Uint16Vector.py
│ │ │ ├── Uint8Vector.py
│ │ │ ├── UnidirectionalSequenceLSTMOptions.py
│ │ │ ├── UniqueOptions.py
│ │ │ ├── UnpackOptions.py
│ │ │ ├── WhereOptions.py
│ │ │ ├── WhileOptions.py
│ │ │ ├── ZerosLikeOptions.py
│ │ │ ├── __init__.py
│ │ │ └── schema.fbs
│ │ ├── helpers.py
│ │ ├── reader.py
│ │ └── writer.py
│ ├── model/
│ │ ├── __init__.py
│ │ ├── graph.py
│ │ └── utils.py
│ ├── operation_mapping.md
│ ├── optimization/
│ │ ├── __init__.py
│ │ ├── nnef_optimizer.py
│ │ ├── onnx_optimizer.py
│ │ ├── tf_optimizer.py
│ │ └── tflite_optimizer.py
│ ├── quantize.py
│ ├── random_tensor.py
│ ├── utils/
│ │ ├── __init__.py
│ │ ├── stdio.py
│ │ └── types.py
│ └── visualize.py
├── package_info.md
├── pyproject.toml
└── tests/
└── conversion/
├── graphdef_test.py
├── onnx_test.py
└── tflite_test.py
================================================
FILE CONTENTS
================================================
================================================
FILE: .github/workflows/build_nnef.yml
================================================
name: Build, test and publish nnef
on:
push:
tags:
- 'nnef-v[0-9]+.[0-9]+.[0-9]+'
jobs:
build_wheels:
name: Build nnef wheels on ${{ matrix.os }}
runs-on: ${{ matrix.os }}
strategy:
matrix:
os:
- ubuntu-latest
- windows-latest
- macos-latest
- macos-14
steps:
- uses: actions/checkout@v4
- name: Build wheels for nnef
uses: pypa/cibuildwheel@v3.4.0
with:
package-dir: nnef-pyproject
output-dir: dist/
config-file: nnef-pyproject/pyproject.toml
env:
CIBW_BUILD: "cp38-* cp39-* cp310-* cp311-* cp312-* cp313-* cp314-*"
- uses: actions/upload-artifact@v4
with:
name: dist-${{ matrix.os }}-${{ github.ref_name }}
path: ./dist/*.whl
build_sdist:
name: Build nnef sdist
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.7"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install build
- name: Build package
run: python -m build ./nnef-pyproject/ --sdist --outdir ./dist
- uses: actions/upload-artifact@v4
with:
name: dist-${{ github.ref_name }}
path: ./dist/*.tar.gz
publish:
name: Publish nnef
runs-on: ubuntu-latest
needs: [build_wheels, build_sdist]
steps:
- name: Download dist/
uses: actions/download-artifact@v4
with:
path: dist
merge-multiple: true
- name: publish to PyPI
uses: pypa/gh-action-pypi-publish@v1.13.0
with:
user: __token__
password: ${{ secrets.PYPI_TOKEN }}
================================================
FILE: .github/workflows/build_nnef_tools.yml
================================================
name: Build, test and publish nnef_tools
on:
push:
tags:
- 'nnef_tools-v[0-9]+.[0-9]+.[0-9]+'
jobs:
build_nnef_tools:
name: Build and publish nnef_tools
runs-on: "ubuntu-latest"
steps:
- uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.10"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install build pytest pytest-xdist
- name: Build package
run: python -m build ./nnef_tools-pyproject/ --outdir ./dist/
- name: Publish artifacts
uses: actions/upload-artifact@v4
with:
name: dist-${{ github.ref_name }}
path: ./dist/*
- name: Install
run: python -m pip install ./nnef-pyproject/ ./nnef_tools-pyproject[full]
- name: Test
run: python -m pytest ./nnef_tools-pyproject/tests/ -n auto
- name: Publish to PyPI
uses: pypa/gh-action-pypi-publish@v1.13.0
with:
user: __token__
password: ${{ secrets.PYPI_TOKEN }}
================================================
FILE: .gitignore
================================================
*.pyc
__pycache__
.idea
/_models
/out
/*/build
/*/dist
*.egg-info
================================================
FILE: CODE_OF_CONDUCT.md
================================================
A reminder that this issue tracker is managed by the Khronos Group. Interactions here should follow the Khronos Code of Conduct (https://www.khronos.org/developers/code-of-conduct), which prohibits aggressive or derogatory language. Please keep the discussion friendly and civil.
================================================
FILE: README.md
================================================
[](https://opensource.org/licenses/Apache-2.0)

**Development of the latest tools related to version 2.0 of the NNEF specification draft can be found on branch [v2.0](https://github.com/KhronosGroup/NNEF-Tools/tree/v2.0).**
# NNEF-Tools
NNEF reduces machine learning deployment fragmentation by enabling a rich mix of neural network training tools and inference engines to be used by applications across a diverse range of devices and platforms.
This repository contains tools to generate and consume NNEF documents, such as a parser (C++ and Python) that can be included in consumer applications and converters for deep learning frameworks.
* [NNEF Model Zoo](models#nnef-model-zoo)
* [NNEF Tools](nnef_tools-pyproject#nnef-tools)
* [NNEF Parser](nnef-pyproject#nnef-parser---repository)
## NNEF Model Zoo
A **Model Zoo** is now available; the 'models' folder contains a variety of [NNEF models](models#nnef-model-zoo) converted from various sources.
## NNEF Tools
[NNEF Tools](nnef_tools-pyproject#nnef-tools) folder contains tools to convert pre-trained models in `tensorFlow`/`caffe`/`caffe2`/`ONNX` to NNEF format.
## NNEF Parser
[NNEF Parser](nnef-pyproject#nnef-parser---repository) folder contains `C++` and `Python` source code for a sample NNEF graph parser.
## Release Notes
### Added new operators in spec version 1.0.4 (06.15.2021)
Following the update of the NNEF specification to version 1.0.4, conversion for the corresponding operators has been added. Furthermore, error handling of non-convertible models has been greatly enhanced with error messages detailing the exact cause of failure listed for all non-convertible operations before conversion is started.
### Reworked NNEF Tools (10.21.2020)
The tools for converting models to NNEF and transforming NNEF models have been thoroughly reworked to make them more robust and unified and easier to maintain. The basic functionality of the main scripts has been kept, however their parameterization has been simplified and unified in some places; please refer to the readme and the help (`-h` option) of the respective scripts for more details. The scripts cover the following major areas of functionality: model conversion, optimization, execution and visualization. A GMAC calculator is also provided, and further utility scripts may be added in the future.
### Change in quantization information in binary files (06.12.2020)
According to the change in version 1.0.3 of the NNEF specification, quantization algorithm information has been deprecated in the tensor binary file format. The tensor binary only stores the item-type of the tensor data, and the binary reader does not return quantization information (also used to be called 'compression' info). Furthermore, the mapping between stored item-types and data-types in the structural description has been clarified, so that the reader of a tensor binary can tell what the data-type of the read tensor is. This enhances the reader as it can now properly map the binary data to C++ or Python numpy types upon reading. The C++ code has been updated to perform such a mapping, and is now able to return a typed array instead of just plain bytes.
### Change in shape inference compared to previous version (04.10.2019)
According to a change in version 1.0.1 of the NNEF specification, the `shape_of` operator in NNEF syntax is deprecated, and the parser does not support it. This enables the decoupling of parsing from shape inference, allowing parsing to succeed even if shape information is not available for all operations, such as custom defined operations before the graph definition. Shape inference can still be run after training, furthermore it can be customized (via function pointers) for custom defined operations.
### TENSOR BINARY BUG FIX (10.19.2018)
There was a bug in the Python code that reads/writes the tensor binary files (the header contained 4 extra padding bytes therefore not conforming to the spec). The code has been updated to read/write and _check_ the proper header size. As a consequence, any files written out with the code that contained the bug cannot be read back with the updated code. To aid the usage of such existing files, a script was created called `fix_nnef_binary_size.py` that can be used to remove the excess 4 bytes from existing NNEF files. The script is located in the root folder of this repo, it has no dependencies (not even the NNEF parser). It can be run on the main folder of an NNEF model, and it fixes all binary files in the folder. In case one runs it on an NNEF model that does not contain the bug, it does nothing. It can be used as follows:
```
python fix_nnef_binary_size.py my_nnef_model_folder
```
Such an invocation fixes the files in place. Optionally, a second argument can be supplied to the script to write the fixed files to a different output path. In this case, the script copies all non-binary files (such as graph.nnef) to the target folder, so the resulting folder contains the whole valid model.
================================================
FILE: _config.yml
================================================
theme: jekyll-theme-slate
================================================
FILE: fix_nnef_binary_size.py
================================================
# Copyright (c) 2017 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import struct
def fix_nnef_binary(in_fn, out_fn):
    # Remove the 4 excess padding bytes that a buggy writer inserted after the
    # 128-byte NNEF tensor binary header. Returns True if the file matched the
    # buggy layout and was rewritten to out_fn, False if it was left untouched
    # (wrong magic/version, too short, or already the correct size).
    header_size = 128
    with open(in_fn, 'rb') as file:
        file_size = os.fstat(file.fileno()).st_size
        header = file.read(header_size)
        excess = file.read(4)  # the 4 spurious padding bytes being dropped
        data = file.read()
    # guard against files too short to even contain a full header
    if len(header) < header_size:
        return False
    [magic1, magic2, major, minor] = bytearray(header[:4])
    if magic1 != 0x4E or magic2 != 0xEF or major != 1 or minor != 0:
        return False
    # header fields are little-endian per the NNEF binary format; '<i' makes
    # the unpack independent of the host's native byte order
    data_length, = struct.unpack('<i', header[4:8])
    # only rewrite files that are exactly 4 bytes larger than they should be
    if file_size != header_size + data_length + 4:
        return False
    with open(out_fn, 'wb') as file:
        file.write(header)
        file.write(data)
    return True
def fix_nnef_binaries(in_path, out_path):
    # Walk the model folder at in_path, fixing every '.dat' tensor binary and
    # mirroring all other (non-hidden) files into out_path so the output folder
    # contains a complete model. When out_path == in_path, files are fixed
    # in place and non-binary files are left untouched.
    for root, dirs, files in os.walk(in_path):
        for filename in files:
            if filename.startswith('.'):
                continue  # skip hidden files (e.g. .DS_Store)
            in_fn = os.path.join(root, filename)
            out_fn = os.path.join(out_path, os.path.relpath(in_fn, in_path))
            # the output subfolder may not exist yet when writing to a
            # separate out_path; create it so the open/copy below cannot fail
            out_dir = os.path.dirname(out_fn)
            if out_dir and not os.path.isdir(out_dir):
                os.makedirs(out_dir)
            if os.path.splitext(filename)[1] == '.dat':
                if fix_nnef_binary(in_fn, out_fn):
                    print('Fixed file: ' + in_fn)
                elif out_fn != in_fn:
                    # binary did not need fixing; still copy it so the
                    # output folder holds the whole model
                    with open(in_fn, 'rb') as in_file, open(out_fn, 'wb') as out_file:
                        out_file.write(in_file.read())
            elif out_fn != in_fn:
                # copy non-binary files (such as graph.nnef) verbatim
                with open(in_fn, 'rb') as in_file, open(out_fn, 'wb') as out_file:
                    out_file.write(in_file.read())
if __name__ == "__main__":
    # Usage: fix_nnef_binary_size.py <input-path> [<output-path>]
    # With a single argument the files are fixed in place.
    argc = len(sys.argv)
    if argc < 2:
        print('input path must be provided')
        exit(-1)
    elif argc > 3:
        print('too many arguments provided')
        exit(-1)
    source = sys.argv[1]
    target = sys.argv[2] if argc == 3 else source
    fix_nnef_binaries(in_path=source, out_path=target)
================================================
FILE: models/README.md
================================================
NNEF model zoo
==============
The following collection of models were compiled by running the converter tools in this repository on publicly available models. Each entry provides a link to the original and the converted model.
* TensorFlow models have been acquired from [https://www.tensorflow.org/lite/guide/hosted_models]
* ONNX models have been acquired from [https://github.com/onnx/models]
* Caffe models have been acquired from [https://github.com/BVLC/caffe/wiki/Model-Zoo]
* Caffe2 models have been acquired from [https://github.com/caffe2/models]
AlexNet
-------
_Floating point models_
Name | Size | Original | Converted
--- | --- | --- | ---
BVLC AlexNet | 244 Mb | [Caffe](https://github.com/BVLC/caffe/tree/master/models/bvlc_alexnet) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/bvlc_alexnet.caffemodel.nnef.tgz)
BVLC AlexNet | 244 Mb | [ONNX](https://s3.amazonaws.com/download.onnx/models/opset_9/bvlc_alexnet.tar.gz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/bvlc_alexnet.onnx.nnef.tgz)
VGG
---
_Floating point models_
Name | Size | Original | Converted
--- | --- | --- | ---
VGG-16 | 553.6 Mb | [Caffe](https://gist.github.com/ksimonyan/211839e770f7b538e2d8) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/vgg16.caffemodel.nnef.tgz)
VGG-19 | 574.8 Mb | [Caffe](https://gist.github.com/ksimonyan/3785162f95cd2d5fee77) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/vgg19.caffemodel.nnef.tgz)
VGG-16 | 527.8 Mb | [ONNX](https://s3.amazonaws.com/onnx-model-zoo/vgg/vgg16/vgg16.onnx) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/vgg16.onnx.nnef.tgz)
VGG-19 | 548.1 Mb | [ONNX](https://s3.amazonaws.com/onnx-model-zoo/vgg/vgg19/vgg19.onnx) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/vgg19.onnx.nnef.tgz)
GoogleNet
---------
_Floating point models_
Name | Size | Original | Converted
--- | --- | --- | ---
Inception v1 | 28 Mb | [Caffe2](https://github.com/caffe2/models/tree/master/inception_v1) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/inception_v1.caffe2.nnef.tgz)
Inception v1 | 28 Mb | [ONNX](https://s3.amazonaws.com/download.onnx/models/opset_9/inception_v1.tar.gz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/inception_v1.onnx.nnef.tgz)
Inception v2 | 45 Mb | [Caffe2](https://github.com/caffe2/models/tree/master/inception_v2) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/inception_v2.caffe2.nnef.tgz)
Inception v2 | 45 Mb | [ONNX](https://s3.amazonaws.com/download.onnx/models/opset_9/inception_v2.tar.gz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/inception_v2.onnx.nnef.tgz)
Inception v3 | 95.3 Mb | [TensorFlow](https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v3_2018_04_27.tgz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/inception_v3.tfpb.nnef.tgz)
Inception v4 | 170.7 Mb | [TensorFlow](https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v4_2018_04_27.tgz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/inception_v4.tfpb.nnef.tgz)
BVLC GoogleNet | 28 Mb | [Caffe](https://github.com/BVLC/caffe/tree/master/models/bvlc_googlenet) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/bvlc_googlenet.caffemodel.nnef.tgz)
BVLC GoogleNet | 28 Mb | [ONNX](https://s3.amazonaws.com/download.onnx/models/opset_9/bvlc_googlenet.tar.gz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/bvlc_googlenet.onnx.nnef.tgz)
_Quantized models_
Name | Size | Original | Converted
--- | --- | --- | ---
Inception v1 | 6.4 Mb | [TensorFlow-Lite](http://download.tensorflow.org/models/inception_v1_224_quant_20181026.tgz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/inception_v1_quant.tflite.nnef.tgz)
Inception v2 | 11 Mb | [TensorFlow-Lite](http://download.tensorflow.org/models/inception_v2_224_quant_20181026.tgz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/inception_v2_quant.tflite.nnef.tgz)
Inception v3 | 23 Mb | [TensorFlow-Lite](http://download.tensorflow.org/models/tflite_11_05_08/inception_v3_quant.tgz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/inception_v3_quant.tflite.nnef.tgz)
Inception v4 | 41 Mb | [TensorFlow-Lite](http://download.tensorflow.org/models/inception_v4_299_quant_20181026.tgz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/inception_v4_quant.tflite.nnef.tgz)
ResNet
------
_Floating point models_
Name | Size | Original | Converted
--- | --- | --- | ---
Resnet v1-18 | 44.7 Mb | [ONNX](https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet18v1/resnet18v1.onnx) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/resnet_v1_18.onnx.nnef.tgz)
Resnet v1-34 | 83.3 Mb | [ONNX](https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet34v1/resnet34v1.onnx) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/resnet_v1_34.onnx.nnef.tgz)
Resnet v1-50 | 97.8 Mb | [ONNX](https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet50v1/resnet50v1.onnx) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/resnet_v1_50.onnx.nnef.tgz)
Resnet v1-101 | 170.6 Mb | [ONNX](https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet101v1/resnet101v1.onnx) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/resnet_v1_101.onnx.nnef.tgz)
Resnet v1-152 | 242.3 Mb | [Caffe](https://github.com/KaimingHe/deep-residual-networks) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/resnet_v1_152.caffemodel.nnef.tgz)
Resnet v2-18 | 44.6 Mb | [ONNX](https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet18v2/resnet18v2.onnx) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/resnet_v2_18.onnx.nnef.tgz)
Resnet v2-34 | 83.2 Mb | [ONNX](https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet34v2/resnet34v2.onnx) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/resnet_v2_34.onnx.nnef.tgz)
Resnet v2-50 | 97.7 Mb | [ONNX](https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet50v2/resnet50v2.onnx) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/resnet_v2_50.onnx.nnef.tgz)
Resnet v2-101 | 170.4 Mb | [ONNX](https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet101v2/resnet101v2.onnx) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/resnet_v2_101.onnx.nnef.tgz)
Inception-Resnet v2 | 121 Mb | [TensorFlow](https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_resnet_v2_2018_04_27.tgz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/inception_resnet_v2.tfpb.nnef.tgz)
MobileNet
---------
_Floating point models_
Name | Size | Original | Converted
--- | --- | --- | ---
MobileNet v1-1.0 | 16.9 Mb | [TensorFlow](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_224.tgz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/mobilenet_v1_1.0.tfpb.nnef.tgz)
MobileNet v1-1.0 | 17.2 Mb | [Caffe](https://github.com/shicai/MobileNet-Caffe) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/mobilenet_v1_1.0.caffemodel.nnef.tgz)
MobileNet v2-1.0 | 14.0 Mb | [TensorFlow](http://download.tensorflow.org/models/tflite_11_05_08/mobilenet_v2_1.0_224.tgz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/mobilenet_v2_1.0.tfpb.nnef.tgz)
MobileNet v2-1.0 | 14.4 Mb | [Caffe](https://github.com/shicai/MobileNet-Caffe) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/mobilenet_v2_1.0.caffemodel.nnef.tgz)
MobileNet v2-1.0 | 13.6 Mb | [ONNX](https://s3.amazonaws.com/onnx-model-zoo/mobilenet/mobilenetv2-1.0/mobilenetv2-1.0.onnx) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/mobilenet_v2_1.0.onnx.nnef.tgz)
_Quantized models_
Name | Size | Original | Converted
--- | --- | --- | ---
MobileNet v1-1.0 | 4.3 Mb | [TensorFlow-Lite](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/mobilenet_v1_1.0_quant.tflite.nnef.tgz)
MobileNet v2-1.0 | 3.4 Mb | [TensorFlow-Lite](http://download.tensorflow.org/models/tflite_11_05_08/mobilenet_v2_1.0_224_quant.tgz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/mobilenet_v2_1.0_quant.tflite.nnef.tgz)
SqueezeNet
----------
_Floating point models_
Name | Size | Original | Converted
--- | --- | --- | ---
SqueezeNet | 5.0 Mb | [TensorFlow](https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/squeezenet_2018_04_27.tgz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/squeezenet.tfpb.nnef.tgz)
SqueezeNet 1.0 | 4.7 Mb | [ONNX](https://s3.amazonaws.com/download.onnx/models/opset_9/squeezenet.tar.gz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/squeezenet_v1.0.onnx.nnef.tgz)
SqueezeNet 1.1 | 4.7 Mb | [ONNX](https://s3.amazonaws.com/onnx-model-zoo/squeezenet/squeezenet1.1/squeezenet1.1.onnx) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/squeezenet_v1.1.onnx.nnef.tgz)
SqueezeNet 1.0 | 4.7 Mb | [Caffe](https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.0) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/squeezenet_v1.0.caffemodel.nnef.tgz)
SqueezeNet 1.1 | 4.7 Mb | [Caffe](https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/squeezenet_v1.1.caffemodel.nnef.tgz)
ShuffleNet
----------
_Floating point models_
Name | Size | Original | Converted
--- | --- | --- | ---
ShuffleNet | 5.3 Mb | [ONNX](https://s3.amazonaws.com/download.onnx/models/opset_9/shufflenet.tar.gz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/shufflenet.onnx.nnef.tgz)
NASNet
------
_Floating point models_
Name | Size | Original | Converted
--- | --- | --- | ---
NasNet mobile | 21.4 Mb | [TensorFlow](https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/nasnet_mobile_2018_04_27.tgz) | [NNEF](https://sfo2.digitaloceanspaces.com/nnef-public/nasnet_mobile.tfpb.nnef.tgz)
================================================
FILE: nnef-pyproject/README.md
================================================
NNEF Parser - repository
===================
Introduction
------------
The code consists of a C++ library that contains two example parsers (one for
flat and one for compositional NNEF syntax). This library can be used to build tools
that require parsing NNEF files. It requires a C++11 compatible compiler.
The Python code wraps the C++ parser and adds some further utilities to load and save NNEF documents easily. It also contains a script to validate NNEF documents (`validate.py`) and optionally print a lowered version of the graph. If the tool encounters an invalid document, it prints the first error and stops parsing. Type `python validate.py -h` to show the usage help.
C++ Library
-----------
Documentation of the library: [cpp_api.md](cpp_api.md)
Python Package
--------------
Documentation of the Python package: [package_info.md](package_info.md)
================================================
FILE: nnef-pyproject/cpp_api.md
================================================
Building the C++ library
------------------------
The C++ library can be compiled with cmake.
The `examples/samples/sample.cpp` contains a minimal example that showcases the use of the parser.
Example of build commands under Linux:
````
$ cd nnef/cpp
$ mkdir build && cd build
$ cmake ..
$ make
````
Using the C++ library
---------------------
Using the C++ parser is as simple as follows:
```
#include "nnef.h"
nnef::Graph graph;
std::string error;
bool success = nnef::load_graph("path/to/NNEF/folder", graph, error);
```
Upon success, the graph structure is filled, while in case of an error, the error string is filled. The fields inside the graph structure, and further parameters to the `load_graph` function, are documented in `nnef.h`. After the graph is successfully loaded, shape inference can be performed in a subsequent call if required:
```
success = nnef::infer_shapes(graph, error);
```
Upon success, the shape fields of tensors are filled in.
================================================
FILE: nnef-pyproject/examples/alexnet.txt
================================================
# Copyright (c) 2017 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
version 1.0;
# AlexNet (alexnet_v2) in flat NNEF syntax: five conv+relu stages interleaved
# with three max-pools, followed by three convolutions (fc6-fc8) acting as the
# fully-connected classifier head. Weights are loaded from the binary data
# files referenced by the 'label' arguments.
# NOTE(review): padding = [] appears to denote automatically derived padding
# per the NNEF spec — confirm against the parser's shape inference.
graph alexnet( input ) -> ( output )
{
# NCHW input image: batch 1, 3 channels, 224x224 spatial
input = external(shape = [1, 3, 224, 224]);
# conv1: 64 filters, 11x11 kernel, stride 4, explicit zero padding
kernel1 = variable(shape = [64, 3, 11, 11], label = 'alexnet_v2/conv1/kernel');
bias1 = variable(shape = [1, 64], label = 'alexnet_v2/conv1/bias');
conv1 = conv(input, kernel1, bias1, padding = [(0, 0), (0, 0)], border = 'constant', stride = [4, 4], dilation = [1, 1]);
relu1 = relu(conv1);
# pool1: 3x3 max-pool, stride 2 (size/stride given over all four dims)
pool1 = max_pool(relu1, size = [1, 1, 3, 3], padding = [(0, 0), (0, 0), (0, 0), (0, 0)], border = 'ignore', stride = [1, 1, 2, 2]);
# conv2: 192 filters, 5x5 kernel
kernel2 = variable(shape = [192, 64, 5, 5], label = 'alexnet_v2/conv2/kernel');
bias2 = variable(shape = [1, 192], label = 'alexnet_v2/conv2/bias');
conv2 = conv(pool1, kernel2, bias2, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu2 = relu(conv2);
pool2 = max_pool(relu2, size = [1, 1, 3, 3], padding = [(0, 0), (0, 0), (0, 0), (0, 0)], border = 'ignore', stride = [1, 1, 2, 2]);
# conv3-conv5: three 3x3 convolution stages (384, 384, 256 filters)
kernel3 = variable(shape = [384, 192, 3, 3], label = 'alexnet_v2/conv3/kernel');
bias3 = variable(shape = [1, 384], label = 'alexnet_v2/conv3/bias');
conv3 = conv(pool2, kernel3, bias3, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu3 = relu(conv3);
kernel4 = variable(shape = [384, 384, 3, 3], label = 'alexnet_v2/conv4/kernel');
bias4 = variable(shape = [1, 384], label = 'alexnet_v2/conv4/bias');
conv4 = conv(relu3, kernel4, bias4, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu4 = relu(conv4);
kernel5 = variable(shape = [256, 384, 3, 3], label = 'alexnet_v2/conv5/kernel');
bias5 = variable(shape = [1, 256], label = 'alexnet_v2/conv5/bias');
conv5 = conv(relu4, kernel5, bias5, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu5 = relu(conv5);
pool3 = max_pool(relu5, size = [1, 1, 3, 3], padding = [(0, 0), (0, 0), (0, 0), (0, 0)], border = 'ignore', stride = [1, 1, 2, 2]);
# classifier head: fc6 (5x5 conv, 4096), fc7 (1x1 conv, 4096), fc8 (1x1 conv, 1000 logits)
kernel6 = variable(shape = [4096, 256, 5, 5], label = 'alexnet_v2/fc6/kernel');
bias6 = variable(shape = [1, 4096], label = 'alexnet_v2/fc6/bias');
conv6 = conv(pool3, kernel6, bias6, padding = [(0, 0), (0, 0)], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu6 = relu(conv6);
kernel7 = variable(shape = [4096, 4096, 1, 1], label = 'alexnet_v2/fc7/kernel');
bias7 = variable(shape = [1, 4096], label = 'alexnet_v2/fc7/bias');
conv7 = conv(relu6, kernel7, bias7, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu7 = relu(conv7);
kernel8 = variable(shape = [1000, 4096, 1, 1], label = 'alexnet_v2/fc8/kernel');
bias8 = variable(shape = [1, 1000], label = 'alexnet_v2/fc8/bias');
output = conv(relu7, kernel8, bias8, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
}
================================================
FILE: nnef-pyproject/examples/googlenet.txt
================================================
# Copyright (c) 2017 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
version 1.0;
# GoogLeNet (Inception V1) in flat NNEF syntax. Each Mixed_* group below is an
# Inception module with four parallel branches — 1x1 conv; 1x1 -> 3x3 convs;
# 1x1 -> 3x3 convs; max-pool -> 1x1 conv — whose outputs are concatenated along
# the channel axis (axis = 1). Weights are loaded from the binary data files
# referenced by the 'label' arguments.
# NOTE(review): padding = [] appears to denote automatically derived padding
# per the NNEF spec — confirm against the parser's shape inference.
graph googlenet( input ) -> ( output )
{
# NCHW input image: batch 1, 3 channels, 224x224 spatial
input = external(shape = [1, 3, 224, 224]);
# stem: 7x7/2 conv -> 3x3/2 max-pool -> 1x1 conv -> 3x3 conv -> 3x3/2 max-pool
kernel1 = variable(shape = [64, 3, 7, 7], label = 'InceptionV1/Conv2d_1a_7x7/kernel');
bias1 = variable(shape = [1, 64], label = 'InceptionV1/Conv2d_1a_7x7/bias');
conv1 = conv(input, kernel1, bias1, padding = [], border = 'constant', stride = [2, 2], dilation = [1, 1]);
relu1 = relu(conv1);
pool1 = max_pool(relu1, size = [1, 1, 3, 3], padding = [], border = 'ignore', stride = [1, 1, 2, 2]);
kernel2 = variable(shape = [64, 64, 1, 1], label = 'InceptionV1/Conv2d_2b_1x1/kernel');
bias2 = variable(shape = [1, 64], label = 'InceptionV1/Conv2d_2b_1x1/bias');
conv2 = conv(pool1, kernel2, bias2, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu2 = relu(conv2);
kernel3 = variable(shape = [192, 64, 3, 3], label = 'InceptionV1/Conv2d_2c_3x3/kernel');
bias3 = variable(shape = [1, 192], label = 'InceptionV1/Conv2d_2c_3x3/bias');
conv3 = conv(relu2, kernel3, bias3, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu3 = relu(conv3);
pool2 = max_pool(relu3, size = [1, 1, 3, 3], padding = [], border = 'ignore', stride = [1, 1, 2, 2]);
# Inception module Mixed_3b (input: pool2, 192 channels)
kernel4 = variable(shape = [64, 192, 1, 1], label = 'InceptionV1/Mixed_3b/Branch_0/Conv2d_0a_1x1/kernel');
bias4 = variable(shape = [1, 64], label = 'InceptionV1/Mixed_3b/Branch_0/Conv2d_0a_1x1/bias');
conv4 = conv(pool2, kernel4, bias4, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu4 = relu(conv4);
kernel5 = variable(shape = [96, 192, 1, 1], label = 'InceptionV1/Mixed_3b/Branch_1/Conv2d_0a_1x1/kernel');
bias5 = variable(shape = [1, 96], label = 'InceptionV1/Mixed_3b/Branch_1/Conv2d_0a_1x1/bias');
conv5 = conv(pool2, kernel5, bias5, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu5 = relu(conv5);
kernel6 = variable(shape = [128, 96, 3, 3], label = 'InceptionV1/Mixed_3b/Branch_1/Conv2d_0b_3x3/kernel');
bias6 = variable(shape = [1, 128], label = 'InceptionV1/Mixed_3b/Branch_1/Conv2d_0b_3x3/bias');
conv6 = conv(relu5, kernel6, bias6, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu6 = relu(conv6);
kernel7 = variable(shape = [16, 192, 1, 1], label = 'InceptionV1/Mixed_3b/Branch_2/Conv2d_0a_1x1/kernel');
bias7 = variable(shape = [1, 16], label = 'InceptionV1/Mixed_3b/Branch_2/Conv2d_0a_1x1/bias');
conv7 = conv(pool2, kernel7, bias7, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu7 = relu(conv7);
kernel8 = variable(shape = [32, 16, 3, 3], label = 'InceptionV1/Mixed_3b/Branch_2/Conv2d_0b_3x3/kernel');
bias8 = variable(shape = [1, 32], label = 'InceptionV1/Mixed_3b/Branch_2/Conv2d_0b_3x3/bias');
conv8 = conv(relu7, kernel8, bias8, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu8 = relu(conv8);
pool3 = max_pool(pool2, size = [1, 1, 3, 3], padding = [], border = 'ignore', stride = [1, 1, 1, 1]);
kernel9 = variable(shape = [32, 192, 1, 1], label = 'InceptionV1/Mixed_3b/Branch_3/Conv2d_0b_1x1/kernel');
bias9 = variable(shape = [1, 32], label = 'InceptionV1/Mixed_3b/Branch_3/Conv2d_0b_1x1/bias');
conv9 = conv(pool3, kernel9, bias9, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu9 = relu(conv9);
concat1 = concat([relu4,relu6,relu8,relu9], axis = 1);
# Inception module Mixed_3c (input: concat1, 256 channels)
kernel10 = variable(shape = [128, 256, 1, 1], label = 'InceptionV1/Mixed_3c/Branch_0/Conv2d_0a_1x1/kernel');
bias10 = variable(shape = [1, 128], label = 'InceptionV1/Mixed_3c/Branch_0/Conv2d_0a_1x1/bias');
conv10 = conv(concat1, kernel10, bias10, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu10 = relu(conv10);
kernel11 = variable(shape = [128, 256, 1, 1], label = 'InceptionV1/Mixed_3c/Branch_1/Conv2d_0a_1x1/kernel');
bias11 = variable(shape = [1, 128], label = 'InceptionV1/Mixed_3c/Branch_1/Conv2d_0a_1x1/bias');
conv11 = conv(concat1, kernel11, bias11, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu11 = relu(conv11);
kernel12 = variable(shape = [192, 128, 3, 3], label = 'InceptionV1/Mixed_3c/Branch_1/Conv2d_0b_3x3/kernel');
bias12 = variable(shape = [1, 192], label = 'InceptionV1/Mixed_3c/Branch_1/Conv2d_0b_3x3/bias');
conv12 = conv(relu11, kernel12, bias12, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu12 = relu(conv12);
kernel13 = variable(shape = [32, 256, 1, 1], label = 'InceptionV1/Mixed_3c/Branch_2/Conv2d_0a_1x1/kernel');
bias13 = variable(shape = [1, 32], label = 'InceptionV1/Mixed_3c/Branch_2/Conv2d_0a_1x1/bias');
conv13 = conv(concat1, kernel13, bias13, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu13 = relu(conv13);
kernel14 = variable(shape = [96, 32, 3, 3], label = 'InceptionV1/Mixed_3c/Branch_2/Conv2d_0b_3x3/kernel');
bias14 = variable(shape = [1, 96], label = 'InceptionV1/Mixed_3c/Branch_2/Conv2d_0b_3x3/bias');
conv14 = conv(relu13, kernel14, bias14, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu14 = relu(conv14);
pool4 = max_pool(concat1, size = [1, 1, 3, 3], padding = [], border = 'ignore', stride = [1, 1, 1, 1]);
kernel15 = variable(shape = [64, 256, 1, 1], label = 'InceptionV1/Mixed_3c/Branch_3/Conv2d_0b_1x1/kernel');
bias15 = variable(shape = [1, 64], label = 'InceptionV1/Mixed_3c/Branch_3/Conv2d_0b_1x1/bias');
conv15 = conv(pool4, kernel15, bias15, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu15 = relu(conv15);
concat2 = concat([relu10,relu12,relu14,relu15], axis = 1);
# spatial downsampling between stage 3 and stage 4
pool5 = max_pool(concat2, size = [1, 1, 3, 3], padding = [], border = 'ignore', stride = [1, 1, 2, 2]);
# Inception module Mixed_4b (input: pool5, 480 channels)
kernel16 = variable(shape = [192, 480, 1, 1], label = 'InceptionV1/Mixed_4b/Branch_0/Conv2d_0a_1x1/kernel');
bias16 = variable(shape = [1, 192], label = 'InceptionV1/Mixed_4b/Branch_0/Conv2d_0a_1x1/bias');
conv16 = conv(pool5, kernel16, bias16, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu16 = relu(conv16);
kernel17 = variable(shape = [96, 480, 1, 1], label = 'InceptionV1/Mixed_4b/Branch_1/Conv2d_0a_1x1/kernel');
bias17 = variable(shape = [1, 96], label = 'InceptionV1/Mixed_4b/Branch_1/Conv2d_0a_1x1/bias');
conv17 = conv(pool5, kernel17, bias17, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu17 = relu(conv17);
kernel18 = variable(shape = [208, 96, 3, 3], label = 'InceptionV1/Mixed_4b/Branch_1/Conv2d_0b_3x3/kernel');
bias18 = variable(shape = [1, 208], label = 'InceptionV1/Mixed_4b/Branch_1/Conv2d_0b_3x3/bias');
conv18 = conv(relu17, kernel18, bias18, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu18 = relu(conv18);
kernel19 = variable(shape = [16, 480, 1, 1], label = 'InceptionV1/Mixed_4b/Branch_2/Conv2d_0a_1x1/kernel');
bias19 = variable(shape = [1, 16], label = 'InceptionV1/Mixed_4b/Branch_2/Conv2d_0a_1x1/bias');
conv19 = conv(pool5, kernel19, bias19, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu19 = relu(conv19);
kernel20 = variable(shape = [48, 16, 3, 3], label = 'InceptionV1/Mixed_4b/Branch_2/Conv2d_0b_3x3/kernel');
bias20 = variable(shape = [1, 48], label = 'InceptionV1/Mixed_4b/Branch_2/Conv2d_0b_3x3/bias');
conv20 = conv(relu19, kernel20, bias20, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu20 = relu(conv20);
pool6 = max_pool(pool5, size = [1, 1, 3, 3], padding = [], border = 'ignore', stride = [1, 1, 1, 1]);
kernel21 = variable(shape = [64, 480, 1, 1], label = 'InceptionV1/Mixed_4b/Branch_3/Conv2d_0b_1x1/kernel');
bias21 = variable(shape = [1, 64], label = 'InceptionV1/Mixed_4b/Branch_3/Conv2d_0b_1x1/bias');
conv21 = conv(pool6, kernel21, bias21, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu21 = relu(conv21);
concat3 = concat([relu16,relu18,relu20,relu21], axis = 1);
# Inception module Mixed_4c (input: concat3, 512 channels)
kernel22 = variable(shape = [160, 512, 1, 1], label = 'InceptionV1/Mixed_4c/Branch_0/Conv2d_0a_1x1/kernel');
bias22 = variable(shape = [1, 160], label = 'InceptionV1/Mixed_4c/Branch_0/Conv2d_0a_1x1/bias');
conv22 = conv(concat3, kernel22, bias22, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu22 = relu(conv22);
kernel23 = variable(shape = [112, 512, 1, 1], label = 'InceptionV1/Mixed_4c/Branch_1/Conv2d_0a_1x1/kernel');
bias23 = variable(shape = [1, 112], label = 'InceptionV1/Mixed_4c/Branch_1/Conv2d_0a_1x1/bias');
conv23 = conv(concat3, kernel23, bias23, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu23 = relu(conv23);
kernel24 = variable(shape = [224, 112, 3, 3], label = 'InceptionV1/Mixed_4c/Branch_1/Conv2d_0b_3x3/kernel');
bias24 = variable(shape = [1, 224], label = 'InceptionV1/Mixed_4c/Branch_1/Conv2d_0b_3x3/bias');
conv24 = conv(relu23, kernel24, bias24, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu24 = relu(conv24);
kernel25 = variable(shape = [24, 512, 1, 1], label = 'InceptionV1/Mixed_4c/Branch_2/Conv2d_0a_1x1/kernel');
bias25 = variable(shape = [1, 24], label = 'InceptionV1/Mixed_4c/Branch_2/Conv2d_0a_1x1/bias');
conv25 = conv(concat3, kernel25, bias25, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu25 = relu(conv25);
kernel26 = variable(shape = [64, 24, 3, 3], label = 'InceptionV1/Mixed_4c/Branch_2/Conv2d_0b_3x3/kernel');
bias26 = variable(shape = [1, 64], label = 'InceptionV1/Mixed_4c/Branch_2/Conv2d_0b_3x3/bias');
conv26 = conv(relu25, kernel26, bias26, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu26 = relu(conv26);
pool7 = max_pool(concat3, size = [1, 1, 3, 3], padding = [], border = 'ignore', stride = [1, 1, 1, 1]);
kernel27 = variable(shape = [64, 512, 1, 1], label = 'InceptionV1/Mixed_4c/Branch_3/Conv2d_0b_1x1/kernel');
bias27 = variable(shape = [1, 64], label = 'InceptionV1/Mixed_4c/Branch_3/Conv2d_0b_1x1/bias');
conv27 = conv(pool7, kernel27, bias27, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu27 = relu(conv27);
concat4 = concat([relu22,relu24,relu26,relu27], axis = 1);
# Inception module Mixed_4d (input: concat4, 512 channels)
kernel28 = variable(shape = [128, 512, 1, 1], label = 'InceptionV1/Mixed_4d/Branch_0/Conv2d_0a_1x1/kernel');
bias28 = variable(shape = [1, 128], label = 'InceptionV1/Mixed_4d/Branch_0/Conv2d_0a_1x1/bias');
conv28 = conv(concat4, kernel28, bias28, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu28 = relu(conv28);
kernel29 = variable(shape = [128, 512, 1, 1], label = 'InceptionV1/Mixed_4d/Branch_1/Conv2d_0a_1x1/kernel');
bias29 = variable(shape = [1, 128], label = 'InceptionV1/Mixed_4d/Branch_1/Conv2d_0a_1x1/bias');
conv29 = conv(concat4, kernel29, bias29, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu29 = relu(conv29);
kernel30 = variable(shape = [256, 128, 3, 3], label = 'InceptionV1/Mixed_4d/Branch_1/Conv2d_0b_3x3/kernel');
bias30 = variable(shape = [1, 256], label = 'InceptionV1/Mixed_4d/Branch_1/Conv2d_0b_3x3/bias');
conv30 = conv(relu29, kernel30, bias30, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu30 = relu(conv30);
kernel31 = variable(shape = [24, 512, 1, 1], label = 'InceptionV1/Mixed_4d/Branch_2/Conv2d_0a_1x1/kernel');
bias31 = variable(shape = [1, 24], label = 'InceptionV1/Mixed_4d/Branch_2/Conv2d_0a_1x1/bias');
conv31 = conv(concat4, kernel31, bias31, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu31 = relu(conv31);
kernel32 = variable(shape = [64, 24, 3, 3], label = 'InceptionV1/Mixed_4d/Branch_2/Conv2d_0b_3x3/kernel');
bias32 = variable(shape = [1, 64], label = 'InceptionV1/Mixed_4d/Branch_2/Conv2d_0b_3x3/bias');
conv32 = conv(relu31, kernel32, bias32, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu32 = relu(conv32);
pool8 = max_pool(concat4, size = [1, 1, 3, 3], padding = [], border = 'ignore', stride = [1, 1, 1, 1]);
kernel33 = variable(shape = [64, 512, 1, 1], label = 'InceptionV1/Mixed_4d/Branch_3/Conv2d_0b_1x1/kernel');
bias33 = variable(shape = [1, 64], label = 'InceptionV1/Mixed_4d/Branch_3/Conv2d_0b_1x1/bias');
conv33 = conv(pool8, kernel33, bias33, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu33 = relu(conv33);
concat5 = concat([relu28,relu30,relu32,relu33], axis = 1);
# Inception module Mixed_4e (input: concat5, 512 channels)
kernel34 = variable(shape = [112, 512, 1, 1], label = 'InceptionV1/Mixed_4e/Branch_0/Conv2d_0a_1x1/kernel');
bias34 = variable(shape = [1, 112], label = 'InceptionV1/Mixed_4e/Branch_0/Conv2d_0a_1x1/bias');
conv34 = conv(concat5, kernel34, bias34, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu34 = relu(conv34);
kernel35 = variable(shape = [144, 512, 1, 1], label = 'InceptionV1/Mixed_4e/Branch_1/Conv2d_0a_1x1/kernel');
bias35 = variable(shape = [1, 144], label = 'InceptionV1/Mixed_4e/Branch_1/Conv2d_0a_1x1/bias');
conv35 = conv(concat5, kernel35, bias35, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu35 = relu(conv35);
kernel36 = variable(shape = [288, 144, 3, 3], label = 'InceptionV1/Mixed_4e/Branch_1/Conv2d_0b_3x3/kernel');
bias36 = variable(shape = [1, 288], label = 'InceptionV1/Mixed_4e/Branch_1/Conv2d_0b_3x3/bias');
conv36 = conv(relu35, kernel36, bias36, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu36 = relu(conv36);
kernel37 = variable(shape = [32, 512, 1, 1], label = 'InceptionV1/Mixed_4e/Branch_2/Conv2d_0a_1x1/kernel');
bias37 = variable(shape = [1, 32], label = 'InceptionV1/Mixed_4e/Branch_2/Conv2d_0a_1x1/bias');
conv37 = conv(concat5, kernel37, bias37, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu37 = relu(conv37);
kernel38 = variable(shape = [64, 32, 3, 3], label = 'InceptionV1/Mixed_4e/Branch_2/Conv2d_0b_3x3/kernel');
bias38 = variable(shape = [1, 64], label = 'InceptionV1/Mixed_4e/Branch_2/Conv2d_0b_3x3/bias');
conv38 = conv(relu37, kernel38, bias38, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu38 = relu(conv38);
pool9 = max_pool(concat5, size = [1, 1, 3, 3], padding = [], border = 'ignore', stride = [1, 1, 1, 1]);
kernel39 = variable(shape = [64, 512, 1, 1], label = 'InceptionV1/Mixed_4e/Branch_3/Conv2d_0b_1x1/kernel');
bias39 = variable(shape = [1, 64], label = 'InceptionV1/Mixed_4e/Branch_3/Conv2d_0b_1x1/bias');
conv39 = conv(pool9, kernel39, bias39, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu39 = relu(conv39);
concat6 = concat([relu34,relu36,relu38,relu39], axis = 1);
# Inception module Mixed_4f (input: concat6, 528 channels)
kernel40 = variable(shape = [256, 528, 1, 1], label = 'InceptionV1/Mixed_4f/Branch_0/Conv2d_0a_1x1/kernel');
bias40 = variable(shape = [1, 256], label = 'InceptionV1/Mixed_4f/Branch_0/Conv2d_0a_1x1/bias');
conv40 = conv(concat6, kernel40, bias40, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu40 = relu(conv40);
kernel41 = variable(shape = [160, 528, 1, 1], label = 'InceptionV1/Mixed_4f/Branch_1/Conv2d_0a_1x1/kernel');
bias41 = variable(shape = [1, 160], label = 'InceptionV1/Mixed_4f/Branch_1/Conv2d_0a_1x1/bias');
conv41 = conv(concat6, kernel41, bias41, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu41 = relu(conv41);
kernel42 = variable(shape = [320, 160, 3, 3], label = 'InceptionV1/Mixed_4f/Branch_1/Conv2d_0b_3x3/kernel');
bias42 = variable(shape = [1, 320], label = 'InceptionV1/Mixed_4f/Branch_1/Conv2d_0b_3x3/bias');
conv42 = conv(relu41, kernel42, bias42, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu42 = relu(conv42);
kernel43 = variable(shape = [32, 528, 1, 1], label = 'InceptionV1/Mixed_4f/Branch_2/Conv2d_0a_1x1/kernel');
bias43 = variable(shape = [1, 32], label = 'InceptionV1/Mixed_4f/Branch_2/Conv2d_0a_1x1/bias');
conv43 = conv(concat6, kernel43, bias43, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu43 = relu(conv43);
kernel44 = variable(shape = [128, 32, 3, 3], label = 'InceptionV1/Mixed_4f/Branch_2/Conv2d_0b_3x3/kernel');
bias44 = variable(shape = [1, 128], label = 'InceptionV1/Mixed_4f/Branch_2/Conv2d_0b_3x3/bias');
conv44 = conv(relu43, kernel44, bias44, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu44 = relu(conv44);
pool10 = max_pool(concat6, size = [1, 1, 3, 3], padding = [], border = 'ignore', stride = [1, 1, 1, 1]);
kernel45 = variable(shape = [128, 528, 1, 1], label = 'InceptionV1/Mixed_4f/Branch_3/Conv2d_0b_1x1/kernel');
bias45 = variable(shape = [1, 128], label = 'InceptionV1/Mixed_4f/Branch_3/Conv2d_0b_1x1/bias');
conv45 = conv(pool10, kernel45, bias45, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu45 = relu(conv45);
concat7 = concat([relu40,relu42,relu44,relu45], axis = 1);
# spatial downsampling between stage 4 and stage 5
pool11 = max_pool(concat7, size = [1, 1, 2, 2], padding = [], border = 'ignore', stride = [1, 1, 2, 2]);
# Inception module Mixed_5b (input: pool11, 832 channels)
kernel46 = variable(shape = [256, 832, 1, 1], label = 'InceptionV1/Mixed_5b/Branch_0/Conv2d_0a_1x1/kernel');
bias46 = variable(shape = [1, 256], label = 'InceptionV1/Mixed_5b/Branch_0/Conv2d_0a_1x1/bias');
conv46 = conv(pool11, kernel46, bias46, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu46 = relu(conv46);
kernel47 = variable(shape = [160, 832, 1, 1], label = 'InceptionV1/Mixed_5b/Branch_1/Conv2d_0a_1x1/kernel');
bias47 = variable(shape = [1, 160], label = 'InceptionV1/Mixed_5b/Branch_1/Conv2d_0a_1x1/bias');
conv47 = conv(pool11, kernel47, bias47, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu47 = relu(conv47);
kernel48 = variable(shape = [320, 160, 3, 3], label = 'InceptionV1/Mixed_5b/Branch_1/Conv2d_0b_3x3/kernel');
bias48 = variable(shape = [1, 320], label = 'InceptionV1/Mixed_5b/Branch_1/Conv2d_0b_3x3/bias');
conv48 = conv(relu47, kernel48, bias48, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu48 = relu(conv48);
kernel49 = variable(shape = [32, 832, 1, 1], label = 'InceptionV1/Mixed_5b/Branch_2/Conv2d_0a_1x1/kernel');
bias49 = variable(shape = [1, 32], label = 'InceptionV1/Mixed_5b/Branch_2/Conv2d_0a_1x1/bias');
conv49 = conv(pool11, kernel49, bias49, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu49 = relu(conv49);
# NOTE(review): label says Conv2d_0a_3x3 (not 0b) — matches the TF-slim
# checkpoint naming for this branch; kept verbatim.
kernel50 = variable(shape = [128, 32, 3, 3], label = 'InceptionV1/Mixed_5b/Branch_2/Conv2d_0a_3x3/kernel');
bias50 = variable(shape = [1, 128], label = 'InceptionV1/Mixed_5b/Branch_2/Conv2d_0a_3x3/bias');
conv50 = conv(relu49, kernel50, bias50, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu50 = relu(conv50);
pool12 = max_pool(pool11, size = [1, 1, 3, 3], padding = [], border = 'ignore', stride = [1, 1, 1, 1]);
kernel51 = variable(shape = [128, 832, 1, 1], label = 'InceptionV1/Mixed_5b/Branch_3/Conv2d_0b_1x1/kernel');
bias51 = variable(shape = [1, 128], label = 'InceptionV1/Mixed_5b/Branch_3/Conv2d_0b_1x1/bias');
conv51 = conv(pool12, kernel51, bias51, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu51 = relu(conv51);
concat8 = concat([relu46,relu48,relu50,relu51], axis = 1);
# Inception module Mixed_5c (input: concat8, 832 channels)
kernel52 = variable(shape = [384, 832, 1, 1], label = 'InceptionV1/Mixed_5c/Branch_0/Conv2d_0a_1x1/kernel');
bias52 = variable(shape = [1, 384], label = 'InceptionV1/Mixed_5c/Branch_0/Conv2d_0a_1x1/bias');
conv52 = conv(concat8, kernel52, bias52, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu52 = relu(conv52);
kernel53 = variable(shape = [192, 832, 1, 1], label = 'InceptionV1/Mixed_5c/Branch_1/Conv2d_0a_1x1/kernel');
bias53 = variable(shape = [1, 192], label = 'InceptionV1/Mixed_5c/Branch_1/Conv2d_0a_1x1/bias');
conv53 = conv(concat8, kernel53, bias53, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu53 = relu(conv53);
kernel54 = variable(shape = [384, 192, 3, 3], label = 'InceptionV1/Mixed_5c/Branch_1/Conv2d_0b_3x3/kernel');
bias54 = variable(shape = [1, 384], label = 'InceptionV1/Mixed_5c/Branch_1/Conv2d_0b_3x3/bias');
conv54 = conv(relu53, kernel54, bias54, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu54 = relu(conv54);
kernel55 = variable(shape = [48, 832, 1, 1], label = 'InceptionV1/Mixed_5c/Branch_2/Conv2d_0a_1x1/kernel');
bias55 = variable(shape = [1, 48], label = 'InceptionV1/Mixed_5c/Branch_2/Conv2d_0a_1x1/bias');
conv55 = conv(concat8, kernel55, bias55, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu55 = relu(conv55);
kernel56 = variable(shape = [128, 48, 3, 3], label = 'InceptionV1/Mixed_5c/Branch_2/Conv2d_0b_3x3/kernel');
bias56 = variable(shape = [1, 128], label = 'InceptionV1/Mixed_5c/Branch_2/Conv2d_0b_3x3/bias');
conv56 = conv(relu55, kernel56, bias56, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu56 = relu(conv56);
pool13 = max_pool(concat8, size = [1, 1, 3, 3], padding = [], border = 'ignore', stride = [1, 1, 1, 1]);
kernel57 = variable(shape = [128, 832, 1, 1], label = 'InceptionV1/Mixed_5c/Branch_3/Conv2d_0b_1x1/kernel');
bias57 = variable(shape = [1, 128], label = 'InceptionV1/Mixed_5c/Branch_3/Conv2d_0b_1x1/bias');
conv57 = conv(pool13, kernel57, bias57, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu57 = relu(conv57);
concat9 = concat([relu52,relu54,relu56,relu57], axis = 1);
# classifier head: 7x7 average pool, then 1x1 conv producing 1000 class logits
pool14 = avg_pool(concat9, size = [1, 1, 7, 7], padding = [(0, 0), (0, 0), (0, 0), (0, 0)], border = 'ignore', stride = [1, 1, 1, 1]);
kernel58 = variable(shape = [1000, 1024, 1, 1], label = 'InceptionV1/Logits/Conv2d_0c_1x1/kernel');
bias58 = variable(shape = [1, 1000], label = 'InceptionV1/Logits/Conv2d_0c_1x1/bias');
output = conv(pool14, kernel58, bias58, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
}
================================================
FILE: nnef-pyproject/examples/resnet.txt
================================================
# Copyright (c) 2017 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
version 1.0;
graph resnet_v2_50( input ) -> ( output )
{
input = external(shape = [1, 3, 224, 224]);
kernel1 = variable(shape = [64, 3, 7, 7], label = 'resnet_v2_50/conv1/kernel');
bias1 = variable(shape = [1, 64], label = 'resnet_v2_50/conv1/bias');
conv1 = conv(input, kernel1, bias1, padding = [(3, 3), (3, 3)], border = 'constant', stride = [2, 2], dilation = [1, 1]);
pool1 = max_pool(conv1, size = [1, 1, 3, 3], padding = [(0, 0), (0, 0), (0, 0), (0, 0)], border = 'ignore', stride = [1, 1, 2, 2]);
beta1 = variable(shape = [1, 64], label = 'resnet_v2_50/block1/unit_1/bottleneck_v2/preact/beta');
moving_mean1 = variable(shape = [1, 64], label = 'resnet_v2_50/block1/unit_1/bottleneck_v2/preact/moving_mean');
moving_variance1 = variable(shape = [1, 64], label = 'resnet_v2_50/block1/unit_1/bottleneck_v2/preact/moving_variance');
norm1 = batch_normalization(pool1, mean = moving_mean1, variance = moving_variance1, offset = beta1, scale = 1.0, epsilon = 0.001);
relu1 = relu(norm1);
kernel2 = variable(shape = [256, 64, 1, 1], label = 'resnet_v2_50/block1/unit_1/bottleneck_v2/shortcut/kernel');
bias2 = variable(shape = [1, 256], label = 'resnet_v2_50/block1/unit_1/bottleneck_v2/shortcut/bias');
conv2 = conv(relu1, kernel2, bias2, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
kernel3 = variable(shape = [64, 64, 1, 1], label = 'resnet_v2_50/block1/unit_1/bottleneck_v2/conv1/kernel');
bias3 = variable(shape = [1, 64], label = 'resnet_v2_50/block1/unit_1/bottleneck_v2/conv1/bias');
conv3 = conv(relu1, kernel3, bias3, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu2 = relu(conv3);
kernel4 = variable(shape = [64, 64, 3, 3], label = 'resnet_v2_50/block1/unit_1/bottleneck_v2/conv2/kernel');
bias4 = variable(shape = [1, 64], label = 'resnet_v2_50/block1/unit_1/bottleneck_v2/conv2/bias');
conv4 = conv(relu2, kernel4, bias4, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu3 = relu(conv4);
kernel5 = variable(shape = [256, 64, 1, 1], label = 'resnet_v2_50/block1/unit_1/bottleneck_v2/conv3/kernel');
bias5 = variable(shape = [1, 256], label = 'resnet_v2_50/block1/unit_1/bottleneck_v2/conv3/bias');
conv5 = conv(relu3, kernel5, bias5, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
add1 = add(conv2, conv5);
beta2 = variable(shape = [1, 256], label = 'resnet_v2_50/block1/unit_2/bottleneck_v2/preact/beta');
moving_mean2 = variable(shape = [1, 256], label = 'resnet_v2_50/block1/unit_2/bottleneck_v2/preact/moving_mean');
moving_variance2 = variable(shape = [1, 256], label = 'resnet_v2_50/block1/unit_2/bottleneck_v2/preact/moving_variance');
norm2 = batch_normalization(add1, mean = moving_mean2, variance = moving_variance2, offset = beta2, scale = 1.0, epsilon = 0.001);
relu4 = relu(norm2);
kernel6 = variable(shape = [64, 256, 1, 1], label = 'resnet_v2_50/block1/unit_2/bottleneck_v2/conv1/kernel');
bias6 = variable(shape = [1, 64], label = 'resnet_v2_50/block1/unit_2/bottleneck_v2/conv1/bias');
conv6 = conv(relu4, kernel6, bias6, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu5 = relu(conv6);
kernel7 = variable(shape = [64, 64, 3, 3], label = 'resnet_v2_50/block1/unit_2/bottleneck_v2/conv2/kernel');
bias7 = variable(shape = [1, 64], label = 'resnet_v2_50/block1/unit_2/bottleneck_v2/conv2/bias');
conv7 = conv(relu5, kernel7, bias7, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu6 = relu(conv7);
kernel8 = variable(shape = [256, 64, 1, 1], label = 'resnet_v2_50/block1/unit_2/bottleneck_v2/conv3/kernel');
bias8 = variable(shape = [1, 256], label = 'resnet_v2_50/block1/unit_2/bottleneck_v2/conv3/bias');
conv8 = conv(relu6, kernel8, bias8, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
add2 = add(add1, conv8);
beta3 = variable(shape = [1, 256], label = 'resnet_v2_50/block1/unit_3/bottleneck_v2/preact/beta');
moving_mean3 = variable(shape = [1, 256], label = 'resnet_v2_50/block1/unit_3/bottleneck_v2/preact/moving_mean');
moving_variance3 = variable(shape = [1, 256], label = 'resnet_v2_50/block1/unit_3/bottleneck_v2/preact/moving_variance');
norm3 = batch_normalization(add2, mean = moving_mean3, variance = moving_variance3, offset = beta3, scale = 1.0, epsilon = 0.001);
relu7 = relu(norm3);
pool2 = max_pool(add2, size = [1, 1, 1, 1], padding = [(0, 0), (0, 0), (0, 0), (0, 0)], border = 'ignore', stride = [1, 1, 2, 2]);
kernel9 = variable(shape = [64, 256, 1, 1], label = 'resnet_v2_50/block1/unit_3/bottleneck_v2/conv1/kernel');
bias9 = variable(shape = [1, 64], label = 'resnet_v2_50/block1/unit_3/bottleneck_v2/conv1/bias');
conv9 = conv(relu7, kernel9, bias9, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu8 = relu(conv9);
kernel10 = variable(shape = [64, 64, 3, 3], label = 'resnet_v2_50/block1/unit_3/bottleneck_v2/conv2/kernel');
bias10 = variable(shape = [1, 64], label = 'resnet_v2_50/block1/unit_3/bottleneck_v2/conv2/bias');
conv10 = conv(relu8, kernel10, bias10, padding = [(1, 1), (1, 1)], border = 'constant', stride = [2, 2], dilation = [1, 1]);
relu9 = relu(conv10);
kernel11 = variable(shape = [256, 64, 1, 1], label = 'resnet_v2_50/block1/unit_3/bottleneck_v2/conv3/kernel');
bias11 = variable(shape = [1, 256], label = 'resnet_v2_50/block1/unit_3/bottleneck_v2/conv3/bias');
conv11 = conv(relu9, kernel11, bias11, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
add3 = add(pool2, conv11);
beta4 = variable(shape = [1, 256], label = 'resnet_v2_50/block2/unit_1/bottleneck_v2/preact/beta');
moving_mean4 = variable(shape = [1, 256], label = 'resnet_v2_50/block2/unit_1/bottleneck_v2/preact/moving_mean');
moving_variance4 = variable(shape = [1, 256], label = 'resnet_v2_50/block2/unit_1/bottleneck_v2/preact/moving_variance');
norm4 = batch_normalization(add3, mean = moving_mean4, variance = moving_variance4, offset = beta4, scale = 1.0, epsilon = 0.001);
relu10 = relu(norm4);
kernel12 = variable(shape = [512, 256, 1, 1], label = 'resnet_v2_50/block2/unit_1/bottleneck_v2/shortcut/kernel');
bias12 = variable(shape = [1, 512], label = 'resnet_v2_50/block2/unit_1/bottleneck_v2/shortcut/bias');
conv12 = conv(relu10, kernel12, bias12, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
kernel13 = variable(shape = [128, 256, 1, 1], label = 'resnet_v2_50/block2/unit_1/bottleneck_v2/conv1/kernel');
bias13 = variable(shape = [1, 128], label = 'resnet_v2_50/block2/unit_1/bottleneck_v2/conv1/bias');
conv13 = conv(relu10, kernel13, bias13, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu11 = relu(conv13);
kernel14 = variable(shape = [128, 128, 3, 3], label = 'resnet_v2_50/block2/unit_1/bottleneck_v2/conv2/kernel');
bias14 = variable(shape = [1, 128], label = 'resnet_v2_50/block2/unit_1/bottleneck_v2/conv2/bias');
conv14 = conv(relu11, kernel14, bias14, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu12 = relu(conv14);
kernel15 = variable(shape = [512, 128, 1, 1], label = 'resnet_v2_50/block2/unit_1/bottleneck_v2/conv3/kernel');
bias15 = variable(shape = [1, 512], label = 'resnet_v2_50/block2/unit_1/bottleneck_v2/conv3/bias');
conv15 = conv(relu12, kernel15, bias15, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
add4 = add(conv12, conv15);
beta5 = variable(shape = [1, 512], label = 'resnet_v2_50/block2/unit_2/bottleneck_v2/preact/beta');
moving_mean5 = variable(shape = [1, 512], label = 'resnet_v2_50/block2/unit_2/bottleneck_v2/preact/moving_mean');
moving_variance5 = variable(shape = [1, 512], label = 'resnet_v2_50/block2/unit_2/bottleneck_v2/preact/moving_variance');
norm5 = batch_normalization(add4, mean = moving_mean5, variance = moving_variance5, offset = beta5, scale = 1.0, epsilon = 0.001);
relu13 = relu(norm5);
kernel16 = variable(shape = [128, 512, 1, 1], label = 'resnet_v2_50/block2/unit_2/bottleneck_v2/conv1/kernel');
bias16 = variable(shape = [1, 128], label = 'resnet_v2_50/block2/unit_2/bottleneck_v2/conv1/bias');
conv16 = conv(relu13, kernel16, bias16, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu14 = relu(conv16);
kernel17 = variable(shape = [128, 128, 3, 3], label = 'resnet_v2_50/block2/unit_2/bottleneck_v2/conv2/kernel');
bias17 = variable(shape = [1, 128], label = 'resnet_v2_50/block2/unit_2/bottleneck_v2/conv2/bias');
conv17 = conv(relu14, kernel17, bias17, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu15 = relu(conv17);
kernel18 = variable(shape = [512, 128, 1, 1], label = 'resnet_v2_50/block2/unit_2/bottleneck_v2/conv3/kernel');
bias18 = variable(shape = [1, 512], label = 'resnet_v2_50/block2/unit_2/bottleneck_v2/conv3/bias');
conv18 = conv(relu15, kernel18, bias18, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
add5 = add(add4, conv18);
beta6 = variable(shape = [1, 512], label = 'resnet_v2_50/block2/unit_3/bottleneck_v2/preact/beta');
moving_mean6 = variable(shape = [1, 512], label = 'resnet_v2_50/block2/unit_3/bottleneck_v2/preact/moving_mean');
moving_variance6 = variable(shape = [1, 512], label = 'resnet_v2_50/block2/unit_3/bottleneck_v2/preact/moving_variance');
norm6 = batch_normalization(add5, mean = moving_mean6, variance = moving_variance6, offset = beta6, scale = 1.0, epsilon = 0.001);
relu16 = relu(norm6);
kernel19 = variable(shape = [128, 512, 1, 1], label = 'resnet_v2_50/block2/unit_3/bottleneck_v2/conv1/kernel');
bias19 = variable(shape = [1, 128], label = 'resnet_v2_50/block2/unit_3/bottleneck_v2/conv1/bias');
conv19 = conv(relu16, kernel19, bias19, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu17 = relu(conv19);
kernel20 = variable(shape = [128, 128, 3, 3], label = 'resnet_v2_50/block2/unit_3/bottleneck_v2/conv2/kernel');
bias20 = variable(shape = [1, 128], label = 'resnet_v2_50/block2/unit_3/bottleneck_v2/conv2/bias');
conv20 = conv(relu17, kernel20, bias20, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu18 = relu(conv20);
kernel21 = variable(shape = [512, 128, 1, 1], label = 'resnet_v2_50/block2/unit_3/bottleneck_v2/conv3/kernel');
bias21 = variable(shape = [1, 512], label = 'resnet_v2_50/block2/unit_3/bottleneck_v2/conv3/bias');
conv21 = conv(relu18, kernel21, bias21, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
add6 = add(add5, conv21);
beta7 = variable(shape = [1, 512], label = 'resnet_v2_50/block2/unit_4/bottleneck_v2/preact/beta');
moving_mean7 = variable(shape = [1, 512], label = 'resnet_v2_50/block2/unit_4/bottleneck_v2/preact/moving_mean');
moving_variance7 = variable(shape = [1, 512], label = 'resnet_v2_50/block2/unit_4/bottleneck_v2/preact/moving_variance');
norm7 = batch_normalization(add6, mean = moving_mean7, variance = moving_variance7, offset = beta7, scale = 1.0, epsilon = 0.001);
relu19 = relu(norm7);
pool3 = max_pool(add6, size = [1, 1, 1, 1], padding = [(0, 0), (0, 0), (0, 0), (0, 0)], border = 'ignore', stride = [1, 1, 2, 2]);
kernel22 = variable(shape = [128, 512, 1, 1], label = 'resnet_v2_50/block2/unit_4/bottleneck_v2/conv1/kernel');
bias22 = variable(shape = [1, 128], label = 'resnet_v2_50/block2/unit_4/bottleneck_v2/conv1/bias');
conv22 = conv(relu19, kernel22, bias22, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu20 = relu(conv22);
kernel23 = variable(shape = [128, 128, 3, 3], label = 'resnet_v2_50/block2/unit_4/bottleneck_v2/conv2/kernel');
bias23 = variable(shape = [1, 128], label = 'resnet_v2_50/block2/unit_4/bottleneck_v2/conv2/bias');
conv23 = conv(relu20, kernel23, bias23, padding = [(1, 1), (1, 1)], border = 'constant', stride = [2, 2], dilation = [1, 1]);
relu21 = relu(conv23);
kernel24 = variable(shape = [512, 128, 1, 1], label = 'resnet_v2_50/block2/unit_4/bottleneck_v2/conv3/kernel');
bias24 = variable(shape = [1, 512], label = 'resnet_v2_50/block2/unit_4/bottleneck_v2/conv3/bias');
conv24 = conv(relu21, kernel24, bias24, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
add7 = add(pool3, conv24);
beta8 = variable(shape = [1, 512], label = 'resnet_v2_50/block3/unit_1/bottleneck_v2/preact/beta');
moving_mean8 = variable(shape = [1, 512], label = 'resnet_v2_50/block3/unit_1/bottleneck_v2/preact/moving_mean');
moving_variance8 = variable(shape = [1, 512], label = 'resnet_v2_50/block3/unit_1/bottleneck_v2/preact/moving_variance');
norm8 = batch_normalization(add7, mean = moving_mean8, variance = moving_variance8, offset = beta8, scale = 1.0, epsilon = 0.001);
relu22 = relu(norm8);
kernel25 = variable(shape = [1024, 512, 1, 1], label = 'resnet_v2_50/block3/unit_1/bottleneck_v2/shortcut/kernel');
bias25 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_1/bottleneck_v2/shortcut/bias');
conv25 = conv(relu22, kernel25, bias25, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
kernel26 = variable(shape = [256, 512, 1, 1], label = 'resnet_v2_50/block3/unit_1/bottleneck_v2/conv1/kernel');
bias26 = variable(shape = [1, 256], label = 'resnet_v2_50/block3/unit_1/bottleneck_v2/conv1/bias');
conv26 = conv(relu22, kernel26, bias26, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu23 = relu(conv26);
kernel27 = variable(shape = [256, 256, 3, 3], label = 'resnet_v2_50/block3/unit_1/bottleneck_v2/conv2/kernel');
bias27 = variable(shape = [1, 256], label = 'resnet_v2_50/block3/unit_1/bottleneck_v2/conv2/bias');
conv27 = conv(relu23, kernel27, bias27, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu24 = relu(conv27);
kernel28 = variable(shape = [1024, 256, 1, 1], label = 'resnet_v2_50/block3/unit_1/bottleneck_v2/conv3/kernel');
bias28 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_1/bottleneck_v2/conv3/bias');
conv28 = conv(relu24, kernel28, bias28, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
add8 = add(conv25, conv28);
beta9 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_2/bottleneck_v2/preact/beta');
moving_mean9 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_2/bottleneck_v2/preact/moving_mean');
moving_variance9 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_2/bottleneck_v2/preact/moving_variance');
norm9 = batch_normalization(add8, mean = moving_mean9, variance = moving_variance9, offset = beta9, scale = 1.0, epsilon = 0.001);
relu25 = relu(norm9);
kernel29 = variable(shape = [256, 1024, 1, 1], label = 'resnet_v2_50/block3/unit_2/bottleneck_v2/conv1/kernel');
bias29 = variable(shape = [1, 256], label = 'resnet_v2_50/block3/unit_2/bottleneck_v2/conv1/bias');
conv29 = conv(relu25, kernel29, bias29, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu26 = relu(conv29);
kernel30 = variable(shape = [256, 256, 3, 3], label = 'resnet_v2_50/block3/unit_2/bottleneck_v2/conv2/kernel');
bias30 = variable(shape = [1, 256], label = 'resnet_v2_50/block3/unit_2/bottleneck_v2/conv2/bias');
conv30 = conv(relu26, kernel30, bias30, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu27 = relu(conv30);
kernel31 = variable(shape = [1024, 256, 1, 1], label = 'resnet_v2_50/block3/unit_2/bottleneck_v2/conv3/kernel');
bias31 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_2/bottleneck_v2/conv3/bias');
conv31 = conv(relu27, kernel31, bias31, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
add9 = add(add8, conv31);
beta10 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_3/bottleneck_v2/preact/beta');
moving_mean10 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_3/bottleneck_v2/preact/moving_mean');
moving_variance10 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_3/bottleneck_v2/preact/moving_variance');
norm10 = batch_normalization(add9, mean = moving_mean10, variance = moving_variance10, offset = beta10, scale = 1.0, epsilon = 0.001);
relu28 = relu(norm10);
kernel32 = variable(shape = [256, 1024, 1, 1], label = 'resnet_v2_50/block3/unit_3/bottleneck_v2/conv1/kernel');
bias32 = variable(shape = [1, 256], label = 'resnet_v2_50/block3/unit_3/bottleneck_v2/conv1/bias');
conv32 = conv(relu28, kernel32, bias32, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu29 = relu(conv32);
kernel33 = variable(shape = [256, 256, 3, 3], label = 'resnet_v2_50/block3/unit_3/bottleneck_v2/conv2/kernel');
bias33 = variable(shape = [1, 256], label = 'resnet_v2_50/block3/unit_3/bottleneck_v2/conv2/bias');
conv33 = conv(relu29, kernel33, bias33, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu30 = relu(conv33);
kernel34 = variable(shape = [1024, 256, 1, 1], label = 'resnet_v2_50/block3/unit_3/bottleneck_v2/conv3/kernel');
bias34 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_3/bottleneck_v2/conv3/bias');
conv34 = conv(relu30, kernel34, bias34, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
add10 = add(add9, conv34);
beta11 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_4/bottleneck_v2/preact/beta');
moving_mean11 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_4/bottleneck_v2/preact/moving_mean');
moving_variance11 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_4/bottleneck_v2/preact/moving_variance');
norm11 = batch_normalization(add10, mean = moving_mean11, variance = moving_variance11, offset = beta11, scale = 1.0, epsilon = 0.001);
relu31 = relu(norm11);
kernel35 = variable(shape = [256, 1024, 1, 1], label = 'resnet_v2_50/block3/unit_4/bottleneck_v2/conv1/kernel');
bias35 = variable(shape = [1, 256], label = 'resnet_v2_50/block3/unit_4/bottleneck_v2/conv1/bias');
conv35 = conv(relu31, kernel35, bias35, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu32 = relu(conv35);
kernel36 = variable(shape = [256, 256, 3, 3], label = 'resnet_v2_50/block3/unit_4/bottleneck_v2/conv2/kernel');
bias36 = variable(shape = [1, 256], label = 'resnet_v2_50/block3/unit_4/bottleneck_v2/conv2/bias');
conv36 = conv(relu32, kernel36, bias36, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu33 = relu(conv36);
kernel37 = variable(shape = [1024, 256, 1, 1], label = 'resnet_v2_50/block3/unit_4/bottleneck_v2/conv3/kernel');
bias37 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_4/bottleneck_v2/conv3/bias');
conv37 = conv(relu33, kernel37, bias37, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
add11 = add(add10, conv37);
beta12 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_5/bottleneck_v2/preact/beta');
moving_mean12 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_5/bottleneck_v2/preact/moving_mean');
moving_variance12 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_5/bottleneck_v2/preact/moving_variance');
norm12 = batch_normalization(add11, mean = moving_mean12, variance = moving_variance12, offset = beta12, scale = 1.0, epsilon = 0.001);
relu34 = relu(norm12);
kernel38 = variable(shape = [256, 1024, 1, 1], label = 'resnet_v2_50/block3/unit_5/bottleneck_v2/conv1/kernel');
bias38 = variable(shape = [1, 256], label = 'resnet_v2_50/block3/unit_5/bottleneck_v2/conv1/bias');
conv38 = conv(relu34, kernel38, bias38, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu35 = relu(conv38);
kernel39 = variable(shape = [256, 256, 3, 3], label = 'resnet_v2_50/block3/unit_5/bottleneck_v2/conv2/kernel');
bias39 = variable(shape = [1, 256], label = 'resnet_v2_50/block3/unit_5/bottleneck_v2/conv2/bias');
conv39 = conv(relu35, kernel39, bias39, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu36 = relu(conv39);
kernel40 = variable(shape = [1024, 256, 1, 1], label = 'resnet_v2_50/block3/unit_5/bottleneck_v2/conv3/kernel');
bias40 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_5/bottleneck_v2/conv3/bias');
conv40 = conv(relu36, kernel40, bias40, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
add12 = add(add11, conv40);
beta13 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_6/bottleneck_v2/preact/beta');
moving_mean13 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_6/bottleneck_v2/preact/moving_mean');
moving_variance13 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_6/bottleneck_v2/preact/moving_variance');
norm13 = batch_normalization(add12, mean = moving_mean13, variance = moving_variance13, offset = beta13, scale = 1.0, epsilon = 0.001);
relu37 = relu(norm13);
pool4 = max_pool(add12, size = [1, 1, 1, 1], padding = [(0, 0), (0, 0), (0, 0), (0, 0)], border = 'ignore', stride = [1, 1, 2, 2]);
kernel41 = variable(shape = [256, 1024, 1, 1], label = 'resnet_v2_50/block3/unit_6/bottleneck_v2/conv1/kernel');
bias41 = variable(shape = [1, 256], label = 'resnet_v2_50/block3/unit_6/bottleneck_v2/conv1/bias');
conv41 = conv(relu37, kernel41, bias41, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu38 = relu(conv41);
kernel42 = variable(shape = [256, 256, 3, 3], label = 'resnet_v2_50/block3/unit_6/bottleneck_v2/conv2/kernel');
bias42 = variable(shape = [1, 256], label = 'resnet_v2_50/block3/unit_6/bottleneck_v2/conv2/bias');
conv42 = conv(relu38, kernel42, bias42, padding = [(1, 1), (1, 1)], border = 'constant', stride = [2, 2], dilation = [1, 1]);
relu39 = relu(conv42);
kernel43 = variable(shape = [1024, 256, 1, 1], label = 'resnet_v2_50/block3/unit_6/bottleneck_v2/conv3/kernel');
bias43 = variable(shape = [1, 1024], label = 'resnet_v2_50/block3/unit_6/bottleneck_v2/conv3/bias');
conv43 = conv(relu39, kernel43, bias43, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
add13 = add(pool4, conv43);
beta14 = variable(shape = [1, 1024], label = 'resnet_v2_50/block4/unit_1/bottleneck_v2/preact/beta');
moving_mean14 = variable(shape = [1, 1024], label = 'resnet_v2_50/block4/unit_1/bottleneck_v2/preact/moving_mean');
moving_variance14 = variable(shape = [1, 1024], label = 'resnet_v2_50/block4/unit_1/bottleneck_v2/preact/moving_variance');
norm14 = batch_normalization(add13, mean = moving_mean14, variance = moving_variance14, offset = beta14, scale = 1.0, epsilon = 0.001);
relu40 = relu(norm14);
kernel44 = variable(shape = [2048, 1024, 1, 1], label = 'resnet_v2_50/block4/unit_1/bottleneck_v2/shortcut/kernel');
bias44 = variable(shape = [1, 2048], label = 'resnet_v2_50/block4/unit_1/bottleneck_v2/shortcut/bias');
conv44 = conv(relu40, kernel44, bias44, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
kernel45 = variable(shape = [512, 1024, 1, 1], label = 'resnet_v2_50/block4/unit_1/bottleneck_v2/conv1/kernel');
bias45 = variable(shape = [1, 512], label = 'resnet_v2_50/block4/unit_1/bottleneck_v2/conv1/bias');
conv45 = conv(relu40, kernel45, bias45, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu41 = relu(conv45);
kernel46 = variable(shape = [512, 512, 3, 3], label = 'resnet_v2_50/block4/unit_1/bottleneck_v2/conv2/kernel');
bias46 = variable(shape = [1, 512], label = 'resnet_v2_50/block4/unit_1/bottleneck_v2/conv2/bias');
conv46 = conv(relu41, kernel46, bias46, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu42 = relu(conv46);
kernel47 = variable(shape = [2048, 512, 1, 1], label = 'resnet_v2_50/block4/unit_1/bottleneck_v2/conv3/kernel');
bias47 = variable(shape = [1, 2048], label = 'resnet_v2_50/block4/unit_1/bottleneck_v2/conv3/bias');
conv47 = conv(relu42, kernel47, bias47, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
add14 = add(conv44, conv47);
beta15 = variable(shape = [1, 2048], label = 'resnet_v2_50/block4/unit_2/bottleneck_v2/preact/beta');
moving_mean15 = variable(shape = [1, 2048], label = 'resnet_v2_50/block4/unit_2/bottleneck_v2/preact/moving_mean');
moving_variance15 = variable(shape = [1, 2048], label = 'resnet_v2_50/block4/unit_2/bottleneck_v2/preact/moving_variance');
norm15 = batch_normalization(add14, mean = moving_mean15, variance = moving_variance15, offset = beta15, scale = 1.0, epsilon = 0.001);
relu43 = relu(norm15);
kernel48 = variable(shape = [512, 2048, 1, 1], label = 'resnet_v2_50/block4/unit_2/bottleneck_v2/conv1/kernel');
bias48 = variable(shape = [1, 512], label = 'resnet_v2_50/block4/unit_2/bottleneck_v2/conv1/bias');
conv48 = conv(relu43, kernel48, bias48, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu44 = relu(conv48);
kernel49 = variable(shape = [512, 512, 3, 3], label = 'resnet_v2_50/block4/unit_2/bottleneck_v2/conv2/kernel');
bias49 = variable(shape = [1, 512], label = 'resnet_v2_50/block4/unit_2/bottleneck_v2/conv2/bias');
conv49 = conv(relu44, kernel49, bias49, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu45 = relu(conv49);
kernel50 = variable(shape = [2048, 512, 1, 1], label = 'resnet_v2_50/block4/unit_2/bottleneck_v2/conv3/kernel');
bias50 = variable(shape = [1, 2048], label = 'resnet_v2_50/block4/unit_2/bottleneck_v2/conv3/bias');
conv50 = conv(relu45, kernel50, bias50, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
add15 = add(add14, conv50);
beta16 = variable(shape = [1, 2048], label = 'resnet_v2_50/block4/unit_3/bottleneck_v2/preact/beta');
moving_mean16 = variable(shape = [1, 2048], label = 'resnet_v2_50/block4/unit_3/bottleneck_v2/preact/moving_mean');
moving_variance16 = variable(shape = [1, 2048], label = 'resnet_v2_50/block4/unit_3/bottleneck_v2/preact/moving_variance');
norm16 = batch_normalization(add15, mean = moving_mean16, variance = moving_variance16, offset = beta16, scale = 1.0, epsilon = 0.001);
relu46 = relu(norm16);
kernel51 = variable(shape = [512, 2048, 1, 1], label = 'resnet_v2_50/block4/unit_3/bottleneck_v2/conv1/kernel');
bias51 = variable(shape = [1, 512], label = 'resnet_v2_50/block4/unit_3/bottleneck_v2/conv1/bias');
conv51 = conv(relu46, kernel51, bias51, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu47 = relu(conv51);
kernel52 = variable(shape = [512, 512, 3, 3], label = 'resnet_v2_50/block4/unit_3/bottleneck_v2/conv2/kernel');
bias52 = variable(shape = [1, 512], label = 'resnet_v2_50/block4/unit_3/bottleneck_v2/conv2/bias');
conv52 = conv(relu47, kernel52, bias52, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu48 = relu(conv52);
kernel53 = variable(shape = [2048, 512, 1, 1], label = 'resnet_v2_50/block4/unit_3/bottleneck_v2/conv3/kernel');
bias53 = variable(shape = [1, 2048], label = 'resnet_v2_50/block4/unit_3/bottleneck_v2/conv3/bias');
conv53 = conv(relu48, kernel53, bias53, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
add16 = add(add15, conv53);
beta17 = variable(shape = [1, 2048], label = 'resnet_v2_50/postnorm/beta');
moving_mean17 = variable(shape = [1, 2048], label = 'resnet_v2_50/postnorm/moving_mean');
moving_variance17 = variable(shape = [1, 2048], label = 'resnet_v2_50/postnorm/moving_variance');
norm17 = batch_normalization(add16, mean = moving_mean17, variance = moving_variance17, offset = beta17, scale = 1.0, epsilon = 0.001);
relu49 = relu(norm17);
reduce1 = mean_reduce(relu49, axes = [2, 3]);
kernel54 = variable(shape = [1000, 2048, 1, 1], label = 'resnet_v2_50/logits/kernel');
bias54 = variable(shape = [1, 1000], label = 'resnet_v2_50/logits/bias');
output = conv(reduce1, kernel54, bias54, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
}
================================================
FILE: nnef-pyproject/examples/samples/sample.py
================================================
# Copyright (c) 2017 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnef
# Parse an inline NNEF document and pretty-print the resulting graph structure.
GRAPH_SOURCE = """
version 1.0;
graph Net( input ) -> ( output )
{
input = external(shape = [1,3,224,224]);
filter = variable(shape = [32,3,5,5], label = 'conv/filter');
output = conv(input, filter);
}
"""
parsed = nnef.parse_string(GRAPH_SOURCE)
print(nnef.format_graph(parsed.name, parsed.inputs, parsed.outputs, parsed.operations, parsed.tensors))
================================================
FILE: nnef-pyproject/examples/samples/sample_ext.py
================================================
# Copyright (c) 2017 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnef
def shuffle_shape(input, groups):
    """Shape function for the custom 'shuffle' operation: channel shuffling
    rearranges data within the channel dimension, so the output shape is the
    input shape unchanged. Requires the channel count (input[1]) to be
    divisible by 'groups'."""
    divisible = (input[1] % groups == 0)
    assert divisible, "input channels ({}) is not divisible by groups ({})".format(input[1], groups)
    return input
# Parse an NNEF document that declares a custom generic fragment 'shuffle',
# then run shape inference with a user-supplied shape function for it.
# NOTE: the generic type markers '<?>' were missing from the fragment
# declaration ('fragment shuffle>( input: tensor>, ...)'), which is not valid
# NNEF syntax and would make parse_string fail; restored per the NNEF spec.
graph = nnef.parse_string(
"""
version 1.0;
extension KHR_enable_fragment_definitions;
fragment shuffle<?>( input: tensor<?>, groups: integer ) -> ( output: tensor<?> );
graph Net( input ) -> ( output )
{
input = external(shape = [1,3,224,224]);
filter = variable(shape = [32,3,5,5], label = 'conv/filter');
conv = conv(input, filter);
output = shuffle(conv, groups = 4);
}
"""
)
# 'shuffle' is not a standard op, so its shape must be inferred via custom_shapes.
nnef.infer_shapes(graph, custom_shapes={'shuffle': shuffle_shape})
print(nnef.format_graph(graph.name, graph.inputs, graph.outputs, graph.operations, graph.tensors))
================================================
FILE: nnef-pyproject/examples/samples/sample_gen.py
================================================
# Copyright (c) 2017 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnef
import numpy as np
from collections import OrderedDict
# Build a minimal NNEF graph (external -> variable -> conv) programmatically
# and save it to disk, annotating inferred shapes in the written file.
in_tensor = nnef.Tensor('input', dtype='scalar')
weight_tensor = nnef.Tensor('filter', dtype='scalar', data=np.random.randn(32,3,5,5))
out_tensor = nnef.Tensor('output', dtype='scalar')
# Operations listed in execution order; Identifier marks tensor references.
ops = [
    nnef.Operation('external', attribs={'shape': [1,3,224,224]},
                   inputs=OrderedDict(),
                   outputs=OrderedDict([('output', nnef.Identifier('input'))])),
    nnef.Operation('variable', attribs={'shape': [32,3,5,5], 'label': 'conv/filter'},
                   inputs=OrderedDict(),
                   outputs=OrderedDict([('output', nnef.Identifier('filter'))])),
    nnef.Operation('conv', attribs={},
                   inputs=OrderedDict([('input', nnef.Identifier('input')), ('filter', nnef.Identifier('filter'))]),
                   outputs=OrderedDict([('output', nnef.Identifier('output'))])),
]
graph = nnef.Graph('G', inputs=['input'], outputs=['output'], operations=ops,
                   tensors={'input': in_tensor, 'filter': weight_tensor, 'output': out_tensor})
nnef.save_graph(graph, 'G', annotate_shapes=True)
================================================
FILE: nnef-pyproject/examples/vgg.txt
================================================
# Copyright (c) 2017 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# VGG-19 classification network described in NNEF (data layout is NCHW).
version 1.0;
graph vgg_19( input ) -> ( output )
{
# network input: a single 224x224 3-channel image
input = external(shape = [1, 3, 224, 224]);
# conv1: two 3x3 convolutions with 64 filters, then 2x2 max-pooling
kernel1 = variable(shape = [64, 3, 3, 3], label = 'vgg_19/conv1/conv1_1/kernel');
bias1 = variable(shape = [1, 64], label = 'vgg_19/conv1/conv1_1/bias');
conv1 = conv(input, kernel1, bias1, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu1 = relu(conv1);
kernel2 = variable(shape = [64, 64, 3, 3], label = 'vgg_19/conv1/conv1_2/kernel');
bias2 = variable(shape = [1, 64], label = 'vgg_19/conv1/conv1_2/bias');
conv2 = conv(relu1, kernel2, bias2, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu2 = relu(conv2);
pool1 = max_pool(relu2, size = [1, 1, 2, 2], padding = [(0, 0), (0, 0), (0, 0), (0, 0)], border = 'ignore', stride = [1, 1, 2, 2]);
# conv2: two 3x3 convolutions with 128 filters, then 2x2 max-pooling
kernel3 = variable(shape = [128, 64, 3, 3], label = 'vgg_19/conv2/conv2_1/kernel');
bias3 = variable(shape = [1, 128], label = 'vgg_19/conv2/conv2_1/bias');
conv3 = conv(pool1, kernel3, bias3, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu3 = relu(conv3);
kernel4 = variable(shape = [128, 128, 3, 3], label = 'vgg_19/conv2/conv2_2/kernel');
bias4 = variable(shape = [1, 128], label = 'vgg_19/conv2/conv2_2/bias');
conv4 = conv(relu3, kernel4, bias4, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu4 = relu(conv4);
pool2 = max_pool(relu4, size = [1, 1, 2, 2], padding = [(0, 0), (0, 0), (0, 0), (0, 0)], border = 'ignore', stride = [1, 1, 2, 2]);
# conv3: four 3x3 convolutions with 256 filters, then 2x2 max-pooling
kernel5 = variable(shape = [256, 128, 3, 3], label = 'vgg_19/conv3/conv3_1/kernel');
bias5 = variable(shape = [1, 256], label = 'vgg_19/conv3/conv3_1/bias');
conv5 = conv(pool2, kernel5, bias5, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu5 = relu(conv5);
kernel6 = variable(shape = [256, 256, 3, 3], label = 'vgg_19/conv3/conv3_2/kernel');
bias6 = variable(shape = [1, 256], label = 'vgg_19/conv3/conv3_2/bias');
conv6 = conv(relu5, kernel6, bias6, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu6 = relu(conv6);
kernel7 = variable(shape = [256, 256, 3, 3], label = 'vgg_19/conv3/conv3_3/kernel');
bias7 = variable(shape = [1, 256], label = 'vgg_19/conv3/conv3_3/bias');
conv7 = conv(relu6, kernel7, bias7, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu7 = relu(conv7);
kernel8 = variable(shape = [256, 256, 3, 3], label = 'vgg_19/conv3/conv3_4/kernel');
bias8 = variable(shape = [1, 256], label = 'vgg_19/conv3/conv3_4/bias');
conv8 = conv(relu7, kernel8, bias8, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu8 = relu(conv8);
pool3 = max_pool(relu8, size = [1, 1, 2, 2], padding = [(0, 0), (0, 0), (0, 0), (0, 0)], border = 'ignore', stride = [1, 1, 2, 2]);
# conv4: four 3x3 convolutions with 512 filters, then 2x2 max-pooling
kernel9 = variable(shape = [512, 256, 3, 3], label = 'vgg_19/conv4/conv4_1/kernel');
bias9 = variable(shape = [1, 512], label = 'vgg_19/conv4/conv4_1/bias');
conv9 = conv(pool3, kernel9, bias9, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu9 = relu(conv9);
kernel10 = variable(shape = [512, 512, 3, 3], label = 'vgg_19/conv4/conv4_2/kernel');
bias10 = variable(shape = [1, 512], label = 'vgg_19/conv4/conv4_2/bias');
conv10 = conv(relu9, kernel10, bias10, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu10 = relu(conv10);
kernel11 = variable(shape = [512, 512, 3, 3], label = 'vgg_19/conv4/conv4_3/kernel');
bias11 = variable(shape = [1, 512], label = 'vgg_19/conv4/conv4_3/bias');
conv11 = conv(relu10, kernel11, bias11, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu11 = relu(conv11);
kernel12 = variable(shape = [512, 512, 3, 3], label = 'vgg_19/conv4/conv4_4/kernel');
bias12 = variable(shape = [1, 512], label = 'vgg_19/conv4/conv4_4/bias');
conv12 = conv(relu11, kernel12, bias12, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu12 = relu(conv12);
pool4 = max_pool(relu12, size = [1, 1, 2, 2], padding = [(0, 0), (0, 0), (0, 0), (0, 0)], border = 'ignore', stride = [1, 1, 2, 2]);
# conv5: four 3x3 convolutions with 512 filters, then 2x2 max-pooling
kernel13 = variable(shape = [512, 512, 3, 3], label = 'vgg_19/conv5/conv5_1/kernel');
bias13 = variable(shape = [1, 512], label = 'vgg_19/conv5/conv5_1/bias');
conv13 = conv(pool4, kernel13, bias13, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu13 = relu(conv13);
kernel14 = variable(shape = [512, 512, 3, 3], label = 'vgg_19/conv5/conv5_2/kernel');
bias14 = variable(shape = [1, 512], label = 'vgg_19/conv5/conv5_2/bias');
conv14 = conv(relu13, kernel14, bias14, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu14 = relu(conv14);
kernel15 = variable(shape = [512, 512, 3, 3], label = 'vgg_19/conv5/conv5_3/kernel');
bias15 = variable(shape = [1, 512], label = 'vgg_19/conv5/conv5_3/bias');
conv15 = conv(relu14, kernel15, bias15, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu15 = relu(conv15);
kernel16 = variable(shape = [512, 512, 3, 3], label = 'vgg_19/conv5/conv5_4/kernel');
bias16 = variable(shape = [1, 512], label = 'vgg_19/conv5/conv5_4/bias');
conv16 = conv(relu15, kernel16, bias16, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu16 = relu(conv16);
pool5 = max_pool(relu16, size = [1, 1, 2, 2], padding = [(0, 0), (0, 0), (0, 0), (0, 0)], border = 'ignore', stride = [1, 1, 2, 2]);
# fully-connected layers fc6-fc8 expressed as convolutions (7x7, 1x1, 1x1)
kernel17 = variable(shape = [4096, 512, 7, 7], label = 'vgg_19/fc6/kernel');
bias17 = variable(shape = [1, 4096], label = 'vgg_19/fc6/bias');
conv17 = conv(pool5, kernel17, bias17, padding = [(0, 0), (0, 0)], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu17 = relu(conv17);
kernel18 = variable(shape = [4096, 4096, 1, 1], label = 'vgg_19/fc7/kernel');
bias18 = variable(shape = [1, 4096], label = 'vgg_19/fc7/bias');
conv18 = conv(relu17, kernel18, bias18, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
relu18 = relu(conv18);
# fc8: final 1000-way classification layer
kernel19 = variable(shape = [1000, 4096, 1, 1], label = 'vgg_19/fc8/kernel');
bias19 = variable(shape = [1, 1000], label = 'vgg_19/fc8/bias');
output = conv(relu18, kernel19, bias19, padding = [], border = 'constant', stride = [1, 1], dilation = [1, 1]);
}
================================================
FILE: nnef-pyproject/nnef/__init__.py
================================================
# Copyright (c) 2017 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import _nnef
from .parser import *
from .printer import *
from .binary import read_tensor, write_tensor
from .shapes import infer_shapes, _StandardShapeFuncs
import os
# Re-exported native types from the _nnef extension module.
Identifier = _nnef.Identifier # subclass of str
Error = _nnef.Error # subclass of exception
Graph = _nnef.Graph # namedtuple('Graph', ['name': str, 'tensors': typing.Dict[str, Tensor], 'operations': typing.List[Operation],
# 'inputs': typing.List[str], 'outputs': typing.List['str']])
Tensor = _nnef.Tensor # namedtuple('Tensor', ['name': str, 'dtype': str, 'shape': typing.List[int], 'data': numpy.ndarray,
# 'quantization': Dict[str, object]])
Operation = _nnef.Operation # namedtuple('Operation', ['name': str, 'attribs': OrderedDict[str, object], 'inputs': OrderedDict[str, object],
# 'outputs': OrderedDict[str, object], 'dtype': str])
# make the trailing fields optional when constructing Tensor/Operation by hand
Tensor.__new__.__defaults__ = (None, None, None)
Operation.__new__.__defaults__ = (None,)
# names of operations for which standard shape inference functions exist
StandardOperations = set(_StandardShapeFuncs.keys())
def load_graph(path, stdlib=None, lowered=None, load_variables=True):
    """Load an NNEF graph from a single file or from a model folder.

    path: a single .nnef file, or a folder containing 'graph.nnef'
          (plus optional 'graph.quant' and per-variable '.dat' files).
    stdlib, lowered: forwarded to parse_file().
    load_variables: when True, variable tensor data is read from the
          '.dat' files and attached to the returned graph's tensors.

    Returns the parsed Graph.
    Raises nnef.Error when a variable file's shape disagrees with the graph.
    """
    if os.path.isfile(path):
        # a single flat file cannot carry external variable data
        return parse_file(path, stdlib=stdlib, lowered=lowered)
    graph_fn = os.path.join(path, 'graph.nnef')
    quant_fn = os.path.join(path, 'graph.quant')
    graph = parse_file(graph_fn, quant_fn if os.path.isfile(quant_fn) else None, stdlib=stdlib, lowered=lowered)
    if load_variables:
        for operation in graph.operations:
            if operation.name == 'variable':
                variable_filename = operation.attribs['label'] + '.dat'
                if variable_filename.startswith('/'):
                    # keep absolute-looking labels inside the model folder
                    variable_filename = variable_filename[1:]
                variable_filename = os.path.join(path, variable_filename)
                tensor_name = operation.outputs['output']
                # tensor binaries must be opened in binary mode; text mode
                # corrupts the payload (newline translation) or fails to decode
                with open(variable_filename, 'rb') as variable_file:
                    data = read_tensor(variable_file)
                data_shape = list(data.shape)
                shape = operation.attribs['shape']
                if data_shape != shape:
                    raise _nnef.Error('shape {} in variable file does not match shape {} defined in network structure'
                                      .format(data_shape, shape))
                tensor = graph.tensors[tensor_name]
                graph.tensors[tensor_name] = _nnef.Tensor(tensor.name, tensor.dtype, data_shape, data, tensor.quantization)
    return graph
def save_graph(graph, path, annotate_shapes=False):
    """Write `graph` to a new NNEF model folder at `path`.

    Creates the folder (which must not exist yet), writes 'graph.nnef',
    and stores one '.dat' file per variable tensor that has data attached.
    """
    if os.path.exists(path):
        raise RuntimeError("folder already exists: '{}'".format(path))
    os.makedirs(path)

    nnef_text = format_graph(graph.name, graph.inputs, graph.outputs, graph.operations, graph.tensors,
                             annotate_shapes=annotate_shapes)
    with open(os.path.join(path, 'graph.nnef'), mode='w') as structure_file:
        structure_file.write('version 1.0;\n\n')
        structure_file.write(nnef_text)

    for op in (op for op in graph.operations if op.name == 'variable'):
        relative = op.attribs['label'] + '.dat'
        if relative.startswith('/'):
            relative = relative[1:]
        filename = os.path.join(path, relative)
        os.makedirs(os.path.split(filename)[0], exist_ok=True)
        tensor = graph.tensors[op.outputs['output']]
        if tensor.data is not None:
            with open(filename, 'wb') as data_file:
                write_tensor(data_file, tensor.data, quantized=bool(tensor.quantization))
class Session:
    """Context-manager wrapper around a native _nnef execution session."""

    def __init__(self, path, stdlib=None, lowered=None):
        # path/stdlib/lowered are forwarded unchanged to the native factory
        self._handle = _nnef.create_session(path, stdlib=stdlib, lowered=lowered)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # release the native session when leaving the `with` block
        _nnef.cleanup_session(self._handle)

    def __call__(self, *inputs):
        # run the session on the given inputs; returns the native executor's result
        return _nnef.execute_session(self._handle, tuple(inputs))
================================================
FILE: nnef-pyproject/nnef/binary.py
================================================
# Copyright (c) 2017 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import numpy as np
class ItemType:
    # Item-type codes written to / read from the NNEF tensor binary header.
    # The numeric values are fixed by the file format and must not change.
    FLOAT = 0   # IEEE float, 16/32/64 bits per item
    UINT = 1    # unsigned integer
    QUINT = 2   # quantized unsigned integer
    QINT = 3    # quantized signed integer
    INT = 4     # signed integer
    BOOL = 5    # boolean; bit-packed when bits-per-item == 1
def _numpy_dtype_split(dtype):
    """Return the NNEF (item type, bits per item) pair for a numpy dtype.

    Raises TypeError for dtypes the NNEF binary format cannot represent.
    """
    float_bits = {np.float16: 16, np.float32: 32, np.float64: 64}
    int_bits = {np.int8: 8, np.int16: 16, np.int32: 32, np.int64: 64}
    uint_bits = {np.uint8: 8, np.uint16: 16, np.uint32: 32, np.uint64: 64}

    kind = dtype.type
    if kind in float_bits:
        return ItemType.FLOAT, float_bits[kind]
    if kind in int_bits:
        return ItemType.INT, int_bits[kind]
    if kind in uint_bits:
        return ItemType.UINT, uint_bits[kind]
    if kind == np.bool_:
        return ItemType.BOOL, 1
    raise TypeError('unsupported tensor dtype: ' + str(dtype))
def _numpy_dtype_make(item_type, bits):
    """Return the numpy dtype for an NNEF (item type, bits per item) pair.

    Quantized item types map to the plain integer dtypes of the same width.
    Raises ValueError for unsupported combinations.
    """
    float_by_bits = {16: np.float16, 32: np.float32, 64: np.float64}
    int_by_bits = {8: np.int8, 16: np.int16, 32: np.int32, 64: np.int64}
    uint_by_bits = {8: np.uint8, 16: np.uint16, 32: np.uint32, 64: np.uint64}

    if item_type == ItemType.FLOAT:
        dtype = float_by_bits.get(bits)
    elif item_type in (ItemType.INT, ItemType.QINT):
        dtype = int_by_bits.get(bits)
    elif item_type in (ItemType.UINT, ItemType.QUINT):
        dtype = uint_by_bits.get(bits)
    elif item_type == ItemType.BOOL and bits == 1:
        dtype = np.bool_
    else:
        dtype = None

    if dtype is None:
        raise ValueError('unsupported combination of item type ({}) and bits per item ({})'.format(item_type, bits))
    return dtype
MaxTensorRank = 8  # the binary header always stores this many extents (see read_tensor/write_tensor)
def _rank_of(shape):
rank = len(shape)
while rank > 1 and shape[rank - 1] == 1:
rank -= 1
return rank
_is_little_endian = sys.byteorder == 'little'  # the file format is little-endian; big-endian hosts must byteswap
def _tofile(data, file):
    """Write ndarray `data` to `file` as raw little-endian bytes."""
    # single-byte dtypes have no endianness; everything else is swapped on BE hosts
    needs_swap = not _is_little_endian and data.dtype != np.uint8 and data.dtype != np.int8
    if needs_swap:
        data = data.byteswap()
    if not file.seekable():
        file.write(data.tobytes())
    else:
        data.tofile(file)
def _fromfile(file, dtype, count):
    """Read `count` items of `dtype` from `file`, interpreting them as little-endian."""
    if not file.seekable():
        nbytes = count * np.dtype(dtype).itemsize
        items = np.frombuffer(file.read(nbytes), dtype, count)
    else:
        items = np.fromfile(file, dtype, count)
    # single-byte dtypes have no endianness; everything else is swapped on BE hosts
    if not _is_little_endian and items.dtype != np.uint8 and items.dtype != np.int8:
        items = items.byteswap()
    return items
def write_tensor(file, tensor, quantized=False, version=(1, 0)):
    """Write numpy ndarray `tensor` to `file` in the NNEF tensor binary format.

    file: a binary-mode file object opened for writing (not a file name).
    quantized: mark integer data as quantized in the header.
    version: (major, minor) format version to record.

    All validation is performed BEFORE any bytes are written, so a failed
    call no longer leaves a partially written header behind (the original
    wrote the magic/length words before checking rank and dtype).
    Raises ValueError/TypeError on invalid arguments.
    """
    if isinstance(file, str):
        raise ValueError('file parameter must be a file object not a file name')
    item_type, bits = _numpy_dtype_split(tensor.dtype)
    if quantized:
        if item_type == ItemType.INT:
            item_type = ItemType.QINT
        elif item_type == ItemType.UINT:
            item_type = ItemType.QUINT
        else:
            raise ValueError("invalid tensor dtype '{}' for quantized tensor".format(tensor.dtype))
    if tensor.ndim > MaxTensorRank:
        raise ValueError('tensor rank exceeds maximum possible value of {}'.format(MaxTensorRank))
    count = int(np.prod(tensor.shape))
    # bool tensors are bit-packed (8 items per byte); everything else is byte aligned
    data_length = (count + 7) // 8 if bits == 1 else count * (bits // 8)
    _tofile(np.asarray([0x4E, 0xEF, version[0], version[1]], dtype=np.uint8), file)
    _tofile(np.asarray([data_length, tensor.ndim], dtype=np.uint32), file)
    _tofile(np.asarray(tensor.shape, dtype=np.uint32), file)
    _tofile(np.asarray([0] * (MaxTensorRank - tensor.ndim), dtype=np.uint32), file)  # pad extents to MaxTensorRank
    _tofile(np.asarray([bits, item_type], dtype=np.uint32), file)
    _tofile(np.asarray([0] * 19, dtype=np.uint32), file)  # reserved header words
    data = np.packbits(tensor) if bits == 1 else tensor
    _tofile(data, file)
def read_tensor(file, return_quantization=False):
    """Read one tensor from `file` in the NNEF tensor binary format.

    file: a binary-mode file object opened for reading (not a file name).
    return_quantization: when True, also return whether the header marks
        the data as quantized.

    Returns the tensor as a numpy ndarray, or (tensor, quantized) when
    return_quantization is True. Raises ValueError on malformed input.
    """
    if isinstance(file, str):
        raise ValueError('file parameter must be a file object not a file name')
    [magic1, magic2, major, minor] = _fromfile(file, dtype=np.uint8, count=4)
    if magic1 != 0x4E or magic2 != 0xEF:
        raise ValueError('not a valid NNEF file')
    if major > 1 or minor > 0:
        raise ValueError('unsupported file version')
    [data_length, rank] = _fromfile(file, dtype=np.uint32, count=2)
    if file.seekable():
        # cross-check the declared payload length against the physical file size:
        # the format is a fixed 128-byte header followed by the data
        header_size = 128
        file_size = os.fstat(file.fileno()).st_size
        if file_size != header_size + data_length:
            raise ValueError('invalid tensor file; size does not match header info')
    if rank > MaxTensorRank:
        raise ValueError('tensor rank exceeds maximum possible value of {}'.format(MaxTensorRank))
    # the header always stores MaxTensorRank extents; only the first `rank` are meaningful
    shape = _fromfile(file, dtype=np.uint32, count=MaxTensorRank)
    shape = shape[:rank]
    [bits, item_type] = _fromfile(file, dtype=np.uint32, count=2)
    _reserved = _fromfile(file, dtype=np.uint32, count=19)
    # NOTE(review): a nonzero first reserved word appears to act as a signedness
    # flag turning UINT data into INT -- confirm against the NNEF specification
    if item_type == ItemType.UINT and _reserved[0] != 0:
        item_type = ItemType.INT
    quantized = item_type == ItemType.QINT or item_type == ItemType.QUINT
    count = int(np.prod(shape))
    if bits == 1:
        # bool payloads are bit-packed, 8 items per byte (np.unpackbits default order)
        byte_count = int((count + 7) // 8)
        data = _fromfile(file, dtype=np.uint8, count=byte_count)
        if len(data) != byte_count:
            raise ValueError('could not read tensor data')
        data = np.unpackbits(data).astype(bool)[:count]
    else:
        data = _fromfile(file, dtype=_numpy_dtype_make(item_type, bits), count=count)
        if len(data) != count:
            raise ValueError('could not read tensor data')
    tensor = data.reshape(shape)
    return (tensor, quantized) if return_quantization else tensor
def _write_tensor_provisional(file, tensor, version=(1, 0)):
    """Write `tensor` using the provisional (pre-final) NNEF binary layout."""
    # magic bytes followed by the format version
    _tofile(np.asarray([0x4E, 0xEF, version[0], version[1]], dtype=np.uint8), file)
    # header = magic/version (4) + length field (4) + rank and extents + type info (4)
    total_header = 4 + 4 + (tensor.ndim + 1) * 4 + 4
    for field in ([total_header], [tensor.ndim], tensor.shape):
        _tofile(np.asarray(field, dtype=np.uint32), file)
    type_code, bits_per_item = _numpy_dtype_split(tensor.dtype)
    _tofile(np.asarray([type_code, bits_per_item], dtype=np.uint8), file)
    _tofile(np.asarray([0], dtype=np.uint16), file)  # empty quantization info
    _tofile(tensor, file)
def _read_tensor_provisional(file):
    """Read a tensor stored in the provisional (pre-final) NNEF binary layout."""
    magic1, magic2, major, minor = _fromfile(file, dtype=np.uint8, count=4)
    if magic1 != 0x4E or magic2 != 0xEF:
        raise ValueError('not a valid NNEF file')
    if major > 1 or minor > 0:
        raise ValueError('unsupported file version')
    [_header_length] = _fromfile(file, dtype=np.uint32, count=1)
    [rank] = _fromfile(file, dtype=np.uint32, count=1)
    extents = _fromfile(file, dtype=np.uint32, count=rank)
    [type_code, bits_per_item] = _fromfile(file, dtype=np.uint8, count=2)
    [quant_length] = _fromfile(file, dtype=np.uint16, count=1)
    # the provisional reader only supports plain 32-bit floats without quantization
    assert (type_code == 0)
    assert (bits_per_item == 32)
    assert (quant_length == 0)
    item_count = int(np.prod(extents))
    return _fromfile(file, dtype=np.float32, count=item_count).reshape(extents)
================================================
FILE: nnef-pyproject/nnef/cpp/CMakeLists.txt
================================================
# Copyright (c) 2017 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
cmake_minimum_required(VERSION 3.0)

project(nnef CXX)

# build information
message(STATUS "Build Configuration: ${CMAKE_BUILD_TYPE}")
message(STATUS "Build executables in: ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}")

# nnef library
add_library(${PROJECT_NAME}
        include/cnnef.h
        include/nnef.h
        include/nnef/common/binary.h
        include/nnef/common/dictionary.h
        include/nnef/common/error.h
        include/nnef/common/lexer.h
        include/nnef/common/parser.h
        include/nnef/common/prototype.h
        include/nnef/common/shapes.h
        include/nnef/common/typespec.h
        include/nnef/common/typeutils.h
        include/nnef/common/value.h
        include/nnef/comp/comp_parser.h
        include/nnef/comp/evaluation.h
        include/nnef/comp/expression.h
        include/nnef/comp/fragment.h
        include/nnef/comp/stdlib_source.h
        include/nnef/flat/flat_parser.h
        include/nnef/flat/quant_parser.h
        include/nnef/flat/stdlib_protos.h
        src/nnef.cpp
        src/cnnef.cpp
        )

# build interface include dir is used when this cmake is included into
# a larger project
# install interface include dir will be put into the generated cmake config file
# during install step
# NOTE: the generator expressions below were lost in a previous edit and are
# restored here so consumers get the correct include path in both cases
target_include_directories(${PROJECT_NAME}
        PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
        PUBLIC $<INSTALL_INTERFACE:include>)

set_target_properties(${PROJECT_NAME} PROPERTIES CXX_STANDARD 11)
set_target_properties(${PROJECT_NAME} PROPERTIES DEBUG_POSTFIX _d)

target_link_libraries(${PROJECT_NAME})

# install the library
install(TARGETS ${PROJECT_NAME} EXPORT ${PROJECT_NAME}
        ARCHIVE DESTINATION lib
        LIBRARY DESTINATION lib
        RUNTIME DESTINATION bin)

# then the headers
install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/include DESTINATION .)

# generate and install cmake config file for find_package
install(EXPORT ${PROJECT_NAME} DESTINATION lib/cmake/${PROJECT_NAME})

# generate an auxiliary config file also needed by find_package
# it just includes the previously generated nnef.cmake
file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake "include(\${CMAKE_CURRENT_LIST_DIR}/${PROJECT_NAME}.cmake)")
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake DESTINATION lib/cmake/${PROJECT_NAME})
================================================
FILE: nnef-pyproject/nnef/cpp/include/cnnef.h
================================================
/*
* Copyright (c) 2017 The Khronos Group Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _CNNEF_H_
#define _CNNEF_H_

#include <stddef.h>   /* size_t */

#ifdef __cplusplus
extern "C" {
#endif

#ifdef __cplusplus
#if _WIN32
#define EXPORTDLL extern "C" __declspec(dllexport)
#else
#define EXPORTDLL extern "C"
#endif
#else // __cplusplus
#if _WIN32
#define EXPORTDLL __declspec(dllexport)
#else
#define EXPORTDLL
#endif
#endif // __cplusplus

/* opaque handles to the underlying C++ graph and tensor objects */
typedef void* nnef_graph_t;
typedef void* nnef_tensor_t;

/*
 * Load NNEF graph from file
 *
 * @param path: the path to the NNEF model folder
 * @param error: the string to store the error message if any
 *
 * @return NNEF graph
 */
EXPORTDLL nnef_graph_t nnef_graph_load( const char* path, char *error );

/*
 * Copy an NNEF graph
 *
 * @param graph: NNEF graph
 *
 * @return the copy of NNEF graph
 */
EXPORTDLL nnef_graph_t nnef_graph_copy( nnef_graph_t graph );

/*
 * Release NNEF graph
 *
 * @param graph: NNEF graph
 */
EXPORTDLL void nnef_graph_release( nnef_graph_t graph );

/*
 * Perform shape inference on the graph
 *
 * @param graph: the graph object
 * @param error: the string to store the error message if any
 *
 * @return true if there were no errors, false otherwise
 */
EXPORTDLL int nnef_graph_infer_shapes( nnef_graph_t graph, char *error );

/*
 * Allocate tensor buffers in the graph
 *
 * @param graph: the graph object
 * @param error: the string to store the error message if any
 *
 * @return true if there were no errors, false otherwise
 */
EXPORTDLL int nnef_graph_allocate_buffers( nnef_graph_t graph, char *error );

/*
 * Execute a graph
 *
 * @param graph: the graph object
 * @param error: the string to store the error message if any
 *
 * @return true if there were no errors, false otherwise
 */
EXPORTDLL int nnef_graph_execute( nnef_graph_t graph, char *error );

/*
 * Query input names from NNEF graph
 *
 * @param graph: NNEF graph
 * @param inputs: input names
 *
 * @return input count
 */
EXPORTDLL size_t nnef_graph_input_names( nnef_graph_t graph, const char** inputs );

/*
 * Query output names from NNEF graph
 *
 * @param graph: NNEF graph
 * @param outputs: output names
 *
 * @return output count
 */
EXPORTDLL size_t nnef_graph_output_names( nnef_graph_t graph, const char** outputs );

/*
 * Find tensor in NNEF graph by name
 *
 * @param graph: NNEF graph
 * @param tensor_name: tensor name
 *
 * @return tensor
 */
EXPORTDLL nnef_tensor_t nnef_graph_find_tensor( nnef_graph_t graph, const char* tensor_name );

/*
 * Query name of an NNEF graph
 *
 * @param graph: NNEF graph
 *
 * @return graph name
 */
EXPORTDLL const char* nnef_graph_name( nnef_graph_t graph );

/*
 * Create a new tensor
 *
 * @return tensor
 */
EXPORTDLL nnef_tensor_t nnef_tensor_create(void);

/*
 * Release a tensor
 */
EXPORTDLL void nnef_tensor_release( nnef_tensor_t tensor );

/*
 * Query tensor name
 *
 * @param tensor: tensor
 *
 * @return tensor name
 */
EXPORTDLL const char* nnef_tensor_name( nnef_tensor_t tensor );

/*
 * Query tensor data-type
 *
 * @param tensor: tensor
 *
 * @return data-type name
 */
EXPORTDLL const char* nnef_tensor_dtype( nnef_tensor_t tensor );

/*
 * Query tensor rank
 *
 * @param tensor: tensor
 *
 * @return tensor rank
 */
EXPORTDLL size_t nnef_tensor_rank( nnef_tensor_t tensor );

/*
 * Query tensor dims
 *
 * @param tensor: tensor
 *
 * @return tensor dims
 */
EXPORTDLL const int* nnef_tensor_dims( nnef_tensor_t tensor );

/*
 * Query tensor data
 *
 * @param tensor: tensor
 *
 * @return tensor data
 */
EXPORTDLL void* nnef_tensor_data( nnef_tensor_t tensor );

/*
 * Read tensor from binary file
 *
 * @param path: the name of the file to read from
 * @param tensor: tensor
 * @param error: the string to store the error message if any
 *
 * @return true if there were no errors, false otherwise
 */
EXPORTDLL int nnef_tensor_read( const char* path, nnef_tensor_t tensor, char *error );

/*
 * Write tensor to binary file
 *
 * @param path: the name of the file to write to
 * @param tensor: tensor
 * @param error: the string to store the error message if any
 *
 * @return true if there were no errors, false otherwise
 */
EXPORTDLL int nnef_tensor_write( const char* path, nnef_tensor_t tensor, char *error );

#ifdef __cplusplus
}
#endif

#endif
================================================
FILE: nnef-pyproject/nnef/cpp/include/nnef/common/binary.h
================================================
/*
* Copyright (c) 2017 The Khronos Group Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _NNEF_BINARY_H_
#define _NNEF_BINARY_H_
#include "error.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <numeric>
#include <stdexcept>
#include <string>
namespace nnef
{
// On-disk header of the NNEF tensor binary format (128 bytes in total).
// Field order and widths are fixed by the format; do not reorder or resize.
struct TensorHeader
{
    enum { MaxRank = 8 };
    // item-type codes stored in the `item_type` field
    enum ItemType { Float, Uint, Quint, Qint, Int, Bool };

    uint8_t magic[2];           // 'N', 0xEF
    uint8_t version[2];         // major, minor
    uint32_t data_length;       // payload size in bytes
    uint32_t rank;              // number of meaningful entries in `extents`
    uint32_t extents[MaxRank];  // unused trailing entries are zero-filled
    uint32_t bits_per_item;
    uint32_t item_type;
    uint32_t reserved[19];
};
// Copy `n` items from `input` to `output`, converting each element to `Out`.
// (The template parameter list was lost in a previous edit and is restored here.)
template<typename In, typename Out>
void copy_and_cast_n( In* input, size_t n, Out* output )
{
    for ( size_t i = 0; i < n; ++i )
    {
        *output++ = (Out)*input++;
    }
}
template
inline void fill_tensor_header( TensorHeader& header, const size_t version[2], const size_t rank, const T* extents,
const size_t bits_per_item, const TensorHeader::ItemType item_type )
{
const char* magic = "N\xEF";
std::fill_n((uint8_t*)&header, sizeof(header), (uint8_t)0);
header.magic[0] = (uint8_t)magic[0];
header.magic[1] = (uint8_t)magic[1];
header.version[0] = (uint8_t)version[0];
header.version[1] = (uint8_t)version[1];
if ( rank > TensorHeader::MaxRank )
{
throw Error("tensor rank %d exceeds maximum possible value (%d)", (int)rank, (int)TensorHeader::MaxRank);
}
const uint32_t item_count = std::accumulate(extents, extents + rank, (uint32_t)1, std::multiplies());
header.data_length = (uint32_t)((item_count * bits_per_item + 7) / 8);
header.bits_per_item = (uint32_t)bits_per_item;
header.rank = (uint32_t)rank;
header.item_type = item_type;
std::copy_n(extents, rank, header.extents);
}
inline void validate_tensor_header( const TensorHeader& header )
{
if ( header.magic[0] != 'N' || header.magic[1] != 0xEF )
{
throw Error("invliad magic number in tensor binary");
}
if ( header.version[0] != 1 || header.version[1] != 0 )
{
throw Error("unknown version number %d.%d", (int)header.version[0], (int)header.version[1]);
}
if ( header.rank > TensorHeader::MaxRank )
{
throw Error("tensor rank %d exceeds maximum allowed rank (%d)", (int)header.rank, (int)TensorHeader::MaxRank);
}
const size_t item_count = std::accumulate(header.extents, header.extents + header.rank, (size_t)1, std::multiplies());
if ( (size_t)header.data_length != (item_count * header.bits_per_item + 7) / 8 )
{
throw Error("data length is not compatible with extents and bits per item");
}
if ( (header.item_type & 0xffff0000) == 0 ) // Khronos-defined item type
{
const uint32_t code = (header.item_type & 0x0000ffff);
switch ( code )
{
case TensorHeader::Float:
{
if ( header.bits_per_item != 16 && header.bits_per_item != 32 && header.bits_per_item != 64 )
{
throw Error("invalid bits per item for float item type: %d", (int)header.bits_per_item);
}
break;
}
case TensorHeader::Int:
case TensorHeader::Uint:
case TensorHeader::Quint:
case TensorHeader::Qint:
{
if ( header.bits_per_item > 64 )
{
throw Error("invalid bits per item for integer item type: %d", (int)header.bits_per_item);
}
break;
}
case TensorHeader::Bool:
{
if ( header.bits_per_item != 1 && header.bits_per_item != 8 )
{
throw Error("invalid bits per item for bool item type: %d", (int)header.bits_per_item);
}
break;
}
default:
{
throw Error("unkown Khronos-defined item type code: %x", (int)code);
}
}
}
}
// Pack `n` booleans into `bytes`, 8 items per byte, most significant bit first.
// Each destination byte is cleared before its bits are set, so the result no
// longer depends on the caller pre-zeroing the buffer (the original OR-ed into
// whatever was there); the final partial byte is zero-padded.
inline void pack_bits( const size_t n, const bool* data, char* bytes )
{
    for ( size_t i = 0; i < n; ++i )
    {
        if ( i % 8 == 0 )
        {
            bytes[i / 8] = 0;
        }
        bytes[i / 8] |= (data[i] << (7 - (i % 8)));
    }
}
// Unpack `n` booleans from `bytes`, 8 items per byte, most significant bit first.
inline void unpack_bits( const size_t n, const char* bytes, bool* data )
{
    for ( size_t i = 0; i < n; ++i )
    {
        const size_t byte = i / 8;
        const size_t shift = 7 - (i % 8);
        data[i] = ((bytes[byte] >> shift) & 0x01) != 0;
    }
}
// Decode `count` float items of the given bit width from raw bytes into `data`.
// Only 32 and 64 bit sources are supported by this overload.
inline void from_bytes( const char* bytes, const size_t count, const size_t bits_per_item, float* data )
{
    switch ( bits_per_item )
    {
        case 32:
        {
            copy_and_cast_n((const float*)bytes, count, data);
            break;
        }
        case 64:
        {
            copy_and_cast_n((const double*)bytes, count, data);
            break;
        }
        default:
        {
            throw std::runtime_error("cannot load float data of " + std::to_string(bits_per_item) + " bits per item");
        }
    }
}
// Decode `count` integer items of the given bit width from raw bytes into `data`,
// treating the source as signed or unsigned according to `is_signed`.
inline void from_bytes( const char* bytes, const size_t count, const size_t bits_per_item, int* data, const bool is_signed )
{
    switch ( bits_per_item )
    {
        case 8:
        {
            if ( is_signed )
                copy_and_cast_n((const int8_t*)bytes, count, data);
            else
                copy_and_cast_n((const uint8_t*)bytes, count, data);
            break;
        }
        case 16:
        {
            if ( is_signed )
                copy_and_cast_n((const int16_t*)bytes, count, data);
            else
                copy_and_cast_n((const uint16_t*)bytes, count, data);
            break;
        }
        case 32:
        {
            if ( is_signed )
                copy_and_cast_n((const int32_t*)bytes, count, data);
            else
                copy_and_cast_n((const uint32_t*)bytes, count, data);
            break;
        }
        case 64:
        {
            if ( is_signed )
                copy_and_cast_n((const int64_t*)bytes, count, data);
            else
                copy_and_cast_n((const uint64_t*)bytes, count, data);
            break;
        }
        default:
        {
            throw std::runtime_error("cannot load int data of " + std::to_string(bits_per_item) + " bits per item");
        }
    }
}
// Decode `count` boolean items: 1 bit per item (bit-packed) or one byte per item.
inline void from_bytes( const char* bytes, const size_t count, const size_t bits_per_item, bool* data )
{
    switch ( bits_per_item )
    {
        case 1:
        {
            unpack_bits(count, bytes, data);
            break;
        }
        case 8:
        {
            copy_and_cast_n((const int8_t*)bytes, count, data);
            break;
        }
        default:
        {
            throw std::runtime_error("cannot load bool data of " + std::to_string(bits_per_item) + " bits per item");
        }
    }
}
// Encode `count` floats as raw 32-bit float items into `bytes`.
inline void to_bytes( const float* data, const size_t count, char* bytes )
{
    float* dest = (float*)bytes;
    for ( size_t i = 0; i < count; ++i )
    {
        dest[i] = data[i];
    }
}
// Encode `count` ints as 32-bit items into `bytes`, using a signed or
// unsigned 32-bit representation according to `as_signed`.
inline void to_bytes( const int* data, const size_t count, char* bytes, const bool as_signed )
{
    if ( as_signed )
    {
        int32_t* dest = (int32_t*)bytes;
        for ( size_t i = 0; i < count; ++i )
        {
            dest[i] = (int32_t)data[i];
        }
    }
    else
    {
        uint32_t* dest = (uint32_t*)bytes;
        for ( size_t i = 0; i < count; ++i )
        {
            dest[i] = (uint32_t)data[i];
        }
    }
}
// Encode `count` booleans by bit-packing them (8 per byte, MSB first) via pack_bits.
inline void to_bytes( const bool* data, const size_t count, char* bytes )
{
    pack_bits(count, data, bytes);
}
} // namespace nnef
#endif
================================================
FILE: nnef-pyproject/nnef/cpp/include/nnef/common/dictionary.h
================================================
/*
* Copyright (c) 2017 The Khronos Group Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _NNEF_DICTIONARY_H_
#define _NNEF_DICTIONARY_H_
#include
#include