Showing preview only (5,372K chars total). Download the full file or copy to clipboard to get everything.
Repository: coderSkyChen/Action_Recognition_Zoo
Branch: master
Commit: 92ec5ec3efee
Files: 438
Total size: 44.7 MB
Directory structure:
gitextract_atvvsebl/
├── Images_for_readme/
│ └── README.md
├── LICENSE
├── README.md
├── average_scores.py
├── dataset.py
├── main.py
├── model_zoo/
│ ├── LICENSE
│ ├── README.md
│ ├── __init__.py
│ ├── bninception/
│ │ ├── __init__.py
│ │ ├── bn_inception.yaml
│ │ ├── caffe_pb2.py
│ │ ├── inceptionv3.yaml
│ │ ├── layer_factory.py
│ │ ├── parse_caffe.py
│ │ └── pytorch_load.py
│ ├── inceptionresnetv2/
│ │ ├── __init__.py
│ │ ├── pytorch_load.py
│ │ ├── tensorflow_dump.py
│ │ └── torch_load.lua
│ ├── inceptionv4/
│ │ ├── __init__.py
│ │ ├── pytorch_load.py
│ │ ├── tensorflow_dump.py
│ │ └── torch_load.lua
│ └── models/
│ ├── ._.DS_Store
│ ├── .github/
│ │ └── ISSUE_TEMPLATE.md
│ ├── .gitignore
│ ├── .gitmodules
│ ├── AUTHORS
│ ├── CONTRIBUTING.md
│ ├── LICENSE
│ ├── README.md
│ ├── WORKSPACE
│ ├── autoencoder/
│ │ ├── AdditiveGaussianNoiseAutoencoderRunner.py
│ │ ├── AutoencoderRunner.py
│ │ ├── MaskingNoiseAutoencoderRunner.py
│ │ ├── Utils.py
│ │ ├── VariationalAutoencoderRunner.py
│ │ ├── __init__.py
│ │ └── autoencoder_models/
│ │ ├── Autoencoder.py
│ │ ├── DenoisingAutoencoder.py
│ │ ├── VariationalAutoencoder.py
│ │ └── __init__.py
│ ├── compression/
│ │ ├── README.md
│ │ ├── decoder.py
│ │ ├── encoder.py
│ │ └── msssim.py
│ ├── differential_privacy/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── dp_sgd/
│ │ │ ├── README.md
│ │ │ ├── dp_mnist/
│ │ │ │ ├── BUILD
│ │ │ │ └── dp_mnist.py
│ │ │ ├── dp_optimizer/
│ │ │ │ ├── BUILD
│ │ │ │ ├── dp_optimizer.py
│ │ │ │ ├── dp_pca.py
│ │ │ │ ├── sanitizer.py
│ │ │ │ └── utils.py
│ │ │ └── per_example_gradients/
│ │ │ ├── BUILD
│ │ │ └── per_example_gradients.py
│ │ ├── multiple_teachers/
│ │ │ ├── BUILD
│ │ │ ├── README.md
│ │ │ ├── aggregation.py
│ │ │ ├── analysis.py
│ │ │ ├── deep_cnn.py
│ │ │ ├── input.py
│ │ │ ├── metrics.py
│ │ │ ├── train_student.py
│ │ │ ├── train_student_mnist_250_lap_20_count_50_epochs_600.sh
│ │ │ ├── train_teachers.py
│ │ │ └── utils.py
│ │ └── privacy_accountant/
│ │ ├── python/
│ │ │ ├── BUILD
│ │ │ └── gaussian_moments.py
│ │ └── tf/
│ │ ├── BUILD
│ │ └── accountant.py
│ ├── im2txt/
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── WORKSPACE
│ │ └── im2txt/
│ │ ├── BUILD
│ │ ├── configuration.py
│ │ ├── data/
│ │ │ ├── build_mscoco_data.py
│ │ │ └── download_and_preprocess_mscoco.sh
│ │ ├── evaluate.py
│ │ ├── inference_utils/
│ │ │ ├── BUILD
│ │ │ ├── caption_generator.py
│ │ │ ├── caption_generator_test.py
│ │ │ ├── inference_wrapper_base.py
│ │ │ └── vocabulary.py
│ │ ├── inference_wrapper.py
│ │ ├── ops/
│ │ │ ├── BUILD
│ │ │ ├── image_embedding.py
│ │ │ ├── image_embedding_test.py
│ │ │ ├── image_processing.py
│ │ │ └── inputs.py
│ │ ├── run_inference.py
│ │ ├── show_and_tell_model.py
│ │ ├── show_and_tell_model_test.py
│ │ └── train.py
│ ├── inception/
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── WORKSPACE
│ │ └── inception/
│ │ ├── BUILD
│ │ ├── data/
│ │ │ ├── build_image_data.py
│ │ │ ├── build_imagenet_data.py
│ │ │ ├── download_and_preprocess_flowers.sh
│ │ │ ├── download_and_preprocess_flowers_mac.sh
│ │ │ ├── download_and_preprocess_imagenet.sh
│ │ │ ├── download_imagenet.sh
│ │ │ ├── imagenet_2012_validation_synset_labels.txt
│ │ │ ├── imagenet_lsvrc_2015_synsets.txt
│ │ │ ├── imagenet_metadata.txt
│ │ │ ├── preprocess_imagenet_validation_data.py
│ │ │ └── process_bounding_boxes.py
│ │ ├── dataset.py
│ │ ├── flowers_data.py
│ │ ├── flowers_eval.py
│ │ ├── flowers_train.py
│ │ ├── image_processing.py
│ │ ├── imagenet_data.py
│ │ ├── imagenet_distributed_train.py
│ │ ├── imagenet_eval.py
│ │ ├── imagenet_train.py
│ │ ├── inception_distributed_train.py
│ │ ├── inception_eval.py
│ │ ├── inception_model.py
│ │ ├── inception_train.py
│ │ └── slim/
│ │ ├── BUILD
│ │ ├── README.md
│ │ ├── collections_test.py
│ │ ├── inception_model.py
│ │ ├── inception_test.py
│ │ ├── losses.py
│ │ ├── losses_test.py
│ │ ├── ops.py
│ │ ├── ops_test.py
│ │ ├── scopes.py
│ │ ├── scopes_test.py
│ │ ├── slim.py
│ │ ├── variables.py
│ │ └── variables_test.py
│ ├── lm_1b/
│ │ ├── BUILD
│ │ ├── README.md
│ │ ├── data_utils.py
│ │ └── lm_1b_eval.py
│ ├── namignizer/
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── data_utils.py
│ │ ├── model.py
│ │ └── names.py
│ ├── neural_gpu/
│ │ ├── README.md
│ │ ├── data_utils.py
│ │ ├── neural_gpu.py
│ │ └── neural_gpu_trainer.py
│ ├── neural_programmer/
│ │ ├── README.md
│ │ ├── data_utils.py
│ │ ├── model.py
│ │ ├── neural_programmer.py
│ │ ├── nn_utils.py
│ │ ├── parameters.py
│ │ └── wiki_data.py
│ ├── resnet/
│ │ ├── BUILD
│ │ ├── README.md
│ │ ├── cifar_input.py
│ │ ├── resnet_main.py
│ │ └── resnet_model.py
│ ├── slim/
│ │ ├── ._.DS_Store
│ │ ├── BUILD
│ │ ├── README.md
│ │ ├── datasets/
│ │ │ ├── __init__.py
│ │ │ ├── cifar10.py
│ │ │ ├── dataset_factory.py
│ │ │ ├── dataset_utils.py
│ │ │ ├── download_and_convert_cifar10.py
│ │ │ ├── download_and_convert_flowers.py
│ │ │ ├── download_and_convert_mnist.py
│ │ │ ├── flowers.py
│ │ │ ├── imagenet.py
│ │ │ └── mnist.py
│ │ ├── deployment/
│ │ │ ├── __init__.py
│ │ │ ├── model_deploy.py
│ │ │ └── model_deploy_test.py
│ │ ├── download_and_convert_data.py
│ │ ├── eval_image_classifier.py
│ │ ├── nets/
│ │ │ ├── __init__.py
│ │ │ ├── alexnet.py
│ │ │ ├── alexnet_test.py
│ │ │ ├── cifarnet.py
│ │ │ ├── inception.py
│ │ │ ├── inception_resnet_v2.py
│ │ │ ├── inception_resnet_v2_test.py
│ │ │ ├── inception_utils.py
│ │ │ ├── inception_v1.py
│ │ │ ├── inception_v1_test.py
│ │ │ ├── inception_v2.py
│ │ │ ├── inception_v2_test.py
│ │ │ ├── inception_v3.py
│ │ │ ├── inception_v3_test.py
│ │ │ ├── inception_v4.py
│ │ │ ├── inception_v4_test.py
│ │ │ ├── lenet.py
│ │ │ ├── nets_factory.py
│ │ │ ├── nets_factory_test.py
│ │ │ ├── overfeat.py
│ │ │ ├── overfeat_test.py
│ │ │ ├── resnet_utils.py
│ │ │ ├── resnet_v1.py
│ │ │ ├── resnet_v1_test.py
│ │ │ ├── resnet_v2.py
│ │ │ ├── resnet_v2_test.py
│ │ │ ├── vgg.py
│ │ │ └── vgg_test.py
│ │ ├── preprocessing/
│ │ │ ├── __init__.py
│ │ │ ├── cifarnet_preprocessing.py
│ │ │ ├── inception_preprocessing.py
│ │ │ ├── lenet_preprocessing.py
│ │ │ ├── preprocessing_factory.py
│ │ │ └── vgg_preprocessing.py
│ │ ├── scripts/
│ │ │ ├── finetune_inception_v1_on_flowers.sh
│ │ │ ├── finetune_inception_v3_on_flowers.sh
│ │ │ ├── train_cifarnet_on_cifar10.sh
│ │ │ └── train_lenet_on_mnist.sh
│ │ ├── slim_walkthough.ipynb
│ │ └── train_image_classifier.py
│ ├── street/
│ │ ├── README.md
│ │ ├── cc/
│ │ │ └── rnn_ops.cc
│ │ ├── g3doc/
│ │ │ └── vgslspecs.md
│ │ ├── python/
│ │ │ ├── decoder.py
│ │ │ ├── decoder_test.py
│ │ │ ├── errorcounter.py
│ │ │ ├── errorcounter_test.py
│ │ │ ├── nn_ops.py
│ │ │ ├── shapes.py
│ │ │ ├── shapes_test.py
│ │ │ ├── vgsl_eval.py
│ │ │ ├── vgsl_input.py
│ │ │ ├── vgsl_model.py
│ │ │ ├── vgsl_model_test.py
│ │ │ ├── vgsl_train.py
│ │ │ ├── vgslspecs.py
│ │ │ └── vgslspecs_test.py
│ │ └── testdata/
│ │ ├── arial-32-tiny
│ │ ├── arial.charset_size=105.txt
│ │ ├── charset_size=134.txt
│ │ ├── charset_size_10.txt
│ │ ├── mnist-tiny
│ │ ├── numbers-16-tiny
│ │ └── numbers.charset_size=12.txt
│ ├── swivel/
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── analogy.cc
│ │ ├── eval.mk
│ │ ├── fastprep.cc
│ │ ├── fastprep.mk
│ │ ├── glove_to_shards.py
│ │ ├── nearest.py
│ │ ├── prep.py
│ │ ├── swivel.py
│ │ ├── text2bin.py
│ │ ├── vecs.py
│ │ └── wordsim.py
│ ├── syntaxnet/
│ │ ├── .gitignore
│ │ ├── Dockerfile
│ │ ├── README.md
│ │ ├── WORKSPACE
│ │ ├── syntaxnet/
│ │ │ ├── BUILD
│ │ │ ├── affix.cc
│ │ │ ├── affix.h
│ │ │ ├── arc_standard_transitions.cc
│ │ │ ├── arc_standard_transitions_test.cc
│ │ │ ├── base.h
│ │ │ ├── beam_reader_ops.cc
│ │ │ ├── beam_reader_ops_test.py
│ │ │ ├── binary_segment_state.cc
│ │ │ ├── binary_segment_state.h
│ │ │ ├── binary_segment_state_test.cc
│ │ │ ├── binary_segment_transitions.cc
│ │ │ ├── binary_segment_transitions_test.cc
│ │ │ ├── char_properties.cc
│ │ │ ├── char_properties.h
│ │ │ ├── char_properties_test.cc
│ │ │ ├── conll2tree.py
│ │ │ ├── context.pbtxt
│ │ │ ├── demo.sh
│ │ │ ├── dictionary.proto
│ │ │ ├── document_filters.cc
│ │ │ ├── document_format.cc
│ │ │ ├── document_format.h
│ │ │ ├── embedding_feature_extractor.cc
│ │ │ ├── embedding_feature_extractor.h
│ │ │ ├── feature_extractor.cc
│ │ │ ├── feature_extractor.h
│ │ │ ├── feature_extractor.proto
│ │ │ ├── feature_types.h
│ │ │ ├── fml_parser.cc
│ │ │ ├── fml_parser.h
│ │ │ ├── graph_builder.py
│ │ │ ├── graph_builder_test.py
│ │ │ ├── kbest_syntax.proto
│ │ │ ├── lexicon_builder.cc
│ │ │ ├── lexicon_builder_test.py
│ │ │ ├── load_parser_ops.py
│ │ │ ├── models/
│ │ │ │ ├── parsey_mcparseface/
│ │ │ │ │ ├── context.pbtxt
│ │ │ │ │ ├── label-map
│ │ │ │ │ ├── parser-params
│ │ │ │ │ ├── prefix-table
│ │ │ │ │ ├── suffix-table
│ │ │ │ │ ├── tag-map
│ │ │ │ │ ├── tagger-params
│ │ │ │ │ └── word-map
│ │ │ │ └── parsey_universal/
│ │ │ │ ├── context-tokenize-zh.pbtxt
│ │ │ │ ├── context.pbtxt
│ │ │ │ ├── parse.sh
│ │ │ │ ├── tokenize.sh
│ │ │ │ └── tokenize_zh.sh
│ │ │ ├── morpher_transitions.cc
│ │ │ ├── morphology_label_set.cc
│ │ │ ├── morphology_label_set.h
│ │ │ ├── morphology_label_set_test.cc
│ │ │ ├── ops/
│ │ │ │ └── parser_ops.cc
│ │ │ ├── parser_eval.py
│ │ │ ├── parser_features.cc
│ │ │ ├── parser_features.h
│ │ │ ├── parser_features_test.cc
│ │ │ ├── parser_state.cc
│ │ │ ├── parser_state.h
│ │ │ ├── parser_trainer.py
│ │ │ ├── parser_trainer_test.sh
│ │ │ ├── parser_transitions.cc
│ │ │ ├── parser_transitions.h
│ │ │ ├── populate_test_inputs.cc
│ │ │ ├── populate_test_inputs.h
│ │ │ ├── proto_io.h
│ │ │ ├── reader_ops.cc
│ │ │ ├── reader_ops_test.py
│ │ │ ├── registry.cc
│ │ │ ├── registry.h
│ │ │ ├── segmenter_utils.cc
│ │ │ ├── segmenter_utils.h
│ │ │ ├── segmenter_utils_test.cc
│ │ │ ├── sentence.proto
│ │ │ ├── sentence_batch.cc
│ │ │ ├── sentence_batch.h
│ │ │ ├── sentence_features.cc
│ │ │ ├── sentence_features.h
│ │ │ ├── sentence_features_test.cc
│ │ │ ├── shared_store.cc
│ │ │ ├── shared_store.h
│ │ │ ├── shared_store_test.cc
│ │ │ ├── sparse.proto
│ │ │ ├── structured_graph_builder.py
│ │ │ ├── syntaxnet.bzl
│ │ │ ├── tagger_transitions.cc
│ │ │ ├── tagger_transitions_test.cc
│ │ │ ├── task_context.cc
│ │ │ ├── task_context.h
│ │ │ ├── task_spec.proto
│ │ │ ├── term_frequency_map.cc
│ │ │ ├── term_frequency_map.h
│ │ │ ├── test_main.cc
│ │ │ ├── testdata/
│ │ │ │ ├── context.pbtxt
│ │ │ │ ├── document
│ │ │ │ └── mini-training-set
│ │ │ ├── text_formats.cc
│ │ │ ├── text_formats_test.py
│ │ │ ├── unpack_sparse_features.cc
│ │ │ ├── utils.cc
│ │ │ ├── utils.h
│ │ │ ├── workspace.cc
│ │ │ └── workspace.h
│ │ ├── third_party/
│ │ │ └── utf/
│ │ │ ├── BUILD
│ │ │ ├── README
│ │ │ ├── rune.c
│ │ │ ├── runestrcat.c
│ │ │ ├── runestrchr.c
│ │ │ ├── runestrcmp.c
│ │ │ ├── runestrcpy.c
│ │ │ ├── runestrdup.c
│ │ │ ├── runestrecpy.c
│ │ │ ├── runestrlen.c
│ │ │ ├── runestrncat.c
│ │ │ ├── runestrncmp.c
│ │ │ ├── runestrncpy.c
│ │ │ ├── runestrrchr.c
│ │ │ ├── runestrstr.c
│ │ │ ├── runetype.c
│ │ │ ├── runetypebody.c
│ │ │ ├── utf.h
│ │ │ ├── utfdef.h
│ │ │ ├── utfecpy.c
│ │ │ ├── utflen.c
│ │ │ ├── utfnlen.c
│ │ │ ├── utfrrune.c
│ │ │ ├── utfrune.c
│ │ │ └── utfutf.c
│ │ ├── tools/
│ │ │ └── bazel.rc
│ │ ├── universal.md
│ │ └── util/
│ │ └── utf8/
│ │ ├── BUILD
│ │ ├── gtest_main.cc
│ │ ├── unicodetext.cc
│ │ ├── unicodetext.h
│ │ ├── unicodetext_main.cc
│ │ ├── unicodetext_unittest.cc
│ │ ├── unilib.cc
│ │ ├── unilib.h
│ │ └── unilib_utf8_utils.h
│ ├── textsum/
│ │ ├── BUILD
│ │ ├── README.md
│ │ ├── batch_reader.py
│ │ ├── beam_search.py
│ │ ├── data/
│ │ │ ├── data
│ │ │ └── vocab
│ │ ├── data.py
│ │ ├── data_convert_example.py
│ │ ├── seq2seq_attention.py
│ │ ├── seq2seq_attention_decode.py
│ │ ├── seq2seq_attention_model.py
│ │ └── seq2seq_lib.py
│ ├── transformer/
│ │ ├── README.md
│ │ ├── cluttered_mnist.py
│ │ ├── data/
│ │ │ └── README.md
│ │ ├── example.py
│ │ ├── spatial_transformer.py
│ │ └── tf_utils.py
│ └── video_prediction/
│ ├── README.md
│ ├── download_data.sh
│ ├── lstm_ops.py
│ ├── prediction_input.py
│ ├── prediction_model.py
│ ├── prediction_train.py
│ └── push_datafiles.txt
├── models.py
├── optical_flow/
│ ├── gpu_main.cpp
│ ├── gpu_makefile
│ ├── main.cpp
│ └── makefile
├── opts.py
├── process_dataset.py
├── test_models.py
└── transforms.py
================================================
FILE CONTENTS
================================================
================================================
FILE: Images_for_readme/README.md
================================================
================================================
FILE: LICENSE
================================================
MIT License
Copyright (c) 2018 陈潇凯
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: README.md
================================================
# Action Recognition Zoo
Codes for popular action recognition models, written based on pytorch, verified on the [something-something](https://www.twentybn.com/datasets/something-something) dataset. This code is built on top of the [TRN-pytorch](https://github.com/metalbubble/TRN-pytorch).
**Note**: The main purpose of this repository is to go through several methods and get familiar with their pipelines.
**Note**: always use git clone --recursive https://github.com/coderSkyChen/Action_Recognition_Zoo to clone this project Otherwise you will not be able to use the inception series CNN architecture.
## Dependencies
- Opencv-2.4.13 or some greater version that has tvl1 api for the computing of optical flow.
- Pytorch-0.2.0_3
- Tensorflow-1.3.1,this is only for the using of tensorboard, it's ok without this, but you need to comment the corresponding codes.
## Data preparation
### Dataset
- **Download** the [something-something](https://www.twentybn.com/datasets/something-something) dataset. Decompress them into some folder.
- Note that this dataset contains 108,499 videos and each video is presented in JPG images. The JPG images were extracted from the original videos at 12 frames per second.
- The temporal evolution in videos is important for this dataset, so it's hard for some classic models which pay attention to short motion such as: Two-Stream Convolutional Networks for Action Recognition in Videos, NIPS 2014.
### Prepare optical flow using Opencv
Note that optical flow is an important modal feature in two-stream series methods, which contains the motion information of videos.
Since there are only RGB frames in the official dataset, we need to compute optical flow ourselves.
I apply a TV-L1 optical flow algorithm, pixel values are truncated to the range \[-20, 20\], then rescaled between 0 and 255, each optical flow has two channels representing horizontal and vertical components. Note that the fps in original dataset is 12, which is too fast for optical flow computing in practice, so i sample frame at 6fps.
- The command to compute optical flow:
```
cd optical_flow
make bin #for cpu
make -f gpu_makefile gpu_bin #for gpu
./bin #for cpu
./gpu_bin #for gpu
```
Before using the code you should modify the path in main.cpp or gpu_main.cpp.
### Generate the meta files
```
python process_dataset.py
```
# Models
Before using the code you should modify the path as your own. The test time for one video is measured on one K80.
## Two stream action recognition
**Main Reference Paper**: [Two-stream convolutional networks for action recognition in videos](http://papers.nips.cc/paper/5353-two-stream-convolutional)
- Base CNN: BN-Inception pretrained on ImageNet.
- Partial BN and cross-modality tricks have been used in the code.
- Spatial stream: its input is a single RGB frame.
- Temporal stream: its input is stacked optical flows.
### Training
- Spatial CNN: A single rgb frame is randomly selected for a video, which equals to image classification,input channel is 3.
- Temporal CNN: 5 consequent stacked optical flows are selected for a video, input channel is 5*2(2 channels:x and y).
- The command to train models:
```
train for spatial stream:
python main.py TwoStream RGB two-stream-rgb --arch BNInception --batch_size 256 --lr 0.002
train for temporal stream:
python main.py TwoStream Flow two-stream-flow --arch BNInception --batch_size 256 --lr 0.0005
```
### Testing on validation set
At test time, given a video, i sample a fixed number of frames (25 for spatial stream and 8 for temporal stream in my experiments) with equal temporal spacing between them. From each of the frames i then obtain 10 ConvNet
inputs by cropping and flipping four corners and the center of the frame. The class scores for the
whole video are then obtained by averaging the scores across the sampled frames and crops therein.
- The command to test models:
```
test for spatial stream:
python test_models.py --model TwoStream --modality RGB --weights TwoStream_RGB_BNInception_best.pth.tar --train_id two-stream-rgb --save_scores rgb.npz --arch BNInception --test_segments 25
test for temporal stream;
python test_models.py --model TwoStream --modality Flow --weights TwoStream_Flow_BNInception_best.pth.tar --train_id two-stream-flow --save_scores flow.npz --arch BNInception --test_segments 25
```
After running the test code, we get the precision scores on validation set and the probability for all class is saved in npz files which is useful in late fusion.
```
fusion: combine spatial stream and temporal stream results.
python average_scores.py
```
## Temporal Segment Networks
**Main Reference Paper**: [Temporal Segment Networks: Towards Good Practices for Deep Action Recognition](https://arxiv.org/abs/1611.05267)
- Base CNN: BN-Inception pretrained on ImageNet.
- Partial BN and cross-modality tricks have been used in the code.
- Spatial stream: its input is k RGB frames, where k is the segment number.
- Temporal stream: its input is k stacked optical flows.
- The consensus function i've implemented is average function.
### Training
```
train spatial stream:
python main.py TSN RGB tsn-rgb --arch BNInception --batch_size 128 --lr 0.001 --num_segments 3
train temporal stream:
python main.py TSN Flow tsn-flow --arch BNInception --batch_size 128 --lr 0.0007 --num_segments 3
```
### Testing on validation set
Note that in the testing phase k equals 1, according to the paper and its official code. So the segment mechanism is only used in the training phase.
```
test spatial stream:
python test_models.py --model TSN --modality RGB --weights TSN_RGB_BNInception_best.pth.tar --train_id tsn-rgb --save_scores rgb.npz --arch BNInception --test_segments 25
test temporal stream:
python test_models.py --model TSN --modality Flow --weights TSN_Flow_BNInception_best.pth.tar --train_id tsn-flow --save_scores flow.npz --arch BNInception --test_segments 25
fusion:
python average_scores.py # need modify the path to your own
```
## Pretrained-C3D :3D Convolutional Networks
**Main Reference Paper**: [Learning Spatiotemporal Features with 3D Convolutional Networks](https://arxiv.org/abs/1412.0767)
- finetune the model pretrained on sports-1M, the pretrained model is upload to Baidu Cloud: [link](https://pan.baidu.com/s/1A-iAn4x45CHFgs7caOAFZw)
### Training
```
python main.py C3D RGB c3d-rgb --arch BNInception --batch_size 32 --lr 0.0001 --num_segments 1 --lr_steps 2 5 10 20 --factor 0.5
```
### Testing
```
python test_models.py --model C3D --modality RGB --weights C3D_RGB_BNInception_best.pth.tar --train_id c3d-rgb --save_scores rgb.npz --test_segments 5 --test_crops 1
```
### Results on validation set
- It seems like the C3D is faster than previous methods, but the input size for C3D is `112*112` vs `224*224` for Two-Stream models.
- The result is not good. I've found that it's hard to train a 3D CNN on this difficult dataset. This is mainly due to the poor GPU which slows the training phase, so it's hard to choose proper hyperparameters with my machine, but this code works and it'll give you a **quick start**.
## I3D
**Main Reference Paper**: [Quo Vadis, Action Recognition? A New Model and the Kinetics Dataset](https://arxiv.org/abs/1705.07750)
- The code for I3D model is based on [hassony2](https://github.com/hassony2/kinetics_i3d_pytorch)
- Training is too slow to report the results on Something-Something, but this code is useful
- Kinetics pretrained model is uploaded to Baidu Cloud: [link](https://pan.baidu.com/s/18pfAM2fYVsA6KxhX4A_pMQ)
### Training
```
python main.py I3D RGB i3d-rgb --arch I3D --batch_size 32 --lr 0.002 --num_segments 1 --lr_steps 2 10 20 --factor 0.5
```
================================================
FILE: average_scores.py
================================================
# @Author : Sky chen
# @Email : dzhchxk@126.com
# @Personal homepage : https://coderskychen.cn
import numpy as np
import pdb
def valid(files_scores=None, num_classes=174):
    """Late-fuse per-stream score files and report top-1 accuracy.

    Each ``.npz`` file must contain an object array ``scores`` of shape
    (num_videos, 2): column 0 holds a ``(1, num_classes)`` score vector per
    video, column 1 holds the integer ground-truth label.  Scores are summed
    across files (late fusion) before taking the argmax.

    :param files_scores: list of ``.npz`` paths; defaults to the author's
        tsn-flow / tsn-rgb result files (original hard-coded behavior).
    :param num_classes: number of action classes (174 for Something-Something).
    :return: fused top-1 accuracy as a float in [0, 1].
    """
    if files_scores is None:
        files_scores = ['/home/mcg/cxk/action-recognition-zoo/results/tsn-flow/output/flow.npz',
                        '/home/mcg/cxk/action-recognition-zoo/results/tsn-rgb/output/rgb.npz']
    allsum = None
    labels = None
    for filename in files_scores:
        print(filename)
        # allow_pickle is required on modern numpy to load object arrays
        data = np.load(filename, allow_pickle=True)
        scores = data['scores']
        per_video = np.stack([x.reshape(num_classes) for x in scores[:, 0]])
        if allsum is None:
            # infer the video count from the first file instead of hard-coding 11522
            allsum = np.zeros_like(per_video)
            # Labels are identical across streams; keep the first file's copy.
            # BUG FIX: the original appended every file's labels to a Python
            # list and evaluated ``preds == labels`` against that list, which
            # broadcast the comparison over all files and double-counted
            # correct predictions, inflating the reported accuracy.
            labels = np.asarray(list(scores[:, 1]), dtype=np.int64)
        allsum += per_video
    preds = np.argmax(allsum, axis=1)
    num_correct = np.sum(preds == labels)
    acc = num_correct * 1.0 / preds.shape[0]
    print('acc=%.3f' % (acc))
    return acc


if __name__ == '__main__':
    valid()
================================================
FILE: dataset.py
================================================
# @Author : Sky chen
# @Email : dzhchxk@126.com
# @Personal homepage : https://coderskychen.cn
import torch.utils.data as data
from PIL import Image
import os
import os.path
import numpy as np
from numpy.random import randint
import random
class VideoRecord(object):
    """Lightweight wrapper around one row of a video list file.

    A row is ``[video_id, num_frames]`` (unlabeled test lists) or
    ``[video_id, num_frames, class_idx]`` (train/val lists).
    """

    def __init__(self, row):
        self._data = row

    @property
    def path(self):
        """Directory (relative to the dataset root) holding the frames."""
        return self._data[0]

    @property
    def num_frames(self):
        """Number of frames available for this video."""
        return int(self._data[1])

    @property
    def label(self):
        """Class index, or -1 when the row carries no label."""
        return -1 if len(self._data) == 2 else int(self._data[2])
class TwoStreamDataSet(data.Dataset):
    """Dataset for two-stream action recognition.

    Each training sample is one random RGB frame (spatial stream) or one
    stack of ``new_length`` consecutive optical-flow pairs (temporal stream)
    drawn from a video.  At test time ``num_segments`` equally spaced
    positions are sampled instead.

    Rows of ``list_file`` are ``[video_id, num_frames]`` (test lists) or
    ``[video_id, num_frames, class_idx]`` (train/val lists).
    """

    def __init__(self, root_path, list_file, num_segments=3,
                 new_length=1, modality='RGB',
                 image_tmpl='img_{:05d}.jpg', transform=None,
                 random_shift=True, test_mode=False):
        self.root_path = root_path        # root folder with one frame dir per video
        self.list_file = list_file        # path to the meta/list file
        self.new_length = new_length      # consecutive frames stacked per sample
        self.modality = modality          # 'RGB' or 'Flow'
        self.image_tmpl = image_tmpl      # frame filename template
        self.transform = transform        # applied to the list of PIL images
        self.random_shift = random_shift
        self.test_mode = test_mode        # test lists may lack labels / short videos
        self.num_segments = num_segments  # only used at test time (equal spacing)
        self._parse_list()

    def _load_image(self, directory, idx):
        """Return the PIL image(s) for frame ``idx`` of video ``directory``.

        RGB: one 3-channel image; on a read error the path is logged and
        frame 1 is returned instead so one corrupt frame cannot abort
        training.  Flow: the x- and y-component images as two grayscale
        images.
        """
        if self.modality == 'RGB':
            try:
                return [Image.open(os.path.join(self.root_path, directory, self.image_tmpl.format(idx))).convert('RGB')]
            except Exception:
                print('error loading image:', os.path.join(self.root_path, directory, self.image_tmpl.format(idx)))
                return [Image.open(os.path.join(self.root_path, directory, self.image_tmpl.format(1))).convert('RGB')]
        elif self.modality == 'Flow':
            x_img = Image.open(os.path.join(self.root_path, directory, self.image_tmpl.format('x', idx))).convert('L')
            y_img = Image.open(os.path.join(self.root_path, directory, self.image_tmpl.format('y', idx))).convert('L')
            return [x_img, y_img]

    def _parse_list(self):
        """Build ``self.video_list`` (list of VideoRecord) from the list file.

        For Flow the frame count is halved because x/y flow images are
        interleaved on disk.  Training/validation lists drop videos shorter
        than 3 frames; test lists keep every row.
        """
        # fix: close the list file deterministically (original leaked the
        # handle returned by open())
        with open(self.list_file) as f:
            if not self.test_mode:
                tmp = [x.strip().split(' ') for x in f]
            else:
                tmp = [x.strip().split() for x in f]
        if self.modality == 'Flow':
            for item in tmp:
                # // keeps the halved count an int under Python 3 (the
                # original true division produced a float; floor is identical)
                item[1] = int(item[1]) // 2
        if not self.test_mode:
            # drop videos too short to sample from
            tmp = [item for item in tmp if int(item[1]) >= 3]
        self.video_list = [VideoRecord(item) for item in tmp]
        print('video number:%d' % (len(self.video_list)))

    def _get_val_indices(self, record):
        """Return ``num_segments`` equally spaced 1-based frame indices.

        When the video is too short for distinct segments, fall back to a
        sorted random sample.  For Flow, offset k maps to image index 2k+1
        because x/y images are interleaved on disk.
        """
        if record.num_frames > self.num_segments + self.new_length - 1:
            tick = (record.num_frames - self.new_length + 1) / float(self.num_segments)
            offsets = np.array([int(tick / 2.0 + tick * x) for x in range(self.num_segments)])
        else:
            offsets = np.sort(randint(record.num_frames - self.new_length + 1, size=self.num_segments))
        if self.modality == 'Flow':
            offsets = offsets * 2 + 1
        else:
            offsets += 1  # frame files are 1-based
        return offsets

    def __getitem__(self, index):
        """Return ``(transformed_frames, label)`` for the video at ``index``.

        If the frame folder is missing on disk, a random other video is
        substituted (best-effort behavior kept from the original).
        """
        record = self.video_list[index]
        if self.modality == 'RGB':
            while not os.path.exists(os.path.join(self.root_path, record.path, self.image_tmpl.format(1))):
                print(os.path.join(self.root_path, record.path, self.image_tmpl.format(1)) + ' not exists jumpping')
                index = np.random.randint(len(self.video_list))
                record = self.video_list[index]
        else:
            while not os.path.exists(os.path.join(self.root_path, record.path, self.image_tmpl.format('x', 1))):
                print(
                    os.path.join(self.root_path, record.path, self.image_tmpl.format('x', 1)) + ' not exists jumpping')
                index = np.random.randint(len(self.video_list))
                record = self.video_list[index]
        if not self.test_mode:
            # training: one random start position (image-classification style)
            sample_indice = [randint(low=1, high=record.num_frames + 2 - self.new_length)]
            if self.modality == 'Flow':
                sample_indice = [x * 2 - 1 for x in sample_indice]  # flow index 1 3 5 7 ...
        else:
            sample_indice = self._get_val_indices(record)
        return self.get(record, sample_indice)

    def get(self, record, indice):
        """Load ``new_length`` frames starting at each index, apply the
        transform, and return ``(data, label)``."""
        images = list()
        for seg_ind in indice:
            p = int(seg_ind)
            for i in range(self.new_length):  # stack consecutive frames from seg_ind
                images.extend(self._load_image(record.path, p))
                if p < record.num_frames:
                    # flow frames are interleaved x/y, so step by 2
                    p += 1 if self.modality == 'RGB' else 2
        # one image: H*W*C
        process_data = self.transform(images)
        return process_data, record.label

    def __len__(self):
        return len(self.video_list)
class TSNDataSet(data.Dataset):
    """Dataset for Temporal Segment Networks (TSN).

    The video is divided into ``num_segments`` equal spans; one snippet of
    ``new_length`` consecutive frames (RGB) or flow stacks (Flow) is sampled
    from each span — randomly during training, centred for validation/test.

    Rows of ``list_file`` are ``[video_id, num_frames]`` (test lists) or
    ``[video_id, num_frames, class_idx]`` (train/val lists).
    """

    def __init__(self, root_path, list_file,
                 num_segments=3, new_length=1, modality='RGB',
                 image_tmpl='img_{:05d}.jpg', transform=None,
                 random_shift=True, test_mode=False):
        self.root_path = root_path        # root folder with one frame dir per video
        self.list_file = list_file        # path to the meta/list file
        self.num_segments = num_segments  # segments per video
        self.new_length = new_length      # consecutive frames per snippet
        self.modality = modality          # 'RGB' or 'Flow'
        self.image_tmpl = image_tmpl      # frame filename template
        self.transform = transform        # applied to the list of PIL images
        self.random_shift = random_shift  # random (train) vs centred (val) sampling
        self.test_mode = test_mode        # test lists may lack labels / short videos
        self._parse_list()

    def _load_image(self, directory, idx):
        """Return the PIL image(s) for frame ``idx`` of video ``directory``.

        RGB: one 3-channel image; on a read error the path is logged and
        frame 1 is returned instead so one corrupt frame cannot abort
        training.  Flow: the x- and y-component images as two grayscale
        images.
        """
        if self.modality == 'RGB':
            try:
                return [Image.open(os.path.join(self.root_path, directory, self.image_tmpl.format(idx))).convert('RGB')]
            except Exception:
                print('error loading image:', os.path.join(self.root_path, directory, self.image_tmpl.format(idx)))
                return [Image.open(os.path.join(self.root_path, directory, self.image_tmpl.format(1))).convert('RGB')]
        elif self.modality == 'Flow':
            x_img = Image.open(os.path.join(self.root_path, directory, self.image_tmpl.format('x', idx))).convert('L')
            y_img = Image.open(os.path.join(self.root_path, directory, self.image_tmpl.format('y', idx))).convert('L')
            return [x_img, y_img]

    def _parse_list(self):
        """Build ``self.video_list`` (list of VideoRecord) from the list file.

        For Flow the frame count is halved because x/y flow images are
        interleaved on disk.  Training/validation lists drop videos shorter
        than 3 frames; test lists keep every row.
        """
        # fix: close the list file deterministically (original leaked the
        # handle returned by open())
        with open(self.list_file) as f:
            if not self.test_mode:
                tmp = [x.strip().split(' ') for x in f]
            else:
                tmp = [x.strip().split() for x in f]
        if self.modality == 'Flow':
            for item in tmp:
                # // keeps the halved count an int under Python 3 (the
                # original true division produced a float; floor is identical)
                item[1] = int(item[1]) // 2
        if not self.test_mode:
            # drop videos too short to sample from
            tmp = [item for item in tmp if int(item[1]) >= 3]
        self.video_list = [VideoRecord(item) for item in tmp]
        print('video number:%d' % (len(self.video_list)))

    def _sample_indices(self, record):
        """Randomly choose one 1-based start index per segment (training).

        :param record: VideoRecord
        :return: ndarray of ``num_segments`` indices
        """
        average_duration = (record.num_frames - self.new_length + 1) // self.num_segments
        if average_duration > 0:  # random sample: one draw per equal-length segment
            offsets = np.multiply(list(range(self.num_segments)), average_duration) + randint(average_duration,
                                                                                              size=self.num_segments)
        elif record.num_frames > self.num_segments:  # [0,0,1,1,1,2,2,3] dense sample
            offsets = np.sort(randint(record.num_frames - self.new_length + 1, size=self.num_segments))
        else:
            # video shorter than the number of segments: repeat frame 0
            offsets = np.zeros((self.num_segments,))
        if self.modality == 'Flow':
            offsets = offsets * 2 + 1  # flow offset k maps to image index 2k+1
        else:
            offsets += 1  # frame files are 1-based
        return offsets

    def _get_val_indices(self, record):
        """Return ``num_segments`` equally spaced 1-based indices (validation).

        When the video is too short for distinct segments, fall back to a
        sorted random sample.
        """
        if record.num_frames > self.num_segments + self.new_length - 1:
            tick = (record.num_frames - self.new_length + 1) / float(self.num_segments)
            offsets = np.array([int(tick / 2.0 + tick * x) for x in range(self.num_segments)])
        else:
            offsets = np.sort(randint(record.num_frames - self.new_length + 1, size=self.num_segments))
        if self.modality == 'Flow':
            offsets = offsets * 2 + 1
        else:
            offsets += 1
        return offsets

    def _get_test_indices(self, record):
        """Test-time sampling; identical to :meth:`_get_val_indices`.

        (The original duplicated the body verbatim; delegating keeps the two
        in sync.)
        """
        return self._get_val_indices(record)

    def __getitem__(self, index):
        """Return ``(transformed_frames, label)`` for the video at ``index``.

        If the frame folder is missing on disk, a random other video is
        substituted (best-effort behavior kept from the original).
        """
        record = self.video_list[index]
        if self.modality == 'RGB':
            while not os.path.exists(os.path.join(self.root_path, record.path, self.image_tmpl.format(1))):
                print(os.path.join(self.root_path, record.path, self.image_tmpl.format(1)) + ' not exists jumpping')
                index = np.random.randint(len(self.video_list))
                record = self.video_list[index]
        else:
            while not os.path.exists(os.path.join(self.root_path, record.path, self.image_tmpl.format('x', 1))):
                print(
                    os.path.join(self.root_path, record.path, self.image_tmpl.format('x', 1)) + ' not exists jumpping')
                index = np.random.randint(len(self.video_list))
                record = self.video_list[index]
        if not self.test_mode:
            segment_indices = self._sample_indices(record) if self.random_shift else self._get_val_indices(record)
        else:
            segment_indices = self._get_test_indices(record)
        return self.get(record, segment_indices)

    def get(self, record, indices):
        """Load ``new_length`` frames starting at each index, apply the
        transform, and return ``(data, label)``."""
        images = list()
        for seg_ind in indices:
            p = int(seg_ind)
            for i in range(self.new_length):  # stack consecutive frames from seg_ind
                images.extend(self._load_image(record.path, p))
                if p < record.num_frames:
                    # flow frames are interleaved x/y, so step by 2
                    p += 1 if self.modality == 'RGB' else 2
        # one image: H*W*C
        process_data = self.transform(images)
        return process_data, record.label

    def __len__(self):
        return len(self.video_list)
class C3DDataSet(data.Dataset):
    """Dataset for the C3D model: each sample is a clip of ``new_length``
    consecutive frames from one video.

    NOTE(review): this class largely duplicates TSNDataSet/TwoStreamDataSet;
    the three could share a common base class.
    """

    def __init__(self, root_path, list_file,
                 num_segments=3, new_length=1, modality='RGB',
                 image_tmpl='img_{:05d}.jpg', transform=None,
                 random_shift=True, test_mode=False):
        """Store the sampling configuration and parse the video list file.

        :param root_path: root folder holding one frame directory per video
        :param list_file: meta file; rows are [video_id, num_frames(, class_idx)]
        :param num_segments: number of clips sampled per video
        :param new_length: consecutive frames per clip
        :param modality: 'RGB' or 'Flow'
        :param image_tmpl: frame filename template
        :param transform: callable applied to the list of loaded PIL images
        :param random_shift: random (train) vs centred (val) sampling
        :param test_mode: test lists may lack labels and skip length filtering
        """
        self.root_path = root_path
        self.list_file = list_file
        self.num_segments = num_segments
        self.new_length = new_length
        self.modality = modality
        self.image_tmpl = image_tmpl
        self.transform = transform
        self.random_shift = random_shift
        self.test_mode = test_mode
        self._parse_list()
def _load_image(self, directory, idx):
if self.modality == 'RGB':
try:
return [Image.open(os.path.join(self.root_path, directory, self.image_tmpl.format(idx))).convert('RGB')]
except Exception:
print('error loading image:', os.path.join(self.root_path, directory, self.image_tmpl.format(idx)))
return [Image.open(os.path.join(self.root_path, directory, self.image_tmpl.format(1))).convert('RGB')]
elif self.modality == 'Flow':
x_img = Image.open(os.path.join(self.root_path, directory, self.image_tmpl.format('x', idx))).convert('L')
y_img = Image.open(os.path.join(self.root_path, directory, self.image_tmpl.format('y', idx))).convert('L')
return [x_img, y_img]
def _parse_list(self):
# check the frame number is large >3:
# usualy it is [video_id, num_frames, class_idx]
if not self.test_mode:
tmp = [x.strip().split(' ') for x in open(self.list_file)]
if self.modality == 'Flow':
for item in tmp:
item[1] = int(item[1]) / 2
tmp = [item for item in tmp if int(item[1]) >= 3]
self.video_list = [VideoRecord(item) for item in tmp]
print('video number:%d' % (len(self.video_list)))
else:
tmp = [x.strip().split() for x in open(self.list_file)]
if self.modality == 'Flow':
for item in tmp:
item[1] = int(item[1]) / 2
# tmp = [item for item in tmp if int(item[1]) >= 3]
self.video_list = [VideoRecord(item) for item in tmp]
print('video number:%d' % (len(self.video_list)))
def _sample_indices(self, record):
"""
:param record: VideoRecord
:return: list
"""
average_duration = (record.num_frames - self.new_length + 1) // self.num_segments
if average_duration > 0: # random sample
offsets = np.multiply(list(range(self.num_segments)), average_duration) + randint(average_duration,
size=self.num_segments)
elif record.num_frames > self.num_segments: # [0,0,1,1,1,2,2,3] dense sample
offsets = np.sort(randint(record.num_frames - self.new_length + 1, size=self.num_segments))
else:
offsets = np.zeros((self.num_segments,))
if self.modality == 'Flow':
offsets = offsets * 2 + 1
else:
offsets += 1
return offsets
def _get_val_indices(self, record):
if record.num_frames > self.num_segments + self.new_length - 1:
tick = (record.num_frames - self.new_length + 1) / float(self.num_segments)
offsets = np.array([int(tick / 2.0 + tick * x) for x in range(self.num_segments)])
else:
offsets = np.sort(randint(record.num_frames - self.new_length + 1, size=self.num_segments))
if self.modality == 'Flow':
offsets = offsets * 2 + 1
else:
offsets += 1
return offsets
def _get_test_indices(self, record):
if record.num_frames > self.num_segments + self.new_length - 1:
tick = (record.num_frames - self.new_length + 1) / float(self.num_segments)
offsets = np.array([int(tick / 2.0 + tick * x) for x in range(self.num_segments)])
else:
offsets = np.sort(randint(record.num_frames - self.new_length + 1, size=self.num_segments))
offsets += 1
return offsets
def __getitem__(self, index):
record = self.video_list[index]
# check this is a legit video folder
if self.modality == 'RGB':
while not os.path.exists(os.path.join(self.root_path, record.path, self.image_tmpl.format(1))):
print(os.path.join(self.root_path, record.path, self.image_tmpl.format(1)) + ' not exists jumpping')
index = np.random.randint(len(self.video_list))
record = self.video_list[index]
else:
while not os.path.exists(os.path.join(self.root_path, record.path, self.image_tmpl.format('x', 1))):
print(
os.path.join(self.root_path, record.path, self.image_tmpl.format('x', 1)) + ' not exists jumpping')
index = np.random.randint(len(self.video_list))
record = self.video_list[index]
if not self.test_mode:
if record.num_frames > self.new_length:
segment_indices = [randint(low=1, high=record.num_frames + 2 - self.new_length)]
else:
segment_indices = [1]
else:
segment_indices = self._get_test_indices(record)
return self.get(record, segment_indices)
def get(self, record, indices):
images = list()
for seg_ind in indices:
p = int(seg_ind)
for i in range(self.new_length):
img = self._load_image(record.path, p)
images.extend(img)
if p < record.num_frames:
if self.modality == 'RGB':
p += 1
# one image: H*W*C
process_data = self.transform(images)
return process_data, record.label
def __len__(self):
return len(self.video_list)
================================================
FILE: main.py
================================================
# @Author : Sky chen
# @Email : dzhchxk@126.com
# @Personal homepage : https://coderskychen.cn
try:
import tensorflow as tf
except ImportError:
print("Tensorflow not installed; No tensorboard logging.")
tf = None
import argparse
import os
import time
import shutil
import torch
import torchvision
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
from torch.nn.utils import clip_grad_norm
from dataset import *
from models import *
from transforms import *
from opts import parser
def add_summary_value(writer, key, value, iteration):
    """Log one scalar `value` under tag `key` at step `iteration` to a tf summary writer."""
    point = tf.Summary.Value(tag=key, simple_value=value)
    writer.add_summary(tf.Summary(value=[point]), iteration)
def return_something_path(modality):
    """Resolve Something-Something dataset paths for the given modality.

    Returns (categories, train_list_path, val_list_path, frame_root, frame_tmpl).
    Paths are hard-coded for the original author's machine.
    """
    filename_categories = '/home/mcg/cxk/dataset/somthing-something/category.txt'
    if modality == 'RGB':
        root_data = '/home/mcg/cxk/dataset/somthing-something/something-rgb'
        filename_imglist_train = '/home/mcg/cxk/dataset/somthing-something/train_videofolder_rgb.txt'
        filename_imglist_val = '/home/mcg/cxk/dataset/somthing-something/val_videofolder_rgb.txt'
        prefix = '{:05d}.jpg'
    else:
        root_data = '/home/mcg/cxk/dataset/somthing-something/something-optical-flow'
        filename_imglist_train = '/home/mcg/cxk/dataset/somthing-something/train_videofolder_flow.txt'
        filename_imglist_val = '/home/mcg/cxk/dataset/somthing-something/val_videofolder_flow.txt'
        prefix = '{:s}_{:05d}.jpg'
    # one class name per line
    with open(filename_categories) as f:
        categories = [line.rstrip() for line in f]
    return categories, filename_imglist_train, filename_imglist_val, root_data, prefix
best_prec1 = 0  # best validation top-1 accuracy seen so far; updated inside main()
def main():
    """Entry point: parse CLI args, build the selected model and dataset pair,
    then run the train/validate loop, checkpointing the best model."""
    global args, best_prec1
    args = parser.parse_args()
    assert len(args.train_id) > 0
    check_rootfolders(args.train_id)
    # summary_w is None when tensorflow is not installed (tf is None then)
    summary_w = tf and tf.summary.FileWriter(os.path.join('results', args.train_id, args.root_log)) #tensorboard
    categories, args.train_list, args.val_list, args.root_path, prefix = return_something_path(args.modality)
    num_class = len(categories)
    args.store_name = '_'.join([args.model, args.modality, args.arch])
    print('storing name: ' + args.store_name)
    # -1 is a sentinel meaning "no per-layer optimizer policies" (plain SGD is used)
    policies = -1
    if args.model == 'TwoStream':
        model = TwoStream(num_class, args.modality,
                          base_model=args.arch, dropout=args.dropout,
                          crop_num=1, partial_bn=not args.no_partialbn)
        policies = model.get_optim_policies()
    elif args.model == 'TSN':
        model = TSN(num_class, args.num_segments, args.modality,
                    base_model=args.arch, dropout=args.dropout,
                    crop_num=1, partial_bn=not args.no_partialbn)
        policies = model.get_optim_policies()
    elif args.model == 'C3D':
        # C3D starts from a Sports-1M pretrained pickle instead of optimizer policies.
        model = C3D()
        model_dict = model.state_dict()
        pretrained_dict = torch.load('./model_zoo/c3d.pickle')
        # 1. filter out unnecessary keys
        pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
        # 2. overwrite entries in the existing state dict
        model_dict.update(pretrained_dict)
        # 3. load the new state dict
        model.load_state_dict(model_dict)
        print('c3d pretrained model loaded~')
    else:
        print('error!')
        exit()
    # Input geometry/statistics come from the model so transforms match the backbone.
    crop_size = model.crop_size
    scale_size = model.scale_size
    input_mean = model.input_mean
    input_std = model.input_std
    train_augmentation = model.get_augmentation()
    model = torch.nn.DataParallel(model, device_ids=args.gpus).cuda()
    if args.resume:
        if os.path.isfile(args.resume):
            print(("=> loading checkpoint '{}'".format(args.resume)))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            # NOTE(review): this prints args.evaluate but the checkpoint was loaded
            # from args.resume — looks like a copy-paste slip; confirm and fix.
            print(("=> loaded checkpoint '{}' (epoch {})"
                   .format(args.evaluate, checkpoint['epoch'])))
        else:
            print(("=> no checkpoint found at '{}'".format(args.resume)))
    cudnn.benchmark = True
    # Data loading code
    # RGBDiff normalizes itself, so skip mean/std normalization there.
    if args.modality != 'RGBDiff':
        normalize = GroupNormalize(input_mean, input_std)
    else:
        normalize = IdentityTransform()
    # data_length = frames loaded per sample: 1 RGB frame, a stack of 5 for flow/diff,
    # or a 16-frame clip for C3D.
    if args.modality == 'RGB':
        data_length = 1
    elif args.modality in ['Flow', 'RGBDiff']:
        data_length = 5
    if args.modality == 'RGB' and args.model == 'C3D':
        data_length = 16  # clip length for C3D
    if args.model == 'TwoStream':
        datasettrain = TwoStreamDataSet(args.root_path, args.train_list,
                                        new_length=data_length,
                                        modality=args.modality,
                                        image_tmpl=prefix,
                                        transform=torchvision.transforms.Compose([
                                            train_augmentation,
                                            Stack(roll=(args.arch in ['BNInception', 'InceptionV3'])),
                                            ToTorchFormatTensor(div=(args.arch not in ['BNInception', 'InceptionV3'])),
                                            normalize,
                                        ]))
        datasetval = TwoStreamDataSet(args.root_path, args.val_list,
                                      new_length=data_length,
                                      modality=args.modality,
                                      image_tmpl=prefix,
                                      random_shift=False,
                                      transform=torchvision.transforms.Compose([
                                          GroupScale(int(scale_size)),
                                          GroupCenterCrop(crop_size),
                                          Stack(roll=(args.arch in ['BNInception', 'InceptionV3'])),
                                          ToTorchFormatTensor(div=(args.arch not in ['BNInception', 'InceptionV3'])),
                                          normalize,
                                      ]))
    elif args.model == 'TSN':
        datasettrain = TSNDataSet(args.root_path, args.train_list, args.num_segments,
                                  new_length=data_length,
                                  modality=args.modality,
                                  image_tmpl=prefix,
                                  transform=torchvision.transforms.Compose([
                                      train_augmentation,
                                      Stack(roll=(args.arch in ['BNInception', 'InceptionV3'])),
                                      ToTorchFormatTensor(div=(args.arch not in ['BNInception', 'InceptionV3'])),
                                      normalize,
                                  ]))
        datasetval = TSNDataSet(args.root_path, args.val_list, args.num_segments,
                                new_length=data_length,
                                modality=args.modality,
                                image_tmpl=prefix,
                                random_shift=False,
                                transform=torchvision.transforms.Compose([
                                    GroupScale(int(scale_size)),
                                    GroupCenterCrop(crop_size),
                                    Stack(roll=(args.arch in ['BNInception', 'InceptionV3'])),
                                    ToTorchFormatTensor(div=(args.arch not in ['BNInception', 'InceptionV3'])),
                                    normalize,
                                ]))
    elif args.model == 'C3D':
        # C3D uses a single clip per video, hence num_segments=1
        datasettrain = C3DDataSet(args.root_path, args.train_list, 1,
                                  new_length=data_length,
                                  modality=args.modality,
                                  image_tmpl=prefix,
                                  transform=torchvision.transforms.Compose([
                                      train_augmentation,
                                      Stack(roll=(args.arch in ['BNInception', 'InceptionV3'])),
                                      ToTorchFormatTensor(div=(args.arch not in ['BNInception', 'InceptionV3', 'C3D'])),
                                      normalize,
                                  ]))
        datasetval = C3DDataSet(args.root_path, args.val_list, 1,
                                new_length=data_length,
                                modality=args.modality,
                                image_tmpl=prefix,
                                random_shift=False,
                                transform=torchvision.transforms.Compose([
                                    GroupScale(int(scale_size)),
                                    GroupCenterCrop(crop_size),
                                    Stack(roll=(args.arch in ['BNInception', 'InceptionV3'])),
                                    ToTorchFormatTensor(
                                        div=(args.arch not in ['BNInception', 'InceptionV3', 'C3D'])),
                                    normalize,
                                ]))
    trainvidnum = len(datasettrain)
    valvidnum = len(datasetval)
    train_loader = torch.utils.data.DataLoader(
        datasettrain,
        batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True)
    val_loader = torch.utils.data.DataLoader(
        datasetval,
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)
    # define loss function (criterion) and optimizer
    criterion = torch.nn.CrossEntropyLoss().cuda()
    if policies != -1:
        # per-layer lr/decay multipliers supplied by the model
        for group in policies:
            print(('group: {} has {} params, lr_mult: {}, decay_mult: {}'.format(
                group['name'], len(group['params']), group['lr_mult'], group['decay_mult'])))
        optimizer = torch.optim.SGD(policies, args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    else:
        optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    # log_training = open(os.path.join(args.root_log, '%s.csv' % args.store_name), 'w')
    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch, args.lr_steps, args.factor, policies != -1)
        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, trainvidnum, summary_w)
        # evaluate on validation set
        if (epoch + 1) % args.eval_freq == 0 or epoch == args.epochs - 1:
            # tensorboard x-axis is "samples seen", hence (epoch + 1) * trainvidnum
            prec1 = validate(val_loader, model, criterion, (epoch + 1) * trainvidnum, summary_w)
            # prec1 = validate(val_loader, model, criterion, (epoch + 1) * len(train_loader), summary_w)
            # remember best prec@1 and save checkpoint
            is_best = prec1 > best_prec1
            best_prec1 = max(prec1, best_prec1)
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
            }, is_best)
def train(train_loader, model, criterion, optimizer, epoch, vidnums, summary_w):
    """Run one training epoch and log running metrics to tensorboard.

    :param vidnums: number of training samples; used to build a global
        "samples seen" counter for the tensorboard x-axis.
    :param summary_w: tf summary writer, or None when tensorflow is missing.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # Partial-BN trick: freeze all BN layers except the first unless disabled.
    if args.no_partialbn:
        model.module.partialBN(False)
    else:
        model.module.partialBN(True)
    # switch to train mode
    model.train()
    samples_have_seen = epoch * vidnums  # global sample counter for logging
    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        # `async=True` became a SyntaxError in Python 3.7; non_blocking is its replacement.
        target = target.cuda(non_blocking=True)
        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)
        bs = input_var.size(0)
        # compute output
        output = model(input_var)
        loss = criterion(output, target_var)
        # measure accuracy and record loss
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        # .item() replaces the deprecated `tensor[0]` indexing of 0-dim tensors.
        losses.update(loss.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))
        top5.update(prec5.item(), input.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        if args.clip_gradient is not None:
            total_norm = clip_grad_norm(model.parameters(), args.clip_gradient)
            if total_norm > args.clip_gradient:
                print("clipping gradient: {} with coef {}".format(total_norm, args.clip_gradient / total_norm))
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        samples_have_seen += bs
        if i % args.print_freq == 0:
            output = ('Epoch: [{0}][{1}/{2}], lr: {lr:.5f}\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                epoch, i, len(train_loader), batch_time=batch_time,
                data_time=data_time, loss=losses, top1=top1, top5=top5, lr=optimizer.param_groups[-1]['lr']))
            print(output)
            add_summary_value(summary_w, 'train_loss', losses.val, samples_have_seen)
            add_summary_value(summary_w, 'train_Prec@1', top1.val, samples_have_seen)
            add_summary_value(summary_w, 'train_Prec@5', top5.val, samples_have_seen)
            add_summary_value(summary_w, 'train_Prec@1_mean', top1.avg, samples_have_seen)
            add_summary_value(summary_w, 'train_Prec@5_mean', top5.avg, samples_have_seen)
            add_summary_value(summary_w, 'lr', optimizer.param_groups[-1]['lr'], samples_have_seen)
def validate(val_loader, model, criterion, iter, summary_w):
    """Evaluate on the validation set and return the average top-1 accuracy (%).

    :param iter: global step ("samples seen") used as the tensorboard x value.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # switch to evaluate mode
    model.eval()
    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        # `async=True` became a SyntaxError in Python 3.7; non_blocking replaces it.
        target = target.cuda(non_blocking=True)
        # torch.no_grad() replaces the removed `volatile=True` inference flag.
        with torch.no_grad():
            input_var = torch.autograd.Variable(input)
            target_var = torch.autograd.Variable(target)
            # compute output
            output = model(input_var)
            loss = criterion(output, target_var)
        # measure accuracy and record loss
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        # .item() replaces the deprecated `tensor[0]` indexing of 0-dim tensors.
        losses.update(loss.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))
        top5.update(prec5.item(), input.size(0))
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            output = ('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                i, len(val_loader), batch_time=batch_time, loss=losses,
                top1=top1, top5=top5))
            print(output)
    output = ('Testing Results: Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f} Loss {loss.avg:.5f}'
              .format(top1=top1, top5=top5, loss=losses))
    print(output)
    add_summary_value(summary_w, 'val_loss', losses.avg, iter)
    add_summary_value(summary_w, 'val_Prec@1', top1.avg, iter)
    add_summary_value(summary_w, 'val_Prec@5', top5.avg, iter)
    output_best = '\nBest Prec@1: %.3f' % (best_prec1)
    print(output_best)
    return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Persist `state` under ./results/<train_id>/<root_model>/ and mirror it
    to *_best.pth.tar when `is_best`.

    `filename` is unused but kept for signature compatibility; the actual
    paths are derived from the global `args`. Building each path once removes
    the triplicated format string of the original.
    """
    base = './results/%s/%s/%s' % (args.train_id, args.root_model, args.store_name)
    ckpt_path = base + '_checkpoint.pth.tar'
    torch.save(state, ckpt_path)
    if is_best:
        shutil.copyfile(ckpt_path, base + '_best.pth.tar')
class AverageMeter(object):
    """Track the most recent value and the running mean of a metric.

    Exposes: .val (last value), .sum, .count, .avg (weighted mean).
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, lr_steps, factor, with_police=True):
    """Step-decay the learning rate by `factor` at every milestone in `lr_steps`.

    When `with_police` is True each param group additionally scales lr and
    weight decay by its own lr_mult/decay_mult (TSN-style policies).
    """
    # One multiplication by `factor` per milestone already passed.
    lr_decay = factor ** (sum(epoch >= np.array(lr_steps)))
    lr = args.lr * lr_decay
    wd = args.weight_decay  # weight decay itself is never decayed
    for param_group in optimizer.param_groups:
        if with_police:
            param_group['lr'] = lr * param_group['lr_mult']
            param_group['weight_decay'] = wd * param_group['decay_mult']
        else:
            param_group['lr'] = lr
            param_group['weight_decay'] = wd
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def check_rootfolders(trainid):
    """Create ./results/<trainid>/{log,model,output} folders if missing.

    Folder names come from the global `args`. Using exist_ok avoids the
    check-then-create race of the original code.
    """
    folders_util = [args.root_log, args.root_model, args.root_output]
    os.makedirs('./results', exist_ok=True)
    for folder in folders_util:
        folder_path = os.path.join('./results', trainid, folder)
        if not os.path.exists(folder_path):
            print('creating folder ' + folder)
            os.makedirs(folder_path, exist_ok=True)
# Run training only when executed as a script, not on import.
if __name__ == '__main__':
    main()
================================================
FILE: model_zoo/LICENSE
================================================
Copyright (c) 2017 LIP6 Lab
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-------------------------------------------------
This product contains portions of third party software provided under this license:
dump_filters.py (x)
===============
Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
(x) adapted from https://github.com/tensorflow/tensorflow/blob/411f57e/tensorflow/models/image/imagenet/classify_image.py
================================================
FILE: model_zoo/README.md
================================================
# Tensorflow Model Zoo for Torch7 and PyTorch
This is a porting of tensorflow pretrained models made by [Remi Cadene](http://remicadene.com) and [Micael Carvalho](http://micaelcarvalho.com). Special thanks to Moustapha Cissé. All models have been tested on Imagenet.
This work was inspired by [inception-v3.torch](https://github.com/Moodstocks/inception-v3.torch).
## Using pretrained models
### Torch7
#### Requirements
Please install [torchnet-vision](https://github.com/Cadene/torchnet-vision).
```
luarocks install --server=http://luarocks.org/dev torchnet-vision
```
Models available:
- inceptionv3
- inceptionv4
- inceptionresnetv2
- resnet{18, 34, 50, 101, 152, 200}
- overfeat
- vggm
- vgg16
#### Simple example
```lua
require 'image'
tnt = require 'torchnet'
vision = require 'torchnet-vision'
model = vision.models.inceptionresnetv2
net = model.load()
augmentation = tnt.transform.compose{
vision.image.transformimage.randomScale{
minSize = 299, maxSize = 350
},
vision.image.transformimage.randomCrop(299),
vision.image.transformimage.colorNormalize{
mean = model.mean, std = model.std
},
function(img) return img:float() end
}
net:evaluate()
output = net:forward(augmentation(image.lena()))
```
### PyTorch
Currently these models are available in this repo only; they may land in pytorch/vision eventually!
Models available:
- inceptionv4
- inceptionresnetv2
#### Simple example
```python
import torch
from inceptionv4.pytorch_load import inceptionv4
net = inceptionv4()
input = torch.autograd.Variable(torch.ones(1,3,299,299))
output = net.forward(input)
```
## Reproducing the porting
### Requirements
- Tensorflow
- Torch7
- PyTorch
- hdf5 for python3
- hdf5 for lua
### Example of commands
In Tensorflow: Download tensorflow parameters and extract them in `./dump` directory.
```
python3 inceptionv4/tensorflow_dump.py
```
In Torch7 or PyTorch: Create the network, load the parameters, launch a few tests, and save the network in the `./save` directory.
```
th inceptionv4/torch_load.lua
python3 inceptionv4/pytorch_load.py
```
================================================
FILE: model_zoo/__init__.py
================================================
from .inceptionresnetv2.pytorch_load import inceptionresnetv2
from .inceptionv4.pytorch_load import inceptionv4
from .bninception.pytorch_load import BNInception, InceptionV3
================================================
FILE: model_zoo/bninception/__init__.py
================================================
================================================
FILE: model_zoo/bninception/bn_inception.yaml
================================================
inputs: []
layers:
- attrs: {kernel_size: 7, num_output: 64, pad: 3, stride: 2}
expr: conv1_7x7_s2<=Convolution<=data
id: conv1_7x7_s2
- attrs: {frozen: true}
expr: conv1_7x7_s2_bn<=BN<=conv1_7x7_s2
id: conv1_7x7_s2_bn
- {expr: conv1_7x7_s2_bn<=ReLU<=conv1_7x7_s2_bn, id: conv1_relu_7x7}
- attrs: {kernel_size: 3, mode: max, stride: 2}
expr: pool1_3x3_s2<=Pooling<=conv1_7x7_s2_bn
id: pool1_3x3_s2
- attrs: {kernel_size: 1, num_output: 64}
expr: conv2_3x3_reduce<=Convolution<=pool1_3x3_s2
id: conv2_3x3_reduce
- attrs: {frozen: true}
expr: conv2_3x3_reduce_bn<=BN<=conv2_3x3_reduce
id: conv2_3x3_reduce_bn
- {expr: conv2_3x3_reduce_bn<=ReLU<=conv2_3x3_reduce_bn, id: conv2_relu_3x3_reduce}
- attrs: {kernel_size: 3, num_output: 192, pad: 1}
expr: conv2_3x3<=Convolution<=conv2_3x3_reduce_bn
id: conv2_3x3
- attrs: {frozen: true}
expr: conv2_3x3_bn<=BN<=conv2_3x3
id: conv2_3x3_bn
- {expr: conv2_3x3_bn<=ReLU<=conv2_3x3_bn, id: conv2_relu_3x3}
- attrs: {kernel_size: 3, mode: max, stride: 2}
expr: pool2_3x3_s2<=Pooling<=conv2_3x3_bn
id: pool2_3x3_s2
- attrs: {kernel_size: 1, num_output: 64}
expr: inception_3a_1x1<=Convolution<=pool2_3x3_s2
id: inception_3a_1x1
- attrs: {frozen: true}
expr: inception_3a_1x1_bn<=BN<=inception_3a_1x1
id: inception_3a_1x1_bn
- {expr: inception_3a_1x1_bn<=ReLU<=inception_3a_1x1_bn, id: inception_3a_relu_1x1}
- attrs: {kernel_size: 1, num_output: 64}
expr: inception_3a_3x3_reduce<=Convolution<=pool2_3x3_s2
id: inception_3a_3x3_reduce
- attrs: {frozen: true}
expr: inception_3a_3x3_reduce_bn<=BN<=inception_3a_3x3_reduce
id: inception_3a_3x3_reduce_bn
- {expr: inception_3a_3x3_reduce_bn<=ReLU<=inception_3a_3x3_reduce_bn, id: inception_3a_relu_3x3_reduce}
- attrs: {kernel_size: 3, num_output: 64, pad: 1}
expr: inception_3a_3x3<=Convolution<=inception_3a_3x3_reduce_bn
id: inception_3a_3x3
- attrs: {frozen: true}
expr: inception_3a_3x3_bn<=BN<=inception_3a_3x3
id: inception_3a_3x3_bn
- {expr: inception_3a_3x3_bn<=ReLU<=inception_3a_3x3_bn, id: inception_3a_relu_3x3}
- attrs: {kernel_size: 1, num_output: 64}
expr: inception_3a_double_3x3_reduce<=Convolution<=pool2_3x3_s2
id: inception_3a_double_3x3_reduce
- attrs: {frozen: true}
expr: inception_3a_double_3x3_reduce_bn<=BN<=inception_3a_double_3x3_reduce
id: inception_3a_double_3x3_reduce_bn
- {expr: inception_3a_double_3x3_reduce_bn<=ReLU<=inception_3a_double_3x3_reduce_bn,
id: inception_3a_relu_double_3x3_reduce}
- attrs: {kernel_size: 3, num_output: 96, pad: 1}
expr: inception_3a_double_3x3_1<=Convolution<=inception_3a_double_3x3_reduce_bn
id: inception_3a_double_3x3_1
- attrs: {frozen: true}
expr: inception_3a_double_3x3_1_bn<=BN<=inception_3a_double_3x3_1
id: inception_3a_double_3x3_1_bn
- {expr: inception_3a_double_3x3_1_bn<=ReLU<=inception_3a_double_3x3_1_bn, id: inception_3a_relu_double_3x3_1}
- attrs: {kernel_size: 3, num_output: 96, pad: 1}
expr: inception_3a_double_3x3_2<=Convolution<=inception_3a_double_3x3_1_bn
id: inception_3a_double_3x3_2
- attrs: {frozen: true}
expr: inception_3a_double_3x3_2_bn<=BN<=inception_3a_double_3x3_2
id: inception_3a_double_3x3_2_bn
- {expr: inception_3a_double_3x3_2_bn<=ReLU<=inception_3a_double_3x3_2_bn, id: inception_3a_relu_double_3x3_2}
- attrs: {kernel_size: 3, mode: ave, pad: 1, stride: 1}
expr: inception_3a_pool<=Pooling<=pool2_3x3_s2
id: inception_3a_pool
- attrs: {kernel_size: 1, num_output: 32}
expr: inception_3a_pool_proj<=Convolution<=inception_3a_pool
id: inception_3a_pool_proj
- attrs: {frozen: true}
expr: inception_3a_pool_proj_bn<=BN<=inception_3a_pool_proj
id: inception_3a_pool_proj_bn
- {expr: inception_3a_pool_proj_bn<=ReLU<=inception_3a_pool_proj_bn, id: inception_3a_relu_pool_proj}
- {expr: 'inception_3a_output<=Concat<=inception_3a_1x1_bn,inception_3a_3x3_bn,inception_3a_double_3x3_2_bn,inception_3a_pool_proj_bn',
id: inception_3a_output}
- attrs: {kernel_size: 1, num_output: 64}
expr: inception_3b_1x1<=Convolution<=inception_3a_output
id: inception_3b_1x1
- attrs: {frozen: true}
expr: inception_3b_1x1_bn<=BN<=inception_3b_1x1
id: inception_3b_1x1_bn
- {expr: inception_3b_1x1_bn<=ReLU<=inception_3b_1x1_bn, id: inception_3b_relu_1x1}
- attrs: {kernel_size: 1, num_output: 64}
expr: inception_3b_3x3_reduce<=Convolution<=inception_3a_output
id: inception_3b_3x3_reduce
- attrs: {frozen: true}
expr: inception_3b_3x3_reduce_bn<=BN<=inception_3b_3x3_reduce
id: inception_3b_3x3_reduce_bn
- {expr: inception_3b_3x3_reduce_bn<=ReLU<=inception_3b_3x3_reduce_bn, id: inception_3b_relu_3x3_reduce}
- attrs: {kernel_size: 3, num_output: 96, pad: 1}
expr: inception_3b_3x3<=Convolution<=inception_3b_3x3_reduce_bn
id: inception_3b_3x3
- attrs: {frozen: true}
expr: inception_3b_3x3_bn<=BN<=inception_3b_3x3
id: inception_3b_3x3_bn
- {expr: inception_3b_3x3_bn<=ReLU<=inception_3b_3x3_bn, id: inception_3b_relu_3x3}
- attrs: {kernel_size: 1, num_output: 64}
expr: inception_3b_double_3x3_reduce<=Convolution<=inception_3a_output
id: inception_3b_double_3x3_reduce
- attrs: {frozen: true}
expr: inception_3b_double_3x3_reduce_bn<=BN<=inception_3b_double_3x3_reduce
id: inception_3b_double_3x3_reduce_bn
- {expr: inception_3b_double_3x3_reduce_bn<=ReLU<=inception_3b_double_3x3_reduce_bn,
id: inception_3b_relu_double_3x3_reduce}
- attrs: {kernel_size: 3, num_output: 96, pad: 1}
expr: inception_3b_double_3x3_1<=Convolution<=inception_3b_double_3x3_reduce_bn
id: inception_3b_double_3x3_1
- attrs: {frozen: true}
expr: inception_3b_double_3x3_1_bn<=BN<=inception_3b_double_3x3_1
id: inception_3b_double_3x3_1_bn
- {expr: inception_3b_double_3x3_1_bn<=ReLU<=inception_3b_double_3x3_1_bn, id: inception_3b_relu_double_3x3_1}
- attrs: {kernel_size: 3, num_output: 96, pad: 1}
expr: inception_3b_double_3x3_2<=Convolution<=inception_3b_double_3x3_1_bn
id: inception_3b_double_3x3_2
- attrs: {frozen: true}
expr: inception_3b_double_3x3_2_bn<=BN<=inception_3b_double_3x3_2
id: inception_3b_double_3x3_2_bn
- {expr: inception_3b_double_3x3_2_bn<=ReLU<=inception_3b_double_3x3_2_bn, id: inception_3b_relu_double_3x3_2}
- attrs: {kernel_size: 3, mode: ave, pad: 1, stride: 1}
expr: inception_3b_pool<=Pooling<=inception_3a_output
id: inception_3b_pool
- attrs: {kernel_size: 1, num_output: 64}
expr: inception_3b_pool_proj<=Convolution<=inception_3b_pool
id: inception_3b_pool_proj
- attrs: {frozen: true}
expr: inception_3b_pool_proj_bn<=BN<=inception_3b_pool_proj
id: inception_3b_pool_proj_bn
- {expr: inception_3b_pool_proj_bn<=ReLU<=inception_3b_pool_proj_bn, id: inception_3b_relu_pool_proj}
- {expr: 'inception_3b_output<=Concat<=inception_3b_1x1_bn,inception_3b_3x3_bn,inception_3b_double_3x3_2_bn,inception_3b_pool_proj_bn',
id: inception_3b_output}
- attrs: {kernel_size: 1, num_output: 128}
expr: inception_3c_3x3_reduce<=Convolution<=inception_3b_output
id: inception_3c_3x3_reduce
- attrs: {frozen: true}
expr: inception_3c_3x3_reduce_bn<=BN<=inception_3c_3x3_reduce
id: inception_3c_3x3_reduce_bn
- {expr: inception_3c_3x3_reduce_bn<=ReLU<=inception_3c_3x3_reduce_bn, id: inception_3c_relu_3x3_reduce}
- attrs: {kernel_size: 3, num_output: 160, pad: 1, stride: 2}
expr: inception_3c_3x3<=Convolution<=inception_3c_3x3_reduce_bn
id: inception_3c_3x3
- attrs: {frozen: true}
expr: inception_3c_3x3_bn<=BN<=inception_3c_3x3
id: inception_3c_3x3_bn
- {expr: inception_3c_3x3_bn<=ReLU<=inception_3c_3x3_bn, id: inception_3c_relu_3x3}
- attrs: {kernel_size: 1, num_output: 64}
expr: inception_3c_double_3x3_reduce<=Convolution<=inception_3b_output
id: inception_3c_double_3x3_reduce
- attrs: {frozen: true}
expr: inception_3c_double_3x3_reduce_bn<=BN<=inception_3c_double_3x3_reduce
id: inception_3c_double_3x3_reduce_bn
- {expr: inception_3c_double_3x3_reduce_bn<=ReLU<=inception_3c_double_3x3_reduce_bn,
id: inception_3c_relu_double_3x3_reduce}
- attrs: {kernel_size: 3, num_output: 96, pad: 1}
expr: inception_3c_double_3x3_1<=Convolution<=inception_3c_double_3x3_reduce_bn
id: inception_3c_double_3x3_1
- attrs: {frozen: true}
expr: inception_3c_double_3x3_1_bn<=BN<=inception_3c_double_3x3_1
id: inception_3c_double_3x3_1_bn
- {expr: inception_3c_double_3x3_1_bn<=ReLU<=inception_3c_double_3x3_1_bn, id: inception_3c_relu_double_3x3_1}
- attrs: {kernel_size: 3, num_output: 96, pad: 1, stride: 2}
expr: inception_3c_double_3x3_2<=Convolution<=inception_3c_double_3x3_1_bn
id: inception_3c_double_3x3_2
- attrs: {frozen: true}
expr: inception_3c_double_3x3_2_bn<=BN<=inception_3c_double_3x3_2
id: inception_3c_double_3x3_2_bn
- {expr: inception_3c_double_3x3_2_bn<=ReLU<=inception_3c_double_3x3_2_bn, id: inception_3c_relu_double_3x3_2}
- attrs: {kernel_size: 3, mode: max, stride: 2}
expr: inception_3c_pool<=Pooling<=inception_3b_output
id: inception_3c_pool
- {expr: 'inception_3c_output<=Concat<=inception_3c_3x3_bn,inception_3c_double_3x3_2_bn,inception_3c_pool',
id: inception_3c_output}
- attrs: {kernel_size: 1, num_output: 224}
expr: inception_4a_1x1<=Convolution<=inception_3c_output
id: inception_4a_1x1
- attrs: {frozen: true}
expr: inception_4a_1x1_bn<=BN<=inception_4a_1x1
id: inception_4a_1x1_bn
- {expr: inception_4a_1x1_bn<=ReLU<=inception_4a_1x1_bn, id: inception_4a_relu_1x1}
- attrs: {kernel_size: 1, num_output: 64}
expr: inception_4a_3x3_reduce<=Convolution<=inception_3c_output
id: inception_4a_3x3_reduce
- attrs: {frozen: true}
expr: inception_4a_3x3_reduce_bn<=BN<=inception_4a_3x3_reduce
id: inception_4a_3x3_reduce_bn
- {expr: inception_4a_3x3_reduce_bn<=ReLU<=inception_4a_3x3_reduce_bn, id: inception_4a_relu_3x3_reduce}
- attrs: {kernel_size: 3, num_output: 96, pad: 1}
expr: inception_4a_3x3<=Convolution<=inception_4a_3x3_reduce_bn
id: inception_4a_3x3
- attrs: {frozen: true}
expr: inception_4a_3x3_bn<=BN<=inception_4a_3x3
id: inception_4a_3x3_bn
- {expr: inception_4a_3x3_bn<=ReLU<=inception_4a_3x3_bn, id: inception_4a_relu_3x3}
- attrs: {kernel_size: 1, num_output: 96}
expr: inception_4a_double_3x3_reduce<=Convolution<=inception_3c_output
id: inception_4a_double_3x3_reduce
- attrs: {frozen: true}
expr: inception_4a_double_3x3_reduce_bn<=BN<=inception_4a_double_3x3_reduce
id: inception_4a_double_3x3_reduce_bn
- {expr: inception_4a_double_3x3_reduce_bn<=ReLU<=inception_4a_double_3x3_reduce_bn,
id: inception_4a_relu_double_3x3_reduce}
- attrs: {kernel_size: 3, num_output: 128, pad: 1}
expr: inception_4a_double_3x3_1<=Convolution<=inception_4a_double_3x3_reduce_bn
id: inception_4a_double_3x3_1
- attrs: {frozen: true}
expr: inception_4a_double_3x3_1_bn<=BN<=inception_4a_double_3x3_1
id: inception_4a_double_3x3_1_bn
- {expr: inception_4a_double_3x3_1_bn<=ReLU<=inception_4a_double_3x3_1_bn, id: inception_4a_relu_double_3x3_1}
- attrs: {kernel_size: 3, num_output: 128, pad: 1}
expr: inception_4a_double_3x3_2<=Convolution<=inception_4a_double_3x3_1_bn
id: inception_4a_double_3x3_2
- attrs: {frozen: true}
expr: inception_4a_double_3x3_2_bn<=BN<=inception_4a_double_3x3_2
id: inception_4a_double_3x3_2_bn
- {expr: inception_4a_double_3x3_2_bn<=ReLU<=inception_4a_double_3x3_2_bn, id: inception_4a_relu_double_3x3_2}
- attrs: {kernel_size: 3, mode: ave, pad: 1, stride: 1}
expr: inception_4a_pool<=Pooling<=inception_3c_output
id: inception_4a_pool
- attrs: {kernel_size: 1, num_output: 128}
expr: inception_4a_pool_proj<=Convolution<=inception_4a_pool
id: inception_4a_pool_proj
- attrs: {frozen: true}
expr: inception_4a_pool_proj_bn<=BN<=inception_4a_pool_proj
id: inception_4a_pool_proj_bn
- {expr: inception_4a_pool_proj_bn<=ReLU<=inception_4a_pool_proj_bn, id: inception_4a_relu_pool_proj}
- {expr: 'inception_4a_output<=Concat<=inception_4a_1x1_bn,inception_4a_3x3_bn,inception_4a_double_3x3_2_bn,inception_4a_pool_proj_bn',
id: inception_4a_output}
- attrs: {kernel_size: 1, num_output: 192}
expr: inception_4b_1x1<=Convolution<=inception_4a_output
id: inception_4b_1x1
- attrs: {frozen: true}
expr: inception_4b_1x1_bn<=BN<=inception_4b_1x1
id: inception_4b_1x1_bn
- {expr: inception_4b_1x1_bn<=ReLU<=inception_4b_1x1_bn, id: inception_4b_relu_1x1}
- attrs: {kernel_size: 1, num_output: 96}
expr: inception_4b_3x3_reduce<=Convolution<=inception_4a_output
id: inception_4b_3x3_reduce
- attrs: {frozen: true}
expr: inception_4b_3x3_reduce_bn<=BN<=inception_4b_3x3_reduce
id: inception_4b_3x3_reduce_bn
- {expr: inception_4b_3x3_reduce_bn<=ReLU<=inception_4b_3x3_reduce_bn, id: inception_4b_relu_3x3_reduce}
- attrs: {kernel_size: 3, num_output: 128, pad: 1}
expr: inception_4b_3x3<=Convolution<=inception_4b_3x3_reduce_bn
id: inception_4b_3x3
- attrs: {frozen: true}
expr: inception_4b_3x3_bn<=BN<=inception_4b_3x3
id: inception_4b_3x3_bn
- {expr: inception_4b_3x3_bn<=ReLU<=inception_4b_3x3_bn, id: inception_4b_relu_3x3}
- attrs: {kernel_size: 1, num_output: 96}
expr: inception_4b_double_3x3_reduce<=Convolution<=inception_4a_output
id: inception_4b_double_3x3_reduce
- attrs: {frozen: true}
expr: inception_4b_double_3x3_reduce_bn<=BN<=inception_4b_double_3x3_reduce
id: inception_4b_double_3x3_reduce_bn
- {expr: inception_4b_double_3x3_reduce_bn<=ReLU<=inception_4b_double_3x3_reduce_bn,
id: inception_4b_relu_double_3x3_reduce}
- attrs: {kernel_size: 3, num_output: 128, pad: 1}
expr: inception_4b_double_3x3_1<=Convolution<=inception_4b_double_3x3_reduce_bn
id: inception_4b_double_3x3_1
- attrs: {frozen: true}
expr: inception_4b_double_3x3_1_bn<=BN<=inception_4b_double_3x3_1
id: inception_4b_double_3x3_1_bn
- {expr: inception_4b_double_3x3_1_bn<=ReLU<=inception_4b_double_3x3_1_bn, id: inception_4b_relu_double_3x3_1}
- attrs: {kernel_size: 3, num_output: 128, pad: 1}
expr: inception_4b_double_3x3_2<=Convolution<=inception_4b_double_3x3_1_bn
id: inception_4b_double_3x3_2
- attrs: {frozen: true}
expr: inception_4b_double_3x3_2_bn<=BN<=inception_4b_double_3x3_2
id: inception_4b_double_3x3_2_bn
- {expr: inception_4b_double_3x3_2_bn<=ReLU<=inception_4b_double_3x3_2_bn, id: inception_4b_relu_double_3x3_2}
- attrs: {kernel_size: 3, mode: ave, pad: 1, stride: 1}
expr: inception_4b_pool<=Pooling<=inception_4a_output
id: inception_4b_pool
- attrs: {kernel_size: 1, num_output: 128}
expr: inception_4b_pool_proj<=Convolution<=inception_4b_pool
id: inception_4b_pool_proj
- attrs: {frozen: true}
expr: inception_4b_pool_proj_bn<=BN<=inception_4b_pool_proj
id: inception_4b_pool_proj_bn
- {expr: inception_4b_pool_proj_bn<=ReLU<=inception_4b_pool_proj_bn, id: inception_4b_relu_pool_proj}
- {expr: 'inception_4b_output<=Concat<=inception_4b_1x1_bn,inception_4b_3x3_bn,inception_4b_double_3x3_2_bn,inception_4b_pool_proj_bn',
id: inception_4b_output}
- attrs: {kernel_size: 1, num_output: 160}
expr: inception_4c_1x1<=Convolution<=inception_4b_output
id: inception_4c_1x1
- attrs: {frozen: true}
expr: inception_4c_1x1_bn<=BN<=inception_4c_1x1
id: inception_4c_1x1_bn
- {expr: inception_4c_1x1_bn<=ReLU<=inception_4c_1x1_bn, id: inception_4c_relu_1x1}
- attrs: {kernel_size: 1, num_output: 128}
expr: inception_4c_3x3_reduce<=Convolution<=inception_4b_output
id: inception_4c_3x3_reduce
- attrs: {frozen: true}
expr: inception_4c_3x3_reduce_bn<=BN<=inception_4c_3x3_reduce
id: inception_4c_3x3_reduce_bn
- {expr: inception_4c_3x3_reduce_bn<=ReLU<=inception_4c_3x3_reduce_bn, id: inception_4c_relu_3x3_reduce}
- attrs: {kernel_size: 3, num_output: 160, pad: 1}
expr: inception_4c_3x3<=Convolution<=inception_4c_3x3_reduce_bn
id: inception_4c_3x3
- attrs: {frozen: true}
expr: inception_4c_3x3_bn<=BN<=inception_4c_3x3
id: inception_4c_3x3_bn
- {expr: inception_4c_3x3_bn<=ReLU<=inception_4c_3x3_bn, id: inception_4c_relu_3x3}
- attrs: {kernel_size: 1, num_output: 128}
expr: inception_4c_double_3x3_reduce<=Convolution<=inception_4b_output
id: inception_4c_double_3x3_reduce
- attrs: {frozen: true}
expr: inception_4c_double_3x3_reduce_bn<=BN<=inception_4c_double_3x3_reduce
id: inception_4c_double_3x3_reduce_bn
- {expr: inception_4c_double_3x3_reduce_bn<=ReLU<=inception_4c_double_3x3_reduce_bn,
id: inception_4c_relu_double_3x3_reduce}
- attrs: {kernel_size: 3, num_output: 160, pad: 1}
expr: inception_4c_double_3x3_1<=Convolution<=inception_4c_double_3x3_reduce_bn
id: inception_4c_double_3x3_1
- attrs: {frozen: true}
expr: inception_4c_double_3x3_1_bn<=BN<=inception_4c_double_3x3_1
id: inception_4c_double_3x3_1_bn
- {expr: inception_4c_double_3x3_1_bn<=ReLU<=inception_4c_double_3x3_1_bn, id: inception_4c_relu_double_3x3_1}
- attrs: {kernel_size: 3, num_output: 160, pad: 1}
expr: inception_4c_double_3x3_2<=Convolution<=inception_4c_double_3x3_1_bn
id: inception_4c_double_3x3_2
- attrs: {frozen: true}
expr: inception_4c_double_3x3_2_bn<=BN<=inception_4c_double_3x3_2
id: inception_4c_double_3x3_2_bn
- {expr: inception_4c_double_3x3_2_bn<=ReLU<=inception_4c_double_3x3_2_bn, id: inception_4c_relu_double_3x3_2}
- attrs: {kernel_size: 3, mode: ave, pad: 1, stride: 1}
expr: inception_4c_pool<=Pooling<=inception_4b_output
id: inception_4c_pool
- attrs: {kernel_size: 1, num_output: 128}
expr: inception_4c_pool_proj<=Convolution<=inception_4c_pool
id: inception_4c_pool_proj
- attrs: {frozen: true}
expr: inception_4c_pool_proj_bn<=BN<=inception_4c_pool_proj
id: inception_4c_pool_proj_bn
- {expr: inception_4c_pool_proj_bn<=ReLU<=inception_4c_pool_proj_bn, id: inception_4c_relu_pool_proj}
- {expr: 'inception_4c_output<=Concat<=inception_4c_1x1_bn,inception_4c_3x3_bn,inception_4c_double_3x3_2_bn,inception_4c_pool_proj_bn',
id: inception_4c_output}
- attrs: {kernel_size: 1, num_output: 96}
expr: inception_4d_1x1<=Convolution<=inception_4c_output
id: inception_4d_1x1
- attrs: {frozen: true}
expr: inception_4d_1x1_bn<=BN<=inception_4d_1x1
id: inception_4d_1x1_bn
- {expr: inception_4d_1x1_bn<=ReLU<=inception_4d_1x1_bn, id: inception_4d_relu_1x1}
- attrs: {kernel_size: 1, num_output: 128}
expr: inception_4d_3x3_reduce<=Convolution<=inception_4c_output
id: inception_4d_3x3_reduce
- attrs: {frozen: true}
expr: inception_4d_3x3_reduce_bn<=BN<=inception_4d_3x3_reduce
id: inception_4d_3x3_reduce_bn
- {expr: inception_4d_3x3_reduce_bn<=ReLU<=inception_4d_3x3_reduce_bn, id: inception_4d_relu_3x3_reduce}
- attrs: {kernel_size: 3, num_output: 192, pad: 1}
expr: inception_4d_3x3<=Convolution<=inception_4d_3x3_reduce_bn
id: inception_4d_3x3
- attrs: {frozen: true}
expr: inception_4d_3x3_bn<=BN<=inception_4d_3x3
id: inception_4d_3x3_bn
- {expr: inception_4d_3x3_bn<=ReLU<=inception_4d_3x3_bn, id: inception_4d_relu_3x3}
- attrs: {kernel_size: 1, num_output: 160}
expr: inception_4d_double_3x3_reduce<=Convolution<=inception_4c_output
id: inception_4d_double_3x3_reduce
- attrs: {frozen: true}
expr: inception_4d_double_3x3_reduce_bn<=BN<=inception_4d_double_3x3_reduce
id: inception_4d_double_3x3_reduce_bn
- {expr: inception_4d_double_3x3_reduce_bn<=ReLU<=inception_4d_double_3x3_reduce_bn,
id: inception_4d_relu_double_3x3_reduce}
- attrs: {kernel_size: 3, num_output: 192, pad: 1}
expr: inception_4d_double_3x3_1<=Convolution<=inception_4d_double_3x3_reduce_bn
id: inception_4d_double_3x3_1
- attrs: {frozen: true}
expr: inception_4d_double_3x3_1_bn<=BN<=inception_4d_double_3x3_1
id: inception_4d_double_3x3_1_bn
- {expr: inception_4d_double_3x3_1_bn<=ReLU<=inception_4d_double_3x3_1_bn, id: inception_4d_relu_double_3x3_1}
- attrs: {kernel_size: 3, num_output: 192, pad: 1}
expr: inception_4d_double_3x3_2<=Convolution<=inception_4d_double_3x3_1_bn
id: inception_4d_double_3x3_2
- attrs: {frozen: true}
expr: inception_4d_double_3x3_2_bn<=BN<=inception_4d_double_3x3_2
id: inception_4d_double_3x3_2_bn
- {expr: inception_4d_double_3x3_2_bn<=ReLU<=inception_4d_double_3x3_2_bn, id: inception_4d_relu_double_3x3_2}
- attrs: {kernel_size: 3, mode: ave, pad: 1, stride: 1}
expr: inception_4d_pool<=Pooling<=inception_4c_output
id: inception_4d_pool
- attrs: {kernel_size: 1, num_output: 128}
expr: inception_4d_pool_proj<=Convolution<=inception_4d_pool
id: inception_4d_pool_proj
- attrs: {frozen: true}
expr: inception_4d_pool_proj_bn<=BN<=inception_4d_pool_proj
id: inception_4d_pool_proj_bn
- {expr: inception_4d_pool_proj_bn<=ReLU<=inception_4d_pool_proj_bn, id: inception_4d_relu_pool_proj}
- {expr: 'inception_4d_output<=Concat<=inception_4d_1x1_bn,inception_4d_3x3_bn,inception_4d_double_3x3_2_bn,inception_4d_pool_proj_bn',
id: inception_4d_output}
- attrs: {kernel_size: 1, num_output: 128}
expr: inception_4e_3x3_reduce<=Convolution<=inception_4d_output
id: inception_4e_3x3_reduce
- attrs: {frozen: true}
expr: inception_4e_3x3_reduce_bn<=BN<=inception_4e_3x3_reduce
id: inception_4e_3x3_reduce_bn
- {expr: inception_4e_3x3_reduce_bn<=ReLU<=inception_4e_3x3_reduce_bn, id: inception_4e_relu_3x3_reduce}
- attrs: {kernel_size: 3, num_output: 192, pad: 1, stride: 2}
expr: inception_4e_3x3<=Convolution<=inception_4e_3x3_reduce_bn
id: inception_4e_3x3
- attrs: {frozen: true}
expr: inception_4e_3x3_bn<=BN<=inception_4e_3x3
id: inception_4e_3x3_bn
- {expr: inception_4e_3x3_bn<=ReLU<=inception_4e_3x3_bn, id: inception_4e_relu_3x3}
- attrs: {kernel_size: 1, num_output: 192}
expr: inception_4e_double_3x3_reduce<=Convolution<=inception_4d_output
id: inception_4e_double_3x3_reduce
- attrs: {frozen: true}
expr: inception_4e_double_3x3_reduce_bn<=BN<=inception_4e_double_3x3_reduce
id: inception_4e_double_3x3_reduce_bn
- {expr: inception_4e_double_3x3_reduce_bn<=ReLU<=inception_4e_double_3x3_reduce_bn,
id: inception_4e_relu_double_3x3_reduce}
- attrs: {kernel_size: 3, num_output: 256, pad: 1}
expr: inception_4e_double_3x3_1<=Convolution<=inception_4e_double_3x3_reduce_bn
id: inception_4e_double_3x3_1
- attrs: {frozen: true}
expr: inception_4e_double_3x3_1_bn<=BN<=inception_4e_double_3x3_1
id: inception_4e_double_3x3_1_bn
- {expr: inception_4e_double_3x3_1_bn<=ReLU<=inception_4e_double_3x3_1_bn, id: inception_4e_relu_double_3x3_1}
- attrs: {kernel_size: 3, num_output: 256, pad: 1, stride: 2}
expr: inception_4e_double_3x3_2<=Convolution<=inception_4e_double_3x3_1_bn
id: inception_4e_double_3x3_2
- attrs: {frozen: true}
expr: inception_4e_double_3x3_2_bn<=BN<=inception_4e_double_3x3_2
id: inception_4e_double_3x3_2_bn
- {expr: inception_4e_double_3x3_2_bn<=ReLU<=inception_4e_double_3x3_2_bn, id: inception_4e_relu_double_3x3_2}
- attrs: {kernel_size: 3, mode: max, stride: 2}
expr: inception_4e_pool<=Pooling<=inception_4d_output
id: inception_4e_pool
- {expr: 'inception_4e_output<=Concat<=inception_4e_3x3_bn,inception_4e_double_3x3_2_bn,inception_4e_pool',
id: inception_4e_output}
- attrs: {kernel_size: 1, num_output: 352}
expr: inception_5a_1x1<=Convolution<=inception_4e_output
id: inception_5a_1x1
- attrs: {frozen: true}
expr: inception_5a_1x1_bn<=BN<=inception_5a_1x1
id: inception_5a_1x1_bn
- {expr: inception_5a_1x1_bn<=ReLU<=inception_5a_1x1_bn, id: inception_5a_relu_1x1}
- attrs: {kernel_size: 1, num_output: 192}
expr: inception_5a_3x3_reduce<=Convolution<=inception_4e_output
id: inception_5a_3x3_reduce
- attrs: {frozen: true}
expr: inception_5a_3x3_reduce_bn<=BN<=inception_5a_3x3_reduce
id: inception_5a_3x3_reduce_bn
- {expr: inception_5a_3x3_reduce_bn<=ReLU<=inception_5a_3x3_reduce_bn, id: inception_5a_relu_3x3_reduce}
- attrs: {kernel_size: 3, num_output: 320, pad: 1}
expr: inception_5a_3x3<=Convolution<=inception_5a_3x3_reduce_bn
id: inception_5a_3x3
- attrs: {frozen: true}
expr: inception_5a_3x3_bn<=BN<=inception_5a_3x3
id: inception_5a_3x3_bn
- {expr: inception_5a_3x3_bn<=ReLU<=inception_5a_3x3_bn, id: inception_5a_relu_3x3}
- attrs: {kernel_size: 1, num_output: 160}
expr: inception_5a_double_3x3_reduce<=Convolution<=inception_4e_output
id: inception_5a_double_3x3_reduce
- attrs: {frozen: true}
expr: inception_5a_double_3x3_reduce_bn<=BN<=inception_5a_double_3x3_reduce
id: inception_5a_double_3x3_reduce_bn
- {expr: inception_5a_double_3x3_reduce_bn<=ReLU<=inception_5a_double_3x3_reduce_bn,
id: inception_5a_relu_double_3x3_reduce}
- attrs: {kernel_size: 3, num_output: 224, pad: 1}
expr: inception_5a_double_3x3_1<=Convolution<=inception_5a_double_3x3_reduce_bn
id: inception_5a_double_3x3_1
- attrs: {frozen: true}
expr: inception_5a_double_3x3_1_bn<=BN<=inception_5a_double_3x3_1
id: inception_5a_double_3x3_1_bn
- {expr: inception_5a_double_3x3_1_bn<=ReLU<=inception_5a_double_3x3_1_bn, id: inception_5a_relu_double_3x3_1}
- attrs: {kernel_size: 3, num_output: 224, pad: 1}
expr: inception_5a_double_3x3_2<=Convolution<=inception_5a_double_3x3_1_bn
id: inception_5a_double_3x3_2
- attrs: {frozen: true}
expr: inception_5a_double_3x3_2_bn<=BN<=inception_5a_double_3x3_2
id: inception_5a_double_3x3_2_bn
- {expr: inception_5a_double_3x3_2_bn<=ReLU<=inception_5a_double_3x3_2_bn, id: inception_5a_relu_double_3x3_2}
- attrs: {kernel_size: 3, mode: ave, pad: 1, stride: 1}
expr: inception_5a_pool<=Pooling<=inception_4e_output
id: inception_5a_pool
- attrs: {kernel_size: 1, num_output: 128}
expr: inception_5a_pool_proj<=Convolution<=inception_5a_pool
id: inception_5a_pool_proj
- attrs: {frozen: true}
expr: inception_5a_pool_proj_bn<=BN<=inception_5a_pool_proj
id: inception_5a_pool_proj_bn
- {expr: inception_5a_pool_proj_bn<=ReLU<=inception_5a_pool_proj_bn, id: inception_5a_relu_pool_proj}
- {expr: 'inception_5a_output<=Concat<=inception_5a_1x1_bn,inception_5a_3x3_bn,inception_5a_double_3x3_2_bn,inception_5a_pool_proj_bn',
id: inception_5a_output}
- attrs: {kernel_size: 1, num_output: 352}
expr: inception_5b_1x1<=Convolution<=inception_5a_output
id: inception_5b_1x1
- attrs: {frozen: true}
expr: inception_5b_1x1_bn<=BN<=inception_5b_1x1
id: inception_5b_1x1_bn
- {expr: inception_5b_1x1_bn<=ReLU<=inception_5b_1x1_bn, id: inception_5b_relu_1x1}
- attrs: {kernel_size: 1, num_output: 192}
expr: inception_5b_3x3_reduce<=Convolution<=inception_5a_output
id: inception_5b_3x3_reduce
- attrs: {frozen: true}
expr: inception_5b_3x3_reduce_bn<=BN<=inception_5b_3x3_reduce
id: inception_5b_3x3_reduce_bn
- {expr: inception_5b_3x3_reduce_bn<=ReLU<=inception_5b_3x3_reduce_bn, id: inception_5b_relu_3x3_reduce}
- attrs: {kernel_size: 3, num_output: 320, pad: 1}
expr: inception_5b_3x3<=Convolution<=inception_5b_3x3_reduce_bn
id: inception_5b_3x3
- attrs: {frozen: true}
expr: inception_5b_3x3_bn<=BN<=inception_5b_3x3
id: inception_5b_3x3_bn
- {expr: inception_5b_3x3_bn<=ReLU<=inception_5b_3x3_bn, id: inception_5b_relu_3x3}
- attrs: {kernel_size: 1, num_output: 192}
expr: inception_5b_double_3x3_reduce<=Convolution<=inception_5a_output
id: inception_5b_double_3x3_reduce
- attrs: {frozen: true}
expr: inception_5b_double_3x3_reduce_bn<=BN<=inception_5b_double_3x3_reduce
id: inception_5b_double_3x3_reduce_bn
- {expr: inception_5b_double_3x3_reduce_bn<=ReLU<=inception_5b_double_3x3_reduce_bn,
id: inception_5b_relu_double_3x3_reduce}
- attrs: {kernel_size: 3, num_output: 224, pad: 1}
expr: inception_5b_double_3x3_1<=Convolution<=inception_5b_double_3x3_reduce_bn
id: inception_5b_double_3x3_1
- attrs: {frozen: true}
expr: inception_5b_double_3x3_1_bn<=BN<=inception_5b_double_3x3_1
id: inception_5b_double_3x3_1_bn
- {expr: inception_5b_double_3x3_1_bn<=ReLU<=inception_5b_double_3x3_1_bn, id: inception_5b_relu_double_3x3_1}
- attrs: {kernel_size: 3, num_output: 224, pad: 1}
expr: inception_5b_double_3x3_2<=Convolution<=inception_5b_double_3x3_1_bn
id: inception_5b_double_3x3_2
- attrs: {frozen: true}
expr: inception_5b_double_3x3_2_bn<=BN<=inception_5b_double_3x3_2
id: inception_5b_double_3x3_2_bn
- {expr: inception_5b_double_3x3_2_bn<=ReLU<=inception_5b_double_3x3_2_bn, id: inception_5b_relu_double_3x3_2}
- attrs: {kernel_size: 3, mode: max, pad: 1, stride: 1}
expr: inception_5b_pool<=Pooling<=inception_5a_output
id: inception_5b_pool
- attrs: {kernel_size: 1, num_output: 128}
expr: inception_5b_pool_proj<=Convolution<=inception_5b_pool
id: inception_5b_pool_proj
- attrs: {frozen: true}
expr: inception_5b_pool_proj_bn<=BN<=inception_5b_pool_proj
id: inception_5b_pool_proj_bn
- {expr: inception_5b_pool_proj_bn<=ReLU<=inception_5b_pool_proj_bn, id: inception_5b_relu_pool_proj}
- {expr: 'inception_5b_output<=Concat<=inception_5b_1x1_bn,inception_5b_3x3_bn,inception_5b_double_3x3_2_bn,inception_5b_pool_proj_bn',
id: inception_5b_output}
- attrs: {kernel_size: 7, mode: ave, stride: 1}
expr: global_pool<=Pooling<=inception_5b_output
id: global_pool
- attrs: {num_output: 1000}
expr: fc_action<=InnerProduct<=global_pool
id: fc
name: BN-Inception
================================================
FILE: model_zoo/bninception/caffe_pb2.py
================================================
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: caffe.proto
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
# Compiled FileDescriptor for caffe.proto. `serialized_pb` is the raw,
# wire-encoded FileDescriptorProto for the whole schema; every
# `serialized_start`/`serialized_end` offset in the descriptors below indexes
# into this byte string, so this literal must never be edited by hand —
# regenerate the module with protoc from caffe.proto instead.
# NOTE(review): the literal is a py2-era `str`; protobuf 3.x on Python 3
# expects `bytes` (a `b'...'` literal) here — confirm against the protobuf
# runtime version this project pins before changing anything.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='caffe.proto',
  package='caffe',
  serialized_pb='\n\x0b\x63\x61\x66\x66\x65.proto\x12\x05\x63\x61\x66\x66\x65\"\x1c\n\tBlobShape\x12\x0f\n\x03\x64im\x18\x01 \x03(\x03\x42\x02\x10\x01\"\x9a\x01\n\tBlobProto\x12\x1f\n\x05shape\x18\x07 \x01(\x0b\x32\x10.caffe.BlobShape\x12\x10\n\x04\x64\x61ta\x18\x05 \x03(\x02\x42\x02\x10\x01\x12\x10\n\x04\x64iff\x18\x06 \x03(\x02\x42\x02\x10\x01\x12\x0e\n\x03num\x18\x01 \x01(\x05:\x01\x30\x12\x13\n\x08\x63hannels\x18\x02 \x01(\x05:\x01\x30\x12\x11\n\x06height\x18\x03 \x01(\x05:\x01\x30\x12\x10\n\x05width\x18\x04 \x01(\x05:\x01\x30\"2\n\x0f\x42lobProtoVector\x12\x1f\n\x05\x62lobs\x18\x01 \x03(\x0b\x32\x10.caffe.BlobProto\"\x81\x01\n\x05\x44\x61tum\x12\x10\n\x08\x63hannels\x18\x01 \x01(\x05\x12\x0e\n\x06height\x18\x02 \x01(\x05\x12\r\n\x05width\x18\x03 \x01(\x05\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\x12\r\n\x05label\x18\x05 \x01(\x05\x12\x12\n\nfloat_data\x18\x06 \x03(\x02\x12\x16\n\x07\x65ncoded\x18\x07 \x01(\x08:\x05\x66\x61lse\"\x8a\x02\n\x0f\x46illerParameter\x12\x16\n\x04type\x18\x01 \x01(\t:\x08\x63onstant\x12\x10\n\x05value\x18\x02 \x01(\x02:\x01\x30\x12\x0e\n\x03min\x18\x03 \x01(\x02:\x01\x30\x12\x0e\n\x03max\x18\x04 \x01(\x02:\x01\x31\x12\x0f\n\x04mean\x18\x05 \x01(\x02:\x01\x30\x12\x0e\n\x03std\x18\x06 \x01(\x02:\x01\x31\x12\x12\n\x06sparse\x18\x07 \x01(\x05:\x02-1\x12\x42\n\rvariance_norm\x18\x08 \x01(\x0e\x32#.caffe.FillerParameter.VarianceNorm:\x06\x46\x41N_IN\"4\n\x0cVarianceNorm\x12\n\n\x06\x46\x41N_IN\x10\x00\x12\x0b\n\x07\x46\x41N_OUT\x10\x01\x12\x0b\n\x07\x41VERAGE\x10\x02\"\xc6\x02\n\x0cNetParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05input\x18\x03 \x03(\t\x12%\n\x0binput_shape\x18\x08 \x03(\x0b\x32\x10.caffe.BlobShape\x12\x11\n\tinput_dim\x18\x04 \x03(\x05\x12\x1d\n\x0e\x66orce_backward\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x1e\n\x05state\x18\x06 \x01(\x0b\x32\x0f.caffe.NetState\x12\x19\n\ndebug_info\x18\x07 \x01(\x08:\x05\x66\x61lse\x12$\n\x05layer\x18\x64 \x03(\x0b\x32\x15.caffe.LayerParameter\x12\x36\n\tmem_param\x18\xc8\x01 
\x01(\x0b\x32\".caffe.MemoryOptimizationParameter\x12\'\n\x06layers\x18\x02 \x03(\x0b\x32\x17.caffe.V1LayerParameter\"\xe3\x08\n\x0fSolverParameter\x12\x0b\n\x03net\x18\x18 \x01(\t\x12&\n\tnet_param\x18\x19 \x01(\x0b\x32\x13.caffe.NetParameter\x12\x11\n\ttrain_net\x18\x01 \x01(\t\x12\x10\n\x08test_net\x18\x02 \x03(\t\x12,\n\x0ftrain_net_param\x18\x15 \x01(\x0b\x32\x13.caffe.NetParameter\x12+\n\x0etest_net_param\x18\x16 \x03(\x0b\x32\x13.caffe.NetParameter\x12$\n\x0btrain_state\x18\x1a \x01(\x0b\x32\x0f.caffe.NetState\x12#\n\ntest_state\x18\x1b \x03(\x0b\x32\x0f.caffe.NetState\x12\x11\n\ttest_iter\x18\x03 \x03(\x05\x12\x18\n\rtest_interval\x18\x04 \x01(\x05:\x01\x30\x12 \n\x11test_compute_loss\x18\x13 \x01(\x08:\x05\x66\x61lse\x12!\n\x13test_initialization\x18 \x01(\x08:\x04true\x12\x0f\n\x07\x62\x61se_lr\x18\x05 \x01(\x02\x12\x0f\n\x07\x64isplay\x18\x06 \x01(\x05\x12\x17\n\x0c\x61verage_loss\x18! \x01(\x05:\x01\x31\x12\x10\n\x08max_iter\x18\x07 \x01(\x05\x12\x14\n\titer_size\x18$ \x01(\x05:\x01\x31\x12\x11\n\tlr_policy\x18\x08 \x01(\t\x12\r\n\x05gamma\x18\t \x01(\x02\x12\r\n\x05power\x18\n \x01(\x02\x12\x10\n\x08momentum\x18\x0b \x01(\x02\x12\x14\n\x0cweight_decay\x18\x0c \x01(\x02\x12\x1f\n\x13regularization_type\x18\x1d \x01(\t:\x02L2\x12\x10\n\x08stepsize\x18\r \x01(\x05\x12\x11\n\tstepvalue\x18\" \x03(\x05\x12\x1a\n\x0e\x63lip_gradients\x18# \x01(\x02:\x02-1\x12\x13\n\x08snapshot\x18\x0e \x01(\x05:\x01\x30\x12\x17\n\x0fsnapshot_prefix\x18\x0f \x01(\t\x12\x1c\n\rsnapshot_diff\x18\x10 \x01(\x08:\x05\x66\x61lse\x12;\n\x0bsolver_mode\x18\x11 \x01(\x0e\x32!.caffe.SolverParameter.SolverMode:\x03GPU\x12\x11\n\tdevice_id\x18\x12 \x03(\x05\x12\x10\n\x08group_id\x18& \x03(\x05\x12\x17\n\x0brandom_seed\x18\x14 \x01(\x03:\x02-1\x12;\n\x0bsolver_type\x18\x1e \x01(\x0e\x32!.caffe.SolverParameter.SolverType:\x03SGD\x12\x14\n\x05\x64\x65lta\x18\x1f \x01(\x02:\x05\x31\x65-08\x12\x19\n\ndebug_info\x18\x17 \x01(\x08:\x05\x66\x61lse\x12\"\n\x14snapshot_after_train\x18\x1c 
\x01(\x08:\x04true\x12\x15\n\x08richness\x18% \x01(\x05:\x03\x33\x30\x30\"\x1e\n\nSolverMode\x12\x07\n\x03\x43PU\x10\x00\x12\x07\n\x03GPU\x10\x01\"0\n\nSolverType\x12\x07\n\x03SGD\x10\x00\x12\x0c\n\x08NESTEROV\x10\x01\x12\x0b\n\x07\x41\x44\x41GRAD\x10\x02\"l\n\x0bSolverState\x12\x0c\n\x04iter\x18\x01 \x01(\x05\x12\x13\n\x0blearned_net\x18\x02 \x01(\t\x12!\n\x07history\x18\x03 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x17\n\x0c\x63urrent_step\x18\x04 \x01(\x05:\x01\x30\"N\n\x08NetState\x12!\n\x05phase\x18\x01 \x01(\x0e\x32\x0c.caffe.Phase:\x04TEST\x12\x10\n\x05level\x18\x02 \x01(\x05:\x01\x30\x12\r\n\x05stage\x18\x03 \x03(\t\"s\n\x0cNetStateRule\x12\x1b\n\x05phase\x18\x01 \x01(\x0e\x32\x0c.caffe.Phase\x12\x11\n\tmin_level\x18\x02 \x01(\x05\x12\x11\n\tmax_level\x18\x03 \x01(\x05\x12\r\n\x05stage\x18\x04 \x03(\t\x12\x11\n\tnot_stage\x18\x05 \x03(\t\"\xa3\x01\n\tParamSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x31\n\nshare_mode\x18\x02 \x01(\x0e\x32\x1d.caffe.ParamSpec.DimCheckMode\x12\x12\n\x07lr_mult\x18\x03 \x01(\x02:\x01\x31\x12\x15\n\ndecay_mult\x18\x04 \x01(\x02:\x01\x31\"*\n\x0c\x44imCheckMode\x12\n\n\x06STRICT\x10\x00\x12\x0e\n\nPERMISSIVE\x10\x01\"\x90\x13\n\x0eLayerParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x0e\n\x06\x62ottom\x18\x03 \x03(\t\x12\x0b\n\x03top\x18\x04 \x03(\t\x12\x1b\n\x05phase\x18\n \x01(\x0e\x32\x0c.caffe.Phase\x12\x13\n\x0bloss_weight\x18\x05 \x03(\x02\x12\x1f\n\x05param\x18\x06 \x03(\x0b\x32\x10.caffe.ParamSpec\x12\x1f\n\x05\x62lobs\x18\x07 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x16\n\x0epropagate_down\x18\x0b \x03(\x08\x12$\n\x07include\x18\x08 \x03(\x0b\x32\x13.caffe.NetStateRule\x12$\n\x07\x65xclude\x18\t \x03(\x0b\x32\x13.caffe.NetStateRule\x12\x37\n\x0ftransform_param\x18\x64 \x01(\x0b\x32\x1e.caffe.TransformationParameter\x12(\n\nloss_param\x18\x65 \x01(\x0b\x32\x14.caffe.LossParameter\x12\x30\n\x0e\x61\x63\x63uracy_param\x18\x66 \x01(\x0b\x32\x18.caffe.AccuracyParameter\x12,\n\x0c\x61rgmax_param\x18g 
\x01(\x0b\x32\x16.caffe.ArgMaxParameter\x12%\n\x08\x62n_param\x18\x89\x01 \x01(\x0b\x32\x12.caffe.BNParameter\x12,\n\x0c\x63oncat_param\x18h \x01(\x0b\x32\x16.caffe.ConcatParameter\x12?\n\x16\x63ontrastive_loss_param\x18i \x01(\x0b\x32\x1f.caffe.ContrastiveLossParameter\x12\x36\n\x11\x63onvolution_param\x18j \x01(\x0b\x32\x1b.caffe.ConvolutionParameter\x12(\n\ndata_param\x18k \x01(\x0b\x32\x14.caffe.DataParameter\x12.\n\rdropout_param\x18l \x01(\x0b\x32\x17.caffe.DropoutParameter\x12\x33\n\x10\x64ummy_data_param\x18m \x01(\x0b\x32\x19.caffe.DummyDataParameter\x12.\n\reltwise_param\x18n \x01(\x0b\x32\x17.caffe.EltwiseParameter\x12&\n\texp_param\x18o \x01(\x0b\x32\x13.caffe.ExpParameter\x12/\n\rflatten_param\x18\x87\x01 \x01(\x0b\x32\x17.caffe.FlattenParameter\x12\x31\n\x0fhdf5_data_param\x18p \x01(\x0b\x32\x18.caffe.HDF5DataParameter\x12\x35\n\x11hdf5_output_param\x18q \x01(\x0b\x32\x1a.caffe.HDF5OutputParameter\x12\x33\n\x10hinge_loss_param\x18r \x01(\x0b\x32\x19.caffe.HingeLossParameter\x12\x33\n\x10image_data_param\x18s \x01(\x0b\x32\x19.caffe.ImageDataParameter\x12\x39\n\x13infogain_loss_param\x18t \x01(\x0b\x32\x1c.caffe.InfogainLossParameter\x12\x39\n\x13inner_product_param\x18u \x01(\x0b\x32\x1c.caffe.InnerProductParameter\x12\'\n\tlog_param\x18\x86\x01 \x01(\x0b\x32\x13.caffe.LogParameter\x12&\n\tlrn_param\x18v \x01(\x0b\x32\x13.caffe.LRNParameter\x12\x35\n\x11memory_data_param\x18w \x01(\x0b\x32\x1a.caffe.MemoryDataParameter\x12&\n\tmvn_param\x18x \x01(\x0b\x32\x13.caffe.MVNParameter\x12.\n\rpooling_param\x18y \x01(\x0b\x32\x17.caffe.PoolingParameter\x12*\n\x0bpower_param\x18z \x01(\x0b\x32\x15.caffe.PowerParameter\x12+\n\x0bprelu_param\x18\x83\x01 \x01(\x0b\x32\x15.caffe.PReLUParameter\x12-\n\x0cpython_param\x18\x82\x01 \x01(\x0b\x32\x16.caffe.PythonParameter\x12\x33\n\x0freduction_param\x18\x88\x01 \x01(\x0b\x32\x19.caffe.ReductionParameter\x12(\n\nrelu_param\x18{ \x01(\x0b\x32\x14.caffe.ReLUParameter\x12/\n\rreshape_param\x18\x85\x01 
\x01(\x0b\x32\x17.caffe.ReshapeParameter\x12\x30\n\x0eseg_data_param\x18\x8d\x01 \x01(\x0b\x32\x17.caffe.SegDataParameter\x12.\n\rsigmoid_param\x18| \x01(\x0b\x32\x17.caffe.SigmoidParameter\x12.\n\rsoftmax_param\x18} \x01(\x0b\x32\x17.caffe.SoftmaxParameter\x12\'\n\tspp_param\x18\x84\x01 \x01(\x0b\x32\x13.caffe.SPPParameter\x12*\n\x0bslice_param\x18~ \x01(\x0b\x32\x15.caffe.SliceParameter\x12(\n\ntanh_param\x18\x7f \x01(\x0b\x32\x14.caffe.TanHParameter\x12\x33\n\x0fthreshold_param\x18\x80\x01 \x01(\x0b\x32\x19.caffe.ThresholdParameter\x12\x36\n\x11window_data_param\x18\x81\x01 \x01(\x0b\x32\x1a.caffe.WindowDataParameter\x12\x34\n\x10video_data_param\x18\x8c\x01 \x01(\x0b\x32\x19.caffe.VideoDataParameter\x12\x36\n\x11roi_pooling_param\x18\x96\x01 \x01(\x0b\x32\x1a.caffe.ROIPoolingParameter\x12+\n\x0bscale_param\x18\xa0\x01 \x01(\x0b\x32\x15.caffe.ScaleParameter\x12)\n\nbias_param\x18\xa1\x01 \x01(\x0b\x32\x14.caffe.BiasParameter\x12>\n\x15\x62\x61tch_reduction_param\x18\xa2\x01 \x01(\x0b\x32\x1e.caffe.BatchReductionParameter\"\xc0\x03\n\x17TransformationParameter\x12\x10\n\x05scale\x18\x01 \x01(\x02:\x01\x31\x12\x15\n\x06mirror\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x14\n\tcrop_size\x18\x03 \x01(\r:\x01\x30\x12\x11\n\tmean_file\x18\x04 \x01(\t\x12\x12\n\nmean_value\x18\x05 \x03(\x02\x12\x1a\n\x0b\x66orce_color\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x19\n\nforce_gray\x18\x07 \x01(\x08:\x05\x66\x61lse\x12\x17\n\x08\x66ix_crop\x18\n \x01(\x08:\x05\x66\x61lse\x12\x1c\n\rmore_fix_crop\x18\x0f \x01(\x08:\x05\x66\x61lse\x12\x1a\n\x0bmulti_scale\x18\x0b \x01(\x08:\x05\x66\x61lse\x12\x14\n\x0cscale_ratios\x18\x0c \x03(\x02\x12\x16\n\x0bmax_distort\x18\r \x01(\x05:\x01\x31\x12\x16\n\x07is_flow\x18\x0e \x01(\x08:\x05\x66\x61lse\x12\x1d\n\x0eoriginal_image\x18\x14 \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06stride\x18\x10 \x01(\x05:\x01\x31\x12\x12\n\nupper_size\x18\x11 \x01(\x05\x12\x14\n\x0cupper_height\x18\x12 \x01(\x05\x12\x13\n\x0bupper_width\x18\x13 
\x01(\x05\">\n\rLossParameter\x12\x14\n\x0cignore_label\x18\x01 \x01(\x05\x12\x17\n\tnormalize\x18\x02 \x01(\x08:\x04true\"L\n\x11\x41\x63\x63uracyParameter\x12\x10\n\x05top_k\x18\x01 \x01(\r:\x01\x31\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\x12\x14\n\x0cignore_label\x18\x03 \x01(\x05\"?\n\x0f\x41rgMaxParameter\x12\x1a\n\x0bout_max_val\x18\x01 \x01(\x08:\x05\x66\x61lse\x12\x10\n\x05top_k\x18\x02 \x01(\r:\x01\x31\"\x8b\x02\n\x0b\x42NParameter\x12,\n\x0cslope_filler\x18\x01 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x02 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x15\n\x08momentum\x18\x03 \x01(\x02:\x03\x30.9\x12\x12\n\x03\x65ps\x18\x04 \x01(\x02:\x05\x31\x65-05\x12\x15\n\x06\x66rozen\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x32\n\x06\x65ngine\x18\x06 \x01(\x0e\x32\x19.caffe.BNParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"9\n\x0f\x43oncatParameter\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\x12\x15\n\nconcat_dim\x18\x01 \x01(\r:\x01\x31\"L\n\x18\x43ontrastiveLossParameter\x12\x11\n\x06margin\x18\x01 \x01(\x02:\x01\x31\x12\x1d\n\x0elegacy_version\x18\x02 \x01(\x08:\x05\x66\x61lse\"\xfc\x03\n\x14\x43onvolutionParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x17\n\tbias_term\x18\x02 \x01(\x08:\x04true\x12\x0e\n\x03pad\x18\x03 \x01(\r:\x01\x30\x12\x10\n\x05pad_h\x18\t \x01(\r:\x01\x30\x12\x10\n\x05pad_w\x18\n \x01(\r:\x01\x30\x12\x13\n\x0bkernel_size\x18\x04 \x01(\r\x12\x10\n\x08kernel_h\x18\x0b \x01(\r\x12\x10\n\x08kernel_w\x18\x0c \x01(\r\x12\x10\n\x05group\x18\x05 \x01(\r:\x01\x31\x12\x11\n\x06stride\x18\x06 \x01(\r:\x01\x31\x12\x10\n\x08stride_h\x18\r \x01(\r\x12\x10\n\x08stride_w\x18\x0e \x01(\r\x12\x13\n\x08\x64ilation\x18\x10 \x01(\r:\x01\x31\x12\x12\n\ndilation_h\x18\x11 \x01(\r\x12\x12\n\ndilation_w\x18\x12 \x01(\r\x12-\n\rweight_filler\x18\x07 
\x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x08 \x01(\x0b\x32\x16.caffe.FillerParameter\x12;\n\x06\x65ngine\x18\x0f \x01(\x0e\x32\".caffe.ConvolutionParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"\xa7\x02\n\rDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x12\n\nbatch_size\x18\x04 \x01(\r\x12\x14\n\trand_skip\x18\x07 \x01(\r:\x01\x30\x12\x31\n\x07\x62\x61\x63kend\x18\x08 \x01(\x0e\x32\x17.caffe.DataParameter.DB:\x07LEVELDB\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\"\n\x13\x66orce_encoded_color\x18\t \x01(\x08:\x05\x66\x61lse\x12\x16\n\x07shuffle\x18\n \x01(\x08:\x05\x66\x61lse\"\x1b\n\x02\x44\x42\x12\x0b\n\x07LEVELDB\x10\x00\x12\x08\n\x04LMDB\x10\x01\".\n\x10\x44ropoutParameter\x12\x1a\n\rdropout_ratio\x18\x01 \x01(\x02:\x03\x30.5\"\xa0\x01\n\x12\x44ummyDataParameter\x12+\n\x0b\x64\x61ta_filler\x18\x01 \x03(\x0b\x32\x16.caffe.FillerParameter\x12\x1f\n\x05shape\x18\x06 \x03(\x0b\x32\x10.caffe.BlobShape\x12\x0b\n\x03num\x18\x02 \x03(\r\x12\x10\n\x08\x63hannels\x18\x03 \x03(\r\x12\x0e\n\x06height\x18\x04 \x03(\r\x12\r\n\x05width\x18\x05 \x03(\r\"\xb9\x01\n\x10\x45ltwiseParameter\x12\x39\n\toperation\x18\x01 \x01(\x0e\x32!.caffe.EltwiseParameter.EltwiseOp:\x03SUM\x12\r\n\x05\x63oeff\x18\x02 \x03(\x02\x12\x1e\n\x10stable_prod_grad\x18\x03 \x01(\x08:\x04true\";\n\tEltwiseOp\x12\x08\n\x04PROD\x10\x00\x12\x07\n\x03SUM\x10\x01\x12\x07\n\x03MAX\x10\x02\x12\x12\n\x0eSTOCHASTIC_SUM\x10\x03\"D\n\x0c\x45xpParameter\x12\x10\n\x04\x62\x61se\x18\x01 \x01(\x02:\x02-1\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"9\n\x10\x46lattenParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x14\n\x08\x65nd_axis\x18\x02 
\x01(\x05:\x02-1\"O\n\x11HDF5DataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x12\n\nbatch_size\x18\x02 \x01(\r\x12\x16\n\x07shuffle\x18\x03 \x01(\x08:\x05\x66\x61lse\"(\n\x13HDF5OutputParameter\x12\x11\n\tfile_name\x18\x01 \x01(\t\"^\n\x12HingeLossParameter\x12\x30\n\x04norm\x18\x01 \x01(\x0e\x32\x1e.caffe.HingeLossParameter.Norm:\x02L1\"\x16\n\x04Norm\x12\x06\n\x02L1\x10\x01\x12\x06\n\x02L2\x10\x02\"\x94\x02\n\x12ImageDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x12\n\nbatch_size\x18\x04 \x01(\r\x12\x14\n\trand_skip\x18\x07 \x01(\r:\x01\x30\x12\x16\n\x07shuffle\x18\x08 \x01(\x08:\x05\x66\x61lse\x12\x15\n\nnew_height\x18\t \x01(\r:\x01\x30\x12\x14\n\tnew_width\x18\n \x01(\r:\x01\x30\x12\x16\n\x08is_color\x18\x0b \x01(\x08:\x04true\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x15\n\x0broot_folder\x18\x0c \x01(\t:\x00\"\xb8\x03\n\x12VideoDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x12\n\nbatch_size\x18\x04 \x01(\r\x12\x14\n\trand_skip\x18\x07 \x01(\r:\x01\x30\x12\x16\n\x07shuffle\x18\x08 \x01(\x08:\x05\x66\x61lse\x12\x15\n\nnew_height\x18\t \x01(\r:\x01\x30\x12\x14\n\tnew_width\x18\n \x01(\r:\x01\x30\x12\x15\n\nnew_length\x18\x0b \x01(\r:\x01\x31\x12\x17\n\x0cnum_segments\x18\x0c \x01(\r:\x01\x31\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12:\n\x08modality\x18\r \x01(\x0e\x32\".caffe.VideoDataParameter.Modality:\x04\x46LOW\x12\x14\n\x0cname_pattern\x18\x0e \x01(\t\x12\x16\n\x07\x65ncoded\x18\x0f \x01(\x08:\x05\x66\x61lse\x12\x18\n\tgrayscale\x18\x10 \x01(\x08:\x05\x66\x61lse\"\x1d\n\x08Modality\x12\x07\n\x03RGB\x10\x00\x12\x08\n\x04\x46LOW\x10\x01\"\'\n\x15InfogainLossParameter\x12\x0e\n\x06source\x18\x01 
\x01(\t\"\xb1\x01\n\x15InnerProductParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x17\n\tbias_term\x18\x02 \x01(\x08:\x04true\x12-\n\rweight_filler\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x04 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x0f\n\x04\x61xis\x18\x05 \x01(\x05:\x01\x31\"D\n\x0cLogParameter\x12\x10\n\x04\x62\x61se\x18\x01 \x01(\x02:\x02-1\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"\xd6\x01\n\x0cLRNParameter\x12\x15\n\nlocal_size\x18\x01 \x01(\r:\x01\x35\x12\x10\n\x05\x61lpha\x18\x02 \x01(\x02:\x01\x31\x12\x12\n\x04\x62\x65ta\x18\x03 \x01(\x02:\x04\x30.75\x12\x44\n\x0bnorm_region\x18\x04 \x01(\x0e\x32\x1e.caffe.LRNParameter.NormRegion:\x0f\x41\x43ROSS_CHANNELS\x12\x0c\n\x01k\x18\x05 \x01(\x02:\x01\x31\"5\n\nNormRegion\x12\x13\n\x0f\x41\x43ROSS_CHANNELS\x10\x00\x12\x12\n\x0eWITHIN_CHANNEL\x10\x01\"Z\n\x13MemoryDataParameter\x12\x12\n\nbatch_size\x18\x01 \x01(\r\x12\x10\n\x08\x63hannels\x18\x02 \x01(\r\x12\x0e\n\x06height\x18\x03 \x01(\r\x12\r\n\x05width\x18\x04 \x01(\r\"d\n\x0cMVNParameter\x12 \n\x12normalize_variance\x18\x01 \x01(\x08:\x04true\x12\x1e\n\x0f\x61\x63ross_channels\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x12\n\x03\x65ps\x18\x03 \x01(\x02:\x05\x31\x65-09\"\xa2\x03\n\x10PoolingParameter\x12\x35\n\x04pool\x18\x01 \x01(\x0e\x32\".caffe.PoolingParameter.PoolMethod:\x03MAX\x12\x0e\n\x03pad\x18\x04 \x01(\r:\x01\x30\x12\x10\n\x05pad_h\x18\t \x01(\r:\x01\x30\x12\x10\n\x05pad_w\x18\n \x01(\r:\x01\x30\x12\x13\n\x0bkernel_size\x18\x02 \x01(\r\x12\x10\n\x08kernel_h\x18\x05 \x01(\r\x12\x10\n\x08kernel_w\x18\x06 \x01(\r\x12\x11\n\x06stride\x18\x03 \x01(\r:\x01\x31\x12\x10\n\x08stride_h\x18\x07 \x01(\r\x12\x10\n\x08stride_w\x18\x08 \x01(\r\x12\x37\n\x06\x65ngine\x18\x0b \x01(\x0e\x32\x1e.caffe.PoolingParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x1d\n\x0eglobal_pooling\x18\x0c 
\x01(\x08:\x05\x66\x61lse\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"F\n\x0ePowerParameter\x12\x10\n\x05power\x18\x01 \x01(\x02:\x01\x31\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"C\n\x0fPythonParameter\x12\x0e\n\x06module\x18\x01 \x01(\t\x12\r\n\x05layer\x18\x02 \x01(\t\x12\x11\n\tparam_str\x18\x03 \x01(\t\"\xc5\x01\n\x12ReductionParameter\x12=\n\toperation\x18\x01 \x01(\x0e\x32%.caffe.ReductionParameter.ReductionOp:\x03SUM\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x30\x12\x10\n\x05\x63oeff\x18\x03 \x01(\x02:\x01\x31\x12\x0c\n\x01k\x18\x04 \x01(\x05:\x01\x31\"?\n\x0bReductionOp\x12\x07\n\x03SUM\x10\x01\x12\x08\n\x04\x41SUM\x10\x02\x12\t\n\x05SUMSQ\x10\x03\x12\x08\n\x04MEAN\x10\x04\x12\x08\n\x04TOPK\x10\x05\"\x8d\x01\n\rReLUParameter\x12\x19\n\x0enegative_slope\x18\x01 \x01(\x02:\x01\x30\x12\x34\n\x06\x65ngine\x18\x02 \x01(\x0e\x32\x1b.caffe.ReLUParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"Z\n\x10ReshapeParameter\x12\x1f\n\x05shape\x18\x01 \x01(\x0b\x32\x10.caffe.BlobShape\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x30\x12\x14\n\x08num_axes\x18\x03 \x01(\x05:\x02-1\"d\n\x10SegDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x10\n\x08root_dir\x18\x02 \x01(\t\x12\x16\n\x07shuffle\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x16\n\x07\x62\x61lance\x18\x04 \x01(\x08:\x05\x66\x61lse\"x\n\x10SigmoidParameter\x12\x37\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1e.caffe.SigmoidParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"L\n\x0eSliceParameter\x12\x0f\n\x04\x61xis\x18\x03 
\x01(\x05:\x01\x31\x12\x13\n\x0bslice_point\x18\x02 \x03(\r\x12\x14\n\tslice_dim\x18\x01 \x01(\r:\x01\x31\"\x89\x01\n\x10SoftmaxParameter\x12\x37\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1e.caffe.SoftmaxParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"r\n\rTanHParameter\x12\x34\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1b.caffe.TanHParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"*\n\x12ThresholdParameter\x12\x14\n\tthreshold\x18\x01 \x01(\x02:\x01\x30\"\xc1\x02\n\x13WindowDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x12\n\nbatch_size\x18\x04 \x01(\r\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x19\n\x0c\x66g_threshold\x18\x07 \x01(\x02:\x03\x30.5\x12\x19\n\x0c\x62g_threshold\x18\x08 \x01(\x02:\x03\x30.5\x12\x19\n\x0b\x66g_fraction\x18\t \x01(\x02:\x04\x30.25\x12\x16\n\x0b\x63ontext_pad\x18\n \x01(\r:\x01\x30\x12\x17\n\tcrop_mode\x18\x0b \x01(\t:\x04warp\x12\x1b\n\x0c\x63\x61\x63he_images\x18\x0c \x01(\x08:\x05\x66\x61lse\x12\x15\n\x0broot_folder\x18\r \x01(\t:\x00\"\xeb\x01\n\x0cSPPParameter\x12\x16\n\x0epyramid_height\x18\x01 \x01(\r\x12\x31\n\x04pool\x18\x02 \x01(\x0e\x32\x1e.caffe.SPPParameter.PoolMethod:\x03MAX\x12\x33\n\x06\x65ngine\x18\x06 \x01(\x0e\x32\x1a.caffe.SPPParameter.Engine:\x07\x44\x45\x46\x41ULT\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"Y\n\x13ROIPoolingParameter\x12\x13\n\x08pooled_h\x18\x01 \x01(\r:\x01\x30\x12\x13\n\x08pooled_w\x18\x02 
\x01(\r:\x01\x30\x12\x18\n\rspatial_scale\x18\x03 \x01(\x02:\x01\x31\"\xe0\x13\n\x10V1LayerParameter\x12\x0e\n\x06\x62ottom\x18\x02 \x03(\t\x12\x0b\n\x03top\x18\x03 \x03(\t\x12\x0c\n\x04name\x18\x04 \x01(\t\x12$\n\x07include\x18 \x03(\x0b\x32\x13.caffe.NetStateRule\x12$\n\x07\x65xclude\x18! \x03(\x0b\x32\x13.caffe.NetStateRule\x12/\n\x04type\x18\x05 \x01(\x0e\x32!.caffe.V1LayerParameter.LayerType\x12\x1f\n\x05\x62lobs\x18\x06 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x0e\n\x05param\x18\xe9\x07 \x03(\t\x12>\n\x0f\x62lob_share_mode\x18\xea\x07 \x03(\x0e\x32$.caffe.V1LayerParameter.DimCheckMode\x12\x10\n\x08\x62lobs_lr\x18\x07 \x03(\x02\x12\x14\n\x0cweight_decay\x18\x08 \x03(\x02\x12\x13\n\x0bloss_weight\x18# \x03(\x02\x12\x30\n\x0e\x61\x63\x63uracy_param\x18\x1b \x01(\x0b\x32\x18.caffe.AccuracyParameter\x12,\n\x0c\x61rgmax_param\x18\x17 \x01(\x0b\x32\x16.caffe.ArgMaxParameter\x12,\n\x0c\x63oncat_param\x18\t \x01(\x0b\x32\x16.caffe.ConcatParameter\x12?\n\x16\x63ontrastive_loss_param\x18( \x01(\x0b\x32\x1f.caffe.ContrastiveLossParameter\x12\x36\n\x11\x63onvolution_param\x18\n \x01(\x0b\x32\x1b.caffe.ConvolutionParameter\x12(\n\ndata_param\x18\x0b \x01(\x0b\x32\x14.caffe.DataParameter\x12.\n\rdropout_param\x18\x0c \x01(\x0b\x32\x17.caffe.DropoutParameter\x12\x33\n\x10\x64ummy_data_param\x18\x1a \x01(\x0b\x32\x19.caffe.DummyDataParameter\x12.\n\reltwise_param\x18\x18 \x01(\x0b\x32\x17.caffe.EltwiseParameter\x12&\n\texp_param\x18) \x01(\x0b\x32\x13.caffe.ExpParameter\x12\x31\n\x0fhdf5_data_param\x18\r \x01(\x0b\x32\x18.caffe.HDF5DataParameter\x12\x35\n\x11hdf5_output_param\x18\x0e \x01(\x0b\x32\x1a.caffe.HDF5OutputParameter\x12\x33\n\x10hinge_loss_param\x18\x1d \x01(\x0b\x32\x19.caffe.HingeLossParameter\x12\x33\n\x10image_data_param\x18\x0f \x01(\x0b\x32\x19.caffe.ImageDataParameter\x12\x39\n\x13infogain_loss_param\x18\x10 \x01(\x0b\x32\x1c.caffe.InfogainLossParameter\x12\x39\n\x13inner_product_param\x18\x11 
\x01(\x0b\x32\x1c.caffe.InnerProductParameter\x12&\n\tlrn_param\x18\x12 \x01(\x0b\x32\x13.caffe.LRNParameter\x12\x35\n\x11memory_data_param\x18\x16 \x01(\x0b\x32\x1a.caffe.MemoryDataParameter\x12&\n\tmvn_param\x18\" \x01(\x0b\x32\x13.caffe.MVNParameter\x12.\n\rpooling_param\x18\x13 \x01(\x0b\x32\x17.caffe.PoolingParameter\x12*\n\x0bpower_param\x18\x15 \x01(\x0b\x32\x15.caffe.PowerParameter\x12(\n\nrelu_param\x18\x1e \x01(\x0b\x32\x14.caffe.ReLUParameter\x12.\n\rsigmoid_param\x18& \x01(\x0b\x32\x17.caffe.SigmoidParameter\x12.\n\rsoftmax_param\x18\' \x01(\x0b\x32\x17.caffe.SoftmaxParameter\x12*\n\x0bslice_param\x18\x1f \x01(\x0b\x32\x15.caffe.SliceParameter\x12(\n\ntanh_param\x18% \x01(\x0b\x32\x14.caffe.TanHParameter\x12\x32\n\x0fthreshold_param\x18\x19 \x01(\x0b\x32\x19.caffe.ThresholdParameter\x12\x35\n\x11window_data_param\x18\x14 \x01(\x0b\x32\x1a.caffe.WindowDataParameter\x12\x37\n\x0ftransform_param\x18$ \x01(\x0b\x32\x1e.caffe.TransformationParameter\x12(\n\nloss_param\x18* \x01(\x0b\x32\x14.caffe.LossParameter\x12&\n\x05layer\x18\x01 \x01(\x0b\x32\x17.caffe.V0LayerParameter\"\xd8\x04\n\tLayerType\x12\x08\n\x04NONE\x10\x00\x12\n\n\x06\x41\x42SVAL\x10#\x12\x0c\n\x08\x41\x43\x43URACY\x10\x01\x12\n\n\x06\x41RGMAX\x10\x1e\x12\x08\n\x04\x42NLL\x10\x02\x12\n\n\x06\x43ONCAT\x10\x03\x12\x14\n\x10\x43ONTRASTIVE_LOSS\x10%\x12\x0f\n\x0b\x43ONVOLUTION\x10\x04\x12\x08\n\x04\x44\x41TA\x10\x05\x12\x11\n\rDECONVOLUTION\x10\'\x12\x0b\n\x07\x44ROPOUT\x10\x06\x12\x0e\n\nDUMMY_DATA\x10 
\x12\x12\n\x0e\x45UCLIDEAN_LOSS\x10\x07\x12\x0b\n\x07\x45LTWISE\x10\x19\x12\x07\n\x03\x45XP\x10&\x12\x0b\n\x07\x46LATTEN\x10\x08\x12\r\n\tHDF5_DATA\x10\t\x12\x0f\n\x0bHDF5_OUTPUT\x10\n\x12\x0e\n\nHINGE_LOSS\x10\x1c\x12\n\n\x06IM2COL\x10\x0b\x12\x0e\n\nIMAGE_DATA\x10\x0c\x12\x11\n\rINFOGAIN_LOSS\x10\r\x12\x11\n\rINNER_PRODUCT\x10\x0e\x12\x07\n\x03LRN\x10\x0f\x12\x0f\n\x0bMEMORY_DATA\x10\x1d\x12\x1d\n\x19MULTINOMIAL_LOGISTIC_LOSS\x10\x10\x12\x07\n\x03MVN\x10\"\x12\x0b\n\x07POOLING\x10\x11\x12\t\n\x05POWER\x10\x1a\x12\x08\n\x04RELU\x10\x12\x12\x0b\n\x07SIGMOID\x10\x13\x12\x1e\n\x1aSIGMOID_CROSS_ENTROPY_LOSS\x10\x1b\x12\x0b\n\x07SILENCE\x10$\x12\x0b\n\x07SOFTMAX\x10\x14\x12\x10\n\x0cSOFTMAX_LOSS\x10\x15\x12\t\n\x05SPLIT\x10\x16\x12\t\n\x05SLICE\x10!\x12\x08\n\x04TANH\x10\x17\x12\x0f\n\x0bWINDOW_DATA\x10\x18\x12\r\n\tTHRESHOLD\x10\x1f\"*\n\x0c\x44imCheckMode\x12\n\n\x06STRICT\x10\x00\x12\x0e\n\nPERMISSIVE\x10\x01\"\xfd\x07\n\x10V0LayerParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x12\n\nnum_output\x18\x03 \x01(\r\x12\x16\n\x08\x62iasterm\x18\x04 \x01(\x08:\x04true\x12-\n\rweight_filler\x18\x05 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x06 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x0e\n\x03pad\x18\x07 \x01(\r:\x01\x30\x12\x12\n\nkernelsize\x18\x08 \x01(\r\x12\x10\n\x05group\x18\t \x01(\r:\x01\x31\x12\x11\n\x06stride\x18\n \x01(\r:\x01\x31\x12\x35\n\x04pool\x18\x0b \x01(\x0e\x32\".caffe.V0LayerParameter.PoolMethod:\x03MAX\x12\x1a\n\rdropout_ratio\x18\x0c \x01(\x02:\x03\x30.5\x12\x15\n\nlocal_size\x18\r \x01(\r:\x01\x35\x12\x10\n\x05\x61lpha\x18\x0e \x01(\x02:\x01\x31\x12\x12\n\x04\x62\x65ta\x18\x0f \x01(\x02:\x04\x30.75\x12\x0c\n\x01k\x18\x16 \x01(\x02:\x01\x31\x12\x0e\n\x06source\x18\x10 \x01(\t\x12\x10\n\x05scale\x18\x11 \x01(\x02:\x01\x31\x12\x10\n\x08meanfile\x18\x12 \x01(\t\x12\x11\n\tbatchsize\x18\x13 \x01(\r\x12\x13\n\x08\x63ropsize\x18\x14 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x15 
\x01(\x08:\x05\x66\x61lse\x12\x1f\n\x05\x62lobs\x18\x32 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x10\n\x08\x62lobs_lr\x18\x33 \x03(\x02\x12\x14\n\x0cweight_decay\x18\x34 \x03(\x02\x12\x14\n\trand_skip\x18\x35 \x01(\r:\x01\x30\x12\x1d\n\x10\x64\x65t_fg_threshold\x18\x36 \x01(\x02:\x03\x30.5\x12\x1d\n\x10\x64\x65t_bg_threshold\x18\x37 \x01(\x02:\x03\x30.5\x12\x1d\n\x0f\x64\x65t_fg_fraction\x18\x38 \x01(\x02:\x04\x30.25\x12\x1a\n\x0f\x64\x65t_context_pad\x18: \x01(\r:\x01\x30\x12\x1b\n\rdet_crop_mode\x18; \x01(\t:\x04warp\x12\x12\n\x07new_num\x18< \x01(\x05:\x01\x30\x12\x17\n\x0cnew_channels\x18= \x01(\x05:\x01\x30\x12\x15\n\nnew_height\x18> \x01(\x05:\x01\x30\x12\x14\n\tnew_width\x18? \x01(\x05:\x01\x30\x12\x1d\n\x0eshuffle_images\x18@ \x01(\x08:\x05\x66\x61lse\x12\x15\n\nconcat_dim\x18\x41 \x01(\r:\x01\x31\x12\x36\n\x11hdf5_output_param\x18\xe9\x07 \x01(\x0b\x32\x1a.caffe.HDF5OutputParameter\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"W\n\x0ePReLUParameter\x12&\n\x06\x66iller\x18\x01 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x1d\n\x0e\x63hannel_shared\x18\x02 \x01(\x08:\x05\x66\x61lse\"\xa5\x01\n\x0eScaleParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x13\n\x08num_axes\x18\x02 \x01(\x05:\x01\x31\x12&\n\x06\x66iller\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x18\n\tbias_term\x18\x04 \x01(\x08:\x05\x66\x61lse\x12+\n\x0b\x62ias_filler\x18\x05 \x01(\x0b\x32\x16.caffe.FillerParameter\"]\n\rBiasParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x13\n\x08num_axes\x18\x02 \x01(\x05:\x01\x31\x12&\n\x06\x66iller\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\"p\n\x17\x42\x61tchReductionParameter\x12\r\n\x05level\x18\x01 \x03(\x05\x12\x32\n\x0freduction_param\x18\x02 \x01(\x0b\x32\x19.caffe.ReductionParameter\x12\x12\n\x03pos\x18\x03 \x01(\x08:\x05\x66\x61lse\"o\n\x1bMemoryOptimizationParameter\x12\x1c\n\x0eoptimize_train\x18\x01 \x01(\x08:\x04true\x12\x1c\n\roptimize_test\x18\x02 
\x01(\x08:\x05\x66\x61lse\x12\x14\n\x0c\x65xclude_blob\x18\x03 \x03(\t*\x1c\n\x05Phase\x12\t\n\x05TRAIN\x10\x00\x12\x08\n\x04TEST\x10\x01')
# Descriptor for the file-level `caffe.Phase` enum (TRAIN=0, TEST=1).
# `containing_type` is left None here and is wired up later in this
# generated module; the serialized_start/end offsets index into
# DESCRIPTOR.serialized_pb. Generated code — regenerate, do not hand-edit.
_PHASE = _descriptor.EnumDescriptor(
  name='Phase',
  full_name='caffe.Phase',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='TRAIN', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='TEST', index=1, number=1,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=15473,
  serialized_end=15501,
)
# Module-level wrapper plus convenience constants mirroring the enum values,
# so callers can write `caffe_pb2.TRAIN` / `caffe_pb2.TEST` directly.
Phase = enum_type_wrapper.EnumTypeWrapper(_PHASE)
TRAIN = 0
TEST = 1
# Descriptor for the nested enum `caffe.FillerParameter.VarianceNorm`
# (FAN_IN=0, FAN_OUT=1, AVERAGE=2). Generated code — regenerate from
# caffe.proto rather than editing.
_FILLERPARAMETER_VARIANCENORM = _descriptor.EnumDescriptor(
  name='VarianceNorm',
  full_name='caffe.FillerParameter.VarianceNorm',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='FAN_IN', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='FAN_OUT', index=1, number=1,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='AVERAGE', index=2, number=2,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=608,
  serialized_end=660,
)
# Descriptor for the nested enum `caffe.SolverParameter.SolverMode`
# (CPU=0, GPU=1). Generated code — do not hand-edit.
_SOLVERPARAMETER_SOLVERMODE = _descriptor.EnumDescriptor(
  name='SolverMode',
  full_name='caffe.SolverParameter.SolverMode',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='CPU', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='GPU', index=1, number=1,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=2035,
  serialized_end=2065,
)
# Descriptor for the nested enum `caffe.SolverParameter.SolverType`
# (SGD=0, NESTEROV=1, ADAGRAD=2). Generated code — do not hand-edit.
_SOLVERPARAMETER_SOLVERTYPE = _descriptor.EnumDescriptor(
  name='SolverType',
  full_name='caffe.SolverParameter.SolverType',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='SGD', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='NESTEROV', index=1, number=1,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ADAGRAD', index=2, number=2,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=2067,
  serialized_end=2115,
)
# Descriptor for the nested enum `caffe.ParamSpec.DimCheckMode`
# (STRICT=0, PERMISSIVE=1). Generated code — do not hand-edit.
_PARAMSPEC_DIMCHECKMODE = _descriptor.EnumDescriptor(
  name='DimCheckMode',
  full_name='caffe.ParamSpec.DimCheckMode',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='STRICT', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='PERMISSIVE', index=1, number=1,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=2546,
  serialized_end=2588,
)
# Descriptor for the nested enum `caffe.BNParameter.Engine`
# (DEFAULT=0, CAFFE=1, CUDNN=2). Generated code — do not hand-edit.
_BNPARAMETER_ENGINE = _descriptor.EnumDescriptor(
  name='Engine',
  full_name='caffe.BNParameter.Engine',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='DEFAULT', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='CAFFE', index=1, number=1,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='CUDNN', index=2, number=2,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=5924,
  serialized_end=5967,
)
_CONVOLUTIONPARAMETER_ENGINE = _descriptor.EnumDescriptor(
name='Engine',
full_name='caffe.ConvolutionParameter.Engine',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAFFE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CUDNN', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=5924,
serialized_end=5967,
)
_DATAPARAMETER_DB = _descriptor.EnumDescriptor(
name='DB',
full_name='caffe.DataParameter.DB',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='LEVELDB', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LMDB', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=6886,
serialized_end=6913,
)
_ELTWISEPARAMETER_ELTWISEOP = _descriptor.EnumDescriptor(
name='EltwiseOp',
full_name='caffe.EltwiseParameter.EltwiseOp',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='PROD', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SUM', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MAX', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STOCHASTIC_SUM', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=7253,
serialized_end=7312,
)
_HINGELOSSPARAMETER_NORM = _descriptor.EnumDescriptor(
name='Norm',
full_name='caffe.HingeLossParameter.Norm',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='L1', index=0, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='L2', index=1, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=7638,
serialized_end=7660,
)
_VIDEODATAPARAMETER_MODALITY = _descriptor.EnumDescriptor(
name='Modality',
full_name='caffe.VideoDataParameter.Modality',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='RGB', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FLOW', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=8353,
serialized_end=8382,
)
_LRNPARAMETER_NORMREGION = _descriptor.EnumDescriptor(
name='NormRegion',
full_name='caffe.LRNParameter.NormRegion',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='ACROSS_CHANNELS', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WITHIN_CHANNEL', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=8837,
serialized_end=8890,
)
_POOLINGPARAMETER_POOLMETHOD = _descriptor.EnumDescriptor(
name='PoolMethod',
full_name='caffe.PoolingParameter.PoolMethod',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='MAX', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AVE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STOCHASTIC', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=9414,
serialized_end=9460,
)
_POOLINGPARAMETER_ENGINE = _descriptor.EnumDescriptor(
name='Engine',
full_name='caffe.PoolingParameter.Engine',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAFFE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CUDNN', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=5924,
serialized_end=5967,
)
_REDUCTIONPARAMETER_REDUCTIONOP = _descriptor.EnumDescriptor(
name='ReductionOp',
full_name='caffe.ReductionParameter.ReductionOp',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='SUM', index=0, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ASUM', index=1, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SUMSQ', index=2, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MEAN', index=3, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TOPK', index=4, number=5,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=9783,
serialized_end=9846,
)
_RELUPARAMETER_ENGINE = _descriptor.EnumDescriptor(
name='Engine',
full_name='caffe.ReLUParameter.Engine',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAFFE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CUDNN', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=5924,
serialized_end=5967,
)
_SIGMOIDPARAMETER_ENGINE = _descriptor.EnumDescriptor(
name='Engine',
full_name='caffe.SigmoidParameter.Engine',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAFFE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CUDNN', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=5924,
serialized_end=5967,
)
_SOFTMAXPARAMETER_ENGINE = _descriptor.EnumDescriptor(
name='Engine',
full_name='caffe.SoftmaxParameter.Engine',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAFFE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CUDNN', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=5924,
serialized_end=5967,
)
_TANHPARAMETER_ENGINE = _descriptor.EnumDescriptor(
name='Engine',
full_name='caffe.TanHParameter.Engine',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAFFE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CUDNN', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=5924,
serialized_end=5967,
)
_SPPPARAMETER_POOLMETHOD = _descriptor.EnumDescriptor(
name='PoolMethod',
full_name='caffe.SPPParameter.PoolMethod',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='MAX', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AVE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STOCHASTIC', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=9414,
serialized_end=9460,
)
_SPPPARAMETER_ENGINE = _descriptor.EnumDescriptor(
name='Engine',
full_name='caffe.SPPParameter.Engine',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAFFE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CUDNN', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=5924,
serialized_end=5967,
)
# ---------------------------------------------------------------------------
# Legacy (V1/V0) layer enums — protoc-generated; do not edit by hand.
# caffe.V1LayerParameter.LayerType: `index` is alphabetical declaration order,
# while `number` is the historical wire value, so the two deliberately differ.
# ---------------------------------------------------------------------------
_V1LAYERPARAMETER_LAYERTYPE = _descriptor.EnumDescriptor(
  name='LayerType',
  full_name='caffe.V1LayerParameter.LayerType',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='NONE', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ABSVAL', index=1, number=35,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ACCURACY', index=2, number=1,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ARGMAX', index=3, number=30,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='BNLL', index=4, number=2,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='CONCAT', index=5, number=3,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='CONTRASTIVE_LOSS', index=6, number=37,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='CONVOLUTION', index=7, number=4,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='DATA', index=8, number=5,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='DECONVOLUTION', index=9, number=39,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='DROPOUT', index=10, number=6,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='DUMMY_DATA', index=11, number=32,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='EUCLIDEAN_LOSS', index=12, number=7,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ELTWISE', index=13, number=25,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='EXP', index=14, number=38,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='FLATTEN', index=15, number=8,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='HDF5_DATA', index=16, number=9,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='HDF5_OUTPUT', index=17, number=10,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='HINGE_LOSS', index=18, number=28,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='IM2COL', index=19, number=11,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='IMAGE_DATA', index=20, number=12,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='INFOGAIN_LOSS', index=21, number=13,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='INNER_PRODUCT', index=22, number=14,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='LRN', index=23, number=15,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='MEMORY_DATA', index=24, number=29,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='MULTINOMIAL_LOGISTIC_LOSS', index=25, number=16,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='MVN', index=26, number=34,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='POOLING', index=27, number=17,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='POWER', index=28, number=26,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='RELU', index=29, number=18,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='SIGMOID', index=30, number=19,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='SIGMOID_CROSS_ENTROPY_LOSS', index=31, number=27,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='SILENCE', index=32, number=36,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='SOFTMAX', index=33, number=20,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='SOFTMAX_LOSS', index=34, number=21,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='SPLIT', index=35, number=22,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='SLICE', index=36, number=33,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='TANH', index=37, number=23,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='WINDOW_DATA', index=38, number=24,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='THRESHOLD', index=39, number=31,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=13224,
  serialized_end=13824,
)
# caffe.V1LayerParameter.DimCheckMode — same values/offsets as
# caffe.ParamSpec.DimCheckMode earlier in this file.
_V1LAYERPARAMETER_DIMCHECKMODE = _descriptor.EnumDescriptor(
  name='DimCheckMode',
  full_name='caffe.V1LayerParameter.DimCheckMode',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='STRICT', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='PERMISSIVE', index=1, number=1,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=2546,
  serialized_end=2588,
)
# caffe.V0LayerParameter.PoolMethod — same values/offsets as
# caffe.PoolingParameter.PoolMethod earlier in this file.
_V0LAYERPARAMETER_POOLMETHOD = _descriptor.EnumDescriptor(
  name='PoolMethod',
  full_name='caffe.V0LayerParameter.PoolMethod',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='MAX', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='AVE', index=1, number=1,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='STOCHASTIC', index=2, number=2,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=9414,
  serialized_end=9460,
)
# ---------------------------------------------------------------------------
# Message Descriptors (protoc-generated; do not edit by hand).
# type/cpp_type/label are numeric codes from descriptor_pb2 — e.g. the
# repeated fields here carry label=3 with default_value=[].  The option blob
# '\020\001' on repeated numeric fields appears to encode [packed=true]
# (TODO confirm against caffe.proto).
# ---------------------------------------------------------------------------

# caffe.BlobShape — N-D blob dimensions as a packed repeated int64 `dim`.
_BLOBSHAPE = _descriptor.Descriptor(
  name='BlobShape',
  full_name='caffe.BlobShape',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='dim', full_name='caffe.BlobShape.dim', index=0,
      number=1, type=3, cpp_type=2, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), '\020\001')),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=22,
  serialized_end=50,
)
# caffe.BlobProto — blob payload: new-style `shape` (field 7) plus packed
# `data`/`diff` arrays, alongside legacy num/channels/height/width (fields 1-4).
_BLOBPROTO = _descriptor.Descriptor(
  name='BlobProto',
  full_name='caffe.BlobProto',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='shape', full_name='caffe.BlobProto.shape', index=0,
      number=7, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='data', full_name='caffe.BlobProto.data', index=1,
      number=5, type=2, cpp_type=6, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), '\020\001')),
    _descriptor.FieldDescriptor(
      name='diff', full_name='caffe.BlobProto.diff', index=2,
      number=6, type=2, cpp_type=6, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), '\020\001')),
    _descriptor.FieldDescriptor(
      name='num', full_name='caffe.BlobProto.num', index=3,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=True, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='channels', full_name='caffe.BlobProto.channels', index=4,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=True, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='height', full_name='caffe.BlobProto.height', index=5,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=True, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='width', full_name='caffe.BlobProto.width', index=6,
      number=4, type=5, cpp_type=1, label=1,
      has_default_value=True, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=53,
  serialized_end=207,
)
# caffe.BlobProtoVector — simple repeated-BlobProto container.
_BLOBPROTOVECTOR = _descriptor.Descriptor(
  name='BlobProtoVector',
  full_name='caffe.BlobProtoVector',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='blobs', full_name='caffe.BlobProtoVector.blobs', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=209,
  serialized_end=259,
)
# caffe.Datum — one training sample: dimensions (channels/height/width),
# raw `data` bytes or `float_data`, an integer `label`, and an `encoded`
# flag (default False).  Protoc-generated descriptor; do not edit by hand.
_DATUM = _descriptor.Descriptor(
  name='Datum',
  full_name='caffe.Datum',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='channels', full_name='caffe.Datum.channels', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='height', full_name='caffe.Datum.height', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='width', full_name='caffe.Datum.width', index=2,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='data', full_name='caffe.Datum.data', index=3,
      number=4, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value="",
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='label', full_name='caffe.Datum.label', index=4,
      number=5, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='float_data', full_name='caffe.Datum.float_data', index=5,
      number=6, type=2, cpp_type=6, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='encoded', full_name='caffe.Datum.encoded', index=6,
      number=7, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=262,
  serialized_end=391,
)
# caffe.FillerParameter — weight-initialization config: filler `type`
# (default "constant"), constant `value`, uniform [min, max], gaussian
# mean/std, `sparse` count (-1 = disabled), and the VarianceNorm enum for
# xavier/msra-style fillers.  Protoc-generated; do not edit by hand.
# NOTE(review): uses the Python 2-only `unicode` builtin for the string
# default — this module cannot import on Python 3; regenerate with a
# modern protoc to fix.
_FILLERPARAMETER = _descriptor.Descriptor(
  name='FillerParameter',
  full_name='caffe.FillerParameter',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='type', full_name='caffe.FillerParameter.type', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=True, default_value=unicode("constant", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='value', full_name='caffe.FillerParameter.value', index=1,
      number=2, type=2, cpp_type=6, label=1,
      has_default_value=True, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='min', full_name='caffe.FillerParameter.min', index=2,
      number=3, type=2, cpp_type=6, label=1,
      has_default_value=True, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='max', full_name='caffe.FillerParameter.max', index=3,
      number=4, type=2, cpp_type=6, label=1,
      has_default_value=True, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='mean', full_name='caffe.FillerParameter.mean', index=4,
      number=5, type=2, cpp_type=6, label=1,
      has_default_value=True, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='std', full_name='caffe.FillerParameter.std', index=5,
      number=6, type=2, cpp_type=6, label=1,
      has_default_value=True, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='sparse', full_name='caffe.FillerParameter.sparse', index=6,
      number=7, type=5, cpp_type=1, label=1,
      has_default_value=True, default_value=-1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='variance_norm', full_name='caffe.FillerParameter.variance_norm', index=7,
      number=8, type=14, cpp_type=8, label=1,
      has_default_value=True, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _FILLERPARAMETER_VARIANCENORM,
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=394,
  serialized_end=660,
)
# caffe.NetParameter — whole-network definition: net `name`, legacy
# input/input_dim plus new-style input_shape, runtime flags
# (force_backward/debug_info), current `layer` list (field 100), a
# `mem_param` message (field 200), and the deprecated V1 `layers` list
# (field 2).  Protoc-generated; do not edit by hand.
# NOTE(review): also relies on the Python 2-only `unicode` builtin.
_NETPARAMETER = _descriptor.Descriptor(
  name='NetParameter',
  full_name='caffe.NetParameter',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='caffe.NetParameter.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='input', full_name='caffe.NetParameter.input', index=1,
      number=3, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='input_shape', full_name='caffe.NetParameter.input_shape', index=2,
      number=8, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='input_dim', full_name='caffe.NetParameter.input_dim', index=3,
      number=4, type=5, cpp_type=1, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='force_backward', full_name='caffe.NetParameter.force_backward', index=4,
      number=5, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='state', full_name='caffe.NetParameter.state', index=5,
      number=6, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='debug_info', full_name='caffe.NetParameter.debug_info', index=6,
      number=7, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='layer', full_name='caffe.NetParameter.layer', index=7,
      number=100, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='mem_param', full_name='caffe.NetParameter.mem_param', index=8,
      number=200, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='layers', full_name='caffe.NetParameter.layers', index=9,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=663,
  serialized_end=989,
)
_SOLVERPARAMETER = _descriptor.Descriptor(
name='SolverParameter',
full_name='caffe.SolverParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='net', full_name='caffe.SolverParameter.net', index=0,
number=24, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='net_param', full_name='caffe.SolverParameter.net_param', index=1,
number=25, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='train_net', full_name='caffe.SolverParameter.train_net', index=2,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='test_net', full_name='caffe.SolverParameter.test_net', index=3,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='train_net_param', full_name='caffe.SolverParameter.train_net_param', index=4,
number=21, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='test_net_param', full_name='caffe.SolverParameter.test_net_param', index=5,
number=22, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='train_state', full_name='caffe.SolverParameter.train_state', index=6,
number=26, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='test_state', full_name='caffe.SolverParameter.test_state', index=7,
number=27, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='test_iter', full_name='caffe.SolverParameter.test_iter', index=8,
number=3, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='test_interval', full_name='caffe.SolverParameter.test_interval', index=9,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='test_compute_loss', full_name='caffe.SolverParameter.test_compute_loss', index=10,
number=19, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='test_initialization', full_name='caffe.SolverParameter.test_initialization', index=11,
number=32, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='base_lr', full_name='caffe.SolverParameter.base_lr', index=12,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='display', full_name='caffe.SolverParameter.display', index=13,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='average_loss', full_name='caffe.SolverParameter.average_loss', index=14,
number=33, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_iter', full_name='caffe.SolverParameter.max_iter', index=15,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='iter_size', full_name='caffe.SolverParameter.iter_size', index=16,
number=36, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lr_policy', full_name='caffe.SolverParameter.lr_policy', index=17,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='gamma', full_name='caffe.SolverParameter.gamma', index=18,
number=9, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='power', full_name='caffe.SolverParameter.power', index=19,
number=10, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='momentum', full_name='caffe.SolverParameter.momentum', index=20,
number=11, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='weight_decay', full_name='caffe.SolverParameter.weight_decay', index=21,
number=12, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='regularization_type', full_name='caffe.SolverParameter.regularization_type', index=22,
number=29, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=unicode("L2", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stepsize', full_name='caffe.SolverParameter.stepsize', index=23,
number=13, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stepvalue', full_name='caffe.SolverParameter.stepvalue', index=24,
number=34, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='clip_gradients', full_name='caffe.SolverParameter.clip_gradients', index=25,
number=35, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='snapshot', full_name='caffe.SolverParameter.snapshot', index=26,
number=14, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='snapshot_prefix', full_name='caffe.SolverParameter.snapshot_prefix', index=27,
number=15, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='snapshot_diff', full_name='caffe.SolverParameter.snapshot_diff', index=28,
number=16, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='solver_mode', full_name='caffe.SolverParameter.solver_mode', index=29,
number=17, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='device_id', full_name='caffe.SolverParameter.device_id', index=30,
number=18, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='group_id', full_name='caffe.SolverParameter.group_id', index=31,
number=38, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='random_seed', full_name='caffe.SolverParameter.random_seed', index=32,
number=20, type=3, cpp_type=2, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='solver_type', full_name='caffe.SolverParameter.solver_type', index=33,
number=30, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='delta', full_name='caffe.SolverParameter.delta', index=34,
number=31, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=1e-08,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='debug_info', full_name='caffe.SolverParameter.debug_info', index=35,
number=23, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='snapshot_after_train', full_name='caffe.SolverParameter.snapshot_after_train', index=36,
number=28, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='richness', full_name='caffe.SolverParameter.richness', index=37,
number=37, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=300,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_SOLVERPARAMETER_SOLVERMODE,
_SOLVERPARAMETER_SOLVERTYPE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=992,
serialized_end=2115,
)
# Descriptor for the caffe.SolverState message (a solver snapshot:
# iteration counter, path of the learned net, history blobs, current step).
# NOTE(review): this is protoc-generated code; the only hand edit is
# replacing the Python-2-only unicode("", "utf-8") default with the
# equivalent u"" literal so the module also imports under Python 3.3+.
# Prefer regenerating from caffe.proto for any other change.
_SOLVERSTATE = _descriptor.Descriptor(
  name='SolverState',
  full_name='caffe.SolverState',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='iter', full_name='caffe.SolverState.iter', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='learned_net', full_name='caffe.SolverState.learned_net', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=u"",  # was unicode("", "utf-8"); Py2/Py3-compatible
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='history', full_name='caffe.SolverState.history', index=2,
      number=3, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='current_step', full_name='caffe.SolverState.current_step', index=3,
      number=4, type=5, cpp_type=1, label=1,
      has_default_value=True, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=2117,
  serialized_end=2225,
  )
# Descriptor for the caffe.NetState message (fields: phase, level, stage).
# NOTE(review): this appears to be protoc-generated code — it should be
# regenerated from caffe.proto rather than edited by hand.
_NETSTATE = _descriptor.Descriptor(
  name='NetState',
  full_name='caffe.NetState',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    # phase: enum field (type=14), defaults to 1.
    _descriptor.FieldDescriptor(
      name='phase', full_name='caffe.NetState.phase', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=True, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    # level: int32 field (type=5), defaults to 0.
    _descriptor.FieldDescriptor(
      name='level', full_name='caffe.NetState.level', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=True, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    # stage: repeated string field (type=9, label=3).
    _descriptor.FieldDescriptor(
      name='stage', full_name='caffe.NetState.stage', index=2,
      number=3, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=2227,
  serialized_end=2305,
  )
# Descriptor for the caffe.NetStateRule message (fields: phase, min_level,
# max_level, stage, not_stage).
# NOTE(review): this appears to be protoc-generated code — it should be
# regenerated from caffe.proto rather than edited by hand.
_NETSTATERULE = _descriptor.Descriptor(
  name='NetStateRule',
  full_name='caffe.NetStateRule',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    # phase: enum field (type=14), no explicit default.
    _descriptor.FieldDescriptor(
      name='phase', full_name='caffe.NetStateRule.phase', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    # min_level / max_level: int32 bounds (type=5).
    _descriptor.FieldDescriptor(
      name='min_level', full_name='caffe.NetStateRule.min_level', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='max_level', full_name='caffe.NetStateRule.max_level', index=2,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    # stage / not_stage: repeated string fields (type=9, label=3).
    _descriptor.FieldDescriptor(
      name='stage', full_name='caffe.NetStateRule.stage', index=3,
      number=4, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='not_stage', full_name='caffe.NetStateRule.not_stage', index=4,
      number=5, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=2307,
  serialized_end=2422,
  )
# Descriptor for the caffe.ParamSpec message (per-parameter training spec:
# name, share_mode, lr_mult, decay_mult).
# NOTE(review): this is protoc-generated code; the only hand edit is
# replacing the Python-2-only unicode("", "utf-8") default with the
# equivalent u"" literal so the module also imports under Python 3.3+.
# Prefer regenerating from caffe.proto for any other change.
_PARAMSPEC = _descriptor.Descriptor(
  name='ParamSpec',
  full_name='caffe.ParamSpec',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='caffe.ParamSpec.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=u"",  # was unicode("", "utf-8"); Py2/Py3-compatible
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='share_mode', full_name='caffe.ParamSpec.share_mode', index=1,
      number=2, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='lr_mult', full_name='caffe.ParamSpec.lr_mult', index=2,
      number=3, type=2, cpp_type=6, label=1,
      has_default_value=True, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='decay_mult', full_name='caffe.ParamSpec.decay_mult', index=3,
      number=4, type=2, cpp_type=6, label=1,
      has_default_value=True, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _PARAMSPEC_DIMCHECKMODE,
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=2425,
  serialized_end=2588,
  )
# Descriptor for the caffe.LayerParameter message: layer name/type, bottom/top
# blob names, phase, loss weights, param specs, blobs, include/exclude rules,
# and one optional sub-message per layer-specific parameter type.
# NOTE(review): this is protoc-generated code; the only hand edit is
# replacing the Python-2-only unicode("", "utf-8") defaults with the
# equivalent u"" literal so the module also imports under Python 3.3+.
# Prefer regenerating from caffe.proto for any other change.
_LAYERPARAMETER = _descriptor.Descriptor(
  name='LayerParameter',
  full_name='caffe.LayerParameter',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='caffe.LayerParameter.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=u"",  # was unicode("", "utf-8"); Py2/Py3-compatible
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='type', full_name='caffe.LayerParameter.type', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=u"",  # was unicode("", "utf-8"); Py2/Py3-compatible
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='bottom', full_name='caffe.LayerParameter.bottom', index=2,
      number=3, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='top', full_name='caffe.LayerParameter.top', index=3,
      number=4, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='phase', full_name='caffe.LayerParameter.phase', index=4,
      number=10, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='loss_weight', full_name='caffe.LayerParameter.loss_weight', index=5,
      number=5, type=2, cpp_type=6, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='param', full_name='caffe.LayerParameter.param', index=6,
      number=6, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='blobs', full_name='caffe.LayerParameter.blobs', index=7,
      number=7, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='propagate_down', full_name='caffe.LayerParameter.propagate_down', index=8,
      number=11, type=8, cpp_type=7, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='include', full_name='caffe.LayerParameter.include', index=9,
      number=8, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='exclude', full_name='caffe.LayerParameter.exclude', index=10,
      number=9, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='transform_param', full_name='caffe.LayerParameter.transform_param', index=11,
      number=100, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='loss_param', full_name='caffe.LayerParameter.loss_param', index=12,
      number=101, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='accuracy_param', full_name='caffe.LayerParameter.accuracy_param', index=13,
      number=102, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='argmax_param', full_name='caffe.LayerParameter.argmax_param', index=14,
      number=103, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='bn_param', full_name='caffe.LayerParameter.bn_param', index=15,
      number=137, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='concat_param', full_name='caffe.LayerParameter.concat_param', index=16,
      number=104, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='contrastive_loss_param', full_name='caffe.LayerParameter.contrastive_loss_param', index=17,
      number=105, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='convolution_param', full_name='caffe.LayerParameter.convolution_param', index=18,
      number=106, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='data_param', full_name='caffe.LayerParameter.data_param', index=19,
      number=107, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='dropout_param', full_name='caffe.LayerParameter.dropout_param', index=20,
      number=108, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='dummy_data_param', full_name='caffe.LayerParameter.dummy_data_param', index=21,
      number=109, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='eltwise_param', full_name='caffe.LayerParameter.eltwise_param', index=22,
      number=110, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='exp_param', full_name='caffe.LayerParameter.exp_param', index=23,
      number=111, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='flatten_param', full_name='caffe.LayerParameter.flatten_param', index=24,
      number=135, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='hdf5_data_param', full_name='caffe.LayerParameter.hdf5_data_param', index=25,
      number=112, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='hdf5_output_param', full_name='caffe.LayerParameter.hdf5_output_param', index=26,
      number=113, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='hinge_loss_param', full_name='caffe.LayerParameter.hinge_loss_param', index=27,
      number=114, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='image_data_param', full_name='caffe.LayerParameter.image_data_param', index=28,
      number=115, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='infogain_loss_param', full_name='caffe.LayerParameter.infogain_loss_param', index=29,
      number=116, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='inner_product_param', full_name='caffe.LayerParameter.inner_product_param', index=30,
      number=117, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='log_param', full_name='caffe.LayerParameter.log_param', index=31,
      number=134, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='lrn_param', full_name='caffe.LayerParameter.lrn_param', index=32,
      number=118, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='memory_data_param', full_name='caffe.LayerParameter.memory_data_param', index=33,
      number=119, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='mvn_param', full_name='caffe.LayerParameter.mvn_param', index=34,
      number=120, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='pooling_param', full_name='caffe.LayerParameter.pooling_param', index=35,
      number=121, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='power_param', full_name='caffe.LayerParameter.power_param', index=36,
      number=122, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='prelu_param', full_name='caffe.LayerParameter.prelu_param', index=37,
      number=131, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='python_param', full_name='caffe.LayerParameter.python_param', index=38,
      number=130, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='reduction_param', full_name='caffe.LayerParameter.reduction_param', index=39,
      number=136, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='relu_param', full_name='caffe.LayerParameter.relu_param', index=40,
      number=123, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='reshape_param', full_name='caffe.LayerParameter.reshape_param', index=41,
      number=133, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='seg_data_param', full_name='caffe.LayerParameter.seg_data_param', index=42,
      number=141, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='sigmoid_param', full_name='caffe.LayerParameter.sigmoid_param', index=43,
      number=124, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='softmax_param', full_name='caffe.LayerParameter.softmax_param', index=44,
      number=125, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='spp_param', full_name='caffe.LayerParameter.spp_param', index=45,
      number=132, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='slice_param', full_name='caffe.LayerParameter.slice_param', index=46,
      number=126, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='tanh_param', full_name='caffe.LayerParameter.tanh_param', index=47,
      number=127, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='threshold_param', full_name='caffe.LayerParameter.threshold_param', index=48,
      number=128, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='window_data_param', full_name='caffe.LayerParameter.window_data_param', index=49,
      number=129, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='video_data_param', full_name='caffe.LayerParameter.video_data_param', index=50,
      number=140, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='roi_pooling_param', full_name='caffe.LayerParameter.roi_pooling_param', index=51,
      number=150, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='scale_param', full_name='caffe.LayerParameter.scale_param', index=52,
      number=160, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='bias_param', full_name='caffe.LayerParameter.bias_param', index=53,
      number=161, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='batch_reduction_param', full_name='caffe.LayerParameter.batch_reduction_param', index=54,
      number=162, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=2591,
  serialized_end=5039,
  )
# Descriptor for the caffe.TransformationParameter message: data-augmentation
# settings (scale, mirror, crop_size, mean file/values, multi-scale cropping
# options, flow flag, stride, and upper size bounds).
# NOTE(review): this is protoc-generated code; the only hand edit is
# replacing the Python-2-only unicode("", "utf-8") default with the
# equivalent u"" literal so the module also imports under Python 3.3+.
# Prefer regenerating from caffe.proto for any other change.
_TRANSFORMATIONPARAMETER = _descriptor.Descriptor(
  name='TransformationParameter',
  full_name='caffe.TransformationParameter',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='scale', full_name='caffe.TransformationParameter.scale', index=0,
      number=1, type=2, cpp_type=6, label=1,
      has_default_value=True, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='mirror', full_name='caffe.TransformationParameter.mirror', index=1,
      number=2, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='crop_size', full_name='caffe.TransformationParameter.crop_size', index=2,
      number=3, type=13, cpp_type=3, label=1,
      has_default_value=True, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='mean_file', full_name='caffe.TransformationParameter.mean_file', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=u"",  # was unicode("", "utf-8"); Py2/Py3-compatible
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='mean_value', full_name='caffe.TransformationParameter.mean_value', index=4,
      number=5, type=2, cpp_type=6, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='force_color', full_name='caffe.TransformationParameter.force_color', index=5,
      number=6, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='force_gray', full_name='caffe.TransformationParameter.force_gray', index=6,
      number=7, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='fix_crop', full_name='caffe.TransformationParameter.fix_crop', index=7,
      number=10, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='more_fix_crop', full_name='caffe.TransformationParameter.more_fix_crop', index=8,
      number=15, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='multi_scale', full_name='caffe.TransformationParameter.multi_scale', index=9,
      number=11, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='scale_ratios', full_name='caffe.TransformationParameter.scale_ratios', index=10,
      number=12, type=2, cpp_type=6, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='max_distort', full_name='caffe.TransformationParameter.max_distort', index=11,
      number=13, type=5, cpp_type=1, label=1,
      has_default_value=True, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='is_flow', full_name='caffe.TransformationParameter.is_flow', index=12,
      number=14, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='original_image', full_name='caffe.TransformationParameter.original_image', index=13,
      number=20, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='stride', full_name='caffe.TransformationParameter.stride', index=14,
      number=16, type=5, cpp_type=1, label=1,
      has_default_value=True, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='upper_size', full_name='caffe.TransformationParameter.upper_size', index=15,
      number=17, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='upper_height', full_name='caffe.TransformationParameter.upper_height', index=16,
      number=18, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='upper_width', full_name='caffe.TransformationParameter.upper_width', index=17,
      number=19, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=5042,
  serialized_end=5490,
  )
# Auto-generated protobuf Descriptor for the `caffe.LossParameter` message.
# NOTE: protoc output -- do not edit by hand; regenerate from caffe.proto.
# Fields: ignore_label (field 1, optional int32, no default) and
# normalize (field 2, optional bool, default True).
# serialized_start/serialized_end index into the serialized
# FileDescriptorProto held by DESCRIPTOR.
_LOSSPARAMETER = _descriptor.Descriptor(
  name='LossParameter',
  full_name='caffe.LossParameter',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      # type=5 is TYPE_INT32, label=1 is LABEL_OPTIONAL.
      name='ignore_label', full_name='caffe.LossParameter.ignore_label', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      # type=8 is TYPE_BOOL; defaults to True when the field is unset.
      name='normalize', full_name='caffe.LossParameter.normalize', index=1,
      number=2, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=True,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=5492,
  serialized_end=5554,
)
# Auto-generated protobuf Descriptor for the `caffe.AccuracyParameter`
# message (protoc output -- regenerate from caffe.proto, do not hand-edit).
# Fields: top_k (field 1, uint32, default 1), axis (field 2, int32,
# default 1), ignore_label (field 3, int32, no default).
_ACCURACYPARAMETER = _descriptor.Descriptor(
  name='AccuracyParameter',
  full_name='caffe.AccuracyParameter',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      # type=13 is TYPE_UINT32.
      name='top_k', full_name='caffe.AccuracyParameter.top_k', index=0,
      number=1, type=13, cpp_type=3, label=1,
      has_default_value=True, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      # type=5 is TYPE_INT32.
      name='axis', full_name='caffe.AccuracyParameter.axis', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=True, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='ignore_label', full_name='caffe.AccuracyParameter.ignore_label', index=2,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=5556,
  serialized_end=5632,
)
# Auto-generated protobuf Descriptor for the `caffe.ArgMaxParameter`
# message (protoc output -- regenerate from caffe.proto, do not hand-edit).
# Fields: out_max_val (field 1, bool, default False) and
# top_k (field 2, uint32, default 1).
_ARGMAXPARAMETER = _descriptor.Descriptor(
  name='ArgMaxParameter',
  full_name='caffe.ArgMaxParameter',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      # type=8 is TYPE_BOOL.
      name='out_max_val', full_name='caffe.ArgMaxParameter.out_max_val', index=0,
      number=1, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      # type=13 is TYPE_UINT32.
      name='top_k', full_name='caffe.ArgMaxParameter.top_k', index=1,
      number=2, type=13, cpp_type=3, label=1,
      has_default_value=True, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=5634,
  serialized_end=5697,
)
# Auto-generated protobuf Descriptor for the `caffe.BNParameter` message
# (batch-norm layer options; protoc output -- do not edit by hand).
# Fields: slope_filler / bias_filler (message-typed FillerParameter,
# presumably -- the message_type link is patched in later by generated
# code outside this span), momentum (float, default 0.9), eps (float,
# default 1e-05), frozen (bool, default False), engine (enum, default 0).
# The nested Engine enum descriptor `_BNPARAMETER_ENGINE` is defined
# earlier in this file and registered via enum_types below.
_BNPARAMETER = _descriptor.Descriptor(
  name='BNParameter',
  full_name='caffe.BNParameter',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      # type=11 is TYPE_MESSAGE; message_type=None here is resolved
      # after all descriptors are constructed.
      name='slope_filler', full_name='caffe.BNParameter.slope_filler', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='bias_filler', full_name='caffe.BNParameter.bias_filler', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      # type=2 is TYPE_FLOAT.
      name='momentum', full_name='caffe.BNParameter.momentum', index=2,
      number=3, type=2, cpp_type=6, label=1,
      has_default_value=True, default_value=0.9,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='eps', full_name='caffe.BNParameter.eps', index=3,
      number=4, type=2, cpp_type=6, label=1,
      has_default_value=True, default_value=1e-05,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='frozen', full_name='caffe.BNParameter.frozen', index=4,
      number=5, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      # type=14 is TYPE_ENUM; default_value=0 selects the first enum value.
      name='engine', full_name='caffe.BNParameter.engine', index=5,
      number=6, type=14, cpp_type=8, label=1,
      has_default_value=True, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _BNPARAMETER_ENGINE,
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=5700,
  serialized_end=5967,
)
# Auto-generated protobuf Descriptor for the `caffe.ConcatParameter`
# message (protoc output -- regenerate from caffe.proto, do not hand-edit).
# Fields: axis (field number 2, int32, default 1) and concat_dim
# (field number 1, uint32, default 1). Note the wire field numbers are
# not in declaration-index order here.
_CONCATPARAMETER = _descriptor.Descriptor(
  name='ConcatParameter',
  full_name='caffe.ConcatParameter',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      # type=5 is TYPE_INT32.
      name='axis', full_name='caffe.ConcatParameter.axis', index=0,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=True, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      # type=13 is TYPE_UINT32.
      name='concat_dim', full_name='caffe.ConcatParameter.concat_dim', index=1,
      number=1, type=13, cpp_type=3, label=1,
      has_default_value=True, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=5969,
  serialized_end=6026,
)
# Auto-generated protobuf Descriptor for the
# `caffe.ContrastiveLossParameter` message (protoc output -- regenerate
# from caffe.proto, do not hand-edit).
# Fields: margin (field 1, float, default 1) and
# legacy_version (field 2, bool, default False).
_CONTRASTIVELOSSPARAMETER = _descriptor.Descriptor(
  name='ContrastiveLossParameter',
  full_name='caffe.ContrastiveLossParameter',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      # type=2 is TYPE_FLOAT.
      name='margin', full_name='caffe.ContrastiveLossParameter.margin', index=0,
      number=1, type=2, cpp_type=6, label=1,
      has_default_value=True, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      # type=8 is TYPE_BOOL.
      name='legacy_version', full_name='caffe.ContrastiveLossParameter.legacy_version', index=1,
      number=2, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=6028,
  serialized_end=6104,
)
_CONVOLUTIONPARAMETER = _descriptor.Descriptor(
name='ConvolutionParameter',
full_name='caffe.ConvolutionParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='num_output', full_name='caffe.ConvolutionParameter.num_output', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bias_term', full_name='caffe.ConvolutionParameter.bias_term', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pad', full_name='caffe.ConvolutionParameter.pad', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pad_h', full_name='caffe.ConvolutionParameter.pad_h', index=3,
number=9, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pad_w', full_name='caffe.ConvolutionParameter.pad_w', index=4,
number=10, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='kernel_size', full_name='caffe.ConvolutionParameter.kernel_size', index=5,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='kernel_h', full_name='caffe.ConvolutionParameter.kernel_h', index=6,
number=11, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='kernel_w', full_name='caffe.ConvolutionParameter.kernel_w', index=7,
number=12, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='group', full_name='caffe.ConvolutionParameter.group', index=8,
number=5, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stride', full_name='caffe.ConvolutionParameter.stride', index=9,
number=6, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stride_h', full_name='caffe.ConvolutionParameter.stride_h', index=10,
number=13, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stride_w', full_name='caffe.ConvolutionParameter.stride_w', index=11,
number=14, type=
gitextract_atvvsebl/ ├── Images_for_readme/ │ └── README.md ├── LICENSE ├── README.md ├── average_scores.py ├── dataset.py ├── main.py ├── model_zoo/ │ ├── LICENSE │ ├── README.md │ ├── __init__.py │ ├── bninception/ │ │ ├── __init__.py │ │ ├── bn_inception.yaml │ │ ├── caffe_pb2.py │ │ ├── inceptionv3.yaml │ │ ├── layer_factory.py │ │ ├── parse_caffe.py │ │ └── pytorch_load.py │ ├── inceptionresnetv2/ │ │ ├── __init__.py │ │ ├── pytorch_load.py │ │ ├── tensorflow_dump.py │ │ └── torch_load.lua │ ├── inceptionv4/ │ │ ├── __init__.py │ │ ├── pytorch_load.py │ │ ├── tensorflow_dump.py │ │ └── torch_load.lua │ └── models/ │ ├── ._.DS_Store │ ├── .github/ │ │ └── ISSUE_TEMPLATE.md │ ├── .gitignore │ ├── .gitmodules │ ├── AUTHORS │ ├── CONTRIBUTING.md │ ├── LICENSE │ ├── README.md │ ├── WORKSPACE │ ├── autoencoder/ │ │ ├── AdditiveGaussianNoiseAutoencoderRunner.py │ │ ├── AutoencoderRunner.py │ │ ├── MaskingNoiseAutoencoderRunner.py │ │ ├── Utils.py │ │ ├── VariationalAutoencoderRunner.py │ │ ├── __init__.py │ │ └── autoencoder_models/ │ │ ├── Autoencoder.py │ │ ├── DenoisingAutoencoder.py │ │ ├── VariationalAutoencoder.py │ │ └── __init__.py │ ├── compression/ │ │ ├── README.md │ │ ├── decoder.py │ │ ├── encoder.py │ │ └── msssim.py │ ├── differential_privacy/ │ │ ├── README.md │ │ ├── __init__.py │ │ ├── dp_sgd/ │ │ │ ├── README.md │ │ │ ├── dp_mnist/ │ │ │ │ ├── BUILD │ │ │ │ └── dp_mnist.py │ │ │ ├── dp_optimizer/ │ │ │ │ ├── BUILD │ │ │ │ ├── dp_optimizer.py │ │ │ │ ├── dp_pca.py │ │ │ │ ├── sanitizer.py │ │ │ │ └── utils.py │ │ │ └── per_example_gradients/ │ │ │ ├── BUILD │ │ │ └── per_example_gradients.py │ │ ├── multiple_teachers/ │ │ │ ├── BUILD │ │ │ ├── README.md │ │ │ ├── aggregation.py │ │ │ ├── analysis.py │ │ │ ├── deep_cnn.py │ │ │ ├── input.py │ │ │ ├── metrics.py │ │ │ ├── train_student.py │ │ │ ├── train_student_mnist_250_lap_20_count_50_epochs_600.sh │ │ │ ├── train_teachers.py │ │ │ └── utils.py │ │ └── privacy_accountant/ │ │ ├── python/ │ │ │ ├── 
BUILD │ │ │ └── gaussian_moments.py │ │ └── tf/ │ │ ├── BUILD │ │ └── accountant.py │ ├── im2txt/ │ │ ├── .gitignore │ │ ├── README.md │ │ ├── WORKSPACE │ │ └── im2txt/ │ │ ├── BUILD │ │ ├── configuration.py │ │ ├── data/ │ │ │ ├── build_mscoco_data.py │ │ │ └── download_and_preprocess_mscoco.sh │ │ ├── evaluate.py │ │ ├── inference_utils/ │ │ │ ├── BUILD │ │ │ ├── caption_generator.py │ │ │ ├── caption_generator_test.py │ │ │ ├── inference_wrapper_base.py │ │ │ └── vocabulary.py │ │ ├── inference_wrapper.py │ │ ├── ops/ │ │ │ ├── BUILD │ │ │ ├── image_embedding.py │ │ │ ├── image_embedding_test.py │ │ │ ├── image_processing.py │ │ │ └── inputs.py │ │ ├── run_inference.py │ │ ├── show_and_tell_model.py │ │ ├── show_and_tell_model_test.py │ │ └── train.py │ ├── inception/ │ │ ├── .gitignore │ │ ├── README.md │ │ ├── WORKSPACE │ │ └── inception/ │ │ ├── BUILD │ │ ├── data/ │ │ │ ├── build_image_data.py │ │ │ ├── build_imagenet_data.py │ │ │ ├── download_and_preprocess_flowers.sh │ │ │ ├── download_and_preprocess_flowers_mac.sh │ │ │ ├── download_and_preprocess_imagenet.sh │ │ │ ├── download_imagenet.sh │ │ │ ├── imagenet_2012_validation_synset_labels.txt │ │ │ ├── imagenet_lsvrc_2015_synsets.txt │ │ │ ├── imagenet_metadata.txt │ │ │ ├── preprocess_imagenet_validation_data.py │ │ │ └── process_bounding_boxes.py │ │ ├── dataset.py │ │ ├── flowers_data.py │ │ ├── flowers_eval.py │ │ ├── flowers_train.py │ │ ├── image_processing.py │ │ ├── imagenet_data.py │ │ ├── imagenet_distributed_train.py │ │ ├── imagenet_eval.py │ │ ├── imagenet_train.py │ │ ├── inception_distributed_train.py │ │ ├── inception_eval.py │ │ ├── inception_model.py │ │ ├── inception_train.py │ │ └── slim/ │ │ ├── BUILD │ │ ├── README.md │ │ ├── collections_test.py │ │ ├── inception_model.py │ │ ├── inception_test.py │ │ ├── losses.py │ │ ├── losses_test.py │ │ ├── ops.py │ │ ├── ops_test.py │ │ ├── scopes.py │ │ ├── scopes_test.py │ │ ├── slim.py │ │ ├── variables.py │ │ └── variables_test.py │ ├── 
lm_1b/ │ │ ├── BUILD │ │ ├── README.md │ │ ├── data_utils.py │ │ └── lm_1b_eval.py │ ├── namignizer/ │ │ ├── .gitignore │ │ ├── README.md │ │ ├── data_utils.py │ │ ├── model.py │ │ └── names.py │ ├── neural_gpu/ │ │ ├── README.md │ │ ├── data_utils.py │ │ ├── neural_gpu.py │ │ └── neural_gpu_trainer.py │ ├── neural_programmer/ │ │ ├── README.md │ │ ├── data_utils.py │ │ ├── model.py │ │ ├── neural_programmer.py │ │ ├── nn_utils.py │ │ ├── parameters.py │ │ └── wiki_data.py │ ├── resnet/ │ │ ├── BUILD │ │ ├── README.md │ │ ├── cifar_input.py │ │ ├── resnet_main.py │ │ └── resnet_model.py │ ├── slim/ │ │ ├── ._.DS_Store │ │ ├── BUILD │ │ ├── README.md │ │ ├── datasets/ │ │ │ ├── __init__.py │ │ │ ├── cifar10.py │ │ │ ├── dataset_factory.py │ │ │ ├── dataset_utils.py │ │ │ ├── download_and_convert_cifar10.py │ │ │ ├── download_and_convert_flowers.py │ │ │ ├── download_and_convert_mnist.py │ │ │ ├── flowers.py │ │ │ ├── imagenet.py │ │ │ └── mnist.py │ │ ├── deployment/ │ │ │ ├── __init__.py │ │ │ ├── model_deploy.py │ │ │ └── model_deploy_test.py │ │ ├── download_and_convert_data.py │ │ ├── eval_image_classifier.py │ │ ├── nets/ │ │ │ ├── __init__.py │ │ │ ├── alexnet.py │ │ │ ├── alexnet_test.py │ │ │ ├── cifarnet.py │ │ │ ├── inception.py │ │ │ ├── inception_resnet_v2.py │ │ │ ├── inception_resnet_v2_test.py │ │ │ ├── inception_utils.py │ │ │ ├── inception_v1.py │ │ │ ├── inception_v1_test.py │ │ │ ├── inception_v2.py │ │ │ ├── inception_v2_test.py │ │ │ ├── inception_v3.py │ │ │ ├── inception_v3_test.py │ │ │ ├── inception_v4.py │ │ │ ├── inception_v4_test.py │ │ │ ├── lenet.py │ │ │ ├── nets_factory.py │ │ │ ├── nets_factory_test.py │ │ │ ├── overfeat.py │ │ │ ├── overfeat_test.py │ │ │ ├── resnet_utils.py │ │ │ ├── resnet_v1.py │ │ │ ├── resnet_v1_test.py │ │ │ ├── resnet_v2.py │ │ │ ├── resnet_v2_test.py │ │ │ ├── vgg.py │ │ │ └── vgg_test.py │ │ ├── preprocessing/ │ │ │ ├── __init__.py │ │ │ ├── cifarnet_preprocessing.py │ │ │ ├── inception_preprocessing.py │ │ 
│ ├── lenet_preprocessing.py │ │ │ ├── preprocessing_factory.py │ │ │ └── vgg_preprocessing.py │ │ ├── scripts/ │ │ │ ├── finetune_inception_v1_on_flowers.sh │ │ │ ├── finetune_inception_v3_on_flowers.sh │ │ │ ├── train_cifarnet_on_cifar10.sh │ │ │ └── train_lenet_on_mnist.sh │ │ ├── slim_walkthough.ipynb │ │ └── train_image_classifier.py │ ├── street/ │ │ ├── README.md │ │ ├── cc/ │ │ │ └── rnn_ops.cc │ │ ├── g3doc/ │ │ │ └── vgslspecs.md │ │ ├── python/ │ │ │ ├── decoder.py │ │ │ ├── decoder_test.py │ │ │ ├── errorcounter.py │ │ │ ├── errorcounter_test.py │ │ │ ├── nn_ops.py │ │ │ ├── shapes.py │ │ │ ├── shapes_test.py │ │ │ ├── vgsl_eval.py │ │ │ ├── vgsl_input.py │ │ │ ├── vgsl_model.py │ │ │ ├── vgsl_model_test.py │ │ │ ├── vgsl_train.py │ │ │ ├── vgslspecs.py │ │ │ └── vgslspecs_test.py │ │ └── testdata/ │ │ ├── arial-32-tiny │ │ ├── arial.charset_size=105.txt │ │ ├── charset_size=134.txt │ │ ├── charset_size_10.txt │ │ ├── mnist-tiny │ │ ├── numbers-16-tiny │ │ └── numbers.charset_size=12.txt │ ├── swivel/ │ │ ├── .gitignore │ │ ├── README.md │ │ ├── analogy.cc │ │ ├── eval.mk │ │ ├── fastprep.cc │ │ ├── fastprep.mk │ │ ├── glove_to_shards.py │ │ ├── nearest.py │ │ ├── prep.py │ │ ├── swivel.py │ │ ├── text2bin.py │ │ ├── vecs.py │ │ └── wordsim.py │ ├── syntaxnet/ │ │ ├── .gitignore │ │ ├── Dockerfile │ │ ├── README.md │ │ ├── WORKSPACE │ │ ├── syntaxnet/ │ │ │ ├── BUILD │ │ │ ├── affix.cc │ │ │ ├── affix.h │ │ │ ├── arc_standard_transitions.cc │ │ │ ├── arc_standard_transitions_test.cc │ │ │ ├── base.h │ │ │ ├── beam_reader_ops.cc │ │ │ ├── beam_reader_ops_test.py │ │ │ ├── binary_segment_state.cc │ │ │ ├── binary_segment_state.h │ │ │ ├── binary_segment_state_test.cc │ │ │ ├── binary_segment_transitions.cc │ │ │ ├── binary_segment_transitions_test.cc │ │ │ ├── char_properties.cc │ │ │ ├── char_properties.h │ │ │ ├── char_properties_test.cc │ │ │ ├── conll2tree.py │ │ │ ├── context.pbtxt │ │ │ ├── demo.sh │ │ │ ├── dictionary.proto │ │ │ ├── 
document_filters.cc │ │ │ ├── document_format.cc │ │ │ ├── document_format.h │ │ │ ├── embedding_feature_extractor.cc │ │ │ ├── embedding_feature_extractor.h │ │ │ ├── feature_extractor.cc │ │ │ ├── feature_extractor.h │ │ │ ├── feature_extractor.proto │ │ │ ├── feature_types.h │ │ │ ├── fml_parser.cc │ │ │ ├── fml_parser.h │ │ │ ├── graph_builder.py │ │ │ ├── graph_builder_test.py │ │ │ ├── kbest_syntax.proto │ │ │ ├── lexicon_builder.cc │ │ │ ├── lexicon_builder_test.py │ │ │ ├── load_parser_ops.py │ │ │ ├── models/ │ │ │ │ ├── parsey_mcparseface/ │ │ │ │ │ ├── context.pbtxt │ │ │ │ │ ├── label-map │ │ │ │ │ ├── parser-params │ │ │ │ │ ├── prefix-table │ │ │ │ │ ├── suffix-table │ │ │ │ │ ├── tag-map │ │ │ │ │ ├── tagger-params │ │ │ │ │ └── word-map │ │ │ │ └── parsey_universal/ │ │ │ │ ├── context-tokenize-zh.pbtxt │ │ │ │ ├── context.pbtxt │ │ │ │ ├── parse.sh │ │ │ │ ├── tokenize.sh │ │ │ │ └── tokenize_zh.sh │ │ │ ├── morpher_transitions.cc │ │ │ ├── morphology_label_set.cc │ │ │ ├── morphology_label_set.h │ │ │ ├── morphology_label_set_test.cc │ │ │ ├── ops/ │ │ │ │ └── parser_ops.cc │ │ │ ├── parser_eval.py │ │ │ ├── parser_features.cc │ │ │ ├── parser_features.h │ │ │ ├── parser_features_test.cc │ │ │ ├── parser_state.cc │ │ │ ├── parser_state.h │ │ │ ├── parser_trainer.py │ │ │ ├── parser_trainer_test.sh │ │ │ ├── parser_transitions.cc │ │ │ ├── parser_transitions.h │ │ │ ├── populate_test_inputs.cc │ │ │ ├── populate_test_inputs.h │ │ │ ├── proto_io.h │ │ │ ├── reader_ops.cc │ │ │ ├── reader_ops_test.py │ │ │ ├── registry.cc │ │ │ ├── registry.h │ │ │ ├── segmenter_utils.cc │ │ │ ├── segmenter_utils.h │ │ │ ├── segmenter_utils_test.cc │ │ │ ├── sentence.proto │ │ │ ├── sentence_batch.cc │ │ │ ├── sentence_batch.h │ │ │ ├── sentence_features.cc │ │ │ ├── sentence_features.h │ │ │ ├── sentence_features_test.cc │ │ │ ├── shared_store.cc │ │ │ ├── shared_store.h │ │ │ ├── shared_store_test.cc │ │ │ ├── sparse.proto │ │ │ ├── structured_graph_builder.py │ │ 
│ ├── syntaxnet.bzl │ │ │ ├── tagger_transitions.cc │ │ │ ├── tagger_transitions_test.cc │ │ │ ├── task_context.cc │ │ │ ├── task_context.h │ │ │ ├── task_spec.proto │ │ │ ├── term_frequency_map.cc │ │ │ ├── term_frequency_map.h │ │ │ ├── test_main.cc │ │ │ ├── testdata/ │ │ │ │ ├── context.pbtxt │ │ │ │ ├── document │ │ │ │ └── mini-training-set │ │ │ ├── text_formats.cc │ │ │ ├── text_formats_test.py │ │ │ ├── unpack_sparse_features.cc │ │ │ ├── utils.cc │ │ │ ├── utils.h │ │ │ ├── workspace.cc │ │ │ └── workspace.h │ │ ├── third_party/ │ │ │ └── utf/ │ │ │ ├── BUILD │ │ │ ├── README │ │ │ ├── rune.c │ │ │ ├── runestrcat.c │ │ │ ├── runestrchr.c │ │ │ ├── runestrcmp.c │ │ │ ├── runestrcpy.c │ │ │ ├── runestrdup.c │ │ │ ├── runestrecpy.c │ │ │ ├── runestrlen.c │ │ │ ├── runestrncat.c │ │ │ ├── runestrncmp.c │ │ │ ├── runestrncpy.c │ │ │ ├── runestrrchr.c │ │ │ ├── runestrstr.c │ │ │ ├── runetype.c │ │ │ ├── runetypebody.c │ │ │ ├── utf.h │ │ │ ├── utfdef.h │ │ │ ├── utfecpy.c │ │ │ ├── utflen.c │ │ │ ├── utfnlen.c │ │ │ ├── utfrrune.c │ │ │ ├── utfrune.c │ │ │ └── utfutf.c │ │ ├── tools/ │ │ │ └── bazel.rc │ │ ├── universal.md │ │ └── util/ │ │ └── utf8/ │ │ ├── BUILD │ │ ├── gtest_main.cc │ │ ├── unicodetext.cc │ │ ├── unicodetext.h │ │ ├── unicodetext_main.cc │ │ ├── unicodetext_unittest.cc │ │ ├── unilib.cc │ │ ├── unilib.h │ │ └── unilib_utf8_utils.h │ ├── textsum/ │ │ ├── BUILD │ │ ├── README.md │ │ ├── batch_reader.py │ │ ├── beam_search.py │ │ ├── data/ │ │ │ ├── data │ │ │ └── vocab │ │ ├── data.py │ │ ├── data_convert_example.py │ │ ├── seq2seq_attention.py │ │ ├── seq2seq_attention_decode.py │ │ ├── seq2seq_attention_model.py │ │ └── seq2seq_lib.py │ ├── transformer/ │ │ ├── README.md │ │ ├── cluttered_mnist.py │ │ ├── data/ │ │ │ └── README.md │ │ ├── example.py │ │ ├── spatial_transformer.py │ │ └── tf_utils.py │ └── video_prediction/ │ ├── README.md │ ├── download_data.sh │ ├── lstm_ops.py │ ├── prediction_input.py │ ├── prediction_model.py │ ├── 
prediction_train.py │ └── push_datafiles.txt ├── models.py ├── optical_flow/ │ ├── gpu_main.cpp │ ├── gpu_makefile │ ├── main.cpp │ └── makefile ├── opts.py ├── process_dataset.py ├── test_models.py └── transforms.py
Showing preview only (206K chars total). Download the full file or copy to clipboard to get everything.
SYMBOL INDEX (2407 symbols across 290 files)
FILE: average_scores.py
function valid (line 8) | def valid():
FILE: dataset.py
class VideoRecord (line 15) | class VideoRecord(object):
method __init__ (line 16) | def __init__(self, row):
method path (line 20) | def path(self):
method num_frames (line 24) | def num_frames(self):
method label (line 28) | def label(self):
class TwoStreamDataSet (line 35) | class TwoStreamDataSet(data.Dataset):
method __init__ (line 36) | def __init__(self, root_path, list_file, num_segments=3,
method _load_image (line 53) | def _load_image(self, directory, idx):
method _parse_list (line 65) | def _parse_list(self):
method _get_val_indices (line 85) | def _get_val_indices(self, record):
method __getitem__ (line 98) | def __getitem__(self, index):
method get (line 122) | def get(self, record, indice):
method __len__ (line 141) | def __len__(self):
class TSNDataSet (line 145) | class TSNDataSet(data.Dataset):
method __init__ (line 146) | def __init__(self, root_path, list_file,
method _load_image (line 163) | def _load_image(self, directory, idx):
method _parse_list (line 175) | def _parse_list(self):
method _sample_indices (line 195) | def _sample_indices(self, record):
method _get_val_indices (line 216) | def _get_val_indices(self, record):
method _get_test_indices (line 228) | def _get_test_indices(self, record):
method __getitem__ (line 240) | def __getitem__(self, index):
method get (line 262) | def get(self, record, indices):
method __len__ (line 281) | def __len__(self):
class C3DDataSet (line 284) | class C3DDataSet(data.Dataset):
method __init__ (line 285) | def __init__(self, root_path, list_file,
method _load_image (line 302) | def _load_image(self, directory, idx):
method _parse_list (line 314) | def _parse_list(self):
method _sample_indices (line 334) | def _sample_indices(self, record):
method _get_val_indices (line 355) | def _get_val_indices(self, record):
method _get_test_indices (line 367) | def _get_test_indices(self, record):
method __getitem__ (line 377) | def __getitem__(self, index):
method get (line 403) | def get(self, record, indices):
method __len__ (line 421) | def __len__(self):
FILE: main.py
function add_summary_value (line 28) | def add_summary_value(writer, key, value, iteration):
function return_something_path (line 33) | def return_something_path(modality):
function main (line 56) | def main():
function train (line 259) | def train(train_loader, model, criterion, optimizer, epoch, vidnums, sum...
function validate (line 341) | def validate(val_loader, model, criterion, iter, summary_w):
function save_checkpoint (line 401) | def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
class AverageMeter (line 406) | class AverageMeter(object):
method __init__ (line 408) | def __init__(self):
method reset (line 411) | def reset(self):
method update (line 417) | def update(self, val, n=1):
function adjust_learning_rate (line 424) | def adjust_learning_rate(optimizer, epoch, lr_steps, factor, with_police...
function accuracy (line 439) | def accuracy(output, target, topk=(1,)):
function check_rootfolders (line 454) | def check_rootfolders(trainid):
FILE: model_zoo/bninception/caffe_pb2.py
class BlobShape (line 5246) | class BlobShape(_message.Message):
class BlobProto (line 5252) | class BlobProto(_message.Message):
class BlobProtoVector (line 5258) | class BlobProtoVector(_message.Message):
class Datum (line 5264) | class Datum(_message.Message):
class FillerParameter (line 5270) | class FillerParameter(_message.Message):
class NetParameter (line 5276) | class NetParameter(_message.Message):
class SolverParameter (line 5282) | class SolverParameter(_message.Message):
class SolverState (line 5288) | class SolverState(_message.Message):
class NetState (line 5294) | class NetState(_message.Message):
class NetStateRule (line 5300) | class NetStateRule(_message.Message):
class ParamSpec (line 5306) | class ParamSpec(_message.Message):
class LayerParameter (line 5312) | class LayerParameter(_message.Message):
class TransformationParameter (line 5318) | class TransformationParameter(_message.Message):
class LossParameter (line 5324) | class LossParameter(_message.Message):
class AccuracyParameter (line 5330) | class AccuracyParameter(_message.Message):
class ArgMaxParameter (line 5336) | class ArgMaxParameter(_message.Message):
class BNParameter (line 5342) | class BNParameter(_message.Message):
class ConcatParameter (line 5348) | class ConcatParameter(_message.Message):
class ContrastiveLossParameter (line 5354) | class ContrastiveLossParameter(_message.Message):
class ConvolutionParameter (line 5360) | class ConvolutionParameter(_message.Message):
class DataParameter (line 5366) | class DataParameter(_message.Message):
class DropoutParameter (line 5372) | class DropoutParameter(_message.Message):
class DummyDataParameter (line 5378) | class DummyDataParameter(_message.Message):
class EltwiseParameter (line 5384) | class EltwiseParameter(_message.Message):
class ExpParameter (line 5390) | class ExpParameter(_message.Message):
class FlattenParameter (line 5396) | class FlattenParameter(_message.Message):
class HDF5DataParameter (line 5402) | class HDF5DataParameter(_message.Message):
class HDF5OutputParameter (line 5408) | class HDF5OutputParameter(_message.Message):
class HingeLossParameter (line 5414) | class HingeLossParameter(_message.Message):
class ImageDataParameter (line 5420) | class ImageDataParameter(_message.Message):
class VideoDataParameter (line 5426) | class VideoDataParameter(_message.Message):
class InfogainLossParameter (line 5432) | class InfogainLossParameter(_message.Message):
class InnerProductParameter (line 5438) | class InnerProductParameter(_message.Message):
class LogParameter (line 5444) | class LogParameter(_message.Message):
class LRNParameter (line 5450) | class LRNParameter(_message.Message):
class MemoryDataParameter (line 5456) | class MemoryDataParameter(_message.Message):
class MVNParameter (line 5462) | class MVNParameter(_message.Message):
class PoolingParameter (line 5468) | class PoolingParameter(_message.Message):
class PowerParameter (line 5474) | class PowerParameter(_message.Message):
class PythonParameter (line 5480) | class PythonParameter(_message.Message):
class ReductionParameter (line 5486) | class ReductionParameter(_message.Message):
class ReLUParameter (line 5492) | class ReLUParameter(_message.Message):
class ReshapeParameter (line 5498) | class ReshapeParameter(_message.Message):
class SegDataParameter (line 5504) | class SegDataParameter(_message.Message):
class SigmoidParameter (line 5510) | class SigmoidParameter(_message.Message):
class SliceParameter (line 5516) | class SliceParameter(_message.Message):
class SoftmaxParameter (line 5522) | class SoftmaxParameter(_message.Message):
class TanHParameter (line 5528) | class TanHParameter(_message.Message):
class ThresholdParameter (line 5534) | class ThresholdParameter(_message.Message):
class WindowDataParameter (line 5540) | class WindowDataParameter(_message.Message):
class SPPParameter (line 5546) | class SPPParameter(_message.Message):
class ROIPoolingParameter (line 5552) | class ROIPoolingParameter(_message.Message):
class V1LayerParameter (line 5558) | class V1LayerParameter(_message.Message):
class V0LayerParameter (line 5564) | class V0LayerParameter(_message.Message):
class PReLUParameter (line 5570) | class PReLUParameter(_message.Message):
class ScaleParameter (line 5576) | class ScaleParameter(_message.Message):
class BiasParameter (line 5582) | class BiasParameter(_message.Message):
class BatchReductionParameter (line 5588) | class BatchReductionParameter(_message.Message):
class MemoryOptimizationParameter (line 5594) | class MemoryOptimizationParameter(_message.Message):
FILE: model_zoo/bninception/layer_factory.py
function parse_expr (line 8) | def parse_expr(expr):
function get_basic_layer (line 13) | def get_basic_layer(info, channels=None, conv_bias=False):
function build_conv (line 25) | def build_conv(attr, channels=None, conv_bias=False):
function build_pooling (line 42) | def build_pooling(attr, channels=None, conv_bias=False):
function build_relu (line 57) | def build_relu(attr, channels=None, conv_bias=False):
function build_bn (line 61) | def build_bn(attr, channels=None, conv_bias=False):
function build_linear (line 65) | def build_linear(attr, channels=None, conv_bias=False):
function build_dropout (line 69) | def build_dropout(attr, channels=None, conv_bias=False):
FILE: model_zoo/bninception/parse_caffe.py
class CaffeVendor (line 22) | class CaffeVendor(object):
method __init__ (line 23) | def __init__(self, net_name, weight_name, version=2):
method _parse_net (line 40) | def _parse_net(self, version):
method _parse_blob (line 60) | def _parse_blob(blob):
method _layer2dict (line 65) | def _layer2dict(self, layer, version):
method text_form (line 134) | def text_form(self):
method info (line 138) | def info(self):
method yaml (line 145) | def yaml(self):
method dump_weights (line 148) | def dump_weights(self, filename):
FILE: model_zoo/bninception/pytorch_load.py
class BNInception (line 8) | class BNInception(nn.Module):
method __init__ (line 9) | def __init__(self, model_path='model_zoo/bninception/bn_inception.yaml...
method forward (line 37) | def forward(self, input):
class InceptionV3 (line 64) | class InceptionV3(BNInception):
method __init__ (line 65) | def __init__(self, model_path='model_zoo/bninception/inceptionv3.yaml'...
FILE: model_zoo/inceptionresnetv2/pytorch_load.py
class BasicConv2d (line 11) | class BasicConv2d(nn.Module):
method __init__ (line 13) | def __init__(self, in_planes, out_planes, kernel_size, stride, padding...
method forward (line 19) | def forward(self, x):
class Mixed_5b (line 25) | class Mixed_5b(nn.Module):
method __init__ (line 27) | def __init__(self):
method forward (line 48) | def forward(self, x):
class Block35 (line 56) | class Block35(nn.Module):
method __init__ (line 58) | def __init__(self, scale=1.0):
method forward (line 79) | def forward(self, x):
class Mixed_6a (line 89) | class Mixed_6a(nn.Module):
method __init__ (line 91) | def __init__(self):
method forward (line 104) | def forward(self, x):
class Block17 (line 111) | class Block17(nn.Module):
method __init__ (line 113) | def __init__(self, scale=1.0):
method forward (line 129) | def forward(self, x):
class Mixed_7a (line 138) | class Mixed_7a(nn.Module):
method __init__ (line 140) | def __init__(self):
method forward (line 161) | def forward(self, x):
class Block8 (line 169) | class Block8(nn.Module):
method __init__ (line 171) | def __init__(self, scale=1.0, noReLU=False):
method forward (line 189) | def forward(self, x):
class InceptionResnetV2 (line 200) | class InceptionResnetV2(nn.Module):
method __init__ (line 202) | def __init__(self, num_classes=1001):
method forward (line 264) | def forward(self, x):
function inceptionresnetv2 (line 285) | def inceptionresnetv2(pretrained=True):
function load_conv2d (line 302) | def load_conv2d(state_dict, name_pth, name_tf):
function load_conv2d_nobn (line 312) | def load_conv2d_nobn(state_dict, name_pth, name_tf):
function load_linear (line 318) | def load_linear(state_dict, name_pth, name_tf):
function load_mixed_5b (line 324) | def load_mixed_5b(state_dict, name_pth, name_tf):
function load_block35 (line 333) | def load_block35(state_dict, name_pth, name_tf):
function load_mixed_6a (line 342) | def load_mixed_6a(state_dict, name_pth, name_tf):
function load_block17 (line 348) | def load_block17(state_dict, name_pth, name_tf):
function load_mixed_7a (line 355) | def load_mixed_7a(state_dict, name_pth, name_tf):
function load_block8 (line 364) | def load_block8(state_dict, name_pth, name_tf):
function load (line 373) | def load():
function test (line 408) | def test(model):
function test_conv2d (line 437) | def test_conv2d(module, name):
function test_conv2d_nobn (line 454) | def test_conv2d_nobn(module, name):
function test_mixed_5b (line 465) | def test_mixed_5b(module, name):
function test_block35 (line 474) | def test_block35(module, name):
function test_mixed_6a (line 483) | def test_mixed_6a(module, name):
function test_block17 (line 489) | def test_block17(module, name):
function test_mixed_7a (line 496) | def test_mixed_7a(module, name):
function test_block8 (line 505) | def test_block8(module, name):
FILE: model_zoo/inceptionresnetv2/tensorflow_dump.py
function make_padding (line 30) | def make_padding(padding_name, conv_shape):
function dump_conv2d (line 40) | def dump_conv2d(name='Conv2d_1a_3x3'):
function dump_conv2d_nobn (line 70) | def dump_conv2d_nobn(name='Conv2d_1x1'):
function dump_logits (line 90) | def dump_logits():
function dump_mixed_5b (line 132) | def dump_mixed_5b(name='Mixed_5b'):
function dump_block35 (line 141) | def dump_block35(name='Repeat/block35_1'):
function dump_mixed_6a (line 150) | def dump_mixed_6a(name='Mixed_6a'):
function dump_block17 (line 156) | def dump_block17(name='Repeat_1/block17_1'):
function dump_mixed_7a (line 163) | def dump_mixed_7a(name='Mixed_7a'):
function dump_block8 (line 172) | def dump_block8(name='Repeat_2/block8_1'):
FILE: model_zoo/inceptionv4/pytorch_load.py
class BasicConv2d (line 11) | class BasicConv2d(nn.Module):
method __init__ (line 13) | def __init__(self, in_planes, out_planes, kernel_size, stride, padding...
method forward (line 19) | def forward(self, x):
class Mixed_3a (line 25) | class Mixed_3a(nn.Module):
method __init__ (line 27) | def __init__(self):
method forward (line 32) | def forward(self, x):
class Mixed_4a (line 38) | class Mixed_4a(nn.Module):
method __init__ (line 40) | def __init__(self):
method forward (line 55) | def forward(self, x):
class Mixed_5a (line 61) | class Mixed_5a(nn.Module):
method __init__ (line 63) | def __init__(self):
method forward (line 68) | def forward(self, x):
class Inception_A (line 74) | class Inception_A(nn.Module):
method __init__ (line 76) | def __init__(self):
method forward (line 96) | def forward(self, x):
class Reduction_A (line 104) | class Reduction_A(nn.Module):
method __init__ (line 106) | def __init__(self):
method forward (line 118) | def forward(self, x):
class Inception_B (line 125) | class Inception_B(nn.Module):
method __init__ (line 127) | def __init__(self):
method forward (line 150) | def forward(self, x):
class Reduction_B (line 158) | class Reduction_B(nn.Module):
method __init__ (line 160) | def __init__(self):
method forward (line 177) | def forward(self, x):
class Inception_C (line 184) | class Inception_C(nn.Module):
method __init__ (line 186) | def __init__(self):
method forward (line 206) | def forward(self, x):
class InceptionV4 (line 226) | class InceptionV4(nn.Module):
method __init__ (line 228) | def __init__(self, num_classes=1001):
method forward (line 257) | def forward(self, x):
function inceptionv4 (line 263) | def inceptionv4(pretrained=True):
function load_conv2d (line 273) | def load_conv2d(state_dict, name_pth, name_tf):
function load_linear (line 283) | def load_linear(state_dict, name_pth, name_tf):
function load_mixed_4a_7a (line 289) | def load_mixed_4a_7a(state_dict, name_pth, name_tf):
function load_mixed_5 (line 297) | def load_mixed_5(state_dict, name_pth, name_tf):
function load_mixed_6 (line 306) | def load_mixed_6(state_dict, name_pth, name_tf):
function load_mixed_7 (line 318) | def load_mixed_7(state_dict, name_pth, name_tf):
function load (line 331) | def load():
function test (line 376) | def test(model):
function test_conv2d (line 393) | def test_conv2d(module, name):
function test_mixed_4a_7a (line 404) | def test_mixed_4a_7a(module, name):
FILE: model_zoo/inceptionv4/tensorflow_dump.py
function make_padding (line 27) | def make_padding(padding_name, conv_shape):
function dump_conv2d (line 37) | def dump_conv2d(name='Conv2d_1a_3x3'):
function dump_logits (line 71) | def dump_logits():
function dump_mixed_4a_7a (line 112) | def dump_mixed_4a_7a(name='Mixed_4a'):
function dump_mixed_5 (line 120) | def dump_mixed_5(name='Mixed_5b'):
function dump_mixed_6 (line 129) | def dump_mixed_6(name='Mixed_6b'):
function dump_mixed_7 (line 141) | def dump_mixed_7(name='Mixed_7b'):
FILE: model_zoo/models/autoencoder/AdditiveGaussianNoiseAutoencoderRunner.py
function standard_scale (line 11) | def standard_scale(X_train, X_test):
function get_random_block_from_data (line 17) | def get_random_block_from_data(data, batch_size):
FILE: model_zoo/models/autoencoder/AutoencoderRunner.py
function standard_scale (line 11) | def standard_scale(X_train, X_test):
function get_random_block_from_data (line 17) | def get_random_block_from_data(data, batch_size):
FILE: model_zoo/models/autoencoder/MaskingNoiseAutoencoderRunner.py
function standard_scale (line 11) | def standard_scale(X_train, X_test):
function get_random_block_from_data (line 17) | def get_random_block_from_data(data, batch_size):
FILE: model_zoo/models/autoencoder/Utils.py
function xavier_init (line 4) | def xavier_init(fan_in, fan_out, constant = 1):
FILE: model_zoo/models/autoencoder/VariationalAutoencoderRunner.py
function min_max_scale (line 13) | def min_max_scale(X_train, X_test):
function get_random_block_from_data (line 20) | def get_random_block_from_data(data, batch_size):
FILE: model_zoo/models/autoencoder/autoencoder_models/Autoencoder.py
class Autoencoder (line 5) | class Autoencoder(object):
method __init__ (line 7) | def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus...
method _initialize_weights (line 29) | def _initialize_weights(self):
method partial_fit (line 37) | def partial_fit(self, X):
method calc_total_cost (line 41) | def calc_total_cost(self, X):
method transform (line 44) | def transform(self, X):
method generate (line 47) | def generate(self, hidden = None):
method reconstruct (line 52) | def reconstruct(self, X):
method getWeights (line 55) | def getWeights(self):
method getBiases (line 58) | def getBiases(self):
FILE: model_zoo/models/autoencoder/autoencoder_models/DenoisingAutoencoder.py
class AdditiveGaussianNoiseAutoencoder (line 6) | class AdditiveGaussianNoiseAutoencoder(object):
method __init__ (line 7) | def __init__(self, n_input, n_hidden, transfer_function = tf.nn.softpl...
method _initialize_weights (line 32) | def _initialize_weights(self):
method partial_fit (line 40) | def partial_fit(self, X):
method calc_total_cost (line 46) | def calc_total_cost(self, X):
method transform (line 51) | def transform(self, X):
method generate (line 56) | def generate(self, hidden = None):
method reconstruct (line 61) | def reconstruct(self, X):
method getWeights (line 66) | def getWeights(self):
method getBiases (line 69) | def getBiases(self):
class MaskingNoiseAutoencoder (line 73) | class MaskingNoiseAutoencoder(object):
method __init__ (line 74) | def __init__(self, n_input, n_hidden, transfer_function = tf.nn.softpl...
method _initialize_weights (line 99) | def _initialize_weights(self):
method partial_fit (line 107) | def partial_fit(self, X):
method calc_total_cost (line 112) | def calc_total_cost(self, X):
method transform (line 115) | def transform(self, X):
method generate (line 118) | def generate(self, hidden = None):
method reconstruct (line 123) | def reconstruct(self, X):
method getWeights (line 126) | def getWeights(self):
method getBiases (line 129) | def getBiases(self):
FILE: model_zoo/models/autoencoder/autoencoder_models/VariationalAutoencoder.py
class VariationalAutoencoder (line 5) | class VariationalAutoencoder(object):
method __init__ (line 7) | def __init__(self, n_input, n_hidden, optimizer = tf.train.AdamOptimiz...
method _initialize_weights (line 37) | def _initialize_weights(self):
method partial_fit (line 47) | def partial_fit(self, X):
method calc_total_cost (line 51) | def calc_total_cost(self, X):
method transform (line 54) | def transform(self, X):
method generate (line 57) | def generate(self, hidden = None):
method reconstruct (line 62) | def reconstruct(self, X):
method getWeights (line 65) | def getWeights(self):
method getBiases (line 68) | def getBiases(self):
FILE: model_zoo/models/compression/decoder.py
function get_input_tensor_names (line 41) | def get_input_tensor_names():
function get_output_tensor_names (line 48) | def get_output_tensor_names():
function main (line 52) | def main(_):
FILE: model_zoo/models/compression/encoder.py
function get_output_tensor_names (line 43) | def get_output_tensor_names():
function main (line 50) | def main(_):
FILE: model_zoo/models/compression/msssim.py
function _FSpecialGauss (line 35) | def _FSpecialGauss(size, sigma):
function _SSIMForMultiScale (line 49) | def _SSIMForMultiScale(img1, img2, max_val=255, filter_size=11,
function MultiScaleSSIM (line 125) | def MultiScaleSSIM(img1, img2, max_val=255, filter_size=11, filter_sigma...
function main (line 187) | def main(_):
FILE: model_zoo/models/differential_privacy/dp_sgd/dp_mnist/dp_mnist.py
function MnistInput (line 129) | def MnistInput(mnist_data_file, batch_size, randomize):
function Eval (line 167) | def Eval(mnist_data_file, network_parameters, num_testing_images,
function Train (line 223) | def Train(mnist_train_file, mnist_test_file, network_parameters, num_steps,
function main (line 436) | def main(_):
FILE: model_zoo/models/differential_privacy/dp_sgd/dp_optimizer/dp_optimizer.py
class DPGradientDescentOptimizer (line 26) | class DPGradientDescentOptimizer(tf.train.GradientDescentOptimizer):
method __init__ (line 30) | def __init__(self, learning_rate, eps_delta, sanitizer,
method compute_sanitized_gradients (line 69) | def compute_sanitized_gradients(self, loss, var_list=None,
method minimize (line 100) | def minimize(self, loss, global_step=None, var_list=None,
FILE: model_zoo/models/differential_privacy/dp_sgd/dp_optimizer/dp_pca.py
function ComputeDPPrincipalProjection (line 23) | def ComputeDPPrincipalProjection(data, projection_dims,
FILE: model_zoo/models/differential_privacy/dp_sgd/dp_optimizer/sanitizer.py
class AmortizedGaussianSanitizer (line 36) | class AmortizedGaussianSanitizer(object):
method __init__ (line 44) | def __init__(self, accountant, default_option):
method set_option (line 56) | def set_option(self, tensor_name, option):
method sanitize (line 66) | def sanitize(self, x, eps_delta, sigma=None,
FILE: model_zoo/models/differential_privacy/dp_sgd/dp_optimizer/utils.py
class LayerParameters (line 26) | class LayerParameters(object):
method __init__ (line 28) | def __init__(self):
class ConvParameters (line 39) | class ConvParameters(object):
method __init__ (line 41) | def __init__(self):
class NetworkParameters (line 59) | class NetworkParameters(object):
method __init__ (line 61) | def __init__(self):
function GetTensorOpName (line 70) | def GetTensorOpName(x):
function BuildNetwork (line 88) | def BuildNetwork(inputs, network_parameters):
function VaryRate (line 196) | def VaryRate(start, end, saturate_epochs, epoch):
function BatchClipByL2norm (line 220) | def BatchClipByL2norm(t, upper_bound, name=None):
function SoftThreshold (line 250) | def SoftThreshold(t, threshold_ratio, name=None):
function AddGaussianNoise (line 278) | def AddGaussianNoise(t, sigma, name=None):
function GenerateBinomialTable (line 294) | def GenerateBinomialTable(m):
FILE: model_zoo/models/differential_privacy/dp_sgd/per_example_gradients/per_example_gradients.py
function _ListUnion (line 25) | def _ListUnion(list_1, list_2):
function Interface (line 60) | def Interface(ys, xs):
class PXGRegistry (line 104) | class PXGRegistry(object):
method __init__ (line 118) | def __init__(self):
method __call__ (line 121) | def __call__(self, op,
method Register (line 131) | def Register(self, op_name, pxg_class):
class MatMulPXG (line 147) | class MatMulPXG(object):
method __init__ (line 151) | def __init__(self, op,
method __call__ (line 166) | def __call__(self, x, z_grads):
class Conv2DPXG (line 198) | class Conv2DPXG(object):
method __init__ (line 204) | def __init__(self, op,
method _PxConv2DBuilder (line 213) | def _PxConv2DBuilder(self, input_, w, strides, padding):
method __call__ (line 252) | def __call__(self, w, z_grads):
class AddPXG (line 282) | class AddPXG(object):
method __init__ (line 288) | def __init__(self, op,
method __call__ (line 297) | def __call__(self, x, z_grads):
function PerExampleGradients (line 317) | def PerExampleGradients(ys, xs, grad_ys=None, name="gradients",
FILE: model_zoo/models/differential_privacy/multiple_teachers/aggregation.py
function labels_from_probs (line 24) | def labels_from_probs(probs):
function noisy_max (line 42) | def noisy_max(logits, lap_scale, return_clean_votes=False):
function aggregation_most_frequent (line 103) | def aggregation_most_frequent(logits):
FILE: model_zoo/models/differential_privacy/multiple_teachers/analysis.py
function compute_q_noisy_max (line 69) | def compute_q_noisy_max(counts, noise_eps):
function compute_q_noisy_max_approx (line 94) | def compute_q_noisy_max_approx(counts, noise_eps):
function logmgf_exact (line 119) | def logmgf_exact(q, priv_eps, l):
function logmgf_from_counts (line 150) | def logmgf_from_counts(counts, noise_eps, l):
function sens_at_k (line 161) | def sens_at_k(counts, noise_eps, l, k):
function smoothed_sens (line 190) | def smoothed_sens(counts, noise_eps, l, beta):
function main (line 214) | def main(unused_argv):
FILE: model_zoo/models/differential_privacy/multiple_teachers/deep_cnn.py
function _variable_on_cpu (line 43) | def _variable_on_cpu(name, shape, initializer):
function _variable_with_weight_decay (line 59) | def _variable_with_weight_decay(name, shape, stddev, wd):
function inference (line 83) | def inference(images, dropout=False):
function inference_deeper (line 193) | def inference_deeper(images, dropout=False):
function loss_fun (line 326) | def loss_fun(logits, labels):
function moving_av (line 357) | def moving_av(total_loss):
function train_op_fun (line 374) | def train_op_fun(total_loss, global_step):
function _input_placeholder (line 429) | def _input_placeholder():
function train (line 446) | def train(images, labels, ckpt_path, dropout=False):
function softmax_preds (line 546) | def softmax_preds(images, ckpt_path, return_logits=False):
FILE: model_zoo/models/differential_privacy/multiple_teachers/input.py
function create_dir_if_needed (line 35) | def create_dir_if_needed(dest_directory):
function maybe_download (line 47) | def maybe_download(file_urls, directory):
function image_whitening (line 89) | def image_whitening(data):
function extract_svhn (line 120) | def extract_svhn(local_url):
function unpickle_cifar_dic (line 150) | def unpickle_cifar_dic(file):
function extract_cifar10 (line 162) | def extract_cifar10(local_url, data_dir):
function extract_mnist_data (line 255) | def extract_mnist_data(filename, num_images, image_size, pixel_depth):
function extract_mnist_labels (line 276) | def extract_mnist_labels(filename, num_images):
function ld_svhn (line 293) | def ld_svhn(extended=False, test_only=False):
function ld_cifar10 (line 337) | def ld_cifar10(test_only=False):
function ld_mnist (line 366) | def ld_mnist(test_only=False):
function partition_dataset (line 396) | def partition_dataset(data, labels, nb_teachers, teacher_id):
FILE: model_zoo/models/differential_privacy/multiple_teachers/metrics.py
function accuracy (line 23) | def accuracy(logits, labels):
FILE: model_zoo/models/differential_privacy/multiple_teachers/train_student.py
function ensemble_preds (line 52) | def ensemble_preds(dataset, nb_teachers, stdnt_data):
function prepare_student_data (line 89) | def prepare_student_data(dataset, nb_teachers, save=False):
function train_student (line 164) | def train_student(dataset, nb_teachers):
function main (line 202) | def main(argv=None): # pylint: disable=unused-argument
FILE: model_zoo/models/differential_privacy/multiple_teachers/train_teachers.py
function train_teacher (line 44) | def train_teacher(dataset, nb_teachers, teacher_id):
function main (line 99) | def main(argv=None): # pylint: disable=unused-argument
FILE: model_zoo/models/differential_privacy/multiple_teachers/utils.py
function batch_indices (line 17) | def batch_indices(batch_nb, data_length, batch_size):
FILE: model_zoo/models/differential_privacy/privacy_accountant/python/gaussian_moments.py
function _to_np_float64 (line 52) | def _to_np_float64(v):
function pdf_gauss (line 63) | def pdf_gauss(x, sigma, mean=0):
function cropped_ratio (line 67) | def cropped_ratio(a, b):
function integral_inf (line 74) | def integral_inf(fn):
function integral_bounded (line 79) | def integral_bounded(fn, lb, ub):
function distributions (line 84) | def distributions(sigma, q):
function compute_a (line 91) | def compute_a(sigma, q, lmbd, verbose=False):
function compute_b (line 118) | def compute_b(sigma, q, lmbd, verbose=False):
function pdf_gauss_mp (line 154) | def pdf_gauss_mp(x, sigma, mean):
function integral_inf_mp (line 159) | def integral_inf_mp(fn):
function integral_bounded_mp (line 164) | def integral_bounded_mp(fn, lb, ub):
function distributions_mp (line 169) | def distributions_mp(sigma, q):
function compute_a_mp (line 176) | def compute_a_mp(sigma, q, lmbd, verbose=False):
function compute_b_mp (line 199) | def compute_b_mp(sigma, q, lmbd, verbose=False):
function _compute_delta (line 232) | def _compute_delta(log_moments, eps):
function _compute_eps (line 255) | def _compute_eps(log_moments, delta):
function compute_log_moment (line 276) | def compute_log_moment(q, sigma, steps, lmbd, verify=False, verbose=False):
function get_privacy_spent (line 305) | def get_privacy_spent(log_moments, target_eps=None, target_delta=None):
FILE: model_zoo/models/differential_privacy/privacy_accountant/tf/accountant.py
class AmortizedAccountant (line 46) | class AmortizedAccountant(object):
method __init__ (line 55) | def __init__(self, total_examples):
method accumulate_privacy_spending (line 69) | def accumulate_privacy_spending(self, eps_delta, unused_sigma,
method get_privacy_spent (line 104) | def get_privacy_spent(self, sess, target_eps=None):
class MomentsAccountant (line 123) | class MomentsAccountant(object):
method __init__ (line 175) | def __init__(self, total_examples, moment_orders=32):
method _compute_log_moment (line 196) | def _compute_log_moment(self, sigma, q, moment_order):
method accumulate_privacy_spending (line 208) | def accumulate_privacy_spending(self, unused_eps_delta,
method _compute_delta (line 237) | def _compute_delta(self, log_moments, eps):
method _compute_eps (line 257) | def _compute_eps(self, log_moments, delta):
method get_privacy_spent (line 266) | def get_privacy_spent(self, sess, target_eps=None, target_deltas=None):
class GaussianMomentsAccountant (line 295) | class GaussianMomentsAccountant(MomentsAccountant):
method __init__ (line 328) | def __init__(self, total_examples, moment_orders=32):
method _differential_moments (line 338) | def _differential_moments(self, sigma, s, t):
method _compute_log_moment (line 375) | def _compute_log_moment(self, sigma, q, moment_order):
class DummyAccountant (line 403) | class DummyAccountant(object):
method accumulate_privacy_spending (line 406) | def accumulate_privacy_spending(self, *unused_args):
method get_privacy_spent (line 409) | def get_privacy_spent(self, unused_sess, **unused_kwargs):
FILE: model_zoo/models/im2txt/im2txt/configuration.py
class ModelConfig (line 23) | class ModelConfig(object):
method __init__ (line 26) | def __init__(self):
class TrainingConfig (line 81) | class TrainingConfig(object):
method __init__ (line 84) | def __init__(self):
FILE: model_zoo/models/im2txt/im2txt/data/build_mscoco_data.py
class Vocabulary (line 142) | class Vocabulary(object):
method __init__ (line 145) | def __init__(self, vocab, unk_id):
method word_to_id (line 155) | def word_to_id(self, word):
class ImageDecoder (line 163) | class ImageDecoder(object):
method __init__ (line 166) | def __init__(self):
method decode_jpeg (line 174) | def decode_jpeg(self, encoded_jpeg):
function _int64_feature (line 182) | def _int64_feature(value):
function _bytes_feature (line 187) | def _bytes_feature(value):
function _int64_feature_list (line 192) | def _int64_feature_list(values):
function _bytes_feature_list (line 197) | def _bytes_feature_list(values):
function _to_sequence_example (line 202) | def _to_sequence_example(image, decoder, vocab):
function _process_image_files (line 240) | def _process_image_files(thread_index, ranges, name, images, decoder, vo...
function _process_dataset (line 299) | def _process_dataset(name, images, vocab, num_shards):
function _create_vocab (line 345) | def _create_vocab(captions):
function _process_caption (line 382) | def _process_caption(caption):
function _load_and_process_metadata (line 397) | def _load_and_process_metadata(captions_file, image_dir):
function main (line 441) | def main(unused_argv):
FILE: model_zoo/models/im2txt/im2txt/evaluate.py
function evaluate_model (line 56) | def evaluate_model(sess, model, global_step, summary_writer, summary_op):
function run_once (line 107) | def run_once(model, saver, summary_writer, summary_op):
function run (line 154) | def run():
function main (line 190) | def main(unused_argv):
FILE: model_zoo/models/im2txt/im2txt/inference_utils/caption_generator.py
class Caption (line 28) | class Caption(object):
method __init__ (line 31) | def __init__(self, sentence, state, logprob, score, metadata=None):
method __cmp__ (line 48) | def __cmp__(self, other):
class TopN (line 59) | class TopN(object):
method __init__ (line 62) | def __init__(self, n):
method size (line 66) | def size(self):
method push (line 70) | def push(self, x):
method extract (line 78) | def extract(self, sort=False):
method reset (line 96) | def reset(self):
class CaptionGenerator (line 101) | class CaptionGenerator(object):
method __init__ (line 104) | def __init__(self,
method beam_search (line 131) | def beam_search(self, sess, encoded_image):
FILE: model_zoo/models/im2txt/im2txt/inference_utils/caption_generator_test.py
class FakeVocab (line 27) | class FakeVocab(object):
method __init__ (line 30) | def __init__(self):
class FakeModel (line 35) | class FakeModel(object):
method __init__ (line 38) | def __init__(self):
method feed_image (line 71) | def feed_image(self, sess, encoded_image):
method inference_step (line 75) | def inference_step(self, sess, input_feed, state_feed):
class CaptionGeneratorTest (line 92) | class CaptionGeneratorTest(tf.test.TestCase):
method _assertExpectedCaptions (line 94) | def _assertExpectedCaptions(self,
method testBeamSize (line 126) | def testBeamSize(self):
method testMaxLength (line 141) | def testMaxLength(self):
method testLengthNormalization (line 163) | def testLengthNormalization(self):
FILE: model_zoo/models/im2txt/im2txt/inference_utils/inference_wrapper_base.py
class InferenceWrapperBase (line 56) | class InferenceWrapperBase(object):
method __init__ (line 59) | def __init__(self):
method build_model (line 62) | def build_model(self, model_config):
method _create_restore_fn (line 73) | def _create_restore_fn(self, checkpoint_path, saver):
method build_graph_from_config (line 102) | def build_graph_from_config(self, model_config, checkpoint_path):
method build_graph_from_proto (line 120) | def build_graph_from_proto(self, graph_def_file, saver_def_file,
method feed_image (line 150) | def feed_image(self, sess, encoded_image):
method inference_step (line 164) | def inference_step(self, sess, input_feed, state_feed):
FILE: model_zoo/models/im2txt/im2txt/inference_utils/vocabulary.py
class Vocabulary (line 25) | class Vocabulary(object):
method __init__ (line 28) | def __init__(self,
method word_to_id (line 66) | def word_to_id(self, word):
method id_to_word (line 73) | def id_to_word(self, word_id):
FILE: model_zoo/models/im2txt/im2txt/inference_wrapper.py
class InferenceWrapper (line 28) | class InferenceWrapper(inference_wrapper_base.InferenceWrapperBase):
method __init__ (line 31) | def __init__(self):
method build_model (line 34) | def build_model(self, model_config):
method feed_image (line 39) | def feed_image(self, sess, encoded_image):
method inference_step (line 44) | def inference_step(self, sess, input_feed, state_feed):
FILE: model_zoo/models/im2txt/im2txt/ops/image_embedding.py
function inception_v3 (line 30) | def inception_v3(images,
FILE: model_zoo/models/im2txt/im2txt/ops/image_embedding_test.py
class InceptionV3Test (line 28) | class InceptionV3Test(tf.test.TestCase):
method setUp (line 30) | def setUp(self):
method _countInceptionParameters (line 41) | def _countInceptionParameters(self):
method _verifyParameterCounts (line 53) | def _verifyParameterCounts(self):
method _assertCollectionSize (line 76) | def _assertCollectionSize(self, expected_size, collection):
method testTrainableTrueIsTrainingTrue (line 82) | def testTrainableTrueIsTrainingTrue(self):
method testTrainableTrueIsTrainingFalse (line 95) | def testTrainableTrueIsTrainingFalse(self):
method testTrainableFalseIsTrainingTrue (line 108) | def testTrainableFalseIsTrainingTrue(self):
method testTrainableFalseIsTrainingFalse (line 121) | def testTrainableFalseIsTrainingFalse(self):
FILE: model_zoo/models/im2txt/im2txt/ops/image_processing.py
function distort_image (line 26) | def distort_image(image, thread_id):
function process_image (line 62) | def process_image(encoded_image,
FILE: model_zoo/models/im2txt/im2txt/ops/inputs.py
function parse_sequence_example (line 26) | def parse_sequence_example(serialized, image_feature, caption_feature):
function prefetch_input_data (line 54) | def prefetch_input_data(reader,
function batch_with_dynamic_pad (line 126) | def batch_with_dynamic_pad(images_and_captions,
FILE: model_zoo/models/im2txt/im2txt/run_inference.py
function main (line 43) | def main(_):
FILE: model_zoo/models/im2txt/im2txt/show_and_tell_model.py
class ShowAndTellModel (line 34) | class ShowAndTellModel(object):
method __init__ (line 41) | def __init__(self, config, mode, train_inception=False):
method is_training (line 99) | def is_training(self):
method process_image (line 103) | def process_image(self, encoded_image, thread_id=0):
method build_inputs (line 121) | def build_inputs(self):
method build_image_embeddings (line 181) | def build_image_embeddings(self):
method build_seq_embeddings (line 212) | def build_seq_embeddings(self):
method build_model (line 230) | def build_model(self):
method setup_inception_initializer (line 327) | def setup_inception_initializer(self):
method setup_global_step (line 340) | def setup_global_step(self):
method build (line 350) | def build(self):
FILE: model_zoo/models/im2txt/im2txt/show_and_tell_model_test.py
class ShowAndTellModel (line 30) | class ShowAndTellModel(show_and_tell_model.ShowAndTellModel):
method build_inputs (line 33) | def build_inputs(self):
class ShowAndTellModelTest (line 57) | class ShowAndTellModelTest(tf.test.TestCase):
method setUp (line 59) | def setUp(self):
method _countModelParameters (line 63) | def _countModelParameters(self):
method _checkModelParameters (line 73) | def _checkModelParameters(self):
method _checkOutputs (line 90) | def _checkOutputs(self, expected_shapes, feed_dict=None):
method testBuildForTraining (line 112) | def testBuildForTraining(self):
method testBuildForEval (line 140) | def testBuildForEval(self):
method testBuildForInference (line 168) | def testBuildForInference(self):
FILE: model_zoo/models/im2txt/im2txt/train.py
function main (line 44) | def main(unused_argv):
FILE: model_zoo/models/inception/inception/data/build_image_data.py
function _int64_feature (line 107) | def _int64_feature(value):
function _bytes_feature (line 114) | def _bytes_feature(value):
function _convert_to_example (line 119) | def _convert_to_example(filename, image_buffer, label, text, height, wid...
class ImageCoder (line 150) | class ImageCoder(object):
method __init__ (line 153) | def __init__(self):
method png_to_jpeg (line 166) | def png_to_jpeg(self, image_data):
method decode_jpeg (line 170) | def decode_jpeg(self, image_data):
function _is_png (line 178) | def _is_png(filename):
function _process_image (line 190) | def _process_image(filename, coder):
function _process_image_files_batch (line 222) | def _process_image_files_batch(coder, thread_index, ranges, name, filena...
function _process_image_files (line 287) | def _process_image_files(name, filenames, texts, labels, num_shards):
function _find_image_files (line 331) | def _find_image_files(data_dir, labels_file):
function _process_dataset (line 402) | def _process_dataset(name, directory, num_shards, labels_file):
function main (line 415) | def main(unused_argv):
FILE: model_zoo/models/inception/inception/data/build_imagenet_data.py
function _int64_feature (line 158) | def _int64_feature(value):
function _float_feature (line 165) | def _float_feature(value):
function _bytes_feature (line 172) | def _bytes_feature(value):
function _convert_to_example (line 177) | def _convert_to_example(filename, image_buffer, label, synset, human, bbox,
class ImageCoder (line 228) | class ImageCoder(object):
method __init__ (line 231) | def __init__(self):
method png_to_jpeg (line 249) | def png_to_jpeg(self, image_data):
method cmyk_to_rgb (line 253) | def cmyk_to_rgb(self, image_data):
method decode_jpeg (line 257) | def decode_jpeg(self, image_data):
function _is_png (line 265) | def _is_png(filename):
function _is_cmyk (line 279) | def _is_cmyk(filename):
function _process_image (line 304) | def _process_image(filename, coder):
function _process_image_files_batch (line 341) | def _process_image_files_batch(coder, thread_index, ranges, name, filena...
function _process_image_files (line 413) | def _process_image_files(name, filenames, synsets, labels, humans,
function _find_image_files (line 465) | def _find_image_files(data_dir, labels_file):
function _find_human_readable_labels (line 540) | def _find_human_readable_labels(synsets, synset_to_human):
function _find_image_bounding_boxes (line 558) | def _find_image_bounding_boxes(filenames, image_to_bboxes):
function _process_dataset (line 584) | def _process_dataset(name, directory, num_shards, synset_to_human,
function _build_synset_lookup (line 604) | def _build_synset_lookup(imagenet_metadata_file):
function _build_bounding_box_lookup (line 636) | def _build_bounding_box_lookup(bounding_box_file):
function main (line 684) | def main(unused_argv):
FILE: model_zoo/models/inception/inception/data/process_bounding_boxes.py
class BoundingBox (line 90) | class BoundingBox(object):
function GetItem (line 94) | def GetItem(name, root, index=0):
function GetInt (line 104) | def GetInt(name, root, index=0):
function FindNumberBoundingBoxes (line 108) | def FindNumberBoundingBoxes(root):
function ProcessXMLAnnotation (line 117) | def ProcessXMLAnnotation(xml_file):
FILE: model_zoo/models/inception/inception/dataset.py
class Dataset (line 42) | class Dataset(object):
method __init__ (line 46) | def __init__(self, name, subset):
method num_classes (line 53) | def num_classes(self):
method num_examples_per_epoch (line 59) | def num_examples_per_epoch(self):
method download_message (line 68) | def download_message(self):
method available_subsets (line 72) | def available_subsets(self):
method data_files (line 76) | def data_files(self):
method reader (line 95) | def reader(self):
FILE: model_zoo/models/inception/inception/flowers_data.py
class FlowersData (line 26) | class FlowersData(Dataset):
method __init__ (line 29) | def __init__(self, subset):
method num_classes (line 32) | def num_classes(self):
method num_examples_per_epoch (line 36) | def num_examples_per_epoch(self):
method download_message (line 43) | def download_message(self):
FILE: model_zoo/models/inception/inception/flowers_eval.py
function main (line 30) | def main(unused_argv=None):
FILE: model_zoo/models/inception/inception/flowers_train.py
function main (line 31) | def main(_):
FILE: model_zoo/models/inception/inception/image_processing.py
function inputs (line 74) | def inputs(dataset, batch_size=None, num_preprocess_threads=None):
function distorted_inputs (line 107) | def distorted_inputs(dataset, batch_size=None, num_preprocess_threads=No...
function decode_jpeg (line 140) | def decode_jpeg(image_buffer, scope=None):
function distort_color (line 163) | def distort_color(image, thread_id=0, scope=None):
function distort_image (line 197) | def distort_image(image, height, width, bbox, thread_id=0, scope=None):
function eval_image (line 277) | def eval_image(image, height, width, scope=None):
function image_preprocessing (line 301) | def image_preprocessing(image_buffer, bbox, train, thread_id=0):
function parse_example_proto (line 336) | def parse_example_proto(example_serialized):
function batch_inputs (line 407) | def batch_inputs(dataset, batch_size, train, num_preprocess_threads=None,
FILE: model_zoo/models/inception/inception/imagenet_data.py
class ImagenetData (line 26) | class ImagenetData(Dataset):
method __init__ (line 29) | def __init__(self, subset):
method num_classes (line 32) | def num_classes(self):
method num_examples_per_epoch (line 36) | def num_examples_per_epoch(self):
method download_message (line 44) | def download_message(self):
FILE: model_zoo/models/inception/inception/imagenet_distributed_train.py
function main (line 32) | def main(unused_args):
FILE: model_zoo/models/inception/inception/imagenet_eval.py
function main (line 36) | def main(unused_argv=None):
FILE: model_zoo/models/inception/inception/imagenet_train.py
function main (line 31) | def main(_):
FILE: model_zoo/models/inception/inception/inception_distributed_train.py
function train (line 90) | def train(target, dataset, cluster_spec):
FILE: model_zoo/models/inception/inception/inception_eval.py
function _eval_once (line 55) | def _eval_once(saver, summary_writer, top_1_op, top_5_op, summary_op):
function evaluate (line 136) | def evaluate(dataset):
FILE: model_zoo/models/inception/inception/inception_model.py
function inference (line 48) | def inference(images, num_classes, for_training=False, restore_logits=True,
function loss (line 98) | def loss(logits, labels, batch_size=None):
function _activation_summary (line 138) | def _activation_summary(x):
function _activation_summaries (line 154) | def _activation_summaries(endpoints):
FILE: model_zoo/models/inception/inception/inception_train.py
function _tower_loss (line 82) | def _tower_loss(images, labels, num_classes, scope):
function _average_gradients (line 142) | def _average_gradients(tower_grads):
function train (line 180) | def train(dataset):
FILE: model_zoo/models/inception/inception/slim/collections_test.py
function get_variables (line 25) | def get_variables(scope=None):
function get_variables_by_name (line 29) | def get_variables_by_name(name):
class CollectionsTest (line 33) | class CollectionsTest(tf.test.TestCase):
method testVariables (line 35) | def testVariables(self):
method testVariablesWithoutBatchNorm (line 51) | def testVariablesWithoutBatchNorm(self):
method testVariablesByLayer (line 67) | def testVariablesByLayer(self):
method testVariablesToRestore (line 94) | def testVariablesToRestore(self):
method testVariablesToRestoreWithoutLogits (line 107) | def testVariablesToRestoreWithoutLogits(self):
method testRegularizationLosses (line 119) | def testRegularizationLosses(self):
method testTotalLossWithoutRegularization (line 129) | def testTotalLossWithoutRegularization(self):
method testTotalLossWithRegularization (line 154) | def testTotalLossWithRegularization(self):
FILE: model_zoo/models/inception/inception/slim/inception_model.py
function inception_v3 (line 52) | def inception_v3(inputs,
function inception_v3_parameters (line 333) | def inception_v3_parameters(weight_decay=0.00004, stddev=0.1,
FILE: model_zoo/models/inception/inception/slim/inception_test.py
class InceptionTest (line 25) | class InceptionTest(tf.test.TestCase):
method testBuildLogits (line 27) | def testBuildLogits(self):
method testBuildEndPoints (line 38) | def testBuildEndPoints(self):
method testVariablesSetDevice (line 57) | def testVariablesSetDevice(self):
method testHalfSizeImages (line 73) | def testHalfSizeImages(self):
method testUnknowBatchSize (line 87) | def testUnknowBatchSize(self):
method testEvaluation (line 102) | def testEvaluation(self):
method testTrainEvalWithReuse (line 115) | def testTrainEvalWithReuse(self):
FILE: model_zoo/models/inception/inception/slim/losses.py
function l1_regularizer (line 37) | def l1_regularizer(weight=1.0, scope=None):
function l2_regularizer (line 56) | def l2_regularizer(weight=1.0, scope=None):
function l1_l2_regularizer (line 75) | def l1_l2_regularizer(weight_l1=1.0, weight_l2=1.0, scope=None):
function l1_loss (line 102) | def l1_loss(tensor, weight=1.0, scope=None):
function l2_loss (line 122) | def l2_loss(tensor, weight=1.0, scope=None):
function cross_entropy_loss (line 142) | def cross_entropy_loss(logits, one_hot_labels, label_smoothing=0,
FILE: model_zoo/models/inception/inception/slim/losses_test.py
class LossesTest (line 26) | class LossesTest(tf.test.TestCase):
method testL1Loss (line 28) | def testL1Loss(self):
method testL2Loss (line 38) | def testL2Loss(self):
class RegularizersTest (line 49) | class RegularizersTest(tf.test.TestCase):
method testL1Regularizer (line 51) | def testL1Regularizer(self):
method testL1RegularizerWithScope (line 60) | def testL1RegularizerWithScope(self):
method testL1RegularizerWithWeight (line 69) | def testL1RegularizerWithWeight(self):
method testL2Regularizer (line 79) | def testL2Regularizer(self):
method testL2RegularizerWithScope (line 88) | def testL2RegularizerWithScope(self):
method testL2RegularizerWithWeight (line 97) | def testL2RegularizerWithWeight(self):
method testL1L2Regularizer (line 107) | def testL1L2Regularizer(self):
method testL1L2RegularizerWithScope (line 116) | def testL1L2RegularizerWithScope(self):
method testL1L2RegularizerWithWeights (line 125) | def testL1L2RegularizerWithWeights(self):
class CrossEntropyLossTest (line 138) | class CrossEntropyLossTest(tf.test.TestCase):
method testCrossEntropyLossAllCorrect (line 140) | def testCrossEntropyLossAllCorrect(self):
method testCrossEntropyLossAllWrong (line 152) | def testCrossEntropyLossAllWrong(self):
method testCrossEntropyLossAllWrongWithWeight (line 164) | def testCrossEntropyLossAllWrongWithWeight(self):
FILE: model_zoo/models/inception/inception/slim/ops.py
function batch_norm (line 43) | def batch_norm(inputs,
function _two_element_tuple (line 135) | def _two_element_tuple(int_or_tuple):
function conv2d (line 167) | def conv2d(inputs,
function fc (line 250) | def fc(inputs,
function one_hot_encoding (line 320) | def one_hot_encoding(labels, num_classes, scope=None):
function max_pool (line 342) | def max_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None):
function avg_pool (line 374) | def avg_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None):
function dropout (line 404) | def dropout(inputs, keep_prob=0.5, is_training=True, scope=None):
function flatten (line 424) | def flatten(inputs, scope=None):
function repeat_op (line 446) | def repeat_op(repetitions, inputs, op, *args, **kwargs):
FILE: model_zoo/models/inception/inception/slim/ops_test.py
class ConvTest (line 31) | class ConvTest(tf.test.TestCase):
method testCreateConv (line 33) | def testCreateConv(self):
method testCreateSquareConv (line 41) | def testCreateSquareConv(self):
method testCreateConvWithTensorShape (line 49) | def testCreateConvWithTensorShape(self):
method testCreateFullyConv (line 57) | def testCreateFullyConv(self):
method testCreateVerticalConv (line 65) | def testCreateVerticalConv(self):
method testCreateHorizontalConv (line 74) | def testCreateHorizontalConv(self):
method testCreateConvWithStride (line 83) | def testCreateConvWithStride(self):
method testCreateConvCreatesWeightsAndBiasesVars (line 92) | def testCreateConvCreatesWeightsAndBiasesVars(self):
method testCreateConvWithScope (line 102) | def testCreateConvWithScope(self):
method testCreateConvWithoutActivation (line 109) | def testCreateConvWithoutActivation(self):
method testCreateConvValid (line 116) | def testCreateConvValid(self):
method testCreateConvWithWD (line 123) | def testCreateConvWithWD(self):
method testCreateConvWithoutWD (line 134) | def testCreateConvWithoutWD(self):
method testReuseVars (line 142) | def testReuseVars(self):
method testNonReuseVars (line 151) | def testNonReuseVars(self):
method testReuseConvWithWD (line 160) | def testReuseConvWithWD(self):
method testConvWithBatchNorm (line 174) | def testConvWithBatchNorm(self):
method testReuseConvWithBatchNorm (line 185) | def testReuseConvWithBatchNorm(self):
class FCTest (line 197) | class FCTest(tf.test.TestCase):
method testCreateFC (line 199) | def testCreateFC(self):
method testCreateFCWithScope (line 207) | def testCreateFCWithScope(self):
method testCreateFcCreatesWeightsAndBiasesVars (line 214) | def testCreateFcCreatesWeightsAndBiasesVars(self):
method testReuseVars (line 224) | def testReuseVars(self):
method testNonReuseVars (line 233) | def testNonReuseVars(self):
method testCreateFCWithoutActivation (line 242) | def testCreateFCWithoutActivation(self):
method testCreateFCWithWD (line 249) | def testCreateFCWithWD(self):
method testCreateFCWithoutWD (line 260) | def testCreateFCWithoutWD(self):
method testReuseFCWithWD (line 268) | def testReuseFCWithWD(self):
method testFCWithBatchNorm (line 281) | def testFCWithBatchNorm(self):
method testReuseFCWithBatchNorm (line 292) | def testReuseFCWithBatchNorm(self):
class MaxPoolTest (line 303) | class MaxPoolTest(tf.test.TestCase):
method testCreateMaxPool (line 305) | def testCreateMaxPool(self):
method testCreateSquareMaxPool (line 313) | def testCreateSquareMaxPool(self):
method testCreateMaxPoolWithScope (line 321) | def testCreateMaxPoolWithScope(self):
method testCreateMaxPoolSAME (line 328) | def testCreateMaxPoolSAME(self):
method testCreateMaxPoolStrideSAME (line 335) | def testCreateMaxPoolStrideSAME(self):
method testGlobalMaxPool (line 342) | def testGlobalMaxPool(self):
class AvgPoolTest (line 350) | class AvgPoolTest(tf.test.TestCase):
method testCreateAvgPool (line 352) | def testCreateAvgPool(self):
method testCreateSquareAvgPool (line 360) | def testCreateSquareAvgPool(self):
method testCreateAvgPoolWithScope (line 368) | def testCreateAvgPoolWithScope(self):
method testCreateAvgPoolSAME (line 375) | def testCreateAvgPoolSAME(self):
method testCreateAvgPoolStrideSAME (line 382) | def testCreateAvgPoolStrideSAME(self):
method testGlobalAvgPool (line 389) | def testGlobalAvgPool(self):
class OneHotEncodingTest (line 397) | class OneHotEncodingTest(tf.test.TestCase):
method testOneHotEncodingCreate (line 399) | def testOneHotEncodingCreate(self):
method testOneHotEncoding (line 406) | def testOneHotEncoding(self):
class DropoutTest (line 416) | class DropoutTest(tf.test.TestCase):
method testCreateDropout (line 418) | def testCreateDropout(self):
method testCreateDropoutNoTraining (line 426) | def testCreateDropoutNoTraining(self):
class FlattenTest (line 434) | class FlattenTest(tf.test.TestCase):
method testFlatten4D (line 436) | def testFlatten4D(self):
method testFlatten3D (line 445) | def testFlatten3D(self):
method testFlattenBatchSize (line 454) | def testFlattenBatchSize(self):
class BatchNormTest (line 468) | class BatchNormTest(tf.test.TestCase):
method testCreateOp (line 470) | def testCreateOp(self):
method testCreateVariables (line 478) | def testCreateVariables(self):
method testCreateVariablesWithScale (line 492) | def testCreateVariablesWithScale(self):
method testCreateVariablesWithoutCenterWithScale (line 506) | def testCreateVariablesWithoutCenterWithScale(self):
method testCreateVariablesWithoutCenterWithoutScale (line 520) | def testCreateVariablesWithoutCenterWithoutScale(self):
method testMovingAverageVariables (line 534) | def testMovingAverageVariables(self):
method testUpdateOps (line 544) | def testUpdateOps(self):
method testReuseVariables (line 557) | def testReuseVariables(self):
method testReuseUpdateOps (line 570) | def testReuseUpdateOps(self):
method testCreateMovingVars (line 579) | def testCreateMovingVars(self):
method testComputeMovingVars (line 593) | def testComputeMovingVars(self):
method testEvalMovingVars (line 623) | def testEvalMovingVars(self):
method testReuseVars (line 657) | def testReuseVars(self):
FILE: model_zoo/models/inception/inception/slim/scopes.py
function _get_arg_stack (line 63) | def _get_arg_stack():
function _current_arg_scope (line 73) | def _current_arg_scope():
function _add_op (line 78) | def _add_op(op):
function arg_scope (line 85) | def arg_scope(list_ops_or_scope, **kwargs):
function add_arg_scope (line 138) | def add_arg_scope(func):
function has_arg_scope (line 160) | def has_arg_scope(func):
FILE: model_zoo/models/inception/inception/slim/scopes_test.py
function func1 (line 26) | def func1(*args, **kwargs):
function func2 (line 31) | def func2(*args, **kwargs):
class ArgScopeTest (line 35) | class ArgScopeTest(tf.test.TestCase):
method testEmptyArgScope (line 37) | def testEmptyArgScope(self):
method testCurrentArgScope (line 41) | def testCurrentArgScope(self):
method testCurrentArgScopeNested (line 49) | def testCurrentArgScopeNested(self):
method testReuseArgScope (line 60) | def testReuseArgScope(self):
method testReuseArgScopeNested (line 70) | def testReuseArgScopeNested(self):
method testSimpleArgScope (line 86) | def testSimpleArgScope(self):
method testSimpleArgScopeWithTuple (line 95) | def testSimpleArgScopeWithTuple(self):
method testOverwriteArgScope (line 104) | def testOverwriteArgScope(self):
method testNestedArgScope (line 112) | def testNestedArgScope(self):
method testSharedArgScope (line 125) | def testSharedArgScope(self):
method testSharedArgScopeTuple (line 136) | def testSharedArgScopeTuple(self):
method testPartiallySharedArgScope (line 147) | def testPartiallySharedArgScope(self):
FILE: model_zoo/models/inception/inception/slim/variables.py
function add_variable (line 96) | def add_variable(var, restore=True):
function get_variables (line 114) | def get_variables(scope=None, suffix=None):
function get_variables_to_restore (line 130) | def get_variables_to_restore():
function get_variables_by_name (line 139) | def get_variables_by_name(given_name, scope=None):
function get_unique_variable (line 152) | def get_unique_variable(name):
class VariableDeviceChooser (line 174) | class VariableDeviceChooser(object):
method __init__ (line 181) | def __init__(self,
method __call__ (line 198) | def __call__(self, op):
function variable_device (line 209) | def variable_device(device, name):
function global_step (line 221) | def global_step(device=''):
function variable (line 248) | def variable(name, shape=None, dtype=tf.float32, initializer=None,
FILE: model_zoo/models/inception/inception/slim/variables_test.py
class VariablesTest (line 26) | class VariablesTest(tf.test.TestCase):
method testCreateVariable (line 28) | def testCreateVariable(self):
method testGetVariables (line 35) | def testGetVariables(self):
method testGetVariablesSuffix (line 45) | def testGetVariablesSuffix(self):
method testGetVariableWithSingleVar (line 54) | def testGetVariableWithSingleVar(self):
method testGetVariableWithDistractors (line 60) | def testGetVariableWithDistractors(self):
method testGetVariableThrowsExceptionWithNoMatch (line 69) | def testGetVariableThrowsExceptionWithNoMatch(self):
method testGetThrowsExceptionWithChildrenButNoMatch (line 75) | def testGetThrowsExceptionWithChildrenButNoMatch(self):
method testGetVariablesToRestore (line 84) | def testGetVariablesToRestore(self):
method testNoneGetVariablesToRestore (line 92) | def testNoneGetVariablesToRestore(self):
method testGetMixedVariablesToRestore (line 101) | def testGetMixedVariablesToRestore(self):
method testReuseVariable (line 112) | def testReuseVariable(self):
method testVariableWithDevice (line 121) | def testVariableWithDevice(self):
method testVariableWithDeviceFromScope (line 129) | def testVariableWithDeviceFromScope(self):
method testVariableWithDeviceFunction (line 137) | def testVariableWithDeviceFunction(self):
method testVariableWithReplicaDeviceSetter (line 167) | def testVariableWithReplicaDeviceSetter(self):
method testVariableWithVariableDeviceChooser (line 190) | def testVariableWithVariableDeviceChooser(self):
method testVariableGPUPlacement (line 215) | def testVariableGPUPlacement(self):
method testVariableCollection (line 240) | def testVariableCollection(self):
method testVariableCollections (line 247) | def testVariableCollections(self):
method testVariableCollectionsWithArgScope (line 254) | def testVariableCollectionsWithArgScope(self):
method testVariableCollectionsWithArgScopeNested (line 261) | def testVariableCollectionsWithArgScopeNested(self):
method testVariableCollectionsWithArgScopeNonNested (line 270) | def testVariableCollectionsWithArgScopeNonNested(self):
method testVariableRestoreWithArgScopeNested (line 280) | def testVariableRestoreWithArgScopeNested(self):
class GetVariablesByNameTest (line 295) | class GetVariablesByNameTest(tf.test.TestCase):
method testGetVariableGivenNameScoped (line 297) | def testGetVariableGivenNameScoped(self):
method testGetVariablesByNameReturnsByValueWithScope (line 305) | def testGetVariablesByNameReturnsByValueWithScope(self):
method testGetVariablesByNameReturnsByValueWithoutScope (line 319) | def testGetVariablesByNameReturnsByValueWithoutScope(self):
class GlobalStepTest (line 333) | class GlobalStepTest(tf.test.TestCase):
method testStable (line 335) | def testStable(self):
method testDevice (line 341) | def testDevice(self):
method testDeviceFn (line 347) | def testDeviceFn(self):
method testReplicaDeviceSetter (line 365) | def testReplicaDeviceSetter(self):
method testVariableWithVariableDeviceChooser (line 377) | def testVariableWithVariableDeviceChooser(self):
FILE: model_zoo/models/lm_1b/data_utils.py
class Vocabulary (line 24) | class Vocabulary(object):
method __init__ (line 27) | def __init__(self, filename):
method bos (line 58) | def bos(self):
method eos (line 62) | def eos(self):
method unk (line 66) | def unk(self):
method size (line 70) | def size(self):
method word_to_id (line 73) | def word_to_id(self, word):
method id_to_word (line 78) | def id_to_word(self, cur_id):
method decode (line 83) | def decode(self, cur_ids):
method encode (line 87) | def encode(self, sentence):
class CharsVocabulary (line 93) | class CharsVocabulary(Vocabulary):
method __init__ (line 96) | def __init__(self, filename, max_word_length):
method word_char_ids (line 134) | def word_char_ids(self):
method max_word_length (line 138) | def max_word_length(self):
method _convert_word_to_char_ids (line 141) | def _convert_word_to_char_ids(self, word):
method word_to_char_ids (line 152) | def word_to_char_ids(self, word):
method encode_chars (line 158) | def encode_chars(self, sentence):
function get_batch (line 164) | def get_batch(generator, batch_size, num_steps, max_word_length, pad=Fal...
class LM1BDataset (line 217) | class LM1BDataset(object):
method __init__ (line 223) | def __init__(self, filepattern, vocab):
method _load_random_shard (line 234) | def _load_random_shard(self):
method _load_shard (line 238) | def _load_shard(self, shard_name):
method _get_sentence (line 265) | def _get_sentence(self, forever=True):
method get_batch (line 273) | def get_batch(self, batch_size, num_steps, pad=False, forever=True):
method vocab (line 278) | def vocab(self):
FILE: model_zoo/models/lm_1b/lm_1b_eval.py
function _LoadModel (line 73) | def _LoadModel(gd_file, ckpt_file):
function _EvalModel (line 120) | def _EvalModel(dataset):
function _SampleSoftmax (line 158) | def _SampleSoftmax(softmax):
function _SampleModel (line 162) | def _SampleModel(prefix_words, vocab):
function _DumpEmb (line 213) | def _DumpEmb(vocab):
function _DumpSentenceEmbedding (line 250) | def _DumpSentenceEmbedding(sentence, vocab):
function main (line 290) | def main(unused_argv):
FILE: model_zoo/models/namignizer/data_utils.py
function read_names (line 31) | def read_names(names_path):
function _letter_to_number (line 52) | def _letter_to_number(letter):
function namignizer_iterator (line 58) | def namignizer_iterator(names, counts, batch_size, num_steps, epoch_size):
function name_to_batch (line 97) | def name_to_batch(name, batch_size, num_steps):
FILE: model_zoo/models/namignizer/model.py
class NamignizerModel (line 23) | class NamignizerModel(object):
method __init__ (line 26) | def __init__(self, is_training, config):
method assign_lr (line 92) | def assign_lr(self, session, lr_value):
method input_data (line 96) | def input_data(self):
method targets (line 100) | def targets(self):
method activations (line 104) | def activations(self):
method weights (line 108) | def weights(self):
method initial_state (line 112) | def initial_state(self):
method cost (line 116) | def cost(self):
method loss (line 120) | def loss(self):
method final_state (line 124) | def final_state(self):
method lr (line 128) | def lr(self):
method train_op (line 132) | def train_op(self):
FILE: model_zoo/models/namignizer/names.py
class SmallConfig (line 50) | class SmallConfig(object):
class LargeConfig (line 67) | class LargeConfig(object):
class TestConfig (line 84) | class TestConfig(object):
function run_epoch (line 101) | def run_epoch(session, m, names, counts, epoch_size, eval_op, verbose=Fa...
function train (line 141) | def train(data_dir, checkpoint_path, config):
function namignize (line 178) | def namignize(names, checkpoint_path, config):
function namignator (line 212) | def namignator(checkpoint_path, config):
FILE: model_zoo/models/neural_gpu/data_utils.py
function pad (line 37) | def pad(l):
function add (line 53) | def add(n1, n2, base=10):
function init_data (line 73) | def init_data(task, length, nbr_cases, nclass):
function to_symbol (line 206) | def to_symbol(i):
function to_id (line 214) | def to_id(s):
function get_batch (line 221) | def get_batch(max_length, batch_size, do_train, task, offset=None, prese...
function print_out (line 255) | def print_out(s, newline=True):
function decode (line 268) | def decode(output):
function accuracy (line 272) | def accuracy(inpt, output, target, batch_size, nprint):
function safe_exp (line 312) | def safe_exp(x):
FILE: model_zoo/models/neural_gpu/neural_gpu.py
function conv_linear (line 24) | def conv_linear(args, kw, kh, nin, nout, do_bias, bias_start, prefix):
function sigmoid_cutoff (line 41) | def sigmoid_cutoff(x, cutoff):
function tanh_cutoff (line 49) | def tanh_cutoff(x, cutoff):
function conv_gru (line 57) | def conv_gru(inpts, mem, kw, kh, nmaps, cutoff, prefix):
function _custom_id_grad (line 70) | def _custom_id_grad(_, grads):
function quantize (line 74) | def quantize(t, quant_scale, max_value=1.0):
function quantize_weights_op (line 83) | def quantize_weights_op(quant_scale, max_value):
function relaxed_average (line 89) | def relaxed_average(var_name_suffix, rx_step):
function relaxed_distance (line 105) | def relaxed_distance(rx_step):
function make_dense (line 119) | def make_dense(targets, noclass):
function check_for_zero (line 130) | def check_for_zero(sparse):
class NeuralGPU (line 143) | class NeuralGPU(object):
method __init__ (line 146) | def __init__(self, nmaps, vec_size, niclass, noclass, dropout, rx_step,
method step (line 290) | def step(self, sess, inp, target, do_backward, noise_param=None,
FILE: model_zoo/models/neural_gpu/neural_gpu_trainer.py
function initialize (line 70) | def initialize(sess):
function single_test (line 154) | def single_test(l, model, sess, task, nprint, batch_size, print_out=True,
function multi_test (line 192) | def multi_test(l, model, sess, task, nprint, batch_size, offset=None,
function train (line 218) | def train():
function animate (line 338) | def animate(l, test_data, anim_size):
function evaluate (line 399) | def evaluate():
function interactive (line 429) | def interactive():
function main (line 450) | def main(_):
FILE: model_zoo/models/neural_programmer/data_utils.py
function return_index (line 24) | def return_index(a):
function construct_vocab (line 30) | def construct_vocab(data, utility, add_word=False):
function word_lookup (line 82) | def word_lookup(word, utility):
function convert_to_int_2d_and_pad (line 89) | def convert_to_int_2d_and_pad(a, utility):
function convert_to_bool_and_pad (line 106) | def convert_to_bool_and_pad(a, utility):
function partial_match (line 121) | def partial_match(question, table, number):
function exact_match (line 143) | def exact_match(question, table, number):
function partial_column_match (line 173) | def partial_column_match(question, table, number):
function exact_column_match (line 184) | def exact_column_match(question, table, number):
function get_max_entry (line 200) | def get_max_entry(a):
function list_join (line 218) | def list_join(a):
function group_by_max (line 225) | def group_by_max(table, number):
function pick_one (line 248) | def pick_one(a):
function check_processed_cols (line 255) | def check_processed_cols(col, utility):
function complete_wiki_processing (line 263) | def complete_wiki_processing(data, utility, train=True):
function add_special_words (line 533) | def add_special_words(utility):
function perform_word_cutoff (line 559) | def perform_word_cutoff(utility):
function word_dropout (line 570) | def word_dropout(question, utility):
function generate_feed_dict (line 584) | def generate_feed_dict(data, curr, batch_size, gr, train=False, utility=...
FILE: model_zoo/models/neural_programmer/model.py
class Graph (line 23) | class Graph():
method __init__ (line 25) | def __init__(self, utility, batch_size, max_passes, mode="train"):
method LSTM_question_embedding (line 95) | def LSTM_question_embedding(self, sentence, sentence_length):
method history_recurrent_step (line 127) | def history_recurrent_step(self, curr_hprev, hprev):
method question_number_softmax (line 134) | def question_number_softmax(self, hidden_vectors):
method perform_attention (line 187) | def perform_attention(self, context_vector, hidden_vectors, length, ma...
method get_column_hidden_vectors (line 201) | def get_column_hidden_vectors(self):
method create_summary_embeddings (line 210) | def create_summary_embeddings(self):
method compute_column_softmax (line 220) | def compute_column_softmax(self, column_controller_vector, time_step):
method compute_first_or_last (line 239) | def compute_first_or_last(self, select, first=True):
method make_hard_softmax (line 265) | def make_hard_softmax(self, softmax):
method compute_max_or_min (line 274) | def compute_max_or_min(self, select, maxi=True):
method perform_operations (line 304) | def perform_operations(self, softmax, full_column_softmax, select,
method one_pass (line 391) | def one_pass(self, select, question_embedding, hidden_vectors, hprev,
method compute_lookup_error (line 429) | def compute_lookup_error(self, val):
method soft_min (line 445) | def soft_min(self, x, y):
method error_computation (line 451) | def error_computation(self):
method batch_process (line 490) | def batch_process(self):
method compute_error (line 578) | def compute_error(self):
method create_graph (line 631) | def create_graph(self, params, global_step):
FILE: model_zoo/models/neural_programmer/neural_programmer.py
class Utility (line 81) | class Utility:
method __init__ (line 83) | def __init__(self):
function evaluate (line 106) | def evaluate(sess, data, batch_size, graph, i):
function Train (line 121) | def Train(graph, utility, batch_size, train_data, sess, model_dir,
function master (line 151) | def master(train_data, dev_data, utility):
function main (line 204) | def main(args):
FILE: model_zoo/models/neural_programmer/nn_utils.py
function get_embedding (line 20) | def get_embedding(word, utility, params):
function apply_dropout (line 24) | def apply_dropout(x, dropout_rate, mode):
function LSTMCell (line 33) | def LSTMCell(x, mprev, cprev, key, params):
FILE: model_zoo/models/neural_programmer/parameters.py
class Parameters (line 22) | class Parameters:
method __init__ (line 24) | def __init__(self, u):
method parameters (line 29) | def parameters(self, utility):
method RandomUniformInit (line 78) | def RandomUniformInit(self, shape):
FILE: model_zoo/models/neural_programmer/wiki_data.py
function is_nan_or_inf (line 34) | def is_nan_or_inf(number):
function strip_accents (line 37) | def strip_accents(s):
function correct_unicode (line 43) | def correct_unicode(string):
function simple_normalize (line 64) | def simple_normalize(string):
function full_normalize (line 75) | def full_normalize(string):
function final_normalize (line 103) | def final_normalize(string):
function is_number (line 118) | def is_number(x):
class WikiExample (line 128) | class WikiExample(object):
method __init__ (line 130) | def __init__(self, id, question, answer, table_key):
class TableInfo (line 144) | class TableInfo(object):
method __init__ (line 146) | def __init__(self, word_columns, word_column_names, word_column_indices,
class WikiQuestionLoader (line 160) | class WikiQuestionLoader(object):
method __init__ (line 162) | def __init__(self, data_name, root_folder):
method num_questions (line 168) | def num_questions(self):
method load_qa (line 171) | def load_qa(self):
method load (line 180) | def load(self):
function is_date (line 184) | def is_date(word):
class WikiQuestionGenerator (line 200) | class WikiQuestionGenerator(object):
method __init__ (line 202) | def __init__(self, train_name, dev_name, test_name, root_folder):
method is_money (line 219) | def is_money(self, word):
method remove_consecutive (line 228) | def remove_consecutive(self, ner_tags, ner_values):
method pre_process_sentence (line 245) | def pre_process_sentence(self, tokens, ner_tags, ner_values):
method load_annotated_data (line 285) | def load_annotated_data(self, in_file):
method is_number_column (line 304) | def is_number_column(self, a):
method convert_table (line 312) | def convert_table(self, table):
method load_annotated_tables (line 321) | def load_annotated_tables(self):
method answer_classification (line 410) | def answer_classification(self):
method load (line 504) | def load(self):
FILE: model_zoo/models/resnet/cifar_input.py
function build_input (line 22) | def build_input(dataset, data_path, batch_size, mode):
FILE: model_zoo/models/resnet/resnet_main.py
function train (line 47) | def train(hps):
function evaluate (line 99) | def evaluate(hps):
function main (line 156) | def main(_):
FILE: model_zoo/models/resnet/resnet_model.py
class ResNet (line 37) | class ResNet(object):
method __init__ (line 40) | def __init__(self, hps, images, labels, mode):
method build_graph (line 56) | def build_graph(self):
method _stride_arr (line 64) | def _stride_arr(self, stride):
method _build_model (line 68) | def _build_model(self):
method _build_train_op (line 127) | def _build_train_op(self):
method _batch_norm (line 148) | def _batch_norm(self, name, x):
method _residual (line 193) | def _residual(self, x, in_filter, out_filter, stride,
method _bottleneck_residual (line 226) | def _bottleneck_residual(self, x, in_filter, out_filter, stride,
method _decay (line 261) | def _decay(self):
method _conv (line 271) | def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
method _relu (line 281) | def _relu(self, x, leakiness=0.0):
method _fully_connected (line 285) | def _fully_connected(self, x, out_dim):
method _global_avg_pool (line 295) | def _global_avg_pool(self, x):
FILE: model_zoo/models/slim/datasets/cifar10.py
function get_split (line 44) | def get_split(split_name, dataset_dir, file_pattern=None, reader=None):
FILE: model_zoo/models/slim/datasets/dataset_factory.py
function get_dataset (line 34) | def get_dataset(name, split_name, dataset_dir, file_pattern=None, reader...
FILE: model_zoo/models/slim/datasets/dataset_utils.py
function int64_feature (line 30) | def int64_feature(values):
function bytes_feature (line 44) | def bytes_feature(values):
function image_to_tfexample (line 56) | def image_to_tfexample(image_data, image_format, height, width, class_id):
function download_and_uncompress_tarball (line 66) | def download_and_uncompress_tarball(tarball_url, dataset_dir):
function write_label_file (line 87) | def write_label_file(labels_to_class_names, dataset_dir,
function has_labels (line 103) | def has_labels(dataset_dir, filename=LABELS_FILENAME):
function read_label_file (line 116) | def read_label_file(dataset_dir, filename=LABELS_FILENAME):
FILE: model_zoo/models/slim/datasets/download_and_convert_cifar10.py
function _add_to_tfrecord (line 64) | def _add_to_tfrecord(filename, tfrecord_writer, offset=0):
function _get_output_filename (line 108) | def _get_output_filename(dataset_dir, split_name):
function _download_and_uncompress_dataset (line 121) | def _download_and_uncompress_dataset(dataset_dir):
function _clean_up_temporary_files (line 142) | def _clean_up_temporary_files(dataset_dir):
function run (line 156) | def run(dataset_dir):
FILE: model_zoo/models/slim/datasets/download_and_convert_flowers.py
class ImageReader (line 52) | class ImageReader(object):
method __init__ (line 55) | def __init__(self):
method read_image_dims (line 60) | def read_image_dims(self, sess, image_data):
method decode_jpeg (line 64) | def decode_jpeg(self, sess, image_data):
function _get_filenames_and_classes (line 72) | def _get_filenames_and_classes(dataset_dir):
function _get_dataset_filename (line 101) | def _get_dataset_filename(dataset_dir, split_name, shard_id):
function _convert_dataset (line 107) | def _convert_dataset(split_name, filenames, class_names_to_ids, dataset_...
function _clean_up_temporary_files (line 153) | def _clean_up_temporary_files(dataset_dir):
function _dataset_exists (line 167) | def _dataset_exists(dataset_dir):
function run (line 177) | def run(dataset_dir):
FILE: model_zoo/models/slim/datasets/download_and_convert_mnist.py
function _extract_images (line 64) | def _extract_images(filename, num_images):
function _extract_labels (line 84) | def _extract_labels(filename, num_labels):
function _add_to_tfrecord (line 102) | def _add_to_tfrecord(data_filename, labels_filename, num_images,
function _get_output_filename (line 132) | def _get_output_filename(dataset_dir, split_name):
function _download_dataset (line 145) | def _download_dataset(dataset_dir):
function _clean_up_temporary_files (line 172) | def _clean_up_temporary_files(dataset_dir):
function run (line 186) | def run(dataset_dir):
FILE: model_zoo/models/slim/datasets/flowers.py
function get_split (line 44) | def get_split(split_name, dataset_dir, file_pattern=None, reader=None):
FILE: model_zoo/models/slim/datasets/imagenet.py
function create_readable_names_for_imagenet_labels (line 62) | def create_readable_names_for_imagenet_labels():
function get_split (line 118) | def get_split(split_name, dataset_dir, file_pattern=None, reader=None):
FILE: model_zoo/models/slim/datasets/mnist.py
function get_split (line 44) | def get_split(split_name, dataset_dir, file_pattern=None, reader=None):
FILE: model_zoo/models/slim/deployment/model_deploy.py
function create_clones (line 145) | def create_clones(config, model_fn, args=None, kwargs=None):
function _gather_clone_loss (line 200) | def _gather_clone_loss(clone, num_clones, regularization_losses):
function _optimize_clone (line 243) | def _optimize_clone(optimizer, clone, num_clones, regularization_losses,
function optimize_clones (line 269) | def optimize_clones(clones, optimizer,
function deploy (line 314) | def deploy(config,
function _sum_clones_gradients (line 419) | def _sum_clones_gradients(clone_grads):
function _add_gradients_summaries (line 452) | def _add_gradients_summaries(grads_and_vars):
class DeploymentConfig (line 479) | class DeploymentConfig(object):
method __init__ (line 487) | def __init__(self,
method num_clones (line 540) | def num_clones(self):
method clone_on_cpu (line 544) | def clone_on_cpu(self):
method replica_id (line 548) | def replica_id(self):
method num_replicas (line 552) | def num_replicas(self):
method num_ps_tasks (line 556) | def num_ps_tasks(self):
method ps_device (line 560) | def ps_device(self):
method worker_device (line 564) | def worker_device(self):
method caching_device (line 567) | def caching_device(self):
method clone_device (line 580) | def clone_device(self, clone_index):
method clone_scope (line 604) | def clone_scope(self, clone_index):
method optimizer_device (line 623) | def optimizer_device(self):
method inputs_device (line 634) | def inputs_device(self):
method variables_device (line 646) | def variables_device(self):
FILE: model_zoo/models/slim/deployment/model_deploy_test.py
class DeploymentConfigTest (line 29) | class DeploymentConfigTest(tf.test.TestCase):
method testDefaults (line 31) | def testDefaults(self):
method testCPUonly (line 42) | def testCPUonly(self):
method testMultiGPU (line 52) | def testMultiGPU(self):
method testPS (line 64) | def testPS(self):
method testMultiGPUPS (line 88) | def testMultiGPUPS(self):
method testReplicasPS (line 103) | def testReplicasPS(self):
method testReplicasMultiGPUPS (line 115) | def testReplicasMultiGPUPS(self):
method testVariablesPS (line 130) | def testVariablesPS(self):
function LogisticClassifier (line 149) | def LogisticClassifier(inputs, labels, scope=None, reuse=None):
function BatchNormClassifier (line 158) | def BatchNormClassifier(inputs, labels, scope=None, reuse=None):
class CreatecloneTest (line 169) | class CreatecloneTest(tf.test.TestCase):
method setUp (line 171) | def setUp(self):
method testCreateLogisticClassifier (line 183) | def testCreateLogisticClassifier(self):
method testCreateSingleclone (line 209) | def testCreateSingleclone(self):
method testCreateMulticlone (line 235) | def testCreateMulticlone(self):
method testCreateOnecloneWithPS (line 263) | def testCreateOnecloneWithPS(self):
method testCreateMulticloneWithPS (line 288) | def testCreateMulticloneWithPS(self):
class OptimizeclonesTest (line 316) | class OptimizeclonesTest(tf.test.TestCase):
method setUp (line 318) | def setUp(self):
method testCreateLogisticClassifier (line 330) | def testCreateLogisticClassifier(self):
method testCreateSingleclone (line 356) | def testCreateSingleclone(self):
method testCreateMulticlone (line 382) | def testCreateMulticlone(self):
method testCreateMulticloneCPU (line 409) | def testCreateMulticloneCPU(self):
method testCreateOnecloneWithPS (line 437) | def testCreateOnecloneWithPS(self):
class DeployTest (line 465) | class DeployTest(tf.test.TestCase):
method setUp (line 467) | def setUp(self):
method testLocalTrainOp (line 479) | def testLocalTrainOp(self):
method testNoSummariesOnGPU (line 526) | def testNoSummariesOnGPU(self):
method testNoSummariesOnGPUForEvals (line 545) | def testNoSummariesOnGPUForEvals(self):
FILE: model_zoo/models/slim/download_and_convert_data.py
function main (line 56) | def main(_):
FILE: model_zoo/models/slim/eval_image_classifier.py
function main (line 85) | def main(_):
FILE: model_zoo/models/slim/nets/alexnet.py
function alexnet_v2_arg_scope (line 45) | def alexnet_v2_arg_scope(weight_decay=0.0005):
function alexnet_v2 (line 55) | def alexnet_v2(inputs,
FILE: model_zoo/models/slim/nets/alexnet_test.py
class AlexnetV2Test (line 27) | class AlexnetV2Test(tf.test.TestCase):
method testBuild (line 29) | def testBuild(self):
method testFullyConvolutional (line 40) | def testFullyConvolutional(self):
method testEndPoints (line 51) | def testEndPoints(self):
method testModelVariables (line 72) | def testModelVariables(self):
method testEvaluation (line 99) | def testEvaluation(self):
method testTrainEvalWithReuse (line 111) | def testTrainEvalWithReuse(self):
method testForward (line 134) | def testForward(self):
FILE: model_zoo/models/slim/nets/cifarnet.py
function cifarnet (line 28) | def cifarnet(images, num_classes=10, is_training=False,
function cifarnet_arg_scope (line 93) | def cifarnet_arg_scope(weight_decay=0.004):
FILE: model_zoo/models/slim/nets/inception_resnet_v2.py
function block35 (line 33) | def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=...
function block17 (line 54) | def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=...
function block8 (line 74) | def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=N...
function inception_resnet_v2 (line 94) | def inception_resnet_v2(inputs, num_classes=1001, is_training=True,
function inception_resnet_v2_arg_scope (line 254) | def inception_resnet_v2_arg_scope(weight_decay=0.00004,
FILE: model_zoo/models/slim/nets/inception_resnet_v2_test.py
class InceptionTest (line 25) | class InceptionTest(tf.test.TestCase):
method testBuildLogits (line 27) | def testBuildLogits(self):
method testBuildEndPoints (line 38) | def testBuildEndPoints(self):
method testVariablesSetDevice (line 57) | def testVariablesSetDevice(self):
method testHalfSizeImages (line 73) | def testHalfSizeImages(self):
method testUnknownBatchSize (line 87) | def testUnknownBatchSize(self):
method testEvaluation (line 102) | def testEvaluation(self):
method testTrainEvalWithReuse (line 116) | def testTrainEvalWithReuse(self):
FILE: model_zoo/models/slim/nets/inception_utils.py
function inception_arg_scope (line 32) | def inception_arg_scope(weight_decay=0.00004,
FILE: model_zoo/models/slim/nets/inception_v1.py
function inception_v1_base (line 29) | def inception_v1_base(inputs,
function inception_v1 (line 248) | def inception_v1(inputs,
FILE: model_zoo/models/slim/nets/inception_v1_test.py
class InceptionV1Test (line 29) | class InceptionV1Test(tf.test.TestCase):
method testBuildClassificationNetwork (line 31) | def testBuildClassificationNetwork(self):
method testBuildBaseNetwork (line 45) | def testBuildBaseNetwork(self):
method testBuildOnlyUptoFinalEndpoint (line 61) | def testBuildOnlyUptoFinalEndpoint(self):
method testBuildAndCheckAllEndPointsUptoMixed5c (line 78) | def testBuildAndCheckAllEndPointsUptoMixed5c(self):
method testModelHasExpectedNumberOfParameters (line 109) | def testModelHasExpectedNumberOfParameters(self):
method testHalfSizeImages (line 119) | def testHalfSizeImages(self):
method testUnknownImageShape (line 129) | def testUnknownImageShape(self):
method testUnknowBatchSize (line 147) | def testUnknowBatchSize(self):
method testEvaluation (line 164) | def testEvaluation(self):
method testTrainEvalWithReuse (line 179) | def testTrainEvalWithReuse(self):
method testLogitsNotSqueezed (line 196) | def testLogitsNotSqueezed(self):
FILE: model_zoo/models/slim/nets/inception_v2.py
function inception_v2_base (line 29) | def inception_v2_base(inputs,
function inception_v2 (line 416) | def inception_v2(inputs,
function _reduced_kernel_size_for_small_input (line 489) | def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
FILE: model_zoo/models/slim/nets/inception_v2_test.py
class InceptionV2Test (line 29) | class InceptionV2Test(tf.test.TestCase):
method testBuildClassificationNetwork (line 31) | def testBuildClassificationNetwork(self):
method testBuildBaseNetwork (line 45) | def testBuildBaseNetwork(self):
method testBuildOnlyUptoFinalEndpoint (line 61) | def testBuildOnlyUptoFinalEndpoint(self):
method testBuildAndCheckAllEndPointsUptoMixed5c (line 77) | def testBuildAndCheckAllEndPointsUptoMixed5c(self):
method testModelHasExpectedNumberOfParameters (line 106) | def testModelHasExpectedNumberOfParameters(self):
method testBuildEndPointsWithDepthMultiplierLessThanOne (line 116) | def testBuildEndPointsWithDepthMultiplierLessThanOne(self):
method testBuildEndPointsWithDepthMultiplierGreaterThanOne (line 136) | def testBuildEndPointsWithDepthMultiplierGreaterThanOne(self):
method testRaiseValueErrorWithInvalidDepthMultiplier (line 156) | def testRaiseValueErrorWithInvalidDepthMultiplier(self):
method testHalfSizeImages (line 167) | def testHalfSizeImages(self):
method testUnknownImageShape (line 181) | def testUnknownImageShape(self):
method testUnknowBatchSize (line 199) | def testUnknowBatchSize(self):
method testEvaluation (line 216) | def testEvaluation(self):
method testTrainEvalWithReuse (line 231) | def testTrainEvalWithReuse(self):
method testLogitsNotSqueezed (line 248) | def testLogitsNotSqueezed(self):
FILE: model_zoo/models/slim/nets/inception_v3.py
function inception_v3_base (line 29) | def inception_v3_base(inputs,
function inception_v3 (line 419) | def inception_v3(inputs,
function _reduced_kernel_size_for_small_input (line 529) | def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
FILE: model_zoo/models/slim/nets/inception_v3_test.py
class InceptionV3Test (line 29) | class InceptionV3Test(tf.test.TestCase):
method testBuildClassificationNetwork (line 31) | def testBuildClassificationNetwork(self):
method testBuildBaseNetwork (line 45) | def testBuildBaseNetwork(self):
method testBuildOnlyUptoFinalEndpoint (line 62) | def testBuildOnlyUptoFinalEndpoint(self):
method testBuildAndCheckAllEndPointsUptoMixed7c (line 80) | def testBuildAndCheckAllEndPointsUptoMixed7c(self):
method testModelHasExpectedNumberOfParameters (line 112) | def testModelHasExpectedNumberOfParameters(self):
method testBuildEndPoints (line 122) | def testBuildEndPoints(self):
method testBuildEndPointsWithDepthMultiplierLessThanOne (line 146) | def testBuildEndPointsWithDepthMultiplierLessThanOne(self):
method testBuildEndPointsWithDepthMultiplierGreaterThanOne (line 166) | def testBuildEndPointsWithDepthMultiplierGreaterThanOne(self):
method testRaiseValueErrorWithInvalidDepthMultiplier (line 186) | def testRaiseValueErrorWithInvalidDepthMultiplier(self):
method testHalfSizeImages (line 197) | def testHalfSizeImages(self):
method testUnknownImageShape (line 211) | def testUnknownImageShape(self):
method testUnknowBatchSize (line 228) | def testUnknowBatchSize(self):
method testEvaluation (line 245) | def testEvaluation(self):
method testTrainEvalWithReuse (line 260) | def testTrainEvalWithReuse(self):
method testLogitsNotSqueezed (line 278) | def testLogitsNotSqueezed(self):
FILE: model_zoo/models/slim/nets/inception_v4.py
function block_inception_a (line 34) | def block_inception_a(inputs, scope=None, reuse=None):
function block_reduction_a (line 55) | def block_reduction_a(inputs, scope=None, reuse=None):
function block_inception_b (line 75) | def block_inception_b(inputs, scope=None, reuse=None):
function block_reduction_b (line 99) | def block_reduction_b(inputs, scope=None, reuse=None):
function block_inception_c (line 121) | def block_inception_c(inputs, scope=None, reuse=None):
function inception_v4_base (line 147) | def inception_v4_base(inputs, final_endpoint='Mixed_7d', scope=None):
function inception_v4 (line 257) | def inception_v4(inputs, num_classes=1001, is_training=True,
FILE: model_zoo/models/slim/nets/inception_v4_test.py
class InceptionTest (line 25) | class InceptionTest(tf.test.TestCase):
method testBuildLogits (line 27) | def testBuildLogits(self):
method testBuildWithoutAuxLogits (line 46) | def testBuildWithoutAuxLogits(self):
method testAllEndPointsShapes (line 58) | def testAllEndPointsShapes(self):
method testBuildBaseNetwork (line 103) | def testBuildBaseNetwork(self):
method testBuildOnlyUpToFinalEndpoint (line 121) | def testBuildOnlyUpToFinalEndpoint(self):
method testVariablesSetDevice (line 139) | def testVariablesSetDevice(self):
method testHalfSizeImages (line 154) | def testHalfSizeImages(self):
method testUnknownBatchSize (line 167) | def testUnknownBatchSize(self):
method testEvaluation (line 182) | def testEvaluation(self):
method testTrainEvalWithReuse (line 196) | def testTrainEvalWithReuse(self):
FILE: model_zoo/models/slim/nets/lenet.py
function lenet (line 26) | def lenet(images, num_classes=10, is_training=False,
function lenet_arg_scope (line 79) | def lenet_arg_scope(weight_decay=0.0):
FILE: model_zoo/models/slim/nets/nets_factory.py
function get_network_fn (line 81) | def get_network_fn(name, num_classes, weight_decay=0.0, is_training=False):
FILE: model_zoo/models/slim/nets/nets_factory_test.py
class NetworksTest (line 28) | class NetworksTest(tf.test.TestCase):
method testGetNetworkFn (line 30) | def testGetNetworkFn(self):
FILE: model_zoo/models/slim/nets/overfeat.py
function overfeat_arg_scope (line 40) | def overfeat_arg_scope(weight_decay=0.0005):
function overfeat (line 50) | def overfeat(inputs,
FILE: model_zoo/models/slim/nets/overfeat_test.py
class OverFeatTest (line 27) | class OverFeatTest(tf.test.TestCase):
method testBuild (line 29) | def testBuild(self):
method testFullyConvolutional (line 40) | def testFullyConvolutional(self):
method testEndPoints (line 51) | def testEndPoints(self):
method testModelVariables (line 72) | def testModelVariables(self):
method testEvaluation (line 99) | def testEvaluation(self):
method testTrainEvalWithReuse (line 111) | def testTrainEvalWithReuse(self):
method testForward (line 134) | def testForward(self):
FILE: model_zoo/models/slim/nets/resnet_utils.py
class Block (line 46) | class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):
function subsample (line 59) | def subsample(inputs, factor, scope=None):
function conv2d_same (line 77) | def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=...
function stack_blocks_dense (line 126) | def stack_blocks_dense(net, blocks, output_stride=None,
function resnet_arg_scope (line 209) | def resnet_arg_scope(weight_decay=0.0001,
FILE: model_zoo/models/slim/nets/resnet_v1.py
function bottleneck (line 69) | def bottleneck(inputs, depth, depth_bottleneck, stride, rate=1,
function resnet_v1 (line 115) | def resnet_v1(inputs,
function resnet_v1_50 (line 208) | def resnet_v1_50(inputs,
function resnet_v1_101 (line 231) | def resnet_v1_101(inputs,
function resnet_v1_152 (line 254) | def resnet_v1_152(inputs,
function resnet_v1_200 (line 276) | def resnet_v1_200(inputs,
FILE: model_zoo/models/slim/nets/resnet_v1_test.py
function create_test_input (line 30) | def create_test_input(batch_size, height, width, channels):
class ResnetUtilsTest (line 56) | class ResnetUtilsTest(tf.test.TestCase):
method testSubsampleThreeByThree (line 58) | def testSubsampleThreeByThree(self):
method testSubsampleFourByFour (line 65) | def testSubsampleFourByFour(self):
method testConv2DSameEven (line 72) | def testConv2DSameEven(self):
method testConv2DSameOdd (line 113) | def testConv2DSameOdd(self):
method _resnet_plain (line 154) | def _resnet_plain(self, inputs, blocks, output_stride=None, scope=None):
method testEndPointsV1 (line 162) | def testEndPointsV1(self):
method _stack_blocks_nondense (line 187) | def _stack_blocks_nondense(self, net, blocks):
method _atrousValues (line 201) | def _atrousValues(self, bottleneck):
method testAtrousValuesBottleneck (line 247) | def testAtrousValuesBottleneck(self):
class ResnetCompleteNetworkTest (line 251) | class ResnetCompleteNetworkTest(tf.test.TestCase):
method _resnet_small (line 254) | def _resnet_small(self,
method testClassificationEndPoints (line 282) | def testClassificationEndPoints(self):
method testClassificationShapes (line 296) | def testClassificationShapes(self):
method testFullyConvolutionalEndpointShapes (line 313) | def testFullyConvolutionalEndpointShapes(self):
method testRootlessFullyConvolutionalEndpointShapes (line 330) | def testRootlessFullyConvolutionalEndpointShapes(self):
method testAtrousFullyConvolutionalEndpointShapes (line 348) | def testAtrousFullyConvolutionalEndpointShapes(self):
method testAtrousFullyConvolutionalValues (line 368) | def testAtrousFullyConvolutionalValues(self):
method testUnknownBatchSize (line 395) | def testUnknownBatchSize(self):
method testFullyConvolutionalUnknownHeightWidth (line 414) | def testFullyConvolutionalUnknownHeightWidth(self):
method testAtrousFullyConvolutionalUnknownHeightWidth (line 429) | def testAtrousFullyConvolutionalUnknownHeightWidth(self):
FILE: model_zoo/models/slim/nets/resnet_v2.py
function bottleneck (line 64) | def bottleneck(inputs, depth, depth_bottleneck, stride, rate=1,
function resnet_v2 (line 113) | def resnet_v2(inputs,
function resnet_v2_50 (line 217) | def resnet_v2_50(inputs,
function resnet_v2_101 (line 239) | def resnet_v2_101(inputs,
function resnet_v2_152 (line 261) | def resnet_v2_152(inputs,
function resnet_v2_200 (line 283) | def resnet_v2_200(inputs,
FILE: model_zoo/models/slim/nets/resnet_v2_test.py
function create_test_input (line 30) | def create_test_input(batch_size, height, width, channels):
class ResnetUtilsTest (line 56) | class ResnetUtilsTest(tf.test.TestCase):
method testSubsampleThreeByThree (line 58) | def testSubsampleThreeByThree(self):
method testSubsampleFourByFour (line 65) | def testSubsampleFourByFour(self):
method testConv2DSameEven (line 72) | def testConv2DSameEven(self):
method testConv2DSameOdd (line 113) | def testConv2DSameOdd(self):
method _resnet_plain (line 154) | def _resnet_plain(self, inputs, blocks, output_stride=None, scope=None):
method testEndPointsV2 (line 162) | def testEndPointsV2(self):
method _stack_blocks_nondense (line 187) | def _stack_blocks_nondense(self, net, blocks):
method _atrousValues (line 201) | def _atrousValues(self, bottleneck):
method testAtrousValuesBottleneck (line 247) | def testAtrousValuesBottleneck(self):
class ResnetCompleteNetworkTest (line 251) | class ResnetCompleteNetworkTest(tf.test.TestCase):
method _resnet_small (line 254) | def _resnet_small(self,
method testClassificationEndPoints (line 282) | def testClassificationEndPoints(self):
method testClassificationShapes (line 296) | def testClassificationShapes(self):
method testFullyConvolutionalEndpointShapes (line 313) | def testFullyConvolutionalEndpointShapes(self):
method testRootlessFullyConvolutionalEndpointShapes (line 330) | def testRootlessFullyConvolutionalEndpointShapes(self):
method testAtrousFullyConvolutionalEndpointShapes (line 348) | def testAtrousFullyConvolutionalEndpointShapes(self):
method testAtrousFullyConvolutionalValues (line 368) | def testAtrousFullyConvolutionalValues(self):
method testUnknownBatchSize (line 397) | def testUnknownBatchSize(self):
method testFullyConvolutionalUnknownHeightWidth (line 416) | def testFullyConvolutionalUnknownHeightWidth(self):
method testAtrousFullyConvolutionalUnknownHeightWidth (line 432) | def testAtrousFullyConvolutionalUnknownHeightWidth(self):
FILE: model_zoo/models/slim/nets/vgg.py
function vgg_arg_scope (line 49) | def vgg_arg_scope(weight_decay=0.0005):
function vgg_a (line 66) | def vgg_a(inputs,
function vgg_16 (line 125) | def vgg_16(inputs,
function vgg_19 (line 184) | def vgg_19(inputs,
FILE: model_zoo/models/slim/nets/vgg_test.py
class VGGATest (line 27) | class VGGATest(tf.test.TestCase):
method testBuild (line 29) | def testBuild(self):
method testFullyConvolutional (line 40) | def testFullyConvolutional(self):
method testEndPoints (line 51) | def testEndPoints(self):
method testModelVariables (line 77) | def testModelVariables(self):
method testEvaluation (line 110) | def testEvaluation(self):
method testTrainEvalWithReuse (line 122) | def testTrainEvalWithReuse(self):
method testForward (line 145) | def testForward(self):
class VGG16Test (line 156) | class VGG16Test(tf.test.TestCase):
method testBuild (line 158) | def testBuild(self):
method testFullyConvolutional (line 169) | def testFullyConvolutional(self):
method testEndPoints (line 180) | def testEndPoints(self):
method testModelVariables (line 211) | def testModelVariables(self):
method testEvaluation (line 254) | def testEvaluation(self):
method testTrainEvalWithReuse (line 266) | def testTrainEvalWithReuse(self):
method testForward (line 289) | def testForward(self):
class VGG19Test (line 300) | class VGG19Test(tf.test.TestCase):
method testBuild (line 302) | def testBuild(self):
method testFullyConvolutional (line 313) | def testFullyConvolutional(self):
method testEndPoints (line 324) | def testEndPoints(self):
method testModelVariables (line 359) | def testModelVariables(self):
method testEvaluation (line 409) | def testEvaluation(self):
method testTrainEvalWithReuse (line 421) | def testTrainEvalWithReuse(self):
method testForward (line 444) | def testForward(self):
FILE: model_zoo/models/slim/preprocessing/cifarnet_preprocessing.py
function preprocess_for_train (line 30) | def preprocess_for_train(image,
function preprocess_for_eval (line 73) | def preprocess_for_eval(image, output_height, output_width):
function preprocess_image (line 98) | def preprocess_image(image, output_height, output_width, is_training=Fal...
FILE: model_zoo/models/slim/preprocessing/inception_preprocessing.py
function apply_with_random_selector (line 26) | def apply_with_random_selector(x, func, num_cases):
function distort_color (line 45) | def distort_color(image, color_ordering=0, fast_mode=True, scope=None):
function distorted_bounding_box_crop (line 99) | def distorted_bounding_box_crop(image,
function preprocess_for_train (line 156) | def preprocess_for_train(image, height, width, bbox,
function preprocess_for_eval (line 237) | def preprocess_for_eval(image, height, width,
function preprocess_image (line 278) | def preprocess_image(image, height, width,
FILE: model_zoo/models/slim/preprocessing/lenet_preprocessing.py
function preprocess_image (line 26) | def preprocess_image(image, output_height, output_width, is_training):
FILE: model_zoo/models/slim/preprocessing/preprocessing_factory.py
function get_preprocessing (line 31) | def get_preprocessing(name, is_training=False):
FILE: model_zoo/models/slim/preprocessing/vgg_preprocessing.py
function _crop (line 49) | def _crop(image, offset_height, offset_width, crop_height, crop_width):
function _random_crop (line 94) | def _random_crop(image_list, crop_height, crop_width):
function _central_crop (line 178) | def _central_crop(image_list, crop_height, crop_width):
function _mean_image_subtraction (line 203) | def _mean_image_subtraction(image, means):
function _smallest_size_at_least (line 236) | def _smallest_size_at_least(height, width, smallest_side):
function _aspect_preserving_resize (line 266) | def _aspect_preserving_resize(image, smallest_side):
function preprocess_for_train (line 291) | def preprocess_for_train(image,
function preprocess_for_eval (line 324) | def preprocess_for_eval(image, output_height, output_width, resize_side):
function preprocess_image (line 343) | def preprocess_image(image, output_height, output_width, is_training=False,
FILE: model_zoo/models/slim/train_image_classifier.py
function _configure_learning_rate (line 224) | def _configure_learning_rate(num_samples_per_epoch, global_step):
function _configure_optimizer (line 264) | def _configure_optimizer(learning_rate):
function _add_variables_summaries (line 316) | def _add_variables_summaries(learning_rate):
function _get_init_fn (line 324) | def _get_init_fn():
function _get_variables_to_train (line 373) | def _get_variables_to_train():
function main (line 391) | def main(_):
FILE: model_zoo/models/street/cc/rnn_ops.cc
type tensorflow (line 37) | namespace tensorflow {
function Status (line 43) | Status AreDimsEqual(int dim1, int dim2, const string& message) {
class VariableLSTMOp (line 54) | class VariableLSTMOp : public OpKernel {
method VariableLSTMOp (line 56) | explicit VariableLSTMOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
method Compute (line 63) | void Compute(OpKernelContext* ctx) override {
class VariableLSTMGradOp (line 271) | class VariableLSTMGradOp : public OpKernel {
method VariableLSTMGradOp (line 273) | explicit VariableLSTMGradOp(OpKernelConstruction* ctx) : OpKernel(ct...
method Compute (line 275) | void Compute(OpKernelContext* ctx) override {
FILE: model_zoo/models/street/python/decoder.py
class Decoder (line 38) | class Decoder(object):
method __init__ (line 41) | def __init__(self, filename):
method SoftmaxEval (line 62) | def SoftmaxEval(self, sess, model, num_steps):
method StringFromCTC (line 123) | def StringFromCTC(self, ctc_labels, merge_dups, null_label):
method _InitializeDecoder (line 180) | def _InitializeDecoder(self, filename):
method _CodesFromCTC (line 206) | def _CodesFromCTC(self, ctc_labels, merge_dups, null_label):
FILE: model_zoo/models/street/python/decoder_test.py
function _testdata (line 22) | def _testdata(filename):
class DecoderTest (line 26) | class DecoderTest(tf.test.TestCase):
method testCodesFromCTC (line 28) | def testCodesFromCTC(self):
method testStringFromCTC (line 46) | def testStringFromCTC(self):
FILE: model_zoo/models/street/python/errorcounter.py
function CountWordErrors (line 38) | def CountWordErrors(ocr_text, truth_text):
function CountErrors (line 52) | def CountErrors(ocr_text, truth_text):
function AddErrors (line 73) | def AddErrors(counts1, counts2):
function ComputeErrorRates (line 87) | def ComputeErrorRates(label_counts, word_counts, seq_errors, num_seqs):
function ComputeErrorRate (line 107) | def ComputeErrorRate(error_count, truth_count):
FILE: model_zoo/models/street/python/errorcounter_test.py
class ErrorcounterTest (line 20) | class ErrorcounterTest(tf.test.TestCase):
method testComputeErrorRate (line 22) | def testComputeErrorRate(self):
method testCountErrors (line 36) | def testCountErrors(self):
method testCountWordErrors (line 74) | def testCountWordErrors(self):
FILE: model_zoo/models/street/python/nn_ops.py
function rnn_helper (line 25) | def rnn_helper(inp,
function _variable_lstm_shape (line 100) | def _variable_lstm_shape(op):
function _variable_lstm_grad (line 120) | def _variable_lstm_grad(op, act_grad, gate_grad, mem_grad):
function lstm_layer (line 133) | def lstm_layer(inp,
FILE: model_zoo/models/street/python/shapes.py
function rotate_dimensions (line 30) | def rotate_dimensions(num_dims, src_dim, dest_dim):
function transposing_reshape (line 55) | def transposing_reshape(tensor,
function tensor_dim (line 179) | def tensor_dim(tensor, dim):
function tensor_shape (line 203) | def tensor_shape(tensor):
FILE: model_zoo/models/street/python/shapes_test.py
function _rand (line 22) | def _rand(*size):
class ShapesTest (line 26) | class ShapesTest(tf.test.TestCase):
method __init__ (line 29) | def __init__(self, other):
method testReshapeTile (line 36) | def testReshapeTile(self):
method testReshapeDepth (line 49) | def testReshapeDepth(self):
class DataTest (line 63) | class DataTest(tf.test.TestCase):
method testTransposingReshape_2_2_3_2_1 (line 68) | def testTransposingReshape_2_2_3_2_1(self):
method testTransposingReshape_2_2_3_2_3 (line 88) | def testTransposingReshape_2_2_3_2_3(self):
method testTransposingReshape_2_2_3_2_2 (line 108) | def testTransposingReshape_2_2_3_2_2(self):
method testTransposingReshape_2_2_3_1_2 (line 128) | def testTransposingReshape_2_2_3_1_2(self):
method testTransposingReshape_2_2_3_3_2 (line 149) | def testTransposingReshape_2_2_3_3_2(self):
FILE: model_zoo/models/street/python/vgsl_eval.py
function main (line 41) | def main(argv):
FILE: model_zoo/models/street/python/vgsl_input.py
function ImageInput (line 30) | def ImageInput(input_pattern, num_threads, shape, using_ctc, reader=None):
function _ReadExamples (line 86) | def _ReadExamples(filename_queue, shape, using_ctc, reader=None):
function _ImageProcessing (line 134) | def _ImageProcessing(image_buffer, shape):
FILE: model_zoo/models/street/python/vgsl_model.py
function Train (line 40) | def Train(train_dir,
function Eval (line 111) | def Eval(train_dir,
function InitNetwork (line 183) | def InitNetwork(input_pattern,
class VGSLImageModel (line 229) | class VGSLImageModel(object):
method __init__ (line 233) | def __init__(self, mode, model_spec, initial_learning_rate,
method Build (line 274) | def Build(self, input_pattern, input_spec, model_spec, output_spec,
method TrainAStep (line 324) | def TrainAStep(self, sess):
method Restore (line 335) | def Restore(self, checkpoint_path, sess):
method RunAStep (line 347) | def RunAStep(self, sess):
method _AddOutputs (line 357) | def _AddOutputs(self, prev_layer, out_dims, out_func, num_classes):
method _AddOutputLayer (line 382) | def _AddOutputLayer(self, prev_layer, out_dims, out_func, num_classes):
method _AddLossFunction (line 425) | def _AddLossFunction(self, logits, height_in, out_dims, out_func):
method _AddOptimizer (line 464) | def _AddOptimizer(self, optimizer_type):
function _PadLabels3d (line 493) | def _PadLabels3d(logits, labels):
function _PadLabels2d (line 514) | def _PadLabels2d(logits_size, labels):
function _ParseInputSpec (line 537) | def _ParseInputSpec(input_spec):
function _ParseOutputSpec (line 562) | def _ParseOutputSpec(output_spec):
function _AddRateToSummary (line 588) | def _AddRateToSummary(tag, rate, step, sw):
FILE: model_zoo/models/street/python/vgsl_model_test.py
function _testdata (line 24) | def _testdata(filename):
function _rand (line 28) | def _rand(*size):
class VgslModelTest (line 32) | class VgslModelTest(tf.test.TestCase):
method testParseInputSpec (line 34) | def testParseInputSpec(self):
method testParseOutputSpec (line 49) | def testParseOutputSpec(self):
method testPadLabels2d (line 68) | def testPadLabels2d(self):
method testPadLabels3d (line 94) | def testPadLabels3d(self):
method testEndToEndSizes0d (line 172) | def testEndToEndSizes0d(self):
method testEndToEndSizes1dCTC (line 196) | def testEndToEndSizes1dCTC(self):
method testEndToEndSizes1dFixed (line 220) | def testEndToEndSizes1dFixed(self):
FILE: model_zoo/models/street/python/vgsl_train.py
function main (line 45) | def main(argv):
FILE: model_zoo/models/street/python/vgslspecs.py
class VGSLSpecs (line 31) | class VGSLSpecs(object):
method __init__ (line 34) | def __init__(self, widths, heights, is_training):
method Build (line 62) | def Build(self, prev_layer, model_str):
method GetLengths (line 125) | def GetLengths(self, dim=2, factor=1):
method BuildFromString (line 155) | def BuildFromString(self, prev_layer, index):
method AddSeries (line 177) | def AddSeries(self, prev_layer, index):
method AddParallel (line 199) | def AddParallel(self, prev_layer, index):
method AddConvLayer (line 240) | def AddConvLayer(self, prev_layer, index):
method AddMaxPool (line 263) | def AddMaxPool(self, prev_layer, index):
method AddDropout (line 289) | def AddDropout(self, prev_layer, index):
method AddReShape (line 308) | def AddReShape(self, prev_layer, index):
method AddFCLayer (line 348) | def AddFCLayer(self, prev_layer, index):
method AddLSTMLayer (line 380) | def AddLSTMLayer(self, prev_layer, index):
method _LSTMLayer (line 421) | def _LSTMLayer(self, prev_layer, direction, dim, summarize, depth, name):
method _NonLinearity (line 484) | def _NonLinearity(self, code):
method _GetLayerName (line 504) | def _GetLayerName(self, op_str, index, name_str):
method _SkipWhitespace (line 520) | def _SkipWhitespace(self, index):
FILE: model_zoo/models/street/python/vgslspecs_test.py
function _rand (line 22) | def _rand(*size):
class VgslspecsTest (line 26) | class VgslspecsTest(tf.test.TestCase):
method __init__ (line 28) | def __init__(self, other):
method SetupInputs (line 34) | def SetupInputs(self):
method ExpectScaledSize (line 46) | def ExpectScaledSize(self, spec, target_shape, factor=1):
method testSameSizeConv (line 70) | def testSameSizeConv(self):
method testSameSizeLSTM (line 76) | def testSameSizeLSTM(self):
method testSameSizeParallel (line 82) | def testSameSizeParallel(self):
method testScalingOps (line 88) | def testScalingOps(self):
method testXReduction (line 94) | def testXReduction(self):
method testYReduction (line 99) | def testYReduction(self):
method testXYReduction (line 104) | def testXYReduction(self):
method testReshapeTile (line 110) | def testReshapeTile(self):
method testReshapeDepth (line 115) | def testReshapeDepth(self):
FILE: model_zoo/models/swivel/analogy.cc
function ReadVocab (line 64) | static std::unordered_map<std::string, int> ReadVocab(
function ReadQueries (line 83) | std::vector<AnalogyQuery> ReadQueries(
class AnalogyEvaluator (line 129) | class AnalogyEvaluator {
method AnalogyEvaluator (line 132) | AnalogyEvaluator(std::vector<AnalogyQuery>::const_iterator begin,
method GetNumCorrect (line 153) | int GetNumCorrect() const { return correct_; }
function main (line 220) | int main(int argc, char *argv[]) {
FILE: model_zoo/models/swivel/fastprep.cc
type cooc_t (line 84) | struct cooc_t {
function NextWord (line 95) | bool NextWord(std::ifstream &fin, std::string* word) {
function CreateVocabulary (line 135) | std::vector<std::string> CreateVocabulary(const std::string input_filename,
function ReadVocabulary (line 199) | std::vector<std::string> ReadVocabulary(const std::string vocab_filename) {
function WriteVocabulary (line 214) | void WriteVocabulary(const std::vector<std::string> &vocab,
class CoocBuffer (line 223) | class CoocBuffer {
class CoocCounter (line 394) | class CoocCounter {
method CoocCounter (line 396) | CoocCounter(const std::string &input_filename, const off_t start,
function WriteMarginals (line 507) | void WriteMarginals(const std::vector<double> &marginals,
function main (line 516) | int main(int argc, char *argv[]) {
FILE: model_zoo/models/swivel/glove_to_shards.py
function make_shard_files (line 61) | def make_shard_files(coocs, nshards, vocab_sz):
function main (line 134) | def main(_):
FILE: model_zoo/models/swivel/prep.py
function words (line 86) | def words(line):
function create_vocabulary (line 91) | def create_vocabulary(lines):
function write_vocab_and_sums (line 127) | def write_vocab_and_sums(vocab, sums, vocab_filename, sums_filename):
function compute_coocs (line 136) | def compute_coocs(lines, vocab):
function write_shards (line 216) | def write_shards(vocab, shardfiles):
function main (line 288) | def main(_):
FILE: model_zoo/models/swivel/swivel.py
function embeddings_with_init (line 94) | def embeddings_with_init(vocab_size, embedding_dim, name):
function count_matrix_input (line 102) | def count_matrix_input(filenames, submatrix_rows, submatrix_cols):
function read_marginals_file (line 142) | def read_marginals_file(filename):
function write_embedding_tensor_to_disk (line 148) | def write_embedding_tensor_to_disk(vocab_path, output_path, sess, embedd...
function write_embeddings_to_disk (line 161) | def write_embeddings_to_disk(config, model, sess):
class SwivelModel (line 180) | class SwivelModel(object):
method __init__ (line 183) | def __init__(self, config):
function main (line 289) | def main(_):
FILE: model_zoo/models/swivel/text2bin.py
function go (line 64) | def go(fhs):
FILE: model_zoo/models/swivel/vecs.py
class Vecs (line 20) | class Vecs(object):
method __init__ (line 21) | def __init__(self, vocab_filename, rows_filename, cols_filename=None):
method similarity (line 63) | def similarity(self, word1, word2):
method neighbors (line 72) | def neighbors(self, query):
method lookup (line 87) | def lookup(self, word):
FILE: model_zoo/models/swivel/wordsim.py
function evaluate (line 72) | def evaluate(lines):
FILE: model_zoo/models/syntaxnet/syntaxnet/affix.cc
type syntaxnet (line 33) | namespace syntaxnet {
function TermHash (line 42) | int TermHash(const string &term) {
function UnicodeSubstring (line 47) | static void UnicodeSubstring(const UnicodeText::const_iterator &start,
function Affix (line 131) | Affix *AffixTable::AddAffixesForWord(const char *word, size_t size) {
function Affix (line 189) | Affix *AffixTable::GetAffix(int id) const {
function string (line 197) | string AffixTable::AffixForm(int id) const {
function Affix (line 215) | Affix *AffixTable::AddNewAffix(const string &form, int length) {
function Affix (line 232) | Affix *AffixTable::FindAffix(const string &form) const {
FILE: model_zoo/models/syntaxnet/syntaxnet/affix.h
function namespace (line 33) | namespace syntaxnet {
function class (line 85) | class AffixTable {
FILE: model_zoo/models/syntaxnet/syntaxnet/arc_standard_transitions.cc
type syntaxnet (line 38) | namespace syntaxnet {
class ArcStandardTransitionState (line 40) | class ArcStandardTransitionState : public ParserTransitionState {
method ParserTransitionState (line 43) | ParserTransitionState *Clone() const override {
method Init (line 48) | void Init(ParserState *state) override { state->Push(-1); }
method AddParseToDocument (line 51) | void AddParseToDocument(const ParserState &state, bool rewrite_root_...
method IsTokenCorrect (line 68) | bool IsTokenCorrect(const ParserState &state, int index) const overr...
method string (line 73) | string ToString(const ParserState &state) const override {
class ArcStandardTransitionSystem (line 93) | class ArcStandardTransitionSystem : public ParserTransitionSystem {
type ParserActionType (line 96) | enum ParserActionType {
method ParserAction (line 103) | static ParserAction ShiftAction() { return SHIFT; }
method ParserAction (line 107) | static ParserAction LeftArcAction(int label) { return 1 + (label << ...
method ParserAction (line 111) | static ParserAction RightArcAction(int label) {
method ParserActionType (line 116) | static ParserActionType ActionType(ParserAction action) {
method Label (line 123) | static int Label(ParserAction action) {
method NumActionTypes (line 128) | int NumActionTypes() const override { return 3; }
method NumActions (line 131) | int NumActions(int num_labels) const override { return 1 + 2 * num_l...
method ParserAction (line 134) | ParserAction GetDefaultAction(const ParserState &state) const overri...
method ParserAction (line 144) | ParserAction GetNextGoldAction(const ParserState &state) const overr...
method DoneChildrenRightOf (line 174) | static bool DoneChildrenRightOf(const ParserState &state, int head) {
method IsAllowedAction (line 195) | bool IsAllowedAction(ParserAction action,
method IsAllowedShift (line 210) | bool IsAllowedShift(const ParserState &state) const {
method IsAllowedLeftArc (line 216) | bool IsAllowedLeftArc(const ParserState &state) const {
method IsAllowedRightArc (line 223) | bool IsAllowedRightArc(const ParserState &state) const {
method PerformActionWithoutHistory (line 230) | void PerformActionWithoutHistory(ParserAction action,
method PerformShift (line 247) | void PerformShift(ParserState *state) const {
method PerformLeftArc (line 255) | void PerformLeftArc(ParserState *state, int label) const {
method PerformRightArc (line 263) | void PerformRightArc(ParserState *state, int label) const {
method IsDeterministicState (line 273) | bool IsDeterministicState(const ParserState &state) const override {
method IsFinalState (line 279) | bool IsFinalState(const ParserState &state) const override {
method string (line 284) | string ActionAsString(ParserAction action,
method ParserTransitionState (line 298) | ParserTransitionState *NewTransitionState(bool training_mode) const ...
FILE: model_zoo/models/syntaxnet/syntaxnet/arc_standard_transitions_test.cc
type syntaxnet (line 32) | namespace syntaxnet {
class ArcStandardTransitionTest (line 34) | class ArcStandardTransitionTest : public ::testing::Test {
method ArcStandardTransitionTest (line 36) | ArcStandardTransitionTest()
method SetUpForDocument (line 42) | void SetUpForDocument(const Sentence &document) {
method ParserState (line 54) | ParserState *NewClonedState(Sentence *sentence) {
method GoldParse (line 63) | void GoldParse(Sentence *sentence) {
method DefaultParse (line 83) | void DefaultParse(Sentence *sentence) {
function TEST_F (line 103) | TEST_F(ArcStandardTransitionTest, SingleSentenceDocumentTest) {
FILE: model_zoo/models/syntaxnet/syntaxnet/base.h
type char32 (line 46) | typedef signed int char32;
FILE: model_zoo/models/syntaxnet/syntaxnet/beam_reader_ops.cc
type syntaxnet (line 56) | namespace syntaxnet {
type ParserStateWithHistory (line 60) | struct ParserStateWithHistory {
method ParserStateWithHistory (line 63) | explicit ParserStateWithHistory(const ParserState &s) : state(s.Clon...
method ParserStateWithHistory (line 67) | ParserStateWithHistory(const ParserStateWithHistory &next,
type BatchStateOptions (line 89) | struct BatchStateOptions {
class BeamState (line 118) | class BeamState {
type State (line 143) | enum State { ALIVE = 0, DYING = 1, DEAD = 2 }
method BeamState (line 145) | explicit BeamState(const BatchStateOptions &options) : options_(opti...
method Reset (line 147) | void Reset() {
method UpdateAllFinal (line 163) | void UpdateAllFinal() {
method Advance (line 184) | void Advance(const ScoreMatrixType &scores) {
method PopulateFeatureOutputs (line 233) | void PopulateFeatureOutputs(
method BeamSize (line 243) | int BeamSize() const { return slots_.size(); }
method IsAlive (line 245) | bool IsAlive() const { return state_ == ALIVE; }
method IsDead (line 247) | bool IsDead() const { return state_ == DEAD; }
method AllFinal (line 249) | bool AllFinal() const { return all_final_; }
method AdvanceSentence (line 280) | void AdvanceSentence() {
method AdvanceGold (line 291) | void AdvanceGold() {
method PruneBeam (line 306) | void PruneBeam() {
method MaybeInsertWithNewAction (line 324) | void MaybeInsertWithNewAction(const AgendaItem &item, const int slot,
method MaybeInsert (line 345) | void MaybeInsert(AgendaItem *item) {
class BatchState (line 366) | class BatchState {
method BatchState (line 368) | explicit BatchState(const BatchStateOptions &options)
method Init (line 373) | void Init(TaskContext *task_context) {
method ResetBeams (line 414) | void ResetBeams() {
method ResetOffsets (line 429) | void ResetOffsets() {
method AdvanceBeam (line 435) | void AdvanceBeam(const int beam_id,
method UpdateOffsets (line 445) | void UpdateOffsets() {
method PopulateFeatureOutputs (line 459) | tensorflow::Status PopulateFeatureOutputs(OpKernelContext *context) {
method GetOffset (line 499) | int GetOffset(const int step, const int beam_id) const {
method FeatureSize (line 503) | int FeatureSize() const { return features_.embedding_dims().size(); }
method NumActions (line 505) | int NumActions() const {
method BatchSize (line 509) | int BatchSize() const { return options_.batch_size; }
method BeamState (line 511) | const BeamState &Beam(const int i) const { return beams_[i]; }
method Epoch (line 513) | int Epoch() const { return epoch_; }
method string (line 515) | const string &ScoringType() const { return options_.scoring_type; }
class BeamParseReader (line 551) | class BeamParseReader : public OpKernel {
method BeamParseReader (line 553) | explicit BeamParseReader(OpKernelConstruction *context) : OpKernel(c...
method Compute (line 607) | void Compute(OpKernelContext *context) override {
class BeamParser (line 643) | class BeamParser : public OpKernel {
method BeamParser (line 645) | explicit BeamParser(OpKernelConstruction *context) : OpKernel(contex...
method Compute (line 657) | void Compute(OpKernelContext *context) override {
class BeamParserOutput (line 702) | class BeamParserOutput : public OpKernel {
method BeamParserOutput (line 704) | explicit BeamParserOutput(OpKernelConstruction *context) : OpKernel(...
method Compute (line 711) | void Compute(OpKernelContext *context) override {
class BeamEvalOutput (line 829) | class BeamEvalOutput : public OpKernel {
method BeamEvalOutput (line 831) | explicit BeamEvalOutput(OpKernelConstruction *context) : OpKernel(co...
method Compute (line 837) | void Compute(OpKernelContext *context) override {
method ComputeTokenAccuracy (line 873) | void ComputeTokenAccuracy(const ParserState &state,
FILE: model_zoo/models/syntaxnet/syntaxnet/beam_reader_ops_test.py
class ParsingReaderOpsTest (line 37) | class ParsingReaderOpsTest(test_util.TensorFlowTestCase):
method setUp (line 39) | def setUp(self):
method MakeGraph (line 59) | def MakeGraph(self,
method Train (line 102) | def Train(self, **kwargs):
method PathScores (line 166) | def PathScores(self, iterations, beam_size, max_steps, batch_size):
method testParseUntilNotAlive (line 191) | def testParseUntilNotAlive(self):
method testParseMomentum (line 201) | def testParseMomentum(self):
method testPathScoresAgree (line 209) | def testPathScoresAgree(self):
method testBatchPathScoresAgree (line 215) | def testBatchPathScoresAgree(self):
method testBatchOneStepPathScoresAgree (line 221) | def testBatchOneStepPathScoresAgree(self):
FILE: model_zoo/models/syntaxnet/syntaxnet/binary_segment_state.cc
type syntaxnet (line 22) | namespace syntaxnet {
function ParserTransitionState (line 24) | ParserTransitionState *BinarySegmentState::Clone() const {
function string (line 28) | string BinarySegmentState::ToString(const ParserState &state) const {
FILE: model_zoo/models/syntaxnet/syntaxnet/binary_segment_state.h
function namespace (line 22) | namespace syntaxnet {
FILE: model_zoo/models/syntaxnet/syntaxnet/binary_segment_state_test.cc
type syntaxnet (line 25) | namespace syntaxnet {
class BinarySegmentStateTest (line 27) | class BinarySegmentStateTest : public ::testing::Test {
method SetUp (line 29) | void SetUp() override {
function TEST_F (line 48) | TEST_F(BinarySegmentStateTest, AddStartLastStartNumStartsTest) {
function TEST_F (line 73) | TEST_F(BinarySegmentStateTest, AddParseToDocumentTest) {
function TEST_F (line 144) | TEST_F(BinarySegmentStateTest, SpaceDocumentTest) {
function TEST_F (line 172) | TEST_F(BinarySegmentStateTest, DocumentBeginWithSpaceTest) {
function TEST_F (line 208) | TEST_F(BinarySegmentStateTest, EmptyDocumentTest) {
FILE: model_zoo/models/syntaxnet/syntaxnet/binary_segment_transitions.cc
type syntaxnet (line 22) | namespace syntaxnet {
class BinarySegmentTransitionSystem (line 33) | class BinarySegmentTransitionSystem : public ParserTransitionSystem {
method BinarySegmentTransitionSystem (line 35) | BinarySegmentTransitionSystem() {}
method ParserTransitionState (line 36) | ParserTransitionState *NewTransitionState(bool train_mode) const ove...
type ParserActionType (line 41) | enum ParserActionType {
method StartAction (line 47) | static int StartAction() { return 0; }
method MergeAction (line 48) | static int MergeAction() { return 1; }
method ParserAction (line 51) | ParserAction GetDefaultAction(const ParserState &state) const overri...
method NumActionTypes (line 56) | int NumActionTypes() const override {
method NumActions (line 61) | int NumActions(int num_labels) const override {
method ParserAction (line 70) | ParserAction GetNextGoldAction(const ParserState &state) const overr...
method IsAllowedAction (line 78) | bool IsAllowedAction(
method PerformActionWithoutHistory (line 85) | void PerformActionWithoutHistory(
method BackOffToBestAllowableTransition (line 95) | bool BackOffToBestAllowableTransition() const override { return true; }
method IsDeterministicState (line 98) | bool IsDeterministicState(const ParserState &state) const override {
method IsFinalState (line 104) | bool IsFinalState(const ParserState &state) const override {
method string (line 109) | string ActionAsString(
method BinarySegmentState (line 115) | static BinarySegmentState *MutableTransitionState(ParserState *state) {
class OffsetFeatureLocator (line 125) | class OffsetFeatureLocator : public ParserIndexLocator<OffsetFeatureLo...
method UpdateArgs (line 129) | void UpdateArgs(const WorkspaceSet &workspaces, const ParserState &s...
class LastWordFeatureFunction (line 149) | class LastWordFeatureFunction : public ParserFeatureFunction {
method Setup (line 151) | void Setup(TaskContext *context) override {
method Init (line 155) | void Init(TaskContext *context) override {
method int64 (line 167) | int64 NumValues() const {
method string (line 172) | string GetFeatureValueName(FeatureValue value) const {
method FeatureValue (line 180) | FeatureValue Compute(const WorkspaceSet &workspaces, const ParserSta...
FILE: model_zoo/models/syntaxnet/syntaxnet/binary_segment_transitions_test.cc
type syntaxnet (line 25) | namespace syntaxnet {
class SegmentationTransitionTest (line 27) | class SegmentationTransitionTest : public ::testing::Test {
method SetUp (line 29) | void SetUp() override {
method AddInputToContext (line 68) | void AddInputToContext(const string &name,
method PrepareFeature (line 80) | void PrepareFeature(const string &feature_name, ParserState *state) {
method FeatureValue (line 92) | FeatureValue ComputeFeature(const ParserState &state) const {
method CheckStarts (line 98) | void CheckStarts(const ParserState &state, const vector<int> &target) {
function TEST_F (line 120) | TEST_F(SegmentationTransitionTest, GoldNextActionTest) {
function TEST_F (line 150) | TEST_F(SegmentationTransitionTest, DefaultActionTest) {
function TEST_F (line 174) | TEST_F(SegmentationTransitionTest, LastWordFeatureTest) {
FILE: model_zoo/models/syntaxnet/syntaxnet/char_properties.cc
type CharPropertyImplementation (line 89) | struct CharPropertyImplementation {
method CharPropertyImplementation (line 92) | CharPropertyImplementation() {
method AddChar (line 97) | void AddChar(char *buf, int len) {
method HoldsFor (line 125) | bool HoldsFor(const char *buf) const {
function CharProperty (line 266) | const CharProperty *CharProperty::Lookup(const char *subclass) {
function string (line 286) | string CharProperty::UnicodeToString(int c) {
FILE: model_zoo/models/syntaxnet/syntaxnet/char_properties.h
function namespace (line 84) | namespace syntaxnet {
FILE: model_zoo/models/syntaxnet/syntaxnet/char_properties_test.cc
type syntaxnet (line 41) | namespace syntaxnet {
class CharPropertiesTest (line 52) | class CharPropertiesTest : public testing::Test {
method CollectChars (line 55) | void CollectChars(const std::set<char32> &chars) {
method CollectArray (line 60) | void CollectArray(const char32 arr[], int len) {
method CollectCharProperty (line 65) | void CollectCharProperty(const char *name) {
method CollectAsciiPredicate (line 77) | void CollectAsciiPredicate(AsciiPredicate *pred) {
method ExpectCharPropertyEqualsCollectedSet (line 87) | void ExpectCharPropertyEqualsCollectedSet(const char *name) {
method ExpectCharPropertyContainsCollectedSet (line 118) | void ExpectCharPropertyContainsCollectedSet(const char *name) {
method string (line 128) | string EncodeAsUTF8(const char32 *in, int size) {
method EncodeAsUTF8Char (line 139) | int EncodeAsUTF8Char(char32 in, char *out) {
function TEST_F (line 225) | TEST_F(CharPropertiesTest, TestDigit) {
function TEST_F (line 230) | TEST_F(CharPropertiesTest, TestWavyDash) {
function TEST_F (line 235) | TEST_F(CharPropertiesTest, TestDigitOrWavyDash) {
function TEST_F (line 241) | TEST_F(CharPropertiesTest, TestPunctuationPlus) {
function TEST_F (line 252) | TEST_F(CharPropertiesTest, StartSentencePunc) {
function TEST_F (line 257) | TEST_F(CharPropertiesTest, EndSentencePunc) {
function TEST_F (line 262) | TEST_F(CharPropertiesTest, OpenExprPunc) {
function TEST_F (line 267) | TEST_F(CharPropertiesTest, CloseExprPunc) {
function TEST_F (line 272) | TEST_F(CharPropertiesTest, OpenQuote) {
function TEST_F (line 277) | TEST_F(CharPropertiesTest, CloseQuote) {
function TEST_F (line 282) | TEST_F(CharPropertiesTest, OpenBookquote) {
function TEST_F (line 287) | TEST_F(CharPropertiesTest, CloseBookquote) {
function TEST_F (line 292) | TEST_F(CharPropertiesTest, OpenPunc) {
function TEST_F (line 298) | TEST_F(CharPropertiesTest, ClosePunc) {
function TEST_F (line 304) | TEST_F(CharPropertiesTest, LeadingSentencePunc) {
function TEST_F (line 311) | TEST_F(CharPropertiesTest, TrailingSentencePunc) {
function TEST_F (line 318) | TEST_F(CharPropertiesTest, NoncurrencyTokenPrefixSymbol) {
function TEST_F (line 323) | TEST_F(CharPropertiesTest, TokenSuffixSymbol) {
function TEST_F (line 328) | TEST_F(CharPropertiesTest, TokenPrefixSymbol) {
function TEST_F (line 334) | TEST_F(CharPropertiesTest, SubscriptSymbol) {
function TEST_F (line 339) | TEST_F(CharPropertiesTest, SuperscriptSymbol) {
function TEST_F (line 344) | TEST_F(CharPropertiesTest, CurrencySymbol) {
function TEST_F (line 349) | TEST_F(CharPropertiesTest, DirectionalFormattingCode) {
function TEST_F (line 354) | TEST_F(CharPropertiesTest, Punctuation) {
function TEST_F (line 359) | TEST_F(CharPropertiesTest, Separator) {
FILE: model_zoo/models/syntaxnet/syntaxnet/conll2tree.py
function to_dict (line 41) | def to_dict(sentence):
function main (line 77) | def main(unused_argv):
FILE: model_zoo/models/syntaxnet/syntaxnet/document_filters.cc
type syntaxnet (line 42) | namespace syntaxnet {
function GetTaskContext (line 46) | void GetTaskContext(OpKernelConstruction *context, TaskContext *task_c...
function OutputDocuments (line 57) | void OutputDocuments(OpKernelContext *context,
class DocumentSource (line 71) | class DocumentSource : public OpKernel {
method DocumentSource (line 73) | explicit DocumentSource(OpKernelConstruction *context) : OpKernel(co...
method Compute (line 84) | void Compute(OpKernelContext *context) override {
method OutputLast (line 101) | void OutputLast(OpKernelContext *context, bool last) {
class DocumentSink (line 122) | class DocumentSink : public OpKernel {
method DocumentSink (line 124) | explicit DocumentSink(OpKernelConstruction *context) : OpKernel(cont...
method Compute (line 132) | void Compute(OpKernelContext *context) override {
class WellFormedFilter (line 159) | class WellFormedFilter : public OpKernel {
method WellFormedFilter (line 161) | explicit WellFormedFilter(OpKernelConstruction *context) : OpKernel(...
method Compute (line 167) | void Compute(OpKernelContext *context) override {
method ShouldKeep (line 184) | bool ShouldKeep(const Sentence &doc) {
class ProjectivizeFilter (line 227) | class ProjectivizeFilter : public OpKernel {
method ProjectivizeFilter (line 229) | explicit ProjectivizeFilter(OpKernelConstruction *context)
method Compute (line 236) | void Compute(OpKernelContext *context) override {
method Process (line 252) | bool Process(Sentence *doc) {
FILE: model_zoo/models/syntaxnet/syntaxnet/document_format.cc
type syntaxnet (line 18) | namespace syntaxnet {
FILE: model_zoo/models/syntaxnet/syntaxnet/document_format.h
function namespace (line 30) | namespace syntaxnet {
FILE: model_zoo/models/syntaxnet/syntaxnet/embedding_feature_extractor.cc
type syntaxnet (line 25) | namespace syntaxnet {
FILE: model_zoo/models/syntaxnet/syntaxnet/embedding_feature_extractor.h
function namespace (line 34) | namespace syntaxnet {
function Preprocess (line 160) | void Preprocess(WorkspaceSet *workspaces, OBJ *obj) const {
function ExtractFeatures (line 181) | void ExtractFeatures(const WorkspaceSet &workspaces, const OBJ &obj,
FILE: model_zoo/models/syntaxnet/syntaxnet/feature_extractor.cc
type syntaxnet (line 21) | namespace syntaxnet {
function FeatureValue (line 66) | FeatureValue GenericFeatureExtractor::GetDomainSize() const {
function string (line 81) | string GenericFeatureFunction::GetParameter(const string &name) const {
function FeatureType (line 109) | FeatureType *GenericFeatureFunction::GetFeatureType() const {
FILE: model_zoo/models/syntaxnet/syntaxnet/feature_extractor.h
function namespace (line 58) | namespace syntaxnet {
function set_prefix (line 272) | void set_prefix(const string &prefix) { prefix_ = prefix; }
function set_feature_type (line 280) | void set_feature_type(FeatureType *feature_type) {
function virtual (line 320) | virtual void Preprocess(WorkspaceSet *workspaces, OBJ *object) const {}
function virtual (line 325) | virtual void Evaluate(const WorkspaceSet &workspaces, const OBJ &object,
function Self (line 343) | static Self *Instantiate(GenericFeatureExtractor *extractor,
function GetFeatureTypes (line 386) | void GetFeatureTypes(vector<FeatureType *> *types) const override {
function Setup (line 393) | void Setup(TaskContext *context) override {
function Preprocess (line 448) | void Preprocess(WorkspaceSet *workspaces, OBJ *object) const override {
function Evaluate (line 475) | void Evaluate(const WorkspaceSet &workspaces, const OBJ &object,
FILE: model_zoo/models/syntaxnet/syntaxnet/feature_types.h
type int64 (line 31) | typedef int64 Predicate;
type Predicate (line 32) | typedef Predicate FeatureValue;
function class (line 39) | class FeatureType {
FILE: model_zoo/models/syntaxnet/syntaxnet/fml_parser.cc
type syntaxnet (line 24) | namespace syntaxnet {
function ToFMLFunction (line 224) | void ToFMLFunction(const FeatureFunctionDescriptor &function, string *...
function ToFML (line 246) | void ToFML(const FeatureFunctionDescriptor &function, string *output) {
function ToFML (line 261) | void ToFML(const FeatureExtractorDescriptor &extractor, string *output) {
function string (line 268) | string AsFML(const FeatureFunctionDescriptor &function) {
function string (line 274) | string AsFML(const FeatureExtractorDescriptor &extractor) {
function StripFML (line 280) | void StripFML(string *fml_string) {
FILE: model_zoo/models/syntaxnet/syntaxnet/fml_parser.h
function namespace (line 48) | namespace syntaxnet {
FILE: model_zoo/models/syntaxnet/syntaxnet/graph_builder.py
function BatchedSparseToDense (line 29) | def BatchedSparseToDense(sparse_indices, output_size):
function EmbeddingLookupFeatures (line 45) | def EmbeddingLookupFeatures(params, sparse_features, allow_weights):
class GreedyParser (line 79) | class GreedyParser(object):
method __init__ (line 112) | def __init__(self,
method embedding_size (line 207) | def embedding_size(self):
method _AddParam (line 213) | def _AddParam(self,
method GetStep (line 258) | def GetStep(self):
method _AddVariable (line 263) | def _AddVariable(self, shape, dtype, name, initializer=None):
method _ReluWeightInitializer (line 272) | def _ReluWeightInitializer(self):
method _EmbeddingMatrixInitializer (line 277) | def _EmbeddingMatrixInitializer(self, index, embedding_size):
method _AddEmbedding (line 285) | def _AddEmbedding(self,
method _BuildNetwork (line 306) | def _BuildNetwork(self, feature_endpoints, return_average=False):
method _AddGoldReader (line 375) | def _AddGoldReader(self, task_context, batch_size, corpus_name):
method _AddDecodedReader (line 388) | def _AddDecodedReader(self, task_context, batch_size, transition_scores,
method _AddCostFunction (line 403) | def _AddCostFunction(self, batch_size, gold_actions, logits):
method AddEvaluation (line 415) | def AddEvaluation(self,
method _IncrementCounter (line 453) | def _IncrementCounter(self, counter):
method _AddLearningRate (line 456) | def _AddLearningRate(self, initial_learning_rate, decay_steps):
method AddPretrainedEmbeddings (line 475) | def AddPretrainedEmbeddings(self, index, embeddings_path, task_context):
method AddTraining (line 490) | def AddTraining(self,
method AddSaver (line 550) | def AddSaver(self, slim_model=False):
FILE: model_zoo/models/syntaxnet/syntaxnet/graph_builder_test.py
class GraphBuilderTest (line 38) | class GraphBuilderTest(test_util.TensorFlowTestCase):
method setUp (line 40) | def setUp(self):
method MakeBuilder (line 60) | def MakeBuilder(self, use_averaging=True, **kw_args):
method FindNode (line 67) | def FindNode(self, name):
method NodeFound (line 73) | def NodeFound(self, name):
method testScope (line 76) | def testScope(self):
method testNestedScope (line 110) | def testNestedScope(self):
method testUseCustomGraphs (line 128) | def testUseCustomGraphs(self):
method testTrainingAndEvalAreIndependent (line 158) | def testTrainingAndEvalAreIndependent(self):
method testReproducibility (line 185) | def testReproducibility(self):
method testAddTrainingAndEvalOrderIndependent (line 208) | def testAddTrainingAndEvalOrderIndependent(self):
method testEvalMetrics (line 251) | def testEvalMetrics(self):
method MakeSparseFeatures (line 271) | def MakeSparseFeatures(self, ids, weights):
method testEmbeddingOp (line 278) | def testEmbeddingOp(self):
method testOnlyTrainSomeParameters (line 299) | def testOnlyTrainSomeParameters(self):
FILE: model_zoo/models/syntaxnet/syntaxnet/lexicon_builder.cc
type syntaxnet (line 44) | namespace syntaxnet {
class LexiconBuilder (line 53) | class LexiconBuilder : public OpKernel {
method LexiconBuilder (line 55) | explicit LexiconBuilder(OpKernelConstruction *context) : OpKernel(co...
method Compute (line 72) | void Compute(OpKernelContext *context) override {
method HasSpaces (line 159) | static bool HasSpaces(const string &word) {
method WriteAffixTable (line 167) | static void WriteAffixTable(const AffixTable &affixes,
class FeatureSize (line 189) | class FeatureSize : public OpKernel {
method FeatureSize (line 191) | explicit FeatureSize(OpKernelConstruction *context) : OpKernel(conte...
method Compute (line 213) | void Compute(OpKernelContext *context) override {
FILE: model_zoo/models/syntaxnet/syntaxnet/lexicon_builder_test.py
class LexiconBuilderTest (line 76) | class LexiconBuilderTest(test_util.TensorFlowTestCase):
method setUp (line 78) | def setUp(self):
method AddInput (line 86) | def AddInput(self, name, file_pattern, record_format, context):
method WriteContext (line 92) | def WriteContext(self, corpus_format):
method ReadNextDocument (line 103) | def ReadNextDocument(self, sess, doc_source):
method ValidateDocuments (line 112) | def ValidateDocuments(self):
method ValidateTagToCategoryMap (line 130) | def ValidateTagToCategoryMap(self):
method LoadMap (line 137) | def LoadMap(self, map_name):
method ValidateCharMap (line 146) | def ValidateCharMap(self):
method ValidateWordMap (line 152) | def ValidateWordMap(self):
method BuildLexicon (line 157) | def BuildLexicon(self):
method testCoNLLFormat (line 161) | def testCoNLLFormat(self):
method testCoNLLFormatExtraNewlinesAndComments (line 173) | def testCoNLLFormatExtraNewlinesAndComments(self):
method testTokenizedTextFormat (line 182) | def testTokenizedTextFormat(self):
method testTokenizedTextFormatExtraNewlines (line 189) | def testTokenizedTextFormatExtraNewlines(self):
FILE: model_zoo/models/syntaxnet/syntaxnet/morpher_transitions.cc
type syntaxnet (line 39) | namespace syntaxnet {
class MorphologyTransitionState (line 41) | class MorphologyTransitionState : public ParserTransitionState {
method MorphologyTransitionState (line 43) | explicit MorphologyTransitionState(const MorphologyLabelSet *label_set)
method MorphologyTransitionState (line 46) | explicit MorphologyTransitionState(const MorphologyTransitionState *...
method ParserTransitionState (line 53) | ParserTransitionState *Clone() const override {
method Init (line 58) | void Init(ParserState *state) override {
method Tag (line 73) | int Tag(int index) const {
method SetTag (line 80) | void SetTag(int index, int tag) {
method GoldTag (line 87) | int GoldTag(int index) const {
method TokenMorphology (line 95) | const TokenMorphology &TagAsProto(int tag) const {
method AddParseToDocument (line 103) | void AddParseToDocument(const ParserState &state, bool rewrite_root_...
method IsTokenCorrect (line 113) | bool IsTokenCorrect(const ParserState &state, int index) const overr...
method string (line 118) | string ToString(const ParserState &state) const override {
class MorphologyTransitionSystem (line 147) | class MorphologyTransitionSystem : public ParserTransitionSystem {
method Setup (line 152) | void Setup(TaskContext *context) override {
method Init (line 157) | void Init(TaskContext *context) override {
method ParserAction (line 165) | static ParserAction ShiftAction(int tag) { return tag; }
method AllowsNonProjective (line 169) | bool AllowsNonProjective() const override { return true; }
method NumActionTypes (line 172) | int NumActionTypes() const override { return 1; }
method NumActions (line 175) | int NumActions(int num_labels) const override { return label_set_->S...
method ParserAction (line 178) | ParserAction GetDefaultAction(const ParserState &state) const overri...
method ParserAction (line 184) | ParserAction GetNextGoldAction(const ParserState &state) const overr...
method IsAllowedAction (line 192) | bool IsAllowedAction(ParserAction action,
method PerformActionWithoutHistory (line 199) | void PerformActionWithoutHistory(ParserAction action,
method IsFinalState (line 211) | bool IsFinalState(const ParserState &state) const override {
method string (line 216) | string ActionAsString(ParserAction action,
method IsDeterministicState (line 223) | bool IsDeterministicState(const ParserState &state) const override {
method ParserTransitionState (line 228) | ParserTransitionState *NewTransitionState(bool training_mode) const ...
method MorphologyTransitionState (line 234) | static const MorphologyTransitionState &TransitionState(
method MorphologyTransitionState (line 242) | static MorphologyTransitionState *MutableTransitionState(ParserState...
class PredictedMorphTagFeatureFunction (line 259) | class PredictedMorphTagFeatureFunction : public ParserIndexFeatureFunc...
method PredictedMorphTagFeatureFunction (line 261) | PredictedMorphTagFeatureFunction() {}
method Setup (line 264) | void Setup(TaskContext *context) override {
method Init (line 269) | void Init(TaskContext *context) override {
method FeatureValue (line 279) | FeatureValue Compute(const WorkspaceSet &workspaces, const ParserSta...
FILE: model_zoo/models/syntaxnet/syntaxnet/morphology_label_set.cc
type syntaxnet (line 18) | namespace syntaxnet {
function TokenMorphology (line 41) | const TokenMorphology &MorphologyLabelSet::Lookup(int i) const {
function string (line 71) | string MorphologyLabelSet::StringForMatch(const TokenMorphology &morph...
function string (line 81) | string FullLabelFeatureType::GetFeatureValueName(FeatureValue value) c...
FILE: model_zoo/models/syntaxnet/syntaxnet/morphology_label_set.h
function namespace (line 29) | namespace syntaxnet {
FILE: model_zoo/models/syntaxnet/syntaxnet/morphology_label_set_test.cc
type syntaxnet (line 24) | namespace syntaxnet {
class MorphologyLabelSetTest (line 26) | class MorphologyLabelSetTest : public ::testing::Test {
function TEST_F (line 32) | TEST_F(MorphologyLabelSetTest, AddLookupExisting) {
function TEST_F (line 78) | TEST_F(MorphologyLabelSetTest, Serialization) {
FILE: model_zoo/models/syntaxnet/syntaxnet/ops/parser_ops.cc
type syntaxnet (line 18) | namespace syntaxnet {
FILE: model_zoo/models/syntaxnet/syntaxnet/parser_eval.py
function RewriteContext (line 63) | def RewriteContext(task_context):
function Eval (line 76) | def Eval(sess):
function main (line 154) | def main(unused_argv):
FILE: model_zoo/models/syntaxnet/syntaxnet/parser_features.cc
type syntaxnet (line 24) | namespace syntaxnet {
function string (line 38) | string RootFeatureType::GetFeatureValueName(FeatureValue value) const {
function FeatureValue (line 43) | FeatureValue RootFeatureType::GetDomainSize() const {
class InputParserLocator (line 51) | class InputParserLocator : public ParserLocator<InputParserLocator> {
method GetFocus (line 54) | int GetFocus(const WorkspaceSet &workspaces, const ParserState &stat...
class StackParserLocator (line 64) | class StackParserLocator : public ParserLocator<StackParserLocator> {
method GetFocus (line 67) | int GetFocus(const WorkspaceSet &workspaces, const ParserState &stat...
class HeadFeatureLocator (line 78) | class HeadFeatureLocator : public ParserIndexLocator<HeadFeatureLocato...
method UpdateArgs (line 82) | void UpdateArgs(const WorkspaceSet &workspaces, const ParserState &s...
class ChildFeatureLocator (line 99) | class ChildFeatureLocator : public ParserIndexLocator<ChildFeatureLoca...
method UpdateArgs (line 103) | void UpdateArgs(const WorkspaceSet &workspaces, const ParserState &s...
class SiblingFeatureLocator (line 124) | class SiblingFeatureLocator
method UpdateArgs (line 129) | void UpdateArgs(const WorkspaceSet &workspaces, const ParserState &s...
class LabelFeatureFunction (line 149) | class LabelFeatureFunction : public BasicParserSentenceFeatureFunction...
method FeatureValue (line 153) | FeatureValue Compute(const WorkspaceSet &workspaces, const ParserSta...
class ParserTokenFeatureFunction (line 204) | class ParserTokenFeatureFunction : public NestedFeatureFunction<
method Preprocess (line 207) | void Preprocess(WorkspaceSet *workspaces, ParserState *state) const ...
method Evaluate (line 213) | void Evaluate(const WorkspaceSet &workspaces, const ParserState &state,
method FeatureValue (line 221) | FeatureValue Compute(const WorkspaceSet &workspaces, const ParserSta...
FILE: model_zoo/models/syntaxnet/syntaxnet/parser_features.h
function namespace (line 29) | namespace syntaxnet {
type FeatureFunction (line 43) | typedef FeatureFunction<ParserState> ParserFeatureFunction;
type FeatureFunction (line 47) | typedef FeatureFunction<ParserState, int> ParserIndexFeatureFunction;
type FeatureExtractor (line 67) | typedef FeatureExtractor<ParserState> ParserFeatureExtractor;
function class (line 70) | class RootFeatureType : public FeatureType {
function Init (line 107) | void Init(TaskContext *context) override {
function RequestWorkspaces (line 115) | void RequestWorkspaces(WorkspaceRegistry *registry) override {
function Preprocess (line 119) | void Preprocess(WorkspaceSet *workspaces, ParserState *state) const over...
FILE: model_zoo/models/syntaxnet/syntaxnet/parser_features_test.cc
type syntaxnet (line 32) | namespace syntaxnet {
class ParserFeatureFunctionTest (line 38) | class ParserFeatureFunctionTest : public ::testing::Test {
method SetUp (line 41) | void SetUp() override {
method string (line 85) | string ExtractFeature(const string &feature_name) {
function TEST_F (line 112) | TEST_F(ParserFeatureFunctionTest, TagFeatureFunction) {
function TEST_F (line 122) | TEST_F(ParserFeatureFunctionTest, LabelFeatureFunction) {
FILE: model_zoo/models/syntaxnet/syntaxnet/parser_state.cc
type syntaxnet (line 23) | namespace syntaxnet {
function ParserState (line 51) | ParserState *ParserState::Clone() const {
function string (line 236) | string ParserState::LabelAsString(int label) const {
function string (line 244) | string ParserState::ToString() const {
FILE: model_zoo/models/syntaxnet/syntaxnet/parser_state.h
function namespace (line 29) | namespace syntaxnet {
FILE: model_zoo/models/syntaxnet/syntaxnet/parser_trainer.py
function StageName (line 86) | def StageName():
function OutputPath (line 90) | def OutputPath(path):
function RewriteContext (line 94) | def RewriteContext():
function WriteStatus (line 107) | def WriteStatus(num_steps, eval_metric, best_eval_metric):
function Eval (line 118) | def Eval(sess, parser, num_steps, best_eval_metric):
function Train (line 161) | def Train(sess, num_actions, feature_sizes, domain_sizes, embedding_dims):
function main (line 256) | def main(unused_argv):
FILE: model_zoo/models/syntaxnet/syntaxnet/parser_transitions.cc
type syntaxnet (line 20) | namespace syntaxnet {
FILE: model_zoo/models/syntaxnet/syntaxnet/parser_transitions.h
function namespace (line 27) | namespace tensorflow {
type ParserAction (line 41) | typedef int ParserAction;
function LabelType (line 44) | enum class LabelType {
FILE: model_zoo/models/syntaxnet/syntaxnet/populate_test_inputs.cc
type syntaxnet (line 34) | namespace syntaxnet {
function string (line 139) | string PopulateTestInputs::AddPart(TaskInput *input, const string &fil...
FILE: model_zoo/models/syntaxnet/syntaxnet/populate_test_inputs.h
function namespace (line 71) | namespace syntaxnet {
FILE: model_zoo/models/syntaxnet/syntaxnet/proto_io.h
function namespace (line 42) | namespace syntaxnet {
function class (line 146) | class TextReader {
function class (line 210) | class TextWriter {
FILE: model_zoo/models/syntaxnet/syntaxnet/reader_ops.cc
type syntaxnet (line 57) | namespace syntaxnet {
class ParsingReader (line 59) | class ParsingReader : public OpKernel {
method ParsingReader (line 61) | explicit ParsingReader(OpKernelConstruction *context) : OpKernel(con...
method AdvanceSentence (line 108) | virtual void AdvanceSentence(int index) {
method Compute (line 119) | void Compute(OpKernelContext *context) override {
method default_outputs (line 196) | std::vector<DataType> default_outputs() const {
method max_batch_size (line 203) | int max_batch_size() const { return max_batch_size_; }
method batch_size (line 204) | int batch_size() const { return sentence_batch_->size(); }
method additional_output_index (line 205) | int additional_output_index() const { return feature_size_ + 1; }
method ParserState (line 206) | ParserState *state(int i) const { return states_[i].get(); }
method ParserTransitionSystem (line 207) | const ParserTransitionSystem &transition_system() const {
method TaskContext (line 212) | const TaskContext &task_context() const { return task_context_; }
method string (line 214) | const string &arg_prefix() const { return arg_prefix_; }
class GoldParseReader (line 259) | class GoldParseReader : public ParsingReader {
method GoldParseReader (line 261) | explicit GoldParseReader(OpKernelConstruction *context)
method PerformActions (line 271) | void PerformActions(OpKernelContext *context) override {
method AddAdditionalOutputs (line 281) | void AddAdditionalOutputs(OpKernelContext *context) const override {
class DecodedParseReader (line 319) | class DecodedParseReader : public ParsingReader {
method DecodedParseReader (line 321) | explicit DecodedParseReader(OpKernelConstruction *context)
method AdvanceSentence (line 335) | void AdvanceSentence(int index) override {
method ComputeTokenAccuracy (line 343) | void ComputeTokenAccuracy(const ParserState &state) {
method PerformActions (line 356) | void PerformActions(OpKernelContext *context) override {
method AddAdditionalOutputs (line 389) | void AddAdditionalOutputs(OpKernelContext *context) const override {
class WordEmbeddingInitializer (line 438) | class WordEmbeddingInitializer : public OpKernel {
method WordEmbeddingInitializer (line 440) | explicit WordEmbeddingInitializer(OpKernelConstruction *context)
method Compute (line 457) | void Compute(OpKernelContext *context) override {
method SetNormalizedRow (line 497) | void SetNormalizedRow(const TokenEmbedding::Vector &vector, const in...
method CopyToTmpPath (line 513) | static tensorflow::Status CopyToTmpPath(const string &source_path,
FILE: model_zoo/models/syntaxnet/syntaxnet/reader_ops_test.py
class ParsingReaderOpsTest (line 40) | class ParsingReaderOpsTest(test_util.TensorFlowTestCase):
method setUp (line 42) | def setUp(self):
method GetMaxId (line 62) | def GetMaxId(self, sparse_features):
method testParsingReaderOp (line 72) | def testParsingReaderOp(self):
method testParsingReaderOpWhileLoop (line 129) | def testParsingReaderOpWhileLoop(self):
method testWordEmbeddingInitializer (line 171) | def testWordEmbeddingInitializer(self):
FILE: model_zoo/models/syntaxnet/syntaxnet/registry.cc
type syntaxnet (line 18) | namespace syntaxnet {
FILE: model_zoo/models/syntaxnet/syntaxnet/registry.h
function namespace (line 62) | namespace syntaxnet {
function class (line 100) | class RegistryMetadata : public ComponentMetadata {
type ComponentRegistry (line 122) | typedef ComponentRegistry<T> Self;
function class (line 125) | class Registrar : public ComponentMetadata {
function Registrar (line 161) | const Registrar *GetComponent(const char *type) const {
function T (line 171) | T *Lookup(const char *type) const { return GetComponent(type)->object(); }
function T (line 172) | T *Lookup(const string &type) const { return Lookup(type.c_str()); }
type ComponentRegistry (line 196) | typedef ComponentRegistry<Factory> Registry;
function T (line 199) | static T *Create(const string &type) { return registry()->Lookup(type)(); }
function Registry (line 202) | static Registry *registry() { return &registry_; }
FILE: model_zoo/models/syntaxnet/syntaxnet/segmenter_utils.cc
type syntaxnet (line 21) | namespace syntaxnet {
FILE: model_zoo/models/syntaxnet/syntaxnet/segmenter_utils.h
function namespace (line 27) | namespace syntaxnet {
FILE: model_zoo/models/syntaxnet/syntaxnet/segmenter_utils_test.cc
type syntaxnet (line 26) | namespace syntaxnet {
function Sentence (line 29) | static Sentence GetKoSentence() {
function GetStartEndBytes (line 61) | static void GetStartEndBytes(const string &text,
function TEST (line 75) | TEST(SegmenterUtilsTest, GetCharsTest) {
function TEST (line 118) | TEST(SegmenterUtilsTest, SetCharsAsTokensTest) {
FILE: model_zoo/models/syntaxnet/syntaxnet/sentence_batch.cc
type syntaxnet (line 24) | namespace syntaxnet {
FILE: model_zoo/models/syntaxnet/syntaxnet/sentence_batch.h
function namespace (line 34) | namespace syntaxnet {
FILE: model_zoo/models/syntaxnet/syntaxnet/sentence_features.cc
type syntaxnet (line 23) | namespace syntaxnet {
function string (line 46) | string TermFrequencyMapFeature::GetFeatureValueName(FeatureValue value...
function string (line 55) | string TermFrequencyMapFeature::WorkspaceName() const {
function string (line 80) | string TermFrequencyMapSetFeature::WorkspaceName() const {
function GetUTF8Chars (line 86) | void GetUTF8Chars(const string &word, vector<tensorflow::StringPiece> ...
function UTF8FirstLetterNumBytes (line 94) | int UTF8FirstLetterNumBytes(const char *utf8_str) {
function string (line 137) | string Hyphen::GetFeatureValueName(FeatureValue value) const {
function FeatureValue (line 147) | FeatureValue Hyphen::ComputeValue(const Token &token) const {
function string (line 169) | string Capitalization::GetFeatureValueName(FeatureValue value) const {
function FeatureValue (line 185) | FeatureValue Capitalization::ComputeValueWithFocus(const Token &token,
function string (line 214) | string PunctuationAmount::GetFeatureValueName(FeatureValue value) const {
function FeatureValue (line 226) | FeatureValue PunctuationAmount::ComputeValue(const Token &token) const {
function string (line 245) | string Quote::GetFeatureValueName(FeatureValue value) const {
function FeatureValue (line 259) | FeatureValue Quote::ComputeValue(const Token &token) const {
function string (line 298) | string Digit::GetFeatureValueName(FeatureValue value) const {
function FeatureValue (line 310) | FeatureValue Digit::ComputeValue(const Token &token) const {
function string (line 338) | string AffixTableFeature::WorkspaceName() const {
function AffixTable (line 345) | static AffixTable *CreateAffixTable(const string &filename,
function FeatureValue (line 376) | FeatureValue AffixTableFeature::ComputeValue(const Token &token) const {
function string (line 394) | string AffixTableFeature::GetFeatureValueName(FeatureValue value) const {
FILE: model_zoo/models/syntaxnet/syntaxnet/sentence_features.h
function namespace (line 31) | namespace syntaxnet {
function class (line 99) | class TokenLookupSetFeature : public SentenceFeature {
function virtual (line 119) | virtual string WorkspaceName() const = 0;
function vector (line 140) | const vector<int> &GetCachedValueSet(const WorkspaceSet &workspaces,
function Evaluate (line 152) | void Evaluate(const WorkspaceSet &workspaces, const Sentence &sentence,
function FeatureValue (line 165) | FeatureValue Compute(const WorkspaceSet &workspaces, const Sentence &sen...
function class (line 176) | class TermFrequencyMapFeature : public TokenLookupFeature {
function class (line 224) | class TermFrequencyMapSetFeature : public TokenLookupSetFeature {
function class (line 298) | class Word : public TermFrequencyMapFeature {
function class (line 308) | class Char : public TermFrequencyMapFeature {
function class (line 338) | class LowercaseWord : public TermFrequencyMapFeature {
function class (line 348) | class Tag : public TermFrequencyMapFeature {
function class (line 357) | class Label : public TermFrequencyMapFeature {
function class (line 366) | class CharNgram : public TermFrequencyMapSetFeature {
function class (line 389) | class MorphologySet : public TermFrequencyMapSetFeature {
function class (line 407) | class LexicalCategoryFeature : public TokenLookupFeature {
function class (line 429) | class Hyphen : public LexicalCategoryFeature {
function class (line 451) | class Capitalization : public LexicalCategoryFeature {
function class (line 492) | class PunctuationAmount : public LexicalCategoryFeature {
function class (line 516) | class Quote : public LexicalCategoryFeature {
function class (line 542) | class Digit : public LexicalCategoryFeature {
function class (line 567) | class AffixTableFeature : public TokenLookupFeature {
function class (line 613) | class PrefixFeature : public AffixTableFeature {
function class (line 620) | class SuffixFeature : public AffixTableFeature {
function class (line 626) | class Offset : public Locator<Offset> {
type FeatureExtractor (line 634) | typedef FeatureExtractor<Sentence, int> SentenceExtractor;
FILE: model_zoo/models/syntaxnet/syntaxnet/sentence_features_test.cc
type syntaxnet (line 33) | namespace syntaxnet {
class SentenceFeaturesTest (line 37) | class SentenceFeaturesTest : public ::testing::Test {
method SentenceFeaturesTest (line 39) | explicit SentenceFeaturesTest(const string &prototxt)
method Sentence (line 43) | static Sentence ParseASCII(const string &prototxt) {
method PrepareFeature (line 52) | virtual void PrepareFeature(const string &fml) {
method string (line 67) | virtual string ExtractFeature(int index) {
method ExtractMultiFeature (line 76) | virtual vector<string> ExtractMultiFeature(int index) {
method AddInputToContext (line 88) | void AddInputToContext(const string &name, const string &file_pattern,
method CheckVectorWorkspace (line 99) | void CheckVectorWorkspace(const VectorIntWorkspace &workspace,
class CommonSentenceFeaturesTest (line 118) | class CommonSentenceFeaturesTest : public SentenceFeaturesTest {
method CommonSentenceFeaturesTest (line 120) | CommonSentenceFeaturesTest()
function TEST_F (line 147) | TEST_F(CommonSentenceFeaturesTest, TagFeature) {
function TEST_F (line 157) | TEST_F(CommonSentenceFeaturesTest, TagFeaturePassesArgs) {
function TEST_F (line 167) | TEST_F(CommonSentenceFeaturesTest, OffsetPlusTag) {
function TEST_F (line 182) | TEST_F(CommonSentenceFeaturesTest, CharNgramFeature) {
function TEST_F (line 201) | TEST_F(CommonSentenceFeaturesTest, MorphologySetFeature) {
function TEST_F (line 219) | TEST_F(CommonSentenceFeaturesTest, CapitalizationProcessesCorrectly) {
class CharFeatureTest (line 237) | class CharFeatureTest : public SentenceFeaturesTest {
method CharFeatureTest (line 239) | CharFeatureTest()
function TEST_F (line 251) | TEST_F(CharFeatureTest, CharFeature) {
FILE: model_zoo/models/syntaxnet/syntaxnet/shared_store.cc
type syntaxnet (line 22) | namespace syntaxnet {
function string (line 64) | string SharedStoreUtils::CreateDefaultName() { return string(); }
function string (line 66) | string SharedStoreUtils::ToString(const string &input) {
function string (line 70) | string SharedStoreUtils::ToString(const char *input) {
function string (line 74) | string SharedStoreUtils::ToString(tensorflow::StringPiece input) {
function string (line 79) | string SharedStoreUtils::ToString(bool input) {
function string (line 83) | string SharedStoreUtils::ToString(float input) {
function string (line 87) | string SharedStoreUtils::ToString(double input) {
FILE: model_zoo/models/syntaxnet/syntaxnet/shared_store.h
function class (line 33) | class SharedStore {
FILE: model_zoo/models/syntaxnet/syntaxnet/shared_store_test.cc
type syntaxnet (line 26) | namespace syntaxnet {
type NoArgs (line 28) | struct NoArgs {
method NoArgs (line 29) | NoArgs() {
type OneArg (line 34) | struct OneArg {
method OneArg (line 36) | explicit OneArg(const string &n) : name(n) {
type TwoArgs (line 41) | struct TwoArgs {
method TwoArgs (line 44) | TwoArgs(const string &n, int a) : name(n), age(a) {
type Slow (line 49) | struct Slow {
method Slow (line 51) | Slow() {
type CountCalls (line 57) | struct CountCalls {
method CountCalls (line 58) | CountCalls() {
method Reset (line 68) | static void Reset() {
class PointerSet (line 80) | class PointerSet {
method PointerSet (line 82) | PointerSet() { }
method Add (line 84) | void Add(const void *p) {
method size (line 89) | int size() {
class SharedStoreTest (line 99) | class SharedStoreTest : public testing::Test {
function TEST_F (line 109) | TEST_F(SharedStoreTest, ConstructorArgs) {
function TEST_F (line 116) | TEST_F(SharedStoreTest, Shared) {
function TEST_F (line 126) | TEST_F(SharedStoreTest, DifferentTypes) {
function OneArg (line 136) | OneArg *MakeOneArg(const string &n) {
method OneArg (line 36) | explicit OneArg(const string &n) : name(n) {
function TEST_F (line 140) | TEST_F(SharedStoreTest, ClosureGet) {
function TEST_F (line 149) | TEST_F(SharedStoreTest, PermanentCallback) {
function NoArgs (line 158) | NoArgs *BogusMakeNoArgs(NoArgs *ob) {
method NoArgs (line 29) | NoArgs() {
function CountCalls (line 163) | CountCalls *MakeFailedCountCalls() {
method CountCalls (line 58) | CountCalls() {
method Reset (line 68) | static void Reset() {
function TEST_F (line 171) | TEST_F(SharedStoreTest, FailedClosureGet) {
function TEST_F (line 184) | TEST_F(SharedStoreDeathTest, ClosureGetOrDie) {
function TEST_F (line 190) | TEST_F(SharedStoreTest, Release) {
function TEST_F (line 200) | TEST_F(SharedStoreTest, Clear) {
function GetSharedObject (line 214) | void GetSharedObject(PointerSet *ps) {
function TEST_F (line 225) | TEST_F(SharedStoreTest, ThreadSafety) {
FILE: model_zoo/models/syntaxnet/syntaxnet/structured_graph_builder.py
function AddCrossEntropy (line 32) | def AddCrossEntropy(batch_size, n):
class StructuredGraphBuilder (line 60) | class StructuredGraphBuilder(graph_builder.GreedyParser):
method __init__ (line 71) | def __init__(self, *args, **kwargs):
method _AddBeamReader (line 76) | def _AddBeamReader(self,
method _BuildSequence (line 95) | def _BuildSequence(self,
method AddTraining (line 134) | def AddTraining(self,
method AddEvaluation (line 222) | def AddEvaluation(self,
FILE: model_zoo/models/syntaxnet/syntaxnet/tagger_transitions.cc
type syntaxnet (line 38) | namespace syntaxnet {
class TaggerTransitionState (line 40) | class TaggerTransitionState : public ParserTransitionState {
method TaggerTransitionState (line 42) | explicit TaggerTransitionState(const TermFrequencyMap *tag_map,
method TaggerTransitionState (line 46) | explicit TaggerTransitionState(const TaggerTransitionState *state)
method ParserTransitionState (line 53) | ParserTransitionState *Clone() const override {
method Init (line 58) | void Init(ParserState *state) override {
method Tag (line 68) | int Tag(int index) const {
method SetTag (line 75) | void SetTag(int index, int tag) {
method GoldTag (line 82) | int GoldTag(int index) const {
method string (line 90) | string TagAsString(int tag) const {
method AddParseToDocument (line 98) | void AddParseToDocument(const ParserState &state, bool rewrite_root_...
method IsTokenCorrect (line 110) | bool IsTokenCorrect(const ParserState &state, int index) const overr...
method string (line 115) | string ToString(const ParserState &state) const override {
class TaggerTransitionSystem (line 146) | class TaggerTransitionSystem : public ParserTransitionSystem {
method Setup (line 151) | void Setup(TaskContext *context) override {
method Init (line 158) | void Init(TaskContext *context) override {
method ParserAction (line 171) | static ParserAction ShiftAction(int tag) { return tag; }
method AllowsNonProjective (line 175) | bool AllowsNonProjective() const override { return true; }
method NumActionTypes (line 178) | int NumActionTypes() const override { return 1; }
method NumActions (line 181) | int NumActions(int num_labels) const override { return tag_map_->Siz...
method ParserAction (line 184) | ParserAction GetDefaultAction(const ParserState &state) const overri...
method ParserAction (line 190) | ParserAction GetNextGoldAction(const ParserState &state) const overr...
method IsAllowedAction (line 198) | bool IsAllowedAction(ParserAction action,
method PerformActionWithoutHistory (line 205) | void PerformActionWithoutHistory(ParserAction action,
method IsFinalState (line 217) | bool IsFinalState(const ParserState &state) const override {
method string (line 222) | string ActionAsString(ParserAction action,
method IsDeterministicState (line 229) | bool IsDeterministicState(const ParserState &state) const override {
method ParserTransitionState (line 234) | ParserTransitionState *NewTransitionState(bool training_mode) const ...
method TaggerTransitionState (line 240) | static const TaggerTransitionState &TransitionState(
method TaggerTransitionState (line 248) | static TaggerTransitionState *MutableTransitionState(ParserState *st...
class PredictedTagFeatureFunction (line 273) | class PredictedTagFeatureFunction
method PredictedTagFeatureFunction (line 276) | PredictedTagFeatureFunction() {}
method FeatureValue (line 280) | FeatureValue Compute(const WorkspaceSet &workspaces, const ParserSta...
FILE: model_zoo/models/syntaxnet/syntaxnet/tagger_transitions_test.cc
type syntaxnet (line 31) | namespace syntaxnet {
class TaggerTransitionTest (line 33) | class TaggerTransitionTest : public ::testing::Test {
method TaggerTransitionTest (line 35) | TaggerTransitionTest()
method SetUpForDocument (line 41) | void SetUpForDocument(const Sentence &document) {
method ParserState (line 54) | ParserState *NewClonedState(Sentence *sentence) {
method GoldParse (line 63) | void GoldParse(Sentence *sentence) {
method DefaultParse (line 79) | void DefaultParse(Sentence *sentence) {
function TEST_F (line 99) | TEST_F(TaggerTransitionTest, SingleSentenceDocumentTest) {
FILE: model_zoo/models/syntaxnet/syntaxnet/task_context.cc
type syntaxnet (line 21) | namespace syntaxnet {
function TaskInput (line 28) | TaskInput *TaskContext::GetInput(const string &name) {
function TaskInput (line 40) | TaskInput *TaskContext::GetInput(const string &name, const string &fil...
function string (line 75) | string TaskContext::GetParameter(const string &name) const {
function int64 (line 90) | int64 TaskContext::GetInt64Parameter(const string &name) const {
function string (line 105) | string TaskContext::Get(const string &name, const char *defval) const {
function string (line 115) | string TaskContext::Get(const string &name, const string &defval) const {
function int64 (line 124) | int64 TaskContext::Get(const string &name, int64 defval) const {
function string (line 139) | string TaskContext::InputFile(const TaskInput &input) {
FILE: model_zoo/models/syntaxnet/syntaxnet/task_context.h
function namespace (line 25) | namespace syntaxnet {
FILE: model_zoo/models/syntaxnet/syntaxnet/term_frequency_map.cc
type syntaxnet (line 28) | namespace syntaxnet {
type TermFrequencyMap::SortByFrequencyThenTerm (line 107) | struct TermFrequencyMap::SortByFrequencyThenTerm {
function string (line 159) | const string &TagToCategoryMap::GetCategory(const string &tag) const {
FILE: model_zoo/models/syntaxnet/syntaxnet/term_frequency_map.h
function namespace (line 28) | namespace syntaxnet {
FILE: model_zoo/models/syntaxnet/syntaxnet/test_main.cc
function GTEST_API_ (line 32) | GTEST_API_ int main(int argc, char **argv) {
FILE: model_zoo/models/syntaxnet/syntaxnet/text_formats.cc
type syntaxnet (line 29) | namespace syntaxnet {
class CoNLLSyntaxFormat (line 63) | class CoNLLSyntaxFormat : public DocumentFormat {
method CoNLLSyntaxFormat (line 65) | CoNLLSyntaxFormat() {}
method Setup (line 67) | void Setup(TaskContext *context) override {
method ReadRecord (line 73) | bool ReadRecord(tensorflow::io::BufferedInputStream *buffer,
method ConvertFromString (line 85) | void ConvertFromString(const string &key, const string &value,
method ConvertToString (line 166) | void ConvertToString(const Sentence &sentence, string *key,
method string (line 192) | string UnderscoreIfEmpty(const string &field) {
method AddMorphAttributes (line 198) | void AddMorphAttributes(const string &attributes, Token *token) {
method string (line 229) | string GetMorphAttributes(const Token &token) {
method JoinCategoryToPos (line 244) | void JoinCategoryToPos(Token *token) {
method SplitCategoryFromPos (line 250) | void SplitCategoryFromPos(Token *token) {
method AddPosAsAttribute (line 259) | void AddPosAsAttribute(Token *token) {
method RemovePosFromAttributes (line 269) | void RemovePosFromAttributes(Token *token) {
class SegmentationTrainingDataFormat (line 320) | class SegmentationTrainingDataFormat : public CoNLLSyntaxFormat {
method ConvertFromString (line 327) | void ConvertFromString(const string &key, const string &value,
class TokenizedTextFormat (line 406) | class TokenizedTextFormat : public DocumentFormat {
method TokenizedTextFormat (line 408) | TokenizedTextFormat() {}
method ReadRecord (line 411) | bool ReadRecord(tensorflow::io::BufferedInputStream *buffer,
method ConvertFromString (line 416) | void ConvertFromString(const string &key, const string &value,
method ConvertToString (line 443) | void ConvertToString(const Sentence &sentence, string *key,
class UntokenizedTextFormat (line 472) | class UntokenizedTextFormat : public TokenizedTextFormat {
method UntokenizedTextFormat (line 474) | UntokenizedTextFormat() {}
method ConvertFromString (line 476) | void ConvertFromString(const string &key, const string &value,
class EnglishTextFormat (line 512) | class EnglishTextFormat : public TokenizedTextFormat {
method EnglishTextFormat (line 514) | EnglishTextFormat() {}
method ConvertFromString (line 516) | void ConvertFromString(const string &key, const string &value,
FILE: model_zoo/models/syntaxnet/syntaxnet/text_formats_test.py
class TextFormatsTest (line 37) | class TextFormatsTest(test_util.TensorFlowTestCase):
method setUp (line 39) | def setUp(self):
method AddInput (line 47) | def AddInput(self, name, file_pattern, record_format, context):
method AddParameter (line 53) | def AddParameter(self, name, value, context):
method WriteContext (line 58) | def WriteContext(self, corpus_format):
method ReadNextDocument (line 69) | def ReadNextDocument(self, sess, sentence):
method CheckTokenization (line 78) | def CheckTokenization(self, sentence, tokenization):
method CheckUntokenizedDoc (line 90) | def CheckUntokenizedDoc(self, sentence, words, starts, ends):
method testUntokenized (line 107) | def testUntokenized(self):
method testSegmentationTrainingData (line 113) | def testSegmentationTrainingData(self):
method CheckSegmentationTrainingData (line 136) | def CheckSegmentationTrainingData(self, doc_lines, doc_text, doc_words,
method testSimple (line 156) | def testSimple(self):
method testUrl (line 172) | def testUrl(self):
FILE: model_zoo/models/syntaxnet/syntaxnet/unpack_sparse_features.cc
type syntaxnet (line 42) | namespace syntaxnet {
class UnpackSparseFeatures (line 45) | class UnpackSparseFeatures : public OpKernel {
method UnpackSparseFeatures (line 47) | explicit UnpackSparseFeatures(OpKernelConstruction *context)
method Compute (line 53) | void Compute(OpKernelContext *context) override {
FILE: model_zoo/models/syntaxnet/syntaxnet/utils.cc
type syntaxnet (line 19) | namespace syntaxnet {
type utils (line 20) | namespace utils {
function ParseInt32 (line 22) | bool ParseInt32(const char *c_str, int *value) {
function ParseInt64 (line 28) | bool ParseInt64(const char *c_str, int64 *value) {
function ParseDouble (line 34) | bool ParseDouble(const char *c_str, double *value) {
function string (line 42) | string CEscape(const string &src) {
function Split (line 84) | std::vector<string> Split(const string &text, char delim) {
function SplitOne (line 98) | std::vector<string> SplitOne(const string &text, char delim) {
function IsAbsolutePath (line 108) | bool IsAbsolutePath(tensorflow::StringPiece path) {
function string (line 114) | string JoinPath(std::initializer_list<tensorflow::StringPiece> paths) {
function RemoveLeadingWhitespace (line 145) | size_t RemoveLeadingWhitespace(tensorflow::StringPiece *text) {
function RemoveTrailingWhitespace (line 156) | size_t RemoveTrailingWhitespace(tensorflow::StringPiece *text) {
function RemoveWhitespaceContext (line 167) | size_t RemoveWhitespaceContext(tensorflow::StringPiece *text) {
function uint32 (line 175) | inline uint32 DecodeFixed32(const char *ptr) {
function uint32 (line 183) | static inline uint32 ByteAs32(char c) { return static_cast<uint32>(c...
function uint32 (line 186) | uint32 Hash32(const char *data, size_t n, uint32 seed) {
function string (line 228) | string Lowercase(tensorflow::StringPiece s) {
function NormalizeDigits (line 263) | void NormalizeDigits(string *form) {
FILE: model_zoo/models/syntaxnet/syntaxnet/utils.h
function namespace (line 30) | namespace syntaxnet {
FILE: model_zoo/models/syntaxnet/syntaxnet/workspace.cc
type syntaxnet (line 20) | namespace syntaxnet {
function string (line 22) | string WorkspaceRegistry::DebugString() const {
function string (line 43) | string VectorIntWorkspace::TypeName() { return "Vector"; }
function string (line 48) | string VectorVectorIntWorkspace::TypeName() { return "VectorVector"; }
FILE: model_zoo/models/syntaxnet/syntaxnet/workspace.h
function namespace (line 31) | namespace syntaxnet {
function class (line 89) | class WorkspaceSet {
function class (line 144) | class SingletonIntWorkspace : public Workspace {
function class (line 167) | class VectorIntWorkspace : public Workspace {
function class (line 196) | class VectorVectorIntWorkspace : public Workspace {
FILE: model_zoo/models/syntaxnet/third_party/utf/rune.c
function charntorune (line 65) | int
function chartorune (line 168) | int
function isvalidcharntorune (line 246) | int
function runetochar (line 252) | int
function runelen (line 309) | int
function runenlen (line 317) | int
function fullrune (line 340) | int
FILE: model_zoo/models/syntaxnet/third_party/utf/runestrcat.c
function Rune (line 19) | Rune*
FILE: model_zoo/models/syntaxnet/third_party/utf/runestrchr.c
function Rune (line 19) | const
FILE: model_zoo/models/syntaxnet/third_party/utf/runestrcmp.c
function runestrcmp (line 19) | int
FILE: model_zoo/models/syntaxnet/third_party/utf/runestrcpy.c
function Rune (line 19) | Rune*
FILE: model_zoo/models/syntaxnet/third_party/utf/runestrdup.c
function Rune (line 20) | Rune*
FILE: model_zoo/models/syntaxnet/third_party/utf/runestrecpy.c
function Rune (line 19) | Rune*
FILE: model_zoo/models/syntaxnet/third_party/utf/runestrlen.c
function runestrlen (line 19) | long
FILE: model_zoo/models/syntaxnet/third_party/utf/runestrncat.c
function Rune (line 19) | Rune*
FILE: model_zoo/models/syntaxnet/third_party/utf/runestrncmp.c
function runestrncmp (line 19) | int
FILE: model_zoo/models/syntaxnet/third_party/utf/runestrncpy.c
function Rune (line 19) | Rune*
FILE: model_zoo/models/syntaxnet/third_party/utf/runestrrchr.c
function Rune (line 19) | const
FILE: model_zoo/models/syntaxnet/third_party/utf/runestrstr.c
function Rune (line 23) | const
FILE: model_zoo/models/syntaxnet/third_party/utf/runetype.c
function Rune (line 17) | static
function isideographicrune (line 59) | int
FILE: model_zoo/models/syntaxnet/third_party/utf/runetypebody.c
function isspacerune (line 17) | int
function isdigitrune (line 73) | int
function isalpharune (line 576) | int
function isupperrune (line 748) | int
function islowerrune (line 931) | int
function istitlerune (line 1062) | int
function Rune (line 1250) | Rune
function Rune (line 1436) | Rune
function Rune (line 1622) | Rune
FILE: model_zoo/models/syntaxnet/third_party/utf/utf.h
type Rune (line 19) | typedef signed int Rune;
FILE: model_zoo/models/syntaxnet/third_party/utf/utfdef.h
type uchar (line 8) | typedef unsigned char uchar;
type ushort (line 9) | typedef unsigned short ushort;
type uint (line 10) | typedef unsigned int uint;
type ulong (line 11) | typedef unsigned long ulong;
FILE: model_zoo/models/syntaxnet/third_party/utf/utflen.c
function utflen (line 19) | int
FILE: model_zoo/models/syntaxnet/third_party/utf/utfnlen.c
function utfnlen (line 19) | int
FILE: model_zoo/models/syntaxnet/util/utf8/gtest_main.cc
function main (line 23) | int main(int argc, char **argv) {
FILE: model_zoo/models/syntaxnet/util/utf8/unicodetext.cc
function CodepointDistance (line 30) | static int CodepointDistance(const char* start, const char* end) {
function CodepointCount (line 39) | static int CodepointCount(const char* utf8, int len) {
function distance (line 43) | UnicodeText::const_iterator::difference_type
function ConvertToInterchangeValid (line 51) | static int ConvertToInterchangeValid(char* start, int len) {
function string (line 169) | string UnicodeText::Repr::DebugString() const {
function string (line 198) | string UnicodeText::UTF8Substring(const const_iterator& first,
function UnicodeText (line 207) | UnicodeText& UnicodeText::operator=(const UnicodeText& src) {
function UnicodeText (line 214) | UnicodeText& UnicodeText::Copy(const UnicodeText& src) {
function UnicodeText (line 219) | UnicodeText& UnicodeText::CopyUTF8(const char* buffer, int byte_length) {
function UnicodeText (line 228) | UnicodeText& UnicodeText::UnsafeCopyUTF8(const char* buffer,
function UnicodeText (line 236) | UnicodeText& UnicodeText::TakeOwnershipOfUTF8(char* buffer,
function UnicodeText (line 247) | UnicodeText& UnicodeText::UnsafeTakeOwnershipOfUTF8(char* buffer,
function UnicodeText (line 256) | UnicodeText& UnicodeText::PointToUTF8(const char* buffer, int byte_lengt...
function UnicodeText (line 267) | UnicodeText& UnicodeText::UnsafePointToUTF8(const char* buffer,
function UnicodeText (line 273) | UnicodeText& UnicodeText::PointTo(const UnicodeText& src) {
function UnicodeText (line 278) | UnicodeText& UnicodeText::PointTo(const const_iterator &first,
function UnicodeText (line 287) | UnicodeText& UnicodeText::append(const UnicodeText& u) {
function UnicodeText (line 292) | UnicodeText& UnicodeText::append(const const_iterator& first,
function UnicodeText (line 299) | UnicodeText& UnicodeText::UnsafeAppendUTF8(const char* utf8, int len) {
function string (line 381) | string UnicodeText::DebugString() const {
function char32 (line 421) | char32 UnicodeText::const_iterator::operator*() const {
function string (line 468) | string UnicodeText::const_iterator::get_utf8_string() const {
function string (line 495) | string UnicodeText::const_iterator::DebugString() const {
function string (line 502) | string CodepointString(const UnicodeText& t) {
FILE: model_zoo/models/syntaxnet/util/utf8/unicodetext.h
function class (line 116) | class UnicodeText {
function string (line 229) | string get_utf8_string() const;
function const_iterator (line 246) | const_iterator begin() const;
function string (line 261) | string get_utf8_string() const {
function HasReplacementChar (line 286) | bool HasReplacementChar() const;
function class (line 335) | class Repr { // A byte-string.
type pair (line 386) | typedef pair<UnicodeText::const_iterator,
function UnicodeTextRangeIsEmpty (line 389) | inline bool UnicodeTextRangeIsEmpty(const UnicodeTextRange& r) {
function UnicodeText (line 405) | inline UnicodeText MakeUnicodeTextAcceptingOwnership(
function UnicodeText (line 415) | inline UnicodeText MakeUnicodeTextWithoutAcceptingOwnership(
function UnicodeText (line 437) | inline UnicodeText UTF8ToUnicodeText(const char* utf8_buf, int len,
function UnicodeText (line 448) | inline UnicodeText UTF8ToUnicodeText(const string& utf_string, bool do_c...
function UnicodeText (line 452) | inline UnicodeText UTF8ToUnicodeText(const char* utf8_buf, int len) {
function UnicodeText (line 455) | inline UnicodeText UTF8ToUnicodeText(const string& utf8_string) {
function string (line 461) | inline string UnicodeTextToUTF8(const UnicodeText& t) {
FILE: model_zoo/models/syntaxnet/util/utf8/unicodetext_main.cc
function main (line 28) | int main(int argc, char** argv) {
FILE: model_zoo/models/syntaxnet/util/utf8/unicodetext_unittest.cc
class UnicodeTextTest (line 28) | class UnicodeTextTest : public testing::Test {
method UnicodeTextTest (line 30) | UnicodeTextTest() : empty_text_() {
function TEST (line 40) | TEST(UnicodeTextTest, Ownership) {
class IteratorTest (line 104) | class IteratorTest : public UnicodeTextTest {}
function TEST_F (line 106) | TEST_F(IteratorTest, Iterates) {
function TEST_F (line 121) | TEST_F(IteratorTest, Reverse) {
function TEST_F (line 136) | TEST_F(IteratorTest, MultiPass) {
function TEST_F (line 149) | TEST_F(IteratorTest, ReverseIterates) {
function TEST_F (line 165) | TEST_F(IteratorTest, Comparable) {
function TEST_F (line 177) | TEST_F(IteratorTest, Advance) {
function TEST_F (line 186) | TEST_F(IteratorTest, Distance) {
function TEST_F (line 201) | TEST_F(IteratorTest, Encode) {
function TEST_F (line 259) | TEST_F(IteratorTest, Decode) {
class OperatorTest (line 270) | class OperatorTest : public UnicodeTextTest {}
function TEST_F (line 272) | TEST_F(OperatorTest, Clear) {
function TEST_F (line 279) | TEST_F(OperatorTest, Empty) {
function TEST (line 286) | TEST(UnicodeTextTest, InterchangeValidity) {
class SubstringSearchTest (line 308) | class SubstringSearchTest : public UnicodeTextTest {}
FILE: model_zoo/models/syntaxnet/util/utf8/unilib.cc
type UniLib (line 24) | namespace UniLib {
function IsInterchangeValid (line 33) | bool IsInterchangeValid(char32 c) {
function SpanInterchangeValid (line 40) | int SpanInterchangeValid(const char* begin, int byte_length) {
FILE: model_zoo/models/syntaxnet/util/utf8/unilib.h
function namespace (line 41) | namespace UniLib {
FILE: model_zoo/models/syntaxnet/util/utf8/unilib_utf8_utils.h
function namespace (line 26) | namespace UniLib {
FILE: model_zoo/models/textsum/batch_reader.py
class Batcher (line 37) | class Batcher(object):
method __init__ (line 40) | def __init__(self, data_path, vocab, hps,
method NextBatch (line 83) | def NextBatch(self):
method _FillInputQueue (line 128) | def _FillInputQueue(self):
method _FillBucketInputQueue (line 197) | def _FillBucketInputQueue(self):
method _WatchThreads (line 213) | def _WatchThreads(self):
method _TextGenerator (line 241) | def _TextGenerator(self, example_gen):
method _GetExFeatureText (line 254) | def _GetExFeatureText(self, ex, key):
FILE: model_zoo/models/textsum/beam_search.py
class Hypothesis (line 30) | class Hypothesis(object):
method __init__ (line 33) | def __init__(self, tokens, log_prob, state):
method Extend (line 45) | def Extend(self, token, log_prob, new_state):
method latest_token (line 59) | def latest_token(self):
method __str__ (line 62) | def __str__(self):
class BeamSearch (line 67) | class BeamSearch(object):
method __init__ (line 70) | def __init__(self, model, beam_size, start_token, end_token, max_steps):
method BeamSearch (line 86) | def BeamSearch(self, sess, enc_inputs, enc_seqlen):
method _BestHyps (line 143) | def _BestHyps(self, hyps):
FILE: model_zoo/models/textsum/data.py
class Vocab (line 37) | class Vocab(object):
method __init__ (line 40) | def __init__(self, vocab_file, max_size):
method WordToId (line 59) | def WordToId(self, word):
method IdToWord (line 64) | def IdToWord(self, word_id):
method NumIds (line 69) | def NumIds(self):
function ExampleGen (line 73) | def ExampleGen(data_path, num_epochs=None):
function Pad (line 108) | def Pad(ids, pad_id, length):
function GetWordIds (line 129) | def GetWordIds(text, vocab, pad_len=None, pad_id=None):
function Ids2Words (line 155) | def Ids2Words(ids_list, vocab):
function SnippetGen (line 169) | def SnippetGen(text, start_tok, end_tok, inclusive=True):
function GetExFeatureText (line 195) | def GetExFeatureText(ex, key):
function ToSentences (line 199) | def ToSentences(paragraph, include_token=True):
FILE: model_zoo/models/textsum/data_convert_example.py
function _binary_to_text (line 22) | def _binary_to_text():
function _text_to_binary (line 41) | def _text_to_binary():
function main (line 56) | def main(unused_argv):
FILE: model_zoo/models/textsum/seq2seq_attention.py
function _RunningAvgLoss (line 68) | def _RunningAvgLoss(loss, running_avg_loss, summary_writer, step, decay=...
function _Train (line 82) | def _Train(model, data_batcher):
function _Eval (line 118) | def _Eval(model, data_batcher, vocab=None):
function main (line 160) | def main(unused_argv):
FILE: model_zoo/models/textsum/seq2seq_attention_decode.py
class DecodeIO (line 36) | class DecodeIO(object):
method __init__ (line 42) | def __init__(self, outdir):
method Write (line 50) | def Write(self, reference, decode):
method ResetFiles (line 64) | def ResetFiles(self):
class BSDecoder (line 75) | class BSDecoder(object):
method __init__ (line 78) | def __init__(self, model, batch_reader, hps, vocab):
method DecodeLoop (line 95) | def DecodeLoop(self):
method _Decode (line 105) | def _Decode(self, saver, sess):
method _DecodeBatch (line 146) | def _DecodeBatch(self, article, abstract, output_ids):
FILE: model_zoo/models/textsum/seq2seq_attention_model.py
function _extract_argmax_and_embed (line 32) | def _extract_argmax_and_embed(embedding, output_projection=None,
class Seq2SeqAttentionModel (line 61) | class Seq2SeqAttentionModel(object):
method __init__ (line 64) | def __init__(self, hps, vocab, num_gpus=0):
method run_train_step (line 70) | def run_train_step(self, sess, article_batch, abstract_batch, targets,
method run_eval_step (line 81) | def run_eval_step(self, sess, article_batch, abstract_batch, targets,
method run_decode_step (line 92) | def run_decode_step(self, sess, article_batch, abstract_batch, targets,
method _next_device (line 103) | def _next_device(self):
method _get_gpu (line 112) | def _get_gpu(self, gpu_id):
method _add_placeholders (line 117) | def _add_placeholders(self):
method _add_seq2seq (line 137) | def _add_seq2seq(self):
method _add_train_op (line 241) | def _add_train_op(self):
method encode_top_state (line 259) | def encode_top_state(self, sess, enc_inputs, enc_len):
method decode_topk (line 275) | def decode_topk(self, sess, latest_tokens, enc_top_states, dec_init_st...
method build_graph (line 293) | def build_graph(self):
FILE: model_zoo/models/textsum/seq2seq_lib.py
function sequence_loss_by_example (line 23) | def sequence_loss_by_example(inputs, targets, weights, loss_function,
function sampled_sequence_loss (line 59) | def sampled_sequence_loss(inputs, targets, weights, loss_function,
function linear (line 91) | def linear(args, output_size, bias, bias_start=0.0, scope=None):
FILE: model_zoo/models/transformer/spatial_transformer.py
function transformer (line 18) | def transformer(U, theta, out_size, name='SpatialTransformer', **kwargs):
function batch_transformer (line 183) | def batch_transformer(U, thetas, out_size, name='BatchSpatialTransformer'):
FILE: model_zoo/models/transformer/tf_utils.py
function conv2d (line 20) | def conv2d(x, n_filters,
function linear (line 69) | def linear(x, n_units, scope=None, stddev=0.02,
function weight_variable (line 97) | def weight_variable(shape):
function bias_variable (line 110) | def bias_variable(shape):
function dense_to_one_hot (line 122) | def dense_to_one_hot(labels, n_classes=2):
FILE: model_zoo/models/video_prediction/lstm_ops.py
function init_state (line 24) | def init_state(inputs,
function basic_conv_lstm_cell (line 56) | def basic_conv_lstm_cell(inputs,
FILE: model_zoo/models/video_prediction/prediction_input.py
function build_tfrecord_input (line 42) | def build_tfrecord_input(training=True):
FILE: model_zoo/models/video_prediction/prediction_model.py
function construct_model (line 32) | def construct_model(images,
function stp_transformation (line 227) | def stp_transformation(prev_image, stp_input, num_masks):
function cdna_transformation (line 252) | def cdna_transformation(prev_image, cdna_input, num_masks, color_channels):
function dna_transformation (line 296) | def dna_transformation(prev_image, dna_input):
function scheduled_sample (line 327) | def scheduled_sample(ground_truth_x, generated_x, batch_size, num_ground...
FILE: model_zoo/models/video_prediction/prediction_train.py
function peak_signal_to_noise_ratio (line 75) | def peak_signal_to_noise_ratio(true, pred):
function mean_squared_error (line 87) | def mean_squared_error(true, pred):
class Model (line 99) | class Model(object):
method __init__ (line 101) | def __init__(self,
function main (lin
Condensed preview — 438 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (5,609K chars).
[
{
"path": "Images_for_readme/README.md",
"chars": 1,
"preview": "\n"
},
{
"path": "LICENSE",
"chars": 1060,
"preview": "MIT License\n\nCopyright (c) 2018 陈潇凯\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof thi"
},
{
"path": "README.md",
"chars": 7714,
"preview": "# Action Recognition Zoo\nCodes for popular action recognition models, written based on pytorch, verified on the [somethi"
},
{
"path": "average_scores.py",
"chars": 854,
"preview": "# @Author : Sky chen\n# @Email : dzhchxk@126.com\n# @Personal homepage : https://coderskychen.cn\n\nimport numpy as np\ni"
},
{
"path": "dataset.py",
"chars": 17670,
"preview": "# @Author : Sky chen\n# @Email : dzhchxk@126.com\n# @Personal homepage : https://coderskychen.cn\n\nimport torch.utils.d"
},
{
"path": "main.py",
"chars": 18540,
"preview": "# @Author : Sky chen\n# @Email : dzhchxk@126.com\n# @Personal homepage : https://coderskychen.cn\n\ntry:\n import tens"
},
{
"path": "model_zoo/LICENSE",
"chars": 1922,
"preview": "Copyright (c) 2017 LIP6 Lab\n\nPermission is hereby granted, free of charge, to any person obtaining\na copy of this softwa"
},
{
"path": "model_zoo/README.md",
"chars": 2057,
"preview": "# Tensorflow Model Zoo for Torch7 and PyTorch\n\nThis is a porting of tensorflow pretrained models made by [Remi Cadene](h"
},
{
"path": "model_zoo/__init__.py",
"chars": 175,
"preview": "from .inceptionresnetv2.pytorch_load import inceptionresnetv2\nfrom .inceptionv4.pytorch_load import inceptionv4\nfrom .bn"
},
{
"path": "model_zoo/bninception/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "model_zoo/bninception/bn_inception.yaml",
"chars": 28653,
"preview": "inputs: []\nlayers:\n- attrs: {kernel_size: 7, num_output: 64, pad: 3, stride: 2}\n expr: conv1_7x7_s2<=Convolution<=data\n"
},
{
"path": "model_zoo/bninception/caffe_pb2.py",
"chars": 245732,
"preview": "# Generated by the protocol buffer compiler. DO NOT EDIT!\n# source: caffe.proto\n\nfrom google.protobuf.internal import e"
},
{
"path": "model_zoo/bninception/inceptionv3.yaml",
"chars": 40415,
"preview": "inputs: []\nlayers:\n- attrs: {kernel_h: 3, kernel_w: 3, num_output: 32, pad_h: 0, pad_w: 0, stride_h: 2,\n stride_w: 2}"
},
{
"path": "model_zoo/bninception/layer_factory.py",
"chars": 2525,
"preview": "import torch\nfrom torch import nn\n\n\nLAYER_BUILDER_DICT=dict()\n\n\ndef parse_expr(expr):\n parts = expr.split('<=')\n r"
},
{
"path": "model_zoo/bninception/parse_caffe.py",
"chars": 5363,
"preview": "#!/usr/bin/env python\n\nimport argparse\n\nparser = argparse.ArgumentParser(description=\"Convert a Caffe model and its lear"
},
{
"path": "model_zoo/bninception/pytorch_load.py",
"chars": 2801,
"preview": "import torch\nfrom torch import nn\nfrom .layer_factory import get_basic_layer, parse_expr\nimport torch.utils.model_zoo as"
},
{
"path": "model_zoo/inceptionresnetv2/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "model_zoo/inceptionresnetv2/pytorch_load.py",
"chars": 21330,
"preview": "import torch\nimport torch.nn as nn\nimport torch.utils.model_zoo as model_zoo\nimport os\nimport sys\n\nmodel_urls = {\n 'i"
},
{
"path": "model_zoo/inceptionresnetv2/tensorflow_dump.py",
"chars": 9834,
"preview": "# python3\n\n# TensorBoard\n# python ~/.local/lib/python3.5/site-packages/tensorflow/tensorboard/tensorboard.py --logdir=lo"
},
{
"path": "model_zoo/inceptionresnetv2/torch_load.lua",
"chars": 18119,
"preview": "require 'nn'\nlocal hdf5 = require 'hdf5'\ntorch.setdefaulttensortype('torch.FloatTensor')\nrequire 'image'\n\nlocal function"
},
{
"path": "model_zoo/inceptionv4/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "model_zoo/inceptionv4/pytorch_load.py",
"chars": 17252,
"preview": "import torch\nimport torch.nn as nn\nimport torch.utils.model_zoo as model_zoo\nimport os\nimport sys\n\nmodel_urls = {\n 'i"
},
{
"path": "model_zoo/inceptionv4/tensorflow_dump.py",
"chars": 8519,
"preview": "# python3\n\n# TensorBoard\n# python3 ~/.local/lib/python3.5/site-packages/tensorflow/tensorboard/tensorboard.py --logdir=l"
},
{
"path": "model_zoo/inceptionv4/torch_load.lua",
"chars": 18144,
"preview": "require 'nn'\nlocal hdf5 = require 'hdf5'\ntorch.setdefaulttensortype('torch.FloatTensor')\nrequire 'image'\n\nlocal function"
},
{
"path": "model_zoo/models/.github/ISSUE_TEMPLATE.md",
"chars": 88,
"preview": "## Please let us know which model this issue is about (specify the top-level directory)\n"
},
{
"path": "model_zoo/models/.gitignore",
"chars": 1045,
"preview": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packagi"
},
{
"path": "model_zoo/models/.gitmodules",
"chars": 106,
"preview": "[submodule \"tensorflow\"]\n\tpath = syntaxnet/tensorflow\n\turl = https://github.com/tensorflow/tensorflow.git\n"
},
{
"path": "model_zoo/models/AUTHORS",
"chars": 337,
"preview": "# This is the official list of authors for copyright purposes.\n# This file is distinct from the CONTRIBUTORS files.\n# Se"
},
{
"path": "model_zoo/models/CONTRIBUTING.md",
"chars": 1292,
"preview": "# Contributing guidelines\n\nIf you have created a model and would like to publish it here, please send us a\npull request."
},
{
"path": "model_zoo/models/LICENSE",
"chars": 11405,
"preview": "Copyright 2016 The TensorFlow Authors. All rights reserved.\n\n Apache License\n "
},
{
"path": "model_zoo/models/README.md",
"chars": 1235,
"preview": "# TensorFlow Models\n\nThis repository contains machine learning models implemented in\n[TensorFlow](https://tensorflow.org"
},
{
"path": "model_zoo/models/WORKSPACE",
"chars": 0,
"preview": ""
},
{
"path": "model_zoo/models/autoencoder/AdditiveGaussianNoiseAutoencoderRunner.py",
"chars": 1859,
"preview": "import numpy as np\n\nimport sklearn.preprocessing as prep\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnis"
},
{
"path": "model_zoo/models/autoencoder/AutoencoderRunner.py",
"chars": 1684,
"preview": "import numpy as np\n\nimport sklearn.preprocessing as prep\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnis"
},
{
"path": "model_zoo/models/autoencoder/MaskingNoiseAutoencoderRunner.py",
"chars": 1689,
"preview": "import numpy as np\n\nimport sklearn.preprocessing as prep\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnis"
},
{
"path": "model_zoo/models/autoencoder/Utils.py",
"chars": 359,
"preview": "import numpy as np\nimport tensorflow as tf\n\ndef xavier_init(fan_in, fan_out, constant = 1):\n low = -constant * np.sqr"
},
{
"path": "model_zoo/models/autoencoder/VariationalAutoencoderRunner.py",
"chars": 1677,
"preview": "import numpy as np\n\nimport sklearn.preprocessing as prep\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnis"
},
{
"path": "model_zoo/models/autoencoder/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "model_zoo/models/autoencoder/autoencoder_models/Autoencoder.py",
"chars": 2227,
"preview": "import tensorflow as tf\nimport numpy as np\nimport autoencoder.Utils\n\nclass Autoencoder(object):\n\n def __init__(self, "
},
{
"path": "model_zoo/models/autoencoder/autoencoder_models/DenoisingAutoencoder.py",
"chars": 5630,
"preview": "import tensorflow as tf\nimport numpy as np\nimport autoencoder.Utils\n\n\nclass AdditiveGaussianNoiseAutoencoder(object):\n "
},
{
"path": "model_zoo/models/autoencoder/autoencoder_models/VariationalAutoencoder.py",
"chars": 2980,
"preview": "import tensorflow as tf\nimport numpy as np\nimport autoencoder.Utils\n\nclass VariationalAutoencoder(object):\n\n def __in"
},
{
"path": "model_zoo/models/autoencoder/autoencoder_models/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "model_zoo/models/compression/README.md",
"chars": 3803,
"preview": "# Image Compression with Neural Networks\n\nThis is a [TensorFlow](http://www.tensorflow.org/) model for compressing and\nd"
},
{
"path": "model_zoo/models/compression/decoder.py",
"chars": 4661,
"preview": "#!/usr/bin/python\n#\n# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n# Licensed under the Apache License, V"
},
{
"path": "model_zoo/models/compression/encoder.py",
"chars": 3911,
"preview": "#!/usr/bin/python\n#\n# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License,"
},
{
"path": "model_zoo/models/compression/msssim.py",
"chars": 7961,
"preview": "#!/usr/bin/python\n#\n# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License,"
},
{
"path": "model_zoo/models/differential_privacy/README.md",
"chars": 1455,
"preview": "<font size=4><b>Deep Learning with Differential Privacy</b></font>\n\nOpen Sourced By: Xin Pan (xpan@google.com, github: p"
},
{
"path": "model_zoo/models/differential_privacy/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "model_zoo/models/differential_privacy/dp_sgd/README.md",
"chars": 3042,
"preview": "<font size=4><b>Deep Learning with Differential Privacy</b></font>\n\nAuthors:\nMartín Abadi, Andy Chu, Ian Goodfellow, H. "
},
{
"path": "model_zoo/models/differential_privacy/dp_sgd/dp_mnist/BUILD",
"chars": 486,
"preview": "package(default_visibility = [\":internal\"])\n\nlicenses([\"notice\"]) # Apache 2.0\n\nexports_files([\"LICENSE\"])\n\npackage_gro"
},
{
"path": "model_zoo/models/differential_privacy/dp_sgd/dp_mnist/dp_mnist.py",
"chars": 21095,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/differential_privacy/dp_sgd/dp_optimizer/BUILD",
"chars": 685,
"preview": "package(default_visibility = [\":internal\"])\n\nlicenses([\"notice\"]) # Apache 2.0\n\nexports_files([\"LICENSE\"])\n\npackage_gro"
},
{
"path": "model_zoo/models/differential_privacy/dp_sgd/dp_optimizer/dp_optimizer.py",
"chars": 9487,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/differential_privacy/dp_sgd/dp_optimizer/dp_pca.py",
"chars": 2596,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/differential_privacy/dp_sgd/dp_optimizer/sanitizer.py",
"chars": 4433,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/differential_privacy/dp_sgd/dp_optimizer/utils.py",
"chars": 10661,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/differential_privacy/dp_sgd/per_example_gradients/BUILD",
"chars": 339,
"preview": "package(default_visibility = [\":internal\"])\n\nlicenses([\"notice\"]) # Apache 2.0\n\nexports_files([\"LICENSE\"])\n\npackage_gro"
},
{
"path": "model_zoo/models/differential_privacy/dp_sgd/per_example_gradients/per_example_gradients.py",
"chars": 12247,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/differential_privacy/multiple_teachers/BUILD",
"chars": 1249,
"preview": "package(default_visibility = [\":internal\"])\n\nlicenses([\"notice\"]) # Apache 2.0\n\nexports_files([\"LICENSE\"])\n\npackage_gro"
},
{
"path": "model_zoo/models/differential_privacy/multiple_teachers/README.md",
"chars": 5689,
"preview": "# Learning private models with multiple teachers\n\nThis repository contains code to create a setup for learning privacy-p"
},
{
"path": "model_zoo/models/differential_privacy/multiple_teachers/aggregation.py",
"chars": 4912,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/differential_privacy/multiple_teachers/analysis.py",
"chars": 10948,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/differential_privacy/multiple_teachers/deep_cnn.py",
"chars": 21812,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/differential_privacy/multiple_teachers/input.py",
"chars": 14192,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/differential_privacy/multiple_teachers/metrics.py",
"chars": 1648,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/differential_privacy/multiple_teachers/train_student.py",
"chars": 9187,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/differential_privacy/multiple_teachers/train_student_mnist_250_lap_20_count_50_epochs_600.sh",
"chars": 475,
"preview": "# Be sure to clone https://github.com/openai/improved-gan\n# and add improved-gan/mnist_svhn_cifar10 to your PATH variabl"
},
{
"path": "model_zoo/models/differential_privacy/multiple_teachers/train_teachers.py",
"chars": 3965,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/differential_privacy/multiple_teachers/utils.py",
"chars": 1323,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/differential_privacy/privacy_accountant/python/BUILD",
"chars": 336,
"preview": "package(default_visibility = [\":internal\"])\n\nlicenses([\"notice\"]) # Apache 2.0\n\nexports_files([\"LICENSE\"])\n\npackage_gro"
},
{
"path": "model_zoo/models/differential_privacy/privacy_accountant/python/gaussian_moments.py",
"chars": 10367,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/differential_privacy/privacy_accountant/tf/BUILD",
"chars": 377,
"preview": "package(default_visibility = [\":internal\"])\n\nlicenses([\"notice\"]) # Apache 2.0\n\nexports_files([\"LICENSE\"])\n\npackage_gro"
},
{
"path": "model_zoo/models/differential_privacy/privacy_accountant/tf/accountant.py",
"chars": 17526,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/im2txt/.gitignore",
"chars": 100,
"preview": "/bazel-bin\n/bazel-ci_build-cache\n/bazel-genfiles\n/bazel-out\n/bazel-im2txt\n/bazel-testlogs\n/bazel-tf\n"
},
{
"path": "model_zoo/models/im2txt/README.md",
"chars": 13369,
"preview": "# Show and Tell: A Neural Image Caption Generator\n\nA TensorFlow implementation of the image-to-text model described in t"
},
{
"path": "model_zoo/models/im2txt/WORKSPACE",
"chars": 27,
"preview": "workspace(name = \"im2txt\")\n"
},
{
"path": "model_zoo/models/im2txt/im2txt/BUILD",
"chars": 1860,
"preview": "package(default_visibility = [\":internal\"])\n\nlicenses([\"notice\"]) # Apache 2.0\n\nexports_files([\"LICENSE\"])\n\npackage_gro"
},
{
"path": "model_zoo/models/im2txt/im2txt/configuration.py",
"chars": 3810,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/im2txt/im2txt/data/build_mscoco_data.py",
"chars": 17473,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/im2txt/im2txt/data/download_and_preprocess_mscoco.sh",
"chars": 2872,
"preview": "#!/bin/bash\n# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version"
},
{
"path": "model_zoo/models/im2txt/im2txt/evaluate.py",
"chars": 6756,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/im2txt/im2txt/inference_utils/BUILD",
"chars": 595,
"preview": "package(default_visibility = [\"//im2txt:internal\"])\n\nlicenses([\"notice\"]) # Apache 2.0\n\nexports_files([\"LICENSE\"])\n\npy_"
},
{
"path": "model_zoo/models/im2txt/im2txt/inference_utils/caption_generator.py",
"chars": 6936,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/im2txt/im2txt/inference_utils/caption_generator_test.py",
"chars": 5787,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/im2txt/im2txt/inference_utils/inference_wrapper_base.py",
"chars": 6548,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/im2txt/im2txt/inference_utils/vocabulary.py",
"chars": 2814,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/im2txt/im2txt/inference_wrapper.py",
"chars": 1866,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/im2txt/im2txt/ops/BUILD",
"chars": 585,
"preview": "package(default_visibility = [\"//im2txt:internal\"])\n\nlicenses([\"notice\"]) # Apache 2.0\n\nexports_files([\"LICENSE\"])\n\npy_"
},
{
"path": "model_zoo/models/im2txt/im2txt/ops/image_embedding.py",
"chars": 4141,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/im2txt/im2txt/ops/image_embedding_test.py",
"chars": 5519,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/im2txt/im2txt/ops/image_processing.py",
"chars": 4930,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/im2txt/im2txt/ops/inputs.py",
"chars": 7463,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/im2txt/im2txt/run_inference.py",
"chars": 3047,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/im2txt/im2txt/show_and_tell_model.py",
"chars": 13371,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/im2txt/im2txt/show_and_tell_model_test.py",
"chars": 6827,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/im2txt/im2txt/train.py",
"chars": 4250,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/inception/.gitignore",
"chars": 103,
"preview": "/bazel-bin\n/bazel-ci_build-cache\n/bazel-genfiles\n/bazel-out\n/bazel-inception\n/bazel-testlogs\n/bazel-tf\n"
},
{
"path": "model_zoo/models/inception/README.md",
"chars": 37300,
"preview": "# Inception in TensorFlow\n\n[ImageNet](http://www.image-net.org/) is a common academic data set in machine\nlearning for t"
},
{
"path": "model_zoo/models/inception/WORKSPACE",
"chars": 30,
"preview": "workspace(name = \"inception\")\n"
},
{
"path": "model_zoo/models/inception/inception/BUILD",
"chars": 3360,
"preview": "# Description:\n# Example TensorFlow models for ImageNet.\n\npackage(default_visibility = [\":internal\"])\n\nlicenses([\"notice"
},
{
"path": "model_zoo/models/inception/inception/data/build_image_data.py",
"chars": 15491,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/inception/inception/data/build_imagenet_data.py",
"chars": 26199,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/inception/inception/data/download_and_preprocess_flowers.sh",
"chars": 3454,
"preview": "#!/bin/bash\n# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/inception/inception/data/download_and_preprocess_flowers_mac.sh",
"chars": 3455,
"preview": "#!/bin/bash\n# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/inception/inception/data/download_and_preprocess_imagenet.sh",
"chars": 3812,
"preview": "#!/bin/bash\n# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/inception/inception/data/download_imagenet.sh",
"chars": 3905,
"preview": "#!/bin/bash\n# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/inception/inception/data/imagenet_2012_validation_synset_labels.txt",
"chars": 500000,
"preview": "n01751748\nn09193705\nn02105855\nn04263257\nn03125729\nn01735189\nn02346627\nn02776631\nn03794056\nn02328150\nn01917289\nn02125311\n"
},
{
"path": "model_zoo/models/inception/inception/data/imagenet_lsvrc_2015_synsets.txt",
"chars": 10000,
"preview": "n01440764\nn01443537\nn01484850\nn01491361\nn01494475\nn01496331\nn01498041\nn01514668\nn01514859\nn01518878\nn01530575\nn01531178\n"
},
{
"path": "model_zoo/models/inception/inception/data/imagenet_metadata.txt",
"chars": 741401,
"preview": "n00004475\torganism, being\nn00005787\tbenthos\nn00006024\theterotroph\nn00006484\tcell\nn00007846\tperson, individual, someone, "
},
{
"path": "model_zoo/models/inception/inception/data/preprocess_imagenet_validation_data.py",
"chars": 3017,
"preview": "#!/usr/bin/python\n# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 ("
},
{
"path": "model_zoo/models/inception/inception/data/process_bounding_boxes.py",
"chars": 8792,
"preview": "#!/usr/bin/python\n# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 ("
},
{
"path": "model_zoo/models/inception/inception/dataset.py",
"chars": 3265,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/inception/inception/flowers_data.py",
"chars": 1859,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/inception/inception/flowers_eval.py",
"chars": 1293,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/inception/inception/flowers_train.py",
"chars": 1278,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/inception/inception/image_processing.py",
"chars": 20298,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/inception/inception/imagenet_data.py",
"chars": 2379,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/inception/inception/imagenet_distributed_train.py",
"chars": 2353,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/inception/inception/imagenet_eval.py",
"chars": 1484,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/inception/inception/imagenet_train.py",
"chars": 1282,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/inception/inception/inception_distributed_train.py",
"chars": 14024,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/inception/inception/inception_eval.py",
"chars": 6560,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/inception/inception/inception_model.py",
"chars": 5796,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/inception/inception/inception_train.py",
"chars": 15222,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/inception/inception/slim/BUILD",
"chars": 1689,
"preview": "# Description:\n# Contains the operations and nets for building TensorFlow-Slim models.\n\npackage(default_visibility = ["
},
{
"path": "model_zoo/models/inception/inception/slim/README.md",
"chars": 27238,
"preview": "# TensorFlow-Slim\n\nTF-Slim is a lightweight library for defining, training and evaluating models in\nTensorFlow. It enabl"
},
{
"path": "model_zoo/models/inception/inception/slim/collections_test.py",
"chars": 8100,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/inception/inception/slim/inception_model.py",
"chars": 17823,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/inception/inception/slim/inception_test.py",
"chars": 5491,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/inception/inception/slim/losses.py",
"chars": 6422,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/inception/inception/slim/losses_test.py",
"chars": 6414,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/inception/inception/slim/ops.py",
"chars": 18761,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/inception/inception/slim/ops_test.py",
"chars": 29534,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/inception/inception/slim/scopes.py",
"chars": 5612,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/inception/inception/slim/scopes_test.py",
"chars": 6009,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/inception/inception/slim/slim.py",
"chars": 1057,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/inception/inception/slim/variables.py",
"chars": 10319,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/inception/inception/slim/variables_test.py",
"chars": 16157,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/lm_1b/BUILD",
"chars": 393,
"preview": "package(default_visibility = [\":internal\"])\n\nlicenses([\"notice\"]) # Apache 2.0\n\nexports_files([\"LICENSE\"])\n\npackage_gro"
},
{
"path": "model_zoo/models/lm_1b/README.md",
"chars": 7816,
"preview": "<font size=4><b>Language Model on One Billion Word Benchmark</b></font>\n\n<b>Authors:</b>\n\nOriol Vinyals (vinyals@google."
},
{
"path": "model_zoo/models/lm_1b/data_utils.py",
"chars": 8407,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/lm_1b/lm_1b_eval.py",
"chars": 11389,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/namignizer/.gitignore",
"chars": 75,
"preview": "# Remove the pyc files\n*.pyc\n\n# Ignore the model and the data\nmodel/\ndata/\n"
},
{
"path": "model_zoo/models/namignizer/README.md",
"chars": 2798,
"preview": "# Namignizer\n\nUse a variation of the [PTB](https://www.tensorflow.org/versions/r0.8/tutorials/recurrent/index.html#recur"
},
{
"path": "model_zoo/models/namignizer/data_utils.py",
"chars": 4239,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/namignizer/model.py",
"chars": 4401,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/namignizer/names.py",
"chars": 9339,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/neural_gpu/README.md",
"chars": 1960,
"preview": "# NeuralGPU\nCode for the Neural GPU model as described\nin [[http://arxiv.org/abs/1511.08228]].\n\nRequirements:\n* TensorFl"
},
{
"path": "model_zoo/models/neural_gpu/data_utils.py",
"chars": 10229,
"preview": "# Copyright 2015 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/neural_gpu/neural_gpu.py",
"chars": 13300,
"preview": "# Copyright 2015 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/neural_gpu/neural_gpu_trainer.py",
"chars": 19650,
"preview": "# Copyright 2015 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/neural_programmer/README.md",
"chars": 510,
"preview": "Implementation of the Neural Programmer model described in https://openreview.net/pdf?id=ry2YOrcge\n\nDownload the data fr"
},
{
"path": "model_zoo/models/neural_programmer/data_utils.py",
"chars": 27691,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/neural_programmer/model.py",
"chars": 32977,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/neural_programmer/neural_programmer.py",
"chars": 9533,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/neural_programmer/nn_utils.py",
"chars": 2396,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/neural_programmer/parameters.py",
"chars": 3842,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/neural_programmer/wiki_data.py",
"chars": 20188,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/resnet/BUILD",
"chars": 579,
"preview": "package(default_visibility = [\":internal\"])\n\nlicenses([\"notice\"]) # Apache 2.0\n\nexports_files([\"LICENSE\"])\n\npackage_gro"
},
{
"path": "model_zoo/models/resnet/README.md",
"chars": 2889,
"preview": "<font size=4><b>Reproduced ResNet on CIFAR-10 and CIFAR-100 dataset.</b></font>\n\ncontact: panyx0718 (xpan@google.com)\n\n<"
},
{
"path": "model_zoo/models/resnet/cifar_input.py",
"chars": 4440,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/resnet/resnet_main.py",
"chars": 6816,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/resnet/resnet_model.py",
"chars": 11068,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/slim/BUILD",
"chars": 7122,
"preview": "# Description:\n# Contains files for loading, training and evaluating TF-Slim-based models.\n\npackage(default_visibility"
},
{
"path": "model_zoo/models/slim/README.md",
"chars": 17431,
"preview": "# TensorFlow-Slim image classification library\n\n[TF-slim](https://github.com/tensorflow/tensorflow/tree/master/tensorflo"
},
{
"path": "model_zoo/models/slim/datasets/__init__.py",
"chars": 1,
"preview": "\n"
},
{
"path": "model_zoo/models/slim/datasets/cifar10.py",
"chars": 3218,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/slim/datasets/dataset_factory.py",
"chars": 1918,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/slim/datasets/dataset_utils.py",
"chars": 4391,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/slim/datasets/download_and_convert_cifar10.py",
"chars": 6215,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/slim/datasets/download_and_convert_flowers.py",
"chars": 7199,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/slim/datasets/download_and_convert_mnist.py",
"chars": 7380,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/slim/datasets/flowers.py",
"chars": 3223,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/slim/datasets/imagenet.py",
"chars": 7105,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/slim/datasets/mnist.py",
"chars": 3245,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/slim/deployment/__init__.py",
"chars": 1,
"preview": "\n"
},
{
"path": "model_zoo/models/slim/deployment/model_deploy.py",
"chars": 24007,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/slim/deployment/model_deploy_test.py",
"chars": 23994,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/slim/download_and_convert_data.py",
"chars": 2306,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/slim/eval_image_classifier.py",
"chars": 6664,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/slim/nets/__init__.py",
"chars": 1,
"preview": "\n"
},
{
"path": "model_zoo/models/slim/nets/alexnet.py",
"chars": 5355,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/slim/nets/alexnet_test.py",
"chars": 5815,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/slim/nets/cifarnet.py",
"chars": 4439,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/slim/nets/inception.py",
"chars": 1614,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/slim/nets/inception_resnet_v2.py",
"chars": 12713,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/slim/nets/inception_resnet_v2_test.py",
"chars": 5706,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/slim/nets/inception_utils.py",
"chars": 2630,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/slim/nets/inception_v1.py",
"chars": 15194,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/slim/nets/inception_v1_test.py",
"chars": 8701,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/slim/nets/inception_v2.py",
"chars": 23633,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/slim/nets/inception_v2_test.py",
"chars": 10853,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/slim/nets/inception_v3.py",
"chars": 27070,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/slim/nets/inception_v3_test.py",
"chars": 12145,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/slim/nets/inception_v4.py",
"chars": 15524,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/slim/nets/inception_v4_test.py",
"chars": 9912,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/slim/nets/lenet.py",
"chars": 3499,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/slim/nets/nets_factory.py",
"chars": 4558,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
},
{
"path": "model_zoo/models/slim/nets/nets_factory_test.py",
"chars": 1663,
"preview": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# "
},
{
"path": "model_zoo/models/slim/nets/overfeat.py",
"chars": 5202,
"preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
}
]
// ... and 238 more files (download for full content)
About this extraction
This page contains the full source code of the coderSkyChen/Action_Recognition_Zoo GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 438 files (44.7 MB), approximately 1.3M tokens, and a symbol index with 2407 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.